Dataset record columns:
  source            string, 3 to 92 chars
  original_c        string, 26 to 2.25M chars
  no_omp_formatted  string, 0 to 2.25M chars
  omp_formatted     string, 0 to 2.25M chars

source: elemwise_binary_op.h

The three cells below hold the original_c, no_omp_formatted, and omp_formatted values for this record, in that order.
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file elemwise_binary_op.h * \brief Function definition of elementwise binary operators */ #ifndef MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_ #define MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_ #include <mxnet/operator_util.h> #include <mxnet/op_attr_types.h> #include <vector> #include <string> #include <utility> #include <typeinfo> #include <algorithm> #include "../mxnet_op.h" #include "../mshadow_op.h" #include "../../engine/openmp.h" #include "elemwise_unary_op.h" #include "../../common/utils.h" #include "./init_op.h" namespace mxnet { namespace op { /*! Gather binary operator functions into ElemwiseBinaryOp class */ class ElemwiseBinaryOp : public OpBase { public: /*! \brief For sparse, assume missing rvalue is 0 */ template<typename OP, int Req> struct MissingRValueOp { template<typename DType> MSHADOW_XINLINE static void Map(int i, DType *out, const DType *lhs) { KERNEL_ASSIGN(out[i], Req, OP::Map(lhs[i], DType(0))); } }; /*! \brief For sparse, assume missing lvalue is 0 */ template<typename OP, int Req> struct MissingLValueOp { template<typename DType> MSHADOW_XINLINE static void Map(int i, DType *out, const DType *rhs) { KERNEL_ASSIGN(out[i], Req, OP::Map(DType(0), rhs[i])); } }; private: /*! * \brief CSR operation requires temp space */ enum ResourceRequestType { kTempSpace }; /*! * \brief Fill contiguous dense output rows with value computed from 0 lhs and 0 rhs input * CPU-Only version */ template<typename DType, typename OP, typename xpu> static inline size_t FillDense(mshadow::Stream<xpu> *s, const size_t idx_l, const size_t idx_r, const OpReqType req, mshadow::Tensor<xpu, 2, DType> *out, const size_t iter_out) { const int index_out_min = static_cast<int>(std::min(idx_l, idx_r)); if (static_cast<size_t>(index_out_min) > iter_out) { const DType zero_input_val = OP::Map(DType(0), DType(0)); #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount()) for (int i = static_cast<int>(iter_out); i < index_out_min; ++i) { Fill<false>(s, (*out)[i], req, zero_input_val); } } return static_cast<size_t>(index_out_min); // MSVC wants OMP loops to always use 'int' } static inline bool IsSameArray(const NDArray& a1, const NDArray& a2) { return a1.var() == a2.var(); } /*! \brief Minimum of three */ static MSHADOW_XINLINE size_t minthree(const size_t a, const size_t b, const size_t c) { return a < b ? (a < c ? a : c) : (b < c ? 
b : c); } template<typename xpu, typename LOP, typename ROP, typename DType> static void BackwardUseNone_(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { using namespace mxnet_op; Stream<xpu> *s = ctx.get_stream<xpu>(); const int size = static_cast<int>((outputs[0].Size() + DataType<DType>::kLanes - 1) / DataType<DType>::kLanes); const DType *ograd_dptr = inputs[0].dptr<DType>(); if (std::is_same<LOP, mshadow_op::identity>::value && req[0] == kWriteInplace) { CHECK_EQ(ograd_dptr, outputs[0].dptr<DType>()); } else if (req[0] != kNullOp) { DType *lgrad_dptr = outputs[0].dptr<DType>(); MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { Kernel<mxnet_op::op_with_req<LOP, Req>, xpu>::Launch(s, size, lgrad_dptr, ograd_dptr); }); } if (std::is_same<ROP, mshadow_op::identity>::value && req[1] == kWriteInplace) { CHECK_EQ(ograd_dptr, outputs[1].dptr<DType>()); } else if (req[1] != kNullOp) { DType *rgrad_dptr = outputs[1].dptr<DType>(); MXNET_ASSIGN_REQ_SWITCH(req[1], Req, { Kernel<mxnet_op::op_with_req<ROP, Req>, xpu>::Launch(s, size, rgrad_dptr, ograd_dptr); }); } } template<typename xpu, typename LOP, typename ROP, typename DType> static void BackwardUseIn_(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { DCHECK_EQ(outputs.size(), 2U); DCHECK_EQ(inputs.size(), 3U); mxnet_op::Stream<xpu> *s = ctx.get_stream<xpu>(); const DType *ograd_dptr = inputs[0].dptr<DType>(); const DType *lhs_dptr = inputs[1].dptr<DType>(); const DType *rhs_dptr = inputs[2].dptr<DType>(); MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { const int size = static_cast<int>( (outputs[0].Size() + mxnet_op::DataType<DType>::kLanes - 1) / mxnet_op::DataType<DType>::kLanes); DType * lgrad_dptr = outputs[0].dptr<DType>(); mxnet_op::Kernel<mxnet_op::op_with_req<mxnet_op::backward_grad<LOP>, Req>, xpu>::Launch( s, size, lgrad_dptr, ograd_dptr, lhs_dptr, rhs_dptr);}); MXNET_ASSIGN_REQ_SWITCH(req[1], Req, { const int size = static_cast<int>( (outputs[1].Size() + mxnet_op::DataType<DType>::kLanes - 1) / mxnet_op::DataType<DType>::kLanes); DType * rgrad_dptr = outputs[1].dptr<DType>(); mxnet_op::Kernel<mxnet_op::op_with_req<mxnet_op::backward_grad<ROP>, Req>, xpu>::Launch( s, size, rgrad_dptr, ograd_dptr, lhs_dptr, rhs_dptr);}); } template< typename xpu, typename LOP, typename ROP, typename DType, bool in0_ok_dense = false, bool in1_ok_dense = false, bool in2_ok_dense = false, typename BackupCompute> static inline void BackwardUseInEx_(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<NDArray> &inputs, const std::vector<OpReqType> &req, const std::vector<NDArray> &outputs, BackupCompute backup_compute) { mshadow::Stream<xpu> *s = ctx.get_stream<xpu>(); // lhs grad if (req[0] != kNullOp) { // RspRspOp can handle dense outputs so long as OP(0, 0) == 0 MSHADOW_IDX_TYPE_SWITCH(inputs[1].aux_type(rowsparse::kIdx), IType, { RspRspOp<DType, IType, LOP>( s, attrs, ctx, inputs[1], inputs[2], req[0], outputs[0], false, false, false, false); }); // lhs in-place MSHADOW_IDX_TYPE_SWITCH(inputs[0].aux_type(rowsparse::kIdx), IType, { RspRspOp<DType, IType, mshadow::op::mul>( s, attrs, ctx, outputs[0], inputs[0], req[0], outputs[0], false, false, true, false); }); } // rhs grad if (req[1] != kNullOp) { MSHADOW_IDX_TYPE_SWITCH(inputs[1].aux_type(rowsparse::kIdx), IType, { RspRspOp<DType, IType, ROP>( s, attrs, ctx, inputs[1], inputs[2], 
req[1], outputs[1], false, false, false, false); }); // rhs in-place MSHADOW_IDX_TYPE_SWITCH(inputs[0].aux_type(rowsparse::kIdx), IType, { RspRspOp<DType, IType, mshadow::op::mul>( s, attrs, ctx, inputs[0], outputs[1], req[1], outputs[1], false, false, true, false); }); } } protected: /*! \brief Binary op handling for lhr/rhs: RspDns, RspRsp, DnsRsp, or RspRsp->Dns result */ template<typename DType, typename IType, typename OP> static void RspRspOp(mshadow::Stream<cpu> *s, const nnvm::NodeAttrs &attrs, const OpContext &ctx, const NDArray &lhs, const NDArray &rhs, OpReqType req, const NDArray &output, bool lhs_may_be_dense, bool rhs_may_be_dense, bool allow_inplace, bool scatter); /*! \brief CSR -op- CSR binary operator for non-canonical NDArray */ template<typename DType, typename IType, typename CType, typename OP> static inline void CsrCsrOp(mshadow::Stream<cpu> *s, const nnvm::NodeAttrs &attrs, const OpContext &ctx, const NDArray &lhs, const NDArray &rhs, OpReqType req, const NDArray &output); public: /*! * \brief Rsp-op-Rsp operation which produces a dense result * \param attrs Attributes * \param dev_mask Device mask * \param dispatch_mode Dispatch Mode * \param in_attrs Input storage attributes * \param out_attrs Output storage attributes * \return true if handled */ static bool SparseSparseWithDenseResult(const nnvm::NodeAttrs& attrs, int dev_mask, DispatchMode* dispatch_mode, std::vector<int> *in_attrs, std::vector<int> *out_attrs); /*! * \brief Allow one of the inputs to be dense and still produce a sparse output * \param attrs Attributes * \param dev_mask Device mask * \param dispatch_mode Dispatch Mode * \param in_attrs Input storage attributes * \param out_attrs Output storage attributes * \return true if handled */ template<bool lhs_dense_ok = true, bool rhs_dense_ok = true> static bool AllowLRDenseInputWithSparseOutputStorageType(const nnvm::NodeAttrs& attrs, int dev_mask, DispatchMode* dispatch_mode, std::vector<int> *in_attrs, std::vector<int> *out_attrs) { CHECK_EQ(in_attrs->size(), 2U) << " in operator " << attrs.name; CHECK_EQ(out_attrs->size(), 1U) << " in operator " << attrs.name; const auto& lhs_stype = in_attrs->at(0); const auto& rhs_stype = in_attrs->at(1); auto& out_stype = out_attrs->at(0); bool dispatched = false; const bool invalid_ctx = dev_mask != mshadow::cpu::kDevMask; const auto dispatch_ex = invalid_ctx ? 
DispatchMode::kFComputeFallback : DispatchMode::kFComputeEx; if (!dispatched && lhs_stype == kDefaultStorage && rhs_stype == kDefaultStorage) { // dns, dns -> dns dispatched = storage_type_assign(&out_stype, kDefaultStorage, dispatch_mode, DispatchMode::kFCompute); } if (!dispatched) { if ((lhs_stype == kRowSparseStorage && rhs_stype == kRowSparseStorage) || (rhs_dense_ok && lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage) || (lhs_dense_ok && lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage)) { // rsp, rsp -> rsp // rsp, dns -> rsp // dns, rsp -> rsp dispatched = storage_type_assign(&out_stype, kRowSparseStorage, dispatch_mode, dispatch_ex); } else if (lhs_stype == kCSRStorage && rhs_stype == kCSRStorage) { // csr, csr -> csr dispatched = storage_type_assign(&out_stype, kCSRStorage, dispatch_mode, dispatch_ex); } else if ((lhs_stype == kCSRStorage && rhs_dense_ok) || (rhs_stype == kCSRStorage && lhs_dense_ok)) { // csr, dns -> csr // dns, csr -> csr dispatched = storage_type_assign(&out_stype, kCSRStorage, dispatch_mode, DispatchMode::kFComputeFallback); } } if (!dispatched) { dispatch_fallback(out_attrs, dispatch_mode); } if (*dispatch_mode == DispatchMode::kFComputeFallback) { LogStorageFallback(attrs, dev_mask, in_attrs, out_attrs); } return true; } /*! * \brief Backward pass computing input gradient using forward inputs * \param attrs Attributes * \param dev_mask Device mask * \param dispatch_mode Dispatch Mode * \param in_attrs Input storage attributes * \param out_attrs Output storage attributes * \return true if handled */ static bool BackwardUseInStorageType(const nnvm::NodeAttrs& attrs, int dev_mask, DispatchMode* dispatch_mode, std::vector<int> *in_attrs, std::vector<int> *out_attrs); template<typename xpu, typename OP> static void Compute(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { using namespace mxnet_op; if (req[0] != kNullOp) { Stream<xpu> *s = ctx.get_stream<xpu>(); CHECK_EQ(inputs.size(), 2U); CHECK_EQ(outputs.size(), 1U); MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size()) + DataType<DType>::kLanes - 1) / DataType<DType>::kLanes; Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size, outputs[0].dptr<DType>(), inputs[0].dptr<DType>(), inputs[1].dptr<DType>()); }); }); } } template<typename xpu, typename OP> static void ComputeWithHalf2(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { using namespace mxnet_op; if (req[0] != kNullOp) { Stream<xpu> *s = ctx.get_stream<xpu>(); CHECK_EQ(inputs.size(), 2U); CHECK_EQ(outputs.size(), 1U); MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { MSHADOW_TYPE_SWITCH_WITH_HALF2(outputs[0].type_flag_, DType, { const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size()) + DataType<DType>::kLanes - 1) / DataType<DType>::kLanes; Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size, outputs[0].dptr<DType>(), inputs[0].dptr<DType>(), inputs[1].dptr<DType>()); }); }); } } template<typename xpu, typename OP> static void ComputeEx(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<NDArray> &inputs, const std::vector<OpReqType> &req, const std::vector<NDArray> &outputs) { CHECK_EQ(inputs.size(), 2); CHECK_EQ(outputs.size(), 1); if 
(req[0] == kNullOp) return; const auto lhs_stype = inputs[0].storage_type(); const auto out_stype = outputs[0].storage_type(); mshadow::Stream<xpu> *s = ctx.get_stream<xpu>(); if ((common::ContainsOnlyStorage(inputs, kRowSparseStorage)) && (out_stype == kRowSparseStorage || out_stype == kDefaultStorage)) { // rsp, rsp -> rsp // rsp, rsp -> dns const int rsp_input_idx = lhs_stype == kRowSparseStorage ? 0 : 1; MSHADOW_IDX_TYPE_SWITCH(inputs[rsp_input_idx].aux_type(rowsparse::kIdx), IType, { MSHADOW_TYPE_SWITCH(outputs[0].dtype(), DType, { RspRspOp<DType, IType, OP>( s, attrs, ctx, inputs[0], inputs[1], req[0], outputs[0], false, false, false, false); }); }); } else if (common::ContainsOnlyStorage(inputs, kCSRStorage) && out_stype == kCSRStorage) { // csr, csr -> csr MSHADOW_IDX_TYPE_SWITCH(inputs[0].aux_type(csr::kIdx), IType, { MSHADOW_IDX_TYPE_SWITCH(inputs[0].aux_type(csr::kIndPtr), CType, { MSHADOW_TYPE_SWITCH(outputs[0].dtype(), DType, { CsrCsrOp<DType, IType, CType, OP>( s, attrs, ctx, inputs[0], inputs[1], req[0], outputs[0]); }); }); }); } else { LOG(FATAL) << "Not implemented: " << operator_string(attrs, ctx, inputs, req, outputs); } } /*! \brief ComputeEx allowing dense lvalue and/or rvalue */ template<typename xpu, typename OP, bool lhs_may_be_dense, bool rhs_may_be_dense> static void ComputeDnsLRValueEx(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<NDArray> &inputs, const std::vector<OpReqType> &req, const std::vector<NDArray> &outputs) { using namespace mshadow; using namespace mshadow::expr; CHECK_EQ(inputs.size(), 2); CHECK_EQ(outputs.size(), 1); if (req[0] == kNullOp) return; const auto lhs_stype = inputs[0].storage_type(); const auto rhs_stype = inputs[1].storage_type(); const auto out_stype = outputs[0].storage_type(); if ((out_stype == kRowSparseStorage || out_stype == kDefaultStorage) && ((lhs_stype == kRowSparseStorage && rhs_stype == kRowSparseStorage) || (lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage) || (lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage)) && lhs_may_be_dense && rhs_may_be_dense) { // rsp, rsp -> rsp // rsp, rsp -> dns // rsp, dns -> rsp // dns, rsp -> rsp // More than once dense not allowed (this will be checked in RspRspOp): // rsp, dns -> dns <-- NOT ALLOWED // dns, rsp -> dns <-- NOT ALLOWED mshadow::Stream<xpu> *s = ctx.get_stream<xpu>(); MSHADOW_TYPE_SWITCH(outputs[0].dtype(), DType, { MSHADOW_IDX_TYPE_SWITCH(outputs[0].aux_type(rowsparse::kIdx), IType, { RspRspOp<DType, IType, OP>( s, attrs, ctx, inputs[0], inputs[1], req[0], outputs[0], lhs_may_be_dense, rhs_may_be_dense, false, false); }); }); } else if (lhs_stype == kCSRStorage && rhs_stype == kCSRStorage) { ComputeEx<xpu, OP>(attrs, ctx, inputs, req, outputs); } else { LOG(FATAL) << "Not implemented: " << operator_string(attrs, ctx, inputs, req, outputs); } } template<typename xpu, typename LOP, typename ROP> static inline void BackwardUseNone(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { BackwardUseNone_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs); }); } template<typename xpu, typename LOP, typename ROP> static inline void BackwardUseNoneWithHalf2(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { MSHADOW_TYPE_SWITCH_WITH_HALF2(outputs[0].type_flag_, DType, { 
BackwardUseNone_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs); }); } template<typename xpu, typename LOP, typename ROP> static inline void BackwardUseNoneEx(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<NDArray> &inputs, const std::vector<OpReqType> &req, const std::vector<NDArray> &outputs) { CHECK_EQ(inputs.size(), 1U); // output grad CHECK_EQ(outputs.size(), 2U); // lhs input grad, rhs input grad const auto in_stype = inputs[0].storage_type(); const auto lhs_stype = outputs[0].storage_type(); const auto rhs_stype = outputs[1].storage_type(); // lhs grad if (req[0] != kNullOp) { if (in_stype == lhs_stype && (in_stype == kRowSparseStorage || in_stype == kCSRStorage)) { CHECK_EQ(outputs[0].storage_type(), in_stype); // rsp -> rsp, _. op requires 0-input returns 0-output DCHECK_LT(fabs(static_cast<float>(LOP::Map(0))), 1e-5f); UnaryOp::ComputeEx<xpu, LOP>(attrs, ctx, inputs, req, {outputs[0]}); } else { LOG(FATAL) << "Not implemented: " << operator_string(attrs, ctx, inputs, req, outputs); } } // rhs grad if (req[1] != kNullOp) { if (in_stype == rhs_stype && (in_stype == kRowSparseStorage || in_stype == kCSRStorage)) { CHECK_EQ(outputs[0].storage_type(), in_stype); // rsp -> _, rsp. op requires 0-input returns 0-output DCHECK_LT(fabs(static_cast<float>(ROP::Map(0))), 1e-5f); UnaryOp::ComputeEx<xpu, ROP>(attrs, ctx, inputs, req, {outputs[1]}); } else { LOG(FATAL) << "Not implemented: " << operator_string(attrs, ctx, inputs, req, outputs); } } } template<typename xpu, typename LOP, typename ROP> static inline void BackwardUseIn(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { BackwardUseIn_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs); }); } template<typename xpu, typename LOP, typename ROP> static inline void BackwardUseInWithHalf2(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { MSHADOW_TYPE_SWITCH_WITH_HALF2(outputs[0].type_flag_, DType, { BackwardUseIn_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs); }); } template< typename xpu, typename LOP, typename ROP, bool in0_ok_dense = false, bool in1_ok_dense = false, bool in2_ok_dense = false> static inline void BackwardUseInEx(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<NDArray> &inputs, const std::vector<OpReqType> &req, const std::vector<NDArray> &outputs) { using namespace common; CHECK_EQ(inputs.size(), 3U); CHECK_EQ(outputs.size(), 2U); // lhs input grad, rhs input grad const auto lhs_grad_stype = outputs[0].storage_type(); const auto rhs_grad_stype = outputs[1].storage_type(); if (ContainsOnlyStorage(inputs, kRowSparseStorage) && (lhs_grad_stype == kDefaultStorage || lhs_grad_stype == kRowSparseStorage) && (rhs_grad_stype == kDefaultStorage || rhs_grad_stype == kRowSparseStorage)) { // rsp, rsp, rsp -> [dns, rsp], [dns, rsp] MSHADOW_TYPE_SWITCH(outputs[0].dtype(), DType, { BackwardUseInEx_<xpu, LOP, ROP, DType, in0_ok_dense, in1_ok_dense, in2_ok_dense>( attrs, ctx, inputs, req, outputs, BackwardUseIn<xpu, LOP, ROP>); }); } } }; // class ElemwiseBinaryOp /*! 
\brief Binary launch */ #define MXNET_OPERATOR_REGISTER_BINARY(name) \ NNVM_REGISTER_OP(name) \ .set_num_inputs(2) \ .set_num_outputs(1) \ .set_attr<nnvm::FListInputNames>("FListInputNames", \ [](const NodeAttrs& attrs) { \ return std::vector<std::string>{"lhs", "rhs"}; \ }) \ .set_attr<nnvm::FInferShape>("FInferShape", ElemwiseShape<2, 1>) \ .set_attr<nnvm::FInferType>("FInferType", ElemwiseType<2, 1>) \ .set_attr<nnvm::FInplaceOption>("FInplaceOption", \ [](const NodeAttrs& attrs){ \ return std::vector<std::pair<int, int> >{{0, 0}, {1, 0}}; \ }) \ .add_argument("lhs", "NDArray-or-Symbol", "first input") \ .add_argument("rhs", "NDArray-or-Symbol", "second input") /*! \brief Binary launch, with FComputeEx for csr and rsp available */ #define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU(__name$, __kernel$) \ MXNET_OPERATOR_REGISTER_BINARY(__name$) \ .set_attr<FInferStorageType>("FInferStorageType", \ ElemwiseStorageType<2, 1, true, true, true>) \ .set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \ .set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>) \ .set_attr<FResourceRequest>("FResourceRequest", /* For Sparse CSR */ \ [](const NodeAttrs& attrs) { \ return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};}) /*! \brief Binary launch, dense result * FInferStorageType attr is not set using this macro. * By default DefaultStorageType is used. */ #define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_DR(__name$, __kernel$) \ MXNET_OPERATOR_REGISTER_BINARY(__name$) \ .set_attr<FInferStorageType>("FInferStorageType", \ ElemwiseBinaryOp::SparseSparseWithDenseResult) \ .set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \ .set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>) } // namespace op } // namespace mxnet #endif // MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_
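The cell above is the original_c value: the full Apache-licensed header, including the #pragma omp parallel for num_threads(...) annotation on the row-fill loop inside FillDense. The two cells that follow differ from it only in formatting and in whether that pragma is present. Below is a minimal standalone sketch of that fill pattern, assuming illustrative names (fill_skipped_rows, zero_val) and plain std::vector rows instead of mshadow tensors; it is not MXNet code.

// Sketch of the FillDense pattern: dense output rows lying between the
// current write position and the next populated sparse row are filled with
// OP(0, 0); the pragma is what the omp_formatted variant parallelizes.
#include <algorithm>
#include <cstddef>
#include <vector>

using Row = std::vector<float>;

void fill_skipped_rows(std::vector<Row>* out,
                       std::size_t iter_out,  // next output row to write
                       std::size_t idx_l,     // next populated lhs row index
                       std::size_t idx_r,     // next populated rhs row index
                       float zero_val) {      // value of OP(0, 0)
  const int index_out_min = static_cast<int>(std::min(idx_l, idx_r));
  if (static_cast<std::size_t>(index_out_min) > iter_out) {
#pragma omp parallel for
    for (int i = static_cast<int>(iter_out); i < index_out_min; ++i) {
      std::fill((*out)[i].begin(), (*out)[i].end(), zero_val);
    }
  }
}

Compiled without OpenMP the pragma is ignored and the loop runs serially, which is exactly the behaviour of the no_omp_formatted cell that follows.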
/*! * \file elemwise_binary_op.h * \brief Function definition of elementwise binary operators */ #ifndef MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_ #define MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_ #include <mxnet/operator_util.h> #include <mxnet/op_attr_types.h> #include <vector> #include <string> #include <utility> #include <typeinfo> #include <algorithm> #include "../mxnet_op.h" #include "../mshadow_op.h" #include "../../engine/openmp.h" #include "elemwise_unary_op.h" #include "../../common/utils.h" #include "./init_op.h" namespace mxnet { namespace op { /*! Gather binary operator functions into ElemwiseBinaryOp class */ class ElemwiseBinaryOp : public OpBase { public: /*! \brief For sparse, assume missing rvalue is 0 */ template<typename OP, int Req> struct MissingRValueOp { template<typename DType> MSHADOW_XINLINE static void Map(int i, DType *out, const DType *lhs) { KERNEL_ASSIGN(out[i], Req, OP::Map(lhs[i], DType(0))); } }; /*! \brief For sparse, assume missing lvalue is 0 */ template<typename OP, int Req> struct MissingLValueOp { template<typename DType> MSHADOW_XINLINE static void Map(int i, DType *out, const DType *rhs) { KERNEL_ASSIGN(out[i], Req, OP::Map(DType(0), rhs[i])); } }; private: /*! * \brief CSR operation requires temp space */ enum ResourceRequestType { kTempSpace }; /*! * \brief Fill contiguous dense output rows with value computed from 0 lhs and 0 rhs input * CPU-Only version */ template<typename DType, typename OP, typename xpu> static inline size_t FillDense(mshadow::Stream<xpu> *s, const size_t idx_l, const size_t idx_r, const OpReqType req, mshadow::Tensor<xpu, 2, DType> *out, const size_t iter_out) { const int index_out_min = static_cast<int>(std::min(idx_l, idx_r)); if (static_cast<size_t>(index_out_min) > iter_out) { const DType zero_input_val = OP::Map(DType(0), DType(0)); for (int i = static_cast<int>(iter_out); i < index_out_min; ++i) { Fill<false>(s, (*out)[i], req, zero_input_val); } } return static_cast<size_t>(index_out_min); // MSVC wants OMP loops to always use 'int' } static inline bool IsSameArray(const NDArray& a1, const NDArray& a2) { return a1.var() == a2.var(); } /*! \brief Minimum of three */ static MSHADOW_XINLINE size_t minthree(const size_t a, const size_t b, const size_t c) { return a < b ? (a < c ? a : c) : (b < c ? 
b : c); } template<typename xpu, typename LOP, typename ROP, typename DType> static void BackwardUseNone_(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { using namespace mxnet_op; Stream<xpu> *s = ctx.get_stream<xpu>(); const int size = static_cast<int>((outputs[0].Size() + DataType<DType>::kLanes - 1) / DataType<DType>::kLanes); const DType *ograd_dptr = inputs[0].dptr<DType>(); if (std::is_same<LOP, mshadow_op::identity>::value && req[0] == kWriteInplace) { CHECK_EQ(ograd_dptr, outputs[0].dptr<DType>()); } else if (req[0] != kNullOp) { DType *lgrad_dptr = outputs[0].dptr<DType>(); MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { Kernel<mxnet_op::op_with_req<LOP, Req>, xpu>::Launch(s, size, lgrad_dptr, ograd_dptr); }); } if (std::is_same<ROP, mshadow_op::identity>::value && req[1] == kWriteInplace) { CHECK_EQ(ograd_dptr, outputs[1].dptr<DType>()); } else if (req[1] != kNullOp) { DType *rgrad_dptr = outputs[1].dptr<DType>(); MXNET_ASSIGN_REQ_SWITCH(req[1], Req, { Kernel<mxnet_op::op_with_req<ROP, Req>, xpu>::Launch(s, size, rgrad_dptr, ograd_dptr); }); } } template<typename xpu, typename LOP, typename ROP, typename DType> static void BackwardUseIn_(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { DCHECK_EQ(outputs.size(), 2U); DCHECK_EQ(inputs.size(), 3U); mxnet_op::Stream<xpu> *s = ctx.get_stream<xpu>(); const DType *ograd_dptr = inputs[0].dptr<DType>(); const DType *lhs_dptr = inputs[1].dptr<DType>(); const DType *rhs_dptr = inputs[2].dptr<DType>(); MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { const int size = static_cast<int>( (outputs[0].Size() + mxnet_op::DataType<DType>::kLanes - 1) / mxnet_op::DataType<DType>::kLanes); DType * lgrad_dptr = outputs[0].dptr<DType>(); mxnet_op::Kernel<mxnet_op::op_with_req<mxnet_op::backward_grad<LOP>, Req>, xpu>::Launch( s, size, lgrad_dptr, ograd_dptr, lhs_dptr, rhs_dptr);}); MXNET_ASSIGN_REQ_SWITCH(req[1], Req, { const int size = static_cast<int>( (outputs[1].Size() + mxnet_op::DataType<DType>::kLanes - 1) / mxnet_op::DataType<DType>::kLanes); DType * rgrad_dptr = outputs[1].dptr<DType>(); mxnet_op::Kernel<mxnet_op::op_with_req<mxnet_op::backward_grad<ROP>, Req>, xpu>::Launch( s, size, rgrad_dptr, ograd_dptr, lhs_dptr, rhs_dptr);}); } template< typename xpu, typename LOP, typename ROP, typename DType, bool in0_ok_dense = false, bool in1_ok_dense = false, bool in2_ok_dense = false, typename BackupCompute> static inline void BackwardUseInEx_(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<NDArray> &inputs, const std::vector<OpReqType> &req, const std::vector<NDArray> &outputs, BackupCompute backup_compute) { mshadow::Stream<xpu> *s = ctx.get_stream<xpu>(); // lhs grad if (req[0] != kNullOp) { // RspRspOp can handle dense outputs so long as OP(0, 0) == 0 MSHADOW_IDX_TYPE_SWITCH(inputs[1].aux_type(rowsparse::kIdx), IType, { RspRspOp<DType, IType, LOP>( s, attrs, ctx, inputs[1], inputs[2], req[0], outputs[0], false, false, false, false); }); // lhs in-place MSHADOW_IDX_TYPE_SWITCH(inputs[0].aux_type(rowsparse::kIdx), IType, { RspRspOp<DType, IType, mshadow::op::mul>( s, attrs, ctx, outputs[0], inputs[0], req[0], outputs[0], false, false, true, false); }); } // rhs grad if (req[1] != kNullOp) { MSHADOW_IDX_TYPE_SWITCH(inputs[1].aux_type(rowsparse::kIdx), IType, { RspRspOp<DType, IType, ROP>( s, attrs, ctx, inputs[1], inputs[2], 
req[1], outputs[1], false, false, false, false); }); // rhs in-place MSHADOW_IDX_TYPE_SWITCH(inputs[0].aux_type(rowsparse::kIdx), IType, { RspRspOp<DType, IType, mshadow::op::mul>( s, attrs, ctx, inputs[0], outputs[1], req[1], outputs[1], false, false, true, false); }); } } protected: /*! \brief Binary op handling for lhr/rhs: RspDns, RspRsp, DnsRsp, or RspRsp->Dns result */ template<typename DType, typename IType, typename OP> static void RspRspOp(mshadow::Stream<cpu> *s, const nnvm::NodeAttrs &attrs, const OpContext &ctx, const NDArray &lhs, const NDArray &rhs, OpReqType req, const NDArray &output, bool lhs_may_be_dense, bool rhs_may_be_dense, bool allow_inplace, bool scatter); /*! \brief CSR -op- CSR binary operator for non-canonical NDArray */ template<typename DType, typename IType, typename CType, typename OP> static inline void CsrCsrOp(mshadow::Stream<cpu> *s, const nnvm::NodeAttrs &attrs, const OpContext &ctx, const NDArray &lhs, const NDArray &rhs, OpReqType req, const NDArray &output); public: /*! * \brief Rsp-op-Rsp operation which produces a dense result * \param attrs Attributes * \param dev_mask Device mask * \param dispatch_mode Dispatch Mode * \param in_attrs Input storage attributes * \param out_attrs Output storage attributes * \return true if handled */ static bool SparseSparseWithDenseResult(const nnvm::NodeAttrs& attrs, int dev_mask, DispatchMode* dispatch_mode, std::vector<int> *in_attrs, std::vector<int> *out_attrs); /*! * \brief Allow one of the inputs to be dense and still produce a sparse output * \param attrs Attributes * \param dev_mask Device mask * \param dispatch_mode Dispatch Mode * \param in_attrs Input storage attributes * \param out_attrs Output storage attributes * \return true if handled */ template<bool lhs_dense_ok = true, bool rhs_dense_ok = true> static bool AllowLRDenseInputWithSparseOutputStorageType(const nnvm::NodeAttrs& attrs, int dev_mask, DispatchMode* dispatch_mode, std::vector<int> *in_attrs, std::vector<int> *out_attrs) { CHECK_EQ(in_attrs->size(), 2U) << " in operator " << attrs.name; CHECK_EQ(out_attrs->size(), 1U) << " in operator " << attrs.name; const auto& lhs_stype = in_attrs->at(0); const auto& rhs_stype = in_attrs->at(1); auto& out_stype = out_attrs->at(0); bool dispatched = false; const bool invalid_ctx = dev_mask != mshadow::cpu::kDevMask; const auto dispatch_ex = invalid_ctx ? 
DispatchMode::kFComputeFallback : DispatchMode::kFComputeEx; if (!dispatched && lhs_stype == kDefaultStorage && rhs_stype == kDefaultStorage) { // dns, dns -> dns dispatched = storage_type_assign(&out_stype, kDefaultStorage, dispatch_mode, DispatchMode::kFCompute); } if (!dispatched) { if ((lhs_stype == kRowSparseStorage && rhs_stype == kRowSparseStorage) || (rhs_dense_ok && lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage) || (lhs_dense_ok && lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage)) { // rsp, rsp -> rsp // rsp, dns -> rsp // dns, rsp -> rsp dispatched = storage_type_assign(&out_stype, kRowSparseStorage, dispatch_mode, dispatch_ex); } else if (lhs_stype == kCSRStorage && rhs_stype == kCSRStorage) { // csr, csr -> csr dispatched = storage_type_assign(&out_stype, kCSRStorage, dispatch_mode, dispatch_ex); } else if ((lhs_stype == kCSRStorage && rhs_dense_ok) || (rhs_stype == kCSRStorage && lhs_dense_ok)) { // csr, dns -> csr // dns, csr -> csr dispatched = storage_type_assign(&out_stype, kCSRStorage, dispatch_mode, DispatchMode::kFComputeFallback); } } if (!dispatched) { dispatch_fallback(out_attrs, dispatch_mode); } if (*dispatch_mode == DispatchMode::kFComputeFallback) { LogStorageFallback(attrs, dev_mask, in_attrs, out_attrs); } return true; } /*! * \brief Backward pass computing input gradient using forward inputs * \param attrs Attributes * \param dev_mask Device mask * \param dispatch_mode Dispatch Mode * \param in_attrs Input storage attributes * \param out_attrs Output storage attributes * \return true if handled */ static bool BackwardUseInStorageType(const nnvm::NodeAttrs& attrs, int dev_mask, DispatchMode* dispatch_mode, std::vector<int> *in_attrs, std::vector<int> *out_attrs); template<typename xpu, typename OP> static void Compute(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { using namespace mxnet_op; if (req[0] != kNullOp) { Stream<xpu> *s = ctx.get_stream<xpu>(); CHECK_EQ(inputs.size(), 2U); CHECK_EQ(outputs.size(), 1U); MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size()) + DataType<DType>::kLanes - 1) / DataType<DType>::kLanes; Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size, outputs[0].dptr<DType>(), inputs[0].dptr<DType>(), inputs[1].dptr<DType>()); }); }); } } template<typename xpu, typename OP> static void ComputeWithHalf2(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { using namespace mxnet_op; if (req[0] != kNullOp) { Stream<xpu> *s = ctx.get_stream<xpu>(); CHECK_EQ(inputs.size(), 2U); CHECK_EQ(outputs.size(), 1U); MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { MSHADOW_TYPE_SWITCH_WITH_HALF2(outputs[0].type_flag_, DType, { const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size()) + DataType<DType>::kLanes - 1) / DataType<DType>::kLanes; Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size, outputs[0].dptr<DType>(), inputs[0].dptr<DType>(), inputs[1].dptr<DType>()); }); }); } } template<typename xpu, typename OP> static void ComputeEx(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<NDArray> &inputs, const std::vector<OpReqType> &req, const std::vector<NDArray> &outputs) { CHECK_EQ(inputs.size(), 2); CHECK_EQ(outputs.size(), 1); if 
(req[0] == kNullOp) return; const auto lhs_stype = inputs[0].storage_type(); const auto out_stype = outputs[0].storage_type(); mshadow::Stream<xpu> *s = ctx.get_stream<xpu>(); if ((common::ContainsOnlyStorage(inputs, kRowSparseStorage)) && (out_stype == kRowSparseStorage || out_stype == kDefaultStorage)) { // rsp, rsp -> rsp // rsp, rsp -> dns const int rsp_input_idx = lhs_stype == kRowSparseStorage ? 0 : 1; MSHADOW_IDX_TYPE_SWITCH(inputs[rsp_input_idx].aux_type(rowsparse::kIdx), IType, { MSHADOW_TYPE_SWITCH(outputs[0].dtype(), DType, { RspRspOp<DType, IType, OP>( s, attrs, ctx, inputs[0], inputs[1], req[0], outputs[0], false, false, false, false); }); }); } else if (common::ContainsOnlyStorage(inputs, kCSRStorage) && out_stype == kCSRStorage) { // csr, csr -> csr MSHADOW_IDX_TYPE_SWITCH(inputs[0].aux_type(csr::kIdx), IType, { MSHADOW_IDX_TYPE_SWITCH(inputs[0].aux_type(csr::kIndPtr), CType, { MSHADOW_TYPE_SWITCH(outputs[0].dtype(), DType, { CsrCsrOp<DType, IType, CType, OP>( s, attrs, ctx, inputs[0], inputs[1], req[0], outputs[0]); }); }); }); } else { LOG(FATAL) << "Not implemented: " << operator_string(attrs, ctx, inputs, req, outputs); } } /*! \brief ComputeEx allowing dense lvalue and/or rvalue */ template<typename xpu, typename OP, bool lhs_may_be_dense, bool rhs_may_be_dense> static void ComputeDnsLRValueEx(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<NDArray> &inputs, const std::vector<OpReqType> &req, const std::vector<NDArray> &outputs) { using namespace mshadow; using namespace mshadow::expr; CHECK_EQ(inputs.size(), 2); CHECK_EQ(outputs.size(), 1); if (req[0] == kNullOp) return; const auto lhs_stype = inputs[0].storage_type(); const auto rhs_stype = inputs[1].storage_type(); const auto out_stype = outputs[0].storage_type(); if ((out_stype == kRowSparseStorage || out_stype == kDefaultStorage) && ((lhs_stype == kRowSparseStorage && rhs_stype == kRowSparseStorage) || (lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage) || (lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage)) && lhs_may_be_dense && rhs_may_be_dense) { // rsp, rsp -> rsp // rsp, rsp -> dns // rsp, dns -> rsp // dns, rsp -> rsp // More than once dense not allowed (this will be checked in RspRspOp): // rsp, dns -> dns <-- NOT ALLOWED // dns, rsp -> dns <-- NOT ALLOWED mshadow::Stream<xpu> *s = ctx.get_stream<xpu>(); MSHADOW_TYPE_SWITCH(outputs[0].dtype(), DType, { MSHADOW_IDX_TYPE_SWITCH(outputs[0].aux_type(rowsparse::kIdx), IType, { RspRspOp<DType, IType, OP>( s, attrs, ctx, inputs[0], inputs[1], req[0], outputs[0], lhs_may_be_dense, rhs_may_be_dense, false, false); }); }); } else if (lhs_stype == kCSRStorage && rhs_stype == kCSRStorage) { ComputeEx<xpu, OP>(attrs, ctx, inputs, req, outputs); } else { LOG(FATAL) << "Not implemented: " << operator_string(attrs, ctx, inputs, req, outputs); } } template<typename xpu, typename LOP, typename ROP> static inline void BackwardUseNone(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { BackwardUseNone_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs); }); } template<typename xpu, typename LOP, typename ROP> static inline void BackwardUseNoneWithHalf2(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { MSHADOW_TYPE_SWITCH_WITH_HALF2(outputs[0].type_flag_, DType, { 
BackwardUseNone_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs); }); } template<typename xpu, typename LOP, typename ROP> static inline void BackwardUseNoneEx(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<NDArray> &inputs, const std::vector<OpReqType> &req, const std::vector<NDArray> &outputs) { CHECK_EQ(inputs.size(), 1U); // output grad CHECK_EQ(outputs.size(), 2U); // lhs input grad, rhs input grad const auto in_stype = inputs[0].storage_type(); const auto lhs_stype = outputs[0].storage_type(); const auto rhs_stype = outputs[1].storage_type(); // lhs grad if (req[0] != kNullOp) { if (in_stype == lhs_stype && (in_stype == kRowSparseStorage || in_stype == kCSRStorage)) { CHECK_EQ(outputs[0].storage_type(), in_stype); // rsp -> rsp, _. op requires 0-input returns 0-output DCHECK_LT(fabs(static_cast<float>(LOP::Map(0))), 1e-5f); UnaryOp::ComputeEx<xpu, LOP>(attrs, ctx, inputs, req, {outputs[0]}); } else { LOG(FATAL) << "Not implemented: " << operator_string(attrs, ctx, inputs, req, outputs); } } // rhs grad if (req[1] != kNullOp) { if (in_stype == rhs_stype && (in_stype == kRowSparseStorage || in_stype == kCSRStorage)) { CHECK_EQ(outputs[0].storage_type(), in_stype); // rsp -> _, rsp. op requires 0-input returns 0-output DCHECK_LT(fabs(static_cast<float>(ROP::Map(0))), 1e-5f); UnaryOp::ComputeEx<xpu, ROP>(attrs, ctx, inputs, req, {outputs[1]}); } else { LOG(FATAL) << "Not implemented: " << operator_string(attrs, ctx, inputs, req, outputs); } } } template<typename xpu, typename LOP, typename ROP> static inline void BackwardUseIn(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { BackwardUseIn_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs); }); } template<typename xpu, typename LOP, typename ROP> static inline void BackwardUseInWithHalf2(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { MSHADOW_TYPE_SWITCH_WITH_HALF2(outputs[0].type_flag_, DType, { BackwardUseIn_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs); }); } template< typename xpu, typename LOP, typename ROP, bool in0_ok_dense = false, bool in1_ok_dense = false, bool in2_ok_dense = false> static inline void BackwardUseInEx(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<NDArray> &inputs, const std::vector<OpReqType> &req, const std::vector<NDArray> &outputs) { using namespace common; CHECK_EQ(inputs.size(), 3U); CHECK_EQ(outputs.size(), 2U); // lhs input grad, rhs input grad const auto lhs_grad_stype = outputs[0].storage_type(); const auto rhs_grad_stype = outputs[1].storage_type(); if (ContainsOnlyStorage(inputs, kRowSparseStorage) && (lhs_grad_stype == kDefaultStorage || lhs_grad_stype == kRowSparseStorage) && (rhs_grad_stype == kDefaultStorage || rhs_grad_stype == kRowSparseStorage)) { // rsp, rsp, rsp -> [dns, rsp], [dns, rsp] MSHADOW_TYPE_SWITCH(outputs[0].dtype(), DType, { BackwardUseInEx_<xpu, LOP, ROP, DType, in0_ok_dense, in1_ok_dense, in2_ok_dense>( attrs, ctx, inputs, req, outputs, BackwardUseIn<xpu, LOP, ROP>); }); } } }; // class ElemwiseBinaryOp /*! 
\brief Binary launch */ #define MXNET_OPERATOR_REGISTER_BINARY(name) \ NNVM_REGISTER_OP(name) \ .set_num_inputs(2) \ .set_num_outputs(1) \ .set_attr<nnvm::FListInputNames>("FListInputNames", \ [](const NodeAttrs& attrs) { \ return std::vector<std::string>{"lhs", "rhs"}; \ }) \ .set_attr<nnvm::FInferShape>("FInferShape", ElemwiseShape<2, 1>) \ .set_attr<nnvm::FInferType>("FInferType", ElemwiseType<2, 1>) \ .set_attr<nnvm::FInplaceOption>("FInplaceOption", \ [](const NodeAttrs& attrs){ \ return std::vector<std::pair<int, int> >{{0, 0}, {1, 0}}; \ }) \ .add_argument("lhs", "NDArray-or-Symbol", "first input") \ .add_argument("rhs", "NDArray-or-Symbol", "second input") /*! \brief Binary launch, with FComputeEx for csr and rsp available */ #define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU(__name$, __kernel$) \ MXNET_OPERATOR_REGISTER_BINARY(__name$) \ .set_attr<FInferStorageType>("FInferStorageType", \ ElemwiseStorageType<2, 1, true, true, true>) \ .set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \ .set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>) \ .set_attr<FResourceRequest>("FResourceRequest", /* For Sparse CSR */ \ [](const NodeAttrs& attrs) { \ return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};}) /*! \brief Binary launch, dense result * FInferStorageType attr is not set using this macro. * By default DefaultStorageType is used. */ #define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_DR(__name$, __kernel$) \ MXNET_OPERATOR_REGISTER_BINARY(__name$) \ .set_attr<FInferStorageType>("FInferStorageType", \ ElemwiseBinaryOp::SparseSparseWithDenseResult) \ .set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \ .set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>) } // namespace op } // namespace mxnet #endif // MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_
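The cell above is the no_omp_formatted value: the same header with the license block stripped and the #pragma omp parallel for num_threads(...) line removed from FillDense, so the row fill runs serially. The rest of the class is unchanged, including the MissingRValueOp / MissingLValueOp kernels that let a binary op treat an absent sparse row as zero. A small illustrative sketch of that idea follows; the names (Plus, map_missing_rvalue, map_missing_lvalue) are placeholders and not part of the header.

// Sketch of the missing-operand semantics: when one sparse operand has no
// stored row, the binary op is evaluated against an implicit zero.
#include <cstddef>

struct Plus {
  template <typename DType>
  static DType Map(DType a, DType b) { return a + b; }
};

// Analogue of MissingRValueOp: rhs row is absent, so compute OP(lhs[i], 0).
template <typename OP, typename DType>
void map_missing_rvalue(DType* out, const DType* lhs, std::size_t n) {
  for (std::size_t i = 0; i < n; ++i) {
    out[i] = OP::Map(lhs[i], DType(0));
  }
}

// Analogue of MissingLValueOp: lhs row is absent, so compute OP(0, rhs[i]).
template <typename OP, typename DType>
void map_missing_lvalue(DType* out, const DType* rhs, std::size_t n) {
  for (std::size_t i = 0; i < n; ++i) {
    out[i] = OP::Map(DType(0), rhs[i]);
  }
}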
/*! * \file elemwise_binary_op.h * \brief Function definition of elementwise binary operators */ #ifndef MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_ #define MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_ #include <mxnet/operator_util.h> #include <mxnet/op_attr_types.h> #include <vector> #include <string> #include <utility> #include <typeinfo> #include <algorithm> #include "../mxnet_op.h" #include "../mshadow_op.h" #include "../../engine/openmp.h" #include "elemwise_unary_op.h" #include "../../common/utils.h" #include "./init_op.h" namespace mxnet { namespace op { /*! Gather binary operator functions into ElemwiseBinaryOp class */ class ElemwiseBinaryOp : public OpBase { public: /*! \brief For sparse, assume missing rvalue is 0 */ template<typename OP, int Req> struct MissingRValueOp { template<typename DType> MSHADOW_XINLINE static void Map(int i, DType *out, const DType *lhs) { KERNEL_ASSIGN(out[i], Req, OP::Map(lhs[i], DType(0))); } }; /*! \brief For sparse, assume missing lvalue is 0 */ template<typename OP, int Req> struct MissingLValueOp { template<typename DType> MSHADOW_XINLINE static void Map(int i, DType *out, const DType *rhs) { KERNEL_ASSIGN(out[i], Req, OP::Map(DType(0), rhs[i])); } }; private: /*! * \brief CSR operation requires temp space */ enum ResourceRequestType { kTempSpace }; /*! * \brief Fill contiguous dense output rows with value computed from 0 lhs and 0 rhs input * CPU-Only version */ template<typename DType, typename OP, typename xpu> static inline size_t FillDense(mshadow::Stream<xpu> *s, const size_t idx_l, const size_t idx_r, const OpReqType req, mshadow::Tensor<xpu, 2, DType> *out, const size_t iter_out) { const int index_out_min = static_cast<int>(std::min(idx_l, idx_r)); if (static_cast<size_t>(index_out_min) > iter_out) { const DType zero_input_val = OP::Map(DType(0), DType(0)); #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount()) for (int i = static_cast<int>(iter_out); i < index_out_min; ++i) { Fill<false>(s, (*out)[i], req, zero_input_val); } } return static_cast<size_t>(index_out_min); // MSVC wants OMP loops to always use 'int' } static inline bool IsSameArray(const NDArray& a1, const NDArray& a2) { return a1.var() == a2.var(); } /*! \brief Minimum of three */ static MSHADOW_XINLINE size_t minthree(const size_t a, const size_t b, const size_t c) { return a < b ? (a < c ? a : c) : (b < c ? 
b : c); } template<typename xpu, typename LOP, typename ROP, typename DType> static void BackwardUseNone_(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { using namespace mxnet_op; Stream<xpu> *s = ctx.get_stream<xpu>(); const int size = static_cast<int>((outputs[0].Size() + DataType<DType>::kLanes - 1) / DataType<DType>::kLanes); const DType *ograd_dptr = inputs[0].dptr<DType>(); if (std::is_same<LOP, mshadow_op::identity>::value && req[0] == kWriteInplace) { CHECK_EQ(ograd_dptr, outputs[0].dptr<DType>()); } else if (req[0] != kNullOp) { DType *lgrad_dptr = outputs[0].dptr<DType>(); MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { Kernel<mxnet_op::op_with_req<LOP, Req>, xpu>::Launch(s, size, lgrad_dptr, ograd_dptr); }); } if (std::is_same<ROP, mshadow_op::identity>::value && req[1] == kWriteInplace) { CHECK_EQ(ograd_dptr, outputs[1].dptr<DType>()); } else if (req[1] != kNullOp) { DType *rgrad_dptr = outputs[1].dptr<DType>(); MXNET_ASSIGN_REQ_SWITCH(req[1], Req, { Kernel<mxnet_op::op_with_req<ROP, Req>, xpu>::Launch(s, size, rgrad_dptr, ograd_dptr); }); } } template<typename xpu, typename LOP, typename ROP, typename DType> static void BackwardUseIn_(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { DCHECK_EQ(outputs.size(), 2U); DCHECK_EQ(inputs.size(), 3U); mxnet_op::Stream<xpu> *s = ctx.get_stream<xpu>(); const DType *ograd_dptr = inputs[0].dptr<DType>(); const DType *lhs_dptr = inputs[1].dptr<DType>(); const DType *rhs_dptr = inputs[2].dptr<DType>(); MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { const int size = static_cast<int>( (outputs[0].Size() + mxnet_op::DataType<DType>::kLanes - 1) / mxnet_op::DataType<DType>::kLanes); DType * lgrad_dptr = outputs[0].dptr<DType>(); mxnet_op::Kernel<mxnet_op::op_with_req<mxnet_op::backward_grad<LOP>, Req>, xpu>::Launch( s, size, lgrad_dptr, ograd_dptr, lhs_dptr, rhs_dptr);}); MXNET_ASSIGN_REQ_SWITCH(req[1], Req, { const int size = static_cast<int>( (outputs[1].Size() + mxnet_op::DataType<DType>::kLanes - 1) / mxnet_op::DataType<DType>::kLanes); DType * rgrad_dptr = outputs[1].dptr<DType>(); mxnet_op::Kernel<mxnet_op::op_with_req<mxnet_op::backward_grad<ROP>, Req>, xpu>::Launch( s, size, rgrad_dptr, ograd_dptr, lhs_dptr, rhs_dptr);}); } template< typename xpu, typename LOP, typename ROP, typename DType, bool in0_ok_dense = false, bool in1_ok_dense = false, bool in2_ok_dense = false, typename BackupCompute> static inline void BackwardUseInEx_(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<NDArray> &inputs, const std::vector<OpReqType> &req, const std::vector<NDArray> &outputs, BackupCompute backup_compute) { mshadow::Stream<xpu> *s = ctx.get_stream<xpu>(); // lhs grad if (req[0] != kNullOp) { // RspRspOp can handle dense outputs so long as OP(0, 0) == 0 MSHADOW_IDX_TYPE_SWITCH(inputs[1].aux_type(rowsparse::kIdx), IType, { RspRspOp<DType, IType, LOP>( s, attrs, ctx, inputs[1], inputs[2], req[0], outputs[0], false, false, false, false); }); // lhs in-place MSHADOW_IDX_TYPE_SWITCH(inputs[0].aux_type(rowsparse::kIdx), IType, { RspRspOp<DType, IType, mshadow::op::mul>( s, attrs, ctx, outputs[0], inputs[0], req[0], outputs[0], false, false, true, false); }); } // rhs grad if (req[1] != kNullOp) { MSHADOW_IDX_TYPE_SWITCH(inputs[1].aux_type(rowsparse::kIdx), IType, { RspRspOp<DType, IType, ROP>( s, attrs, ctx, inputs[1], inputs[2], 
req[1], outputs[1], false, false, false, false); }); // rhs in-place MSHADOW_IDX_TYPE_SWITCH(inputs[0].aux_type(rowsparse::kIdx), IType, { RspRspOp<DType, IType, mshadow::op::mul>( s, attrs, ctx, inputs[0], outputs[1], req[1], outputs[1], false, false, true, false); }); } } protected: /*! \brief Binary op handling for lhr/rhs: RspDns, RspRsp, DnsRsp, or RspRsp->Dns result */ template<typename DType, typename IType, typename OP> static void RspRspOp(mshadow::Stream<cpu> *s, const nnvm::NodeAttrs &attrs, const OpContext &ctx, const NDArray &lhs, const NDArray &rhs, OpReqType req, const NDArray &output, bool lhs_may_be_dense, bool rhs_may_be_dense, bool allow_inplace, bool scatter); /*! \brief CSR -op- CSR binary operator for non-canonical NDArray */ template<typename DType, typename IType, typename CType, typename OP> static inline void CsrCsrOp(mshadow::Stream<cpu> *s, const nnvm::NodeAttrs &attrs, const OpContext &ctx, const NDArray &lhs, const NDArray &rhs, OpReqType req, const NDArray &output); public: /*! * \brief Rsp-op-Rsp operation which produces a dense result * \param attrs Attributes * \param dev_mask Device mask * \param dispatch_mode Dispatch Mode * \param in_attrs Input storage attributes * \param out_attrs Output storage attributes * \return true if handled */ static bool SparseSparseWithDenseResult(const nnvm::NodeAttrs& attrs, int dev_mask, DispatchMode* dispatch_mode, std::vector<int> *in_attrs, std::vector<int> *out_attrs); /*! * \brief Allow one of the inputs to be dense and still produce a sparse output * \param attrs Attributes * \param dev_mask Device mask * \param dispatch_mode Dispatch Mode * \param in_attrs Input storage attributes * \param out_attrs Output storage attributes * \return true if handled */ template<bool lhs_dense_ok = true, bool rhs_dense_ok = true> static bool AllowLRDenseInputWithSparseOutputStorageType(const nnvm::NodeAttrs& attrs, int dev_mask, DispatchMode* dispatch_mode, std::vector<int> *in_attrs, std::vector<int> *out_attrs) { CHECK_EQ(in_attrs->size(), 2U) << " in operator " << attrs.name; CHECK_EQ(out_attrs->size(), 1U) << " in operator " << attrs.name; const auto& lhs_stype = in_attrs->at(0); const auto& rhs_stype = in_attrs->at(1); auto& out_stype = out_attrs->at(0); bool dispatched = false; const bool invalid_ctx = dev_mask != mshadow::cpu::kDevMask; const auto dispatch_ex = invalid_ctx ? 
DispatchMode::kFComputeFallback : DispatchMode::kFComputeEx; if (!dispatched && lhs_stype == kDefaultStorage && rhs_stype == kDefaultStorage) { // dns, dns -> dns dispatched = storage_type_assign(&out_stype, kDefaultStorage, dispatch_mode, DispatchMode::kFCompute); } if (!dispatched) { if ((lhs_stype == kRowSparseStorage && rhs_stype == kRowSparseStorage) || (rhs_dense_ok && lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage) || (lhs_dense_ok && lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage)) { // rsp, rsp -> rsp // rsp, dns -> rsp // dns, rsp -> rsp dispatched = storage_type_assign(&out_stype, kRowSparseStorage, dispatch_mode, dispatch_ex); } else if (lhs_stype == kCSRStorage && rhs_stype == kCSRStorage) { // csr, csr -> csr dispatched = storage_type_assign(&out_stype, kCSRStorage, dispatch_mode, dispatch_ex); } else if ((lhs_stype == kCSRStorage && rhs_dense_ok) || (rhs_stype == kCSRStorage && lhs_dense_ok)) { // csr, dns -> csr // dns, csr -> csr dispatched = storage_type_assign(&out_stype, kCSRStorage, dispatch_mode, DispatchMode::kFComputeFallback); } } if (!dispatched) { dispatch_fallback(out_attrs, dispatch_mode); } if (*dispatch_mode == DispatchMode::kFComputeFallback) { LogStorageFallback(attrs, dev_mask, in_attrs, out_attrs); } return true; } /*! * \brief Backward pass computing input gradient using forward inputs * \param attrs Attributes * \param dev_mask Device mask * \param dispatch_mode Dispatch Mode * \param in_attrs Input storage attributes * \param out_attrs Output storage attributes * \return true if handled */ static bool BackwardUseInStorageType(const nnvm::NodeAttrs& attrs, int dev_mask, DispatchMode* dispatch_mode, std::vector<int> *in_attrs, std::vector<int> *out_attrs); template<typename xpu, typename OP> static void Compute(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { using namespace mxnet_op; if (req[0] != kNullOp) { Stream<xpu> *s = ctx.get_stream<xpu>(); CHECK_EQ(inputs.size(), 2U); CHECK_EQ(outputs.size(), 1U); MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size()) + DataType<DType>::kLanes - 1) / DataType<DType>::kLanes; Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size, outputs[0].dptr<DType>(), inputs[0].dptr<DType>(), inputs[1].dptr<DType>()); }); }); } } template<typename xpu, typename OP> static void ComputeWithHalf2(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { using namespace mxnet_op; if (req[0] != kNullOp) { Stream<xpu> *s = ctx.get_stream<xpu>(); CHECK_EQ(inputs.size(), 2U); CHECK_EQ(outputs.size(), 1U); MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { MSHADOW_TYPE_SWITCH_WITH_HALF2(outputs[0].type_flag_, DType, { const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size()) + DataType<DType>::kLanes - 1) / DataType<DType>::kLanes; Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size, outputs[0].dptr<DType>(), inputs[0].dptr<DType>(), inputs[1].dptr<DType>()); }); }); } } template<typename xpu, typename OP> static void ComputeEx(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<NDArray> &inputs, const std::vector<OpReqType> &req, const std::vector<NDArray> &outputs) { CHECK_EQ(inputs.size(), 2); CHECK_EQ(outputs.size(), 1); if 
(req[0] == kNullOp) return; const auto lhs_stype = inputs[0].storage_type(); const auto out_stype = outputs[0].storage_type(); mshadow::Stream<xpu> *s = ctx.get_stream<xpu>(); if ((common::ContainsOnlyStorage(inputs, kRowSparseStorage)) && (out_stype == kRowSparseStorage || out_stype == kDefaultStorage)) { // rsp, rsp -> rsp // rsp, rsp -> dns const int rsp_input_idx = lhs_stype == kRowSparseStorage ? 0 : 1; MSHADOW_IDX_TYPE_SWITCH(inputs[rsp_input_idx].aux_type(rowsparse::kIdx), IType, { MSHADOW_TYPE_SWITCH(outputs[0].dtype(), DType, { RspRspOp<DType, IType, OP>( s, attrs, ctx, inputs[0], inputs[1], req[0], outputs[0], false, false, false, false); }); }); } else if (common::ContainsOnlyStorage(inputs, kCSRStorage) && out_stype == kCSRStorage) { // csr, csr -> csr MSHADOW_IDX_TYPE_SWITCH(inputs[0].aux_type(csr::kIdx), IType, { MSHADOW_IDX_TYPE_SWITCH(inputs[0].aux_type(csr::kIndPtr), CType, { MSHADOW_TYPE_SWITCH(outputs[0].dtype(), DType, { CsrCsrOp<DType, IType, CType, OP>( s, attrs, ctx, inputs[0], inputs[1], req[0], outputs[0]); }); }); }); } else { LOG(FATAL) << "Not implemented: " << operator_string(attrs, ctx, inputs, req, outputs); } } /*! \brief ComputeEx allowing dense lvalue and/or rvalue */ template<typename xpu, typename OP, bool lhs_may_be_dense, bool rhs_may_be_dense> static void ComputeDnsLRValueEx(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<NDArray> &inputs, const std::vector<OpReqType> &req, const std::vector<NDArray> &outputs) { using namespace mshadow; using namespace mshadow::expr; CHECK_EQ(inputs.size(), 2); CHECK_EQ(outputs.size(), 1); if (req[0] == kNullOp) return; const auto lhs_stype = inputs[0].storage_type(); const auto rhs_stype = inputs[1].storage_type(); const auto out_stype = outputs[0].storage_type(); if ((out_stype == kRowSparseStorage || out_stype == kDefaultStorage) && ((lhs_stype == kRowSparseStorage && rhs_stype == kRowSparseStorage) || (lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage) || (lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage)) && lhs_may_be_dense && rhs_may_be_dense) { // rsp, rsp -> rsp // rsp, rsp -> dns // rsp, dns -> rsp // dns, rsp -> rsp // More than once dense not allowed (this will be checked in RspRspOp): // rsp, dns -> dns <-- NOT ALLOWED // dns, rsp -> dns <-- NOT ALLOWED mshadow::Stream<xpu> *s = ctx.get_stream<xpu>(); MSHADOW_TYPE_SWITCH(outputs[0].dtype(), DType, { MSHADOW_IDX_TYPE_SWITCH(outputs[0].aux_type(rowsparse::kIdx), IType, { RspRspOp<DType, IType, OP>( s, attrs, ctx, inputs[0], inputs[1], req[0], outputs[0], lhs_may_be_dense, rhs_may_be_dense, false, false); }); }); } else if (lhs_stype == kCSRStorage && rhs_stype == kCSRStorage) { ComputeEx<xpu, OP>(attrs, ctx, inputs, req, outputs); } else { LOG(FATAL) << "Not implemented: " << operator_string(attrs, ctx, inputs, req, outputs); } } template<typename xpu, typename LOP, typename ROP> static inline void BackwardUseNone(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { BackwardUseNone_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs); }); } template<typename xpu, typename LOP, typename ROP> static inline void BackwardUseNoneWithHalf2(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { MSHADOW_TYPE_SWITCH_WITH_HALF2(outputs[0].type_flag_, DType, { 
BackwardUseNone_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs); }); } template<typename xpu, typename LOP, typename ROP> static inline void BackwardUseNoneEx(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<NDArray> &inputs, const std::vector<OpReqType> &req, const std::vector<NDArray> &outputs) { CHECK_EQ(inputs.size(), 1U); // output grad CHECK_EQ(outputs.size(), 2U); // lhs input grad, rhs input grad const auto in_stype = inputs[0].storage_type(); const auto lhs_stype = outputs[0].storage_type(); const auto rhs_stype = outputs[1].storage_type(); // lhs grad if (req[0] != kNullOp) { if (in_stype == lhs_stype && (in_stype == kRowSparseStorage || in_stype == kCSRStorage)) { CHECK_EQ(outputs[0].storage_type(), in_stype); // rsp -> rsp, _. op requires 0-input returns 0-output DCHECK_LT(fabs(static_cast<float>(LOP::Map(0))), 1e-5f); UnaryOp::ComputeEx<xpu, LOP>(attrs, ctx, inputs, req, {outputs[0]}); } else { LOG(FATAL) << "Not implemented: " << operator_string(attrs, ctx, inputs, req, outputs); } } // rhs grad if (req[1] != kNullOp) { if (in_stype == rhs_stype && (in_stype == kRowSparseStorage || in_stype == kCSRStorage)) { CHECK_EQ(outputs[0].storage_type(), in_stype); // rsp -> _, rsp. op requires 0-input returns 0-output DCHECK_LT(fabs(static_cast<float>(ROP::Map(0))), 1e-5f); UnaryOp::ComputeEx<xpu, ROP>(attrs, ctx, inputs, req, {outputs[1]}); } else { LOG(FATAL) << "Not implemented: " << operator_string(attrs, ctx, inputs, req, outputs); } } } template<typename xpu, typename LOP, typename ROP> static inline void BackwardUseIn(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { BackwardUseIn_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs); }); } template<typename xpu, typename LOP, typename ROP> static inline void BackwardUseInWithHalf2(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { MSHADOW_TYPE_SWITCH_WITH_HALF2(outputs[0].type_flag_, DType, { BackwardUseIn_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs); }); } template< typename xpu, typename LOP, typename ROP, bool in0_ok_dense = false, bool in1_ok_dense = false, bool in2_ok_dense = false> static inline void BackwardUseInEx(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<NDArray> &inputs, const std::vector<OpReqType> &req, const std::vector<NDArray> &outputs) { using namespace common; CHECK_EQ(inputs.size(), 3U); CHECK_EQ(outputs.size(), 2U); // lhs input grad, rhs input grad const auto lhs_grad_stype = outputs[0].storage_type(); const auto rhs_grad_stype = outputs[1].storage_type(); if (ContainsOnlyStorage(inputs, kRowSparseStorage) && (lhs_grad_stype == kDefaultStorage || lhs_grad_stype == kRowSparseStorage) && (rhs_grad_stype == kDefaultStorage || rhs_grad_stype == kRowSparseStorage)) { // rsp, rsp, rsp -> [dns, rsp], [dns, rsp] MSHADOW_TYPE_SWITCH(outputs[0].dtype(), DType, { BackwardUseInEx_<xpu, LOP, ROP, DType, in0_ok_dense, in1_ok_dense, in2_ok_dense>( attrs, ctx, inputs, req, outputs, BackwardUseIn<xpu, LOP, ROP>); }); } } }; // class ElemwiseBinaryOp /*! 
\brief Binary launch */ #define MXNET_OPERATOR_REGISTER_BINARY(name) \ NNVM_REGISTER_OP(name) \ .set_num_inputs(2) \ .set_num_outputs(1) \ .set_attr<nnvm::FListInputNames>("FListInputNames", \ [](const NodeAttrs& attrs) { \ return std::vector<std::string>{"lhs", "rhs"}; \ }) \ .set_attr<nnvm::FInferShape>("FInferShape", ElemwiseShape<2, 1>) \ .set_attr<nnvm::FInferType>("FInferType", ElemwiseType<2, 1>) \ .set_attr<nnvm::FInplaceOption>("FInplaceOption", \ [](const NodeAttrs& attrs){ \ return std::vector<std::pair<int, int> >{{0, 0}, {1, 0}}; \ }) \ .add_argument("lhs", "NDArray-or-Symbol", "first input") \ .add_argument("rhs", "NDArray-or-Symbol", "second input") /*! \brief Binary launch, with FComputeEx for csr and rsp available */ #define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU(__name$, __kernel$) \ MXNET_OPERATOR_REGISTER_BINARY(__name$) \ .set_attr<FInferStorageType>("FInferStorageType", \ ElemwiseStorageType<2, 1, true, true, true>) \ .set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \ .set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>) \ .set_attr<FResourceRequest>("FResourceRequest", /* For Sparse CSR */ \ [](const NodeAttrs& attrs) { \ return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};}) /*! \brief Binary launch, dense result * FInferStorageType attr is not set using this macro. * By default DefaultStorageType is used. */ #define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_DR(__name$, __kernel$) \ MXNET_OPERATOR_REGISTER_BINARY(__name$) \ .set_attr<FInferStorageType>("FInferStorageType", \ ElemwiseBinaryOp::SparseSparseWithDenseResult) \ .set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \ .set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>) } // namespace op } // namespace mxnet #endif // MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_
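/*
 * Editor's note (illustrative sketch, not part of elemwise_binary_op.h):
 * the registration macros above are meant to be invoked from an operator
 * .cc file together with a kernel functor, roughly as shown below. The
 * operator name "elemwise_add_example" and the kernel mshadow_op::plus are
 * assumptions chosen for illustration only; see elemwise_binary_op_basic.cc
 * in the MXNet source tree for the registrations that actually ship.
 */
MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU(elemwise_add_example, mshadow_op::plus)
.describe("Element-wise sum of the two inputs (example registration only).");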
pf_fold.c
/* * partiton function for single RNA secondary structures * * Simplified interfaces and backward compatibility * wrappers * * Ivo L Hofacker + Ronny Lorenz * Vienna RNA package */ #ifdef HAVE_CONFIG_H #include "config.h" #endif /*###########################################*/ /*# deprecated functions below #*/ /*###########################################*/ #ifndef VRNA_DISABLE_BACKWARD_COMPATIBILITY #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <float.h> /* #defines FLT_MAX ... */ #include <limits.h> #include "ViennaRNA/utils/basic.h" #include "ViennaRNA/params/default.h" #include "ViennaRNA/fold_vars.h" #include "ViennaRNA/loops/all.h" #include "ViennaRNA/gquad.h" #include "ViennaRNA/constraints/hard.h" #include "ViennaRNA/constraints/soft.h" #include "ViennaRNA/mfe.h" #include "ViennaRNA/part_func.h" #ifdef _OPENMP #include <omp.h> #endif /* ################################# # GLOBAL VARIABLES # ################################# */ PUBLIC int st_back = 0; /* ################################# # PRIVATE VARIABLES # ################################# */ /* some backward compatibility stuff */ PRIVATE vrna_fold_compound_t *backward_compat_compound = NULL; PRIVATE int backward_compat = 0; #ifdef _OPENMP #pragma omp threadprivate(backward_compat_compound, backward_compat) #endif /* ################################# # PRIVATE FUNCTION DECLARATIONS # ################################# */ PRIVATE float wrap_pf_fold(const char *sequence, char *structure, vrna_exp_param_t *parameters, int calculate_bppm, int is_constrained, int is_circular); PRIVATE double wrap_mean_bp_distance(FLT_OR_DBL *p, int length, int *index, int turn); /* ################################# # BEGIN OF FUNCTION DEFINITIONS # ################################# */ PRIVATE double wrap_mean_bp_distance(FLT_OR_DBL *p, int length, int *index, int turn) { int i, j; double d = 0.; /* compute the mean base pair distance in the thermodynamic ensemble */ /* <d> = \sum_{a,b} p_a p_b d(S_a,S_b) * this can be computed from the pair probs p_ij as * <d> = \sum_{ij} p_{ij}(1-p_{ij}) */ for (i = 1; i <= length; i++) for (j = i + turn + 1; j <= length; j++) d += p[index[i] - j] * (1 - p[index[i] - j]); return 2 * d; } PRIVATE float wrap_pf_fold(const char *sequence, char *structure, vrna_exp_param_t *parameters, int calculate_bppm, int is_constrained, int is_circular) { vrna_fold_compound_t *vc; vrna_md_t md; vc = NULL; /* we need vrna_exp_param_t datastructure to correctly init default hard constraints */ if (parameters) md = parameters->model_details; else set_model_details(&md); /* get global default parameters */ md.circ = is_circular; md.compute_bpp = calculate_bppm; vc = vrna_fold_compound(sequence, &md, VRNA_OPTION_DEFAULT); /* prepare exp_params and set global pf_scale */ vc->exp_params = vrna_exp_params(&(vc->params->model_details)); vc->exp_params->pf_scale = pf_scale; if (is_constrained && structure) { unsigned int constraint_options = 0; constraint_options |= VRNA_CONSTRAINT_DB | VRNA_CONSTRAINT_DB_PIPE | VRNA_CONSTRAINT_DB_DOT | VRNA_CONSTRAINT_DB_X | VRNA_CONSTRAINT_DB_ANG_BRACK | VRNA_CONSTRAINT_DB_RND_BRACK; vrna_constraints_add(vc, (const char *)structure, constraint_options); } if (backward_compat_compound && backward_compat) vrna_fold_compound_free(backward_compat_compound); backward_compat_compound = vc; backward_compat = 1; iindx = backward_compat_compound->iindx; return vrna_pf(vc, structure); } PUBLIC vrna_ep_t * stackProb(double cutoff) { if (!(backward_compat_compound && 
backward_compat)) { vrna_message_warning("stackProb: " "run pf_fold() first!"); return NULL; } else if (!backward_compat_compound->exp_matrices->probs) { vrna_message_warning("stackProb: " "probs == NULL!"); return NULL; } return vrna_stack_prob(backward_compat_compound, cutoff); } PUBLIC char * centroid(int length, double *dist) { if (pr == NULL) { vrna_message_warning("centroid: " "pr == NULL. You need to call pf_fold() before centroid()"); return NULL; } return vrna_centroid_from_probs(length, dist, pr); } PUBLIC double mean_bp_dist(int length) { /* compute the mean base pair distance in the thermodynamic ensemble */ /* <d> = \sum_{a,b} p_a p_b d(S_a,S_b) * this can be computed from the pair probs p_ij as * <d> = \sum_{ij} p_{ij}(1-p_{ij}) */ int i, j, *my_iindx; double d = 0; if (pr == NULL) { vrna_message_warning("mean_bp_dist: " "pr == NULL. You need to call pf_fold() before mean_bp_dist()"); return d; } my_iindx = vrna_idx_row_wise(length); for (i = 1; i <= length; i++) for (j = i + TURN + 1; j <= length; j++) d += pr[my_iindx[i] - j] * (1 - pr[my_iindx[i] - j]); free(my_iindx); return 2 * d; } /* get the free energy of a subsequence from the q[] array */ PUBLIC double get_subseq_F(int i, int j) { if (backward_compat_compound) if (backward_compat_compound->exp_matrices) if (backward_compat_compound->exp_matrices->q) { int *my_iindx = backward_compat_compound->iindx; vrna_exp_param_t *pf_params = backward_compat_compound->exp_params; FLT_OR_DBL *q = backward_compat_compound->exp_matrices->q; return (-log(q[my_iindx[i] - j]) - (j - i + 1) * log(pf_params->pf_scale)) * pf_params->kT / 1000.0; } vrna_message_warning("get_subseq_F: " "call pf_fold() to fill q[] array before calling get_subseq_F()"); return 0.; /* we will never get to this point */ } /*----------------------------------------------------------------------*/ PUBLIC double expHairpinEnergy(int u, int type, short si1, short sj1, const char *string) { /* compute Boltzmann weight of a hairpin loop, multiply by scale[u+2] */ vrna_exp_param_t *pf_params = backward_compat_compound->exp_params; double q, kT; kT = pf_params->kT; /* kT in cal/mol */ if (u <= 30) q = pf_params->exphairpin[u]; else q = pf_params->exphairpin[30] * exp(-(pf_params->lxc * log(u / 30.)) * 10. / kT); if ((tetra_loop) && (u == 4)) { char tl[7] = { 0 }, *ts; strncpy(tl, string, 6); if ((ts = strstr(pf_params->Tetraloops, tl))) return pf_params->exptetra[(ts - pf_params->Tetraloops) / 7]; } if ((tetra_loop) && (u == 6)) { char tl[9] = { 0 }, *ts; strncpy(tl, string, 6); if ((ts = strstr(pf_params->Hexaloops, tl))) return pf_params->exphex[(ts - pf_params->Hexaloops) / 9]; } if (u == 3) { char tl[6] = { 0 }, *ts; strncpy(tl, string, 5); if ((ts = strstr(pf_params->Triloops, tl))) return pf_params->exptri[(ts - pf_params->Triloops) / 6]; if (type > 2) q *= pf_params->expTermAU; } else { /* no mismatches for tri-loops */ q *= pf_params->expmismatchH[type][si1][sj1]; } return q; } PUBLIC double expLoopEnergy(int u1, int u2, int type, int type2, short si1, short sj1, short sp1, short sq1) { /* compute Boltzmann weight of interior loop, * multiply by scale[u1+u2+2] for scaling */ double z = 0; int no_close = 0; vrna_exp_param_t *pf_params = backward_compat_compound->exp_params; if ((no_closingGU) && ((type2 == 3) || (type2 == 4) || (type == 2) || (type == 4))) no_close = 1; if ((u1 == 0) && (u2 == 0)) { /* stack */ z = pf_params->expstack[type][type2]; } else if (no_close == 0) { if ((u1 == 0) || (u2 == 0)) { /* bulge */ int u; u = (u1 == 0) ? 
u2 : u1; z = pf_params->expbulge[u]; if (u2 + u1 == 1) { z *= pf_params->expstack[type][type2]; } else { if (type > 2) z *= pf_params->expTermAU; if (type2 > 2) z *= pf_params->expTermAU; } } else { /* interior loop */ if (u1 + u2 == 2) { /* size 2 is special */ z = pf_params->expint11[type][type2][si1][sj1]; } else if ((u1 == 1) && (u2 == 2)) { z = pf_params->expint21[type][type2][si1][sq1][sj1]; } else if ((u1 == 2) && (u2 == 1)) { z = pf_params->expint21[type2][type][sq1][si1][sp1]; } else if ((u1 == 2) && (u2 == 2)) { z = pf_params->expint22[type][type2][si1][sp1][sq1][sj1]; } else if (((u1 == 2) && (u2 == 3)) || ((u1 == 3) && (u2 == 2))) { /*2-3 is special*/ z = pf_params->expinternal[5] * pf_params->expmismatch23I[type][si1][sj1] * pf_params->expmismatch23I[type2][sq1][sp1]; z *= pf_params->expninio[2][1]; } else if ((u1 == 1) || (u2 == 1)) { /*1-n is special*/ z = pf_params->expinternal[u1 + u2] * pf_params->expmismatch1nI[type][si1][sj1] * pf_params->expmismatch1nI[type2][sq1][sp1]; z *= pf_params->expninio[2][abs(u1 - u2)]; } else { z = pf_params->expinternal[u1 + u2] * pf_params->expmismatchI[type][si1][sj1] * pf_params->expmismatchI[type2][sq1][sp1]; z *= pf_params->expninio[2][abs(u1 - u2)]; } } } return z; } PUBLIC void init_pf_circ_fold(int length) { /* DO NOTHING */ } PUBLIC void init_pf_fold(int length) { /* DO NOTHING */ } /** *** Allocate memory for all matrices and other stuff **/ PUBLIC void free_pf_arrays(void) { if (backward_compat_compound && backward_compat) { vrna_fold_compound_free(backward_compat_compound); backward_compat_compound = NULL; backward_compat = 0; iindx = NULL; } } PUBLIC FLT_OR_DBL * export_bppm(void) { if (backward_compat_compound) if (backward_compat_compound->exp_matrices) if (backward_compat_compound->exp_matrices->probs) return backward_compat_compound->exp_matrices->probs; return NULL; } /*-------------------------------------------------------------------------*/ /* make arrays used for pf_fold available to other routines */ PUBLIC int get_pf_arrays(short **S_p, short **S1_p, char **ptype_p, FLT_OR_DBL **qb_p, FLT_OR_DBL **qm_p, FLT_OR_DBL **q1k_p, FLT_OR_DBL **qln_p) { if (backward_compat_compound) { if (backward_compat_compound->exp_matrices) if (backward_compat_compound->exp_matrices->qb) { *S_p = backward_compat_compound->sequence_encoding2; *S1_p = backward_compat_compound->sequence_encoding; *ptype_p = backward_compat_compound->ptype_pf_compat; *qb_p = backward_compat_compound->exp_matrices->qb; *qm_p = backward_compat_compound->exp_matrices->qm; *q1k_p = backward_compat_compound->exp_matrices->q1k; *qln_p = backward_compat_compound->exp_matrices->qln; return 1; } } return 0; } /*-----------------------------------------------------------------*/ PUBLIC float pf_fold(const char *sequence, char *structure) { return wrap_pf_fold(sequence, structure, NULL, do_backtrack, fold_constrained, 0); } PUBLIC float pf_circ_fold(const char *sequence, char *structure) { return wrap_pf_fold(sequence, structure, NULL, do_backtrack, fold_constrained, 1); } PUBLIC float pf_fold_par(const char *sequence, char *structure, vrna_exp_param_t *parameters, int calculate_bppm, int is_constrained, int is_circular) { return wrap_pf_fold(sequence, structure, parameters, calculate_bppm, is_constrained, is_circular); } PUBLIC char * pbacktrack(char *seq) { int n = (int)strlen(seq); return vrna_pbacktrack5(backward_compat_compound, n); } PUBLIC char * pbacktrack5(char *seq, int length) { /* the seq parameter must no differ to the one stored globally anyway, so we just 
ignore it */ return vrna_pbacktrack5(backward_compat_compound, length); } PUBLIC char * pbacktrack_circ(char *seq) { char *structure; vrna_md_t *md; structure = NULL; if (backward_compat_compound) { md = &(backward_compat_compound->exp_params->model_details); if (md->circ && backward_compat_compound->exp_matrices->qm2) structure = vrna_pbacktrack(backward_compat_compound); } return structure; } PUBLIC void update_pf_params(int length) { if (backward_compat_compound && backward_compat) { vrna_md_t md; set_model_details(&md); vrna_exp_params_reset(backward_compat_compound, &md); /* compatibility with RNAup, may be removed sometime */ pf_scale = backward_compat_compound->exp_params->pf_scale; } } PUBLIC void update_pf_params_par(int length, vrna_exp_param_t *parameters) { if (backward_compat_compound && backward_compat) { vrna_md_t md; if (parameters) { vrna_exp_params_subst(backward_compat_compound, parameters); } else { set_model_details(&md); vrna_exp_params_reset(backward_compat_compound, &md); } /* compatibility with RNAup, may be removed sometime */ pf_scale = backward_compat_compound->exp_params->pf_scale; } } PUBLIC char * get_centroid_struct_gquad_pr(int length, double *dist) { return vrna_centroid(backward_compat_compound, dist); } PUBLIC void assign_plist_gquad_from_pr(vrna_ep_t **pl, int length, /* ignored */ double cut_off) { if (!backward_compat_compound) *pl = NULL; else if (!backward_compat_compound->exp_matrices->probs) *pl = NULL; else *pl = vrna_plist_from_probs(backward_compat_compound, cut_off); } PUBLIC double mean_bp_distance(int length) { if (backward_compat_compound) if (backward_compat_compound->exp_matrices) if (backward_compat_compound->exp_matrices->probs) return vrna_mean_bp_distance(backward_compat_compound); vrna_message_warning("mean_bp_distance: " "you need to call vrna_pf_fold first"); return 0.; /* we will never get to this point */ } PUBLIC double mean_bp_distance_pr(int length, FLT_OR_DBL *p) { double d = 0; int *index = vrna_idx_row_wise((unsigned int)length); if (p == NULL) { vrna_message_warning("mean_bp_distance_pr: " "p == NULL. You need to supply a valid probability matrix for mean_bp_distance_pr()"); return d; } d = wrap_mean_bp_distance(p, length, index, TURN); free(index); return d; } #endif
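/*
 * Editor's note (added for clarity; not part of the original pf_fold.c):
 * the identity used by wrap_mean_bp_distance() and mean_bp_dist() above
 * follows from writing the base-pair distance between two structures as a
 * sum of indicators over all pairs (i,j) with i < j:
 *
 *   d(S_a, S_b) = sum_{i<j} | 1[(i,j) in S_a] - 1[(i,j) in S_b] |
 *
 * For two independent draws from the ensemble, the probability that the
 * pair (i,j) occurs in exactly one of the two structures is
 * p_ij (1 - p_ij) + (1 - p_ij) p_ij = 2 p_ij (1 - p_ij), hence
 *
 *   <d> = 2 * sum_{i<j} p_ij (1 - p_ij),
 *
 * which is why the loops accumulate p*(1-p) over i < j only and the
 * functions return 2*d.
 */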
/* * partiton function for single RNA secondary structures * * Simplified interfaces and backward compatibility wrappers * * Ivo L Hofacker + Ronny Lorenz Vienna RNA package */ #ifdef HAVE_CONFIG_H #include "config.h" #endif /* ########################################### */ /* # deprecated functions below # */ /* ########################################### */ #ifndef VRNA_DISABLE_BACKWARD_COMPATIBILITY #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <float.h> /* #defines FLT_MAX ... */ #include <limits.h> #include "ViennaRNA/utils/basic.h" #include "ViennaRNA/params/default.h" #include "ViennaRNA/fold_vars.h" #include "ViennaRNA/loops/all.h" #include "ViennaRNA/gquad.h" #include "ViennaRNA/constraints/hard.h" #include "ViennaRNA/constraints/soft.h" #include "ViennaRNA/mfe.h" #include "ViennaRNA/part_func.h" /* * ################################# # GLOBAL VARIABLES # * ################################# */ PUBLIC int st_back = 0; /* * ################################# # PRIVATE VARIABLES # * ################################# */ /* some backward compatibility stuff */ PRIVATE vrna_fold_compound_t *backward_compat_compound = NULL; PRIVATE int backward_compat = 0; /* * ################################# # PRIVATE FUNCTION DECLARATIONS # * ################################# */ PRIVATE float wrap_pf_fold(const char *sequence, char *structure, vrna_exp_param_t * parameters, int calculate_bppm, int is_constrained, int is_circular); PRIVATE double wrap_mean_bp_distance(FLT_OR_DBL * p, int length, int *index, int turn); /* * ################################# # BEGIN OF FUNCTION DEFINITIONS # * ################################# */ PRIVATE double wrap_mean_bp_distance(FLT_OR_DBL * p, int length, int *index, int turn) { int i, j; double d = 0.; /* compute the mean base pair distance in the thermodynamic ensemble */ /* * <d> = \sum_{a,b} p_a p_b d(S_a,S_b) this can be computed from the pair * probs p_ij as <d> = \sum_{ij} p_{ij}(1-p_{ij}) */ for (i = 1; i <= length; i++) for (j = i + turn + 1; j <= length; j++) d += p[index[i] - j] * (1 - p[index[i] - j]); return 2 * d; } PRIVATE float wrap_pf_fold(const char *sequence, char *structure, vrna_exp_param_t * parameters, int calculate_bppm, int is_constrained, int is_circular) { vrna_fold_compound_t *vc; vrna_md_t md; vc = NULL; /* * we need vrna_exp_param_t datastructure to correctly init default hard * constraints */ if (parameters) md = parameters->model_details; else set_model_details(&md); /* get global default parameters */ md.circ = is_circular; md.compute_bpp = calculate_bppm; vc = vrna_fold_compound(sequence, &md, VRNA_OPTION_DEFAULT); /* prepare exp_params and set global pf_scale */ vc->exp_params = vrna_exp_params(&(vc->params->model_details)); vc->exp_params->pf_scale = pf_scale; if (is_constrained && structure) { unsigned int constraint_options = 0; constraint_options |= VRNA_CONSTRAINT_DB | VRNA_CONSTRAINT_DB_PIPE | VRNA_CONSTRAINT_DB_DOT | VRNA_CONSTRAINT_DB_X | VRNA_CONSTRAINT_DB_ANG_BRACK | VRNA_CONSTRAINT_DB_RND_BRACK; vrna_constraints_add(vc, (const char *)structure, constraint_options); } if (backward_compat_compound && backward_compat) vrna_fold_compound_free(backward_compat_compound); backward_compat_compound = vc; backward_compat = 1; iindx = backward_compat_compound->iindx; return vrna_pf(vc, structure); } PUBLIC vrna_ep_t * stackProb(double cutoff) { if (!(backward_compat_compound && backward_compat)) { vrna_message_warning("stackProb: " "run pf_fold() first!"); return NULL; } else if 
(!backward_compat_compound->exp_matrices->probs) { vrna_message_warning("stackProb: " "probs == NULL!"); return NULL; } return vrna_stack_prob(backward_compat_compound, cutoff); } PUBLIC char * centroid(int length, double *dist) { if (pr == NULL) { vrna_message_warning("centroid: " "pr == NULL. You need to call pf_fold() before centroid()"); return NULL; } return vrna_centroid_from_probs(length, dist, pr); } PUBLIC double mean_bp_dist(int length) { /* compute the mean base pair distance in the thermodynamic ensemble */ /* * <d> = \sum_{a,b} p_a p_b d(S_a,S_b) this can be computed from the pair * probs p_ij as <d> = \sum_{ij} p_{ij}(1-p_{ij}) */ int i, j, *my_iindx; double d = 0; if (pr == NULL) { vrna_message_warning("mean_bp_dist: " "pr == NULL. You need to call pf_fold() before mean_bp_dist()"); return d; } my_iindx = vrna_idx_row_wise(length); for (i = 1; i <= length; i++) for (j = i + TURN + 1; j <= length; j++) d += pr[my_iindx[i] - j] * (1 - pr[my_iindx[i] - j]); free(my_iindx); return 2 * d; } /* get the free energy of a subsequence from the q[] array */ PUBLIC double get_subseq_F(int i, int j) { if (backward_compat_compound) if (backward_compat_compound->exp_matrices) if (backward_compat_compound->exp_matrices->q) { int *my_iindx = backward_compat_compound->iindx; vrna_exp_param_t *pf_params = backward_compat_compound->exp_params; FLT_OR_DBL *q = backward_compat_compound->exp_matrices->q; return (-log(q[my_iindx[i] - j]) - (j - i + 1) * log(pf_params->pf_scale)) * pf_params->kT / 1000.0; } vrna_message_warning("get_subseq_F: " "call pf_fold() to fill q[] array before calling get_subseq_F()"); return 0.; /* we will never get to this point */ } /*----------------------------------------------------------------------*/ PUBLIC double expHairpinEnergy(int u, int type, short si1, short sj1, const char *string) { /* compute Boltzmann weight of a hairpin loop, multiply by scale[u+2] */ vrna_exp_param_t *pf_params = backward_compat_compound->exp_params; double q, kT; kT = pf_params->kT; /* kT in cal/mol */ if (u <= 30) q = pf_params->exphairpin[u]; else q = pf_params->exphairpin[30] * exp(-(pf_params->lxc * log(u / 30.)) * 10. / kT); if ((tetra_loop) && (u == 4)) { char tl[7] = { 0 }, *ts; strncpy(tl, string, 6); if ((ts = strstr(pf_params->Tetraloops, tl))) return pf_params->exptetra[(ts - pf_params->Tetraloops) / 7]; } if ((tetra_loop) && (u == 6)) { char tl[9] = { 0 }, *ts; strncpy(tl, string, 6); if ((ts = strstr(pf_params->Hexaloops, tl))) return pf_params->exphex[(ts - pf_params->Hexaloops) / 9]; } if (u == 3) { char tl[6] = { 0 }, *ts; strncpy(tl, string, 5); if ((ts = strstr(pf_params->Triloops, tl))) return pf_params->exptri[(ts - pf_params->Triloops) / 6]; if (type > 2) q *= pf_params->expTermAU; } else { /* no mismatches for tri-loops */ q *= pf_params->expmismatchH[type][si1][sj1]; } return q; } PUBLIC double expLoopEnergy(int u1, int u2, int type, int type2, short si1, short sj1, short sp1, short sq1) { /* * compute Boltzmann weight of interior loop, multiply by scale[u1+u2+2] * for scaling */ double z = 0; int no_close = 0; vrna_exp_param_t *pf_params = backward_compat_compound->exp_params; if ((no_closingGU) && ((type2 == 3) || (type2 == 4) || (type == 2) || (type == 4))) no_close = 1; if ((u1 == 0) && (u2 == 0)) { /* stack */ z = pf_params->expstack[type][type2]; } else if (no_close == 0) { if ((u1 == 0) || (u2 == 0)) { /* bulge */ int u; u = (u1 == 0) ? 
u2 : u1; z = pf_params->expbulge[u]; if (u2 + u1 == 1) { z *= pf_params->expstack[type][type2]; } else { if (type > 2) z *= pf_params->expTermAU; if (type2 > 2) z *= pf_params->expTermAU; } } else { /* interior loop */ if (u1 + u2 == 2) { /* size 2 is special */ z = pf_params->expint11[type][type2][si1][sj1]; } else if ((u1 == 1) && (u2 == 2)) { z = pf_params->expint21[type][type2][si1][sq1][sj1]; } else if ((u1 == 2) && (u2 == 1)) { z = pf_params->expint21[type2][type][sq1][si1][sp1]; } else if ((u1 == 2) && (u2 == 2)) { z = pf_params->expint22[type][type2][si1][sp1][sq1][sj1]; } else if (((u1 == 2) && (u2 == 3)) || ((u1 == 3) && (u2 == 2))) { /* 2-3 is special */ z = pf_params->expinternal[5] * pf_params->expmismatch23I[type][si1][sj1] * pf_params->expmismatch23I[type2][sq1][sp1]; z *= pf_params->expninio[2][1]; } else if ((u1 == 1) || (u2 == 1)) { /* 1-n is special */ z = pf_params->expinternal[u1 + u2] * pf_params->expmismatch1nI[type][si1][sj1] * pf_params->expmismatch1nI[type2][sq1][sp1]; z *= pf_params->expninio[2][abs(u1 - u2)]; } else { z = pf_params->expinternal[u1 + u2] * pf_params->expmismatchI[type][si1][sj1] * pf_params->expmismatchI[type2][sq1][sp1]; z *= pf_params->expninio[2][abs(u1 - u2)]; } } } return z; } PUBLIC void init_pf_circ_fold(int length) { /* DO NOTHING */ } PUBLIC void init_pf_fold(int length) { /* DO NOTHING */ } /** *** Allocate memory for all matrices and other stuff **/ PUBLIC void free_pf_arrays(void) { if (backward_compat_compound && backward_compat) { vrna_fold_compound_free(backward_compat_compound); backward_compat_compound = NULL; backward_compat = 0; iindx = NULL; } } PUBLIC FLT_OR_DBL * export_bppm(void) { if (backward_compat_compound) if (backward_compat_compound->exp_matrices) if (backward_compat_compound->exp_matrices->probs) return backward_compat_compound->exp_matrices->probs; return NULL; } /*-------------------------------------------------------------------------*/ /* make arrays used for pf_fold available to other routines */ PUBLIC int get_pf_arrays(short **S_p, short **S1_p, char **ptype_p, FLT_OR_DBL ** qb_p, FLT_OR_DBL ** qm_p, FLT_OR_DBL ** q1k_p, FLT_OR_DBL ** qln_p) { if (backward_compat_compound) { if (backward_compat_compound->exp_matrices) if (backward_compat_compound->exp_matrices->qb) { *S_p = backward_compat_compound->sequence_encoding2; *S1_p = backward_compat_compound->sequence_encoding; *ptype_p = backward_compat_compound->ptype_pf_compat; *qb_p = backward_compat_compound->exp_matrices->qb; *qm_p = backward_compat_compound->exp_matrices->qm; *q1k_p = backward_compat_compound->exp_matrices->q1k; *qln_p = backward_compat_compound->exp_matrices->qln; return 1; } } return 0; } /*-----------------------------------------------------------------*/ PUBLIC float pf_fold(const char *sequence, char *structure) { return wrap_pf_fold(sequence, structure, NULL, do_backtrack, fold_constrained, 0); } PUBLIC float pf_circ_fold(const char *sequence, char *structure) { return wrap_pf_fold(sequence, structure, NULL, do_backtrack, fold_constrained, 1); } PUBLIC float pf_fold_par(const char *sequence, char *structure, vrna_exp_param_t * parameters, int calculate_bppm, int is_constrained, int is_circular) { return wrap_pf_fold(sequence, structure, parameters, calculate_bppm, is_constrained, is_circular); } PUBLIC char * pbacktrack(char *seq) { int n = (int)strlen(seq); return vrna_pbacktrack5(backward_compat_compound, n); } PUBLIC char * pbacktrack5(char *seq, int length) { /* * the seq parameter must no differ to the one stored globally anyway, 
so * we just ignore it */ return vrna_pbacktrack5(backward_compat_compound, length); } PUBLIC char * pbacktrack_circ(char *seq) { char *structure; vrna_md_t *md; structure = NULL; if (backward_compat_compound) { md = &(backward_compat_compound->exp_params->model_details); if (md->circ && backward_compat_compound->exp_matrices->qm2) structure = vrna_pbacktrack(backward_compat_compound); } return structure; } PUBLIC void update_pf_params(int length) { if (backward_compat_compound && backward_compat) { vrna_md_t md; set_model_details(&md); vrna_exp_params_reset(backward_compat_compound, &md); /* compatibility with RNAup, may be removed sometime */ pf_scale = backward_compat_compound->exp_params->pf_scale; } } PUBLIC void update_pf_params_par(int length, vrna_exp_param_t * parameters) { if (backward_compat_compound && backward_compat) { vrna_md_t md; if (parameters) { vrna_exp_params_subst(backward_compat_compound, parameters); } else { set_model_details(&md); vrna_exp_params_reset(backward_compat_compound, &md); } /* compatibility with RNAup, may be removed sometime */ pf_scale = backward_compat_compound->exp_params->pf_scale; } } PUBLIC char * get_centroid_struct_gquad_pr(int length, double *dist) { return vrna_centroid(backward_compat_compound, dist); } PUBLIC void assign_plist_gquad_from_pr(vrna_ep_t ** pl, int length, /* ignored */ double cut_off) { if (!backward_compat_compound) *pl = NULL; else if (!backward_compat_compound->exp_matrices->probs) *pl = NULL; else *pl = vrna_plist_from_probs(backward_compat_compound, cut_off); } PUBLIC double mean_bp_distance(int length) { if (backward_compat_compound) if (backward_compat_compound->exp_matrices) if (backward_compat_compound->exp_matrices->probs) return vrna_mean_bp_distance(backward_compat_compound); vrna_message_warning("mean_bp_distance: " "you need to call vrna_pf_fold first"); return 0.; /* we will never get to this point */ } PUBLIC double mean_bp_distance_pr(int length, FLT_OR_DBL * p) { double d = 0; int *index = vrna_idx_row_wise((unsigned int)length); if (p == NULL) { vrna_message_warning("mean_bp_distance_pr: " "p == NULL. You need to supply a valid probability matrix for mean_bp_distance_pr()"); return d; } d = wrap_mean_bp_distance(p, length, index, TURN); free(index); return d; } #endif
/* * partiton function for single RNA secondary structures * * Simplified interfaces and backward compatibility wrappers * * Ivo L Hofacker + Ronny Lorenz Vienna RNA package */ #ifdef HAVE_CONFIG_H #include "config.h" #endif /* ########################################### */ /* # deprecated functions below # */ /* ########################################### */ #ifndef VRNA_DISABLE_BACKWARD_COMPATIBILITY #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <float.h> /* #defines FLT_MAX ... */ #include <limits.h> #include "ViennaRNA/utils/basic.h" #include "ViennaRNA/params/default.h" #include "ViennaRNA/fold_vars.h" #include "ViennaRNA/loops/all.h" #include "ViennaRNA/gquad.h" #include "ViennaRNA/constraints/hard.h" #include "ViennaRNA/constraints/soft.h" #include "ViennaRNA/mfe.h" #include "ViennaRNA/part_func.h" #ifdef _OPENMP #include <omp.h> #endif /* * ################################# # GLOBAL VARIABLES # * ################################# */ PUBLIC int st_back = 0; /* * ################################# # PRIVATE VARIABLES # * ################################# */ /* some backward compatibility stuff */ PRIVATE vrna_fold_compound_t *backward_compat_compound = NULL; PRIVATE int backward_compat = 0; #ifdef _OPENMP #pragma omp threadprivate(backward_compat_compound, backward_compat) #endif /* * ################################# # PRIVATE FUNCTION DECLARATIONS # * ################################# */ PRIVATE float wrap_pf_fold(const char *sequence, char *structure, vrna_exp_param_t * parameters, int calculate_bppm, int is_constrained, int is_circular); PRIVATE double wrap_mean_bp_distance(FLT_OR_DBL * p, int length, int *index, int turn); /* * ################################# # BEGIN OF FUNCTION DEFINITIONS # * ################################# */ PRIVATE double wrap_mean_bp_distance(FLT_OR_DBL * p, int length, int *index, int turn) { int i, j; double d = 0.; /* compute the mean base pair distance in the thermodynamic ensemble */ /* * <d> = \sum_{a,b} p_a p_b d(S_a,S_b) this can be computed from the pair * probs p_ij as <d> = \sum_{ij} p_{ij}(1-p_{ij}) */ for (i = 1; i <= length; i++) for (j = i + turn + 1; j <= length; j++) d += p[index[i] - j] * (1 - p[index[i] - j]); return 2 * d; } PRIVATE float wrap_pf_fold(const char *sequence, char *structure, vrna_exp_param_t * parameters, int calculate_bppm, int is_constrained, int is_circular) { vrna_fold_compound_t *vc; vrna_md_t md; vc = NULL; /* * we need vrna_exp_param_t datastructure to correctly init default hard * constraints */ if (parameters) md = parameters->model_details; else set_model_details(&md); /* get global default parameters */ md.circ = is_circular; md.compute_bpp = calculate_bppm; vc = vrna_fold_compound(sequence, &md, VRNA_OPTION_DEFAULT); /* prepare exp_params and set global pf_scale */ vc->exp_params = vrna_exp_params(&(vc->params->model_details)); vc->exp_params->pf_scale = pf_scale; if (is_constrained && structure) { unsigned int constraint_options = 0; constraint_options |= VRNA_CONSTRAINT_DB | VRNA_CONSTRAINT_DB_PIPE | VRNA_CONSTRAINT_DB_DOT | VRNA_CONSTRAINT_DB_X | VRNA_CONSTRAINT_DB_ANG_BRACK | VRNA_CONSTRAINT_DB_RND_BRACK; vrna_constraints_add(vc, (const char *)structure, constraint_options); } if (backward_compat_compound && backward_compat) vrna_fold_compound_free(backward_compat_compound); backward_compat_compound = vc; backward_compat = 1; iindx = backward_compat_compound->iindx; return vrna_pf(vc, structure); } PUBLIC vrna_ep_t * stackProb(double cutoff) { if 
(!(backward_compat_compound && backward_compat)) { vrna_message_warning("stackProb: " "run pf_fold() first!"); return NULL; } else if (!backward_compat_compound->exp_matrices->probs) { vrna_message_warning("stackProb: " "probs == NULL!"); return NULL; } return vrna_stack_prob(backward_compat_compound, cutoff); } PUBLIC char * centroid(int length, double *dist) { if (pr == NULL) { vrna_message_warning("centroid: " "pr == NULL. You need to call pf_fold() before centroid()"); return NULL; } return vrna_centroid_from_probs(length, dist, pr); } PUBLIC double mean_bp_dist(int length) { /* compute the mean base pair distance in the thermodynamic ensemble */ /* * <d> = \sum_{a,b} p_a p_b d(S_a,S_b) this can be computed from the pair * probs p_ij as <d> = \sum_{ij} p_{ij}(1-p_{ij}) */ int i, j, *my_iindx; double d = 0; if (pr == NULL) { vrna_message_warning("mean_bp_dist: " "pr == NULL. You need to call pf_fold() before mean_bp_dist()"); return d; } my_iindx = vrna_idx_row_wise(length); for (i = 1; i <= length; i++) for (j = i + TURN + 1; j <= length; j++) d += pr[my_iindx[i] - j] * (1 - pr[my_iindx[i] - j]); free(my_iindx); return 2 * d; } /* get the free energy of a subsequence from the q[] array */ PUBLIC double get_subseq_F(int i, int j) { if (backward_compat_compound) if (backward_compat_compound->exp_matrices) if (backward_compat_compound->exp_matrices->q) { int *my_iindx = backward_compat_compound->iindx; vrna_exp_param_t *pf_params = backward_compat_compound->exp_params; FLT_OR_DBL *q = backward_compat_compound->exp_matrices->q; return (-log(q[my_iindx[i] - j]) - (j - i + 1) * log(pf_params->pf_scale)) * pf_params->kT / 1000.0; } vrna_message_warning("get_subseq_F: " "call pf_fold() to fill q[] array before calling get_subseq_F()"); return 0.; /* we will never get to this point */ } /*----------------------------------------------------------------------*/ PUBLIC double expHairpinEnergy(int u, int type, short si1, short sj1, const char *string) { /* compute Boltzmann weight of a hairpin loop, multiply by scale[u+2] */ vrna_exp_param_t *pf_params = backward_compat_compound->exp_params; double q, kT; kT = pf_params->kT; /* kT in cal/mol */ if (u <= 30) q = pf_params->exphairpin[u]; else q = pf_params->exphairpin[30] * exp(-(pf_params->lxc * log(u / 30.)) * 10. 
/ kT); if ((tetra_loop) && (u == 4)) { char tl[7] = { 0 }, *ts; strncpy(tl, string, 6); if ((ts = strstr(pf_params->Tetraloops, tl))) return pf_params->exptetra[(ts - pf_params->Tetraloops) / 7]; } if ((tetra_loop) && (u == 6)) { char tl[9] = { 0 }, *ts; strncpy(tl, string, 6); if ((ts = strstr(pf_params->Hexaloops, tl))) return pf_params->exphex[(ts - pf_params->Hexaloops) / 9]; } if (u == 3) { char tl[6] = { 0 }, *ts; strncpy(tl, string, 5); if ((ts = strstr(pf_params->Triloops, tl))) return pf_params->exptri[(ts - pf_params->Triloops) / 6]; if (type > 2) q *= pf_params->expTermAU; } else { /* no mismatches for tri-loops */ q *= pf_params->expmismatchH[type][si1][sj1]; } return q; } PUBLIC double expLoopEnergy(int u1, int u2, int type, int type2, short si1, short sj1, short sp1, short sq1) { /* * compute Boltzmann weight of interior loop, multiply by scale[u1+u2+2] * for scaling */ double z = 0; int no_close = 0; vrna_exp_param_t *pf_params = backward_compat_compound->exp_params; if ((no_closingGU) && ((type2 == 3) || (type2 == 4) || (type == 2) || (type == 4))) no_close = 1; if ((u1 == 0) && (u2 == 0)) { /* stack */ z = pf_params->expstack[type][type2]; } else if (no_close == 0) { if ((u1 == 0) || (u2 == 0)) { /* bulge */ int u; u = (u1 == 0) ? u2 : u1; z = pf_params->expbulge[u]; if (u2 + u1 == 1) { z *= pf_params->expstack[type][type2]; } else { if (type > 2) z *= pf_params->expTermAU; if (type2 > 2) z *= pf_params->expTermAU; } } else { /* interior loop */ if (u1 + u2 == 2) { /* size 2 is special */ z = pf_params->expint11[type][type2][si1][sj1]; } else if ((u1 == 1) && (u2 == 2)) { z = pf_params->expint21[type][type2][si1][sq1][sj1]; } else if ((u1 == 2) && (u2 == 1)) { z = pf_params->expint21[type2][type][sq1][si1][sp1]; } else if ((u1 == 2) && (u2 == 2)) { z = pf_params->expint22[type][type2][si1][sp1][sq1][sj1]; } else if (((u1 == 2) && (u2 == 3)) || ((u1 == 3) && (u2 == 2))) { /* 2-3 is special */ z = pf_params->expinternal[5] * pf_params->expmismatch23I[type][si1][sj1] * pf_params->expmismatch23I[type2][sq1][sp1]; z *= pf_params->expninio[2][1]; } else if ((u1 == 1) || (u2 == 1)) { /* 1-n is special */ z = pf_params->expinternal[u1 + u2] * pf_params->expmismatch1nI[type][si1][sj1] * pf_params->expmismatch1nI[type2][sq1][sp1]; z *= pf_params->expninio[2][abs(u1 - u2)]; } else { z = pf_params->expinternal[u1 + u2] * pf_params->expmismatchI[type][si1][sj1] * pf_params->expmismatchI[type2][sq1][sp1]; z *= pf_params->expninio[2][abs(u1 - u2)]; } } } return z; } PUBLIC void init_pf_circ_fold(int length) { /* DO NOTHING */ } PUBLIC void init_pf_fold(int length) { /* DO NOTHING */ } /** *** Allocate memory for all matrices and other stuff **/ PUBLIC void free_pf_arrays(void) { if (backward_compat_compound && backward_compat) { vrna_fold_compound_free(backward_compat_compound); backward_compat_compound = NULL; backward_compat = 0; iindx = NULL; } } PUBLIC FLT_OR_DBL * export_bppm(void) { if (backward_compat_compound) if (backward_compat_compound->exp_matrices) if (backward_compat_compound->exp_matrices->probs) return backward_compat_compound->exp_matrices->probs; return NULL; } /*-------------------------------------------------------------------------*/ /* make arrays used for pf_fold available to other routines */ PUBLIC int get_pf_arrays(short **S_p, short **S1_p, char **ptype_p, FLT_OR_DBL ** qb_p, FLT_OR_DBL ** qm_p, FLT_OR_DBL ** q1k_p, FLT_OR_DBL ** qln_p) { if (backward_compat_compound) { if (backward_compat_compound->exp_matrices) if (backward_compat_compound->exp_matrices->qb) 
{ *S_p = backward_compat_compound->sequence_encoding2; *S1_p = backward_compat_compound->sequence_encoding; *ptype_p = backward_compat_compound->ptype_pf_compat; *qb_p = backward_compat_compound->exp_matrices->qb; *qm_p = backward_compat_compound->exp_matrices->qm; *q1k_p = backward_compat_compound->exp_matrices->q1k; *qln_p = backward_compat_compound->exp_matrices->qln; return 1; } } return 0; } /*-----------------------------------------------------------------*/ PUBLIC float pf_fold(const char *sequence, char *structure) { return wrap_pf_fold(sequence, structure, NULL, do_backtrack, fold_constrained, 0); } PUBLIC float pf_circ_fold(const char *sequence, char *structure) { return wrap_pf_fold(sequence, structure, NULL, do_backtrack, fold_constrained, 1); } PUBLIC float pf_fold_par(const char *sequence, char *structure, vrna_exp_param_t * parameters, int calculate_bppm, int is_constrained, int is_circular) { return wrap_pf_fold(sequence, structure, parameters, calculate_bppm, is_constrained, is_circular); } PUBLIC char * pbacktrack(char *seq) { int n = (int)strlen(seq); return vrna_pbacktrack5(backward_compat_compound, n); } PUBLIC char * pbacktrack5(char *seq, int length) { /* * the seq parameter must no differ to the one stored globally anyway, so * we just ignore it */ return vrna_pbacktrack5(backward_compat_compound, length); } PUBLIC char * pbacktrack_circ(char *seq) { char *structure; vrna_md_t *md; structure = NULL; if (backward_compat_compound) { md = &(backward_compat_compound->exp_params->model_details); if (md->circ && backward_compat_compound->exp_matrices->qm2) structure = vrna_pbacktrack(backward_compat_compound); } return structure; } PUBLIC void update_pf_params(int length) { if (backward_compat_compound && backward_compat) { vrna_md_t md; set_model_details(&md); vrna_exp_params_reset(backward_compat_compound, &md); /* compatibility with RNAup, may be removed sometime */ pf_scale = backward_compat_compound->exp_params->pf_scale; } } PUBLIC void update_pf_params_par(int length, vrna_exp_param_t * parameters) { if (backward_compat_compound && backward_compat) { vrna_md_t md; if (parameters) { vrna_exp_params_subst(backward_compat_compound, parameters); } else { set_model_details(&md); vrna_exp_params_reset(backward_compat_compound, &md); } /* compatibility with RNAup, may be removed sometime */ pf_scale = backward_compat_compound->exp_params->pf_scale; } } PUBLIC char * get_centroid_struct_gquad_pr(int length, double *dist) { return vrna_centroid(backward_compat_compound, dist); } PUBLIC void assign_plist_gquad_from_pr(vrna_ep_t ** pl, int length, /* ignored */ double cut_off) { if (!backward_compat_compound) *pl = NULL; else if (!backward_compat_compound->exp_matrices->probs) *pl = NULL; else *pl = vrna_plist_from_probs(backward_compat_compound, cut_off); } PUBLIC double mean_bp_distance(int length) { if (backward_compat_compound) if (backward_compat_compound->exp_matrices) if (backward_compat_compound->exp_matrices->probs) return vrna_mean_bp_distance(backward_compat_compound); vrna_message_warning("mean_bp_distance: " "you need to call vrna_pf_fold first"); return 0.; /* we will never get to this point */ } PUBLIC double mean_bp_distance_pr(int length, FLT_OR_DBL * p) { double d = 0; int *index = vrna_idx_row_wise((unsigned int)length); if (p == NULL) { vrna_message_warning("mean_bp_distance_pr: " "p == NULL. 
You need to supply a valid probability matrix for mean_bp_distance_pr()"); return d; } d = wrap_mean_bp_distance(p, length, index, TURN); free(index); return d; } #endif
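/*
 * Editor's example (not part of pf_fold.c): a minimal driver for the
 * deprecated wrappers defined above, sketching the intended call order:
 * pf_fold() fills the global fold compound, mean_bp_distance() then reads
 * from it, and free_pf_arrays() releases it. Header names and the example
 * sequence are assumptions; new code should prefer the
 * vrna_fold_compound()/vrna_pf() interface instead.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "ViennaRNA/part_func.h"
#include "ViennaRNA/fold_vars.h"

int main(void)
{
  const char *seq = "GGGCUAUUAGCUCAGUUGGUUAGAGCGCACCCCUGAUAAGGGUG";
  char *structure = (char *)calloc(strlen(seq) + 1, 1);

  do_backtrack = 1;                      /* also compute base pair probabilities */
  float dG = pf_fold(seq, structure);    /* ensemble free energy, fills pf arrays */
  printf("dG = %6.2f kcal/mol\n%s\n", dG, structure);
  printf("<d> = %g\n", mean_bp_distance((int)strlen(seq)));

  free_pf_arrays();                      /* release the backward-compat compound */
  free(structure);
  return 0;
}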
GB_binop__rdiv_int8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__rdiv_int8) // A.*B function (eWiseMult): GB (_AemultB_08__rdiv_int8) // A.*B function (eWiseMult): GB (_AemultB_02__rdiv_int8) // A.*B function (eWiseMult): GB (_AemultB_04__rdiv_int8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__rdiv_int8) // A*D function (colscale): GB (_AxD__rdiv_int8) // D*A function (rowscale): GB (_DxB__rdiv_int8) // C+=B function (dense accum): GB (_Cdense_accumB__rdiv_int8) // C+=b function (dense accum): GB (_Cdense_accumb__rdiv_int8) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rdiv_int8) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rdiv_int8) // C=scalar+B GB (_bind1st__rdiv_int8) // C=scalar+B' GB (_bind1st_tran__rdiv_int8) // C=A+scalar GB (_bind2nd__rdiv_int8) // C=A'+scalar GB (_bind2nd_tran__rdiv_int8) // C type: int8_t // A type: int8_t // A pattern? 0 // B type: int8_t // B pattern? 0 // BinaryOp: cij = GB_IDIV_SIGNED (bij, aij, 8) #define GB_ATYPE \ int8_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ int8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int8_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int8_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_IDIV_SIGNED (y, x, 8) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_RDIV || GxB_NO_INT8 || GxB_NO_RDIV_INT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__rdiv_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__rdiv_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__rdiv_int8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__rdiv_int8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int8_t int8_t bwork = (*((int8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__rdiv_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *restrict Cx = (int8_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__rdiv_int8) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *restrict Cx = (int8_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__rdiv_int8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int8_t alpha_scalar ; int8_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int8_t *) alpha_scalar_in)) ; 
beta_scalar = (*((int8_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__rdiv_int8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__rdiv_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__rdiv_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__rdiv_int8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__rdiv_int8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *Cx = (int8_t *) Cx_output ; int8_t x = (*((int8_t *) x_input)) ; int8_t *Bx = (int8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int8_t bij = GBX (Bx, p, false) ; Cx [p] = GB_IDIV_SIGNED (bij, x, 8) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__rdiv_int8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int8_t *Cx = (int8_t *) Cx_output ; int8_t *Ax = (int8_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int8_t aij = GBX (Ax, p, false) ; Cx [p] = GB_IDIV_SIGNED (y, aij, 8) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IDIV_SIGNED (aij, x, 8) ; \ } GrB_Info GB (_bind1st_tran__rdiv_int8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict 
A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t x = (*((const int8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IDIV_SIGNED (y, aij, 8) ; \ } GrB_Info GB (_bind2nd_tran__rdiv_int8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
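/*
 * Editor's example (not part of the generated source): the kernels above are
 * dispatched internally when user code applies reverse division to int8
 * matrices. Note the semantics visible in GB_BINOP above: rdiv(x,y) = y / x,
 * so eWiseMult with RDIV yields C(i,j) = B(i,j) / A(i,j) on the intersection
 * pattern. The GxB_RDIV_INT8 operator name is assumed to be the matching
 * built-in; everything else is the standard GraphBLAS C API.
 */
#include <stdio.h>
#include <stdint.h>
#include "GraphBLAS.h"

int main(void)
{
    GrB_init(GrB_NONBLOCKING);
    GrB_Matrix A, B, C;
    GrB_Matrix_new(&A, GrB_INT8, 2, 2);
    GrB_Matrix_new(&B, GrB_INT8, 2, 2);
    GrB_Matrix_new(&C, GrB_INT8, 2, 2);
    GrB_Matrix_setElement_INT8(A, 2, 0, 0);    /* A(0,0) = 2  */
    GrB_Matrix_setElement_INT8(B, 10, 0, 0);   /* B(0,0) = 10 */
    /* C = A .* B with rdiv: C(0,0) = B(0,0) / A(0,0) = 5 */
    GrB_Matrix_eWiseMult_BinaryOp(C, NULL, NULL, GxB_RDIV_INT8, A, B, NULL);
    int8_t c = 0;
    GrB_Matrix_extractElement_INT8(&c, C, 0, 0);
    printf("C(0,0) = %d\n", (int) c);
    GrB_Matrix_free(&A);
    GrB_Matrix_free(&B);
    GrB_Matrix_free(&C);
    GrB_finalize();
    return 0;
}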
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__rdiv_int8) // A.*B function (eWiseMult): GB (_AemultB_08__rdiv_int8) // A.*B function (eWiseMult): GB (_AemultB_02__rdiv_int8) // A.*B function (eWiseMult): GB (_AemultB_04__rdiv_int8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__rdiv_int8) // A*D function (colscale): GB (_AxD__rdiv_int8) // D*A function (rowscale): GB (_DxB__rdiv_int8) // C+=B function (dense accum): GB (_Cdense_accumB__rdiv_int8) // C+=b function (dense accum): GB (_Cdense_accumb__rdiv_int8) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rdiv_int8) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rdiv_int8) // C=scalar+B GB (_bind1st__rdiv_int8) // C=scalar+B' GB (_bind1st_tran__rdiv_int8) // C=A+scalar GB (_bind2nd__rdiv_int8) // C=A'+scalar GB (_bind2nd_tran__rdiv_int8) // C type: int8_t // A type: int8_t // A pattern? 0 // B type: int8_t // B pattern? 0 // BinaryOp: cij = GB_IDIV_SIGNED (bij, aij, 8) #define GB_ATYPE \ int8_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ int8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int8_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int8_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_IDIV_SIGNED (y, x, 8) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_RDIV || GxB_NO_INT8 || GxB_NO_RDIV_INT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__rdiv_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__rdiv_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__rdiv_int8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__rdiv_int8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int8_t int8_t bwork = (*((int8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__rdiv_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *restrict Cx = (int8_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__rdiv_int8) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *restrict Cx = (int8_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__rdiv_int8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int8_t alpha_scalar ; int8_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int8_t *) alpha_scalar_in)) ; 
beta_scalar = (*((int8_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__rdiv_int8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__rdiv_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
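    // Illustrative note (added commentary, not part of the generated file):
    // for DIV and RDIV the identity div (y, x) = y / x = rdiv (x, y) holds,
    // so a "flipped" DIV request is rewritten by the caller into a plain
    // RDIV call.  That rewrite is why this rdiv kernel never needs the
    // GB_FLIPPED = 1 branch, and why GB_BINOP_FLIP is defined as 0 above.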
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__rdiv_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__rdiv_int8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__rdiv_int8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *Cx = (int8_t *) Cx_output ; int8_t x = (*((int8_t *) x_input)) ; int8_t *Bx = (int8_t *) Bx_input ; int64_t p ; for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int8_t bij = GBX (Bx, p, false) ; Cx [p] = GB_IDIV_SIGNED (bij, x, 8) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__rdiv_int8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int8_t *Cx = (int8_t *) Cx_output ; int8_t *Ax = (int8_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int8_t aij = GBX (Ax, p, false) ; Cx [p] = GB_IDIV_SIGNED (y, aij, 8) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IDIV_SIGNED (aij, x, 8) ; \ } GrB_Info GB (_bind1st_tran__rdiv_int8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t x = (*((const int8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IDIV_SIGNED (y, aij, 8) ; \ } GrB_Info GB (_bind2nd_tran__rdiv_int8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
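The copy of this file above omits the OpenMP pragmas; the copy that follows annotates the bind1st/bind2nd loops with #pragma omp parallel for num_threads(nthreads) schedule(static). The minimal standalone sketch below shows that parallelization of the bitmap-guarded bind2nd loop; the helper name and the simplified division are assumptions for illustration only.

#include <stdint.h>

/* Standalone sketch (hypothetical helper): the bind2nd pattern
   Cx [p] = y / Ax [p] over a bitmap-guarded array, parallelized the same way
   the OpenMP variant of this file does it.  Iterations are independent, so a
   static schedule over p is safe. */
static void bind2nd_rdiv_int8_sketch (int8_t *Cx, const int8_t *Ax,
    const int8_t *Ab,       /* bitmap: Ab [p] != 0 means entry p is present  */
    int8_t y, int64_t anz, int nthreads)
{
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (Ab != NULL && !Ab [p]) continue ;       /* skip absent entries   */
        int8_t aij = Ax [p] ;
        /* simplified: the generated kernel uses GB_IDIV_SIGNED (y, aij, 8)  */
        Cx [p] = (aij == 0) ? 0 : (int8_t) (y / aij) ;
    }
}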
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__rdiv_int8) // A.*B function (eWiseMult): GB (_AemultB_08__rdiv_int8) // A.*B function (eWiseMult): GB (_AemultB_02__rdiv_int8) // A.*B function (eWiseMult): GB (_AemultB_04__rdiv_int8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__rdiv_int8) // A*D function (colscale): GB (_AxD__rdiv_int8) // D*A function (rowscale): GB (_DxB__rdiv_int8) // C+=B function (dense accum): GB (_Cdense_accumB__rdiv_int8) // C+=b function (dense accum): GB (_Cdense_accumb__rdiv_int8) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rdiv_int8) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rdiv_int8) // C=scalar+B GB (_bind1st__rdiv_int8) // C=scalar+B' GB (_bind1st_tran__rdiv_int8) // C=A+scalar GB (_bind2nd__rdiv_int8) // C=A'+scalar GB (_bind2nd_tran__rdiv_int8) // C type: int8_t // A type: int8_t // A pattern? 0 // B type: int8_t // B pattern? 0 // BinaryOp: cij = GB_IDIV_SIGNED (bij, aij, 8) #define GB_ATYPE \ int8_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ int8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int8_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int8_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_IDIV_SIGNED (y, x, 8) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_RDIV || GxB_NO_INT8 || GxB_NO_RDIV_INT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__rdiv_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__rdiv_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__rdiv_int8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__rdiv_int8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int8_t int8_t bwork = (*((int8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__rdiv_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *restrict Cx = (int8_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__rdiv_int8) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *restrict Cx = (int8_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__rdiv_int8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int8_t alpha_scalar ; int8_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int8_t *) alpha_scalar_in)) ; 
beta_scalar = (*((int8_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__rdiv_int8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__rdiv_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__rdiv_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__rdiv_int8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__rdiv_int8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *Cx = (int8_t *) Cx_output ; int8_t x = (*((int8_t *) x_input)) ; int8_t *Bx = (int8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int8_t bij = GBX (Bx, p, false) ; Cx [p] = GB_IDIV_SIGNED (bij, x, 8) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__rdiv_int8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int8_t *Cx = (int8_t *) Cx_output ; int8_t *Ax = (int8_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int8_t aij = GBX (Ax, p, false) ; Cx [p] = GB_IDIV_SIGNED (y, aij, 8) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IDIV_SIGNED (aij, x, 8) ; \ } GrB_Info GB (_bind1st_tran__rdiv_int8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict 
A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t x = (*((const int8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IDIV_SIGNED (y, aij, 8) ; \ } GrB_Info GB (_bind2nd_tran__rdiv_int8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
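For orientation, the sketch below shows how a user-level GraphBLAS call would reach the generated eWiseMult kernels above: GrB_eWiseMult with the GxB_RDIV_INT8 operator (z = y / x). The matrix dimensions and values are arbitrary, and this is a usage sketch rather than part of the generated source.

#include <stdio.h>
#include <stdint.h>
#include "GraphBLAS.h"

/* Usage sketch: C = A .* B with the reverse-divide operator, the operation
   the _AemultB_*__rdiv_int8 kernels above implement. */
int main (void)
{
    GrB_init (GrB_NONBLOCKING) ;
    GrB_Matrix A, B, C ;
    GrB_Matrix_new (&A, GrB_INT8, 4, 4) ;
    GrB_Matrix_new (&B, GrB_INT8, 4, 4) ;
    GrB_Matrix_new (&C, GrB_INT8, 4, 4) ;
    GrB_Matrix_setElement_INT8 (A, 2, 1, 1) ;     /* A(1,1) = 2              */
    GrB_Matrix_setElement_INT8 (B, 10, 1, 1) ;    /* B(1,1) = 10             */
    /* rdiv: C(1,1) = B(1,1) / A(1,1) = 5 */
    GrB_eWiseMult (C, NULL, NULL, GxB_RDIV_INT8, A, B, NULL) ;
    int8_t cij = 0 ;
    GrB_Matrix_extractElement_INT8 (&cij, C, 1, 1) ;
    printf ("C(1,1) = %d\n", (int) cij) ;
    GrB_Matrix_free (&A) ;
    GrB_Matrix_free (&B) ;
    GrB_Matrix_free (&C) ;
    GrB_finalize ( ) ;
    return (0) ;
}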
tree-vectorizer.h
/* Vectorizer Copyright (C) 2003-2019 Free Software Foundation, Inc. Contributed by Dorit Naishlos <dorit@il.ibm.com> This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ #ifndef GCC_TREE_VECTORIZER_H #define GCC_TREE_VECTORIZER_H typedef struct _stmt_vec_info *stmt_vec_info; #include "tree-data-ref.h" #include "tree-hash-traits.h" #include "target.h" /* Used for naming of new temporaries. */ enum vect_var_kind { vect_simple_var, vect_pointer_var, vect_scalar_var, vect_mask_var }; /* Defines type of operation. */ enum operation_type { unary_op = 1, binary_op, ternary_op }; /* Define type of available alignment support. */ enum dr_alignment_support { dr_unaligned_unsupported, dr_unaligned_supported, dr_explicit_realign, dr_explicit_realign_optimized, dr_aligned }; /* Define type of def-use cross-iteration cycle. */ enum vect_def_type { vect_uninitialized_def = 0, vect_constant_def = 1, vect_external_def, vect_internal_def, vect_induction_def, vect_reduction_def, vect_double_reduction_def, vect_nested_cycle, vect_unknown_def_type }; /* Define type of reduction. */ enum vect_reduction_type { TREE_CODE_REDUCTION, COND_REDUCTION, INTEGER_INDUC_COND_REDUCTION, CONST_COND_REDUCTION, /* Retain a scalar phi and use a FOLD_EXTRACT_LAST within the loop to implement: for (int i = 0; i < VF; ++i) res = cond[i] ? val[i] : res; */ EXTRACT_LAST_REDUCTION, /* Use a folding reduction within the loop to implement: for (int i = 0; i < VF; ++i) res = res OP val[i]; (with no reassocation). */ FOLD_LEFT_REDUCTION }; #define VECTORIZABLE_CYCLE_DEF(D) (((D) == vect_reduction_def) \ || ((D) == vect_double_reduction_def) \ || ((D) == vect_nested_cycle)) /* Structure to encapsulate information about a group of like instructions to be presented to the target cost model. */ struct stmt_info_for_cost { int count; enum vect_cost_for_stmt kind; enum vect_cost_model_location where; stmt_vec_info stmt_info; int misalign; }; typedef vec<stmt_info_for_cost> stmt_vector_for_cost; /* Maps base addresses to an innermost_loop_behavior that gives the maximum known alignment for that base. */ typedef hash_map<tree_operand_hash, innermost_loop_behavior *> vec_base_alignments; /************************************************************************ SLP ************************************************************************/ typedef struct _slp_tree *slp_tree; /* A computation tree of an SLP instance. Each node corresponds to a group of stmts to be packed in a SIMD stmt. */ struct _slp_tree { /* Nodes that contain def-stmts of this node statements operands. */ vec<slp_tree> children; /* A group of scalar stmts to be vectorized together. */ vec<stmt_vec_info> stmts; /* Load permutation relative to the stores, NULL if there is no permutation. */ vec<unsigned> load_permutation; /* Vectorized stmt/s. */ vec<stmt_vec_info> vec_stmts; /* Number of vector stmts that are created to replace the group of scalar stmts. 
It is calculated during the transformation phase as the number of scalar elements in one scalar iteration (GROUP_SIZE) multiplied by VF divided by vector size. */ unsigned int vec_stmts_size; /* Reference count in the SLP graph. */ unsigned int refcnt; /* The maximum number of vector elements for the subtree rooted at this node. */ poly_uint64 max_nunits; /* Whether the scalar computations use two different operators. */ bool two_operators; /* The DEF type of this node. */ enum vect_def_type def_type; }; /* SLP instance is a sequence of stmts in a loop that can be packed into SIMD stmts. */ typedef struct _slp_instance { /* The root of SLP tree. */ slp_tree root; /* Size of groups of scalar stmts that will be replaced by SIMD stmt/s. */ unsigned int group_size; /* The unrolling factor required to vectorized this SLP instance. */ poly_uint64 unrolling_factor; /* The group of nodes that contain loads of this SLP instance. */ vec<slp_tree> loads; /* The SLP node containing the reduction PHIs. */ slp_tree reduc_phis; } *slp_instance; /* Access Functions. */ #define SLP_INSTANCE_TREE(S) (S)->root #define SLP_INSTANCE_GROUP_SIZE(S) (S)->group_size #define SLP_INSTANCE_UNROLLING_FACTOR(S) (S)->unrolling_factor #define SLP_INSTANCE_LOADS(S) (S)->loads #define SLP_TREE_CHILDREN(S) (S)->children #define SLP_TREE_SCALAR_STMTS(S) (S)->stmts #define SLP_TREE_VEC_STMTS(S) (S)->vec_stmts #define SLP_TREE_NUMBER_OF_VEC_STMTS(S) (S)->vec_stmts_size #define SLP_TREE_LOAD_PERMUTATION(S) (S)->load_permutation #define SLP_TREE_TWO_OPERATORS(S) (S)->two_operators #define SLP_TREE_DEF_TYPE(S) (S)->def_type /* Describes two objects whose addresses must be unequal for the vectorized loop to be valid. */ typedef std::pair<tree, tree> vec_object_pair; /* Records that vectorization is only possible if abs (EXPR) >= MIN_VALUE. UNSIGNED_P is true if we can assume that abs (EXPR) == EXPR. */ struct vec_lower_bound { vec_lower_bound () {} vec_lower_bound (tree e, bool u, poly_uint64 m) : expr (e), unsigned_p (u), min_value (m) {} tree expr; bool unsigned_p; poly_uint64 min_value; }; /* Vectorizer state shared between different analyses like vector sizes of the same CFG region. */ struct vec_info_shared { vec_info_shared(); ~vec_info_shared(); void save_datarefs(); void check_datarefs(); /* All data references. Freed by free_data_refs, so not an auto_vec. */ vec<data_reference_p> datarefs; vec<data_reference> datarefs_copy; /* The loop nest in which the data dependences are computed. */ auto_vec<loop_p> loop_nest; /* All data dependences. Freed by free_dependence_relations, so not an auto_vec. */ vec<ddr_p> ddrs; }; /* Vectorizer state common between loop and basic-block vectorization. */ struct vec_info { enum vec_kind { bb, loop }; vec_info (vec_kind, void *, vec_info_shared *); ~vec_info (); stmt_vec_info add_stmt (gimple *); stmt_vec_info lookup_stmt (gimple *); stmt_vec_info lookup_def (tree); stmt_vec_info lookup_single_use (tree); struct dr_vec_info *lookup_dr (data_reference *); void move_dr (stmt_vec_info, stmt_vec_info); void remove_stmt (stmt_vec_info); void replace_stmt (gimple_stmt_iterator *, stmt_vec_info, gimple *); /* The type of vectorization. */ vec_kind kind; /* Shared vectorizer state. */ vec_info_shared *shared; /* The mapping of GIMPLE UID to stmt_vec_info. */ vec<stmt_vec_info> stmt_vec_infos; /* All SLP instances. */ auto_vec<slp_instance> slp_instances; /* Maps base addresses to an innermost_loop_behavior that gives the maximum known alignment for that base. 
*/ vec_base_alignments base_alignments; /* All interleaving chains of stores, represented by the first stmt in the chain. */ auto_vec<stmt_vec_info> grouped_stores; /* Cost data used by the target cost model. */ void *target_cost_data; private: stmt_vec_info new_stmt_vec_info (gimple *stmt); void set_vinfo_for_stmt (gimple *, stmt_vec_info); void free_stmt_vec_infos (); void free_stmt_vec_info (stmt_vec_info); }; struct _loop_vec_info; struct _bb_vec_info; template<> template<> inline bool is_a_helper <_loop_vec_info *>::test (vec_info *i) { return i->kind == vec_info::loop; } template<> template<> inline bool is_a_helper <_bb_vec_info *>::test (vec_info *i) { return i->kind == vec_info::bb; } /* In general, we can divide the vector statements in a vectorized loop into related groups ("rgroups") and say that for each rgroup there is some nS such that the rgroup operates on nS values from one scalar iteration followed by nS values from the next. That is, if VF is the vectorization factor of the loop, the rgroup operates on a sequence: (1,1) (1,2) ... (1,nS) (2,1) ... (2,nS) ... (VF,1) ... (VF,nS) where (i,j) represents a scalar value with index j in a scalar iteration with index i. [ We use the term "rgroup" to emphasise that this grouping isn't necessarily the same as the grouping of statements used elsewhere. For example, if we implement a group of scalar loads using gather loads, we'll use a separate gather load for each scalar load, and thus each gather load will belong to its own rgroup. ] In general this sequence will occupy nV vectors concatenated together. If these vectors have nL lanes each, the total number of scalar values N is given by: N = nS * VF = nV * nL None of nS, VF, nV and nL are required to be a power of 2. nS and nV are compile-time constants but VF and nL can be variable (if the target supports variable-length vectors). In classical vectorization, each iteration of the vector loop would handle exactly VF iterations of the original scalar loop. However, in a fully-masked loop, a particular iteration of the vector loop might handle fewer than VF iterations of the scalar loop. The vector lanes that correspond to iterations of the scalar loop are said to be "active" and the other lanes are said to be "inactive". In a fully-masked loop, many rgroups need to be masked to ensure that they have no effect for the inactive lanes. Each such rgroup needs a sequence of booleans in the same order as above, but with each (i,j) replaced by a boolean that indicates whether iteration i is active. This sequence occupies nV vector masks that again have nL lanes each. Thus the mask sequence as a whole consists of VF independent booleans that are each repeated nS times. We make the simplifying assumption that if a sequence of nV masks is suitable for one (nS,nL) pair, we can reuse it for (nS/2,nL/2) by VIEW_CONVERTing it. This holds for all current targets that support fully-masked loops. For example, suppose the scalar loop is: float *f; double *d; for (int i = 0; i < n; ++i) { f[i * 2 + 0] += 1.0f; f[i * 2 + 1] += 2.0f; d[i] += 3.0; } and suppose that vectors have 256 bits. The vectorized f accesses will belong to one rgroup and the vectorized d access to another: f rgroup: nS = 2, nV = 1, nL = 8 d rgroup: nS = 1, nV = 1, nL = 4 VF = 4 [ In this simple example the rgroups do correspond to the normal SLP grouping scheme. 
] If only the first three lanes are active, the masks we need are: f rgroup: 1 1 | 1 1 | 1 1 | 0 0 d rgroup: 1 | 1 | 1 | 0 Here we can use a mask calculated for f's rgroup for d's, but not vice versa. Thus for each value of nV, it is enough to provide nV masks, with the mask being calculated based on the highest nL (or, equivalently, based on the highest nS) required by any rgroup with that nV. We therefore represent the entire collection of masks as a two-level table, with the first level being indexed by nV - 1 (since nV == 0 doesn't exist) and the second being indexed by the mask index 0 <= i < nV. */ /* The masks needed by rgroups with nV vectors, according to the description above. */ struct rgroup_masks { /* The largest nS for all rgroups that use these masks. */ unsigned int max_nscalars_per_iter; /* The type of mask to use, based on the highest nS recorded above. */ tree mask_type; /* A vector of nV masks, in iteration order. */ vec<tree> masks; }; typedef auto_vec<rgroup_masks> vec_loop_masks; /*-----------------------------------------------------------------*/ /* Info on vectorized loops. */ /*-----------------------------------------------------------------*/ typedef struct _loop_vec_info : public vec_info { _loop_vec_info (struct loop *, vec_info_shared *); ~_loop_vec_info (); /* The loop to which this info struct refers to. */ struct loop *loop; /* The loop basic blocks. */ basic_block *bbs; /* Number of latch executions. */ tree num_itersm1; /* Number of iterations. */ tree num_iters; /* Number of iterations of the original loop. */ tree num_iters_unchanged; /* Condition under which this loop is analyzed and versioned. */ tree num_iters_assumptions; /* Threshold of number of iterations below which vectorzation will not be performed. It is calculated from MIN_PROFITABLE_ITERS and PARAM_MIN_VECT_LOOP_BOUND. */ unsigned int th; /* When applying loop versioning, the vector form should only be used if the number of scalar iterations is >= this value, on top of all the other requirements. Ignored when loop versioning is not being used. */ poly_uint64 versioning_threshold; /* Unrolling factor */ poly_uint64 vectorization_factor; /* Maximum runtime vectorization factor, or MAX_VECTORIZATION_FACTOR if there is no particular limit. */ unsigned HOST_WIDE_INT max_vectorization_factor; /* The masks that a fully-masked loop should use to avoid operating on inactive scalars. */ vec_loop_masks masks; /* If we are using a loop mask to align memory addresses, this variable contains the number of vector elements that we should skip in the first iteration of the vector loop (i.e. the number of leading elements that should be false in the first mask). */ tree mask_skip_niters; /* Type of the variables to use in the WHILE_ULT call for fully-masked loops. */ tree mask_compare_type; /* For #pragma omp simd if (x) loops the x expression. If constant 0, the loop should not be vectorized, if constant non-zero, simd_if_cond shouldn't be set and loop vectorized normally, if SSA_NAME, the loop should be versioned on that condition, using scalar loop if the condition is false and vectorized loop otherwise. */ tree simd_if_cond; /* Unknown DRs according to which loop was peeled. */ struct dr_vec_info *unaligned_dr; /* peeling_for_alignment indicates whether peeling for alignment will take place, and what the peeling factor should be: peeling_for_alignment = X means: If X=0: Peeling for alignment will not be applied. If X>0: Peel first X iterations. 
If X=-1: Generate a runtime test to calculate the number of iterations to be peeled, using the dataref recorded in the field unaligned_dr. */ int peeling_for_alignment; /* The mask used to check the alignment of pointers or arrays. */ int ptr_mask; /* Data Dependence Relations defining address ranges that are candidates for a run-time aliasing check. */ auto_vec<ddr_p> may_alias_ddrs; /* Data Dependence Relations defining address ranges together with segment lengths from which the run-time aliasing check is built. */ auto_vec<dr_with_seg_len_pair_t> comp_alias_ddrs; /* Check that the addresses of each pair of objects is unequal. */ auto_vec<vec_object_pair> check_unequal_addrs; /* List of values that are required to be nonzero. This is used to check whether things like "x[i * n] += 1;" are safe and eventually gets added to the checks for lower bounds below. */ auto_vec<tree> check_nonzero; /* List of values that need to be checked for a minimum value. */ auto_vec<vec_lower_bound> lower_bounds; /* Statements in the loop that have data references that are candidates for a runtime (loop versioning) misalignment check. */ auto_vec<stmt_vec_info> may_misalign_stmts; /* Reduction cycles detected in the loop. Used in loop-aware SLP. */ auto_vec<stmt_vec_info> reductions; /* All reduction chains in the loop, represented by the first stmt in the chain. */ auto_vec<stmt_vec_info> reduction_chains; /* Cost vector for a single scalar iteration. */ auto_vec<stmt_info_for_cost> scalar_cost_vec; /* Map of IV base/step expressions to inserted name in the preheader. */ hash_map<tree_operand_hash, tree> *ivexpr_map; /* The unrolling factor needed to SLP the loop. In case of that pure SLP is applied to the loop, i.e., no unrolling is needed, this is 1. */ poly_uint64 slp_unrolling_factor; /* Cost of a single scalar iteration. */ int single_scalar_iteration_cost; /* Is the loop vectorizable? */ bool vectorizable; /* Records whether we still have the option of using a fully-masked loop. */ bool can_fully_mask_p; /* True if have decided to use a fully-masked loop. */ bool fully_masked_p; /* When we have grouped data accesses with gaps, we may introduce invalid memory accesses. We peel the last iteration of the loop to prevent this. */ bool peeling_for_gaps; /* When the number of iterations is not a multiple of the vector size we need to peel off iterations at the end to form an epilogue loop. */ bool peeling_for_niter; /* Reductions are canonicalized so that the last operand is the reduction operand. If this places a constant into RHS1, this decanonicalizes GIMPLE for other phases, so we must track when this has occurred and fix it up. */ bool operands_swapped; /* True if there are no loop carried data dependencies in the loop. If loop->safelen <= 1, then this is always true, either the loop didn't have any loop carried data dependencies, or the loop is being vectorized guarded with some runtime alias checks, or couldn't be vectorized at all, but then this field shouldn't be used. For loop->safelen >= 2, the user has asserted that there are no backward dependencies, but there still could be loop carried forward dependencies in such loops. This flag will be false if normal vectorizer data dependency analysis would fail or require versioning for alias, but because of loop->safelen >= 2 it has been vectorized even without versioning for alias. E.g. 
in: #pragma omp simd for (int i = 0; i < m; i++) a[i] = a[i + k] * c; (or #pragma simd or #pragma ivdep) we can vectorize this and it will DTRT even for k > 0 && k < m, but without safelen we would not vectorize this, so this field would be false. */ bool no_data_dependencies; /* Mark loops having masked stores. */ bool has_mask_store; /* If if-conversion versioned this loop before conversion, this is the loop version without if-conversion. */ struct loop *scalar_loop; /* For loops being epilogues of already vectorized loops this points to the original vectorized loop. Otherwise NULL. */ _loop_vec_info *orig_loop_info; } *loop_vec_info; /* Access Functions. */ #define LOOP_VINFO_LOOP(L) (L)->loop #define LOOP_VINFO_BBS(L) (L)->bbs #define LOOP_VINFO_NITERSM1(L) (L)->num_itersm1 #define LOOP_VINFO_NITERS(L) (L)->num_iters /* Since LOOP_VINFO_NITERS and LOOP_VINFO_NITERSM1 can change after prologue peeling retain total unchanged scalar loop iterations for cost model. */ #define LOOP_VINFO_NITERS_UNCHANGED(L) (L)->num_iters_unchanged #define LOOP_VINFO_NITERS_ASSUMPTIONS(L) (L)->num_iters_assumptions #define LOOP_VINFO_COST_MODEL_THRESHOLD(L) (L)->th #define LOOP_VINFO_VERSIONING_THRESHOLD(L) (L)->versioning_threshold #define LOOP_VINFO_VECTORIZABLE_P(L) (L)->vectorizable #define LOOP_VINFO_CAN_FULLY_MASK_P(L) (L)->can_fully_mask_p #define LOOP_VINFO_FULLY_MASKED_P(L) (L)->fully_masked_p #define LOOP_VINFO_VECT_FACTOR(L) (L)->vectorization_factor #define LOOP_VINFO_MAX_VECT_FACTOR(L) (L)->max_vectorization_factor #define LOOP_VINFO_MASKS(L) (L)->masks #define LOOP_VINFO_MASK_SKIP_NITERS(L) (L)->mask_skip_niters #define LOOP_VINFO_MASK_COMPARE_TYPE(L) (L)->mask_compare_type #define LOOP_VINFO_PTR_MASK(L) (L)->ptr_mask #define LOOP_VINFO_LOOP_NEST(L) (L)->shared->loop_nest #define LOOP_VINFO_DATAREFS(L) (L)->shared->datarefs #define LOOP_VINFO_DDRS(L) (L)->shared->ddrs #define LOOP_VINFO_INT_NITERS(L) (TREE_INT_CST_LOW ((L)->num_iters)) #define LOOP_VINFO_PEELING_FOR_ALIGNMENT(L) (L)->peeling_for_alignment #define LOOP_VINFO_UNALIGNED_DR(L) (L)->unaligned_dr #define LOOP_VINFO_MAY_MISALIGN_STMTS(L) (L)->may_misalign_stmts #define LOOP_VINFO_MAY_ALIAS_DDRS(L) (L)->may_alias_ddrs #define LOOP_VINFO_COMP_ALIAS_DDRS(L) (L)->comp_alias_ddrs #define LOOP_VINFO_CHECK_UNEQUAL_ADDRS(L) (L)->check_unequal_addrs #define LOOP_VINFO_CHECK_NONZERO(L) (L)->check_nonzero #define LOOP_VINFO_LOWER_BOUNDS(L) (L)->lower_bounds #define LOOP_VINFO_GROUPED_STORES(L) (L)->grouped_stores #define LOOP_VINFO_SLP_INSTANCES(L) (L)->slp_instances #define LOOP_VINFO_SLP_UNROLLING_FACTOR(L) (L)->slp_unrolling_factor #define LOOP_VINFO_REDUCTIONS(L) (L)->reductions #define LOOP_VINFO_REDUCTION_CHAINS(L) (L)->reduction_chains #define LOOP_VINFO_TARGET_COST_DATA(L) (L)->target_cost_data #define LOOP_VINFO_PEELING_FOR_GAPS(L) (L)->peeling_for_gaps #define LOOP_VINFO_OPERANDS_SWAPPED(L) (L)->operands_swapped #define LOOP_VINFO_PEELING_FOR_NITER(L) (L)->peeling_for_niter #define LOOP_VINFO_NO_DATA_DEPENDENCIES(L) (L)->no_data_dependencies #define LOOP_VINFO_SCALAR_LOOP(L) (L)->scalar_loop #define LOOP_VINFO_HAS_MASK_STORE(L) (L)->has_mask_store #define LOOP_VINFO_SCALAR_ITERATION_COST(L) (L)->scalar_cost_vec #define LOOP_VINFO_SINGLE_SCALAR_ITERATION_COST(L) (L)->single_scalar_iteration_cost #define LOOP_VINFO_ORIG_LOOP_INFO(L) (L)->orig_loop_info #define LOOP_VINFO_SIMD_IF_COND(L) (L)->simd_if_cond #define LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT(L) \ ((L)->may_misalign_stmts.length () > 0) #define 
LOOP_REQUIRES_VERSIONING_FOR_ALIAS(L) \ ((L)->comp_alias_ddrs.length () > 0 \ || (L)->check_unequal_addrs.length () > 0 \ || (L)->lower_bounds.length () > 0) #define LOOP_REQUIRES_VERSIONING_FOR_NITERS(L) \ (LOOP_VINFO_NITERS_ASSUMPTIONS (L)) #define LOOP_REQUIRES_VERSIONING_FOR_SIMD_IF_COND(L) \ (LOOP_VINFO_SIMD_IF_COND (L)) #define LOOP_REQUIRES_VERSIONING(L) \ (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (L) \ || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (L) \ || LOOP_REQUIRES_VERSIONING_FOR_NITERS (L) \ || LOOP_REQUIRES_VERSIONING_FOR_SIMD_IF_COND (L)) #define LOOP_VINFO_NITERS_KNOWN_P(L) \ (tree_fits_shwi_p ((L)->num_iters) && tree_to_shwi ((L)->num_iters) > 0) #define LOOP_VINFO_EPILOGUE_P(L) \ (LOOP_VINFO_ORIG_LOOP_INFO (L) != NULL) #define LOOP_VINFO_ORIG_MAX_VECT_FACTOR(L) \ (LOOP_VINFO_MAX_VECT_FACTOR (LOOP_VINFO_ORIG_LOOP_INFO (L))) /* Wrapper for loop_vec_info, for tracking success/failure, where a non-NULL value signifies success, and a NULL value signifies failure, supporting propagating an opt_problem * describing the failure back up the call stack. */ typedef opt_pointer_wrapper <loop_vec_info> opt_loop_vec_info; static inline loop_vec_info loop_vec_info_for_loop (struct loop *loop) { return (loop_vec_info) loop->aux; } typedef struct _bb_vec_info : public vec_info { _bb_vec_info (gimple_stmt_iterator, gimple_stmt_iterator, vec_info_shared *); ~_bb_vec_info (); basic_block bb; gimple_stmt_iterator region_begin; gimple_stmt_iterator region_end; } *bb_vec_info; #define BB_VINFO_BB(B) (B)->bb #define BB_VINFO_GROUPED_STORES(B) (B)->grouped_stores #define BB_VINFO_SLP_INSTANCES(B) (B)->slp_instances #define BB_VINFO_DATAREFS(B) (B)->shared->datarefs #define BB_VINFO_DDRS(B) (B)->shared->ddrs #define BB_VINFO_TARGET_COST_DATA(B) (B)->target_cost_data static inline bb_vec_info vec_info_for_bb (basic_block bb) { return (bb_vec_info) bb->aux; } /*-----------------------------------------------------------------*/ /* Info on vectorized defs. */ /*-----------------------------------------------------------------*/ enum stmt_vec_info_type { undef_vec_info_type = 0, load_vec_info_type, store_vec_info_type, shift_vec_info_type, op_vec_info_type, call_vec_info_type, call_simd_clone_vec_info_type, assignment_vec_info_type, condition_vec_info_type, comparison_vec_info_type, reduc_vec_info_type, induc_vec_info_type, type_promotion_vec_info_type, type_demotion_vec_info_type, type_conversion_vec_info_type, loop_exit_ctrl_vec_info_type }; /* Indicates whether/how a variable is used in the scope of loop/basic block. */ enum vect_relevant { vect_unused_in_scope = 0, /* The def is only used outside the loop. */ vect_used_only_live, /* The def is in the inner loop, and the use is in the outer loop, and the use is a reduction stmt. */ vect_used_in_outer_by_reduction, /* The def is in the inner loop, and the use is in the outer loop (and is not part of reduction). */ vect_used_in_outer, /* defs that feed computations that end up (only) in a reduction. These defs may be used by non-reduction stmts, but eventually, any computations/values that are affected by these defs are used to compute a reduction (i.e. don't get stored to memory, for example). We use this to identify computations that we can change the order in which they are computed. 
*/ vect_used_by_reduction, vect_used_in_scope }; /* The type of vectorization that can be applied to the stmt: regular loop-based vectorization; pure SLP - the stmt is a part of SLP instances and does not have uses outside SLP instances; or hybrid SLP and loop-based - the stmt is a part of SLP instance and also must be loop-based vectorized, since it has uses outside SLP sequences. In the loop context the meanings of pure and hybrid SLP are slightly different. By saying that pure SLP is applied to the loop, we mean that we exploit only intra-iteration parallelism in the loop; i.e., the loop can be vectorized without doing any conceptual unrolling, cause we don't pack together stmts from different iterations, only within a single iteration. Loop hybrid SLP means that we exploit both intra-iteration and inter-iteration parallelism (e.g., number of elements in the vector is 4 and the slp-group-size is 2, in which case we don't have enough parallelism within an iteration, so we obtain the rest of the parallelism from subsequent iterations by unrolling the loop by 2). */ enum slp_vect_type { loop_vect = 0, pure_slp, hybrid }; /* Says whether a statement is a load, a store of a vectorized statement result, or a store of an invariant value. */ enum vec_load_store_type { VLS_LOAD, VLS_STORE, VLS_STORE_INVARIANT }; /* Describes how we're going to vectorize an individual load or store, or a group of loads or stores. */ enum vect_memory_access_type { /* An access to an invariant address. This is used only for loads. */ VMAT_INVARIANT, /* A simple contiguous access. */ VMAT_CONTIGUOUS, /* A contiguous access that goes down in memory rather than up, with no additional permutation. This is used only for stores of invariants. */ VMAT_CONTIGUOUS_DOWN, /* A simple contiguous access in which the elements need to be permuted after loading or before storing. Only used for loop vectorization; SLP uses separate permutes. */ VMAT_CONTIGUOUS_PERMUTE, /* A simple contiguous access in which the elements need to be reversed after loading or before storing. */ VMAT_CONTIGUOUS_REVERSE, /* An access that uses IFN_LOAD_LANES or IFN_STORE_LANES. */ VMAT_LOAD_STORE_LANES, /* An access in which each scalar element is loaded or stored individually. */ VMAT_ELEMENTWISE, /* A hybrid of VMAT_CONTIGUOUS and VMAT_ELEMENTWISE, used for grouped SLP accesses. Each unrolled iteration uses a contiguous load or store for the whole group, but the groups from separate iterations are combined in the same way as for VMAT_ELEMENTWISE. */ VMAT_STRIDED_SLP, /* The access uses gather loads or scatter stores. */ VMAT_GATHER_SCATTER }; struct dr_vec_info { /* The data reference itself. */ data_reference *dr; /* The statement that contains the data reference. */ stmt_vec_info stmt; /* The misalignment in bytes of the reference, or -1 if not known. */ int misalignment; /* The byte alignment that we'd ideally like the reference to have, and the value that misalignment is measured against. */ poly_uint64 target_alignment; /* If true the alignment of base_decl needs to be increased. */ bool base_misaligned; tree base_decl; }; typedef struct data_reference *dr_p; struct _stmt_vec_info { enum stmt_vec_info_type type; /* Indicates whether this stmts is part of a computation whose result is used outside the loop. */ bool live; /* Stmt is part of some pattern (computation idiom) */ bool in_pattern_p; /* True if the statement was created during pattern recognition as part of the replacement for RELATED_STMT. 
This implies that the statement isn't part of any basic block, although for convenience its gimple_bb is the same as for RELATED_STMT. */ bool pattern_stmt_p; /* Is this statement vectorizable or should it be skipped in (partial) vectorization. */ bool vectorizable; /* The stmt to which this info struct refers to. */ gimple *stmt; /* The vec_info with respect to which STMT is vectorized. */ vec_info *vinfo; /* The vector type to be used for the LHS of this statement. */ tree vectype; /* The vectorized version of the stmt. */ stmt_vec_info vectorized_stmt; /* The following is relevant only for stmts that contain a non-scalar data-ref (array/pointer/struct access). A GIMPLE stmt is expected to have at most one such data-ref. */ dr_vec_info dr_aux; /* Information about the data-ref relative to this loop nest (the loop that is being considered for vectorization). */ innermost_loop_behavior dr_wrt_vec_loop; /* For loop PHI nodes, the base and evolution part of it. This makes sure this information is still available in vect_update_ivs_after_vectorizer where we may not be able to re-analyze the PHI nodes evolution as peeling for the prologue loop can make it unanalyzable. The evolution part is still correct after peeling, but the base may have changed from the version here. */ tree loop_phi_evolution_base_unchanged; tree loop_phi_evolution_part; /* Used for various bookkeeping purposes, generally holding a pointer to some other stmt S that is in some way "related" to this stmt. Current use of this field is: If this stmt is part of a pattern (i.e. the field 'in_pattern_p' is true): S is the "pattern stmt" that represents (and replaces) the sequence of stmts that constitutes the pattern. Similarly, the related_stmt of the "pattern stmt" points back to this stmt (which is the last stmt in the original sequence of stmts that constitutes the pattern). */ stmt_vec_info related_stmt; /* Used to keep a sequence of def stmts of a pattern stmt if such exists. The sequence is attached to the original statement rather than the pattern statement. */ gimple_seq pattern_def_seq; /* List of datarefs that are known to have the same alignment as the dataref of this stmt. */ vec<dr_p> same_align_refs; /* Selected SIMD clone's function info. First vector element is SIMD clone's function decl, followed by a pair of trees (base + step) for linear arguments (pair of NULLs for other arguments). */ vec<tree> simd_clone_info; /* Classify the def of this stmt. */ enum vect_def_type def_type; /* Whether the stmt is SLPed, loop-based vectorized, or both. */ enum slp_vect_type slp_type; /* Interleaving and reduction chains info. */ /* First element in the group. */ stmt_vec_info first_element; /* Pointer to the next element in the group. */ stmt_vec_info next_element; /* The size of the group. */ unsigned int size; /* For stores, number of stores from this group seen. We vectorize the last one. */ unsigned int store_count; /* For loads only, the gap from the previous load. For consecutive loads, GAP is 1. */ unsigned int gap; /* The minimum negative dependence distance this stmt participates in or zero if none. */ unsigned int min_neg_dist; /* Not all stmts in the loop need to be vectorized. e.g, the increment of the loop induction variable and computation of array indexes. relevant indicates whether the stmt needs to be vectorized. */ enum vect_relevant relevant; /* For loads if this is a gather, for stores if this is a scatter. */ bool gather_scatter_p; /* True if this is an access with loop-invariant stride. 
*/ bool strided_p; /* For both loads and stores. */ bool simd_lane_access_p; /* Classifies how the load or store is going to be implemented for loop vectorization. */ vect_memory_access_type memory_access_type; /* For reduction loops, this is the type of reduction. */ enum vect_reduction_type v_reduc_type; /* For CONST_COND_REDUCTION, record the reduc code. */ enum tree_code const_cond_reduc_code; /* On a reduction PHI the reduction type as detected by vect_force_simple_reduction. */ enum vect_reduction_type reduc_type; /* On a reduction PHI the def returned by vect_force_simple_reduction. On the def returned by vect_force_simple_reduction the corresponding PHI. */ stmt_vec_info reduc_def; /* The number of scalar stmt references from active SLP instances. */ unsigned int num_slp_uses; /* If nonzero, the lhs of the statement could be truncated to this many bits without affecting any users of the result. */ unsigned int min_output_precision; /* If nonzero, all non-boolean input operands have the same precision, and they could each be truncated to this many bits without changing the result. */ unsigned int min_input_precision; /* If OPERATION_BITS is nonzero, the statement could be performed on an integer with the sign and number of bits given by OPERATION_SIGN and OPERATION_BITS without changing the result. */ unsigned int operation_precision; signop operation_sign; }; /* Information about a gather/scatter call. */ struct gather_scatter_info { /* The internal function to use for the gather/scatter operation, or IFN_LAST if a built-in function should be used instead. */ internal_fn ifn; /* The FUNCTION_DECL for the built-in gather/scatter function, or null if an internal function should be used instead. */ tree decl; /* The loop-invariant base value. */ tree base; /* The original scalar offset, which is a non-loop-invariant SSA_NAME. */ tree offset; /* Each offset element should be multiplied by this amount before being added to the base. */ int scale; /* The definition type for the vectorized offset. */ enum vect_def_type offset_dt; /* The type of the vectorized offset. */ tree offset_vectype; /* The type of the scalar elements after loading or before storing. */ tree element_type; /* The type of the scalar elements being loaded or stored. */ tree memory_type; }; /* Access Functions. 
*/ #define STMT_VINFO_TYPE(S) (S)->type #define STMT_VINFO_STMT(S) (S)->stmt inline loop_vec_info STMT_VINFO_LOOP_VINFO (stmt_vec_info stmt_vinfo) { if (loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (stmt_vinfo->vinfo)) return loop_vinfo; return NULL; } inline bb_vec_info STMT_VINFO_BB_VINFO (stmt_vec_info stmt_vinfo) { if (bb_vec_info bb_vinfo = dyn_cast <bb_vec_info> (stmt_vinfo->vinfo)) return bb_vinfo; return NULL; } #define STMT_VINFO_RELEVANT(S) (S)->relevant #define STMT_VINFO_LIVE_P(S) (S)->live #define STMT_VINFO_VECTYPE(S) (S)->vectype #define STMT_VINFO_VEC_STMT(S) (S)->vectorized_stmt #define STMT_VINFO_VECTORIZABLE(S) (S)->vectorizable #define STMT_VINFO_DATA_REF(S) ((S)->dr_aux.dr + 0) #define STMT_VINFO_GATHER_SCATTER_P(S) (S)->gather_scatter_p #define STMT_VINFO_STRIDED_P(S) (S)->strided_p #define STMT_VINFO_MEMORY_ACCESS_TYPE(S) (S)->memory_access_type #define STMT_VINFO_SIMD_LANE_ACCESS_P(S) (S)->simd_lane_access_p #define STMT_VINFO_VEC_REDUCTION_TYPE(S) (S)->v_reduc_type #define STMT_VINFO_VEC_CONST_COND_REDUC_CODE(S) (S)->const_cond_reduc_code #define STMT_VINFO_DR_WRT_VEC_LOOP(S) (S)->dr_wrt_vec_loop #define STMT_VINFO_DR_BASE_ADDRESS(S) (S)->dr_wrt_vec_loop.base_address #define STMT_VINFO_DR_INIT(S) (S)->dr_wrt_vec_loop.init #define STMT_VINFO_DR_OFFSET(S) (S)->dr_wrt_vec_loop.offset #define STMT_VINFO_DR_STEP(S) (S)->dr_wrt_vec_loop.step #define STMT_VINFO_DR_BASE_ALIGNMENT(S) (S)->dr_wrt_vec_loop.base_alignment #define STMT_VINFO_DR_BASE_MISALIGNMENT(S) \ (S)->dr_wrt_vec_loop.base_misalignment #define STMT_VINFO_DR_OFFSET_ALIGNMENT(S) \ (S)->dr_wrt_vec_loop.offset_alignment #define STMT_VINFO_DR_STEP_ALIGNMENT(S) \ (S)->dr_wrt_vec_loop.step_alignment #define STMT_VINFO_DR_INFO(S) \ (gcc_checking_assert ((S)->dr_aux.stmt == (S)), &(S)->dr_aux) #define STMT_VINFO_IN_PATTERN_P(S) (S)->in_pattern_p #define STMT_VINFO_RELATED_STMT(S) (S)->related_stmt #define STMT_VINFO_PATTERN_DEF_SEQ(S) (S)->pattern_def_seq #define STMT_VINFO_SAME_ALIGN_REFS(S) (S)->same_align_refs #define STMT_VINFO_SIMD_CLONE_INFO(S) (S)->simd_clone_info #define STMT_VINFO_DEF_TYPE(S) (S)->def_type #define STMT_VINFO_GROUPED_ACCESS(S) \ ((S)->dr_aux.dr && DR_GROUP_FIRST_ELEMENT(S)) #define STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED(S) (S)->loop_phi_evolution_base_unchanged #define STMT_VINFO_LOOP_PHI_EVOLUTION_PART(S) (S)->loop_phi_evolution_part #define STMT_VINFO_MIN_NEG_DIST(S) (S)->min_neg_dist #define STMT_VINFO_NUM_SLP_USES(S) (S)->num_slp_uses #define STMT_VINFO_REDUC_TYPE(S) (S)->reduc_type #define STMT_VINFO_REDUC_DEF(S) (S)->reduc_def #define DR_GROUP_FIRST_ELEMENT(S) \ (gcc_checking_assert ((S)->dr_aux.dr), (S)->first_element) #define DR_GROUP_NEXT_ELEMENT(S) \ (gcc_checking_assert ((S)->dr_aux.dr), (S)->next_element) #define DR_GROUP_SIZE(S) \ (gcc_checking_assert ((S)->dr_aux.dr), (S)->size) #define DR_GROUP_STORE_COUNT(S) \ (gcc_checking_assert ((S)->dr_aux.dr), (S)->store_count) #define DR_GROUP_GAP(S) \ (gcc_checking_assert ((S)->dr_aux.dr), (S)->gap) #define REDUC_GROUP_FIRST_ELEMENT(S) \ (gcc_checking_assert (!(S)->dr_aux.dr), (S)->first_element) #define REDUC_GROUP_NEXT_ELEMENT(S) \ (gcc_checking_assert (!(S)->dr_aux.dr), (S)->next_element) #define REDUC_GROUP_SIZE(S) \ (gcc_checking_assert (!(S)->dr_aux.dr), (S)->size) #define STMT_VINFO_RELEVANT_P(S) ((S)->relevant != vect_unused_in_scope) #define HYBRID_SLP_STMT(S) ((S)->slp_type == hybrid) #define PURE_SLP_STMT(S) ((S)->slp_type == pure_slp) #define STMT_SLP_TYPE(S) (S)->slp_type #define VECT_MAX_COST 1000 /* The 
maximum number of intermediate steps required in multi-step type conversion. */ #define MAX_INTERM_CVT_STEPS 3 #define MAX_VECTORIZATION_FACTOR INT_MAX /* Nonzero if TYPE represents a (scalar) boolean type or type in the middle-end compatible with it (unsigned precision 1 integral types). Used to determine which types should be vectorized as VECTOR_BOOLEAN_TYPE_P. */ #define VECT_SCALAR_BOOLEAN_TYPE_P(TYPE) \ (TREE_CODE (TYPE) == BOOLEAN_TYPE \ || ((TREE_CODE (TYPE) == INTEGER_TYPE \ || TREE_CODE (TYPE) == ENUMERAL_TYPE) \ && TYPE_PRECISION (TYPE) == 1 \ && TYPE_UNSIGNED (TYPE))) static inline bool nested_in_vect_loop_p (struct loop *loop, stmt_vec_info stmt_info) { return (loop->inner && (loop->inner == (gimple_bb (stmt_info->stmt))->loop_father)); } /* Return TRUE if a statement represented by STMT_INFO is a part of a pattern. */ static inline bool is_pattern_stmt_p (stmt_vec_info stmt_info) { return stmt_info->pattern_stmt_p; } /* If STMT_INFO is a pattern statement, return the statement that it replaces, otherwise return STMT_INFO itself. */ inline stmt_vec_info vect_orig_stmt (stmt_vec_info stmt_info) { if (is_pattern_stmt_p (stmt_info)) return STMT_VINFO_RELATED_STMT (stmt_info); return stmt_info; } /* Return the later statement between STMT1_INFO and STMT2_INFO. */ static inline stmt_vec_info get_later_stmt (stmt_vec_info stmt1_info, stmt_vec_info stmt2_info) { if (gimple_uid (vect_orig_stmt (stmt1_info)->stmt) > gimple_uid (vect_orig_stmt (stmt2_info)->stmt)) return stmt1_info; else return stmt2_info; } /* If STMT_INFO has been replaced by a pattern statement, return the replacement statement, otherwise return STMT_INFO itself. */ inline stmt_vec_info vect_stmt_to_vectorize (stmt_vec_info stmt_info) { if (STMT_VINFO_IN_PATTERN_P (stmt_info)) return STMT_VINFO_RELATED_STMT (stmt_info); return stmt_info; } /* Return true if BB is a loop header. */ static inline bool is_loop_header_bb_p (basic_block bb) { if (bb == (bb->loop_father)->header) return true; gcc_checking_assert (EDGE_COUNT (bb->preds) == 1); return false; } /* Return pow2 (X). */ static inline int vect_pow2 (int x) { int i, res = 1; for (i = 0; i < x; i++) res *= 2; return res; } /* Alias targetm.vectorize.builtin_vectorization_cost. */ static inline int builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost, tree vectype, int misalign) { return targetm.vectorize.builtin_vectorization_cost (type_of_cost, vectype, misalign); } /* Get cost by calling cost target builtin. */ static inline int vect_get_stmt_cost (enum vect_cost_for_stmt type_of_cost) { return builtin_vectorization_cost (type_of_cost, NULL, 0); } /* Alias targetm.vectorize.init_cost. */ static inline void * init_cost (struct loop *loop_info) { return targetm.vectorize.init_cost (loop_info); } extern void dump_stmt_cost (FILE *, void *, int, enum vect_cost_for_stmt, stmt_vec_info, int, unsigned, enum vect_cost_model_location); /* Alias targetm.vectorize.add_stmt_cost. */ static inline unsigned add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind, stmt_vec_info stmt_info, int misalign, enum vect_cost_model_location where) { unsigned cost = targetm.vectorize.add_stmt_cost (data, count, kind, stmt_info, misalign, where); if (dump_file && (dump_flags & TDF_DETAILS)) dump_stmt_cost (dump_file, data, count, kind, stmt_info, misalign, cost, where); return cost; } /* Alias targetm.vectorize.finish_cost. 
*/ static inline void finish_cost (void *data, unsigned *prologue_cost, unsigned *body_cost, unsigned *epilogue_cost) { targetm.vectorize.finish_cost (data, prologue_cost, body_cost, epilogue_cost); } /* Alias targetm.vectorize.destroy_cost_data. */ static inline void destroy_cost_data (void *data) { targetm.vectorize.destroy_cost_data (data); } inline void add_stmt_costs (void *data, stmt_vector_for_cost *cost_vec) { stmt_info_for_cost *cost; unsigned i; FOR_EACH_VEC_ELT (*cost_vec, i, cost) add_stmt_cost (data, cost->count, cost->kind, cost->stmt_info, cost->misalign, cost->where); } /*-----------------------------------------------------------------*/ /* Info on data references alignment. */ /*-----------------------------------------------------------------*/ #define DR_MISALIGNMENT_UNKNOWN (-1) #define DR_MISALIGNMENT_UNINITIALIZED (-2) inline void set_dr_misalignment (dr_vec_info *dr_info, int val) { dr_info->misalignment = val; } inline int dr_misalignment (dr_vec_info *dr_info) { int misalign = dr_info->misalignment; gcc_assert (misalign != DR_MISALIGNMENT_UNINITIALIZED); return misalign; } /* Reflects actual alignment of first access in the vectorized loop, taking into account peeling/versioning if applied. */ #define DR_MISALIGNMENT(DR) dr_misalignment (DR) #define SET_DR_MISALIGNMENT(DR, VAL) set_dr_misalignment (DR, VAL) /* Only defined once DR_MISALIGNMENT is defined. */ #define DR_TARGET_ALIGNMENT(DR) ((DR)->target_alignment) /* Return true if data access DR_INFO is aligned to its target alignment (which may be less than a full vector). */ static inline bool aligned_access_p (dr_vec_info *dr_info) { return (DR_MISALIGNMENT (dr_info) == 0); } /* Return TRUE if the alignment of the data access is known, and FALSE otherwise. */ static inline bool known_alignment_for_access_p (dr_vec_info *dr_info) { return (DR_MISALIGNMENT (dr_info) != DR_MISALIGNMENT_UNKNOWN); } /* Return the minimum alignment in bytes that the vectorized version of DR_INFO is guaranteed to have. */ static inline unsigned int vect_known_alignment_in_bytes (dr_vec_info *dr_info) { if (DR_MISALIGNMENT (dr_info) == DR_MISALIGNMENT_UNKNOWN) return TYPE_ALIGN_UNIT (TREE_TYPE (DR_REF (dr_info->dr))); if (DR_MISALIGNMENT (dr_info) == 0) return known_alignment (DR_TARGET_ALIGNMENT (dr_info)); return DR_MISALIGNMENT (dr_info) & -DR_MISALIGNMENT (dr_info); } /* Return the behavior of DR_INFO with respect to the vectorization context (which for outer loop vectorization might not be the behavior recorded in DR_INFO itself). */ static inline innermost_loop_behavior * vect_dr_behavior (dr_vec_info *dr_info) { stmt_vec_info stmt_info = dr_info->stmt; loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); if (loop_vinfo == NULL || !nested_in_vect_loop_p (LOOP_VINFO_LOOP (loop_vinfo), stmt_info)) return &DR_INNERMOST (dr_info->dr); else return &STMT_VINFO_DR_WRT_VEC_LOOP (stmt_info); } /* Return true if the vect cost model is unlimited. */ static inline bool unlimited_cost_model (loop_p loop) { if (loop != NULL && loop->force_vectorize && flag_simd_cost_model != VECT_COST_MODEL_DEFAULT) return flag_simd_cost_model == VECT_COST_MODEL_UNLIMITED; return (flag_vect_cost_model == VECT_COST_MODEL_UNLIMITED); } /* Return true if the loop described by LOOP_VINFO is fully-masked and if the first iteration should use a partial mask in order to achieve alignment. 
*/ static inline bool vect_use_loop_mask_for_alignment_p (loop_vec_info loop_vinfo) { return (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo) && LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo)); } /* Return the number of vectors of type VECTYPE that are needed to get NUNITS elements. NUNITS should be based on the vectorization factor, so it is always a known multiple of the number of elements in VECTYPE. */ static inline unsigned int vect_get_num_vectors (poly_uint64 nunits, tree vectype) { return exact_div (nunits, TYPE_VECTOR_SUBPARTS (vectype)).to_constant (); } /* Return the number of copies needed for loop vectorization when a statement operates on vectors of type VECTYPE. This is the vectorization factor divided by the number of elements in VECTYPE and is always known at compile time. */ static inline unsigned int vect_get_num_copies (loop_vec_info loop_vinfo, tree vectype) { return vect_get_num_vectors (LOOP_VINFO_VECT_FACTOR (loop_vinfo), vectype); } /* Update maximum unit count *MAX_NUNITS so that it accounts for NUNITS. *MAX_NUNITS can be 1 if we haven't yet recorded anything. */ static inline void vect_update_max_nunits (poly_uint64 *max_nunits, poly_uint64 nunits) { /* All unit counts have the form current_vector_size * X for some rational X, so two unit sizes must have a common multiple. Everything is a multiple of the initial value of 1. */ *max_nunits = force_common_multiple (*max_nunits, nunits); } /* Update maximum unit count *MAX_NUNITS so that it accounts for the number of units in vector type VECTYPE. *MAX_NUNITS can be 1 if we haven't yet recorded any vector types. */ static inline void vect_update_max_nunits (poly_uint64 *max_nunits, tree vectype) { vect_update_max_nunits (max_nunits, TYPE_VECTOR_SUBPARTS (vectype)); } /* Return the vectorization factor that should be used for costing purposes while vectorizing the loop described by LOOP_VINFO. Pick a reasonable estimate if the vectorization factor isn't known at compile time. */ static inline unsigned int vect_vf_for_cost (loop_vec_info loop_vinfo) { return estimated_poly_value (LOOP_VINFO_VECT_FACTOR (loop_vinfo)); } /* Estimate the number of elements in VEC_TYPE for costing purposes. Pick a reasonable estimate if the exact number isn't known at compile time. */ static inline unsigned int vect_nunits_for_cost (tree vec_type) { return estimated_poly_value (TYPE_VECTOR_SUBPARTS (vec_type)); } /* Return the maximum possible vectorization factor for LOOP_VINFO. */ static inline unsigned HOST_WIDE_INT vect_max_vf (loop_vec_info loop_vinfo) { unsigned HOST_WIDE_INT vf; if (LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&vf)) return vf; return MAX_VECTORIZATION_FACTOR; } /* Return the size of the value accessed by unvectorized data reference DR_INFO. This is only valid once STMT_VINFO_VECTYPE has been calculated for the associated gimple statement, since that guarantees that DR_INFO accesses either a scalar or a scalar equivalent. ("Scalar equivalent" here includes things like V1SI, which can be vectorized in the same way as a plain SI.) */ inline unsigned int vect_get_scalar_dr_size (dr_vec_info *dr_info) { return tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr_info->dr)))); } /* Source location + hotness information. */ extern dump_user_location_t vect_location; /* A macro for calling: dump_begin_scope (MSG, vect_location); via an RAII object, thus printing "=== MSG ===\n" to the dumpfile etc, and then calling dump_end_scope (); once the object goes out of scope, thus capturing the nesting of the scopes. 
These scopes affect dump messages within them: dump messages at the top level implicitly default to MSG_PRIORITY_USER_FACING, whereas those in a nested scope implicitly default to MSG_PRIORITY_INTERNALS. */ #define DUMP_VECT_SCOPE(MSG) \ AUTO_DUMP_SCOPE (MSG, vect_location) /* A sentinel class for ensuring that the "vect_location" global gets reset at the end of a scope. The "vect_location" global is used during dumping and contains a location_t, which could contain references to a tree block via the ad-hoc data. This data is used for tracking inlining information, but it's not a GC root; it's simply assumed that such locations never get accessed if the blocks are optimized away. Hence we need to ensure that such locations are purged at the end of any operations using them (e.g. via this class). */ class auto_purge_vect_location { public: ~auto_purge_vect_location (); }; /*-----------------------------------------------------------------*/ /* Function prototypes. */ /*-----------------------------------------------------------------*/ /* Simple loop peeling and versioning utilities for vectorizer's purposes - in tree-vect-loop-manip.c. */ extern void vect_set_loop_condition (struct loop *, loop_vec_info, tree, tree, tree, bool); extern bool slpeel_can_duplicate_loop_p (const struct loop *, const_edge); struct loop *slpeel_tree_duplicate_loop_to_edge_cfg (struct loop *, struct loop *, edge); struct loop *vect_loop_versioning (loop_vec_info, unsigned int, bool, poly_uint64); extern struct loop *vect_do_peeling (loop_vec_info, tree, tree, tree *, tree *, tree *, int, bool, bool); extern void vect_prepare_for_masked_peels (loop_vec_info); extern dump_user_location_t find_loop_location (struct loop *); extern bool vect_can_advance_ivs_p (loop_vec_info); /* In tree-vect-stmts.c. 
*/ extern poly_uint64 current_vector_size; extern tree get_vectype_for_scalar_type (tree); extern tree get_vectype_for_scalar_type_and_size (tree, poly_uint64); extern tree get_mask_type_for_scalar_type (tree); extern tree get_same_sized_vectype (tree, tree); extern bool vect_get_loop_mask_type (loop_vec_info); extern bool vect_is_simple_use (tree, vec_info *, enum vect_def_type *, stmt_vec_info * = NULL, gimple ** = NULL); extern bool vect_is_simple_use (tree, vec_info *, enum vect_def_type *, tree *, stmt_vec_info * = NULL, gimple ** = NULL); extern bool supportable_widening_operation (enum tree_code, stmt_vec_info, tree, tree, enum tree_code *, enum tree_code *, int *, vec<tree> *); extern bool supportable_narrowing_operation (enum tree_code, tree, tree, enum tree_code *, int *, vec<tree> *); extern unsigned record_stmt_cost (stmt_vector_for_cost *, int, enum vect_cost_for_stmt, stmt_vec_info, int, enum vect_cost_model_location); extern stmt_vec_info vect_finish_replace_stmt (stmt_vec_info, gimple *); extern stmt_vec_info vect_finish_stmt_generation (stmt_vec_info, gimple *, gimple_stmt_iterator *); extern opt_result vect_mark_stmts_to_be_vectorized (loop_vec_info); extern tree vect_get_store_rhs (stmt_vec_info); extern tree vect_get_vec_def_for_operand_1 (stmt_vec_info, enum vect_def_type); extern tree vect_get_vec_def_for_operand (tree, stmt_vec_info, tree = NULL); extern void vect_get_vec_defs (tree, tree, stmt_vec_info, vec<tree> *, vec<tree> *, slp_tree); extern void vect_get_vec_defs_for_stmt_copy (vec_info *, vec<tree> *, vec<tree> *); extern tree vect_init_vector (stmt_vec_info, tree, tree, gimple_stmt_iterator *); extern tree vect_get_vec_def_for_stmt_copy (vec_info *, tree); extern bool vect_transform_stmt (stmt_vec_info, gimple_stmt_iterator *, slp_tree, slp_instance); extern void vect_remove_stores (stmt_vec_info); extern opt_result vect_analyze_stmt (stmt_vec_info, bool *, slp_tree, slp_instance, stmt_vector_for_cost *); extern bool vectorizable_condition (stmt_vec_info, gimple_stmt_iterator *, stmt_vec_info *, bool, slp_tree, stmt_vector_for_cost *); extern bool vectorizable_shift (stmt_vec_info, gimple_stmt_iterator *, stmt_vec_info *, slp_tree, stmt_vector_for_cost *); extern void vect_get_load_cost (stmt_vec_info, int, bool, unsigned int *, unsigned int *, stmt_vector_for_cost *, stmt_vector_for_cost *, bool); extern void vect_get_store_cost (stmt_vec_info, int, unsigned int *, stmt_vector_for_cost *); extern bool vect_supportable_shift (enum tree_code, tree); extern tree vect_gen_perm_mask_any (tree, const vec_perm_indices &); extern tree vect_gen_perm_mask_checked (tree, const vec_perm_indices &); extern void optimize_mask_stores (struct loop*); extern gcall *vect_gen_while (tree, tree, tree); extern tree vect_gen_while_not (gimple_seq *, tree, tree, tree); extern opt_result vect_get_vector_types_for_stmt (stmt_vec_info, tree *, tree *); extern opt_tree vect_get_mask_type_for_stmt (stmt_vec_info); /* In tree-vect-data-refs.c. 
*/ extern bool vect_can_force_dr_alignment_p (const_tree, poly_uint64); extern enum dr_alignment_support vect_supportable_dr_alignment (dr_vec_info *, bool); extern tree vect_get_smallest_scalar_type (stmt_vec_info, HOST_WIDE_INT *, HOST_WIDE_INT *); extern opt_result vect_analyze_data_ref_dependences (loop_vec_info, unsigned int *); extern bool vect_slp_analyze_instance_dependence (slp_instance); extern opt_result vect_enhance_data_refs_alignment (loop_vec_info); extern opt_result vect_analyze_data_refs_alignment (loop_vec_info); extern opt_result vect_verify_datarefs_alignment (loop_vec_info); extern bool vect_slp_analyze_and_verify_instance_alignment (slp_instance); extern opt_result vect_analyze_data_ref_accesses (vec_info *); extern opt_result vect_prune_runtime_alias_test_list (loop_vec_info); extern bool vect_gather_scatter_fn_p (bool, bool, tree, tree, unsigned int, signop, int, internal_fn *, tree *); extern bool vect_check_gather_scatter (stmt_vec_info, loop_vec_info, gather_scatter_info *); extern opt_result vect_find_stmt_data_reference (loop_p, gimple *, vec<data_reference_p> *); extern opt_result vect_analyze_data_refs (vec_info *, poly_uint64 *); extern void vect_record_base_alignments (vec_info *); extern tree vect_create_data_ref_ptr (stmt_vec_info, tree, struct loop *, tree, tree *, gimple_stmt_iterator *, gimple **, bool, tree = NULL_TREE, tree = NULL_TREE); extern tree bump_vector_ptr (tree, gimple *, gimple_stmt_iterator *, stmt_vec_info, tree); extern void vect_copy_ref_info (tree, tree); extern tree vect_create_destination_var (tree, tree); extern bool vect_grouped_store_supported (tree, unsigned HOST_WIDE_INT); extern bool vect_store_lanes_supported (tree, unsigned HOST_WIDE_INT, bool); extern bool vect_grouped_load_supported (tree, bool, unsigned HOST_WIDE_INT); extern bool vect_load_lanes_supported (tree, unsigned HOST_WIDE_INT, bool); extern void vect_permute_store_chain (vec<tree> ,unsigned int, stmt_vec_info, gimple_stmt_iterator *, vec<tree> *); extern tree vect_setup_realignment (stmt_vec_info, gimple_stmt_iterator *, tree *, enum dr_alignment_support, tree, struct loop **); extern void vect_transform_grouped_load (stmt_vec_info, vec<tree> , int, gimple_stmt_iterator *); extern void vect_record_grouped_load_vectors (stmt_vec_info, vec<tree>); extern tree vect_get_new_vect_var (tree, enum vect_var_kind, const char *); extern tree vect_get_new_ssa_name (tree, enum vect_var_kind, const char * = NULL); extern tree vect_create_addr_base_for_vector_ref (stmt_vec_info, gimple_seq *, tree, tree = NULL_TREE); /* In tree-vect-loop.c. */ /* FORNOW: Used in tree-parloops.c. */ extern stmt_vec_info vect_force_simple_reduction (loop_vec_info, stmt_vec_info, bool *, bool); /* Used in gimple-loop-interchange.c. */ extern bool check_reduction_path (dump_user_location_t, loop_p, gphi *, tree, enum tree_code); /* Drive for loop analysis stage. */ extern opt_loop_vec_info vect_analyze_loop (struct loop *, loop_vec_info, vec_info_shared *); extern tree vect_build_loop_niters (loop_vec_info, bool * = NULL); extern void vect_gen_vector_loop_niters (loop_vec_info, tree, tree *, tree *, bool); extern tree vect_halve_mask_nunits (tree); extern tree vect_double_mask_nunits (tree); extern void vect_record_loop_mask (loop_vec_info, vec_loop_masks *, unsigned int, tree); extern tree vect_get_loop_mask (gimple_stmt_iterator *, vec_loop_masks *, unsigned int, tree, unsigned int); /* Drive for loop transformation stage. 
*/ extern struct loop *vect_transform_loop (loop_vec_info); extern opt_loop_vec_info vect_analyze_loop_form (struct loop *, vec_info_shared *); extern bool vectorizable_live_operation (stmt_vec_info, gimple_stmt_iterator *, slp_tree, int, stmt_vec_info *, stmt_vector_for_cost *); extern bool vectorizable_reduction (stmt_vec_info, gimple_stmt_iterator *, stmt_vec_info *, slp_tree, slp_instance, stmt_vector_for_cost *); extern bool vectorizable_induction (stmt_vec_info, gimple_stmt_iterator *, stmt_vec_info *, slp_tree, stmt_vector_for_cost *); extern tree get_initial_def_for_reduction (stmt_vec_info, tree, tree *); extern bool vect_worthwhile_without_simd_p (vec_info *, tree_code); extern int vect_get_known_peeling_cost (loop_vec_info, int, int *, stmt_vector_for_cost *, stmt_vector_for_cost *, stmt_vector_for_cost *); extern tree cse_and_gimplify_to_preheader (loop_vec_info, tree); /* In tree-vect-slp.c. */ extern void vect_free_slp_instance (slp_instance, bool); extern bool vect_transform_slp_perm_load (slp_tree, vec<tree> , gimple_stmt_iterator *, poly_uint64, slp_instance, bool, unsigned *); extern bool vect_slp_analyze_operations (vec_info *); extern void vect_schedule_slp (vec_info *); extern opt_result vect_analyze_slp (vec_info *, unsigned); extern bool vect_make_slp_decision (loop_vec_info); extern void vect_detect_hybrid_slp (loop_vec_info); extern void vect_get_slp_defs (vec<tree> , slp_tree, vec<vec<tree> > *); extern bool vect_slp_bb (basic_block); extern stmt_vec_info vect_find_last_scalar_stmt_in_slp (slp_tree); extern bool is_simple_and_all_uses_invariant (stmt_vec_info, loop_vec_info); extern bool can_duplicate_and_interleave_p (unsigned int, machine_mode, unsigned int * = NULL, tree * = NULL, tree * = NULL); extern void duplicate_and_interleave (gimple_seq *, tree, vec<tree>, unsigned int, vec<tree> &); extern int vect_get_place_in_interleaving_chain (stmt_vec_info, stmt_vec_info); /* In tree-vect-patterns.c. */ /* Pattern recognition functions. Additional pattern recognition functions can (and will) be added in the future. */ void vect_pattern_recog (vec_info *); /* In tree-vectorizer.c. */ unsigned vectorize_loops (void); void vect_free_loop_info_assumptions (struct loop *); #endif /* GCC_TREE_VECTORIZER_H */
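/* Standalone illustrative sketch, not part of the header above: a toy
   program that redoes, with plain unsigned integers in place of poly_uint64
   and tree, two of the small calculations the inline helpers describe --
   the "vectorization factor divided by elements per vector" arithmetic of
   vect_get_num_copies, and the "misalign & -misalign" trick used by
   vect_known_alignment_in_bytes.  The names num_copies_for_vf and
   alignment_from_misalignment, and the example values in main, are made up
   for illustration only.  */
#include <cassert>
#include <cstdio>

/* Number of vector statements needed per scalar iteration: the vectorization
   factor divided by the number of elements in one vector.  The division is
   assumed to be exact, as exact_div guarantees in the real helper.  */
static unsigned int
num_copies_for_vf (unsigned int vf, unsigned int nunits)
{
  assert (nunits != 0 && vf % nunits == 0);
  return vf / nunits;
}

/* Alignment in bytes guaranteed by a known misalignment: misalign & -misalign
   isolates the lowest set bit, i.e. the largest power of two dividing the
   misalignment.  A misalignment of zero means the access meets its target
   alignment.  */
static unsigned int
alignment_from_misalignment (unsigned int misalign, unsigned int target_alignment)
{
  if (misalign == 0)
    return target_alignment;
  return misalign & -misalign;
}

int
main ()
{
  /* VF = 8 with 4-element vectors needs two copies of each vector stmt.  */
  printf ("copies: %u\n", num_copies_for_vf (8, 4));
  /* A reference misaligned by 12 bytes against a 32-byte target alignment is
     still guaranteed to be 4-byte aligned (12 & -12 == 4).  */
  printf ("alignment: %u\n", alignment_from_misalignment (12, 32));
  return 0;
}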
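/* A second standalone sketch, also not part of the header above: a toy that
   mirrors the shape of the cost-hook sequence the wrappers describe -- an
   accumulator is created, add_stmt_cost-style calls record per-statement
   costs (add_stmt_costs replays a whole stmt_info_for_cost vector), and a
   finish_cost-style step reads back prologue, body and epilogue totals.
   Everything named toy_* and every cost value below is a made-up stand-in;
   the real hooks are reached through targetm.vectorize.  */
#include <cstdio>
#include <vector>

enum toy_cost_location { TOY_PROLOGUE, TOY_BODY, TOY_EPILOGUE };

struct toy_stmt_cost
{
  int count;		   /* How many statements of this kind.  */
  unsigned cost_per;	   /* Made-up per-statement cost.  */
  toy_cost_location where; /* Which part of the loop pays for it.  */
};

struct toy_cost_data
{
  unsigned cost[3] = {0, 0, 0};
};

/* Mirror of add_stmt_cost: accumulate COUNT statements into the bucket
   selected by WHERE and return the cost just added.  */
static unsigned
toy_add_stmt_cost (toy_cost_data *data, int count, unsigned cost_per,
		   toy_cost_location where)
{
  unsigned cost = count * cost_per;
  data->cost[where] += cost;
  return cost;
}

int
main ()
{
  /* Mirror of add_stmt_costs: replay a recorded vector of per-statement
     costs against a fresh accumulator.  */
  std::vector<toy_stmt_cost> cost_vec
    = {{4, 1, TOY_BODY}, {1, 3, TOY_PROLOGUE}, {2, 1, TOY_BODY}};
  toy_cost_data data;
  for (const toy_stmt_cost &c : cost_vec)
    toy_add_stmt_cost (&data, c.count, c.cost_per, c.where);

  /* Mirror of finish_cost: read back the three buckets.
     Prints prologue=3 body=6 epilogue=0.  */
  printf ("prologue=%u body=%u epilogue=%u\n",
	  data.cost[TOY_PROLOGUE], data.cost[TOY_BODY], data.cost[TOY_EPILOGUE]);
  return 0;
}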
#ifndef GCC_TREE_VECTORIZER_H #define GCC_TREE_VECTORIZER_H typedef struct _stmt_vec_info *stmt_vec_info; #include "tree-data-ref.h" #include "tree-hash-traits.h" #include "target.h" /* Used for naming of new temporaries. */ enum vect_var_kind { vect_simple_var, vect_pointer_var, vect_scalar_var, vect_mask_var }; /* Defines type of operation. */ enum operation_type { unary_op = 1, binary_op, ternary_op }; /* Define type of available alignment support. */ enum dr_alignment_support { dr_unaligned_unsupported, dr_unaligned_supported, dr_explicit_realign, dr_explicit_realign_optimized, dr_aligned }; /* Define type of def-use cross-iteration cycle. */ enum vect_def_type { vect_uninitialized_def = 0, vect_constant_def = 1, vect_external_def, vect_internal_def, vect_induction_def, vect_reduction_def, vect_double_reduction_def, vect_nested_cycle, vect_unknown_def_type }; /* Define type of reduction. */ enum vect_reduction_type { TREE_CODE_REDUCTION, COND_REDUCTION, INTEGER_INDUC_COND_REDUCTION, CONST_COND_REDUCTION, /* * Retain a scalar phi and use a FOLD_EXTRACT_LAST within the loop to * implement: * * for (int i = 0; i < VF; ++i) res = cond[i] ? val[i] : res; */ EXTRACT_LAST_REDUCTION, /* * Use a folding reduction within the loop to implement: * * for (int i = 0; i < VF; ++i) res = res OP val[i]; * * (with no reassocation). */ FOLD_LEFT_REDUCTION }; #define VECTORIZABLE_CYCLE_DEF(D) (((D) == vect_reduction_def) \ || ((D) == vect_double_reduction_def) \ || ((D) == vect_nested_cycle)) /* * Structure to encapsulate information about a group of like instructions to * be presented to the target cost model. */ struct stmt_info_for_cost { int count; enum vect_cost_for_stmt kind; enum vect_cost_model_location where; stmt_vec_info stmt_info; int misalign; }; typedef vec < stmt_info_for_cost > stmt_vector_for_cost; /* * Maps base addresses to an innermost_loop_behavior that gives the maximum * known alignment for that base. */ typedef hash_map < tree_operand_hash, innermost_loop_behavior * >vec_base_alignments; /************************************************************************ SLP ************************************************************************/ typedef struct _slp_tree *slp_tree; /* * A computation tree of an SLP instance. Each node corresponds to a group * of stmts to be packed in a SIMD stmt. */ struct _slp_tree { /* Nodes that contain def-stmts of this node statements operands. */ vec < slp_tree > children; /* A group of scalar stmts to be vectorized together. */ vec < stmt_vec_info > stmts; /* * Load permutation relative to the stores, NULL if there is no * permutation. */ vec < unsigned >load_permutation; /* Vectorized stmt/s. */ vec < stmt_vec_info > vec_stmts; /* * Number of vector stmts that are created to replace the group of scalar * stmts. It is calculated during the transformation phase as the number * of scalar elements in one scalar iteration (GROUP_SIZE) multiplied by * VF divided by vector size. */ unsigned int vec_stmts_size; /* Reference count in the SLP graph. */ unsigned int refcnt; /* * The maximum number of vector elements for the subtree rooted at this * node. */ poly_uint64 max_nunits; /* Whether the scalar computations use two different operators. */ bool two_operators; /* The DEF type of this node. */ enum vect_def_type def_type; }; /* * SLP instance is a sequence of stmts in a loop that can be packed into SIMD * stmts. */ typedef struct _slp_instance { /* The root of SLP tree. 
*/ slp_tree root; /* Size of groups of scalar stmts that will be replaced by SIMD stmt/s. */ unsigned int group_size; /* The unrolling factor required to vectorized this SLP instance. */ poly_uint64 unrolling_factor; /* The group of nodes that contain loads of this SLP instance. */ vec < slp_tree > loads; /* The SLP node containing the reduction PHIs. */ slp_tree reduc_phis; } *slp_instance; /* Access Functions. */ #define SLP_INSTANCE_TREE(S) (S)->root #define SLP_INSTANCE_GROUP_SIZE(S) (S)->group_size #define SLP_INSTANCE_UNROLLING_FACTOR(S) (S)->unrolling_factor #define SLP_INSTANCE_LOADS(S) (S)->loads #define SLP_TREE_CHILDREN(S) (S)->children #define SLP_TREE_SCALAR_STMTS(S) (S)->stmts #define SLP_TREE_VEC_STMTS(S) (S)->vec_stmts #define SLP_TREE_NUMBER_OF_VEC_STMTS(S) (S)->vec_stmts_size #define SLP_TREE_LOAD_PERMUTATION(S) (S)->load_permutation #define SLP_TREE_TWO_OPERATORS(S) (S)->two_operators #define SLP_TREE_DEF_TYPE(S) (S)->def_type /* * Describes two objects whose addresses must be unequal for the vectorized * loop to be valid. */ typedef std::pair < tree, tree > vec_object_pair; /* * Records that vectorization is only possible if abs (EXPR) >= MIN_VALUE. * UNSIGNED_P is true if we can assume that abs (EXPR) == EXPR. */ struct vec_lower_bound { vec_lower_bound() { } vec_lower_bound(tree e, bool u, poly_uint64 m) :expr(e), unsigned_p(u), min_value(m) { } tree expr; bool unsigned_p; poly_uint64 min_value; }; /* * Vectorizer state shared between different analyses like vector sizes of * the same CFG region. */ struct vec_info_shared { vec_info_shared(); ~vec_info_shared(); void save_datarefs(); void check_datarefs(); /* All data references. Freed by free_data_refs, so not an auto_vec. */ vec < data_reference_p > datarefs; vec < data_reference > datarefs_copy; /* The loop nest in which the data dependences are computed. */ auto_vec < loop_p > loop_nest; /* * All data dependences. Freed by free_dependence_relations, so not an * auto_vec. */ vec < ddr_p > ddrs; }; /* Vectorizer state common between loop and basic-block vectorization. */ struct vec_info { enum vec_kind { bb, loop }; vec_info(vec_kind, void *, vec_info_shared *); ~vec_info(); stmt_vec_info add_stmt(gimple *); stmt_vec_info lookup_stmt(gimple *); stmt_vec_info lookup_def(tree); stmt_vec_info lookup_single_use(tree); struct dr_vec_info *lookup_dr(data_reference *); void move_dr(stmt_vec_info, stmt_vec_info); void remove_stmt(stmt_vec_info); void replace_stmt(gimple_stmt_iterator *, stmt_vec_info, gimple *); /* The type of vectorization. */ vec_kind kind; /* Shared vectorizer state. */ vec_info_shared *shared; /* The mapping of GIMPLE UID to stmt_vec_info. */ vec < stmt_vec_info > stmt_vec_infos; /* All SLP instances. */ auto_vec < slp_instance > slp_instances; /* * Maps base addresses to an innermost_loop_behavior that gives the * maximum known alignment for that base. */ vec_base_alignments base_alignments; /* * All interleaving chains of stores, represented by the first stmt in * the chain. */ auto_vec < stmt_vec_info > grouped_stores; /* Cost data used by the target cost model. 
*/ void *target_cost_data; private: stmt_vec_info new_stmt_vec_info(gimple * stmt); void set_vinfo_for_stmt(gimple *, stmt_vec_info); void free_stmt_vec_infos(); void free_stmt_vec_info(stmt_vec_info); }; struct _loop_vec_info; struct _bb_vec_info; template <> template <> inline bool is_a_helper < _loop_vec_info * >: :test(vec_info * i) { return i->kind == vec_info: :loop; } template <> template <> inline bool is_a_helper < _bb_vec_info * >: :test(vec_info * i) { return i->kind == vec_info: :bb; } /* * In general, we can divide the vector statements in a vectorized loop into * related groups ("rgroups") and say that for each rgroup there is some nS * such that the rgroup operates on nS values from one scalar iteration * followed by nS values from the next. That is, if VF is the vectorization * factor of the loop, the rgroup operates on a sequence: * * (1,1) (1,2) ... (1,nS) (2,1) ... (2,nS) ... (VF,1) ... (VF,nS) * * where (i,j) represents a scalar value with index j in a scalar iteration with * index i. * * [ We use the term "rgroup" to emphasise that this grouping isn't necessarily * the same as the grouping of statements used elsewhere. For example, if we * implement a group of scalar loads using gather loads, we'll use a separate * gather load for each scalar load, and thus each gather load will belong to * its own rgroup. ] * * In general this sequence will occupy nV vectors concatenated together. If * these vectors have nL lanes each, the total number of scalar values N is * given by: * * N = nS * VF = nV * nL * * None of nS, VF, nV and nL are required to be a power of 2. nS and nV are * compile-time constants but VF and nL can be variable (if the target * supports variable-length vectors). * * In classical vectorization, each iteration of the vector loop would handle * exactly VF iterations of the original scalar loop. However, in a * fully-masked loop, a particular iteration of the vector loop might handle * fewer than VF iterations of the scalar loop. The vector lanes that * correspond to iterations of the scalar loop are said to be "active" and * the other lanes are said to be "inactive". * * In a fully-masked loop, many rgroups need to be masked to ensure that they * have no effect for the inactive lanes. Each such rgroup needs a sequence * of booleans in the same order as above, but with each (i,j) replaced by a * boolean that indicates whether iteration i is active. This sequence * occupies nV vector masks that again have nL lanes each. Thus the mask * sequence as a whole consists of VF independent booleans that are each * repeated nS times. * * We make the simplifying assumption that if a sequence of nV masks is suitable * for one (nS,nL) pair, we can reuse it for (nS/2,nL/2) by VIEW_CONVERTing * it. This holds for all current targets that support fully-masked loops. * For example, suppose the scalar loop is: * * float *f; double *d; for (int i = 0; i < n; ++i) { f[i * 2 + 0] += 1.0f; f[i * * 2 + 1] += 2.0f; d[i] += 3.0; } * * and suppose that vectors have 256 bits. The vectorized f accesses will * belong to one rgroup and the vectorized d access to another: * * f rgroup: nS = 2, nV = 1, nL = 8 d rgroup: nS = 1, nV = 1, nL = 4 VF = 4 * * [ In this simple example the rgroups do correspond to the normal SLP grouping * scheme. ] * * If only the first three lanes are active, the masks we need are: * * f rgroup: 1 1 | 1 1 | 1 1 | 0 0 d rgroup: 1 | 1 | 1 | 0 * * Here we can use a mask calculated for f's rgroup for d's, but not vice versa. 
* * Thus for each value of nV, it is enough to provide nV masks, with the mask * being calculated based on the highest nL (or, equivalently, based on the * highest nS) required by any rgroup with that nV. We therefore represent * the entire collection of masks as a two-level table, with the first level * being indexed by nV - 1 (since nV == 0 doesn't exist) and the second being * indexed by the mask index 0 <= i < nV. */ /* * The masks needed by rgroups with nV vectors, according to the description * above. */ struct rgroup_masks { /* The largest nS for all rgroups that use these masks. */ unsigned int max_nscalars_per_iter; /* The type of mask to use, based on the highest nS recorded above. */ tree mask_type; /* A vector of nV masks, in iteration order. */ vec < tree > masks; }; typedef auto_vec < rgroup_masks > vec_loop_masks; /*-----------------------------------------------------------------*/ /* Info on vectorized loops. */ /*-----------------------------------------------------------------*/ typedef struct _loop_vec_info:public vec_info { _loop_vec_info(struct loop *, vec_info_shared *); ~_loop_vec_info(); /* The loop to which this info struct refers to. */ struct loop *loop; /* The loop basic blocks. */ basic_block *bbs; /* Number of latch executions. */ tree num_itersm1; /* Number of iterations. */ tree num_iters; /* Number of iterations of the original loop. */ tree num_iters_unchanged; /* Condition under which this loop is analyzed and versioned. */ tree num_iters_assumptions; /* * Threshold of number of iterations below which vectorzation will not be * performed. It is calculated from MIN_PROFITABLE_ITERS and * PARAM_MIN_VECT_LOOP_BOUND. */ unsigned int th; /* * When applying loop versioning, the vector form should only be used if * the number of scalar iterations is >= this value, on top of all the * other requirements. Ignored when loop versioning is not being used. */ poly_uint64 versioning_threshold; /* Unrolling factor */ poly_uint64 vectorization_factor; /* * Maximum runtime vectorization factor, or MAX_VECTORIZATION_FACTOR if * there is no particular limit. */ unsigned HOST_WIDE_INT max_vectorization_factor; /* * The masks that a fully-masked loop should use to avoid operating on * inactive scalars. */ vec_loop_masks masks; /* * If we are using a loop mask to align memory addresses, this variable * contains the number of vector elements that we should skip in the * first iteration of the vector loop (i.e. the number of leading * elements that should be false in the first mask). */ tree mask_skip_niters; /* * Type of the variables to use in the WHILE_ULT call for fully-masked * loops. */ tree mask_compare_type; /* * For the loop should not be vectorized, if constant non-zero, * simd_if_cond shouldn't be set and loop vectorized normally, if * SSA_NAME, the loop should be versioned on that condition, using scalar * loop if the condition is false and vectorized loop otherwise. */ tree simd_if_cond; /* Unknown DRs according to which loop was peeled. */ struct dr_vec_info *unaligned_dr; /* * peeling_for_alignment indicates whether peeling for alignment will * take place, and what the peeling factor should be: * peeling_for_alignment = X means: If X=0: Peeling for alignment will * not be applied. If X>0: Peel first X iterations. If X=-1: Generate a * runtime test to calculate the number of iterations to be peeled, using * the dataref recorded in the field unaligned_dr. */ int peeling_for_alignment; /* The mask used to check the alignment of pointers or arrays. 
*/ int ptr_mask; /* * Data Dependence Relations defining address ranges that are candidates * for a run-time aliasing check. */ auto_vec < ddr_p > may_alias_ddrs; /* * Data Dependence Relations defining address ranges together with * segment lengths from which the run-time aliasing check is built. */ auto_vec < dr_with_seg_len_pair_t > comp_alias_ddrs; /* Check that the addresses of each pair of objects is unequal. */ auto_vec < vec_object_pair > check_unequal_addrs; /* * List of values that are required to be nonzero. This is used to check * whether things like "x[i * n] += 1;" are safe and eventually gets * added to the checks for lower bounds below. */ auto_vec < tree > check_nonzero; /* List of values that need to be checked for a minimum value. */ auto_vec < vec_lower_bound > lower_bounds; /* * Statements in the loop that have data references that are candidates * for a runtime (loop versioning) misalignment check. */ auto_vec < stmt_vec_info > may_misalign_stmts; /* Reduction cycles detected in the loop. Used in loop-aware SLP. */ auto_vec < stmt_vec_info > reductions; /* * All reduction chains in the loop, represented by the first stmt in the * chain. */ auto_vec < stmt_vec_info > reduction_chains; /* Cost vector for a single scalar iteration. */ auto_vec < stmt_info_for_cost > scalar_cost_vec; /* Map of IV base/step expressions to inserted name in the preheader. */ hash_map < tree_operand_hash, tree > *ivexpr_map; /* * The unrolling factor needed to SLP the loop. In case of that pure SLP * is applied to the loop, i.e., no unrolling is needed, this is 1. */ poly_uint64 slp_unrolling_factor; /* Cost of a single scalar iteration. */ int single_scalar_iteration_cost; /* Is the loop vectorizable? */ bool vectorizable; /* Records whether we still have the option of using a fully-masked loop. */ bool can_fully_mask_p; /* True if have decided to use a fully-masked loop. */ bool fully_masked_p; /* * When we have grouped data accesses with gaps, we may introduce invalid * memory accesses. We peel the last iteration of the loop to prevent * this. */ bool peeling_for_gaps; /* * When the number of iterations is not a multiple of the vector size we * need to peel off iterations at the end to form an epilogue loop. */ bool peeling_for_niter; /* * Reductions are canonicalized so that the last operand is the reduction * operand. If this places a constant into RHS1, this decanonicalizes * GIMPLE for other phases, so we must track when this has occurred and * fix it up. */ bool operands_swapped; /* * True if there are no loop carried data dependencies in the loop. If * loop->safelen <= 1, then this is always true, either the loop didn't * have any loop carried data dependencies, or the loop is being * vectorized guarded with some runtime alias checks, or couldn't be * vectorized at all, but then this field shouldn't be used. For * loop->safelen >= 2, the user has asserted that there are no backward * dependencies, but there still could be loop carried forward * dependencies in such loops. This flag will be false if normal * vectorizer data dependency analysis would fail or require versioning * for alias, but because of loop->safelen >= 2 it has been vectorized * even without versioning for alias. E.g. in: for (int i = 0; i < m; * i++) a[i] = a[i + k] * c; (or #pragma simd or #pragma ivdep) we can * vectorize this and it will DTRT even for k > 0 && k < m, but without * safelen we would not vectorize this, so this field would be false. */ bool no_data_dependencies; /* Mark loops having masked stores. 
*/ bool has_mask_store; /* * If if-conversion versioned this loop before conversion, this is the * loop version without if-conversion. */ struct loop *scalar_loop; /* * For loops being epilogues of already vectorized loops this points to * the original vectorized loop. Otherwise NULL. */ _loop_vec_info *orig_loop_info; } *loop_vec_info; /* Access Functions. */ #define LOOP_VINFO_LOOP(L) (L)->loop #define LOOP_VINFO_BBS(L) (L)->bbs #define LOOP_VINFO_NITERSM1(L) (L)->num_itersm1 #define LOOP_VINFO_NITERS(L) (L)->num_iters /* * Since LOOP_VINFO_NITERS and LOOP_VINFO_NITERSM1 can change after prologue * peeling retain total unchanged scalar loop iterations for cost model. */ #define LOOP_VINFO_NITERS_UNCHANGED(L) (L)->num_iters_unchanged #define LOOP_VINFO_NITERS_ASSUMPTIONS(L) (L)->num_iters_assumptions #define LOOP_VINFO_COST_MODEL_THRESHOLD(L) (L)->th #define LOOP_VINFO_VERSIONING_THRESHOLD(L) (L)->versioning_threshold #define LOOP_VINFO_VECTORIZABLE_P(L) (L)->vectorizable #define LOOP_VINFO_CAN_FULLY_MASK_P(L) (L)->can_fully_mask_p #define LOOP_VINFO_FULLY_MASKED_P(L) (L)->fully_masked_p #define LOOP_VINFO_VECT_FACTOR(L) (L)->vectorization_factor #define LOOP_VINFO_MAX_VECT_FACTOR(L) (L)->max_vectorization_factor #define LOOP_VINFO_MASKS(L) (L)->masks #define LOOP_VINFO_MASK_SKIP_NITERS(L) (L)->mask_skip_niters #define LOOP_VINFO_MASK_COMPARE_TYPE(L) (L)->mask_compare_type #define LOOP_VINFO_PTR_MASK(L) (L)->ptr_mask #define LOOP_VINFO_LOOP_NEST(L) (L)->shared->loop_nest #define LOOP_VINFO_DATAREFS(L) (L)->shared->datarefs #define LOOP_VINFO_DDRS(L) (L)->shared->ddrs #define LOOP_VINFO_INT_NITERS(L) (TREE_INT_CST_LOW ((L)->num_iters)) #define LOOP_VINFO_PEELING_FOR_ALIGNMENT(L) (L)->peeling_for_alignment #define LOOP_VINFO_UNALIGNED_DR(L) (L)->unaligned_dr #define LOOP_VINFO_MAY_MISALIGN_STMTS(L) (L)->may_misalign_stmts #define LOOP_VINFO_MAY_ALIAS_DDRS(L) (L)->may_alias_ddrs #define LOOP_VINFO_COMP_ALIAS_DDRS(L) (L)->comp_alias_ddrs #define LOOP_VINFO_CHECK_UNEQUAL_ADDRS(L) (L)->check_unequal_addrs #define LOOP_VINFO_CHECK_NONZERO(L) (L)->check_nonzero #define LOOP_VINFO_LOWER_BOUNDS(L) (L)->lower_bounds #define LOOP_VINFO_GROUPED_STORES(L) (L)->grouped_stores #define LOOP_VINFO_SLP_INSTANCES(L) (L)->slp_instances #define LOOP_VINFO_SLP_UNROLLING_FACTOR(L) (L)->slp_unrolling_factor #define LOOP_VINFO_REDUCTIONS(L) (L)->reductions #define LOOP_VINFO_REDUCTION_CHAINS(L) (L)->reduction_chains #define LOOP_VINFO_TARGET_COST_DATA(L) (L)->target_cost_data #define LOOP_VINFO_PEELING_FOR_GAPS(L) (L)->peeling_for_gaps #define LOOP_VINFO_OPERANDS_SWAPPED(L) (L)->operands_swapped #define LOOP_VINFO_PEELING_FOR_NITER(L) (L)->peeling_for_niter #define LOOP_VINFO_NO_DATA_DEPENDENCIES(L) (L)->no_data_dependencies #define LOOP_VINFO_SCALAR_LOOP(L) (L)->scalar_loop #define LOOP_VINFO_HAS_MASK_STORE(L) (L)->has_mask_store #define LOOP_VINFO_SCALAR_ITERATION_COST(L) (L)->scalar_cost_vec #define LOOP_VINFO_SINGLE_SCALAR_ITERATION_COST(L) (L)->single_scalar_iteration_cost #define LOOP_VINFO_ORIG_LOOP_INFO(L) (L)->orig_loop_info #define LOOP_VINFO_SIMD_IF_COND(L) (L)->simd_if_cond #define LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT(L) \ ((L)->may_misalign_stmts.length () > 0) #define LOOP_REQUIRES_VERSIONING_FOR_ALIAS(L) \ ((L)->comp_alias_ddrs.length () > 0 \ || (L)->check_unequal_addrs.length () > 0 \ || (L)->lower_bounds.length () > 0) #define LOOP_REQUIRES_VERSIONING_FOR_NITERS(L) \ (LOOP_VINFO_NITERS_ASSUMPTIONS (L)) #define LOOP_REQUIRES_VERSIONING_FOR_SIMD_IF_COND(L) \ (LOOP_VINFO_SIMD_IF_COND (L)) #define 
LOOP_REQUIRES_VERSIONING(L) \ (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (L) \ || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (L) \ || LOOP_REQUIRES_VERSIONING_FOR_NITERS (L) \ || LOOP_REQUIRES_VERSIONING_FOR_SIMD_IF_COND (L)) #define LOOP_VINFO_NITERS_KNOWN_P(L) \ (tree_fits_shwi_p ((L)->num_iters) && tree_to_shwi ((L)->num_iters) > 0) #define LOOP_VINFO_EPILOGUE_P(L) \ (LOOP_VINFO_ORIG_LOOP_INFO (L) != NULL) #define LOOP_VINFO_ORIG_MAX_VECT_FACTOR(L) \ (LOOP_VINFO_MAX_VECT_FACTOR (LOOP_VINFO_ORIG_LOOP_INFO (L))) /* * Wrapper for loop_vec_info, for tracking success/failure, where a non-NULL * value signifies success, and a NULL value signifies failure, supporting * propagating an opt_problem * describing the failure back up the call * stack. */ typedef opt_pointer_wrapper < loop_vec_info > opt_loop_vec_info; static inline loop_vec_info loop_vec_info_for_loop(struct loop *loop) { return (loop_vec_info) loop->aux; } typedef struct _bb_vec_info:public vec_info { _bb_vec_info(gimple_stmt_iterator, gimple_stmt_iterator, vec_info_shared *); ~_bb_vec_info(); basic_block bb; gimple_stmt_iterator region_begin; gimple_stmt_iterator region_end; } *bb_vec_info; #define BB_VINFO_BB(B) (B)->bb #define BB_VINFO_GROUPED_STORES(B) (B)->grouped_stores #define BB_VINFO_SLP_INSTANCES(B) (B)->slp_instances #define BB_VINFO_DATAREFS(B) (B)->shared->datarefs #define BB_VINFO_DDRS(B) (B)->shared->ddrs #define BB_VINFO_TARGET_COST_DATA(B) (B)->target_cost_data static inline bb_vec_info vec_info_for_bb(basic_block bb) { return (bb_vec_info) bb->aux; } /*-----------------------------------------------------------------*/ /* Info on vectorized defs. */ /*-----------------------------------------------------------------*/ enum stmt_vec_info_type { undef_vec_info_type = 0, load_vec_info_type, store_vec_info_type, shift_vec_info_type, op_vec_info_type, call_vec_info_type, call_simd_clone_vec_info_type, assignment_vec_info_type, condition_vec_info_type, comparison_vec_info_type, reduc_vec_info_type, induc_vec_info_type, type_promotion_vec_info_type, type_demotion_vec_info_type, type_conversion_vec_info_type, loop_exit_ctrl_vec_info_type }; /* * Indicates whether/how a variable is used in the scope of loop/basic block. */ enum vect_relevant { vect_unused_in_scope = 0, /* The def is only used outside the loop. */ vect_used_only_live, /* * The def is in the inner loop, and the use is in the outer loop, and * the use is a reduction stmt. */ vect_used_in_outer_by_reduction, /* * The def is in the inner loop, and the use is in the outer loop (and is * not part of reduction). */ vect_used_in_outer, /* * defs that feed computations that end up (only) in a reduction. These * defs may be used by non-reduction stmts, but eventually, any * computations/values that are affected by these defs are used to * compute a reduction (i.e. don't get stored to memory, for example). We * use this to identify computations that we can change the order in * which they are computed. */ vect_used_by_reduction, vect_used_in_scope }; /* * The type of vectorization that can be applied to the stmt: regular * loop-based vectorization; pure SLP - the stmt is a part of SLP instances * and does not have uses outside SLP instances; or hybrid SLP and loop-based * - the stmt is a part of SLP instance and also must be loop-based * vectorized, since it has uses outside SLP sequences. * * In the loop context the meanings of pure and hybrid SLP are slightly * different. 
By saying that pure SLP is applied to the loop, we mean that we * exploit only intra-iteration parallelism in the loop; i.e., the loop can * be vectorized without doing any conceptual unrolling, cause we don't pack * together stmts from different iterations, only within a single iteration. * Loop hybrid SLP means that we exploit both intra-iteration and * inter-iteration parallelism (e.g., number of elements in the vector is 4 * and the slp-group-size is 2, in which case we don't have enough * parallelism within an iteration, so we obtain the rest of the parallelism * from subsequent iterations by unrolling the loop by 2). */ enum slp_vect_type { loop_vect = 0, pure_slp, hybrid }; /* * Says whether a statement is a load, a store of a vectorized statement * result, or a store of an invariant value. */ enum vec_load_store_type { VLS_LOAD, VLS_STORE, VLS_STORE_INVARIANT }; /* * Describes how we're going to vectorize an individual load or store, or a * group of loads or stores. */ enum vect_memory_access_type { /* An access to an invariant address. This is used only for loads. */ VMAT_INVARIANT, /* A simple contiguous access. */ VMAT_CONTIGUOUS, /* * A contiguous access that goes down in memory rather than up, with no * additional permutation. This is used only for stores of invariants. */ VMAT_CONTIGUOUS_DOWN, /* * A simple contiguous access in which the elements need to be permuted * after loading or before storing. Only used for loop vectorization; * SLP uses separate permutes. */ VMAT_CONTIGUOUS_PERMUTE, /* * A simple contiguous access in which the elements need to be reversed * after loading or before storing. */ VMAT_CONTIGUOUS_REVERSE, /* An access that uses IFN_LOAD_LANES or IFN_STORE_LANES. */ VMAT_LOAD_STORE_LANES, /* * An access in which each scalar element is loaded or stored * individually. */ VMAT_ELEMENTWISE, /* * A hybrid of VMAT_CONTIGUOUS and VMAT_ELEMENTWISE, used for grouped SLP * accesses. Each unrolled iteration uses a contiguous load or store for * the whole group, but the groups from separate iterations are combined * in the same way as for VMAT_ELEMENTWISE. */ VMAT_STRIDED_SLP, /* The access uses gather loads or scatter stores. */ VMAT_GATHER_SCATTER }; struct dr_vec_info { /* The data reference itself. */ data_reference *dr; /* The statement that contains the data reference. */ stmt_vec_info stmt; /* The misalignment in bytes of the reference, or -1 if not known. */ int misalignment; /* * The byte alignment that we'd ideally like the reference to have, and * the value that misalignment is measured against. */ poly_uint64 target_alignment; /* If true the alignment of base_decl needs to be increased. */ bool base_misaligned; tree base_decl; }; typedef struct data_reference *dr_p; struct _stmt_vec_info { enum stmt_vec_info_type type; /* * Indicates whether this stmts is part of a computation whose result is * used outside the loop. */ bool live; /* Stmt is part of some pattern (computation idiom) */ bool in_pattern_p; /* * True if the statement was created during pattern recognition as part * of the replacement for RELATED_STMT. This implies that the statement * isn't part of any basic block, although for convenience its gimple_bb * is the same as for RELATED_STMT. */ bool pattern_stmt_p; /* * Is this statement vectorizable or should it be skipped in (partial) * vectorization. */ bool vectorizable; /* The stmt to which this info struct refers to. */ gimple *stmt; /* The vec_info with respect to which STMT is vectorized. 
*/ vec_info *vinfo; /* The vector type to be used for the LHS of this statement. */ tree vectype; /* The vectorized version of the stmt. */ stmt_vec_info vectorized_stmt; /* * The following is relevant only for stmts that contain a non-scalar * data-ref (array/pointer/struct access). A GIMPLE stmt is expected to * have at most one such data-ref. */ dr_vec_info dr_aux; /* * Information about the data-ref relative to this loop nest (the loop * that is being considered for vectorization). */ innermost_loop_behavior dr_wrt_vec_loop; /* * For loop PHI nodes, the base and evolution part of it. This makes * sure this information is still available in * vect_update_ivs_after_vectorizer where we may not be able to * re-analyze the PHI nodes evolution as peeling for the prologue loop * can make it unanalyzable. The evolution part is still correct after * peeling, but the base may have changed from the version here. */ tree loop_phi_evolution_base_unchanged; tree loop_phi_evolution_part; /* * Used for various bookkeeping purposes, generally holding a pointer to * some other stmt S that is in some way "related" to this stmt. Current * use of this field is: If this stmt is part of a pattern (i.e. the * field 'in_pattern_p' is true): S is the "pattern stmt" that represents * (and replaces) the sequence of stmts that constitutes the pattern. * Similarly, the related_stmt of the "pattern stmt" points back to this * stmt (which is the last stmt in the original sequence of stmts that * constitutes the pattern). */ stmt_vec_info related_stmt; /* * Used to keep a sequence of def stmts of a pattern stmt if such exists. * The sequence is attached to the original statement rather than the * pattern statement. */ gimple_seq pattern_def_seq; /* * List of datarefs that are known to have the same alignment as the * dataref of this stmt. */ vec < dr_p > same_align_refs; /* * Selected SIMD clone's function info. First vector element is SIMD * clone's function decl, followed by a pair of trees (base + step) for * linear arguments (pair of NULLs for other arguments). */ vec < tree > simd_clone_info; /* Classify the def of this stmt. */ enum vect_def_type def_type; /* Whether the stmt is SLPed, loop-based vectorized, or both. */ enum slp_vect_type slp_type; /* Interleaving and reduction chains info. */ /* First element in the group. */ stmt_vec_info first_element; /* Pointer to the next element in the group. */ stmt_vec_info next_element; /* The size of the group. */ unsigned int size; /* * For stores, number of stores from this group seen. We vectorize the * last one. */ unsigned int store_count; /* * For loads only, the gap from the previous load. For consecutive loads, * GAP is 1. */ unsigned int gap; /* * The minimum negative dependence distance this stmt participates in or * zero if none. */ unsigned int min_neg_dist; /* * Not all stmts in the loop need to be vectorized. e.g, the increment of * the loop induction variable and computation of array indexes. relevant * indicates whether the stmt needs to be vectorized. */ enum vect_relevant relevant; /* For loads if this is a gather, for stores if this is a scatter. */ bool gather_scatter_p; /* True if this is an access with loop-invariant stride. */ bool strided_p; /* For both loads and stores. */ bool simd_lane_access_p; /* * Classifies how the load or store is going to be implemented for loop * vectorization. */ vect_memory_access_type memory_access_type; /* For reduction loops, this is the type of reduction. 
*/ enum vect_reduction_type v_reduc_type; /* For CONST_COND_REDUCTION, record the reduc code. */ enum tree_code const_cond_reduc_code; /* * On a reduction PHI the reduction type as detected by * vect_force_simple_reduction. */ enum vect_reduction_type reduc_type; /* * On a reduction PHI the def returned by vect_force_simple_reduction. On * the def returned by vect_force_simple_reduction the corresponding PHI. */ stmt_vec_info reduc_def; /* The number of scalar stmt references from active SLP instances. */ unsigned int num_slp_uses; /* * If nonzero, the lhs of the statement could be truncated to this many * bits without affecting any users of the result. */ unsigned int min_output_precision; /* * If nonzero, all non-boolean input operands have the same precision, * and they could each be truncated to this many bits without changing * the result. */ unsigned int min_input_precision; /* * If OPERATION_BITS is nonzero, the statement could be performed on an * integer with the sign and number of bits given by OPERATION_SIGN and * OPERATION_BITS without changing the result. */ unsigned int operation_precision; signop operation_sign; }; /* Information about a gather/scatter call. */ struct gather_scatter_info { /* * The internal function to use for the gather/scatter operation, or * IFN_LAST if a built-in function should be used instead. */ internal_fn ifn; /* * The FUNCTION_DECL for the built-in gather/scatter function, or null if * an internal function should be used instead. */ tree decl; /* The loop-invariant base value. */ tree base; /* The original scalar offset, which is a non-loop-invariant SSA_NAME. */ tree offset; /* * Each offset element should be multiplied by this amount before being * added to the base. */ int scale; /* The definition type for the vectorized offset. */ enum vect_def_type offset_dt; /* The type of the vectorized offset. */ tree offset_vectype; /* The type of the scalar elements after loading or before storing. */ tree element_type; /* The type of the scalar elements being loaded or stored. */ tree memory_type; }; /* Access Functions. 
*/ #define STMT_VINFO_TYPE(S) (S)->type #define STMT_VINFO_STMT(S) (S)->stmt inline loop_vec_info STMT_VINFO_LOOP_VINFO(stmt_vec_info stmt_vinfo) { if (loop_vec_info loop_vinfo = dyn_cast < loop_vec_info > (stmt_vinfo->vinfo)) return loop_vinfo; return NULL; } inline bb_vec_info STMT_VINFO_BB_VINFO(stmt_vec_info stmt_vinfo) { if (bb_vec_info bb_vinfo = dyn_cast < bb_vec_info > (stmt_vinfo->vinfo)) return bb_vinfo; return NULL; } #define STMT_VINFO_RELEVANT(S) (S)->relevant #define STMT_VINFO_LIVE_P(S) (S)->live #define STMT_VINFO_VECTYPE(S) (S)->vectype #define STMT_VINFO_VEC_STMT(S) (S)->vectorized_stmt #define STMT_VINFO_VECTORIZABLE(S) (S)->vectorizable #define STMT_VINFO_DATA_REF(S) ((S)->dr_aux.dr + 0) #define STMT_VINFO_GATHER_SCATTER_P(S) (S)->gather_scatter_p #define STMT_VINFO_STRIDED_P(S) (S)->strided_p #define STMT_VINFO_MEMORY_ACCESS_TYPE(S) (S)->memory_access_type #define STMT_VINFO_SIMD_LANE_ACCESS_P(S) (S)->simd_lane_access_p #define STMT_VINFO_VEC_REDUCTION_TYPE(S) (S)->v_reduc_type #define STMT_VINFO_VEC_CONST_COND_REDUC_CODE(S) (S)->const_cond_reduc_code #define STMT_VINFO_DR_WRT_VEC_LOOP(S) (S)->dr_wrt_vec_loop #define STMT_VINFO_DR_BASE_ADDRESS(S) (S)->dr_wrt_vec_loop.base_address #define STMT_VINFO_DR_INIT(S) (S)->dr_wrt_vec_loop.init #define STMT_VINFO_DR_OFFSET(S) (S)->dr_wrt_vec_loop.offset #define STMT_VINFO_DR_STEP(S) (S)->dr_wrt_vec_loop.step #define STMT_VINFO_DR_BASE_ALIGNMENT(S) (S)->dr_wrt_vec_loop.base_alignment #define STMT_VINFO_DR_BASE_MISALIGNMENT(S) \ (S)->dr_wrt_vec_loop.base_misalignment #define STMT_VINFO_DR_OFFSET_ALIGNMENT(S) \ (S)->dr_wrt_vec_loop.offset_alignment #define STMT_VINFO_DR_STEP_ALIGNMENT(S) \ (S)->dr_wrt_vec_loop.step_alignment #define STMT_VINFO_DR_INFO(S) \ (gcc_checking_assert ((S)->dr_aux.stmt == (S)), &(S)->dr_aux) #define STMT_VINFO_IN_PATTERN_P(S) (S)->in_pattern_p #define STMT_VINFO_RELATED_STMT(S) (S)->related_stmt #define STMT_VINFO_PATTERN_DEF_SEQ(S) (S)->pattern_def_seq #define STMT_VINFO_SAME_ALIGN_REFS(S) (S)->same_align_refs #define STMT_VINFO_SIMD_CLONE_INFO(S) (S)->simd_clone_info #define STMT_VINFO_DEF_TYPE(S) (S)->def_type #define STMT_VINFO_GROUPED_ACCESS(S) \ ((S)->dr_aux.dr && DR_GROUP_FIRST_ELEMENT(S)) #define STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED(S) (S)->loop_phi_evolution_base_unchanged #define STMT_VINFO_LOOP_PHI_EVOLUTION_PART(S) (S)->loop_phi_evolution_part #define STMT_VINFO_MIN_NEG_DIST(S) (S)->min_neg_dist #define STMT_VINFO_NUM_SLP_USES(S) (S)->num_slp_uses #define STMT_VINFO_REDUC_TYPE(S) (S)->reduc_type #define STMT_VINFO_REDUC_DEF(S) (S)->reduc_def #define DR_GROUP_FIRST_ELEMENT(S) \ (gcc_checking_assert ((S)->dr_aux.dr), (S)->first_element) #define DR_GROUP_NEXT_ELEMENT(S) \ (gcc_checking_assert ((S)->dr_aux.dr), (S)->next_element) #define DR_GROUP_SIZE(S) \ (gcc_checking_assert ((S)->dr_aux.dr), (S)->size) #define DR_GROUP_STORE_COUNT(S) \ (gcc_checking_assert ((S)->dr_aux.dr), (S)->store_count) #define DR_GROUP_GAP(S) \ (gcc_checking_assert ((S)->dr_aux.dr), (S)->gap) #define REDUC_GROUP_FIRST_ELEMENT(S) \ (gcc_checking_assert (!(S)->dr_aux.dr), (S)->first_element) #define REDUC_GROUP_NEXT_ELEMENT(S) \ (gcc_checking_assert (!(S)->dr_aux.dr), (S)->next_element) #define REDUC_GROUP_SIZE(S) \ (gcc_checking_assert (!(S)->dr_aux.dr), (S)->size) #define STMT_VINFO_RELEVANT_P(S) ((S)->relevant != vect_unused_in_scope) #define HYBRID_SLP_STMT(S) ((S)->slp_type == hybrid) #define PURE_SLP_STMT(S) ((S)->slp_type == pure_slp) #define STMT_SLP_TYPE(S) (S)->slp_type #define VECT_MAX_COST 1000 /* * The 
maximum number of intermediate steps required in multi-step type * conversion. */ #define MAX_INTERM_CVT_STEPS 3 #define MAX_VECTORIZATION_FACTOR INT_MAX /* * Nonzero if TYPE represents a (scalar) boolean type or type in the * middle-end compatible with it (unsigned precision 1 integral types). Used * to determine which types should be vectorized as VECTOR_BOOLEAN_TYPE_P. */ #define VECT_SCALAR_BOOLEAN_TYPE_P(TYPE) \ (TREE_CODE (TYPE) == BOOLEAN_TYPE \ || ((TREE_CODE (TYPE) == INTEGER_TYPE \ || TREE_CODE (TYPE) == ENUMERAL_TYPE) \ && TYPE_PRECISION (TYPE) == 1 \ && TYPE_UNSIGNED (TYPE))) static inline bool nested_in_vect_loop_p(struct loop *loop, stmt_vec_info stmt_info) { return (loop->inner && (loop->inner == (gimple_bb(stmt_info->stmt))->loop_father)); } /* * Return TRUE if a statement represented by STMT_INFO is a part of a * pattern. */ static inline bool is_pattern_stmt_p(stmt_vec_info stmt_info) { return stmt_info->pattern_stmt_p; } /* * If STMT_INFO is a pattern statement, return the statement that it * replaces, otherwise return STMT_INFO itself. */ inline stmt_vec_info vect_orig_stmt(stmt_vec_info stmt_info) { if (is_pattern_stmt_p(stmt_info)) return STMT_VINFO_RELATED_STMT(stmt_info); return stmt_info; } /* Return the later statement between STMT1_INFO and STMT2_INFO. */ static inline stmt_vec_info get_later_stmt(stmt_vec_info stmt1_info, stmt_vec_info stmt2_info) { if (gimple_uid(vect_orig_stmt(stmt1_info)->stmt) > gimple_uid(vect_orig_stmt(stmt2_info)->stmt)) return stmt1_info; else return stmt2_info; } /* * If STMT_INFO has been replaced by a pattern statement, return the * replacement statement, otherwise return STMT_INFO itself. */ inline stmt_vec_info vect_stmt_to_vectorize(stmt_vec_info stmt_info) { if (STMT_VINFO_IN_PATTERN_P(stmt_info)) return STMT_VINFO_RELATED_STMT(stmt_info); return stmt_info; } /* Return true if BB is a loop header. */ static inline bool is_loop_header_bb_p(basic_block bb) { if (bb == (bb->loop_father)->header) return true; gcc_checking_assert(EDGE_COUNT(bb->preds) == 1); return false; } /* Return pow2 (X). */ static inline int vect_pow2(int x) { int i, res = 1; for (i = 0; i < x; i++) res *= 2; return res; } /* Alias targetm.vectorize.builtin_vectorization_cost. */ static inline int builtin_vectorization_cost(enum vect_cost_for_stmt type_of_cost, tree vectype, int misalign) { return targetm.vectorize.builtin_vectorization_cost(type_of_cost, vectype, misalign); } /* Get cost by calling cost target builtin. */ static inline int vect_get_stmt_cost(enum vect_cost_for_stmt type_of_cost) { return builtin_vectorization_cost(type_of_cost, NULL, 0); } /* Alias targetm.vectorize.init_cost. */ static inline void * init_cost(struct loop *loop_info) { return targetm.vectorize.init_cost(loop_info); } extern void dump_stmt_cost(FILE *, void *, int, enum vect_cost_for_stmt, stmt_vec_info, int, unsigned, enum vect_cost_model_location); /* Alias targetm.vectorize.add_stmt_cost. */ static inline unsigned add_stmt_cost(void *data, int count, enum vect_cost_for_stmt kind, stmt_vec_info stmt_info, int misalign, enum vect_cost_model_location where) { unsigned cost = targetm.vectorize.add_stmt_cost(data, count, kind, stmt_info, misalign, where); if (dump_file && (dump_flags & TDF_DETAILS)) dump_stmt_cost(dump_file, data, count, kind, stmt_info, misalign, cost, where); return cost; } /* Alias targetm.vectorize.finish_cost. 
*/ static inline void finish_cost(void *data, unsigned *prologue_cost, unsigned *body_cost, unsigned *epilogue_cost) { targetm.vectorize.finish_cost(data, prologue_cost, body_cost, epilogue_cost); } /* Alias targetm.vectorize.destroy_cost_data. */ static inline void destroy_cost_data(void *data) { targetm.vectorize.destroy_cost_data(data); } inline void add_stmt_costs(void *data, stmt_vector_for_cost * cost_vec) { stmt_info_for_cost *cost; unsigned i; FOR_EACH_VEC_ELT(*cost_vec, i, cost) add_stmt_cost(data, cost->count, cost->kind, cost->stmt_info, cost->misalign, cost->where); } /*-----------------------------------------------------------------*/ /* Info on data references alignment. */ /*-----------------------------------------------------------------*/ #define DR_MISALIGNMENT_UNKNOWN (-1) #define DR_MISALIGNMENT_UNINITIALIZED (-2) inline void set_dr_misalignment(dr_vec_info * dr_info, int val) { dr_info->misalignment = val; } inline int dr_misalignment(dr_vec_info * dr_info) { int misalign = dr_info->misalignment; gcc_assert(misalign != DR_MISALIGNMENT_UNINITIALIZED); return misalign; } /* * Reflects actual alignment of first access in the vectorized loop, taking * into account peeling/versioning if applied. */ #define DR_MISALIGNMENT(DR) dr_misalignment (DR) #define SET_DR_MISALIGNMENT(DR, VAL) set_dr_misalignment (DR, VAL) /* Only defined once DR_MISALIGNMENT is defined. */ #define DR_TARGET_ALIGNMENT(DR) ((DR)->target_alignment) /* * Return true if data access DR_INFO is aligned to its target alignment * (which may be less than a full vector). */ static inline bool aligned_access_p(dr_vec_info * dr_info) { return (DR_MISALIGNMENT(dr_info) == 0); } /* * Return TRUE if the alignment of the data access is known, and FALSE * otherwise. */ static inline bool known_alignment_for_access_p(dr_vec_info * dr_info) { return (DR_MISALIGNMENT(dr_info) != DR_MISALIGNMENT_UNKNOWN); } /* * Return the minimum alignment in bytes that the vectorized version of * DR_INFO is guaranteed to have. */ static inline unsigned int vect_known_alignment_in_bytes(dr_vec_info * dr_info) { if (DR_MISALIGNMENT(dr_info) == DR_MISALIGNMENT_UNKNOWN) return TYPE_ALIGN_UNIT(TREE_TYPE(DR_REF(dr_info->dr))); if (DR_MISALIGNMENT(dr_info) == 0) return known_alignment(DR_TARGET_ALIGNMENT(dr_info)); return DR_MISALIGNMENT(dr_info) & -DR_MISALIGNMENT(dr_info); } /* * Return the behavior of DR_INFO with respect to the vectorization context * (which for outer loop vectorization might not be the behavior recorded in * DR_INFO itself). */ static inline innermost_loop_behavior * vect_dr_behavior(dr_vec_info * dr_info) { stmt_vec_info stmt_info = dr_info->stmt; loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO(stmt_info); if (loop_vinfo == NULL || !nested_in_vect_loop_p(LOOP_VINFO_LOOP(loop_vinfo), stmt_info)) return &DR_INNERMOST(dr_info->dr); else return &STMT_VINFO_DR_WRT_VEC_LOOP(stmt_info); } /* Return true if the vect cost model is unlimited. */ static inline bool unlimited_cost_model(loop_p loop) { if (loop != NULL && loop->force_vectorize && flag_simd_cost_model != VECT_COST_MODEL_DEFAULT) return flag_simd_cost_model == VECT_COST_MODEL_UNLIMITED; return (flag_vect_cost_model == VECT_COST_MODEL_UNLIMITED); } /* * Return true if the loop described by LOOP_VINFO is fully-masked and if the * first iteration should use a partial mask in order to achieve alignment. 
*/ static inline bool vect_use_loop_mask_for_alignment_p(loop_vec_info loop_vinfo) { return (LOOP_VINFO_FULLY_MASKED_P(loop_vinfo) && LOOP_VINFO_PEELING_FOR_ALIGNMENT(loop_vinfo)); } /* * Return the number of vectors of type VECTYPE that are needed to get NUNITS * elements. NUNITS should be based on the vectorization factor, so it is * always a known multiple of the number of elements in VECTYPE. */ static inline unsigned int vect_get_num_vectors(poly_uint64 nunits, tree vectype) { return exact_div(nunits, TYPE_VECTOR_SUBPARTS(vectype)).to_constant(); } /* * Return the number of copies needed for loop vectorization when a statement * operates on vectors of type VECTYPE. This is the vectorization factor * divided by the number of elements in VECTYPE and is always known at * compile time. */ static inline unsigned int vect_get_num_copies(loop_vec_info loop_vinfo, tree vectype) { return vect_get_num_vectors(LOOP_VINFO_VECT_FACTOR(loop_vinfo), vectype); } /* * Update maximum unit count *MAX_NUNITS so that it accounts for NUNITS. * *MAX_NUNITS can be 1 if we haven't yet recorded anything. */ static inline void vect_update_max_nunits(poly_uint64 * max_nunits, poly_uint64 nunits) { /* * All unit counts have the form current_vector_size * X for some * rational X, so two unit sizes must have a common multiple. Everything * is a multiple of the initial value of 1. */ *max_nunits = force_common_multiple(*max_nunits, nunits); } /* * Update maximum unit count *MAX_NUNITS so that it accounts for the number * of units in vector type VECTYPE. *MAX_NUNITS can be 1 if we haven't yet * recorded any vector types. */ static inline void vect_update_max_nunits(poly_uint64 * max_nunits, tree vectype) { vect_update_max_nunits(max_nunits, TYPE_VECTOR_SUBPARTS(vectype)); } /* * Return the vectorization factor that should be used for costing purposes * while vectorizing the loop described by LOOP_VINFO. Pick a reasonable * estimate if the vectorization factor isn't known at compile time. */ static inline unsigned int vect_vf_for_cost(loop_vec_info loop_vinfo) { return estimated_poly_value(LOOP_VINFO_VECT_FACTOR(loop_vinfo)); } /* * Estimate the number of elements in VEC_TYPE for costing purposes. Pick a * reasonable estimate if the exact number isn't known at compile time. */ static inline unsigned int vect_nunits_for_cost(tree vec_type) { return estimated_poly_value(TYPE_VECTOR_SUBPARTS(vec_type)); } /* Return the maximum possible vectorization factor for LOOP_VINFO. */ static inline unsigned HOST_WIDE_INT vect_max_vf(loop_vec_info loop_vinfo) { unsigned HOST_WIDE_INT vf; if (LOOP_VINFO_VECT_FACTOR(loop_vinfo).is_constant(&vf)) return vf; return MAX_VECTORIZATION_FACTOR; } /* * Return the size of the value accessed by unvectorized data reference * DR_INFO. This is only valid once STMT_VINFO_VECTYPE has been calculated * for the associated gimple statement, since that guarantees that DR_INFO * accesses either a scalar or a scalar equivalent. ("Scalar equivalent" * here includes things like V1SI, which can be vectorized in the same way as * a plain SI.) */ inline unsigned int vect_get_scalar_dr_size(dr_vec_info * dr_info) { return tree_to_uhwi(TYPE_SIZE_UNIT(TREE_TYPE(DR_REF(dr_info->dr)))); } /* Source location + hotness information. 
*/ extern dump_user_location_t vect_location; /* * A macro for calling: dump_begin_scope (MSG, vect_location); via an RAII * object, thus printing "=== MSG ===\n" to the dumpfile etc, and then * calling dump_end_scope (); once the object goes out of scope, thus * capturing the nesting of the scopes. * * These scopes affect dump messages within them: dump messages at the top level * implicitly default to MSG_PRIORITY_USER_FACING, whereas those in a nested * scope implicitly default to MSG_PRIORITY_INTERNALS. */ #define DUMP_VECT_SCOPE(MSG) \ AUTO_DUMP_SCOPE (MSG, vect_location) /* * A sentinel class for ensuring that the "vect_location" global gets reset * at the end of a scope. * * The "vect_location" global is used during dumping and contains a location_t, * which could contain references to a tree block via the ad-hoc data. This * data is used for tracking inlining information, but it's not a GC root; * it's simply assumed that such locations never get accessed if the blocks * are optimized away. * * Hence we need to ensure that such locations are purged at the end of any * operations using them (e.g. via this class). */ class auto_purge_vect_location { public: ~auto_purge_vect_location(); }; /*-----------------------------------------------------------------*/ /* Function prototypes. */ /*-----------------------------------------------------------------*/ /* * Simple loop peeling and versioning utilities for vectorizer's purposes - * in tree-vect-loop-manip.c. */ extern void vect_set_loop_condition(struct loop *, loop_vec_info, tree, tree, tree, bool); extern bool slpeel_can_duplicate_loop_p(const struct loop *, const_edge); struct loop * slpeel_tree_duplicate_loop_to_edge_cfg(struct loop *, struct loop *, edge); struct loop * vect_loop_versioning(loop_vec_info, unsigned int, bool, poly_uint64); extern struct loop * vect_do_peeling(loop_vec_info, tree, tree, tree *, tree *, tree *, int, bool, bool); extern void vect_prepare_for_masked_peels(loop_vec_info); extern dump_user_location_t find_loop_location(struct loop *); extern bool vect_can_advance_ivs_p(loop_vec_info); /* In tree-vect-stmts.c. 
*/ extern poly_uint64 current_vector_size; extern tree get_vectype_for_scalar_type(tree); extern tree get_vectype_for_scalar_type_and_size(tree, poly_uint64); extern tree get_mask_type_for_scalar_type(tree); extern tree get_same_sized_vectype(tree, tree); extern bool vect_get_loop_mask_type(loop_vec_info); extern bool vect_is_simple_use(tree, vec_info *, enum vect_def_type *, stmt_vec_info * = NULL, gimple ** = NULL); extern bool vect_is_simple_use(tree, vec_info *, enum vect_def_type *, tree *, stmt_vec_info * = NULL, gimple ** = NULL); extern bool supportable_widening_operation(enum tree_code, stmt_vec_info, tree, tree, enum tree_code *, enum tree_code *, int *, vec < tree > *); extern bool supportable_narrowing_operation(enum tree_code, tree, tree, enum tree_code *, int *, vec < tree > *); extern unsigned record_stmt_cost(stmt_vector_for_cost *, int, enum vect_cost_for_stmt, stmt_vec_info, int, enum vect_cost_model_location); extern stmt_vec_info vect_finish_replace_stmt(stmt_vec_info, gimple *); extern stmt_vec_info vect_finish_stmt_generation(stmt_vec_info, gimple *, gimple_stmt_iterator *); extern opt_result vect_mark_stmts_to_be_vectorized(loop_vec_info); extern tree vect_get_store_rhs(stmt_vec_info); extern tree vect_get_vec_def_for_operand_1(stmt_vec_info, enum vect_def_type); extern tree vect_get_vec_def_for_operand(tree, stmt_vec_info, tree = NULL); extern void vect_get_vec_defs(tree, tree, stmt_vec_info, vec < tree > *, vec < tree > *, slp_tree); extern void vect_get_vec_defs_for_stmt_copy(vec_info *, vec < tree > *, vec < tree > *); extern tree vect_init_vector(stmt_vec_info, tree, tree, gimple_stmt_iterator *); extern tree vect_get_vec_def_for_stmt_copy(vec_info *, tree); extern bool vect_transform_stmt(stmt_vec_info, gimple_stmt_iterator *, slp_tree, slp_instance); extern void vect_remove_stores(stmt_vec_info); extern opt_result vect_analyze_stmt(stmt_vec_info, bool *, slp_tree, slp_instance, stmt_vector_for_cost *); extern bool vectorizable_condition(stmt_vec_info, gimple_stmt_iterator *, stmt_vec_info *, bool, slp_tree, stmt_vector_for_cost *); extern bool vectorizable_shift(stmt_vec_info, gimple_stmt_iterator *, stmt_vec_info *, slp_tree, stmt_vector_for_cost *); extern void vect_get_load_cost(stmt_vec_info, int, bool, unsigned int *, unsigned int *, stmt_vector_for_cost *, stmt_vector_for_cost *, bool); extern void vect_get_store_cost(stmt_vec_info, int, unsigned int *, stmt_vector_for_cost *); extern bool vect_supportable_shift(enum tree_code, tree); extern tree vect_gen_perm_mask_any(tree, const vec_perm_indices &); extern tree vect_gen_perm_mask_checked(tree, const vec_perm_indices &); extern void optimize_mask_stores(struct loop *); extern gcall *vect_gen_while(tree, tree, tree); extern tree vect_gen_while_not(gimple_seq *, tree, tree, tree); extern opt_result vect_get_vector_types_for_stmt(stmt_vec_info, tree *, tree *); extern opt_tree vect_get_mask_type_for_stmt(stmt_vec_info); /* In tree-vect-data-refs.c. 
*/ extern bool vect_can_force_dr_alignment_p(const_tree, poly_uint64); extern enum dr_alignment_support vect_supportable_dr_alignment (dr_vec_info *, bool); extern tree vect_get_smallest_scalar_type(stmt_vec_info, HOST_WIDE_INT *, HOST_WIDE_INT *); extern opt_result vect_analyze_data_ref_dependences(loop_vec_info, unsigned int *); extern bool vect_slp_analyze_instance_dependence(slp_instance); extern opt_result vect_enhance_data_refs_alignment(loop_vec_info); extern opt_result vect_analyze_data_refs_alignment(loop_vec_info); extern opt_result vect_verify_datarefs_alignment(loop_vec_info); extern bool vect_slp_analyze_and_verify_instance_alignment(slp_instance); extern opt_result vect_analyze_data_ref_accesses(vec_info *); extern opt_result vect_prune_runtime_alias_test_list(loop_vec_info); extern bool vect_gather_scatter_fn_p(bool, bool, tree, tree, unsigned int, signop, int, internal_fn *, tree *); extern bool vect_check_gather_scatter(stmt_vec_info, loop_vec_info, gather_scatter_info *); extern opt_result vect_find_stmt_data_reference(loop_p, gimple *, vec < data_reference_p > *); extern opt_result vect_analyze_data_refs(vec_info *, poly_uint64 *); extern void vect_record_base_alignments(vec_info *); extern tree vect_create_data_ref_ptr(stmt_vec_info, tree, struct loop *, tree, tree *, gimple_stmt_iterator *, gimple **, bool, tree = NULL_TREE, tree = NULL_TREE); extern tree bump_vector_ptr(tree, gimple *, gimple_stmt_iterator *, stmt_vec_info, tree); extern void vect_copy_ref_info(tree, tree); extern tree vect_create_destination_var(tree, tree); extern bool vect_grouped_store_supported(tree, unsigned HOST_WIDE_INT); extern bool vect_store_lanes_supported(tree, unsigned HOST_WIDE_INT, bool); extern bool vect_grouped_load_supported(tree, bool, unsigned HOST_WIDE_INT); extern bool vect_load_lanes_supported(tree, unsigned HOST_WIDE_INT, bool); extern void vect_permute_store_chain(vec < tree >, unsigned int, stmt_vec_info, gimple_stmt_iterator *, vec < tree > *); extern tree vect_setup_realignment(stmt_vec_info, gimple_stmt_iterator *, tree *, enum dr_alignment_support, tree, struct loop **); extern void vect_transform_grouped_load(stmt_vec_info, vec < tree >, int, gimple_stmt_iterator *); extern void vect_record_grouped_load_vectors(stmt_vec_info, vec < tree >); extern tree vect_get_new_vect_var(tree, enum vect_var_kind, const char *); extern tree vect_get_new_ssa_name(tree, enum vect_var_kind, const char *= NULL); extern tree vect_create_addr_base_for_vector_ref(stmt_vec_info, gimple_seq *, tree, tree = NULL_TREE); /* In tree-vect-loop.c. */ /* FORNOW: Used in tree-parloops.c. */ extern stmt_vec_info vect_force_simple_reduction(loop_vec_info, stmt_vec_info, bool *, bool); /* Used in gimple-loop-interchange.c. */ extern bool check_reduction_path(dump_user_location_t, loop_p, gphi *, tree, enum tree_code); /* Drive for loop analysis stage. */ extern opt_loop_vec_info vect_analyze_loop(struct loop *, loop_vec_info, vec_info_shared *); extern tree vect_build_loop_niters(loop_vec_info, bool * = NULL); extern void vect_gen_vector_loop_niters(loop_vec_info, tree, tree *, tree *, bool); extern tree vect_halve_mask_nunits(tree); extern tree vect_double_mask_nunits(tree); extern void vect_record_loop_mask(loop_vec_info, vec_loop_masks *, unsigned int, tree); extern tree vect_get_loop_mask(gimple_stmt_iterator *, vec_loop_masks *, unsigned int, tree, unsigned int); /* Drive for loop transformation stage. 
*/ extern struct loop *vect_transform_loop(loop_vec_info); extern opt_loop_vec_info vect_analyze_loop_form(struct loop *, vec_info_shared *); extern bool vectorizable_live_operation(stmt_vec_info, gimple_stmt_iterator *, slp_tree, int, stmt_vec_info *, stmt_vector_for_cost *); extern bool vectorizable_reduction(stmt_vec_info, gimple_stmt_iterator *, stmt_vec_info *, slp_tree, slp_instance, stmt_vector_for_cost *); extern bool vectorizable_induction(stmt_vec_info, gimple_stmt_iterator *, stmt_vec_info *, slp_tree, stmt_vector_for_cost *); extern tree get_initial_def_for_reduction(stmt_vec_info, tree, tree *); extern bool vect_worthwhile_without_simd_p(vec_info *, tree_code); extern int vect_get_known_peeling_cost(loop_vec_info, int, int *, stmt_vector_for_cost *, stmt_vector_for_cost *, stmt_vector_for_cost *); extern tree cse_and_gimplify_to_preheader(loop_vec_info, tree); /* In tree-vect-slp.c. */ extern void vect_free_slp_instance(slp_instance, bool); extern bool vect_transform_slp_perm_load(slp_tree, vec < tree >, gimple_stmt_iterator *, poly_uint64, slp_instance, bool, unsigned *); extern bool vect_slp_analyze_operations(vec_info *); extern void vect_schedule_slp(vec_info *); extern opt_result vect_analyze_slp(vec_info *, unsigned); extern bool vect_make_slp_decision(loop_vec_info); extern void vect_detect_hybrid_slp(loop_vec_info); extern void vect_get_slp_defs(vec < tree >, slp_tree, vec < vec < tree > >*); extern bool vect_slp_bb(basic_block); extern stmt_vec_info vect_find_last_scalar_stmt_in_slp(slp_tree); extern bool is_simple_and_all_uses_invariant(stmt_vec_info, loop_vec_info); extern bool can_duplicate_and_interleave_p(unsigned int, machine_mode, unsigned int *= NULL, tree * = NULL, tree * = NULL); extern void duplicate_and_interleave(gimple_seq *, tree, vec < tree >, unsigned int, vec < tree > &); extern int vect_get_place_in_interleaving_chain(stmt_vec_info, stmt_vec_info); /* In tree-vect-patterns.c. */ /* * Pattern recognition functions. Additional pattern recognition functions * can (and will) be added in the future. */ void vect_pattern_recog(vec_info *); /* In tree-vectorizer.c. */ unsigned vectorize_loops(void); void vect_free_loop_info_assumptions(struct loop *); #endif /* GCC_TREE_VECTORIZER_H */
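/* Illustrative sketch, not part of tree-vectorizer.h or the GCC sources:
   a minimal standalone program showing why vect_known_alignment_in_bytes
   can return DR_MISALIGNMENT (dr_info) & -DR_MISALIGNMENT (dr_info) when
   the misalignment is known and nonzero.  The misalignment is measured
   against DR_TARGET_ALIGNMENT, which is assumed here to be a power of
   two, so the guaranteed alignment of every access is the lowest set bit
   of the misalignment value.  The helper name guaranteed_alignment is
   hypothetical and chosen only for this example.  */
#include <cstdio>

static unsigned int
guaranteed_alignment (unsigned int target_alignment, int misalignment)
{
  if (misalignment == 0)
    return target_alignment;           /* access is fully aligned */
  return misalignment & -misalignment; /* lowest set bit of misalignment */
}

int
main ()
{
  /* E.g. a 32-byte target alignment with the first access 12 bytes off:
     the address is a multiple of 32 plus 12, so every access is still
     4-byte aligned (12 = 0b1100, lowest set bit is 4).  */
  std::printf ("%u\n", guaranteed_alignment (32, 12)); /* prints 4 */
  return 0;
}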
#ifndef GCC_TREE_VECTORIZER_H #define GCC_TREE_VECTORIZER_H typedef struct _stmt_vec_info *stmt_vec_info; #include "tree-data-ref.h" #include "tree-hash-traits.h" #include "target.h" /* Used for naming of new temporaries. */ enum vect_var_kind { vect_simple_var, vect_pointer_var, vect_scalar_var, vect_mask_var }; /* Defines type of operation. */ enum operation_type { unary_op = 1, binary_op, ternary_op }; /* Define type of available alignment support. */ enum dr_alignment_support { dr_unaligned_unsupported, dr_unaligned_supported, dr_explicit_realign, dr_explicit_realign_optimized, dr_aligned }; /* Define type of def-use cross-iteration cycle. */ enum vect_def_type { vect_uninitialized_def = 0, vect_constant_def = 1, vect_external_def, vect_internal_def, vect_induction_def, vect_reduction_def, vect_double_reduction_def, vect_nested_cycle, vect_unknown_def_type }; /* Define type of reduction. */ enum vect_reduction_type { TREE_CODE_REDUCTION, COND_REDUCTION, INTEGER_INDUC_COND_REDUCTION, CONST_COND_REDUCTION, /* * Retain a scalar phi and use a FOLD_EXTRACT_LAST within the loop to * implement: * * for (int i = 0; i < VF; ++i) res = cond[i] ? val[i] : res; */ EXTRACT_LAST_REDUCTION, /* * Use a folding reduction within the loop to implement: * * for (int i = 0; i < VF; ++i) res = res OP val[i]; * * (with no reassocation). */ FOLD_LEFT_REDUCTION }; #define VECTORIZABLE_CYCLE_DEF(D) (((D) == vect_reduction_def) \ || ((D) == vect_double_reduction_def) \ || ((D) == vect_nested_cycle)) /* * Structure to encapsulate information about a group of like instructions to * be presented to the target cost model. */ struct stmt_info_for_cost { int count; enum vect_cost_for_stmt kind; enum vect_cost_model_location where; stmt_vec_info stmt_info; int misalign; }; typedef vec < stmt_info_for_cost > stmt_vector_for_cost; /* * Maps base addresses to an innermost_loop_behavior that gives the maximum * known alignment for that base. */ typedef hash_map < tree_operand_hash, innermost_loop_behavior * >vec_base_alignments; /************************************************************************ SLP ************************************************************************/ typedef struct _slp_tree *slp_tree; /* * A computation tree of an SLP instance. Each node corresponds to a group * of stmts to be packed in a SIMD stmt. */ struct _slp_tree { /* Nodes that contain def-stmts of this node statements operands. */ vec < slp_tree > children; /* A group of scalar stmts to be vectorized together. */ vec < stmt_vec_info > stmts; /* * Load permutation relative to the stores, NULL if there is no * permutation. */ vec < unsigned >load_permutation; /* Vectorized stmt/s. */ vec < stmt_vec_info > vec_stmts; /* * Number of vector stmts that are created to replace the group of scalar * stmts. It is calculated during the transformation phase as the number * of scalar elements in one scalar iteration (GROUP_SIZE) multiplied by * VF divided by vector size. */ unsigned int vec_stmts_size; /* Reference count in the SLP graph. */ unsigned int refcnt; /* * The maximum number of vector elements for the subtree rooted at this * node. */ poly_uint64 max_nunits; /* Whether the scalar computations use two different operators. */ bool two_operators; /* The DEF type of this node. */ enum vect_def_type def_type; }; /* * SLP instance is a sequence of stmts in a loop that can be packed into SIMD * stmts. */ typedef struct _slp_instance { /* The root of SLP tree. 
*/ slp_tree root; /* Size of groups of scalar stmts that will be replaced by SIMD stmt/s. */ unsigned int group_size; /* The unrolling factor required to vectorized this SLP instance. */ poly_uint64 unrolling_factor; /* The group of nodes that contain loads of this SLP instance. */ vec < slp_tree > loads; /* The SLP node containing the reduction PHIs. */ slp_tree reduc_phis; } *slp_instance; /* Access Functions. */ #define SLP_INSTANCE_TREE(S) (S)->root #define SLP_INSTANCE_GROUP_SIZE(S) (S)->group_size #define SLP_INSTANCE_UNROLLING_FACTOR(S) (S)->unrolling_factor #define SLP_INSTANCE_LOADS(S) (S)->loads #define SLP_TREE_CHILDREN(S) (S)->children #define SLP_TREE_SCALAR_STMTS(S) (S)->stmts #define SLP_TREE_VEC_STMTS(S) (S)->vec_stmts #define SLP_TREE_NUMBER_OF_VEC_STMTS(S) (S)->vec_stmts_size #define SLP_TREE_LOAD_PERMUTATION(S) (S)->load_permutation #define SLP_TREE_TWO_OPERATORS(S) (S)->two_operators #define SLP_TREE_DEF_TYPE(S) (S)->def_type /* * Describes two objects whose addresses must be unequal for the vectorized * loop to be valid. */ typedef std::pair < tree, tree > vec_object_pair; /* * Records that vectorization is only possible if abs (EXPR) >= MIN_VALUE. * UNSIGNED_P is true if we can assume that abs (EXPR) == EXPR. */ struct vec_lower_bound { vec_lower_bound() { } vec_lower_bound(tree e, bool u, poly_uint64 m) :expr(e), unsigned_p(u), min_value(m) { } tree expr; bool unsigned_p; poly_uint64 min_value; }; /* * Vectorizer state shared between different analyses like vector sizes of * the same CFG region. */ struct vec_info_shared { vec_info_shared(); ~vec_info_shared(); void save_datarefs(); void check_datarefs(); /* All data references. Freed by free_data_refs, so not an auto_vec. */ vec < data_reference_p > datarefs; vec < data_reference > datarefs_copy; /* The loop nest in which the data dependences are computed. */ auto_vec < loop_p > loop_nest; /* * All data dependences. Freed by free_dependence_relations, so not an * auto_vec. */ vec < ddr_p > ddrs; }; /* Vectorizer state common between loop and basic-block vectorization. */ struct vec_info { enum vec_kind { bb, loop }; vec_info(vec_kind, void *, vec_info_shared *); ~vec_info(); stmt_vec_info add_stmt(gimple *); stmt_vec_info lookup_stmt(gimple *); stmt_vec_info lookup_def(tree); stmt_vec_info lookup_single_use(tree); struct dr_vec_info *lookup_dr(data_reference *); void move_dr(stmt_vec_info, stmt_vec_info); void remove_stmt(stmt_vec_info); void replace_stmt(gimple_stmt_iterator *, stmt_vec_info, gimple *); /* The type of vectorization. */ vec_kind kind; /* Shared vectorizer state. */ vec_info_shared *shared; /* The mapping of GIMPLE UID to stmt_vec_info. */ vec < stmt_vec_info > stmt_vec_infos; /* All SLP instances. */ auto_vec < slp_instance > slp_instances; /* * Maps base addresses to an innermost_loop_behavior that gives the * maximum known alignment for that base. */ vec_base_alignments base_alignments; /* * All interleaving chains of stores, represented by the first stmt in * the chain. */ auto_vec < stmt_vec_info > grouped_stores; /* Cost data used by the target cost model. 
*/ void *target_cost_data; private: stmt_vec_info new_stmt_vec_info(gimple * stmt); void set_vinfo_for_stmt(gimple *, stmt_vec_info); void free_stmt_vec_infos(); void free_stmt_vec_info(stmt_vec_info); }; struct _loop_vec_info; struct _bb_vec_info; template <> template <> inline bool is_a_helper < _loop_vec_info * >: :test(vec_info * i) { return i->kind == vec_info: :loop; } template <> template <> inline bool is_a_helper < _bb_vec_info * >: :test(vec_info * i) { return i->kind == vec_info: :bb; } /* * In general, we can divide the vector statements in a vectorized loop into * related groups ("rgroups") and say that for each rgroup there is some nS * such that the rgroup operates on nS values from one scalar iteration * followed by nS values from the next. That is, if VF is the vectorization * factor of the loop, the rgroup operates on a sequence: * * (1,1) (1,2) ... (1,nS) (2,1) ... (2,nS) ... (VF,1) ... (VF,nS) * * where (i,j) represents a scalar value with index j in a scalar iteration with * index i. * * [ We use the term "rgroup" to emphasise that this grouping isn't necessarily * the same as the grouping of statements used elsewhere. For example, if we * implement a group of scalar loads using gather loads, we'll use a separate * gather load for each scalar load, and thus each gather load will belong to * its own rgroup. ] * * In general this sequence will occupy nV vectors concatenated together. If * these vectors have nL lanes each, the total number of scalar values N is * given by: * * N = nS * VF = nV * nL * * None of nS, VF, nV and nL are required to be a power of 2. nS and nV are * compile-time constants but VF and nL can be variable (if the target * supports variable-length vectors). * * In classical vectorization, each iteration of the vector loop would handle * exactly VF iterations of the original scalar loop. However, in a * fully-masked loop, a particular iteration of the vector loop might handle * fewer than VF iterations of the scalar loop. The vector lanes that * correspond to iterations of the scalar loop are said to be "active" and * the other lanes are said to be "inactive". * * In a fully-masked loop, many rgroups need to be masked to ensure that they * have no effect for the inactive lanes. Each such rgroup needs a sequence * of booleans in the same order as above, but with each (i,j) replaced by a * boolean that indicates whether iteration i is active. This sequence * occupies nV vector masks that again have nL lanes each. Thus the mask * sequence as a whole consists of VF independent booleans that are each * repeated nS times. * * We make the simplifying assumption that if a sequence of nV masks is suitable * for one (nS,nL) pair, we can reuse it for (nS/2,nL/2) by VIEW_CONVERTing * it. This holds for all current targets that support fully-masked loops. * For example, suppose the scalar loop is: * * float *f; double *d; for (int i = 0; i < n; ++i) { f[i * 2 + 0] += 1.0f; f[i * * 2 + 1] += 2.0f; d[i] += 3.0; } * * and suppose that vectors have 256 bits. The vectorized f accesses will * belong to one rgroup and the vectorized d access to another: * * f rgroup: nS = 2, nV = 1, nL = 8 d rgroup: nS = 1, nV = 1, nL = 4 VF = 4 * * [ In this simple example the rgroups do correspond to the normal SLP grouping * scheme. ] * * If only the first three lanes are active, the masks we need are: * * f rgroup: 1 1 | 1 1 | 1 1 | 0 0 d rgroup: 1 | 1 | 1 | 0 * * Here we can use a mask calculated for f's rgroup for d's, but not vice versa. 
* * Thus for each value of nV, it is enough to provide nV masks, with the mask * being calculated based on the highest nL (or, equivalently, based on the * highest nS) required by any rgroup with that nV. We therefore represent * the entire collection of masks as a two-level table, with the first level * being indexed by nV - 1 (since nV == 0 doesn't exist) and the second being * indexed by the mask index 0 <= i < nV. */ /* * The masks needed by rgroups with nV vectors, according to the description * above. */ struct rgroup_masks { /* The largest nS for all rgroups that use these masks. */ unsigned int max_nscalars_per_iter; /* The type of mask to use, based on the highest nS recorded above. */ tree mask_type; /* A vector of nV masks, in iteration order. */ vec < tree > masks; }; typedef auto_vec < rgroup_masks > vec_loop_masks; /*-----------------------------------------------------------------*/ /* Info on vectorized loops. */ /*-----------------------------------------------------------------*/ typedef struct _loop_vec_info:public vec_info { _loop_vec_info(struct loop *, vec_info_shared *); ~_loop_vec_info(); /* The loop to which this info struct refers to. */ struct loop *loop; /* The loop basic blocks. */ basic_block *bbs; /* Number of latch executions. */ tree num_itersm1; /* Number of iterations. */ tree num_iters; /* Number of iterations of the original loop. */ tree num_iters_unchanged; /* Condition under which this loop is analyzed and versioned. */ tree num_iters_assumptions; /* * Threshold of number of iterations below which vectorzation will not be * performed. It is calculated from MIN_PROFITABLE_ITERS and * PARAM_MIN_VECT_LOOP_BOUND. */ unsigned int th; /* * When applying loop versioning, the vector form should only be used if * the number of scalar iterations is >= this value, on top of all the * other requirements. Ignored when loop versioning is not being used. */ poly_uint64 versioning_threshold; /* Unrolling factor */ poly_uint64 vectorization_factor; /* * Maximum runtime vectorization factor, or MAX_VECTORIZATION_FACTOR if * there is no particular limit. */ unsigned HOST_WIDE_INT max_vectorization_factor; /* * The masks that a fully-masked loop should use to avoid operating on * inactive scalars. */ vec_loop_masks masks; /* * If we are using a loop mask to align memory addresses, this variable * contains the number of vector elements that we should skip in the * first iteration of the vector loop (i.e. the number of leading * elements that should be false in the first mask). */ tree mask_skip_niters; /* * Type of the variables to use in the WHILE_ULT call for fully-masked * loops. */ tree mask_compare_type; /* * For #pragma omp simd if (x) loops the x expression. If constant 0, * the loop should not be vectorized, if constant non-zero, simd_if_cond * shouldn't be set and loop vectorized normally, if SSA_NAME, the loop * should be versioned on that condition, using scalar loop if the * condition is false and vectorized loop otherwise. */ tree simd_if_cond; /* Unknown DRs according to which loop was peeled. */ struct dr_vec_info *unaligned_dr; /* * peeling_for_alignment indicates whether peeling for alignment will * take place, and what the peeling factor should be: * peeling_for_alignment = X means: If X=0: Peeling for alignment will * not be applied. If X>0: Peel first X iterations. If X=-1: Generate a * runtime test to calculate the number of iterations to be peeled, using * the dataref recorded in the field unaligned_dr. 
*/ int peeling_for_alignment; /* The mask used to check the alignment of pointers or arrays. */ int ptr_mask; /* * Data Dependence Relations defining address ranges that are candidates * for a run-time aliasing check. */ auto_vec < ddr_p > may_alias_ddrs; /* * Data Dependence Relations defining address ranges together with * segment lengths from which the run-time aliasing check is built. */ auto_vec < dr_with_seg_len_pair_t > comp_alias_ddrs; /* Check that the addresses of each pair of objects is unequal. */ auto_vec < vec_object_pair > check_unequal_addrs; /* * List of values that are required to be nonzero. This is used to check * whether things like "x[i * n] += 1;" are safe and eventually gets * added to the checks for lower bounds below. */ auto_vec < tree > check_nonzero; /* List of values that need to be checked for a minimum value. */ auto_vec < vec_lower_bound > lower_bounds; /* * Statements in the loop that have data references that are candidates * for a runtime (loop versioning) misalignment check. */ auto_vec < stmt_vec_info > may_misalign_stmts; /* Reduction cycles detected in the loop. Used in loop-aware SLP. */ auto_vec < stmt_vec_info > reductions; /* * All reduction chains in the loop, represented by the first stmt in the * chain. */ auto_vec < stmt_vec_info > reduction_chains; /* Cost vector for a single scalar iteration. */ auto_vec < stmt_info_for_cost > scalar_cost_vec; /* Map of IV base/step expressions to inserted name in the preheader. */ hash_map < tree_operand_hash, tree > *ivexpr_map; /* * The unrolling factor needed to SLP the loop. In case of that pure SLP * is applied to the loop, i.e., no unrolling is needed, this is 1. */ poly_uint64 slp_unrolling_factor; /* Cost of a single scalar iteration. */ int single_scalar_iteration_cost; /* Is the loop vectorizable? */ bool vectorizable; /* Records whether we still have the option of using a fully-masked loop. */ bool can_fully_mask_p; /* True if have decided to use a fully-masked loop. */ bool fully_masked_p; /* * When we have grouped data accesses with gaps, we may introduce invalid * memory accesses. We peel the last iteration of the loop to prevent * this. */ bool peeling_for_gaps; /* * When the number of iterations is not a multiple of the vector size we * need to peel off iterations at the end to form an epilogue loop. */ bool peeling_for_niter; /* * Reductions are canonicalized so that the last operand is the reduction * operand. If this places a constant into RHS1, this decanonicalizes * GIMPLE for other phases, so we must track when this has occurred and * fix it up. */ bool operands_swapped; /* * True if there are no loop carried data dependencies in the loop. If * loop->safelen <= 1, then this is always true, either the loop didn't * have any loop carried data dependencies, or the loop is being * vectorized guarded with some runtime alias checks, or couldn't be * vectorized at all, but then this field shouldn't be used. For * loop->safelen >= 2, the user has asserted that there are no backward * dependencies, but there still could be loop carried forward * dependencies in such loops. This flag will be false if normal * vectorizer data dependency analysis would fail or require versioning * for alias, but because of loop->safelen >= 2 it has been vectorized * even without versioning for alias. E.g. 
in: #pragma omp simd for (int * i = 0; i < m; i++) a[i] = a[i + k] * c; (or #pragma simd or #pragma * ivdep) we can vectorize this and it will DTRT even for k > 0 && k < m, * but without safelen we would not vectorize this, so this field would * be false. */ bool no_data_dependencies; /* Mark loops having masked stores. */ bool has_mask_store; /* * If if-conversion versioned this loop before conversion, this is the * loop version without if-conversion. */ struct loop *scalar_loop; /* * For loops being epilogues of already vectorized loops this points to * the original vectorized loop. Otherwise NULL. */ _loop_vec_info *orig_loop_info; } *loop_vec_info; /* Access Functions. */ #define LOOP_VINFO_LOOP(L) (L)->loop #define LOOP_VINFO_BBS(L) (L)->bbs #define LOOP_VINFO_NITERSM1(L) (L)->num_itersm1 #define LOOP_VINFO_NITERS(L) (L)->num_iters /* * Since LOOP_VINFO_NITERS and LOOP_VINFO_NITERSM1 can change after prologue * peeling retain total unchanged scalar loop iterations for cost model. */ #define LOOP_VINFO_NITERS_UNCHANGED(L) (L)->num_iters_unchanged #define LOOP_VINFO_NITERS_ASSUMPTIONS(L) (L)->num_iters_assumptions #define LOOP_VINFO_COST_MODEL_THRESHOLD(L) (L)->th #define LOOP_VINFO_VERSIONING_THRESHOLD(L) (L)->versioning_threshold #define LOOP_VINFO_VECTORIZABLE_P(L) (L)->vectorizable #define LOOP_VINFO_CAN_FULLY_MASK_P(L) (L)->can_fully_mask_p #define LOOP_VINFO_FULLY_MASKED_P(L) (L)->fully_masked_p #define LOOP_VINFO_VECT_FACTOR(L) (L)->vectorization_factor #define LOOP_VINFO_MAX_VECT_FACTOR(L) (L)->max_vectorization_factor #define LOOP_VINFO_MASKS(L) (L)->masks #define LOOP_VINFO_MASK_SKIP_NITERS(L) (L)->mask_skip_niters #define LOOP_VINFO_MASK_COMPARE_TYPE(L) (L)->mask_compare_type #define LOOP_VINFO_PTR_MASK(L) (L)->ptr_mask #define LOOP_VINFO_LOOP_NEST(L) (L)->shared->loop_nest #define LOOP_VINFO_DATAREFS(L) (L)->shared->datarefs #define LOOP_VINFO_DDRS(L) (L)->shared->ddrs #define LOOP_VINFO_INT_NITERS(L) (TREE_INT_CST_LOW ((L)->num_iters)) #define LOOP_VINFO_PEELING_FOR_ALIGNMENT(L) (L)->peeling_for_alignment #define LOOP_VINFO_UNALIGNED_DR(L) (L)->unaligned_dr #define LOOP_VINFO_MAY_MISALIGN_STMTS(L) (L)->may_misalign_stmts #define LOOP_VINFO_MAY_ALIAS_DDRS(L) (L)->may_alias_ddrs #define LOOP_VINFO_COMP_ALIAS_DDRS(L) (L)->comp_alias_ddrs #define LOOP_VINFO_CHECK_UNEQUAL_ADDRS(L) (L)->check_unequal_addrs #define LOOP_VINFO_CHECK_NONZERO(L) (L)->check_nonzero #define LOOP_VINFO_LOWER_BOUNDS(L) (L)->lower_bounds #define LOOP_VINFO_GROUPED_STORES(L) (L)->grouped_stores #define LOOP_VINFO_SLP_INSTANCES(L) (L)->slp_instances #define LOOP_VINFO_SLP_UNROLLING_FACTOR(L) (L)->slp_unrolling_factor #define LOOP_VINFO_REDUCTIONS(L) (L)->reductions #define LOOP_VINFO_REDUCTION_CHAINS(L) (L)->reduction_chains #define LOOP_VINFO_TARGET_COST_DATA(L) (L)->target_cost_data #define LOOP_VINFO_PEELING_FOR_GAPS(L) (L)->peeling_for_gaps #define LOOP_VINFO_OPERANDS_SWAPPED(L) (L)->operands_swapped #define LOOP_VINFO_PEELING_FOR_NITER(L) (L)->peeling_for_niter #define LOOP_VINFO_NO_DATA_DEPENDENCIES(L) (L)->no_data_dependencies #define LOOP_VINFO_SCALAR_LOOP(L) (L)->scalar_loop #define LOOP_VINFO_HAS_MASK_STORE(L) (L)->has_mask_store #define LOOP_VINFO_SCALAR_ITERATION_COST(L) (L)->scalar_cost_vec #define LOOP_VINFO_SINGLE_SCALAR_ITERATION_COST(L) (L)->single_scalar_iteration_cost #define LOOP_VINFO_ORIG_LOOP_INFO(L) (L)->orig_loop_info #define LOOP_VINFO_SIMD_IF_COND(L) (L)->simd_if_cond #define LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT(L) \ ((L)->may_misalign_stmts.length () > 0) #define 
LOOP_REQUIRES_VERSIONING_FOR_ALIAS(L) \ ((L)->comp_alias_ddrs.length () > 0 \ || (L)->check_unequal_addrs.length () > 0 \ || (L)->lower_bounds.length () > 0) #define LOOP_REQUIRES_VERSIONING_FOR_NITERS(L) \ (LOOP_VINFO_NITERS_ASSUMPTIONS (L)) #define LOOP_REQUIRES_VERSIONING_FOR_SIMD_IF_COND(L) \ (LOOP_VINFO_SIMD_IF_COND (L)) #define LOOP_REQUIRES_VERSIONING(L) \ (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (L) \ || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (L) \ || LOOP_REQUIRES_VERSIONING_FOR_NITERS (L) \ || LOOP_REQUIRES_VERSIONING_FOR_SIMD_IF_COND (L)) #define LOOP_VINFO_NITERS_KNOWN_P(L) \ (tree_fits_shwi_p ((L)->num_iters) && tree_to_shwi ((L)->num_iters) > 0) #define LOOP_VINFO_EPILOGUE_P(L) \ (LOOP_VINFO_ORIG_LOOP_INFO (L) != NULL) #define LOOP_VINFO_ORIG_MAX_VECT_FACTOR(L) \ (LOOP_VINFO_MAX_VECT_FACTOR (LOOP_VINFO_ORIG_LOOP_INFO (L))) /* * Wrapper for loop_vec_info, for tracking success/failure, where a non-NULL * value signifies success, and a NULL value signifies failure, supporting * propagating an opt_problem * describing the failure back up the call * stack. */ typedef opt_pointer_wrapper < loop_vec_info > opt_loop_vec_info; static inline loop_vec_info loop_vec_info_for_loop(struct loop *loop) { return (loop_vec_info) loop->aux; } typedef struct _bb_vec_info:public vec_info { _bb_vec_info(gimple_stmt_iterator, gimple_stmt_iterator, vec_info_shared *); ~_bb_vec_info(); basic_block bb; gimple_stmt_iterator region_begin; gimple_stmt_iterator region_end; } *bb_vec_info; #define BB_VINFO_BB(B) (B)->bb #define BB_VINFO_GROUPED_STORES(B) (B)->grouped_stores #define BB_VINFO_SLP_INSTANCES(B) (B)->slp_instances #define BB_VINFO_DATAREFS(B) (B)->shared->datarefs #define BB_VINFO_DDRS(B) (B)->shared->ddrs #define BB_VINFO_TARGET_COST_DATA(B) (B)->target_cost_data static inline bb_vec_info vec_info_for_bb(basic_block bb) { return (bb_vec_info) bb->aux; } /*-----------------------------------------------------------------*/ /* Info on vectorized defs. */ /*-----------------------------------------------------------------*/ enum stmt_vec_info_type { undef_vec_info_type = 0, load_vec_info_type, store_vec_info_type, shift_vec_info_type, op_vec_info_type, call_vec_info_type, call_simd_clone_vec_info_type, assignment_vec_info_type, condition_vec_info_type, comparison_vec_info_type, reduc_vec_info_type, induc_vec_info_type, type_promotion_vec_info_type, type_demotion_vec_info_type, type_conversion_vec_info_type, loop_exit_ctrl_vec_info_type }; /* * Indicates whether/how a variable is used in the scope of loop/basic block. */ enum vect_relevant { vect_unused_in_scope = 0, /* The def is only used outside the loop. */ vect_used_only_live, /* * The def is in the inner loop, and the use is in the outer loop, and * the use is a reduction stmt. */ vect_used_in_outer_by_reduction, /* * The def is in the inner loop, and the use is in the outer loop (and is * not part of reduction). */ vect_used_in_outer, /* * defs that feed computations that end up (only) in a reduction. These * defs may be used by non-reduction stmts, but eventually, any * computations/values that are affected by these defs are used to * compute a reduction (i.e. don't get stored to memory, for example). We * use this to identify computations that we can change the order in * which they are computed. 
*/ vect_used_by_reduction, vect_used_in_scope }; /* * The type of vectorization that can be applied to the stmt: regular * loop-based vectorization; pure SLP - the stmt is a part of SLP instances * and does not have uses outside SLP instances; or hybrid SLP and loop-based * - the stmt is a part of SLP instance and also must be loop-based * vectorized, since it has uses outside SLP sequences. * * In the loop context the meanings of pure and hybrid SLP are slightly * different. By saying that pure SLP is applied to the loop, we mean that we * exploit only intra-iteration parallelism in the loop; i.e., the loop can * be vectorized without doing any conceptual unrolling, cause we don't pack * together stmts from different iterations, only within a single iteration. * Loop hybrid SLP means that we exploit both intra-iteration and * inter-iteration parallelism (e.g., number of elements in the vector is 4 * and the slp-group-size is 2, in which case we don't have enough * parallelism within an iteration, so we obtain the rest of the parallelism * from subsequent iterations by unrolling the loop by 2). */ enum slp_vect_type { loop_vect = 0, pure_slp, hybrid }; /* * Says whether a statement is a load, a store of a vectorized statement * result, or a store of an invariant value. */ enum vec_load_store_type { VLS_LOAD, VLS_STORE, VLS_STORE_INVARIANT }; /* * Describes how we're going to vectorize an individual load or store, or a * group of loads or stores. */ enum vect_memory_access_type { /* An access to an invariant address. This is used only for loads. */ VMAT_INVARIANT, /* A simple contiguous access. */ VMAT_CONTIGUOUS, /* * A contiguous access that goes down in memory rather than up, with no * additional permutation. This is used only for stores of invariants. */ VMAT_CONTIGUOUS_DOWN, /* * A simple contiguous access in which the elements need to be permuted * after loading or before storing. Only used for loop vectorization; * SLP uses separate permutes. */ VMAT_CONTIGUOUS_PERMUTE, /* * A simple contiguous access in which the elements need to be reversed * after loading or before storing. */ VMAT_CONTIGUOUS_REVERSE, /* An access that uses IFN_LOAD_LANES or IFN_STORE_LANES. */ VMAT_LOAD_STORE_LANES, /* * An access in which each scalar element is loaded or stored * individually. */ VMAT_ELEMENTWISE, /* * A hybrid of VMAT_CONTIGUOUS and VMAT_ELEMENTWISE, used for grouped SLP * accesses. Each unrolled iteration uses a contiguous load or store for * the whole group, but the groups from separate iterations are combined * in the same way as for VMAT_ELEMENTWISE. */ VMAT_STRIDED_SLP, /* The access uses gather loads or scatter stores. */ VMAT_GATHER_SCATTER }; struct dr_vec_info { /* The data reference itself. */ data_reference *dr; /* The statement that contains the data reference. */ stmt_vec_info stmt; /* The misalignment in bytes of the reference, or -1 if not known. */ int misalignment; /* * The byte alignment that we'd ideally like the reference to have, and * the value that misalignment is measured against. */ poly_uint64 target_alignment; /* If true the alignment of base_decl needs to be increased. */ bool base_misaligned; tree base_decl; }; typedef struct data_reference *dr_p; struct _stmt_vec_info { enum stmt_vec_info_type type; /* * Indicates whether this stmts is part of a computation whose result is * used outside the loop. 
*/ bool live; /* Stmt is part of some pattern (computation idiom) */ bool in_pattern_p; /* * True if the statement was created during pattern recognition as part * of the replacement for RELATED_STMT. This implies that the statement * isn't part of any basic block, although for convenience its gimple_bb * is the same as for RELATED_STMT. */ bool pattern_stmt_p; /* * Is this statement vectorizable or should it be skipped in (partial) * vectorization. */ bool vectorizable; /* The stmt to which this info struct refers to. */ gimple *stmt; /* The vec_info with respect to which STMT is vectorized. */ vec_info *vinfo; /* The vector type to be used for the LHS of this statement. */ tree vectype; /* The vectorized version of the stmt. */ stmt_vec_info vectorized_stmt; /* * The following is relevant only for stmts that contain a non-scalar * data-ref (array/pointer/struct access). A GIMPLE stmt is expected to * have at most one such data-ref. */ dr_vec_info dr_aux; /* * Information about the data-ref relative to this loop nest (the loop * that is being considered for vectorization). */ innermost_loop_behavior dr_wrt_vec_loop; /* * For loop PHI nodes, the base and evolution part of it. This makes * sure this information is still available in * vect_update_ivs_after_vectorizer where we may not be able to * re-analyze the PHI nodes evolution as peeling for the prologue loop * can make it unanalyzable. The evolution part is still correct after * peeling, but the base may have changed from the version here. */ tree loop_phi_evolution_base_unchanged; tree loop_phi_evolution_part; /* * Used for various bookkeeping purposes, generally holding a pointer to * some other stmt S that is in some way "related" to this stmt. Current * use of this field is: If this stmt is part of a pattern (i.e. the * field 'in_pattern_p' is true): S is the "pattern stmt" that represents * (and replaces) the sequence of stmts that constitutes the pattern. * Similarly, the related_stmt of the "pattern stmt" points back to this * stmt (which is the last stmt in the original sequence of stmts that * constitutes the pattern). */ stmt_vec_info related_stmt; /* * Used to keep a sequence of def stmts of a pattern stmt if such exists. * The sequence is attached to the original statement rather than the * pattern statement. */ gimple_seq pattern_def_seq; /* * List of datarefs that are known to have the same alignment as the * dataref of this stmt. */ vec < dr_p > same_align_refs; /* * Selected SIMD clone's function info. First vector element is SIMD * clone's function decl, followed by a pair of trees (base + step) for * linear arguments (pair of NULLs for other arguments). */ vec < tree > simd_clone_info; /* Classify the def of this stmt. */ enum vect_def_type def_type; /* Whether the stmt is SLPed, loop-based vectorized, or both. */ enum slp_vect_type slp_type; /* Interleaving and reduction chains info. */ /* First element in the group. */ stmt_vec_info first_element; /* Pointer to the next element in the group. */ stmt_vec_info next_element; /* The size of the group. */ unsigned int size; /* * For stores, number of stores from this group seen. We vectorize the * last one. */ unsigned int store_count; /* * For loads only, the gap from the previous load. For consecutive loads, * GAP is 1. */ unsigned int gap; /* * The minimum negative dependence distance this stmt participates in or * zero if none. */ unsigned int min_neg_dist; /* * Not all stmts in the loop need to be vectorized. 
e.g, the increment of * the loop induction variable and computation of array indexes. relevant * indicates whether the stmt needs to be vectorized. */ enum vect_relevant relevant; /* For loads if this is a gather, for stores if this is a scatter. */ bool gather_scatter_p; /* True if this is an access with loop-invariant stride. */ bool strided_p; /* For both loads and stores. */ bool simd_lane_access_p; /* * Classifies how the load or store is going to be implemented for loop * vectorization. */ vect_memory_access_type memory_access_type; /* For reduction loops, this is the type of reduction. */ enum vect_reduction_type v_reduc_type; /* For CONST_COND_REDUCTION, record the reduc code. */ enum tree_code const_cond_reduc_code; /* * On a reduction PHI the reduction type as detected by * vect_force_simple_reduction. */ enum vect_reduction_type reduc_type; /* * On a reduction PHI the def returned by vect_force_simple_reduction. On * the def returned by vect_force_simple_reduction the corresponding PHI. */ stmt_vec_info reduc_def; /* The number of scalar stmt references from active SLP instances. */ unsigned int num_slp_uses; /* * If nonzero, the lhs of the statement could be truncated to this many * bits without affecting any users of the result. */ unsigned int min_output_precision; /* * If nonzero, all non-boolean input operands have the same precision, * and they could each be truncated to this many bits without changing * the result. */ unsigned int min_input_precision; /* * If OPERATION_BITS is nonzero, the statement could be performed on an * integer with the sign and number of bits given by OPERATION_SIGN and * OPERATION_BITS without changing the result. */ unsigned int operation_precision; signop operation_sign; }; /* Information about a gather/scatter call. */ struct gather_scatter_info { /* * The internal function to use for the gather/scatter operation, or * IFN_LAST if a built-in function should be used instead. */ internal_fn ifn; /* * The FUNCTION_DECL for the built-in gather/scatter function, or null if * an internal function should be used instead. */ tree decl; /* The loop-invariant base value. */ tree base; /* The original scalar offset, which is a non-loop-invariant SSA_NAME. */ tree offset; /* * Each offset element should be multiplied by this amount before being * added to the base. */ int scale; /* The definition type for the vectorized offset. */ enum vect_def_type offset_dt; /* The type of the vectorized offset. */ tree offset_vectype; /* The type of the scalar elements after loading or before storing. */ tree element_type; /* The type of the scalar elements being loaded or stored. */ tree memory_type; }; /* Access Functions. 
*/ #define STMT_VINFO_TYPE(S) (S)->type #define STMT_VINFO_STMT(S) (S)->stmt inline loop_vec_info STMT_VINFO_LOOP_VINFO(stmt_vec_info stmt_vinfo) { if (loop_vec_info loop_vinfo = dyn_cast < loop_vec_info > (stmt_vinfo->vinfo)) return loop_vinfo; return NULL; } inline bb_vec_info STMT_VINFO_BB_VINFO(stmt_vec_info stmt_vinfo) { if (bb_vec_info bb_vinfo = dyn_cast < bb_vec_info > (stmt_vinfo->vinfo)) return bb_vinfo; return NULL; } #define STMT_VINFO_RELEVANT(S) (S)->relevant #define STMT_VINFO_LIVE_P(S) (S)->live #define STMT_VINFO_VECTYPE(S) (S)->vectype #define STMT_VINFO_VEC_STMT(S) (S)->vectorized_stmt #define STMT_VINFO_VECTORIZABLE(S) (S)->vectorizable #define STMT_VINFO_DATA_REF(S) ((S)->dr_aux.dr + 0) #define STMT_VINFO_GATHER_SCATTER_P(S) (S)->gather_scatter_p #define STMT_VINFO_STRIDED_P(S) (S)->strided_p #define STMT_VINFO_MEMORY_ACCESS_TYPE(S) (S)->memory_access_type #define STMT_VINFO_SIMD_LANE_ACCESS_P(S) (S)->simd_lane_access_p #define STMT_VINFO_VEC_REDUCTION_TYPE(S) (S)->v_reduc_type #define STMT_VINFO_VEC_CONST_COND_REDUC_CODE(S) (S)->const_cond_reduc_code #define STMT_VINFO_DR_WRT_VEC_LOOP(S) (S)->dr_wrt_vec_loop #define STMT_VINFO_DR_BASE_ADDRESS(S) (S)->dr_wrt_vec_loop.base_address #define STMT_VINFO_DR_INIT(S) (S)->dr_wrt_vec_loop.init #define STMT_VINFO_DR_OFFSET(S) (S)->dr_wrt_vec_loop.offset #define STMT_VINFO_DR_STEP(S) (S)->dr_wrt_vec_loop.step #define STMT_VINFO_DR_BASE_ALIGNMENT(S) (S)->dr_wrt_vec_loop.base_alignment #define STMT_VINFO_DR_BASE_MISALIGNMENT(S) \ (S)->dr_wrt_vec_loop.base_misalignment #define STMT_VINFO_DR_OFFSET_ALIGNMENT(S) \ (S)->dr_wrt_vec_loop.offset_alignment #define STMT_VINFO_DR_STEP_ALIGNMENT(S) \ (S)->dr_wrt_vec_loop.step_alignment #define STMT_VINFO_DR_INFO(S) \ (gcc_checking_assert ((S)->dr_aux.stmt == (S)), &(S)->dr_aux) #define STMT_VINFO_IN_PATTERN_P(S) (S)->in_pattern_p #define STMT_VINFO_RELATED_STMT(S) (S)->related_stmt #define STMT_VINFO_PATTERN_DEF_SEQ(S) (S)->pattern_def_seq #define STMT_VINFO_SAME_ALIGN_REFS(S) (S)->same_align_refs #define STMT_VINFO_SIMD_CLONE_INFO(S) (S)->simd_clone_info #define STMT_VINFO_DEF_TYPE(S) (S)->def_type #define STMT_VINFO_GROUPED_ACCESS(S) \ ((S)->dr_aux.dr && DR_GROUP_FIRST_ELEMENT(S)) #define STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED(S) (S)->loop_phi_evolution_base_unchanged #define STMT_VINFO_LOOP_PHI_EVOLUTION_PART(S) (S)->loop_phi_evolution_part #define STMT_VINFO_MIN_NEG_DIST(S) (S)->min_neg_dist #define STMT_VINFO_NUM_SLP_USES(S) (S)->num_slp_uses #define STMT_VINFO_REDUC_TYPE(S) (S)->reduc_type #define STMT_VINFO_REDUC_DEF(S) (S)->reduc_def #define DR_GROUP_FIRST_ELEMENT(S) \ (gcc_checking_assert ((S)->dr_aux.dr), (S)->first_element) #define DR_GROUP_NEXT_ELEMENT(S) \ (gcc_checking_assert ((S)->dr_aux.dr), (S)->next_element) #define DR_GROUP_SIZE(S) \ (gcc_checking_assert ((S)->dr_aux.dr), (S)->size) #define DR_GROUP_STORE_COUNT(S) \ (gcc_checking_assert ((S)->dr_aux.dr), (S)->store_count) #define DR_GROUP_GAP(S) \ (gcc_checking_assert ((S)->dr_aux.dr), (S)->gap) #define REDUC_GROUP_FIRST_ELEMENT(S) \ (gcc_checking_assert (!(S)->dr_aux.dr), (S)->first_element) #define REDUC_GROUP_NEXT_ELEMENT(S) \ (gcc_checking_assert (!(S)->dr_aux.dr), (S)->next_element) #define REDUC_GROUP_SIZE(S) \ (gcc_checking_assert (!(S)->dr_aux.dr), (S)->size) #define STMT_VINFO_RELEVANT_P(S) ((S)->relevant != vect_unused_in_scope) #define HYBRID_SLP_STMT(S) ((S)->slp_type == hybrid) #define PURE_SLP_STMT(S) ((S)->slp_type == pure_slp) #define STMT_SLP_TYPE(S) (S)->slp_type #define VECT_MAX_COST 1000 /* * The 
maximum number of intermediate steps required in multi-step type * conversion. */ #define MAX_INTERM_CVT_STEPS 3 #define MAX_VECTORIZATION_FACTOR INT_MAX /* * Nonzero if TYPE represents a (scalar) boolean type or type in the * middle-end compatible with it (unsigned precision 1 integral types). Used * to determine which types should be vectorized as VECTOR_BOOLEAN_TYPE_P. */ #define VECT_SCALAR_BOOLEAN_TYPE_P(TYPE) \ (TREE_CODE (TYPE) == BOOLEAN_TYPE \ || ((TREE_CODE (TYPE) == INTEGER_TYPE \ || TREE_CODE (TYPE) == ENUMERAL_TYPE) \ && TYPE_PRECISION (TYPE) == 1 \ && TYPE_UNSIGNED (TYPE))) static inline bool nested_in_vect_loop_p(struct loop *loop, stmt_vec_info stmt_info) { return (loop->inner && (loop->inner == (gimple_bb(stmt_info->stmt))->loop_father)); } /* * Return TRUE if a statement represented by STMT_INFO is a part of a * pattern. */ static inline bool is_pattern_stmt_p(stmt_vec_info stmt_info) { return stmt_info->pattern_stmt_p; } /* * If STMT_INFO is a pattern statement, return the statement that it * replaces, otherwise return STMT_INFO itself. */ inline stmt_vec_info vect_orig_stmt(stmt_vec_info stmt_info) { if (is_pattern_stmt_p(stmt_info)) return STMT_VINFO_RELATED_STMT(stmt_info); return stmt_info; } /* Return the later statement between STMT1_INFO and STMT2_INFO. */ static inline stmt_vec_info get_later_stmt(stmt_vec_info stmt1_info, stmt_vec_info stmt2_info) { if (gimple_uid(vect_orig_stmt(stmt1_info)->stmt) > gimple_uid(vect_orig_stmt(stmt2_info)->stmt)) return stmt1_info; else return stmt2_info; } /* * If STMT_INFO has been replaced by a pattern statement, return the * replacement statement, otherwise return STMT_INFO itself. */ inline stmt_vec_info vect_stmt_to_vectorize(stmt_vec_info stmt_info) { if (STMT_VINFO_IN_PATTERN_P(stmt_info)) return STMT_VINFO_RELATED_STMT(stmt_info); return stmt_info; } /* Return true if BB is a loop header. */ static inline bool is_loop_header_bb_p(basic_block bb) { if (bb == (bb->loop_father)->header) return true; gcc_checking_assert(EDGE_COUNT(bb->preds) == 1); return false; } /* Return pow2 (X). */ static inline int vect_pow2(int x) { int i, res = 1; for (i = 0; i < x; i++) res *= 2; return res; } /* Alias targetm.vectorize.builtin_vectorization_cost. */ static inline int builtin_vectorization_cost(enum vect_cost_for_stmt type_of_cost, tree vectype, int misalign) { return targetm.vectorize.builtin_vectorization_cost(type_of_cost, vectype, misalign); } /* Get cost by calling cost target builtin. */ static inline int vect_get_stmt_cost(enum vect_cost_for_stmt type_of_cost) { return builtin_vectorization_cost(type_of_cost, NULL, 0); } /* Alias targetm.vectorize.init_cost. */ static inline void * init_cost(struct loop *loop_info) { return targetm.vectorize.init_cost(loop_info); } extern void dump_stmt_cost(FILE *, void *, int, enum vect_cost_for_stmt, stmt_vec_info, int, unsigned, enum vect_cost_model_location); /* Alias targetm.vectorize.add_stmt_cost. */ static inline unsigned add_stmt_cost(void *data, int count, enum vect_cost_for_stmt kind, stmt_vec_info stmt_info, int misalign, enum vect_cost_model_location where) { unsigned cost = targetm.vectorize.add_stmt_cost(data, count, kind, stmt_info, misalign, where); if (dump_file && (dump_flags & TDF_DETAILS)) dump_stmt_cost(dump_file, data, count, kind, stmt_info, misalign, cost, where); return cost; } /* Alias targetm.vectorize.finish_cost. 
*/ static inline void finish_cost(void *data, unsigned *prologue_cost, unsigned *body_cost, unsigned *epilogue_cost) { targetm.vectorize.finish_cost(data, prologue_cost, body_cost, epilogue_cost); } /* Alias targetm.vectorize.destroy_cost_data. */ static inline void destroy_cost_data(void *data) { targetm.vectorize.destroy_cost_data(data); } inline void add_stmt_costs(void *data, stmt_vector_for_cost * cost_vec) { stmt_info_for_cost *cost; unsigned i; FOR_EACH_VEC_ELT(*cost_vec, i, cost) add_stmt_cost(data, cost->count, cost->kind, cost->stmt_info, cost->misalign, cost->where); } /*-----------------------------------------------------------------*/ /* Info on data references alignment. */ /*-----------------------------------------------------------------*/ #define DR_MISALIGNMENT_UNKNOWN (-1) #define DR_MISALIGNMENT_UNINITIALIZED (-2) inline void set_dr_misalignment(dr_vec_info * dr_info, int val) { dr_info->misalignment = val; } inline int dr_misalignment(dr_vec_info * dr_info) { int misalign = dr_info->misalignment; gcc_assert(misalign != DR_MISALIGNMENT_UNINITIALIZED); return misalign; } /* * Reflects actual alignment of first access in the vectorized loop, taking * into account peeling/versioning if applied. */ #define DR_MISALIGNMENT(DR) dr_misalignment (DR) #define SET_DR_MISALIGNMENT(DR, VAL) set_dr_misalignment (DR, VAL) /* Only defined once DR_MISALIGNMENT is defined. */ #define DR_TARGET_ALIGNMENT(DR) ((DR)->target_alignment) /* * Return true if data access DR_INFO is aligned to its target alignment * (which may be less than a full vector). */ static inline bool aligned_access_p(dr_vec_info * dr_info) { return (DR_MISALIGNMENT(dr_info) == 0); } /* * Return TRUE if the alignment of the data access is known, and FALSE * otherwise. */ static inline bool known_alignment_for_access_p(dr_vec_info * dr_info) { return (DR_MISALIGNMENT(dr_info) != DR_MISALIGNMENT_UNKNOWN); } /* * Return the minimum alignment in bytes that the vectorized version of * DR_INFO is guaranteed to have. */ static inline unsigned int vect_known_alignment_in_bytes(dr_vec_info * dr_info) { if (DR_MISALIGNMENT(dr_info) == DR_MISALIGNMENT_UNKNOWN) return TYPE_ALIGN_UNIT(TREE_TYPE(DR_REF(dr_info->dr))); if (DR_MISALIGNMENT(dr_info) == 0) return known_alignment(DR_TARGET_ALIGNMENT(dr_info)); return DR_MISALIGNMENT(dr_info) & -DR_MISALIGNMENT(dr_info); } /* * Return the behavior of DR_INFO with respect to the vectorization context * (which for outer loop vectorization might not be the behavior recorded in * DR_INFO itself). */ static inline innermost_loop_behavior * vect_dr_behavior(dr_vec_info * dr_info) { stmt_vec_info stmt_info = dr_info->stmt; loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO(stmt_info); if (loop_vinfo == NULL || !nested_in_vect_loop_p(LOOP_VINFO_LOOP(loop_vinfo), stmt_info)) return &DR_INNERMOST(dr_info->dr); else return &STMT_VINFO_DR_WRT_VEC_LOOP(stmt_info); } /* Return true if the vect cost model is unlimited. */ static inline bool unlimited_cost_model(loop_p loop) { if (loop != NULL && loop->force_vectorize && flag_simd_cost_model != VECT_COST_MODEL_DEFAULT) return flag_simd_cost_model == VECT_COST_MODEL_UNLIMITED; return (flag_vect_cost_model == VECT_COST_MODEL_UNLIMITED); } /* * Return true if the loop described by LOOP_VINFO is fully-masked and if the * first iteration should use a partial mask in order to achieve alignment. 
*/ static inline bool vect_use_loop_mask_for_alignment_p(loop_vec_info loop_vinfo) { return (LOOP_VINFO_FULLY_MASKED_P(loop_vinfo) && LOOP_VINFO_PEELING_FOR_ALIGNMENT(loop_vinfo)); } /* * Return the number of vectors of type VECTYPE that are needed to get NUNITS * elements. NUNITS should be based on the vectorization factor, so it is * always a known multiple of the number of elements in VECTYPE. */ static inline unsigned int vect_get_num_vectors(poly_uint64 nunits, tree vectype) { return exact_div(nunits, TYPE_VECTOR_SUBPARTS(vectype)).to_constant(); } /* * Return the number of copies needed for loop vectorization when a statement * operates on vectors of type VECTYPE. This is the vectorization factor * divided by the number of elements in VECTYPE and is always known at * compile time. */ static inline unsigned int vect_get_num_copies(loop_vec_info loop_vinfo, tree vectype) { return vect_get_num_vectors(LOOP_VINFO_VECT_FACTOR(loop_vinfo), vectype); } /* * Update maximum unit count *MAX_NUNITS so that it accounts for NUNITS. * *MAX_NUNITS can be 1 if we haven't yet recorded anything. */ static inline void vect_update_max_nunits(poly_uint64 * max_nunits, poly_uint64 nunits) { /* * All unit counts have the form current_vector_size * X for some * rational X, so two unit sizes must have a common multiple. Everything * is a multiple of the initial value of 1. */ *max_nunits = force_common_multiple(*max_nunits, nunits); } /* * Update maximum unit count *MAX_NUNITS so that it accounts for the number * of units in vector type VECTYPE. *MAX_NUNITS can be 1 if we haven't yet * recorded any vector types. */ static inline void vect_update_max_nunits(poly_uint64 * max_nunits, tree vectype) { vect_update_max_nunits(max_nunits, TYPE_VECTOR_SUBPARTS(vectype)); } /* * Return the vectorization factor that should be used for costing purposes * while vectorizing the loop described by LOOP_VINFO. Pick a reasonable * estimate if the vectorization factor isn't known at compile time. */ static inline unsigned int vect_vf_for_cost(loop_vec_info loop_vinfo) { return estimated_poly_value(LOOP_VINFO_VECT_FACTOR(loop_vinfo)); } /* * Estimate the number of elements in VEC_TYPE for costing purposes. Pick a * reasonable estimate if the exact number isn't known at compile time. */ static inline unsigned int vect_nunits_for_cost(tree vec_type) { return estimated_poly_value(TYPE_VECTOR_SUBPARTS(vec_type)); } /* Return the maximum possible vectorization factor for LOOP_VINFO. */ static inline unsigned HOST_WIDE_INT vect_max_vf(loop_vec_info loop_vinfo) { unsigned HOST_WIDE_INT vf; if (LOOP_VINFO_VECT_FACTOR(loop_vinfo).is_constant(&vf)) return vf; return MAX_VECTORIZATION_FACTOR; } /* * Return the size of the value accessed by unvectorized data reference * DR_INFO. This is only valid once STMT_VINFO_VECTYPE has been calculated * for the associated gimple statement, since that guarantees that DR_INFO * accesses either a scalar or a scalar equivalent. ("Scalar equivalent" * here includes things like V1SI, which can be vectorized in the same way as * a plain SI.) */ inline unsigned int vect_get_scalar_dr_size(dr_vec_info * dr_info) { return tree_to_uhwi(TYPE_SIZE_UNIT(TREE_TYPE(DR_REF(dr_info->dr)))); } /* Source location + hotness information. 
*/ extern dump_user_location_t vect_location; /* * A macro for calling: dump_begin_scope (MSG, vect_location); via an RAII * object, thus printing "=== MSG ===\n" to the dumpfile etc, and then * calling dump_end_scope (); once the object goes out of scope, thus * capturing the nesting of the scopes. * * These scopes affect dump messages within them: dump messages at the top level * implicitly default to MSG_PRIORITY_USER_FACING, whereas those in a nested * scope implicitly default to MSG_PRIORITY_INTERNALS. */ #define DUMP_VECT_SCOPE(MSG) \ AUTO_DUMP_SCOPE (MSG, vect_location) /* * A sentinel class for ensuring that the "vect_location" global gets reset * at the end of a scope. * * The "vect_location" global is used during dumping and contains a location_t, * which could contain references to a tree block via the ad-hoc data. This * data is used for tracking inlining information, but it's not a GC root; * it's simply assumed that such locations never get accessed if the blocks * are optimized away. * * Hence we need to ensure that such locations are purged at the end of any * operations using them (e.g. via this class). */ class auto_purge_vect_location { public: ~auto_purge_vect_location(); }; /*-----------------------------------------------------------------*/ /* Function prototypes. */ /*-----------------------------------------------------------------*/ /* * Simple loop peeling and versioning utilities for vectorizer's purposes - * in tree-vect-loop-manip.c. */ extern void vect_set_loop_condition(struct loop *, loop_vec_info, tree, tree, tree, bool); extern bool slpeel_can_duplicate_loop_p(const struct loop *, const_edge); struct loop * slpeel_tree_duplicate_loop_to_edge_cfg(struct loop *, struct loop *, edge); struct loop * vect_loop_versioning(loop_vec_info, unsigned int, bool, poly_uint64); extern struct loop * vect_do_peeling(loop_vec_info, tree, tree, tree *, tree *, tree *, int, bool, bool); extern void vect_prepare_for_masked_peels(loop_vec_info); extern dump_user_location_t find_loop_location(struct loop *); extern bool vect_can_advance_ivs_p(loop_vec_info); /* In tree-vect-stmts.c. 
*/ extern poly_uint64 current_vector_size; extern tree get_vectype_for_scalar_type(tree); extern tree get_vectype_for_scalar_type_and_size(tree, poly_uint64); extern tree get_mask_type_for_scalar_type(tree); extern tree get_same_sized_vectype(tree, tree); extern bool vect_get_loop_mask_type(loop_vec_info); extern bool vect_is_simple_use(tree, vec_info *, enum vect_def_type *, stmt_vec_info * = NULL, gimple ** = NULL); extern bool vect_is_simple_use(tree, vec_info *, enum vect_def_type *, tree *, stmt_vec_info * = NULL, gimple ** = NULL); extern bool supportable_widening_operation(enum tree_code, stmt_vec_info, tree, tree, enum tree_code *, enum tree_code *, int *, vec < tree > *); extern bool supportable_narrowing_operation(enum tree_code, tree, tree, enum tree_code *, int *, vec < tree > *); extern unsigned record_stmt_cost(stmt_vector_for_cost *, int, enum vect_cost_for_stmt, stmt_vec_info, int, enum vect_cost_model_location); extern stmt_vec_info vect_finish_replace_stmt(stmt_vec_info, gimple *); extern stmt_vec_info vect_finish_stmt_generation(stmt_vec_info, gimple *, gimple_stmt_iterator *); extern opt_result vect_mark_stmts_to_be_vectorized(loop_vec_info); extern tree vect_get_store_rhs(stmt_vec_info); extern tree vect_get_vec_def_for_operand_1(stmt_vec_info, enum vect_def_type); extern tree vect_get_vec_def_for_operand(tree, stmt_vec_info, tree = NULL); extern void vect_get_vec_defs(tree, tree, stmt_vec_info, vec < tree > *, vec < tree > *, slp_tree); extern void vect_get_vec_defs_for_stmt_copy(vec_info *, vec < tree > *, vec < tree > *); extern tree vect_init_vector(stmt_vec_info, tree, tree, gimple_stmt_iterator *); extern tree vect_get_vec_def_for_stmt_copy(vec_info *, tree); extern bool vect_transform_stmt(stmt_vec_info, gimple_stmt_iterator *, slp_tree, slp_instance); extern void vect_remove_stores(stmt_vec_info); extern opt_result vect_analyze_stmt(stmt_vec_info, bool *, slp_tree, slp_instance, stmt_vector_for_cost *); extern bool vectorizable_condition(stmt_vec_info, gimple_stmt_iterator *, stmt_vec_info *, bool, slp_tree, stmt_vector_for_cost *); extern bool vectorizable_shift(stmt_vec_info, gimple_stmt_iterator *, stmt_vec_info *, slp_tree, stmt_vector_for_cost *); extern void vect_get_load_cost(stmt_vec_info, int, bool, unsigned int *, unsigned int *, stmt_vector_for_cost *, stmt_vector_for_cost *, bool); extern void vect_get_store_cost(stmt_vec_info, int, unsigned int *, stmt_vector_for_cost *); extern bool vect_supportable_shift(enum tree_code, tree); extern tree vect_gen_perm_mask_any(tree, const vec_perm_indices &); extern tree vect_gen_perm_mask_checked(tree, const vec_perm_indices &); extern void optimize_mask_stores(struct loop *); extern gcall *vect_gen_while(tree, tree, tree); extern tree vect_gen_while_not(gimple_seq *, tree, tree, tree); extern opt_result vect_get_vector_types_for_stmt(stmt_vec_info, tree *, tree *); extern opt_tree vect_get_mask_type_for_stmt(stmt_vec_info); /* In tree-vect-data-refs.c. 
*/ extern bool vect_can_force_dr_alignment_p(const_tree, poly_uint64); extern enum dr_alignment_support vect_supportable_dr_alignment (dr_vec_info *, bool); extern tree vect_get_smallest_scalar_type(stmt_vec_info, HOST_WIDE_INT *, HOST_WIDE_INT *); extern opt_result vect_analyze_data_ref_dependences(loop_vec_info, unsigned int *); extern bool vect_slp_analyze_instance_dependence(slp_instance); extern opt_result vect_enhance_data_refs_alignment(loop_vec_info); extern opt_result vect_analyze_data_refs_alignment(loop_vec_info); extern opt_result vect_verify_datarefs_alignment(loop_vec_info); extern bool vect_slp_analyze_and_verify_instance_alignment(slp_instance); extern opt_result vect_analyze_data_ref_accesses(vec_info *); extern opt_result vect_prune_runtime_alias_test_list(loop_vec_info); extern bool vect_gather_scatter_fn_p(bool, bool, tree, tree, unsigned int, signop, int, internal_fn *, tree *); extern bool vect_check_gather_scatter(stmt_vec_info, loop_vec_info, gather_scatter_info *); extern opt_result vect_find_stmt_data_reference(loop_p, gimple *, vec < data_reference_p > *); extern opt_result vect_analyze_data_refs(vec_info *, poly_uint64 *); extern void vect_record_base_alignments(vec_info *); extern tree vect_create_data_ref_ptr(stmt_vec_info, tree, struct loop *, tree, tree *, gimple_stmt_iterator *, gimple **, bool, tree = NULL_TREE, tree = NULL_TREE); extern tree bump_vector_ptr(tree, gimple *, gimple_stmt_iterator *, stmt_vec_info, tree); extern void vect_copy_ref_info(tree, tree); extern tree vect_create_destination_var(tree, tree); extern bool vect_grouped_store_supported(tree, unsigned HOST_WIDE_INT); extern bool vect_store_lanes_supported(tree, unsigned HOST_WIDE_INT, bool); extern bool vect_grouped_load_supported(tree, bool, unsigned HOST_WIDE_INT); extern bool vect_load_lanes_supported(tree, unsigned HOST_WIDE_INT, bool); extern void vect_permute_store_chain(vec < tree >, unsigned int, stmt_vec_info, gimple_stmt_iterator *, vec < tree > *); extern tree vect_setup_realignment(stmt_vec_info, gimple_stmt_iterator *, tree *, enum dr_alignment_support, tree, struct loop **); extern void vect_transform_grouped_load(stmt_vec_info, vec < tree >, int, gimple_stmt_iterator *); extern void vect_record_grouped_load_vectors(stmt_vec_info, vec < tree >); extern tree vect_get_new_vect_var(tree, enum vect_var_kind, const char *); extern tree vect_get_new_ssa_name(tree, enum vect_var_kind, const char *= NULL); extern tree vect_create_addr_base_for_vector_ref(stmt_vec_info, gimple_seq *, tree, tree = NULL_TREE); /* In tree-vect-loop.c. */ /* FORNOW: Used in tree-parloops.c. */ extern stmt_vec_info vect_force_simple_reduction(loop_vec_info, stmt_vec_info, bool *, bool); /* Used in gimple-loop-interchange.c. */ extern bool check_reduction_path(dump_user_location_t, loop_p, gphi *, tree, enum tree_code); /* Drive for loop analysis stage. */ extern opt_loop_vec_info vect_analyze_loop(struct loop *, loop_vec_info, vec_info_shared *); extern tree vect_build_loop_niters(loop_vec_info, bool * = NULL); extern void vect_gen_vector_loop_niters(loop_vec_info, tree, tree *, tree *, bool); extern tree vect_halve_mask_nunits(tree); extern tree vect_double_mask_nunits(tree); extern void vect_record_loop_mask(loop_vec_info, vec_loop_masks *, unsigned int, tree); extern tree vect_get_loop_mask(gimple_stmt_iterator *, vec_loop_masks *, unsigned int, tree, unsigned int); /* Drive for loop transformation stage. 
*/ extern struct loop *vect_transform_loop(loop_vec_info); extern opt_loop_vec_info vect_analyze_loop_form(struct loop *, vec_info_shared *); extern bool vectorizable_live_operation(stmt_vec_info, gimple_stmt_iterator *, slp_tree, int, stmt_vec_info *, stmt_vector_for_cost *); extern bool vectorizable_reduction(stmt_vec_info, gimple_stmt_iterator *, stmt_vec_info *, slp_tree, slp_instance, stmt_vector_for_cost *); extern bool vectorizable_induction(stmt_vec_info, gimple_stmt_iterator *, stmt_vec_info *, slp_tree, stmt_vector_for_cost *); extern tree get_initial_def_for_reduction(stmt_vec_info, tree, tree *); extern bool vect_worthwhile_without_simd_p(vec_info *, tree_code); extern int vect_get_known_peeling_cost(loop_vec_info, int, int *, stmt_vector_for_cost *, stmt_vector_for_cost *, stmt_vector_for_cost *); extern tree cse_and_gimplify_to_preheader(loop_vec_info, tree); /* In tree-vect-slp.c. */ extern void vect_free_slp_instance(slp_instance, bool); extern bool vect_transform_slp_perm_load(slp_tree, vec < tree >, gimple_stmt_iterator *, poly_uint64, slp_instance, bool, unsigned *); extern bool vect_slp_analyze_operations(vec_info *); extern void vect_schedule_slp(vec_info *); extern opt_result vect_analyze_slp(vec_info *, unsigned); extern bool vect_make_slp_decision(loop_vec_info); extern void vect_detect_hybrid_slp(loop_vec_info); extern void vect_get_slp_defs(vec < tree >, slp_tree, vec < vec < tree > >*); extern bool vect_slp_bb(basic_block); extern stmt_vec_info vect_find_last_scalar_stmt_in_slp(slp_tree); extern bool is_simple_and_all_uses_invariant(stmt_vec_info, loop_vec_info); extern bool can_duplicate_and_interleave_p(unsigned int, machine_mode, unsigned int *= NULL, tree * = NULL, tree * = NULL); extern void duplicate_and_interleave(gimple_seq *, tree, vec < tree >, unsigned int, vec < tree > &); extern int vect_get_place_in_interleaving_chain(stmt_vec_info, stmt_vec_info); /* In tree-vect-patterns.c. */ /* * Pattern recognition functions. Additional pattern recognition functions * can (and will) be added in the future. */ void vect_pattern_recog(vec_info *); /* In tree-vectorizer.c. */ unsigned vectorize_loops(void); void vect_free_loop_info_assumptions(struct loop *); #endif /* GCC_TREE_VECTORIZER_H */
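The alignment helpers in the header above encode a simple rule: vect_known_alignment_in_bytes falls back to the scalar element's alignment when the misalignment is DR_MISALIGNMENT_UNKNOWN, returns the full target alignment when the misalignment is zero, and otherwise returns the lowest set bit of the misalignment, since that is the largest power of two the access is still guaranteed to honour. Below is a minimal, self-contained sketch of that last rule; the function name and the 1-byte fallback are illustrative only and not part of GCC.

#include <stdio.h>

/* Sketch of the rule used by vect_known_alignment_in_bytes: given a
 * misalignment in bytes measured against target_alignment, the largest
 * power-of-two alignment still guaranteed is the lowest set bit of the
 * misalignment, i.e. misalign & -misalign. */
static unsigned int guaranteed_alignment(int misalign, unsigned int target_alignment) {
  if (misalign == -1)          /* DR_MISALIGNMENT_UNKNOWN */
    return 1;                  /* illustrative fallback; GCC uses the scalar type's alignment */
  if (misalign == 0)
    return target_alignment;   /* fully aligned to the target */
  return (unsigned int) (misalign & -misalign);
}

int main(void) {
  /* A reference known to be 24 bytes past a 32-byte target alignment is
     still guaranteed to be 8-byte aligned: 24 & -24 == 8. */
  printf("%u\n", guaranteed_alignment(24, 32));
  return 0;
}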
ztrsm.c
#include "blas.h" #include "error.h" #include <stdio.h> #include "handle.h" #include "config.h" #include "ztrsm.fatbin.c" static inline size_t min(size_t a, size_t b) { return (a < b) ? a : b; } static inline size_t max(size_t a, size_t b) { return (a > b) ? a : b; } static inline CUresult cuMemcpyHtoD2DAsync(CUdeviceptr A, size_t lda, size_t ai, size_t aj, const void * B, size_t ldb, size_t bi, size_t bj, size_t m, size_t n, size_t elemSize, CUstream stream) { CUDA_MEMCPY2D copy = { bi * elemSize, bj, CU_MEMORYTYPE_HOST, B, 0, 0, ldb * elemSize, ai * elemSize, aj, CU_MEMORYTYPE_DEVICE, NULL, A, 0, lda * elemSize, m * elemSize, n }; return cuMemcpy2DAsync(&copy, stream); } static inline CUresult cuMemcpyDtoH2DAsync(void * A, size_t lda, size_t ai, size_t aj, CUdeviceptr B, size_t ldb, size_t bi, size_t bj, size_t m, size_t n, size_t elemSize, CUstream stream) { CUDA_MEMCPY2D copy = { bi * elemSize, bj, CU_MEMORYTYPE_DEVICE, NULL, B, 0, ldb * elemSize, ai * elemSize, aj, CU_MEMORYTYPE_HOST, A, 0, 0, lda * elemSize, m * elemSize, n }; return cuMemcpy2DAsync(&copy, stream); } static const double complex zero = 0.0 + 0.0 * I; static const double complex one = 1.0 + 0.0 * I; void ztrsm(CBlasSide side, CBlasUplo uplo, CBlasTranspose transA, CBlasDiag diag, size_t m, size_t n, double complex alpha, const double complex * restrict A, size_t lda, double complex * restrict B, size_t ldb) { const size_t nRowA = (side == CBlasLeft) ? m : n; int info = 0; if (lda < nRowA) info = 9; else if (ldb < m) info = 11; if (info != 0) { XERBLA(info); return; } if (m == 0 || n == 0) return; if (alpha == zero) { #pragma omp parallel for for (size_t j = 0; j < n; j++) { for (size_t i = 0; i < m; i++) B[j * ldb + i] = zero; } return; } if (side == CBlasLeft) { if (transA == CBlasNoTrans) { if (uplo == CBlasUpper) { #pragma omp parallel for for (size_t j = 0; j < n; j++) { if (alpha != one) { for (size_t i = 0; i < m; i++) B[j * ldb + i] *= alpha; } size_t k = m - 1; do { if (B[j * ldb + k] != zero) { if (diag == CBlasNonUnit) B[j * ldb + k] /= A[k * lda + k]; register double complex temp = B[j * ldb + k]; for (size_t i = 0; i < k; i++) B[j * ldb + i] -= temp * A[k * lda + i]; } } while (k-- > 0); } } else { #pragma omp parallel for for (size_t j = 0; j < n; j++) { if (alpha != one) { for (size_t i = 0; i < m; i++) B[j * ldb + i] *= alpha; } for (size_t k = 0; k < m; k++) { if (B[j * ldb + k] != zero) { if (diag == CBlasNonUnit) B[j * ldb + k] /= A[k * lda + k]; register double complex temp = B[j * ldb + k]; for (size_t i = k + 1; i < m; i++) B[j * ldb + i] -= temp * A[k * lda + i]; } } } } } else { if (uplo == CBlasUpper) { #pragma omp parallel for for (size_t j = 0; j < n; j++) { for (size_t i = 0; i < m; i++) { register double complex temp = alpha * B[j * ldb + i]; if (transA == CBlasTrans) { for (size_t k = 0; k < i; k++) temp -= A[i * lda + k] * B[j * ldb + k]; if (diag == CBlasNonUnit) temp /= A[i * lda + i]; } else { for (size_t k = 0; k < i; k++) temp -= conj(A[i * lda + k]) * B[j * ldb + k]; if (diag == CBlasNonUnit) temp /= conj(A[i * lda + i]); } B[j * ldb + i] = temp; } } } else { #pragma omp parallel for for (size_t j = 0; j < n; j++) { size_t i = m - 1; do { register double complex temp = alpha * B[j * ldb + i]; if (transA == CBlasTrans) { for (size_t k = i + 1; k < m; k++) temp -= A[i * lda + k] * B[j * ldb + k]; if (diag == CBlasNonUnit) temp /= A[i * lda + i]; } else { for (size_t k = i + 1; k < m; k++) temp -= conj(A[i * lda + k]) * B[j * ldb + k]; if (diag == CBlasNonUnit) temp /= conj(A[i * lda + 
i]); } B[j * ldb + i] = temp; } while (i-- > 0); } } } } else { if (transA == CBlasNoTrans) { if (uplo == CBlasUpper) { for (size_t j = 0; j < n; j++) { if (alpha != one) { for (size_t i = 0; i < m; i++) B[j * ldb + i] *= alpha; } for (size_t k = 0; k < j; k++) { if (A[j * lda + k] != zero) { register double complex temp = A[j * lda + k]; for (size_t i = 0; i < m; i++) B[j * ldb + i] -= temp * B[k * ldb + i]; } } if (diag == CBlasNonUnit) { register double complex temp = one / A[j * lda + j]; for (size_t i = 0; i < m; i++) B[j * ldb + i] *= temp; } } } else { size_t j = n - 1; do { if (alpha != one) { for (size_t i = 0; i < m; i++) B[j * ldb + i] *= alpha; } for (size_t k = j + 1; k < n; k++) { if (A[j * lda + k] != zero) { register double complex temp = A[j * lda + k]; for (size_t i = 0; i < m; i++) B[j * ldb + i] -= temp * B[k * ldb + i]; } } if (diag == CBlasNonUnit) { register double complex temp = one / A[j * lda + j]; for (size_t i = 0; i < m; i++) B[j * ldb + i] *= temp; } } while (j-- > 0); } } else { if (uplo == CBlasUpper) { size_t k = n - 1; do { if (diag == CBlasNonUnit) { register double complex temp; if (transA == CBlasTrans) temp = one / A[k * lda + k]; else temp = one / conj(A[k * lda + k]); for (size_t i = 0; i < m; i++) B[k * ldb + i] *= temp; } for (size_t j = 0; j < k; j++) { if (A[k * lda + j] != zero) { register double complex temp; if (transA == CBlasTrans) temp = A[k * lda + j]; else temp = conj(A[k * lda + j]); for (size_t i = 0; i < m; i++) B[j * ldb + i] -= temp * B[k * ldb + i]; } } if (alpha != one) { for (size_t i = 0; i < m; i++) B[k * ldb + i] *= alpha; } } while (k-- > 0); } else { for (size_t k = 0; k < n; k++) { if (diag == CBlasNonUnit) { register double complex temp; if (transA == CBlasTrans) temp = one / A[k * lda + k]; else temp = one / conj(A[k * lda + k]); for (size_t i = 0; i < m; i++) B[k * ldb + i] *= temp; } for (size_t j = k + 1; j < n; j++) { if (A[k * lda + j] != zero) { register double complex temp; if (transA == CBlasTrans) temp = A[k * lda + j]; else temp = conj(A[k * lda + j]); for (size_t i = 0; i < m; i++) B[j * ldb + i] -= temp * B[k * ldb + i]; } } if (alpha != one) { for (size_t i = 0; i < m; i++) B[k * ldb + i] *= alpha; } } } } } } CUresult cuZtrsm(CUBLAShandle handle, CBlasSide side, CBlasUplo uplo, CBlasTranspose transA, CBlasDiag diag, size_t m, size_t n, double complex alpha, CUdeviceptr A, size_t lda, CUdeviceptr B, size_t ldb, CUstream stream) { const size_t nRowA = (side == CBlasLeft) ? m : n; int info = 0; if (lda < nRowA) info = 9; else if (ldb < m) info = 11; if (info != 0) { XERBLA(info); return CUDA_ERROR_INVALID_VALUE; } if (m == 0 || n == 0) return CUDA_SUCCESS; CU_ERROR_CHECK(cuCtxPushCurrent(handle->context)); if (handle->ztrsm == NULL) CU_ERROR_CHECK(cuModuleLoadData(&handle->ztrsm, imageBytes)); const unsigned int bx = 2; const unsigned int by = 2; const unsigned int mb = (side == CBlasLeft) ? 2 : 8; const unsigned int nb = (side == CBlasLeft) ? 
8 : 2; char name[112]; snprintf(name, 112, "_Z5ztrsmIL9CBlasSide%dEL9CBlasUplo%dEL14CBlasTranspose%dEL9CBlasDiag%dELj%uELj%uELj%uELj%uEEv7double2PKS4_PS4_iiii", side, uplo, transA, diag, mb, nb, bx, by); CUfunction function; CU_ERROR_CHECK(cuModuleGetFunction(&function, handle->ztrsm, name)); void * params[] = { &alpha, &A, &B, &lda, &ldb, &m, &n }; CU_ERROR_CHECK(cuLaunchKernel(function, (unsigned int)(m + mb - 1) / mb, (unsigned int)(n + nb - 1) / nb, 1, bx, by, 1, 0, stream, params, NULL)); CU_ERROR_CHECK(cuCtxPopCurrent(&handle->context)); return CUDA_SUCCESS; } CUresult cuMultiGPUZtrsm(CUmultiGPUBLAShandle handle, CBlasSide side, CBlasUplo uplo, CBlasTranspose transA, CBlasDiag diag, size_t m, size_t n, double complex alpha, const double complex * restrict A, size_t lda, double complex * restrict B, size_t ldb) { const size_t nRowA = (side == CBlasLeft) ? m : n; int info = 0; if (lda < nRowA) info = 9; else if (ldb < m) info = 11; if (info != 0) { XERBLA(info); return CUDA_ERROR_INVALID_VALUE; } if (m == 0 || n == 0) return CUDA_SUCCESS; if (alpha == zero) { zgemm(CBlasNoTrans, CBlasNoTrans, m, n, 0, zero, A, lda, B, ldb, zero, B, ldb); return CUDA_SUCCESS; } const size_t mb = (transA == CBlasNoTrans) ? ZGEMM_N_MB : ZGEMM_CN_MB; const size_t nb = ZGEMM_N_NB; if (side == CBlasLeft) { if (transA == CBlasNoTrans) { if (uplo == CBlasUpper) { size_t r = m % mb; size_t i = (r == 0) ? m : m + mb - r; do { i -= mb; const size_t ib = min(mb, m - i); CU_ERROR_CHECK(cuMultiGPUZgemm(handle, CBlasNoTrans, CBlasNoTrans, ib, n, m - i - ib, -one, &A[(i + ib) * lda + i], lda, &B[i + ib], ldb, alpha, &B[i], ldb)); CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle)); ztrsm(CBlasLeft, CBlasUpper, CBlasNoTrans, diag, ib, n, one, &A[i * lda + i], lda, &B[i], ldb); } while (i > 0); } else { for (size_t i = 0; i < m; i += mb) { const size_t ib = min(mb, m - i); CU_ERROR_CHECK(cuMultiGPUZgemm(handle, CBlasNoTrans, CBlasNoTrans, ib, n, i, -one, &A[i], lda, B, ldb, alpha, &B[i], ldb)); CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle)); ztrsm(CBlasLeft, CBlasLower, CBlasNoTrans, diag, ib, n, one, &A[i * lda + i], lda, &B[i], ldb); } } } else { if (uplo == CBlasUpper) { for (size_t i = 0; i < m; i += mb) { const size_t ib = min(mb, m - i); CU_ERROR_CHECK(cuMultiGPUZgemm(handle, transA, CBlasNoTrans, ib, n, i, -one, &A[i * lda], lda, B, ldb, alpha, &B[i], ldb)); CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle)); ztrsm(CBlasLeft, CBlasUpper, transA, diag, ib, n, one, &A[i * lda + i], lda, &B[i], ldb); } } else { size_t r = m % mb; size_t i = (r == 0) ? m : m + mb - r; do { i -= mb; const size_t ib = min(mb, m - i); CU_ERROR_CHECK(cuMultiGPUZgemm(handle, transA, CBlasNoTrans, ib, n, m - i - ib, -one, &A[i * lda + i + ib], lda, &B[i + ib], ldb, alpha, &B[i], ldb)); CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle)); ztrsm(CBlasLeft, CBlasLower, transA, diag, ib, n, one, &A[i * lda + i], lda, &B[i], ldb); } while (i > 0); } } } else { if (transA == CBlasNoTrans) { if (uplo == CBlasUpper) { for (size_t j = 0; j < n; j += nb) { const size_t jb = min(nb, n - j); CU_ERROR_CHECK(cuMultiGPUZgemm(handle, CBlasNoTrans, CBlasNoTrans, m, jb, j, -one, B, ldb, &A[j * lda], lda, alpha, &B[j * ldb], ldb)); CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle)); ztrsm(CBlasRight, CBlasUpper, CBlasNoTrans, diag, m, jb, one, &A[j * lda + j], lda, &B[j * ldb], ldb); } } else { size_t r = n % nb; size_t j = (r == 0) ? 
n : n + nb - r; do { j -= nb; const size_t jb = min(nb, n - j); CU_ERROR_CHECK(cuMultiGPUZgemm(handle, CBlasNoTrans, CBlasNoTrans, m, jb, n - j - jb, -one, &B[(j + jb) * ldb], ldb, &A[j * lda + j + jb], lda, alpha, &B[j * ldb], ldb)); CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle)); ztrsm(CBlasRight, CBlasLower, CBlasNoTrans, diag, m, jb, one, &A[j * lda + j], lda, &B[j * ldb], ldb); } while (j > 0); } } else { if (uplo == CBlasUpper) { size_t r = n % nb; size_t j = (r == 0) ? n : n + nb - r; do { j -= nb; const size_t jb = min(nb, n - j); CU_ERROR_CHECK(cuMultiGPUZgemm(handle, CBlasNoTrans, transA, m, jb, n - j - jb, -one, &B[(j + jb) * ldb], ldb, &A[(j + jb) * lda + j], lda, alpha, &B[j * ldb], ldb)); CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle)); ztrsm(CBlasRight, CBlasUpper, transA, diag, m, jb, one, &A[j * lda + j], lda, &B[j * ldb], ldb); } while (j > 0); } else { for (size_t j = 0; j < n; j += nb) { const size_t jb = min(nb, n - j); CU_ERROR_CHECK(cuMultiGPUZgemm(handle, CBlasNoTrans, transA, m, jb, j, -one, B, ldb, &A[j], lda, alpha, &B[j * ldb], ldb)); CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle)); ztrsm(CBlasRight, CBlasLower, transA, diag, m, jb, one, &A[j * lda + j], lda, &B[j * ldb], ldb); } } } } return CUDA_SUCCESS; }
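In the host ztrsm above, every #pragma omp parallel for sits on the loop over the columns j of B: for a left-sided solve each column of B is an independent right-hand side, so the columns can be processed concurrently with no shared writes. A minimal real-valued sketch of the same pattern for the lower-triangular, non-transposed, non-unit-diagonal case follows; the function name is illustrative and not part of this library.

#include <stddef.h>

/* Solve A * X = B column by column, overwriting B with X.
 * A is n-by-n lower triangular with a non-unit diagonal, column-major with
 * leading dimension lda; B is n-by-nrhs, column-major with leading dimension
 * ldb.  Each column of B depends only on itself, so the j loop carries no
 * dependence and can be parallelized exactly as in ztrsm above. */
void forward_subst_columns(size_t n, size_t nrhs,
                           const double * restrict A, size_t lda,
                           double * restrict B, size_t ldb) {
#pragma omp parallel for
  for (size_t j = 0; j < nrhs; j++) {
    for (size_t k = 0; k < n; k++) {
      B[j * ldb + k] /= A[k * lda + k];
      const double temp = B[j * ldb + k];
      for (size_t i = k + 1; i < n; i++)
        B[j * ldb + i] -= temp * A[k * lda + i];
    }
  }
}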
#include "blas.h" #include "error.h" #include <stdio.h> #include "handle.h" #include "config.h" #include "ztrsm.fatbin.c" static inline size_t min(size_t a, size_t b) { return (a < b) ? a : b; } static inline size_t max(size_t a, size_t b) { return (a > b) ? a : b; } static inline CUresult cuMemcpyHtoD2DAsync(CUdeviceptr A, size_t lda, size_t ai, size_t aj, const void *B, size_t ldb, size_t bi, size_t bj, size_t m, size_t n, size_t elemSize, CUstream stream) { CUDA_MEMCPY2D copy = { bi * elemSize, bj, CU_MEMORYTYPE_HOST, B, 0, 0, ldb * elemSize, ai * elemSize, aj, CU_MEMORYTYPE_DEVICE, NULL, A, 0, lda * elemSize, m * elemSize, n}; return cuMemcpy2DAsync(&copy, stream); } static inline CUresult cuMemcpyDtoH2DAsync(void *A, size_t lda, size_t ai, size_t aj, CUdeviceptr B, size_t ldb, size_t bi, size_t bj, size_t m, size_t n, size_t elemSize, CUstream stream) { CUDA_MEMCPY2D copy = { bi * elemSize, bj, CU_MEMORYTYPE_DEVICE, NULL, B, 0, ldb * elemSize, ai * elemSize, aj, CU_MEMORYTYPE_HOST, A, 0, 0, lda * elemSize, m * elemSize, n}; return cuMemcpy2DAsync(&copy, stream); } static const double complex zero = 0.0 + 0.0 * I; static const double complex one = 1.0 + 0.0 * I; void ztrsm(CBlasSide side, CBlasUplo uplo, CBlasTranspose transA, CBlasDiag diag, size_t m, size_t n, double complex alpha, const double complex * restrict A, size_t lda, double complex * restrict B, size_t ldb) { const size_t nRowA = (side == CBlasLeft) ? m : n; int info = 0; if (lda < nRowA) info = 9; else if (ldb < m) info = 11; if (info != 0) { XERBLA(info); return; } if (m == 0 || n == 0) return; if (alpha == zero) { for (size_t j = 0; j < n; j++) { for (size_t i = 0; i < m; i++) B[j * ldb + i] = zero; } return; } if (side == CBlasLeft) { if (transA == CBlasNoTrans) { if (uplo == CBlasUpper) { for (size_t j = 0; j < n; j++) { if (alpha != one) { for (size_t i = 0; i < m; i++) B[j * ldb + i] *= alpha; } size_t k = m - 1; do { if (B[j * ldb + k] != zero) { if (diag == CBlasNonUnit) B[j * ldb + k] /= A[k * lda + k]; register double complex temp = B[j * ldb + k]; for (size_t i = 0; i < k; i++) B[j * ldb + i] -= temp * A[k * lda + i]; } } while (k-- > 0); } } else { for (size_t j = 0; j < n; j++) { if (alpha != one) { for (size_t i = 0; i < m; i++) B[j * ldb + i] *= alpha; } for (size_t k = 0; k < m; k++) { if (B[j * ldb + k] != zero) { if (diag == CBlasNonUnit) B[j * ldb + k] /= A[k * lda + k]; register double complex temp = B[j * ldb + k]; for (size_t i = k + 1; i < m; i++) B[j * ldb + i] -= temp * A[k * lda + i]; } } } } } else { if (uplo == CBlasUpper) { for (size_t j = 0; j < n; j++) { for (size_t i = 0; i < m; i++) { register double complex temp = alpha * B[j * ldb + i]; if (transA == CBlasTrans) { for (size_t k = 0; k < i; k++) temp -= A[i * lda + k] * B[j * ldb + k]; if (diag == CBlasNonUnit) temp /= A[i * lda + i]; } else { for (size_t k = 0; k < i; k++) temp -= conj(A[i * lda + k]) * B[j * ldb + k]; if (diag == CBlasNonUnit) temp /= conj(A[i * lda + i]); } B[j * ldb + i] = temp; } } } else { for (size_t j = 0; j < n; j++) { size_t i = m - 1; do { register double complex temp = alpha * B[j * ldb + i]; if (transA == CBlasTrans) { for (size_t k = i + 1; k < m; k++) temp -= A[i * lda + k] * B[j * ldb + k]; if (diag == CBlasNonUnit) temp /= A[i * lda + i]; } else { for (size_t k = i + 1; k < m; k++) temp -= conj(A[i * lda + k]) * B[j * ldb + k]; if (diag == CBlasNonUnit) temp /= conj(A[i * lda + i]); } B[j * ldb + i] = temp; } while (i-- > 0); } } } } else { if (transA == CBlasNoTrans) { if (uplo == CBlasUpper) { for 
(size_t j = 0; j < n; j++) { if (alpha != one) { for (size_t i = 0; i < m; i++) B[j * ldb + i] *= alpha; } for (size_t k = 0; k < j; k++) { if (A[j * lda + k] != zero) { register double complex temp = A[j * lda + k]; for (size_t i = 0; i < m; i++) B[j * ldb + i] -= temp * B[k * ldb + i]; } } if (diag == CBlasNonUnit) { register double complex temp = one / A[j * lda + j]; for (size_t i = 0; i < m; i++) B[j * ldb + i] *= temp; } } } else { size_t j = n - 1; do { if (alpha != one) { for (size_t i = 0; i < m; i++) B[j * ldb + i] *= alpha; } for (size_t k = j + 1; k < n; k++) { if (A[j * lda + k] != zero) { register double complex temp = A[j * lda + k]; for (size_t i = 0; i < m; i++) B[j * ldb + i] -= temp * B[k * ldb + i]; } } if (diag == CBlasNonUnit) { register double complex temp = one / A[j * lda + j]; for (size_t i = 0; i < m; i++) B[j * ldb + i] *= temp; } } while (j-- > 0); } } else { if (uplo == CBlasUpper) { size_t k = n - 1; do { if (diag == CBlasNonUnit) { register double complex temp; if (transA == CBlasTrans) temp = one / A[k * lda + k]; else temp = one / conj(A[k * lda + k]); for (size_t i = 0; i < m; i++) B[k * ldb + i] *= temp; } for (size_t j = 0; j < k; j++) { if (A[k * lda + j] != zero) { register double complex temp; if (transA == CBlasTrans) temp = A[k * lda + j]; else temp = conj(A[k * lda + j]); for (size_t i = 0; i < m; i++) B[j * ldb + i] -= temp * B[k * ldb + i]; } } if (alpha != one) { for (size_t i = 0; i < m; i++) B[k * ldb + i] *= alpha; } } while (k-- > 0); } else { for (size_t k = 0; k < n; k++) { if (diag == CBlasNonUnit) { register double complex temp; if (transA == CBlasTrans) temp = one / A[k * lda + k]; else temp = one / conj(A[k * lda + k]); for (size_t i = 0; i < m; i++) B[k * ldb + i] *= temp; } for (size_t j = k + 1; j < n; j++) { if (A[k * lda + j] != zero) { register double complex temp; if (transA == CBlasTrans) temp = A[k * lda + j]; else temp = conj(A[k * lda + j]); for (size_t i = 0; i < m; i++) B[j * ldb + i] -= temp * B[k * ldb + i]; } } if (alpha != one) { for (size_t i = 0; i < m; i++) B[k * ldb + i] *= alpha; } } } } } } CUresult cuZtrsm(CUBLAShandle handle, CBlasSide side, CBlasUplo uplo, CBlasTranspose transA, CBlasDiag diag, size_t m, size_t n, double complex alpha, CUdeviceptr A, size_t lda, CUdeviceptr B, size_t ldb, CUstream stream) { const size_t nRowA = (side == CBlasLeft) ? m : n; int info = 0; if (lda < nRowA) info = 9; else if (ldb < m) info = 11; if (info != 0) { XERBLA(info); return CUDA_ERROR_INVALID_VALUE; } if (m == 0 || n == 0) return CUDA_SUCCESS; CU_ERROR_CHECK(cuCtxPushCurrent(handle->context)); if (handle->ztrsm == NULL) CU_ERROR_CHECK(cuModuleLoadData(&handle->ztrsm, imageBytes)); const unsigned int bx = 2; const unsigned int by = 2; const unsigned int mb = (side == CBlasLeft) ? 2 : 8; const unsigned int nb = (side == CBlasLeft) ? 
8 : 2; char name[112]; snprintf(name, 112, "_Z5ztrsmIL9CBlasSide%dEL9CBlasUplo%dEL14CBlasTranspose%dEL9CBlasDiag%dELj%uELj%uELj%uELj%uEEv7double2PKS4_PS4_iiii", side, uplo, transA, diag, mb, nb, bx, by); CUfunction function; CU_ERROR_CHECK(cuModuleGetFunction(&function, handle->ztrsm, name)); void *params[] = {&alpha, &A, &B, &lda, &ldb, &m, &n}; CU_ERROR_CHECK(cuLaunchKernel(function, (unsigned int)(m + mb - 1) / mb, (unsigned int)(n + nb - 1) / nb, 1, bx, by, 1, 0, stream, params, NULL)); CU_ERROR_CHECK(cuCtxPopCurrent(&handle->context)); return CUDA_SUCCESS; } CUresult cuMultiGPUZtrsm(CUmultiGPUBLAShandle handle, CBlasSide side, CBlasUplo uplo, CBlasTranspose transA, CBlasDiag diag, size_t m, size_t n, double complex alpha, const double complex * restrict A, size_t lda, double complex * restrict B, size_t ldb) { const size_t nRowA = (side == CBlasLeft) ? m : n; int info = 0; if (lda < nRowA) info = 9; else if (ldb < m) info = 11; if (info != 0) { XERBLA(info); return CUDA_ERROR_INVALID_VALUE; } if (m == 0 || n == 0) return CUDA_SUCCESS; if (alpha == zero) { zgemm(CBlasNoTrans, CBlasNoTrans, m, n, 0, zero, A, lda, B, ldb, zero, B, ldb); return CUDA_SUCCESS; } const size_t mb = (transA == CBlasNoTrans) ? ZGEMM_N_MB : ZGEMM_CN_MB; const size_t nb = ZGEMM_N_NB; if (side == CBlasLeft) { if (transA == CBlasNoTrans) { if (uplo == CBlasUpper) { size_t r = m % mb; size_t i = (r == 0) ? m : m + mb - r; do { i -= mb; const size_t ib = min(mb, m - i); CU_ERROR_CHECK(cuMultiGPUZgemm(handle, CBlasNoTrans, CBlasNoTrans, ib, n, m - i - ib, -one, &A[(i + ib) * lda + i], lda, &B[i + ib], ldb, alpha, &B[i], ldb)); CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle)); ztrsm(CBlasLeft, CBlasUpper, CBlasNoTrans, diag, ib, n, one, &A[i * lda + i], lda, &B[i], ldb); } while (i > 0); } else { for (size_t i = 0; i < m; i += mb) { const size_t ib = min(mb, m - i); CU_ERROR_CHECK(cuMultiGPUZgemm(handle, CBlasNoTrans, CBlasNoTrans, ib, n, i, -one, &A[i], lda, B, ldb, alpha, &B[i], ldb)); CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle)); ztrsm(CBlasLeft, CBlasLower, CBlasNoTrans, diag, ib, n, one, &A[i * lda + i], lda, &B[i], ldb); } } } else { if (uplo == CBlasUpper) { for (size_t i = 0; i < m; i += mb) { const size_t ib = min(mb, m - i); CU_ERROR_CHECK(cuMultiGPUZgemm(handle, transA, CBlasNoTrans, ib, n, i, -one, &A[i * lda], lda, B, ldb, alpha, &B[i], ldb)); CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle)); ztrsm(CBlasLeft, CBlasUpper, transA, diag, ib, n, one, &A[i * lda + i], lda, &B[i], ldb); } } else { size_t r = m % mb; size_t i = (r == 0) ? m : m + mb - r; do { i -= mb; const size_t ib = min(mb, m - i); CU_ERROR_CHECK(cuMultiGPUZgemm(handle, transA, CBlasNoTrans, ib, n, m - i - ib, -one, &A[i * lda + i + ib], lda, &B[i + ib], ldb, alpha, &B[i], ldb)); CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle)); ztrsm(CBlasLeft, CBlasLower, transA, diag, ib, n, one, &A[i * lda + i], lda, &B[i], ldb); } while (i > 0); } } } else { if (transA == CBlasNoTrans) { if (uplo == CBlasUpper) { for (size_t j = 0; j < n; j += nb) { const size_t jb = min(nb, n - j); CU_ERROR_CHECK(cuMultiGPUZgemm(handle, CBlasNoTrans, CBlasNoTrans, m, jb, j, -one, B, ldb, &A[j * lda], lda, alpha, &B[j * ldb], ldb)); CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle)); ztrsm(CBlasRight, CBlasUpper, CBlasNoTrans, diag, m, jb, one, &A[j * lda + j], lda, &B[j * ldb], ldb); } } else { size_t r = n % nb; size_t j = (r == 0) ? 
n : n + nb - r; do { j -= nb; const size_t jb = min(nb, n - j); CU_ERROR_CHECK(cuMultiGPUZgemm(handle, CBlasNoTrans, CBlasNoTrans, m, jb, n - j - jb, -one, &B[(j + jb) * ldb], ldb, &A[j * lda + j + jb], lda, alpha, &B[j * ldb], ldb)); CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle)); ztrsm(CBlasRight, CBlasLower, CBlasNoTrans, diag, m, jb, one, &A[j * lda + j], lda, &B[j * ldb], ldb); } while (j > 0); } } else { if (uplo == CBlasUpper) { size_t r = n % nb; size_t j = (r == 0) ? n : n + nb - r; do { j -= nb; const size_t jb = min(nb, n - j); CU_ERROR_CHECK(cuMultiGPUZgemm(handle, CBlasNoTrans, transA, m, jb, n - j - jb, -one, &B[(j + jb) * ldb], ldb, &A[(j + jb) * lda + j], lda, alpha, &B[j * ldb], ldb)); CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle)); ztrsm(CBlasRight, CBlasUpper, transA, diag, m, jb, one, &A[j * lda + j], lda, &B[j * ldb], ldb); } while (j > 0); } else { for (size_t j = 0; j < n; j += nb) { const size_t jb = min(nb, n - j); CU_ERROR_CHECK(cuMultiGPUZgemm(handle, CBlasNoTrans, transA, m, jb, j, -one, B, ldb, &A[j], lda, alpha, &B[j * ldb], ldb)); CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle)); ztrsm(CBlasRight, CBlasLower, transA, diag, m, jb, one, &A[j * lda + j], lda, &B[j * ldb], ldb); } } } } return CUDA_SUCCESS; }
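All three versions of ztrsm index matrices in column-major order: element (i, j) of B is B[j * ldb + i], where the leading dimension ldb is the allocated column stride and must satisfy ldb >= m (which is what the info = 11 check enforces, just as lda >= nRowA backs info = 9). A small self-contained illustration of that addressing; the names are illustrative only.

#include <stdio.h>
#include <stddef.h>

/* Column-major addressing: element (i, j) of a matrix stored with leading
 * dimension ld (ld >= number of rows) lives at index j * ld + i. */
static size_t cm_index(size_t i, size_t j, size_t ld) { return j * ld + i; }

int main(void) {
  enum { M = 2, N = 3, LD = 4 };    /* 2x3 matrix padded to a column stride of 4 */
  double A[LD * N] = { 0.0 };
  for (size_t j = 0; j < N; j++)
    for (size_t i = 0; i < M; i++)
      A[cm_index(i, j, LD)] = 10.0 * (double) i + (double) j;  /* A(i,j) = 10*i + j */
  printf("A(1,2) = %g\n", A[cm_index(1, 2, LD)]);              /* prints 12 */
  return 0;
}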
#include "blas.h" #include "error.h" #include <stdio.h> #include "handle.h" #include "config.h" #include "ztrsm.fatbin.c" static inline size_t min(size_t a, size_t b) { return (a < b) ? a : b; } static inline size_t max(size_t a, size_t b) { return (a > b) ? a : b; } static inline CUresult cuMemcpyHtoD2DAsync(CUdeviceptr A, size_t lda, size_t ai, size_t aj, const void *B, size_t ldb, size_t bi, size_t bj, size_t m, size_t n, size_t elemSize, CUstream stream) { CUDA_MEMCPY2D copy = { bi * elemSize, bj, CU_MEMORYTYPE_HOST, B, 0, 0, ldb * elemSize, ai * elemSize, aj, CU_MEMORYTYPE_DEVICE, NULL, A, 0, lda * elemSize, m * elemSize, n}; return cuMemcpy2DAsync(&copy, stream); } static inline CUresult cuMemcpyDtoH2DAsync(void *A, size_t lda, size_t ai, size_t aj, CUdeviceptr B, size_t ldb, size_t bi, size_t bj, size_t m, size_t n, size_t elemSize, CUstream stream) { CUDA_MEMCPY2D copy = { bi * elemSize, bj, CU_MEMORYTYPE_DEVICE, NULL, B, 0, ldb * elemSize, ai * elemSize, aj, CU_MEMORYTYPE_HOST, A, 0, 0, lda * elemSize, m * elemSize, n}; return cuMemcpy2DAsync(&copy, stream); } static const double complex zero = 0.0 + 0.0 * I; static const double complex one = 1.0 + 0.0 * I; void ztrsm(CBlasSide side, CBlasUplo uplo, CBlasTranspose transA, CBlasDiag diag, size_t m, size_t n, double complex alpha, const double complex * restrict A, size_t lda, double complex * restrict B, size_t ldb) { const size_t nRowA = (side == CBlasLeft) ? m : n; int info = 0; if (lda < nRowA) info = 9; else if (ldb < m) info = 11; if (info != 0) { XERBLA(info); return; } if (m == 0 || n == 0) return; if (alpha == zero) { #pragma omp parallel for for (size_t j = 0; j < n; j++) { for (size_t i = 0; i < m; i++) B[j * ldb + i] = zero; } return; } if (side == CBlasLeft) { if (transA == CBlasNoTrans) { if (uplo == CBlasUpper) { #pragma omp parallel for for (size_t j = 0; j < n; j++) { if (alpha != one) { for (size_t i = 0; i < m; i++) B[j * ldb + i] *= alpha; } size_t k = m - 1; do { if (B[j * ldb + k] != zero) { if (diag == CBlasNonUnit) B[j * ldb + k] /= A[k * lda + k]; register double complex temp = B[j * ldb + k]; for (size_t i = 0; i < k; i++) B[j * ldb + i] -= temp * A[k * lda + i]; } } while (k-- > 0); } } else { #pragma omp parallel for for (size_t j = 0; j < n; j++) { if (alpha != one) { for (size_t i = 0; i < m; i++) B[j * ldb + i] *= alpha; } for (size_t k = 0; k < m; k++) { if (B[j * ldb + k] != zero) { if (diag == CBlasNonUnit) B[j * ldb + k] /= A[k * lda + k]; register double complex temp = B[j * ldb + k]; for (size_t i = k + 1; i < m; i++) B[j * ldb + i] -= temp * A[k * lda + i]; } } } } } else { if (uplo == CBlasUpper) { #pragma omp parallel for for (size_t j = 0; j < n; j++) { for (size_t i = 0; i < m; i++) { register double complex temp = alpha * B[j * ldb + i]; if (transA == CBlasTrans) { for (size_t k = 0; k < i; k++) temp -= A[i * lda + k] * B[j * ldb + k]; if (diag == CBlasNonUnit) temp /= A[i * lda + i]; } else { for (size_t k = 0; k < i; k++) temp -= conj(A[i * lda + k]) * B[j * ldb + k]; if (diag == CBlasNonUnit) temp /= conj(A[i * lda + i]); } B[j * ldb + i] = temp; } } } else { #pragma omp parallel for for (size_t j = 0; j < n; j++) { size_t i = m - 1; do { register double complex temp = alpha * B[j * ldb + i]; if (transA == CBlasTrans) { for (size_t k = i + 1; k < m; k++) temp -= A[i * lda + k] * B[j * ldb + k]; if (diag == CBlasNonUnit) temp /= A[i * lda + i]; } else { for (size_t k = i + 1; k < m; k++) temp -= conj(A[i * lda + k]) * B[j * ldb + k]; if (diag == CBlasNonUnit) temp /= conj(A[i * lda + i]); 
} B[j * ldb + i] = temp; } while (i-- > 0); } } } } else { if (transA == CBlasNoTrans) { if (uplo == CBlasUpper) { for (size_t j = 0; j < n; j++) { if (alpha != one) { for (size_t i = 0; i < m; i++) B[j * ldb + i] *= alpha; } for (size_t k = 0; k < j; k++) { if (A[j * lda + k] != zero) { register double complex temp = A[j * lda + k]; for (size_t i = 0; i < m; i++) B[j * ldb + i] -= temp * B[k * ldb + i]; } } if (diag == CBlasNonUnit) { register double complex temp = one / A[j * lda + j]; for (size_t i = 0; i < m; i++) B[j * ldb + i] *= temp; } } } else { size_t j = n - 1; do { if (alpha != one) { for (size_t i = 0; i < m; i++) B[j * ldb + i] *= alpha; } for (size_t k = j + 1; k < n; k++) { if (A[j * lda + k] != zero) { register double complex temp = A[j * lda + k]; for (size_t i = 0; i < m; i++) B[j * ldb + i] -= temp * B[k * ldb + i]; } } if (diag == CBlasNonUnit) { register double complex temp = one / A[j * lda + j]; for (size_t i = 0; i < m; i++) B[j * ldb + i] *= temp; } } while (j-- > 0); } } else { if (uplo == CBlasUpper) { size_t k = n - 1; do { if (diag == CBlasNonUnit) { register double complex temp; if (transA == CBlasTrans) temp = one / A[k * lda + k]; else temp = one / conj(A[k * lda + k]); for (size_t i = 0; i < m; i++) B[k * ldb + i] *= temp; } for (size_t j = 0; j < k; j++) { if (A[k * lda + j] != zero) { register double complex temp; if (transA == CBlasTrans) temp = A[k * lda + j]; else temp = conj(A[k * lda + j]); for (size_t i = 0; i < m; i++) B[j * ldb + i] -= temp * B[k * ldb + i]; } } if (alpha != one) { for (size_t i = 0; i < m; i++) B[k * ldb + i] *= alpha; } } while (k-- > 0); } else { for (size_t k = 0; k < n; k++) { if (diag == CBlasNonUnit) { register double complex temp; if (transA == CBlasTrans) temp = one / A[k * lda + k]; else temp = one / conj(A[k * lda + k]); for (size_t i = 0; i < m; i++) B[k * ldb + i] *= temp; } for (size_t j = k + 1; j < n; j++) { if (A[k * lda + j] != zero) { register double complex temp; if (transA == CBlasTrans) temp = A[k * lda + j]; else temp = conj(A[k * lda + j]); for (size_t i = 0; i < m; i++) B[j * ldb + i] -= temp * B[k * ldb + i]; } } if (alpha != one) { for (size_t i = 0; i < m; i++) B[k * ldb + i] *= alpha; } } } } } } CUresult cuZtrsm(CUBLAShandle handle, CBlasSide side, CBlasUplo uplo, CBlasTranspose transA, CBlasDiag diag, size_t m, size_t n, double complex alpha, CUdeviceptr A, size_t lda, CUdeviceptr B, size_t ldb, CUstream stream) { const size_t nRowA = (side == CBlasLeft) ? m : n; int info = 0; if (lda < nRowA) info = 9; else if (ldb < m) info = 11; if (info != 0) { XERBLA(info); return CUDA_ERROR_INVALID_VALUE; } if (m == 0 || n == 0) return CUDA_SUCCESS; CU_ERROR_CHECK(cuCtxPushCurrent(handle->context)); if (handle->ztrsm == NULL) CU_ERROR_CHECK(cuModuleLoadData(&handle->ztrsm, imageBytes)); const unsigned int bx = 2; const unsigned int by = 2; const unsigned int mb = (side == CBlasLeft) ? 2 : 8; const unsigned int nb = (side == CBlasLeft) ? 
8 : 2; char name[112]; snprintf(name, 112, "_Z5ztrsmIL9CBlasSide%dEL9CBlasUplo%dEL14CBlasTranspose%dEL9CBlasDiag%dELj%uELj%uELj%uELj%uEEv7double2PKS4_PS4_iiii", side, uplo, transA, diag, mb, nb, bx, by); CUfunction function; CU_ERROR_CHECK(cuModuleGetFunction(&function, handle->ztrsm, name)); void *params[] = {&alpha, &A, &B, &lda, &ldb, &m, &n}; CU_ERROR_CHECK(cuLaunchKernel(function, (unsigned int)(m + mb - 1) / mb, (unsigned int)(n + nb - 1) / nb, 1, bx, by, 1, 0, stream, params, NULL)); CU_ERROR_CHECK(cuCtxPopCurrent(&handle->context)); return CUDA_SUCCESS; } CUresult cuMultiGPUZtrsm(CUmultiGPUBLAShandle handle, CBlasSide side, CBlasUplo uplo, CBlasTranspose transA, CBlasDiag diag, size_t m, size_t n, double complex alpha, const double complex * restrict A, size_t lda, double complex * restrict B, size_t ldb) { const size_t nRowA = (side == CBlasLeft) ? m : n; int info = 0; if (lda < nRowA) info = 9; else if (ldb < m) info = 11; if (info != 0) { XERBLA(info); return CUDA_ERROR_INVALID_VALUE; } if (m == 0 || n == 0) return CUDA_SUCCESS; if (alpha == zero) { zgemm(CBlasNoTrans, CBlasNoTrans, m, n, 0, zero, A, lda, B, ldb, zero, B, ldb); return CUDA_SUCCESS; } const size_t mb = (transA == CBlasNoTrans) ? ZGEMM_N_MB : ZGEMM_CN_MB; const size_t nb = ZGEMM_N_NB; if (side == CBlasLeft) { if (transA == CBlasNoTrans) { if (uplo == CBlasUpper) { size_t r = m % mb; size_t i = (r == 0) ? m : m + mb - r; do { i -= mb; const size_t ib = min(mb, m - i); CU_ERROR_CHECK(cuMultiGPUZgemm(handle, CBlasNoTrans, CBlasNoTrans, ib, n, m - i - ib, -one, &A[(i + ib) * lda + i], lda, &B[i + ib], ldb, alpha, &B[i], ldb)); CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle)); ztrsm(CBlasLeft, CBlasUpper, CBlasNoTrans, diag, ib, n, one, &A[i * lda + i], lda, &B[i], ldb); } while (i > 0); } else { for (size_t i = 0; i < m; i += mb) { const size_t ib = min(mb, m - i); CU_ERROR_CHECK(cuMultiGPUZgemm(handle, CBlasNoTrans, CBlasNoTrans, ib, n, i, -one, &A[i], lda, B, ldb, alpha, &B[i], ldb)); CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle)); ztrsm(CBlasLeft, CBlasLower, CBlasNoTrans, diag, ib, n, one, &A[i * lda + i], lda, &B[i], ldb); } } } else { if (uplo == CBlasUpper) { for (size_t i = 0; i < m; i += mb) { const size_t ib = min(mb, m - i); CU_ERROR_CHECK(cuMultiGPUZgemm(handle, transA, CBlasNoTrans, ib, n, i, -one, &A[i * lda], lda, B, ldb, alpha, &B[i], ldb)); CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle)); ztrsm(CBlasLeft, CBlasUpper, transA, diag, ib, n, one, &A[i * lda + i], lda, &B[i], ldb); } } else { size_t r = m % mb; size_t i = (r == 0) ? m : m + mb - r; do { i -= mb; const size_t ib = min(mb, m - i); CU_ERROR_CHECK(cuMultiGPUZgemm(handle, transA, CBlasNoTrans, ib, n, m - i - ib, -one, &A[i * lda + i + ib], lda, &B[i + ib], ldb, alpha, &B[i], ldb)); CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle)); ztrsm(CBlasLeft, CBlasLower, transA, diag, ib, n, one, &A[i * lda + i], lda, &B[i], ldb); } while (i > 0); } } } else { if (transA == CBlasNoTrans) { if (uplo == CBlasUpper) { for (size_t j = 0; j < n; j += nb) { const size_t jb = min(nb, n - j); CU_ERROR_CHECK(cuMultiGPUZgemm(handle, CBlasNoTrans, CBlasNoTrans, m, jb, j, -one, B, ldb, &A[j * lda], lda, alpha, &B[j * ldb], ldb)); CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle)); ztrsm(CBlasRight, CBlasUpper, CBlasNoTrans, diag, m, jb, one, &A[j * lda + j], lda, &B[j * ldb], ldb); } } else { size_t r = n % nb; size_t j = (r == 0) ? 
n : n + nb - r; do { j -= nb; const size_t jb = min(nb, n - j); CU_ERROR_CHECK(cuMultiGPUZgemm(handle, CBlasNoTrans, CBlasNoTrans, m, jb, n - j - jb, -one, &B[(j + jb) * ldb], ldb, &A[j * lda + j + jb], lda, alpha, &B[j * ldb], ldb)); CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle)); ztrsm(CBlasRight, CBlasLower, CBlasNoTrans, diag, m, jb, one, &A[j * lda + j], lda, &B[j * ldb], ldb); } while (j > 0); } } else { if (uplo == CBlasUpper) { size_t r = n % nb; size_t j = (r == 0) ? n : n + nb - r; do { j -= nb; const size_t jb = min(nb, n - j); CU_ERROR_CHECK(cuMultiGPUZgemm(handle, CBlasNoTrans, transA, m, jb, n - j - jb, -one, &B[(j + jb) * ldb], ldb, &A[(j + jb) * lda + j], lda, alpha, &B[j * ldb], ldb)); CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle)); ztrsm(CBlasRight, CBlasUpper, transA, diag, m, jb, one, &A[j * lda + j], lda, &B[j * ldb], ldb); } while (j > 0); } else { for (size_t j = 0; j < n; j += nb) { const size_t jb = min(nb, n - j); CU_ERROR_CHECK(cuMultiGPUZgemm(handle, CBlasNoTrans, transA, m, jb, j, -one, B, ldb, &A[j], lda, alpha, &B[j * ldb], ldb)); CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle)); ztrsm(CBlasRight, CBlasLower, transA, diag, m, jb, one, &A[j * lda + j], lda, &B[j * ldb], ldb); } } } } return CUDA_SUCCESS; }
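For reference, the right-side, upper-triangular, non-transposed branch of the CPU ztrsm above solves X * A = alpha * B one column of B at a time: scale column j by alpha, subtract the already-solved columns k < j weighted by A(k,j), then divide by the diagonal A(j,j). A minimal standalone sketch of that recurrence (illustration only; trsm_run_ref and the 2x2 data are hypothetical and not part of the library):

#include <stddef.h>
#include <stdio.h>
#include <complex.h>

/* Reference recurrence for the CBlasRight/CBlasUpper/CBlasNoTrans/CBlasNonUnit case,
   column-major storage as in the code above. */
static void trsm_run_ref(size_t m, size_t n, double complex alpha,
                         const double complex *A, size_t lda,
                         double complex *B, size_t ldb) {
  for (size_t j = 0; j < n; j++) {
    for (size_t i = 0; i < m; i++) B[j * ldb + i] *= alpha;
    for (size_t k = 0; k < j; k++) {
      const double complex temp = A[j * lda + k];        /* A(k,j) */
      for (size_t i = 0; i < m; i++) B[j * ldb + i] -= temp * B[k * ldb + i];
    }
    const double complex diag = 1.0 / A[j * lda + j];    /* non-unit diagonal */
    for (size_t i = 0; i < m; i++) B[j * ldb + i] *= diag;
  }
}

int main(void) {
  double complex A[4] = {2.0, 0.0, 1.0, 4.0};  /* 2x2 upper-triangular, column-major */
  double complex B[2] = {6.0, 7.0};            /* 1x2 right-hand side, ldb = 1 */
  trsm_run_ref(1, 2, 1.0, A, 2, B, 1);
  printf("X = %.1f%+.1fi  %.1f%+.1fi\n",
         creal(B[0]), cimag(B[0]), creal(B[1]), cimag(B[1]));  /* expect 3+0.0i  1+0.0i */
  return 0;
}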
binStruct.h
#ifndef binStruct_h #define binStruct_h #include "../../baseFunctions/fpBaseNode.h" #include "../../baseFunctions/MWC.h" #include "obsIndexAndClassVec.h" #include "zipClassAndValue.h" #include "processingNodeBin.h" #include <vector> #include <assert.h> namespace fp{ template <typename T, typename Q> class binStruct { protected: float OOBAccuracy; float correctOOB; float totalOOB; std::vector< fpBaseNode<T,Q> > bin; std::vector<processingNodeBin<T,Q> > nodeQueue; int numberOfNodes; int numOfTreesInBin; int currTree; obsIndexAndClassVec indicesHolder; std::vector<zipClassAndValue<int, T> > zipper; std::vector<int> nodeIndices; randomNumberRerFMWC randNum; //obsIndexAndClassVec indexHolder(numClasses); //std::vector<zipClassAndValue<int, float> > zipVec(testSize); inline bool rightNode(){ return false; } inline bool leftNode(){ return true; } public: binStruct() : OOBAccuracy(-1.0),correctOOB(0),totalOOB(0),numberOfNodes(0),numOfTreesInBin(0),currTree(0), indicesHolder(fpSingleton::getSingleton().returnNumClasses()){ } inline void loadFirstNode(){ //inline void loadFirstNode(obsIndexAndClassVec& indicesHolder, std::vector<zipClassAndValue<int, T> >& zipper){ nodeQueue.emplace_back(0,0,0,randNum); nodeQueue.back().setupRoot(indicesHolder, zipper); nodeQueue.back().processNode(); if(nodeQueue.back().isLeafNode()){ makeRootALeaf(); }else{ copyProcessedRootToBin(); createRootChildNodes(); } } inline void makeRootALeaf(){ bin[returnRootLocation()].setClass(nodeQueue.back().returnNodeClass()); bin[returnRootLocation()].setDepth(0); } inline void setSharedVectors(obsIndexAndClassVec& indicesInNode){ indicesInNode.resetVectors(); int numUnusedObs = fpSingleton::getSingleton().returnNumObservations(); int randomObsID; int tempMoveObs; for(int n = 0; n < fpSingleton::getSingleton().returnNumObservations(); n++){ randomObsID = randNum.gen(fpSingleton::getSingleton().returnNumObservations()); indicesInNode.insertIndex(nodeIndices[randomObsID], fpSingleton::getSingleton().returnLabel(nodeIndices[randomObsID])); if(randomObsID < numUnusedObs){ --numUnusedObs; tempMoveObs = nodeIndices[numUnusedObs]; nodeIndices[numUnusedObs] = nodeIndices[randomObsID]; nodeIndices[randomObsID] = tempMoveObs; } } } inline bool shouldProcessNode(){ return !nodeQueue.back().isLeafNode(); } inline int positionOfNextNode(){ return (int)bin.size()-1; } inline int parentNodesPosition(){ return (int)bin.size()-1; } inline void makeLeafNodes(){ for(int i= 0; i < fpSingleton::getSingleton().returnNumClasses(); ++i){ bin[i].setSharedClass(i); } } inline int returnDepthOfNode(){ assert(!nodeQueue.empty()); return bin[nodeQueue.back().returnParentNodeNumber()].returnDepth()+1; } inline void copyProcessedNodeToBin(){ bin.emplace_back(nodeQueue.back().returnNodeCutValue(), returnDepthOfNode(), nodeQueue.back().returnNodeCutFeature()); } inline void copyProcessedRootToBin(){ bin[returnRootLocation()].setCutValue(nodeQueue.back().returnNodeCutValue()); bin[returnRootLocation()].setDepth(0); bin[returnRootLocation()].setFeatureValue(nodeQueue.back().returnNodeCutFeature()); } inline int returnRootLocation(){ return currTree+fpSingleton::getSingleton().returnNumClasses(); } inline void linkParentToChild(){ if(nodeQueue.back().returnIsLeftNode()){ bin[nodeQueue.back().returnParentNodeNumber()].setLeftValue(positionOfNextNode()); }else{ bin[nodeQueue.back().returnParentNodeNumber()].setRightValue(positionOfNextNode()); } } inline void linkParentToLeaf(){ assert(nodeQueue.back().returnParentNodeNumber() >= 
fpSingleton::getSingleton().returnNumClasses()); assert(nodeQueue.back().returnParentNodeNumber() <= parentNodesPosition()); assert(nodeQueue.back().returnNodeClass() >= 0); assert(nodeQueue.back().returnNodeClass() < fpSingleton::getSingleton().returnNumClasses()); if(nodeQueue.back().returnIsLeftNode()){ bin[nodeQueue.back().returnParentNodeNumber()].setLeftValue(nodeQueue.back().returnNodeClass()); }else{ bin[nodeQueue.back().returnParentNodeNumber()].setRightValue(nodeQueue.back().returnNodeClass()); } } inline void createChildNodes(){ nodeIterators nodeIts(nodeQueue.back().returnNodeIterators()); zipperIterators<int,T> zipIts(nodeQueue.back().returnZipIterators()); int childDepth = returnDepthOfNode()+1; if(nodeQueue.back().isLeftChildLarger()){ nodeQueue.pop_back(); //TODO: don't emplace_back if should be leaf node. nodeQueue.emplace_back(1,parentNodesPosition(), childDepth, randNum); nodeQueue.back().setupNode(nodeIts, zipIts, rightNode()); nodeQueue.emplace_back(1,parentNodesPosition(), childDepth, randNum); nodeQueue.back().setupNode(nodeIts, zipIts, leftNode()); }else{ nodeQueue.pop_back(); nodeQueue.emplace_back(1,parentNodesPosition(), childDepth, randNum); nodeQueue.back().setupNode(nodeIts, zipIts, leftNode()); nodeQueue.emplace_back(1,parentNodesPosition(), childDepth, randNum); nodeQueue.back().setupNode(nodeIts, zipIts, rightNode()); } } inline void createRootChildNodes(){ nodeIterators nodeIts(nodeQueue.back().returnNodeIterators()); zipperIterators<int,T> zipIts(nodeQueue.back().returnZipIterators()); int childDepth = returnDepthOfNode()+1; if(nodeQueue.back().isLeftChildLarger()){ nodeQueue.pop_back(); //TODO: don't emplace_back if should be leaf node. nodeQueue.emplace_back(1,returnRootLocation(), childDepth,randNum); nodeQueue.back().setupNode(nodeIts, zipIts, rightNode()); nodeQueue.emplace_back(1,returnRootLocation(), childDepth, randNum); nodeQueue.back().setupNode(nodeIts, zipIts, leftNode()); }else{ nodeQueue.pop_back(); nodeQueue.emplace_back(1,returnRootLocation(), childDepth,randNum); nodeQueue.back().setupNode(nodeIts, zipIts, leftNode()); nodeQueue.emplace_back(1,returnRootLocation(), childDepth,randNum); nodeQueue.back().setupNode(nodeIts, zipIts, rightNode()); } } inline void processLeafNode(){ assert(nodeQueue.back().returnNodeSize() > 0); assert(nodeQueue.back().returnNodeSize() <= fpSingleton::getSingleton().returnNumObservations()); linkParentToLeaf(); nodeQueue.pop_back(); } inline int returnNumTrees(){ return numOfTreesInBin; } inline void processInternalNode(){ copyProcessedNodeToBin(); linkParentToChild(); createChildNodes(); } inline void processNode(){ // process the node, i.e. calculate best split, ... nodeQueue.back().processNode(); if (nodeQueue.back().isLeafNode()) { // label the processed node as a leaf. processLeafNode(); } else { // label the processed node as internal. 
processInternalNode(); } } inline void createBin(int numTrees, int randSeed){ numOfTreesInBin = numTrees; randNum.initialize(randSeed); initializeStructures(); for(; currTree < numOfTreesInBin; ++currTree){ setSharedVectors(indicesHolder); loadFirstNode(); while(!nodeQueue.empty()){ processNode(); } } removeStructures(); } inline void initializeStructures(){ zipper.resize(fpSingleton::getSingleton().returnNumObservations()); nodeIndices.resize(fpSingleton::getSingleton().returnNumObservations()); for(int i = 0; i < fpSingleton::getSingleton().returnNumObservations(); ++i){ nodeIndices[i] =i; } bin.resize(numOfTreesInBin+fpSingleton::getSingleton().returnNumClasses()); makeLeafNodes(); } inline void removeStructures(){ std::vector<processingNodeBin<T,Q> >().swap( nodeQueue ); //indicesHolder.removeObsIndexAndClassVec(); std::vector<zipClassAndValue<int, T> >().swap( zipper ); std::vector<int>().swap( nodeIndices); } inline int returnMaxDepth(){ int maxDepth=0; for(auto& node : bin){ // +1 accounts for the leaf nodes which are never created (optimization that cuts memory required for a forest in half) if(maxDepth < node.returnDepth()+1){ maxDepth = node.returnDepth()+1; } } return maxDepth; } inline int returnNumLeafNodes(){ return (int)bin.size() - fpSingleton::getSingleton().returnNumClasses() + numOfTreesInBin; } inline int returnLeafDepthSum(){ int leafDepthSums=0; for(auto& node : bin){ if(node.isInternalNodeFront()){ if(node.returnLeftNodeID() < fpSingleton::getSingleton().returnNumClasses()){ leafDepthSums += node.returnDepth()+1; } if(node.returnRightNodeID() < fpSingleton::getSingleton().returnNumClasses()){ leafDepthSums += node.returnDepth()+1; } } } return leafDepthSums; } ///////////////////////// // This is required to template the predictObservation function // ////////////////////////////// template<typename U> struct identity { typedef U type; }; inline void predictBinObservation(int observationNum, std::vector<int>& preds){ predictBinObservation(observationNum,preds, identity<Q>()); } inline void predictBinObservation(std::vector<T>& observation, std::vector<int>& preds){ predictBinObservation(observation,preds,identity<Q>()); } //////////////////////////////// //PredictForRF inline void predictBinObservation(int observationNum,std::vector<int>& preds, identity<int> ){ std::vector<int> currNode(numOfTreesInBin); int numberNotInLeaf; int featureNum; T featureVal; int q; for( q=0; q<numOfTreesInBin; ++q){ currNode[q] = q+fpSingleton::getSingleton().returnNumClasses(); __builtin_prefetch(&bin[currNode[q]], 0, 3); } do{ numberNotInLeaf = 0; for( q=0; q<numOfTreesInBin; ++q){ if(bin[currNode[q]].isInternalNodeFront()){ featureNum = bin[currNode[q]].returnFeatureNumber(); featureVal = fpSingleton::getSingleton().returnTestFeatureVal(featureNum,observationNum); currNode[q] = bin[currNode[q]].fpBaseNode<T, Q>::nextNode(featureVal); __builtin_prefetch(&bin[currNode[q]], 0, 3); ++numberNotInLeaf; } } }while(numberNotInLeaf); for( q=0; q<numOfTreesInBin; q++){ #pragma omp atomic update ++preds[bin[currNode[q]].returnClass()]; } } inline void predictBinObservation(int observationNum, std::vector<int>& preds, identity<std::vector<int> >){ std::vector<int> currNode(numOfTreesInBin); int numberNotInLeaf; T featureVal; int q; for( q=0; q<numOfTreesInBin; ++q){ currNode[q] = q+fpSingleton::getSingleton().returnNumClasses(); __builtin_prefetch(&bin[currNode[q]], 0, 3); } do{ numberNotInLeaf = 0; for( q=0; q<numOfTreesInBin; ++q){ if(bin[currNode[q]].isInternalNodeFront()){ featureVal = 0; 
for(auto i : bin[currNode[q]].returnFeatureNumber()){ featureVal += fpSingleton::getSingleton().returnTestFeatureVal(i,observationNum); } currNode[q] = bin[currNode[q]].fpBaseNode<T, Q>::nextNode(featureVal); __builtin_prefetch(&bin[currNode[q]], 0, 3); ++numberNotInLeaf; } } }while(numberNotInLeaf); for( q=0; q<numOfTreesInBin; q++){ #pragma omp atomic update ++preds[bin[currNode[q]].returnClass()]; } } inline void predictBinObservation(int observationNum, std::vector<int>& preds, identity<weightedFeature>){ std::vector<int> currNode(numOfTreesInBin); int numberNotInLeaf; T featureVal; int weightNum; int q; for( q=0; q<numOfTreesInBin; ++q){ currNode[q] = q+fpSingleton::getSingleton().returnNumClasses(); __builtin_prefetch(&bin[currNode[q]], 0, 3); } do{ numberNotInLeaf = 0; for( q=0; q<numOfTreesInBin; ++q){ if(bin[currNode[q]].isInternalNodeFront()){ featureVal = 0; weightNum = 0; for(auto i : bin[currNode[q]].returnFeatureNumber().returnFeatures()){ featureVal += fpSingleton::getSingleton().returnTestFeatureVal(i,observationNum)*bin[currNode[q]].returnFeatureNumber().returnWeights()[weightNum++]; } currNode[q] = bin[currNode[q]].fpBaseNode<T, Q>::nextNode(featureVal); __builtin_prefetch(&bin[currNode[q]], 0, 3); ++numberNotInLeaf; } } }while(numberNotInLeaf); for( q=0; q<numOfTreesInBin; q++){ #pragma omp atomic update ++preds[bin[currNode[q]].returnClass()]; } } inline void predictBinObservation(std::vector<T>& observation, std::vector<int>& preds,identity<int> ){ std::vector<int> currNode(numOfTreesInBin); int numberNotInLeaf; int featureNum; int q; for( q=0; q<numOfTreesInBin; ++q){ currNode[q] = q+fpSingleton::getSingleton().returnNumClasses(); __builtin_prefetch(&bin[currNode[q]], 0, 3); } do{ numberNotInLeaf = 0; for( q=0; q<numOfTreesInBin; ++q){ if(bin[currNode[q]].isInternalNodeFront()){ featureNum = bin[currNode[q]].returnFeatureNumber(); currNode[q] = bin[currNode[q]].fpBaseNode<T, Q>::nextNode(observation[featureNum]); __builtin_prefetch(&bin[currNode[q]], 0, 3); ++numberNotInLeaf; } } }while(numberNotInLeaf); for( q=0; q<numOfTreesInBin; q++){ #pragma omp atomic update ++preds[bin[currNode[q]].returnClass()]; } } inline void predictBinObservation(std::vector<T>& observation, std::vector<int>& preds, identity<std::vector<int> >){ std::vector<int> currNode(numOfTreesInBin); int numberNotInLeaf; T featureVal; int q; for( q=0; q<numOfTreesInBin; ++q){ currNode[q] = q+fpSingleton::getSingleton().returnNumClasses(); __builtin_prefetch(&bin[currNode[q]], 0, 3); } do{ numberNotInLeaf = 0; for( q=0; q<numOfTreesInBin; ++q){ if(bin[currNode[q]].isInternalNodeFront()){ featureVal = 0; for(auto i : bin[currNode[q]].returnFeatureNumber()){ featureVal +=observation[i]; } currNode[q] = bin[currNode[q]].fpBaseNode<T, Q>::nextNode(featureVal); __builtin_prefetch(&bin[currNode[q]], 0, 3); ++numberNotInLeaf; } } }while(numberNotInLeaf); for( q=0; q<numOfTreesInBin; q++){ #pragma omp atomic update ++preds[bin[currNode[q]].returnClass()]; } } //Prediction function for ternary sparse matrix inline void predictBinObservation(std::vector<T>& observation, std::vector<int>& preds, identity<weightedFeature>){ std::vector<int> currNode(numOfTreesInBin); int numberNotInLeaf; T featureVal; int weightNum; int q; for( q=0; q<numOfTreesInBin; ++q){ currNode[q] = q+fpSingleton::getSingleton().returnNumClasses(); __builtin_prefetch(&bin[currNode[q]], 0, 3); } do{ numberNotInLeaf = 0; for( q=0; q<numOfTreesInBin; ++q){ if(bin[currNode[q]].isInternalNodeFront()){ featureVal = 0; weightNum = 0; for(auto i : 
bin[currNode[q]].returnFeatureNumber().returnFeatures()){ featureVal +=observation[i]*bin[currNode[q]].returnFeatureNumber().returnWeights()[weightNum++]; } currNode[q] = bin[currNode[q]].fpBaseNode<T, Q>::nextNode(featureVal); __builtin_prefetch(&bin[currNode[q]], 0, 3); ++numberNotInLeaf; } } }while(numberNotInLeaf); for( q=0; q<numOfTreesInBin; q++){ #pragma omp atomic update ++preds[bin[currNode[q]].returnClass()]; } } /////////////////////////////////// /// Test Functions not to be used in production ////////////////////////////////// inline std::vector< fpBaseNode<T,Q> >& exposeBinTest(){ return bin; } void printBin(){ std::cout << "\n"; for(auto nd : bin){ nd.printNode(); } } }; }//fp #endif //binStruct_h
#ifndef binStruct_h #define binStruct_h #include "../../baseFunctions/fpBaseNode.h" #include "../../baseFunctions/MWC.h" #include "obsIndexAndClassVec.h" #include "zipClassAndValue.h" #include "processingNodeBin.h" #include <vector> #include <assert.h> namespace fp{ template <typename T, typename Q> class binStruct { protected: float OOBAccuracy; float correctOOB; float totalOOB; std::vector< fpBaseNode<T,Q> > bin; std::vector<processingNodeBin<T,Q> > nodeQueue; int numberOfNodes; int numOfTreesInBin; int currTree; obsIndexAndClassVec indicesHolder; std::vector<zipClassAndValue<int, T> > zipper; std::vector<int> nodeIndices; randomNumberRerFMWC randNum; //obsIndexAndClassVec indexHolder(numClasses); //std::vector<zipClassAndValue<int, float> > zipVec(testSize); inline bool rightNode(){ return false; } inline bool leftNode(){ return true; } public: binStruct() : OOBAccuracy(-1.0),correctOOB(0),totalOOB(0),numberOfNodes(0),numOfTreesInBin(0),currTree(0), indicesHolder(fpSingleton::getSingleton().returnNumClasses()){ } inline void loadFirstNode(){ //inline void loadFirstNode(obsIndexAndClassVec& indicesHolder, std::vector<zipClassAndValue<int, T> >& zipper){ nodeQueue.emplace_back(0,0,0,randNum); nodeQueue.back().setupRoot(indicesHolder, zipper); nodeQueue.back().processNode(); if(nodeQueue.back().isLeafNode()){ makeRootALeaf(); }else{ copyProcessedRootToBin(); createRootChildNodes(); } } inline void makeRootALeaf(){ bin[returnRootLocation()].setClass(nodeQueue.back().returnNodeClass()); bin[returnRootLocation()].setDepth(0); } inline void setSharedVectors(obsIndexAndClassVec& indicesInNode){ indicesInNode.resetVectors(); int numUnusedObs = fpSingleton::getSingleton().returnNumObservations(); int randomObsID; int tempMoveObs; for(int n = 0; n < fpSingleton::getSingleton().returnNumObservations(); n++){ randomObsID = randNum.gen(fpSingleton::getSingleton().returnNumObservations()); indicesInNode.insertIndex(nodeIndices[randomObsID], fpSingleton::getSingleton().returnLabel(nodeIndices[randomObsID])); if(randomObsID < numUnusedObs){ --numUnusedObs; tempMoveObs = nodeIndices[numUnusedObs]; nodeIndices[numUnusedObs] = nodeIndices[randomObsID]; nodeIndices[randomObsID] = tempMoveObs; } } } inline bool shouldProcessNode(){ return !nodeQueue.back().isLeafNode(); } inline int positionOfNextNode(){ return (int)bin.size()-1; } inline int parentNodesPosition(){ return (int)bin.size()-1; } inline void makeLeafNodes(){ for(int i= 0; i < fpSingleton::getSingleton().returnNumClasses(); ++i){ bin[i].setSharedClass(i); } } inline int returnDepthOfNode(){ assert(!nodeQueue.empty()); return bin[nodeQueue.back().returnParentNodeNumber()].returnDepth()+1; } inline void copyProcessedNodeToBin(){ bin.emplace_back(nodeQueue.back().returnNodeCutValue(), returnDepthOfNode(), nodeQueue.back().returnNodeCutFeature()); } inline void copyProcessedRootToBin(){ bin[returnRootLocation()].setCutValue(nodeQueue.back().returnNodeCutValue()); bin[returnRootLocation()].setDepth(0); bin[returnRootLocation()].setFeatureValue(nodeQueue.back().returnNodeCutFeature()); } inline int returnRootLocation(){ return currTree+fpSingleton::getSingleton().returnNumClasses(); } inline void linkParentToChild(){ if(nodeQueue.back().returnIsLeftNode()){ bin[nodeQueue.back().returnParentNodeNumber()].setLeftValue(positionOfNextNode()); }else{ bin[nodeQueue.back().returnParentNodeNumber()].setRightValue(positionOfNextNode()); } } inline void linkParentToLeaf(){ assert(nodeQueue.back().returnParentNodeNumber() >= 
fpSingleton::getSingleton().returnNumClasses()); assert(nodeQueue.back().returnParentNodeNumber() <= parentNodesPosition()); assert(nodeQueue.back().returnNodeClass() >= 0); assert(nodeQueue.back().returnNodeClass() < fpSingleton::getSingleton().returnNumClasses()); if(nodeQueue.back().returnIsLeftNode()){ bin[nodeQueue.back().returnParentNodeNumber()].setLeftValue(nodeQueue.back().returnNodeClass()); }else{ bin[nodeQueue.back().returnParentNodeNumber()].setRightValue(nodeQueue.back().returnNodeClass()); } } inline void createChildNodes(){ nodeIterators nodeIts(nodeQueue.back().returnNodeIterators()); zipperIterators<int,T> zipIts(nodeQueue.back().returnZipIterators()); int childDepth = returnDepthOfNode()+1; if(nodeQueue.back().isLeftChildLarger()){ nodeQueue.pop_back(); //TODO: don't emplace_back if should be leaf node. nodeQueue.emplace_back(1,parentNodesPosition(), childDepth, randNum); nodeQueue.back().setupNode(nodeIts, zipIts, rightNode()); nodeQueue.emplace_back(1,parentNodesPosition(), childDepth, randNum); nodeQueue.back().setupNode(nodeIts, zipIts, leftNode()); }else{ nodeQueue.pop_back(); nodeQueue.emplace_back(1,parentNodesPosition(), childDepth, randNum); nodeQueue.back().setupNode(nodeIts, zipIts, leftNode()); nodeQueue.emplace_back(1,parentNodesPosition(), childDepth, randNum); nodeQueue.back().setupNode(nodeIts, zipIts, rightNode()); } } inline void createRootChildNodes(){ nodeIterators nodeIts(nodeQueue.back().returnNodeIterators()); zipperIterators<int,T> zipIts(nodeQueue.back().returnZipIterators()); int childDepth = returnDepthOfNode()+1; if(nodeQueue.back().isLeftChildLarger()){ nodeQueue.pop_back(); //TODO: don't emplace_back if should be leaf node. nodeQueue.emplace_back(1,returnRootLocation(), childDepth,randNum); nodeQueue.back().setupNode(nodeIts, zipIts, rightNode()); nodeQueue.emplace_back(1,returnRootLocation(), childDepth, randNum); nodeQueue.back().setupNode(nodeIts, zipIts, leftNode()); }else{ nodeQueue.pop_back(); nodeQueue.emplace_back(1,returnRootLocation(), childDepth,randNum); nodeQueue.back().setupNode(nodeIts, zipIts, leftNode()); nodeQueue.emplace_back(1,returnRootLocation(), childDepth,randNum); nodeQueue.back().setupNode(nodeIts, zipIts, rightNode()); } } inline void processLeafNode(){ assert(nodeQueue.back().returnNodeSize() > 0); assert(nodeQueue.back().returnNodeSize() <= fpSingleton::getSingleton().returnNumObservations()); linkParentToLeaf(); nodeQueue.pop_back(); } inline int returnNumTrees(){ return numOfTreesInBin; } inline void processInternalNode(){ copyProcessedNodeToBin(); linkParentToChild(); createChildNodes(); } inline void processNode(){ // process the node, i.e. calculate best split, ... nodeQueue.back().processNode(); if (nodeQueue.back().isLeafNode()) { // label the processed node as a leaf. processLeafNode(); } else { // label the processed node as internal. 
processInternalNode(); } } inline void createBin(int numTrees, int randSeed){ numOfTreesInBin = numTrees; randNum.initialize(randSeed); initializeStructures(); for(; currTree < numOfTreesInBin; ++currTree){ setSharedVectors(indicesHolder); loadFirstNode(); while(!nodeQueue.empty()){ processNode(); } } removeStructures(); } inline void initializeStructures(){ zipper.resize(fpSingleton::getSingleton().returnNumObservations()); nodeIndices.resize(fpSingleton::getSingleton().returnNumObservations()); for(int i = 0; i < fpSingleton::getSingleton().returnNumObservations(); ++i){ nodeIndices[i] =i; } bin.resize(numOfTreesInBin+fpSingleton::getSingleton().returnNumClasses()); makeLeafNodes(); } inline void removeStructures(){ std::vector<processingNodeBin<T,Q> >().swap( nodeQueue ); //indicesHolder.removeObsIndexAndClassVec(); std::vector<zipClassAndValue<int, T> >().swap( zipper ); std::vector<int>().swap( nodeIndices); } inline int returnMaxDepth(){ int maxDepth=0; for(auto& node : bin){ // +1 accounts for the leaf nodes which are never created (optimization that cuts memory required for a forest in half) if(maxDepth < node.returnDepth()+1){ maxDepth = node.returnDepth()+1; } } return maxDepth; } inline int returnNumLeafNodes(){ return (int)bin.size() - fpSingleton::getSingleton().returnNumClasses() + numOfTreesInBin; } inline int returnLeafDepthSum(){ int leafDepthSums=0; for(auto& node : bin){ if(node.isInternalNodeFront()){ if(node.returnLeftNodeID() < fpSingleton::getSingleton().returnNumClasses()){ leafDepthSums += node.returnDepth()+1; } if(node.returnRightNodeID() < fpSingleton::getSingleton().returnNumClasses()){ leafDepthSums += node.returnDepth()+1; } } } return leafDepthSums; } ///////////////////////// // This is required to template the predictObservation function // ////////////////////////////// template<typename U> struct identity { typedef U type; }; inline void predictBinObservation(int observationNum, std::vector<int>& preds){ predictBinObservation(observationNum,preds, identity<Q>()); } inline void predictBinObservation(std::vector<T>& observation, std::vector<int>& preds){ predictBinObservation(observation,preds,identity<Q>()); } //////////////////////////////// //PredictForRF inline void predictBinObservation(int observationNum,std::vector<int>& preds, identity<int> ){ std::vector<int> currNode(numOfTreesInBin); int numberNotInLeaf; int featureNum; T featureVal; int q; for( q=0; q<numOfTreesInBin; ++q){ currNode[q] = q+fpSingleton::getSingleton().returnNumClasses(); __builtin_prefetch(&bin[currNode[q]], 0, 3); } do{ numberNotInLeaf = 0; for( q=0; q<numOfTreesInBin; ++q){ if(bin[currNode[q]].isInternalNodeFront()){ featureNum = bin[currNode[q]].returnFeatureNumber(); featureVal = fpSingleton::getSingleton().returnTestFeatureVal(featureNum,observationNum); currNode[q] = bin[currNode[q]].fpBaseNode<T, Q>::nextNode(featureVal); __builtin_prefetch(&bin[currNode[q]], 0, 3); ++numberNotInLeaf; } } }while(numberNotInLeaf); for( q=0; q<numOfTreesInBin; q++){ ++preds[bin[currNode[q]].returnClass()]; } } inline void predictBinObservation(int observationNum, std::vector<int>& preds, identity<std::vector<int> >){ std::vector<int> currNode(numOfTreesInBin); int numberNotInLeaf; T featureVal; int q; for( q=0; q<numOfTreesInBin; ++q){ currNode[q] = q+fpSingleton::getSingleton().returnNumClasses(); __builtin_prefetch(&bin[currNode[q]], 0, 3); } do{ numberNotInLeaf = 0; for( q=0; q<numOfTreesInBin; ++q){ if(bin[currNode[q]].isInternalNodeFront()){ featureVal = 0; for(auto i : 
bin[currNode[q]].returnFeatureNumber()){ featureVal += fpSingleton::getSingleton().returnTestFeatureVal(i,observationNum); } currNode[q] = bin[currNode[q]].fpBaseNode<T, Q>::nextNode(featureVal); __builtin_prefetch(&bin[currNode[q]], 0, 3); ++numberNotInLeaf; } } }while(numberNotInLeaf); for( q=0; q<numOfTreesInBin; q++){ ++preds[bin[currNode[q]].returnClass()]; } } inline void predictBinObservation(int observationNum, std::vector<int>& preds, identity<weightedFeature>){ std::vector<int> currNode(numOfTreesInBin); int numberNotInLeaf; T featureVal; int weightNum; int q; for( q=0; q<numOfTreesInBin; ++q){ currNode[q] = q+fpSingleton::getSingleton().returnNumClasses(); __builtin_prefetch(&bin[currNode[q]], 0, 3); } do{ numberNotInLeaf = 0; for( q=0; q<numOfTreesInBin; ++q){ if(bin[currNode[q]].isInternalNodeFront()){ featureVal = 0; weightNum = 0; for(auto i : bin[currNode[q]].returnFeatureNumber().returnFeatures()){ featureVal += fpSingleton::getSingleton().returnTestFeatureVal(i,observationNum)*bin[currNode[q]].returnFeatureNumber().returnWeights()[weightNum++]; } currNode[q] = bin[currNode[q]].fpBaseNode<T, Q>::nextNode(featureVal); __builtin_prefetch(&bin[currNode[q]], 0, 3); ++numberNotInLeaf; } } }while(numberNotInLeaf); for( q=0; q<numOfTreesInBin; q++){ ++preds[bin[currNode[q]].returnClass()]; } } inline void predictBinObservation(std::vector<T>& observation, std::vector<int>& preds,identity<int> ){ std::vector<int> currNode(numOfTreesInBin); int numberNotInLeaf; int featureNum; int q; for( q=0; q<numOfTreesInBin; ++q){ currNode[q] = q+fpSingleton::getSingleton().returnNumClasses(); __builtin_prefetch(&bin[currNode[q]], 0, 3); } do{ numberNotInLeaf = 0; for( q=0; q<numOfTreesInBin; ++q){ if(bin[currNode[q]].isInternalNodeFront()){ featureNum = bin[currNode[q]].returnFeatureNumber(); currNode[q] = bin[currNode[q]].fpBaseNode<T, Q>::nextNode(observation[featureNum]); __builtin_prefetch(&bin[currNode[q]], 0, 3); ++numberNotInLeaf; } } }while(numberNotInLeaf); for( q=0; q<numOfTreesInBin; q++){ ++preds[bin[currNode[q]].returnClass()]; } } inline void predictBinObservation(std::vector<T>& observation, std::vector<int>& preds, identity<std::vector<int> >){ std::vector<int> currNode(numOfTreesInBin); int numberNotInLeaf; T featureVal; int q; for( q=0; q<numOfTreesInBin; ++q){ currNode[q] = q+fpSingleton::getSingleton().returnNumClasses(); __builtin_prefetch(&bin[currNode[q]], 0, 3); } do{ numberNotInLeaf = 0; for( q=0; q<numOfTreesInBin; ++q){ if(bin[currNode[q]].isInternalNodeFront()){ featureVal = 0; for(auto i : bin[currNode[q]].returnFeatureNumber()){ featureVal +=observation[i]; } currNode[q] = bin[currNode[q]].fpBaseNode<T, Q>::nextNode(featureVal); __builtin_prefetch(&bin[currNode[q]], 0, 3); ++numberNotInLeaf; } } }while(numberNotInLeaf); for( q=0; q<numOfTreesInBin; q++){ ++preds[bin[currNode[q]].returnClass()]; } } //Prediction function for ternary sparse matrix inline void predictBinObservation(std::vector<T>& observation, std::vector<int>& preds, identity<weightedFeature>){ std::vector<int> currNode(numOfTreesInBin); int numberNotInLeaf; T featureVal; int weightNum; int q; for( q=0; q<numOfTreesInBin; ++q){ currNode[q] = q+fpSingleton::getSingleton().returnNumClasses(); __builtin_prefetch(&bin[currNode[q]], 0, 3); } do{ numberNotInLeaf = 0; for( q=0; q<numOfTreesInBin; ++q){ if(bin[currNode[q]].isInternalNodeFront()){ featureVal = 0; weightNum = 0; for(auto i : bin[currNode[q]].returnFeatureNumber().returnFeatures()){ featureVal 
+=observation[i]*bin[currNode[q]].returnFeatureNumber().returnWeights()[weightNum++]; } currNode[q] = bin[currNode[q]].fpBaseNode<T, Q>::nextNode(featureVal); __builtin_prefetch(&bin[currNode[q]], 0, 3); ++numberNotInLeaf; } } }while(numberNotInLeaf); for( q=0; q<numOfTreesInBin; q++){ ++preds[bin[currNode[q]].returnClass()]; } } /////////////////////////////////// /// Test Functions not to be used in production ////////////////////////////////// inline std::vector< fpBaseNode<T,Q> >& exposeBinTest(){ return bin; } void printBin(){ std::cout << "\n"; for(auto nd : bin){ nd.printNode(); } } }; }//fp #endif //binStruct_h
#ifndef binStruct_h #define binStruct_h #include "../../baseFunctions/fpBaseNode.h" #include "../../baseFunctions/MWC.h" #include "obsIndexAndClassVec.h" #include "zipClassAndValue.h" #include "processingNodeBin.h" #include <vector> #include <assert.h> namespace fp{ template <typename T, typename Q> class binStruct { protected: float OOBAccuracy; float correctOOB; float totalOOB; std::vector< fpBaseNode<T,Q> > bin; std::vector<processingNodeBin<T,Q> > nodeQueue; int numberOfNodes; int numOfTreesInBin; int currTree; obsIndexAndClassVec indicesHolder; std::vector<zipClassAndValue<int, T> > zipper; std::vector<int> nodeIndices; randomNumberRerFMWC randNum; //obsIndexAndClassVec indexHolder(numClasses); //std::vector<zipClassAndValue<int, float> > zipVec(testSize); inline bool rightNode(){ return false; } inline bool leftNode(){ return true; } public: binStruct() : OOBAccuracy(-1.0),correctOOB(0),totalOOB(0),numberOfNodes(0),numOfTreesInBin(0),currTree(0), indicesHolder(fpSingleton::getSingleton().returnNumClasses()){ } inline void loadFirstNode(){ //inline void loadFirstNode(obsIndexAndClassVec& indicesHolder, std::vector<zipClassAndValue<int, T> >& zipper){ nodeQueue.emplace_back(0,0,0,randNum); nodeQueue.back().setupRoot(indicesHolder, zipper); nodeQueue.back().processNode(); if(nodeQueue.back().isLeafNode()){ makeRootALeaf(); }else{ copyProcessedRootToBin(); createRootChildNodes(); } } inline void makeRootALeaf(){ bin[returnRootLocation()].setClass(nodeQueue.back().returnNodeClass()); bin[returnRootLocation()].setDepth(0); } inline void setSharedVectors(obsIndexAndClassVec& indicesInNode){ indicesInNode.resetVectors(); int numUnusedObs = fpSingleton::getSingleton().returnNumObservations(); int randomObsID; int tempMoveObs; for(int n = 0; n < fpSingleton::getSingleton().returnNumObservations(); n++){ randomObsID = randNum.gen(fpSingleton::getSingleton().returnNumObservations()); indicesInNode.insertIndex(nodeIndices[randomObsID], fpSingleton::getSingleton().returnLabel(nodeIndices[randomObsID])); if(randomObsID < numUnusedObs){ --numUnusedObs; tempMoveObs = nodeIndices[numUnusedObs]; nodeIndices[numUnusedObs] = nodeIndices[randomObsID]; nodeIndices[randomObsID] = tempMoveObs; } } } inline bool shouldProcessNode(){ return !nodeQueue.back().isLeafNode(); } inline int positionOfNextNode(){ return (int)bin.size()-1; } inline int parentNodesPosition(){ return (int)bin.size()-1; } inline void makeLeafNodes(){ for(int i= 0; i < fpSingleton::getSingleton().returnNumClasses(); ++i){ bin[i].setSharedClass(i); } } inline int returnDepthOfNode(){ assert(!nodeQueue.empty()); return bin[nodeQueue.back().returnParentNodeNumber()].returnDepth()+1; } inline void copyProcessedNodeToBin(){ bin.emplace_back(nodeQueue.back().returnNodeCutValue(), returnDepthOfNode(), nodeQueue.back().returnNodeCutFeature()); } inline void copyProcessedRootToBin(){ bin[returnRootLocation()].setCutValue(nodeQueue.back().returnNodeCutValue()); bin[returnRootLocation()].setDepth(0); bin[returnRootLocation()].setFeatureValue(nodeQueue.back().returnNodeCutFeature()); } inline int returnRootLocation(){ return currTree+fpSingleton::getSingleton().returnNumClasses(); } inline void linkParentToChild(){ if(nodeQueue.back().returnIsLeftNode()){ bin[nodeQueue.back().returnParentNodeNumber()].setLeftValue(positionOfNextNode()); }else{ bin[nodeQueue.back().returnParentNodeNumber()].setRightValue(positionOfNextNode()); } } inline void linkParentToLeaf(){ assert(nodeQueue.back().returnParentNodeNumber() >= 
fpSingleton::getSingleton().returnNumClasses()); assert(nodeQueue.back().returnParentNodeNumber() <= parentNodesPosition()); assert(nodeQueue.back().returnNodeClass() >= 0); assert(nodeQueue.back().returnNodeClass() < fpSingleton::getSingleton().returnNumClasses()); if(nodeQueue.back().returnIsLeftNode()){ bin[nodeQueue.back().returnParentNodeNumber()].setLeftValue(nodeQueue.back().returnNodeClass()); }else{ bin[nodeQueue.back().returnParentNodeNumber()].setRightValue(nodeQueue.back().returnNodeClass()); } } inline void createChildNodes(){ nodeIterators nodeIts(nodeQueue.back().returnNodeIterators()); zipperIterators<int,T> zipIts(nodeQueue.back().returnZipIterators()); int childDepth = returnDepthOfNode()+1; if(nodeQueue.back().isLeftChildLarger()){ nodeQueue.pop_back(); //TODO: don't emplace_back if should be leaf node. nodeQueue.emplace_back(1,parentNodesPosition(), childDepth, randNum); nodeQueue.back().setupNode(nodeIts, zipIts, rightNode()); nodeQueue.emplace_back(1,parentNodesPosition(), childDepth, randNum); nodeQueue.back().setupNode(nodeIts, zipIts, leftNode()); }else{ nodeQueue.pop_back(); nodeQueue.emplace_back(1,parentNodesPosition(), childDepth, randNum); nodeQueue.back().setupNode(nodeIts, zipIts, leftNode()); nodeQueue.emplace_back(1,parentNodesPosition(), childDepth, randNum); nodeQueue.back().setupNode(nodeIts, zipIts, rightNode()); } } inline void createRootChildNodes(){ nodeIterators nodeIts(nodeQueue.back().returnNodeIterators()); zipperIterators<int,T> zipIts(nodeQueue.back().returnZipIterators()); int childDepth = returnDepthOfNode()+1; if(nodeQueue.back().isLeftChildLarger()){ nodeQueue.pop_back(); //TODO: don't emplace_back if should be leaf node. nodeQueue.emplace_back(1,returnRootLocation(), childDepth,randNum); nodeQueue.back().setupNode(nodeIts, zipIts, rightNode()); nodeQueue.emplace_back(1,returnRootLocation(), childDepth, randNum); nodeQueue.back().setupNode(nodeIts, zipIts, leftNode()); }else{ nodeQueue.pop_back(); nodeQueue.emplace_back(1,returnRootLocation(), childDepth,randNum); nodeQueue.back().setupNode(nodeIts, zipIts, leftNode()); nodeQueue.emplace_back(1,returnRootLocation(), childDepth,randNum); nodeQueue.back().setupNode(nodeIts, zipIts, rightNode()); } } inline void processLeafNode(){ assert(nodeQueue.back().returnNodeSize() > 0); assert(nodeQueue.back().returnNodeSize() <= fpSingleton::getSingleton().returnNumObservations()); linkParentToLeaf(); nodeQueue.pop_back(); } inline int returnNumTrees(){ return numOfTreesInBin; } inline void processInternalNode(){ copyProcessedNodeToBin(); linkParentToChild(); createChildNodes(); } inline void processNode(){ // process the node, i.e. calculate best split, ... nodeQueue.back().processNode(); if (nodeQueue.back().isLeafNode()) { // label the processed node as a leaf. processLeafNode(); } else { // label the processed node as internal. 
processInternalNode(); } } inline void createBin(int numTrees, int randSeed){ numOfTreesInBin = numTrees; randNum.initialize(randSeed); initializeStructures(); for(; currTree < numOfTreesInBin; ++currTree){ setSharedVectors(indicesHolder); loadFirstNode(); while(!nodeQueue.empty()){ processNode(); } } removeStructures(); } inline void initializeStructures(){ zipper.resize(fpSingleton::getSingleton().returnNumObservations()); nodeIndices.resize(fpSingleton::getSingleton().returnNumObservations()); for(int i = 0; i < fpSingleton::getSingleton().returnNumObservations(); ++i){ nodeIndices[i] =i; } bin.resize(numOfTreesInBin+fpSingleton::getSingleton().returnNumClasses()); makeLeafNodes(); } inline void removeStructures(){ std::vector<processingNodeBin<T,Q> >().swap( nodeQueue ); //indicesHolder.removeObsIndexAndClassVec(); std::vector<zipClassAndValue<int, T> >().swap( zipper ); std::vector<int>().swap( nodeIndices); } inline int returnMaxDepth(){ int maxDepth=0; for(auto& node : bin){ // +1 accounts for the leaf nodes which are never created (optimization that cuts memory required for a forest in half) if(maxDepth < node.returnDepth()+1){ maxDepth = node.returnDepth()+1; } } return maxDepth; } inline int returnNumLeafNodes(){ return (int)bin.size() - fpSingleton::getSingleton().returnNumClasses() + numOfTreesInBin; } inline int returnLeafDepthSum(){ int leafDepthSums=0; for(auto& node : bin){ if(node.isInternalNodeFront()){ if(node.returnLeftNodeID() < fpSingleton::getSingleton().returnNumClasses()){ leafDepthSums += node.returnDepth()+1; } if(node.returnRightNodeID() < fpSingleton::getSingleton().returnNumClasses()){ leafDepthSums += node.returnDepth()+1; } } } return leafDepthSums; } ///////////////////////// // This is required to template the predictObservation function // ////////////////////////////// template<typename U> struct identity { typedef U type; }; inline void predictBinObservation(int observationNum, std::vector<int>& preds){ predictBinObservation(observationNum,preds, identity<Q>()); } inline void predictBinObservation(std::vector<T>& observation, std::vector<int>& preds){ predictBinObservation(observation,preds,identity<Q>()); } //////////////////////////////// //PredictForRF inline void predictBinObservation(int observationNum,std::vector<int>& preds, identity<int> ){ std::vector<int> currNode(numOfTreesInBin); int numberNotInLeaf; int featureNum; T featureVal; int q; for( q=0; q<numOfTreesInBin; ++q){ currNode[q] = q+fpSingleton::getSingleton().returnNumClasses(); __builtin_prefetch(&bin[currNode[q]], 0, 3); } do{ numberNotInLeaf = 0; for( q=0; q<numOfTreesInBin; ++q){ if(bin[currNode[q]].isInternalNodeFront()){ featureNum = bin[currNode[q]].returnFeatureNumber(); featureVal = fpSingleton::getSingleton().returnTestFeatureVal(featureNum,observationNum); currNode[q] = bin[currNode[q]].fpBaseNode<T, Q>::nextNode(featureVal); __builtin_prefetch(&bin[currNode[q]], 0, 3); ++numberNotInLeaf; } } }while(numberNotInLeaf); for( q=0; q<numOfTreesInBin; q++){ #pragma omp atomic update ++preds[bin[currNode[q]].returnClass()]; } } inline void predictBinObservation(int observationNum, std::vector<int>& preds, identity<std::vector<int> >){ std::vector<int> currNode(numOfTreesInBin); int numberNotInLeaf; T featureVal; int q; for( q=0; q<numOfTreesInBin; ++q){ currNode[q] = q+fpSingleton::getSingleton().returnNumClasses(); __builtin_prefetch(&bin[currNode[q]], 0, 3); } do{ numberNotInLeaf = 0; for( q=0; q<numOfTreesInBin; ++q){ if(bin[currNode[q]].isInternalNodeFront()){ featureVal = 0; 
for(auto i : bin[currNode[q]].returnFeatureNumber()){ featureVal += fpSingleton::getSingleton().returnTestFeatureVal(i,observationNum); } currNode[q] = bin[currNode[q]].fpBaseNode<T, Q>::nextNode(featureVal); __builtin_prefetch(&bin[currNode[q]], 0, 3); ++numberNotInLeaf; } } }while(numberNotInLeaf); for( q=0; q<numOfTreesInBin; q++){ #pragma omp atomic update ++preds[bin[currNode[q]].returnClass()]; } } inline void predictBinObservation(int observationNum, std::vector<int>& preds, identity<weightedFeature>){ std::vector<int> currNode(numOfTreesInBin); int numberNotInLeaf; T featureVal; int weightNum; int q; for( q=0; q<numOfTreesInBin; ++q){ currNode[q] = q+fpSingleton::getSingleton().returnNumClasses(); __builtin_prefetch(&bin[currNode[q]], 0, 3); } do{ numberNotInLeaf = 0; for( q=0; q<numOfTreesInBin; ++q){ if(bin[currNode[q]].isInternalNodeFront()){ featureVal = 0; weightNum = 0; for(auto i : bin[currNode[q]].returnFeatureNumber().returnFeatures()){ featureVal += fpSingleton::getSingleton().returnTestFeatureVal(i,observationNum)*bin[currNode[q]].returnFeatureNumber().returnWeights()[weightNum++]; } currNode[q] = bin[currNode[q]].fpBaseNode<T, Q>::nextNode(featureVal); __builtin_prefetch(&bin[currNode[q]], 0, 3); ++numberNotInLeaf; } } }while(numberNotInLeaf); for( q=0; q<numOfTreesInBin; q++){ #pragma omp atomic update ++preds[bin[currNode[q]].returnClass()]; } } inline void predictBinObservation(std::vector<T>& observation, std::vector<int>& preds,identity<int> ){ std::vector<int> currNode(numOfTreesInBin); int numberNotInLeaf; int featureNum; int q; for( q=0; q<numOfTreesInBin; ++q){ currNode[q] = q+fpSingleton::getSingleton().returnNumClasses(); __builtin_prefetch(&bin[currNode[q]], 0, 3); } do{ numberNotInLeaf = 0; for( q=0; q<numOfTreesInBin; ++q){ if(bin[currNode[q]].isInternalNodeFront()){ featureNum = bin[currNode[q]].returnFeatureNumber(); currNode[q] = bin[currNode[q]].fpBaseNode<T, Q>::nextNode(observation[featureNum]); __builtin_prefetch(&bin[currNode[q]], 0, 3); ++numberNotInLeaf; } } }while(numberNotInLeaf); for( q=0; q<numOfTreesInBin; q++){ #pragma omp atomic update ++preds[bin[currNode[q]].returnClass()]; } } inline void predictBinObservation(std::vector<T>& observation, std::vector<int>& preds, identity<std::vector<int> >){ std::vector<int> currNode(numOfTreesInBin); int numberNotInLeaf; T featureVal; int q; for( q=0; q<numOfTreesInBin; ++q){ currNode[q] = q+fpSingleton::getSingleton().returnNumClasses(); __builtin_prefetch(&bin[currNode[q]], 0, 3); } do{ numberNotInLeaf = 0; for( q=0; q<numOfTreesInBin; ++q){ if(bin[currNode[q]].isInternalNodeFront()){ featureVal = 0; for(auto i : bin[currNode[q]].returnFeatureNumber()){ featureVal +=observation[i]; } currNode[q] = bin[currNode[q]].fpBaseNode<T, Q>::nextNode(featureVal); __builtin_prefetch(&bin[currNode[q]], 0, 3); ++numberNotInLeaf; } } }while(numberNotInLeaf); for( q=0; q<numOfTreesInBin; q++){ #pragma omp atomic update ++preds[bin[currNode[q]].returnClass()]; } } //Prediction function for ternary sparse matrix inline void predictBinObservation(std::vector<T>& observation, std::vector<int>& preds, identity<weightedFeature>){ std::vector<int> currNode(numOfTreesInBin); int numberNotInLeaf; T featureVal; int weightNum; int q; for( q=0; q<numOfTreesInBin; ++q){ currNode[q] = q+fpSingleton::getSingleton().returnNumClasses(); __builtin_prefetch(&bin[currNode[q]], 0, 3); } do{ numberNotInLeaf = 0; for( q=0; q<numOfTreesInBin; ++q){ if(bin[currNode[q]].isInternalNodeFront()){ featureVal = 0; weightNum = 0; for(auto i : 
bin[currNode[q]].returnFeatureNumber().returnFeatures()){ featureVal +=observation[i]*bin[currNode[q]].returnFeatureNumber().returnWeights()[weightNum++]; } currNode[q] = bin[currNode[q]].fpBaseNode<T, Q>::nextNode(featureVal); __builtin_prefetch(&bin[currNode[q]], 0, 3); ++numberNotInLeaf; } } }while(numberNotInLeaf); for( q=0; q<numOfTreesInBin; q++){ #pragma omp atomic update ++preds[bin[currNode[q]].returnClass()]; } } /////////////////////////////////// /// Test Functions not to be used in production ////////////////////////////////// inline std::vector< fpBaseNode<T,Q> >& exposeBinTest(){ return bin; } void printBin(){ std::cout << "\n"; for(auto nd : bin){ nd.printNode(); } } }; }//fp #endif //binStruct_h
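The only difference between the plain and OpenMP-annotated variants of binStruct.h above is the #pragma omp atomic update guarding the increments of the shared preds histogram. A minimal standalone sketch of why that guard matters when several bins vote concurrently (hypothetical names, illustration only, not part of binStruct):

#include <vector>
#include <cstdio>

int main() {
  const int numClasses = 3, numBins = 8, treesPerBin = 4;
  std::vector<int> preds(numClasses, 0);   // shared vote histogram, like preds above

  // Each bin is handled by its own thread; without the atomic update two threads
  // voting for the same class at the same time could lose increments.
  #pragma omp parallel for
  for (int b = 0; b < numBins; ++b) {
    for (int t = 0; t < treesPerBin; ++t) {
      const int votedClass = (b + t) % numClasses;  // stand-in for bin[currNode[q]].returnClass()
      #pragma omp atomic update
      ++preds[votedClass];
    }
  }

  for (int c = 0; c < numClasses; ++c)
    std::printf("class %d: %d votes\n", c, preds[c]);   // totals always sum to numBins*treesPerBin
  return 0;
}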
particle_levelset_utilities.h
/* ============================================================================== KratosTestApplication A library based on: Kratos A General Purpose Software for Multi-Physics Finite Element Analysis Version 1.0 (Released on march 05, 2007). Copyright 2010 Pooyan Dadvand, Riccardo Rossi pooyan@cimne.upc.edu rrossi@cimne.upc.edu - CIMNE (International Center for Numerical Methods in Engineering), Gran Capita' s/n, 08034 Barcelona, Spain Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following condition: Distribution of this code for any commercial purpose is permissible ONLY BY DIRECT ARRANGEMENT WITH THE COPYRIGHT OWNERS. The above copyright notice and this permission notice sKRATOS_WATCH(disp);hall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ============================================================================== */ // // Project Name: Kratos // Last Modified by: $Author: rrossi $ // Date: $Date: 2007-03-06 10:30:31 $ // Revision: $Revision: 1.2 $ // // #if !defined(KRATOS_PARTICLE_LEVELSET_UTILITIES_INCLUDED ) #define KRATOS_PARTICLE_LEVELSET_UTILITIES_INCLUDED // System includes #include <string> #include <iostream> #include <algorithm> // External includes #include "pybind11/pybind11.h" // Project includes #include "includes/define.h" #include "includes/model_part.h" #include "includes/deprecated_variables.h" #include "includes/node.h" #include "utilities/geometry_utilities.h" #include "geometries/tetrahedra_3d_4.h" #include "thermo_mechanical_application.h" #include "spatial_containers/spatial_containers.h" #include "utilities/timer.h" #include "processes/node_erase_process.h" #include "utilities/binbased_fast_point_locator.h" #include "utilities/timer.h" // #include <boost/random/linear_congruential.hpp> // #include <boost/random/uniform_int.hpp> // #include <boost/random/uniform_real.hpp> // #include <boost/random/variate_generator.hpp> // #include <boost/generator_iterator.hpp> // #include <tr1/random> #include <time.h> #ifdef _OPENMP #include "omp.h" #endif namespace Kratos { template<std::size_t TDim> class ParticleLevelSetUtils { public: KRATOS_CLASS_POINTER_DEFINITION(ParticleLevelSetUtils<TDim>); //********************************************************************************************** //********************************************************************************************** //function to seed a list of new nodes void Seed(ModelPart& rEulerianModelPart, ModelPart& rLagrangianModelPart, const double max_seed_distance, const double min_edge_size) { KRATOS_TRY; rLagrangianModelPart.Nodes().clear(); unsigned int ele_id = 1; for (ModelPart::ElementsContainerType::iterator el_it = rEulerianModelPart.ElementsBegin(); el_it != 
rEulerianModelPart.ElementsEnd(); el_it++) { el_it->SetId(ele_id); ele_id++; } if(TDim==2){ BoundedMatrix<double, 16, 3 > pos; BoundedMatrix<double, 16, 3 > N; CreateParticles2D(rEulerianModelPart,rLagrangianModelPart,pos,N,max_seed_distance,min_edge_size); } else { // BoundedMatrix<double, 56, 3 > pos; // BoundedMatrix<double, 56, 4 > N; // CreateParticles3D(rEulerianModelPart,rLagrangianModelPart,pos,N,max_seed_distance,min_edge_size); BoundedMatrix<double, 10, 3 > pos; BoundedMatrix<double, 10, 4 > N; FewCreateParticles3D(rEulerianModelPart,rLagrangianModelPart,pos,N,max_seed_distance,min_edge_size); } for (ModelPart::NodesContainerType::iterator node_it = rLagrangianModelPart.NodesBegin(); node_it != rLagrangianModelPart.NodesEnd(); node_it++) { node_it->FastGetSolutionStepValue(VELOCITY, 1) = node_it->FastGetSolutionStepValue(VELOCITY); // node_it->FastGetSolutionStepValue(DISTANCE, 1) = node_it->FastGetSolutionStepValue(DISTANCE); } KRATOS_CATCH(""); } //********************************************************************************************** //********************************************************************************************** void StreamlineMove(const double dt, ModelPart& rEulerianModelPart, ModelPart& rLagrangianModelPart, BinBasedFastPointLocator<TDim>& node_locator) { KRATOS_TRY array_1d<double, 3 > veulerian; Vector N; const int max_results = 10000; typename BinBasedFastPointLocator<TDim>::ResultContainerType results(max_results); const int nparticles = rLagrangianModelPart.Nodes().size(); //KRATOS_WATCH("551") #pragma omp parallel for firstprivate(results,N,veulerian) for (int i = 0; i < nparticles; i++) { unsigned int substep = 0; unsigned int subdivisions = 1; double small_dt = dt; while(substep++ < subdivisions) { ModelPart::NodesContainerType::iterator iparticle = rLagrangianModelPart.NodesBegin() + i; (iparticle)->Set(TO_ERASE, true); Node < 3 > ::Pointer pparticle = *(iparticle.base()); typename BinBasedFastPointLocator<TDim>::ResultIteratorType result_begin = results.begin(); Element::Pointer pelement; // KRATOS_WATCH("561") bool is_found = node_locator.FindPointOnMesh(pparticle->Coordinates(), N, pelement, result_begin, max_results); // KRATOS_WATCH("564") if (is_found == true) { (pparticle)->GetValue(IS_VISITED) = 1; Geometry< Node < 3 > >& geom = pelement->GetGeometry(); noalias(veulerian) = N[0] * geom[0].FastGetSolutionStepValue(VELOCITY); for (unsigned int k = 1; k < geom.size(); k++) noalias(veulerian) += N[k] * geom[k].FastGetSolutionStepValue(VELOCITY); //compute adaptive subdivisions if(substep == 1) { //compute h double h = N[0] * geom[0].FastGetSolutionStepValue(NODAL_H); for (unsigned int k = 1; k < geom.size(); k++) h += N[k] * geom[k].FastGetSolutionStepValue(NODAL_H); //compute number of subdivisions needed const unsigned int min_subdivisions = 3; const unsigned int max_subdivisions = 20; double v = norm_2(veulerian); subdivisions = double(floor(2*dt*v/h)); subdivisions = (subdivisions<min_subdivisions) ? min_subdivisions : (subdivisions>max_subdivisions) ? 
max_subdivisions : subdivisions; //compute subdivisions time step small_dt = dt / subdivisions; // KRATOS_WATCH(subdivisions) } //move according to the streamline array_1d<double, 3 > & disp = (iparticle)->FastGetSolutionStepValue(DISPLACEMENT); noalias(disp) += small_dt*veulerian; (pparticle)->Set(TO_ERASE, false); // KRATOS_WATCH("585") //update position noalias(iparticle->Coordinates()) = iparticle->GetInitialPosition(); noalias(iparticle->Coordinates()) += iparticle->FastGetSolutionStepValue(DISPLACEMENT); (iparticle)->GetValue(IS_VISITED) = 0; //KRATOS_WATCH("619") } } } KRATOS_CATCH("") } //********************************************************************************************** //********************************************************************************************** void ParticleLevelSetCorrection(ModelPart& rEulerianModelPart, ModelPart& rLagrangianModelPart, BinBasedFastPointLocator<TDim>& node_locator) { KRATOS_TRY //Initilize NAGATIVE_DISTANCE & POSETIVE_DISTANCE const int nnodes= rEulerianModelPart.Nodes().size(); #pragma omp parallel for for (int jj = 0; jj < nnodes; jj++) { ModelPart::NodesContainerType::iterator node_itr = rEulerianModelPart.NodesBegin() + jj; const double nd_dist = node_itr->FastGetSolutionStepValue(DISTANCE); node_itr->SetValue(POSETIVE_DISTANCE,nd_dist ); node_itr->SetValue(NAGATIVE_DISTANCE,nd_dist ); } //loop over particles double particle_dist= 0.0; Vector N; const int max_results = 10000; typename BinBasedFastPointLocator<TDim>::ResultContainerType results(max_results); const int nparticles = rLagrangianModelPart.Nodes().size(); #pragma omp parallel for firstprivate(results,N,particle_dist) for (int i = 0; i < nparticles; i++) { ModelPart::NodesContainerType::iterator particle_itr = rLagrangianModelPart.NodesBegin() + i; Node < 3 > ::Pointer p_pointer = *(particle_itr.base()); typename BinBasedFastPointLocator<TDim>::ResultIteratorType result_begin = results.begin(); Element::Pointer pelement; bool is_found = node_locator.FindPointOnMesh(p_pointer->Coordinates(), N, pelement, result_begin, max_results); if (is_found == true) { Geometry< Node < 3 > >& geom = pelement->GetGeometry(); //interpolate the particle distance particle_dist = N[0] * geom[0].FastGetSolutionStepValue(DISTANCE); for (unsigned int k = 1; k < geom.size(); k++) particle_dist += N[k] * geom[k].FastGetSolutionStepValue(DISTANCE); //check if correction is needed const double p_sign = particle_itr->FastGetSolutionStepValue(IS_WATER); const double p_radi = particle_itr->FastGetSolutionStepValue(PARTICLE_RADIUS); if( particle_dist*p_sign < 0.0 && fabs(particle_dist) > p_radi) { double p_xx = particle_itr->X(); double p_yy = particle_itr->Y(); double p_zz = particle_itr->Z(); // const Variable<double> posetive_negative_dist_var; /* if( p_sign == -1.0 ) posetive_negative_dist_var = NAGATIVE_DISTANCE; else if( p_sign == 1.0 ) posetive_negative_dist_var = POSETIVE_DISTANCE; */ for (unsigned int kk = 1; kk < geom.size(); kk++){ p_xx -= geom[kk].X(); p_yy -= geom[kk].Y(); p_zz -= geom[kk].Z(); double dd = p_xx*p_xx + p_yy*p_yy + p_zz*p_zz; dd = sqrt(dd); double dist_to_particle = p_sign * (p_radi - dd); //correction due to particle distance and sign geom[kk].SetLock(); if( p_sign == 1.0){ double& pos_distance = geom[kk].GetValue(POSETIVE_DISTANCE); if ( dist_to_particle > pos_distance) pos_distance = dist_to_particle;} else if( p_sign == -1.0){ double& neg_distance = geom[kk].GetValue(NAGATIVE_DISTANCE); if ( dist_to_particle < neg_distance) neg_distance = dist_to_particle; } 
geom[kk].UnSetLock(); } } } }//end of loop over particles //final correction, choose between NAGATIVE_DISTANCE & POSETIVE_DISTANCE // const int nnodes= rEulerianModelPart.Nodes().size(); #pragma omp parallel for for (int jj = 0; jj < nnodes; jj++) { ModelPart::NodesContainerType::iterator node_itr = rEulerianModelPart.NodesBegin() + jj; double posetive = node_itr->GetValue(POSETIVE_DISTANCE); double negative = node_itr->GetValue(NAGATIVE_DISTANCE); double & nd_dist = node_itr->FastGetSolutionStepValue(DISTANCE); if ( posetive != negative){ if( fabs(posetive) < fabs(negative) ) nd_dist = posetive; else nd_dist = negative; node_itr->SetValue(POSETIVE_DISTANCE,nd_dist ); node_itr->SetValue(NAGATIVE_DISTANCE,nd_dist ); } } KRATOS_CATCH("") } //********************************************************************************************** //********************************************************************************************** void ResetParticleRadius(const double min_edge_length, ModelPart& rEulerianModelPart, ModelPart& rLagrangianModelPart, BinBasedFastPointLocator<TDim>& node_locator) { KRATOS_TRY; double particle_dist = 0.0; Vector N; const int max_results = 10000; typename BinBasedFastPointLocator<TDim>::ResultContainerType results(max_results); const int nparticles = rLagrangianModelPart.Nodes().size(); #pragma omp parallel for firstprivate(results,N,particle_dist) for (int i = 0; i < nparticles; i++) { ModelPart::NodesContainerType::iterator particle_itr = rLagrangianModelPart.NodesBegin() + i; Node < 3 > ::Pointer p_pointer = *(particle_itr.base()); typename BinBasedFastPointLocator<TDim>::ResultIteratorType result_begin = results.begin(); Element::Pointer pelement; bool is_found = node_locator.FindPointOnMesh(p_pointer->Coordinates(), N, pelement, result_begin, max_results); if (is_found == true) { Geometry< Node < 3 > >& geom = pelement->GetGeometry(); //interpolate the particle distance particle_dist = N[0] * geom[0].FastGetSolutionStepValue(DISTANCE); for (unsigned int k = 1; k < geom.size(); k++) particle_dist += N[k] * geom[k].FastGetSolutionStepValue(DISTANCE); if( fabs(particle_dist) < 0.1*min_edge_length) particle_itr->FastGetSolutionStepValue(PARTICLE_RADIUS) = 0.1*min_edge_length; else if(fabs(particle_dist) > 0.5*min_edge_length) particle_itr->FastGetSolutionStepValue(PARTICLE_RADIUS) = 0.5*min_edge_length; else particle_itr->FastGetSolutionStepValue(PARTICLE_RADIUS) = fabs(particle_dist); } } KRATOS_CATCH("") } //********************************************************************************************** //********************************************************************************************** void ParticleReseeding(ModelPart& rEulerianModelPart, ModelPart& rLagrangianModelPart, BinBasedFastPointLocator<TDim>& node_locator, const double max_seed_distance, const double min_edge_size) { KRATOS_TRY; //generate a tree with the position of the lagrangian nodes // typedef Node < 3 > PointType; // typedef Node < 3 > ::Pointer PointTypePointer; //unsigned int min_number_of_particles = 1; for (ModelPart::ElementsContainerType::iterator el_it = rEulerianModelPart.ElementsBegin(); el_it != rEulerianModelPart.ElementsEnd(); el_it++) { el_it->SetValue(YOUNG_MODULUS,0.0); } for (ModelPart::NodesContainerType::iterator pparticle = rLagrangianModelPart.NodesBegin(); pparticle != rLagrangianModelPart.NodesEnd(); pparticle++) { pparticle->Set(TO_ERASE,false); pparticle->SetValue(NL_ITERATION_NUMBER,(rEulerianModelPart.ElementsBegin())->Id()); 
pparticle->SetValue(IS_ESCAPED,false); pparticle->SetValue(IS_VISITED,0); } //count particles that fall within an element Vector N; const int max_results = 10000; typename BinBasedFastPointLocator<TDim>::ResultContainerType results(max_results); const int nparticles = rLagrangianModelPart.Nodes().size(); //count particles within an element #pragma omp parallel for firstprivate(results,N) for (int i = 0; i < nparticles; i++) { ModelPart::NodesContainerType::iterator iparticle = rLagrangianModelPart.NodesBegin() + i; Node < 3 > ::Pointer pparticle = *(iparticle.base()); typename BinBasedFastPointLocator<TDim>::ResultIteratorType result_begin = results.begin(); Element::Pointer pelement; bool is_found = node_locator.FindPointOnMesh(pparticle->Coordinates(), N, pelement, result_begin, max_results); if (is_found == true) { const double particle_sign = iparticle->FastGetSolutionStepValue(IS_WATER); Geometry< Node < 3 > >& geom = pelement->GetGeometry(); bool is_scaped = CheckIfEscaped(geom,N,particle_sign); iparticle->SetValue(IS_ESCAPED,is_scaped); if( CheckElemDist(geom,max_seed_distance) )// if it is inside the 3h band { double& counter = pelement->GetValue(YOUNG_MODULUS); #pragma omp atomic counter += 1.0; iparticle->SetValue(NL_ITERATION_NUMBER , pelement->Id()); } else { if( is_scaped == false) //delete if it is not an escaped particle iparticle->Set(TO_ERASE,true); } } } //loop over close to the surface elements to ressed or delet particles if(TDim==2){ ReseedOrDelete2D(rEulerianModelPart, rLagrangianModelPart, max_seed_distance, min_edge_size); } else { const int max_num_ptr = 16;//70; const int num_ptr = 10;//56; const int min_num_ptr = 6;//40; MarkEraseExtraParticles3D(rEulerianModelPart, rLagrangianModelPart, max_seed_distance, min_edge_size, max_num_ptr, num_ptr); ReseedPoorElements3D(rEulerianModelPart, rLagrangianModelPart, max_seed_distance, min_edge_size, min_num_ptr, num_ptr ); FewReseedPoorElements3D(rEulerianModelPart, rLagrangianModelPart, max_seed_distance, min_edge_size, min_num_ptr, num_ptr ); } //perform the erase NodeEraseProcess(rLagrangianModelPart).Execute(); KRATOS_CATCH(""); } //********************************************************************************************** //********************************************************************************************** void VisualizationModelPart(ModelPart& rCompleteModelPart, ModelPart& rEulerianModelPart, ModelPart & rLagrangianModelPart) { KRATOS_TRY; rCompleteModelPart.Elements() = rEulerianModelPart.Elements(); rCompleteModelPart.Nodes() = rEulerianModelPart.Nodes(); unsigned int id; if(rEulerianModelPart.Nodes().size()!= 0) id = (rEulerianModelPart.Nodes().end() - 1)->Id() + 1; else id = 1; //preallocate the memory needed int tot_nodes = rEulerianModelPart.Nodes().size() + rLagrangianModelPart.Nodes().size(); rCompleteModelPart.Nodes().reserve( tot_nodes ); //note that here we renumber the nodes for (ModelPart::NodesContainerType::iterator node_it = rLagrangianModelPart.NodesBegin(); node_it != rLagrangianModelPart.NodesEnd(); node_it++) { rCompleteModelPart.AddNode(*(node_it.base())); node_it->SetId(id++); } KRATOS_CATCH(""); } //********************************************************************************** //********************************************************************************** void FindMaxMinEdgeSize(ModelPart& r_model_part, pybind11::list& maxmin) { KRATOS_TRY double max_edge = 0.0; double min_edge = 1000.0; for(ModelPart::ElementsContainerType::iterator it=r_model_part.ElementsBegin(); 
it!=r_model_part.ElementsEnd(); it++) { Geometry<Node<3> >&geom = it->GetGeometry(); double loc_h_max = 0.0; double loc_h_min = 1000.0; for(unsigned int i=0; i<TDim+1; i++) { double xc = geom[i].X(); double yc = geom[i].Y(); double zc = geom[i].Z(); for(unsigned int j=i+1; j<TDim+1; j++) { double x = geom[j].X(); double y = geom[j].Y(); double z = geom[j].Z(); double l = (x - xc)*(x - xc); l += (y - yc)*(y - yc); l += (z - zc)*(z - zc); if (l > loc_h_max) loc_h_max = l; else if(l < loc_h_min) loc_h_min = l; } } loc_h_max = sqrt(loc_h_max); loc_h_min = sqrt(loc_h_min); if(loc_h_max > max_edge ) max_edge = loc_h_max; if(loc_h_min < min_edge ) min_edge = loc_h_min; } // r_model_part.GetCommunicator().MaxAll(h_max); maxmin.append(max_edge); maxmin.append(min_edge); KRATOS_CATCH(""); } private: void CreateParticles3D(ModelPart& rEulerianModelPart, ModelPart& rLagrangianModelPart, BoundedMatrix<double, 56, 3 > pos, BoundedMatrix<double, 56, 4 > N, const double max_seed_distance, const double min_edge_size) { unsigned int id = (rEulerianModelPart.Nodes().end() - 1)->Id() + 1; for (ModelPart::ElementsContainerType::iterator el_it = rEulerianModelPart.ElementsBegin(); el_it != rEulerianModelPart.ElementsEnd(); el_it++) { Geometry<Node < 3 > >& geom = el_it->GetGeometry(); if(CheckElemDist(geom,max_seed_distance)) { ComputeGaussPointPositions3D(geom, pos, N); for (unsigned int i = 0; i < pos.size1(); i++) { int node_id = id++; Node < 3 > ::Pointer pnode = rLagrangianModelPart.CreateNewNode(node_id, pos(i, 0), pos(i, 1), pos(i, 2)); array_1d<double, 3 > & vel = pnode->FastGetSolutionStepValue(VELOCITY); noalias(vel) = ZeroVector(3); // double & p_dist = pnode->FastGetSolutionStepValue(DISTANCE); double p_distance = 0.0; for (unsigned int j = 0; j < TDim + 1; j++){ noalias(vel) += N(i, j) * geom[j].FastGetSolutionStepValue(VELOCITY); p_distance += N(i, j) * geom[j].FastGetSolutionStepValue(DISTANCE); } // Assign particle sign if(p_distance < 0.0) pnode->FastGetSolutionStepValue(IS_WATER)=-1.0; else if(p_distance > 0.0) pnode->FastGetSolutionStepValue(IS_WATER)= 1.0; pnode->Fix(IS_WATER); AssignParticleRadius(pnode,p_distance,min_edge_size); } } } } void FewCreateParticles3D(ModelPart& rEulerianModelPart, ModelPart& rLagrangianModelPart, BoundedMatrix<double, 10, 3 > pos, BoundedMatrix<double, 10, 4 > N, const double max_seed_distance, const double min_edge_size) { unsigned int id = (rEulerianModelPart.Nodes().end() - 1)->Id() + 1; for (ModelPart::ElementsContainerType::iterator el_it = rEulerianModelPart.ElementsBegin(); el_it != rEulerianModelPart.ElementsEnd(); el_it++) { Geometry<Node < 3 > >& geom = el_it->GetGeometry(); if(CheckElemDist(geom,max_seed_distance)) { FewComputeGaussPointPositions3D(geom, pos, N); for (unsigned int i = 0; i < pos.size1(); i++) { int node_id = id++; Node < 3 > ::Pointer pnode = rLagrangianModelPart.CreateNewNode(node_id, pos(i, 0), pos(i, 1), pos(i, 2)); array_1d<double, 3 > & vel = pnode->FastGetSolutionStepValue(VELOCITY); noalias(vel) = ZeroVector(3); // double & p_dist = pnode->FastGetSolutionStepValue(DISTANCE); double p_distance = 0.0; for (unsigned int j = 0; j < TDim + 1; j++){ noalias(vel) += N(i, j) * geom[j].FastGetSolutionStepValue(VELOCITY); p_distance += N(i, j) * geom[j].FastGetSolutionStepValue(DISTANCE); } // Assign particle sign if(p_distance < 0.0) pnode->FastGetSolutionStepValue(IS_WATER)=-1.0; else if(p_distance > 0.0) pnode->FastGetSolutionStepValue(IS_WATER)= 1.0; pnode->Fix(IS_WATER); AssignParticleRadius(pnode,p_distance,min_edge_size); } } } } 
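// ---------------------------------------------------------------------------
// Added commentary (not part of the original Kratos header), summarising the
// seeding conventions used by the CreateParticles*/FewCreateParticles* helpers
// in this class:
//  * particles are only seeded in elements inside the narrow band, i.e. where
//    CheckElemDist(geom, max_seed_distance) returns true;
//  * VELOCITY and DISTANCE are interpolated at each seeding position using the
//    shape-function values stored in N;
//  * the phase of a particle is stored in IS_WATER (-1 for a negative, +1 for
//    a positive interpolated distance) and then fixed;
//  * PARTICLE_RADIUS is assigned by AssignParticleRadius, which clamps the
//    interpolated |distance| to the range [0.1, 0.5] * min_edge_size;
//  * during reseeding the elemental YOUNG_MODULUS is reused as a particle
//    counter, and NL_ITERATION_NUMBER on a particle stores the Id of its host
//    element.
// ---------------------------------------------------------------------------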
void CreateParticles2D(ModelPart& rEulerianModelPart, ModelPart& rLagrangianModelPart, BoundedMatrix<double, 16, 3 > pos, BoundedMatrix<double, 16, 3 > N, const double max_seed_distance, const double min_edge_size) { unsigned int id = (rEulerianModelPart.Nodes().end() - 1)->Id() + 1; for (ModelPart::ElementsContainerType::iterator el_it = rEulerianModelPart.ElementsBegin(); el_it != rEulerianModelPart.ElementsEnd(); el_it++) { Geometry<Node < 3 > >& geom = el_it->GetGeometry(); if(CheckElemDist(geom,max_seed_distance)) { ComputeGaussPointPositions2D(geom, pos, N); for (unsigned int i = 0; i < pos.size1(); i++) { int node_id = id++; Node < 3 > ::Pointer pnode = rLagrangianModelPart.CreateNewNode(node_id, pos(i, 0), pos(i, 1), pos(i, 2)); array_1d<double, 3 > & vel = pnode->FastGetSolutionStepValue(VELOCITY); noalias(vel) = ZeroVector(3); // double & p_dist = pnode->FastGetSolutionStepValue(DISTANCE); double p_distance = 0.0; for (unsigned int j = 0; j < TDim + 1; j++){ noalias(vel) += N(i, j) * geom[j].FastGetSolutionStepValue(VELOCITY); p_distance += N(i, j) * geom[j].FastGetSolutionStepValue(DISTANCE); } // Assign particle sign if(p_distance < 0.0) pnode->FastGetSolutionStepValue(IS_WATER)=-1.0; else if(p_distance > 0.0) pnode->FastGetSolutionStepValue(IS_WATER)= 1.0; pnode->Fix(IS_WATER); AssignParticleRadius(pnode,p_distance,min_edge_size); } } } } void ReseedOrDelete2D(ModelPart& rEulerianModelPart, ModelPart& rLagrangianModelPart, const double max_seed_distance, const double min_edge_size) { int id; if (rLagrangianModelPart.Nodes().size() != 0) id = (rLagrangianModelPart.NodesEnd() - 1)->Id(); else id = 1; const int nelements = rEulerianModelPart.Elements().size(); const int nparticles = rLagrangianModelPart.Nodes().size(); BoundedMatrix<double, 16, 3 > coord; BoundedMatrix<double, 16, 3 > NN; // #pragma omp parallel for firstprivate(NN,coord) for (int ne = 0; ne < nelements; ne++) { ModelPart::ElementsContainerType::iterator ielem = rEulerianModelPart.ElementsBegin() + ne; Geometry<Node < 3 > >& geom = ielem->GetGeometry(); int n_ptr = int(ielem->GetValue(YOUNG_MODULUS)); if( n_ptr < 12 && CheckElemDist(geom,max_seed_distance) )//ressed in close to surface and poor element { //compute cooordinates //RandomPariclePosition(geom, coord, NN); ComputeGaussPointPositions2D(geom, coord, NN); int aux_n_ptr = n_ptr; int cnt = 0; while( aux_n_ptr<16 ){ aux_n_ptr++; //COORDINATES int node_id = id++; Node < 3 > ::Pointer pnode = rLagrangianModelPart.CreateNewNode(node_id, coord(cnt,0), coord(cnt,1), coord(cnt,2)); array_1d<double, 3 > & vel = pnode->FastGetSolutionStepValue(VELOCITY); noalias(vel) = ZeroVector(3); // double & p_dist = pnode->FastGetSolutionStepValue(DISTANCE); double p_distance = 0.0; for (unsigned int j = 0; j < TDim + 1; j++){ noalias(vel) += NN(cnt,j) * geom[j].FastGetSolutionStepValue(VELOCITY); p_distance += NN(cnt,j) * geom[j].FastGetSolutionStepValue(DISTANCE); } // Assign particle sign if(p_distance < 0.0) pnode->FastGetSolutionStepValue(IS_WATER)=-1.0; else if(p_distance > 0.0) pnode->FastGetSolutionStepValue(IS_WATER)= 1.0; pnode->Fix(IS_WATER); AssignParticleRadius(pnode,p_distance,min_edge_size); cnt++; } } else if( n_ptr > 20 && CheckElemDist(geom,max_seed_distance) ){ const int ele_id = ielem->Id(); ModelPart::NodesContainerType element_particles; element_particles.reserve(64); //save particle list for (int kk = 0; kk < nparticles; kk++) { ModelPart::NodesContainerType::iterator iparticle = rLagrangianModelPart.NodesBegin() + kk; const int ptr_nest = 
iparticle->GetValue(NL_ITERATION_NUMBER); if( ptr_nest==ele_id ) { iparticle->SetValue(SCALE, 0); element_particles.push_back( *(iparticle.base()) ); } } //loop to order based on the radius ModelPart::NodesContainerType::iterator ptr_begin = element_particles.begin(); unsigned int ptr_elem_size = element_particles.size(); for(unsigned int ii=0; ii < ptr_elem_size; ii++) for(unsigned int jj=ii+1; jj < ptr_elem_size; jj++) { double ii_radi = (ptr_begin + ii)->FastGetSolutionStepValue(PARTICLE_RADIUS); double jj_radi = (ptr_begin + jj)->FastGetSolutionStepValue(PARTICLE_RADIUS); (ii_radi>=jj_radi) ? (ptr_begin + ii)->GetValue(SCALE)+=1 : (ptr_begin + jj)->GetValue(SCALE)+=1; } //delete extra nodes int aux_ptr_elem_size = int(ptr_elem_size); while(aux_ptr_elem_size>16) { for(unsigned int ii=0; ii < ptr_elem_size; ii++){ bool swt = false; for( int kkk = ptr_elem_size; kkk>0; kkk-- ) if( (ptr_begin + ii)->GetValue(SCALE) == kkk && (ptr_begin + ii)->GetValue(IS_VISITED) == 0){ bool is_escaped = (ptr_begin + ii)->GetValue(IS_ESCAPED); if( is_escaped==false ) (ptr_begin + ii)->Set(TO_ERASE,true);//CHECK ESCASPED NODES (ptr_begin + ii)->SetValue(IS_VISITED,1); swt = true; break; } if(swt ) break; } aux_ptr_elem_size -= 1; } } } } void MarkEraseExtraParticles3D(ModelPart& rEulerianModelPart, ModelPart& rLagrangianModelPart, const double max_seed_distance, const double min_edge_size, const int max_num_particle, const int num_particle) { // int id; // if (rLagrangianModelPart.Nodes().size() != 0) // id = (rLagrangianModelPart.NodesEnd() - 1)->Id(); // else // id = 1; const int nelements = rEulerianModelPart.Elements().size(); const int nparticles = rLagrangianModelPart.Nodes().size(); std::vector< WeakPointerVector< Node< 3> > > particle_of_element(nelements); // particle_of_element.reserve(nelements); std::vector< unsigned int > num_ptr_in_elem(nelements,0); // num_ptr_in_elem.reserve(nelements); //loop on elements to resrve the size of particle in element list #pragma omp parallel for firstprivate(num_ptr_in_elem) for (int ne = 0; ne < nelements; ne++) { ModelPart::ElementsContainerType::iterator ielem = rEulerianModelPart.ElementsBegin() + ne; int n_ptr = int(ielem->GetValue(YOUNG_MODULUS)); unsigned int ele_id = ielem->Id(); num_ptr_in_elem[ele_id-1] = n_ptr; if(n_ptr > max_num_particle) particle_of_element[ele_id-1].reserve(n_ptr); } //loop on particles to push_back particle related to full elements for (int kk = 0; kk < nparticles; kk++) { ModelPart::NodesContainerType::iterator iparticle = rLagrangianModelPart.NodesBegin() + kk; const int ptr_nest = iparticle->GetValue(NL_ITERATION_NUMBER); if( num_ptr_in_elem[ptr_nest-1] > static_cast<unsigned int>(max_num_particle) ) particle_of_element[ptr_nest-1].push_back( *(iparticle.base()) ); } //loop over elements to reoreder the particle radius in over populated elements #pragma omp parallel for firstprivate(particle_of_element) for( int ii = 0; ii< static_cast<int>(particle_of_element.size()); ++ii) { if(particle_of_element[ii].size() > static_cast<unsigned int>(max_num_particle)) { //sort std::sort(particle_of_element[ii].ptr_begin(), particle_of_element[ii].ptr_end(), RadiusCompare() ); //delete extra nodes WeakPointerVector< Node< 3> >::iterator ele_pt_ptr = particle_of_element[ii].begin(); const unsigned int this_ele_ptr = particle_of_element[ii].size(); int aux_ptr_elem_size = this_ele_ptr; for( unsigned int ij = 0; (ij < this_ele_ptr && aux_ptr_elem_size > num_particle); ++ij) { bool is_escaped = (ele_pt_ptr + ij)->GetValue(IS_ESCAPED); if( 
is_escaped==false ){ (ele_pt_ptr + ij)->Set(TO_ERASE,true); aux_ptr_elem_size--; } } } } } struct RadiusCompare{ template<class TRefrenceType> bool operator()(const TRefrenceType ptr_a, const TRefrenceType ptr_b) { double a_radi = ptr_a.lock()->FastGetSolutionStepValue(PARTICLE_RADIUS); double b_radi = ptr_b.lock()->FastGetSolutionStepValue(PARTICLE_RADIUS); return (a_radi > b_radi); } }; void ReseedPoorElements3D(ModelPart& rEulerianModelPart, ModelPart& rLagrangianModelPart, const double max_seed_distance, const double min_edge_size, const int min_num_particle, const int num_particle) { int id; if (rLagrangianModelPart.Nodes().size() != 0) id = (rLagrangianModelPart.NodesEnd() - 1)->Id(); else id = 1; const int nelements = rEulerianModelPart.Elements().size(); // const int nparticles = rLagrangianModelPart.Nodes().size(); BoundedMatrix<double, 56, 3 > coord; BoundedMatrix<double, 56, 4 > NN; // #pragma omp parallel for firstprivate(NN,coord) for (int ne = 0; ne < nelements; ne++) { ModelPart::ElementsContainerType::iterator ielem = rEulerianModelPart.ElementsBegin() + ne; Geometry<Node < 3 > >& geom = ielem->GetGeometry(); int n_ptr = int(ielem->GetValue(YOUNG_MODULUS)); if( n_ptr < min_num_particle && CheckElemDist(geom,max_seed_distance) )//ressed in close to surface and poor element { //compute cooordinates //RandomPariclePosition(geom, coord, NN); ComputeGaussPointPositions3D(geom, coord, NN); int aux_n_ptr = n_ptr; int cnt = 0; while( aux_n_ptr < num_particle ){ aux_n_ptr++; //COORDINATES int node_id = id++; Node < 3 > ::Pointer pnode = rLagrangianModelPart.CreateNewNode(node_id, coord(cnt,0), coord(cnt,1), coord(cnt,2)); array_1d<double, 3 > & vel = pnode->FastGetSolutionStepValue(VELOCITY); noalias(vel) = ZeroVector(3); // double & p_dist = pnode->FastGetSolutionStepValue(DISTANCE); double p_distance = 0.0; for (unsigned int j = 0; j < TDim + 1; j++){ noalias(vel) += NN(cnt,j) * geom[j].FastGetSolutionStepValue(VELOCITY); p_distance += NN(cnt,j) * geom[j].FastGetSolutionStepValue(DISTANCE); } // Assign particle sign if(p_distance < 0.0) pnode->FastGetSolutionStepValue(IS_WATER)=-1.0; else if(p_distance > 0.0) pnode->FastGetSolutionStepValue(IS_WATER)= 1.0; pnode->Fix(IS_WATER); AssignParticleRadius(pnode,p_distance,min_edge_size); cnt++; } } } } void FewReseedPoorElements3D(ModelPart& rEulerianModelPart, ModelPart& rLagrangianModelPart, const double max_seed_distance, const double min_edge_size, const int min_num_particle, const int num_particle) { int id; if (rLagrangianModelPart.Nodes().size() != 0) id = (rLagrangianModelPart.NodesEnd() - 1)->Id(); else id = 1; const int nelements = rEulerianModelPart.Elements().size(); // const int nparticles = rLagrangianModelPart.Nodes().size(); BoundedMatrix<double, 10, 3 > coord; BoundedMatrix<double, 10, 4 > NN; // #pragma omp parallel for firstprivate(NN,coord) for (int ne = 0; ne < nelements; ne++) { ModelPart::ElementsContainerType::iterator ielem = rEulerianModelPart.ElementsBegin() + ne; Geometry<Node < 3 > >& geom = ielem->GetGeometry(); int n_ptr = int(ielem->GetValue(YOUNG_MODULUS)); if( n_ptr < min_num_particle && CheckElemDist(geom,max_seed_distance) )//ressed in close to surface and poor element { //compute cooordinates //RandomPariclePosition(geom, coord, NN); FewComputeGaussPointPositions3D(geom, coord, NN); int aux_n_ptr = n_ptr; int cnt = 0; while( aux_n_ptr < num_particle ){ aux_n_ptr++; //COORDINATES int node_id = id++; Node < 3 > ::Pointer pnode = rLagrangianModelPart.CreateNewNode(node_id, coord(cnt,0), coord(cnt,1), 
coord(cnt,2)); array_1d<double, 3 > & vel = pnode->FastGetSolutionStepValue(VELOCITY); noalias(vel) = ZeroVector(3); // double & p_dist = pnode->FastGetSolutionStepValue(DISTANCE); double p_distance = 0.0; for (unsigned int j = 0; j < TDim + 1; j++){ noalias(vel) += NN(cnt,j) * geom[j].FastGetSolutionStepValue(VELOCITY); p_distance += NN(cnt,j) * geom[j].FastGetSolutionStepValue(DISTANCE); } // Assign particle sign if(p_distance < 0.0) pnode->FastGetSolutionStepValue(IS_WATER)=-1.0; else if(p_distance > 0.0) pnode->FastGetSolutionStepValue(IS_WATER)= 1.0; pnode->Fix(IS_WATER); AssignParticleRadius(pnode,p_distance,min_edge_size); cnt++; } } } } // void ReseedOrDelete3D(ModelPart& rEulerianModelPart, // ModelPart& rLagrangianModelPart, // const double max_seed_distance, // const double min_edge_size) // { // int id; // if (rLagrangianModelPart.Nodes().size() != 0) // id = (rLagrangianModelPart.NodesEnd() - 1)->Id(); // else // id = 1; // const int nelements = rEulerianModelPart.Elements().size(); // const int nparticles = rLagrangianModelPart.Nodes().size(); // // // // BoundedMatrix<double, 56, 3 > coord; // BoundedMatrix<double, 56, 4 > NN; // // #pragma omp parallel for firstprivate(NN,coord) // for (int ne = 0; ne < nelements; ne++) // { // ModelPart::ElementsContainerType::iterator ielem = rEulerianModelPart.ElementsBegin() + ne; // Geometry<Node < 3 > >& geom = ielem->GetGeometry(); // int n_ptr = int(ielem->GetValue(YOUNG_MODULUS)); // // if( n_ptr < 42 && CheckElemDist(geom,max_seed_distance) )//ressed in close to surface and poor element // { // //compute cooordinates // //RandomPariclePosition(geom, coord, NN); // ComputeGaussPointPositions3D(geom, coord, NN); // int aux_n_ptr = n_ptr; // int cnt = 0; // while( aux_n_ptr<56 ){ // aux_n_ptr++; // //COORDINATES // int node_id = id++; // Node < 3 > ::Pointer pnode = rLagrangianModelPart.CreateNewNode(node_id, coord(cnt,0), coord(cnt,1), coord(cnt,2)); // // array_1d<double, 3 > & vel = pnode->FastGetSolutionStepValue(VELOCITY); // noalias(vel) = ZeroVector(3); // // // double & p_dist = pnode->FastGetSolutionStepValue(DISTANCE); // double p_distance = 0.0; // for (unsigned int j = 0; j < TDim + 1; j++){ // noalias(vel) += NN(cnt,j) * geom[j].FastGetSolutionStepValue(VELOCITY); // p_distance += NN(cnt,j) * geom[j].FastGetSolutionStepValue(DISTANCE); // } // // // Assign particle sign // if(p_distance < 0.0) // pnode->FastGetSolutionStepValue(IS_WATER)=-1.0; // else if(p_distance > 0.0) // pnode->FastGetSolutionStepValue(IS_WATER)= 1.0; // // pnode->Fix(IS_WATER); // // AssignParticleRadius(pnode,p_distance,min_edge_size); // // cnt++; // } // } // else if( n_ptr > 70 && CheckElemDist(geom,max_seed_distance) ){ // const int ele_id = ielem->Id(); // ModelPart::NodesContainerType element_particles; // element_particles.reserve(64); // //save particle list // for (int kk = 0; kk < nparticles; kk++) // { // ModelPart::NodesContainerType::iterator iparticle = rLagrangianModelPart.NodesBegin() + kk; // // const int ptr_nest = iparticle->GetValue(NL_ITERATION_NUMBER); // if( ptr_nest==ele_id ) // { // iparticle->SetValue(SCALE, 0); // element_particles.push_back( *(iparticle.base()) ); // } // } // // //loop to order based on the radius // ModelPart::NodesContainerType::iterator ptr_begin = element_particles.begin(); // unsigned int ptr_elem_size = element_particles.size(); // // for(unsigned int ii=0; ii < ptr_elem_size; ii++) // for(unsigned int jj=ii+1; jj < ptr_elem_size; jj++) // { // double ii_radi = (ptr_begin + 
ii)->FastGetSolutionStepValue(PARTICLE_RADIUS); // double jj_radi = (ptr_begin + jj)->FastGetSolutionStepValue(PARTICLE_RADIUS); // // (ii_radi>=jj_radi) ? (ptr_begin + ii)->GetValue(SCALE)+=1 : (ptr_begin + jj)->GetValue(SCALE)+=1; // // } // //delete extra nodes // int aux_ptr_elem_size = int(ptr_elem_size); // while(aux_ptr_elem_size>56) // { // for(unsigned int ii=0; ii < ptr_elem_size; ii++){ // bool swt = false; // for( int kkk = ptr_elem_size; kkk>0; kkk-- ) // if( (ptr_begin + ii)->GetValue(SCALE) == kkk && (ptr_begin + ii)->GetValue(IS_VISITED) == 0){ // bool is_escaped = (ptr_begin + ii)->GetValue(IS_ESCAPED); // if( is_escaped==false ) // (ptr_begin + ii)->Set(TO_ERASE,true);//CHECK ESCASPED NODES // (ptr_begin + ii)->SetValue(IS_VISITED,1); // swt = true; // break; // } // if(swt ) // break; // } // aux_ptr_elem_size -= 1; // } // } // // // } // // } void ComputeGaussPointPositions2D(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 16, 3 > & pos, BoundedMatrix<double, 16, 3 > & N) { //lower diagonal terms double ypos = 1.0 / 5.0; int pos_counter = 0; for (unsigned int i = 0; i < 4; i++) { double xpos = 1.0 / 8.0; for (unsigned int j = 0; j < (7-2*i); j++) { double N1 = xpos; double N2 = ypos; double N3 = 1.0 - xpos - ypos; pos(pos_counter, 0) = N1 * geom[0].X() + N2 * geom[1].X() + N3 * geom[2].X(); pos(pos_counter, 1) = N1 * geom[0].Y() + N2 * geom[1].Y() + N3 * geom[2].Y(); pos(pos_counter, 2) = N1 * geom[0].Z() + N2 * geom[1].Z() + N3 * geom[2].Z(); N(pos_counter, 0) = N1; N(pos_counter, 1) = N2; N(pos_counter, 2) = N3; xpos += 1.0 / 8.0; pos_counter += 1; } ypos += 1.0 / 5.0; } } void ComputeGaussPointPositions3D(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 56, 3 > & pos, BoundedMatrix<double, 56, 4 > & N) { int pos_counter = 0; const double one_seventh = 1.0/6.5; double close_point = 1.0/20; double zpos = close_point; for (unsigned int kk = 0; kk < 6; kk++) { // double y_div = 1.0/(7 - kk); double ypos = close_point;//one_seventh;// y_div * (1.0 - zpos);//one_seventh for (unsigned int i = 0; i < (6-kk); i++) { // double x_div = 1.0/(7 - kk);// -i double xpos = close_point;//one_seventh;//x_div* (1.0 - ypos) * (1.0 - zpos);//one_seventh for (unsigned int j = 0; j < (6-kk-i); j++) { double N1 = xpos; double N2 = ypos; double N3 = zpos; double N4 = 1.0 - xpos - ypos - zpos; pos(pos_counter, 0) = N1 * geom[0].X() + N2 * geom[1].X() + N3 * geom[2].X() + N4 * geom[3].X(); pos(pos_counter, 1) = N1 * geom[0].Y() + N2 * geom[1].Y() + N3 * geom[2].Y() + N4 * geom[3].Y(); pos(pos_counter, 2) = N1 * geom[0].Z() + N2 * geom[1].Z() + N3 * geom[2].Z() + N4 * geom[3].Z(); N(pos_counter, 0) = N1; N(pos_counter, 1) = N2; N(pos_counter, 2) = N3; N(pos_counter, 3) = N4; xpos += one_seventh;//x_div * (1.0 - ypos) * (1.0 - zpos); //one_seventh pos_counter += 1; } ypos += one_seventh;//y_div * (1.0 - zpos);//one_seventh } zpos += one_seventh; } } void FewComputeGaussPointPositions3D(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 10, 3 > & pos, BoundedMatrix<double, 10, 4 > & N) { int pos_counter = 0; const double one_third = 1.0/2.5; double close_point = 1.0/20; double zpos = close_point; for (unsigned int kk = 0; kk < 3; kk++) { // double y_div = 1.0/(7 - kk); double ypos = close_point;//one_seventh;// y_div * (1.0 - zpos);//one_seventh for (unsigned int i = 0; i < (3-kk); i++) { // double x_div = 1.0/(7 - kk);// -i double xpos = close_point;//one_seventh;//x_div* (1.0 - ypos) * (1.0 - zpos);//one_seventh for (unsigned int j = 0; j < (3-kk-i); j++) { double N1 = xpos; double N2 
= ypos; double N3 = zpos; double N4 = 1.0 - xpos - ypos - zpos; pos(pos_counter, 0) = N1 * geom[0].X() + N2 * geom[1].X() + N3 * geom[2].X() + N4 * geom[3].X(); pos(pos_counter, 1) = N1 * geom[0].Y() + N2 * geom[1].Y() + N3 * geom[2].Y() + N4 * geom[3].Y(); pos(pos_counter, 2) = N1 * geom[0].Z() + N2 * geom[1].Z() + N3 * geom[2].Z() + N4 * geom[3].Z(); N(pos_counter, 0) = N1; N(pos_counter, 1) = N2; N(pos_counter, 2) = N3; N(pos_counter, 3) = N4; xpos += one_third;//x_div * (1.0 - ypos) * (1.0 - zpos); //one_seventh pos_counter += 1; } ypos += one_third;//y_div * (1.0 - zpos);//one_seventh } zpos += one_third; } } void RandomPariclePosition(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 16, 3 > & coord, BoundedMatrix<double, 16, 3 > & N_shape) { for(int ii=0;ii<16;ii++){ double xi = rand()* ( 1.0 / ( RAND_MAX + 1.0 ) ); double etta = (1.0 - xi) * ( rand()* ( 1.0 / ( RAND_MAX + 1.0 ) ) ); double zetta = 1.0 - (xi + etta); coord(ii,0) = xi * geom[0].X() + etta * geom[1].X() + zetta * geom[2].X(); coord(ii,1) = xi * geom[0].Y() + etta * geom[1].Y() + zetta * geom[2].Y(); coord(ii,2) = xi * geom[0].Z() + etta * geom[1].Z() + zetta * geom[2].Z(); N_shape(ii,0) = xi; N_shape(ii,1) = etta; N_shape(ii,1) = zetta; } } static int CheckElemDist(Geometry< Node < 3 > >& geom, const double max_dist) { for(unsigned int ii=0; ii < geom.size(); ++ii) { double nd_dist = geom[ii].FastGetSolutionStepValue(DISTANCE); if (fabs(nd_dist) < max_dist) return 1; } return 0; } bool CheckIfEscaped(Geometry< Node < 3 > >& geom, const array_1d<double, 3 > & N_shape,const double particle_sign) { double dist = N_shape[0]*geom[0].FastGetSolutionStepValue(DISTANCE); for(unsigned int ii=1; ii < geom.size(); ++ii) dist += N_shape[ii]*geom[ii].FastGetSolutionStepValue(DISTANCE); if( dist*particle_sign < 0.0) return true; else return false; } void AssignParticleRadius(Node < 3 > ::Pointer nd_ptr, double& p_dist,const double min_edge_size) { if( fabs(p_dist) < 0.1*min_edge_size) nd_ptr->FastGetSolutionStepValue(PARTICLE_RADIUS) = 0.1*min_edge_size; else if(fabs(p_dist) > 0.5*min_edge_size) nd_ptr->FastGetSolutionStepValue(PARTICLE_RADIUS) = 0.5*min_edge_size; else nd_ptr->FastGetSolutionStepValue(PARTICLE_RADIUS) = fabs(p_dist); } // unsigned int time_seed() // { // time_t now = time ( 0 ); // unsigned char *p = (unsigned char *)&now; // unsigned int seed = 0; // size_t i; // // for ( i = 0; i < sizeof now; i++ ) // seed = seed * ( UCHAR_MAX + 2U ) + p[i]; // // return seed; // } }; } #endif // KRATOS_LAGRANGIAN_PARTICLES_UTILITIES_INCLUDED defined
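As a side note, the adaptive sub-stepping used in StreamlineMove above can be summarised by a small standalone helper. The sketch below is not part of the Kratos sources and the function name is hypothetical; it only restates the rule visible in the code: the number of sub-steps is floor(2*dt*|v|/h), clamped to the range [3, 20], and each particle is then advected with small_dt = dt / subdivisions.

#include <algorithm>
#include <cmath>

// Hypothetical helper mirroring the sub-step selection in StreamlineMove.
unsigned int ComputeSubdivisions(const double dt, const double velocity_norm, const double h)
{
    const unsigned int min_subdivisions = 3;
    const unsigned int max_subdivisions = 20;
    // local CFL-like estimate of how many steps are needed
    unsigned int subdivisions = static_cast<unsigned int>(std::floor(2.0 * dt * velocity_norm / h));
    subdivisions = std::max(subdivisions, min_subdivisions);
    subdivisions = std::min(subdivisions, max_subdivisions);
    return subdivisions; // the particle is then advected with small_dt = dt / subdivisions
}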
// // Project Name: Kratos // Last Modified by: $Author: rrossi $ // Date: $Date: 2007-03-06 10:30:31 $ // Revision: $Revision: 1.2 $ // // #if !defined(KRATOS_PARTICLE_LEVELSET_UTILITIES_INCLUDED ) #define KRATOS_PARTICLE_LEVELSET_UTILITIES_INCLUDED // System includes #include <string> #include <iostream> #include <algorithm> // External includes #include "pybind11/pybind11.h" // Project includes #include "includes/define.h" #include "includes/model_part.h" #include "includes/deprecated_variables.h" #include "includes/node.h" #include "utilities/geometry_utilities.h" #include "geometries/tetrahedra_3d_4.h" #include "thermo_mechanical_application.h" #include "spatial_containers/spatial_containers.h" #include "utilities/timer.h" #include "processes/node_erase_process.h" #include "utilities/binbased_fast_point_locator.h" #include "utilities/timer.h" // #include <boost/random/linear_congruential.hpp> // #include <boost/random/uniform_int.hpp> // #include <boost/random/uniform_real.hpp> // #include <boost/random/variate_generator.hpp> // #include <boost/generator_iterator.hpp> // #include <tr1/random> #include <time.h> namespace Kratos { template<std::size_t TDim> class ParticleLevelSetUtils { public: KRATOS_CLASS_POINTER_DEFINITION(ParticleLevelSetUtils<TDim>); //********************************************************************************************** //********************************************************************************************** //function to seed a list of new nodes void Seed(ModelPart& rEulerianModelPart, ModelPart& rLagrangianModelPart, const double max_seed_distance, const double min_edge_size) { KRATOS_TRY; rLagrangianModelPart.Nodes().clear(); unsigned int ele_id = 1; for (ModelPart::ElementsContainerType::iterator el_it = rEulerianModelPart.ElementsBegin(); el_it != rEulerianModelPart.ElementsEnd(); el_it++) { el_it->SetId(ele_id); ele_id++; } if(TDim==2){ BoundedMatrix<double, 16, 3 > pos; BoundedMatrix<double, 16, 3 > N; CreateParticles2D(rEulerianModelPart,rLagrangianModelPart,pos,N,max_seed_distance,min_edge_size); } else { // BoundedMatrix<double, 56, 3 > pos; // BoundedMatrix<double, 56, 4 > N; // CreateParticles3D(rEulerianModelPart,rLagrangianModelPart,pos,N,max_seed_distance,min_edge_size); BoundedMatrix<double, 10, 3 > pos; BoundedMatrix<double, 10, 4 > N; FewCreateParticles3D(rEulerianModelPart,rLagrangianModelPart,pos,N,max_seed_distance,min_edge_size); } for (ModelPart::NodesContainerType::iterator node_it = rLagrangianModelPart.NodesBegin(); node_it != rLagrangianModelPart.NodesEnd(); node_it++) { node_it->FastGetSolutionStepValue(VELOCITY, 1) = node_it->FastGetSolutionStepValue(VELOCITY); // node_it->FastGetSolutionStepValue(DISTANCE, 1) = node_it->FastGetSolutionStepValue(DISTANCE); } KRATOS_CATCH(""); } //********************************************************************************************** //********************************************************************************************** void StreamlineMove(const double dt, ModelPart& rEulerianModelPart, ModelPart& rLagrangianModelPart, BinBasedFastPointLocator<TDim>& node_locator) { KRATOS_TRY array_1d<double, 3 > veulerian; Vector N; const int max_results = 10000; typename BinBasedFastPointLocator<TDim>::ResultContainerType results(max_results); const int nparticles = rLagrangianModelPart.Nodes().size(); //KRATOS_WATCH("551") for (int i = 0; i < nparticles; i++) { unsigned int substep = 0; unsigned int subdivisions = 1; double small_dt = dt; while(substep++ < subdivisions) { 
ModelPart::NodesContainerType::iterator iparticle = rLagrangianModelPart.NodesBegin() + i; (iparticle)->Set(TO_ERASE, true); Node < 3 > ::Pointer pparticle = *(iparticle.base()); typename BinBasedFastPointLocator<TDim>::ResultIteratorType result_begin = results.begin(); Element::Pointer pelement; // KRATOS_WATCH("561") bool is_found = node_locator.FindPointOnMesh(pparticle->Coordinates(), N, pelement, result_begin, max_results); // KRATOS_WATCH("564") if (is_found == true) { (pparticle)->GetValue(IS_VISITED) = 1; Geometry< Node < 3 > >& geom = pelement->GetGeometry(); noalias(veulerian) = N[0] * geom[0].FastGetSolutionStepValue(VELOCITY); for (unsigned int k = 1; k < geom.size(); k++) noalias(veulerian) += N[k] * geom[k].FastGetSolutionStepValue(VELOCITY); //compute adaptive subdivisions if(substep == 1) { //compute h double h = N[0] * geom[0].FastGetSolutionStepValue(NODAL_H); for (unsigned int k = 1; k < geom.size(); k++) h += N[k] * geom[k].FastGetSolutionStepValue(NODAL_H); //compute number of subdivisions needed const unsigned int min_subdivisions = 3; const unsigned int max_subdivisions = 20; double v = norm_2(veulerian); subdivisions = double(floor(2*dt*v/h)); subdivisions = (subdivisions<min_subdivisions) ? min_subdivisions : (subdivisions>max_subdivisions) ? max_subdivisions : subdivisions; //compute subdivisions time step small_dt = dt / subdivisions; // KRATOS_WATCH(subdivisions) } //move according to the streamline array_1d<double, 3 > & disp = (iparticle)->FastGetSolutionStepValue(DISPLACEMENT); noalias(disp) += small_dt*veulerian; (pparticle)->Set(TO_ERASE, false); // KRATOS_WATCH("585") //update position noalias(iparticle->Coordinates()) = iparticle->GetInitialPosition(); noalias(iparticle->Coordinates()) += iparticle->FastGetSolutionStepValue(DISPLACEMENT); (iparticle)->GetValue(IS_VISITED) = 0; //KRATOS_WATCH("619") } } } KRATOS_CATCH("") } //********************************************************************************************** //********************************************************************************************** void ParticleLevelSetCorrection(ModelPart& rEulerianModelPart, ModelPart& rLagrangianModelPart, BinBasedFastPointLocator<TDim>& node_locator) { KRATOS_TRY //Initilize NAGATIVE_DISTANCE & POSETIVE_DISTANCE const int nnodes= rEulerianModelPart.Nodes().size(); for (int jj = 0; jj < nnodes; jj++) { ModelPart::NodesContainerType::iterator node_itr = rEulerianModelPart.NodesBegin() + jj; const double nd_dist = node_itr->FastGetSolutionStepValue(DISTANCE); node_itr->SetValue(POSETIVE_DISTANCE,nd_dist ); node_itr->SetValue(NAGATIVE_DISTANCE,nd_dist ); } //loop over particles double particle_dist= 0.0; Vector N; const int max_results = 10000; typename BinBasedFastPointLocator<TDim>::ResultContainerType results(max_results); const int nparticles = rLagrangianModelPart.Nodes().size(); for (int i = 0; i < nparticles; i++) { ModelPart::NodesContainerType::iterator particle_itr = rLagrangianModelPart.NodesBegin() + i; Node < 3 > ::Pointer p_pointer = *(particle_itr.base()); typename BinBasedFastPointLocator<TDim>::ResultIteratorType result_begin = results.begin(); Element::Pointer pelement; bool is_found = node_locator.FindPointOnMesh(p_pointer->Coordinates(), N, pelement, result_begin, max_results); if (is_found == true) { Geometry< Node < 3 > >& geom = pelement->GetGeometry(); //interpolate the particle distance particle_dist = N[0] * geom[0].FastGetSolutionStepValue(DISTANCE); for (unsigned int k = 1; k < geom.size(); k++) particle_dist += N[k] * 
geom[k].FastGetSolutionStepValue(DISTANCE); //check if correction is needed const double p_sign = particle_itr->FastGetSolutionStepValue(IS_WATER); const double p_radi = particle_itr->FastGetSolutionStepValue(PARTICLE_RADIUS); if( particle_dist*p_sign < 0.0 && fabs(particle_dist) > p_radi) { double p_xx = particle_itr->X(); double p_yy = particle_itr->Y(); double p_zz = particle_itr->Z(); // const Variable<double> posetive_negative_dist_var; /* if( p_sign == -1.0 ) posetive_negative_dist_var = NAGATIVE_DISTANCE; else if( p_sign == 1.0 ) posetive_negative_dist_var = POSETIVE_DISTANCE; */ for (unsigned int kk = 1; kk < geom.size(); kk++){ p_xx -= geom[kk].X(); p_yy -= geom[kk].Y(); p_zz -= geom[kk].Z(); double dd = p_xx*p_xx + p_yy*p_yy + p_zz*p_zz; dd = sqrt(dd); double dist_to_particle = p_sign * (p_radi - dd); //correction due to particle distance and sign geom[kk].SetLock(); if( p_sign == 1.0){ double& pos_distance = geom[kk].GetValue(POSETIVE_DISTANCE); if ( dist_to_particle > pos_distance) pos_distance = dist_to_particle;} else if( p_sign == -1.0){ double& neg_distance = geom[kk].GetValue(NAGATIVE_DISTANCE); if ( dist_to_particle < neg_distance) neg_distance = dist_to_particle; } geom[kk].UnSetLock(); } } } }//end of loop over particles //final correction, choose between NAGATIVE_DISTANCE & POSETIVE_DISTANCE // const int nnodes= rEulerianModelPart.Nodes().size(); for (int jj = 0; jj < nnodes; jj++) { ModelPart::NodesContainerType::iterator node_itr = rEulerianModelPart.NodesBegin() + jj; double posetive = node_itr->GetValue(POSETIVE_DISTANCE); double negative = node_itr->GetValue(NAGATIVE_DISTANCE); double & nd_dist = node_itr->FastGetSolutionStepValue(DISTANCE); if ( posetive != negative){ if( fabs(posetive) < fabs(negative) ) nd_dist = posetive; else nd_dist = negative; node_itr->SetValue(POSETIVE_DISTANCE,nd_dist ); node_itr->SetValue(NAGATIVE_DISTANCE,nd_dist ); } } KRATOS_CATCH("") } //********************************************************************************************** //********************************************************************************************** void ResetParticleRadius(const double min_edge_length, ModelPart& rEulerianModelPart, ModelPart& rLagrangianModelPart, BinBasedFastPointLocator<TDim>& node_locator) { KRATOS_TRY; double particle_dist = 0.0; Vector N; const int max_results = 10000; typename BinBasedFastPointLocator<TDim>::ResultContainerType results(max_results); const int nparticles = rLagrangianModelPart.Nodes().size(); for (int i = 0; i < nparticles; i++) { ModelPart::NodesContainerType::iterator particle_itr = rLagrangianModelPart.NodesBegin() + i; Node < 3 > ::Pointer p_pointer = *(particle_itr.base()); typename BinBasedFastPointLocator<TDim>::ResultIteratorType result_begin = results.begin(); Element::Pointer pelement; bool is_found = node_locator.FindPointOnMesh(p_pointer->Coordinates(), N, pelement, result_begin, max_results); if (is_found == true) { Geometry< Node < 3 > >& geom = pelement->GetGeometry(); //interpolate the particle distance particle_dist = N[0] * geom[0].FastGetSolutionStepValue(DISTANCE); for (unsigned int k = 1; k < geom.size(); k++) particle_dist += N[k] * geom[k].FastGetSolutionStepValue(DISTANCE); if( fabs(particle_dist) < 0.1*min_edge_length) particle_itr->FastGetSolutionStepValue(PARTICLE_RADIUS) = 0.1*min_edge_length; else if(fabs(particle_dist) > 0.5*min_edge_length) particle_itr->FastGetSolutionStepValue(PARTICLE_RADIUS) = 0.5*min_edge_length; else particle_itr->FastGetSolutionStepValue(PARTICLE_RADIUS) = 
fabs(particle_dist); } } KRATOS_CATCH("") } //********************************************************************************************** //********************************************************************************************** void ParticleReseeding(ModelPart& rEulerianModelPart, ModelPart& rLagrangianModelPart, BinBasedFastPointLocator<TDim>& node_locator, const double max_seed_distance, const double min_edge_size) { KRATOS_TRY; //generate a tree with the position of the lagrangian nodes // typedef Node < 3 > PointType; // typedef Node < 3 > ::Pointer PointTypePointer; //unsigned int min_number_of_particles = 1; for (ModelPart::ElementsContainerType::iterator el_it = rEulerianModelPart.ElementsBegin(); el_it != rEulerianModelPart.ElementsEnd(); el_it++) { el_it->SetValue(YOUNG_MODULUS,0.0); } for (ModelPart::NodesContainerType::iterator pparticle = rLagrangianModelPart.NodesBegin(); pparticle != rLagrangianModelPart.NodesEnd(); pparticle++) { pparticle->Set(TO_ERASE,false); pparticle->SetValue(NL_ITERATION_NUMBER,(rEulerianModelPart.ElementsBegin())->Id()); pparticle->SetValue(IS_ESCAPED,false); pparticle->SetValue(IS_VISITED,0); } //count particles that fall within an element Vector N; const int max_results = 10000; typename BinBasedFastPointLocator<TDim>::ResultContainerType results(max_results); const int nparticles = rLagrangianModelPart.Nodes().size(); //count particles within an element for (int i = 0; i < nparticles; i++) { ModelPart::NodesContainerType::iterator iparticle = rLagrangianModelPart.NodesBegin() + i; Node < 3 > ::Pointer pparticle = *(iparticle.base()); typename BinBasedFastPointLocator<TDim>::ResultIteratorType result_begin = results.begin(); Element::Pointer pelement; bool is_found = node_locator.FindPointOnMesh(pparticle->Coordinates(), N, pelement, result_begin, max_results); if (is_found == true) { const double particle_sign = iparticle->FastGetSolutionStepValue(IS_WATER); Geometry< Node < 3 > >& geom = pelement->GetGeometry(); bool is_scaped = CheckIfEscaped(geom,N,particle_sign); iparticle->SetValue(IS_ESCAPED,is_scaped); if( CheckElemDist(geom,max_seed_distance) )// if it is inside the 3h band { double& counter = pelement->GetValue(YOUNG_MODULUS); counter += 1.0; iparticle->SetValue(NL_ITERATION_NUMBER , pelement->Id()); } else { if( is_scaped == false) //delete if it is not an escaped particle iparticle->Set(TO_ERASE,true); } } } //loop over close to the surface elements to ressed or delet particles if(TDim==2){ ReseedOrDelete2D(rEulerianModelPart, rLagrangianModelPart, max_seed_distance, min_edge_size); } else { const int max_num_ptr = 16;//70; const int num_ptr = 10;//56; const int min_num_ptr = 6;//40; MarkEraseExtraParticles3D(rEulerianModelPart, rLagrangianModelPart, max_seed_distance, min_edge_size, max_num_ptr, num_ptr); ReseedPoorElements3D(rEulerianModelPart, rLagrangianModelPart, max_seed_distance, min_edge_size, min_num_ptr, num_ptr ); FewReseedPoorElements3D(rEulerianModelPart, rLagrangianModelPart, max_seed_distance, min_edge_size, min_num_ptr, num_ptr ); } //perform the erase NodeEraseProcess(rLagrangianModelPart).Execute(); KRATOS_CATCH(""); } //********************************************************************************************** //********************************************************************************************** void VisualizationModelPart(ModelPart& rCompleteModelPart, ModelPart& rEulerianModelPart, ModelPart & rLagrangianModelPart) { KRATOS_TRY; rCompleteModelPart.Elements() = rEulerianModelPart.Elements(); 
rCompleteModelPart.Nodes() = rEulerianModelPart.Nodes(); unsigned int id; if(rEulerianModelPart.Nodes().size()!= 0) id = (rEulerianModelPart.Nodes().end() - 1)->Id() + 1; else id = 1; //preallocate the memory needed int tot_nodes = rEulerianModelPart.Nodes().size() + rLagrangianModelPart.Nodes().size(); rCompleteModelPart.Nodes().reserve( tot_nodes ); //note that here we renumber the nodes for (ModelPart::NodesContainerType::iterator node_it = rLagrangianModelPart.NodesBegin(); node_it != rLagrangianModelPart.NodesEnd(); node_it++) { rCompleteModelPart.AddNode(*(node_it.base())); node_it->SetId(id++); } KRATOS_CATCH(""); } //********************************************************************************** //********************************************************************************** void FindMaxMinEdgeSize(ModelPart& r_model_part, pybind11::list& maxmin) { KRATOS_TRY double max_edge = 0.0; double min_edge = 1000.0; for(ModelPart::ElementsContainerType::iterator it=r_model_part.ElementsBegin(); it!=r_model_part.ElementsEnd(); it++) { Geometry<Node<3> >&geom = it->GetGeometry(); double loc_h_max = 0.0; double loc_h_min = 1000.0; for(unsigned int i=0; i<TDim+1; i++) { double xc = geom[i].X(); double yc = geom[i].Y(); double zc = geom[i].Z(); for(unsigned int j=i+1; j<TDim+1; j++) { double x = geom[j].X(); double y = geom[j].Y(); double z = geom[j].Z(); double l = (x - xc)*(x - xc); l += (y - yc)*(y - yc); l += (z - zc)*(z - zc); if (l > loc_h_max) loc_h_max = l; else if(l < loc_h_min) loc_h_min = l; } } loc_h_max = sqrt(loc_h_max); loc_h_min = sqrt(loc_h_min); if(loc_h_max > max_edge ) max_edge = loc_h_max; if(loc_h_min < min_edge ) min_edge = loc_h_min; } // r_model_part.GetCommunicator().MaxAll(h_max); maxmin.append(max_edge); maxmin.append(min_edge); KRATOS_CATCH(""); } private: void CreateParticles3D(ModelPart& rEulerianModelPart, ModelPart& rLagrangianModelPart, BoundedMatrix<double, 56, 3 > pos, BoundedMatrix<double, 56, 4 > N, const double max_seed_distance, const double min_edge_size) { unsigned int id = (rEulerianModelPart.Nodes().end() - 1)->Id() + 1; for (ModelPart::ElementsContainerType::iterator el_it = rEulerianModelPart.ElementsBegin(); el_it != rEulerianModelPart.ElementsEnd(); el_it++) { Geometry<Node < 3 > >& geom = el_it->GetGeometry(); if(CheckElemDist(geom,max_seed_distance)) { ComputeGaussPointPositions3D(geom, pos, N); for (unsigned int i = 0; i < pos.size1(); i++) { int node_id = id++; Node < 3 > ::Pointer pnode = rLagrangianModelPart.CreateNewNode(node_id, pos(i, 0), pos(i, 1), pos(i, 2)); array_1d<double, 3 > & vel = pnode->FastGetSolutionStepValue(VELOCITY); noalias(vel) = ZeroVector(3); // double & p_dist = pnode->FastGetSolutionStepValue(DISTANCE); double p_distance = 0.0; for (unsigned int j = 0; j < TDim + 1; j++){ noalias(vel) += N(i, j) * geom[j].FastGetSolutionStepValue(VELOCITY); p_distance += N(i, j) * geom[j].FastGetSolutionStepValue(DISTANCE); } // Assign particle sign if(p_distance < 0.0) pnode->FastGetSolutionStepValue(IS_WATER)=-1.0; else if(p_distance > 0.0) pnode->FastGetSolutionStepValue(IS_WATER)= 1.0; pnode->Fix(IS_WATER); AssignParticleRadius(pnode,p_distance,min_edge_size); } } } } void FewCreateParticles3D(ModelPart& rEulerianModelPart, ModelPart& rLagrangianModelPart, BoundedMatrix<double, 10, 3 > pos, BoundedMatrix<double, 10, 4 > N, const double max_seed_distance, const double min_edge_size) { unsigned int id = (rEulerianModelPart.Nodes().end() - 1)->Id() + 1; for (ModelPart::ElementsContainerType::iterator el_it = 
rEulerianModelPart.ElementsBegin(); el_it != rEulerianModelPart.ElementsEnd(); el_it++) { Geometry<Node < 3 > >& geom = el_it->GetGeometry(); if(CheckElemDist(geom,max_seed_distance)) { FewComputeGaussPointPositions3D(geom, pos, N); for (unsigned int i = 0; i < pos.size1(); i++) { int node_id = id++; Node < 3 > ::Pointer pnode = rLagrangianModelPart.CreateNewNode(node_id, pos(i, 0), pos(i, 1), pos(i, 2)); array_1d<double, 3 > & vel = pnode->FastGetSolutionStepValue(VELOCITY); noalias(vel) = ZeroVector(3); // double & p_dist = pnode->FastGetSolutionStepValue(DISTANCE); double p_distance = 0.0; for (unsigned int j = 0; j < TDim + 1; j++){ noalias(vel) += N(i, j) * geom[j].FastGetSolutionStepValue(VELOCITY); p_distance += N(i, j) * geom[j].FastGetSolutionStepValue(DISTANCE); } // Assign particle sign if(p_distance < 0.0) pnode->FastGetSolutionStepValue(IS_WATER)=-1.0; else if(p_distance > 0.0) pnode->FastGetSolutionStepValue(IS_WATER)= 1.0; pnode->Fix(IS_WATER); AssignParticleRadius(pnode,p_distance,min_edge_size); } } } } void CreateParticles2D(ModelPart& rEulerianModelPart, ModelPart& rLagrangianModelPart, BoundedMatrix<double, 16, 3 > pos, BoundedMatrix<double, 16, 3 > N, const double max_seed_distance, const double min_edge_size) { unsigned int id = (rEulerianModelPart.Nodes().end() - 1)->Id() + 1; for (ModelPart::ElementsContainerType::iterator el_it = rEulerianModelPart.ElementsBegin(); el_it != rEulerianModelPart.ElementsEnd(); el_it++) { Geometry<Node < 3 > >& geom = el_it->GetGeometry(); if(CheckElemDist(geom,max_seed_distance)) { ComputeGaussPointPositions2D(geom, pos, N); for (unsigned int i = 0; i < pos.size1(); i++) { int node_id = id++; Node < 3 > ::Pointer pnode = rLagrangianModelPart.CreateNewNode(node_id, pos(i, 0), pos(i, 1), pos(i, 2)); array_1d<double, 3 > & vel = pnode->FastGetSolutionStepValue(VELOCITY); noalias(vel) = ZeroVector(3); // double & p_dist = pnode->FastGetSolutionStepValue(DISTANCE); double p_distance = 0.0; for (unsigned int j = 0; j < TDim + 1; j++){ noalias(vel) += N(i, j) * geom[j].FastGetSolutionStepValue(VELOCITY); p_distance += N(i, j) * geom[j].FastGetSolutionStepValue(DISTANCE); } // Assign particle sign if(p_distance < 0.0) pnode->FastGetSolutionStepValue(IS_WATER)=-1.0; else if(p_distance > 0.0) pnode->FastGetSolutionStepValue(IS_WATER)= 1.0; pnode->Fix(IS_WATER); AssignParticleRadius(pnode,p_distance,min_edge_size); } } } } void ReseedOrDelete2D(ModelPart& rEulerianModelPart, ModelPart& rLagrangianModelPart, const double max_seed_distance, const double min_edge_size) { int id; if (rLagrangianModelPart.Nodes().size() != 0) id = (rLagrangianModelPart.NodesEnd() - 1)->Id(); else id = 1; const int nelements = rEulerianModelPart.Elements().size(); const int nparticles = rLagrangianModelPart.Nodes().size(); BoundedMatrix<double, 16, 3 > coord; BoundedMatrix<double, 16, 3 > NN; // for (int ne = 0; ne < nelements; ne++) { ModelPart::ElementsContainerType::iterator ielem = rEulerianModelPart.ElementsBegin() + ne; Geometry<Node < 3 > >& geom = ielem->GetGeometry(); int n_ptr = int(ielem->GetValue(YOUNG_MODULUS)); if( n_ptr < 12 && CheckElemDist(geom,max_seed_distance) )//ressed in close to surface and poor element { //compute cooordinates //RandomPariclePosition(geom, coord, NN); ComputeGaussPointPositions2D(geom, coord, NN); int aux_n_ptr = n_ptr; int cnt = 0; while( aux_n_ptr<16 ){ aux_n_ptr++; //COORDINATES int node_id = id++; Node < 3 > ::Pointer pnode = rLagrangianModelPart.CreateNewNode(node_id, coord(cnt,0), coord(cnt,1), coord(cnt,2)); 
array_1d<double, 3 > & vel = pnode->FastGetSolutionStepValue(VELOCITY); noalias(vel) = ZeroVector(3); // double & p_dist = pnode->FastGetSolutionStepValue(DISTANCE); double p_distance = 0.0; for (unsigned int j = 0; j < TDim + 1; j++){ noalias(vel) += NN(cnt,j) * geom[j].FastGetSolutionStepValue(VELOCITY); p_distance += NN(cnt,j) * geom[j].FastGetSolutionStepValue(DISTANCE); } // Assign particle sign if(p_distance < 0.0) pnode->FastGetSolutionStepValue(IS_WATER)=-1.0; else if(p_distance > 0.0) pnode->FastGetSolutionStepValue(IS_WATER)= 1.0; pnode->Fix(IS_WATER); AssignParticleRadius(pnode,p_distance,min_edge_size); cnt++; } } else if( n_ptr > 20 && CheckElemDist(geom,max_seed_distance) ){ const int ele_id = ielem->Id(); ModelPart::NodesContainerType element_particles; element_particles.reserve(64); //save particle list for (int kk = 0; kk < nparticles; kk++) { ModelPart::NodesContainerType::iterator iparticle = rLagrangianModelPart.NodesBegin() + kk; const int ptr_nest = iparticle->GetValue(NL_ITERATION_NUMBER); if( ptr_nest==ele_id ) { iparticle->SetValue(SCALE, 0); element_particles.push_back( *(iparticle.base()) ); } } //loop to order based on the radius ModelPart::NodesContainerType::iterator ptr_begin = element_particles.begin(); unsigned int ptr_elem_size = element_particles.size(); for(unsigned int ii=0; ii < ptr_elem_size; ii++) for(unsigned int jj=ii+1; jj < ptr_elem_size; jj++) { double ii_radi = (ptr_begin + ii)->FastGetSolutionStepValue(PARTICLE_RADIUS); double jj_radi = (ptr_begin + jj)->FastGetSolutionStepValue(PARTICLE_RADIUS); (ii_radi>=jj_radi) ? (ptr_begin + ii)->GetValue(SCALE)+=1 : (ptr_begin + jj)->GetValue(SCALE)+=1; } //delete extra nodes int aux_ptr_elem_size = int(ptr_elem_size); while(aux_ptr_elem_size>16) { for(unsigned int ii=0; ii < ptr_elem_size; ii++){ bool swt = false; for( int kkk = ptr_elem_size; kkk>0; kkk-- ) if( (ptr_begin + ii)->GetValue(SCALE) == kkk && (ptr_begin + ii)->GetValue(IS_VISITED) == 0){ bool is_escaped = (ptr_begin + ii)->GetValue(IS_ESCAPED); if( is_escaped==false ) (ptr_begin + ii)->Set(TO_ERASE,true);//CHECK ESCASPED NODES (ptr_begin + ii)->SetValue(IS_VISITED,1); swt = true; break; } if(swt ) break; } aux_ptr_elem_size -= 1; } } } } void MarkEraseExtraParticles3D(ModelPart& rEulerianModelPart, ModelPart& rLagrangianModelPart, const double max_seed_distance, const double min_edge_size, const int max_num_particle, const int num_particle) { // int id; // if (rLagrangianModelPart.Nodes().size() != 0) // id = (rLagrangianModelPart.NodesEnd() - 1)->Id(); // else // id = 1; const int nelements = rEulerianModelPart.Elements().size(); const int nparticles = rLagrangianModelPart.Nodes().size(); std::vector< WeakPointerVector< Node< 3> > > particle_of_element(nelements); // particle_of_element.reserve(nelements); std::vector< unsigned int > num_ptr_in_elem(nelements,0); // num_ptr_in_elem.reserve(nelements); //loop on elements to resrve the size of particle in element list for (int ne = 0; ne < nelements; ne++) { ModelPart::ElementsContainerType::iterator ielem = rEulerianModelPart.ElementsBegin() + ne; int n_ptr = int(ielem->GetValue(YOUNG_MODULUS)); unsigned int ele_id = ielem->Id(); num_ptr_in_elem[ele_id-1] = n_ptr; if(n_ptr > max_num_particle) particle_of_element[ele_id-1].reserve(n_ptr); } //loop on particles to push_back particle related to full elements for (int kk = 0; kk < nparticles; kk++) { ModelPart::NodesContainerType::iterator iparticle = rLagrangianModelPart.NodesBegin() + kk; const int ptr_nest = 
iparticle->GetValue(NL_ITERATION_NUMBER); if( num_ptr_in_elem[ptr_nest-1] > static_cast<unsigned int>(max_num_particle) ) particle_of_element[ptr_nest-1].push_back( *(iparticle.base()) ); } //loop over elements to reoreder the particle radius in over populated elements for( int ii = 0; ii< static_cast<int>(particle_of_element.size()); ++ii) { if(particle_of_element[ii].size() > static_cast<unsigned int>(max_num_particle)) { //sort std::sort(particle_of_element[ii].ptr_begin(), particle_of_element[ii].ptr_end(), RadiusCompare() ); //delete extra nodes WeakPointerVector< Node< 3> >::iterator ele_pt_ptr = particle_of_element[ii].begin(); const unsigned int this_ele_ptr = particle_of_element[ii].size(); int aux_ptr_elem_size = this_ele_ptr; for( unsigned int ij = 0; (ij < this_ele_ptr && aux_ptr_elem_size > num_particle); ++ij) { bool is_escaped = (ele_pt_ptr + ij)->GetValue(IS_ESCAPED); if( is_escaped==false ){ (ele_pt_ptr + ij)->Set(TO_ERASE,true); aux_ptr_elem_size--; } } } } } struct RadiusCompare{ template<class TRefrenceType> bool operator()(const TRefrenceType ptr_a, const TRefrenceType ptr_b) { double a_radi = ptr_a.lock()->FastGetSolutionStepValue(PARTICLE_RADIUS); double b_radi = ptr_b.lock()->FastGetSolutionStepValue(PARTICLE_RADIUS); return (a_radi > b_radi); } }; void ReseedPoorElements3D(ModelPart& rEulerianModelPart, ModelPart& rLagrangianModelPart, const double max_seed_distance, const double min_edge_size, const int min_num_particle, const int num_particle) { int id; if (rLagrangianModelPart.Nodes().size() != 0) id = (rLagrangianModelPart.NodesEnd() - 1)->Id(); else id = 1; const int nelements = rEulerianModelPart.Elements().size(); // const int nparticles = rLagrangianModelPart.Nodes().size(); BoundedMatrix<double, 56, 3 > coord; BoundedMatrix<double, 56, 4 > NN; // for (int ne = 0; ne < nelements; ne++) { ModelPart::ElementsContainerType::iterator ielem = rEulerianModelPart.ElementsBegin() + ne; Geometry<Node < 3 > >& geom = ielem->GetGeometry(); int n_ptr = int(ielem->GetValue(YOUNG_MODULUS)); if( n_ptr < min_num_particle && CheckElemDist(geom,max_seed_distance) )//ressed in close to surface and poor element { //compute cooordinates //RandomPariclePosition(geom, coord, NN); ComputeGaussPointPositions3D(geom, coord, NN); int aux_n_ptr = n_ptr; int cnt = 0; while( aux_n_ptr < num_particle ){ aux_n_ptr++; //COORDINATES int node_id = id++; Node < 3 > ::Pointer pnode = rLagrangianModelPart.CreateNewNode(node_id, coord(cnt,0), coord(cnt,1), coord(cnt,2)); array_1d<double, 3 > & vel = pnode->FastGetSolutionStepValue(VELOCITY); noalias(vel) = ZeroVector(3); // double & p_dist = pnode->FastGetSolutionStepValue(DISTANCE); double p_distance = 0.0; for (unsigned int j = 0; j < TDim + 1; j++){ noalias(vel) += NN(cnt,j) * geom[j].FastGetSolutionStepValue(VELOCITY); p_distance += NN(cnt,j) * geom[j].FastGetSolutionStepValue(DISTANCE); } // Assign particle sign if(p_distance < 0.0) pnode->FastGetSolutionStepValue(IS_WATER)=-1.0; else if(p_distance > 0.0) pnode->FastGetSolutionStepValue(IS_WATER)= 1.0; pnode->Fix(IS_WATER); AssignParticleRadius(pnode,p_distance,min_edge_size); cnt++; } } } } void FewReseedPoorElements3D(ModelPart& rEulerianModelPart, ModelPart& rLagrangianModelPart, const double max_seed_distance, const double min_edge_size, const int min_num_particle, const int num_particle) { int id; if (rLagrangianModelPart.Nodes().size() != 0) id = (rLagrangianModelPart.NodesEnd() - 1)->Id(); else id = 1; const int nelements = rEulerianModelPart.Elements().size(); // const int 
nparticles = rLagrangianModelPart.Nodes().size(); BoundedMatrix<double, 10, 3 > coord; BoundedMatrix<double, 10, 4 > NN; // for (int ne = 0; ne < nelements; ne++) { ModelPart::ElementsContainerType::iterator ielem = rEulerianModelPart.ElementsBegin() + ne; Geometry<Node < 3 > >& geom = ielem->GetGeometry(); int n_ptr = int(ielem->GetValue(YOUNG_MODULUS)); if( n_ptr < min_num_particle && CheckElemDist(geom,max_seed_distance) )//ressed in close to surface and poor element { //compute cooordinates //RandomPariclePosition(geom, coord, NN); FewComputeGaussPointPositions3D(geom, coord, NN); int aux_n_ptr = n_ptr; int cnt = 0; while( aux_n_ptr < num_particle ){ aux_n_ptr++; //COORDINATES int node_id = id++; Node < 3 > ::Pointer pnode = rLagrangianModelPart.CreateNewNode(node_id, coord(cnt,0), coord(cnt,1), coord(cnt,2)); array_1d<double, 3 > & vel = pnode->FastGetSolutionStepValue(VELOCITY); noalias(vel) = ZeroVector(3); // double & p_dist = pnode->FastGetSolutionStepValue(DISTANCE); double p_distance = 0.0; for (unsigned int j = 0; j < TDim + 1; j++){ noalias(vel) += NN(cnt,j) * geom[j].FastGetSolutionStepValue(VELOCITY); p_distance += NN(cnt,j) * geom[j].FastGetSolutionStepValue(DISTANCE); } // Assign particle sign if(p_distance < 0.0) pnode->FastGetSolutionStepValue(IS_WATER)=-1.0; else if(p_distance > 0.0) pnode->FastGetSolutionStepValue(IS_WATER)= 1.0; pnode->Fix(IS_WATER); AssignParticleRadius(pnode,p_distance,min_edge_size); cnt++; } } } } // void ReseedOrDelete3D(ModelPart& rEulerianModelPart, // ModelPart& rLagrangianModelPart, // const double max_seed_distance, // const double min_edge_size) // { // int id; // if (rLagrangianModelPart.Nodes().size() != 0) // id = (rLagrangianModelPart.NodesEnd() - 1)->Id(); // else // id = 1; // const int nelements = rEulerianModelPart.Elements().size(); // const int nparticles = rLagrangianModelPart.Nodes().size(); // // // // BoundedMatrix<double, 56, 3 > coord; // BoundedMatrix<double, 56, 4 > NN; // // // for (int ne = 0; ne < nelements; ne++) // { // ModelPart::ElementsContainerType::iterator ielem = rEulerianModelPart.ElementsBegin() + ne; // Geometry<Node < 3 > >& geom = ielem->GetGeometry(); // int n_ptr = int(ielem->GetValue(YOUNG_MODULUS)); // // if( n_ptr < 42 && CheckElemDist(geom,max_seed_distance) )//ressed in close to surface and poor element // { // //compute cooordinates // //RandomPariclePosition(geom, coord, NN); // ComputeGaussPointPositions3D(geom, coord, NN); // int aux_n_ptr = n_ptr; // int cnt = 0; // while( aux_n_ptr<56 ){ // aux_n_ptr++; // //COORDINATES // int node_id = id++; // Node < 3 > ::Pointer pnode = rLagrangianModelPart.CreateNewNode(node_id, coord(cnt,0), coord(cnt,1), coord(cnt,2)); // // array_1d<double, 3 > & vel = pnode->FastGetSolutionStepValue(VELOCITY); // noalias(vel) = ZeroVector(3); // // // double & p_dist = pnode->FastGetSolutionStepValue(DISTANCE); // double p_distance = 0.0; // for (unsigned int j = 0; j < TDim + 1; j++){ // noalias(vel) += NN(cnt,j) * geom[j].FastGetSolutionStepValue(VELOCITY); // p_distance += NN(cnt,j) * geom[j].FastGetSolutionStepValue(DISTANCE); // } // // // Assign particle sign // if(p_distance < 0.0) // pnode->FastGetSolutionStepValue(IS_WATER)=-1.0; // else if(p_distance > 0.0) // pnode->FastGetSolutionStepValue(IS_WATER)= 1.0; // // pnode->Fix(IS_WATER); // // AssignParticleRadius(pnode,p_distance,min_edge_size); // // cnt++; // } // } // else if( n_ptr > 70 && CheckElemDist(geom,max_seed_distance) ){ // const int ele_id = ielem->Id(); // ModelPart::NodesContainerType 
element_particles; // element_particles.reserve(64); // //save particle list // for (int kk = 0; kk < nparticles; kk++) // { // ModelPart::NodesContainerType::iterator iparticle = rLagrangianModelPart.NodesBegin() + kk; // // const int ptr_nest = iparticle->GetValue(NL_ITERATION_NUMBER); // if( ptr_nest==ele_id ) // { // iparticle->SetValue(SCALE, 0); // element_particles.push_back( *(iparticle.base()) ); // } // } // // //loop to order based on the radius // ModelPart::NodesContainerType::iterator ptr_begin = element_particles.begin(); // unsigned int ptr_elem_size = element_particles.size(); // // for(unsigned int ii=0; ii < ptr_elem_size; ii++) // for(unsigned int jj=ii+1; jj < ptr_elem_size; jj++) // { // double ii_radi = (ptr_begin + ii)->FastGetSolutionStepValue(PARTICLE_RADIUS); // double jj_radi = (ptr_begin + jj)->FastGetSolutionStepValue(PARTICLE_RADIUS); // // (ii_radi>=jj_radi) ? (ptr_begin + ii)->GetValue(SCALE)+=1 : (ptr_begin + jj)->GetValue(SCALE)+=1; // // } // //delete extra nodes // int aux_ptr_elem_size = int(ptr_elem_size); // while(aux_ptr_elem_size>56) // { // for(unsigned int ii=0; ii < ptr_elem_size; ii++){ // bool swt = false; // for( int kkk = ptr_elem_size; kkk>0; kkk-- ) // if( (ptr_begin + ii)->GetValue(SCALE) == kkk && (ptr_begin + ii)->GetValue(IS_VISITED) == 0){ // bool is_escaped = (ptr_begin + ii)->GetValue(IS_ESCAPED); // if( is_escaped==false ) // (ptr_begin + ii)->Set(TO_ERASE,true);//CHECK ESCASPED NODES // (ptr_begin + ii)->SetValue(IS_VISITED,1); // swt = true; // break; // } // if(swt ) // break; // } // aux_ptr_elem_size -= 1; // } // } // // // } // // } void ComputeGaussPointPositions2D(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 16, 3 > & pos, BoundedMatrix<double, 16, 3 > & N) { //lower diagonal terms double ypos = 1.0 / 5.0; int pos_counter = 0; for (unsigned int i = 0; i < 4; i++) { double xpos = 1.0 / 8.0; for (unsigned int j = 0; j < (7-2*i); j++) { double N1 = xpos; double N2 = ypos; double N3 = 1.0 - xpos - ypos; pos(pos_counter, 0) = N1 * geom[0].X() + N2 * geom[1].X() + N3 * geom[2].X(); pos(pos_counter, 1) = N1 * geom[0].Y() + N2 * geom[1].Y() + N3 * geom[2].Y(); pos(pos_counter, 2) = N1 * geom[0].Z() + N2 * geom[1].Z() + N3 * geom[2].Z(); N(pos_counter, 0) = N1; N(pos_counter, 1) = N2; N(pos_counter, 2) = N3; xpos += 1.0 / 8.0; pos_counter += 1; } ypos += 1.0 / 5.0; } } void ComputeGaussPointPositions3D(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 56, 3 > & pos, BoundedMatrix<double, 56, 4 > & N) { int pos_counter = 0; const double one_seventh = 1.0/6.5; double close_point = 1.0/20; double zpos = close_point; for (unsigned int kk = 0; kk < 6; kk++) { // double y_div = 1.0/(7 - kk); double ypos = close_point;//one_seventh;// y_div * (1.0 - zpos);//one_seventh for (unsigned int i = 0; i < (6-kk); i++) { // double x_div = 1.0/(7 - kk);// -i double xpos = close_point;//one_seventh;//x_div* (1.0 - ypos) * (1.0 - zpos);//one_seventh for (unsigned int j = 0; j < (6-kk-i); j++) { double N1 = xpos; double N2 = ypos; double N3 = zpos; double N4 = 1.0 - xpos - ypos - zpos; pos(pos_counter, 0) = N1 * geom[0].X() + N2 * geom[1].X() + N3 * geom[2].X() + N4 * geom[3].X(); pos(pos_counter, 1) = N1 * geom[0].Y() + N2 * geom[1].Y() + N3 * geom[2].Y() + N4 * geom[3].Y(); pos(pos_counter, 2) = N1 * geom[0].Z() + N2 * geom[1].Z() + N3 * geom[2].Z() + N4 * geom[3].Z(); N(pos_counter, 0) = N1; N(pos_counter, 1) = N2; N(pos_counter, 2) = N3; N(pos_counter, 3) = N4; xpos += one_seventh;//x_div * (1.0 - ypos) * (1.0 - zpos); //one_seventh 
pos_counter += 1; } ypos += one_seventh;//y_div * (1.0 - zpos);//one_seventh } zpos += one_seventh; } } void FewComputeGaussPointPositions3D(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 10, 3 > & pos, BoundedMatrix<double, 10, 4 > & N) { int pos_counter = 0; const double one_third = 1.0/2.5; double close_point = 1.0/20; double zpos = close_point; for (unsigned int kk = 0; kk < 3; kk++) { // double y_div = 1.0/(7 - kk); double ypos = close_point;//one_seventh;// y_div * (1.0 - zpos);//one_seventh for (unsigned int i = 0; i < (3-kk); i++) { // double x_div = 1.0/(7 - kk);// -i double xpos = close_point;//one_seventh;//x_div* (1.0 - ypos) * (1.0 - zpos);//one_seventh for (unsigned int j = 0; j < (3-kk-i); j++) { double N1 = xpos; double N2 = ypos; double N3 = zpos; double N4 = 1.0 - xpos - ypos - zpos; pos(pos_counter, 0) = N1 * geom[0].X() + N2 * geom[1].X() + N3 * geom[2].X() + N4 * geom[3].X(); pos(pos_counter, 1) = N1 * geom[0].Y() + N2 * geom[1].Y() + N3 * geom[2].Y() + N4 * geom[3].Y(); pos(pos_counter, 2) = N1 * geom[0].Z() + N2 * geom[1].Z() + N3 * geom[2].Z() + N4 * geom[3].Z(); N(pos_counter, 0) = N1; N(pos_counter, 1) = N2; N(pos_counter, 2) = N3; N(pos_counter, 3) = N4; xpos += one_third;//x_div * (1.0 - ypos) * (1.0 - zpos); //one_seventh pos_counter += 1; } ypos += one_third;//y_div * (1.0 - zpos);//one_seventh } zpos += one_third; } } void RandomPariclePosition(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 16, 3 > & coord, BoundedMatrix<double, 16, 3 > & N_shape) { for(int ii=0;ii<16;ii++){ double xi = rand()* ( 1.0 / ( RAND_MAX + 1.0 ) ); double etta = (1.0 - xi) * ( rand()* ( 1.0 / ( RAND_MAX + 1.0 ) ) ); double zetta = 1.0 - (xi + etta); coord(ii,0) = xi * geom[0].X() + etta * geom[1].X() + zetta * geom[2].X(); coord(ii,1) = xi * geom[0].Y() + etta * geom[1].Y() + zetta * geom[2].Y(); coord(ii,2) = xi * geom[0].Z() + etta * geom[1].Z() + zetta * geom[2].Z(); N_shape(ii,0) = xi; N_shape(ii,1) = etta; N_shape(ii,1) = zetta; } } static int CheckElemDist(Geometry< Node < 3 > >& geom, const double max_dist) { for(unsigned int ii=0; ii < geom.size(); ++ii) { double nd_dist = geom[ii].FastGetSolutionStepValue(DISTANCE); if (fabs(nd_dist) < max_dist) return 1; } return 0; } bool CheckIfEscaped(Geometry< Node < 3 > >& geom, const array_1d<double, 3 > & N_shape,const double particle_sign) { double dist = N_shape[0]*geom[0].FastGetSolutionStepValue(DISTANCE); for(unsigned int ii=1; ii < geom.size(); ++ii) dist += N_shape[ii]*geom[ii].FastGetSolutionStepValue(DISTANCE); if( dist*particle_sign < 0.0) return true; else return false; } void AssignParticleRadius(Node < 3 > ::Pointer nd_ptr, double& p_dist,const double min_edge_size) { if( fabs(p_dist) < 0.1*min_edge_size) nd_ptr->FastGetSolutionStepValue(PARTICLE_RADIUS) = 0.1*min_edge_size; else if(fabs(p_dist) > 0.5*min_edge_size) nd_ptr->FastGetSolutionStepValue(PARTICLE_RADIUS) = 0.5*min_edge_size; else nd_ptr->FastGetSolutionStepValue(PARTICLE_RADIUS) = fabs(p_dist); } // unsigned int time_seed() // { // time_t now = time ( 0 ); // unsigned char *p = (unsigned char *)&now; // unsigned int seed = 0; // size_t i; // // for ( i = 0; i < sizeof now; i++ ) // seed = seed * ( UCHAR_MAX + 2U ) + p[i]; // // return seed; // } }; } #endif // KRATOS_LAGRANGIAN_PARTICLES_UTILITIES_INCLUDED defined
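/*
 * Hedged illustration, not part of the Kratos file above: the seeding routines
 * (ComputeGaussPointPositions3D and relatives) place particles inside a
 * tetrahedron by picking barycentric weights N1..N4 with N1+N2+N3+N4 = 1 and
 * then interpolating position, VELOCITY and DISTANCE with those same weights.
 * The standalone sketch below reproduces only that interpolation pattern;
 * SimpleNode and InterpolateInTet are illustrative names, not Kratos API.
 */
#include <array>

struct SimpleNode { double x, y, z, distance; };

// Interpolate a point and a signed distance at barycentric weights N[0..3],
// mirroring "pos = N1*geom[0] + N2*geom[1] + N3*geom[2] + N4*geom[3]" above.
inline void InterpolateInTet(const std::array<SimpleNode, 4>& tet,
                             const std::array<double, 4>& N,
                             double out_xyz[3], double& out_distance)
{
    out_xyz[0] = out_xyz[1] = out_xyz[2] = 0.0;
    out_distance = 0.0;
    for (int j = 0; j < 4; ++j)
    {
        out_xyz[0]   += N[j] * tet[j].x;
        out_xyz[1]   += N[j] * tet[j].y;
        out_xyz[2]   += N[j] * tet[j].z;
        out_distance += N[j] * tet[j].distance;
    }
}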
// // Project Name: Kratos // Last Modified by: $Author: rrossi $ // Date: $Date: 2007-03-06 10:30:31 $ // Revision: $Revision: 1.2 $ // // #if !defined(KRATOS_PARTICLE_LEVELSET_UTILITIES_INCLUDED ) #define KRATOS_PARTICLE_LEVELSET_UTILITIES_INCLUDED // System includes #include <string> #include <iostream> #include <algorithm> // External includes #include "pybind11/pybind11.h" // Project includes #include "includes/define.h" #include "includes/model_part.h" #include "includes/deprecated_variables.h" #include "includes/node.h" #include "utilities/geometry_utilities.h" #include "geometries/tetrahedra_3d_4.h" #include "thermo_mechanical_application.h" #include "spatial_containers/spatial_containers.h" #include "utilities/timer.h" #include "processes/node_erase_process.h" #include "utilities/binbased_fast_point_locator.h" #include "utilities/timer.h" // #include <boost/random/linear_congruential.hpp> // #include <boost/random/uniform_int.hpp> // #include <boost/random/uniform_real.hpp> // #include <boost/random/variate_generator.hpp> // #include <boost/generator_iterator.hpp> // #include <tr1/random> #include <time.h> #ifdef _OPENMP #include "omp.h" #endif namespace Kratos { template<std::size_t TDim> class ParticleLevelSetUtils { public: KRATOS_CLASS_POINTER_DEFINITION(ParticleLevelSetUtils<TDim>); //********************************************************************************************** //********************************************************************************************** //function to seed a list of new nodes void Seed(ModelPart& rEulerianModelPart, ModelPart& rLagrangianModelPart, const double max_seed_distance, const double min_edge_size) { KRATOS_TRY; rLagrangianModelPart.Nodes().clear(); unsigned int ele_id = 1; for (ModelPart::ElementsContainerType::iterator el_it = rEulerianModelPart.ElementsBegin(); el_it != rEulerianModelPart.ElementsEnd(); el_it++) { el_it->SetId(ele_id); ele_id++; } if(TDim==2){ BoundedMatrix<double, 16, 3 > pos; BoundedMatrix<double, 16, 3 > N; CreateParticles2D(rEulerianModelPart,rLagrangianModelPart,pos,N,max_seed_distance,min_edge_size); } else { // BoundedMatrix<double, 56, 3 > pos; // BoundedMatrix<double, 56, 4 > N; // CreateParticles3D(rEulerianModelPart,rLagrangianModelPart,pos,N,max_seed_distance,min_edge_size); BoundedMatrix<double, 10, 3 > pos; BoundedMatrix<double, 10, 4 > N; FewCreateParticles3D(rEulerianModelPart,rLagrangianModelPart,pos,N,max_seed_distance,min_edge_size); } for (ModelPart::NodesContainerType::iterator node_it = rLagrangianModelPart.NodesBegin(); node_it != rLagrangianModelPart.NodesEnd(); node_it++) { node_it->FastGetSolutionStepValue(VELOCITY, 1) = node_it->FastGetSolutionStepValue(VELOCITY); // node_it->FastGetSolutionStepValue(DISTANCE, 1) = node_it->FastGetSolutionStepValue(DISTANCE); } KRATOS_CATCH(""); } //********************************************************************************************** //********************************************************************************************** void StreamlineMove(const double dt, ModelPart& rEulerianModelPart, ModelPart& rLagrangianModelPart, BinBasedFastPointLocator<TDim>& node_locator) { KRATOS_TRY array_1d<double, 3 > veulerian; Vector N; const int max_results = 10000; typename BinBasedFastPointLocator<TDim>::ResultContainerType results(max_results); const int nparticles = rLagrangianModelPart.Nodes().size(); //KRATOS_WATCH("551") #pragma omp parallel for firstprivate(results,N,veulerian) for (int i = 0; i < nparticles; i++) { unsigned int substep = 0; 
unsigned int subdivisions = 1; double small_dt = dt; while(substep++ < subdivisions) { ModelPart::NodesContainerType::iterator iparticle = rLagrangianModelPart.NodesBegin() + i; (iparticle)->Set(TO_ERASE, true); Node < 3 > ::Pointer pparticle = *(iparticle.base()); typename BinBasedFastPointLocator<TDim>::ResultIteratorType result_begin = results.begin(); Element::Pointer pelement; // KRATOS_WATCH("561") bool is_found = node_locator.FindPointOnMesh(pparticle->Coordinates(), N, pelement, result_begin, max_results); // KRATOS_WATCH("564") if (is_found == true) { (pparticle)->GetValue(IS_VISITED) = 1; Geometry< Node < 3 > >& geom = pelement->GetGeometry(); noalias(veulerian) = N[0] * geom[0].FastGetSolutionStepValue(VELOCITY); for (unsigned int k = 1; k < geom.size(); k++) noalias(veulerian) += N[k] * geom[k].FastGetSolutionStepValue(VELOCITY); //compute adaptive subdivisions if(substep == 1) { //compute h double h = N[0] * geom[0].FastGetSolutionStepValue(NODAL_H); for (unsigned int k = 1; k < geom.size(); k++) h += N[k] * geom[k].FastGetSolutionStepValue(NODAL_H); //compute number of subdivisions needed const unsigned int min_subdivisions = 3; const unsigned int max_subdivisions = 20; double v = norm_2(veulerian); subdivisions = double(floor(2*dt*v/h)); subdivisions = (subdivisions<min_subdivisions) ? min_subdivisions : (subdivisions>max_subdivisions) ? max_subdivisions : subdivisions; //compute subdivisions time step small_dt = dt / subdivisions; // KRATOS_WATCH(subdivisions) } //move according to the streamline array_1d<double, 3 > & disp = (iparticle)->FastGetSolutionStepValue(DISPLACEMENT); noalias(disp) += small_dt*veulerian; (pparticle)->Set(TO_ERASE, false); // KRATOS_WATCH("585") //update position noalias(iparticle->Coordinates()) = iparticle->GetInitialPosition(); noalias(iparticle->Coordinates()) += iparticle->FastGetSolutionStepValue(DISPLACEMENT); (iparticle)->GetValue(IS_VISITED) = 0; //KRATOS_WATCH("619") } } } KRATOS_CATCH("") } //********************************************************************************************** //********************************************************************************************** void ParticleLevelSetCorrection(ModelPart& rEulerianModelPart, ModelPart& rLagrangianModelPart, BinBasedFastPointLocator<TDim>& node_locator) { KRATOS_TRY //Initilize NAGATIVE_DISTANCE & POSETIVE_DISTANCE const int nnodes= rEulerianModelPart.Nodes().size(); #pragma omp parallel for for (int jj = 0; jj < nnodes; jj++) { ModelPart::NodesContainerType::iterator node_itr = rEulerianModelPart.NodesBegin() + jj; const double nd_dist = node_itr->FastGetSolutionStepValue(DISTANCE); node_itr->SetValue(POSETIVE_DISTANCE,nd_dist ); node_itr->SetValue(NAGATIVE_DISTANCE,nd_dist ); } //loop over particles double particle_dist= 0.0; Vector N; const int max_results = 10000; typename BinBasedFastPointLocator<TDim>::ResultContainerType results(max_results); const int nparticles = rLagrangianModelPart.Nodes().size(); #pragma omp parallel for firstprivate(results,N,particle_dist) for (int i = 0; i < nparticles; i++) { ModelPart::NodesContainerType::iterator particle_itr = rLagrangianModelPart.NodesBegin() + i; Node < 3 > ::Pointer p_pointer = *(particle_itr.base()); typename BinBasedFastPointLocator<TDim>::ResultIteratorType result_begin = results.begin(); Element::Pointer pelement; bool is_found = node_locator.FindPointOnMesh(p_pointer->Coordinates(), N, pelement, result_begin, max_results); if (is_found == true) { Geometry< Node < 3 > >& geom = pelement->GetGeometry(); 
//interpolate the particle distance particle_dist = N[0] * geom[0].FastGetSolutionStepValue(DISTANCE); for (unsigned int k = 1; k < geom.size(); k++) particle_dist += N[k] * geom[k].FastGetSolutionStepValue(DISTANCE); //check if correction is needed const double p_sign = particle_itr->FastGetSolutionStepValue(IS_WATER); const double p_radi = particle_itr->FastGetSolutionStepValue(PARTICLE_RADIUS); if( particle_dist*p_sign < 0.0 && fabs(particle_dist) > p_radi) { double p_xx = particle_itr->X(); double p_yy = particle_itr->Y(); double p_zz = particle_itr->Z(); // const Variable<double> posetive_negative_dist_var; /* if( p_sign == -1.0 ) posetive_negative_dist_var = NAGATIVE_DISTANCE; else if( p_sign == 1.0 ) posetive_negative_dist_var = POSETIVE_DISTANCE; */ for (unsigned int kk = 1; kk < geom.size(); kk++){ p_xx -= geom[kk].X(); p_yy -= geom[kk].Y(); p_zz -= geom[kk].Z(); double dd = p_xx*p_xx + p_yy*p_yy + p_zz*p_zz; dd = sqrt(dd); double dist_to_particle = p_sign * (p_radi - dd); //correction due to particle distance and sign geom[kk].SetLock(); if( p_sign == 1.0){ double& pos_distance = geom[kk].GetValue(POSETIVE_DISTANCE); if ( dist_to_particle > pos_distance) pos_distance = dist_to_particle;} else if( p_sign == -1.0){ double& neg_distance = geom[kk].GetValue(NAGATIVE_DISTANCE); if ( dist_to_particle < neg_distance) neg_distance = dist_to_particle; } geom[kk].UnSetLock(); } } } }//end of loop over particles //final correction, choose between NAGATIVE_DISTANCE & POSETIVE_DISTANCE // const int nnodes= rEulerianModelPart.Nodes().size(); #pragma omp parallel for for (int jj = 0; jj < nnodes; jj++) { ModelPart::NodesContainerType::iterator node_itr = rEulerianModelPart.NodesBegin() + jj; double posetive = node_itr->GetValue(POSETIVE_DISTANCE); double negative = node_itr->GetValue(NAGATIVE_DISTANCE); double & nd_dist = node_itr->FastGetSolutionStepValue(DISTANCE); if ( posetive != negative){ if( fabs(posetive) < fabs(negative) ) nd_dist = posetive; else nd_dist = negative; node_itr->SetValue(POSETIVE_DISTANCE,nd_dist ); node_itr->SetValue(NAGATIVE_DISTANCE,nd_dist ); } } KRATOS_CATCH("") } //********************************************************************************************** //********************************************************************************************** void ResetParticleRadius(const double min_edge_length, ModelPart& rEulerianModelPart, ModelPart& rLagrangianModelPart, BinBasedFastPointLocator<TDim>& node_locator) { KRATOS_TRY; double particle_dist = 0.0; Vector N; const int max_results = 10000; typename BinBasedFastPointLocator<TDim>::ResultContainerType results(max_results); const int nparticles = rLagrangianModelPart.Nodes().size(); #pragma omp parallel for firstprivate(results,N,particle_dist) for (int i = 0; i < nparticles; i++) { ModelPart::NodesContainerType::iterator particle_itr = rLagrangianModelPart.NodesBegin() + i; Node < 3 > ::Pointer p_pointer = *(particle_itr.base()); typename BinBasedFastPointLocator<TDim>::ResultIteratorType result_begin = results.begin(); Element::Pointer pelement; bool is_found = node_locator.FindPointOnMesh(p_pointer->Coordinates(), N, pelement, result_begin, max_results); if (is_found == true) { Geometry< Node < 3 > >& geom = pelement->GetGeometry(); //interpolate the particle distance particle_dist = N[0] * geom[0].FastGetSolutionStepValue(DISTANCE); for (unsigned int k = 1; k < geom.size(); k++) particle_dist += N[k] * geom[k].FastGetSolutionStepValue(DISTANCE); if( fabs(particle_dist) < 0.1*min_edge_length) 
particle_itr->FastGetSolutionStepValue(PARTICLE_RADIUS) = 0.1*min_edge_length; else if(fabs(particle_dist) > 0.5*min_edge_length) particle_itr->FastGetSolutionStepValue(PARTICLE_RADIUS) = 0.5*min_edge_length; else particle_itr->FastGetSolutionStepValue(PARTICLE_RADIUS) = fabs(particle_dist); } } KRATOS_CATCH("") } //********************************************************************************************** //********************************************************************************************** void ParticleReseeding(ModelPart& rEulerianModelPart, ModelPart& rLagrangianModelPart, BinBasedFastPointLocator<TDim>& node_locator, const double max_seed_distance, const double min_edge_size) { KRATOS_TRY; //generate a tree with the position of the lagrangian nodes // typedef Node < 3 > PointType; // typedef Node < 3 > ::Pointer PointTypePointer; //unsigned int min_number_of_particles = 1; for (ModelPart::ElementsContainerType::iterator el_it = rEulerianModelPart.ElementsBegin(); el_it != rEulerianModelPart.ElementsEnd(); el_it++) { el_it->SetValue(YOUNG_MODULUS,0.0); } for (ModelPart::NodesContainerType::iterator pparticle = rLagrangianModelPart.NodesBegin(); pparticle != rLagrangianModelPart.NodesEnd(); pparticle++) { pparticle->Set(TO_ERASE,false); pparticle->SetValue(NL_ITERATION_NUMBER,(rEulerianModelPart.ElementsBegin())->Id()); pparticle->SetValue(IS_ESCAPED,false); pparticle->SetValue(IS_VISITED,0); } //count particles that fall within an element Vector N; const int max_results = 10000; typename BinBasedFastPointLocator<TDim>::ResultContainerType results(max_results); const int nparticles = rLagrangianModelPart.Nodes().size(); //count particles within an element #pragma omp parallel for firstprivate(results,N) for (int i = 0; i < nparticles; i++) { ModelPart::NodesContainerType::iterator iparticle = rLagrangianModelPart.NodesBegin() + i; Node < 3 > ::Pointer pparticle = *(iparticle.base()); typename BinBasedFastPointLocator<TDim>::ResultIteratorType result_begin = results.begin(); Element::Pointer pelement; bool is_found = node_locator.FindPointOnMesh(pparticle->Coordinates(), N, pelement, result_begin, max_results); if (is_found == true) { const double particle_sign = iparticle->FastGetSolutionStepValue(IS_WATER); Geometry< Node < 3 > >& geom = pelement->GetGeometry(); bool is_scaped = CheckIfEscaped(geom,N,particle_sign); iparticle->SetValue(IS_ESCAPED,is_scaped); if( CheckElemDist(geom,max_seed_distance) )// if it is inside the 3h band { double& counter = pelement->GetValue(YOUNG_MODULUS); #pragma omp atomic counter += 1.0; iparticle->SetValue(NL_ITERATION_NUMBER , pelement->Id()); } else { if( is_scaped == false) //delete if it is not an escaped particle iparticle->Set(TO_ERASE,true); } } } //loop over close to the surface elements to ressed or delet particles if(TDim==2){ ReseedOrDelete2D(rEulerianModelPart, rLagrangianModelPart, max_seed_distance, min_edge_size); } else { const int max_num_ptr = 16;//70; const int num_ptr = 10;//56; const int min_num_ptr = 6;//40; MarkEraseExtraParticles3D(rEulerianModelPart, rLagrangianModelPart, max_seed_distance, min_edge_size, max_num_ptr, num_ptr); ReseedPoorElements3D(rEulerianModelPart, rLagrangianModelPart, max_seed_distance, min_edge_size, min_num_ptr, num_ptr ); FewReseedPoorElements3D(rEulerianModelPart, rLagrangianModelPart, max_seed_distance, min_edge_size, min_num_ptr, num_ptr ); } //perform the erase NodeEraseProcess(rLagrangianModelPart).Execute(); KRATOS_CATCH(""); } 
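/*
 * Hedged illustration, not part of the Kratos file above: ParticleReseeding
 * counts how many particles land in each element by letting every thread scan
 * a slice of the particle list and incrementing a shared per-element counter
 * under "#pragma omp atomic" (the counter lives in YOUNG_MODULUS in the
 * original).  The standalone sketch below keeps only that counting pattern;
 * the vector-based data layout and the function name are assumptions.
 */
#include <vector>

inline std::vector<int> CountParticlesPerElement(const std::vector<int>& host_element_of_particle,
                                                 int num_elements)
{
    std::vector<int> counts(num_elements, 0);
    const int n = static_cast<int>(host_element_of_particle.size());
    #pragma omp parallel for
    for (int i = 0; i < n; ++i)
    {
        const int e = host_element_of_particle[i];  // element that hosts particle i
        #pragma omp atomic                          // serialize the increment, not the loop
        counts[e] += 1;
    }
    return counts;
}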
//********************************************************************************************** //********************************************************************************************** void VisualizationModelPart(ModelPart& rCompleteModelPart, ModelPart& rEulerianModelPart, ModelPart & rLagrangianModelPart) { KRATOS_TRY; rCompleteModelPart.Elements() = rEulerianModelPart.Elements(); rCompleteModelPart.Nodes() = rEulerianModelPart.Nodes(); unsigned int id; if(rEulerianModelPart.Nodes().size()!= 0) id = (rEulerianModelPart.Nodes().end() - 1)->Id() + 1; else id = 1; //preallocate the memory needed int tot_nodes = rEulerianModelPart.Nodes().size() + rLagrangianModelPart.Nodes().size(); rCompleteModelPart.Nodes().reserve( tot_nodes ); //note that here we renumber the nodes for (ModelPart::NodesContainerType::iterator node_it = rLagrangianModelPart.NodesBegin(); node_it != rLagrangianModelPart.NodesEnd(); node_it++) { rCompleteModelPart.AddNode(*(node_it.base())); node_it->SetId(id++); } KRATOS_CATCH(""); } //********************************************************************************** //********************************************************************************** void FindMaxMinEdgeSize(ModelPart& r_model_part, pybind11::list& maxmin) { KRATOS_TRY double max_edge = 0.0; double min_edge = 1000.0; for(ModelPart::ElementsContainerType::iterator it=r_model_part.ElementsBegin(); it!=r_model_part.ElementsEnd(); it++) { Geometry<Node<3> >&geom = it->GetGeometry(); double loc_h_max = 0.0; double loc_h_min = 1000.0; for(unsigned int i=0; i<TDim+1; i++) { double xc = geom[i].X(); double yc = geom[i].Y(); double zc = geom[i].Z(); for(unsigned int j=i+1; j<TDim+1; j++) { double x = geom[j].X(); double y = geom[j].Y(); double z = geom[j].Z(); double l = (x - xc)*(x - xc); l += (y - yc)*(y - yc); l += (z - zc)*(z - zc); if (l > loc_h_max) loc_h_max = l; else if(l < loc_h_min) loc_h_min = l; } } loc_h_max = sqrt(loc_h_max); loc_h_min = sqrt(loc_h_min); if(loc_h_max > max_edge ) max_edge = loc_h_max; if(loc_h_min < min_edge ) min_edge = loc_h_min; } // r_model_part.GetCommunicator().MaxAll(h_max); maxmin.append(max_edge); maxmin.append(min_edge); KRATOS_CATCH(""); } private: void CreateParticles3D(ModelPart& rEulerianModelPart, ModelPart& rLagrangianModelPart, BoundedMatrix<double, 56, 3 > pos, BoundedMatrix<double, 56, 4 > N, const double max_seed_distance, const double min_edge_size) { unsigned int id = (rEulerianModelPart.Nodes().end() - 1)->Id() + 1; for (ModelPart::ElementsContainerType::iterator el_it = rEulerianModelPart.ElementsBegin(); el_it != rEulerianModelPart.ElementsEnd(); el_it++) { Geometry<Node < 3 > >& geom = el_it->GetGeometry(); if(CheckElemDist(geom,max_seed_distance)) { ComputeGaussPointPositions3D(geom, pos, N); for (unsigned int i = 0; i < pos.size1(); i++) { int node_id = id++; Node < 3 > ::Pointer pnode = rLagrangianModelPart.CreateNewNode(node_id, pos(i, 0), pos(i, 1), pos(i, 2)); array_1d<double, 3 > & vel = pnode->FastGetSolutionStepValue(VELOCITY); noalias(vel) = ZeroVector(3); // double & p_dist = pnode->FastGetSolutionStepValue(DISTANCE); double p_distance = 0.0; for (unsigned int j = 0; j < TDim + 1; j++){ noalias(vel) += N(i, j) * geom[j].FastGetSolutionStepValue(VELOCITY); p_distance += N(i, j) * geom[j].FastGetSolutionStepValue(DISTANCE); } // Assign particle sign if(p_distance < 0.0) pnode->FastGetSolutionStepValue(IS_WATER)=-1.0; else if(p_distance > 0.0) pnode->FastGetSolutionStepValue(IS_WATER)= 1.0; pnode->Fix(IS_WATER); 
AssignParticleRadius(pnode,p_distance,min_edge_size); } } } } void FewCreateParticles3D(ModelPart& rEulerianModelPart, ModelPart& rLagrangianModelPart, BoundedMatrix<double, 10, 3 > pos, BoundedMatrix<double, 10, 4 > N, const double max_seed_distance, const double min_edge_size) { unsigned int id = (rEulerianModelPart.Nodes().end() - 1)->Id() + 1; for (ModelPart::ElementsContainerType::iterator el_it = rEulerianModelPart.ElementsBegin(); el_it != rEulerianModelPart.ElementsEnd(); el_it++) { Geometry<Node < 3 > >& geom = el_it->GetGeometry(); if(CheckElemDist(geom,max_seed_distance)) { FewComputeGaussPointPositions3D(geom, pos, N); for (unsigned int i = 0; i < pos.size1(); i++) { int node_id = id++; Node < 3 > ::Pointer pnode = rLagrangianModelPart.CreateNewNode(node_id, pos(i, 0), pos(i, 1), pos(i, 2)); array_1d<double, 3 > & vel = pnode->FastGetSolutionStepValue(VELOCITY); noalias(vel) = ZeroVector(3); // double & p_dist = pnode->FastGetSolutionStepValue(DISTANCE); double p_distance = 0.0; for (unsigned int j = 0; j < TDim + 1; j++){ noalias(vel) += N(i, j) * geom[j].FastGetSolutionStepValue(VELOCITY); p_distance += N(i, j) * geom[j].FastGetSolutionStepValue(DISTANCE); } // Assign particle sign if(p_distance < 0.0) pnode->FastGetSolutionStepValue(IS_WATER)=-1.0; else if(p_distance > 0.0) pnode->FastGetSolutionStepValue(IS_WATER)= 1.0; pnode->Fix(IS_WATER); AssignParticleRadius(pnode,p_distance,min_edge_size); } } } } void CreateParticles2D(ModelPart& rEulerianModelPart, ModelPart& rLagrangianModelPart, BoundedMatrix<double, 16, 3 > pos, BoundedMatrix<double, 16, 3 > N, const double max_seed_distance, const double min_edge_size) { unsigned int id = (rEulerianModelPart.Nodes().end() - 1)->Id() + 1; for (ModelPart::ElementsContainerType::iterator el_it = rEulerianModelPart.ElementsBegin(); el_it != rEulerianModelPart.ElementsEnd(); el_it++) { Geometry<Node < 3 > >& geom = el_it->GetGeometry(); if(CheckElemDist(geom,max_seed_distance)) { ComputeGaussPointPositions2D(geom, pos, N); for (unsigned int i = 0; i < pos.size1(); i++) { int node_id = id++; Node < 3 > ::Pointer pnode = rLagrangianModelPart.CreateNewNode(node_id, pos(i, 0), pos(i, 1), pos(i, 2)); array_1d<double, 3 > & vel = pnode->FastGetSolutionStepValue(VELOCITY); noalias(vel) = ZeroVector(3); // double & p_dist = pnode->FastGetSolutionStepValue(DISTANCE); double p_distance = 0.0; for (unsigned int j = 0; j < TDim + 1; j++){ noalias(vel) += N(i, j) * geom[j].FastGetSolutionStepValue(VELOCITY); p_distance += N(i, j) * geom[j].FastGetSolutionStepValue(DISTANCE); } // Assign particle sign if(p_distance < 0.0) pnode->FastGetSolutionStepValue(IS_WATER)=-1.0; else if(p_distance > 0.0) pnode->FastGetSolutionStepValue(IS_WATER)= 1.0; pnode->Fix(IS_WATER); AssignParticleRadius(pnode,p_distance,min_edge_size); } } } } void ReseedOrDelete2D(ModelPart& rEulerianModelPart, ModelPart& rLagrangianModelPart, const double max_seed_distance, const double min_edge_size) { int id; if (rLagrangianModelPart.Nodes().size() != 0) id = (rLagrangianModelPart.NodesEnd() - 1)->Id(); else id = 1; const int nelements = rEulerianModelPart.Elements().size(); const int nparticles = rLagrangianModelPart.Nodes().size(); BoundedMatrix<double, 16, 3 > coord; BoundedMatrix<double, 16, 3 > NN; // #pragma omp parallel for firstprivate(NN,coord) for (int ne = 0; ne < nelements; ne++) { ModelPart::ElementsContainerType::iterator ielem = rEulerianModelPart.ElementsBegin() + ne; Geometry<Node < 3 > >& geom = ielem->GetGeometry(); int n_ptr = 
int(ielem->GetValue(YOUNG_MODULUS)); if( n_ptr < 12 && CheckElemDist(geom,max_seed_distance) )//ressed in close to surface and poor element { //compute cooordinates //RandomPariclePosition(geom, coord, NN); ComputeGaussPointPositions2D(geom, coord, NN); int aux_n_ptr = n_ptr; int cnt = 0; while( aux_n_ptr<16 ){ aux_n_ptr++; //COORDINATES int node_id = id++; Node < 3 > ::Pointer pnode = rLagrangianModelPart.CreateNewNode(node_id, coord(cnt,0), coord(cnt,1), coord(cnt,2)); array_1d<double, 3 > & vel = pnode->FastGetSolutionStepValue(VELOCITY); noalias(vel) = ZeroVector(3); // double & p_dist = pnode->FastGetSolutionStepValue(DISTANCE); double p_distance = 0.0; for (unsigned int j = 0; j < TDim + 1; j++){ noalias(vel) += NN(cnt,j) * geom[j].FastGetSolutionStepValue(VELOCITY); p_distance += NN(cnt,j) * geom[j].FastGetSolutionStepValue(DISTANCE); } // Assign particle sign if(p_distance < 0.0) pnode->FastGetSolutionStepValue(IS_WATER)=-1.0; else if(p_distance > 0.0) pnode->FastGetSolutionStepValue(IS_WATER)= 1.0; pnode->Fix(IS_WATER); AssignParticleRadius(pnode,p_distance,min_edge_size); cnt++; } } else if( n_ptr > 20 && CheckElemDist(geom,max_seed_distance) ){ const int ele_id = ielem->Id(); ModelPart::NodesContainerType element_particles; element_particles.reserve(64); //save particle list for (int kk = 0; kk < nparticles; kk++) { ModelPart::NodesContainerType::iterator iparticle = rLagrangianModelPart.NodesBegin() + kk; const int ptr_nest = iparticle->GetValue(NL_ITERATION_NUMBER); if( ptr_nest==ele_id ) { iparticle->SetValue(SCALE, 0); element_particles.push_back( *(iparticle.base()) ); } } //loop to order based on the radius ModelPart::NodesContainerType::iterator ptr_begin = element_particles.begin(); unsigned int ptr_elem_size = element_particles.size(); for(unsigned int ii=0; ii < ptr_elem_size; ii++) for(unsigned int jj=ii+1; jj < ptr_elem_size; jj++) { double ii_radi = (ptr_begin + ii)->FastGetSolutionStepValue(PARTICLE_RADIUS); double jj_radi = (ptr_begin + jj)->FastGetSolutionStepValue(PARTICLE_RADIUS); (ii_radi>=jj_radi) ? 
(ptr_begin + ii)->GetValue(SCALE)+=1 : (ptr_begin + jj)->GetValue(SCALE)+=1; } //delete extra nodes int aux_ptr_elem_size = int(ptr_elem_size); while(aux_ptr_elem_size>16) { for(unsigned int ii=0; ii < ptr_elem_size; ii++){ bool swt = false; for( int kkk = ptr_elem_size; kkk>0; kkk-- ) if( (ptr_begin + ii)->GetValue(SCALE) == kkk && (ptr_begin + ii)->GetValue(IS_VISITED) == 0){ bool is_escaped = (ptr_begin + ii)->GetValue(IS_ESCAPED); if( is_escaped==false ) (ptr_begin + ii)->Set(TO_ERASE,true);//CHECK ESCASPED NODES (ptr_begin + ii)->SetValue(IS_VISITED,1); swt = true; break; } if(swt ) break; } aux_ptr_elem_size -= 1; } } } } void MarkEraseExtraParticles3D(ModelPart& rEulerianModelPart, ModelPart& rLagrangianModelPart, const double max_seed_distance, const double min_edge_size, const int max_num_particle, const int num_particle) { // int id; // if (rLagrangianModelPart.Nodes().size() != 0) // id = (rLagrangianModelPart.NodesEnd() - 1)->Id(); // else // id = 1; const int nelements = rEulerianModelPart.Elements().size(); const int nparticles = rLagrangianModelPart.Nodes().size(); std::vector< WeakPointerVector< Node< 3> > > particle_of_element(nelements); // particle_of_element.reserve(nelements); std::vector< unsigned int > num_ptr_in_elem(nelements,0); // num_ptr_in_elem.reserve(nelements); //loop on elements to resrve the size of particle in element list #pragma omp parallel for firstprivate(num_ptr_in_elem) for (int ne = 0; ne < nelements; ne++) { ModelPart::ElementsContainerType::iterator ielem = rEulerianModelPart.ElementsBegin() + ne; int n_ptr = int(ielem->GetValue(YOUNG_MODULUS)); unsigned int ele_id = ielem->Id(); num_ptr_in_elem[ele_id-1] = n_ptr; if(n_ptr > max_num_particle) particle_of_element[ele_id-1].reserve(n_ptr); } //loop on particles to push_back particle related to full elements for (int kk = 0; kk < nparticles; kk++) { ModelPart::NodesContainerType::iterator iparticle = rLagrangianModelPart.NodesBegin() + kk; const int ptr_nest = iparticle->GetValue(NL_ITERATION_NUMBER); if( num_ptr_in_elem[ptr_nest-1] > static_cast<unsigned int>(max_num_particle) ) particle_of_element[ptr_nest-1].push_back( *(iparticle.base()) ); } //loop over elements to reoreder the particle radius in over populated elements #pragma omp parallel for firstprivate(particle_of_element) for( int ii = 0; ii< static_cast<int>(particle_of_element.size()); ++ii) { if(particle_of_element[ii].size() > static_cast<unsigned int>(max_num_particle)) { //sort std::sort(particle_of_element[ii].ptr_begin(), particle_of_element[ii].ptr_end(), RadiusCompare() ); //delete extra nodes WeakPointerVector< Node< 3> >::iterator ele_pt_ptr = particle_of_element[ii].begin(); const unsigned int this_ele_ptr = particle_of_element[ii].size(); int aux_ptr_elem_size = this_ele_ptr; for( unsigned int ij = 0; (ij < this_ele_ptr && aux_ptr_elem_size > num_particle); ++ij) { bool is_escaped = (ele_pt_ptr + ij)->GetValue(IS_ESCAPED); if( is_escaped==false ){ (ele_pt_ptr + ij)->Set(TO_ERASE,true); aux_ptr_elem_size--; } } } } } struct RadiusCompare{ template<class TRefrenceType> bool operator()(const TRefrenceType ptr_a, const TRefrenceType ptr_b) { double a_radi = ptr_a.lock()->FastGetSolutionStepValue(PARTICLE_RADIUS); double b_radi = ptr_b.lock()->FastGetSolutionStepValue(PARTICLE_RADIUS); return (a_radi > b_radi); } }; void ReseedPoorElements3D(ModelPart& rEulerianModelPart, ModelPart& rLagrangianModelPart, const double max_seed_distance, const double min_edge_size, const int min_num_particle, const int num_particle) { int id; if 
(rLagrangianModelPart.Nodes().size() != 0) id = (rLagrangianModelPart.NodesEnd() - 1)->Id(); else id = 1; const int nelements = rEulerianModelPart.Elements().size(); // const int nparticles = rLagrangianModelPart.Nodes().size(); BoundedMatrix<double, 56, 3 > coord; BoundedMatrix<double, 56, 4 > NN; // #pragma omp parallel for firstprivate(NN,coord) for (int ne = 0; ne < nelements; ne++) { ModelPart::ElementsContainerType::iterator ielem = rEulerianModelPart.ElementsBegin() + ne; Geometry<Node < 3 > >& geom = ielem->GetGeometry(); int n_ptr = int(ielem->GetValue(YOUNG_MODULUS)); if( n_ptr < min_num_particle && CheckElemDist(geom,max_seed_distance) )//ressed in close to surface and poor element { //compute cooordinates //RandomPariclePosition(geom, coord, NN); ComputeGaussPointPositions3D(geom, coord, NN); int aux_n_ptr = n_ptr; int cnt = 0; while( aux_n_ptr < num_particle ){ aux_n_ptr++; //COORDINATES int node_id = id++; Node < 3 > ::Pointer pnode = rLagrangianModelPart.CreateNewNode(node_id, coord(cnt,0), coord(cnt,1), coord(cnt,2)); array_1d<double, 3 > & vel = pnode->FastGetSolutionStepValue(VELOCITY); noalias(vel) = ZeroVector(3); // double & p_dist = pnode->FastGetSolutionStepValue(DISTANCE); double p_distance = 0.0; for (unsigned int j = 0; j < TDim + 1; j++){ noalias(vel) += NN(cnt,j) * geom[j].FastGetSolutionStepValue(VELOCITY); p_distance += NN(cnt,j) * geom[j].FastGetSolutionStepValue(DISTANCE); } // Assign particle sign if(p_distance < 0.0) pnode->FastGetSolutionStepValue(IS_WATER)=-1.0; else if(p_distance > 0.0) pnode->FastGetSolutionStepValue(IS_WATER)= 1.0; pnode->Fix(IS_WATER); AssignParticleRadius(pnode,p_distance,min_edge_size); cnt++; } } } } void FewReseedPoorElements3D(ModelPart& rEulerianModelPart, ModelPart& rLagrangianModelPart, const double max_seed_distance, const double min_edge_size, const int min_num_particle, const int num_particle) { int id; if (rLagrangianModelPart.Nodes().size() != 0) id = (rLagrangianModelPart.NodesEnd() - 1)->Id(); else id = 1; const int nelements = rEulerianModelPart.Elements().size(); // const int nparticles = rLagrangianModelPart.Nodes().size(); BoundedMatrix<double, 10, 3 > coord; BoundedMatrix<double, 10, 4 > NN; // #pragma omp parallel for firstprivate(NN,coord) for (int ne = 0; ne < nelements; ne++) { ModelPart::ElementsContainerType::iterator ielem = rEulerianModelPart.ElementsBegin() + ne; Geometry<Node < 3 > >& geom = ielem->GetGeometry(); int n_ptr = int(ielem->GetValue(YOUNG_MODULUS)); if( n_ptr < min_num_particle && CheckElemDist(geom,max_seed_distance) )//ressed in close to surface and poor element { //compute cooordinates //RandomPariclePosition(geom, coord, NN); FewComputeGaussPointPositions3D(geom, coord, NN); int aux_n_ptr = n_ptr; int cnt = 0; while( aux_n_ptr < num_particle ){ aux_n_ptr++; //COORDINATES int node_id = id++; Node < 3 > ::Pointer pnode = rLagrangianModelPart.CreateNewNode(node_id, coord(cnt,0), coord(cnt,1), coord(cnt,2)); array_1d<double, 3 > & vel = pnode->FastGetSolutionStepValue(VELOCITY); noalias(vel) = ZeroVector(3); // double & p_dist = pnode->FastGetSolutionStepValue(DISTANCE); double p_distance = 0.0; for (unsigned int j = 0; j < TDim + 1; j++){ noalias(vel) += NN(cnt,j) * geom[j].FastGetSolutionStepValue(VELOCITY); p_distance += NN(cnt,j) * geom[j].FastGetSolutionStepValue(DISTANCE); } // Assign particle sign if(p_distance < 0.0) pnode->FastGetSolutionStepValue(IS_WATER)=-1.0; else if(p_distance > 0.0) pnode->FastGetSolutionStepValue(IS_WATER)= 1.0; pnode->Fix(IS_WATER); 
AssignParticleRadius(pnode,p_distance,min_edge_size); cnt++; } } } } // void ReseedOrDelete3D(ModelPart& rEulerianModelPart, // ModelPart& rLagrangianModelPart, // const double max_seed_distance, // const double min_edge_size) // { // int id; // if (rLagrangianModelPart.Nodes().size() != 0) // id = (rLagrangianModelPart.NodesEnd() - 1)->Id(); // else // id = 1; // const int nelements = rEulerianModelPart.Elements().size(); // const int nparticles = rLagrangianModelPart.Nodes().size(); // // // // BoundedMatrix<double, 56, 3 > coord; // BoundedMatrix<double, 56, 4 > NN; // // #pragma omp parallel for firstprivate(NN,coord) // for (int ne = 0; ne < nelements; ne++) // { // ModelPart::ElementsContainerType::iterator ielem = rEulerianModelPart.ElementsBegin() + ne; // Geometry<Node < 3 > >& geom = ielem->GetGeometry(); // int n_ptr = int(ielem->GetValue(YOUNG_MODULUS)); // // if( n_ptr < 42 && CheckElemDist(geom,max_seed_distance) )//ressed in close to surface and poor element // { // //compute cooordinates // //RandomPariclePosition(geom, coord, NN); // ComputeGaussPointPositions3D(geom, coord, NN); // int aux_n_ptr = n_ptr; // int cnt = 0; // while( aux_n_ptr<56 ){ // aux_n_ptr++; // //COORDINATES // int node_id = id++; // Node < 3 > ::Pointer pnode = rLagrangianModelPart.CreateNewNode(node_id, coord(cnt,0), coord(cnt,1), coord(cnt,2)); // // array_1d<double, 3 > & vel = pnode->FastGetSolutionStepValue(VELOCITY); // noalias(vel) = ZeroVector(3); // // // double & p_dist = pnode->FastGetSolutionStepValue(DISTANCE); // double p_distance = 0.0; // for (unsigned int j = 0; j < TDim + 1; j++){ // noalias(vel) += NN(cnt,j) * geom[j].FastGetSolutionStepValue(VELOCITY); // p_distance += NN(cnt,j) * geom[j].FastGetSolutionStepValue(DISTANCE); // } // // // Assign particle sign // if(p_distance < 0.0) // pnode->FastGetSolutionStepValue(IS_WATER)=-1.0; // else if(p_distance > 0.0) // pnode->FastGetSolutionStepValue(IS_WATER)= 1.0; // // pnode->Fix(IS_WATER); // // AssignParticleRadius(pnode,p_distance,min_edge_size); // // cnt++; // } // } // else if( n_ptr > 70 && CheckElemDist(geom,max_seed_distance) ){ // const int ele_id = ielem->Id(); // ModelPart::NodesContainerType element_particles; // element_particles.reserve(64); // //save particle list // for (int kk = 0; kk < nparticles; kk++) // { // ModelPart::NodesContainerType::iterator iparticle = rLagrangianModelPart.NodesBegin() + kk; // // const int ptr_nest = iparticle->GetValue(NL_ITERATION_NUMBER); // if( ptr_nest==ele_id ) // { // iparticle->SetValue(SCALE, 0); // element_particles.push_back( *(iparticle.base()) ); // } // } // // //loop to order based on the radius // ModelPart::NodesContainerType::iterator ptr_begin = element_particles.begin(); // unsigned int ptr_elem_size = element_particles.size(); // // for(unsigned int ii=0; ii < ptr_elem_size; ii++) // for(unsigned int jj=ii+1; jj < ptr_elem_size; jj++) // { // double ii_radi = (ptr_begin + ii)->FastGetSolutionStepValue(PARTICLE_RADIUS); // double jj_radi = (ptr_begin + jj)->FastGetSolutionStepValue(PARTICLE_RADIUS); // // (ii_radi>=jj_radi) ? 
(ptr_begin + ii)->GetValue(SCALE)+=1 : (ptr_begin + jj)->GetValue(SCALE)+=1; // // } // //delete extra nodes // int aux_ptr_elem_size = int(ptr_elem_size); // while(aux_ptr_elem_size>56) // { // for(unsigned int ii=0; ii < ptr_elem_size; ii++){ // bool swt = false; // for( int kkk = ptr_elem_size; kkk>0; kkk-- ) // if( (ptr_begin + ii)->GetValue(SCALE) == kkk && (ptr_begin + ii)->GetValue(IS_VISITED) == 0){ // bool is_escaped = (ptr_begin + ii)->GetValue(IS_ESCAPED); // if( is_escaped==false ) // (ptr_begin + ii)->Set(TO_ERASE,true);//CHECK ESCASPED NODES // (ptr_begin + ii)->SetValue(IS_VISITED,1); // swt = true; // break; // } // if(swt ) // break; // } // aux_ptr_elem_size -= 1; // } // } // // // } // // } void ComputeGaussPointPositions2D(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 16, 3 > & pos, BoundedMatrix<double, 16, 3 > & N) { //lower diagonal terms double ypos = 1.0 / 5.0; int pos_counter = 0; for (unsigned int i = 0; i < 4; i++) { double xpos = 1.0 / 8.0; for (unsigned int j = 0; j < (7-2*i); j++) { double N1 = xpos; double N2 = ypos; double N3 = 1.0 - xpos - ypos; pos(pos_counter, 0) = N1 * geom[0].X() + N2 * geom[1].X() + N3 * geom[2].X(); pos(pos_counter, 1) = N1 * geom[0].Y() + N2 * geom[1].Y() + N3 * geom[2].Y(); pos(pos_counter, 2) = N1 * geom[0].Z() + N2 * geom[1].Z() + N3 * geom[2].Z(); N(pos_counter, 0) = N1; N(pos_counter, 1) = N2; N(pos_counter, 2) = N3; xpos += 1.0 / 8.0; pos_counter += 1; } ypos += 1.0 / 5.0; } } void ComputeGaussPointPositions3D(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 56, 3 > & pos, BoundedMatrix<double, 56, 4 > & N) { int pos_counter = 0; const double one_seventh = 1.0/6.5; double close_point = 1.0/20; double zpos = close_point; for (unsigned int kk = 0; kk < 6; kk++) { // double y_div = 1.0/(7 - kk); double ypos = close_point;//one_seventh;// y_div * (1.0 - zpos);//one_seventh for (unsigned int i = 0; i < (6-kk); i++) { // double x_div = 1.0/(7 - kk);// -i double xpos = close_point;//one_seventh;//x_div* (1.0 - ypos) * (1.0 - zpos);//one_seventh for (unsigned int j = 0; j < (6-kk-i); j++) { double N1 = xpos; double N2 = ypos; double N3 = zpos; double N4 = 1.0 - xpos - ypos - zpos; pos(pos_counter, 0) = N1 * geom[0].X() + N2 * geom[1].X() + N3 * geom[2].X() + N4 * geom[3].X(); pos(pos_counter, 1) = N1 * geom[0].Y() + N2 * geom[1].Y() + N3 * geom[2].Y() + N4 * geom[3].Y(); pos(pos_counter, 2) = N1 * geom[0].Z() + N2 * geom[1].Z() + N3 * geom[2].Z() + N4 * geom[3].Z(); N(pos_counter, 0) = N1; N(pos_counter, 1) = N2; N(pos_counter, 2) = N3; N(pos_counter, 3) = N4; xpos += one_seventh;//x_div * (1.0 - ypos) * (1.0 - zpos); //one_seventh pos_counter += 1; } ypos += one_seventh;//y_div * (1.0 - zpos);//one_seventh } zpos += one_seventh; } } void FewComputeGaussPointPositions3D(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 10, 3 > & pos, BoundedMatrix<double, 10, 4 > & N) { int pos_counter = 0; const double one_third = 1.0/2.5; double close_point = 1.0/20; double zpos = close_point; for (unsigned int kk = 0; kk < 3; kk++) { // double y_div = 1.0/(7 - kk); double ypos = close_point;//one_seventh;// y_div * (1.0 - zpos);//one_seventh for (unsigned int i = 0; i < (3-kk); i++) { // double x_div = 1.0/(7 - kk);// -i double xpos = close_point;//one_seventh;//x_div* (1.0 - ypos) * (1.0 - zpos);//one_seventh for (unsigned int j = 0; j < (3-kk-i); j++) { double N1 = xpos; double N2 = ypos; double N3 = zpos; double N4 = 1.0 - xpos - ypos - zpos; pos(pos_counter, 0) = N1 * geom[0].X() + N2 * geom[1].X() + N3 * geom[2].X() + N4 * 
geom[3].X(); pos(pos_counter, 1) = N1 * geom[0].Y() + N2 * geom[1].Y() + N3 * geom[2].Y() + N4 * geom[3].Y(); pos(pos_counter, 2) = N1 * geom[0].Z() + N2 * geom[1].Z() + N3 * geom[2].Z() + N4 * geom[3].Z(); N(pos_counter, 0) = N1; N(pos_counter, 1) = N2; N(pos_counter, 2) = N3; N(pos_counter, 3) = N4; xpos += one_third;//x_div * (1.0 - ypos) * (1.0 - zpos); //one_seventh pos_counter += 1; } ypos += one_third;//y_div * (1.0 - zpos);//one_seventh } zpos += one_third; } } void RandomPariclePosition(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 16, 3 > & coord, BoundedMatrix<double, 16, 3 > & N_shape) { for(int ii=0;ii<16;ii++){ double xi = rand()* ( 1.0 / ( RAND_MAX + 1.0 ) ); double etta = (1.0 - xi) * ( rand()* ( 1.0 / ( RAND_MAX + 1.0 ) ) ); double zetta = 1.0 - (xi + etta); coord(ii,0) = xi * geom[0].X() + etta * geom[1].X() + zetta * geom[2].X(); coord(ii,1) = xi * geom[0].Y() + etta * geom[1].Y() + zetta * geom[2].Y(); coord(ii,2) = xi * geom[0].Z() + etta * geom[1].Z() + zetta * geom[2].Z(); N_shape(ii,0) = xi; N_shape(ii,1) = etta; N_shape(ii,1) = zetta; } } static int CheckElemDist(Geometry< Node < 3 > >& geom, const double max_dist) { for(unsigned int ii=0; ii < geom.size(); ++ii) { double nd_dist = geom[ii].FastGetSolutionStepValue(DISTANCE); if (fabs(nd_dist) < max_dist) return 1; } return 0; } bool CheckIfEscaped(Geometry< Node < 3 > >& geom, const array_1d<double, 3 > & N_shape,const double particle_sign) { double dist = N_shape[0]*geom[0].FastGetSolutionStepValue(DISTANCE); for(unsigned int ii=1; ii < geom.size(); ++ii) dist += N_shape[ii]*geom[ii].FastGetSolutionStepValue(DISTANCE); if( dist*particle_sign < 0.0) return true; else return false; } void AssignParticleRadius(Node < 3 > ::Pointer nd_ptr, double& p_dist,const double min_edge_size) { if( fabs(p_dist) < 0.1*min_edge_size) nd_ptr->FastGetSolutionStepValue(PARTICLE_RADIUS) = 0.1*min_edge_size; else if(fabs(p_dist) > 0.5*min_edge_size) nd_ptr->FastGetSolutionStepValue(PARTICLE_RADIUS) = 0.5*min_edge_size; else nd_ptr->FastGetSolutionStepValue(PARTICLE_RADIUS) = fabs(p_dist); } // unsigned int time_seed() // { // time_t now = time ( 0 ); // unsigned char *p = (unsigned char *)&now; // unsigned int seed = 0; // size_t i; // // for ( i = 0; i < sizeof now; i++ ) // seed = seed * ( UCHAR_MAX + 2U ) + p[i]; // // return seed; // } }; } #endif // KRATOS_LAGRANGIAN_PARTICLES_UTILITIES_INCLUDED defined
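/*
 * Hedged illustration, not part of the Kratos file above: StreamlineMove picks
 * an adaptive number of sub-steps per particle from floor(2*dt*|v|/h), clamped
 * to the range [3, 20], and then integrates the displacement with
 * small_dt = dt / subdivisions.  The helper below isolates that choice; the
 * function name and the zero-h guard are additions for the sketch, while the
 * clamp bounds are the ones used in the code above.
 */
#include <algorithm>
#include <cmath>

inline unsigned int ChooseStreamlineSubdivisions(double dt, double velocity_norm, double h)
{
    const unsigned int min_subdivisions = 3;
    const unsigned int max_subdivisions = 20;
    if (h <= 0.0) return min_subdivisions;  // defensive guard, not in the original
    const unsigned int raw =
        static_cast<unsigned int>(std::floor(2.0 * dt * velocity_norm / h));
    return std::min(std::max(raw, min_subdivisions), max_subdivisions);
}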
hypre_merge_sort.c
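/*
 * Hedged illustration, simplified relative to the hypre code that follows:
 * hypre_merge_sort lets each OpenMP thread sort its own slice of the array and
 * then merges pairs of sorted slices over log2(num_threads) rounds, with
 * kth_element splitting every merge across the threads of a group (a
 * merge-path partition).  The standalone sketch below keeps only the outer
 * structure -- per-chunk sort, then pairwise merge rounds -- and merges each
 * pair with a single std::inplace_merge instead of a cooperative merge.
 */
#include <algorithm>
#include <vector>

inline void SimpleParallelMergeSort(std::vector<int>& a, int num_chunks)
{
    const long long n = static_cast<long long>(a.size());
    if (n == 0 || num_chunks < 1) return;
    const long long chunk = (n + num_chunks - 1) / num_chunks;

    // 1) every chunk is sorted independently (thread-private work)
    #pragma omp parallel for
    for (int c = 0; c < num_chunks; ++c)
    {
        const long long b = std::min(chunk * c, n);
        const long long e = std::min(b + chunk, n);
        std::sort(a.begin() + b, a.begin() + e);
    }

    // 2) sorted runs are merged pairwise, doubling the run length each round
    for (long long width = chunk; width < n; width *= 2)
    {
        #pragma omp parallel for
        for (long long b = 0; b < n; b += 2 * width)
        {
            const long long mid = std::min(b + width, n);
            const long long e   = std::min(b + 2 * width, n);
            std::inplace_merge(a.begin() + b, a.begin() + mid, a.begin() + e);
        }
    }
}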
/****************************************************************************** * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other * HYPRE Project Developers. See the top-level COPYRIGHT file for details. * * SPDX-License-Identifier: (Apache-2.0 OR MIT) ******************************************************************************/ #include "_hypre_utilities.h" #include "hypre_hopscotch_hash.h" #include "../seq_mv/HYPRE_seq_mv.h" //#define DBG_MERGE_SORT #ifdef DBG_MERGE_SORT #include <algorithm> #include <unordered_map> #endif #define SWAP(T, a, b) do { T tmp = a; a = b; b = tmp; } while (0) /* union of two sorted (in ascending order) array arr1 and arr2 into arr3 * Assumption: no duplicates in arr1 and arr2 * arr3 should have enough space on entry * map1 and map2 map arr1 and arr2 to arr3 */ void hypre_union2(HYPRE_Int n1, HYPRE_BigInt *arr1, HYPRE_Int n2, HYPRE_BigInt *arr2, HYPRE_Int *n3, HYPRE_BigInt *arr3, HYPRE_Int *map1, HYPRE_Int *map2) { HYPRE_Int i = 0, j = 0, k = 0; while (i < n1 && j < n2) { if (arr1[i] < arr2[j]) { if (map1) { map1[i] = k; } arr3[k++] = arr1[i++]; } else if (arr1[i] > arr2[j]) { if (map2) { map2[j] = k; } arr3[k++] = arr2[j++]; } else /* == */ { if (map1) { map1[i] = k; } if (map2) { map2[j] = k; } arr3[k++] = arr1[i++]; j++; } } while (i < n1) { if (map1) { map1[i] = k; } arr3[k++] = arr1[i++]; } while (j < n2) { if (map2) { map2[j] = k; } arr3[k++] = arr2[j++]; } *n3 = k; } static void hypre_merge(HYPRE_Int *first1, HYPRE_Int *last1, HYPRE_Int *first2, HYPRE_Int *last2, HYPRE_Int *out) { for ( ; first1 != last1; ++out) { if (first2 == last2) { for ( ; first1 != last1; ++first1, ++out) { *out = *first1; } return; } if (*first2 < *first1) { *out = *first2; ++first2; } else { *out = *first1; ++first1; } } for ( ; first2 != last2; ++first2, ++out) { *out = *first2; } } #ifdef HYPRE_CONCURRENT_HOPSCOTCH static void hypre_big_merge(HYPRE_BigInt *first1, HYPRE_BigInt *last1, HYPRE_BigInt *first2, HYPRE_BigInt *last2, HYPRE_BigInt *out) { for ( ; first1 != last1; ++out) { if (first2 == last2) { for ( ; first1 != last1; ++first1, ++out) { *out = *first1; } return; } if (*first2 < *first1) { *out = *first2; ++first2; } else { *out = *first1; ++first1; } } for ( ; first2 != last2; ++first2, ++out) { *out = *first2; } } #endif static void kth_element_( HYPRE_Int *out1, HYPRE_Int *out2, HYPRE_Int *a1, HYPRE_Int *a2, HYPRE_Int left, HYPRE_Int right, HYPRE_Int n1, HYPRE_Int n2, HYPRE_Int k) { while (1) { HYPRE_Int i = (left + right)/2; // right < k -> i < k HYPRE_Int j = k - i - 1; #ifdef DBG_MERGE_SORT hypre_assert(left <= right && right <= k); hypre_assert(i < k); // i == k implies left == right == k that can never happen hypre_assert(j >= 0 && j < n2); #endif if ((j == -1 || a1[i] >= a2[j]) && (j == n2 - 1 || a1[i] <= a2[j + 1])) { *out1 = i; *out2 = j + 1; return; } else if (j >= 0 && a2[j] >= a1[i] && (i == n1 - 1 || a2[j] <= a1[i + 1])) { *out1 = i + 1; *out2 = j; return; } else if (a1[i] > a2[j] && j != n2 - 1 && a1[i] > a2[j+1]) { // search in left half of a1 right = i - 1; } else { // search in right half of a1 left = i + 1; } } } /** * Partition the input so that * a1[0:*out1) and a2[0:*out2) contain the smallest k elements */ static void kth_element( HYPRE_Int *out1, HYPRE_Int *out2, HYPRE_Int *a1, HYPRE_Int *a2, HYPRE_Int n1, HYPRE_Int n2, HYPRE_Int k) { // either of the inputs is empty if (n1 == 0) { *out1 = 0; *out2 = k; return; } if (n2 == 0) { *out1 = k; *out2 = 0; return; } if (k >= n1 + n2) { *out1 = n1; *out2 = n2; return; } // 
one is greater than the other if (k < n1 && a1[k] <= a2[0]) { *out1 = k; *out2 = 0; return; } if (k - n1 >= 0 && a2[k - n1] >= a1[n1 - 1]) { *out1 = n1; *out2 = k - n1; return; } if (k < n2 && a2[k] <= a1[0]) { *out1 = 0; *out2 = k; return; } if (k - n2 >= 0 && a1[k - n2] >= a2[n2 - 1]) { *out1 = k - n2; *out2 = n2; return; } // now k > 0 // faster to do binary search on the shorter sequence if (n1 > n2) { SWAP(HYPRE_Int, n1, n2); SWAP(HYPRE_Int *, a1, a2); SWAP(HYPRE_Int *, out1, out2); } if (k < (n1 + n2)/2) { kth_element_(out1, out2, a1, a2, 0, hypre_min(n1 - 1, k), n1, n2, k); } else { // when k is big, faster to find (n1 + n2 - k)th biggest element HYPRE_Int offset1 = hypre_max(k - n2, 0), offset2 = hypre_max(k - n1, 0); HYPRE_Int new_k = k - offset1 - offset2; HYPRE_Int new_n1 = hypre_min(n1 - offset1, new_k + 1); HYPRE_Int new_n2 = hypre_min(n2 - offset2, new_k + 1); kth_element_(out1, out2, a1 + offset1, a2 + offset2, 0, new_n1 - 1, new_n1, new_n2, new_k); *out1 += offset1; *out2 += offset2; } #ifdef DBG_MERGE_SORT hypre_assert(*out1 + *out2 == k); #endif } #ifdef HYPRE_CONCURRENT_HOPSCOTCH static void big_kth_element_( HYPRE_Int *out1, HYPRE_Int *out2, HYPRE_BigInt *a1, HYPRE_BigInt *a2, HYPRE_Int left, HYPRE_Int right, HYPRE_Int n1, HYPRE_Int n2, HYPRE_Int k) { while (1) { HYPRE_Int i = (left + right)/2; // right < k -> i < k HYPRE_Int j = k - i - 1; #ifdef DBG_MERGE_SORT hypre_assert(left <= right && right <= k); hypre_assert(i < k); // i == k implies left == right == k that can never happen hypre_assert(j >= 0 && j < n2); #endif if ((j == -1 || a1[i] >= a2[j]) && (j == n2 - 1 || a1[i] <= a2[j + 1])) { *out1 = i; *out2 = j + 1; return; } else if (j >= 0 && a2[j] >= a1[i] && (i == n1 - 1 || a2[j] <= a1[i + 1])) { *out1 = i + 1; *out2 = j; return; } else if (a1[i] > a2[j] && j != n2 - 1 && a1[i] > a2[j+1]) { // search in left half of a1 right = i - 1; } else { // search in right half of a1 left = i + 1; } } } /** * Partition the input so that * a1[0:*out1) and a2[0:*out2) contain the smallest k elements */ static void big_kth_element( HYPRE_Int *out1, HYPRE_Int *out2, HYPRE_BigInt *a1, HYPRE_BigInt *a2, HYPRE_Int n1, HYPRE_Int n2, HYPRE_Int k) { // either of the inputs is empty if (n1 == 0) { *out1 = 0; *out2 = k; return; } if (n2 == 0) { *out1 = k; *out2 = 0; return; } if (k >= n1 + n2) { *out1 = n1; *out2 = n2; return; } // one is greater than the other if (k < n1 && a1[k] <= a2[0]) { *out1 = k; *out2 = 0; return; } if (k - n1 >= 0 && a2[k - n1] >= a1[n1 - 1]) { *out1 = n1; *out2 = k - n1; return; } if (k < n2 && a2[k] <= a1[0]) { *out1 = 0; *out2 = k; return; } if (k - n2 >= 0 && a1[k - n2] >= a2[n2 - 1]) { *out1 = k - n2; *out2 = n2; return; } // now k > 0 // faster to do binary search on the shorter sequence if (n1 > n2) { SWAP(HYPRE_Int, n1, n2); SWAP(HYPRE_BigInt *, a1, a2); SWAP(HYPRE_Int *, out1, out2); } if (k < (n1 + n2)/2) { big_kth_element_(out1, out2, a1, a2, 0, hypre_min(n1 - 1, k), n1, n2, k); } else { // when k is big, faster to find (n1 + n2 - k)th biggest element HYPRE_Int offset1 = hypre_max(k - n2, 0), offset2 = hypre_max(k - n1, 0); HYPRE_Int new_k = k - offset1 - offset2; HYPRE_Int new_n1 = hypre_min(n1 - offset1, new_k + 1); HYPRE_Int new_n2 = hypre_min(n2 - offset2, new_k + 1); big_kth_element_(out1, out2, a1 + (HYPRE_BigInt)offset1, a2 + (HYPRE_BigInt)offset2, 0, new_n1 - 1, new_n1, new_n2, new_k); *out1 += offset1; *out2 += offset2; } #ifdef DBG_MERGE_SORT hypre_assert(*out1 + *out2 == k); #endif } #endif /** * @param num_threads number of threads that 
participate in this merge * @param my_thread_num thread id (zeor-based) among the threads that participate in this merge */ static void hypre_parallel_merge( HYPRE_Int *first1, HYPRE_Int *last1, HYPRE_Int *first2, HYPRE_Int *last2, HYPRE_Int *out, HYPRE_Int num_threads, HYPRE_Int my_thread_num) { HYPRE_Int n1 = last1 - first1; HYPRE_Int n2 = last2 - first2; HYPRE_Int n = n1 + n2; HYPRE_Int n_per_thread = (n + num_threads - 1)/num_threads; HYPRE_Int begin_rank = hypre_min(n_per_thread*my_thread_num, n); HYPRE_Int end_rank = hypre_min(begin_rank + n_per_thread, n); #ifdef DBG_MERGE_SORT hypre_assert(std::is_sorted(first1, last1)); hypre_assert(std::is_sorted(first2, last2)); #endif HYPRE_Int begin1, begin2, end1, end2; kth_element(&begin1, &begin2, first1, first2, n1, n2, begin_rank); kth_element(&end1, &end2, first1, first2, n1, n2, end_rank); while (begin1 > end1 && begin1 > 0 && begin2 < n2 && first1[begin1 - 1] == first2[begin2]) { #ifdef DBG_MERGE_SORT printf("%s:%d\n", __FILE__, __LINE__); #endif begin1--; begin2++; } while (begin2 > end2 && end1 > 0 && end2 < n2 && first1[end1 - 1] == first2[end2]) { #ifdef DBG_MERGE_SORT printf("%s:%d\n", __FILE__, __LINE__); #endif end1--; end2++; } #ifdef DBG_MERGE_SORT hypre_assert(begin1 <= end1); hypre_assert(begin2 <= end2); #endif hypre_merge( first1 + begin1, first1 + end1, first2 + begin2, first2 + end2, out + begin1 + begin2); #ifdef DBG_MERGE_SORT hypre_assert(std::is_sorted(out + begin1 + begin2, out + end1 + end2)); #endif } #ifdef HYPRE_CONCURRENT_HOPSCOTCH /** * @param num_threads number of threads that participate in this merge * @param my_thread_num thread id (zeor-based) among the threads that participate in this merge */ static void hypre_big_parallel_merge( HYPRE_BigInt *first1, HYPRE_BigInt *last1, HYPRE_BigInt *first2, HYPRE_BigInt *last2, HYPRE_BigInt *out, HYPRE_Int num_threads, HYPRE_Int my_thread_num) { HYPRE_Int n1 = (HYPRE_Int)(last1 - first1); HYPRE_Int n2 = (HYPRE_Int)(last2 - first2); HYPRE_Int n = n1 + n2; HYPRE_Int n_per_thread = (n + num_threads - 1)/num_threads; HYPRE_Int begin_rank = hypre_min(n_per_thread*my_thread_num, n); HYPRE_Int end_rank = hypre_min(begin_rank + n_per_thread, n); #ifdef DBG_MERGE_SORT hypre_assert(std::is_sorted(first1, last1)); hypre_assert(std::is_sorted(first2, last2)); #endif HYPRE_Int begin1, begin2, end1, end2; big_kth_element(&begin1, &begin2, first1, first2, n1, n2, begin_rank); big_kth_element(&end1, &end2, first1, first2, n1, n2, end_rank); while (begin1 > end1 && begin1 > 0 && begin2 < n2 && first1[begin1 - 1] == first2[begin2]) { #ifdef DBG_MERGE_SORT printf("%s:%d\n", __FILE__, __LINE__); #endif begin1--; begin2++; } while (begin2 > end2 && end1 > 0 && end2 < n2 && first1[end1 - 1] == first2[end2]) { #ifdef DBG_MERGE_SORT printf("%s:%d\n", __FILE__, __LINE__); #endif end1--; end2++; } #ifdef DBG_MERGE_SORT hypre_assert(begin1 <= end1); hypre_assert(begin2 <= end2); #endif hypre_big_merge( first1 + (HYPRE_BigInt)begin1, first1 + (HYPRE_BigInt)end1, first2 + (HYPRE_BigInt)begin2, first2 + (HYPRE_BigInt)end2, out + (HYPRE_BigInt)(begin1 + begin2)); #ifdef DBG_MERGE_SORT hypre_assert(std::is_sorted(out + begin1 + begin2, out + end1 + end2)); #endif } #endif void hypre_merge_sort(HYPRE_Int *in, HYPRE_Int *temp, HYPRE_Int len, HYPRE_Int **out) { if (0 == len) return; #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_MERGE] -= hypre_MPI_Wtime(); #endif #ifdef DBG_MERGE_SORT HYPRE_Int *dbg_buf = new HYPRE_Int[len]; std::copy(in, in + len, dbg_buf); std::sort(dbg_buf, dbg_buf + 
len); #endif // HYPRE_Int thread_private_len[hypre_NumThreads()]; // HYPRE_Int out_len = 0; #ifdef HYPRE_USING_OPENMP #pragma omp parallel #endif { HYPRE_Int num_threads = hypre_NumActiveThreads(); HYPRE_Int my_thread_num = hypre_GetThreadNum(); // thread-private sort HYPRE_Int i_per_thread = (len + num_threads - 1)/num_threads; HYPRE_Int i_begin = hypre_min(i_per_thread*my_thread_num, len); HYPRE_Int i_end = hypre_min(i_begin + i_per_thread, len); hypre_qsort0(in, i_begin, i_end - 1); // merge sorted sequences HYPRE_Int in_group_size; HYPRE_Int *in_buf = in; HYPRE_Int *out_buf = temp; for (in_group_size = 1; in_group_size < num_threads; in_group_size *= 2) { #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif // merge 2 in-groups into 1 out-group HYPRE_Int out_group_size = in_group_size*2; HYPRE_Int group_leader = my_thread_num/out_group_size*out_group_size; // HYPRE_Int group_sub_leader = hypre_min(group_leader + in_group_size, num_threads - 1); HYPRE_Int id_in_group = my_thread_num%out_group_size; HYPRE_Int num_threads_in_group = hypre_min(group_leader + out_group_size, num_threads) - group_leader; HYPRE_Int in_group1_begin = hypre_min(i_per_thread*group_leader, len); HYPRE_Int in_group1_end = hypre_min(in_group1_begin + i_per_thread*in_group_size, len); HYPRE_Int in_group2_begin = hypre_min(in_group1_begin + i_per_thread*in_group_size, len); HYPRE_Int in_group2_end = hypre_min(in_group2_begin + i_per_thread*in_group_size, len); hypre_parallel_merge( in_buf + in_group1_begin, in_buf + in_group1_end, in_buf + in_group2_begin, in_buf + in_group2_end, out_buf + in_group1_begin, num_threads_in_group, id_in_group); HYPRE_Int *temp = in_buf; in_buf = out_buf; out_buf = temp; } *out = in_buf; } /* omp parallel */ #ifdef DBG_MERGE_SORT hypre_assert(std::equal(*out, *out + len, dbg_buf)); delete[] dbg_buf; #endif #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_MERGE] += hypre_MPI_Wtime(); #endif } #ifdef HYPRE_CONCURRENT_HOPSCOTCH void hypre_sort_and_create_inverse_map( HYPRE_Int *in, HYPRE_Int len, HYPRE_Int **out, hypre_UnorderedIntMap *inverse_map) { if (len == 0) { return; } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_MERGE] -= hypre_MPI_Wtime(); #endif HYPRE_Int *temp = hypre_TAlloc(HYPRE_Int, len, HYPRE_MEMORY_HOST); hypre_merge_sort(in, temp, len, out); hypre_UnorderedIntMapCreate(inverse_map, 2*len, 16*hypre_NumThreads()); HYPRE_Int i; #pragma omp parallel for HYPRE_SMP_SCHEDULE for (i = 0; i < len; i++) { HYPRE_Int old = hypre_UnorderedIntMapPutIfAbsent(inverse_map, (*out)[i], i); hypre_assert(old == HYPRE_HOPSCOTCH_HASH_EMPTY); #ifdef DBG_MERGE_SORT if (hypre_UnorderedIntMapGet(inverse_map, (*out)[i]) != i) { fprintf(stderr, "%d %d\n", i, (*out)[i]); hypre_assert(false); } #endif } #ifdef DBG_MERGE_SORT std::unordered_map<HYPRE_Int, HYPRE_Int> inverse_map2(len); for (HYPRE_Int i = 0; i < len; ++i) { inverse_map2[(*out)[i]] = i; if (hypre_UnorderedIntMapGet(inverse_map, (*out)[i]) != i) { fprintf(stderr, "%d %d\n", i, (*out)[i]); hypre_assert(false); } } hypre_assert(hypre_UnorderedIntMapSize(inverse_map) == len); #endif if (*out == in) { hypre_TFree(temp, HYPRE_MEMORY_HOST); } else { hypre_TFree(in, HYPRE_MEMORY_HOST); } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_MERGE] += hypre_MPI_Wtime(); #endif } #ifdef HYPRE_CONCURRENT_HOPSCOTCH void hypre_big_merge_sort(HYPRE_BigInt *in, HYPRE_BigInt *temp, HYPRE_Int len, HYPRE_BigInt **out) { if (0 == len) return; #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_MERGE] -= hypre_MPI_Wtime(); #endif #ifdef 
DBG_MERGE_SORT HYPRE_Int *dbg_buf = new HYPRE_Int[len]; std::copy(in, in + len, dbg_buf); std::sort(dbg_buf, dbg_buf + len); #endif // HYPRE_Int thread_private_len[hypre_NumThreads()]; // HYPRE_Int out_len = 0; #ifdef HYPRE_USING_OPENMP #pragma omp parallel #endif { HYPRE_Int num_threads = hypre_NumActiveThreads(); HYPRE_Int my_thread_num = hypre_GetThreadNum(); // thread-private sort HYPRE_Int i_per_thread = (len + num_threads - 1)/num_threads; HYPRE_Int i_begin = hypre_min(i_per_thread*my_thread_num, len); HYPRE_Int i_end = hypre_min(i_begin + i_per_thread, len); hypre_BigQsort0(in, i_begin, i_end - 1); // merge sorted sequences HYPRE_Int in_group_size; HYPRE_BigInt *in_buf = in; HYPRE_BigInt *out_buf = temp; for (in_group_size = 1; in_group_size < num_threads; in_group_size *= 2) { #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif // merge 2 in-groups into 1 out-group HYPRE_Int out_group_size = in_group_size*2; HYPRE_Int group_leader = my_thread_num/out_group_size*out_group_size; // HYPRE_Int group_sub_leader = hypre_min(group_leader + in_group_size, num_threads - 1); HYPRE_Int id_in_group = my_thread_num%out_group_size; HYPRE_Int num_threads_in_group = hypre_min(group_leader + out_group_size, num_threads) - group_leader; HYPRE_Int in_group1_begin = hypre_min(i_per_thread*group_leader, len); HYPRE_Int in_group1_end = hypre_min(in_group1_begin + i_per_thread*in_group_size, len); HYPRE_Int in_group2_begin = hypre_min(in_group1_begin + i_per_thread*in_group_size, len); HYPRE_Int in_group2_end = hypre_min(in_group2_begin + i_per_thread*in_group_size, len); hypre_big_parallel_merge( in_buf + (HYPRE_BigInt)in_group1_begin, in_buf + (HYPRE_BigInt)in_group1_end, in_buf + (HYPRE_BigInt)in_group2_begin, in_buf + (HYPRE_BigInt)in_group2_end, out_buf + (HYPRE_BigInt)in_group1_begin, num_threads_in_group, id_in_group); HYPRE_BigInt *temp = in_buf; in_buf = out_buf; out_buf = temp; } *out = in_buf; } /* omp parallel */ #ifdef DBG_MERGE_SORT hypre_assert(std::equal(*out, *out + len, dbg_buf)); delete[] dbg_buf; #endif #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_MERGE] += hypre_MPI_Wtime(); #endif } void hypre_big_sort_and_create_inverse_map( HYPRE_BigInt *in, HYPRE_Int len, HYPRE_BigInt **out, hypre_UnorderedBigIntMap *inverse_map) { if (len == 0) { return; } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_MERGE] -= hypre_MPI_Wtime(); #endif HYPRE_BigInt *temp = hypre_TAlloc(HYPRE_BigInt, len, HYPRE_MEMORY_HOST); hypre_big_merge_sort(in, temp, len, out); hypre_UnorderedBigIntMapCreate(inverse_map, 2*len, 16*hypre_NumThreads()); HYPRE_Int i; #pragma omp parallel for HYPRE_SMP_SCHEDULE for (i = 0; i < len; i++) { HYPRE_Int old = hypre_UnorderedBigIntMapPutIfAbsent(inverse_map, (*out)[i], i); hypre_assert(old == HYPRE_HOPSCOTCH_HASH_EMPTY); #ifdef DBG_MERGE_SORT if (hypre_UnorderedBigIntMapGet(inverse_map, (*out)[i]) != i) { fprintf(stderr, "%d %d\n", i, (*out)[i]); hypre_assert(false); } #endif } #ifdef DBG_MERGE_SORT std::unordered_map<HYPRE_Int, HYPRE_Int> inverse_map2(len); for (HYPRE_Int i = 0; i < len; ++i) { inverse_map2[(*out)[i]] = i; if (hypre_UnorderedBigIntMapGet(inverse_map, (*out)[i]) != i) { fprintf(stderr, "%d %d\n", i, (*out)[i]); hypre_assert(false); } } hypre_assert(hypre_UnorderedBigIntMapSize(inverse_map) == len); #endif if (*out == in) { hypre_TFree(temp, HYPRE_MEMORY_HOST); } else { hypre_TFree(in, HYPRE_MEMORY_HOST); } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_MERGE] += hypre_MPI_Wtime(); #endif } #endif #endif /* vim: set tabstop=8 
softtabstop=3 sw=3 expandtab: */
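The kth_element()/hypre_parallel_merge() pair above is a co-ranking scheme: for an output rank k it locates a split (i, j) with i + j = k such that a1[0:i) and a2[0:j) hold the k smallest elements, so every thread can merge a disjoint slice of the output with no synchronization. The stand-alone sketch below shows the same idea with a plain binary search; it uses int instead of HYPRE_Int, hypothetical helper names (co_rank, merge_slice), and simulated threads in a loop, so it illustrates the technique rather than reproducing the hypre routine.

#include <stdio.h>

/* Smallest i such that a1[0:i) and a2[0:k-i) together hold the k smallest
 * elements of the two sorted inputs (the role played by kth_element() above). */
static int co_rank(int k, const int *a1, int n1, const int *a2, int n2)
{
   int lo = (k > n2) ? (k - n2) : 0;   /* i can never be smaller than k - n2 */
   int hi = (k < n1) ? k : n1;         /* i can never exceed k or n1         */
   while (lo < hi)
   {
      int i = lo + (hi - lo) / 2;
      int j = k - i;
      if (i < n1 && j > 0 && a2[j - 1] > a1[i]) { lo = i + 1; } /* need more of a1 */
      else                                      { hi = i; }     /* i is feasible   */
   }
   return lo;
}

/* Sequential merge of a1[b1:e1) and a2[b2:e2) into out (analogue of hypre_merge()). */
static void merge_slice(const int *a1, int b1, int e1,
                        const int *a2, int b2, int e2, int *out)
{
   int k = 0;
   while (b1 < e1 && b2 < e2) { out[k++] = (a2[b2] < a1[b1]) ? a2[b2++] : a1[b1++]; }
   while (b1 < e1) { out[k++] = a1[b1++]; }
   while (b2 < e2) { out[k++] = a2[b2++]; }
}

int main(void)
{
   int a1[] = {1, 3, 5, 7, 9, 11};
   int a2[] = {2, 4, 6, 8, 10, 12, 14, 16};
   int n1 = 6, n2 = 8, n = n1 + n2, out[14];
   int num_threads = 2, t;   /* two simulated threads, each owning one output slice */

   for (t = 0; t < num_threads; t++)
   {
      int per        = (n + num_threads - 1) / num_threads;
      int begin_rank = (per * t < n) ? per * t : n;
      int end_rank   = (begin_rank + per < n) ? begin_rank + per : n;
      int b1 = co_rank(begin_rank, a1, n1, a2, n2), b2 = begin_rank - b1;
      int e1 = co_rank(end_rank,   a1, n1, a2, n2), e2 = end_rank   - e1;
      merge_slice(a1, b1, e1, a2, b2, e2, out + begin_rank);   /* disjoint writes */
   }
   for (t = 0; t < n; t++) { printf("%d ", out[t]); }   /* 1 2 3 ... 12 14 16 */
   printf("\n");
   return 0;
}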
#include "_hypre_utilities.h" #include "hypre_hopscotch_hash.h" #include "../seq_mv/HYPRE_seq_mv.h" //#define DBG_MERGE_SORT #ifdef DBG_MERGE_SORT #include <algorithm> #include <unordered_map> #endif #define SWAP(T, a, b) do { T tmp = a; a = b; b = tmp; } while (0) /* union of two sorted (in ascending order) array arr1 and arr2 into arr3 * Assumption: no duplicates in arr1 and arr2 * arr3 should have enough space on entry * map1 and map2 map arr1 and arr2 to arr3 */ void hypre_union2(HYPRE_Int n1, HYPRE_BigInt *arr1, HYPRE_Int n2, HYPRE_BigInt *arr2, HYPRE_Int *n3, HYPRE_BigInt *arr3, HYPRE_Int *map1, HYPRE_Int *map2) { HYPRE_Int i = 0, j = 0, k = 0; while (i < n1 && j < n2) { if (arr1[i] < arr2[j]) { if (map1) { map1[i] = k; } arr3[k++] = arr1[i++]; } else if (arr1[i] > arr2[j]) { if (map2) { map2[j] = k; } arr3[k++] = arr2[j++]; } else /* == */ { if (map1) { map1[i] = k; } if (map2) { map2[j] = k; } arr3[k++] = arr1[i++]; j++; } } while (i < n1) { if (map1) { map1[i] = k; } arr3[k++] = arr1[i++]; } while (j < n2) { if (map2) { map2[j] = k; } arr3[k++] = arr2[j++]; } *n3 = k; } static void hypre_merge(HYPRE_Int *first1, HYPRE_Int *last1, HYPRE_Int *first2, HYPRE_Int *last2, HYPRE_Int *out) { for ( ; first1 != last1; ++out) { if (first2 == last2) { for ( ; first1 != last1; ++first1, ++out) { *out = *first1; } return; } if (*first2 < *first1) { *out = *first2; ++first2; } else { *out = *first1; ++first1; } } for ( ; first2 != last2; ++first2, ++out) { *out = *first2; } } #ifdef HYPRE_CONCURRENT_HOPSCOTCH static void hypre_big_merge(HYPRE_BigInt *first1, HYPRE_BigInt *last1, HYPRE_BigInt *first2, HYPRE_BigInt *last2, HYPRE_BigInt *out) { for ( ; first1 != last1; ++out) { if (first2 == last2) { for ( ; first1 != last1; ++first1, ++out) { *out = *first1; } return; } if (*first2 < *first1) { *out = *first2; ++first2; } else { *out = *first1; ++first1; } } for ( ; first2 != last2; ++first2, ++out) { *out = *first2; } } #endif static void kth_element_( HYPRE_Int *out1, HYPRE_Int *out2, HYPRE_Int *a1, HYPRE_Int *a2, HYPRE_Int left, HYPRE_Int right, HYPRE_Int n1, HYPRE_Int n2, HYPRE_Int k) { while (1) { HYPRE_Int i = (left + right)/2; // right < k -> i < k HYPRE_Int j = k - i - 1; #ifdef DBG_MERGE_SORT hypre_assert(left <= right && right <= k); hypre_assert(i < k); // i == k implies left == right == k that can never happen hypre_assert(j >= 0 && j < n2); #endif if ((j == -1 || a1[i] >= a2[j]) && (j == n2 - 1 || a1[i] <= a2[j + 1])) { *out1 = i; *out2 = j + 1; return; } else if (j >= 0 && a2[j] >= a1[i] && (i == n1 - 1 || a2[j] <= a1[i + 1])) { *out1 = i + 1; *out2 = j; return; } else if (a1[i] > a2[j] && j != n2 - 1 && a1[i] > a2[j+1]) { // search in left half of a1 right = i - 1; } else { // search in right half of a1 left = i + 1; } } } /** * Partition the input so that * a1[0:*out1) and a2[0:*out2) contain the smallest k elements */ static void kth_element( HYPRE_Int *out1, HYPRE_Int *out2, HYPRE_Int *a1, HYPRE_Int *a2, HYPRE_Int n1, HYPRE_Int n2, HYPRE_Int k) { // either of the inputs is empty if (n1 == 0) { *out1 = 0; *out2 = k; return; } if (n2 == 0) { *out1 = k; *out2 = 0; return; } if (k >= n1 + n2) { *out1 = n1; *out2 = n2; return; } // one is greater than the other if (k < n1 && a1[k] <= a2[0]) { *out1 = k; *out2 = 0; return; } if (k - n1 >= 0 && a2[k - n1] >= a1[n1 - 1]) { *out1 = n1; *out2 = k - n1; return; } if (k < n2 && a2[k] <= a1[0]) { *out1 = 0; *out2 = k; return; } if (k - n2 >= 0 && a1[k - n2] >= a2[n2 - 1]) { *out1 = k - n2; *out2 = n2; return; } // now k > 0 // faster to do 
binary search on the shorter sequence if (n1 > n2) { SWAP(HYPRE_Int, n1, n2); SWAP(HYPRE_Int *, a1, a2); SWAP(HYPRE_Int *, out1, out2); } if (k < (n1 + n2)/2) { kth_element_(out1, out2, a1, a2, 0, hypre_min(n1 - 1, k), n1, n2, k); } else { // when k is big, faster to find (n1 + n2 - k)th biggest element HYPRE_Int offset1 = hypre_max(k - n2, 0), offset2 = hypre_max(k - n1, 0); HYPRE_Int new_k = k - offset1 - offset2; HYPRE_Int new_n1 = hypre_min(n1 - offset1, new_k + 1); HYPRE_Int new_n2 = hypre_min(n2 - offset2, new_k + 1); kth_element_(out1, out2, a1 + offset1, a2 + offset2, 0, new_n1 - 1, new_n1, new_n2, new_k); *out1 += offset1; *out2 += offset2; } #ifdef DBG_MERGE_SORT hypre_assert(*out1 + *out2 == k); #endif } #ifdef HYPRE_CONCURRENT_HOPSCOTCH static void big_kth_element_( HYPRE_Int *out1, HYPRE_Int *out2, HYPRE_BigInt *a1, HYPRE_BigInt *a2, HYPRE_Int left, HYPRE_Int right, HYPRE_Int n1, HYPRE_Int n2, HYPRE_Int k) { while (1) { HYPRE_Int i = (left + right)/2; // right < k -> i < k HYPRE_Int j = k - i - 1; #ifdef DBG_MERGE_SORT hypre_assert(left <= right && right <= k); hypre_assert(i < k); // i == k implies left == right == k that can never happen hypre_assert(j >= 0 && j < n2); #endif if ((j == -1 || a1[i] >= a2[j]) && (j == n2 - 1 || a1[i] <= a2[j + 1])) { *out1 = i; *out2 = j + 1; return; } else if (j >= 0 && a2[j] >= a1[i] && (i == n1 - 1 || a2[j] <= a1[i + 1])) { *out1 = i + 1; *out2 = j; return; } else if (a1[i] > a2[j] && j != n2 - 1 && a1[i] > a2[j+1]) { // search in left half of a1 right = i - 1; } else { // search in right half of a1 left = i + 1; } } } /** * Partition the input so that * a1[0:*out1) and a2[0:*out2) contain the smallest k elements */ static void big_kth_element( HYPRE_Int *out1, HYPRE_Int *out2, HYPRE_BigInt *a1, HYPRE_BigInt *a2, HYPRE_Int n1, HYPRE_Int n2, HYPRE_Int k) { // either of the inputs is empty if (n1 == 0) { *out1 = 0; *out2 = k; return; } if (n2 == 0) { *out1 = k; *out2 = 0; return; } if (k >= n1 + n2) { *out1 = n1; *out2 = n2; return; } // one is greater than the other if (k < n1 && a1[k] <= a2[0]) { *out1 = k; *out2 = 0; return; } if (k - n1 >= 0 && a2[k - n1] >= a1[n1 - 1]) { *out1 = n1; *out2 = k - n1; return; } if (k < n2 && a2[k] <= a1[0]) { *out1 = 0; *out2 = k; return; } if (k - n2 >= 0 && a1[k - n2] >= a2[n2 - 1]) { *out1 = k - n2; *out2 = n2; return; } // now k > 0 // faster to do binary search on the shorter sequence if (n1 > n2) { SWAP(HYPRE_Int, n1, n2); SWAP(HYPRE_BigInt *, a1, a2); SWAP(HYPRE_Int *, out1, out2); } if (k < (n1 + n2)/2) { big_kth_element_(out1, out2, a1, a2, 0, hypre_min(n1 - 1, k), n1, n2, k); } else { // when k is big, faster to find (n1 + n2 - k)th biggest element HYPRE_Int offset1 = hypre_max(k - n2, 0), offset2 = hypre_max(k - n1, 0); HYPRE_Int new_k = k - offset1 - offset2; HYPRE_Int new_n1 = hypre_min(n1 - offset1, new_k + 1); HYPRE_Int new_n2 = hypre_min(n2 - offset2, new_k + 1); big_kth_element_(out1, out2, a1 + (HYPRE_BigInt)offset1, a2 + (HYPRE_BigInt)offset2, 0, new_n1 - 1, new_n1, new_n2, new_k); *out1 += offset1; *out2 += offset2; } #ifdef DBG_MERGE_SORT hypre_assert(*out1 + *out2 == k); #endif } #endif /** * @param num_threads number of threads that participate in this merge * @param my_thread_num thread id (zeor-based) among the threads that participate in this merge */ static void hypre_parallel_merge( HYPRE_Int *first1, HYPRE_Int *last1, HYPRE_Int *first2, HYPRE_Int *last2, HYPRE_Int *out, HYPRE_Int num_threads, HYPRE_Int my_thread_num) { HYPRE_Int n1 = last1 - first1; HYPRE_Int n2 = last2 - 
first2; HYPRE_Int n = n1 + n2; HYPRE_Int n_per_thread = (n + num_threads - 1)/num_threads; HYPRE_Int begin_rank = hypre_min(n_per_thread*my_thread_num, n); HYPRE_Int end_rank = hypre_min(begin_rank + n_per_thread, n); #ifdef DBG_MERGE_SORT hypre_assert(std::is_sorted(first1, last1)); hypre_assert(std::is_sorted(first2, last2)); #endif HYPRE_Int begin1, begin2, end1, end2; kth_element(&begin1, &begin2, first1, first2, n1, n2, begin_rank); kth_element(&end1, &end2, first1, first2, n1, n2, end_rank); while (begin1 > end1 && begin1 > 0 && begin2 < n2 && first1[begin1 - 1] == first2[begin2]) { #ifdef DBG_MERGE_SORT printf("%s:%d\n", __FILE__, __LINE__); #endif begin1--; begin2++; } while (begin2 > end2 && end1 > 0 && end2 < n2 && first1[end1 - 1] == first2[end2]) { #ifdef DBG_MERGE_SORT printf("%s:%d\n", __FILE__, __LINE__); #endif end1--; end2++; } #ifdef DBG_MERGE_SORT hypre_assert(begin1 <= end1); hypre_assert(begin2 <= end2); #endif hypre_merge( first1 + begin1, first1 + end1, first2 + begin2, first2 + end2, out + begin1 + begin2); #ifdef DBG_MERGE_SORT hypre_assert(std::is_sorted(out + begin1 + begin2, out + end1 + end2)); #endif } #ifdef HYPRE_CONCURRENT_HOPSCOTCH /** * @param num_threads number of threads that participate in this merge * @param my_thread_num thread id (zeor-based) among the threads that participate in this merge */ static void hypre_big_parallel_merge( HYPRE_BigInt *first1, HYPRE_BigInt *last1, HYPRE_BigInt *first2, HYPRE_BigInt *last2, HYPRE_BigInt *out, HYPRE_Int num_threads, HYPRE_Int my_thread_num) { HYPRE_Int n1 = (HYPRE_Int)(last1 - first1); HYPRE_Int n2 = (HYPRE_Int)(last2 - first2); HYPRE_Int n = n1 + n2; HYPRE_Int n_per_thread = (n + num_threads - 1)/num_threads; HYPRE_Int begin_rank = hypre_min(n_per_thread*my_thread_num, n); HYPRE_Int end_rank = hypre_min(begin_rank + n_per_thread, n); #ifdef DBG_MERGE_SORT hypre_assert(std::is_sorted(first1, last1)); hypre_assert(std::is_sorted(first2, last2)); #endif HYPRE_Int begin1, begin2, end1, end2; big_kth_element(&begin1, &begin2, first1, first2, n1, n2, begin_rank); big_kth_element(&end1, &end2, first1, first2, n1, n2, end_rank); while (begin1 > end1 && begin1 > 0 && begin2 < n2 && first1[begin1 - 1] == first2[begin2]) { #ifdef DBG_MERGE_SORT printf("%s:%d\n", __FILE__, __LINE__); #endif begin1--; begin2++; } while (begin2 > end2 && end1 > 0 && end2 < n2 && first1[end1 - 1] == first2[end2]) { #ifdef DBG_MERGE_SORT printf("%s:%d\n", __FILE__, __LINE__); #endif end1--; end2++; } #ifdef DBG_MERGE_SORT hypre_assert(begin1 <= end1); hypre_assert(begin2 <= end2); #endif hypre_big_merge( first1 + (HYPRE_BigInt)begin1, first1 + (HYPRE_BigInt)end1, first2 + (HYPRE_BigInt)begin2, first2 + (HYPRE_BigInt)end2, out + (HYPRE_BigInt)(begin1 + begin2)); #ifdef DBG_MERGE_SORT hypre_assert(std::is_sorted(out + begin1 + begin2, out + end1 + end2)); #endif } #endif void hypre_merge_sort(HYPRE_Int *in, HYPRE_Int *temp, HYPRE_Int len, HYPRE_Int **out) { if (0 == len) return; #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_MERGE] -= hypre_MPI_Wtime(); #endif #ifdef DBG_MERGE_SORT HYPRE_Int *dbg_buf = new HYPRE_Int[len]; std::copy(in, in + len, dbg_buf); std::sort(dbg_buf, dbg_buf + len); #endif // HYPRE_Int thread_private_len[hypre_NumThreads()]; // HYPRE_Int out_len = 0; { HYPRE_Int num_threads = hypre_NumActiveThreads(); HYPRE_Int my_thread_num = hypre_GetThreadNum(); // thread-private sort HYPRE_Int i_per_thread = (len + num_threads - 1)/num_threads; HYPRE_Int i_begin = hypre_min(i_per_thread*my_thread_num, len); HYPRE_Int i_end 
= hypre_min(i_begin + i_per_thread, len); hypre_qsort0(in, i_begin, i_end - 1); // merge sorted sequences HYPRE_Int in_group_size; HYPRE_Int *in_buf = in; HYPRE_Int *out_buf = temp; for (in_group_size = 1; in_group_size < num_threads; in_group_size *= 2) { // merge 2 in-groups into 1 out-group HYPRE_Int out_group_size = in_group_size*2; HYPRE_Int group_leader = my_thread_num/out_group_size*out_group_size; // HYPRE_Int group_sub_leader = hypre_min(group_leader + in_group_size, num_threads - 1); HYPRE_Int id_in_group = my_thread_num%out_group_size; HYPRE_Int num_threads_in_group = hypre_min(group_leader + out_group_size, num_threads) - group_leader; HYPRE_Int in_group1_begin = hypre_min(i_per_thread*group_leader, len); HYPRE_Int in_group1_end = hypre_min(in_group1_begin + i_per_thread*in_group_size, len); HYPRE_Int in_group2_begin = hypre_min(in_group1_begin + i_per_thread*in_group_size, len); HYPRE_Int in_group2_end = hypre_min(in_group2_begin + i_per_thread*in_group_size, len); hypre_parallel_merge( in_buf + in_group1_begin, in_buf + in_group1_end, in_buf + in_group2_begin, in_buf + in_group2_end, out_buf + in_group1_begin, num_threads_in_group, id_in_group); HYPRE_Int *temp = in_buf; in_buf = out_buf; out_buf = temp; } *out = in_buf; } /* omp parallel */ #ifdef DBG_MERGE_SORT hypre_assert(std::equal(*out, *out + len, dbg_buf)); delete[] dbg_buf; #endif #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_MERGE] += hypre_MPI_Wtime(); #endif } #ifdef HYPRE_CONCURRENT_HOPSCOTCH void hypre_sort_and_create_inverse_map( HYPRE_Int *in, HYPRE_Int len, HYPRE_Int **out, hypre_UnorderedIntMap *inverse_map) { if (len == 0) { return; } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_MERGE] -= hypre_MPI_Wtime(); #endif HYPRE_Int *temp = hypre_TAlloc(HYPRE_Int, len, HYPRE_MEMORY_HOST); hypre_merge_sort(in, temp, len, out); hypre_UnorderedIntMapCreate(inverse_map, 2*len, 16*hypre_NumThreads()); HYPRE_Int i; for (i = 0; i < len; i++) { HYPRE_Int old = hypre_UnorderedIntMapPutIfAbsent(inverse_map, (*out)[i], i); hypre_assert(old == HYPRE_HOPSCOTCH_HASH_EMPTY); #ifdef DBG_MERGE_SORT if (hypre_UnorderedIntMapGet(inverse_map, (*out)[i]) != i) { fprintf(stderr, "%d %d\n", i, (*out)[i]); hypre_assert(false); } #endif } #ifdef DBG_MERGE_SORT std::unordered_map<HYPRE_Int, HYPRE_Int> inverse_map2(len); for (HYPRE_Int i = 0; i < len; ++i) { inverse_map2[(*out)[i]] = i; if (hypre_UnorderedIntMapGet(inverse_map, (*out)[i]) != i) { fprintf(stderr, "%d %d\n", i, (*out)[i]); hypre_assert(false); } } hypre_assert(hypre_UnorderedIntMapSize(inverse_map) == len); #endif if (*out == in) { hypre_TFree(temp, HYPRE_MEMORY_HOST); } else { hypre_TFree(in, HYPRE_MEMORY_HOST); } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_MERGE] += hypre_MPI_Wtime(); #endif } #ifdef HYPRE_CONCURRENT_HOPSCOTCH void hypre_big_merge_sort(HYPRE_BigInt *in, HYPRE_BigInt *temp, HYPRE_Int len, HYPRE_BigInt **out) { if (0 == len) return; #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_MERGE] -= hypre_MPI_Wtime(); #endif #ifdef DBG_MERGE_SORT HYPRE_Int *dbg_buf = new HYPRE_Int[len]; std::copy(in, in + len, dbg_buf); std::sort(dbg_buf, dbg_buf + len); #endif // HYPRE_Int thread_private_len[hypre_NumThreads()]; // HYPRE_Int out_len = 0; { HYPRE_Int num_threads = hypre_NumActiveThreads(); HYPRE_Int my_thread_num = hypre_GetThreadNum(); // thread-private sort HYPRE_Int i_per_thread = (len + num_threads - 1)/num_threads; HYPRE_Int i_begin = hypre_min(i_per_thread*my_thread_num, len); HYPRE_Int i_end = hypre_min(i_begin + 
i_per_thread, len); hypre_BigQsort0(in, i_begin, i_end - 1); // merge sorted sequences HYPRE_Int in_group_size; HYPRE_BigInt *in_buf = in; HYPRE_BigInt *out_buf = temp; for (in_group_size = 1; in_group_size < num_threads; in_group_size *= 2) { // merge 2 in-groups into 1 out-group HYPRE_Int out_group_size = in_group_size*2; HYPRE_Int group_leader = my_thread_num/out_group_size*out_group_size; // HYPRE_Int group_sub_leader = hypre_min(group_leader + in_group_size, num_threads - 1); HYPRE_Int id_in_group = my_thread_num%out_group_size; HYPRE_Int num_threads_in_group = hypre_min(group_leader + out_group_size, num_threads) - group_leader; HYPRE_Int in_group1_begin = hypre_min(i_per_thread*group_leader, len); HYPRE_Int in_group1_end = hypre_min(in_group1_begin + i_per_thread*in_group_size, len); HYPRE_Int in_group2_begin = hypre_min(in_group1_begin + i_per_thread*in_group_size, len); HYPRE_Int in_group2_end = hypre_min(in_group2_begin + i_per_thread*in_group_size, len); hypre_big_parallel_merge( in_buf + (HYPRE_BigInt)in_group1_begin, in_buf + (HYPRE_BigInt)in_group1_end, in_buf + (HYPRE_BigInt)in_group2_begin, in_buf + (HYPRE_BigInt)in_group2_end, out_buf + (HYPRE_BigInt)in_group1_begin, num_threads_in_group, id_in_group); HYPRE_BigInt *temp = in_buf; in_buf = out_buf; out_buf = temp; } *out = in_buf; } /* omp parallel */ #ifdef DBG_MERGE_SORT hypre_assert(std::equal(*out, *out + len, dbg_buf)); delete[] dbg_buf; #endif #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_MERGE] += hypre_MPI_Wtime(); #endif } void hypre_big_sort_and_create_inverse_map( HYPRE_BigInt *in, HYPRE_Int len, HYPRE_BigInt **out, hypre_UnorderedBigIntMap *inverse_map) { if (len == 0) { return; } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_MERGE] -= hypre_MPI_Wtime(); #endif HYPRE_BigInt *temp = hypre_TAlloc(HYPRE_BigInt, len, HYPRE_MEMORY_HOST); hypre_big_merge_sort(in, temp, len, out); hypre_UnorderedBigIntMapCreate(inverse_map, 2*len, 16*hypre_NumThreads()); HYPRE_Int i; for (i = 0; i < len; i++) { HYPRE_Int old = hypre_UnorderedBigIntMapPutIfAbsent(inverse_map, (*out)[i], i); hypre_assert(old == HYPRE_HOPSCOTCH_HASH_EMPTY); #ifdef DBG_MERGE_SORT if (hypre_UnorderedBigIntMapGet(inverse_map, (*out)[i]) != i) { fprintf(stderr, "%d %d\n", i, (*out)[i]); hypre_assert(false); } #endif } #ifdef DBG_MERGE_SORT std::unordered_map<HYPRE_Int, HYPRE_Int> inverse_map2(len); for (HYPRE_Int i = 0; i < len; ++i) { inverse_map2[(*out)[i]] = i; if (hypre_UnorderedBigIntMapGet(inverse_map, (*out)[i]) != i) { fprintf(stderr, "%d %d\n", i, (*out)[i]); hypre_assert(false); } } hypre_assert(hypre_UnorderedBigIntMapSize(inverse_map) == len); #endif if (*out == in) { hypre_TFree(temp, HYPRE_MEMORY_HOST); } else { hypre_TFree(in, HYPRE_MEMORY_HOST); } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_MERGE] += hypre_MPI_Wtime(); #endif } #endif #endif /* vim: set tabstop=8 softtabstop=3 sw=3 expandtab: */
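hypre_union2() at the top of this file returns not only the sorted, duplicate-free union in arr3 but also, through map1/map2, the position of every arr1/arr2 entry inside that union, which is what callers need when previously separate index lists must afterwards be addressed through the merged list. Below is a stand-alone sketch of the same semantics, using plain int and dropping hypre's NULL checks on map1/map2 for brevity, together with a small worked example.

#include <stdio.h>

static void union2(int n1, const int *arr1, int n2, const int *arr2,
                   int *n3, int *arr3, int *map1, int *map2)
{
   int i = 0, j = 0, k = 0;
   while (i < n1 && j < n2)
   {
      if      (arr1[i] < arr2[j]) { map1[i] = k; arr3[k++] = arr1[i++]; }
      else if (arr1[i] > arr2[j]) { map2[j] = k; arr3[k++] = arr2[j++]; }
      else /* equal: emit once */ { map1[i] = k; map2[j] = k; arr3[k++] = arr1[i++]; j++; }
   }
   while (i < n1) { map1[i] = k; arr3[k++] = arr1[i++]; }
   while (j < n2) { map2[j] = k; arr3[k++] = arr2[j++]; }
   *n3 = k;
}

int main(void)
{
   int arr1[] = {1, 4, 7, 9};
   int arr2[] = {2, 4, 8, 9, 12};
   int arr3[9], map1[4], map2[5], n3, i;

   union2(4, arr1, 5, arr2, &n3, arr3, map1, map2);

   printf("union (%d entries):", n3);                   /* 1 2 4 7 8 9 12 */
   for (i = 0; i < n3; i++) { printf(" %d", arr3[i]); }
   printf("\nmap1:");                                   /* 0 2 3 5        */
   for (i = 0; i < 4; i++)  { printf(" %d", map1[i]); }
   printf("\nmap2:");                                   /* 1 2 4 5 6      */
   for (i = 0; i < 5; i++)  { printf(" %d", map2[i]); }
   printf("\n");
   return 0;
}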
#include "_hypre_utilities.h" #include "hypre_hopscotch_hash.h" #include "../seq_mv/HYPRE_seq_mv.h" //#define DBG_MERGE_SORT #ifdef DBG_MERGE_SORT #include <algorithm> #include <unordered_map> #endif #define SWAP(T, a, b) do { T tmp = a; a = b; b = tmp; } while (0) /* union of two sorted (in ascending order) array arr1 and arr2 into arr3 * Assumption: no duplicates in arr1 and arr2 * arr3 should have enough space on entry * map1 and map2 map arr1 and arr2 to arr3 */ void hypre_union2(HYPRE_Int n1, HYPRE_BigInt *arr1, HYPRE_Int n2, HYPRE_BigInt *arr2, HYPRE_Int *n3, HYPRE_BigInt *arr3, HYPRE_Int *map1, HYPRE_Int *map2) { HYPRE_Int i = 0, j = 0, k = 0; while (i < n1 && j < n2) { if (arr1[i] < arr2[j]) { if (map1) { map1[i] = k; } arr3[k++] = arr1[i++]; } else if (arr1[i] > arr2[j]) { if (map2) { map2[j] = k; } arr3[k++] = arr2[j++]; } else /* == */ { if (map1) { map1[i] = k; } if (map2) { map2[j] = k; } arr3[k++] = arr1[i++]; j++; } } while (i < n1) { if (map1) { map1[i] = k; } arr3[k++] = arr1[i++]; } while (j < n2) { if (map2) { map2[j] = k; } arr3[k++] = arr2[j++]; } *n3 = k; } static void hypre_merge(HYPRE_Int *first1, HYPRE_Int *last1, HYPRE_Int *first2, HYPRE_Int *last2, HYPRE_Int *out) { for ( ; first1 != last1; ++out) { if (first2 == last2) { for ( ; first1 != last1; ++first1, ++out) { *out = *first1; } return; } if (*first2 < *first1) { *out = *first2; ++first2; } else { *out = *first1; ++first1; } } for ( ; first2 != last2; ++first2, ++out) { *out = *first2; } } #ifdef HYPRE_CONCURRENT_HOPSCOTCH static void hypre_big_merge(HYPRE_BigInt *first1, HYPRE_BigInt *last1, HYPRE_BigInt *first2, HYPRE_BigInt *last2, HYPRE_BigInt *out) { for ( ; first1 != last1; ++out) { if (first2 == last2) { for ( ; first1 != last1; ++first1, ++out) { *out = *first1; } return; } if (*first2 < *first1) { *out = *first2; ++first2; } else { *out = *first1; ++first1; } } for ( ; first2 != last2; ++first2, ++out) { *out = *first2; } } #endif static void kth_element_( HYPRE_Int *out1, HYPRE_Int *out2, HYPRE_Int *a1, HYPRE_Int *a2, HYPRE_Int left, HYPRE_Int right, HYPRE_Int n1, HYPRE_Int n2, HYPRE_Int k) { while (1) { HYPRE_Int i = (left + right)/2; // right < k -> i < k HYPRE_Int j = k - i - 1; #ifdef DBG_MERGE_SORT hypre_assert(left <= right && right <= k); hypre_assert(i < k); // i == k implies left == right == k that can never happen hypre_assert(j >= 0 && j < n2); #endif if ((j == -1 || a1[i] >= a2[j]) && (j == n2 - 1 || a1[i] <= a2[j + 1])) { *out1 = i; *out2 = j + 1; return; } else if (j >= 0 && a2[j] >= a1[i] && (i == n1 - 1 || a2[j] <= a1[i + 1])) { *out1 = i + 1; *out2 = j; return; } else if (a1[i] > a2[j] && j != n2 - 1 && a1[i] > a2[j+1]) { // search in left half of a1 right = i - 1; } else { // search in right half of a1 left = i + 1; } } } /** * Partition the input so that * a1[0:*out1) and a2[0:*out2) contain the smallest k elements */ static void kth_element( HYPRE_Int *out1, HYPRE_Int *out2, HYPRE_Int *a1, HYPRE_Int *a2, HYPRE_Int n1, HYPRE_Int n2, HYPRE_Int k) { // either of the inputs is empty if (n1 == 0) { *out1 = 0; *out2 = k; return; } if (n2 == 0) { *out1 = k; *out2 = 0; return; } if (k >= n1 + n2) { *out1 = n1; *out2 = n2; return; } // one is greater than the other if (k < n1 && a1[k] <= a2[0]) { *out1 = k; *out2 = 0; return; } if (k - n1 >= 0 && a2[k - n1] >= a1[n1 - 1]) { *out1 = n1; *out2 = k - n1; return; } if (k < n2 && a2[k] <= a1[0]) { *out1 = 0; *out2 = k; return; } if (k - n2 >= 0 && a1[k - n2] >= a2[n2 - 1]) { *out1 = k - n2; *out2 = n2; return; } // now k > 0 // faster to do 
binary search on the shorter sequence if (n1 > n2) { SWAP(HYPRE_Int, n1, n2); SWAP(HYPRE_Int *, a1, a2); SWAP(HYPRE_Int *, out1, out2); } if (k < (n1 + n2)/2) { kth_element_(out1, out2, a1, a2, 0, hypre_min(n1 - 1, k), n1, n2, k); } else { // when k is big, faster to find (n1 + n2 - k)th biggest element HYPRE_Int offset1 = hypre_max(k - n2, 0), offset2 = hypre_max(k - n1, 0); HYPRE_Int new_k = k - offset1 - offset2; HYPRE_Int new_n1 = hypre_min(n1 - offset1, new_k + 1); HYPRE_Int new_n2 = hypre_min(n2 - offset2, new_k + 1); kth_element_(out1, out2, a1 + offset1, a2 + offset2, 0, new_n1 - 1, new_n1, new_n2, new_k); *out1 += offset1; *out2 += offset2; } #ifdef DBG_MERGE_SORT hypre_assert(*out1 + *out2 == k); #endif } #ifdef HYPRE_CONCURRENT_HOPSCOTCH static void big_kth_element_( HYPRE_Int *out1, HYPRE_Int *out2, HYPRE_BigInt *a1, HYPRE_BigInt *a2, HYPRE_Int left, HYPRE_Int right, HYPRE_Int n1, HYPRE_Int n2, HYPRE_Int k) { while (1) { HYPRE_Int i = (left + right)/2; // right < k -> i < k HYPRE_Int j = k - i - 1; #ifdef DBG_MERGE_SORT hypre_assert(left <= right && right <= k); hypre_assert(i < k); // i == k implies left == right == k that can never happen hypre_assert(j >= 0 && j < n2); #endif if ((j == -1 || a1[i] >= a2[j]) && (j == n2 - 1 || a1[i] <= a2[j + 1])) { *out1 = i; *out2 = j + 1; return; } else if (j >= 0 && a2[j] >= a1[i] && (i == n1 - 1 || a2[j] <= a1[i + 1])) { *out1 = i + 1; *out2 = j; return; } else if (a1[i] > a2[j] && j != n2 - 1 && a1[i] > a2[j+1]) { // search in left half of a1 right = i - 1; } else { // search in right half of a1 left = i + 1; } } } /** * Partition the input so that * a1[0:*out1) and a2[0:*out2) contain the smallest k elements */ static void big_kth_element( HYPRE_Int *out1, HYPRE_Int *out2, HYPRE_BigInt *a1, HYPRE_BigInt *a2, HYPRE_Int n1, HYPRE_Int n2, HYPRE_Int k) { // either of the inputs is empty if (n1 == 0) { *out1 = 0; *out2 = k; return; } if (n2 == 0) { *out1 = k; *out2 = 0; return; } if (k >= n1 + n2) { *out1 = n1; *out2 = n2; return; } // one is greater than the other if (k < n1 && a1[k] <= a2[0]) { *out1 = k; *out2 = 0; return; } if (k - n1 >= 0 && a2[k - n1] >= a1[n1 - 1]) { *out1 = n1; *out2 = k - n1; return; } if (k < n2 && a2[k] <= a1[0]) { *out1 = 0; *out2 = k; return; } if (k - n2 >= 0 && a1[k - n2] >= a2[n2 - 1]) { *out1 = k - n2; *out2 = n2; return; } // now k > 0 // faster to do binary search on the shorter sequence if (n1 > n2) { SWAP(HYPRE_Int, n1, n2); SWAP(HYPRE_BigInt *, a1, a2); SWAP(HYPRE_Int *, out1, out2); } if (k < (n1 + n2)/2) { big_kth_element_(out1, out2, a1, a2, 0, hypre_min(n1 - 1, k), n1, n2, k); } else { // when k is big, faster to find (n1 + n2 - k)th biggest element HYPRE_Int offset1 = hypre_max(k - n2, 0), offset2 = hypre_max(k - n1, 0); HYPRE_Int new_k = k - offset1 - offset2; HYPRE_Int new_n1 = hypre_min(n1 - offset1, new_k + 1); HYPRE_Int new_n2 = hypre_min(n2 - offset2, new_k + 1); big_kth_element_(out1, out2, a1 + (HYPRE_BigInt)offset1, a2 + (HYPRE_BigInt)offset2, 0, new_n1 - 1, new_n1, new_n2, new_k); *out1 += offset1; *out2 += offset2; } #ifdef DBG_MERGE_SORT hypre_assert(*out1 + *out2 == k); #endif } #endif /** * @param num_threads number of threads that participate in this merge * @param my_thread_num thread id (zeor-based) among the threads that participate in this merge */ static void hypre_parallel_merge( HYPRE_Int *first1, HYPRE_Int *last1, HYPRE_Int *first2, HYPRE_Int *last2, HYPRE_Int *out, HYPRE_Int num_threads, HYPRE_Int my_thread_num) { HYPRE_Int n1 = last1 - first1; HYPRE_Int n2 = last2 - 
first2; HYPRE_Int n = n1 + n2; HYPRE_Int n_per_thread = (n + num_threads - 1)/num_threads; HYPRE_Int begin_rank = hypre_min(n_per_thread*my_thread_num, n); HYPRE_Int end_rank = hypre_min(begin_rank + n_per_thread, n); #ifdef DBG_MERGE_SORT hypre_assert(std::is_sorted(first1, last1)); hypre_assert(std::is_sorted(first2, last2)); #endif HYPRE_Int begin1, begin2, end1, end2; kth_element(&begin1, &begin2, first1, first2, n1, n2, begin_rank); kth_element(&end1, &end2, first1, first2, n1, n2, end_rank); while (begin1 > end1 && begin1 > 0 && begin2 < n2 && first1[begin1 - 1] == first2[begin2]) { #ifdef DBG_MERGE_SORT printf("%s:%d\n", __FILE__, __LINE__); #endif begin1--; begin2++; } while (begin2 > end2 && end1 > 0 && end2 < n2 && first1[end1 - 1] == first2[end2]) { #ifdef DBG_MERGE_SORT printf("%s:%d\n", __FILE__, __LINE__); #endif end1--; end2++; } #ifdef DBG_MERGE_SORT hypre_assert(begin1 <= end1); hypre_assert(begin2 <= end2); #endif hypre_merge( first1 + begin1, first1 + end1, first2 + begin2, first2 + end2, out + begin1 + begin2); #ifdef DBG_MERGE_SORT hypre_assert(std::is_sorted(out + begin1 + begin2, out + end1 + end2)); #endif } #ifdef HYPRE_CONCURRENT_HOPSCOTCH /** * @param num_threads number of threads that participate in this merge * @param my_thread_num thread id (zeor-based) among the threads that participate in this merge */ static void hypre_big_parallel_merge( HYPRE_BigInt *first1, HYPRE_BigInt *last1, HYPRE_BigInt *first2, HYPRE_BigInt *last2, HYPRE_BigInt *out, HYPRE_Int num_threads, HYPRE_Int my_thread_num) { HYPRE_Int n1 = (HYPRE_Int)(last1 - first1); HYPRE_Int n2 = (HYPRE_Int)(last2 - first2); HYPRE_Int n = n1 + n2; HYPRE_Int n_per_thread = (n + num_threads - 1)/num_threads; HYPRE_Int begin_rank = hypre_min(n_per_thread*my_thread_num, n); HYPRE_Int end_rank = hypre_min(begin_rank + n_per_thread, n); #ifdef DBG_MERGE_SORT hypre_assert(std::is_sorted(first1, last1)); hypre_assert(std::is_sorted(first2, last2)); #endif HYPRE_Int begin1, begin2, end1, end2; big_kth_element(&begin1, &begin2, first1, first2, n1, n2, begin_rank); big_kth_element(&end1, &end2, first1, first2, n1, n2, end_rank); while (begin1 > end1 && begin1 > 0 && begin2 < n2 && first1[begin1 - 1] == first2[begin2]) { #ifdef DBG_MERGE_SORT printf("%s:%d\n", __FILE__, __LINE__); #endif begin1--; begin2++; } while (begin2 > end2 && end1 > 0 && end2 < n2 && first1[end1 - 1] == first2[end2]) { #ifdef DBG_MERGE_SORT printf("%s:%d\n", __FILE__, __LINE__); #endif end1--; end2++; } #ifdef DBG_MERGE_SORT hypre_assert(begin1 <= end1); hypre_assert(begin2 <= end2); #endif hypre_big_merge( first1 + (HYPRE_BigInt)begin1, first1 + (HYPRE_BigInt)end1, first2 + (HYPRE_BigInt)begin2, first2 + (HYPRE_BigInt)end2, out + (HYPRE_BigInt)(begin1 + begin2)); #ifdef DBG_MERGE_SORT hypre_assert(std::is_sorted(out + begin1 + begin2, out + end1 + end2)); #endif } #endif void hypre_merge_sort(HYPRE_Int *in, HYPRE_Int *temp, HYPRE_Int len, HYPRE_Int **out) { if (0 == len) return; #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_MERGE] -= hypre_MPI_Wtime(); #endif #ifdef DBG_MERGE_SORT HYPRE_Int *dbg_buf = new HYPRE_Int[len]; std::copy(in, in + len, dbg_buf); std::sort(dbg_buf, dbg_buf + len); #endif // HYPRE_Int thread_private_len[hypre_NumThreads()]; // HYPRE_Int out_len = 0; #ifdef HYPRE_USING_OPENMP #pragma omp parallel #endif { HYPRE_Int num_threads = hypre_NumActiveThreads(); HYPRE_Int my_thread_num = hypre_GetThreadNum(); // thread-private sort HYPRE_Int i_per_thread = (len + num_threads - 1)/num_threads; HYPRE_Int i_begin = 
hypre_min(i_per_thread*my_thread_num, len); HYPRE_Int i_end = hypre_min(i_begin + i_per_thread, len); hypre_qsort0(in, i_begin, i_end - 1); // merge sorted sequences HYPRE_Int in_group_size; HYPRE_Int *in_buf = in; HYPRE_Int *out_buf = temp; for (in_group_size = 1; in_group_size < num_threads; in_group_size *= 2) { #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif // merge 2 in-groups into 1 out-group HYPRE_Int out_group_size = in_group_size*2; HYPRE_Int group_leader = my_thread_num/out_group_size*out_group_size; // HYPRE_Int group_sub_leader = hypre_min(group_leader + in_group_size, num_threads - 1); HYPRE_Int id_in_group = my_thread_num%out_group_size; HYPRE_Int num_threads_in_group = hypre_min(group_leader + out_group_size, num_threads) - group_leader; HYPRE_Int in_group1_begin = hypre_min(i_per_thread*group_leader, len); HYPRE_Int in_group1_end = hypre_min(in_group1_begin + i_per_thread*in_group_size, len); HYPRE_Int in_group2_begin = hypre_min(in_group1_begin + i_per_thread*in_group_size, len); HYPRE_Int in_group2_end = hypre_min(in_group2_begin + i_per_thread*in_group_size, len); hypre_parallel_merge( in_buf + in_group1_begin, in_buf + in_group1_end, in_buf + in_group2_begin, in_buf + in_group2_end, out_buf + in_group1_begin, num_threads_in_group, id_in_group); HYPRE_Int *temp = in_buf; in_buf = out_buf; out_buf = temp; } *out = in_buf; } /* omp parallel */ #ifdef DBG_MERGE_SORT hypre_assert(std::equal(*out, *out + len, dbg_buf)); delete[] dbg_buf; #endif #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_MERGE] += hypre_MPI_Wtime(); #endif } #ifdef HYPRE_CONCURRENT_HOPSCOTCH void hypre_sort_and_create_inverse_map( HYPRE_Int *in, HYPRE_Int len, HYPRE_Int **out, hypre_UnorderedIntMap *inverse_map) { if (len == 0) { return; } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_MERGE] -= hypre_MPI_Wtime(); #endif HYPRE_Int *temp = hypre_TAlloc(HYPRE_Int, len, HYPRE_MEMORY_HOST); hypre_merge_sort(in, temp, len, out); hypre_UnorderedIntMapCreate(inverse_map, 2*len, 16*hypre_NumThreads()); HYPRE_Int i; #pragma omp parallel for HYPRE_SMP_SCHEDULE for (i = 0; i < len; i++) { HYPRE_Int old = hypre_UnorderedIntMapPutIfAbsent(inverse_map, (*out)[i], i); hypre_assert(old == HYPRE_HOPSCOTCH_HASH_EMPTY); #ifdef DBG_MERGE_SORT if (hypre_UnorderedIntMapGet(inverse_map, (*out)[i]) != i) { fprintf(stderr, "%d %d\n", i, (*out)[i]); hypre_assert(false); } #endif } #ifdef DBG_MERGE_SORT std::unordered_map<HYPRE_Int, HYPRE_Int> inverse_map2(len); for (HYPRE_Int i = 0; i < len; ++i) { inverse_map2[(*out)[i]] = i; if (hypre_UnorderedIntMapGet(inverse_map, (*out)[i]) != i) { fprintf(stderr, "%d %d\n", i, (*out)[i]); hypre_assert(false); } } hypre_assert(hypre_UnorderedIntMapSize(inverse_map) == len); #endif if (*out == in) { hypre_TFree(temp, HYPRE_MEMORY_HOST); } else { hypre_TFree(in, HYPRE_MEMORY_HOST); } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_MERGE] += hypre_MPI_Wtime(); #endif } #ifdef HYPRE_CONCURRENT_HOPSCOTCH void hypre_big_merge_sort(HYPRE_BigInt *in, HYPRE_BigInt *temp, HYPRE_Int len, HYPRE_BigInt **out) { if (0 == len) return; #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_MERGE] -= hypre_MPI_Wtime(); #endif #ifdef DBG_MERGE_SORT HYPRE_Int *dbg_buf = new HYPRE_Int[len]; std::copy(in, in + len, dbg_buf); std::sort(dbg_buf, dbg_buf + len); #endif // HYPRE_Int thread_private_len[hypre_NumThreads()]; // HYPRE_Int out_len = 0; #ifdef HYPRE_USING_OPENMP #pragma omp parallel #endif { HYPRE_Int num_threads = hypre_NumActiveThreads(); HYPRE_Int my_thread_num = 
hypre_GetThreadNum(); // thread-private sort HYPRE_Int i_per_thread = (len + num_threads - 1)/num_threads; HYPRE_Int i_begin = hypre_min(i_per_thread*my_thread_num, len); HYPRE_Int i_end = hypre_min(i_begin + i_per_thread, len); hypre_BigQsort0(in, i_begin, i_end - 1); // merge sorted sequences HYPRE_Int in_group_size; HYPRE_BigInt *in_buf = in; HYPRE_BigInt *out_buf = temp; for (in_group_size = 1; in_group_size < num_threads; in_group_size *= 2) { #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif // merge 2 in-groups into 1 out-group HYPRE_Int out_group_size = in_group_size*2; HYPRE_Int group_leader = my_thread_num/out_group_size*out_group_size; // HYPRE_Int group_sub_leader = hypre_min(group_leader + in_group_size, num_threads - 1); HYPRE_Int id_in_group = my_thread_num%out_group_size; HYPRE_Int num_threads_in_group = hypre_min(group_leader + out_group_size, num_threads) - group_leader; HYPRE_Int in_group1_begin = hypre_min(i_per_thread*group_leader, len); HYPRE_Int in_group1_end = hypre_min(in_group1_begin + i_per_thread*in_group_size, len); HYPRE_Int in_group2_begin = hypre_min(in_group1_begin + i_per_thread*in_group_size, len); HYPRE_Int in_group2_end = hypre_min(in_group2_begin + i_per_thread*in_group_size, len); hypre_big_parallel_merge( in_buf + (HYPRE_BigInt)in_group1_begin, in_buf + (HYPRE_BigInt)in_group1_end, in_buf + (HYPRE_BigInt)in_group2_begin, in_buf + (HYPRE_BigInt)in_group2_end, out_buf + (HYPRE_BigInt)in_group1_begin, num_threads_in_group, id_in_group); HYPRE_BigInt *temp = in_buf; in_buf = out_buf; out_buf = temp; } *out = in_buf; } /* omp parallel */ #ifdef DBG_MERGE_SORT hypre_assert(std::equal(*out, *out + len, dbg_buf)); delete[] dbg_buf; #endif #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_MERGE] += hypre_MPI_Wtime(); #endif } void hypre_big_sort_and_create_inverse_map( HYPRE_BigInt *in, HYPRE_Int len, HYPRE_BigInt **out, hypre_UnorderedBigIntMap *inverse_map) { if (len == 0) { return; } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_MERGE] -= hypre_MPI_Wtime(); #endif HYPRE_BigInt *temp = hypre_TAlloc(HYPRE_BigInt, len, HYPRE_MEMORY_HOST); hypre_big_merge_sort(in, temp, len, out); hypre_UnorderedBigIntMapCreate(inverse_map, 2*len, 16*hypre_NumThreads()); HYPRE_Int i; #pragma omp parallel for HYPRE_SMP_SCHEDULE for (i = 0; i < len; i++) { HYPRE_Int old = hypre_UnorderedBigIntMapPutIfAbsent(inverse_map, (*out)[i], i); hypre_assert(old == HYPRE_HOPSCOTCH_HASH_EMPTY); #ifdef DBG_MERGE_SORT if (hypre_UnorderedBigIntMapGet(inverse_map, (*out)[i]) != i) { fprintf(stderr, "%d %d\n", i, (*out)[i]); hypre_assert(false); } #endif } #ifdef DBG_MERGE_SORT std::unordered_map<HYPRE_Int, HYPRE_Int> inverse_map2(len); for (HYPRE_Int i = 0; i < len; ++i) { inverse_map2[(*out)[i]] = i; if (hypre_UnorderedBigIntMapGet(inverse_map, (*out)[i]) != i) { fprintf(stderr, "%d %d\n", i, (*out)[i]); hypre_assert(false); } } hypre_assert(hypre_UnorderedBigIntMapSize(inverse_map) == len); #endif if (*out == in) { hypre_TFree(temp, HYPRE_MEMORY_HOST); } else { hypre_TFree(in, HYPRE_MEMORY_HOST); } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_MERGE] += hypre_MPI_Wtime(); #endif } #endif #endif /* vim: set tabstop=8 softtabstop=3 sw=3 expandtab: */
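The variant just above keeps the OpenMP pragmas: one parallel region in which each thread sorts its own chunk, followed by merge rounds that double the group size each time, each round preceded by a barrier so that no thread reads a run another thread is still producing, with the in/temp buffers swapped after every round (which is why the caller receives the winning buffer through *out). The following stand-alone sketch shows just that control structure; it assumes compilation with OpenMP enabled (e.g. -fopenmp), uses omp_get_num_threads()/omp_get_thread_num() and qsort in place of hypre's wrappers, and, unlike hypre, lets the leader thread of each group perform the whole pair-merge instead of splitting it across the group with kth_element().

#include <omp.h>
#include <stdio.h>
#include <stdlib.h>

static int cmp_int(const void *a, const void *b)
{
   return (*(const int *)a > *(const int *)b) - (*(const int *)a < *(const int *)b);
}

/* Merge in[b1:e1) and in[b2:e2) into out starting at position b1. */
static void merge_runs(const int *in, int b1, int e1, int b2, int e2, int *out)
{
   int k = b1;
   while (b1 < e1 && b2 < e2) { out[k++] = (in[b2] < in[b1]) ? in[b2++] : in[b1++]; }
   while (b1 < e1) { out[k++] = in[b1++]; }
   while (b2 < e2) { out[k++] = in[b2++]; }
}

static void omp_merge_sort_sketch(int *in, int *temp, int len, int **out)
{
   #pragma omp parallel
   {
      int nthreads = omp_get_num_threads();
      int tid      = omp_get_thread_num();
      int per      = (len + nthreads - 1) / nthreads;
      int b        = (per * tid < len) ? per * tid : len;
      int e        = (b + per < len) ? b + per : len;
      int *in_buf  = in, *out_buf = temp, group;

      qsort(in_buf + b, e - b, sizeof(int), cmp_int);   /* thread-private sort */

      for (group = 1; group < nthreads; group *= 2)
      {
         #pragma omp barrier          /* runs of the previous round must all be ready */
         if (tid % (2 * group) == 0)  /* group leader merges its adjacent pair        */
         {
            int b1 = (per * tid < len) ? per * tid : len;
            int e1 = (b1 + per * group < len) ? b1 + per * group : len;
            int e2 = (e1 + per * group < len) ? e1 + per * group : len;
            merge_runs(in_buf, b1, e1, e1, e2, out_buf);
         }
         { int *swap = in_buf; in_buf = out_buf; out_buf = swap; }   /* ping-pong */
      }
      *out = in_buf;   /* every thread stores the same pointer, as hypre_merge_sort() does */
   }
}

int main(void)
{
   int data[] = {5, 9, 1, 7, 3, 8, 2, 6, 4, 0}, temp[10], *sorted, i;
   omp_merge_sort_sketch(data, temp, 10, &sorted);
   for (i = 0; i < 10; i++) { printf("%d ", sorted[i]); }   /* 0 1 2 ... 9 */
   printf("\n");
   return 0;
}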
par_mgr.c
/****************************************************************************** * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other * HYPRE Project Developers. See the top-level COPYRIGHT file for details. * * SPDX-License-Identifier: (Apache-2.0 OR MIT) ******************************************************************************/ /****************************************************************************** * * Two-grid system solver * *****************************************************************************/ #include "_hypre_parcsr_ls.h" #include "par_amg.h" #include "par_mgr.h" #ifdef HYPRE_USING_DSUPERLU #include "dsuperlu.h" #endif /* Create */ void * hypre_MGRCreate() { hypre_ParMGRData *mgr_data; mgr_data = hypre_CTAlloc(hypre_ParMGRData, 1, HYPRE_MEMORY_HOST); /* block data */ (mgr_data -> block_size) = 1; (mgr_data -> block_num_coarse_indexes) = NULL; (mgr_data -> point_marker_array) = NULL; (mgr_data -> block_cf_marker) = NULL; /* general data */ (mgr_data -> max_num_coarse_levels) = 10; (mgr_data -> A_array) = NULL; (mgr_data -> P_array) = NULL; (mgr_data -> RT_array) = NULL; (mgr_data -> RAP) = NULL; (mgr_data -> CF_marker_array) = NULL; (mgr_data -> coarse_indices_lvls) = NULL; (mgr_data -> A_ff_array) = NULL; (mgr_data -> F_fine_array) = NULL; (mgr_data -> U_fine_array) = NULL; (mgr_data -> aff_solver) = NULL; (mgr_data -> fine_grid_solver_setup) = NULL; (mgr_data -> fine_grid_solver_solve) = NULL; (mgr_data -> F_array) = NULL; (mgr_data -> U_array) = NULL; (mgr_data -> residual) = NULL; (mgr_data -> rel_res_norms) = NULL; (mgr_data -> Vtemp) = NULL; (mgr_data -> Ztemp) = NULL; (mgr_data -> Utemp) = NULL; (mgr_data -> Ftemp) = NULL; (mgr_data -> num_iterations) = 0; (mgr_data -> num_interp_sweeps) = 1; (mgr_data -> num_restrict_sweeps) = 1; (mgr_data -> trunc_factor) = 0.0; (mgr_data -> max_row_sum) = 0.9; (mgr_data -> strong_threshold) = 0.25; (mgr_data -> S_commpkg_switch) = 1.0; (mgr_data -> P_max_elmts) = 0; (mgr_data -> coarse_grid_solver) = NULL; (mgr_data -> coarse_grid_solver_setup) = NULL; (mgr_data -> coarse_grid_solver_solve) = NULL; (mgr_data -> global_smoother) = NULL; (mgr_data -> use_default_cgrid_solver) = 1; (mgr_data -> use_default_fsolver) = -1; // set to -1 to avoid printing when not used (mgr_data -> omega) = 1.; (mgr_data -> max_iter) = 20; (mgr_data -> tol) = 1.0e-6; (mgr_data -> relax_type) = 0; (mgr_data -> relax_order) = 1; // not fully utilized. Only used to compute L1-norms. 
(mgr_data -> interp_type) = NULL; (mgr_data -> restrict_type) = NULL; (mgr_data -> num_relax_sweeps) = 1; (mgr_data -> relax_weight) = 1.0; (mgr_data -> logging) = 0; (mgr_data -> print_level) = 0; (mgr_data -> l1_norms) = NULL; (mgr_data -> reserved_coarse_size) = 0; (mgr_data -> reserved_coarse_indexes) = NULL; (mgr_data -> reserved_Cpoint_local_indexes) = NULL; (mgr_data -> diaginv) = NULL; (mgr_data -> global_smooth_iters) = 1; (mgr_data -> global_smooth_type) = 0; (mgr_data -> set_non_Cpoints_to_F) = 0; (mgr_data -> idx_array) = NULL; (mgr_data -> Frelax_method) = NULL; (mgr_data -> VcycleRelaxVtemp) = NULL; (mgr_data -> VcycleRelaxZtemp) = NULL; (mgr_data -> FrelaxVcycleData) = NULL; (mgr_data -> Frelax_num_functions) = NULL; (mgr_data -> max_local_lvls) = 10; (mgr_data -> use_non_galerkin_cg) = NULL; (mgr_data -> print_coarse_system) = 0; (mgr_data -> set_c_points_method) = 0; (mgr_data -> lvl_to_keep_cpoints) = 0; (mgr_data -> cg_convergence_factor) = 0.0; (mgr_data -> truncate_coarse_grid_threshold) = 0.0; return (void *) mgr_data; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ /* Destroy */ HYPRE_Int hypre_MGRDestroy( void *data ) { hypre_ParMGRData * mgr_data = (hypre_ParMGRData*) data; HYPRE_Int i; HYPRE_Int num_coarse_levels = (mgr_data -> num_coarse_levels); /* block info data */ if ((mgr_data -> block_cf_marker)) { for (i=0; i < (mgr_data -> max_num_coarse_levels); i++) { if ((mgr_data -> block_cf_marker)[i]) { hypre_TFree((mgr_data -> block_cf_marker)[i], HYPRE_MEMORY_HOST); } } hypre_TFree((mgr_data -> block_cf_marker), HYPRE_MEMORY_HOST); (mgr_data -> block_cf_marker) = NULL; } if(mgr_data -> block_num_coarse_indexes) { hypre_TFree(mgr_data -> block_num_coarse_indexes, HYPRE_MEMORY_HOST); (mgr_data -> block_num_coarse_indexes) = NULL; } /* final residual vector */ if((mgr_data -> residual)) { hypre_ParVectorDestroy( (mgr_data -> residual) ); (mgr_data -> residual) = NULL; } if((mgr_data -> rel_res_norms)) { hypre_TFree( (mgr_data -> rel_res_norms) , HYPRE_MEMORY_HOST); (mgr_data -> rel_res_norms) = NULL; } /* temp vectors for solve phase */ if((mgr_data -> Vtemp)) { hypre_ParVectorDestroy( (mgr_data -> Vtemp) ); (mgr_data -> Vtemp) = NULL; } if((mgr_data -> Ztemp)) { hypre_ParVectorDestroy( (mgr_data -> Ztemp) ); (mgr_data -> Ztemp) = NULL; } if((mgr_data -> Utemp)) { hypre_ParVectorDestroy( (mgr_data -> Utemp) ); (mgr_data -> Utemp) = NULL; } if((mgr_data -> Ftemp)) { hypre_ParVectorDestroy( (mgr_data -> Ftemp) ); (mgr_data -> Ftemp) = NULL; } /* coarse grid solver */ if((mgr_data -> use_default_cgrid_solver)) { if((mgr_data -> coarse_grid_solver)) { hypre_BoomerAMGDestroy( (mgr_data -> coarse_grid_solver) ); } (mgr_data -> coarse_grid_solver) = NULL; } /* l1_norms */ if ((mgr_data -> l1_norms)) { for (i=0; i < (num_coarse_levels); i++) { hypre_SeqVectorDestroy((mgr_data -> l1_norms)[i]); } hypre_TFree((mgr_data -> l1_norms), HYPRE_MEMORY_HOST); } /* coarse_indices_lvls */ if ((mgr_data -> coarse_indices_lvls)) { for (i=0; i < (num_coarse_levels); i++) if ((mgr_data -> coarse_indices_lvls)[i]) hypre_TFree((mgr_data -> coarse_indices_lvls)[i], HYPRE_MEMORY_HOST); hypre_TFree((mgr_data -> coarse_indices_lvls), HYPRE_MEMORY_HOST); } /* linear system and cf marker array */ if(mgr_data -> A_array || mgr_data -> P_array || mgr_data -> RT_array || mgr_data -> CF_marker_array) { for (i=1; i < num_coarse_levels+1; i++) { hypre_ParVectorDestroy((mgr_data -> F_array)[i]); 
hypre_ParVectorDestroy((mgr_data -> U_array)[i]); if ((mgr_data -> P_array)[i-1]) hypre_ParCSRMatrixDestroy((mgr_data -> P_array)[i-1]); if ((mgr_data -> RT_array)[i-1]) hypre_ParCSRMatrixDestroy((mgr_data -> RT_array)[i-1]); hypre_TFree((mgr_data -> CF_marker_array)[i-1], HYPRE_MEMORY_HOST); } for (i=1; i < (num_coarse_levels); i++) { if ((mgr_data -> A_array)[i]) hypre_ParCSRMatrixDestroy((mgr_data -> A_array)[i]); } } /* AMG for Frelax */ if(mgr_data -> A_ff_array || mgr_data -> F_fine_array || mgr_data -> U_fine_array) { for (i=1; i < num_coarse_levels+1; i++) { if (mgr_data -> F_fine_array[i]) hypre_ParVectorDestroy((mgr_data -> F_fine_array)[i]); if (mgr_data -> U_fine_array[i]) hypre_ParVectorDestroy((mgr_data -> U_fine_array)[i]); } for (i=1; i < (num_coarse_levels); i++) { if ((mgr_data -> A_ff_array)[i]) hypre_ParCSRMatrixDestroy((mgr_data -> A_ff_array)[i]); } if (mgr_data -> use_default_fsolver) { hypre_ParCSRMatrixDestroy((mgr_data -> A_ff_array)[0]); } hypre_TFree(mgr_data -> F_fine_array, HYPRE_MEMORY_HOST); (mgr_data -> F_fine_array) = NULL; hypre_TFree(mgr_data -> U_fine_array, HYPRE_MEMORY_HOST); (mgr_data -> U_fine_array) = NULL; hypre_TFree(mgr_data -> A_ff_array, HYPRE_MEMORY_HOST); (mgr_data -> A_ff_array) = NULL; } if(mgr_data -> aff_solver) { for (i = 1; i < (num_coarse_levels); i++) { if ((mgr_data -> aff_solver)[i]) hypre_BoomerAMGDestroy((mgr_data -> aff_solver)[i]); } if (mgr_data -> use_default_fsolver) { if ((mgr_data -> aff_solver)[0]) hypre_BoomerAMGDestroy((mgr_data -> aff_solver)[0]); } hypre_TFree(mgr_data -> aff_solver, HYPRE_MEMORY_HOST); (mgr_data -> aff_solver) = NULL; } if((mgr_data -> F_array)) { hypre_TFree((mgr_data -> F_array), HYPRE_MEMORY_HOST); (mgr_data -> F_array) = NULL; } if((mgr_data -> U_array)) { hypre_TFree((mgr_data -> U_array), HYPRE_MEMORY_HOST); (mgr_data -> U_array) = NULL; } if((mgr_data -> A_array)) { hypre_TFree((mgr_data -> A_array), HYPRE_MEMORY_HOST); (mgr_data -> A_array) = NULL; } if((mgr_data -> P_array)) { hypre_TFree((mgr_data -> P_array), HYPRE_MEMORY_HOST); (mgr_data -> P_array) = NULL; } if((mgr_data -> RT_array)) { hypre_TFree((mgr_data -> RT_array), HYPRE_MEMORY_HOST); (mgr_data -> RT_array) = NULL; } if((mgr_data -> CF_marker_array)) { hypre_TFree((mgr_data -> CF_marker_array), HYPRE_MEMORY_HOST); (mgr_data -> CF_marker_array) = NULL; } if((mgr_data -> reserved_Cpoint_local_indexes)) { hypre_TFree((mgr_data -> reserved_Cpoint_local_indexes), HYPRE_MEMORY_HOST); (mgr_data -> reserved_Cpoint_local_indexes) = NULL; } if (mgr_data -> restrict_type) { hypre_TFree(mgr_data -> restrict_type, HYPRE_MEMORY_HOST); (mgr_data -> restrict_type) = NULL; } if (mgr_data -> interp_type) { hypre_TFree(mgr_data -> interp_type, HYPRE_MEMORY_HOST); (mgr_data -> interp_type) = NULL; } /* Frelax_method */ if (mgr_data -> Frelax_method) { hypre_TFree(mgr_data -> Frelax_method, HYPRE_MEMORY_HOST); (mgr_data -> Frelax_method) = NULL; } /* Frelax_num_functions */ if (mgr_data -> Frelax_num_functions) { hypre_TFree(mgr_data -> Frelax_num_functions, HYPRE_MEMORY_HOST); (mgr_data -> Frelax_num_functions) = NULL; } /* data for V-cycle F-relaxation */ if((mgr_data -> VcycleRelaxVtemp)) { hypre_ParVectorDestroy( (mgr_data -> VcycleRelaxVtemp) ); (mgr_data -> VcycleRelaxVtemp) = NULL; } if((mgr_data -> VcycleRelaxZtemp)) { hypre_ParVectorDestroy( (mgr_data -> VcycleRelaxZtemp) ); (mgr_data -> VcycleRelaxZtemp) = NULL; } if (mgr_data -> FrelaxVcycleData) { for (i = 0; i < num_coarse_levels; i++) { if ((mgr_data -> FrelaxVcycleData)[i]) { 
hypre_MGRDestroyFrelaxVcycleData((mgr_data -> FrelaxVcycleData)[i]); (mgr_data -> FrelaxVcycleData)[i] = NULL; } } hypre_TFree(mgr_data -> FrelaxVcycleData, HYPRE_MEMORY_HOST); (mgr_data -> FrelaxVcycleData) = NULL; } /* data for reserved coarse nodes */ if(mgr_data -> reserved_coarse_indexes) { hypre_TFree(mgr_data -> reserved_coarse_indexes, HYPRE_MEMORY_HOST); (mgr_data -> reserved_coarse_indexes) = NULL; } /* index array for setting Cpoints by global block */ if ((mgr_data -> set_c_points_method) == 1) { hypre_TFree(mgr_data -> idx_array, HYPRE_MEMORY_HOST); (mgr_data -> idx_array) = NULL; } /* array for setting option to use non-Galerkin coarse grid */ if (mgr_data -> use_non_galerkin_cg) { hypre_TFree(mgr_data -> use_non_galerkin_cg, HYPRE_MEMORY_HOST); (mgr_data -> use_non_galerkin_cg) = NULL; } /* coarse level matrix - RAP */ if ((mgr_data -> RAP)) hypre_ParCSRMatrixDestroy((mgr_data -> RAP)); if ((mgr_data -> diaginv)) hypre_TFree((mgr_data -> diaginv), HYPRE_MEMORY_HOST); if ((mgr_data -> global_smoother)) { if (mgr_data -> global_smooth_type == 8) { HYPRE_EuclidDestroy((mgr_data -> global_smoother)); } else if (mgr_data -> global_smooth_type == 16) { HYPRE_ILUDestroy((mgr_data -> global_smoother)); } } /* mgr data */ hypre_TFree(mgr_data, HYPRE_MEMORY_HOST); return hypre_error_flag; } /* Create data for V-cycle F-relaxtion */ void * hypre_MGRCreateFrelaxVcycleData() { hypre_ParAMGData *vdata = hypre_CTAlloc(hypre_ParAMGData, 1, HYPRE_MEMORY_HOST); hypre_ParAMGDataAArray(vdata) = NULL; hypre_ParAMGDataPArray(vdata) = NULL; hypre_ParAMGDataFArray(vdata) = NULL; hypre_ParAMGDataCFMarkerArray(vdata) = NULL; hypre_ParAMGDataVtemp(vdata) = NULL; hypre_ParAMGDataAMat(vdata) = NULL; hypre_ParAMGDataBVec(vdata) = NULL; hypre_ParAMGDataZtemp(vdata) = NULL; hypre_ParAMGDataCommInfo(vdata) = NULL; hypre_ParAMGDataUArray(vdata) = NULL; hypre_ParAMGDataNewComm(vdata) = hypre_MPI_COMM_NULL; hypre_ParAMGDataNumLevels(vdata) = 0; hypre_ParAMGDataMaxLevels(vdata) = 10; hypre_ParAMGDataNumFunctions(vdata) = 1; hypre_ParAMGDataSCommPkgSwitch(vdata) = 1.0; hypre_ParAMGDataRelaxOrder(vdata) = 1; hypre_ParAMGDataMaxCoarseSize(vdata) = 9; hypre_ParAMGDataMinCoarseSize(vdata) = 0; hypre_ParAMGDataUserCoarseRelaxType(vdata) = 9; return (void *) vdata; } /* Destroy data for V-cycle F-relaxation */ HYPRE_Int hypre_MGRDestroyFrelaxVcycleData( void *data ) { hypre_ParAMGData * vdata = (hypre_ParAMGData*) data; HYPRE_Int i; HYPRE_Int num_levels = hypre_ParAMGDataNumLevels(vdata); MPI_Comm new_comm = hypre_ParAMGDataNewComm(vdata); hypre_TFree(hypre_ParAMGDataDofFuncArray(vdata)[0], HYPRE_MEMORY_HOST); for (i=1; i < num_levels + 1; i++) { if (hypre_ParAMGDataAArray(vdata)[i]) hypre_ParCSRMatrixDestroy(hypre_ParAMGDataAArray(vdata)[i]); if (hypre_ParAMGDataPArray(vdata)[i-1]) hypre_ParCSRMatrixDestroy(hypre_ParAMGDataPArray(vdata)[i-1]); hypre_TFree(hypre_ParAMGDataCFMarkerArray(vdata)[i-1], HYPRE_MEMORY_HOST); hypre_ParVectorDestroy(hypre_ParAMGDataFArray(vdata)[i]); hypre_ParVectorDestroy(hypre_ParAMGDataUArray(vdata)[i]); hypre_TFree(hypre_ParAMGDataDofFuncArray(vdata)[i], HYPRE_MEMORY_HOST); } /* see comments in par_coarsen.c regarding special case for CF_marker */ if (num_levels <= 1) { hypre_TFree(hypre_ParAMGDataCFMarkerArray(vdata)[0], HYPRE_MEMORY_HOST); } /* Points to VcycleRelaxVtemp of mgr_data, which is already destroyed */ //hypre_ParVectorDestroy(hypre_ParAMGDataVtemp(vdata)); hypre_TFree(hypre_ParAMGDataFArray(vdata), HYPRE_MEMORY_HOST); hypre_TFree(hypre_ParAMGDataUArray(vdata), 
HYPRE_MEMORY_HOST); hypre_TFree(hypre_ParAMGDataAArray(vdata), HYPRE_MEMORY_HOST); hypre_TFree(hypre_ParAMGDataPArray(vdata), HYPRE_MEMORY_HOST); hypre_TFree(hypre_ParAMGDataCFMarkerArray(vdata), HYPRE_MEMORY_HOST); //hypre_TFree(hypre_ParAMGDataGridRelaxType(vdata), HYPRE_MEMORY_HOST); hypre_TFree(hypre_ParAMGDataDofFuncArray(vdata), HYPRE_MEMORY_HOST); /* Points to VcycleRelaxZtemp of mgr_data, which is already destroyed */ /* if (hypre_ParAMGDataZtemp(vdata)) hypre_ParVectorDestroy(hypre_ParAMGDataZtemp(vdata)); */ if (hypre_ParAMGDataAMat(vdata)) hypre_TFree(hypre_ParAMGDataAMat(vdata), HYPRE_MEMORY_HOST); if (hypre_ParAMGDataBVec(vdata)) hypre_TFree(hypre_ParAMGDataBVec(vdata), HYPRE_MEMORY_HOST); if (hypre_ParAMGDataCommInfo(vdata)) hypre_TFree(hypre_ParAMGDataCommInfo(vdata), HYPRE_MEMORY_HOST); if (new_comm != hypre_MPI_COMM_NULL) { hypre_MPI_Comm_free (&new_comm); } hypre_TFree(vdata, HYPRE_MEMORY_HOST); return hypre_error_flag; } /* Set C-point variables for each reduction level */ /* Currently not implemented */ HYPRE_Int hypre_MGRSetReductionLevelCpoints( void *mgr_vdata, HYPRE_Int nlevels, HYPRE_Int *num_coarse_points, HYPRE_Int **level_coarse_indexes) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> num_coarse_levels) = nlevels; (mgr_data -> num_coarse_per_level) = num_coarse_points; (mgr_data -> level_coarse_indexes) = level_coarse_indexes; return hypre_error_flag; } /* Initialize some data */ /* Set whether non-coarse points on each level should be explicitly tagged as F-points */ HYPRE_Int hypre_MGRSetNonCpointsToFpoints( void *mgr_vdata, HYPRE_Int nonCptToFptFlag) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> set_non_Cpoints_to_F) = nonCptToFptFlag; return hypre_error_flag; } /* Set whether the reserved C points are reduced before the coarse grid solve */ HYPRE_Int hypre_MGRSetReservedCpointsLevelToKeep(void *mgr_vdata, HYPRE_Int level) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> lvl_to_keep_cpoints) = level; return hypre_error_flag; } /* Set Cpoints by contiguous blocks, i.e. p1, p2, ..., pn, s1, s2, ..., sn, ... 
*/ HYPRE_Int hypre_MGRSetCpointsByContiguousBlock( void *mgr_vdata, HYPRE_Int block_size, HYPRE_Int max_num_levels, HYPRE_BigInt *begin_idx_array, HYPRE_Int *block_num_coarse_points, HYPRE_Int **block_coarse_indexes) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i; if((mgr_data -> idx_array) != NULL) { hypre_TFree(mgr_data -> idx_array, HYPRE_MEMORY_HOST); (mgr_data -> idx_array) = NULL; } HYPRE_BigInt *index_array = hypre_CTAlloc(HYPRE_BigInt, block_size, HYPRE_MEMORY_HOST); if (begin_idx_array != NULL) { for (i = 0; i < block_size; i++) { index_array[i] = *(begin_idx_array+i); } } hypre_MGRSetCpointsByBlock(mgr_data, block_size, max_num_levels, block_num_coarse_points, block_coarse_indexes); (mgr_data -> idx_array) = index_array; (mgr_data -> set_c_points_method) = 1; return hypre_error_flag; } /* Initialize/ set local block data information */ HYPRE_Int hypre_MGRSetCpointsByBlock( void *mgr_vdata, HYPRE_Int block_size, HYPRE_Int max_num_levels, HYPRE_Int *block_num_coarse_points, HYPRE_Int **block_coarse_indexes) { HYPRE_Int i,j; HYPRE_Int **block_cf_marker = NULL; HYPRE_Int *block_num_coarse_indexes = NULL; hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; /* free block cf_marker data if not previously destroyed */ if((mgr_data -> block_cf_marker) != NULL) { for (i=0; i < (mgr_data -> max_num_coarse_levels); i++) { if ((mgr_data -> block_cf_marker)[i]) { hypre_TFree((mgr_data -> block_cf_marker)[i], HYPRE_MEMORY_HOST); (mgr_data -> block_cf_marker)[i] = NULL; } } hypre_TFree(mgr_data -> block_cf_marker, HYPRE_MEMORY_HOST); (mgr_data -> block_cf_marker) = NULL; } if((mgr_data -> block_num_coarse_indexes)) { hypre_TFree((mgr_data -> block_num_coarse_indexes), HYPRE_MEMORY_HOST); (mgr_data -> block_num_coarse_indexes) = NULL; } /* store block cf_marker */ block_cf_marker = hypre_CTAlloc(HYPRE_Int *, max_num_levels, HYPRE_MEMORY_HOST); for (i = 0; i < max_num_levels; i++) { block_cf_marker[i] = hypre_CTAlloc(HYPRE_Int, block_size, HYPRE_MEMORY_HOST); memset(block_cf_marker[i], FMRK, block_size*sizeof(HYPRE_Int)); } for (i = 0; i < max_num_levels; i++) { for(j=0; j<block_num_coarse_points[i]; j++) { (block_cf_marker[i])[block_coarse_indexes[i][j]] = CMRK; } } /* store block_num_coarse_points */ if(max_num_levels > 0) { block_num_coarse_indexes = hypre_CTAlloc(HYPRE_Int, max_num_levels, HYPRE_MEMORY_HOST); for(i=0; i<max_num_levels; i++) block_num_coarse_indexes[i] = block_num_coarse_points[i]; } /* set block data */ (mgr_data -> max_num_coarse_levels) = max_num_levels; (mgr_data -> block_size) = block_size; (mgr_data -> block_num_coarse_indexes) = block_num_coarse_indexes; (mgr_data -> block_cf_marker) = block_cf_marker; (mgr_data -> set_c_points_method) = 0; return hypre_error_flag; } HYPRE_Int hypre_MGRSetCpointsByPointMarkerArray( void *mgr_vdata, HYPRE_Int block_size, HYPRE_Int max_num_levels, HYPRE_Int *lvl_num_coarse_points, HYPRE_Int **lvl_coarse_indexes, HYPRE_Int *point_marker_array) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i,j; HYPRE_Int **block_cf_marker = NULL; HYPRE_Int *block_num_coarse_indexes = NULL; /* free block cf_marker data if not previously destroyed */ if((mgr_data -> block_cf_marker) != NULL) { for (i=0; i < (mgr_data -> max_num_coarse_levels); i++) { if ((mgr_data -> block_cf_marker)[i]) { hypre_TFree((mgr_data -> block_cf_marker)[i], HYPRE_MEMORY_HOST); (mgr_data -> block_cf_marker)[i] = NULL; } } hypre_TFree(mgr_data -> block_cf_marker, HYPRE_MEMORY_HOST); (mgr_data -> block_cf_marker) = NULL; } 
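  /* set_c_points_method records how the C-points were prescribed by the
   * setters in this file:
   *   0 - hypre_MGRSetCpointsByBlock: CMRK/FMRK flags per position within an
   *       interleaved block of size block_size;
   *   1 - hypre_MGRSetCpointsByContiguousBlock: the same flags plus idx_array
   *       holding the first global index of each contiguous block;
   *   2 - this routine: block_cf_marker[l] appears to store, for level l, the
   *       point_marker_array values that remain coarse rather than
   *       per-position flags.
   * Illustrative use of mode 2 (hypothetical labels): tag each local dof with
   * point_marker_array[i] = 1 (displacement) or 2 (pressure) and pass
   * lvl_coarse_indexes[0] = {2} to keep only the pressure dofs coarse on the
   * first level. */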
if((mgr_data -> block_num_coarse_indexes)) { hypre_TFree((mgr_data -> block_num_coarse_indexes), HYPRE_MEMORY_HOST); (mgr_data -> block_num_coarse_indexes) = NULL; } /* store block cf_marker */ block_cf_marker = hypre_CTAlloc(HYPRE_Int *, max_num_levels, HYPRE_MEMORY_HOST); for (i = 0; i < max_num_levels; i++) { block_cf_marker[i] = hypre_CTAlloc(HYPRE_Int, block_size, HYPRE_MEMORY_HOST); memset(block_cf_marker[i], FMRK, block_size*sizeof(HYPRE_Int)); } for (i = 0; i < max_num_levels; i++) { for(j=0; j<lvl_num_coarse_points[i]; j++) { block_cf_marker[i][j] = lvl_coarse_indexes[i][j]; } } /* store block_num_coarse_points */ if(max_num_levels > 0) { block_num_coarse_indexes = hypre_CTAlloc(HYPRE_Int, max_num_levels, HYPRE_MEMORY_HOST); for(i=0; i<max_num_levels; i++) block_num_coarse_indexes[i] = lvl_num_coarse_points[i]; } /* set block data */ (mgr_data -> max_num_coarse_levels) = max_num_levels; (mgr_data -> block_size) = block_size; (mgr_data -> block_num_coarse_indexes) = block_num_coarse_indexes; (mgr_data -> block_cf_marker) = block_cf_marker; (mgr_data -> point_marker_array) = point_marker_array; (mgr_data -> set_c_points_method) = 2; return hypre_error_flag; } /*Set number of points that remain part of the coarse grid throughout the hierarchy */ HYPRE_Int hypre_MGRSetReservedCoarseNodes(void *mgr_vdata, HYPRE_Int reserved_coarse_size, HYPRE_BigInt *reserved_cpt_index) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_BigInt *reserved_coarse_indexes = NULL; HYPRE_Int i; if (!mgr_data) { hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Warning! MGR object empty!\n"); return hypre_error_flag; } if(reserved_coarse_size < 0) { hypre_error_in_arg(2); return hypre_error_flag; } /* free data not previously destroyed */ if((mgr_data -> reserved_coarse_indexes)) { hypre_TFree((mgr_data -> reserved_coarse_indexes), HYPRE_MEMORY_HOST); (mgr_data -> reserved_coarse_indexes) = NULL; } /* set reserved coarse nodes */ if(reserved_coarse_size > 0) { reserved_coarse_indexes = hypre_CTAlloc(HYPRE_BigInt, reserved_coarse_size, HYPRE_MEMORY_HOST); for(i=0; i<reserved_coarse_size; i++) reserved_coarse_indexes[i] = reserved_cpt_index[i]; } (mgr_data -> reserved_coarse_size) = reserved_coarse_size; (mgr_data -> reserved_coarse_indexes) = reserved_coarse_indexes; return hypre_error_flag; } /* Set CF marker array */ HYPRE_Int hypre_MGRCoarsen(hypre_ParCSRMatrix *S, hypre_ParCSRMatrix *A, HYPRE_Int fixed_coarse_size, HYPRE_Int *fixed_coarse_indexes, HYPRE_Int debug_flag, HYPRE_Int **CF_marker_ptr, HYPRE_Int cflag) { HYPRE_Int *CF_marker = NULL; HYPRE_Int *cindexes = fixed_coarse_indexes; HYPRE_Int i, row, nc; HYPRE_Int nloc = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A)); /* If this is the last level, coarsen onto fixed coarse set */ if(cflag) { if(*CF_marker_ptr != NULL) { hypre_TFree(*CF_marker_ptr, HYPRE_MEMORY_HOST); } CF_marker = hypre_CTAlloc(HYPRE_Int, nloc, HYPRE_MEMORY_HOST); memset(CF_marker, FMRK, nloc*sizeof(HYPRE_Int)); /* first mark fixed coarse set */ nc = fixed_coarse_size; for(i = 0; i < nc; i++) { CF_marker[cindexes[i]] = CMRK; } } else { /* First coarsen to get initial CF splitting. * This is then followed by updating the CF marker to pass * coarse information to the next levels. NOTE: It may be * convenient to implement this way (allows the use of multiple * coarsening strategies without changing too much code), * but not necessarily the best option, compared to initializing * CF_marker first and then coarsening on subgraph which excludes * the initialized coarse nodes. 
*/ hypre_BoomerAMGCoarsen(S, A, 0, debug_flag, &CF_marker); /* Update CF_marker to correct Cpoints marked as Fpoints. */ nc = fixed_coarse_size; for(i = 0; i < nc; i++) { CF_marker[cindexes[i]] = CMRK; } /* set F-points to FMRK. This is necessary since the different coarsening schemes differentiate * between type of F-points (example Ruge coarsening). We do not need that distinction here. */ for (row = 0; row <nloc; row++) { if(CF_marker[row] == CMRK) continue; CF_marker[row] = FMRK; } #if 0 /* IMPORTANT: Update coarse_indexes array to define the positions of the fixed coarse points * in the next level. */ nc = 0; index_i = 0; for (row = 0; row <nloc; row++) { /* loop through new c-points */ if(CF_marker[row] == CMRK) nc++; else if(CF_marker[row] == S_CMRK) { /* previously marked c-point is part of fixed coarse set. Track its current local index */ cindexes[index_i++] = nc; /* reset c-point from S_CMRK to CMRK */ cf_marker[row] = CMRK; nc++; } /* set F-points to FMRK. This is necessary since the different coarsening schemes differentiate * between type of F-points (example Ruge coarsening). We do not need that distinction here. */ else { CF_marker[row] = FMRK; } } /* check if this should be last level */ if( nc == fixed_coarse_size) last_level = 1; //printf(" nc = %d and fixed coarse size = %d \n", nc, fixed_coarse_size); #endif } /* set CF_marker */ *CF_marker_ptr = CF_marker; return hypre_error_flag; } /* Interpolation for MGR - Adapted from BoomerAMGBuildInterp */ HYPRE_Int hypre_MGRBuildP( hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, HYPRE_BigInt *num_cpts_global, HYPRE_Int method, HYPRE_Int debug_flag, hypre_ParCSRMatrix **P_ptr) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_Real *a_diag; hypre_ParCSRMatrix *P; HYPRE_BigInt *col_map_offd_P; HYPRE_Int *tmp_map_offd = NULL; HYPRE_Int *CF_marker_offd = NULL; hypre_CSRMatrix *P_diag; hypre_CSRMatrix *P_offd; HYPRE_Real *P_diag_data; HYPRE_Int *P_diag_i; HYPRE_Int *P_diag_j; HYPRE_Real *P_offd_data; HYPRE_Int *P_offd_i; HYPRE_Int *P_offd_j; HYPRE_Int P_diag_size, P_offd_size; HYPRE_Int *P_marker, *P_marker_offd; HYPRE_Int jj_counter,jj_counter_offd; HYPRE_Int *jj_count, *jj_count_offd; // HYPRE_Int jj_begin_row,jj_begin_row_offd; // HYPRE_Int jj_end_row,jj_end_row_offd; HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */ HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int *fine_to_coarse; //HYPRE_BigInt *fine_to_coarse_offd; HYPRE_Int *coarse_counter; HYPRE_Int coarse_shift; HYPRE_BigInt total_global_cpts; //HYPRE_BigInt my_first_cpt; HYPRE_Int num_cols_P_offd; HYPRE_Int i,i1; HYPRE_Int j,jl,jj; HYPRE_Int start; HYPRE_Real one = 1.0; HYPRE_Int my_id; HYPRE_Int num_procs; HYPRE_Int num_threads; HYPRE_Int num_sends; HYPRE_Int index; HYPRE_Int ns, ne, size, rest; HYPRE_Int *int_buf_data; HYPRE_Real wall_time; /* for debugging instrumentation */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm,&my_id); //num_threads = hypre_NumThreads(); // 
Temporary fix, disable threading // TODO: enable threading num_threads = 1; #ifdef HYPRE_NO_GLOBAL_PARTITION //my_first_cpt = num_cpts_global[0]; if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1]; hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); #else //my_first_cpt = num_cpts_global[my_id]; total_global_cpts = num_cpts_global[num_procs]; #endif /*------------------------------------------------------------------- * Get the CF_marker data for the off-processor columns *-------------------------------------------------------------------*/ if (debug_flag < 0) { debug_flag = -debug_flag; } if (debug_flag==4) wall_time = time_getWallclockSeconds(); if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * First Pass: Determine size of P and fill in fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. *-----------------------------------------------------------------------*/ coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1; jj_counter = start_indexing; jj_counter_offd = start_indexing; /*----------------------------------------------------------------------- * Loop over fine grid. *-----------------------------------------------------------------------*/ /* RDF: this looks a little tricky, but doable */ #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE #endif #endif for (j = 0; j < num_threads; j++) { size = n_fine/num_threads; rest = n_fine - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a C-point, interpolation is the identity. Also set up * mapping vector. 
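 * (This first pass only counts entries: jj_count[j] and jj_count_offd[j] gain
 * one entry for each C-point row and one for every C-point neighbor of an
 * F-point row when method > 0, while fine_to_coarse records the local coarse
 * index of each C-point.  Column indices and weights are filled in the second
 * pass below.)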
*--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { jj_count[j]++; fine_to_coarse[i] = coarse_counter[j]; coarse_counter[j]++; } /*-------------------------------------------------------------------- * If i is an F-point, interpolation is the approximation of A_{ff}^{-1}A_{fc} *--------------------------------------------------------------------*/ else { for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { i1 = A_diag_j[jj]; if ((CF_marker[i1] >= 0) && (method > 0)) { jj_count[j]++; } } if (num_procs > 1) { for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { i1 = A_offd_j[jj]; if ((CF_marker_offd[i1] >= 0) && (method > 0)) { jj_count_offd[j]++; } } } } } } /*----------------------------------------------------------------------- * Allocate arrays. *-----------------------------------------------------------------------*/ for (i=0; i < num_threads-1; i++) { coarse_counter[i+1] += coarse_counter[i]; jj_count[i+1] += jj_count[i]; jj_count_offd[i+1] += jj_count_offd[i]; } i = num_threads-1; jj_counter = jj_count[i]; jj_counter_offd = jj_count_offd[i]; P_diag_size = jj_counter; P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE); P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_DEVICE); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_DEVICE); P_diag_i[n_fine] = jj_counter; P_offd_size = jj_counter_offd; P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE); P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_DEVICE); P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_DEVICE); /*----------------------------------------------------------------------- * Intialize some stuff. *-----------------------------------------------------------------------*/ jj_counter = start_indexing; jj_counter_offd = start_indexing; if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Internal work 1 = %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * Send and receive fine_to_coarse info. 
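 * (With threading currently disabled, the loop below simply applies the
 * prefix sums computed above so that fine_to_coarse holds process-local
 * coarse indices.  The commented-out exchange of fine_to_coarse appears to be
 * unnecessary here because P_offd_j temporarily stores the local off-diagonal
 * column indices of A, which are compressed and remapped through tmp_map_offd
 * and hypre_GetCommPkgRTFromCommPkgA at the end of this routine.)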
*-----------------------------------------------------------------------*/ if (debug_flag==4) wall_time = time_getWallclockSeconds(); //fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_A_offd, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE #endif #endif for (j = 0; j < num_threads; j++) { coarse_shift = 0; if (j > 0) coarse_shift = coarse_counter[j-1]; size = n_fine/num_threads; rest = n_fine - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) { fine_to_coarse[i] += coarse_shift; } } /* index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) big_buf_data[index++] = fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]+ my_first_cpt; } comm_handle = hypre_ParCSRCommHandleCreate( 21, comm_pkg, big_buf_data, fine_to_coarse_offd); hypre_ParCSRCommHandleDestroy(comm_handle); */ if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n", my_id, wall_time); fflush(NULL); } if (debug_flag==4) wall_time = time_getWallclockSeconds(); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif //for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt; /*----------------------------------------------------------------------- * Loop over fine grid points. *-----------------------------------------------------------------------*/ a_diag = hypre_CTAlloc(HYPRE_Real, n_fine, HYPRE_MEMORY_HOST); for (i = 0; i < n_fine; i++) { if (CF_marker[i] < 0) { for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { i1 = A_diag_j[jj]; if ( i==i1 ) /* diagonal of A only */ { a_diag[i] = 1.0/A_diag_data[jj]; } } } } #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,jl,i1,jj,ns,ne,size,rest,P_marker,P_marker_offd,jj_counter,jj_counter_offd,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE #endif #endif for (jl = 0; jl < num_threads; jl++) { size = n_fine/num_threads; rest = n_fine - size*num_threads; if (jl < rest) { ns = jl*size+jl; ne = (jl+1)*size+jl+1; } else { ns = jl*size+rest; ne = (jl+1)*size+rest; } jj_counter = 0; if (jl > 0) jj_counter = jj_count[jl-1]; jj_counter_offd = 0; if (jl > 0) jj_counter_offd = jj_count_offd[jl-1]; P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); if (num_cols_A_offd) P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); else P_marker_offd = NULL; for (i = 0; i < n_fine; i++) { P_marker[i] = -1; } for (i = 0; i < num_cols_A_offd; i++) { P_marker_offd[i] = -1; } for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a c-point, interpolation is the identity. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { P_diag_i[i] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i]; P_diag_data[jj_counter] = one; jj_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, build interpolation. 
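 * (For a C-point neighbor j of an F-point row i the assembled weight is
 *    method 1:  P(i, c(j)) = -a_ij
 *    method 2:  P(i, c(j)) = -a_ij / a_ii,   with a_diag[i] = 1/a_ii.
 *  With method 0 the F-point rows are left empty, so nothing is prolongated
 *  to F-points.)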
*--------------------------------------------------------------------*/ else { /* Diagonal part of P */ P_diag_i[i] = jj_counter; for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { i1 = A_diag_j[jj]; /*-------------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_diag_j * and initialize interpolation weight to zero. *--------------------------------------------------------------*/ if ((CF_marker[i1] >= 0) && (method > 0)) { P_marker[i1] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i1]; /* if(method == 0) { P_diag_data[jj_counter] = 0.0; } */ if (method == 1) { P_diag_data[jj_counter] = - A_diag_data[jj]; } else if (method == 2) { P_diag_data[jj_counter] = - A_diag_data[jj]*a_diag[i]; } jj_counter++; } } /* Off-Diagonal part of P */ P_offd_i[i] = jj_counter_offd; if (num_procs > 1) { for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { i1 = A_offd_j[jj]; /*----------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_offd_j * and initialize interpolation weight to zero. *-----------------------------------------------------------*/ if ((CF_marker_offd[i1] >= 0) && (method > 0)) { P_marker_offd[i1] = jj_counter_offd; /*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/ P_offd_j[jj_counter_offd] = i1; /* if(method == 0) { P_offd_data[jj_counter_offd] = 0.0; } */ if (method == 1) { P_offd_data[jj_counter_offd] = - A_offd_data[jj]; } else if (method == 2) { P_offd_data[jj_counter_offd] = - A_offd_data[jj]*a_diag[i]; } jj_counter_offd++; } } } } P_offd_i[i+1] = jj_counter_offd; } hypre_TFree(P_marker, HYPRE_MEMORY_HOST); hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST); } hypre_TFree(a_diag, HYPRE_MEMORY_HOST); P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), total_global_cpts, hypre_ParCSRMatrixColStarts(A), num_cpts_global, 0, P_diag_i[n_fine], P_offd_i[n_fine]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; hypre_ParCSRMatrixOwnsRowStarts(P) = 0; num_cols_P_offd = 0; if (P_offd_size) { P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i=0; i < num_cols_A_offd; i++) P_marker[i] = 0; num_cols_P_offd = 0; for (i=0; i < P_offd_size; i++) { index = P_offd_j[i]; if (!P_marker[index]) { num_cols_P_offd++; P_marker[index] = 1; } } col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST); tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST); index = 0; for (i=0; i < num_cols_P_offd; i++) { while (P_marker[index]==0) index++; tmp_map_offd[i] = index++; } #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i=0; i < P_offd_size; i++) P_offd_j[i] = hypre_BinarySearch(tmp_map_offd, P_offd_j[i], num_cols_P_offd); hypre_TFree(P_marker, HYPRE_MEMORY_HOST); } for (i=0; i < n_fine; i++) if (CF_marker[i] == -3) CF_marker[i] = -1; if (num_cols_P_offd) { hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P; hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd; } hypre_GetCommPkgRTFromCommPkgA(P,A, fine_to_coarse, tmp_map_offd); *P_ptr = P; hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST); 
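   /* Usage sketch for this routine (illustrative only; num_cpts_global is
    * assumed to hold the usual two-entry global C-point range used under
    * HYPRE_NO_GLOBAL_PARTITION):
    *
    *    hypre_ParCSRMatrix *P = NULL;
    *    hypre_MGRBuildP(A, CF_marker, num_cpts_global, 2, 0, &P);
    *
    * method = 2 assembles the diagonally scaled interpolation built above:
    * identity rows at C-points and weights -a_ij/a_ii at F-points, i.e. an
    * approximation of [-inv(diag(A_ff)) A_fc ; I]. */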
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST); //hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST); hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST); hypre_TFree(jj_count, HYPRE_MEMORY_HOST); hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST); return(0); } /* Interpolation for MGR - Dynamic Row Sum method */ HYPRE_Int hypre_MGRBuildPDRS( hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, HYPRE_BigInt *num_cpts_global, HYPRE_Int blk_size, HYPRE_Int reserved_coarse_size, HYPRE_Int debug_flag, hypre_ParCSRMatrix **P_ptr) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_Real *a_diag; hypre_ParCSRMatrix *P; HYPRE_BigInt *col_map_offd_P; HYPRE_Int *tmp_map_offd; HYPRE_Int *CF_marker_offd = NULL; hypre_CSRMatrix *P_diag; hypre_CSRMatrix *P_offd; HYPRE_Real *P_diag_data; HYPRE_Int *P_diag_i; HYPRE_Int *P_diag_j; HYPRE_Real *P_offd_data; HYPRE_Int *P_offd_i; HYPRE_Int *P_offd_j; HYPRE_Int P_diag_size, P_offd_size; HYPRE_Int *P_marker, *P_marker_offd; HYPRE_Int jj_counter,jj_counter_offd; HYPRE_Int *jj_count, *jj_count_offd; // HYPRE_Int jj_begin_row,jj_begin_row_offd; // HYPRE_Int jj_end_row,jj_end_row_offd; HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */ HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int *fine_to_coarse; //HYPRE_BigInt *fine_to_coarse_offd; HYPRE_Int *coarse_counter; HYPRE_Int coarse_shift; HYPRE_BigInt total_global_cpts; //HYPRE_BigInt my_first_cpt; HYPRE_Int num_cols_P_offd; HYPRE_Int i,i1; HYPRE_Int j,jl,jj; HYPRE_Int start; HYPRE_Real one = 1.0; HYPRE_Int my_id; HYPRE_Int num_procs; HYPRE_Int num_threads; HYPRE_Int num_sends; HYPRE_Int index; HYPRE_Int ns, ne, size, rest; HYPRE_Int *int_buf_data; HYPRE_Real wall_time; /* for debugging instrumentation */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm,&my_id); //num_threads = hypre_NumThreads(); // Temporary fix, disable threading // TODO: enable threading num_threads = 1; #ifdef HYPRE_NO_GLOBAL_PARTITION //my_first_cpt = num_cpts_global[0]; if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1]; hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); #else //my_first_cpt = num_cpts_global[my_id]; total_global_cpts = num_cpts_global[num_procs]; #endif /*------------------------------------------------------------------- * Get the CF_marker data for the off-processor columns *-------------------------------------------------------------------*/ if (debug_flag < 0) { debug_flag = -debug_flag; } if (debug_flag==4) wall_time = time_getWallclockSeconds(); if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; 
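   /* As in hypre_MGRBuildP above: the loop below packs the CF_marker entry of
    * every send-map element into int_buf_data, and the
    * hypre_ParCSRCommHandleCreate(11, ...) exchange fills CF_marker_offd with
    * the C/F flag of each off-processor column of A. */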
for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * First Pass: Determine size of P and fill in fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. *-----------------------------------------------------------------------*/ coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1; jj_counter = start_indexing; jj_counter_offd = start_indexing; /*----------------------------------------------------------------------- * Loop over fine grid. *-----------------------------------------------------------------------*/ /* RDF: this looks a little tricky, but doable */ #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE #endif #endif for (j = 0; j < num_threads; j++) { size = n_fine/num_threads; rest = n_fine - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a C-point, interpolation is the identity. Also set up * mapping vector. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { jj_count[j]++; fine_to_coarse[i] = coarse_counter[j]; coarse_counter[j]++; } /*-------------------------------------------------------------------- * If i is an F-point, interpolation is the approximation of A_{ff}^{-1}A_{fc} *--------------------------------------------------------------------*/ else { for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { i1 = A_diag_j[jj]; if (CF_marker[i1] >= 0) { jj_count[j]++; } } if (num_procs > 1) { for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { i1 = A_offd_j[jj]; if (CF_marker_offd[i1] >= 0) { jj_count_offd[j]++; } } } } /*-------------------------------------------------------------------- * Set up the indexes for the DRS method *--------------------------------------------------------------------*/ } } /*----------------------------------------------------------------------- * Allocate arrays. 
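 * (The loop below turns the per-thread counters into running prefix sums, so
 *  coarse_counter[num_threads-1] and jj_count[num_threads-1] end up holding
 *  the totals used to size P_diag and P_offd.  For example, with two threads
 *  and per-thread counts {3, 5} the prefix sums become {3, 8} and
 *  P_diag_size = 8.)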
*-----------------------------------------------------------------------*/ for (i=0; i < num_threads-1; i++) { coarse_counter[i+1] += coarse_counter[i]; jj_count[i+1] += jj_count[i]; jj_count_offd[i+1] += jj_count_offd[i]; } i = num_threads-1; jj_counter = jj_count[i]; jj_counter_offd = jj_count_offd[i]; P_diag_size = jj_counter; P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST); P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST); P_diag_i[n_fine] = jj_counter; P_offd_size = jj_counter_offd; P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST); P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST); P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST); /*----------------------------------------------------------------------- * Intialize some stuff. *-----------------------------------------------------------------------*/ jj_counter = start_indexing; jj_counter_offd = start_indexing; if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Internal work 1 = %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * Send and receive fine_to_coarse info. *-----------------------------------------------------------------------*/ if (debug_flag==4) wall_time = time_getWallclockSeconds(); //fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE #endif #endif for (j = 0; j < num_threads; j++) { coarse_shift = 0; if (j > 0) coarse_shift = coarse_counter[j-1]; size = n_fine/num_threads; rest = n_fine - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) fine_to_coarse[i] += coarse_shift; } /*index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, fine_to_coarse_offd); hypre_ParCSRCommHandleDestroy(comm_handle); */ if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n", my_id, wall_time); fflush(NULL); } if (debug_flag==4) wall_time = time_getWallclockSeconds(); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif //for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt; /*----------------------------------------------------------------------- * Loop over fine grid points. 
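 * (Unlike hypre_MGRBuildP, the loop below stores 1/a_ii in a_diag[i] for
 *  every row, not only for F-points, and the weights assembled afterwards are
 *  always -a_ij/a_ii, matching method 2 above.  The "Set up the indexes for
 *  the DRS method" block in the first pass appears to be a placeholder, and
 *  blk_size and reserved_coarse_size do not yet enter the weight
 *  computation.)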
*-----------------------------------------------------------------------*/ a_diag = hypre_CTAlloc(HYPRE_Real, n_fine, HYPRE_MEMORY_HOST); for (i = 0; i < n_fine; i++) { for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { i1 = A_diag_j[jj]; if ( i==i1 ) /* diagonal of A only */ { a_diag[i] = 1.0/A_diag_data[jj]; } } } #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,jl,i1,jj,ns,ne,size,rest,P_marker,P_marker_offd,jj_counter,jj_counter_offd,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE #endif #endif for (jl = 0; jl < num_threads; jl++) { size = n_fine/num_threads; rest = n_fine - size*num_threads; if (jl < rest) { ns = jl*size+jl; ne = (jl+1)*size+jl+1; } else { ns = jl*size+rest; ne = (jl+1)*size+rest; } jj_counter = 0; if (jl > 0) jj_counter = jj_count[jl-1]; jj_counter_offd = 0; if (jl > 0) jj_counter_offd = jj_count_offd[jl-1]; P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); if (num_cols_A_offd) P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); else P_marker_offd = NULL; for (i = 0; i < n_fine; i++) { P_marker[i] = -1; } for (i = 0; i < num_cols_A_offd; i++) { P_marker_offd[i] = -1; } for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a c-point, interpolation is the identity. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { P_diag_i[i] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i]; P_diag_data[jj_counter] = one; jj_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, build interpolation. *--------------------------------------------------------------------*/ else { /* Diagonal part of P */ P_diag_i[i] = jj_counter; for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { i1 = A_diag_j[jj]; /*-------------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_diag_j * and initialize interpolation weight to zero. *--------------------------------------------------------------*/ if (CF_marker[i1] >= 0) { P_marker[i1] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i1]; P_diag_data[jj_counter] = - A_diag_data[jj]*a_diag[i]; jj_counter++; } } /* Off-Diagonal part of P */ P_offd_i[i] = jj_counter_offd; if (num_procs > 1) { for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { i1 = A_offd_j[jj]; /*----------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_offd_j * and initialize interpolation weight to zero. 
*-----------------------------------------------------------*/ if (CF_marker_offd[i1] >= 0) { P_marker_offd[i1] = jj_counter_offd; /*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/ P_offd_j[jj_counter_offd] = i1; P_offd_data[jj_counter_offd] = - A_offd_data[jj]*a_diag[i]; jj_counter_offd++; } } } } P_offd_i[i+1] = jj_counter_offd; } hypre_TFree(P_marker, HYPRE_MEMORY_HOST); hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST); } hypre_TFree(a_diag, HYPRE_MEMORY_HOST); P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), total_global_cpts, hypre_ParCSRMatrixColStarts(A), num_cpts_global, 0, P_diag_i[n_fine], P_offd_i[n_fine]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; hypre_ParCSRMatrixOwnsRowStarts(P) = 0; num_cols_P_offd = 0; if (P_offd_size) { P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i=0; i < num_cols_A_offd; i++) P_marker[i] = 0; num_cols_P_offd = 0; for (i=0; i < P_offd_size; i++) { index = P_offd_j[i]; if (!P_marker[index]) { num_cols_P_offd++; P_marker[index] = 1; } } tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST); col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST); index = 0; for (i=0; i < num_cols_P_offd; i++) { while (P_marker[index]==0) index++; tmp_map_offd[i] = index++; } #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i=0; i < P_offd_size; i++) P_offd_j[i] = hypre_BinarySearch(tmp_map_offd, P_offd_j[i], num_cols_P_offd); hypre_TFree(P_marker, HYPRE_MEMORY_HOST); } for (i=0; i < n_fine; i++) if (CF_marker[i] == -3) CF_marker[i] = -1; if (num_cols_P_offd) { hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P; hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd; } hypre_GetCommPkgRTFromCommPkgA(P,A, fine_to_coarse, tmp_map_offd); *P_ptr = P; hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST); hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST); // hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST); hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST); hypre_TFree(jj_count, HYPRE_MEMORY_HOST); hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST); return(0); } /* Scale ParCSR matrix A = scalar * A * A: the target CSR matrix * vector: array of real numbers */ HYPRE_Int hypre_ParCSRMatrixLeftScale(HYPRE_Real *vector, hypre_ParCSRMatrix *A) { HYPRE_Int i, j, n_local; hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); n_local = hypre_CSRMatrixNumRows(A_diag); for (i = 0; i < n_local; i++) { HYPRE_Real factor = vector[i]; for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { A_diag_data[j] *= factor; } for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++) { A_offd_data[j] *= factor; } } return(0); } /************************************************************ * Available methods: * 0: inv(A_FF) approximated by its diagonal inverse * 1: inv(A_FF) 
approximated by sparse approximate inverse *************************************************************/ HYPRE_Int hypre_MGRComputeNonGalerkinCoarseGrid(hypre_ParCSRMatrix *A, hypre_ParCSRMatrix *P, hypre_ParCSRMatrix *RT, HYPRE_Int bsize, HYPRE_Int ordering, HYPRE_Int method, HYPRE_Int Pmax, HYPRE_Int keep_stencil, HYPRE_Int *CF_marker, hypre_ParCSRMatrix **A_h_ptr) { HYPRE_Int *c_marker, *f_marker; HYPRE_Int n_local_fine_grid, i, i1, jj; hypre_ParCSRMatrix *A_cc; hypre_ParCSRMatrix *A_ff; hypre_ParCSRMatrix *A_fc; hypre_ParCSRMatrix *A_cf; hypre_ParCSRMatrix *A_h; hypre_ParCSRMatrix *A_h_correction; HYPRE_Int max_elmts = Pmax; // HYPRE_Real wall_time = 0.; hypre_ParCSRMatrix *P_mod = NULL; HYPRE_Int my_id; MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_MPI_Comm_rank(comm,&my_id); n_local_fine_grid = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A)); c_marker = hypre_CTAlloc(HYPRE_Int, n_local_fine_grid, HYPRE_MEMORY_HOST); f_marker = hypre_CTAlloc(HYPRE_Int, n_local_fine_grid, HYPRE_MEMORY_HOST); for (i = 0; i < n_local_fine_grid; i++) { HYPRE_Int point_type = CF_marker[i]; assert(point_type == 1 || point_type == -1); c_marker[i] = point_type; f_marker[i] = -point_type; } // get the A_cc sub-block hypre_MGRGetSubBlock(A, c_marker, c_marker, 0, &A_cc); if (method == 0) { if (keep_stencil) { //wall_time = time_getWallclockSeconds(); hypre_MGRGetSubBlock(A, c_marker, f_marker, 0, &A_cf); hypre_MGRGetSubBlock(A, f_marker, c_marker, 0, &A_fc); hypre_MGRGetSubBlock(A, f_marker, f_marker, 0, &A_ff); // extract the diagonal of A_ff and compute D_ff_inv hypre_CSRMatrix *A_ff_diag = hypre_ParCSRMatrixDiag(A_ff); HYPRE_Real *A_ff_diag_data = hypre_CSRMatrixData(A_ff_diag); HYPRE_Int *A_ff_diag_i = hypre_CSRMatrixI(A_ff_diag); HYPRE_Int *A_ff_diag_j = hypre_CSRMatrixJ(A_ff_diag); HYPRE_Int n_local_fpoints = hypre_CSRMatrixNumRows(A_ff_diag); HYPRE_Real *D_ff_inv; D_ff_inv = hypre_CTAlloc(HYPRE_Real, n_local_fpoints, HYPRE_MEMORY_HOST); for (i = 0; i < n_local_fpoints; i++) { for (jj = A_ff_diag_i[i]; jj < A_ff_diag_i[i+1]; jj++) { i1 = A_ff_diag_j[jj]; if ( i==i1 ) { D_ff_inv[i] = -1.0/A_ff_diag_data[jj]; } } } // extract the diagonal of A_cf hypre_CSRMatrix *A_cf_diag = hypre_ParCSRMatrixDiag(A_cf); HYPRE_Real *A_cf_diag_data = hypre_CSRMatrixData(A_cf_diag); HYPRE_Int *A_cf_diag_i = hypre_CSRMatrixI(A_cf_diag); HYPRE_Int *A_cf_diag_j = hypre_CSRMatrixJ(A_cf_diag); n_local_fpoints = hypre_CSRMatrixNumRows(A_cf_diag); HYPRE_Real *D_cf; D_cf = hypre_CTAlloc(HYPRE_Real, n_local_fpoints, HYPRE_MEMORY_HOST); for (i = 0; i < n_local_fpoints; i++) { i1 = A_cf_diag_j[A_cf_diag_i[i]]; D_cf[i] = A_cf_diag_data[jj]; } // compute the triple product hypre_ParCSRMatrixLeftScale(D_ff_inv, A_fc); hypre_ParCSRMatrixLeftScale(D_cf, A_fc); A_h_correction = A_fc; hypre_TFree(D_cf, HYPRE_MEMORY_HOST); hypre_TFree(D_ff_inv, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixDestroy(A_ff); hypre_ParCSRMatrixDestroy(A_cf); //wall_time = time_getWallclockSeconds() - wall_time; //hypre_printf("Compute triple product D_cf * D_ff_inv * A_fc time: %1.5f\n", wall_time); } else { //wall_time = time_getWallclockSeconds(); P_mod = hypre_ParCSRMatrixCompleteClone(P); hypre_ParCSRMatrixCopy(P,P_mod,1); HYPRE_Int n_local_rows = hypre_ParCSRMatrixNumRows(P_mod); hypre_CSRMatrix *P_mod_diag = hypre_ParCSRMatrixDiag(P_mod); HYPRE_Int *P_mod_diag_i = hypre_CSRMatrixI(P_mod_diag); HYPRE_Real *P_mod_diag_data = hypre_CSRMatrixData(P_mod_diag); for (i = 0; i < n_local_rows; i ++) { if (CF_marker[i] >= 0) { HYPRE_Int ii = P_mod_diag_i[i]; 
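            /* For a C-point row, P holds a single unit entry (its own coarse
             * dof, stored first in the row); zeroing it leaves only the
             * F-point block of P in P_mod.  The triple product below then
             * produces only the correction involving that F-point block; the
             * exact A_cc block is added back after the dropping step
             * (hypre_ParcsrAdd further down). */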
P_mod_diag_data[ii] = 0.0; } } hypre_BoomerAMGBuildCoarseOperator(RT, A, P_mod, &A_h_correction); //wall_time = time_getWallclockSeconds() - wall_time; //hypre_printf("Compute triple product time new: %1.5f\n", wall_time); hypre_ParCSRMatrixDestroy(P_mod); } } else { // Approximate inverse for ideal interploation hypre_MGRGetSubBlock(A, c_marker, f_marker, 0, &A_cf); hypre_MGRGetSubBlock(A, f_marker, c_marker, 0, &A_fc); hypre_MGRGetSubBlock(A, f_marker, f_marker, 0, &A_ff); hypre_ParCSRMatrix *A_ff_inv = NULL; hypre_ParCSRMatrix *minus_Wp = NULL; hypre_MGRApproximateInverse(A_ff, &A_ff_inv); minus_Wp = hypre_ParMatmul(A_ff_inv, A_fc); A_h_correction = hypre_ParMatmul(A_cf, minus_Wp); hypre_ParCSRMatrixDestroy(minus_Wp); hypre_ParCSRMatrixDestroy(A_ff); hypre_ParCSRMatrixDestroy(A_fc); hypre_ParCSRMatrixDestroy(A_cf); } // perform dropping for A_h_correction // specific to multiphase poromechanics // we only keep the diagonal of each block //wall_time = time_getWallclockSeconds(); HYPRE_Int n_local_cpoints = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A_h_correction)); hypre_CSRMatrix *A_h_correction_diag = hypre_ParCSRMatrixDiag(A_h_correction); HYPRE_Real *A_h_correction_diag_data = hypre_CSRMatrixData(A_h_correction_diag); HYPRE_Int *A_h_correction_diag_i = hypre_CSRMatrixI(A_h_correction_diag); HYPRE_Int *A_h_correction_diag_j = hypre_CSRMatrixJ(A_h_correction_diag); HYPRE_Int ncol_diag = hypre_CSRMatrixNumCols(A_h_correction_diag); hypre_CSRMatrix *A_h_correction_offd = hypre_ParCSRMatrixOffd(A_h_correction); HYPRE_Real *A_h_correction_offd_data = hypre_CSRMatrixData(A_h_correction_offd); HYPRE_Int *A_h_correction_offd_i = hypre_CSRMatrixI(A_h_correction_offd); HYPRE_Int *A_h_correction_offd_j = hypre_CSRMatrixJ(A_h_correction_offd); // Allow for maximum dropping with Pmax = 0 //if (Pmax > 0) //{ if (ordering == 0) // interleaved ordering { HYPRE_Int *A_h_correction_diag_i_new = hypre_CTAlloc(HYPRE_Int, n_local_cpoints+1, HYPRE_MEMORY_HOST); HYPRE_Int *A_h_correction_diag_j_new = hypre_CTAlloc(HYPRE_Int, (bsize + max_elmts)*n_local_cpoints, HYPRE_MEMORY_HOST); HYPRE_Complex *A_h_correction_diag_data_new = hypre_CTAlloc(HYPRE_Complex, (bsize + max_elmts)*n_local_cpoints, HYPRE_MEMORY_HOST); HYPRE_Int num_nonzeros_diag_new = 0; HYPRE_Int *A_h_correction_offd_i_new = hypre_CTAlloc(HYPRE_Int, n_local_cpoints+1, HYPRE_MEMORY_HOST); HYPRE_Int *A_h_correction_offd_j_new = hypre_CTAlloc(HYPRE_Int, max_elmts*n_local_cpoints, HYPRE_MEMORY_HOST); HYPRE_Complex *A_h_correction_offd_data_new = hypre_CTAlloc(HYPRE_Complex, max_elmts*n_local_cpoints, HYPRE_MEMORY_HOST); HYPRE_Int num_nonzeros_offd_new = 0; for (i = 0; i < n_local_cpoints; i++) { HYPRE_Int max_num_nonzeros = A_h_correction_diag_i[i+1] - A_h_correction_diag_i[i] + A_h_correction_offd_i[i+1] - A_h_correction_offd_i[i]; HYPRE_Int *aux_j = hypre_CTAlloc(HYPRE_Int, max_num_nonzeros, HYPRE_MEMORY_HOST); HYPRE_Real *aux_data = hypre_CTAlloc(HYPRE_Real, max_num_nonzeros, HYPRE_MEMORY_HOST); HYPRE_Int row_start = i - (i % bsize); HYPRE_Int row_stop = row_start + bsize - 1; HYPRE_Int cnt = 0; for (jj = A_h_correction_offd_i[i]; jj < A_h_correction_offd_i[i+1]; jj++) { aux_j[cnt] = A_h_correction_offd_j[jj] + ncol_diag; aux_data[cnt] = A_h_correction_offd_data[jj]; cnt++; } for (jj = A_h_correction_diag_i[i]; jj < A_h_correction_diag_i[i+1]; jj++) { aux_j[cnt] = A_h_correction_diag_j[jj]; aux_data[cnt] = A_h_correction_diag_data[jj]; cnt++; } hypre_qsort2_abs(aux_j, aux_data, 0, cnt-1); for (jj = A_h_correction_diag_i[i]; jj < 
A_h_correction_diag_i[i+1]; jj++) { i1 = A_h_correction_diag_j[jj]; if (i1 >= row_start && i1 <= row_stop) { // copy data to new arrays A_h_correction_diag_j_new[num_nonzeros_diag_new] = i1; A_h_correction_diag_data_new[num_nonzeros_diag_new] = A_h_correction_diag_data[jj]; ++num_nonzeros_diag_new; } else { // Do nothing } } if (max_elmts > 0) { for (jj = 0; jj < hypre_min(max_elmts, cnt); jj++) { HYPRE_Int col_idx = aux_j[jj]; HYPRE_Real col_value = aux_data[jj]; if (col_idx < ncol_diag && (col_idx < row_start || col_idx > row_stop)) { A_h_correction_diag_j_new[num_nonzeros_diag_new] = col_idx; A_h_correction_diag_data_new[num_nonzeros_diag_new] = col_value; ++num_nonzeros_diag_new; } else if (col_idx >= ncol_diag) { A_h_correction_offd_j_new[num_nonzeros_offd_new] = col_idx - ncol_diag; A_h_correction_offd_data_new[num_nonzeros_offd_new] = col_value; ++num_nonzeros_offd_new; } } } A_h_correction_diag_i_new[i+1] = num_nonzeros_diag_new; A_h_correction_offd_i_new[i+1] = num_nonzeros_offd_new; hypre_TFree(aux_j, HYPRE_MEMORY_HOST); hypre_TFree(aux_data, HYPRE_MEMORY_HOST); } hypre_TFree(A_h_correction_diag_i, HYPRE_MEMORY_HOST); hypre_TFree(A_h_correction_diag_j, HYPRE_MEMORY_HOST); hypre_TFree(A_h_correction_diag_data, HYPRE_MEMORY_HOST); hypre_CSRMatrixI(A_h_correction_diag) = A_h_correction_diag_i_new; hypre_CSRMatrixJ(A_h_correction_diag) = A_h_correction_diag_j_new; hypre_CSRMatrixData(A_h_correction_diag) = A_h_correction_diag_data_new; hypre_CSRMatrixNumNonzeros(A_h_correction_diag) = num_nonzeros_diag_new; if (A_h_correction_offd_i) hypre_TFree(A_h_correction_offd_i, HYPRE_MEMORY_HOST); if (A_h_correction_offd_j) hypre_TFree(A_h_correction_offd_j, HYPRE_MEMORY_HOST); if (A_h_correction_offd_data) hypre_TFree(A_h_correction_offd_data, HYPRE_MEMORY_HOST); hypre_CSRMatrixI(A_h_correction_offd) = A_h_correction_offd_i_new; hypre_CSRMatrixJ(A_h_correction_offd) = A_h_correction_offd_j_new; hypre_CSRMatrixData(A_h_correction_offd) = A_h_correction_offd_data_new; hypre_CSRMatrixNumNonzeros(A_h_correction_offd) = num_nonzeros_offd_new; } else { hypre_printf("Error!! 
Block ordering is not supported at the moment\n"); exit(-1); } //} //hypre_MGRParCSRMatrixTruncate(A_h_correction, max_elmts); //wall_time = time_getWallclockSeconds() - wall_time; //hypre_printf("Filter A_h_correction time: %1.5f\n", wall_time); //hypre_ParCSRMatrixPrintIJ(A_h_correction,1,1,"A_h_correction_filtered"); // coarse grid / schur complement hypre_ParcsrAdd(1.0, A_cc, 1.0, A_h_correction, &A_h); *A_h_ptr = A_h; //hypre_ParCSRMatrixPrintIJ(A_h,1,1,"A_h"); hypre_ParCSRMatrixDestroy(A_cc); hypre_ParCSRMatrixDestroy(A_h_correction); hypre_TFree(c_marker, HYPRE_MEMORY_HOST); hypre_TFree(f_marker, HYPRE_MEMORY_HOST); return hypre_error_flag; } HYPRE_Int hypre_MGRComputeAlgebraicFixedStress(hypre_ParCSRMatrix *A, HYPRE_BigInt *mgr_idx_array, HYPRE_Solver A_ff_solver) { HYPRE_Int *U_marker, *S_marker, *P_marker; HYPRE_Int n_fine, i; HYPRE_BigInt ibegin; hypre_ParCSRMatrix *A_up; hypre_ParCSRMatrix *A_uu; hypre_ParCSRMatrix *A_su; hypre_ParCSRMatrix *A_pu; hypre_ParVector *e1_vector; hypre_ParVector *e2_vector; hypre_ParVector *e3_vector; hypre_ParVector *e4_vector; hypre_ParVector *e5_vector; n_fine = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A)); ibegin = hypre_ParCSRMatrixFirstRowIndex(A); hypre_assert(ibegin == mgr_idx_array[0]); U_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); S_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); for (i = 0; i < n_fine; i++) { U_marker[i] = -1; S_marker[i] = -1; P_marker[i] = -1; } // create C and F markers for (i = 0; i < n_fine; i++) { if (i < mgr_idx_array[1] - ibegin) { U_marker[i] = 1; } else if (i >= (mgr_idx_array[1] - ibegin) && i < (mgr_idx_array[2] - ibegin)) { S_marker[i] = 1; } else { P_marker[i] = 1; } } // Get A_up hypre_MGRGetSubBlock(A, U_marker, P_marker, 0, &A_up); // GetA_uu hypre_MGRGetSubBlock(A, U_marker, U_marker, 0, &A_uu); // Get A_su hypre_MGRGetSubBlock(A, S_marker, U_marker, 0, &A_su); // Get A_pu hypre_MGRGetSubBlock(A, P_marker, U_marker, 0, &A_pu); e1_vector = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_up), hypre_ParCSRMatrixGlobalNumCols(A_up), hypre_ParCSRMatrixColStarts(A_up)); hypre_ParVectorInitialize(e1_vector); hypre_ParVectorSetPartitioningOwner(e1_vector,0); hypre_ParVectorSetConstantValues(e1_vector, 1.0); e2_vector = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_uu), hypre_ParCSRMatrixGlobalNumRows(A_uu), hypre_ParCSRMatrixRowStarts(A_uu)); hypre_ParVectorInitialize(e2_vector); hypre_ParVectorSetPartitioningOwner(e2_vector,0); hypre_ParVectorSetConstantValues(e2_vector, 0.0); e3_vector = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_uu), hypre_ParCSRMatrixGlobalNumRows(A_uu), hypre_ParCSRMatrixRowStarts(A_uu)); hypre_ParVectorInitialize(e3_vector); hypre_ParVectorSetPartitioningOwner(e3_vector,0); hypre_ParVectorSetConstantValues(e3_vector, 0.0); e4_vector = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_su), hypre_ParCSRMatrixGlobalNumRows(A_su), hypre_ParCSRMatrixRowStarts(A_su)); hypre_ParVectorInitialize(e4_vector); hypre_ParVectorSetPartitioningOwner(e4_vector,0); hypre_ParVectorSetConstantValues(e4_vector, 0.0); e5_vector = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_pu), hypre_ParCSRMatrixGlobalNumRows(A_pu), hypre_ParCSRMatrixRowStarts(A_pu)); hypre_ParVectorInitialize(e5_vector); hypre_ParVectorSetPartitioningOwner(e5_vector,0); hypre_ParVectorSetConstantValues(e5_vector, 0.0); // compute e2 = A_up * e1 hypre_ParCSRMatrixMatvecOutOfPlace(1.0, A_up, e1_vector, 0.0, e2_vector, e2_vector); // solve e3 
= A_uu^-1 * e2 hypre_BoomerAMGSolve(A_ff_solver, A_uu, e2_vector, e3_vector); // compute e4 = A_su * e3 hypre_ParCSRMatrixMatvecOutOfPlace(1.0, A_su, e3_vector, 0.0, e4_vector, e4_vector); // compute e4 = A_su * e3 hypre_ParCSRMatrixMatvecOutOfPlace(1.0, A_su, e3_vector, 0.0, e4_vector, e4_vector); // print e4 hypre_ParVectorPrintIJ(e4_vector,1,"Dsp"); // compute e5 = A_pu * e3 hypre_ParCSRMatrixMatvecOutOfPlace(1.0, A_pu, e3_vector, 0.0, e5_vector, e5_vector); hypre_ParVectorPrintIJ(e5_vector,1,"Dpp"); hypre_ParVectorDestroy(e1_vector); hypre_ParVectorDestroy(e2_vector); hypre_ParVectorDestroy(e3_vector); hypre_ParCSRMatrixDestroy(A_uu); hypre_ParCSRMatrixDestroy(A_up); hypre_ParCSRMatrixDestroy(A_pu); hypre_ParCSRMatrixDestroy(A_su); hypre_TFree(U_marker, HYPRE_MEMORY_HOST); hypre_TFree(S_marker, HYPRE_MEMORY_HOST); hypre_TFree(P_marker, HYPRE_MEMORY_HOST); return hypre_error_flag; } HYPRE_Int hypre_MGRApproximateInverse(hypre_ParCSRMatrix *A, hypre_ParCSRMatrix **A_inv) { HYPRE_Int print_level, mr_max_row_nnz, mr_max_iter, nsh_max_row_nnz, nsh_max_iter, mr_col_version; HYPRE_Real mr_tol, nsh_tol; HYPRE_Real *droptol = hypre_CTAlloc(HYPRE_Real, 2, HYPRE_MEMORY_HOST); hypre_ParCSRMatrix *approx_A_inv = NULL; print_level = 0; nsh_max_iter = 2; nsh_max_row_nnz = 2; // default 1000 mr_max_iter = 1; mr_tol = 1.0e-3; mr_max_row_nnz = 2; // default 800 mr_col_version = 0; nsh_tol = 1.0e-3; droptol[0] = 1.0e-2; droptol[1] = 1.0e-2; hypre_ILUParCSRInverseNSH(A, &approx_A_inv, droptol, mr_tol, nsh_tol, DIVIDE_TOL, mr_max_row_nnz, nsh_max_row_nnz, mr_max_iter, nsh_max_iter, mr_col_version, print_level); *A_inv = approx_A_inv; if (droptol) hypre_TFree(droptol, HYPRE_MEMORY_HOST); return hypre_error_flag; } HYPRE_Int hypre_MGRBuildInterpApproximateInverseExp(hypre_ParCSRMatrix *A, hypre_ParCSRMatrix *S, HYPRE_Int *CF_marker, HYPRE_BigInt *num_cpts_global, HYPRE_Int debug_flag, hypre_ParCSRMatrix **P_ptr) { HYPRE_Int *C_marker; HYPRE_Int *F_marker; hypre_ParCSRMatrix *A_fc; hypre_ParCSRMatrix *minus_Wp; MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRMatrix *P; HYPRE_BigInt *col_map_offd_P; hypre_CSRMatrix *P_diag; hypre_CSRMatrix *P_offd; HYPRE_Real *P_diag_data; HYPRE_Int *P_diag_i; HYPRE_Int *P_diag_j; HYPRE_Real *P_offd_data; HYPRE_Int *P_offd_i; HYPRE_Int *P_offd_j; HYPRE_Int P_diag_size, P_offd_size; HYPRE_Int jj_counter,jj_counter_offd; HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */ HYPRE_Int n_fine = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A)); HYPRE_Int *fine_to_coarse = NULL; HYPRE_Int coarse_counter; HYPRE_BigInt total_global_cpts; HYPRE_Int num_cols_P_offd; // HYPRE_BigInt my_first_cpt; HYPRE_Int i, jj; HYPRE_Real one = 1.0; HYPRE_Int my_id; HYPRE_Int num_procs; // HYPRE_Int num_threads; // HYPRE_Real wall_time; /* for debugging instrumentation */ C_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); F_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); // create C and F markers for (i = 0; i < n_fine; i++) { C_marker[i] = (CF_marker[i] == 1)? 1: -1; F_marker[i] = (CF_marker[i] == 1) ? 
-1: 1; } // Get A_FC hypre_MGRGetSubBlock(A, F_marker, C_marker, 0, &A_fc); // compute -Wp minus_Wp = hypre_ParMatmul(S, A_fc); hypre_CSRMatrix *minus_Wp_diag = hypre_ParCSRMatrixDiag(minus_Wp); HYPRE_Real *minus_Wp_diag_data = hypre_CSRMatrixData(minus_Wp_diag); HYPRE_Int *minus_Wp_diag_i = hypre_CSRMatrixI(minus_Wp_diag); HYPRE_Int *minus_Wp_diag_j = hypre_CSRMatrixJ(minus_Wp_diag); hypre_CSRMatrix *minus_Wp_offd = hypre_ParCSRMatrixOffd(minus_Wp); HYPRE_Real *minus_Wp_offd_data = hypre_CSRMatrixData(minus_Wp_offd); HYPRE_Int *minus_Wp_offd_i = hypre_CSRMatrixI(minus_Wp_offd); HYPRE_Int *minus_Wp_offd_j = hypre_CSRMatrixJ(minus_Wp_offd); hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm,&my_id); // num_threads = hypre_NumThreads(); #ifdef HYPRE_NO_GLOBAL_PARTITION // my_first_cpt = num_cpts_global[0]; if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1]; hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); #else // my_first_cpt = num_cpts_global[my_id]; total_global_cpts = num_cpts_global[num_procs]; #endif /*----------------------------------------------------------------------- * First Pass: Determine size of P and fill in fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. *-----------------------------------------------------------------------*/ fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1; jj_counter = start_indexing; jj_counter_offd = start_indexing; /*----------------------------------------------------------------------- * Loop over fine grid. *-----------------------------------------------------------------------*/ HYPRE_Int row_counter = 0; coarse_counter = 0; for (i = 0; i < n_fine; i++) { /*-------------------------------------------------------------------- * If i is a C-point, interpolation is the identity. Also set up * mapping vector. *--------------------------------------------------------------------*/ if (CF_marker[i] > 0) { jj_counter++; fine_to_coarse[i] = coarse_counter; coarse_counter++; } else { /*-------------------------------------------------------------------- * If i is an F-point, interpolation is the approximation of A_{ff}^{-1}A_{fc} *--------------------------------------------------------------------*/ for (jj = minus_Wp_diag_i[row_counter]; jj < minus_Wp_diag_i[row_counter+1]; jj++) { jj_counter++; } if (num_procs > 1) { for (jj = minus_Wp_offd_i[row_counter]; jj < minus_Wp_offd_i[row_counter+1]; jj++) { jj_counter_offd++; } } row_counter++; } } /*----------------------------------------------------------------------- * Allocate arrays. 
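 * (Here the F-point rows of P come directly from minus_Wp = S * A_fc, where S
 *  is assumed to approximate inv(A_ff).  The assembly below negates the rows
 *  of minus_Wp for the F-points, places a unit entry at each C-point, and
 *  reuses minus_Wp's off-diagonal column map for P.)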
*-----------------------------------------------------------------------*/ P_diag_size = jj_counter; P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE); P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_DEVICE); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_DEVICE); P_diag_i[n_fine] = jj_counter; P_offd_size = jj_counter_offd; P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE); P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_DEVICE); P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_DEVICE); /*----------------------------------------------------------------------- * Intialize some stuff. *-----------------------------------------------------------------------*/ jj_counter = start_indexing; jj_counter_offd = start_indexing; /*----------------------------------------------------------------------- * Send and receive fine_to_coarse info. *-----------------------------------------------------------------------*/ row_counter = 0; for (i = 0; i < n_fine; i++) { /*-------------------------------------------------------------------- * If i is a c-point, interpolation is the identity. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { P_diag_i[i] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i]; P_diag_data[jj_counter] = one; jj_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, build interpolation. *--------------------------------------------------------------------*/ else { /* Diagonal part of P */ P_diag_i[i] = jj_counter; for (jj = minus_Wp_diag_i[row_counter]; jj < minus_Wp_diag_i[row_counter+1]; jj++) { P_diag_j[jj_counter] = minus_Wp_diag_j[jj]; P_diag_data[jj_counter] = - minus_Wp_diag_data[jj]; jj_counter++; } /* Off-Diagonal part of P */ P_offd_i[i] = jj_counter_offd; if (num_procs > 1) { for (jj = minus_Wp_offd_i[row_counter]; jj < minus_Wp_offd_i[row_counter+1]; jj++) { P_offd_j[jj_counter_offd] = minus_Wp_offd_j[jj]; P_offd_data[jj_counter_offd] = - minus_Wp_offd_data[jj]; jj_counter_offd++; } } row_counter++; } P_offd_i[i+1] = jj_counter_offd; } P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), total_global_cpts, hypre_ParCSRMatrixColStarts(A), num_cpts_global, 0, P_diag_i[n_fine], P_offd_i[n_fine]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; hypre_ParCSRMatrixOwnsRowStarts(P) = 0; num_cols_P_offd = hypre_CSRMatrixNumCols(minus_Wp_offd); HYPRE_BigInt *col_map_offd_tmp = hypre_ParCSRMatrixColMapOffd(minus_Wp); if (P_offd_size) { col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST); for (i=0; i < num_cols_P_offd; i++) { col_map_offd_P[i] = col_map_offd_tmp[i]; } } if (num_cols_P_offd) { hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P; hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd; } hypre_MatvecCommPkgCreate(P); *P_ptr = P; hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST); hypre_TFree(C_marker, HYPRE_MEMORY_HOST); hypre_TFree(F_marker, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixDestroy(A_fc); hypre_ParCSRMatrixDestroy(minus_Wp); return 0; } HYPRE_Int hypre_MGRBuildInterpApproximateInverse(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, HYPRE_BigInt *num_cpts_global, HYPRE_Int debug_flag, 
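/* Note (descriptive comment, summarizing the code below): this routine builds an
 * "approximate ideal" interpolation for MGR.  It extracts A_ff and A_fc from A
 * using CF_marker, computes an approximate inverse of A_ff (NSH-based, via
 * hypre_MGRApproximateInverse), forms Wp = approx(A_ff^{-1}) * A_fc, and then
 * assembles P with identity rows at C-points and -Wp rows at F-points. */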
hypre_ParCSRMatrix **P_ptr) { HYPRE_Int *C_marker; HYPRE_Int *F_marker; hypre_ParCSRMatrix *A_ff; hypre_ParCSRMatrix *A_fc; hypre_ParCSRMatrix *A_ff_inv; hypre_ParCSRMatrix *minus_Wp; MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRMatrix *P; HYPRE_BigInt *col_map_offd_P; hypre_CSRMatrix *P_diag; hypre_CSRMatrix *P_offd; HYPRE_Real *P_diag_data; HYPRE_Int *P_diag_i; HYPRE_Int *P_diag_j; HYPRE_Real *P_offd_data; HYPRE_Int *P_offd_i; HYPRE_Int *P_offd_j; HYPRE_Int P_diag_size, P_offd_size; HYPRE_Int jj_counter,jj_counter_offd; //HYPRE_Int jj_begin_row,jj_begin_row_offd; //HYPRE_Int jj_end_row,jj_end_row_offd; HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */ HYPRE_Int n_fine = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A)); HYPRE_Int *fine_to_coarse = NULL; //HYPRE_Int *coarse_counter; HYPRE_Int coarse_counter; HYPRE_BigInt total_global_cpts; HYPRE_Int num_cols_P_offd; // HYPRE_BigInt my_first_cpt; HYPRE_Int i,jj; HYPRE_Real one = 1.0; HYPRE_Int my_id; HYPRE_Int num_procs; // HYPRE_Int num_threads; // HYPRE_Real wall_time; /* for debugging instrumentation */ C_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); F_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); // create C and F markers for (i = 0; i < n_fine; i++) { C_marker[i] = (CF_marker[i] == 1)? 1: -1; F_marker[i] = (CF_marker[i] == 1) ? -1: 1; } // Get A_FF hypre_MGRGetSubBlock(A, F_marker, F_marker, 0, &A_ff); // Get A_FC hypre_MGRGetSubBlock(A, F_marker, C_marker, 0, &A_fc); hypre_MGRApproximateInverse(A_ff, &A_ff_inv); hypre_ParCSRMatrixPrintIJ(A_ff_inv, 1, 1, "A_ff_inv"); hypre_ParCSRMatrixPrintIJ(A_fc, 1, 1, "A_fc"); minus_Wp = hypre_ParMatmul(A_ff_inv, A_fc); hypre_ParCSRMatrixPrintIJ(minus_Wp, 1, 1, "Wp"); hypre_CSRMatrix *minus_Wp_diag = hypre_ParCSRMatrixDiag(minus_Wp); HYPRE_Real *minus_Wp_diag_data = hypre_CSRMatrixData(minus_Wp_diag); HYPRE_Int *minus_Wp_diag_i = hypre_CSRMatrixI(minus_Wp_diag); HYPRE_Int *minus_Wp_diag_j = hypre_CSRMatrixJ(minus_Wp_diag); hypre_CSRMatrix *minus_Wp_offd = hypre_ParCSRMatrixOffd(minus_Wp); HYPRE_Real *minus_Wp_offd_data = hypre_CSRMatrixData(minus_Wp_offd); HYPRE_Int *minus_Wp_offd_i = hypre_CSRMatrixI(minus_Wp_offd); HYPRE_Int *minus_Wp_offd_j = hypre_CSRMatrixJ(minus_Wp_offd); //hypre_CSRMatrix *minus_Wp_offd = hypre_ParCSRMatrixOffd(minus_Wp); //HYPRE_Int num_cols_minus_Wp_offd = hypre_CSRMatrixNumCols(minus_Wp_offd); hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm,&my_id); // num_threads = hypre_NumThreads(); #ifdef HYPRE_NO_GLOBAL_PARTITION // my_first_cpt = num_cpts_global[0]; if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1]; hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); #else // my_first_cpt = num_cpts_global[my_id]; total_global_cpts = num_cpts_global[num_procs]; #endif /*----------------------------------------------------------------------- * First Pass: Determine size of P and fill in fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. 
*-----------------------------------------------------------------------*/ //coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); //jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); //jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1; jj_counter = start_indexing; jj_counter_offd = start_indexing; /*----------------------------------------------------------------------- * Loop over fine grid. *-----------------------------------------------------------------------*/ HYPRE_Int row_counter = 0; coarse_counter = 0; for (i = 0; i < n_fine; i++) { /*-------------------------------------------------------------------- * If i is a C-point, interpolation is the identity. Also set up * mapping vector. *--------------------------------------------------------------------*/ if (CF_marker[i] > 0) { //jj_count[j]++; //fine_to_coarse[i] = coarse_counter[j]; //coarse_counter[j]++; jj_counter++; fine_to_coarse[i] = coarse_counter; coarse_counter++; } else { /*-------------------------------------------------------------------- * If i is an F-point, interpolation is the approximation of A_{ff}^{-1}A_{fc} *--------------------------------------------------------------------*/ for (jj = minus_Wp_diag_i[row_counter]; jj < minus_Wp_diag_i[row_counter+1]; jj++) { //jj_count[j]++; jj_counter++; } if (num_procs > 1) { for (jj = minus_Wp_offd_i[row_counter]; jj < minus_Wp_offd_i[row_counter+1]; jj++) { //jj_count_offd[j]++; jj_counter_offd++; } } row_counter++; } } /*----------------------------------------------------------------------- * Allocate arrays. *-----------------------------------------------------------------------*/ /* for (i=0; i < num_threads-1; i++) { coarse_counter[i+1] += coarse_counter[i]; jj_count[i+1] += jj_count[i]; jj_count_offd[i+1] += jj_count_offd[i]; } i = num_threads-1; jj_counter = jj_count[i]; jj_counter_offd = jj_count_offd[i]; */ P_diag_size = jj_counter; P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE); P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_DEVICE); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_DEVICE); P_diag_i[n_fine] = jj_counter; P_offd_size = jj_counter_offd; P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE); P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_DEVICE); P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_DEVICE); /*----------------------------------------------------------------------- * Intialize some stuff. *-----------------------------------------------------------------------*/ jj_counter = start_indexing; jj_counter_offd = start_indexing; /* if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Internal work 1 = %f\n", my_id, wall_time); fflush(NULL); } */ /*----------------------------------------------------------------------- * Send and receive fine_to_coarse info. 
*-----------------------------------------------------------------------*/ /* if (num_procs > 1) { if (debug_flag==4) wall_time = time_getWallclockSeconds(); fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_minus_Wp_offd, HYPRE_MEMORY_HOST); for (i = 0; i < n_fine; i++) { fine_to_coarse[i] += my_first_cpt; } comm_pkg = hypre_ParCSRMatrixCommPkg(minus_Wp); if (!comm_pkg) { hypre_MatvecCommPkgCreate(minus_Wp); comm_pkg = hypre_ParCSRMatrixCommPkg(minus_Wp); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, fine_to_coarse_offd); hypre_ParCSRCommHandleDestroy(comm_handle); if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n", my_id, wall_time); fflush(NULL); } if (debug_flag==4) wall_time = time_getWallclockSeconds(); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt; } */ row_counter = 0; for (i = 0; i < n_fine; i++) { /*-------------------------------------------------------------------- * If i is a c-point, interpolation is the identity. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { P_diag_i[i] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i]; P_diag_data[jj_counter] = one; jj_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, build interpolation. 
*--------------------------------------------------------------------*/ else { /* Diagonal part of P */ P_diag_i[i] = jj_counter; for (jj = minus_Wp_diag_i[row_counter]; jj < minus_Wp_diag_i[row_counter+1]; jj++) { //P_marker[row_counter] = jj_counter; P_diag_j[jj_counter] = minus_Wp_diag_j[jj]; P_diag_data[jj_counter] = - minus_Wp_diag_data[jj]; jj_counter++; } /* Off-Diagonal part of P */ P_offd_i[i] = jj_counter_offd; if (num_procs > 1) { for (jj = minus_Wp_offd_i[row_counter]; jj < minus_Wp_offd_i[row_counter+1]; jj++) { //P_marker_offd[row_counter] = jj_counter_offd; P_offd_j[jj_counter_offd] = minus_Wp_offd_j[jj]; P_offd_data[jj_counter_offd] = - minus_Wp_offd_data[jj]; jj_counter_offd++; } } row_counter++; } P_offd_i[i+1] = jj_counter_offd; } //hypre_printf("Num rows of Wp = %d\n", row_counter); //P_offd_i[row_counter] = jj_counter_offd; P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), total_global_cpts, hypre_ParCSRMatrixColStarts(A), num_cpts_global, 0, P_diag_i[n_fine], P_offd_i[n_fine]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; hypre_ParCSRMatrixOwnsRowStarts(P) = 0; num_cols_P_offd = hypre_CSRMatrixNumCols(minus_Wp_offd); HYPRE_BigInt *col_map_offd_tmp = hypre_ParCSRMatrixColMapOffd(minus_Wp); if (P_offd_size) { col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST); for (i=0; i < num_cols_P_offd; i++) { col_map_offd_P[i] = col_map_offd_tmp[i]; } } /* num_cols_P_offd = 0; if (P_offd_size) { P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_minus_Wp_offd, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i=0; i < num_cols_minus_Wp_offd; i++) P_marker[i] = 0; num_cols_P_offd = 0; for (i=0; i < P_offd_size; i++) { index = P_offd_j[i]; if (!P_marker[index]) { num_cols_P_offd++; P_marker[index] = 1; } } col_map_offd_P = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST); index = 0; for (i=0; i < num_cols_P_offd; i++) { while (P_marker[index]==0) index++; col_map_offd_P[i] = index++; } #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i=0; i < P_offd_size; i++) P_offd_j[i] = hypre_BinarySearch(col_map_offd_P, P_offd_j[i], num_cols_P_offd); hypre_TFree(P_marker, HYPRE_MEMORY_HOST); } */ if (num_cols_P_offd) { hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P; hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd; } hypre_MatvecCommPkgCreate(P); //hypre_GetCommPkgRTFromCommPkgA(P,A, fine_to_coarse_offd); *P_ptr = P; //hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); //hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST); //if (fine_to_coarse_offd) hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST); //hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST); //hypre_TFree(jj_count, HYPRE_MEMORY_HOST); //hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST); hypre_TFree(C_marker, HYPRE_MEMORY_HOST); hypre_TFree(F_marker, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixDestroy(A_ff); hypre_ParCSRMatrixDestroy(A_fc); hypre_ParCSRMatrixDestroy(A_ff_inv); hypre_ParCSRMatrixDestroy(minus_Wp); return 0; } /* Setup interpolation operator */ HYPRE_Int hypre_MGRBuildInterp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, hypre_ParCSRMatrix *S, 
HYPRE_BigInt *num_cpts_global, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag, HYPRE_Real trunc_factor, HYPRE_Int max_elmts, HYPRE_Int *col_offd_S_to_A, hypre_ParCSRMatrix **P, HYPRE_Int interp_type, HYPRE_Int numsweeps) { //HYPRE_Int i; hypre_ParCSRMatrix *P_ptr = NULL; //HYPRE_Real jac_trunc_threshold = trunc_factor; //HYPRE_Real jac_trunc_threshold_minus = 0.5*jac_trunc_threshold; /* Interpolation for each level */ if (interp_type <3) { hypre_MGRBuildP( A,CF_marker,num_cpts_global,interp_type,debug_flag,&P_ptr); /* Could do a few sweeps of Jacobi to further improve P */ //for(i=0; i<numsweeps; i++) // hypre_BoomerAMGJacobiInterp(A, &P_ptr, S,1, NULL, CF_marker, 0, jac_trunc_threshold, jac_trunc_threshold_minus ); //hypre_BoomerAMGInterpTruncation(P_ptr, trunc_factor, max_elmts); } else if (interp_type == 4) { hypre_MGRBuildInterpApproximateInverse(A, CF_marker, num_cpts_global, debug_flag, &P_ptr); hypre_BoomerAMGInterpTruncation(P_ptr, trunc_factor, max_elmts); } else if (interp_type == 99) { hypre_MGRBuildInterpApproximateInverseExp(A, S, CF_marker, num_cpts_global, debug_flag, &P_ptr); hypre_BoomerAMGInterpTruncation(P_ptr, trunc_factor, max_elmts); } else { /* Classical modified interpolation */ hypre_BoomerAMGBuildInterp(A, CF_marker, S, num_cpts_global,1, NULL,debug_flag, trunc_factor, max_elmts, col_offd_S_to_A, &P_ptr); /* Do k steps of Jacobi build W for P = [-W I]. * Note that BoomerAMGJacobiInterp assumes you have some initial P, * hence we need to initialize P as above, before calling this routine. * If numsweeps = 0, the following step is skipped and P is returned as is. * Looping here is equivalent to improving P by Jacobi interpolation */ //for(i=0; i<numsweeps; i++) // hypre_BoomerAMGJacobiInterp(A, &P_ptr, S,1, NULL, CF_marker, // 0, jac_trunc_threshold, // jac_trunc_threshold_minus ); } /* set pointer to P */ *P = P_ptr; return hypre_error_flag; } /* Setup restriction operator */ HYPRE_Int hypre_MGRBuildRestrict(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, HYPRE_BigInt *num_cpts_global, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag, HYPRE_Real trunc_factor, HYPRE_Int max_elmts, HYPRE_Real S_commpkg_switch, HYPRE_Real strong_threshold, HYPRE_Real max_row_sum, hypre_ParCSRMatrix **R, HYPRE_Int restrict_type, HYPRE_Int numsweeps) { // HYPRE_Int i; hypre_ParCSRMatrix *R_ptr = NULL; hypre_ParCSRMatrix *AT = NULL; hypre_ParCSRMatrix *ST = NULL; HYPRE_Int *col_offd_ST_to_AT = NULL; // HYPRE_Real jac_trunc_threshold = trunc_factor; // HYPRE_Real jac_trunc_threshold_minus = 0.5*jac_trunc_threshold; /* Build AT (transpose A) */ if (restrict_type > 0) { hypre_ParCSRMatrixTranspose(A, &AT, 1); } if (restrict_type > 5) { /* Build new strength matrix */ hypre_BoomerAMGCreateS(AT, strong_threshold, max_row_sum, 1, NULL, &ST); /* use appropriate communication package for Strength matrix */ if (strong_threshold > S_commpkg_switch) hypre_BoomerAMGCreateSCommPkg(AT, ST, &col_offd_ST_to_AT); } /* Interpolation for each level */ if (restrict_type == 0) { hypre_MGRBuildP(A, CF_marker, num_cpts_global, restrict_type, debug_flag, &R_ptr); } else if (restrict_type == 1 || restrict_type == 2) { hypre_MGRBuildP(AT, CF_marker, num_cpts_global, restrict_type, debug_flag, &R_ptr); /* Could do a few sweeps of Jacobi to further improve P */ //for(i=0; i<numsweeps; i++) // hypre_BoomerAMGJacobiInterp(A, &R_ptr, S,1, NULL, CF_marker, 0, jac_trunc_threshold, jac_trunc_threshold_minus ); //hypre_BoomerAMGInterpTruncation(R_ptr, trunc_factor, max_elmts); } else if 
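/* restrict_type == 4 reuses the approximate-inverse interpolation routine to
   build the restriction operator; any other remaining value falls through to
   classical modified interpolation built on the transposed operator AT below. */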
(restrict_type == 4) { hypre_MGRBuildInterpApproximateInverse(A, CF_marker, num_cpts_global, debug_flag, &R_ptr); hypre_BoomerAMGInterpTruncation(R_ptr, trunc_factor, max_elmts); } else { /* Classical modified interpolation */ hypre_BoomerAMGBuildInterp(AT, CF_marker, ST, num_cpts_global,1, NULL,debug_flag, trunc_factor, max_elmts, col_offd_ST_to_AT, &R_ptr); /* Do k steps of Jacobi build W for P = [-W I]. * Note that BoomerAMGJacobiInterp assumes you have some initial P, * hence we need to initialize P as above, before calling this routine. * If numsweeps = 0, the following step is skipped and P is returned as is. * Looping here is equivalent to improving P by Jacobi interpolation */ // for(i=0; i<numsweeps; i++) // hypre_BoomerAMGJacobiInterp(A, &R_ptr, S,1, NULL, CF_marker, 0, // jac_trunc_threshold, jac_trunc_threshold_minus); } /* set pointer to P */ *R = R_ptr; /* Free memory */ if (restrict_type > 0) { hypre_ParCSRMatrixDestroy(AT); } if (restrict_type > 5) { hypre_ParCSRMatrixDestroy(ST); if (col_offd_ST_to_AT) hypre_TFree(col_offd_ST_to_AT, HYPRE_MEMORY_HOST); } return hypre_error_flag; } void hypre_blas_smat_inv_n4 (HYPRE_Real *a) { const HYPRE_Real a11 = a[0], a12 = a[1], a13 = a[2], a14 = a[3]; const HYPRE_Real a21 = a[4], a22 = a[5], a23 = a[6], a24 = a[7]; const HYPRE_Real a31 = a[8], a32 = a[9], a33 = a[10], a34 = a[11]; const HYPRE_Real a41 = a[12], a42 = a[13], a43 = a[14], a44 = a[15]; const HYPRE_Real M11 = a22*a33*a44 + a23*a34*a42 + a24*a32*a43 - a22*a34*a43 - a23*a32*a44 - a24*a33*a42; const HYPRE_Real M12 = a12*a34*a43 + a13*a32*a44 + a14*a33*a42 - a12*a33*a44 - a13*a34*a42 - a14*a32*a43; const HYPRE_Real M13 = a12*a23*a44 + a13*a24*a42 + a14*a22*a43 - a12*a24*a43 - a13*a22*a44 - a14*a23*a42; const HYPRE_Real M14 = a12*a24*a33 + a13*a22*a34 + a14*a23*a32 - a12*a23*a34 - a13*a24*a32 - a14*a22*a33; const HYPRE_Real M21 = a21*a34*a43 + a23*a31*a44 + a24*a33*a41 - a21*a33*a44 - a23*a34*a41 - a24*a31*a43; const HYPRE_Real M22 = a11*a33*a44 + a13*a34*a41 + a14*a31*a43 - a11*a34*a43 - a13*a31*a44 - a14*a33*a41; const HYPRE_Real M23 = a11*a24*a43 + a13*a21*a44 + a14*a23*a41 - a11*a23*a44 - a13*a24*a41 - a14*a21*a43; const HYPRE_Real M24 = a11*a23*a34 + a13*a24*a31 + a14*a21*a33 - a11*a24*a33 - a13*a21*a34 - a14*a23*a31; const HYPRE_Real M31 = a21*a32*a44 + a22*a34*a41 + a24*a31*a42 - a21*a34*a42 - a22*a31*a44 - a24*a32*a41; const HYPRE_Real M32 = a11*a34*a42 + a12*a31*a44 + a14*a32*a41 - a11*a32*a44 - a12*a34*a41 - a14*a31*a42; const HYPRE_Real M33 = a11*a22*a44 + a12*a24*a41 + a14*a21*a42 - a11*a24*a42 - a12*a21*a44 - a14*a22*a41; const HYPRE_Real M34 = a11*a24*a32 + a12*a21*a34 + a14*a22*a31 - a11*a22*a34 - a12*a24*a31 - a14*a21*a32; const HYPRE_Real M41 = a21*a33*a42 + a22*a31*a43 + a23*a32*a41 - a21*a32*a43 - a22*a33*a41 - a23*a31*a42; const HYPRE_Real M42 = a11*a32*a43 + a12*a33*a41 + a13*a31*a42 - a11*a33*a42 - a12*a31*a43 - a13*a32*a41; const HYPRE_Real M43 = a11*a23*a42 + a12*a21*a43 + a13*a22*a41 - a11*a22*a43 - a12*a23*a41 - a13*a21*a42; const HYPRE_Real M44 = a11*a22*a33 + a12*a23*a31 + a13*a21*a32 - a11*a23*a32 - a12*a21*a33 - a13*a22*a31; const HYPRE_Real det = a11*M11 + a12*M21 + a13*M31 + a14*M41; HYPRE_Real det_inv; //if ( fabs(det) < 1e-22 ) { //hypre_printf("### WARNING: Matrix is nearly singular! 
det = %e\n", det); /* printf("##----------------------------------------------\n"); printf("## %12.5e %12.5e %12.5e \n", a0, a1, a2); printf("## %12.5e %12.5e %12.5e \n", a3, a4, a5); printf("## %12.5e %12.5e %12.5e \n", a5, a6, a7); printf("##----------------------------------------------\n"); getchar(); */ //} det_inv = 1.0/det; a[0] = M11*det_inv; a[1] = M12*det_inv; a[2] = M13*det_inv; a[3] = M14*det_inv; a[4] = M21*det_inv; a[5] = M22*det_inv; a[6] = M23*det_inv; a[7] = M24*det_inv; a[8] = M31*det_inv; a[9] = M32*det_inv; a[10] = M33*det_inv; a[11] = M34*det_inv; a[12] = M41*det_inv; a[13] = M42*det_inv; a[14] = M43*det_inv; a[15] = M44*det_inv; } void hypre_blas_mat_inv(HYPRE_Real *a, HYPRE_Int n) { HYPRE_Int i,j,k,l,u,kn,in; HYPRE_Real alinv; if (n == 4) { hypre_blas_smat_inv_n4(a); } else { for (k=0; k<n; ++k) { kn = k*n; l = kn+k; //if (fabs(a[l]) < SMALLREAL) { // printf("### WARNING: Diagonal entry is close to zero!"); // printf("### WARNING: diag_%d=%e\n", k, a[l]); // a[l] = SMALLREAL; //} alinv = 1.0/a[l]; a[l] = alinv; for (j=0; j<k; ++j) { u = kn+j; a[u] *= alinv; } for (j=k+1; j<n; ++j) { u = kn+j; a[u] *= alinv; } for (i=0; i<k; ++i) { in = i*n; for (j=0; j<n; ++j) if (j!=k) { u = in+j; a[u] -= a[in+k]*a[kn+j]; } // end if (j!=k) } for (i=k+1; i<n; ++i) { in = i*n; for (j=0; j<n; ++j) if (j!=k) { u = in+j; a[u] -= a[in+k]*a[kn+j]; } // end if (j!=k) } for (i=0; i<k; ++i) { u=i*n+k; a[u] *= -alinv; } for (i=k+1; i<n; ++i) { u=i*n+k; a[u] *= -alinv; } } // end for (k=0; k<n; ++k) }// end if } HYPRE_Int hypre_block_jacobi_scaling(hypre_ParCSRMatrix *A, hypre_ParCSRMatrix **B_ptr, void *mgr_vdata, HYPRE_Int debug_flag) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int num_procs, my_id; HYPRE_Int blk_size = (mgr_data -> block_size); HYPRE_Int reserved_coarse_size = (mgr_data -> reserved_coarse_size); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_ParCSRMatrix *B; hypre_CSRMatrix *B_diag; HYPRE_Real *B_diag_data; HYPRE_Int *B_diag_i; HYPRE_Int *B_diag_j; hypre_CSRMatrix *B_offd; HYPRE_Int i,ii; HYPRE_Int j,jj; HYPRE_Int k; HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int n_block, left_size,inv_size; // HYPRE_Real wall_time; /* for debugging instrumentation */ HYPRE_Int bidx,bidxm1,bidxp1; HYPRE_Real * diaginv; const HYPRE_Int nb2 = blk_size*blk_size; HYPRE_Int block_scaling_error = 0; hypre_MPI_Comm_size(comm,&num_procs); hypre_MPI_Comm_rank(comm,&my_id); // HYPRE_Int num_threads = hypre_NumThreads(); //printf("n = %d\n",n); if (my_id == num_procs) { n_block = (n - reserved_coarse_size) / blk_size; left_size = n - blk_size*n_block; } else { n_block = n / blk_size; left_size = n - blk_size*n_block; } inv_size = nb2*n_block + left_size*left_size; //printf("inv_size = %d\n",inv_size); hypre_blockRelax_setup(A,blk_size,reserved_coarse_size,&(mgr_data -> diaginv)); // if (debug_flag==4) wall_time = time_getWallclockSeconds(); /*----------------------------------------------------------------------- * First Pass: Determine size of B and fill in *-----------------------------------------------------------------------*/ B_diag_i = hypre_CTAlloc(HYPRE_Int, n+1, HYPRE_MEMORY_HOST); B_diag_j = hypre_CTAlloc(HYPRE_Int, inv_size, HYPRE_MEMORY_HOST); B_diag_data = hypre_CTAlloc(HYPRE_Real, inv_size, HYPRE_MEMORY_HOST); B_diag_i[n] = inv_size; //B_offd_i = 
hypre_CTAlloc(HYPRE_Int, n+1, HYPRE_MEMORY_HOST); //B_offd_j = hypre_CTAlloc(HYPRE_Int, 1, HYPRE_MEMORY_HOST); //B_offd_data = hypre_CTAlloc(HYPRE_Real, 1, HYPRE_MEMORY_HOST); //B_offd_i[n] = 1; /*----------------------------------------------------------------- * Get all the diagonal sub-blocks *-----------------------------------------------------------------*/ diaginv = hypre_CTAlloc(HYPRE_Real, nb2, HYPRE_MEMORY_HOST); //printf("n_block = %d\n",n_block); for (i = 0;i < n_block; i++) { bidxm1 = i*blk_size; bidxp1 = (i+1)*blk_size; for (k = 0;k < blk_size; k++) { for (j = 0;j < blk_size; j++) { bidx = k*blk_size + j; diaginv[bidx] = 0.0; } for (ii = A_diag_i[bidxm1+k]; ii < A_diag_i[bidxm1+k+1]; ii++) { jj = A_diag_j[ii]; if (jj >= bidxm1 && jj < bidxp1 && fabs(A_diag_data[ii]) > SMALLREAL) { bidx = k*blk_size + jj - bidxm1; //printf("jj = %d,val = %e, bidx = %d\n",jj,A_diag_data[ii],bidx); diaginv[bidx] = A_diag_data[ii]; } } } /* for (k = 0;k < blk_size; k++) */ /* { */ /* for (j = 0;j < blk_size; j++) */ /* { */ /* bidx = k*blk_size + j; */ /* printf("diaginv[%d] = %e\n",bidx,diaginv[bidx]); */ /* } */ /* } */ hypre_blas_mat_inv(diaginv, blk_size); for (k = 0;k < blk_size; k++) { B_diag_i[i*blk_size+k] = i*nb2 + k*blk_size; //B_offd_i[i*nb2+k] = 0; for (j = 0;j < blk_size; j++) { bidx = i*nb2 + k*blk_size + j; B_diag_j[bidx] = i*blk_size + j; B_diag_data[bidx] = diaginv[k*blk_size + j]; } } } //printf("Before create\n"); B = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), hypre_ParCSRMatrixGlobalNumCols(A), hypre_ParCSRMatrixRowStarts(A), hypre_ParCSRMatrixColStarts(A), 0, inv_size, 0); //printf("After create\n"); B_diag = hypre_ParCSRMatrixDiag(B); hypre_CSRMatrixData(B_diag) = B_diag_data; hypre_CSRMatrixI(B_diag) = B_diag_i; hypre_CSRMatrixJ(B_diag) = B_diag_j; B_offd = hypre_ParCSRMatrixOffd(B); hypre_CSRMatrixData(B_offd) = NULL; hypre_CSRMatrixI(B_offd) = NULL; hypre_CSRMatrixJ(B_offd) = NULL; /* hypre_ParCSRMatrixOwnsRowStarts(B) = 0; */ *B_ptr = B; return(block_scaling_error); } HYPRE_Int hypre_blockRelax_solve (hypre_ParCSRMatrix *A, hypre_ParVector *f, hypre_ParVector *u, HYPRE_Real blk_size, HYPRE_Int n_block, HYPRE_Int left_size, HYPRE_Int method, HYPRE_Real *diaginv, hypre_ParVector *Vtemp) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd); hypre_Vector *u_local = hypre_ParVectorLocalVector(u); HYPRE_Real *u_data = hypre_VectorData(u_local); hypre_Vector *f_local = hypre_ParVectorLocalVector(f); HYPRE_Real *f_data = hypre_VectorData(f_local); hypre_Vector *Vtemp_local = hypre_ParVectorLocalVector(Vtemp); HYPRE_Real *Vtemp_data = hypre_VectorData(Vtemp_local); HYPRE_Real *Vext_data = NULL; HYPRE_Real *v_buf_data; HYPRE_Int i, j, k; HYPRE_Int ii, jj; HYPRE_Int bidx,bidx1; HYPRE_Int relax_error = 0; HYPRE_Int num_sends; HYPRE_Int index, start; HYPRE_Int num_procs, my_id; HYPRE_Real *res; const HYPRE_Int nb2 = blk_size*blk_size; 
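   /*-----------------------------------------------------------------
    * Note on the relaxation performed below: for each block b of size
    * blk_size and local row j within the block,
    *    res[j] = f[b*blk_size+j] - (A*u)[b*blk_size+j],
    * where the diagonal part uses the previous iterate (Vtemp) when
    * method == 0 (Jacobi) and the current iterate (u) when method == 1
    * (Gauss-Seidel); the off-diagonal part always uses the received
    * Vext values (Jacobi).  The block is then updated as
    *    u_b += inv(D_b) * res,
    * with inv(D_b) the dense block inverse stored row-major in diaginv
    * at offset b*nb2.
    *-----------------------------------------------------------------*/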
hypre_MPI_Comm_size(comm,&num_procs); hypre_MPI_Comm_rank(comm,&my_id); // HYPRE_Int num_threads = hypre_NumThreads(); res = hypre_CTAlloc(HYPRE_Real, blk_size, HYPRE_MEMORY_HOST); if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } if (num_procs > 1) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); Vext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST); if (num_cols_offd) { A_offd_j = hypre_CSRMatrixJ(A_offd); A_offd_data = hypre_CSRMatrixData(A_offd); } index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) v_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data, Vext_data); } /*----------------------------------------------------------------- * Copy current approximation into temporary vector. *-----------------------------------------------------------------*/ #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i = 0; i < n; i++) { Vtemp_data[i] = u_data[i]; //printf("u_old[%d] = %e\n",i,Vtemp_data[i]); } if (num_procs > 1) { hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; } /*----------------------------------------------------------------- * Relax points block by block *-----------------------------------------------------------------*/ for (i = 0;i < n_block; i++) { for (j = 0;j < blk_size; j++) { bidx = i*blk_size +j; res[j] = f_data[bidx]; for (jj = A_diag_i[bidx]; jj < A_diag_i[bidx+1]; jj++) { ii = A_diag_j[jj]; if (method == 0) { // Jacobi for diagonal part res[j] -= A_diag_data[jj] * Vtemp_data[ii]; } else if (method == 1) { // Gauss-Seidel for diagonal part res[j] -= A_diag_data[jj] * u_data[ii]; } else { // Default do Jacobi for diagonal part res[j] -= A_diag_data[jj] * Vtemp_data[ii]; } //printf("%d: Au= %e * %e =%e\n",ii,A_diag_data[jj],Vtemp_data[ii], res[j]); } for (jj = A_offd_i[bidx]; jj < A_offd_i[bidx+1]; jj++) { // always do Jacobi for off-diagonal part ii = A_offd_j[jj]; res[j] -= A_offd_data[jj] * Vext_data[ii]; } //printf("%d: res = %e\n",bidx,res[j]); } for (j = 0;j < blk_size; j++) { bidx1 = i*blk_size +j; for (k = 0;k < blk_size; k++) { bidx = i*nb2 +j*blk_size+k; u_data[bidx1] += res[k]*diaginv[bidx]; //printf("u[%d] = %e, diaginv[%d] = %e\n",bidx1,u_data[bidx1],bidx,diaginv[bidx]); } //printf("u[%d] = %e\n",bidx1,u_data[bidx1]); } } if (num_procs > 1) { hypre_TFree(Vext_data, HYPRE_MEMORY_HOST); hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST); } hypre_TFree(res, HYPRE_MEMORY_HOST); return(relax_error); } HYPRE_Int hypre_block_gs (hypre_ParCSRMatrix *A, hypre_ParVector *f, hypre_ParVector *u, HYPRE_Real blk_size, HYPRE_Int n_block, HYPRE_Int left_size, HYPRE_Real *diaginv, hypre_ParVector *Vtemp) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle 
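   /* hypre_block_gs mirrors hypre_blockRelax_solve but always uses the current
      iterate u on the local (diagonal) part, i.e. a block Gauss-Seidel sweep;
      off-processor contributions still use the values received in Vext_data. */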
*comm_handle; HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd); hypre_Vector *u_local = hypre_ParVectorLocalVector(u); HYPRE_Real *u_data = hypre_VectorData(u_local); hypre_Vector *f_local = hypre_ParVectorLocalVector(f); HYPRE_Real *f_data = hypre_VectorData(f_local); hypre_Vector *Vtemp_local = hypre_ParVectorLocalVector(Vtemp); HYPRE_Real *Vtemp_data = hypre_VectorData(Vtemp_local); HYPRE_Real *Vext_data = NULL; HYPRE_Real *v_buf_data; HYPRE_Int i, j, k; HYPRE_Int ii, jj; HYPRE_Int bidx,bidx1; HYPRE_Int relax_error = 0; HYPRE_Int num_sends; HYPRE_Int index, start; HYPRE_Int num_procs, my_id; HYPRE_Real *res; const HYPRE_Int nb2 = blk_size*blk_size; hypre_MPI_Comm_size(comm,&num_procs); hypre_MPI_Comm_rank(comm,&my_id); //HYPRE_Int num_threads = hypre_NumThreads(); res = hypre_CTAlloc(HYPRE_Real, blk_size, HYPRE_MEMORY_HOST); if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } if (num_procs > 1) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); Vext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST); if (num_cols_offd) { A_offd_j = hypre_CSRMatrixJ(A_offd); A_offd_data = hypre_CSRMatrixData(A_offd); } index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) v_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data, Vext_data); } /*----------------------------------------------------------------- * Copy current approximation into temporary vector. 
*-----------------------------------------------------------------*/ #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i = 0; i < n; i++) { Vtemp_data[i] = u_data[i]; //printf("u_old[%d] = %e\n",i,Vtemp_data[i]); } if (num_procs > 1) { hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; } /*----------------------------------------------------------------- * Relax points block by block *-----------------------------------------------------------------*/ for (i = 0;i < n_block; i++) { for (j = 0;j < blk_size; j++) { bidx = i*blk_size +j; res[j] = f_data[bidx]; for (jj = A_diag_i[bidx]; jj < A_diag_i[bidx+1]; jj++) { ii = A_diag_j[jj]; //res[j] -= A_diag_data[jj] * Vtemp_data[ii]; //printf("my_id = %d, %d: Au = %e * %e\n",my_id,ii,A_diag_data[jj],Vtemp_data[ii]); res[j] -= A_diag_data[jj] * u_data[ii]; //printf("%d: Au= %e * %e =%e\n",ii,A_diag_data[jj],Vtemp_data[ii], res[j]); } for (jj = A_offd_i[bidx]; jj < A_offd_i[bidx+1]; jj++) { ii = A_offd_j[jj]; res[j] -= A_offd_data[jj] * Vext_data[ii]; } //printf("%d: res = %e\n",bidx,res[j]); } for (j = 0;j < blk_size; j++) { bidx1 = i*blk_size +j; for (k = 0;k < blk_size; k++) { bidx = i*nb2 +j*blk_size+k; u_data[bidx1] += res[k]*diaginv[bidx]; //printf("u[%d] = %e, diaginv[%d] = %e\n",bidx1,u_data[bidx1],bidx,diaginv[bidx]); } //printf("u[%d] = %e\n",bidx1,u_data[bidx1]); } } if (num_procs > 1) { hypre_TFree(Vext_data, HYPRE_MEMORY_HOST); hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST); } hypre_TFree(res, HYPRE_MEMORY_HOST); return(relax_error); } /*Block smoother*/ HYPRE_Int hypre_blockRelax_setup(hypre_ParCSRMatrix *A, HYPRE_Int blk_size, HYPRE_Int reserved_coarse_size, HYPRE_Real **diaginvptr) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int i, j,k; HYPRE_Int ii, jj; HYPRE_Int bidx,bidxm1,bidxp1; HYPRE_Int num_procs, my_id; const HYPRE_Int nb2 = blk_size*blk_size; HYPRE_Int n_block; HYPRE_Int left_size,inv_size; HYPRE_Real *diaginv = *diaginvptr; hypre_MPI_Comm_size(comm,&num_procs); hypre_MPI_Comm_rank(comm,&my_id); //HYPRE_Int num_threads = hypre_NumThreads(); if (my_id == num_procs) { n_block = (n - reserved_coarse_size) / blk_size; left_size = n - blk_size*n_block; } else { n_block = n / blk_size; left_size = n - blk_size*n_block; } inv_size = nb2*n_block + left_size*left_size; if (diaginv !=NULL) { hypre_TFree(diaginv, HYPRE_MEMORY_HOST); diaginv = hypre_CTAlloc(HYPRE_Real, inv_size, HYPRE_MEMORY_HOST); } else { diaginv = hypre_CTAlloc(HYPRE_Real, inv_size, HYPRE_MEMORY_HOST); } /*----------------------------------------------------------------- * Get all the diagonal sub-blocks *-----------------------------------------------------------------*/ for (i = 0;i < n_block; i++) { bidxm1 = i*blk_size; bidxp1 = (i+1)*blk_size; //printf("bidxm1 = %d,bidxp1 = %d\n",bidxm1,bidxp1); for (k = 0;k < blk_size; k++) { for (j = 0;j < blk_size; j++) { bidx = i*nb2 + k*blk_size + j; diaginv[bidx] = 0.0; } for (ii = A_diag_i[bidxm1+k]; ii < A_diag_i[bidxm1+k+1]; ii++) { jj = A_diag_j[ii]; if (jj >= bidxm1 && jj < bidxp1 && fabs(A_diag_data[ii]) > SMALLREAL) { bidx = i*nb2 + k*blk_size + jj - bidxm1; //printf("jj = %d,val = %e, bidx = %d\n",jj,A_diag_data[ii],bidx); diaginv[bidx] = A_diag_data[ii]; } } } } for (i = 0;i < left_size; i++) { 
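      /* Trailing rows: the last left_size rows, which do not form a full
         blk_size block, are gathered into one additional dense block stored
         after the n_block full blocks in diaginv. */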
bidxm1 =n_block*nb2 + i*blk_size; bidxp1 =n_block*nb2 + (i+1)*blk_size; for (j = 0;j < left_size; j++) { bidx = n_block*nb2 + i*blk_size +j; diaginv[bidx] = 0.0; } for (ii = A_diag_i[n_block*blk_size + i]; ii < A_diag_i[n_block*blk_size+i+1]; ii++) { jj = A_diag_j[ii]; if (jj > n_block*blk_size) { bidx = n_block*nb2 + i*blk_size + jj - n_block*blk_size; diaginv[bidx] = A_diag_data[ii]; } } } /*----------------------------------------------------------------- * compute the inverses of all the diagonal sub-blocks *-----------------------------------------------------------------*/ if (blk_size > 1) { for (i = 0;i < n_block; i++) { hypre_blas_mat_inv(diaginv+i*nb2, blk_size); } hypre_blas_mat_inv(diaginv+(HYPRE_Int)(blk_size*nb2),left_size); } else { for (i = 0;i < n; i++) { // FIX-ME: zero-diagonal should be tested previously if (fabs(diaginv[i]) < SMALLREAL) diaginv[i] = 0.0; else diaginv[i] = 1.0 / diaginv[i]; } } *diaginvptr = diaginv; return 1; } HYPRE_Int hypre_blockRelax(hypre_ParCSRMatrix *A, hypre_ParVector *f, hypre_ParVector *u, HYPRE_Int blk_size, HYPRE_Int reserved_coarse_size, HYPRE_Int method, hypre_ParVector *Vtemp, hypre_ParVector *Ztemp) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int i, j,k; HYPRE_Int ii, jj; HYPRE_Int bidx,bidxm1,bidxp1; HYPRE_Int relax_error = 0; HYPRE_Int num_procs, my_id; const HYPRE_Int nb2 = blk_size*blk_size; HYPRE_Int n_block; HYPRE_Int left_size,inv_size; HYPRE_Real *diaginv; hypre_MPI_Comm_size(comm,&num_procs); hypre_MPI_Comm_rank(comm,&my_id); //HYPRE_Int num_threads = hypre_NumThreads(); if (my_id == num_procs) { n_block = (n - reserved_coarse_size) / blk_size; left_size = n - blk_size*n_block; } else { n_block = n / blk_size; left_size = n - blk_size*n_block; } inv_size = nb2*n_block + left_size*left_size; diaginv = hypre_CTAlloc(HYPRE_Real, inv_size, HYPRE_MEMORY_HOST); /*----------------------------------------------------------------- * Get all the diagonal sub-blocks *-----------------------------------------------------------------*/ for (i = 0;i < n_block; i++) { bidxm1 = i*blk_size; bidxp1 = (i+1)*blk_size; //printf("bidxm1 = %d,bidxp1 = %d\n",bidxm1,bidxp1); for (k = 0;k < blk_size; k++) { for (j = 0;j < blk_size; j++) { bidx = i*nb2 + k*blk_size + j; diaginv[bidx] = 0.0; } for (ii = A_diag_i[bidxm1+k]; ii < A_diag_i[bidxm1+k+1]; ii++) { jj = A_diag_j[ii]; if (jj >= bidxm1 && jj < bidxp1 && fabs(A_diag_data[ii]) > SMALLREAL) { bidx = i*nb2 + k*blk_size + jj - bidxm1; //printf("jj = %d,val = %e, bidx = %d\n",jj,A_diag_data[ii],bidx); diaginv[bidx] = A_diag_data[ii]; } } } } for (i = 0;i < left_size; i++) { bidxm1 =n_block*nb2 + i*blk_size; bidxp1 =n_block*nb2 + (i+1)*blk_size; for (j = 0;j < left_size; j++) { bidx = n_block*nb2 + i*blk_size +j; diaginv[bidx] = 0.0; } for (ii = A_diag_i[n_block*blk_size + i]; ii < A_diag_i[n_block*blk_size+i+1]; ii++) { jj = A_diag_j[ii]; if (jj > n_block*blk_size) { bidx = n_block*nb2 + i*blk_size + jj - n_block*blk_size; diaginv[bidx] = A_diag_data[ii]; } } } /* for (i = 0;i < n_block; i++) { for (j = 0;j < blk_size; j++) { for (k = 0;k < blk_size; k ++) { bidx = i*nb2 + j*blk_size + k; printf("%e\t",diaginv[bidx]); } printf("\n"); } printf("\n"); } */ /*----------------------------------------------------------------- * compute the inverses of all 
the diagonal sub-blocks *-----------------------------------------------------------------*/ if (blk_size > 1) { for (i = 0;i < n_block; i++) { hypre_blas_mat_inv(diaginv+i*nb2, blk_size); } hypre_blas_mat_inv(diaginv+(HYPRE_Int)(blk_size*nb2),left_size); /* for (i = 0;i < n_block; i++) { for (j = 0;j < blk_size; j++) { for (k = 0;k < blk_size; k ++) { bidx = i*nb2 + j*blk_size + k; printf("%e\t",diaginv[bidx]); } printf("\n"); } printf("\n"); } */ } else { for (i = 0;i < n; i++) { // FIX-ME: zero-diagonal should be tested previously if (fabs(diaginv[i]) < SMALLREAL) diaginv[i] = 0.0; else diaginv[i] = 1.0 / diaginv[i]; } } hypre_blockRelax_solve(A,f,u,blk_size,n_block,left_size,method,diaginv,Vtemp); /*----------------------------------------------------------------- * Free temperary memeory *-----------------------------------------------------------------*/ hypre_TFree(diaginv, HYPRE_MEMORY_HOST); return(relax_error); } /* set coarse grid solver */ HYPRE_Int hypre_MGRSetFSolver( void *mgr_vdata, HYPRE_Int (*fine_grid_solver_solve)(void*,void*,void*,void*), HYPRE_Int (*fine_grid_solver_setup)(void*,void*,void*,void*), void *fsolver ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; if (!mgr_data) { hypre_error_in_arg(1); return hypre_error_flag; } HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); HYPRE_Solver **aff_solver = (mgr_data -> aff_solver); if (aff_solver == NULL) aff_solver = hypre_CTAlloc(HYPRE_Solver*, max_num_coarse_levels, HYPRE_MEMORY_HOST); /* only allow to set F-solver for the first level */ aff_solver[0] = (HYPRE_Solver *) fsolver; (mgr_data -> fine_grid_solver_solve) = fine_grid_solver_solve; (mgr_data -> fine_grid_solver_setup) = fine_grid_solver_setup; (mgr_data -> aff_solver) = aff_solver; (mgr_data -> use_default_fsolver) = 0; return hypre_error_flag; } /* set coarse grid solver */ HYPRE_Int hypre_MGRSetCoarseSolver( void *mgr_vdata, HYPRE_Int (*coarse_grid_solver_solve)(void*,void*,void*,void*), HYPRE_Int (*coarse_grid_solver_setup)(void*,void*,void*,void*), void *coarse_grid_solver ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; if (!mgr_data) { hypre_error_in_arg(1); return hypre_error_flag; } (mgr_data -> coarse_grid_solver_solve) = coarse_grid_solver_solve; (mgr_data -> coarse_grid_solver_setup) = coarse_grid_solver_setup; (mgr_data -> coarse_grid_solver) = (HYPRE_Solver) coarse_grid_solver; (mgr_data -> use_default_cgrid_solver) = 0; return hypre_error_flag; } HYPRE_Int hypre_MGRSetAffInv( void *mgr_vdata, hypre_ParCSRMatrix *A_ff_inv ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> A_ff_inv) = A_ff_inv; return hypre_error_flag; } /* Set the maximum number of coarse levels. * maxcoarselevs = 1 yields the default 2-grid scheme. */ HYPRE_Int hypre_MGRSetMaxCoarseLevels( void *mgr_vdata, HYPRE_Int maxcoarselevs ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> max_num_coarse_levels) = maxcoarselevs; return hypre_error_flag; } /* Set the system block size */ HYPRE_Int hypre_MGRSetBlockSize( void *mgr_vdata, HYPRE_Int bsize ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> block_size) = bsize; return hypre_error_flag; } /* Set the relaxation type for the fine levels of the reduction. * Currently supports the following flavors of relaxation types * as described in the documentation: * relax_types 0 - 8, 13, 14, 18, 19, 98. * See par_relax.c and par_relax_more.c for more details. 
* */ HYPRE_Int hypre_MGRSetRelaxType( void *mgr_vdata, HYPRE_Int relax_type ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> relax_type) = relax_type; return hypre_error_flag; } /* Set the number of relaxation sweeps */ HYPRE_Int hypre_MGRSetNumRelaxSweeps( void *mgr_vdata, HYPRE_Int nsweeps ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> num_relax_sweeps) = nsweeps; return hypre_error_flag; } /* Set the F-relaxation strategy: 0=single level, 1=multi level */ HYPRE_Int hypre_MGRSetFRelaxMethod( void *mgr_vdata, HYPRE_Int relax_method ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i; HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); if((mgr_data -> Frelax_method) != NULL) { hypre_TFree(mgr_data -> Frelax_method, HYPRE_MEMORY_HOST); (mgr_data -> Frelax_method) = NULL; } HYPRE_Int *Frelax_method = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST); for (i=0; i < max_num_coarse_levels; i++) { Frelax_method[i] = relax_method; } (mgr_data -> Frelax_method) = Frelax_method; return hypre_error_flag; } /* Set the F-relaxation strategy: 0=single level, 1=multi level */ HYPRE_Int hypre_MGRSetLevelFRelaxMethod( void *mgr_vdata, HYPRE_Int *relax_method ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i; HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); if((mgr_data -> Frelax_method) != NULL) { hypre_TFree(mgr_data -> Frelax_method, HYPRE_MEMORY_HOST); (mgr_data -> Frelax_method) = NULL; } HYPRE_Int *Frelax_method = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST); if (relax_method != NULL) { for (i=0; i < max_num_coarse_levels; i++) { Frelax_method[i] = relax_method[i]; } } else { for (i = 0; i < max_num_coarse_levels; i++) { Frelax_method[i] = 0; } } (mgr_data -> Frelax_method) = Frelax_method; return hypre_error_flag; } /* Coarse grid method: 0=Galerkin RAP, 1=non-Galerkin with dropping*/ HYPRE_Int hypre_MGRSetCoarseGridMethod( void *mgr_vdata, HYPRE_Int *cg_method ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i; HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); if((mgr_data -> use_non_galerkin_cg) != NULL) { hypre_TFree(mgr_data -> use_non_galerkin_cg, HYPRE_MEMORY_HOST); (mgr_data -> use_non_galerkin_cg) = NULL; } HYPRE_Int *use_non_galerkin_cg = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST); if (cg_method != NULL) { for (i=0; i < max_num_coarse_levels; i++) { use_non_galerkin_cg[i] = cg_method[i]; } } else { for (i = 0; i < max_num_coarse_levels; i++) { use_non_galerkin_cg[i] = 0; } } (mgr_data -> use_non_galerkin_cg) = use_non_galerkin_cg; return hypre_error_flag; } /* Set the F-relaxation number of functions for each level */ HYPRE_Int hypre_MGRSetLevelFRelaxNumFunctions( void *mgr_vdata, HYPRE_Int *num_functions ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i; HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); if((mgr_data -> Frelax_num_functions) != NULL) { hypre_TFree(mgr_data -> Frelax_num_functions, HYPRE_MEMORY_HOST); (mgr_data -> Frelax_num_functions) = NULL; } HYPRE_Int *Frelax_num_functions = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST); if (num_functions != NULL) { for (i=0; i < max_num_coarse_levels; i++) { Frelax_num_functions[i] = num_functions[i]; } } else { for (i = 0; i < max_num_coarse_levels; i++) { Frelax_num_functions[i] = 1; } } (mgr_data -> 
Frelax_num_functions) = Frelax_num_functions; return hypre_error_flag; } /* Set the type of the restriction type * for computing restriction operator */ HYPRE_Int hypre_MGRSetLevelRestrictType( void *mgr_vdata, HYPRE_Int *restrict_type) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i; HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); if ((mgr_data -> restrict_type) != NULL) { hypre_TFree((mgr_data -> restrict_type), HYPRE_MEMORY_HOST); (mgr_data -> restrict_type) = NULL; } HYPRE_Int *level_restrict_type = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST); if (restrict_type != NULL) { for (i=0; i < max_num_coarse_levels; i++) { level_restrict_type[i] = *(restrict_type + i); } } else { for (i=0; i < max_num_coarse_levels; i++) { level_restrict_type[i] = 0; } } (mgr_data -> restrict_type) = level_restrict_type; return hypre_error_flag; } /* Set the type of the restriction type * for computing restriction operator */ HYPRE_Int hypre_MGRSetRestrictType( void *mgr_vdata, HYPRE_Int restrict_type) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i; HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); if ((mgr_data -> restrict_type) != NULL) { hypre_TFree((mgr_data -> restrict_type), HYPRE_MEMORY_HOST); (mgr_data -> restrict_type) = NULL; } HYPRE_Int *level_restrict_type = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST); for (i=0; i < max_num_coarse_levels; i++) { level_restrict_type[i] = restrict_type; } (mgr_data -> restrict_type) = level_restrict_type; return hypre_error_flag; } /* Set the number of Jacobi interpolation iterations * for computing interpolation operator */ HYPRE_Int hypre_MGRSetNumRestrictSweeps( void *mgr_vdata, HYPRE_Int nsweeps ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> num_restrict_sweeps) = nsweeps; return hypre_error_flag; } /* Set the type of the interpolation * for computing interpolation operator */ HYPRE_Int hypre_MGRSetInterpType( void *mgr_vdata, HYPRE_Int interpType) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i; HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); if ((mgr_data -> interp_type) != NULL) { hypre_TFree((mgr_data -> interp_type), HYPRE_MEMORY_HOST); (mgr_data -> interp_type) = NULL; } HYPRE_Int *level_interp_type = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST); for (i=0; i < max_num_coarse_levels; i++) { level_interp_type[i] = interpType; } (mgr_data -> interp_type) = level_interp_type; return hypre_error_flag; } /* Set the type of the interpolation * for computing interpolation operator */ HYPRE_Int hypre_MGRSetLevelInterpType( void *mgr_vdata, HYPRE_Int *interpType) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i; HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); if ((mgr_data -> interp_type) != NULL) { hypre_TFree((mgr_data -> interp_type), HYPRE_MEMORY_HOST); (mgr_data -> interp_type) = NULL; } HYPRE_Int *level_interp_type = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST); if (interpType != NULL) { for (i=0; i < max_num_coarse_levels; i++) { level_interp_type[i] = *(interpType + i); } } else { for (i=0; i < max_num_coarse_levels; i++) { level_interp_type[i] = 2; } } (mgr_data -> interp_type) = level_interp_type; return hypre_error_flag; } /* Set the number of Jacobi interpolation iterations * for computing interpolation operator */ HYPRE_Int 
hypre_MGRSetNumInterpSweeps( void *mgr_vdata, HYPRE_Int nsweeps ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> num_interp_sweeps) = nsweeps; return hypre_error_flag; } /* Set the threshold to truncate the coarse grid at each * level of reduction */ HYPRE_Int hypre_MGRSetTruncateCoarseGridThreshold( void *mgr_vdata, HYPRE_Real threshold) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> truncate_coarse_grid_threshold) = threshold; return hypre_error_flag; } /* Set print level for mgr solver */ HYPRE_Int hypre_MGRSetPrintLevel( void *mgr_vdata, HYPRE_Int print_level ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> print_level) = print_level; return hypre_error_flag; } /* Set print level for mgr solver */ HYPRE_Int hypre_MGRSetLogging( void *mgr_vdata, HYPRE_Int logging ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> logging) = logging; return hypre_error_flag; } /* Set max number of iterations for mgr solver */ HYPRE_Int hypre_MGRSetMaxIter( void *mgr_vdata, HYPRE_Int max_iter ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> max_iter) = max_iter; return hypre_error_flag; } /* Set convergence tolerance for mgr solver */ HYPRE_Int hypre_MGRSetTol( void *mgr_vdata, HYPRE_Real tol ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> tol) = tol; return hypre_error_flag; } /* Set max number of iterations for mgr global smoother */ HYPRE_Int hypre_MGRSetMaxGlobalsmoothIters( void *mgr_vdata, HYPRE_Int max_iter ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> global_smooth_iters) = max_iter; return hypre_error_flag; } /* Set global smoothing type for mgr solver */ HYPRE_Int hypre_MGRSetGlobalsmoothType( void *mgr_vdata, HYPRE_Int iter_type ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> global_smooth_type) = iter_type; return hypre_error_flag; } /* Set the maximum number of non-zero entries for restriction and interpolation operator if classical AMG interpolation is used */ HYPRE_Int hypre_MGRSetPMaxElmts( void *mgr_vdata, HYPRE_Int P_max_elmts) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> P_max_elmts) = P_max_elmts; return hypre_error_flag; } /* Get number of iterations for MGR solver */ HYPRE_Int hypre_MGRGetNumIterations( void *mgr_vdata, HYPRE_Int *num_iterations ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; if (!mgr_data) { hypre_error_in_arg(1); return hypre_error_flag; } *num_iterations = mgr_data->num_iterations; return hypre_error_flag; } /* Get residual norms for MGR solver */ HYPRE_Int hypre_MGRGetFinalRelativeResidualNorm( void *mgr_vdata, HYPRE_Real *res_norm ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; if (!mgr_data) { hypre_error_in_arg(1); return hypre_error_flag; } *res_norm = mgr_data->final_rel_residual_norm; return hypre_error_flag; } HYPRE_Int hypre_MGRGetCoarseGridConvergenceFactor( void *mgr_vdata , HYPRE_Real *conv_factor ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; if (!mgr_data) { hypre_error_in_arg(1); return hypre_error_flag; } *conv_factor = (mgr_data -> cg_convergence_factor); return hypre_error_flag; } /* Build A_FF matrix from A given a CF_marker array */ HYPRE_Int hypre_MGRGetSubBlock( hypre_ParCSRMatrix *A, HYPRE_Int *row_cf_marker, HYPRE_Int *col_cf_marker, HYPRE_Int debug_flag, hypre_ParCSRMatrix **A_block_ptr ) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); 
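   /* hypre_MGRGetSubBlock extracts the submatrix of A whose rows are marked
    * positive in row_cf_marker and whose columns are marked positive in
    * col_cf_marker.  The column marker is first exchanged so off-processor
    * columns can be tested against CF_marker_offd; the submatrix is then built
    * in two passes (count the nonzeros, then fill the diag and offd parts). */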
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); //HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A); HYPRE_Int *coarse_dof_func_ptr = NULL; HYPRE_BigInt *num_row_cpts_global = NULL; HYPRE_BigInt *num_col_cpts_global = NULL; hypre_ParCSRMatrix *Ablock; HYPRE_BigInt *col_map_offd_Ablock; HYPRE_Int *tmp_map_offd = NULL; HYPRE_Int *CF_marker_offd = NULL; hypre_CSRMatrix *Ablock_diag; hypre_CSRMatrix *Ablock_offd; HYPRE_Real *Ablock_diag_data; HYPRE_Int *Ablock_diag_i; HYPRE_Int *Ablock_diag_j; HYPRE_Real *Ablock_offd_data; HYPRE_Int *Ablock_offd_i; HYPRE_Int *Ablock_offd_j; HYPRE_Int Ablock_diag_size, Ablock_offd_size; HYPRE_Int *Ablock_marker; HYPRE_Int ii_counter; HYPRE_Int jj_counter, jj_counter_offd; HYPRE_Int *jj_count, *jj_count_offd; HYPRE_Int start_indexing = 0; /* start indexing for Aff_data at 0 */ HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int *fine_to_coarse; HYPRE_Int *coarse_counter; HYPRE_Int *col_coarse_counter; HYPRE_Int coarse_shift; HYPRE_BigInt total_global_row_cpts; HYPRE_BigInt total_global_col_cpts; HYPRE_Int num_cols_Ablock_offd; // HYPRE_BigInt my_first_row_cpt, my_first_col_cpt; HYPRE_Int i,i1; HYPRE_Int j,jl,jj; HYPRE_Int start; HYPRE_Int my_id; HYPRE_Int num_procs; HYPRE_Int num_threads; HYPRE_Int num_sends; HYPRE_Int index; HYPRE_Int ns, ne, size, rest; HYPRE_Int *int_buf_data; HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag); // HYPRE_Real wall_time; /* for debugging instrumentation */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm,&my_id); //num_threads = hypre_NumThreads(); // Temporary fix, disable threading // TODO: enable threading num_threads = 1; /* get the number of coarse rows */ hypre_BoomerAMGCoarseParms(comm, local_numrows, 1, NULL, row_cf_marker, &coarse_dof_func_ptr, &num_row_cpts_global); hypre_TFree(coarse_dof_func_ptr, HYPRE_MEMORY_HOST); coarse_dof_func_ptr = NULL; //hypre_printf("my_id = %d, cpts_this = %d, cpts_next = %d\n", my_id, num_row_cpts_global[0], num_row_cpts_global[1]); #ifdef HYPRE_NO_GLOBAL_PARTITION // my_first_row_cpt = num_row_cpts_global[0]; if (my_id == (num_procs -1)) total_global_row_cpts = num_row_cpts_global[1]; hypre_MPI_Bcast(&total_global_row_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); #else // my_first_row_cpt = num_row_cpts_global[my_id]; total_global_row_cpts = num_row_cpts_global[num_procs]; #endif /* get the number of coarse rows */ hypre_BoomerAMGCoarseParms(comm, local_numrows, 1, NULL, col_cf_marker, &coarse_dof_func_ptr, &num_col_cpts_global); hypre_TFree(coarse_dof_func_ptr, HYPRE_MEMORY_HOST); coarse_dof_func_ptr = NULL; //hypre_printf("my_id = %d, cpts_this = %d, cpts_next = %d\n", my_id, num_col_cpts_global[0], num_col_cpts_global[1]); #ifdef HYPRE_NO_GLOBAL_PARTITION // my_first_col_cpt = num_col_cpts_global[0]; if (my_id == (num_procs -1)) total_global_col_cpts = num_col_cpts_global[1]; hypre_MPI_Bcast(&total_global_col_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); #else // my_first_col_cpt = num_col_cpts_global[my_id]; total_global_col_cpts = 
num_col_cpts_global[num_procs]; #endif /*------------------------------------------------------------------- * Get the CF_marker data for the off-processor columns *-------------------------------------------------------------------*/ if (debug_flag < 0) { debug_flag = -debug_flag; } // if (debug_flag==4) wall_time = time_getWallclockSeconds(); if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = col_cf_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); /*----------------------------------------------------------------------- * First Pass: Determine size of Ablock and fill in fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Initialize counters and allocate mapping vector. *-----------------------------------------------------------------------*/ coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); col_coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1; jj_counter = start_indexing; jj_counter_offd = start_indexing; /*----------------------------------------------------------------------- * Loop over fine grid. *-----------------------------------------------------------------------*/ /* RDF: this looks a little tricky, but doable */ #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE #endif #endif for (j = 0; j < num_threads; j++) { size = n_fine/num_threads; rest = n_fine - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is an F-point, we loop through the columns and select * the F-columns. Also set up mapping vector. *--------------------------------------------------------------------*/ if (col_cf_marker[i] > 0) { fine_to_coarse[i] = col_coarse_counter[j]; col_coarse_counter[j]++; } if (row_cf_marker[i] > 0) { //fine_to_coarse[i] = coarse_counter[j]; coarse_counter[j]++; for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { i1 = A_diag_j[jj]; if (col_cf_marker[i1] > 0) { jj_count[j]++; } } if (num_procs > 1) { for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { i1 = A_offd_j[jj]; if (CF_marker_offd[i1] > 0) { jj_count_offd[j]++; } } } } } } /*----------------------------------------------------------------------- * Allocate arrays.
 *-----------------------------------------------------------------------*/ for (i=0; i < num_threads-1; i++) { jj_count[i+1] += jj_count[i]; jj_count_offd[i+1] += jj_count_offd[i]; coarse_counter[i+1] += coarse_counter[i]; col_coarse_counter[i+1] += col_coarse_counter[i]; } i = num_threads-1; jj_counter = jj_count[i]; jj_counter_offd = jj_count_offd[i]; ii_counter = coarse_counter[i]; Ablock_diag_size = jj_counter; Ablock_diag_i = hypre_CTAlloc(HYPRE_Int, ii_counter+1, HYPRE_MEMORY_HOST); Ablock_diag_j = hypre_CTAlloc(HYPRE_Int, Ablock_diag_size, HYPRE_MEMORY_HOST); Ablock_diag_data = hypre_CTAlloc(HYPRE_Real, Ablock_diag_size, HYPRE_MEMORY_HOST); Ablock_diag_i[ii_counter] = jj_counter; Ablock_offd_size = jj_counter_offd; Ablock_offd_i = hypre_CTAlloc(HYPRE_Int, ii_counter+1, HYPRE_MEMORY_HOST); Ablock_offd_j = hypre_CTAlloc(HYPRE_Int, Ablock_offd_size, HYPRE_MEMORY_HOST); Ablock_offd_data = hypre_CTAlloc(HYPRE_Real, Ablock_offd_size, HYPRE_MEMORY_HOST); /*----------------------------------------------------------------------- * Initialize some stuff. *-----------------------------------------------------------------------*/ jj_counter = start_indexing; jj_counter_offd = start_indexing; //----------------------------------------------------------------------- // Send and receive fine_to_coarse info. //----------------------------------------------------------------------- // if (debug_flag==4) wall_time = time_getWallclockSeconds(); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE #endif #endif for (j = 0; j < num_threads; j++) { coarse_shift = 0; if (j > 0) coarse_shift = col_coarse_counter[j-1]; size = n_fine/num_threads; rest = n_fine - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) fine_to_coarse[i] += coarse_shift; } // if (debug_flag==4) wall_time = time_getWallclockSeconds(); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif // for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_col_cpt; #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,jl,i1,jj,ns,ne,size,rest,jj_counter,jj_counter_offd,ii_counter) HYPRE_SMP_SCHEDULE #endif #endif for (jl = 0; jl < num_threads; jl++) { size = n_fine/num_threads; rest = n_fine - size*num_threads; if (jl < rest) { ns = jl*size+jl; ne = (jl+1)*size+jl+1; } else { ns = jl*size+rest; ne = (jl+1)*size+rest; } jj_counter = 0; if (jl > 0) jj_counter = jj_count[jl-1]; jj_counter_offd = 0; if (jl > 0) jj_counter_offd = jj_count_offd[jl-1]; ii_counter = 0; for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is an F-point, we loop through the columns and select * the F-columns. Also set up mapping vector.
*--------------------------------------------------------------------*/ if (row_cf_marker[i] > 0) { // Diagonal part of Ablock // Ablock_diag_i[ii_counter] = jj_counter; for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { i1 = A_diag_j[jj]; if (col_cf_marker[i1] > 0) { Ablock_diag_j[jj_counter] = fine_to_coarse[i1]; Ablock_diag_data[jj_counter] = A_diag_data[jj]; jj_counter++; } } // Off-Diagonal part of Ablock // Ablock_offd_i[ii_counter] = jj_counter_offd; if (num_procs > 1) { for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { i1 = A_offd_j[jj]; if (CF_marker_offd[i1] > 0) { Ablock_offd_j[jj_counter_offd] = i1; Ablock_offd_data[jj_counter_offd] = A_offd_data[jj]; jj_counter_offd++; } } } ii_counter++; } } Ablock_offd_i[ii_counter] = jj_counter_offd; Ablock_diag_i[ii_counter] = jj_counter; } Ablock = hypre_ParCSRMatrixCreate(comm, total_global_row_cpts, total_global_col_cpts, num_row_cpts_global, num_col_cpts_global, 0, Ablock_diag_i[ii_counter], Ablock_offd_i[ii_counter]); Ablock_diag = hypre_ParCSRMatrixDiag(Ablock); hypre_CSRMatrixData(Ablock_diag) = Ablock_diag_data; hypre_CSRMatrixI(Ablock_diag) = Ablock_diag_i; hypre_CSRMatrixJ(Ablock_diag) = Ablock_diag_j; Ablock_offd = hypre_ParCSRMatrixOffd(Ablock); hypre_CSRMatrixData(Ablock_offd) = Ablock_offd_data; hypre_CSRMatrixI(Ablock_offd) = Ablock_offd_i; hypre_CSRMatrixJ(Ablock_offd) = Ablock_offd_j; num_cols_Ablock_offd = 0; if (Ablock_offd_size) { Ablock_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i=0; i < num_cols_A_offd; i++) Ablock_marker[i] = 0; num_cols_Ablock_offd = 0; for (i=0; i < Ablock_offd_size; i++) { index = Ablock_offd_j[i]; if (!Ablock_marker[index]) { num_cols_Ablock_offd++; Ablock_marker[index] = 1; } } col_map_offd_Ablock = hypre_CTAlloc(HYPRE_BigInt, num_cols_Ablock_offd, HYPRE_MEMORY_HOST); tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_Ablock_offd, HYPRE_MEMORY_HOST); index = 0; for (i=0; i < num_cols_Ablock_offd; i++) { while (Ablock_marker[index]==0) index++; tmp_map_offd[i] = index++; } #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i=0; i < Ablock_offd_size; i++) Ablock_offd_j[i] = hypre_BinarySearch(tmp_map_offd, Ablock_offd_j[i], num_cols_Ablock_offd); hypre_TFree(Ablock_marker, HYPRE_MEMORY_HOST); } if (num_cols_Ablock_offd) { hypre_ParCSRMatrixColMapOffd(Ablock) = col_map_offd_Ablock; hypre_CSRMatrixNumCols(Ablock_offd) = num_cols_Ablock_offd; } hypre_GetCommPkgRTFromCommPkgA(Ablock, A, fine_to_coarse, tmp_map_offd); #ifdef HYPRE_NO_GLOBAL_PARTITION /* Create the assumed partition */ if (hypre_ParCSRMatrixAssumedPartition(Ablock) == NULL) { hypre_ParCSRMatrixCreateAssumedPartition(Ablock); } #endif *A_block_ptr= Ablock; hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST); hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST); hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST); hypre_TFree(col_coarse_counter, HYPRE_MEMORY_HOST); hypre_TFree(jj_count, HYPRE_MEMORY_HOST); hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST); return(0); } /* Build A_FF matrix from A given a CF_marker array */ HYPRE_Int hypre_MGRBuildAff( hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, HYPRE_Int debug_flag, hypre_ParCSRMatrix **A_ff_ptr ) { HYPRE_Int i; HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A)); /* create a copy of the CF_marker array and 
switch C-points to F-points */ HYPRE_Int *CF_marker_copy = hypre_CTAlloc(HYPRE_Int, local_numrows, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i = 0; i < local_numrows; i++) { CF_marker_copy[i] = -CF_marker[i]; } hypre_MGRGetSubBlock(A, CF_marker_copy, CF_marker_copy, debug_flag, A_ff_ptr); /* Free copy of CF marker */ hypre_TFree(CF_marker_copy, HYPRE_MEMORY_HOST); return(0); } /********************************************************************************* * This routine assumes that the 'toVector' is larger than the 'fromVector' and * the CF_marker is of the same length as the toVector. There must be n 'point_type' * values in the CF_marker, where n is the length of the 'fromVector'. * It adds the values of the 'fromVector' to the 'toVector' where the marker is the * same as the 'point_type' *********************************************************************************/ HYPRE_Int hypre_MGRAddVectorP ( HYPRE_Int *CF_marker, HYPRE_Int point_type, HYPRE_Real a, hypre_ParVector *fromVector, HYPRE_Real b, hypre_ParVector **toVector ) { hypre_Vector *fromVectorLocal = hypre_ParVectorLocalVector(fromVector); HYPRE_Real *fromVectorData = hypre_VectorData(fromVectorLocal); hypre_Vector *toVectorLocal = hypre_ParVectorLocalVector(*toVector); HYPRE_Real *toVectorData = hypre_VectorData(toVectorLocal); HYPRE_Int n = hypre_ParVectorActualLocalSize(*toVector); HYPRE_Int i, j; j = 0; for (i = 0; i < n; i++) { if (CF_marker[i] == point_type) { toVectorData[i] = b * toVectorData[i] + a * fromVectorData[j]; j++; } } return 0; } /************************************************************************************* * This routine assumes that the 'fromVector' is larger than the 'toVector' and * the CF_marker is of the same length as the fromVector. There must be n 'point_type' * values in the CF_marker, where n is the length of the 'toVector'. 
 * It adds the values of the 'fromVector' where the marker is the * same as the 'point_type' to the 'toVector' *************************************************************************************/ HYPRE_Int hypre_MGRAddVectorR ( HYPRE_Int *CF_marker, HYPRE_Int point_type, HYPRE_Real a, hypre_ParVector *fromVector, HYPRE_Real b, hypre_ParVector **toVector ) { hypre_Vector *fromVectorLocal = hypre_ParVectorLocalVector(fromVector); HYPRE_Real *fromVectorData = hypre_VectorData(fromVectorLocal); hypre_Vector *toVectorLocal = hypre_ParVectorLocalVector(*toVector); HYPRE_Real *toVectorData = hypre_VectorData(toVectorLocal); HYPRE_Int n = hypre_ParVectorActualLocalSize(fromVector); HYPRE_Int i, j; j = 0; for (i = 0; i < n; i++) { if (CF_marker[i] == point_type) { toVectorData[j] = b * toVectorData[j] + a * fromVectorData[i]; j++; } } return 0; } /* HYPRE_Int hypre_MGRBuildAffRAP( MPI_Comm comm, HYPRE_Int local_num_variables, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int *CF_marker, HYPRE_Int **coarse_dof_func_ptr, HYPRE_BigInt **coarse_pnts_global_ptr, hypre_ParCSRMatrix *A, HYPRE_Int debug_flag, hypre_ParCSRMatrix **P_f_ptr, hypre_ParCSRMatrix **A_ff_ptr ) { HYPRE_Int *CF_marker_copy = hypre_CTAlloc(HYPRE_Int, local_num_variables, HYPRE_MEMORY_HOST); HYPRE_Int i; for (i = 0; i < local_num_variables; i++) { CF_marker_copy[i] = -CF_marker[i]; } hypre_BoomerAMGCoarseParms(comm, local_num_variables, 1, NULL, CF_marker_copy, coarse_dof_func_ptr, coarse_pnts_global_ptr); hypre_MGRBuildP(A, CF_marker_copy, (*coarse_pnts_global_ptr), 0, debug_flag, P_f_ptr); hypre_BoomerAMGBuildCoarseOperator(*P_f_ptr, A, *P_f_ptr, A_ff_ptr); hypre_TFree(CF_marker_copy, HYPRE_MEMORY_HOST); return 0; } */ /* Get pointer to coarse grid matrix for MGR solver */ HYPRE_Int hypre_MGRGetCoarseGridMatrix( void *mgr_vdata, hypre_ParCSRMatrix **RAP ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; if (!mgr_data) { hypre_error_in_arg(1); return hypre_error_flag; } if (mgr_data -> RAP == NULL) { hypre_error_w_msg(HYPRE_ERROR_GENERIC," Coarse grid matrix is NULL. Please make sure MGRSetup() is called \n"); return hypre_error_flag; } *RAP = mgr_data->RAP; return hypre_error_flag; } /* Get pointer to coarse grid solution for MGR solver */ HYPRE_Int hypre_MGRGetCoarseGridSolution( void *mgr_vdata, hypre_ParVector **sol ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; if (!mgr_data) { hypre_error_in_arg(1); return hypre_error_flag; } if (mgr_data -> U_array == NULL) { hypre_error_w_msg(HYPRE_ERROR_GENERIC," MGR solution array is NULL. Please make sure MGRSetup() and MGRSolve() are called \n"); return hypre_error_flag; } *sol = mgr_data->U_array[mgr_data->num_coarse_levels]; return hypre_error_flag; } /* Get pointer to coarse grid right-hand side for MGR solver */ HYPRE_Int hypre_MGRGetCoarseGridRHS( void *mgr_vdata, hypre_ParVector **rhs ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; if (!mgr_data) { hypre_error_in_arg(1); return hypre_error_flag; } if (mgr_data -> F_array == NULL) { hypre_error_w_msg(HYPRE_ERROR_GENERIC," MGR RHS array is NULL.
Please make sure MGRSetup() and MGRSolve() are called \n"); return hypre_error_flag; } *rhs = mgr_data->F_array[mgr_data->num_coarse_levels]; return hypre_error_flag; } /* Print coarse grid linear system (for debugging)*/ HYPRE_Int hypre_MGRPrintCoarseSystem( void *mgr_vdata, HYPRE_Int print_flag) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; mgr_data->print_coarse_system = print_flag; return hypre_error_flag; } /* Print solver params */ HYPRE_Int hypre_MGRWriteSolverParams(void *mgr_vdata) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i, j; HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); hypre_printf("MGR Setup parameters: \n"); hypre_printf("Block size: %d\n", (mgr_data -> block_size)); hypre_printf("Max number of coarse levels: %d\n", (mgr_data -> max_num_coarse_levels)); hypre_printf("Relax type: %d\n", (mgr_data -> relax_type)); hypre_printf("Set non-Cpoints to F-points: %d\n", (mgr_data -> set_non_Cpoints_to_F)); hypre_printf("Set Cpoints method: %d\n", (mgr_data -> set_c_points_method)); for (i = 0; i < max_num_coarse_levels; i++) { hypre_printf("Lev = %d, Interpolation type: %d\n", i, (mgr_data -> interp_type)[i]); hypre_printf("Lev = %d, Restriction type: %d\n", i, (mgr_data -> restrict_type)[i]); hypre_printf("Lev = %d, F-relaxation method: %d\n", i, (mgr_data -> Frelax_method)[i]); hypre_printf("Lev = %d, Use non-Galerkin coarse grid: %d\n", i, (mgr_data -> use_non_galerkin_cg)[i]); HYPRE_Int lvl_num_coarse_points = (mgr_data -> block_num_coarse_indexes)[i]; hypre_printf("Lev = %d, Number of Cpoints: %d\n", i, lvl_num_coarse_points); hypre_printf("Cpoints indices: "); for (j = 0; j < lvl_num_coarse_points; j++) { if ((mgr_data -> block_cf_marker)[i][j] == 1) { hypre_printf("%d ", j); } } hypre_printf("\n"); } hypre_printf("Number of Reserved Cpoints: %d\n", (mgr_data -> reserved_coarse_size)); hypre_printf("Keep reserved Cpoints to level: %d\n", (mgr_data -> lvl_to_keep_cpoints)); hypre_printf("\n MGR Solver Parameters: \n"); hypre_printf("Number of relax sweeps: %d\n", (mgr_data -> num_relax_sweeps)); hypre_printf("Number of interpolation sweeps: %d\n", (mgr_data -> num_interp_sweeps)); hypre_printf("Number of restriction sweeps: %d\n", (mgr_data -> num_restrict_sweeps)); hypre_printf("Global smoother type: %d\n", (mgr_data ->global_smooth_type)); hypre_printf("Number of global smoother sweeps: %d\n", (mgr_data ->global_smooth_iters)); hypre_printf("Max number of iterations: %d\n", (mgr_data -> max_iter)); hypre_printf("Stopping tolerance: %e\n", (mgr_data -> tol)); hypre_printf("Use default coarse grid solver: %d\n", (mgr_data -> use_default_cgrid_solver)); if((mgr_data -> use_default_fsolver) >= 0) { hypre_printf("Use default AMG solver for full AMG F-relaxation: %d\n", (mgr_data -> use_default_fsolver)); } return hypre_error_flag; } #ifdef HYPRE_USING_DSUPERLU void * hypre_MGRDirectSolverCreate() { hypre_DSLUData *dslu_data = hypre_CTAlloc(hypre_DSLUData, 1, HYPRE_MEMORY_HOST); return (void *) dslu_data; } HYPRE_Int hypre_MGRDirectSolverSetup( void *solver, hypre_ParCSRMatrix *A, hypre_ParVector *f, hypre_ParVector *u ) { /* Par Data Structure variables */ HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A); MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_CSRMatrix *A_local; HYPRE_Int num_rows; HYPRE_Int num_procs, my_id; HYPRE_Int pcols=1, prows=1; HYPRE_BigInt *big_rowptr = NULL; hypre_DSLUData *dslu_data = (hypre_DSLUData *) solver; HYPRE_Int info = 0; HYPRE_Int nrhs = 0; 
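/* Setup outline: the diag and offd parts of A are merged into one local CSR block with global column indices (hypre_MergeDiagAndOffd), wrapped as a SuperLU_DIST SLU_NR_loc matrix, a prows x pcols process grid with prows*pcols == num_procs is created, and the matrix is factored with pdgssvx (nrhs = 0, so only the LU factorization is performed here; subsequent solves reuse the stored factors). */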
hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); /* Merge diag and offd into one matrix (global ids) */ A_local = hypre_MergeDiagAndOffd(A); num_rows = hypre_CSRMatrixNumRows(A_local); /* Now convert hypre matrix to a SuperMatrix */ #ifdef HYPRE_MIXEDINT { HYPRE_Int *rowptr = NULL; HYPRE_Int i; rowptr = hypre_CSRMatrixI(A_local); big_rowptr = hypre_CTAlloc(HYPRE_BigInt, (num_rows+1), HYPRE_MEMORY_HOST); for(i=0; i<(num_rows+1); i++) { big_rowptr[i] = (HYPRE_BigInt)rowptr[i]; } } #else big_rowptr = hypre_CSRMatrixI(A_local); #endif dCreate_CompRowLoc_Matrix_dist( &(dslu_data->A_dslu),global_num_rows,global_num_rows, hypre_CSRMatrixNumNonzeros(A_local), num_rows, hypre_ParCSRMatrixFirstRowIndex(A), hypre_CSRMatrixData(A_local), hypre_CSRMatrixBigJ(A_local),big_rowptr, SLU_NR_loc, SLU_D, SLU_GE); /* DOK: SuperLU frees assigned data, so set them to null before * calling hypre_CSRMatrixdestroy on A_local to avoid memory errors. */ #ifndef HYPRE_MIXEDINT hypre_CSRMatrixI(A_local) = NULL; #endif hypre_CSRMatrixData(A_local) = NULL; hypre_CSRMatrixBigJ(A_local) = NULL; hypre_CSRMatrixDestroy(A_local); /*Create process grid */ while (prows*pcols <= num_procs) ++prows; --prows; pcols = num_procs/prows; while (prows*pcols != num_procs) { prows -= 1; pcols = num_procs/prows; } //hypre_printf(" prows %d pcols %d\n", prows, pcols); superlu_gridinit(comm, prows, pcols, &(dslu_data->dslu_data_grid)); set_default_options_dist(&(dslu_data->dslu_options)); dslu_data->dslu_options.Fact = DOFACT; dslu_data->dslu_options.PrintStat = NO; /*dslu_data->dslu_options.IterRefine = SLU_DOUBLE; dslu_data->dslu_options.ColPerm = MMD_AT_PLUS_A; dslu_data->dslu_options.DiagPivotThresh = 1.0; dslu_data->dslu_options.ReplaceTinyPivot = NO; */ dScalePermstructInit(global_num_rows, global_num_rows, &(dslu_data->dslu_ScalePermstruct)); dLUstructInit(global_num_rows, &(dslu_data->dslu_data_LU)); PStatInit(&(dslu_data->dslu_data_stat)); dslu_data->global_num_rows = global_num_rows; dslu_data->berr = hypre_CTAlloc(HYPRE_Real, 1, HYPRE_MEMORY_HOST); dslu_data->berr[0] = 0.0; pdgssvx(&(dslu_data->dslu_options), &(dslu_data->A_dslu), &(dslu_data->dslu_ScalePermstruct), NULL, num_rows, nrhs, &(dslu_data->dslu_data_grid), &(dslu_data->dslu_data_LU), &(dslu_data->dslu_solve), dslu_data->berr, &(dslu_data->dslu_data_stat), &info); dslu_data->dslu_options.Fact = FACTORED; return hypre_error_flag; } HYPRE_Int hypre_MGRDirectSolverSolve( void *solver, hypre_ParCSRMatrix *A, hypre_ParVector *f, hypre_ParVector *u ) { hypre_SLUDistSolve(solver, f, u); return hypre_error_flag; } HYPRE_Int hypre_MGRDirectSolverDestroy( void *solver ) { hypre_SLUDistDestroy(solver); return hypre_error_flag; } #endif
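/* A minimal usage sketch of the direct solver hooks above (illustrative only, not part of the library;
 * it assumes HYPRE_USING_DSUPERLU is defined and that A, f, u are an already assembled
 * hypre_ParCSRMatrix and hypre_ParVector pair):
 *
 *   void *dslu = hypre_MGRDirectSolverCreate();
 *   hypre_MGRDirectSolverSetup(dslu, A, f, u);    // factor A with SuperLU_DIST
 *   hypre_MGRDirectSolverSolve(dslu, A, f, u);    // solve A u = f with the stored factors
 *   hypre_MGRDirectSolverDestroy(dslu);           // release SuperLU data
 */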
/****************************************************************************** * * Two-grid system solver * *****************************************************************************/ #include "_hypre_parcsr_ls.h" #include "par_amg.h" #include "par_mgr.h" #ifdef HYPRE_USING_DSUPERLU #include "dsuperlu.h" #endif /* Create */ void * hypre_MGRCreate() { hypre_ParMGRData *mgr_data; mgr_data = hypre_CTAlloc(hypre_ParMGRData, 1, HYPRE_MEMORY_HOST); /* block data */ (mgr_data -> block_size) = 1; (mgr_data -> block_num_coarse_indexes) = NULL; (mgr_data -> point_marker_array) = NULL; (mgr_data -> block_cf_marker) = NULL; /* general data */ (mgr_data -> max_num_coarse_levels) = 10; (mgr_data -> A_array) = NULL; (mgr_data -> P_array) = NULL; (mgr_data -> RT_array) = NULL; (mgr_data -> RAP) = NULL; (mgr_data -> CF_marker_array) = NULL; (mgr_data -> coarse_indices_lvls) = NULL; (mgr_data -> A_ff_array) = NULL; (mgr_data -> F_fine_array) = NULL; (mgr_data -> U_fine_array) = NULL; (mgr_data -> aff_solver) = NULL; (mgr_data -> fine_grid_solver_setup) = NULL; (mgr_data -> fine_grid_solver_solve) = NULL; (mgr_data -> F_array) = NULL; (mgr_data -> U_array) = NULL; (mgr_data -> residual) = NULL; (mgr_data -> rel_res_norms) = NULL; (mgr_data -> Vtemp) = NULL; (mgr_data -> Ztemp) = NULL; (mgr_data -> Utemp) = NULL; (mgr_data -> Ftemp) = NULL; (mgr_data -> num_iterations) = 0; (mgr_data -> num_interp_sweeps) = 1; (mgr_data -> num_restrict_sweeps) = 1; (mgr_data -> trunc_factor) = 0.0; (mgr_data -> max_row_sum) = 0.9; (mgr_data -> strong_threshold) = 0.25; (mgr_data -> S_commpkg_switch) = 1.0; (mgr_data -> P_max_elmts) = 0; (mgr_data -> coarse_grid_solver) = NULL; (mgr_data -> coarse_grid_solver_setup) = NULL; (mgr_data -> coarse_grid_solver_solve) = NULL; (mgr_data -> global_smoother) = NULL; (mgr_data -> use_default_cgrid_solver) = 1; (mgr_data -> use_default_fsolver) = -1; // set to -1 to avoid printing when not used (mgr_data -> omega) = 1.; (mgr_data -> max_iter) = 20; (mgr_data -> tol) = 1.0e-6; (mgr_data -> relax_type) = 0; (mgr_data -> relax_order) = 1; // not fully utilized. Only used to compute L1-norms. 
(mgr_data -> interp_type) = NULL; (mgr_data -> restrict_type) = NULL; (mgr_data -> num_relax_sweeps) = 1; (mgr_data -> relax_weight) = 1.0; (mgr_data -> logging) = 0; (mgr_data -> print_level) = 0; (mgr_data -> l1_norms) = NULL; (mgr_data -> reserved_coarse_size) = 0; (mgr_data -> reserved_coarse_indexes) = NULL; (mgr_data -> reserved_Cpoint_local_indexes) = NULL; (mgr_data -> diaginv) = NULL; (mgr_data -> global_smooth_iters) = 1; (mgr_data -> global_smooth_type) = 0; (mgr_data -> set_non_Cpoints_to_F) = 0; (mgr_data -> idx_array) = NULL; (mgr_data -> Frelax_method) = NULL; (mgr_data -> VcycleRelaxVtemp) = NULL; (mgr_data -> VcycleRelaxZtemp) = NULL; (mgr_data -> FrelaxVcycleData) = NULL; (mgr_data -> Frelax_num_functions) = NULL; (mgr_data -> max_local_lvls) = 10; (mgr_data -> use_non_galerkin_cg) = NULL; (mgr_data -> print_coarse_system) = 0; (mgr_data -> set_c_points_method) = 0; (mgr_data -> lvl_to_keep_cpoints) = 0; (mgr_data -> cg_convergence_factor) = 0.0; (mgr_data -> truncate_coarse_grid_threshold) = 0.0; return (void *) mgr_data; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ /* Destroy */ HYPRE_Int hypre_MGRDestroy( void *data ) { hypre_ParMGRData * mgr_data = (hypre_ParMGRData*) data; HYPRE_Int i; HYPRE_Int num_coarse_levels = (mgr_data -> num_coarse_levels); /* block info data */ if ((mgr_data -> block_cf_marker)) { for (i=0; i < (mgr_data -> max_num_coarse_levels); i++) { if ((mgr_data -> block_cf_marker)[i]) { hypre_TFree((mgr_data -> block_cf_marker)[i], HYPRE_MEMORY_HOST); } } hypre_TFree((mgr_data -> block_cf_marker), HYPRE_MEMORY_HOST); (mgr_data -> block_cf_marker) = NULL; } if(mgr_data -> block_num_coarse_indexes) { hypre_TFree(mgr_data -> block_num_coarse_indexes, HYPRE_MEMORY_HOST); (mgr_data -> block_num_coarse_indexes) = NULL; } /* final residual vector */ if((mgr_data -> residual)) { hypre_ParVectorDestroy( (mgr_data -> residual) ); (mgr_data -> residual) = NULL; } if((mgr_data -> rel_res_norms)) { hypre_TFree( (mgr_data -> rel_res_norms) , HYPRE_MEMORY_HOST); (mgr_data -> rel_res_norms) = NULL; } /* temp vectors for solve phase */ if((mgr_data -> Vtemp)) { hypre_ParVectorDestroy( (mgr_data -> Vtemp) ); (mgr_data -> Vtemp) = NULL; } if((mgr_data -> Ztemp)) { hypre_ParVectorDestroy( (mgr_data -> Ztemp) ); (mgr_data -> Ztemp) = NULL; } if((mgr_data -> Utemp)) { hypre_ParVectorDestroy( (mgr_data -> Utemp) ); (mgr_data -> Utemp) = NULL; } if((mgr_data -> Ftemp)) { hypre_ParVectorDestroy( (mgr_data -> Ftemp) ); (mgr_data -> Ftemp) = NULL; } /* coarse grid solver */ if((mgr_data -> use_default_cgrid_solver)) { if((mgr_data -> coarse_grid_solver)) { hypre_BoomerAMGDestroy( (mgr_data -> coarse_grid_solver) ); } (mgr_data -> coarse_grid_solver) = NULL; } /* l1_norms */ if ((mgr_data -> l1_norms)) { for (i=0; i < (num_coarse_levels); i++) { hypre_SeqVectorDestroy((mgr_data -> l1_norms)[i]); } hypre_TFree((mgr_data -> l1_norms), HYPRE_MEMORY_HOST); } /* coarse_indices_lvls */ if ((mgr_data -> coarse_indices_lvls)) { for (i=0; i < (num_coarse_levels); i++) if ((mgr_data -> coarse_indices_lvls)[i]) hypre_TFree((mgr_data -> coarse_indices_lvls)[i], HYPRE_MEMORY_HOST); hypre_TFree((mgr_data -> coarse_indices_lvls), HYPRE_MEMORY_HOST); } /* linear system and cf marker array */ if(mgr_data -> A_array || mgr_data -> P_array || mgr_data -> RT_array || mgr_data -> CF_marker_array) { for (i=1; i < num_coarse_levels+1; i++) { hypre_ParVectorDestroy((mgr_data -> F_array)[i]); 
hypre_ParVectorDestroy((mgr_data -> U_array)[i]); if ((mgr_data -> P_array)[i-1]) hypre_ParCSRMatrixDestroy((mgr_data -> P_array)[i-1]); if ((mgr_data -> RT_array)[i-1]) hypre_ParCSRMatrixDestroy((mgr_data -> RT_array)[i-1]); hypre_TFree((mgr_data -> CF_marker_array)[i-1], HYPRE_MEMORY_HOST); } for (i=1; i < (num_coarse_levels); i++) { if ((mgr_data -> A_array)[i]) hypre_ParCSRMatrixDestroy((mgr_data -> A_array)[i]); } } /* AMG for Frelax */ if(mgr_data -> A_ff_array || mgr_data -> F_fine_array || mgr_data -> U_fine_array) { for (i=1; i < num_coarse_levels+1; i++) { if (mgr_data -> F_fine_array[i]) hypre_ParVectorDestroy((mgr_data -> F_fine_array)[i]); if (mgr_data -> U_fine_array[i]) hypre_ParVectorDestroy((mgr_data -> U_fine_array)[i]); } for (i=1; i < (num_coarse_levels); i++) { if ((mgr_data -> A_ff_array)[i]) hypre_ParCSRMatrixDestroy((mgr_data -> A_ff_array)[i]); } if (mgr_data -> use_default_fsolver) { hypre_ParCSRMatrixDestroy((mgr_data -> A_ff_array)[0]); } hypre_TFree(mgr_data -> F_fine_array, HYPRE_MEMORY_HOST); (mgr_data -> F_fine_array) = NULL; hypre_TFree(mgr_data -> U_fine_array, HYPRE_MEMORY_HOST); (mgr_data -> U_fine_array) = NULL; hypre_TFree(mgr_data -> A_ff_array, HYPRE_MEMORY_HOST); (mgr_data -> A_ff_array) = NULL; } if(mgr_data -> aff_solver) { for (i = 1; i < (num_coarse_levels); i++) { if ((mgr_data -> aff_solver)[i]) hypre_BoomerAMGDestroy((mgr_data -> aff_solver)[i]); } if (mgr_data -> use_default_fsolver) { if ((mgr_data -> aff_solver)[0]) hypre_BoomerAMGDestroy((mgr_data -> aff_solver)[0]); } hypre_TFree(mgr_data -> aff_solver, HYPRE_MEMORY_HOST); (mgr_data -> aff_solver) = NULL; } if((mgr_data -> F_array)) { hypre_TFree((mgr_data -> F_array), HYPRE_MEMORY_HOST); (mgr_data -> F_array) = NULL; } if((mgr_data -> U_array)) { hypre_TFree((mgr_data -> U_array), HYPRE_MEMORY_HOST); (mgr_data -> U_array) = NULL; } if((mgr_data -> A_array)) { hypre_TFree((mgr_data -> A_array), HYPRE_MEMORY_HOST); (mgr_data -> A_array) = NULL; } if((mgr_data -> P_array)) { hypre_TFree((mgr_data -> P_array), HYPRE_MEMORY_HOST); (mgr_data -> P_array) = NULL; } if((mgr_data -> RT_array)) { hypre_TFree((mgr_data -> RT_array), HYPRE_MEMORY_HOST); (mgr_data -> RT_array) = NULL; } if((mgr_data -> CF_marker_array)) { hypre_TFree((mgr_data -> CF_marker_array), HYPRE_MEMORY_HOST); (mgr_data -> CF_marker_array) = NULL; } if((mgr_data -> reserved_Cpoint_local_indexes)) { hypre_TFree((mgr_data -> reserved_Cpoint_local_indexes), HYPRE_MEMORY_HOST); (mgr_data -> reserved_Cpoint_local_indexes) = NULL; } if (mgr_data -> restrict_type) { hypre_TFree(mgr_data -> restrict_type, HYPRE_MEMORY_HOST); (mgr_data -> restrict_type) = NULL; } if (mgr_data -> interp_type) { hypre_TFree(mgr_data -> interp_type, HYPRE_MEMORY_HOST); (mgr_data -> interp_type) = NULL; } /* Frelax_method */ if (mgr_data -> Frelax_method) { hypre_TFree(mgr_data -> Frelax_method, HYPRE_MEMORY_HOST); (mgr_data -> Frelax_method) = NULL; } /* Frelax_num_functions */ if (mgr_data -> Frelax_num_functions) { hypre_TFree(mgr_data -> Frelax_num_functions, HYPRE_MEMORY_HOST); (mgr_data -> Frelax_num_functions) = NULL; } /* data for V-cycle F-relaxation */ if((mgr_data -> VcycleRelaxVtemp)) { hypre_ParVectorDestroy( (mgr_data -> VcycleRelaxVtemp) ); (mgr_data -> VcycleRelaxVtemp) = NULL; } if((mgr_data -> VcycleRelaxZtemp)) { hypre_ParVectorDestroy( (mgr_data -> VcycleRelaxZtemp) ); (mgr_data -> VcycleRelaxZtemp) = NULL; } if (mgr_data -> FrelaxVcycleData) { for (i = 0; i < num_coarse_levels; i++) { if ((mgr_data -> FrelaxVcycleData)[i]) { 
hypre_MGRDestroyFrelaxVcycleData((mgr_data -> FrelaxVcycleData)[i]); (mgr_data -> FrelaxVcycleData)[i] = NULL; } } hypre_TFree(mgr_data -> FrelaxVcycleData, HYPRE_MEMORY_HOST); (mgr_data -> FrelaxVcycleData) = NULL; } /* data for reserved coarse nodes */ if(mgr_data -> reserved_coarse_indexes) { hypre_TFree(mgr_data -> reserved_coarse_indexes, HYPRE_MEMORY_HOST); (mgr_data -> reserved_coarse_indexes) = NULL; } /* index array for setting Cpoints by global block */ if ((mgr_data -> set_c_points_method) == 1) { hypre_TFree(mgr_data -> idx_array, HYPRE_MEMORY_HOST); (mgr_data -> idx_array) = NULL; } /* array for setting option to use non-Galerkin coarse grid */ if (mgr_data -> use_non_galerkin_cg) { hypre_TFree(mgr_data -> use_non_galerkin_cg, HYPRE_MEMORY_HOST); (mgr_data -> use_non_galerkin_cg) = NULL; } /* coarse level matrix - RAP */ if ((mgr_data -> RAP)) hypre_ParCSRMatrixDestroy((mgr_data -> RAP)); if ((mgr_data -> diaginv)) hypre_TFree((mgr_data -> diaginv), HYPRE_MEMORY_HOST); if ((mgr_data -> global_smoother)) { if (mgr_data -> global_smooth_type == 8) { HYPRE_EuclidDestroy((mgr_data -> global_smoother)); } else if (mgr_data -> global_smooth_type == 16) { HYPRE_ILUDestroy((mgr_data -> global_smoother)); } } /* mgr data */ hypre_TFree(mgr_data, HYPRE_MEMORY_HOST); return hypre_error_flag; } /* Create data for V-cycle F-relaxation */ void * hypre_MGRCreateFrelaxVcycleData() { hypre_ParAMGData *vdata = hypre_CTAlloc(hypre_ParAMGData, 1, HYPRE_MEMORY_HOST); hypre_ParAMGDataAArray(vdata) = NULL; hypre_ParAMGDataPArray(vdata) = NULL; hypre_ParAMGDataFArray(vdata) = NULL; hypre_ParAMGDataCFMarkerArray(vdata) = NULL; hypre_ParAMGDataVtemp(vdata) = NULL; hypre_ParAMGDataAMat(vdata) = NULL; hypre_ParAMGDataBVec(vdata) = NULL; hypre_ParAMGDataZtemp(vdata) = NULL; hypre_ParAMGDataCommInfo(vdata) = NULL; hypre_ParAMGDataUArray(vdata) = NULL; hypre_ParAMGDataNewComm(vdata) = hypre_MPI_COMM_NULL; hypre_ParAMGDataNumLevels(vdata) = 0; hypre_ParAMGDataMaxLevels(vdata) = 10; hypre_ParAMGDataNumFunctions(vdata) = 1; hypre_ParAMGDataSCommPkgSwitch(vdata) = 1.0; hypre_ParAMGDataRelaxOrder(vdata) = 1; hypre_ParAMGDataMaxCoarseSize(vdata) = 9; hypre_ParAMGDataMinCoarseSize(vdata) = 0; hypre_ParAMGDataUserCoarseRelaxType(vdata) = 9; return (void *) vdata; } /* Destroy data for V-cycle F-relaxation */ HYPRE_Int hypre_MGRDestroyFrelaxVcycleData( void *data ) { hypre_ParAMGData * vdata = (hypre_ParAMGData*) data; HYPRE_Int i; HYPRE_Int num_levels = hypre_ParAMGDataNumLevels(vdata); MPI_Comm new_comm = hypre_ParAMGDataNewComm(vdata); hypre_TFree(hypre_ParAMGDataDofFuncArray(vdata)[0], HYPRE_MEMORY_HOST); for (i=1; i < num_levels + 1; i++) { if (hypre_ParAMGDataAArray(vdata)[i]) hypre_ParCSRMatrixDestroy(hypre_ParAMGDataAArray(vdata)[i]); if (hypre_ParAMGDataPArray(vdata)[i-1]) hypre_ParCSRMatrixDestroy(hypre_ParAMGDataPArray(vdata)[i-1]); hypre_TFree(hypre_ParAMGDataCFMarkerArray(vdata)[i-1], HYPRE_MEMORY_HOST); hypre_ParVectorDestroy(hypre_ParAMGDataFArray(vdata)[i]); hypre_ParVectorDestroy(hypre_ParAMGDataUArray(vdata)[i]); hypre_TFree(hypre_ParAMGDataDofFuncArray(vdata)[i], HYPRE_MEMORY_HOST); } /* see comments in par_coarsen.c regarding special case for CF_marker */ if (num_levels <= 1) { hypre_TFree(hypre_ParAMGDataCFMarkerArray(vdata)[0], HYPRE_MEMORY_HOST); } /* Points to VcycleRelaxVtemp of mgr_data, which is already destroyed */ //hypre_ParVectorDestroy(hypre_ParAMGDataVtemp(vdata)); hypre_TFree(hypre_ParAMGDataFArray(vdata), HYPRE_MEMORY_HOST); hypre_TFree(hypre_ParAMGDataUArray(vdata),
HYPRE_MEMORY_HOST); hypre_TFree(hypre_ParAMGDataAArray(vdata), HYPRE_MEMORY_HOST); hypre_TFree(hypre_ParAMGDataPArray(vdata), HYPRE_MEMORY_HOST); hypre_TFree(hypre_ParAMGDataCFMarkerArray(vdata), HYPRE_MEMORY_HOST); //hypre_TFree(hypre_ParAMGDataGridRelaxType(vdata), HYPRE_MEMORY_HOST); hypre_TFree(hypre_ParAMGDataDofFuncArray(vdata), HYPRE_MEMORY_HOST); /* Points to VcycleRelaxZtemp of mgr_data, which is already destroyed */ /* if (hypre_ParAMGDataZtemp(vdata)) hypre_ParVectorDestroy(hypre_ParAMGDataZtemp(vdata)); */ if (hypre_ParAMGDataAMat(vdata)) hypre_TFree(hypre_ParAMGDataAMat(vdata), HYPRE_MEMORY_HOST); if (hypre_ParAMGDataBVec(vdata)) hypre_TFree(hypre_ParAMGDataBVec(vdata), HYPRE_MEMORY_HOST); if (hypre_ParAMGDataCommInfo(vdata)) hypre_TFree(hypre_ParAMGDataCommInfo(vdata), HYPRE_MEMORY_HOST); if (new_comm != hypre_MPI_COMM_NULL) { hypre_MPI_Comm_free (&new_comm); } hypre_TFree(vdata, HYPRE_MEMORY_HOST); return hypre_error_flag; } /* Set C-point variables for each reduction level */ /* Currently not implemented */ HYPRE_Int hypre_MGRSetReductionLevelCpoints( void *mgr_vdata, HYPRE_Int nlevels, HYPRE_Int *num_coarse_points, HYPRE_Int **level_coarse_indexes) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> num_coarse_levels) = nlevels; (mgr_data -> num_coarse_per_level) = num_coarse_points; (mgr_data -> level_coarse_indexes) = level_coarse_indexes; return hypre_error_flag; } /* Initialize some data */ /* Set whether non-coarse points on each level should be explicitly tagged as F-points */ HYPRE_Int hypre_MGRSetNonCpointsToFpoints( void *mgr_vdata, HYPRE_Int nonCptToFptFlag) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> set_non_Cpoints_to_F) = nonCptToFptFlag; return hypre_error_flag; } /* Set whether the reserved C points are reduced before the coarse grid solve */ HYPRE_Int hypre_MGRSetReservedCpointsLevelToKeep(void *mgr_vdata, HYPRE_Int level) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> lvl_to_keep_cpoints) = level; return hypre_error_flag; } /* Set Cpoints by contiguous blocks, i.e. p1, p2, ..., pn, s1, s2, ..., sn, ... 
*/ HYPRE_Int hypre_MGRSetCpointsByContiguousBlock( void *mgr_vdata, HYPRE_Int block_size, HYPRE_Int max_num_levels, HYPRE_BigInt *begin_idx_array, HYPRE_Int *block_num_coarse_points, HYPRE_Int **block_coarse_indexes) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i; if((mgr_data -> idx_array) != NULL) { hypre_TFree(mgr_data -> idx_array, HYPRE_MEMORY_HOST); (mgr_data -> idx_array) = NULL; } HYPRE_BigInt *index_array = hypre_CTAlloc(HYPRE_BigInt, block_size, HYPRE_MEMORY_HOST); if (begin_idx_array != NULL) { for (i = 0; i < block_size; i++) { index_array[i] = *(begin_idx_array+i); } } hypre_MGRSetCpointsByBlock(mgr_data, block_size, max_num_levels, block_num_coarse_points, block_coarse_indexes); (mgr_data -> idx_array) = index_array; (mgr_data -> set_c_points_method) = 1; return hypre_error_flag; } /* Initialize/ set local block data information */ HYPRE_Int hypre_MGRSetCpointsByBlock( void *mgr_vdata, HYPRE_Int block_size, HYPRE_Int max_num_levels, HYPRE_Int *block_num_coarse_points, HYPRE_Int **block_coarse_indexes) { HYPRE_Int i,j; HYPRE_Int **block_cf_marker = NULL; HYPRE_Int *block_num_coarse_indexes = NULL; hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; /* free block cf_marker data if not previously destroyed */ if((mgr_data -> block_cf_marker) != NULL) { for (i=0; i < (mgr_data -> max_num_coarse_levels); i++) { if ((mgr_data -> block_cf_marker)[i]) { hypre_TFree((mgr_data -> block_cf_marker)[i], HYPRE_MEMORY_HOST); (mgr_data -> block_cf_marker)[i] = NULL; } } hypre_TFree(mgr_data -> block_cf_marker, HYPRE_MEMORY_HOST); (mgr_data -> block_cf_marker) = NULL; } if((mgr_data -> block_num_coarse_indexes)) { hypre_TFree((mgr_data -> block_num_coarse_indexes), HYPRE_MEMORY_HOST); (mgr_data -> block_num_coarse_indexes) = NULL; } /* store block cf_marker */ block_cf_marker = hypre_CTAlloc(HYPRE_Int *, max_num_levels, HYPRE_MEMORY_HOST); for (i = 0; i < max_num_levels; i++) { block_cf_marker[i] = hypre_CTAlloc(HYPRE_Int, block_size, HYPRE_MEMORY_HOST); memset(block_cf_marker[i], FMRK, block_size*sizeof(HYPRE_Int)); } for (i = 0; i < max_num_levels; i++) { for(j=0; j<block_num_coarse_points[i]; j++) { (block_cf_marker[i])[block_coarse_indexes[i][j]] = CMRK; } } /* store block_num_coarse_points */ if(max_num_levels > 0) { block_num_coarse_indexes = hypre_CTAlloc(HYPRE_Int, max_num_levels, HYPRE_MEMORY_HOST); for(i=0; i<max_num_levels; i++) block_num_coarse_indexes[i] = block_num_coarse_points[i]; } /* set block data */ (mgr_data -> max_num_coarse_levels) = max_num_levels; (mgr_data -> block_size) = block_size; (mgr_data -> block_num_coarse_indexes) = block_num_coarse_indexes; (mgr_data -> block_cf_marker) = block_cf_marker; (mgr_data -> set_c_points_method) = 0; return hypre_error_flag; } HYPRE_Int hypre_MGRSetCpointsByPointMarkerArray( void *mgr_vdata, HYPRE_Int block_size, HYPRE_Int max_num_levels, HYPRE_Int *lvl_num_coarse_points, HYPRE_Int **lvl_coarse_indexes, HYPRE_Int *point_marker_array) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i,j; HYPRE_Int **block_cf_marker = NULL; HYPRE_Int *block_num_coarse_indexes = NULL; /* free block cf_marker data if not previously destroyed */ if((mgr_data -> block_cf_marker) != NULL) { for (i=0; i < (mgr_data -> max_num_coarse_levels); i++) { if ((mgr_data -> block_cf_marker)[i]) { hypre_TFree((mgr_data -> block_cf_marker)[i], HYPRE_MEMORY_HOST); (mgr_data -> block_cf_marker)[i] = NULL; } } hypre_TFree(mgr_data -> block_cf_marker, HYPRE_MEMORY_HOST); (mgr_data -> block_cf_marker) = NULL; } 
if((mgr_data -> block_num_coarse_indexes)) { hypre_TFree((mgr_data -> block_num_coarse_indexes), HYPRE_MEMORY_HOST); (mgr_data -> block_num_coarse_indexes) = NULL; } /* store block cf_marker */ block_cf_marker = hypre_CTAlloc(HYPRE_Int *, max_num_levels, HYPRE_MEMORY_HOST); for (i = 0; i < max_num_levels; i++) { block_cf_marker[i] = hypre_CTAlloc(HYPRE_Int, block_size, HYPRE_MEMORY_HOST); memset(block_cf_marker[i], FMRK, block_size*sizeof(HYPRE_Int)); } for (i = 0; i < max_num_levels; i++) { for(j=0; j<lvl_num_coarse_points[i]; j++) { block_cf_marker[i][j] = lvl_coarse_indexes[i][j]; } } /* store block_num_coarse_points */ if(max_num_levels > 0) { block_num_coarse_indexes = hypre_CTAlloc(HYPRE_Int, max_num_levels, HYPRE_MEMORY_HOST); for(i=0; i<max_num_levels; i++) block_num_coarse_indexes[i] = lvl_num_coarse_points[i]; } /* set block data */ (mgr_data -> max_num_coarse_levels) = max_num_levels; (mgr_data -> block_size) = block_size; (mgr_data -> block_num_coarse_indexes) = block_num_coarse_indexes; (mgr_data -> block_cf_marker) = block_cf_marker; (mgr_data -> point_marker_array) = point_marker_array; (mgr_data -> set_c_points_method) = 2; return hypre_error_flag; } /*Set number of points that remain part of the coarse grid throughout the hierarchy */ HYPRE_Int hypre_MGRSetReservedCoarseNodes(void *mgr_vdata, HYPRE_Int reserved_coarse_size, HYPRE_BigInt *reserved_cpt_index) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_BigInt *reserved_coarse_indexes = NULL; HYPRE_Int i; if (!mgr_data) { hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Warning! MGR object empty!\n"); return hypre_error_flag; } if(reserved_coarse_size < 0) { hypre_error_in_arg(2); return hypre_error_flag; } /* free data not previously destroyed */ if((mgr_data -> reserved_coarse_indexes)) { hypre_TFree((mgr_data -> reserved_coarse_indexes), HYPRE_MEMORY_HOST); (mgr_data -> reserved_coarse_indexes) = NULL; } /* set reserved coarse nodes */ if(reserved_coarse_size > 0) { reserved_coarse_indexes = hypre_CTAlloc(HYPRE_BigInt, reserved_coarse_size, HYPRE_MEMORY_HOST); for(i=0; i<reserved_coarse_size; i++) reserved_coarse_indexes[i] = reserved_cpt_index[i]; } (mgr_data -> reserved_coarse_size) = reserved_coarse_size; (mgr_data -> reserved_coarse_indexes) = reserved_coarse_indexes; return hypre_error_flag; } /* Set CF marker array */ HYPRE_Int hypre_MGRCoarsen(hypre_ParCSRMatrix *S, hypre_ParCSRMatrix *A, HYPRE_Int fixed_coarse_size, HYPRE_Int *fixed_coarse_indexes, HYPRE_Int debug_flag, HYPRE_Int **CF_marker_ptr, HYPRE_Int cflag) { HYPRE_Int *CF_marker = NULL; HYPRE_Int *cindexes = fixed_coarse_indexes; HYPRE_Int i, row, nc; HYPRE_Int nloc = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A)); /* If this is the last level, coarsen onto fixed coarse set */ if(cflag) { if(*CF_marker_ptr != NULL) { hypre_TFree(*CF_marker_ptr, HYPRE_MEMORY_HOST); } CF_marker = hypre_CTAlloc(HYPRE_Int, nloc, HYPRE_MEMORY_HOST); memset(CF_marker, FMRK, nloc*sizeof(HYPRE_Int)); /* first mark fixed coarse set */ nc = fixed_coarse_size; for(i = 0; i < nc; i++) { CF_marker[cindexes[i]] = CMRK; } } else { /* First coarsen to get initial CF splitting. * This is then followed by updating the CF marker to pass * coarse information to the next levels. NOTE: It may be * convenient to implement this way (allows the use of multiple * coarsening strategies without changing too much code), * but not necessarily the best option, compared to initializing * CF_marker first and then coarsening on subgraph which excludes * the initialized coarse nodes. 
*/ hypre_BoomerAMGCoarsen(S, A, 0, debug_flag, &CF_marker); /* Update CF_marker to correct Cpoints marked as Fpoints. */ nc = fixed_coarse_size; for(i = 0; i < nc; i++) { CF_marker[cindexes[i]] = CMRK; } /* set F-points to FMRK. This is necessary since the different coarsening schemes differentiate * between type of F-points (example Ruge coarsening). We do not need that distinction here. */ for (row = 0; row <nloc; row++) { if(CF_marker[row] == CMRK) continue; CF_marker[row] = FMRK; } #if 0 /* IMPORTANT: Update coarse_indexes array to define the positions of the fixed coarse points * in the next level. */ nc = 0; index_i = 0; for (row = 0; row <nloc; row++) { /* loop through new c-points */ if(CF_marker[row] == CMRK) nc++; else if(CF_marker[row] == S_CMRK) { /* previously marked c-point is part of fixed coarse set. Track its current local index */ cindexes[index_i++] = nc; /* reset c-point from S_CMRK to CMRK */ cf_marker[row] = CMRK; nc++; } /* set F-points to FMRK. This is necessary since the different coarsening schemes differentiate * between type of F-points (example Ruge coarsening). We do not need that distinction here. */ else { CF_marker[row] = FMRK; } } /* check if this should be last level */ if( nc == fixed_coarse_size) last_level = 1; //printf(" nc = %d and fixed coarse size = %d \n", nc, fixed_coarse_size); #endif } /* set CF_marker */ *CF_marker_ptr = CF_marker; return hypre_error_flag; } /* Interpolation for MGR - Adapted from BoomerAMGBuildInterp */ HYPRE_Int hypre_MGRBuildP( hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, HYPRE_BigInt *num_cpts_global, HYPRE_Int method, HYPRE_Int debug_flag, hypre_ParCSRMatrix **P_ptr) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_Real *a_diag; hypre_ParCSRMatrix *P; HYPRE_BigInt *col_map_offd_P; HYPRE_Int *tmp_map_offd = NULL; HYPRE_Int *CF_marker_offd = NULL; hypre_CSRMatrix *P_diag; hypre_CSRMatrix *P_offd; HYPRE_Real *P_diag_data; HYPRE_Int *P_diag_i; HYPRE_Int *P_diag_j; HYPRE_Real *P_offd_data; HYPRE_Int *P_offd_i; HYPRE_Int *P_offd_j; HYPRE_Int P_diag_size, P_offd_size; HYPRE_Int *P_marker, *P_marker_offd; HYPRE_Int jj_counter,jj_counter_offd; HYPRE_Int *jj_count, *jj_count_offd; // HYPRE_Int jj_begin_row,jj_begin_row_offd; // HYPRE_Int jj_end_row,jj_end_row_offd; HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */ HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int *fine_to_coarse; //HYPRE_BigInt *fine_to_coarse_offd; HYPRE_Int *coarse_counter; HYPRE_Int coarse_shift; HYPRE_BigInt total_global_cpts; //HYPRE_BigInt my_first_cpt; HYPRE_Int num_cols_P_offd; HYPRE_Int i,i1; HYPRE_Int j,jl,jj; HYPRE_Int start; HYPRE_Real one = 1.0; HYPRE_Int my_id; HYPRE_Int num_procs; HYPRE_Int num_threads; HYPRE_Int num_sends; HYPRE_Int index; HYPRE_Int ns, ne, size, rest; HYPRE_Int *int_buf_data; HYPRE_Real wall_time; /* for debugging instrumentation */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm,&my_id); //num_threads = hypre_NumThreads(); // 
Temporary fix, disable threading // TODO: enable threading num_threads = 1; #ifdef HYPRE_NO_GLOBAL_PARTITION //my_first_cpt = num_cpts_global[0]; if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1]; hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); #else //my_first_cpt = num_cpts_global[my_id]; total_global_cpts = num_cpts_global[num_procs]; #endif /*------------------------------------------------------------------- * Get the CF_marker data for the off-processor columns *-------------------------------------------------------------------*/ if (debug_flag < 0) { debug_flag = -debug_flag; } if (debug_flag==4) wall_time = time_getWallclockSeconds(); if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * First Pass: Determine size of P and fill in fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Initialize counters and allocate mapping vector. *-----------------------------------------------------------------------*/ coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); #if 0 #endif for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1; jj_counter = start_indexing; jj_counter_offd = start_indexing; /*----------------------------------------------------------------------- * Loop over fine grid. *-----------------------------------------------------------------------*/ /* RDF: this looks a little tricky, but doable */ #if 0 #endif for (j = 0; j < num_threads; j++) { size = n_fine/num_threads; rest = n_fine - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a C-point, interpolation is the identity. Also set up * mapping vector.
 *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { jj_count[j]++; fine_to_coarse[i] = coarse_counter[j]; coarse_counter[j]++; } /*-------------------------------------------------------------------- * If i is an F-point, interpolation is the approximation of A_{ff}^{-1}A_{fc} *--------------------------------------------------------------------*/ else { for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { i1 = A_diag_j[jj]; if ((CF_marker[i1] >= 0) && (method > 0)) { jj_count[j]++; } } if (num_procs > 1) { for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { i1 = A_offd_j[jj]; if ((CF_marker_offd[i1] >= 0) && (method > 0)) { jj_count_offd[j]++; } } } } } } /*----------------------------------------------------------------------- * Allocate arrays. *-----------------------------------------------------------------------*/ for (i=0; i < num_threads-1; i++) { coarse_counter[i+1] += coarse_counter[i]; jj_count[i+1] += jj_count[i]; jj_count_offd[i+1] += jj_count_offd[i]; } i = num_threads-1; jj_counter = jj_count[i]; jj_counter_offd = jj_count_offd[i]; P_diag_size = jj_counter; P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE); P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_DEVICE); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_DEVICE); P_diag_i[n_fine] = jj_counter; P_offd_size = jj_counter_offd; P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE); P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_DEVICE); P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_DEVICE); /*----------------------------------------------------------------------- * Initialize some stuff. *-----------------------------------------------------------------------*/ jj_counter = start_indexing; jj_counter_offd = start_indexing; if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Internal work 1 = %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * Send and receive fine_to_coarse info. *-----------------------------------------------------------------------*/ if (debug_flag==4) wall_time = time_getWallclockSeconds(); //fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_A_offd, HYPRE_MEMORY_HOST); #if 0 #endif for (j = 0; j < num_threads; j++) { coarse_shift = 0; if (j > 0) coarse_shift = coarse_counter[j-1]; size = n_fine/num_threads; rest = n_fine - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) { fine_to_coarse[i] += coarse_shift; } } /* index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) big_buf_data[index++] = fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]+ my_first_cpt; } comm_handle = hypre_ParCSRCommHandleCreate( 21, comm_pkg, big_buf_data, fine_to_coarse_offd); hypre_ParCSRCommHandleDestroy(comm_handle); */ if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n", my_id, wall_time); fflush(NULL); } if (debug_flag==4) wall_time = time_getWallclockSeconds(); #if 0 #endif //for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt; /*----------------------------------------------------------------------- * Loop over fine grid points.
*-----------------------------------------------------------------------*/ a_diag = hypre_CTAlloc(HYPRE_Real, n_fine, HYPRE_MEMORY_HOST); for (i = 0; i < n_fine; i++) { if (CF_marker[i] < 0) { for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { i1 = A_diag_j[jj]; if ( i==i1 ) /* diagonal of A only */ { a_diag[i] = 1.0/A_diag_data[jj]; } } } } #if 0 #endif for (jl = 0; jl < num_threads; jl++) { size = n_fine/num_threads; rest = n_fine - size*num_threads; if (jl < rest) { ns = jl*size+jl; ne = (jl+1)*size+jl+1; } else { ns = jl*size+rest; ne = (jl+1)*size+rest; } jj_counter = 0; if (jl > 0) jj_counter = jj_count[jl-1]; jj_counter_offd = 0; if (jl > 0) jj_counter_offd = jj_count_offd[jl-1]; P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); if (num_cols_A_offd) P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); else P_marker_offd = NULL; for (i = 0; i < n_fine; i++) { P_marker[i] = -1; } for (i = 0; i < num_cols_A_offd; i++) { P_marker_offd[i] = -1; } for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a c-point, interpolation is the identity. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { P_diag_i[i] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i]; P_diag_data[jj_counter] = one; jj_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, build interpolation. *--------------------------------------------------------------------*/ else { /* Diagonal part of P */ P_diag_i[i] = jj_counter; for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { i1 = A_diag_j[jj]; /*-------------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_diag_j * and initialize interpolation weight to zero. *--------------------------------------------------------------*/ if ((CF_marker[i1] >= 0) && (method > 0)) { P_marker[i1] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i1]; /* if(method == 0) { P_diag_data[jj_counter] = 0.0; } */ if (method == 1) { P_diag_data[jj_counter] = - A_diag_data[jj]; } else if (method == 2) { P_diag_data[jj_counter] = - A_diag_data[jj]*a_diag[i]; } jj_counter++; } } /* Off-Diagonal part of P */ P_offd_i[i] = jj_counter_offd; if (num_procs > 1) { for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { i1 = A_offd_j[jj]; /*----------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_offd_j * and initialize interpolation weight to zero. 
*-----------------------------------------------------------*/ if ((CF_marker_offd[i1] >= 0) && (method > 0)) { P_marker_offd[i1] = jj_counter_offd; /*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/ P_offd_j[jj_counter_offd] = i1; /* if(method == 0) { P_offd_data[jj_counter_offd] = 0.0; } */ if (method == 1) { P_offd_data[jj_counter_offd] = - A_offd_data[jj]; } else if (method == 2) { P_offd_data[jj_counter_offd] = - A_offd_data[jj]*a_diag[i]; } jj_counter_offd++; } } } } P_offd_i[i+1] = jj_counter_offd; } hypre_TFree(P_marker, HYPRE_MEMORY_HOST); hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST); } hypre_TFree(a_diag, HYPRE_MEMORY_HOST); P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), total_global_cpts, hypre_ParCSRMatrixColStarts(A), num_cpts_global, 0, P_diag_i[n_fine], P_offd_i[n_fine]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; hypre_ParCSRMatrixOwnsRowStarts(P) = 0; num_cols_P_offd = 0; if (P_offd_size) { P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); #if 0 #endif for (i=0; i < num_cols_A_offd; i++) P_marker[i] = 0; num_cols_P_offd = 0; for (i=0; i < P_offd_size; i++) { index = P_offd_j[i]; if (!P_marker[index]) { num_cols_P_offd++; P_marker[index] = 1; } } col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST); tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST); index = 0; for (i=0; i < num_cols_P_offd; i++) { while (P_marker[index]==0) index++; tmp_map_offd[i] = index++; } #if 0 #endif for (i=0; i < P_offd_size; i++) P_offd_j[i] = hypre_BinarySearch(tmp_map_offd, P_offd_j[i], num_cols_P_offd); hypre_TFree(P_marker, HYPRE_MEMORY_HOST); } for (i=0; i < n_fine; i++) if (CF_marker[i] == -3) CF_marker[i] = -1; if (num_cols_P_offd) { hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P; hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd; } hypre_GetCommPkgRTFromCommPkgA(P,A, fine_to_coarse, tmp_map_offd); *P_ptr = P; hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST); hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST); //hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST); hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST); hypre_TFree(jj_count, HYPRE_MEMORY_HOST); hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST); return(0); } /* Interpolation for MGR - Dynamic Row Sum method */ HYPRE_Int hypre_MGRBuildPDRS( hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, HYPRE_BigInt *num_cpts_global, HYPRE_Int blk_size, HYPRE_Int reserved_coarse_size, HYPRE_Int debug_flag, hypre_ParCSRMatrix **P_ptr) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_Real *a_diag; hypre_ParCSRMatrix *P; HYPRE_BigInt *col_map_offd_P; 
HYPRE_Int *tmp_map_offd; HYPRE_Int *CF_marker_offd = NULL; hypre_CSRMatrix *P_diag; hypre_CSRMatrix *P_offd; HYPRE_Real *P_diag_data; HYPRE_Int *P_diag_i; HYPRE_Int *P_diag_j; HYPRE_Real *P_offd_data; HYPRE_Int *P_offd_i; HYPRE_Int *P_offd_j; HYPRE_Int P_diag_size, P_offd_size; HYPRE_Int *P_marker, *P_marker_offd; HYPRE_Int jj_counter,jj_counter_offd; HYPRE_Int *jj_count, *jj_count_offd; // HYPRE_Int jj_begin_row,jj_begin_row_offd; // HYPRE_Int jj_end_row,jj_end_row_offd; HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */ HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int *fine_to_coarse; //HYPRE_BigInt *fine_to_coarse_offd; HYPRE_Int *coarse_counter; HYPRE_Int coarse_shift; HYPRE_BigInt total_global_cpts; //HYPRE_BigInt my_first_cpt; HYPRE_Int num_cols_P_offd; HYPRE_Int i,i1; HYPRE_Int j,jl,jj; HYPRE_Int start; HYPRE_Real one = 1.0; HYPRE_Int my_id; HYPRE_Int num_procs; HYPRE_Int num_threads; HYPRE_Int num_sends; HYPRE_Int index; HYPRE_Int ns, ne, size, rest; HYPRE_Int *int_buf_data; HYPRE_Real wall_time; /* for debugging instrumentation */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm,&my_id); //num_threads = hypre_NumThreads(); // Temporary fix, disable threading // TODO: enable threading num_threads = 1; #ifdef HYPRE_NO_GLOBAL_PARTITION //my_first_cpt = num_cpts_global[0]; if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1]; hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); #else //my_first_cpt = num_cpts_global[my_id]; total_global_cpts = num_cpts_global[num_procs]; #endif /*------------------------------------------------------------------- * Get the CF_marker data for the off-processor columns *-------------------------------------------------------------------*/ if (debug_flag < 0) { debug_flag = -debug_flag; } if (debug_flag==4) wall_time = time_getWallclockSeconds(); if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * First Pass: Determine size of P and fill in fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. 
*-----------------------------------------------------------------------*/ coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); #if 0 #endif for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1; jj_counter = start_indexing; jj_counter_offd = start_indexing; /*----------------------------------------------------------------------- * Loop over fine grid. *-----------------------------------------------------------------------*/ /* RDF: this looks a little tricky, but doable */ #if 0 #endif for (j = 0; j < num_threads; j++) { size = n_fine/num_threads; rest = n_fine - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a C-point, interpolation is the identity. Also set up * mapping vector. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { jj_count[j]++; fine_to_coarse[i] = coarse_counter[j]; coarse_counter[j]++; } /*-------------------------------------------------------------------- * If i is an F-point, interpolation is the approximation of A_{ff}^{-1}A_{fc} *--------------------------------------------------------------------*/ else { for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { i1 = A_diag_j[jj]; if (CF_marker[i1] >= 0) { jj_count[j]++; } } if (num_procs > 1) { for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { i1 = A_offd_j[jj]; if (CF_marker_offd[i1] >= 0) { jj_count_offd[j]++; } } } } /*-------------------------------------------------------------------- * Set up the indexes for the DRS method *--------------------------------------------------------------------*/ } } /*----------------------------------------------------------------------- * Allocate arrays. *-----------------------------------------------------------------------*/ for (i=0; i < num_threads-1; i++) { coarse_counter[i+1] += coarse_counter[i]; jj_count[i+1] += jj_count[i]; jj_count_offd[i+1] += jj_count_offd[i]; } i = num_threads-1; jj_counter = jj_count[i]; jj_counter_offd = jj_count_offd[i]; P_diag_size = jj_counter; P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST); P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST); P_diag_i[n_fine] = jj_counter; P_offd_size = jj_counter_offd; P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST); P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST); P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST); /*----------------------------------------------------------------------- * Intialize some stuff. *-----------------------------------------------------------------------*/ jj_counter = start_indexing; jj_counter_offd = start_indexing; if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Internal work 1 = %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * Send and receive fine_to_coarse info. 
*-----------------------------------------------------------------------*/ if (debug_flag==4) wall_time = time_getWallclockSeconds(); //fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); #if 0 #endif for (j = 0; j < num_threads; j++) { coarse_shift = 0; if (j > 0) coarse_shift = coarse_counter[j-1]; size = n_fine/num_threads; rest = n_fine - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) fine_to_coarse[i] += coarse_shift; } /*index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, fine_to_coarse_offd); hypre_ParCSRCommHandleDestroy(comm_handle); */ if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n", my_id, wall_time); fflush(NULL); } if (debug_flag==4) wall_time = time_getWallclockSeconds(); #if 0 #endif //for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt; /*----------------------------------------------------------------------- * Loop over fine grid points. *-----------------------------------------------------------------------*/ a_diag = hypre_CTAlloc(HYPRE_Real, n_fine, HYPRE_MEMORY_HOST); for (i = 0; i < n_fine; i++) { for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { i1 = A_diag_j[jj]; if ( i==i1 ) /* diagonal of A only */ { a_diag[i] = 1.0/A_diag_data[jj]; } } } #if 0 #endif for (jl = 0; jl < num_threads; jl++) { size = n_fine/num_threads; rest = n_fine - size*num_threads; if (jl < rest) { ns = jl*size+jl; ne = (jl+1)*size+jl+1; } else { ns = jl*size+rest; ne = (jl+1)*size+rest; } jj_counter = 0; if (jl > 0) jj_counter = jj_count[jl-1]; jj_counter_offd = 0; if (jl > 0) jj_counter_offd = jj_count_offd[jl-1]; P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); if (num_cols_A_offd) P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); else P_marker_offd = NULL; for (i = 0; i < n_fine; i++) { P_marker[i] = -1; } for (i = 0; i < num_cols_A_offd; i++) { P_marker_offd[i] = -1; } for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a c-point, interpolation is the identity. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { P_diag_i[i] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i]; P_diag_data[jj_counter] = one; jj_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, build interpolation. *--------------------------------------------------------------------*/ else { /* Diagonal part of P */ P_diag_i[i] = jj_counter; for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { i1 = A_diag_j[jj]; /*-------------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_diag_j * and initialize interpolation weight to zero. 
*--------------------------------------------------------------*/ if (CF_marker[i1] >= 0) { P_marker[i1] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i1]; P_diag_data[jj_counter] = - A_diag_data[jj]*a_diag[i]; jj_counter++; } } /* Off-Diagonal part of P */ P_offd_i[i] = jj_counter_offd; if (num_procs > 1) { for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { i1 = A_offd_j[jj]; /*----------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_offd_j * and initialize interpolation weight to zero. *-----------------------------------------------------------*/ if (CF_marker_offd[i1] >= 0) { P_marker_offd[i1] = jj_counter_offd; /*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/ P_offd_j[jj_counter_offd] = i1; P_offd_data[jj_counter_offd] = - A_offd_data[jj]*a_diag[i]; jj_counter_offd++; } } } } P_offd_i[i+1] = jj_counter_offd; } hypre_TFree(P_marker, HYPRE_MEMORY_HOST); hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST); } hypre_TFree(a_diag, HYPRE_MEMORY_HOST); P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), total_global_cpts, hypre_ParCSRMatrixColStarts(A), num_cpts_global, 0, P_diag_i[n_fine], P_offd_i[n_fine]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; hypre_ParCSRMatrixOwnsRowStarts(P) = 0; num_cols_P_offd = 0; if (P_offd_size) { P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); #if 0 #endif for (i=0; i < num_cols_A_offd; i++) P_marker[i] = 0; num_cols_P_offd = 0; for (i=0; i < P_offd_size; i++) { index = P_offd_j[i]; if (!P_marker[index]) { num_cols_P_offd++; P_marker[index] = 1; } } tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST); col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST); index = 0; for (i=0; i < num_cols_P_offd; i++) { while (P_marker[index]==0) index++; tmp_map_offd[i] = index++; } #if 0 #endif for (i=0; i < P_offd_size; i++) P_offd_j[i] = hypre_BinarySearch(tmp_map_offd, P_offd_j[i], num_cols_P_offd); hypre_TFree(P_marker, HYPRE_MEMORY_HOST); } for (i=0; i < n_fine; i++) if (CF_marker[i] == -3) CF_marker[i] = -1; if (num_cols_P_offd) { hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P; hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd; } hypre_GetCommPkgRTFromCommPkgA(P,A, fine_to_coarse, tmp_map_offd); *P_ptr = P; hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST); hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST); // hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST); hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST); hypre_TFree(jj_count, HYPRE_MEMORY_HOST); hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST); return(0); } /* Scale ParCSR matrix A = scalar * A * A: the target CSR matrix * vector: array of real numbers */ HYPRE_Int hypre_ParCSRMatrixLeftScale(HYPRE_Real *vector, hypre_ParCSRMatrix *A) { HYPRE_Int i, j, n_local; hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); n_local = hypre_CSRMatrixNumRows(A_diag); for (i 
= 0; i < n_local; i++) { HYPRE_Real factor = vector[i]; for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { A_diag_data[j] *= factor; } for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++) { A_offd_data[j] *= factor; } } return(0); } /************************************************************ * Available methods: * 0: inv(A_FF) approximated by its diagonal inverse * 1: inv(A_FF) approximated by sparse approximate inverse *************************************************************/ HYPRE_Int hypre_MGRComputeNonGalerkinCoarseGrid(hypre_ParCSRMatrix *A, hypre_ParCSRMatrix *P, hypre_ParCSRMatrix *RT, HYPRE_Int bsize, HYPRE_Int ordering, HYPRE_Int method, HYPRE_Int Pmax, HYPRE_Int keep_stencil, HYPRE_Int *CF_marker, hypre_ParCSRMatrix **A_h_ptr) { HYPRE_Int *c_marker, *f_marker; HYPRE_Int n_local_fine_grid, i, i1, jj; hypre_ParCSRMatrix *A_cc; hypre_ParCSRMatrix *A_ff; hypre_ParCSRMatrix *A_fc; hypre_ParCSRMatrix *A_cf; hypre_ParCSRMatrix *A_h; hypre_ParCSRMatrix *A_h_correction; HYPRE_Int max_elmts = Pmax; // HYPRE_Real wall_time = 0.; hypre_ParCSRMatrix *P_mod = NULL; HYPRE_Int my_id; MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_MPI_Comm_rank(comm,&my_id); n_local_fine_grid = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A)); c_marker = hypre_CTAlloc(HYPRE_Int, n_local_fine_grid, HYPRE_MEMORY_HOST); f_marker = hypre_CTAlloc(HYPRE_Int, n_local_fine_grid, HYPRE_MEMORY_HOST); for (i = 0; i < n_local_fine_grid; i++) { HYPRE_Int point_type = CF_marker[i]; assert(point_type == 1 || point_type == -1); c_marker[i] = point_type; f_marker[i] = -point_type; } // get the A_cc sub-block hypre_MGRGetSubBlock(A, c_marker, c_marker, 0, &A_cc); if (method == 0) { if (keep_stencil) { //wall_time = time_getWallclockSeconds(); hypre_MGRGetSubBlock(A, c_marker, f_marker, 0, &A_cf); hypre_MGRGetSubBlock(A, f_marker, c_marker, 0, &A_fc); hypre_MGRGetSubBlock(A, f_marker, f_marker, 0, &A_ff); // extract the diagonal of A_ff and compute D_ff_inv hypre_CSRMatrix *A_ff_diag = hypre_ParCSRMatrixDiag(A_ff); HYPRE_Real *A_ff_diag_data = hypre_CSRMatrixData(A_ff_diag); HYPRE_Int *A_ff_diag_i = hypre_CSRMatrixI(A_ff_diag); HYPRE_Int *A_ff_diag_j = hypre_CSRMatrixJ(A_ff_diag); HYPRE_Int n_local_fpoints = hypre_CSRMatrixNumRows(A_ff_diag); HYPRE_Real *D_ff_inv; D_ff_inv = hypre_CTAlloc(HYPRE_Real, n_local_fpoints, HYPRE_MEMORY_HOST); for (i = 0; i < n_local_fpoints; i++) { for (jj = A_ff_diag_i[i]; jj < A_ff_diag_i[i+1]; jj++) { i1 = A_ff_diag_j[jj]; if ( i==i1 ) { D_ff_inv[i] = -1.0/A_ff_diag_data[jj]; } } } // extract the diagonal of A_cf hypre_CSRMatrix *A_cf_diag = hypre_ParCSRMatrixDiag(A_cf); HYPRE_Real *A_cf_diag_data = hypre_CSRMatrixData(A_cf_diag); HYPRE_Int *A_cf_diag_i = hypre_CSRMatrixI(A_cf_diag); HYPRE_Int *A_cf_diag_j = hypre_CSRMatrixJ(A_cf_diag); n_local_fpoints = hypre_CSRMatrixNumRows(A_cf_diag); HYPRE_Real *D_cf; D_cf = hypre_CTAlloc(HYPRE_Real, n_local_fpoints, HYPRE_MEMORY_HOST); for (i = 0; i < n_local_fpoints; i++) { i1 = A_cf_diag_j[A_cf_diag_i[i]]; D_cf[i] = A_cf_diag_data[jj]; } // compute the triple product hypre_ParCSRMatrixLeftScale(D_ff_inv, A_fc); hypre_ParCSRMatrixLeftScale(D_cf, A_fc); A_h_correction = A_fc; hypre_TFree(D_cf, HYPRE_MEMORY_HOST); hypre_TFree(D_ff_inv, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixDestroy(A_ff); hypre_ParCSRMatrixDestroy(A_cf); //wall_time = time_getWallclockSeconds() - wall_time; //hypre_printf("Compute triple product D_cf * D_ff_inv * A_fc time: %1.5f\n", wall_time); } else { //wall_time = time_getWallclockSeconds(); P_mod = hypre_ParCSRMatrixCompleteClone(P); 
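/* Illustrative sketch of the scaling used in the keep_stencil branch above.
 * Here d_ff_inv[] and d_cf[] are hypothetical host arrays holding
 * -1/diag(A_ff) and diag(A_cf), and A_fc is the F-to-C sub-block.
 * hypre_ParCSRMatrixLeftScale multiplies row i of both the diag and offd
 * parts by vector[i], so two successive calls compose into the diagonal
 * triple product D_cf * D_ff^{-1} * A_fc that is later added to A_cc as the
 * non-Galerkin correction.  Guarded so it is never built.
 */
#if 0
   hypre_ParCSRMatrixLeftScale(d_ff_inv, A_fc);  /* A_fc <- -D_ff^{-1} * A_fc (sign folded into d_ff_inv) */
   hypre_ParCSRMatrixLeftScale(d_cf, A_fc);      /* A_fc <- D_cf * (-D_ff^{-1}) * A_fc                    */
#endif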
hypre_ParCSRMatrixCopy(P,P_mod,1); HYPRE_Int n_local_rows = hypre_ParCSRMatrixNumRows(P_mod); hypre_CSRMatrix *P_mod_diag = hypre_ParCSRMatrixDiag(P_mod); HYPRE_Int *P_mod_diag_i = hypre_CSRMatrixI(P_mod_diag); HYPRE_Real *P_mod_diag_data = hypre_CSRMatrixData(P_mod_diag); for (i = 0; i < n_local_rows; i ++) { if (CF_marker[i] >= 0) { HYPRE_Int ii = P_mod_diag_i[i]; P_mod_diag_data[ii] = 0.0; } } hypre_BoomerAMGBuildCoarseOperator(RT, A, P_mod, &A_h_correction); //wall_time = time_getWallclockSeconds() - wall_time; //hypre_printf("Compute triple product time new: %1.5f\n", wall_time); hypre_ParCSRMatrixDestroy(P_mod); } } else { // Approximate inverse for ideal interploation hypre_MGRGetSubBlock(A, c_marker, f_marker, 0, &A_cf); hypre_MGRGetSubBlock(A, f_marker, c_marker, 0, &A_fc); hypre_MGRGetSubBlock(A, f_marker, f_marker, 0, &A_ff); hypre_ParCSRMatrix *A_ff_inv = NULL; hypre_ParCSRMatrix *minus_Wp = NULL; hypre_MGRApproximateInverse(A_ff, &A_ff_inv); minus_Wp = hypre_ParMatmul(A_ff_inv, A_fc); A_h_correction = hypre_ParMatmul(A_cf, minus_Wp); hypre_ParCSRMatrixDestroy(minus_Wp); hypre_ParCSRMatrixDestroy(A_ff); hypre_ParCSRMatrixDestroy(A_fc); hypre_ParCSRMatrixDestroy(A_cf); } // perform dropping for A_h_correction // specific to multiphase poromechanics // we only keep the diagonal of each block //wall_time = time_getWallclockSeconds(); HYPRE_Int n_local_cpoints = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A_h_correction)); hypre_CSRMatrix *A_h_correction_diag = hypre_ParCSRMatrixDiag(A_h_correction); HYPRE_Real *A_h_correction_diag_data = hypre_CSRMatrixData(A_h_correction_diag); HYPRE_Int *A_h_correction_diag_i = hypre_CSRMatrixI(A_h_correction_diag); HYPRE_Int *A_h_correction_diag_j = hypre_CSRMatrixJ(A_h_correction_diag); HYPRE_Int ncol_diag = hypre_CSRMatrixNumCols(A_h_correction_diag); hypre_CSRMatrix *A_h_correction_offd = hypre_ParCSRMatrixOffd(A_h_correction); HYPRE_Real *A_h_correction_offd_data = hypre_CSRMatrixData(A_h_correction_offd); HYPRE_Int *A_h_correction_offd_i = hypre_CSRMatrixI(A_h_correction_offd); HYPRE_Int *A_h_correction_offd_j = hypre_CSRMatrixJ(A_h_correction_offd); // Allow for maximum dropping with Pmax = 0 //if (Pmax > 0) //{ if (ordering == 0) // interleaved ordering { HYPRE_Int *A_h_correction_diag_i_new = hypre_CTAlloc(HYPRE_Int, n_local_cpoints+1, HYPRE_MEMORY_HOST); HYPRE_Int *A_h_correction_diag_j_new = hypre_CTAlloc(HYPRE_Int, (bsize + max_elmts)*n_local_cpoints, HYPRE_MEMORY_HOST); HYPRE_Complex *A_h_correction_diag_data_new = hypre_CTAlloc(HYPRE_Complex, (bsize + max_elmts)*n_local_cpoints, HYPRE_MEMORY_HOST); HYPRE_Int num_nonzeros_diag_new = 0; HYPRE_Int *A_h_correction_offd_i_new = hypre_CTAlloc(HYPRE_Int, n_local_cpoints+1, HYPRE_MEMORY_HOST); HYPRE_Int *A_h_correction_offd_j_new = hypre_CTAlloc(HYPRE_Int, max_elmts*n_local_cpoints, HYPRE_MEMORY_HOST); HYPRE_Complex *A_h_correction_offd_data_new = hypre_CTAlloc(HYPRE_Complex, max_elmts*n_local_cpoints, HYPRE_MEMORY_HOST); HYPRE_Int num_nonzeros_offd_new = 0; for (i = 0; i < n_local_cpoints; i++) { HYPRE_Int max_num_nonzeros = A_h_correction_diag_i[i+1] - A_h_correction_diag_i[i] + A_h_correction_offd_i[i+1] - A_h_correction_offd_i[i]; HYPRE_Int *aux_j = hypre_CTAlloc(HYPRE_Int, max_num_nonzeros, HYPRE_MEMORY_HOST); HYPRE_Real *aux_data = hypre_CTAlloc(HYPRE_Real, max_num_nonzeros, HYPRE_MEMORY_HOST); HYPRE_Int row_start = i - (i % bsize); HYPRE_Int row_stop = row_start + bsize - 1; HYPRE_Int cnt = 0; for (jj = A_h_correction_offd_i[i]; jj < A_h_correction_offd_i[i+1]; jj++) { aux_j[cnt] 
= A_h_correction_offd_j[jj] + ncol_diag; aux_data[cnt] = A_h_correction_offd_data[jj]; cnt++; } for (jj = A_h_correction_diag_i[i]; jj < A_h_correction_diag_i[i+1]; jj++) { aux_j[cnt] = A_h_correction_diag_j[jj]; aux_data[cnt] = A_h_correction_diag_data[jj]; cnt++; } hypre_qsort2_abs(aux_j, aux_data, 0, cnt-1); for (jj = A_h_correction_diag_i[i]; jj < A_h_correction_diag_i[i+1]; jj++) { i1 = A_h_correction_diag_j[jj]; if (i1 >= row_start && i1 <= row_stop) { // copy data to new arrays A_h_correction_diag_j_new[num_nonzeros_diag_new] = i1; A_h_correction_diag_data_new[num_nonzeros_diag_new] = A_h_correction_diag_data[jj]; ++num_nonzeros_diag_new; } else { // Do nothing } } if (max_elmts > 0) { for (jj = 0; jj < hypre_min(max_elmts, cnt); jj++) { HYPRE_Int col_idx = aux_j[jj]; HYPRE_Real col_value = aux_data[jj]; if (col_idx < ncol_diag && (col_idx < row_start || col_idx > row_stop)) { A_h_correction_diag_j_new[num_nonzeros_diag_new] = col_idx; A_h_correction_diag_data_new[num_nonzeros_diag_new] = col_value; ++num_nonzeros_diag_new; } else if (col_idx >= ncol_diag) { A_h_correction_offd_j_new[num_nonzeros_offd_new] = col_idx - ncol_diag; A_h_correction_offd_data_new[num_nonzeros_offd_new] = col_value; ++num_nonzeros_offd_new; } } } A_h_correction_diag_i_new[i+1] = num_nonzeros_diag_new; A_h_correction_offd_i_new[i+1] = num_nonzeros_offd_new; hypre_TFree(aux_j, HYPRE_MEMORY_HOST); hypre_TFree(aux_data, HYPRE_MEMORY_HOST); } hypre_TFree(A_h_correction_diag_i, HYPRE_MEMORY_HOST); hypre_TFree(A_h_correction_diag_j, HYPRE_MEMORY_HOST); hypre_TFree(A_h_correction_diag_data, HYPRE_MEMORY_HOST); hypre_CSRMatrixI(A_h_correction_diag) = A_h_correction_diag_i_new; hypre_CSRMatrixJ(A_h_correction_diag) = A_h_correction_diag_j_new; hypre_CSRMatrixData(A_h_correction_diag) = A_h_correction_diag_data_new; hypre_CSRMatrixNumNonzeros(A_h_correction_diag) = num_nonzeros_diag_new; if (A_h_correction_offd_i) hypre_TFree(A_h_correction_offd_i, HYPRE_MEMORY_HOST); if (A_h_correction_offd_j) hypre_TFree(A_h_correction_offd_j, HYPRE_MEMORY_HOST); if (A_h_correction_offd_data) hypre_TFree(A_h_correction_offd_data, HYPRE_MEMORY_HOST); hypre_CSRMatrixI(A_h_correction_offd) = A_h_correction_offd_i_new; hypre_CSRMatrixJ(A_h_correction_offd) = A_h_correction_offd_j_new; hypre_CSRMatrixData(A_h_correction_offd) = A_h_correction_offd_data_new; hypre_CSRMatrixNumNonzeros(A_h_correction_offd) = num_nonzeros_offd_new; } else { hypre_printf("Error!! 
Block ordering is not supported at the moment\n"); exit(-1); } //} //hypre_MGRParCSRMatrixTruncate(A_h_correction, max_elmts); //wall_time = time_getWallclockSeconds() - wall_time; //hypre_printf("Filter A_h_correction time: %1.5f\n", wall_time); //hypre_ParCSRMatrixPrintIJ(A_h_correction,1,1,"A_h_correction_filtered"); // coarse grid / schur complement hypre_ParcsrAdd(1.0, A_cc, 1.0, A_h_correction, &A_h); *A_h_ptr = A_h; //hypre_ParCSRMatrixPrintIJ(A_h,1,1,"A_h"); hypre_ParCSRMatrixDestroy(A_cc); hypre_ParCSRMatrixDestroy(A_h_correction); hypre_TFree(c_marker, HYPRE_MEMORY_HOST); hypre_TFree(f_marker, HYPRE_MEMORY_HOST); return hypre_error_flag; } HYPRE_Int hypre_MGRComputeAlgebraicFixedStress(hypre_ParCSRMatrix *A, HYPRE_BigInt *mgr_idx_array, HYPRE_Solver A_ff_solver) { HYPRE_Int *U_marker, *S_marker, *P_marker; HYPRE_Int n_fine, i; HYPRE_BigInt ibegin; hypre_ParCSRMatrix *A_up; hypre_ParCSRMatrix *A_uu; hypre_ParCSRMatrix *A_su; hypre_ParCSRMatrix *A_pu; hypre_ParVector *e1_vector; hypre_ParVector *e2_vector; hypre_ParVector *e3_vector; hypre_ParVector *e4_vector; hypre_ParVector *e5_vector; n_fine = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A)); ibegin = hypre_ParCSRMatrixFirstRowIndex(A); hypre_assert(ibegin == mgr_idx_array[0]); U_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); S_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); for (i = 0; i < n_fine; i++) { U_marker[i] = -1; S_marker[i] = -1; P_marker[i] = -1; } // create C and F markers for (i = 0; i < n_fine; i++) { if (i < mgr_idx_array[1] - ibegin) { U_marker[i] = 1; } else if (i >= (mgr_idx_array[1] - ibegin) && i < (mgr_idx_array[2] - ibegin)) { S_marker[i] = 1; } else { P_marker[i] = 1; } } // Get A_up hypre_MGRGetSubBlock(A, U_marker, P_marker, 0, &A_up); // GetA_uu hypre_MGRGetSubBlock(A, U_marker, U_marker, 0, &A_uu); // Get A_su hypre_MGRGetSubBlock(A, S_marker, U_marker, 0, &A_su); // Get A_pu hypre_MGRGetSubBlock(A, P_marker, U_marker, 0, &A_pu); e1_vector = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_up), hypre_ParCSRMatrixGlobalNumCols(A_up), hypre_ParCSRMatrixColStarts(A_up)); hypre_ParVectorInitialize(e1_vector); hypre_ParVectorSetPartitioningOwner(e1_vector,0); hypre_ParVectorSetConstantValues(e1_vector, 1.0); e2_vector = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_uu), hypre_ParCSRMatrixGlobalNumRows(A_uu), hypre_ParCSRMatrixRowStarts(A_uu)); hypre_ParVectorInitialize(e2_vector); hypre_ParVectorSetPartitioningOwner(e2_vector,0); hypre_ParVectorSetConstantValues(e2_vector, 0.0); e3_vector = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_uu), hypre_ParCSRMatrixGlobalNumRows(A_uu), hypre_ParCSRMatrixRowStarts(A_uu)); hypre_ParVectorInitialize(e3_vector); hypre_ParVectorSetPartitioningOwner(e3_vector,0); hypre_ParVectorSetConstantValues(e3_vector, 0.0); e4_vector = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_su), hypre_ParCSRMatrixGlobalNumRows(A_su), hypre_ParCSRMatrixRowStarts(A_su)); hypre_ParVectorInitialize(e4_vector); hypre_ParVectorSetPartitioningOwner(e4_vector,0); hypre_ParVectorSetConstantValues(e4_vector, 0.0); e5_vector = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_pu), hypre_ParCSRMatrixGlobalNumRows(A_pu), hypre_ParCSRMatrixRowStarts(A_pu)); hypre_ParVectorInitialize(e5_vector); hypre_ParVectorSetPartitioningOwner(e5_vector,0); hypre_ParVectorSetConstantValues(e5_vector, 0.0); // compute e2 = A_up * e1 hypre_ParCSRMatrixMatvecOutOfPlace(1.0, A_up, e1_vector, 0.0, e2_vector, e2_vector); // solve e3 
= A_uu^-1 * e2 hypre_BoomerAMGSolve(A_ff_solver, A_uu, e2_vector, e3_vector); // compute e4 = A_su * e3 hypre_ParCSRMatrixMatvecOutOfPlace(1.0, A_su, e3_vector, 0.0, e4_vector, e4_vector); // compute e4 = A_su * e3 hypre_ParCSRMatrixMatvecOutOfPlace(1.0, A_su, e3_vector, 0.0, e4_vector, e4_vector); // print e4 hypre_ParVectorPrintIJ(e4_vector,1,"Dsp"); // compute e5 = A_pu * e3 hypre_ParCSRMatrixMatvecOutOfPlace(1.0, A_pu, e3_vector, 0.0, e5_vector, e5_vector); hypre_ParVectorPrintIJ(e5_vector,1,"Dpp"); hypre_ParVectorDestroy(e1_vector); hypre_ParVectorDestroy(e2_vector); hypre_ParVectorDestroy(e3_vector); hypre_ParCSRMatrixDestroy(A_uu); hypre_ParCSRMatrixDestroy(A_up); hypre_ParCSRMatrixDestroy(A_pu); hypre_ParCSRMatrixDestroy(A_su); hypre_TFree(U_marker, HYPRE_MEMORY_HOST); hypre_TFree(S_marker, HYPRE_MEMORY_HOST); hypre_TFree(P_marker, HYPRE_MEMORY_HOST); return hypre_error_flag; } HYPRE_Int hypre_MGRApproximateInverse(hypre_ParCSRMatrix *A, hypre_ParCSRMatrix **A_inv) { HYPRE_Int print_level, mr_max_row_nnz, mr_max_iter, nsh_max_row_nnz, nsh_max_iter, mr_col_version; HYPRE_Real mr_tol, nsh_tol; HYPRE_Real *droptol = hypre_CTAlloc(HYPRE_Real, 2, HYPRE_MEMORY_HOST); hypre_ParCSRMatrix *approx_A_inv = NULL; print_level = 0; nsh_max_iter = 2; nsh_max_row_nnz = 2; // default 1000 mr_max_iter = 1; mr_tol = 1.0e-3; mr_max_row_nnz = 2; // default 800 mr_col_version = 0; nsh_tol = 1.0e-3; droptol[0] = 1.0e-2; droptol[1] = 1.0e-2; hypre_ILUParCSRInverseNSH(A, &approx_A_inv, droptol, mr_tol, nsh_tol, DIVIDE_TOL, mr_max_row_nnz, nsh_max_row_nnz, mr_max_iter, nsh_max_iter, mr_col_version, print_level); *A_inv = approx_A_inv; if (droptol) hypre_TFree(droptol, HYPRE_MEMORY_HOST); return hypre_error_flag; } HYPRE_Int hypre_MGRBuildInterpApproximateInverseExp(hypre_ParCSRMatrix *A, hypre_ParCSRMatrix *S, HYPRE_Int *CF_marker, HYPRE_BigInt *num_cpts_global, HYPRE_Int debug_flag, hypre_ParCSRMatrix **P_ptr) { HYPRE_Int *C_marker; HYPRE_Int *F_marker; hypre_ParCSRMatrix *A_fc; hypre_ParCSRMatrix *minus_Wp; MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRMatrix *P; HYPRE_BigInt *col_map_offd_P; hypre_CSRMatrix *P_diag; hypre_CSRMatrix *P_offd; HYPRE_Real *P_diag_data; HYPRE_Int *P_diag_i; HYPRE_Int *P_diag_j; HYPRE_Real *P_offd_data; HYPRE_Int *P_offd_i; HYPRE_Int *P_offd_j; HYPRE_Int P_diag_size, P_offd_size; HYPRE_Int jj_counter,jj_counter_offd; HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */ HYPRE_Int n_fine = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A)); HYPRE_Int *fine_to_coarse = NULL; HYPRE_Int coarse_counter; HYPRE_BigInt total_global_cpts; HYPRE_Int num_cols_P_offd; // HYPRE_BigInt my_first_cpt; HYPRE_Int i, jj; HYPRE_Real one = 1.0; HYPRE_Int my_id; HYPRE_Int num_procs; // HYPRE_Int num_threads; // HYPRE_Real wall_time; /* for debugging instrumentation */ C_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); F_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); // create C and F markers for (i = 0; i < n_fine; i++) { C_marker[i] = (CF_marker[i] == 1)? 1: -1; F_marker[i] = (CF_marker[i] == 1) ? 
-1: 1; } // Get A_FC hypre_MGRGetSubBlock(A, F_marker, C_marker, 0, &A_fc); // compute -Wp minus_Wp = hypre_ParMatmul(S, A_fc); hypre_CSRMatrix *minus_Wp_diag = hypre_ParCSRMatrixDiag(minus_Wp); HYPRE_Real *minus_Wp_diag_data = hypre_CSRMatrixData(minus_Wp_diag); HYPRE_Int *minus_Wp_diag_i = hypre_CSRMatrixI(minus_Wp_diag); HYPRE_Int *minus_Wp_diag_j = hypre_CSRMatrixJ(minus_Wp_diag); hypre_CSRMatrix *minus_Wp_offd = hypre_ParCSRMatrixOffd(minus_Wp); HYPRE_Real *minus_Wp_offd_data = hypre_CSRMatrixData(minus_Wp_offd); HYPRE_Int *minus_Wp_offd_i = hypre_CSRMatrixI(minus_Wp_offd); HYPRE_Int *minus_Wp_offd_j = hypre_CSRMatrixJ(minus_Wp_offd); hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm,&my_id); // num_threads = hypre_NumThreads(); #ifdef HYPRE_NO_GLOBAL_PARTITION // my_first_cpt = num_cpts_global[0]; if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1]; hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); #else // my_first_cpt = num_cpts_global[my_id]; total_global_cpts = num_cpts_global[num_procs]; #endif /*----------------------------------------------------------------------- * First Pass: Determine size of P and fill in fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. *-----------------------------------------------------------------------*/ fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); #if 0 #endif for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1; jj_counter = start_indexing; jj_counter_offd = start_indexing; /*----------------------------------------------------------------------- * Loop over fine grid. *-----------------------------------------------------------------------*/ HYPRE_Int row_counter = 0; coarse_counter = 0; for (i = 0; i < n_fine; i++) { /*-------------------------------------------------------------------- * If i is a C-point, interpolation is the identity. Also set up * mapping vector. *--------------------------------------------------------------------*/ if (CF_marker[i] > 0) { jj_counter++; fine_to_coarse[i] = coarse_counter; coarse_counter++; } else { /*-------------------------------------------------------------------- * If i is an F-point, interpolation is the approximation of A_{ff}^{-1}A_{fc} *--------------------------------------------------------------------*/ for (jj = minus_Wp_diag_i[row_counter]; jj < minus_Wp_diag_i[row_counter+1]; jj++) { jj_counter++; } if (num_procs > 1) { for (jj = minus_Wp_offd_i[row_counter]; jj < minus_Wp_offd_i[row_counter+1]; jj++) { jj_counter_offd++; } } row_counter++; } } /*----------------------------------------------------------------------- * Allocate arrays. *-----------------------------------------------------------------------*/ P_diag_size = jj_counter; P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE); P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_DEVICE); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_DEVICE); P_diag_i[n_fine] = jj_counter; P_offd_size = jj_counter_offd; P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE); P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_DEVICE); P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_DEVICE); /*----------------------------------------------------------------------- * Intialize some stuff. 
*-----------------------------------------------------------------------*/ jj_counter = start_indexing; jj_counter_offd = start_indexing; /*----------------------------------------------------------------------- * Send and receive fine_to_coarse info. *-----------------------------------------------------------------------*/ row_counter = 0; for (i = 0; i < n_fine; i++) { /*-------------------------------------------------------------------- * If i is a c-point, interpolation is the identity. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { P_diag_i[i] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i]; P_diag_data[jj_counter] = one; jj_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, build interpolation. *--------------------------------------------------------------------*/ else { /* Diagonal part of P */ P_diag_i[i] = jj_counter; for (jj = minus_Wp_diag_i[row_counter]; jj < minus_Wp_diag_i[row_counter+1]; jj++) { P_diag_j[jj_counter] = minus_Wp_diag_j[jj]; P_diag_data[jj_counter] = - minus_Wp_diag_data[jj]; jj_counter++; } /* Off-Diagonal part of P */ P_offd_i[i] = jj_counter_offd; if (num_procs > 1) { for (jj = minus_Wp_offd_i[row_counter]; jj < minus_Wp_offd_i[row_counter+1]; jj++) { P_offd_j[jj_counter_offd] = minus_Wp_offd_j[jj]; P_offd_data[jj_counter_offd] = - minus_Wp_offd_data[jj]; jj_counter_offd++; } } row_counter++; } P_offd_i[i+1] = jj_counter_offd; } P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), total_global_cpts, hypre_ParCSRMatrixColStarts(A), num_cpts_global, 0, P_diag_i[n_fine], P_offd_i[n_fine]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; hypre_ParCSRMatrixOwnsRowStarts(P) = 0; num_cols_P_offd = hypre_CSRMatrixNumCols(minus_Wp_offd); HYPRE_BigInt *col_map_offd_tmp = hypre_ParCSRMatrixColMapOffd(minus_Wp); if (P_offd_size) { col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST); for (i=0; i < num_cols_P_offd; i++) { col_map_offd_P[i] = col_map_offd_tmp[i]; } } if (num_cols_P_offd) { hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P; hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd; } hypre_MatvecCommPkgCreate(P); *P_ptr = P; hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST); hypre_TFree(C_marker, HYPRE_MEMORY_HOST); hypre_TFree(F_marker, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixDestroy(A_fc); hypre_ParCSRMatrixDestroy(minus_Wp); return 0; } HYPRE_Int hypre_MGRBuildInterpApproximateInverse(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, HYPRE_BigInt *num_cpts_global, HYPRE_Int debug_flag, hypre_ParCSRMatrix **P_ptr) { HYPRE_Int *C_marker; HYPRE_Int *F_marker; hypre_ParCSRMatrix *A_ff; hypre_ParCSRMatrix *A_fc; hypre_ParCSRMatrix *A_ff_inv; hypre_ParCSRMatrix *minus_Wp; MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRMatrix *P; HYPRE_BigInt *col_map_offd_P; hypre_CSRMatrix *P_diag; hypre_CSRMatrix *P_offd; HYPRE_Real *P_diag_data; HYPRE_Int *P_diag_i; HYPRE_Int *P_diag_j; HYPRE_Real *P_offd_data; HYPRE_Int *P_offd_i; HYPRE_Int *P_offd_j; HYPRE_Int P_diag_size, P_offd_size; HYPRE_Int jj_counter,jj_counter_offd; //HYPRE_Int jj_begin_row,jj_begin_row_offd; //HYPRE_Int jj_end_row,jj_end_row_offd; HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */ 
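/* Illustrative sketch of the two-pass CSR assembly used by both approximate-
 * inverse interpolation builders in this file: pass 1 counts one entry per
 * C-point row (identity block) plus the corresponding W-row length for each
 * F-point row, pass 2 fills indices and values with a running counter, so
 * that P = [ -W; I ] in the C/F ordering.  All names (n, cf[], c_index[],
 * w_i[], w_j[], w_a[], p_i[], p_j[], p_a[]) are hypothetical and the block is
 * never compiled.
 */
#if 0
   {
      HYPRE_Int row, k, frow = 0, cnt = 0;

      for (row = 0; row < n; row++)                 /* pass 1: count nonzeros */
      {
         if (cf[row] > 0) { cnt += 1; }
         else             { cnt += w_i[frow + 1] - w_i[frow]; frow++; }
      }
      /* ...allocate p_i[n+1], p_j[cnt], p_a[cnt] here... */
      cnt = 0; frow = 0;
      for (row = 0; row < n; row++)                 /* pass 2: fill P row by row */
      {
         p_i[row] = cnt;
         if (cf[row] > 0)
         {
            p_j[cnt] = c_index[row];   p_a[cnt++] = 1.0;      /* identity on C-points */
         }
         else
         {
            for (k = w_i[frow]; k < w_i[frow + 1]; k++)
            {
               p_j[cnt] = w_j[k];      p_a[cnt++] = -w_a[k];  /* -W on F-points */
            }
            frow++;
         }
      }
      p_i[n] = cnt;
   }
#endif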
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A)); HYPRE_Int *fine_to_coarse = NULL; //HYPRE_Int *coarse_counter; HYPRE_Int coarse_counter; HYPRE_BigInt total_global_cpts; HYPRE_Int num_cols_P_offd; // HYPRE_BigInt my_first_cpt; HYPRE_Int i,jj; HYPRE_Real one = 1.0; HYPRE_Int my_id; HYPRE_Int num_procs; // HYPRE_Int num_threads; // HYPRE_Real wall_time; /* for debugging instrumentation */ C_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); F_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); // create C and F markers for (i = 0; i < n_fine; i++) { C_marker[i] = (CF_marker[i] == 1)? 1: -1; F_marker[i] = (CF_marker[i] == 1) ? -1: 1; } // Get A_FF hypre_MGRGetSubBlock(A, F_marker, F_marker, 0, &A_ff); // Get A_FC hypre_MGRGetSubBlock(A, F_marker, C_marker, 0, &A_fc); hypre_MGRApproximateInverse(A_ff, &A_ff_inv); hypre_ParCSRMatrixPrintIJ(A_ff_inv, 1, 1, "A_ff_inv"); hypre_ParCSRMatrixPrintIJ(A_fc, 1, 1, "A_fc"); minus_Wp = hypre_ParMatmul(A_ff_inv, A_fc); hypre_ParCSRMatrixPrintIJ(minus_Wp, 1, 1, "Wp"); hypre_CSRMatrix *minus_Wp_diag = hypre_ParCSRMatrixDiag(minus_Wp); HYPRE_Real *minus_Wp_diag_data = hypre_CSRMatrixData(minus_Wp_diag); HYPRE_Int *minus_Wp_diag_i = hypre_CSRMatrixI(minus_Wp_diag); HYPRE_Int *minus_Wp_diag_j = hypre_CSRMatrixJ(minus_Wp_diag); hypre_CSRMatrix *minus_Wp_offd = hypre_ParCSRMatrixOffd(minus_Wp); HYPRE_Real *minus_Wp_offd_data = hypre_CSRMatrixData(minus_Wp_offd); HYPRE_Int *minus_Wp_offd_i = hypre_CSRMatrixI(minus_Wp_offd); HYPRE_Int *minus_Wp_offd_j = hypre_CSRMatrixJ(minus_Wp_offd); //hypre_CSRMatrix *minus_Wp_offd = hypre_ParCSRMatrixOffd(minus_Wp); //HYPRE_Int num_cols_minus_Wp_offd = hypre_CSRMatrixNumCols(minus_Wp_offd); hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm,&my_id); // num_threads = hypre_NumThreads(); #ifdef HYPRE_NO_GLOBAL_PARTITION // my_first_cpt = num_cpts_global[0]; if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1]; hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); #else // my_first_cpt = num_cpts_global[my_id]; total_global_cpts = num_cpts_global[num_procs]; #endif /*----------------------------------------------------------------------- * First Pass: Determine size of P and fill in fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. *-----------------------------------------------------------------------*/ //coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); //jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); //jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); #if 0 #endif for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1; jj_counter = start_indexing; jj_counter_offd = start_indexing; /*----------------------------------------------------------------------- * Loop over fine grid. *-----------------------------------------------------------------------*/ HYPRE_Int row_counter = 0; coarse_counter = 0; for (i = 0; i < n_fine; i++) { /*-------------------------------------------------------------------- * If i is a C-point, interpolation is the identity. Also set up * mapping vector. 
*--------------------------------------------------------------------*/ if (CF_marker[i] > 0) { //jj_count[j]++; //fine_to_coarse[i] = coarse_counter[j]; //coarse_counter[j]++; jj_counter++; fine_to_coarse[i] = coarse_counter; coarse_counter++; } else { /*-------------------------------------------------------------------- * If i is an F-point, interpolation is the approximation of A_{ff}^{-1}A_{fc} *--------------------------------------------------------------------*/ for (jj = minus_Wp_diag_i[row_counter]; jj < minus_Wp_diag_i[row_counter+1]; jj++) { //jj_count[j]++; jj_counter++; } if (num_procs > 1) { for (jj = minus_Wp_offd_i[row_counter]; jj < minus_Wp_offd_i[row_counter+1]; jj++) { //jj_count_offd[j]++; jj_counter_offd++; } } row_counter++; } } /*----------------------------------------------------------------------- * Allocate arrays. *-----------------------------------------------------------------------*/ /* for (i=0; i < num_threads-1; i++) { coarse_counter[i+1] += coarse_counter[i]; jj_count[i+1] += jj_count[i]; jj_count_offd[i+1] += jj_count_offd[i]; } i = num_threads-1; jj_counter = jj_count[i]; jj_counter_offd = jj_count_offd[i]; */ P_diag_size = jj_counter; P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE); P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_DEVICE); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_DEVICE); P_diag_i[n_fine] = jj_counter; P_offd_size = jj_counter_offd; P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE); P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_DEVICE); P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_DEVICE); /*----------------------------------------------------------------------- * Intialize some stuff. *-----------------------------------------------------------------------*/ jj_counter = start_indexing; jj_counter_offd = start_indexing; /* if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Internal work 1 = %f\n", my_id, wall_time); fflush(NULL); } */ /*----------------------------------------------------------------------- * Send and receive fine_to_coarse info. 
*-----------------------------------------------------------------------*/ /* if (num_procs > 1) { if (debug_flag==4) wall_time = time_getWallclockSeconds(); fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_minus_Wp_offd, HYPRE_MEMORY_HOST); for (i = 0; i < n_fine; i++) { fine_to_coarse[i] += my_first_cpt; } comm_pkg = hypre_ParCSRMatrixCommPkg(minus_Wp); if (!comm_pkg) { hypre_MatvecCommPkgCreate(minus_Wp); comm_pkg = hypre_ParCSRMatrixCommPkg(minus_Wp); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, fine_to_coarse_offd); hypre_ParCSRCommHandleDestroy(comm_handle); if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n", my_id, wall_time); fflush(NULL); } if (debug_flag==4) wall_time = time_getWallclockSeconds(); #if 0 #endif for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt; } */ row_counter = 0; for (i = 0; i < n_fine; i++) { /*-------------------------------------------------------------------- * If i is a c-point, interpolation is the identity. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { P_diag_i[i] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i]; P_diag_data[jj_counter] = one; jj_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, build interpolation. *--------------------------------------------------------------------*/ else { /* Diagonal part of P */ P_diag_i[i] = jj_counter; for (jj = minus_Wp_diag_i[row_counter]; jj < minus_Wp_diag_i[row_counter+1]; jj++) { //P_marker[row_counter] = jj_counter; P_diag_j[jj_counter] = minus_Wp_diag_j[jj]; P_diag_data[jj_counter] = - minus_Wp_diag_data[jj]; jj_counter++; } /* Off-Diagonal part of P */ P_offd_i[i] = jj_counter_offd; if (num_procs > 1) { for (jj = minus_Wp_offd_i[row_counter]; jj < minus_Wp_offd_i[row_counter+1]; jj++) { //P_marker_offd[row_counter] = jj_counter_offd; P_offd_j[jj_counter_offd] = minus_Wp_offd_j[jj]; P_offd_data[jj_counter_offd] = - minus_Wp_offd_data[jj]; jj_counter_offd++; } } row_counter++; } P_offd_i[i+1] = jj_counter_offd; } //hypre_printf("Num rows of Wp = %d\n", row_counter); //P_offd_i[row_counter] = jj_counter_offd; P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), total_global_cpts, hypre_ParCSRMatrixColStarts(A), num_cpts_global, 0, P_diag_i[n_fine], P_offd_i[n_fine]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; hypre_ParCSRMatrixOwnsRowStarts(P) = 0; num_cols_P_offd = hypre_CSRMatrixNumCols(minus_Wp_offd); HYPRE_BigInt *col_map_offd_tmp = hypre_ParCSRMatrixColMapOffd(minus_Wp); if (P_offd_size) { col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST); for (i=0; i < num_cols_P_offd; i++) { col_map_offd_P[i] = col_map_offd_tmp[i]; } } /* num_cols_P_offd = 0; if (P_offd_size) { P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_minus_Wp_offd, HYPRE_MEMORY_HOST); #if 0 #endif 
for (i=0; i < num_cols_minus_Wp_offd; i++) P_marker[i] = 0; num_cols_P_offd = 0; for (i=0; i < P_offd_size; i++) { index = P_offd_j[i]; if (!P_marker[index]) { num_cols_P_offd++; P_marker[index] = 1; } } col_map_offd_P = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST); index = 0; for (i=0; i < num_cols_P_offd; i++) { while (P_marker[index]==0) index++; col_map_offd_P[i] = index++; } #if 0 #endif for (i=0; i < P_offd_size; i++) P_offd_j[i] = hypre_BinarySearch(col_map_offd_P, P_offd_j[i], num_cols_P_offd); hypre_TFree(P_marker, HYPRE_MEMORY_HOST); } */ if (num_cols_P_offd) { hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P; hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd; } hypre_MatvecCommPkgCreate(P); //hypre_GetCommPkgRTFromCommPkgA(P,A, fine_to_coarse_offd); *P_ptr = P; //hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); //hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST); //if (fine_to_coarse_offd) hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST); //hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST); //hypre_TFree(jj_count, HYPRE_MEMORY_HOST); //hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST); hypre_TFree(C_marker, HYPRE_MEMORY_HOST); hypre_TFree(F_marker, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixDestroy(A_ff); hypre_ParCSRMatrixDestroy(A_fc); hypre_ParCSRMatrixDestroy(A_ff_inv); hypre_ParCSRMatrixDestroy(minus_Wp); return 0; } /* Setup interpolation operator */ HYPRE_Int hypre_MGRBuildInterp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, hypre_ParCSRMatrix *S, HYPRE_BigInt *num_cpts_global, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag, HYPRE_Real trunc_factor, HYPRE_Int max_elmts, HYPRE_Int *col_offd_S_to_A, hypre_ParCSRMatrix **P, HYPRE_Int interp_type, HYPRE_Int numsweeps) { //HYPRE_Int i; hypre_ParCSRMatrix *P_ptr = NULL; //HYPRE_Real jac_trunc_threshold = trunc_factor; //HYPRE_Real jac_trunc_threshold_minus = 0.5*jac_trunc_threshold; /* Interpolation for each level */ if (interp_type <3) { hypre_MGRBuildP( A,CF_marker,num_cpts_global,interp_type,debug_flag,&P_ptr); /* Could do a few sweeps of Jacobi to further improve P */ //for(i=0; i<numsweeps; i++) // hypre_BoomerAMGJacobiInterp(A, &P_ptr, S,1, NULL, CF_marker, 0, jac_trunc_threshold, jac_trunc_threshold_minus ); //hypre_BoomerAMGInterpTruncation(P_ptr, trunc_factor, max_elmts); } else if (interp_type == 4) { hypre_MGRBuildInterpApproximateInverse(A, CF_marker, num_cpts_global, debug_flag, &P_ptr); hypre_BoomerAMGInterpTruncation(P_ptr, trunc_factor, max_elmts); } else if (interp_type == 99) { hypre_MGRBuildInterpApproximateInverseExp(A, S, CF_marker, num_cpts_global, debug_flag, &P_ptr); hypre_BoomerAMGInterpTruncation(P_ptr, trunc_factor, max_elmts); } else { /* Classical modified interpolation */ hypre_BoomerAMGBuildInterp(A, CF_marker, S, num_cpts_global,1, NULL,debug_flag, trunc_factor, max_elmts, col_offd_S_to_A, &P_ptr); /* Do k steps of Jacobi build W for P = [-W I]. * Note that BoomerAMGJacobiInterp assumes you have some initial P, * hence we need to initialize P as above, before calling this routine. * If numsweeps = 0, the following step is skipped and P is returned as is. 
* Looping here is equivalent to improving P by Jacobi interpolation */ //for(i=0; i<numsweeps; i++) // hypre_BoomerAMGJacobiInterp(A, &P_ptr, S,1, NULL, CF_marker, // 0, jac_trunc_threshold, // jac_trunc_threshold_minus ); } /* set pointer to P */ *P = P_ptr; return hypre_error_flag; } /* Setup restriction operator */ HYPRE_Int hypre_MGRBuildRestrict(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, HYPRE_BigInt *num_cpts_global, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag, HYPRE_Real trunc_factor, HYPRE_Int max_elmts, HYPRE_Real S_commpkg_switch, HYPRE_Real strong_threshold, HYPRE_Real max_row_sum, hypre_ParCSRMatrix **R, HYPRE_Int restrict_type, HYPRE_Int numsweeps) { // HYPRE_Int i; hypre_ParCSRMatrix *R_ptr = NULL; hypre_ParCSRMatrix *AT = NULL; hypre_ParCSRMatrix *ST = NULL; HYPRE_Int *col_offd_ST_to_AT = NULL; // HYPRE_Real jac_trunc_threshold = trunc_factor; // HYPRE_Real jac_trunc_threshold_minus = 0.5*jac_trunc_threshold; /* Build AT (transpose A) */ if (restrict_type > 0) { hypre_ParCSRMatrixTranspose(A, &AT, 1); } if (restrict_type > 5) { /* Build new strength matrix */ hypre_BoomerAMGCreateS(AT, strong_threshold, max_row_sum, 1, NULL, &ST); /* use appropriate communication package for Strength matrix */ if (strong_threshold > S_commpkg_switch) hypre_BoomerAMGCreateSCommPkg(AT, ST, &col_offd_ST_to_AT); } /* Interpolation for each level */ if (restrict_type == 0) { hypre_MGRBuildP(A, CF_marker, num_cpts_global, restrict_type, debug_flag, &R_ptr); } else if (restrict_type == 1 || restrict_type == 2) { hypre_MGRBuildP(AT, CF_marker, num_cpts_global, restrict_type, debug_flag, &R_ptr); /* Could do a few sweeps of Jacobi to further improve P */ //for(i=0; i<numsweeps; i++) // hypre_BoomerAMGJacobiInterp(A, &R_ptr, S,1, NULL, CF_marker, 0, jac_trunc_threshold, jac_trunc_threshold_minus ); //hypre_BoomerAMGInterpTruncation(R_ptr, trunc_factor, max_elmts); } else if (restrict_type == 4) { hypre_MGRBuildInterpApproximateInverse(A, CF_marker, num_cpts_global, debug_flag, &R_ptr); hypre_BoomerAMGInterpTruncation(R_ptr, trunc_factor, max_elmts); } else { /* Classical modified interpolation */ hypre_BoomerAMGBuildInterp(AT, CF_marker, ST, num_cpts_global,1, NULL,debug_flag, trunc_factor, max_elmts, col_offd_ST_to_AT, &R_ptr); /* Do k steps of Jacobi build W for P = [-W I]. * Note that BoomerAMGJacobiInterp assumes you have some initial P, * hence we need to initialize P as above, before calling this routine. * If numsweeps = 0, the following step is skipped and P is returned as is. 
* Looping here is equivalent to improving P by Jacobi interpolation */ // for(i=0; i<numsweeps; i++) // hypre_BoomerAMGJacobiInterp(A, &R_ptr, S,1, NULL, CF_marker, 0, // jac_trunc_threshold, jac_trunc_threshold_minus); } /* set pointer to P */ *R = R_ptr; /* Free memory */ if (restrict_type > 0) { hypre_ParCSRMatrixDestroy(AT); } if (restrict_type > 5) { hypre_ParCSRMatrixDestroy(ST); if (col_offd_ST_to_AT) hypre_TFree(col_offd_ST_to_AT, HYPRE_MEMORY_HOST); } return hypre_error_flag; } void hypre_blas_smat_inv_n4 (HYPRE_Real *a) { const HYPRE_Real a11 = a[0], a12 = a[1], a13 = a[2], a14 = a[3]; const HYPRE_Real a21 = a[4], a22 = a[5], a23 = a[6], a24 = a[7]; const HYPRE_Real a31 = a[8], a32 = a[9], a33 = a[10], a34 = a[11]; const HYPRE_Real a41 = a[12], a42 = a[13], a43 = a[14], a44 = a[15]; const HYPRE_Real M11 = a22*a33*a44 + a23*a34*a42 + a24*a32*a43 - a22*a34*a43 - a23*a32*a44 - a24*a33*a42; const HYPRE_Real M12 = a12*a34*a43 + a13*a32*a44 + a14*a33*a42 - a12*a33*a44 - a13*a34*a42 - a14*a32*a43; const HYPRE_Real M13 = a12*a23*a44 + a13*a24*a42 + a14*a22*a43 - a12*a24*a43 - a13*a22*a44 - a14*a23*a42; const HYPRE_Real M14 = a12*a24*a33 + a13*a22*a34 + a14*a23*a32 - a12*a23*a34 - a13*a24*a32 - a14*a22*a33; const HYPRE_Real M21 = a21*a34*a43 + a23*a31*a44 + a24*a33*a41 - a21*a33*a44 - a23*a34*a41 - a24*a31*a43; const HYPRE_Real M22 = a11*a33*a44 + a13*a34*a41 + a14*a31*a43 - a11*a34*a43 - a13*a31*a44 - a14*a33*a41; const HYPRE_Real M23 = a11*a24*a43 + a13*a21*a44 + a14*a23*a41 - a11*a23*a44 - a13*a24*a41 - a14*a21*a43; const HYPRE_Real M24 = a11*a23*a34 + a13*a24*a31 + a14*a21*a33 - a11*a24*a33 - a13*a21*a34 - a14*a23*a31; const HYPRE_Real M31 = a21*a32*a44 + a22*a34*a41 + a24*a31*a42 - a21*a34*a42 - a22*a31*a44 - a24*a32*a41; const HYPRE_Real M32 = a11*a34*a42 + a12*a31*a44 + a14*a32*a41 - a11*a32*a44 - a12*a34*a41 - a14*a31*a42; const HYPRE_Real M33 = a11*a22*a44 + a12*a24*a41 + a14*a21*a42 - a11*a24*a42 - a12*a21*a44 - a14*a22*a41; const HYPRE_Real M34 = a11*a24*a32 + a12*a21*a34 + a14*a22*a31 - a11*a22*a34 - a12*a24*a31 - a14*a21*a32; const HYPRE_Real M41 = a21*a33*a42 + a22*a31*a43 + a23*a32*a41 - a21*a32*a43 - a22*a33*a41 - a23*a31*a42; const HYPRE_Real M42 = a11*a32*a43 + a12*a33*a41 + a13*a31*a42 - a11*a33*a42 - a12*a31*a43 - a13*a32*a41; const HYPRE_Real M43 = a11*a23*a42 + a12*a21*a43 + a13*a22*a41 - a11*a22*a43 - a12*a23*a41 - a13*a21*a42; const HYPRE_Real M44 = a11*a22*a33 + a12*a23*a31 + a13*a21*a32 - a11*a23*a32 - a12*a21*a33 - a13*a22*a31; const HYPRE_Real det = a11*M11 + a12*M21 + a13*M31 + a14*M41; HYPRE_Real det_inv; //if ( fabs(det) < 1e-22 ) { //hypre_printf("### WARNING: Matrix is nearly singular! 
det = %e\n", det); /* printf("##----------------------------------------------\n"); printf("## %12.5e %12.5e %12.5e \n", a0, a1, a2); printf("## %12.5e %12.5e %12.5e \n", a3, a4, a5); printf("## %12.5e %12.5e %12.5e \n", a5, a6, a7); printf("##----------------------------------------------\n"); getchar(); */ //} det_inv = 1.0/det; a[0] = M11*det_inv; a[1] = M12*det_inv; a[2] = M13*det_inv; a[3] = M14*det_inv; a[4] = M21*det_inv; a[5] = M22*det_inv; a[6] = M23*det_inv; a[7] = M24*det_inv; a[8] = M31*det_inv; a[9] = M32*det_inv; a[10] = M33*det_inv; a[11] = M34*det_inv; a[12] = M41*det_inv; a[13] = M42*det_inv; a[14] = M43*det_inv; a[15] = M44*det_inv; } void hypre_blas_mat_inv(HYPRE_Real *a, HYPRE_Int n) { HYPRE_Int i,j,k,l,u,kn,in; HYPRE_Real alinv; if (n == 4) { hypre_blas_smat_inv_n4(a); } else { for (k=0; k<n; ++k) { kn = k*n; l = kn+k; //if (fabs(a[l]) < SMALLREAL) { // printf("### WARNING: Diagonal entry is close to zero!"); // printf("### WARNING: diag_%d=%e\n", k, a[l]); // a[l] = SMALLREAL; //} alinv = 1.0/a[l]; a[l] = alinv; for (j=0; j<k; ++j) { u = kn+j; a[u] *= alinv; } for (j=k+1; j<n; ++j) { u = kn+j; a[u] *= alinv; } for (i=0; i<k; ++i) { in = i*n; for (j=0; j<n; ++j) if (j!=k) { u = in+j; a[u] -= a[in+k]*a[kn+j]; } // end if (j!=k) } for (i=k+1; i<n; ++i) { in = i*n; for (j=0; j<n; ++j) if (j!=k) { u = in+j; a[u] -= a[in+k]*a[kn+j]; } // end if (j!=k) } for (i=0; i<k; ++i) { u=i*n+k; a[u] *= -alinv; } for (i=k+1; i<n; ++i) { u=i*n+k; a[u] *= -alinv; } } // end for (k=0; k<n; ++k) }// end if } HYPRE_Int hypre_block_jacobi_scaling(hypre_ParCSRMatrix *A, hypre_ParCSRMatrix **B_ptr, void *mgr_vdata, HYPRE_Int debug_flag) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int num_procs, my_id; HYPRE_Int blk_size = (mgr_data -> block_size); HYPRE_Int reserved_coarse_size = (mgr_data -> reserved_coarse_size); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_ParCSRMatrix *B; hypre_CSRMatrix *B_diag; HYPRE_Real *B_diag_data; HYPRE_Int *B_diag_i; HYPRE_Int *B_diag_j; hypre_CSRMatrix *B_offd; HYPRE_Int i,ii; HYPRE_Int j,jj; HYPRE_Int k; HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int n_block, left_size,inv_size; // HYPRE_Real wall_time; /* for debugging instrumentation */ HYPRE_Int bidx,bidxm1,bidxp1; HYPRE_Real * diaginv; const HYPRE_Int nb2 = blk_size*blk_size; HYPRE_Int block_scaling_error = 0; hypre_MPI_Comm_size(comm,&num_procs); hypre_MPI_Comm_rank(comm,&my_id); // HYPRE_Int num_threads = hypre_NumThreads(); //printf("n = %d\n",n); if (my_id == num_procs) { n_block = (n - reserved_coarse_size) / blk_size; left_size = n - blk_size*n_block; } else { n_block = n / blk_size; left_size = n - blk_size*n_block; } inv_size = nb2*n_block + left_size*left_size; //printf("inv_size = %d\n",inv_size); hypre_blockRelax_setup(A,blk_size,reserved_coarse_size,&(mgr_data -> diaginv)); // if (debug_flag==4) wall_time = time_getWallclockSeconds(); /*----------------------------------------------------------------------- * First Pass: Determine size of B and fill in *-----------------------------------------------------------------------*/ B_diag_i = hypre_CTAlloc(HYPRE_Int, n+1, HYPRE_MEMORY_HOST); B_diag_j = hypre_CTAlloc(HYPRE_Int, inv_size, HYPRE_MEMORY_HOST); B_diag_data = hypre_CTAlloc(HYPRE_Real, inv_size, HYPRE_MEMORY_HOST); B_diag_i[n] = inv_size; //B_offd_i = 
hypre_CTAlloc(HYPRE_Int, n+1, HYPRE_MEMORY_HOST); //B_offd_j = hypre_CTAlloc(HYPRE_Int, 1, HYPRE_MEMORY_HOST); //B_offd_data = hypre_CTAlloc(HYPRE_Real, 1, HYPRE_MEMORY_HOST); //B_offd_i[n] = 1; /*----------------------------------------------------------------- * Get all the diagonal sub-blocks *-----------------------------------------------------------------*/ diaginv = hypre_CTAlloc(HYPRE_Real, nb2, HYPRE_MEMORY_HOST); //printf("n_block = %d\n",n_block); for (i = 0;i < n_block; i++) { bidxm1 = i*blk_size; bidxp1 = (i+1)*blk_size; for (k = 0;k < blk_size; k++) { for (j = 0;j < blk_size; j++) { bidx = k*blk_size + j; diaginv[bidx] = 0.0; } for (ii = A_diag_i[bidxm1+k]; ii < A_diag_i[bidxm1+k+1]; ii++) { jj = A_diag_j[ii]; if (jj >= bidxm1 && jj < bidxp1 && fabs(A_diag_data[ii]) > SMALLREAL) { bidx = k*blk_size + jj - bidxm1; //printf("jj = %d,val = %e, bidx = %d\n",jj,A_diag_data[ii],bidx); diaginv[bidx] = A_diag_data[ii]; } } } /* for (k = 0;k < blk_size; k++) */ /* { */ /* for (j = 0;j < blk_size; j++) */ /* { */ /* bidx = k*blk_size + j; */ /* printf("diaginv[%d] = %e\n",bidx,diaginv[bidx]); */ /* } */ /* } */ hypre_blas_mat_inv(diaginv, blk_size); for (k = 0;k < blk_size; k++) { B_diag_i[i*blk_size+k] = i*nb2 + k*blk_size; //B_offd_i[i*nb2+k] = 0; for (j = 0;j < blk_size; j++) { bidx = i*nb2 + k*blk_size + j; B_diag_j[bidx] = i*blk_size + j; B_diag_data[bidx] = diaginv[k*blk_size + j]; } } } //printf("Before create\n"); B = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), hypre_ParCSRMatrixGlobalNumCols(A), hypre_ParCSRMatrixRowStarts(A), hypre_ParCSRMatrixColStarts(A), 0, inv_size, 0); //printf("After create\n"); B_diag = hypre_ParCSRMatrixDiag(B); hypre_CSRMatrixData(B_diag) = B_diag_data; hypre_CSRMatrixI(B_diag) = B_diag_i; hypre_CSRMatrixJ(B_diag) = B_diag_j; B_offd = hypre_ParCSRMatrixOffd(B); hypre_CSRMatrixData(B_offd) = NULL; hypre_CSRMatrixI(B_offd) = NULL; hypre_CSRMatrixJ(B_offd) = NULL; /* hypre_ParCSRMatrixOwnsRowStarts(B) = 0; */ *B_ptr = B; return(block_scaling_error); } HYPRE_Int hypre_blockRelax_solve (hypre_ParCSRMatrix *A, hypre_ParVector *f, hypre_ParVector *u, HYPRE_Real blk_size, HYPRE_Int n_block, HYPRE_Int left_size, HYPRE_Int method, HYPRE_Real *diaginv, hypre_ParVector *Vtemp) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd); hypre_Vector *u_local = hypre_ParVectorLocalVector(u); HYPRE_Real *u_data = hypre_VectorData(u_local); hypre_Vector *f_local = hypre_ParVectorLocalVector(f); HYPRE_Real *f_data = hypre_VectorData(f_local); hypre_Vector *Vtemp_local = hypre_ParVectorLocalVector(Vtemp); HYPRE_Real *Vtemp_data = hypre_VectorData(Vtemp_local); HYPRE_Real *Vext_data = NULL; HYPRE_Real *v_buf_data; HYPRE_Int i, j, k; HYPRE_Int ii, jj; HYPRE_Int bidx,bidx1; HYPRE_Int relax_error = 0; HYPRE_Int num_sends; HYPRE_Int index, start; HYPRE_Int num_procs, my_id; HYPRE_Real *res; const HYPRE_Int nb2 = blk_size*blk_size; 
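   /*-----------------------------------------------------------------
    * Descriptive note (added): block relaxation sweep. For each
    * diagonal block i the block residual r_i = (f - A*u)_i is
    * accumulated below, using the copy of u in Vtemp for the Jacobi
    * variant (method == 0) or the updated u for the Gauss-Seidel
    * variant (method == 1); off-processor couplings are always
    * handled Jacobi-style through Vext_data. The solution is then
    * updated as
    *
    *    u_i <- u_i + D_i^{-1} * r_i ,
    *
    * where D_i^{-1} is the precomputed inverse of the i-th diagonal
    * block, stored row-major in diaginv[i*nb2 ... (i+1)*nb2 - 1].
    *-----------------------------------------------------------------*/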
hypre_MPI_Comm_size(comm,&num_procs); hypre_MPI_Comm_rank(comm,&my_id); // HYPRE_Int num_threads = hypre_NumThreads(); res = hypre_CTAlloc(HYPRE_Real, blk_size, HYPRE_MEMORY_HOST); if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } if (num_procs > 1) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); Vext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST); if (num_cols_offd) { A_offd_j = hypre_CSRMatrixJ(A_offd); A_offd_data = hypre_CSRMatrixData(A_offd); } index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) v_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data, Vext_data); } /*----------------------------------------------------------------- * Copy current approximation into temporary vector. *-----------------------------------------------------------------*/ #if 0 #endif for (i = 0; i < n; i++) { Vtemp_data[i] = u_data[i]; //printf("u_old[%d] = %e\n",i,Vtemp_data[i]); } if (num_procs > 1) { hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; } /*----------------------------------------------------------------- * Relax points block by block *-----------------------------------------------------------------*/ for (i = 0;i < n_block; i++) { for (j = 0;j < blk_size; j++) { bidx = i*blk_size +j; res[j] = f_data[bidx]; for (jj = A_diag_i[bidx]; jj < A_diag_i[bidx+1]; jj++) { ii = A_diag_j[jj]; if (method == 0) { // Jacobi for diagonal part res[j] -= A_diag_data[jj] * Vtemp_data[ii]; } else if (method == 1) { // Gauss-Seidel for diagonal part res[j] -= A_diag_data[jj] * u_data[ii]; } else { // Default do Jacobi for diagonal part res[j] -= A_diag_data[jj] * Vtemp_data[ii]; } //printf("%d: Au= %e * %e =%e\n",ii,A_diag_data[jj],Vtemp_data[ii], res[j]); } for (jj = A_offd_i[bidx]; jj < A_offd_i[bidx+1]; jj++) { // always do Jacobi for off-diagonal part ii = A_offd_j[jj]; res[j] -= A_offd_data[jj] * Vext_data[ii]; } //printf("%d: res = %e\n",bidx,res[j]); } for (j = 0;j < blk_size; j++) { bidx1 = i*blk_size +j; for (k = 0;k < blk_size; k++) { bidx = i*nb2 +j*blk_size+k; u_data[bidx1] += res[k]*diaginv[bidx]; //printf("u[%d] = %e, diaginv[%d] = %e\n",bidx1,u_data[bidx1],bidx,diaginv[bidx]); } //printf("u[%d] = %e\n",bidx1,u_data[bidx1]); } } if (num_procs > 1) { hypre_TFree(Vext_data, HYPRE_MEMORY_HOST); hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST); } hypre_TFree(res, HYPRE_MEMORY_HOST); return(relax_error); } HYPRE_Int hypre_block_gs (hypre_ParCSRMatrix *A, hypre_ParVector *f, hypre_ParVector *u, HYPRE_Real blk_size, HYPRE_Int n_block, HYPRE_Int left_size, HYPRE_Real *diaginv, hypre_ParVector *Vtemp) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int num_cols_offd = 
hypre_CSRMatrixNumCols(A_offd); hypre_Vector *u_local = hypre_ParVectorLocalVector(u); HYPRE_Real *u_data = hypre_VectorData(u_local); hypre_Vector *f_local = hypre_ParVectorLocalVector(f); HYPRE_Real *f_data = hypre_VectorData(f_local); hypre_Vector *Vtemp_local = hypre_ParVectorLocalVector(Vtemp); HYPRE_Real *Vtemp_data = hypre_VectorData(Vtemp_local); HYPRE_Real *Vext_data = NULL; HYPRE_Real *v_buf_data; HYPRE_Int i, j, k; HYPRE_Int ii, jj; HYPRE_Int bidx,bidx1; HYPRE_Int relax_error = 0; HYPRE_Int num_sends; HYPRE_Int index, start; HYPRE_Int num_procs, my_id; HYPRE_Real *res; const HYPRE_Int nb2 = blk_size*blk_size; hypre_MPI_Comm_size(comm,&num_procs); hypre_MPI_Comm_rank(comm,&my_id); //HYPRE_Int num_threads = hypre_NumThreads(); res = hypre_CTAlloc(HYPRE_Real, blk_size, HYPRE_MEMORY_HOST); if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } if (num_procs > 1) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); Vext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST); if (num_cols_offd) { A_offd_j = hypre_CSRMatrixJ(A_offd); A_offd_data = hypre_CSRMatrixData(A_offd); } index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) v_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data, Vext_data); } /*----------------------------------------------------------------- * Copy current approximation into temporary vector. *-----------------------------------------------------------------*/ #if 0 #endif for (i = 0; i < n; i++) { Vtemp_data[i] = u_data[i]; //printf("u_old[%d] = %e\n",i,Vtemp_data[i]); } if (num_procs > 1) { hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; } /*----------------------------------------------------------------- * Relax points block by block *-----------------------------------------------------------------*/ for (i = 0;i < n_block; i++) { for (j = 0;j < blk_size; j++) { bidx = i*blk_size +j; res[j] = f_data[bidx]; for (jj = A_diag_i[bidx]; jj < A_diag_i[bidx+1]; jj++) { ii = A_diag_j[jj]; //res[j] -= A_diag_data[jj] * Vtemp_data[ii]; //printf("my_id = %d, %d: Au = %e * %e\n",my_id,ii,A_diag_data[jj],Vtemp_data[ii]); res[j] -= A_diag_data[jj] * u_data[ii]; //printf("%d: Au= %e * %e =%e\n",ii,A_diag_data[jj],Vtemp_data[ii], res[j]); } for (jj = A_offd_i[bidx]; jj < A_offd_i[bidx+1]; jj++) { ii = A_offd_j[jj]; res[j] -= A_offd_data[jj] * Vext_data[ii]; } //printf("%d: res = %e\n",bidx,res[j]); } for (j = 0;j < blk_size; j++) { bidx1 = i*blk_size +j; for (k = 0;k < blk_size; k++) { bidx = i*nb2 +j*blk_size+k; u_data[bidx1] += res[k]*diaginv[bidx]; //printf("u[%d] = %e, diaginv[%d] = %e\n",bidx1,u_data[bidx1],bidx,diaginv[bidx]); } //printf("u[%d] = %e\n",bidx1,u_data[bidx1]); } } if (num_procs > 1) { hypre_TFree(Vext_data, HYPRE_MEMORY_HOST); hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST); } hypre_TFree(res, HYPRE_MEMORY_HOST); return(relax_error); } /*Block smoother*/ HYPRE_Int hypre_blockRelax_setup(hypre_ParCSRMatrix *A, HYPRE_Int blk_size, HYPRE_Int reserved_coarse_size, HYPRE_Real **diaginvptr) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); 
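   /* Layout note (added): diaginv is (re)allocated below with room for
    * n_block dense blocks of size blk_size*blk_size plus one
    * left_size*left_size block for the rows that do not fill a complete
    * block; the blocks are inverted in place (hypre_blas_mat_inv) before
    * the pointer is returned through diaginvptr. */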
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int i, j,k; HYPRE_Int ii, jj; HYPRE_Int bidx,bidxm1,bidxp1; HYPRE_Int num_procs, my_id; const HYPRE_Int nb2 = blk_size*blk_size; HYPRE_Int n_block; HYPRE_Int left_size,inv_size; HYPRE_Real *diaginv = *diaginvptr; hypre_MPI_Comm_size(comm,&num_procs); hypre_MPI_Comm_rank(comm,&my_id); //HYPRE_Int num_threads = hypre_NumThreads(); if (my_id == num_procs) { n_block = (n - reserved_coarse_size) / blk_size; left_size = n - blk_size*n_block; } else { n_block = n / blk_size; left_size = n - blk_size*n_block; } inv_size = nb2*n_block + left_size*left_size; if (diaginv !=NULL) { hypre_TFree(diaginv, HYPRE_MEMORY_HOST); diaginv = hypre_CTAlloc(HYPRE_Real, inv_size, HYPRE_MEMORY_HOST); } else { diaginv = hypre_CTAlloc(HYPRE_Real, inv_size, HYPRE_MEMORY_HOST); } /*----------------------------------------------------------------- * Get all the diagonal sub-blocks *-----------------------------------------------------------------*/ for (i = 0;i < n_block; i++) { bidxm1 = i*blk_size; bidxp1 = (i+1)*blk_size; //printf("bidxm1 = %d,bidxp1 = %d\n",bidxm1,bidxp1); for (k = 0;k < blk_size; k++) { for (j = 0;j < blk_size; j++) { bidx = i*nb2 + k*blk_size + j; diaginv[bidx] = 0.0; } for (ii = A_diag_i[bidxm1+k]; ii < A_diag_i[bidxm1+k+1]; ii++) { jj = A_diag_j[ii]; if (jj >= bidxm1 && jj < bidxp1 && fabs(A_diag_data[ii]) > SMALLREAL) { bidx = i*nb2 + k*blk_size + jj - bidxm1; //printf("jj = %d,val = %e, bidx = %d\n",jj,A_diag_data[ii],bidx); diaginv[bidx] = A_diag_data[ii]; } } } } for (i = 0;i < left_size; i++) { bidxm1 =n_block*nb2 + i*blk_size; bidxp1 =n_block*nb2 + (i+1)*blk_size; for (j = 0;j < left_size; j++) { bidx = n_block*nb2 + i*blk_size +j; diaginv[bidx] = 0.0; } for (ii = A_diag_i[n_block*blk_size + i]; ii < A_diag_i[n_block*blk_size+i+1]; ii++) { jj = A_diag_j[ii]; if (jj > n_block*blk_size) { bidx = n_block*nb2 + i*blk_size + jj - n_block*blk_size; diaginv[bidx] = A_diag_data[ii]; } } } /*----------------------------------------------------------------- * compute the inverses of all the diagonal sub-blocks *-----------------------------------------------------------------*/ if (blk_size > 1) { for (i = 0;i < n_block; i++) { hypre_blas_mat_inv(diaginv+i*nb2, blk_size); } hypre_blas_mat_inv(diaginv+(HYPRE_Int)(blk_size*nb2),left_size); } else { for (i = 0;i < n; i++) { // FIX-ME: zero-diagonal should be tested previously if (fabs(diaginv[i]) < SMALLREAL) diaginv[i] = 0.0; else diaginv[i] = 1.0 / diaginv[i]; } } *diaginvptr = diaginv; return 1; } HYPRE_Int hypre_blockRelax(hypre_ParCSRMatrix *A, hypre_ParVector *f, hypre_ParVector *u, HYPRE_Int blk_size, HYPRE_Int reserved_coarse_size, HYPRE_Int method, hypre_ParVector *Vtemp, hypre_ParVector *Ztemp) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int i, j,k; HYPRE_Int ii, jj; HYPRE_Int bidx,bidxm1,bidxp1; HYPRE_Int relax_error = 0; HYPRE_Int num_procs, my_id; const HYPRE_Int nb2 = blk_size*blk_size; HYPRE_Int n_block; HYPRE_Int left_size,inv_size; HYPRE_Real *diaginv; hypre_MPI_Comm_size(comm,&num_procs); hypre_MPI_Comm_rank(comm,&my_id); //HYPRE_Int num_threads = hypre_NumThreads(); if (my_id == num_procs) { n_block = (n - reserved_coarse_size) / blk_size; left_size = n - blk_size*n_block; } else { 
n_block = n / blk_size; left_size = n - blk_size*n_block; } inv_size = nb2*n_block + left_size*left_size; diaginv = hypre_CTAlloc(HYPRE_Real, inv_size, HYPRE_MEMORY_HOST); /*----------------------------------------------------------------- * Get all the diagonal sub-blocks *-----------------------------------------------------------------*/ for (i = 0;i < n_block; i++) { bidxm1 = i*blk_size; bidxp1 = (i+1)*blk_size; //printf("bidxm1 = %d,bidxp1 = %d\n",bidxm1,bidxp1); for (k = 0;k < blk_size; k++) { for (j = 0;j < blk_size; j++) { bidx = i*nb2 + k*blk_size + j; diaginv[bidx] = 0.0; } for (ii = A_diag_i[bidxm1+k]; ii < A_diag_i[bidxm1+k+1]; ii++) { jj = A_diag_j[ii]; if (jj >= bidxm1 && jj < bidxp1 && fabs(A_diag_data[ii]) > SMALLREAL) { bidx = i*nb2 + k*blk_size + jj - bidxm1; //printf("jj = %d,val = %e, bidx = %d\n",jj,A_diag_data[ii],bidx); diaginv[bidx] = A_diag_data[ii]; } } } } for (i = 0;i < left_size; i++) { bidxm1 =n_block*nb2 + i*blk_size; bidxp1 =n_block*nb2 + (i+1)*blk_size; for (j = 0;j < left_size; j++) { bidx = n_block*nb2 + i*blk_size +j; diaginv[bidx] = 0.0; } for (ii = A_diag_i[n_block*blk_size + i]; ii < A_diag_i[n_block*blk_size+i+1]; ii++) { jj = A_diag_j[ii]; if (jj > n_block*blk_size) { bidx = n_block*nb2 + i*blk_size + jj - n_block*blk_size; diaginv[bidx] = A_diag_data[ii]; } } } /* for (i = 0;i < n_block; i++) { for (j = 0;j < blk_size; j++) { for (k = 0;k < blk_size; k ++) { bidx = i*nb2 + j*blk_size + k; printf("%e\t",diaginv[bidx]); } printf("\n"); } printf("\n"); } */ /*----------------------------------------------------------------- * compute the inverses of all the diagonal sub-blocks *-----------------------------------------------------------------*/ if (blk_size > 1) { for (i = 0;i < n_block; i++) { hypre_blas_mat_inv(diaginv+i*nb2, blk_size); } hypre_blas_mat_inv(diaginv+(HYPRE_Int)(blk_size*nb2),left_size); /* for (i = 0;i < n_block; i++) { for (j = 0;j < blk_size; j++) { for (k = 0;k < blk_size; k ++) { bidx = i*nb2 + j*blk_size + k; printf("%e\t",diaginv[bidx]); } printf("\n"); } printf("\n"); } */ } else { for (i = 0;i < n; i++) { // FIX-ME: zero-diagonal should be tested previously if (fabs(diaginv[i]) < SMALLREAL) diaginv[i] = 0.0; else diaginv[i] = 1.0 / diaginv[i]; } } hypre_blockRelax_solve(A,f,u,blk_size,n_block,left_size,method,diaginv,Vtemp); /*----------------------------------------------------------------- * Free temperary memeory *-----------------------------------------------------------------*/ hypre_TFree(diaginv, HYPRE_MEMORY_HOST); return(relax_error); } /* set coarse grid solver */ HYPRE_Int hypre_MGRSetFSolver( void *mgr_vdata, HYPRE_Int (*fine_grid_solver_solve)(void*,void*,void*,void*), HYPRE_Int (*fine_grid_solver_setup)(void*,void*,void*,void*), void *fsolver ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; if (!mgr_data) { hypre_error_in_arg(1); return hypre_error_flag; } HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); HYPRE_Solver **aff_solver = (mgr_data -> aff_solver); if (aff_solver == NULL) aff_solver = hypre_CTAlloc(HYPRE_Solver*, max_num_coarse_levels, HYPRE_MEMORY_HOST); /* only allow to set F-solver for the first level */ aff_solver[0] = (HYPRE_Solver *) fsolver; (mgr_data -> fine_grid_solver_solve) = fine_grid_solver_solve; (mgr_data -> fine_grid_solver_setup) = fine_grid_solver_setup; (mgr_data -> aff_solver) = aff_solver; (mgr_data -> use_default_fsolver) = 0; return hypre_error_flag; } /* set coarse grid solver */ HYPRE_Int hypre_MGRSetCoarseSolver( void *mgr_vdata, HYPRE_Int 
(*coarse_grid_solver_solve)(void*,void*,void*,void*), HYPRE_Int (*coarse_grid_solver_setup)(void*,void*,void*,void*), void *coarse_grid_solver ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; if (!mgr_data) { hypre_error_in_arg(1); return hypre_error_flag; } (mgr_data -> coarse_grid_solver_solve) = coarse_grid_solver_solve; (mgr_data -> coarse_grid_solver_setup) = coarse_grid_solver_setup; (mgr_data -> coarse_grid_solver) = (HYPRE_Solver) coarse_grid_solver; (mgr_data -> use_default_cgrid_solver) = 0; return hypre_error_flag; } HYPRE_Int hypre_MGRSetAffInv( void *mgr_vdata, hypre_ParCSRMatrix *A_ff_inv ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> A_ff_inv) = A_ff_inv; return hypre_error_flag; } /* Set the maximum number of coarse levels. * maxcoarselevs = 1 yields the default 2-grid scheme. */ HYPRE_Int hypre_MGRSetMaxCoarseLevels( void *mgr_vdata, HYPRE_Int maxcoarselevs ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> max_num_coarse_levels) = maxcoarselevs; return hypre_error_flag; } /* Set the system block size */ HYPRE_Int hypre_MGRSetBlockSize( void *mgr_vdata, HYPRE_Int bsize ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> block_size) = bsize; return hypre_error_flag; } /* Set the relaxation type for the fine levels of the reduction. * Currently supports the following flavors of relaxation types * as described in the documentation: * relax_types 0 - 8, 13, 14, 18, 19, 98. * See par_relax.c and par_relax_more.c for more details. * */ HYPRE_Int hypre_MGRSetRelaxType( void *mgr_vdata, HYPRE_Int relax_type ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> relax_type) = relax_type; return hypre_error_flag; } /* Set the number of relaxation sweeps */ HYPRE_Int hypre_MGRSetNumRelaxSweeps( void *mgr_vdata, HYPRE_Int nsweeps ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> num_relax_sweeps) = nsweeps; return hypre_error_flag; } /* Set the F-relaxation strategy: 0=single level, 1=multi level */ HYPRE_Int hypre_MGRSetFRelaxMethod( void *mgr_vdata, HYPRE_Int relax_method ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i; HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); if((mgr_data -> Frelax_method) != NULL) { hypre_TFree(mgr_data -> Frelax_method, HYPRE_MEMORY_HOST); (mgr_data -> Frelax_method) = NULL; } HYPRE_Int *Frelax_method = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST); for (i=0; i < max_num_coarse_levels; i++) { Frelax_method[i] = relax_method; } (mgr_data -> Frelax_method) = Frelax_method; return hypre_error_flag; } /* Set the F-relaxation strategy: 0=single level, 1=multi level */ HYPRE_Int hypre_MGRSetLevelFRelaxMethod( void *mgr_vdata, HYPRE_Int *relax_method ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i; HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); if((mgr_data -> Frelax_method) != NULL) { hypre_TFree(mgr_data -> Frelax_method, HYPRE_MEMORY_HOST); (mgr_data -> Frelax_method) = NULL; } HYPRE_Int *Frelax_method = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST); if (relax_method != NULL) { for (i=0; i < max_num_coarse_levels; i++) { Frelax_method[i] = relax_method[i]; } } else { for (i = 0; i < max_num_coarse_levels; i++) { Frelax_method[i] = 0; } } (mgr_data -> Frelax_method) = Frelax_method; return hypre_error_flag; } /* Coarse grid method: 0=Galerkin RAP, 1=non-Galerkin with 
dropping*/ HYPRE_Int hypre_MGRSetCoarseGridMethod( void *mgr_vdata, HYPRE_Int *cg_method ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i; HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); if((mgr_data -> use_non_galerkin_cg) != NULL) { hypre_TFree(mgr_data -> use_non_galerkin_cg, HYPRE_MEMORY_HOST); (mgr_data -> use_non_galerkin_cg) = NULL; } HYPRE_Int *use_non_galerkin_cg = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST); if (cg_method != NULL) { for (i=0; i < max_num_coarse_levels; i++) { use_non_galerkin_cg[i] = cg_method[i]; } } else { for (i = 0; i < max_num_coarse_levels; i++) { use_non_galerkin_cg[i] = 0; } } (mgr_data -> use_non_galerkin_cg) = use_non_galerkin_cg; return hypre_error_flag; } /* Set the F-relaxation number of functions for each level */ HYPRE_Int hypre_MGRSetLevelFRelaxNumFunctions( void *mgr_vdata, HYPRE_Int *num_functions ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i; HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); if((mgr_data -> Frelax_num_functions) != NULL) { hypre_TFree(mgr_data -> Frelax_num_functions, HYPRE_MEMORY_HOST); (mgr_data -> Frelax_num_functions) = NULL; } HYPRE_Int *Frelax_num_functions = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST); if (num_functions != NULL) { for (i=0; i < max_num_coarse_levels; i++) { Frelax_num_functions[i] = num_functions[i]; } } else { for (i = 0; i < max_num_coarse_levels; i++) { Frelax_num_functions[i] = 1; } } (mgr_data -> Frelax_num_functions) = Frelax_num_functions; return hypre_error_flag; } /* Set the type of the restriction type * for computing restriction operator */ HYPRE_Int hypre_MGRSetLevelRestrictType( void *mgr_vdata, HYPRE_Int *restrict_type) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i; HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); if ((mgr_data -> restrict_type) != NULL) { hypre_TFree((mgr_data -> restrict_type), HYPRE_MEMORY_HOST); (mgr_data -> restrict_type) = NULL; } HYPRE_Int *level_restrict_type = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST); if (restrict_type != NULL) { for (i=0; i < max_num_coarse_levels; i++) { level_restrict_type[i] = *(restrict_type + i); } } else { for (i=0; i < max_num_coarse_levels; i++) { level_restrict_type[i] = 0; } } (mgr_data -> restrict_type) = level_restrict_type; return hypre_error_flag; } /* Set the type of the restriction type * for computing restriction operator */ HYPRE_Int hypre_MGRSetRestrictType( void *mgr_vdata, HYPRE_Int restrict_type) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i; HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); if ((mgr_data -> restrict_type) != NULL) { hypre_TFree((mgr_data -> restrict_type), HYPRE_MEMORY_HOST); (mgr_data -> restrict_type) = NULL; } HYPRE_Int *level_restrict_type = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST); for (i=0; i < max_num_coarse_levels; i++) { level_restrict_type[i] = restrict_type; } (mgr_data -> restrict_type) = level_restrict_type; return hypre_error_flag; } /* Set the number of Jacobi interpolation iterations * for computing interpolation operator */ HYPRE_Int hypre_MGRSetNumRestrictSweeps( void *mgr_vdata, HYPRE_Int nsweeps ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> num_restrict_sweeps) = nsweeps; return hypre_error_flag; } /* Set the type of the interpolation * for computing interpolation 
operator */ HYPRE_Int hypre_MGRSetInterpType( void *mgr_vdata, HYPRE_Int interpType) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i; HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); if ((mgr_data -> interp_type) != NULL) { hypre_TFree((mgr_data -> interp_type), HYPRE_MEMORY_HOST); (mgr_data -> interp_type) = NULL; } HYPRE_Int *level_interp_type = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST); for (i=0; i < max_num_coarse_levels; i++) { level_interp_type[i] = interpType; } (mgr_data -> interp_type) = level_interp_type; return hypre_error_flag; } /* Set the type of the interpolation * for computing interpolation operator */ HYPRE_Int hypre_MGRSetLevelInterpType( void *mgr_vdata, HYPRE_Int *interpType) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i; HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); if ((mgr_data -> interp_type) != NULL) { hypre_TFree((mgr_data -> interp_type), HYPRE_MEMORY_HOST); (mgr_data -> interp_type) = NULL; } HYPRE_Int *level_interp_type = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST); if (interpType != NULL) { for (i=0; i < max_num_coarse_levels; i++) { level_interp_type[i] = *(interpType + i); } } else { for (i=0; i < max_num_coarse_levels; i++) { level_interp_type[i] = 2; } } (mgr_data -> interp_type) = level_interp_type; return hypre_error_flag; } /* Set the number of Jacobi interpolation iterations * for computing interpolation operator */ HYPRE_Int hypre_MGRSetNumInterpSweeps( void *mgr_vdata, HYPRE_Int nsweeps ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> num_interp_sweeps) = nsweeps; return hypre_error_flag; } /* Set the threshold to truncate the coarse grid at each * level of reduction */ HYPRE_Int hypre_MGRSetTruncateCoarseGridThreshold( void *mgr_vdata, HYPRE_Real threshold) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> truncate_coarse_grid_threshold) = threshold; return hypre_error_flag; } /* Set print level for mgr solver */ HYPRE_Int hypre_MGRSetPrintLevel( void *mgr_vdata, HYPRE_Int print_level ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> print_level) = print_level; return hypre_error_flag; } /* Set print level for mgr solver */ HYPRE_Int hypre_MGRSetLogging( void *mgr_vdata, HYPRE_Int logging ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> logging) = logging; return hypre_error_flag; } /* Set max number of iterations for mgr solver */ HYPRE_Int hypre_MGRSetMaxIter( void *mgr_vdata, HYPRE_Int max_iter ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> max_iter) = max_iter; return hypre_error_flag; } /* Set convergence tolerance for mgr solver */ HYPRE_Int hypre_MGRSetTol( void *mgr_vdata, HYPRE_Real tol ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> tol) = tol; return hypre_error_flag; } /* Set max number of iterations for mgr global smoother */ HYPRE_Int hypre_MGRSetMaxGlobalsmoothIters( void *mgr_vdata, HYPRE_Int max_iter ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> global_smooth_iters) = max_iter; return hypre_error_flag; } /* Set global smoothing type for mgr solver */ HYPRE_Int hypre_MGRSetGlobalsmoothType( void *mgr_vdata, HYPRE_Int iter_type ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> global_smooth_type) = iter_type; return hypre_error_flag; } /* Set the maximum number of 
non-zero entries for restriction and interpolation operator if classical AMG interpolation is used */ HYPRE_Int hypre_MGRSetPMaxElmts( void *mgr_vdata, HYPRE_Int P_max_elmts) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> P_max_elmts) = P_max_elmts; return hypre_error_flag; } /* Get number of iterations for MGR solver */ HYPRE_Int hypre_MGRGetNumIterations( void *mgr_vdata, HYPRE_Int *num_iterations ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; if (!mgr_data) { hypre_error_in_arg(1); return hypre_error_flag; } *num_iterations = mgr_data->num_iterations; return hypre_error_flag; } /* Get residual norms for MGR solver */ HYPRE_Int hypre_MGRGetFinalRelativeResidualNorm( void *mgr_vdata, HYPRE_Real *res_norm ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; if (!mgr_data) { hypre_error_in_arg(1); return hypre_error_flag; } *res_norm = mgr_data->final_rel_residual_norm; return hypre_error_flag; } HYPRE_Int hypre_MGRGetCoarseGridConvergenceFactor( void *mgr_vdata , HYPRE_Real *conv_factor ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; if (!mgr_data) { hypre_error_in_arg(1); return hypre_error_flag; } *conv_factor = (mgr_data -> cg_convergence_factor); return hypre_error_flag; } /* Build A_FF matrix from A given a CF_marker array */ HYPRE_Int hypre_MGRGetSubBlock( hypre_ParCSRMatrix *A, HYPRE_Int *row_cf_marker, HYPRE_Int *col_cf_marker, HYPRE_Int debug_flag, hypre_ParCSRMatrix **A_block_ptr ) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); //HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A); HYPRE_Int *coarse_dof_func_ptr = NULL; HYPRE_BigInt *num_row_cpts_global = NULL; HYPRE_BigInt *num_col_cpts_global = NULL; hypre_ParCSRMatrix *Ablock; HYPRE_BigInt *col_map_offd_Ablock; HYPRE_Int *tmp_map_offd = NULL; HYPRE_Int *CF_marker_offd = NULL; hypre_CSRMatrix *Ablock_diag; hypre_CSRMatrix *Ablock_offd; HYPRE_Real *Ablock_diag_data; HYPRE_Int *Ablock_diag_i; HYPRE_Int *Ablock_diag_j; HYPRE_Real *Ablock_offd_data; HYPRE_Int *Ablock_offd_i; HYPRE_Int *Ablock_offd_j; HYPRE_Int Ablock_diag_size, Ablock_offd_size; HYPRE_Int *Ablock_marker; HYPRE_Int ii_counter; HYPRE_Int jj_counter, jj_counter_offd; HYPRE_Int *jj_count, *jj_count_offd; HYPRE_Int start_indexing = 0; /* start indexing for Aff_data at 0 */ HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int *fine_to_coarse; HYPRE_Int *coarse_counter; HYPRE_Int *col_coarse_counter; HYPRE_Int coarse_shift; HYPRE_BigInt total_global_row_cpts; HYPRE_BigInt total_global_col_cpts; HYPRE_Int num_cols_Ablock_offd; // HYPRE_BigInt my_first_row_cpt, my_first_col_cpt; HYPRE_Int i,i1; HYPRE_Int j,jl,jj; HYPRE_Int start; HYPRE_Int my_id; HYPRE_Int num_procs; HYPRE_Int num_threads; HYPRE_Int num_sends; HYPRE_Int index; HYPRE_Int ns, ne, size, rest; HYPRE_Int *int_buf_data; HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag); // HYPRE_Real wall_time; /* for debugging instrumentation */ hypre_MPI_Comm_size(comm, &num_procs); 
hypre_MPI_Comm_rank(comm,&my_id); //num_threads = hypre_NumThreads(); // Temporary fix, disable threading // TODO: enable threading num_threads = 1; /* get the number of coarse rows */ hypre_BoomerAMGCoarseParms(comm, local_numrows, 1, NULL, row_cf_marker, &coarse_dof_func_ptr, &num_row_cpts_global); hypre_TFree(coarse_dof_func_ptr, HYPRE_MEMORY_HOST); coarse_dof_func_ptr = NULL; //hypre_printf("my_id = %d, cpts_this = %d, cpts_next = %d\n", my_id, num_row_cpts_global[0], num_row_cpts_global[1]); #ifdef HYPRE_NO_GLOBAL_PARTITION // my_first_row_cpt = num_row_cpts_global[0]; if (my_id == (num_procs -1)) total_global_row_cpts = num_row_cpts_global[1]; hypre_MPI_Bcast(&total_global_row_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); #else // my_first_row_cpt = num_row_cpts_global[my_id]; total_global_row_cpts = num_row_cpts_global[num_procs]; #endif /* get the number of coarse rows */ hypre_BoomerAMGCoarseParms(comm, local_numrows, 1, NULL, col_cf_marker, &coarse_dof_func_ptr, &num_col_cpts_global); hypre_TFree(coarse_dof_func_ptr, HYPRE_MEMORY_HOST); coarse_dof_func_ptr = NULL; //hypre_printf("my_id = %d, cpts_this = %d, cpts_next = %d\n", my_id, num_col_cpts_global[0], num_col_cpts_global[1]); #ifdef HYPRE_NO_GLOBAL_PARTITION // my_first_col_cpt = num_col_cpts_global[0]; if (my_id == (num_procs -1)) total_global_col_cpts = num_col_cpts_global[1]; hypre_MPI_Bcast(&total_global_col_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); #else // my_first_col_cpt = num_col_cpts_global[my_id]; total_global_col_cpts = num_col_cpts_global[num_procs]; #endif /*------------------------------------------------------------------- * Get the CF_marker data for the off-processor columns *-------------------------------------------------------------------*/ if (debug_flag < 0) { debug_flag = -debug_flag; } // if (debug_flag==4) wall_time = time_getWallclockSeconds(); if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = col_cf_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); /*----------------------------------------------------------------------- * First Pass: Determine size of Ablock and fill in fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. 
*-----------------------------------------------------------------------*/ coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); col_coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); #if 0 #endif for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1; jj_counter = start_indexing; jj_counter_offd = start_indexing; /*----------------------------------------------------------------------- * Loop over fine grid. *-----------------------------------------------------------------------*/ /* RDF: this looks a little tricky, but doable */ #if 0 #endif for (j = 0; j < num_threads; j++) { size = n_fine/num_threads; rest = n_fine - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a F-point, we loop through the columns and select * the F-columns. Also set up mapping vector. *--------------------------------------------------------------------*/ if (col_cf_marker[i] > 0) { fine_to_coarse[i] = col_coarse_counter[j]; col_coarse_counter[j]++; } if (row_cf_marker[i] > 0) { //fine_to_coarse[i] = coarse_counter[j]; coarse_counter[j]++; for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { i1 = A_diag_j[jj]; if (col_cf_marker[i1] > 0) { jj_count[j]++; } } if (num_procs > 1) { for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { i1 = A_offd_j[jj]; if (CF_marker_offd[i1] > 0) { jj_count_offd[j]++; } } } } } } /*----------------------------------------------------------------------- * Allocate arrays. *-----------------------------------------------------------------------*/ for (i=0; i < num_threads-1; i++) { jj_count[i+1] += jj_count[i]; jj_count_offd[i+1] += jj_count_offd[i]; coarse_counter[i+1] += coarse_counter[i]; col_coarse_counter[i+1] += col_coarse_counter[i]; } i = num_threads-1; jj_counter = jj_count[i]; jj_counter_offd = jj_count_offd[i]; ii_counter = coarse_counter[i]; Ablock_diag_size = jj_counter; Ablock_diag_i = hypre_CTAlloc(HYPRE_Int, ii_counter+1, HYPRE_MEMORY_HOST); Ablock_diag_j = hypre_CTAlloc(HYPRE_Int, Ablock_diag_size, HYPRE_MEMORY_HOST); Ablock_diag_data = hypre_CTAlloc(HYPRE_Real, Ablock_diag_size, HYPRE_MEMORY_HOST); Ablock_diag_i[ii_counter] = jj_counter; Ablock_offd_size = jj_counter_offd; Ablock_offd_i = hypre_CTAlloc(HYPRE_Int, ii_counter+1, HYPRE_MEMORY_HOST); Ablock_offd_j = hypre_CTAlloc(HYPRE_Int, Ablock_offd_size, HYPRE_MEMORY_HOST); Ablock_offd_data = hypre_CTAlloc(HYPRE_Real, Ablock_offd_size, HYPRE_MEMORY_HOST); /*----------------------------------------------------------------------- * Intialize some stuff. *-----------------------------------------------------------------------*/ jj_counter = start_indexing; jj_counter_offd = start_indexing; //----------------------------------------------------------------------- // Send and receive fine_to_coarse info. 
//----------------------------------------------------------------------- // if (debug_flag==4) wall_time = time_getWallclockSeconds(); #if 0 #endif for (j = 0; j < num_threads; j++) { coarse_shift = 0; if (j > 0) coarse_shift = col_coarse_counter[j-1]; size = n_fine/num_threads; rest = n_fine - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) fine_to_coarse[i] += coarse_shift; } // if (debug_flag==4) wall_time = time_getWallclockSeconds(); #if 0 #endif // for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_col_cpt; #if 0 #endif for (jl = 0; jl < num_threads; jl++) { size = n_fine/num_threads; rest = n_fine - size*num_threads; if (jl < rest) { ns = jl*size+jl; ne = (jl+1)*size+jl+1; } else { ns = jl*size+rest; ne = (jl+1)*size+rest; } jj_counter = 0; if (jl > 0) jj_counter = jj_count[jl-1]; jj_counter_offd = 0; if (jl > 0) jj_counter_offd = jj_count_offd[jl-1]; ii_counter = 0; for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a F-point, we loop through the columns and select * the F-columns. Also set up mapping vector. *--------------------------------------------------------------------*/ if (row_cf_marker[i] > 0) { // Diagonal part of Ablock // Ablock_diag_i[ii_counter] = jj_counter; for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { i1 = A_diag_j[jj]; if (col_cf_marker[i1] > 0) { Ablock_diag_j[jj_counter] = fine_to_coarse[i1]; Ablock_diag_data[jj_counter] = A_diag_data[jj]; jj_counter++; } } // Off-Diagonal part of Ablock // Ablock_offd_i[ii_counter] = jj_counter_offd; if (num_procs > 1) { for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { i1 = A_offd_j[jj]; if (CF_marker_offd[i1] > 0) { Ablock_offd_j[jj_counter_offd] = i1; Ablock_offd_data[jj_counter_offd] = A_offd_data[jj]; jj_counter_offd++; } } } ii_counter++; } } Ablock_offd_i[ii_counter] = jj_counter_offd; Ablock_diag_i[ii_counter] = jj_counter; } Ablock = hypre_ParCSRMatrixCreate(comm, total_global_row_cpts, total_global_col_cpts, num_row_cpts_global, num_col_cpts_global, 0, Ablock_diag_i[ii_counter], Ablock_offd_i[ii_counter]); Ablock_diag = hypre_ParCSRMatrixDiag(Ablock); hypre_CSRMatrixData(Ablock_diag) = Ablock_diag_data; hypre_CSRMatrixI(Ablock_diag) = Ablock_diag_i; hypre_CSRMatrixJ(Ablock_diag) = Ablock_diag_j; Ablock_offd = hypre_ParCSRMatrixOffd(Ablock); hypre_CSRMatrixData(Ablock_offd) = Ablock_offd_data; hypre_CSRMatrixI(Ablock_offd) = Ablock_offd_i; hypre_CSRMatrixJ(Ablock_offd) = Ablock_offd_j; num_cols_Ablock_offd = 0; if (Ablock_offd_size) { Ablock_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); #if 0 #endif for (i=0; i < num_cols_A_offd; i++) Ablock_marker[i] = 0; num_cols_Ablock_offd = 0; for (i=0; i < Ablock_offd_size; i++) { index = Ablock_offd_j[i]; if (!Ablock_marker[index]) { num_cols_Ablock_offd++; Ablock_marker[index] = 1; } } col_map_offd_Ablock = hypre_CTAlloc(HYPRE_BigInt, num_cols_Ablock_offd, HYPRE_MEMORY_HOST); tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_Ablock_offd, HYPRE_MEMORY_HOST); index = 0; for (i=0; i < num_cols_Ablock_offd; i++) { while (Ablock_marker[index]==0) index++; tmp_map_offd[i] = index++; } #if 0 #endif for (i=0; i < Ablock_offd_size; i++) Ablock_offd_j[i] = hypre_BinarySearch(tmp_map_offd, Ablock_offd_j[i], num_cols_Ablock_offd); hypre_TFree(Ablock_marker, HYPRE_MEMORY_HOST); } if (num_cols_Ablock_offd) { hypre_ParCSRMatrixColMapOffd(Ablock) = col_map_offd_Ablock; 
hypre_CSRMatrixNumCols(Ablock_offd) = num_cols_Ablock_offd; } hypre_GetCommPkgRTFromCommPkgA(Ablock, A, fine_to_coarse, tmp_map_offd); #ifdef HYPRE_NO_GLOBAL_PARTITION /* Create the assumed partition */ if (hypre_ParCSRMatrixAssumedPartition(Ablock) == NULL) { hypre_ParCSRMatrixCreateAssumedPartition(Ablock); } #endif *A_block_ptr= Ablock; hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST); hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST); hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST); hypre_TFree(col_coarse_counter, HYPRE_MEMORY_HOST); hypre_TFree(jj_count, HYPRE_MEMORY_HOST); hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST); return(0); } /* Build A_FF matrix from A given a CF_marker array */ HYPRE_Int hypre_MGRBuildAff( hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, HYPRE_Int debug_flag, hypre_ParCSRMatrix **A_ff_ptr ) { HYPRE_Int i; HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A)); /* create a copy of the CF_marker array and switch C-points to F-points */ HYPRE_Int *CF_marker_copy = hypre_CTAlloc(HYPRE_Int, local_numrows, HYPRE_MEMORY_HOST); #if 0 #endif for (i = 0; i < local_numrows; i++) { CF_marker_copy[i] = -CF_marker[i]; } hypre_MGRGetSubBlock(A, CF_marker_copy, CF_marker_copy, debug_flag, A_ff_ptr); /* Free copy of CF marker */ hypre_TFree(CF_marker_copy, HYPRE_MEMORY_HOST); return(0); } /********************************************************************************* * This routine assumes that the 'toVector' is larger than the 'fromVector' and * the CF_marker is of the same length as the toVector. There must be n 'point_type' * values in the CF_marker, where n is the length of the 'fromVector'. * It adds the values of the 'fromVector' to the 'toVector' where the marker is the * same as the 'point_type' *********************************************************************************/ HYPRE_Int hypre_MGRAddVectorP ( HYPRE_Int *CF_marker, HYPRE_Int point_type, HYPRE_Real a, hypre_ParVector *fromVector, HYPRE_Real b, hypre_ParVector **toVector ) { hypre_Vector *fromVectorLocal = hypre_ParVectorLocalVector(fromVector); HYPRE_Real *fromVectorData = hypre_VectorData(fromVectorLocal); hypre_Vector *toVectorLocal = hypre_ParVectorLocalVector(*toVector); HYPRE_Real *toVectorData = hypre_VectorData(toVectorLocal); HYPRE_Int n = hypre_ParVectorActualLocalSize(*toVector); HYPRE_Int i, j; j = 0; for (i = 0; i < n; i++) { if (CF_marker[i] == point_type) { toVectorData[i] = b * toVectorData[i] + a * fromVectorData[j]; j++; } } return 0; } /************************************************************************************* * This routine assumes that the 'fromVector' is larger than the 'toVector' and * the CF_marker is of the same length as the fromVector. There must be n 'point_type' * values in the CF_marker, where n is the length of the 'toVector'. 
* It adds the values of the 'fromVector' where the marker is the * same as the 'point_type' to the 'toVector' *************************************************************************************/ HYPRE_Int hypre_MGRAddVectorR ( HYPRE_Int *CF_marker, HYPRE_Int point_type, HYPRE_Real a, hypre_ParVector *fromVector, HYPRE_Real b, hypre_ParVector **toVector ) { hypre_Vector *fromVectorLocal = hypre_ParVectorLocalVector(fromVector); HYPRE_Real *fromVectorData = hypre_VectorData(fromVectorLocal); hypre_Vector *toVectorLocal = hypre_ParVectorLocalVector(*toVector); HYPRE_Real *toVectorData = hypre_VectorData(toVectorLocal); HYPRE_Int n = hypre_ParVectorActualLocalSize(fromVector); HYPRE_Int i, j; j = 0; for (i = 0; i < n; i++) { if (CF_marker[i] == point_type) { toVectorData[j] = b * toVectorData[j] + a * fromVectorData[i]; j++; } } return 0; } /* HYPRE_Int hypre_MGRBuildAffRAP( MPI_Comm comm, HYPRE_Int local_num_variables, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int *CF_marker, HYPRE_Int **coarse_dof_func_ptr, HYPRE_BigInt **coarse_pnts_global_ptr, hypre_ParCSRMatrix *A, HYPRE_Int debug_flag, hypre_ParCSRMatrix **P_f_ptr, hypre_ParCSRMatrix **A_ff_ptr ) { HYPRE_Int *CF_marker_copy = hypre_CTAlloc(HYPRE_Int, local_num_variables, HYPRE_MEMORY_HOST); HYPRE_Int i; for (i = 0; i < local_num_variables; i++) { CF_marker_copy[i] = -CF_marker[i]; } hypre_BoomerAMGCoarseParms(comm, local_num_variables, 1, NULL, CF_marker_copy, coarse_dof_func_ptr, coarse_pnts_global_ptr); hypre_MGRBuildP(A, CF_marker_copy, (*coarse_pnts_global_ptr), 0, debug_flag, P_f_ptr); hypre_BoomerAMGBuildCoarseOperator(*P_f_ptr, A, *P_f_ptr, A_ff_ptr); hypre_TFree(CF_marker_copy, HYPRE_MEMORY_HOST); return 0; } */ /* Get pointer to coarse grid matrix for MGR solver */ HYPRE_Int hypre_MGRGetCoarseGridMatrix( void *mgr_vdata, hypre_ParCSRMatrix **RAP ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; if (!mgr_data) { hypre_error_in_arg(1); return hypre_error_flag; } if (mgr_data -> RAP == NULL) { hypre_error_w_msg(HYPRE_ERROR_GENERIC," Coarse grid matrix is NULL. Please make sure MGRSetup() is called \n"); return hypre_error_flag; } *RAP = mgr_data->RAP; return hypre_error_flag; } /* Get pointer to coarse grid solution for MGR solver */ HYPRE_Int hypre_MGRGetCoarseGridSolution( void *mgr_vdata, hypre_ParVector **sol ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; if (!mgr_data) { hypre_error_in_arg(1); return hypre_error_flag; } if (mgr_data -> U_array == NULL) { hypre_error_w_msg(HYPRE_ERROR_GENERIC," MGR solution array is NULL. Please make sure MGRSetup() and MGRSolve() are called \n"); return hypre_error_flag; } *sol = mgr_data->U_array[mgr_data->num_coarse_levels]; return hypre_error_flag; } /* Get pointer to coarse grid solution for MGR solver */ HYPRE_Int hypre_MGRGetCoarseGridRHS( void *mgr_vdata, hypre_ParVector **rhs ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; if (!mgr_data) { hypre_error_in_arg(1); return hypre_error_flag; } if (mgr_data -> F_array == NULL) { hypre_error_w_msg(HYPRE_ERROR_GENERIC," MGR RHS array is NULL. 
Please make sure MGRSetup() and MGRSolve() are called \n"); return hypre_error_flag; } *rhs = mgr_data->F_array[mgr_data->num_coarse_levels]; return hypre_error_flag; } /* Print coarse grid linear system (for debugging)*/ HYPRE_Int hypre_MGRPrintCoarseSystem( void *mgr_vdata, HYPRE_Int print_flag) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; mgr_data->print_coarse_system = print_flag; return hypre_error_flag; } /* Print solver params */ HYPRE_Int hypre_MGRWriteSolverParams(void *mgr_vdata) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i, j; HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); hypre_printf("MGR Setup parameters: \n"); hypre_printf("Block size: %d\n", (mgr_data -> block_size)); hypre_printf("Max number of coarse levels: %d\n", (mgr_data -> max_num_coarse_levels)); hypre_printf("Relax type: %d\n", (mgr_data -> relax_type)); hypre_printf("Set non-Cpoints to F-points: %d\n", (mgr_data -> set_non_Cpoints_to_F)); hypre_printf("Set Cpoints method: %d\n", (mgr_data -> set_c_points_method)); for (i = 0; i < max_num_coarse_levels; i++) { hypre_printf("Lev = %d, Interpolation type: %d\n", i, (mgr_data -> interp_type)[i]); hypre_printf("Lev = %d, Restriction type: %d\n", i, (mgr_data -> restrict_type)[i]); hypre_printf("Lev = %d, F-relaxation method: %d\n", i, (mgr_data -> Frelax_method)[i]); hypre_printf("Lev = %d, Use non-Galerkin coarse grid: %d\n", i, (mgr_data -> use_non_galerkin_cg)[i]); HYPRE_Int lvl_num_coarse_points = (mgr_data -> block_num_coarse_indexes)[i]; hypre_printf("Lev = %d, Number of Cpoints: %d\n", i, lvl_num_coarse_points); hypre_printf("Cpoints indices: "); for (j = 0; j < lvl_num_coarse_points; j++) { if ((mgr_data -> block_cf_marker)[i][j] == 1) { hypre_printf("%d ", j); } } hypre_printf("\n"); } hypre_printf("Number of Reserved Cpoints: %d\n", (mgr_data -> reserved_coarse_size)); hypre_printf("Keep reserved Cpoints to level: %d\n", (mgr_data -> lvl_to_keep_cpoints)); hypre_printf("\n MGR Solver Parameters: \n"); hypre_printf("Number of relax sweeps: %d\n", (mgr_data -> num_relax_sweeps)); hypre_printf("Number of interpolation sweeps: %d\n", (mgr_data -> num_interp_sweeps)); hypre_printf("Number of restriction sweeps: %d\n", (mgr_data -> num_restrict_sweeps)); hypre_printf("Global smoother type: %d\n", (mgr_data ->global_smooth_type)); hypre_printf("Number of global smoother sweeps: %d\n", (mgr_data ->global_smooth_iters)); hypre_printf("Max number of iterations: %d\n", (mgr_data -> max_iter)); hypre_printf("Stopping tolerance: %e\n", (mgr_data -> tol)); hypre_printf("Use default coarse grid solver: %d\n", (mgr_data -> use_default_cgrid_solver)); if((mgr_data -> use_default_fsolver) >= 0) { hypre_printf("Use default AMG solver for full AMG F-relaxation: %d\n", (mgr_data -> use_default_fsolver)); } return hypre_error_flag; } #ifdef HYPRE_USING_DSUPERLU void * hypre_MGRDirectSolverCreate() { hypre_DSLUData *dslu_data = hypre_CTAlloc(hypre_DSLUData, 1, HYPRE_MEMORY_HOST); return (void *) dslu_data; } HYPRE_Int hypre_MGRDirectSolverSetup( void *solver, hypre_ParCSRMatrix *A, hypre_ParVector *f, hypre_ParVector *u ) { /* Par Data Structure variables */ HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A); MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_CSRMatrix *A_local; HYPRE_Int num_rows; HYPRE_Int num_procs, my_id; HYPRE_Int pcols=1, prows=1; HYPRE_BigInt *big_rowptr = NULL; hypre_DSLUData *dslu_data = (hypre_DSLUData *) solver; HYPRE_Int info = 0; HYPRE_Int nrhs = 0; 
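   /*-----------------------------------------------------------------
    * Setup outline (added note): the diag and offd parts of A are
    * merged into a single local CSR block with global column indices
    * (hypre_MergeDiagAndOffd), wrapped as an SLU_NR_loc SuperMatrix,
    * and a SuperLU_DIST process grid is created; pdgssvx is then
    * called with nrhs = 0 so that only the LU factorization is
    * computed here.  Fact is set to FACTORED afterwards, so later
    * calls through hypre_MGRDirectSolverSolve reuse the factors.
    *-----------------------------------------------------------------*/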
hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); /* Merge diag and offd into one matrix (global ids) */ A_local = hypre_MergeDiagAndOffd(A); num_rows = hypre_CSRMatrixNumRows(A_local); /* Now convert hypre matrix to a SuperMatrix */ #ifdef HYPRE_MIXEDINT { HYPRE_Int *rowptr = NULL; HYPRE_Int i; rowptr = hypre_CSRMatrixI(A_local); big_rowptr = hypre_CTAlloc(HYPRE_BigInt, (num_rows+1), HYPRE_MEMORY_HOST); for(i=0; i<(num_rows+1); i++) { big_rowptr[i] = (HYPRE_BigInt)rowptr[i]; } } #else big_rowptr = hypre_CSRMatrixI(A_local); #endif dCreate_CompRowLoc_Matrix_dist( &(dslu_data->A_dslu),global_num_rows,global_num_rows, hypre_CSRMatrixNumNonzeros(A_local), num_rows, hypre_ParCSRMatrixFirstRowIndex(A), hypre_CSRMatrixData(A_local), hypre_CSRMatrixBigJ(A_local),big_rowptr, SLU_NR_loc, SLU_D, SLU_GE); /* DOK: SuperLU frees assigned data, so set them to null before * calling hypre_CSRMatrixdestroy on A_local to avoid memory errors. */ #ifndef HYPRE_MIXEDINT hypre_CSRMatrixI(A_local) = NULL; #endif hypre_CSRMatrixData(A_local) = NULL; hypre_CSRMatrixBigJ(A_local) = NULL; hypre_CSRMatrixDestroy(A_local); /*Create process grid */ while (prows*pcols <= num_procs) ++prows; --prows; pcols = num_procs/prows; while (prows*pcols != num_procs) { prows -= 1; pcols = num_procs/prows; } //hypre_printf(" prows %d pcols %d\n", prows, pcols); superlu_gridinit(comm, prows, pcols, &(dslu_data->dslu_data_grid)); set_default_options_dist(&(dslu_data->dslu_options)); dslu_data->dslu_options.Fact = DOFACT; dslu_data->dslu_options.PrintStat = NO; /*dslu_data->dslu_options.IterRefine = SLU_DOUBLE; dslu_data->dslu_options.ColPerm = MMD_AT_PLUS_A; dslu_data->dslu_options.DiagPivotThresh = 1.0; dslu_data->dslu_options.ReplaceTinyPivot = NO; */ dScalePermstructInit(global_num_rows, global_num_rows, &(dslu_data->dslu_ScalePermstruct)); dLUstructInit(global_num_rows, &(dslu_data->dslu_data_LU)); PStatInit(&(dslu_data->dslu_data_stat)); dslu_data->global_num_rows = global_num_rows; dslu_data->berr = hypre_CTAlloc(HYPRE_Real, 1, HYPRE_MEMORY_HOST); dslu_data->berr[0] = 0.0; pdgssvx(&(dslu_data->dslu_options), &(dslu_data->A_dslu), &(dslu_data->dslu_ScalePermstruct), NULL, num_rows, nrhs, &(dslu_data->dslu_data_grid), &(dslu_data->dslu_data_LU), &(dslu_data->dslu_solve), dslu_data->berr, &(dslu_data->dslu_data_stat), &info); dslu_data->dslu_options.Fact = FACTORED; return hypre_error_flag; } HYPRE_Int hypre_MGRDirectSolverSolve( void *solver, hypre_ParCSRMatrix *A, hypre_ParVector *f, hypre_ParVector *u ) { hypre_SLUDistSolve(solver, f, u); return hypre_error_flag; } HYPRE_Int hypre_MGRDirectSolverDestroy( void *solver ) { hypre_SLUDistDestroy(solver); return hypre_error_flag; } #endif
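/*--------------------------------------------------------------------------
 * Illustrative sketch (not part of hypre): a minimal configuration of an
 * MGR solver object through the setter interface defined in this file.
 * It assumes the prototypes from par_mgr.h are visible; the parameter
 * values (block size 2, 4 coarse levels, ...) are example application
 * choices, not requirements, and hypre_MGRCreate() already installs
 * defaults for all of them.  The setup/solve entry points are not shown.
 *--------------------------------------------------------------------------*/
static void *
mgr_configure_example( void )
{
   void *mgr = hypre_MGRCreate();

   hypre_MGRSetBlockSize(mgr, 2);         /* unknowns per grid point (example value) */
   hypre_MGRSetMaxCoarseLevels(mgr, 4);   /* depth of the reduction hierarchy */
   hypre_MGRSetRelaxType(mgr, 0);         /* same as the default in hypre_MGRCreate */
   hypre_MGRSetNumRelaxSweeps(mgr, 1);
   hypre_MGRSetRestrictType(mgr, 0);      /* 0 is the default used by the level setter */
   hypre_MGRSetInterpType(mgr, 2);        /* 2 is the default used by the level setter */
   hypre_MGRSetMaxIter(mgr, 20);
   hypre_MGRSetTol(mgr, 1.0e-6);
   hypre_MGRSetPrintLevel(mgr, 1);

   return mgr;                            /* release later with hypre_MGRDestroy(mgr) */
}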
/****************************************************************************** * * Two-grid system solver * *****************************************************************************/ #include "_hypre_parcsr_ls.h" #include "par_amg.h" #include "par_mgr.h" #ifdef HYPRE_USING_DSUPERLU #include "dsuperlu.h" #endif /* Create */ void * hypre_MGRCreate() { hypre_ParMGRData *mgr_data; mgr_data = hypre_CTAlloc(hypre_ParMGRData, 1, HYPRE_MEMORY_HOST); /* block data */ (mgr_data -> block_size) = 1; (mgr_data -> block_num_coarse_indexes) = NULL; (mgr_data -> point_marker_array) = NULL; (mgr_data -> block_cf_marker) = NULL; /* general data */ (mgr_data -> max_num_coarse_levels) = 10; (mgr_data -> A_array) = NULL; (mgr_data -> P_array) = NULL; (mgr_data -> RT_array) = NULL; (mgr_data -> RAP) = NULL; (mgr_data -> CF_marker_array) = NULL; (mgr_data -> coarse_indices_lvls) = NULL; (mgr_data -> A_ff_array) = NULL; (mgr_data -> F_fine_array) = NULL; (mgr_data -> U_fine_array) = NULL; (mgr_data -> aff_solver) = NULL; (mgr_data -> fine_grid_solver_setup) = NULL; (mgr_data -> fine_grid_solver_solve) = NULL; (mgr_data -> F_array) = NULL; (mgr_data -> U_array) = NULL; (mgr_data -> residual) = NULL; (mgr_data -> rel_res_norms) = NULL; (mgr_data -> Vtemp) = NULL; (mgr_data -> Ztemp) = NULL; (mgr_data -> Utemp) = NULL; (mgr_data -> Ftemp) = NULL; (mgr_data -> num_iterations) = 0; (mgr_data -> num_interp_sweeps) = 1; (mgr_data -> num_restrict_sweeps) = 1; (mgr_data -> trunc_factor) = 0.0; (mgr_data -> max_row_sum) = 0.9; (mgr_data -> strong_threshold) = 0.25; (mgr_data -> S_commpkg_switch) = 1.0; (mgr_data -> P_max_elmts) = 0; (mgr_data -> coarse_grid_solver) = NULL; (mgr_data -> coarse_grid_solver_setup) = NULL; (mgr_data -> coarse_grid_solver_solve) = NULL; (mgr_data -> global_smoother) = NULL; (mgr_data -> use_default_cgrid_solver) = 1; (mgr_data -> use_default_fsolver) = -1; // set to -1 to avoid printing when not used (mgr_data -> omega) = 1.; (mgr_data -> max_iter) = 20; (mgr_data -> tol) = 1.0e-6; (mgr_data -> relax_type) = 0; (mgr_data -> relax_order) = 1; // not fully utilized. Only used to compute L1-norms. 
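   /* Note (added): the per-level option arrays (interp_type, restrict_type,
      Frelax_method, Frelax_num_functions, use_non_galerkin_cg) start out
      NULL here and are allocated by their corresponding Set routines. */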
(mgr_data -> interp_type) = NULL; (mgr_data -> restrict_type) = NULL; (mgr_data -> num_relax_sweeps) = 1; (mgr_data -> relax_weight) = 1.0; (mgr_data -> logging) = 0; (mgr_data -> print_level) = 0; (mgr_data -> l1_norms) = NULL; (mgr_data -> reserved_coarse_size) = 0; (mgr_data -> reserved_coarse_indexes) = NULL; (mgr_data -> reserved_Cpoint_local_indexes) = NULL; (mgr_data -> diaginv) = NULL; (mgr_data -> global_smooth_iters) = 1; (mgr_data -> global_smooth_type) = 0; (mgr_data -> set_non_Cpoints_to_F) = 0; (mgr_data -> idx_array) = NULL; (mgr_data -> Frelax_method) = NULL; (mgr_data -> VcycleRelaxVtemp) = NULL; (mgr_data -> VcycleRelaxZtemp) = NULL; (mgr_data -> FrelaxVcycleData) = NULL; (mgr_data -> Frelax_num_functions) = NULL; (mgr_data -> max_local_lvls) = 10; (mgr_data -> use_non_galerkin_cg) = NULL; (mgr_data -> print_coarse_system) = 0; (mgr_data -> set_c_points_method) = 0; (mgr_data -> lvl_to_keep_cpoints) = 0; (mgr_data -> cg_convergence_factor) = 0.0; (mgr_data -> truncate_coarse_grid_threshold) = 0.0; return (void *) mgr_data; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ /* Destroy */ HYPRE_Int hypre_MGRDestroy( void *data ) { hypre_ParMGRData * mgr_data = (hypre_ParMGRData*) data; HYPRE_Int i; HYPRE_Int num_coarse_levels = (mgr_data -> num_coarse_levels); /* block info data */ if ((mgr_data -> block_cf_marker)) { for (i=0; i < (mgr_data -> max_num_coarse_levels); i++) { if ((mgr_data -> block_cf_marker)[i]) { hypre_TFree((mgr_data -> block_cf_marker)[i], HYPRE_MEMORY_HOST); } } hypre_TFree((mgr_data -> block_cf_marker), HYPRE_MEMORY_HOST); (mgr_data -> block_cf_marker) = NULL; } if(mgr_data -> block_num_coarse_indexes) { hypre_TFree(mgr_data -> block_num_coarse_indexes, HYPRE_MEMORY_HOST); (mgr_data -> block_num_coarse_indexes) = NULL; } /* final residual vector */ if((mgr_data -> residual)) { hypre_ParVectorDestroy( (mgr_data -> residual) ); (mgr_data -> residual) = NULL; } if((mgr_data -> rel_res_norms)) { hypre_TFree( (mgr_data -> rel_res_norms) , HYPRE_MEMORY_HOST); (mgr_data -> rel_res_norms) = NULL; } /* temp vectors for solve phase */ if((mgr_data -> Vtemp)) { hypre_ParVectorDestroy( (mgr_data -> Vtemp) ); (mgr_data -> Vtemp) = NULL; } if((mgr_data -> Ztemp)) { hypre_ParVectorDestroy( (mgr_data -> Ztemp) ); (mgr_data -> Ztemp) = NULL; } if((mgr_data -> Utemp)) { hypre_ParVectorDestroy( (mgr_data -> Utemp) ); (mgr_data -> Utemp) = NULL; } if((mgr_data -> Ftemp)) { hypre_ParVectorDestroy( (mgr_data -> Ftemp) ); (mgr_data -> Ftemp) = NULL; } /* coarse grid solver */ if((mgr_data -> use_default_cgrid_solver)) { if((mgr_data -> coarse_grid_solver)) { hypre_BoomerAMGDestroy( (mgr_data -> coarse_grid_solver) ); } (mgr_data -> coarse_grid_solver) = NULL; } /* l1_norms */ if ((mgr_data -> l1_norms)) { for (i=0; i < (num_coarse_levels); i++) { hypre_SeqVectorDestroy((mgr_data -> l1_norms)[i]); } hypre_TFree((mgr_data -> l1_norms), HYPRE_MEMORY_HOST); } /* coarse_indices_lvls */ if ((mgr_data -> coarse_indices_lvls)) { for (i=0; i < (num_coarse_levels); i++) if ((mgr_data -> coarse_indices_lvls)[i]) hypre_TFree((mgr_data -> coarse_indices_lvls)[i], HYPRE_MEMORY_HOST); hypre_TFree((mgr_data -> coarse_indices_lvls), HYPRE_MEMORY_HOST); } /* linear system and cf marker array */ if(mgr_data -> A_array || mgr_data -> P_array || mgr_data -> RT_array || mgr_data -> CF_marker_array) { for (i=1; i < num_coarse_levels+1; i++) { hypre_ParVectorDestroy((mgr_data -> F_array)[i]); 
hypre_ParVectorDestroy((mgr_data -> U_array)[i]); if ((mgr_data -> P_array)[i-1]) hypre_ParCSRMatrixDestroy((mgr_data -> P_array)[i-1]); if ((mgr_data -> RT_array)[i-1]) hypre_ParCSRMatrixDestroy((mgr_data -> RT_array)[i-1]); hypre_TFree((mgr_data -> CF_marker_array)[i-1], HYPRE_MEMORY_HOST); } for (i=1; i < (num_coarse_levels); i++) { if ((mgr_data -> A_array)[i]) hypre_ParCSRMatrixDestroy((mgr_data -> A_array)[i]); } } /* AMG for Frelax */ if(mgr_data -> A_ff_array || mgr_data -> F_fine_array || mgr_data -> U_fine_array) { for (i=1; i < num_coarse_levels+1; i++) { if (mgr_data -> F_fine_array[i]) hypre_ParVectorDestroy((mgr_data -> F_fine_array)[i]); if (mgr_data -> U_fine_array[i]) hypre_ParVectorDestroy((mgr_data -> U_fine_array)[i]); } for (i=1; i < (num_coarse_levels); i++) { if ((mgr_data -> A_ff_array)[i]) hypre_ParCSRMatrixDestroy((mgr_data -> A_ff_array)[i]); } if (mgr_data -> use_default_fsolver) { hypre_ParCSRMatrixDestroy((mgr_data -> A_ff_array)[0]); } hypre_TFree(mgr_data -> F_fine_array, HYPRE_MEMORY_HOST); (mgr_data -> F_fine_array) = NULL; hypre_TFree(mgr_data -> U_fine_array, HYPRE_MEMORY_HOST); (mgr_data -> U_fine_array) = NULL; hypre_TFree(mgr_data -> A_ff_array, HYPRE_MEMORY_HOST); (mgr_data -> A_ff_array) = NULL; } if(mgr_data -> aff_solver) { for (i = 1; i < (num_coarse_levels); i++) { if ((mgr_data -> aff_solver)[i]) hypre_BoomerAMGDestroy((mgr_data -> aff_solver)[i]); } if (mgr_data -> use_default_fsolver) { if ((mgr_data -> aff_solver)[0]) hypre_BoomerAMGDestroy((mgr_data -> aff_solver)[0]); } hypre_TFree(mgr_data -> aff_solver, HYPRE_MEMORY_HOST); (mgr_data -> aff_solver) = NULL; } if((mgr_data -> F_array)) { hypre_TFree((mgr_data -> F_array), HYPRE_MEMORY_HOST); (mgr_data -> F_array) = NULL; } if((mgr_data -> U_array)) { hypre_TFree((mgr_data -> U_array), HYPRE_MEMORY_HOST); (mgr_data -> U_array) = NULL; } if((mgr_data -> A_array)) { hypre_TFree((mgr_data -> A_array), HYPRE_MEMORY_HOST); (mgr_data -> A_array) = NULL; } if((mgr_data -> P_array)) { hypre_TFree((mgr_data -> P_array), HYPRE_MEMORY_HOST); (mgr_data -> P_array) = NULL; } if((mgr_data -> RT_array)) { hypre_TFree((mgr_data -> RT_array), HYPRE_MEMORY_HOST); (mgr_data -> RT_array) = NULL; } if((mgr_data -> CF_marker_array)) { hypre_TFree((mgr_data -> CF_marker_array), HYPRE_MEMORY_HOST); (mgr_data -> CF_marker_array) = NULL; } if((mgr_data -> reserved_Cpoint_local_indexes)) { hypre_TFree((mgr_data -> reserved_Cpoint_local_indexes), HYPRE_MEMORY_HOST); (mgr_data -> reserved_Cpoint_local_indexes) = NULL; } if (mgr_data -> restrict_type) { hypre_TFree(mgr_data -> restrict_type, HYPRE_MEMORY_HOST); (mgr_data -> restrict_type) = NULL; } if (mgr_data -> interp_type) { hypre_TFree(mgr_data -> interp_type, HYPRE_MEMORY_HOST); (mgr_data -> interp_type) = NULL; } /* Frelax_method */ if (mgr_data -> Frelax_method) { hypre_TFree(mgr_data -> Frelax_method, HYPRE_MEMORY_HOST); (mgr_data -> Frelax_method) = NULL; } /* Frelax_num_functions */ if (mgr_data -> Frelax_num_functions) { hypre_TFree(mgr_data -> Frelax_num_functions, HYPRE_MEMORY_HOST); (mgr_data -> Frelax_num_functions) = NULL; } /* data for V-cycle F-relaxation */ if((mgr_data -> VcycleRelaxVtemp)) { hypre_ParVectorDestroy( (mgr_data -> VcycleRelaxVtemp) ); (mgr_data -> VcycleRelaxVtemp) = NULL; } if((mgr_data -> VcycleRelaxZtemp)) { hypre_ParVectorDestroy( (mgr_data -> VcycleRelaxZtemp) ); (mgr_data -> VcycleRelaxZtemp) = NULL; } if (mgr_data -> FrelaxVcycleData) { for (i = 0; i < num_coarse_levels; i++) { if ((mgr_data -> FrelaxVcycleData)[i]) { 
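/* Release the AMG data used for V-cycle F-relaxation at this level. */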
            hypre_MGRDestroyFrelaxVcycleData((mgr_data -> FrelaxVcycleData)[i]);
            (mgr_data -> FrelaxVcycleData)[i] = NULL;
         }
      }
      hypre_TFree(mgr_data -> FrelaxVcycleData, HYPRE_MEMORY_HOST);
      (mgr_data -> FrelaxVcycleData) = NULL;
   }
   /* data for reserved coarse nodes */
   if(mgr_data -> reserved_coarse_indexes)
   {
      hypre_TFree(mgr_data -> reserved_coarse_indexes, HYPRE_MEMORY_HOST);
      (mgr_data -> reserved_coarse_indexes) = NULL;
   }
   /* index array for setting Cpoints by global block */
   if ((mgr_data -> set_c_points_method) == 1)
   {
      hypre_TFree(mgr_data -> idx_array, HYPRE_MEMORY_HOST);
      (mgr_data -> idx_array) = NULL;
   }
   /* array for setting option to use non-Galerkin coarse grid */
   if (mgr_data -> use_non_galerkin_cg)
   {
      hypre_TFree(mgr_data -> use_non_galerkin_cg, HYPRE_MEMORY_HOST);
      (mgr_data -> use_non_galerkin_cg) = NULL;
   }
   /* coarse level matrix - RAP */
   if ((mgr_data -> RAP)) hypre_ParCSRMatrixDestroy((mgr_data -> RAP));
   if ((mgr_data -> diaginv)) hypre_TFree((mgr_data -> diaginv), HYPRE_MEMORY_HOST);
   if ((mgr_data -> global_smoother))
   {
      if (mgr_data -> global_smooth_type == 8)
      {
         HYPRE_EuclidDestroy((mgr_data -> global_smoother));
      }
      else if (mgr_data -> global_smooth_type == 16)
      {
         HYPRE_ILUDestroy((mgr_data -> global_smoother));
      }
   }
   /* mgr data */
   hypre_TFree(mgr_data, HYPRE_MEMORY_HOST);
   return hypre_error_flag;
}

/* Create data for V-cycle F-relaxation */
void * hypre_MGRCreateFrelaxVcycleData()
{
   hypre_ParAMGData *vdata = hypre_CTAlloc(hypre_ParAMGData, 1, HYPRE_MEMORY_HOST);
   hypre_ParAMGDataAArray(vdata) = NULL;
   hypre_ParAMGDataPArray(vdata) = NULL;
   hypre_ParAMGDataFArray(vdata) = NULL;
   hypre_ParAMGDataCFMarkerArray(vdata) = NULL;
   hypre_ParAMGDataVtemp(vdata) = NULL;
   hypre_ParAMGDataAMat(vdata) = NULL;
   hypre_ParAMGDataBVec(vdata) = NULL;
   hypre_ParAMGDataZtemp(vdata) = NULL;
   hypre_ParAMGDataCommInfo(vdata) = NULL;
   hypre_ParAMGDataUArray(vdata) = NULL;
   hypre_ParAMGDataNewComm(vdata) = hypre_MPI_COMM_NULL;
   hypre_ParAMGDataNumLevels(vdata) = 0;
   hypre_ParAMGDataMaxLevels(vdata) = 10;
   hypre_ParAMGDataNumFunctions(vdata) = 1;
   hypre_ParAMGDataSCommPkgSwitch(vdata) = 1.0;
   hypre_ParAMGDataRelaxOrder(vdata) = 1;
   hypre_ParAMGDataMaxCoarseSize(vdata) = 9;
   hypre_ParAMGDataMinCoarseSize(vdata) = 0;
   hypre_ParAMGDataUserCoarseRelaxType(vdata) = 9;
   return (void *) vdata;
}

/* Destroy data for V-cycle F-relaxation */
HYPRE_Int hypre_MGRDestroyFrelaxVcycleData( void *data )
{
   hypre_ParAMGData * vdata = (hypre_ParAMGData*) data;
   HYPRE_Int i;
   HYPRE_Int num_levels = hypre_ParAMGDataNumLevels(vdata);
   MPI_Comm new_comm = hypre_ParAMGDataNewComm(vdata);
   hypre_TFree(hypre_ParAMGDataDofFuncArray(vdata)[0], HYPRE_MEMORY_HOST);
   for (i=1; i < num_levels + 1; i++)
   {
      if (hypre_ParAMGDataAArray(vdata)[i]) hypre_ParCSRMatrixDestroy(hypre_ParAMGDataAArray(vdata)[i]);
      if (hypre_ParAMGDataPArray(vdata)[i-1]) hypre_ParCSRMatrixDestroy(hypre_ParAMGDataPArray(vdata)[i-1]);
      hypre_TFree(hypre_ParAMGDataCFMarkerArray(vdata)[i-1], HYPRE_MEMORY_HOST);
      hypre_ParVectorDestroy(hypre_ParAMGDataFArray(vdata)[i]);
      hypre_ParVectorDestroy(hypre_ParAMGDataUArray(vdata)[i]);
      hypre_TFree(hypre_ParAMGDataDofFuncArray(vdata)[i], HYPRE_MEMORY_HOST);
   }
   /* see comments in par_coarsen.c regarding special case for CF_marker */
   if (num_levels <= 1)
   {
      hypre_TFree(hypre_ParAMGDataCFMarkerArray(vdata)[0], HYPRE_MEMORY_HOST);
   }
   /* Points to VcycleRelaxVtemp of mgr_data, which is already destroyed */
   //hypre_ParVectorDestroy(hypre_ParAMGDataVtemp(vdata));
   hypre_TFree(hypre_ParAMGDataFArray(vdata), HYPRE_MEMORY_HOST);
   hypre_TFree(hypre_ParAMGDataUArray(vdata),
HYPRE_MEMORY_HOST); hypre_TFree(hypre_ParAMGDataAArray(vdata), HYPRE_MEMORY_HOST); hypre_TFree(hypre_ParAMGDataPArray(vdata), HYPRE_MEMORY_HOST); hypre_TFree(hypre_ParAMGDataCFMarkerArray(vdata), HYPRE_MEMORY_HOST); //hypre_TFree(hypre_ParAMGDataGridRelaxType(vdata), HYPRE_MEMORY_HOST); hypre_TFree(hypre_ParAMGDataDofFuncArray(vdata), HYPRE_MEMORY_HOST); /* Points to VcycleRelaxZtemp of mgr_data, which is already destroyed */ /* if (hypre_ParAMGDataZtemp(vdata)) hypre_ParVectorDestroy(hypre_ParAMGDataZtemp(vdata)); */ if (hypre_ParAMGDataAMat(vdata)) hypre_TFree(hypre_ParAMGDataAMat(vdata), HYPRE_MEMORY_HOST); if (hypre_ParAMGDataBVec(vdata)) hypre_TFree(hypre_ParAMGDataBVec(vdata), HYPRE_MEMORY_HOST); if (hypre_ParAMGDataCommInfo(vdata)) hypre_TFree(hypre_ParAMGDataCommInfo(vdata), HYPRE_MEMORY_HOST); if (new_comm != hypre_MPI_COMM_NULL) { hypre_MPI_Comm_free (&new_comm); } hypre_TFree(vdata, HYPRE_MEMORY_HOST); return hypre_error_flag; } /* Set C-point variables for each reduction level */ /* Currently not implemented */ HYPRE_Int hypre_MGRSetReductionLevelCpoints( void *mgr_vdata, HYPRE_Int nlevels, HYPRE_Int *num_coarse_points, HYPRE_Int **level_coarse_indexes) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> num_coarse_levels) = nlevels; (mgr_data -> num_coarse_per_level) = num_coarse_points; (mgr_data -> level_coarse_indexes) = level_coarse_indexes; return hypre_error_flag; } /* Initialize some data */ /* Set whether non-coarse points on each level should be explicitly tagged as F-points */ HYPRE_Int hypre_MGRSetNonCpointsToFpoints( void *mgr_vdata, HYPRE_Int nonCptToFptFlag) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> set_non_Cpoints_to_F) = nonCptToFptFlag; return hypre_error_flag; } /* Set whether the reserved C points are reduced before the coarse grid solve */ HYPRE_Int hypre_MGRSetReservedCpointsLevelToKeep(void *mgr_vdata, HYPRE_Int level) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> lvl_to_keep_cpoints) = level; return hypre_error_flag; } /* Set Cpoints by contiguous blocks, i.e. p1, p2, ..., pn, s1, s2, ..., sn, ... 
*/ HYPRE_Int hypre_MGRSetCpointsByContiguousBlock( void *mgr_vdata, HYPRE_Int block_size, HYPRE_Int max_num_levels, HYPRE_BigInt *begin_idx_array, HYPRE_Int *block_num_coarse_points, HYPRE_Int **block_coarse_indexes) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i; if((mgr_data -> idx_array) != NULL) { hypre_TFree(mgr_data -> idx_array, HYPRE_MEMORY_HOST); (mgr_data -> idx_array) = NULL; } HYPRE_BigInt *index_array = hypre_CTAlloc(HYPRE_BigInt, block_size, HYPRE_MEMORY_HOST); if (begin_idx_array != NULL) { for (i = 0; i < block_size; i++) { index_array[i] = *(begin_idx_array+i); } } hypre_MGRSetCpointsByBlock(mgr_data, block_size, max_num_levels, block_num_coarse_points, block_coarse_indexes); (mgr_data -> idx_array) = index_array; (mgr_data -> set_c_points_method) = 1; return hypre_error_flag; } /* Initialize/ set local block data information */ HYPRE_Int hypre_MGRSetCpointsByBlock( void *mgr_vdata, HYPRE_Int block_size, HYPRE_Int max_num_levels, HYPRE_Int *block_num_coarse_points, HYPRE_Int **block_coarse_indexes) { HYPRE_Int i,j; HYPRE_Int **block_cf_marker = NULL; HYPRE_Int *block_num_coarse_indexes = NULL; hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; /* free block cf_marker data if not previously destroyed */ if((mgr_data -> block_cf_marker) != NULL) { for (i=0; i < (mgr_data -> max_num_coarse_levels); i++) { if ((mgr_data -> block_cf_marker)[i]) { hypre_TFree((mgr_data -> block_cf_marker)[i], HYPRE_MEMORY_HOST); (mgr_data -> block_cf_marker)[i] = NULL; } } hypre_TFree(mgr_data -> block_cf_marker, HYPRE_MEMORY_HOST); (mgr_data -> block_cf_marker) = NULL; } if((mgr_data -> block_num_coarse_indexes)) { hypre_TFree((mgr_data -> block_num_coarse_indexes), HYPRE_MEMORY_HOST); (mgr_data -> block_num_coarse_indexes) = NULL; } /* store block cf_marker */ block_cf_marker = hypre_CTAlloc(HYPRE_Int *, max_num_levels, HYPRE_MEMORY_HOST); for (i = 0; i < max_num_levels; i++) { block_cf_marker[i] = hypre_CTAlloc(HYPRE_Int, block_size, HYPRE_MEMORY_HOST); memset(block_cf_marker[i], FMRK, block_size*sizeof(HYPRE_Int)); } for (i = 0; i < max_num_levels; i++) { for(j=0; j<block_num_coarse_points[i]; j++) { (block_cf_marker[i])[block_coarse_indexes[i][j]] = CMRK; } } /* store block_num_coarse_points */ if(max_num_levels > 0) { block_num_coarse_indexes = hypre_CTAlloc(HYPRE_Int, max_num_levels, HYPRE_MEMORY_HOST); for(i=0; i<max_num_levels; i++) block_num_coarse_indexes[i] = block_num_coarse_points[i]; } /* set block data */ (mgr_data -> max_num_coarse_levels) = max_num_levels; (mgr_data -> block_size) = block_size; (mgr_data -> block_num_coarse_indexes) = block_num_coarse_indexes; (mgr_data -> block_cf_marker) = block_cf_marker; (mgr_data -> set_c_points_method) = 0; return hypre_error_flag; } HYPRE_Int hypre_MGRSetCpointsByPointMarkerArray( void *mgr_vdata, HYPRE_Int block_size, HYPRE_Int max_num_levels, HYPRE_Int *lvl_num_coarse_points, HYPRE_Int **lvl_coarse_indexes, HYPRE_Int *point_marker_array) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i,j; HYPRE_Int **block_cf_marker = NULL; HYPRE_Int *block_num_coarse_indexes = NULL; /* free block cf_marker data if not previously destroyed */ if((mgr_data -> block_cf_marker) != NULL) { for (i=0; i < (mgr_data -> max_num_coarse_levels); i++) { if ((mgr_data -> block_cf_marker)[i]) { hypre_TFree((mgr_data -> block_cf_marker)[i], HYPRE_MEMORY_HOST); (mgr_data -> block_cf_marker)[i] = NULL; } } hypre_TFree(mgr_data -> block_cf_marker, HYPRE_MEMORY_HOST); (mgr_data -> block_cf_marker) = NULL; } 
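/* Likewise, free any previously stored block_num_coarse_indexes before it is rebuilt below. */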
if((mgr_data -> block_num_coarse_indexes)) { hypre_TFree((mgr_data -> block_num_coarse_indexes), HYPRE_MEMORY_HOST); (mgr_data -> block_num_coarse_indexes) = NULL; } /* store block cf_marker */ block_cf_marker = hypre_CTAlloc(HYPRE_Int *, max_num_levels, HYPRE_MEMORY_HOST); for (i = 0; i < max_num_levels; i++) { block_cf_marker[i] = hypre_CTAlloc(HYPRE_Int, block_size, HYPRE_MEMORY_HOST); memset(block_cf_marker[i], FMRK, block_size*sizeof(HYPRE_Int)); } for (i = 0; i < max_num_levels; i++) { for(j=0; j<lvl_num_coarse_points[i]; j++) { block_cf_marker[i][j] = lvl_coarse_indexes[i][j]; } } /* store block_num_coarse_points */ if(max_num_levels > 0) { block_num_coarse_indexes = hypre_CTAlloc(HYPRE_Int, max_num_levels, HYPRE_MEMORY_HOST); for(i=0; i<max_num_levels; i++) block_num_coarse_indexes[i] = lvl_num_coarse_points[i]; } /* set block data */ (mgr_data -> max_num_coarse_levels) = max_num_levels; (mgr_data -> block_size) = block_size; (mgr_data -> block_num_coarse_indexes) = block_num_coarse_indexes; (mgr_data -> block_cf_marker) = block_cf_marker; (mgr_data -> point_marker_array) = point_marker_array; (mgr_data -> set_c_points_method) = 2; return hypre_error_flag; } /*Set number of points that remain part of the coarse grid throughout the hierarchy */ HYPRE_Int hypre_MGRSetReservedCoarseNodes(void *mgr_vdata, HYPRE_Int reserved_coarse_size, HYPRE_BigInt *reserved_cpt_index) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_BigInt *reserved_coarse_indexes = NULL; HYPRE_Int i; if (!mgr_data) { hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Warning! MGR object empty!\n"); return hypre_error_flag; } if(reserved_coarse_size < 0) { hypre_error_in_arg(2); return hypre_error_flag; } /* free data not previously destroyed */ if((mgr_data -> reserved_coarse_indexes)) { hypre_TFree((mgr_data -> reserved_coarse_indexes), HYPRE_MEMORY_HOST); (mgr_data -> reserved_coarse_indexes) = NULL; } /* set reserved coarse nodes */ if(reserved_coarse_size > 0) { reserved_coarse_indexes = hypre_CTAlloc(HYPRE_BigInt, reserved_coarse_size, HYPRE_MEMORY_HOST); for(i=0; i<reserved_coarse_size; i++) reserved_coarse_indexes[i] = reserved_cpt_index[i]; } (mgr_data -> reserved_coarse_size) = reserved_coarse_size; (mgr_data -> reserved_coarse_indexes) = reserved_coarse_indexes; return hypre_error_flag; } /* Set CF marker array */ HYPRE_Int hypre_MGRCoarsen(hypre_ParCSRMatrix *S, hypre_ParCSRMatrix *A, HYPRE_Int fixed_coarse_size, HYPRE_Int *fixed_coarse_indexes, HYPRE_Int debug_flag, HYPRE_Int **CF_marker_ptr, HYPRE_Int cflag) { HYPRE_Int *CF_marker = NULL; HYPRE_Int *cindexes = fixed_coarse_indexes; HYPRE_Int i, row, nc; HYPRE_Int nloc = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A)); /* If this is the last level, coarsen onto fixed coarse set */ if(cflag) { if(*CF_marker_ptr != NULL) { hypre_TFree(*CF_marker_ptr, HYPRE_MEMORY_HOST); } CF_marker = hypre_CTAlloc(HYPRE_Int, nloc, HYPRE_MEMORY_HOST); memset(CF_marker, FMRK, nloc*sizeof(HYPRE_Int)); /* first mark fixed coarse set */ nc = fixed_coarse_size; for(i = 0; i < nc; i++) { CF_marker[cindexes[i]] = CMRK; } } else { /* First coarsen to get initial CF splitting. * This is then followed by updating the CF marker to pass * coarse information to the next levels. NOTE: It may be * convenient to implement this way (allows the use of multiple * coarsening strategies without changing too much code), * but not necessarily the best option, compared to initializing * CF_marker first and then coarsening on subgraph which excludes * the initialized coarse nodes. 
*/ hypre_BoomerAMGCoarsen(S, A, 0, debug_flag, &CF_marker); /* Update CF_marker to correct Cpoints marked as Fpoints. */ nc = fixed_coarse_size; for(i = 0; i < nc; i++) { CF_marker[cindexes[i]] = CMRK; } /* set F-points to FMRK. This is necessary since the different coarsening schemes differentiate * between type of F-points (example Ruge coarsening). We do not need that distinction here. */ for (row = 0; row <nloc; row++) { if(CF_marker[row] == CMRK) continue; CF_marker[row] = FMRK; } #if 0 /* IMPORTANT: Update coarse_indexes array to define the positions of the fixed coarse points * in the next level. */ nc = 0; index_i = 0; for (row = 0; row <nloc; row++) { /* loop through new c-points */ if(CF_marker[row] == CMRK) nc++; else if(CF_marker[row] == S_CMRK) { /* previously marked c-point is part of fixed coarse set. Track its current local index */ cindexes[index_i++] = nc; /* reset c-point from S_CMRK to CMRK */ cf_marker[row] = CMRK; nc++; } /* set F-points to FMRK. This is necessary since the different coarsening schemes differentiate * between type of F-points (example Ruge coarsening). We do not need that distinction here. */ else { CF_marker[row] = FMRK; } } /* check if this should be last level */ if( nc == fixed_coarse_size) last_level = 1; //printf(" nc = %d and fixed coarse size = %d \n", nc, fixed_coarse_size); #endif } /* set CF_marker */ *CF_marker_ptr = CF_marker; return hypre_error_flag; } /* Interpolation for MGR - Adapted from BoomerAMGBuildInterp */ HYPRE_Int hypre_MGRBuildP( hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, HYPRE_BigInt *num_cpts_global, HYPRE_Int method, HYPRE_Int debug_flag, hypre_ParCSRMatrix **P_ptr) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_Real *a_diag; hypre_ParCSRMatrix *P; HYPRE_BigInt *col_map_offd_P; HYPRE_Int *tmp_map_offd = NULL; HYPRE_Int *CF_marker_offd = NULL; hypre_CSRMatrix *P_diag; hypre_CSRMatrix *P_offd; HYPRE_Real *P_diag_data; HYPRE_Int *P_diag_i; HYPRE_Int *P_diag_j; HYPRE_Real *P_offd_data; HYPRE_Int *P_offd_i; HYPRE_Int *P_offd_j; HYPRE_Int P_diag_size, P_offd_size; HYPRE_Int *P_marker, *P_marker_offd; HYPRE_Int jj_counter,jj_counter_offd; HYPRE_Int *jj_count, *jj_count_offd; // HYPRE_Int jj_begin_row,jj_begin_row_offd; // HYPRE_Int jj_end_row,jj_end_row_offd; HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */ HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int *fine_to_coarse; //HYPRE_BigInt *fine_to_coarse_offd; HYPRE_Int *coarse_counter; HYPRE_Int coarse_shift; HYPRE_BigInt total_global_cpts; //HYPRE_BigInt my_first_cpt; HYPRE_Int num_cols_P_offd; HYPRE_Int i,i1; HYPRE_Int j,jl,jj; HYPRE_Int start; HYPRE_Real one = 1.0; HYPRE_Int my_id; HYPRE_Int num_procs; HYPRE_Int num_threads; HYPRE_Int num_sends; HYPRE_Int index; HYPRE_Int ns, ne, size, rest; HYPRE_Int *int_buf_data; HYPRE_Real wall_time; /* for debugging instrumentation */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm,&my_id); //num_threads = hypre_NumThreads(); // 
   // Temporary fix, disable threading
   // TODO: enable threading
   num_threads = 1;
#ifdef HYPRE_NO_GLOBAL_PARTITION
   //my_first_cpt = num_cpts_global[0];
   if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
   hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
#else
   //my_first_cpt = num_cpts_global[my_id];
   total_global_cpts = num_cpts_global[num_procs];
#endif
   /*-------------------------------------------------------------------
    * Get the CF_marker data for the off-processor columns
    *-------------------------------------------------------------------*/
   if (debug_flag < 0)
   {
      debug_flag = -debug_flag;
   }
   if (debug_flag==4) wall_time = time_getWallclockSeconds();
   if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }
   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
   index = 0;
   for (i = 0; i < num_sends; i++)
   {
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
   }
   comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd);
   hypre_ParCSRCommHandleDestroy(comm_handle);
   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n", my_id, wall_time);
      fflush(NULL);
   }
   /*-----------------------------------------------------------------------
    * First Pass: Determine size of P and fill in fine_to_coarse mapping.
    *-----------------------------------------------------------------------*/
   /*-----------------------------------------------------------------------
    * Initialize counters and allocate mapping vector.
    *-----------------------------------------------------------------------*/
   coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
   for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1;
   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;
   /*-----------------------------------------------------------------------
    * Loop over fine grid.
    *-----------------------------------------------------------------------*/
   /* RDF: this looks a little tricky, but doable */
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
#endif
   for (j = 0; j < num_threads; j++)
   {
      size = n_fine/num_threads;
      rest = n_fine - size*num_threads;
      if (j < rest)
      {
         ns = j*size+j;
         ne = (j+1)*size+j+1;
      }
      else
      {
         ns = j*size+rest;
         ne = (j+1)*size+rest;
      }
      for (i = ns; i < ne; i++)
      {
         /*--------------------------------------------------------------------
          * If i is a C-point, interpolation is the identity. Also set up
          * mapping vector.
*--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { jj_count[j]++; fine_to_coarse[i] = coarse_counter[j]; coarse_counter[j]++; } /*-------------------------------------------------------------------- * If i is an F-point, interpolation is the approximation of A_{ff}^{-1}A_{fc} *--------------------------------------------------------------------*/ else { for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { i1 = A_diag_j[jj]; if ((CF_marker[i1] >= 0) && (method > 0)) { jj_count[j]++; } } if (num_procs > 1) { for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { i1 = A_offd_j[jj]; if ((CF_marker_offd[i1] >= 0) && (method > 0)) { jj_count_offd[j]++; } } } } } } /*----------------------------------------------------------------------- * Allocate arrays. *-----------------------------------------------------------------------*/ for (i=0; i < num_threads-1; i++) { coarse_counter[i+1] += coarse_counter[i]; jj_count[i+1] += jj_count[i]; jj_count_offd[i+1] += jj_count_offd[i]; } i = num_threads-1; jj_counter = jj_count[i]; jj_counter_offd = jj_count_offd[i]; P_diag_size = jj_counter; P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE); P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_DEVICE); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_DEVICE); P_diag_i[n_fine] = jj_counter; P_offd_size = jj_counter_offd; P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE); P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_DEVICE); P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_DEVICE); /*----------------------------------------------------------------------- * Intialize some stuff. *-----------------------------------------------------------------------*/ jj_counter = start_indexing; jj_counter_offd = start_indexing; if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Internal work 1 = %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * Send and receive fine_to_coarse info. 
*-----------------------------------------------------------------------*/ if (debug_flag==4) wall_time = time_getWallclockSeconds(); //fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_A_offd, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE #endif #endif for (j = 0; j < num_threads; j++) { coarse_shift = 0; if (j > 0) coarse_shift = coarse_counter[j-1]; size = n_fine/num_threads; rest = n_fine - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) { fine_to_coarse[i] += coarse_shift; } } /* index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) big_buf_data[index++] = fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]+ my_first_cpt; } comm_handle = hypre_ParCSRCommHandleCreate( 21, comm_pkg, big_buf_data, fine_to_coarse_offd); hypre_ParCSRCommHandleDestroy(comm_handle); */ if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n", my_id, wall_time); fflush(NULL); } if (debug_flag==4) wall_time = time_getWallclockSeconds(); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif //for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt; /*----------------------------------------------------------------------- * Loop over fine grid points. *-----------------------------------------------------------------------*/ a_diag = hypre_CTAlloc(HYPRE_Real, n_fine, HYPRE_MEMORY_HOST); for (i = 0; i < n_fine; i++) { if (CF_marker[i] < 0) { for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { i1 = A_diag_j[jj]; if ( i==i1 ) /* diagonal of A only */ { a_diag[i] = 1.0/A_diag_data[jj]; } } } } #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,jl,i1,jj,ns,ne,size,rest,P_marker,P_marker_offd,jj_counter,jj_counter_offd,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE #endif #endif for (jl = 0; jl < num_threads; jl++) { size = n_fine/num_threads; rest = n_fine - size*num_threads; if (jl < rest) { ns = jl*size+jl; ne = (jl+1)*size+jl+1; } else { ns = jl*size+rest; ne = (jl+1)*size+rest; } jj_counter = 0; if (jl > 0) jj_counter = jj_count[jl-1]; jj_counter_offd = 0; if (jl > 0) jj_counter_offd = jj_count_offd[jl-1]; P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); if (num_cols_A_offd) P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); else P_marker_offd = NULL; for (i = 0; i < n_fine; i++) { P_marker[i] = -1; } for (i = 0; i < num_cols_A_offd; i++) { P_marker_offd[i] = -1; } for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a c-point, interpolation is the identity. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { P_diag_i[i] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i]; P_diag_data[jj_counter] = one; jj_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, build interpolation. 
*--------------------------------------------------------------------*/ else { /* Diagonal part of P */ P_diag_i[i] = jj_counter; for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { i1 = A_diag_j[jj]; /*-------------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_diag_j * and initialize interpolation weight to zero. *--------------------------------------------------------------*/ if ((CF_marker[i1] >= 0) && (method > 0)) { P_marker[i1] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i1]; /* if(method == 0) { P_diag_data[jj_counter] = 0.0; } */ if (method == 1) { P_diag_data[jj_counter] = - A_diag_data[jj]; } else if (method == 2) { P_diag_data[jj_counter] = - A_diag_data[jj]*a_diag[i]; } jj_counter++; } } /* Off-Diagonal part of P */ P_offd_i[i] = jj_counter_offd; if (num_procs > 1) { for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { i1 = A_offd_j[jj]; /*----------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_offd_j * and initialize interpolation weight to zero. *-----------------------------------------------------------*/ if ((CF_marker_offd[i1] >= 0) && (method > 0)) { P_marker_offd[i1] = jj_counter_offd; /*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/ P_offd_j[jj_counter_offd] = i1; /* if(method == 0) { P_offd_data[jj_counter_offd] = 0.0; } */ if (method == 1) { P_offd_data[jj_counter_offd] = - A_offd_data[jj]; } else if (method == 2) { P_offd_data[jj_counter_offd] = - A_offd_data[jj]*a_diag[i]; } jj_counter_offd++; } } } } P_offd_i[i+1] = jj_counter_offd; } hypre_TFree(P_marker, HYPRE_MEMORY_HOST); hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST); } hypre_TFree(a_diag, HYPRE_MEMORY_HOST); P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), total_global_cpts, hypre_ParCSRMatrixColStarts(A), num_cpts_global, 0, P_diag_i[n_fine], P_offd_i[n_fine]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; hypre_ParCSRMatrixOwnsRowStarts(P) = 0; num_cols_P_offd = 0; if (P_offd_size) { P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i=0; i < num_cols_A_offd; i++) P_marker[i] = 0; num_cols_P_offd = 0; for (i=0; i < P_offd_size; i++) { index = P_offd_j[i]; if (!P_marker[index]) { num_cols_P_offd++; P_marker[index] = 1; } } col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST); tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST); index = 0; for (i=0; i < num_cols_P_offd; i++) { while (P_marker[index]==0) index++; tmp_map_offd[i] = index++; } #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i=0; i < P_offd_size; i++) P_offd_j[i] = hypre_BinarySearch(tmp_map_offd, P_offd_j[i], num_cols_P_offd); hypre_TFree(P_marker, HYPRE_MEMORY_HOST); } for (i=0; i < n_fine; i++) if (CF_marker[i] == -3) CF_marker[i] = -1; if (num_cols_P_offd) { hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P; hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd; } hypre_GetCommPkgRTFromCommPkgA(P,A, fine_to_coarse, tmp_map_offd); *P_ptr = P; hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST); 
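/* Clean up the communication buffer and workspace arrays used while building P. */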
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST); //hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST); hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST); hypre_TFree(jj_count, HYPRE_MEMORY_HOST); hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST); return(0); } /* Interpolation for MGR - Dynamic Row Sum method */ HYPRE_Int hypre_MGRBuildPDRS( hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, HYPRE_BigInt *num_cpts_global, HYPRE_Int blk_size, HYPRE_Int reserved_coarse_size, HYPRE_Int debug_flag, hypre_ParCSRMatrix **P_ptr) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_Real *a_diag; hypre_ParCSRMatrix *P; HYPRE_BigInt *col_map_offd_P; HYPRE_Int *tmp_map_offd; HYPRE_Int *CF_marker_offd = NULL; hypre_CSRMatrix *P_diag; hypre_CSRMatrix *P_offd; HYPRE_Real *P_diag_data; HYPRE_Int *P_diag_i; HYPRE_Int *P_diag_j; HYPRE_Real *P_offd_data; HYPRE_Int *P_offd_i; HYPRE_Int *P_offd_j; HYPRE_Int P_diag_size, P_offd_size; HYPRE_Int *P_marker, *P_marker_offd; HYPRE_Int jj_counter,jj_counter_offd; HYPRE_Int *jj_count, *jj_count_offd; // HYPRE_Int jj_begin_row,jj_begin_row_offd; // HYPRE_Int jj_end_row,jj_end_row_offd; HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */ HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int *fine_to_coarse; //HYPRE_BigInt *fine_to_coarse_offd; HYPRE_Int *coarse_counter; HYPRE_Int coarse_shift; HYPRE_BigInt total_global_cpts; //HYPRE_BigInt my_first_cpt; HYPRE_Int num_cols_P_offd; HYPRE_Int i,i1; HYPRE_Int j,jl,jj; HYPRE_Int start; HYPRE_Real one = 1.0; HYPRE_Int my_id; HYPRE_Int num_procs; HYPRE_Int num_threads; HYPRE_Int num_sends; HYPRE_Int index; HYPRE_Int ns, ne, size, rest; HYPRE_Int *int_buf_data; HYPRE_Real wall_time; /* for debugging instrumentation */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm,&my_id); //num_threads = hypre_NumThreads(); // Temporary fix, disable threading // TODO: enable threading num_threads = 1; #ifdef HYPRE_NO_GLOBAL_PARTITION //my_first_cpt = num_cpts_global[0]; if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1]; hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); #else //my_first_cpt = num_cpts_global[my_id]; total_global_cpts = num_cpts_global[num_procs]; #endif /*------------------------------------------------------------------- * Get the CF_marker data for the off-processor columns *-------------------------------------------------------------------*/ if (debug_flag < 0) { debug_flag = -debug_flag; } if (debug_flag==4) wall_time = time_getWallclockSeconds(); if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; 
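/* Pack the local CF_marker entries needed by neighboring processes into the send buffer. */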
for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * First Pass: Determine size of P and fill in fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. *-----------------------------------------------------------------------*/ coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1; jj_counter = start_indexing; jj_counter_offd = start_indexing; /*----------------------------------------------------------------------- * Loop over fine grid. *-----------------------------------------------------------------------*/ /* RDF: this looks a little tricky, but doable */ #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE #endif #endif for (j = 0; j < num_threads; j++) { size = n_fine/num_threads; rest = n_fine - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a C-point, interpolation is the identity. Also set up * mapping vector. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { jj_count[j]++; fine_to_coarse[i] = coarse_counter[j]; coarse_counter[j]++; } /*-------------------------------------------------------------------- * If i is an F-point, interpolation is the approximation of A_{ff}^{-1}A_{fc} *--------------------------------------------------------------------*/ else { for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { i1 = A_diag_j[jj]; if (CF_marker[i1] >= 0) { jj_count[j]++; } } if (num_procs > 1) { for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { i1 = A_offd_j[jj]; if (CF_marker_offd[i1] >= 0) { jj_count_offd[j]++; } } } } /*-------------------------------------------------------------------- * Set up the indexes for the DRS method *--------------------------------------------------------------------*/ } } /*----------------------------------------------------------------------- * Allocate arrays. 
*-----------------------------------------------------------------------*/ for (i=0; i < num_threads-1; i++) { coarse_counter[i+1] += coarse_counter[i]; jj_count[i+1] += jj_count[i]; jj_count_offd[i+1] += jj_count_offd[i]; } i = num_threads-1; jj_counter = jj_count[i]; jj_counter_offd = jj_count_offd[i]; P_diag_size = jj_counter; P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST); P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST); P_diag_i[n_fine] = jj_counter; P_offd_size = jj_counter_offd; P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST); P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST); P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST); /*----------------------------------------------------------------------- * Intialize some stuff. *-----------------------------------------------------------------------*/ jj_counter = start_indexing; jj_counter_offd = start_indexing; if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Internal work 1 = %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * Send and receive fine_to_coarse info. *-----------------------------------------------------------------------*/ if (debug_flag==4) wall_time = time_getWallclockSeconds(); //fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE #endif #endif for (j = 0; j < num_threads; j++) { coarse_shift = 0; if (j > 0) coarse_shift = coarse_counter[j-1]; size = n_fine/num_threads; rest = n_fine - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) fine_to_coarse[i] += coarse_shift; } /*index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, fine_to_coarse_offd); hypre_ParCSRCommHandleDestroy(comm_handle); */ if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n", my_id, wall_time); fflush(NULL); } if (debug_flag==4) wall_time = time_getWallclockSeconds(); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif //for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt; /*----------------------------------------------------------------------- * Loop over fine grid points. 
*-----------------------------------------------------------------------*/ a_diag = hypre_CTAlloc(HYPRE_Real, n_fine, HYPRE_MEMORY_HOST); for (i = 0; i < n_fine; i++) { for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { i1 = A_diag_j[jj]; if ( i==i1 ) /* diagonal of A only */ { a_diag[i] = 1.0/A_diag_data[jj]; } } } #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,jl,i1,jj,ns,ne,size,rest,P_marker,P_marker_offd,jj_counter,jj_counter_offd,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE #endif #endif for (jl = 0; jl < num_threads; jl++) { size = n_fine/num_threads; rest = n_fine - size*num_threads; if (jl < rest) { ns = jl*size+jl; ne = (jl+1)*size+jl+1; } else { ns = jl*size+rest; ne = (jl+1)*size+rest; } jj_counter = 0; if (jl > 0) jj_counter = jj_count[jl-1]; jj_counter_offd = 0; if (jl > 0) jj_counter_offd = jj_count_offd[jl-1]; P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); if (num_cols_A_offd) P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); else P_marker_offd = NULL; for (i = 0; i < n_fine; i++) { P_marker[i] = -1; } for (i = 0; i < num_cols_A_offd; i++) { P_marker_offd[i] = -1; } for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a c-point, interpolation is the identity. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { P_diag_i[i] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i]; P_diag_data[jj_counter] = one; jj_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, build interpolation. *--------------------------------------------------------------------*/ else { /* Diagonal part of P */ P_diag_i[i] = jj_counter; for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { i1 = A_diag_j[jj]; /*-------------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_diag_j * and initialize interpolation weight to zero. *--------------------------------------------------------------*/ if (CF_marker[i1] >= 0) { P_marker[i1] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i1]; P_diag_data[jj_counter] = - A_diag_data[jj]*a_diag[i]; jj_counter++; } } /* Off-Diagonal part of P */ P_offd_i[i] = jj_counter_offd; if (num_procs > 1) { for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { i1 = A_offd_j[jj]; /*----------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_offd_j * and initialize interpolation weight to zero. 
*-----------------------------------------------------------*/ if (CF_marker_offd[i1] >= 0) { P_marker_offd[i1] = jj_counter_offd; /*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/ P_offd_j[jj_counter_offd] = i1; P_offd_data[jj_counter_offd] = - A_offd_data[jj]*a_diag[i]; jj_counter_offd++; } } } } P_offd_i[i+1] = jj_counter_offd; } hypre_TFree(P_marker, HYPRE_MEMORY_HOST); hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST); } hypre_TFree(a_diag, HYPRE_MEMORY_HOST); P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), total_global_cpts, hypre_ParCSRMatrixColStarts(A), num_cpts_global, 0, P_diag_i[n_fine], P_offd_i[n_fine]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; hypre_ParCSRMatrixOwnsRowStarts(P) = 0; num_cols_P_offd = 0; if (P_offd_size) { P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i=0; i < num_cols_A_offd; i++) P_marker[i] = 0; num_cols_P_offd = 0; for (i=0; i < P_offd_size; i++) { index = P_offd_j[i]; if (!P_marker[index]) { num_cols_P_offd++; P_marker[index] = 1; } } tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST); col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST); index = 0; for (i=0; i < num_cols_P_offd; i++) { while (P_marker[index]==0) index++; tmp_map_offd[i] = index++; } #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i=0; i < P_offd_size; i++) P_offd_j[i] = hypre_BinarySearch(tmp_map_offd, P_offd_j[i], num_cols_P_offd); hypre_TFree(P_marker, HYPRE_MEMORY_HOST); } for (i=0; i < n_fine; i++) if (CF_marker[i] == -3) CF_marker[i] = -1; if (num_cols_P_offd) { hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P; hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd; } hypre_GetCommPkgRTFromCommPkgA(P,A, fine_to_coarse, tmp_map_offd); *P_ptr = P; hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST); hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST); // hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST); hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST); hypre_TFree(jj_count, HYPRE_MEMORY_HOST); hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST); return(0); } /* Scale ParCSR matrix A = scalar * A * A: the target CSR matrix * vector: array of real numbers */ HYPRE_Int hypre_ParCSRMatrixLeftScale(HYPRE_Real *vector, hypre_ParCSRMatrix *A) { HYPRE_Int i, j, n_local; hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); n_local = hypre_CSRMatrixNumRows(A_diag); for (i = 0; i < n_local; i++) { HYPRE_Real factor = vector[i]; for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { A_diag_data[j] *= factor; } for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++) { A_offd_data[j] *= factor; } } return(0); } /************************************************************ * Available methods: * 0: inv(A_FF) approximated by its diagonal inverse * 1: inv(A_FF) 
approximated by sparse approximate inverse *************************************************************/ HYPRE_Int hypre_MGRComputeNonGalerkinCoarseGrid(hypre_ParCSRMatrix *A, hypre_ParCSRMatrix *P, hypre_ParCSRMatrix *RT, HYPRE_Int bsize, HYPRE_Int ordering, HYPRE_Int method, HYPRE_Int Pmax, HYPRE_Int keep_stencil, HYPRE_Int *CF_marker, hypre_ParCSRMatrix **A_h_ptr) { HYPRE_Int *c_marker, *f_marker; HYPRE_Int n_local_fine_grid, i, i1, jj; hypre_ParCSRMatrix *A_cc; hypre_ParCSRMatrix *A_ff; hypre_ParCSRMatrix *A_fc; hypre_ParCSRMatrix *A_cf; hypre_ParCSRMatrix *A_h; hypre_ParCSRMatrix *A_h_correction; HYPRE_Int max_elmts = Pmax; // HYPRE_Real wall_time = 0.; hypre_ParCSRMatrix *P_mod = NULL; HYPRE_Int my_id; MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_MPI_Comm_rank(comm,&my_id); n_local_fine_grid = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A)); c_marker = hypre_CTAlloc(HYPRE_Int, n_local_fine_grid, HYPRE_MEMORY_HOST); f_marker = hypre_CTAlloc(HYPRE_Int, n_local_fine_grid, HYPRE_MEMORY_HOST); for (i = 0; i < n_local_fine_grid; i++) { HYPRE_Int point_type = CF_marker[i]; assert(point_type == 1 || point_type == -1); c_marker[i] = point_type; f_marker[i] = -point_type; } // get the A_cc sub-block hypre_MGRGetSubBlock(A, c_marker, c_marker, 0, &A_cc); if (method == 0) { if (keep_stencil) { //wall_time = time_getWallclockSeconds(); hypre_MGRGetSubBlock(A, c_marker, f_marker, 0, &A_cf); hypre_MGRGetSubBlock(A, f_marker, c_marker, 0, &A_fc); hypre_MGRGetSubBlock(A, f_marker, f_marker, 0, &A_ff); // extract the diagonal of A_ff and compute D_ff_inv hypre_CSRMatrix *A_ff_diag = hypre_ParCSRMatrixDiag(A_ff); HYPRE_Real *A_ff_diag_data = hypre_CSRMatrixData(A_ff_diag); HYPRE_Int *A_ff_diag_i = hypre_CSRMatrixI(A_ff_diag); HYPRE_Int *A_ff_diag_j = hypre_CSRMatrixJ(A_ff_diag); HYPRE_Int n_local_fpoints = hypre_CSRMatrixNumRows(A_ff_diag); HYPRE_Real *D_ff_inv; D_ff_inv = hypre_CTAlloc(HYPRE_Real, n_local_fpoints, HYPRE_MEMORY_HOST); for (i = 0; i < n_local_fpoints; i++) { for (jj = A_ff_diag_i[i]; jj < A_ff_diag_i[i+1]; jj++) { i1 = A_ff_diag_j[jj]; if ( i==i1 ) { D_ff_inv[i] = -1.0/A_ff_diag_data[jj]; } } } // extract the diagonal of A_cf hypre_CSRMatrix *A_cf_diag = hypre_ParCSRMatrixDiag(A_cf); HYPRE_Real *A_cf_diag_data = hypre_CSRMatrixData(A_cf_diag); HYPRE_Int *A_cf_diag_i = hypre_CSRMatrixI(A_cf_diag); HYPRE_Int *A_cf_diag_j = hypre_CSRMatrixJ(A_cf_diag); n_local_fpoints = hypre_CSRMatrixNumRows(A_cf_diag); HYPRE_Real *D_cf; D_cf = hypre_CTAlloc(HYPRE_Real, n_local_fpoints, HYPRE_MEMORY_HOST); for (i = 0; i < n_local_fpoints; i++) { i1 = A_cf_diag_j[A_cf_diag_i[i]]; D_cf[i] = A_cf_diag_data[jj]; } // compute the triple product hypre_ParCSRMatrixLeftScale(D_ff_inv, A_fc); hypre_ParCSRMatrixLeftScale(D_cf, A_fc); A_h_correction = A_fc; hypre_TFree(D_cf, HYPRE_MEMORY_HOST); hypre_TFree(D_ff_inv, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixDestroy(A_ff); hypre_ParCSRMatrixDestroy(A_cf); //wall_time = time_getWallclockSeconds() - wall_time; //hypre_printf("Compute triple product D_cf * D_ff_inv * A_fc time: %1.5f\n", wall_time); } else { //wall_time = time_getWallclockSeconds(); P_mod = hypre_ParCSRMatrixCompleteClone(P); hypre_ParCSRMatrixCopy(P,P_mod,1); HYPRE_Int n_local_rows = hypre_ParCSRMatrixNumRows(P_mod); hypre_CSRMatrix *P_mod_diag = hypre_ParCSRMatrixDiag(P_mod); HYPRE_Int *P_mod_diag_i = hypre_CSRMatrixI(P_mod_diag); HYPRE_Real *P_mod_diag_data = hypre_CSRMatrixData(P_mod_diag); for (i = 0; i < n_local_rows; i ++) { if (CF_marker[i] >= 0) { HYPRE_Int ii = P_mod_diag_i[i]; 
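/* Zero the identity entry of this C-point row of P_mod so that the triple product RT*A*P_mod below yields only the correction term; A_cc is added back separately when the coarse grid is formed. */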
P_mod_diag_data[ii] = 0.0; } } hypre_BoomerAMGBuildCoarseOperator(RT, A, P_mod, &A_h_correction); //wall_time = time_getWallclockSeconds() - wall_time; //hypre_printf("Compute triple product time new: %1.5f\n", wall_time); hypre_ParCSRMatrixDestroy(P_mod); } } else { // Approximate inverse for ideal interploation hypre_MGRGetSubBlock(A, c_marker, f_marker, 0, &A_cf); hypre_MGRGetSubBlock(A, f_marker, c_marker, 0, &A_fc); hypre_MGRGetSubBlock(A, f_marker, f_marker, 0, &A_ff); hypre_ParCSRMatrix *A_ff_inv = NULL; hypre_ParCSRMatrix *minus_Wp = NULL; hypre_MGRApproximateInverse(A_ff, &A_ff_inv); minus_Wp = hypre_ParMatmul(A_ff_inv, A_fc); A_h_correction = hypre_ParMatmul(A_cf, minus_Wp); hypre_ParCSRMatrixDestroy(minus_Wp); hypre_ParCSRMatrixDestroy(A_ff); hypre_ParCSRMatrixDestroy(A_fc); hypre_ParCSRMatrixDestroy(A_cf); } // perform dropping for A_h_correction // specific to multiphase poromechanics // we only keep the diagonal of each block //wall_time = time_getWallclockSeconds(); HYPRE_Int n_local_cpoints = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A_h_correction)); hypre_CSRMatrix *A_h_correction_diag = hypre_ParCSRMatrixDiag(A_h_correction); HYPRE_Real *A_h_correction_diag_data = hypre_CSRMatrixData(A_h_correction_diag); HYPRE_Int *A_h_correction_diag_i = hypre_CSRMatrixI(A_h_correction_diag); HYPRE_Int *A_h_correction_diag_j = hypre_CSRMatrixJ(A_h_correction_diag); HYPRE_Int ncol_diag = hypre_CSRMatrixNumCols(A_h_correction_diag); hypre_CSRMatrix *A_h_correction_offd = hypre_ParCSRMatrixOffd(A_h_correction); HYPRE_Real *A_h_correction_offd_data = hypre_CSRMatrixData(A_h_correction_offd); HYPRE_Int *A_h_correction_offd_i = hypre_CSRMatrixI(A_h_correction_offd); HYPRE_Int *A_h_correction_offd_j = hypre_CSRMatrixJ(A_h_correction_offd); // Allow for maximum dropping with Pmax = 0 //if (Pmax > 0) //{ if (ordering == 0) // interleaved ordering { HYPRE_Int *A_h_correction_diag_i_new = hypre_CTAlloc(HYPRE_Int, n_local_cpoints+1, HYPRE_MEMORY_HOST); HYPRE_Int *A_h_correction_diag_j_new = hypre_CTAlloc(HYPRE_Int, (bsize + max_elmts)*n_local_cpoints, HYPRE_MEMORY_HOST); HYPRE_Complex *A_h_correction_diag_data_new = hypre_CTAlloc(HYPRE_Complex, (bsize + max_elmts)*n_local_cpoints, HYPRE_MEMORY_HOST); HYPRE_Int num_nonzeros_diag_new = 0; HYPRE_Int *A_h_correction_offd_i_new = hypre_CTAlloc(HYPRE_Int, n_local_cpoints+1, HYPRE_MEMORY_HOST); HYPRE_Int *A_h_correction_offd_j_new = hypre_CTAlloc(HYPRE_Int, max_elmts*n_local_cpoints, HYPRE_MEMORY_HOST); HYPRE_Complex *A_h_correction_offd_data_new = hypre_CTAlloc(HYPRE_Complex, max_elmts*n_local_cpoints, HYPRE_MEMORY_HOST); HYPRE_Int num_nonzeros_offd_new = 0; for (i = 0; i < n_local_cpoints; i++) { HYPRE_Int max_num_nonzeros = A_h_correction_diag_i[i+1] - A_h_correction_diag_i[i] + A_h_correction_offd_i[i+1] - A_h_correction_offd_i[i]; HYPRE_Int *aux_j = hypre_CTAlloc(HYPRE_Int, max_num_nonzeros, HYPRE_MEMORY_HOST); HYPRE_Real *aux_data = hypre_CTAlloc(HYPRE_Real, max_num_nonzeros, HYPRE_MEMORY_HOST); HYPRE_Int row_start = i - (i % bsize); HYPRE_Int row_stop = row_start + bsize - 1; HYPRE_Int cnt = 0; for (jj = A_h_correction_offd_i[i]; jj < A_h_correction_offd_i[i+1]; jj++) { aux_j[cnt] = A_h_correction_offd_j[jj] + ncol_diag; aux_data[cnt] = A_h_correction_offd_data[jj]; cnt++; } for (jj = A_h_correction_diag_i[i]; jj < A_h_correction_diag_i[i+1]; jj++) { aux_j[cnt] = A_h_correction_diag_j[jj]; aux_data[cnt] = A_h_correction_diag_data[jj]; cnt++; } hypre_qsort2_abs(aux_j, aux_data, 0, cnt-1); for (jj = A_h_correction_diag_i[i]; jj < 
A_h_correction_diag_i[i+1]; jj++) { i1 = A_h_correction_diag_j[jj]; if (i1 >= row_start && i1 <= row_stop) { // copy data to new arrays A_h_correction_diag_j_new[num_nonzeros_diag_new] = i1; A_h_correction_diag_data_new[num_nonzeros_diag_new] = A_h_correction_diag_data[jj]; ++num_nonzeros_diag_new; } else { // Do nothing } } if (max_elmts > 0) { for (jj = 0; jj < hypre_min(max_elmts, cnt); jj++) { HYPRE_Int col_idx = aux_j[jj]; HYPRE_Real col_value = aux_data[jj]; if (col_idx < ncol_diag && (col_idx < row_start || col_idx > row_stop)) { A_h_correction_diag_j_new[num_nonzeros_diag_new] = col_idx; A_h_correction_diag_data_new[num_nonzeros_diag_new] = col_value; ++num_nonzeros_diag_new; } else if (col_idx >= ncol_diag) { A_h_correction_offd_j_new[num_nonzeros_offd_new] = col_idx - ncol_diag; A_h_correction_offd_data_new[num_nonzeros_offd_new] = col_value; ++num_nonzeros_offd_new; } } } A_h_correction_diag_i_new[i+1] = num_nonzeros_diag_new; A_h_correction_offd_i_new[i+1] = num_nonzeros_offd_new; hypre_TFree(aux_j, HYPRE_MEMORY_HOST); hypre_TFree(aux_data, HYPRE_MEMORY_HOST); } hypre_TFree(A_h_correction_diag_i, HYPRE_MEMORY_HOST); hypre_TFree(A_h_correction_diag_j, HYPRE_MEMORY_HOST); hypre_TFree(A_h_correction_diag_data, HYPRE_MEMORY_HOST); hypre_CSRMatrixI(A_h_correction_diag) = A_h_correction_diag_i_new; hypre_CSRMatrixJ(A_h_correction_diag) = A_h_correction_diag_j_new; hypre_CSRMatrixData(A_h_correction_diag) = A_h_correction_diag_data_new; hypre_CSRMatrixNumNonzeros(A_h_correction_diag) = num_nonzeros_diag_new; if (A_h_correction_offd_i) hypre_TFree(A_h_correction_offd_i, HYPRE_MEMORY_HOST); if (A_h_correction_offd_j) hypre_TFree(A_h_correction_offd_j, HYPRE_MEMORY_HOST); if (A_h_correction_offd_data) hypre_TFree(A_h_correction_offd_data, HYPRE_MEMORY_HOST); hypre_CSRMatrixI(A_h_correction_offd) = A_h_correction_offd_i_new; hypre_CSRMatrixJ(A_h_correction_offd) = A_h_correction_offd_j_new; hypre_CSRMatrixData(A_h_correction_offd) = A_h_correction_offd_data_new; hypre_CSRMatrixNumNonzeros(A_h_correction_offd) = num_nonzeros_offd_new; } else { hypre_printf("Error!! 
Block ordering is not supported at the moment\n"); exit(-1); } //} //hypre_MGRParCSRMatrixTruncate(A_h_correction, max_elmts); //wall_time = time_getWallclockSeconds() - wall_time; //hypre_printf("Filter A_h_correction time: %1.5f\n", wall_time); //hypre_ParCSRMatrixPrintIJ(A_h_correction,1,1,"A_h_correction_filtered"); // coarse grid / schur complement hypre_ParcsrAdd(1.0, A_cc, 1.0, A_h_correction, &A_h); *A_h_ptr = A_h; //hypre_ParCSRMatrixPrintIJ(A_h,1,1,"A_h"); hypre_ParCSRMatrixDestroy(A_cc); hypre_ParCSRMatrixDestroy(A_h_correction); hypre_TFree(c_marker, HYPRE_MEMORY_HOST); hypre_TFree(f_marker, HYPRE_MEMORY_HOST); return hypre_error_flag; } HYPRE_Int hypre_MGRComputeAlgebraicFixedStress(hypre_ParCSRMatrix *A, HYPRE_BigInt *mgr_idx_array, HYPRE_Solver A_ff_solver) { HYPRE_Int *U_marker, *S_marker, *P_marker; HYPRE_Int n_fine, i; HYPRE_BigInt ibegin; hypre_ParCSRMatrix *A_up; hypre_ParCSRMatrix *A_uu; hypre_ParCSRMatrix *A_su; hypre_ParCSRMatrix *A_pu; hypre_ParVector *e1_vector; hypre_ParVector *e2_vector; hypre_ParVector *e3_vector; hypre_ParVector *e4_vector; hypre_ParVector *e5_vector; n_fine = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A)); ibegin = hypre_ParCSRMatrixFirstRowIndex(A); hypre_assert(ibegin == mgr_idx_array[0]); U_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); S_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); for (i = 0; i < n_fine; i++) { U_marker[i] = -1; S_marker[i] = -1; P_marker[i] = -1; } // create C and F markers for (i = 0; i < n_fine; i++) { if (i < mgr_idx_array[1] - ibegin) { U_marker[i] = 1; } else if (i >= (mgr_idx_array[1] - ibegin) && i < (mgr_idx_array[2] - ibegin)) { S_marker[i] = 1; } else { P_marker[i] = 1; } } // Get A_up hypre_MGRGetSubBlock(A, U_marker, P_marker, 0, &A_up); // GetA_uu hypre_MGRGetSubBlock(A, U_marker, U_marker, 0, &A_uu); // Get A_su hypre_MGRGetSubBlock(A, S_marker, U_marker, 0, &A_su); // Get A_pu hypre_MGRGetSubBlock(A, P_marker, U_marker, 0, &A_pu); e1_vector = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_up), hypre_ParCSRMatrixGlobalNumCols(A_up), hypre_ParCSRMatrixColStarts(A_up)); hypre_ParVectorInitialize(e1_vector); hypre_ParVectorSetPartitioningOwner(e1_vector,0); hypre_ParVectorSetConstantValues(e1_vector, 1.0); e2_vector = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_uu), hypre_ParCSRMatrixGlobalNumRows(A_uu), hypre_ParCSRMatrixRowStarts(A_uu)); hypre_ParVectorInitialize(e2_vector); hypre_ParVectorSetPartitioningOwner(e2_vector,0); hypre_ParVectorSetConstantValues(e2_vector, 0.0); e3_vector = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_uu), hypre_ParCSRMatrixGlobalNumRows(A_uu), hypre_ParCSRMatrixRowStarts(A_uu)); hypre_ParVectorInitialize(e3_vector); hypre_ParVectorSetPartitioningOwner(e3_vector,0); hypre_ParVectorSetConstantValues(e3_vector, 0.0); e4_vector = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_su), hypre_ParCSRMatrixGlobalNumRows(A_su), hypre_ParCSRMatrixRowStarts(A_su)); hypre_ParVectorInitialize(e4_vector); hypre_ParVectorSetPartitioningOwner(e4_vector,0); hypre_ParVectorSetConstantValues(e4_vector, 0.0); e5_vector = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_pu), hypre_ParCSRMatrixGlobalNumRows(A_pu), hypre_ParCSRMatrixRowStarts(A_pu)); hypre_ParVectorInitialize(e5_vector); hypre_ParVectorSetPartitioningOwner(e5_vector,0); hypre_ParVectorSetConstantValues(e5_vector, 0.0); // compute e2 = A_up * e1 hypre_ParCSRMatrixMatvecOutOfPlace(1.0, A_up, e1_vector, 0.0, e2_vector, e2_vector); // solve e3 
= A_uu^-1 * e2
   hypre_BoomerAMGSolve(A_ff_solver, A_uu, e2_vector, e3_vector);
   // compute e4 = A_su * e3
   hypre_ParCSRMatrixMatvecOutOfPlace(1.0, A_su, e3_vector, 0.0, e4_vector, e4_vector);
   // print e4
   hypre_ParVectorPrintIJ(e4_vector,1,"Dsp");
   // compute e5 = A_pu * e3
   hypre_ParCSRMatrixMatvecOutOfPlace(1.0, A_pu, e3_vector, 0.0, e5_vector, e5_vector);
   hypre_ParVectorPrintIJ(e5_vector,1,"Dpp");

   hypre_ParVectorDestroy(e1_vector);
   hypre_ParVectorDestroy(e2_vector);
   hypre_ParVectorDestroy(e3_vector);
   // free the remaining work vectors as well
   hypre_ParVectorDestroy(e4_vector);
   hypre_ParVectorDestroy(e5_vector);
   hypre_ParCSRMatrixDestroy(A_uu);
   hypre_ParCSRMatrixDestroy(A_up);
   hypre_ParCSRMatrixDestroy(A_pu);
   hypre_ParCSRMatrixDestroy(A_su);
   hypre_TFree(U_marker, HYPRE_MEMORY_HOST);
   hypre_TFree(S_marker, HYPRE_MEMORY_HOST);
   hypre_TFree(P_marker, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}

HYPRE_Int
hypre_MGRApproximateInverse(hypre_ParCSRMatrix *A, hypre_ParCSRMatrix **A_inv)
{
   HYPRE_Int print_level, mr_max_row_nnz, mr_max_iter, nsh_max_row_nnz, nsh_max_iter, mr_col_version;
   HYPRE_Real mr_tol, nsh_tol;
   HYPRE_Real *droptol = hypre_CTAlloc(HYPRE_Real, 2, HYPRE_MEMORY_HOST);
   hypre_ParCSRMatrix *approx_A_inv = NULL;

   print_level = 0;
   nsh_max_iter = 2;
   nsh_max_row_nnz = 2; // default 1000
   mr_max_iter = 1;
   mr_tol = 1.0e-3;
   mr_max_row_nnz = 2; // default 800
   mr_col_version = 0;
   nsh_tol = 1.0e-3;
   droptol[0] = 1.0e-2;
   droptol[1] = 1.0e-2;

   hypre_ILUParCSRInverseNSH(A, &approx_A_inv, droptol, mr_tol, nsh_tol, DIVIDE_TOL,
                             mr_max_row_nnz, nsh_max_row_nnz, mr_max_iter, nsh_max_iter,
                             mr_col_version, print_level);
   *A_inv = approx_A_inv;

   if (droptol) hypre_TFree(droptol, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}

HYPRE_Int
hypre_MGRBuildInterpApproximateInverseExp(hypre_ParCSRMatrix *A,
                                          hypre_ParCSRMatrix *S,
                                          HYPRE_Int *CF_marker,
                                          HYPRE_BigInt *num_cpts_global,
                                          HYPRE_Int debug_flag,
                                          hypre_ParCSRMatrix **P_ptr)
{
   HYPRE_Int *C_marker;
   HYPRE_Int *F_marker;
   hypre_ParCSRMatrix *A_fc;
   hypre_ParCSRMatrix *minus_Wp;
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRMatrix *P;
   HYPRE_BigInt *col_map_offd_P;
   hypre_CSRMatrix *P_diag;
   hypre_CSRMatrix *P_offd;
   HYPRE_Real *P_diag_data;
   HYPRE_Int *P_diag_i;
   HYPRE_Int *P_diag_j;
   HYPRE_Real *P_offd_data;
   HYPRE_Int *P_offd_i;
   HYPRE_Int *P_offd_j;
   HYPRE_Int P_diag_size, P_offd_size;
   HYPRE_Int jj_counter, jj_counter_offd;
   HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */
   HYPRE_Int n_fine = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));
   HYPRE_Int *fine_to_coarse = NULL;
   HYPRE_Int coarse_counter;
   HYPRE_BigInt total_global_cpts;
   HYPRE_Int num_cols_P_offd;
   // HYPRE_BigInt my_first_cpt;
   HYPRE_Int i, jj;
   HYPRE_Real one = 1.0;
   HYPRE_Int my_id;
   HYPRE_Int num_procs;
   // HYPRE_Int num_threads;
   // HYPRE_Real wall_time; /* for debugging instrumentation */

   C_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
   F_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);

   // create C and F markers
   for (i = 0; i < n_fine; i++)
   {
      C_marker[i] = (CF_marker[i] == 1) ? 1 : -1;
      F_marker[i] = (CF_marker[i] == 1) ?
-1: 1; } // Get A_FC hypre_MGRGetSubBlock(A, F_marker, C_marker, 0, &A_fc); // compute -Wp minus_Wp = hypre_ParMatmul(S, A_fc); hypre_CSRMatrix *minus_Wp_diag = hypre_ParCSRMatrixDiag(minus_Wp); HYPRE_Real *minus_Wp_diag_data = hypre_CSRMatrixData(minus_Wp_diag); HYPRE_Int *minus_Wp_diag_i = hypre_CSRMatrixI(minus_Wp_diag); HYPRE_Int *minus_Wp_diag_j = hypre_CSRMatrixJ(minus_Wp_diag); hypre_CSRMatrix *minus_Wp_offd = hypre_ParCSRMatrixOffd(minus_Wp); HYPRE_Real *minus_Wp_offd_data = hypre_CSRMatrixData(minus_Wp_offd); HYPRE_Int *minus_Wp_offd_i = hypre_CSRMatrixI(minus_Wp_offd); HYPRE_Int *minus_Wp_offd_j = hypre_CSRMatrixJ(minus_Wp_offd); hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm,&my_id); // num_threads = hypre_NumThreads(); #ifdef HYPRE_NO_GLOBAL_PARTITION // my_first_cpt = num_cpts_global[0]; if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1]; hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); #else // my_first_cpt = num_cpts_global[my_id]; total_global_cpts = num_cpts_global[num_procs]; #endif /*----------------------------------------------------------------------- * First Pass: Determine size of P and fill in fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. *-----------------------------------------------------------------------*/ fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1; jj_counter = start_indexing; jj_counter_offd = start_indexing; /*----------------------------------------------------------------------- * Loop over fine grid. *-----------------------------------------------------------------------*/ HYPRE_Int row_counter = 0; coarse_counter = 0; for (i = 0; i < n_fine; i++) { /*-------------------------------------------------------------------- * If i is a C-point, interpolation is the identity. Also set up * mapping vector. *--------------------------------------------------------------------*/ if (CF_marker[i] > 0) { jj_counter++; fine_to_coarse[i] = coarse_counter; coarse_counter++; } else { /*-------------------------------------------------------------------- * If i is an F-point, interpolation is the approximation of A_{ff}^{-1}A_{fc} *--------------------------------------------------------------------*/ for (jj = minus_Wp_diag_i[row_counter]; jj < minus_Wp_diag_i[row_counter+1]; jj++) { jj_counter++; } if (num_procs > 1) { for (jj = minus_Wp_offd_i[row_counter]; jj < minus_Wp_offd_i[row_counter+1]; jj++) { jj_counter_offd++; } } row_counter++; } } /*----------------------------------------------------------------------- * Allocate arrays. 
*-----------------------------------------------------------------------*/ P_diag_size = jj_counter; P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE); P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_DEVICE); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_DEVICE); P_diag_i[n_fine] = jj_counter; P_offd_size = jj_counter_offd; P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE); P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_DEVICE); P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_DEVICE); /*----------------------------------------------------------------------- * Intialize some stuff. *-----------------------------------------------------------------------*/ jj_counter = start_indexing; jj_counter_offd = start_indexing; /*----------------------------------------------------------------------- * Send and receive fine_to_coarse info. *-----------------------------------------------------------------------*/ row_counter = 0; for (i = 0; i < n_fine; i++) { /*-------------------------------------------------------------------- * If i is a c-point, interpolation is the identity. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { P_diag_i[i] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i]; P_diag_data[jj_counter] = one; jj_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, build interpolation. *--------------------------------------------------------------------*/ else { /* Diagonal part of P */ P_diag_i[i] = jj_counter; for (jj = minus_Wp_diag_i[row_counter]; jj < minus_Wp_diag_i[row_counter+1]; jj++) { P_diag_j[jj_counter] = minus_Wp_diag_j[jj]; P_diag_data[jj_counter] = - minus_Wp_diag_data[jj]; jj_counter++; } /* Off-Diagonal part of P */ P_offd_i[i] = jj_counter_offd; if (num_procs > 1) { for (jj = minus_Wp_offd_i[row_counter]; jj < minus_Wp_offd_i[row_counter+1]; jj++) { P_offd_j[jj_counter_offd] = minus_Wp_offd_j[jj]; P_offd_data[jj_counter_offd] = - minus_Wp_offd_data[jj]; jj_counter_offd++; } } row_counter++; } P_offd_i[i+1] = jj_counter_offd; } P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), total_global_cpts, hypre_ParCSRMatrixColStarts(A), num_cpts_global, 0, P_diag_i[n_fine], P_offd_i[n_fine]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; hypre_ParCSRMatrixOwnsRowStarts(P) = 0; num_cols_P_offd = hypre_CSRMatrixNumCols(minus_Wp_offd); HYPRE_BigInt *col_map_offd_tmp = hypre_ParCSRMatrixColMapOffd(minus_Wp); if (P_offd_size) { col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST); for (i=0; i < num_cols_P_offd; i++) { col_map_offd_P[i] = col_map_offd_tmp[i]; } } if (num_cols_P_offd) { hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P; hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd; } hypre_MatvecCommPkgCreate(P); *P_ptr = P; hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST); hypre_TFree(C_marker, HYPRE_MEMORY_HOST); hypre_TFree(F_marker, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixDestroy(A_fc); hypre_ParCSRMatrixDestroy(minus_Wp); return 0; } HYPRE_Int hypre_MGRBuildInterpApproximateInverse(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, HYPRE_BigInt *num_cpts_global, HYPRE_Int debug_flag, 
hypre_ParCSRMatrix **P_ptr) { HYPRE_Int *C_marker; HYPRE_Int *F_marker; hypre_ParCSRMatrix *A_ff; hypre_ParCSRMatrix *A_fc; hypre_ParCSRMatrix *A_ff_inv; hypre_ParCSRMatrix *minus_Wp; MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRMatrix *P; HYPRE_BigInt *col_map_offd_P; hypre_CSRMatrix *P_diag; hypre_CSRMatrix *P_offd; HYPRE_Real *P_diag_data; HYPRE_Int *P_diag_i; HYPRE_Int *P_diag_j; HYPRE_Real *P_offd_data; HYPRE_Int *P_offd_i; HYPRE_Int *P_offd_j; HYPRE_Int P_diag_size, P_offd_size; HYPRE_Int jj_counter,jj_counter_offd; //HYPRE_Int jj_begin_row,jj_begin_row_offd; //HYPRE_Int jj_end_row,jj_end_row_offd; HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */ HYPRE_Int n_fine = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A)); HYPRE_Int *fine_to_coarse = NULL; //HYPRE_Int *coarse_counter; HYPRE_Int coarse_counter; HYPRE_BigInt total_global_cpts; HYPRE_Int num_cols_P_offd; // HYPRE_BigInt my_first_cpt; HYPRE_Int i,jj; HYPRE_Real one = 1.0; HYPRE_Int my_id; HYPRE_Int num_procs; // HYPRE_Int num_threads; // HYPRE_Real wall_time; /* for debugging instrumentation */ C_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); F_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); // create C and F markers for (i = 0; i < n_fine; i++) { C_marker[i] = (CF_marker[i] == 1)? 1: -1; F_marker[i] = (CF_marker[i] == 1) ? -1: 1; } // Get A_FF hypre_MGRGetSubBlock(A, F_marker, F_marker, 0, &A_ff); // Get A_FC hypre_MGRGetSubBlock(A, F_marker, C_marker, 0, &A_fc); hypre_MGRApproximateInverse(A_ff, &A_ff_inv); hypre_ParCSRMatrixPrintIJ(A_ff_inv, 1, 1, "A_ff_inv"); hypre_ParCSRMatrixPrintIJ(A_fc, 1, 1, "A_fc"); minus_Wp = hypre_ParMatmul(A_ff_inv, A_fc); hypre_ParCSRMatrixPrintIJ(minus_Wp, 1, 1, "Wp"); hypre_CSRMatrix *minus_Wp_diag = hypre_ParCSRMatrixDiag(minus_Wp); HYPRE_Real *minus_Wp_diag_data = hypre_CSRMatrixData(minus_Wp_diag); HYPRE_Int *minus_Wp_diag_i = hypre_CSRMatrixI(minus_Wp_diag); HYPRE_Int *minus_Wp_diag_j = hypre_CSRMatrixJ(minus_Wp_diag); hypre_CSRMatrix *minus_Wp_offd = hypre_ParCSRMatrixOffd(minus_Wp); HYPRE_Real *minus_Wp_offd_data = hypre_CSRMatrixData(minus_Wp_offd); HYPRE_Int *minus_Wp_offd_i = hypre_CSRMatrixI(minus_Wp_offd); HYPRE_Int *minus_Wp_offd_j = hypre_CSRMatrixJ(minus_Wp_offd); //hypre_CSRMatrix *minus_Wp_offd = hypre_ParCSRMatrixOffd(minus_Wp); //HYPRE_Int num_cols_minus_Wp_offd = hypre_CSRMatrixNumCols(minus_Wp_offd); hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm,&my_id); // num_threads = hypre_NumThreads(); #ifdef HYPRE_NO_GLOBAL_PARTITION // my_first_cpt = num_cpts_global[0]; if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1]; hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); #else // my_first_cpt = num_cpts_global[my_id]; total_global_cpts = num_cpts_global[num_procs]; #endif /*----------------------------------------------------------------------- * First Pass: Determine size of P and fill in fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. 
*-----------------------------------------------------------------------*/ //coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); //jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); //jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1; jj_counter = start_indexing; jj_counter_offd = start_indexing; /*----------------------------------------------------------------------- * Loop over fine grid. *-----------------------------------------------------------------------*/ HYPRE_Int row_counter = 0; coarse_counter = 0; for (i = 0; i < n_fine; i++) { /*-------------------------------------------------------------------- * If i is a C-point, interpolation is the identity. Also set up * mapping vector. *--------------------------------------------------------------------*/ if (CF_marker[i] > 0) { //jj_count[j]++; //fine_to_coarse[i] = coarse_counter[j]; //coarse_counter[j]++; jj_counter++; fine_to_coarse[i] = coarse_counter; coarse_counter++; } else { /*-------------------------------------------------------------------- * If i is an F-point, interpolation is the approximation of A_{ff}^{-1}A_{fc} *--------------------------------------------------------------------*/ for (jj = minus_Wp_diag_i[row_counter]; jj < minus_Wp_diag_i[row_counter+1]; jj++) { //jj_count[j]++; jj_counter++; } if (num_procs > 1) { for (jj = minus_Wp_offd_i[row_counter]; jj < minus_Wp_offd_i[row_counter+1]; jj++) { //jj_count_offd[j]++; jj_counter_offd++; } } row_counter++; } } /*----------------------------------------------------------------------- * Allocate arrays. *-----------------------------------------------------------------------*/ /* for (i=0; i < num_threads-1; i++) { coarse_counter[i+1] += coarse_counter[i]; jj_count[i+1] += jj_count[i]; jj_count_offd[i+1] += jj_count_offd[i]; } i = num_threads-1; jj_counter = jj_count[i]; jj_counter_offd = jj_count_offd[i]; */ P_diag_size = jj_counter; P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE); P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_DEVICE); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_DEVICE); P_diag_i[n_fine] = jj_counter; P_offd_size = jj_counter_offd; P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE); P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_DEVICE); P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_DEVICE); /*----------------------------------------------------------------------- * Intialize some stuff. *-----------------------------------------------------------------------*/ jj_counter = start_indexing; jj_counter_offd = start_indexing; /* if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Internal work 1 = %f\n", my_id, wall_time); fflush(NULL); } */ /*----------------------------------------------------------------------- * Send and receive fine_to_coarse info. 
*-----------------------------------------------------------------------*/ /* if (num_procs > 1) { if (debug_flag==4) wall_time = time_getWallclockSeconds(); fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_minus_Wp_offd, HYPRE_MEMORY_HOST); for (i = 0; i < n_fine; i++) { fine_to_coarse[i] += my_first_cpt; } comm_pkg = hypre_ParCSRMatrixCommPkg(minus_Wp); if (!comm_pkg) { hypre_MatvecCommPkgCreate(minus_Wp); comm_pkg = hypre_ParCSRMatrixCommPkg(minus_Wp); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, fine_to_coarse_offd); hypre_ParCSRCommHandleDestroy(comm_handle); if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n", my_id, wall_time); fflush(NULL); } if (debug_flag==4) wall_time = time_getWallclockSeconds(); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt; } */ row_counter = 0; for (i = 0; i < n_fine; i++) { /*-------------------------------------------------------------------- * If i is a c-point, interpolation is the identity. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { P_diag_i[i] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i]; P_diag_data[jj_counter] = one; jj_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, build interpolation. 
*--------------------------------------------------------------------*/ else { /* Diagonal part of P */ P_diag_i[i] = jj_counter; for (jj = minus_Wp_diag_i[row_counter]; jj < minus_Wp_diag_i[row_counter+1]; jj++) { //P_marker[row_counter] = jj_counter; P_diag_j[jj_counter] = minus_Wp_diag_j[jj]; P_diag_data[jj_counter] = - minus_Wp_diag_data[jj]; jj_counter++; } /* Off-Diagonal part of P */ P_offd_i[i] = jj_counter_offd; if (num_procs > 1) { for (jj = minus_Wp_offd_i[row_counter]; jj < minus_Wp_offd_i[row_counter+1]; jj++) { //P_marker_offd[row_counter] = jj_counter_offd; P_offd_j[jj_counter_offd] = minus_Wp_offd_j[jj]; P_offd_data[jj_counter_offd] = - minus_Wp_offd_data[jj]; jj_counter_offd++; } } row_counter++; } P_offd_i[i+1] = jj_counter_offd; } //hypre_printf("Num rows of Wp = %d\n", row_counter); //P_offd_i[row_counter] = jj_counter_offd; P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), total_global_cpts, hypre_ParCSRMatrixColStarts(A), num_cpts_global, 0, P_diag_i[n_fine], P_offd_i[n_fine]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; hypre_ParCSRMatrixOwnsRowStarts(P) = 0; num_cols_P_offd = hypre_CSRMatrixNumCols(minus_Wp_offd); HYPRE_BigInt *col_map_offd_tmp = hypre_ParCSRMatrixColMapOffd(minus_Wp); if (P_offd_size) { col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST); for (i=0; i < num_cols_P_offd; i++) { col_map_offd_P[i] = col_map_offd_tmp[i]; } } /* num_cols_P_offd = 0; if (P_offd_size) { P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_minus_Wp_offd, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i=0; i < num_cols_minus_Wp_offd; i++) P_marker[i] = 0; num_cols_P_offd = 0; for (i=0; i < P_offd_size; i++) { index = P_offd_j[i]; if (!P_marker[index]) { num_cols_P_offd++; P_marker[index] = 1; } } col_map_offd_P = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST); index = 0; for (i=0; i < num_cols_P_offd; i++) { while (P_marker[index]==0) index++; col_map_offd_P[i] = index++; } #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i=0; i < P_offd_size; i++) P_offd_j[i] = hypre_BinarySearch(col_map_offd_P, P_offd_j[i], num_cols_P_offd); hypre_TFree(P_marker, HYPRE_MEMORY_HOST); } */ if (num_cols_P_offd) { hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P; hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd; } hypre_MatvecCommPkgCreate(P); //hypre_GetCommPkgRTFromCommPkgA(P,A, fine_to_coarse_offd); *P_ptr = P; //hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); //hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST); //if (fine_to_coarse_offd) hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST); //hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST); //hypre_TFree(jj_count, HYPRE_MEMORY_HOST); //hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST); hypre_TFree(C_marker, HYPRE_MEMORY_HOST); hypre_TFree(F_marker, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixDestroy(A_ff); hypre_ParCSRMatrixDestroy(A_fc); hypre_ParCSRMatrixDestroy(A_ff_inv); hypre_ParCSRMatrixDestroy(minus_Wp); return 0; } /* Setup interpolation operator */ HYPRE_Int hypre_MGRBuildInterp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, hypre_ParCSRMatrix *S, 
HYPRE_BigInt *num_cpts_global, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag, HYPRE_Real trunc_factor, HYPRE_Int max_elmts, HYPRE_Int *col_offd_S_to_A, hypre_ParCSRMatrix **P, HYPRE_Int interp_type, HYPRE_Int numsweeps) { //HYPRE_Int i; hypre_ParCSRMatrix *P_ptr = NULL; //HYPRE_Real jac_trunc_threshold = trunc_factor; //HYPRE_Real jac_trunc_threshold_minus = 0.5*jac_trunc_threshold; /* Interpolation for each level */ if (interp_type <3) { hypre_MGRBuildP( A,CF_marker,num_cpts_global,interp_type,debug_flag,&P_ptr); /* Could do a few sweeps of Jacobi to further improve P */ //for(i=0; i<numsweeps; i++) // hypre_BoomerAMGJacobiInterp(A, &P_ptr, S,1, NULL, CF_marker, 0, jac_trunc_threshold, jac_trunc_threshold_minus ); //hypre_BoomerAMGInterpTruncation(P_ptr, trunc_factor, max_elmts); } else if (interp_type == 4) { hypre_MGRBuildInterpApproximateInverse(A, CF_marker, num_cpts_global, debug_flag, &P_ptr); hypre_BoomerAMGInterpTruncation(P_ptr, trunc_factor, max_elmts); } else if (interp_type == 99) { hypre_MGRBuildInterpApproximateInverseExp(A, S, CF_marker, num_cpts_global, debug_flag, &P_ptr); hypre_BoomerAMGInterpTruncation(P_ptr, trunc_factor, max_elmts); } else { /* Classical modified interpolation */ hypre_BoomerAMGBuildInterp(A, CF_marker, S, num_cpts_global,1, NULL,debug_flag, trunc_factor, max_elmts, col_offd_S_to_A, &P_ptr); /* Do k steps of Jacobi build W for P = [-W I]. * Note that BoomerAMGJacobiInterp assumes you have some initial P, * hence we need to initialize P as above, before calling this routine. * If numsweeps = 0, the following step is skipped and P is returned as is. * Looping here is equivalent to improving P by Jacobi interpolation */ //for(i=0; i<numsweeps; i++) // hypre_BoomerAMGJacobiInterp(A, &P_ptr, S,1, NULL, CF_marker, // 0, jac_trunc_threshold, // jac_trunc_threshold_minus ); } /* set pointer to P */ *P = P_ptr; return hypre_error_flag; } /* Setup restriction operator */ HYPRE_Int hypre_MGRBuildRestrict(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, HYPRE_BigInt *num_cpts_global, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag, HYPRE_Real trunc_factor, HYPRE_Int max_elmts, HYPRE_Real S_commpkg_switch, HYPRE_Real strong_threshold, HYPRE_Real max_row_sum, hypre_ParCSRMatrix **R, HYPRE_Int restrict_type, HYPRE_Int numsweeps) { // HYPRE_Int i; hypre_ParCSRMatrix *R_ptr = NULL; hypre_ParCSRMatrix *AT = NULL; hypre_ParCSRMatrix *ST = NULL; HYPRE_Int *col_offd_ST_to_AT = NULL; // HYPRE_Real jac_trunc_threshold = trunc_factor; // HYPRE_Real jac_trunc_threshold_minus = 0.5*jac_trunc_threshold; /* Build AT (transpose A) */ if (restrict_type > 0) { hypre_ParCSRMatrixTranspose(A, &AT, 1); } if (restrict_type > 5) { /* Build new strength matrix */ hypre_BoomerAMGCreateS(AT, strong_threshold, max_row_sum, 1, NULL, &ST); /* use appropriate communication package for Strength matrix */ if (strong_threshold > S_commpkg_switch) hypre_BoomerAMGCreateSCommPkg(AT, ST, &col_offd_ST_to_AT); } /* Interpolation for each level */ if (restrict_type == 0) { hypre_MGRBuildP(A, CF_marker, num_cpts_global, restrict_type, debug_flag, &R_ptr); } else if (restrict_type == 1 || restrict_type == 2) { hypre_MGRBuildP(AT, CF_marker, num_cpts_global, restrict_type, debug_flag, &R_ptr); /* Could do a few sweeps of Jacobi to further improve P */ //for(i=0; i<numsweeps; i++) // hypre_BoomerAMGJacobiInterp(A, &R_ptr, S,1, NULL, CF_marker, 0, jac_trunc_threshold, jac_trunc_threshold_minus ); //hypre_BoomerAMGInterpTruncation(R_ptr, trunc_factor, max_elmts); } else if 
(restrict_type == 4) { hypre_MGRBuildInterpApproximateInverse(A, CF_marker, num_cpts_global, debug_flag, &R_ptr); hypre_BoomerAMGInterpTruncation(R_ptr, trunc_factor, max_elmts); } else { /* Classical modified interpolation */ hypre_BoomerAMGBuildInterp(AT, CF_marker, ST, num_cpts_global,1, NULL,debug_flag, trunc_factor, max_elmts, col_offd_ST_to_AT, &R_ptr); /* Do k steps of Jacobi build W for P = [-W I]. * Note that BoomerAMGJacobiInterp assumes you have some initial P, * hence we need to initialize P as above, before calling this routine. * If numsweeps = 0, the following step is skipped and P is returned as is. * Looping here is equivalent to improving P by Jacobi interpolation */ // for(i=0; i<numsweeps; i++) // hypre_BoomerAMGJacobiInterp(A, &R_ptr, S,1, NULL, CF_marker, 0, // jac_trunc_threshold, jac_trunc_threshold_minus); } /* set pointer to P */ *R = R_ptr; /* Free memory */ if (restrict_type > 0) { hypre_ParCSRMatrixDestroy(AT); } if (restrict_type > 5) { hypre_ParCSRMatrixDestroy(ST); if (col_offd_ST_to_AT) hypre_TFree(col_offd_ST_to_AT, HYPRE_MEMORY_HOST); } return hypre_error_flag; } void hypre_blas_smat_inv_n4 (HYPRE_Real *a) { const HYPRE_Real a11 = a[0], a12 = a[1], a13 = a[2], a14 = a[3]; const HYPRE_Real a21 = a[4], a22 = a[5], a23 = a[6], a24 = a[7]; const HYPRE_Real a31 = a[8], a32 = a[9], a33 = a[10], a34 = a[11]; const HYPRE_Real a41 = a[12], a42 = a[13], a43 = a[14], a44 = a[15]; const HYPRE_Real M11 = a22*a33*a44 + a23*a34*a42 + a24*a32*a43 - a22*a34*a43 - a23*a32*a44 - a24*a33*a42; const HYPRE_Real M12 = a12*a34*a43 + a13*a32*a44 + a14*a33*a42 - a12*a33*a44 - a13*a34*a42 - a14*a32*a43; const HYPRE_Real M13 = a12*a23*a44 + a13*a24*a42 + a14*a22*a43 - a12*a24*a43 - a13*a22*a44 - a14*a23*a42; const HYPRE_Real M14 = a12*a24*a33 + a13*a22*a34 + a14*a23*a32 - a12*a23*a34 - a13*a24*a32 - a14*a22*a33; const HYPRE_Real M21 = a21*a34*a43 + a23*a31*a44 + a24*a33*a41 - a21*a33*a44 - a23*a34*a41 - a24*a31*a43; const HYPRE_Real M22 = a11*a33*a44 + a13*a34*a41 + a14*a31*a43 - a11*a34*a43 - a13*a31*a44 - a14*a33*a41; const HYPRE_Real M23 = a11*a24*a43 + a13*a21*a44 + a14*a23*a41 - a11*a23*a44 - a13*a24*a41 - a14*a21*a43; const HYPRE_Real M24 = a11*a23*a34 + a13*a24*a31 + a14*a21*a33 - a11*a24*a33 - a13*a21*a34 - a14*a23*a31; const HYPRE_Real M31 = a21*a32*a44 + a22*a34*a41 + a24*a31*a42 - a21*a34*a42 - a22*a31*a44 - a24*a32*a41; const HYPRE_Real M32 = a11*a34*a42 + a12*a31*a44 + a14*a32*a41 - a11*a32*a44 - a12*a34*a41 - a14*a31*a42; const HYPRE_Real M33 = a11*a22*a44 + a12*a24*a41 + a14*a21*a42 - a11*a24*a42 - a12*a21*a44 - a14*a22*a41; const HYPRE_Real M34 = a11*a24*a32 + a12*a21*a34 + a14*a22*a31 - a11*a22*a34 - a12*a24*a31 - a14*a21*a32; const HYPRE_Real M41 = a21*a33*a42 + a22*a31*a43 + a23*a32*a41 - a21*a32*a43 - a22*a33*a41 - a23*a31*a42; const HYPRE_Real M42 = a11*a32*a43 + a12*a33*a41 + a13*a31*a42 - a11*a33*a42 - a12*a31*a43 - a13*a32*a41; const HYPRE_Real M43 = a11*a23*a42 + a12*a21*a43 + a13*a22*a41 - a11*a22*a43 - a12*a23*a41 - a13*a21*a42; const HYPRE_Real M44 = a11*a22*a33 + a12*a23*a31 + a13*a21*a32 - a11*a23*a32 - a12*a21*a33 - a13*a22*a31; const HYPRE_Real det = a11*M11 + a12*M21 + a13*M31 + a14*M41; HYPRE_Real det_inv; //if ( fabs(det) < 1e-22 ) { //hypre_printf("### WARNING: Matrix is nearly singular! 
det = %e\n", det); /* printf("##----------------------------------------------\n"); printf("## %12.5e %12.5e %12.5e \n", a0, a1, a2); printf("## %12.5e %12.5e %12.5e \n", a3, a4, a5); printf("## %12.5e %12.5e %12.5e \n", a5, a6, a7); printf("##----------------------------------------------\n"); getchar(); */ //} det_inv = 1.0/det; a[0] = M11*det_inv; a[1] = M12*det_inv; a[2] = M13*det_inv; a[3] = M14*det_inv; a[4] = M21*det_inv; a[5] = M22*det_inv; a[6] = M23*det_inv; a[7] = M24*det_inv; a[8] = M31*det_inv; a[9] = M32*det_inv; a[10] = M33*det_inv; a[11] = M34*det_inv; a[12] = M41*det_inv; a[13] = M42*det_inv; a[14] = M43*det_inv; a[15] = M44*det_inv; } void hypre_blas_mat_inv(HYPRE_Real *a, HYPRE_Int n) { HYPRE_Int i,j,k,l,u,kn,in; HYPRE_Real alinv; if (n == 4) { hypre_blas_smat_inv_n4(a); } else { for (k=0; k<n; ++k) { kn = k*n; l = kn+k; //if (fabs(a[l]) < SMALLREAL) { // printf("### WARNING: Diagonal entry is close to zero!"); // printf("### WARNING: diag_%d=%e\n", k, a[l]); // a[l] = SMALLREAL; //} alinv = 1.0/a[l]; a[l] = alinv; for (j=0; j<k; ++j) { u = kn+j; a[u] *= alinv; } for (j=k+1; j<n; ++j) { u = kn+j; a[u] *= alinv; } for (i=0; i<k; ++i) { in = i*n; for (j=0; j<n; ++j) if (j!=k) { u = in+j; a[u] -= a[in+k]*a[kn+j]; } // end if (j!=k) } for (i=k+1; i<n; ++i) { in = i*n; for (j=0; j<n; ++j) if (j!=k) { u = in+j; a[u] -= a[in+k]*a[kn+j]; } // end if (j!=k) } for (i=0; i<k; ++i) { u=i*n+k; a[u] *= -alinv; } for (i=k+1; i<n; ++i) { u=i*n+k; a[u] *= -alinv; } } // end for (k=0; k<n; ++k) }// end if } HYPRE_Int hypre_block_jacobi_scaling(hypre_ParCSRMatrix *A, hypre_ParCSRMatrix **B_ptr, void *mgr_vdata, HYPRE_Int debug_flag) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int num_procs, my_id; HYPRE_Int blk_size = (mgr_data -> block_size); HYPRE_Int reserved_coarse_size = (mgr_data -> reserved_coarse_size); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_ParCSRMatrix *B; hypre_CSRMatrix *B_diag; HYPRE_Real *B_diag_data; HYPRE_Int *B_diag_i; HYPRE_Int *B_diag_j; hypre_CSRMatrix *B_offd; HYPRE_Int i,ii; HYPRE_Int j,jj; HYPRE_Int k; HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int n_block, left_size,inv_size; // HYPRE_Real wall_time; /* for debugging instrumentation */ HYPRE_Int bidx,bidxm1,bidxp1; HYPRE_Real * diaginv; const HYPRE_Int nb2 = blk_size*blk_size; HYPRE_Int block_scaling_error = 0; hypre_MPI_Comm_size(comm,&num_procs); hypre_MPI_Comm_rank(comm,&my_id); // HYPRE_Int num_threads = hypre_NumThreads(); //printf("n = %d\n",n); if (my_id == num_procs) { n_block = (n - reserved_coarse_size) / blk_size; left_size = n - blk_size*n_block; } else { n_block = n / blk_size; left_size = n - blk_size*n_block; } inv_size = nb2*n_block + left_size*left_size; //printf("inv_size = %d\n",inv_size); hypre_blockRelax_setup(A,blk_size,reserved_coarse_size,&(mgr_data -> diaginv)); // if (debug_flag==4) wall_time = time_getWallclockSeconds(); /*----------------------------------------------------------------------- * First Pass: Determine size of B and fill in *-----------------------------------------------------------------------*/ B_diag_i = hypre_CTAlloc(HYPRE_Int, n+1, HYPRE_MEMORY_HOST); B_diag_j = hypre_CTAlloc(HYPRE_Int, inv_size, HYPRE_MEMORY_HOST); B_diag_data = hypre_CTAlloc(HYPRE_Real, inv_size, HYPRE_MEMORY_HOST); B_diag_i[n] = inv_size; //B_offd_i = 
hypre_CTAlloc(HYPRE_Int, n+1, HYPRE_MEMORY_HOST); //B_offd_j = hypre_CTAlloc(HYPRE_Int, 1, HYPRE_MEMORY_HOST); //B_offd_data = hypre_CTAlloc(HYPRE_Real, 1, HYPRE_MEMORY_HOST); //B_offd_i[n] = 1; /*----------------------------------------------------------------- * Get all the diagonal sub-blocks *-----------------------------------------------------------------*/ diaginv = hypre_CTAlloc(HYPRE_Real, nb2, HYPRE_MEMORY_HOST); //printf("n_block = %d\n",n_block); for (i = 0;i < n_block; i++) { bidxm1 = i*blk_size; bidxp1 = (i+1)*blk_size; for (k = 0;k < blk_size; k++) { for (j = 0;j < blk_size; j++) { bidx = k*blk_size + j; diaginv[bidx] = 0.0; } for (ii = A_diag_i[bidxm1+k]; ii < A_diag_i[bidxm1+k+1]; ii++) { jj = A_diag_j[ii]; if (jj >= bidxm1 && jj < bidxp1 && fabs(A_diag_data[ii]) > SMALLREAL) { bidx = k*blk_size + jj - bidxm1; //printf("jj = %d,val = %e, bidx = %d\n",jj,A_diag_data[ii],bidx); diaginv[bidx] = A_diag_data[ii]; } } } /* for (k = 0;k < blk_size; k++) */ /* { */ /* for (j = 0;j < blk_size; j++) */ /* { */ /* bidx = k*blk_size + j; */ /* printf("diaginv[%d] = %e\n",bidx,diaginv[bidx]); */ /* } */ /* } */ hypre_blas_mat_inv(diaginv, blk_size); for (k = 0;k < blk_size; k++) { B_diag_i[i*blk_size+k] = i*nb2 + k*blk_size; //B_offd_i[i*nb2+k] = 0; for (j = 0;j < blk_size; j++) { bidx = i*nb2 + k*blk_size + j; B_diag_j[bidx] = i*blk_size + j; B_diag_data[bidx] = diaginv[k*blk_size + j]; } } } //printf("Before create\n"); B = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), hypre_ParCSRMatrixGlobalNumCols(A), hypre_ParCSRMatrixRowStarts(A), hypre_ParCSRMatrixColStarts(A), 0, inv_size, 0); //printf("After create\n"); B_diag = hypre_ParCSRMatrixDiag(B); hypre_CSRMatrixData(B_diag) = B_diag_data; hypre_CSRMatrixI(B_diag) = B_diag_i; hypre_CSRMatrixJ(B_diag) = B_diag_j; B_offd = hypre_ParCSRMatrixOffd(B); hypre_CSRMatrixData(B_offd) = NULL; hypre_CSRMatrixI(B_offd) = NULL; hypre_CSRMatrixJ(B_offd) = NULL; /* hypre_ParCSRMatrixOwnsRowStarts(B) = 0; */ *B_ptr = B; return(block_scaling_error); } HYPRE_Int hypre_blockRelax_solve (hypre_ParCSRMatrix *A, hypre_ParVector *f, hypre_ParVector *u, HYPRE_Real blk_size, HYPRE_Int n_block, HYPRE_Int left_size, HYPRE_Int method, HYPRE_Real *diaginv, hypre_ParVector *Vtemp) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd); hypre_Vector *u_local = hypre_ParVectorLocalVector(u); HYPRE_Real *u_data = hypre_VectorData(u_local); hypre_Vector *f_local = hypre_ParVectorLocalVector(f); HYPRE_Real *f_data = hypre_VectorData(f_local); hypre_Vector *Vtemp_local = hypre_ParVectorLocalVector(Vtemp); HYPRE_Real *Vtemp_data = hypre_VectorData(Vtemp_local); HYPRE_Real *Vext_data = NULL; HYPRE_Real *v_buf_data; HYPRE_Int i, j, k; HYPRE_Int ii, jj; HYPRE_Int bidx,bidx1; HYPRE_Int relax_error = 0; HYPRE_Int num_sends; HYPRE_Int index, start; HYPRE_Int num_procs, my_id; HYPRE_Real *res; const HYPRE_Int nb2 = blk_size*blk_size; 
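   /*-----------------------------------------------------------------
    * Descriptive note: this routine applies one sweep of block
    * relaxation using the explicitly inverted diagonal blocks. For
    * each block b of size blk_size,
    *
    *    u_b <- u_b + D_b^{-1} * (f - A*u)_b,
    *
    * where the dense blk_size x blk_size inverse D_b^{-1} is stored at
    * diaginv[b*nb2 .. (b+1)*nb2 - 1] (e.g. blk_size = 2 gives nb2 = 4
    * entries per block). method == 0 uses the previous iterate
    * (Vtemp_data) for the diagonal part (block Jacobi); method == 1
    * uses the current iterate u_data (block Gauss-Seidel); any other
    * value falls back to Jacobi. The off-diagonal part always uses the
    * received external values in Vext_data.
    *-----------------------------------------------------------------*/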
hypre_MPI_Comm_size(comm,&num_procs); hypre_MPI_Comm_rank(comm,&my_id); // HYPRE_Int num_threads = hypre_NumThreads(); res = hypre_CTAlloc(HYPRE_Real, blk_size, HYPRE_MEMORY_HOST); if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } if (num_procs > 1) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); Vext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST); if (num_cols_offd) { A_offd_j = hypre_CSRMatrixJ(A_offd); A_offd_data = hypre_CSRMatrixData(A_offd); } index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) v_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data, Vext_data); } /*----------------------------------------------------------------- * Copy current approximation into temporary vector. *-----------------------------------------------------------------*/ #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i = 0; i < n; i++) { Vtemp_data[i] = u_data[i]; //printf("u_old[%d] = %e\n",i,Vtemp_data[i]); } if (num_procs > 1) { hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; } /*----------------------------------------------------------------- * Relax points block by block *-----------------------------------------------------------------*/ for (i = 0;i < n_block; i++) { for (j = 0;j < blk_size; j++) { bidx = i*blk_size +j; res[j] = f_data[bidx]; for (jj = A_diag_i[bidx]; jj < A_diag_i[bidx+1]; jj++) { ii = A_diag_j[jj]; if (method == 0) { // Jacobi for diagonal part res[j] -= A_diag_data[jj] * Vtemp_data[ii]; } else if (method == 1) { // Gauss-Seidel for diagonal part res[j] -= A_diag_data[jj] * u_data[ii]; } else { // Default do Jacobi for diagonal part res[j] -= A_diag_data[jj] * Vtemp_data[ii]; } //printf("%d: Au= %e * %e =%e\n",ii,A_diag_data[jj],Vtemp_data[ii], res[j]); } for (jj = A_offd_i[bidx]; jj < A_offd_i[bidx+1]; jj++) { // always do Jacobi for off-diagonal part ii = A_offd_j[jj]; res[j] -= A_offd_data[jj] * Vext_data[ii]; } //printf("%d: res = %e\n",bidx,res[j]); } for (j = 0;j < blk_size; j++) { bidx1 = i*blk_size +j; for (k = 0;k < blk_size; k++) { bidx = i*nb2 +j*blk_size+k; u_data[bidx1] += res[k]*diaginv[bidx]; //printf("u[%d] = %e, diaginv[%d] = %e\n",bidx1,u_data[bidx1],bidx,diaginv[bidx]); } //printf("u[%d] = %e\n",bidx1,u_data[bidx1]); } } if (num_procs > 1) { hypre_TFree(Vext_data, HYPRE_MEMORY_HOST); hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST); } hypre_TFree(res, HYPRE_MEMORY_HOST); return(relax_error); } HYPRE_Int hypre_block_gs (hypre_ParCSRMatrix *A, hypre_ParVector *f, hypre_ParVector *u, HYPRE_Real blk_size, HYPRE_Int n_block, HYPRE_Int left_size, HYPRE_Real *diaginv, hypre_ParVector *Vtemp) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle 
*comm_handle; HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd); hypre_Vector *u_local = hypre_ParVectorLocalVector(u); HYPRE_Real *u_data = hypre_VectorData(u_local); hypre_Vector *f_local = hypre_ParVectorLocalVector(f); HYPRE_Real *f_data = hypre_VectorData(f_local); hypre_Vector *Vtemp_local = hypre_ParVectorLocalVector(Vtemp); HYPRE_Real *Vtemp_data = hypre_VectorData(Vtemp_local); HYPRE_Real *Vext_data = NULL; HYPRE_Real *v_buf_data; HYPRE_Int i, j, k; HYPRE_Int ii, jj; HYPRE_Int bidx,bidx1; HYPRE_Int relax_error = 0; HYPRE_Int num_sends; HYPRE_Int index, start; HYPRE_Int num_procs, my_id; HYPRE_Real *res; const HYPRE_Int nb2 = blk_size*blk_size; hypre_MPI_Comm_size(comm,&num_procs); hypre_MPI_Comm_rank(comm,&my_id); //HYPRE_Int num_threads = hypre_NumThreads(); res = hypre_CTAlloc(HYPRE_Real, blk_size, HYPRE_MEMORY_HOST); if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } if (num_procs > 1) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); Vext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST); if (num_cols_offd) { A_offd_j = hypre_CSRMatrixJ(A_offd); A_offd_data = hypre_CSRMatrixData(A_offd); } index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) v_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data, Vext_data); } /*----------------------------------------------------------------- * Copy current approximation into temporary vector. 
*-----------------------------------------------------------------*/ #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i = 0; i < n; i++) { Vtemp_data[i] = u_data[i]; //printf("u_old[%d] = %e\n",i,Vtemp_data[i]); } if (num_procs > 1) { hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; } /*----------------------------------------------------------------- * Relax points block by block *-----------------------------------------------------------------*/ for (i = 0;i < n_block; i++) { for (j = 0;j < blk_size; j++) { bidx = i*blk_size +j; res[j] = f_data[bidx]; for (jj = A_diag_i[bidx]; jj < A_diag_i[bidx+1]; jj++) { ii = A_diag_j[jj]; //res[j] -= A_diag_data[jj] * Vtemp_data[ii]; //printf("my_id = %d, %d: Au = %e * %e\n",my_id,ii,A_diag_data[jj],Vtemp_data[ii]); res[j] -= A_diag_data[jj] * u_data[ii]; //printf("%d: Au= %e * %e =%e\n",ii,A_diag_data[jj],Vtemp_data[ii], res[j]); } for (jj = A_offd_i[bidx]; jj < A_offd_i[bidx+1]; jj++) { ii = A_offd_j[jj]; res[j] -= A_offd_data[jj] * Vext_data[ii]; } //printf("%d: res = %e\n",bidx,res[j]); } for (j = 0;j < blk_size; j++) { bidx1 = i*blk_size +j; for (k = 0;k < blk_size; k++) { bidx = i*nb2 +j*blk_size+k; u_data[bidx1] += res[k]*diaginv[bidx]; //printf("u[%d] = %e, diaginv[%d] = %e\n",bidx1,u_data[bidx1],bidx,diaginv[bidx]); } //printf("u[%d] = %e\n",bidx1,u_data[bidx1]); } } if (num_procs > 1) { hypre_TFree(Vext_data, HYPRE_MEMORY_HOST); hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST); } hypre_TFree(res, HYPRE_MEMORY_HOST); return(relax_error); } /*Block smoother*/ HYPRE_Int hypre_blockRelax_setup(hypre_ParCSRMatrix *A, HYPRE_Int blk_size, HYPRE_Int reserved_coarse_size, HYPRE_Real **diaginvptr) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int i, j,k; HYPRE_Int ii, jj; HYPRE_Int bidx,bidxm1,bidxp1; HYPRE_Int num_procs, my_id; const HYPRE_Int nb2 = blk_size*blk_size; HYPRE_Int n_block; HYPRE_Int left_size,inv_size; HYPRE_Real *diaginv = *diaginvptr; hypre_MPI_Comm_size(comm,&num_procs); hypre_MPI_Comm_rank(comm,&my_id); //HYPRE_Int num_threads = hypre_NumThreads(); if (my_id == num_procs) { n_block = (n - reserved_coarse_size) / blk_size; left_size = n - blk_size*n_block; } else { n_block = n / blk_size; left_size = n - blk_size*n_block; } inv_size = nb2*n_block + left_size*left_size; if (diaginv !=NULL) { hypre_TFree(diaginv, HYPRE_MEMORY_HOST); diaginv = hypre_CTAlloc(HYPRE_Real, inv_size, HYPRE_MEMORY_HOST); } else { diaginv = hypre_CTAlloc(HYPRE_Real, inv_size, HYPRE_MEMORY_HOST); } /*----------------------------------------------------------------- * Get all the diagonal sub-blocks *-----------------------------------------------------------------*/ for (i = 0;i < n_block; i++) { bidxm1 = i*blk_size; bidxp1 = (i+1)*blk_size; //printf("bidxm1 = %d,bidxp1 = %d\n",bidxm1,bidxp1); for (k = 0;k < blk_size; k++) { for (j = 0;j < blk_size; j++) { bidx = i*nb2 + k*blk_size + j; diaginv[bidx] = 0.0; } for (ii = A_diag_i[bidxm1+k]; ii < A_diag_i[bidxm1+k+1]; ii++) { jj = A_diag_j[ii]; if (jj >= bidxm1 && jj < bidxp1 && fabs(A_diag_data[ii]) > SMALLREAL) { bidx = i*nb2 + k*blk_size + jj - bidxm1; //printf("jj = %d,val = %e, bidx = %d\n",jj,A_diag_data[ii],bidx); diaginv[bidx] = A_diag_data[ii]; } } } } for (i = 0;i < left_size; i++) { 
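      /* Gather the trailing left_size x left_size diagonal block, i.e. the
       * rows left over after the last full blk_size block (e.g. reserved
       * coarse unknowns). It is stored after the n_block dense blocks,
       * starting at offset n_block*nb2 in diaginv; note the entries are
       * written with row stride blk_size. */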
bidxm1 =n_block*nb2 + i*blk_size; bidxp1 =n_block*nb2 + (i+1)*blk_size; for (j = 0;j < left_size; j++) { bidx = n_block*nb2 + i*blk_size +j; diaginv[bidx] = 0.0; } for (ii = A_diag_i[n_block*blk_size + i]; ii < A_diag_i[n_block*blk_size+i+1]; ii++) { jj = A_diag_j[ii]; if (jj > n_block*blk_size) { bidx = n_block*nb2 + i*blk_size + jj - n_block*blk_size; diaginv[bidx] = A_diag_data[ii]; } } } /*----------------------------------------------------------------- * compute the inverses of all the diagonal sub-blocks *-----------------------------------------------------------------*/ if (blk_size > 1) { for (i = 0;i < n_block; i++) { hypre_blas_mat_inv(diaginv+i*nb2, blk_size); } hypre_blas_mat_inv(diaginv+(HYPRE_Int)(blk_size*nb2),left_size); } else { for (i = 0;i < n; i++) { // FIX-ME: zero-diagonal should be tested previously if (fabs(diaginv[i]) < SMALLREAL) diaginv[i] = 0.0; else diaginv[i] = 1.0 / diaginv[i]; } } *diaginvptr = diaginv; return 1; } HYPRE_Int hypre_blockRelax(hypre_ParCSRMatrix *A, hypre_ParVector *f, hypre_ParVector *u, HYPRE_Int blk_size, HYPRE_Int reserved_coarse_size, HYPRE_Int method, hypre_ParVector *Vtemp, hypre_ParVector *Ztemp) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int i, j,k; HYPRE_Int ii, jj; HYPRE_Int bidx,bidxm1,bidxp1; HYPRE_Int relax_error = 0; HYPRE_Int num_procs, my_id; const HYPRE_Int nb2 = blk_size*blk_size; HYPRE_Int n_block; HYPRE_Int left_size,inv_size; HYPRE_Real *diaginv; hypre_MPI_Comm_size(comm,&num_procs); hypre_MPI_Comm_rank(comm,&my_id); //HYPRE_Int num_threads = hypre_NumThreads(); if (my_id == num_procs) { n_block = (n - reserved_coarse_size) / blk_size; left_size = n - blk_size*n_block; } else { n_block = n / blk_size; left_size = n - blk_size*n_block; } inv_size = nb2*n_block + left_size*left_size; diaginv = hypre_CTAlloc(HYPRE_Real, inv_size, HYPRE_MEMORY_HOST); /*----------------------------------------------------------------- * Get all the diagonal sub-blocks *-----------------------------------------------------------------*/ for (i = 0;i < n_block; i++) { bidxm1 = i*blk_size; bidxp1 = (i+1)*blk_size; //printf("bidxm1 = %d,bidxp1 = %d\n",bidxm1,bidxp1); for (k = 0;k < blk_size; k++) { for (j = 0;j < blk_size; j++) { bidx = i*nb2 + k*blk_size + j; diaginv[bidx] = 0.0; } for (ii = A_diag_i[bidxm1+k]; ii < A_diag_i[bidxm1+k+1]; ii++) { jj = A_diag_j[ii]; if (jj >= bidxm1 && jj < bidxp1 && fabs(A_diag_data[ii]) > SMALLREAL) { bidx = i*nb2 + k*blk_size + jj - bidxm1; //printf("jj = %d,val = %e, bidx = %d\n",jj,A_diag_data[ii],bidx); diaginv[bidx] = A_diag_data[ii]; } } } } for (i = 0;i < left_size; i++) { bidxm1 =n_block*nb2 + i*blk_size; bidxp1 =n_block*nb2 + (i+1)*blk_size; for (j = 0;j < left_size; j++) { bidx = n_block*nb2 + i*blk_size +j; diaginv[bidx] = 0.0; } for (ii = A_diag_i[n_block*blk_size + i]; ii < A_diag_i[n_block*blk_size+i+1]; ii++) { jj = A_diag_j[ii]; if (jj > n_block*blk_size) { bidx = n_block*nb2 + i*blk_size + jj - n_block*blk_size; diaginv[bidx] = A_diag_data[ii]; } } } /* for (i = 0;i < n_block; i++) { for (j = 0;j < blk_size; j++) { for (k = 0;k < blk_size; k ++) { bidx = i*nb2 + j*blk_size + k; printf("%e\t",diaginv[bidx]); } printf("\n"); } printf("\n"); } */ /*----------------------------------------------------------------- * compute the inverses of all 
the diagonal sub-blocks *-----------------------------------------------------------------*/ if (blk_size > 1) { for (i = 0;i < n_block; i++) { hypre_blas_mat_inv(diaginv+i*nb2, blk_size); } hypre_blas_mat_inv(diaginv+(HYPRE_Int)(blk_size*nb2),left_size); /* for (i = 0;i < n_block; i++) { for (j = 0;j < blk_size; j++) { for (k = 0;k < blk_size; k ++) { bidx = i*nb2 + j*blk_size + k; printf("%e\t",diaginv[bidx]); } printf("\n"); } printf("\n"); } */ } else { for (i = 0;i < n; i++) { // FIX-ME: zero-diagonal should be tested previously if (fabs(diaginv[i]) < SMALLREAL) diaginv[i] = 0.0; else diaginv[i] = 1.0 / diaginv[i]; } } hypre_blockRelax_solve(A,f,u,blk_size,n_block,left_size,method,diaginv,Vtemp); /*----------------------------------------------------------------- * Free temperary memeory *-----------------------------------------------------------------*/ hypre_TFree(diaginv, HYPRE_MEMORY_HOST); return(relax_error); } /* set coarse grid solver */ HYPRE_Int hypre_MGRSetFSolver( void *mgr_vdata, HYPRE_Int (*fine_grid_solver_solve)(void*,void*,void*,void*), HYPRE_Int (*fine_grid_solver_setup)(void*,void*,void*,void*), void *fsolver ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; if (!mgr_data) { hypre_error_in_arg(1); return hypre_error_flag; } HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); HYPRE_Solver **aff_solver = (mgr_data -> aff_solver); if (aff_solver == NULL) aff_solver = hypre_CTAlloc(HYPRE_Solver*, max_num_coarse_levels, HYPRE_MEMORY_HOST); /* only allow to set F-solver for the first level */ aff_solver[0] = (HYPRE_Solver *) fsolver; (mgr_data -> fine_grid_solver_solve) = fine_grid_solver_solve; (mgr_data -> fine_grid_solver_setup) = fine_grid_solver_setup; (mgr_data -> aff_solver) = aff_solver; (mgr_data -> use_default_fsolver) = 0; return hypre_error_flag; } /* set coarse grid solver */ HYPRE_Int hypre_MGRSetCoarseSolver( void *mgr_vdata, HYPRE_Int (*coarse_grid_solver_solve)(void*,void*,void*,void*), HYPRE_Int (*coarse_grid_solver_setup)(void*,void*,void*,void*), void *coarse_grid_solver ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; if (!mgr_data) { hypre_error_in_arg(1); return hypre_error_flag; } (mgr_data -> coarse_grid_solver_solve) = coarse_grid_solver_solve; (mgr_data -> coarse_grid_solver_setup) = coarse_grid_solver_setup; (mgr_data -> coarse_grid_solver) = (HYPRE_Solver) coarse_grid_solver; (mgr_data -> use_default_cgrid_solver) = 0; return hypre_error_flag; } HYPRE_Int hypre_MGRSetAffInv( void *mgr_vdata, hypre_ParCSRMatrix *A_ff_inv ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> A_ff_inv) = A_ff_inv; return hypre_error_flag; } /* Set the maximum number of coarse levels. * maxcoarselevs = 1 yields the default 2-grid scheme. */ HYPRE_Int hypre_MGRSetMaxCoarseLevels( void *mgr_vdata, HYPRE_Int maxcoarselevs ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> max_num_coarse_levels) = maxcoarselevs; return hypre_error_flag; } /* Set the system block size */ HYPRE_Int hypre_MGRSetBlockSize( void *mgr_vdata, HYPRE_Int bsize ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> block_size) = bsize; return hypre_error_flag; } /* Set the relaxation type for the fine levels of the reduction. * Currently supports the following flavors of relaxation types * as described in the documentation: * relax_types 0 - 8, 13, 14, 18, 19, 98. * See par_relax.c and par_relax_more.c for more details. 
* */ HYPRE_Int hypre_MGRSetRelaxType( void *mgr_vdata, HYPRE_Int relax_type ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> relax_type) = relax_type; return hypre_error_flag; } /* Set the number of relaxation sweeps */ HYPRE_Int hypre_MGRSetNumRelaxSweeps( void *mgr_vdata, HYPRE_Int nsweeps ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> num_relax_sweeps) = nsweeps; return hypre_error_flag; } /* Set the F-relaxation strategy: 0=single level, 1=multi level */ HYPRE_Int hypre_MGRSetFRelaxMethod( void *mgr_vdata, HYPRE_Int relax_method ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i; HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); if((mgr_data -> Frelax_method) != NULL) { hypre_TFree(mgr_data -> Frelax_method, HYPRE_MEMORY_HOST); (mgr_data -> Frelax_method) = NULL; } HYPRE_Int *Frelax_method = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST); for (i=0; i < max_num_coarse_levels; i++) { Frelax_method[i] = relax_method; } (mgr_data -> Frelax_method) = Frelax_method; return hypre_error_flag; } /* Set the F-relaxation strategy: 0=single level, 1=multi level */ HYPRE_Int hypre_MGRSetLevelFRelaxMethod( void *mgr_vdata, HYPRE_Int *relax_method ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i; HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); if((mgr_data -> Frelax_method) != NULL) { hypre_TFree(mgr_data -> Frelax_method, HYPRE_MEMORY_HOST); (mgr_data -> Frelax_method) = NULL; } HYPRE_Int *Frelax_method = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST); if (relax_method != NULL) { for (i=0; i < max_num_coarse_levels; i++) { Frelax_method[i] = relax_method[i]; } } else { for (i = 0; i < max_num_coarse_levels; i++) { Frelax_method[i] = 0; } } (mgr_data -> Frelax_method) = Frelax_method; return hypre_error_flag; } /* Coarse grid method: 0=Galerkin RAP, 1=non-Galerkin with dropping*/ HYPRE_Int hypre_MGRSetCoarseGridMethod( void *mgr_vdata, HYPRE_Int *cg_method ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i; HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); if((mgr_data -> use_non_galerkin_cg) != NULL) { hypre_TFree(mgr_data -> use_non_galerkin_cg, HYPRE_MEMORY_HOST); (mgr_data -> use_non_galerkin_cg) = NULL; } HYPRE_Int *use_non_galerkin_cg = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST); if (cg_method != NULL) { for (i=0; i < max_num_coarse_levels; i++) { use_non_galerkin_cg[i] = cg_method[i]; } } else { for (i = 0; i < max_num_coarse_levels; i++) { use_non_galerkin_cg[i] = 0; } } (mgr_data -> use_non_galerkin_cg) = use_non_galerkin_cg; return hypre_error_flag; } /* Set the F-relaxation number of functions for each level */ HYPRE_Int hypre_MGRSetLevelFRelaxNumFunctions( void *mgr_vdata, HYPRE_Int *num_functions ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i; HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); if((mgr_data -> Frelax_num_functions) != NULL) { hypre_TFree(mgr_data -> Frelax_num_functions, HYPRE_MEMORY_HOST); (mgr_data -> Frelax_num_functions) = NULL; } HYPRE_Int *Frelax_num_functions = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST); if (num_functions != NULL) { for (i=0; i < max_num_coarse_levels; i++) { Frelax_num_functions[i] = num_functions[i]; } } else { for (i = 0; i < max_num_coarse_levels; i++) { Frelax_num_functions[i] = 1; } } (mgr_data -> 
Frelax_num_functions) = Frelax_num_functions; return hypre_error_flag; } /* Set the type of the restriction type * for computing restriction operator */ HYPRE_Int hypre_MGRSetLevelRestrictType( void *mgr_vdata, HYPRE_Int *restrict_type) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i; HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); if ((mgr_data -> restrict_type) != NULL) { hypre_TFree((mgr_data -> restrict_type), HYPRE_MEMORY_HOST); (mgr_data -> restrict_type) = NULL; } HYPRE_Int *level_restrict_type = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST); if (restrict_type != NULL) { for (i=0; i < max_num_coarse_levels; i++) { level_restrict_type[i] = *(restrict_type + i); } } else { for (i=0; i < max_num_coarse_levels; i++) { level_restrict_type[i] = 0; } } (mgr_data -> restrict_type) = level_restrict_type; return hypre_error_flag; } /* Set the type of the restriction type * for computing restriction operator */ HYPRE_Int hypre_MGRSetRestrictType( void *mgr_vdata, HYPRE_Int restrict_type) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i; HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); if ((mgr_data -> restrict_type) != NULL) { hypre_TFree((mgr_data -> restrict_type), HYPRE_MEMORY_HOST); (mgr_data -> restrict_type) = NULL; } HYPRE_Int *level_restrict_type = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST); for (i=0; i < max_num_coarse_levels; i++) { level_restrict_type[i] = restrict_type; } (mgr_data -> restrict_type) = level_restrict_type; return hypre_error_flag; } /* Set the number of Jacobi interpolation iterations * for computing interpolation operator */ HYPRE_Int hypre_MGRSetNumRestrictSweeps( void *mgr_vdata, HYPRE_Int nsweeps ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> num_restrict_sweeps) = nsweeps; return hypre_error_flag; } /* Set the type of the interpolation * for computing interpolation operator */ HYPRE_Int hypre_MGRSetInterpType( void *mgr_vdata, HYPRE_Int interpType) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i; HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); if ((mgr_data -> interp_type) != NULL) { hypre_TFree((mgr_data -> interp_type), HYPRE_MEMORY_HOST); (mgr_data -> interp_type) = NULL; } HYPRE_Int *level_interp_type = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST); for (i=0; i < max_num_coarse_levels; i++) { level_interp_type[i] = interpType; } (mgr_data -> interp_type) = level_interp_type; return hypre_error_flag; } /* Set the type of the interpolation * for computing interpolation operator */ HYPRE_Int hypre_MGRSetLevelInterpType( void *mgr_vdata, HYPRE_Int *interpType) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i; HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); if ((mgr_data -> interp_type) != NULL) { hypre_TFree((mgr_data -> interp_type), HYPRE_MEMORY_HOST); (mgr_data -> interp_type) = NULL; } HYPRE_Int *level_interp_type = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST); if (interpType != NULL) { for (i=0; i < max_num_coarse_levels; i++) { level_interp_type[i] = *(interpType + i); } } else { for (i=0; i < max_num_coarse_levels; i++) { level_interp_type[i] = 2; } } (mgr_data -> interp_type) = level_interp_type; return hypre_error_flag; } /* Set the number of Jacobi interpolation iterations * for computing interpolation operator */ HYPRE_Int 
hypre_MGRSetNumInterpSweeps( void *mgr_vdata, HYPRE_Int nsweeps ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> num_interp_sweeps) = nsweeps; return hypre_error_flag; } /* Set the threshold to truncate the coarse grid at each * level of reduction */ HYPRE_Int hypre_MGRSetTruncateCoarseGridThreshold( void *mgr_vdata, HYPRE_Real threshold) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> truncate_coarse_grid_threshold) = threshold; return hypre_error_flag; } /* Set print level for mgr solver */ HYPRE_Int hypre_MGRSetPrintLevel( void *mgr_vdata, HYPRE_Int print_level ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> print_level) = print_level; return hypre_error_flag; } /* Set print level for mgr solver */ HYPRE_Int hypre_MGRSetLogging( void *mgr_vdata, HYPRE_Int logging ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> logging) = logging; return hypre_error_flag; } /* Set max number of iterations for mgr solver */ HYPRE_Int hypre_MGRSetMaxIter( void *mgr_vdata, HYPRE_Int max_iter ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> max_iter) = max_iter; return hypre_error_flag; } /* Set convergence tolerance for mgr solver */ HYPRE_Int hypre_MGRSetTol( void *mgr_vdata, HYPRE_Real tol ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> tol) = tol; return hypre_error_flag; } /* Set max number of iterations for mgr global smoother */ HYPRE_Int hypre_MGRSetMaxGlobalsmoothIters( void *mgr_vdata, HYPRE_Int max_iter ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> global_smooth_iters) = max_iter; return hypre_error_flag; } /* Set global smoothing type for mgr solver */ HYPRE_Int hypre_MGRSetGlobalsmoothType( void *mgr_vdata, HYPRE_Int iter_type ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> global_smooth_type) = iter_type; return hypre_error_flag; } /* Set the maximum number of non-zero entries for restriction and interpolation operator if classical AMG interpolation is used */ HYPRE_Int hypre_MGRSetPMaxElmts( void *mgr_vdata, HYPRE_Int P_max_elmts) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> P_max_elmts) = P_max_elmts; return hypre_error_flag; } /* Get number of iterations for MGR solver */ HYPRE_Int hypre_MGRGetNumIterations( void *mgr_vdata, HYPRE_Int *num_iterations ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; if (!mgr_data) { hypre_error_in_arg(1); return hypre_error_flag; } *num_iterations = mgr_data->num_iterations; return hypre_error_flag; } /* Get residual norms for MGR solver */ HYPRE_Int hypre_MGRGetFinalRelativeResidualNorm( void *mgr_vdata, HYPRE_Real *res_norm ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; if (!mgr_data) { hypre_error_in_arg(1); return hypre_error_flag; } *res_norm = mgr_data->final_rel_residual_norm; return hypre_error_flag; } HYPRE_Int hypre_MGRGetCoarseGridConvergenceFactor( void *mgr_vdata , HYPRE_Real *conv_factor ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; if (!mgr_data) { hypre_error_in_arg(1); return hypre_error_flag; } *conv_factor = (mgr_data -> cg_convergence_factor); return hypre_error_flag; } /* Build A_FF matrix from A given a CF_marker array */ HYPRE_Int hypre_MGRGetSubBlock( hypre_ParCSRMatrix *A, HYPRE_Int *row_cf_marker, HYPRE_Int *col_cf_marker, HYPRE_Int debug_flag, hypre_ParCSRMatrix **A_block_ptr ) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); 
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); //HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A); HYPRE_Int *coarse_dof_func_ptr = NULL; HYPRE_BigInt *num_row_cpts_global = NULL; HYPRE_BigInt *num_col_cpts_global = NULL; hypre_ParCSRMatrix *Ablock; HYPRE_BigInt *col_map_offd_Ablock; HYPRE_Int *tmp_map_offd = NULL; HYPRE_Int *CF_marker_offd = NULL; hypre_CSRMatrix *Ablock_diag; hypre_CSRMatrix *Ablock_offd; HYPRE_Real *Ablock_diag_data; HYPRE_Int *Ablock_diag_i; HYPRE_Int *Ablock_diag_j; HYPRE_Real *Ablock_offd_data; HYPRE_Int *Ablock_offd_i; HYPRE_Int *Ablock_offd_j; HYPRE_Int Ablock_diag_size, Ablock_offd_size; HYPRE_Int *Ablock_marker; HYPRE_Int ii_counter; HYPRE_Int jj_counter, jj_counter_offd; HYPRE_Int *jj_count, *jj_count_offd; HYPRE_Int start_indexing = 0; /* start indexing for Aff_data at 0 */ HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int *fine_to_coarse; HYPRE_Int *coarse_counter; HYPRE_Int *col_coarse_counter; HYPRE_Int coarse_shift; HYPRE_BigInt total_global_row_cpts; HYPRE_BigInt total_global_col_cpts; HYPRE_Int num_cols_Ablock_offd; // HYPRE_BigInt my_first_row_cpt, my_first_col_cpt; HYPRE_Int i,i1; HYPRE_Int j,jl,jj; HYPRE_Int start; HYPRE_Int my_id; HYPRE_Int num_procs; HYPRE_Int num_threads; HYPRE_Int num_sends; HYPRE_Int index; HYPRE_Int ns, ne, size, rest; HYPRE_Int *int_buf_data; HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag); // HYPRE_Real wall_time; /* for debugging instrumentation */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm,&my_id); //num_threads = hypre_NumThreads(); // Temporary fix, disable threading // TODO: enable threading num_threads = 1; /* get the number of coarse rows */ hypre_BoomerAMGCoarseParms(comm, local_numrows, 1, NULL, row_cf_marker, &coarse_dof_func_ptr, &num_row_cpts_global); hypre_TFree(coarse_dof_func_ptr, HYPRE_MEMORY_HOST); coarse_dof_func_ptr = NULL; //hypre_printf("my_id = %d, cpts_this = %d, cpts_next = %d\n", my_id, num_row_cpts_global[0], num_row_cpts_global[1]); #ifdef HYPRE_NO_GLOBAL_PARTITION // my_first_row_cpt = num_row_cpts_global[0]; if (my_id == (num_procs -1)) total_global_row_cpts = num_row_cpts_global[1]; hypre_MPI_Bcast(&total_global_row_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); #else // my_first_row_cpt = num_row_cpts_global[my_id]; total_global_row_cpts = num_row_cpts_global[num_procs]; #endif /* get the number of coarse rows */ hypre_BoomerAMGCoarseParms(comm, local_numrows, 1, NULL, col_cf_marker, &coarse_dof_func_ptr, &num_col_cpts_global); hypre_TFree(coarse_dof_func_ptr, HYPRE_MEMORY_HOST); coarse_dof_func_ptr = NULL; //hypre_printf("my_id = %d, cpts_this = %d, cpts_next = %d\n", my_id, num_col_cpts_global[0], num_col_cpts_global[1]); #ifdef HYPRE_NO_GLOBAL_PARTITION // my_first_col_cpt = num_col_cpts_global[0]; if (my_id == (num_procs -1)) total_global_col_cpts = num_col_cpts_global[1]; hypre_MPI_Bcast(&total_global_col_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); #else // my_first_col_cpt = num_col_cpts_global[my_id]; total_global_col_cpts = 
num_col_cpts_global[num_procs]; #endif /*------------------------------------------------------------------- * Get the CF_marker data for the off-processor columns *-------------------------------------------------------------------*/ if (debug_flag < 0) { debug_flag = -debug_flag; } // if (debug_flag==4) wall_time = time_getWallclockSeconds(); if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = col_cf_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); /*----------------------------------------------------------------------- * First Pass: Determine size of Ablock and fill in fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. *-----------------------------------------------------------------------*/ coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); col_coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1; jj_counter = start_indexing; jj_counter_offd = start_indexing; /*----------------------------------------------------------------------- * Loop over fine grid. *-----------------------------------------------------------------------*/ /* RDF: this looks a little tricky, but doable */ #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE #endif #endif for (j = 0; j < num_threads; j++) { size = n_fine/num_threads; rest = n_fine - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a F-point, we loop through the columns and select * the F-columns. Also set up mapping vector. *--------------------------------------------------------------------*/ if (col_cf_marker[i] > 0) { fine_to_coarse[i] = col_coarse_counter[j]; col_coarse_counter[j]++; } if (row_cf_marker[i] > 0) { //fine_to_coarse[i] = coarse_counter[j]; coarse_counter[j]++; for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { i1 = A_diag_j[jj]; if (col_cf_marker[i1] > 0) { jj_count[j]++; } } if (num_procs > 1) { for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { i1 = A_offd_j[jj]; if (CF_marker_offd[i1] > 0) { jj_count_offd[j]++; } } } } } } /*----------------------------------------------------------------------- * Allocate arrays. 
*-----------------------------------------------------------------------*/ for (i=0; i < num_threads-1; i++) { jj_count[i+1] += jj_count[i]; jj_count_offd[i+1] += jj_count_offd[i]; coarse_counter[i+1] += coarse_counter[i]; col_coarse_counter[i+1] += col_coarse_counter[i]; } i = num_threads-1; jj_counter = jj_count[i]; jj_counter_offd = jj_count_offd[i]; ii_counter = coarse_counter[i]; Ablock_diag_size = jj_counter; Ablock_diag_i = hypre_CTAlloc(HYPRE_Int, ii_counter+1, HYPRE_MEMORY_HOST); Ablock_diag_j = hypre_CTAlloc(HYPRE_Int, Ablock_diag_size, HYPRE_MEMORY_HOST); Ablock_diag_data = hypre_CTAlloc(HYPRE_Real, Ablock_diag_size, HYPRE_MEMORY_HOST); Ablock_diag_i[ii_counter] = jj_counter; Ablock_offd_size = jj_counter_offd; Ablock_offd_i = hypre_CTAlloc(HYPRE_Int, ii_counter+1, HYPRE_MEMORY_HOST); Ablock_offd_j = hypre_CTAlloc(HYPRE_Int, Ablock_offd_size, HYPRE_MEMORY_HOST); Ablock_offd_data = hypre_CTAlloc(HYPRE_Real, Ablock_offd_size, HYPRE_MEMORY_HOST); /*----------------------------------------------------------------------- * Intialize some stuff. *-----------------------------------------------------------------------*/ jj_counter = start_indexing; jj_counter_offd = start_indexing; //----------------------------------------------------------------------- // Send and receive fine_to_coarse info. //----------------------------------------------------------------------- // if (debug_flag==4) wall_time = time_getWallclockSeconds(); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE #endif #endif for (j = 0; j < num_threads; j++) { coarse_shift = 0; if (j > 0) coarse_shift = col_coarse_counter[j-1]; size = n_fine/num_threads; rest = n_fine - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) fine_to_coarse[i] += coarse_shift; } // if (debug_flag==4) wall_time = time_getWallclockSeconds(); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif // for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_col_cpt; #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,jl,i1,jj,ns,ne,size,rest,jj_counter,jj_counter_offd,ii_counter) HYPRE_SMP_SCHEDULE #endif #endif for (jl = 0; jl < num_threads; jl++) { size = n_fine/num_threads; rest = n_fine - size*num_threads; if (jl < rest) { ns = jl*size+jl; ne = (jl+1)*size+jl+1; } else { ns = jl*size+rest; ne = (jl+1)*size+rest; } jj_counter = 0; if (jl > 0) jj_counter = jj_count[jl-1]; jj_counter_offd = 0; if (jl > 0) jj_counter_offd = jj_count_offd[jl-1]; ii_counter = 0; for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a F-point, we loop through the columns and select * the F-columns. Also set up mapping vector. 
*--------------------------------------------------------------------*/ if (row_cf_marker[i] > 0) { // Diagonal part of Ablock // Ablock_diag_i[ii_counter] = jj_counter; for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { i1 = A_diag_j[jj]; if (col_cf_marker[i1] > 0) { Ablock_diag_j[jj_counter] = fine_to_coarse[i1]; Ablock_diag_data[jj_counter] = A_diag_data[jj]; jj_counter++; } } // Off-Diagonal part of Ablock // Ablock_offd_i[ii_counter] = jj_counter_offd; if (num_procs > 1) { for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { i1 = A_offd_j[jj]; if (CF_marker_offd[i1] > 0) { Ablock_offd_j[jj_counter_offd] = i1; Ablock_offd_data[jj_counter_offd] = A_offd_data[jj]; jj_counter_offd++; } } } ii_counter++; } } Ablock_offd_i[ii_counter] = jj_counter_offd; Ablock_diag_i[ii_counter] = jj_counter; } Ablock = hypre_ParCSRMatrixCreate(comm, total_global_row_cpts, total_global_col_cpts, num_row_cpts_global, num_col_cpts_global, 0, Ablock_diag_i[ii_counter], Ablock_offd_i[ii_counter]); Ablock_diag = hypre_ParCSRMatrixDiag(Ablock); hypre_CSRMatrixData(Ablock_diag) = Ablock_diag_data; hypre_CSRMatrixI(Ablock_diag) = Ablock_diag_i; hypre_CSRMatrixJ(Ablock_diag) = Ablock_diag_j; Ablock_offd = hypre_ParCSRMatrixOffd(Ablock); hypre_CSRMatrixData(Ablock_offd) = Ablock_offd_data; hypre_CSRMatrixI(Ablock_offd) = Ablock_offd_i; hypre_CSRMatrixJ(Ablock_offd) = Ablock_offd_j; num_cols_Ablock_offd = 0; if (Ablock_offd_size) { Ablock_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i=0; i < num_cols_A_offd; i++) Ablock_marker[i] = 0; num_cols_Ablock_offd = 0; for (i=0; i < Ablock_offd_size; i++) { index = Ablock_offd_j[i]; if (!Ablock_marker[index]) { num_cols_Ablock_offd++; Ablock_marker[index] = 1; } } col_map_offd_Ablock = hypre_CTAlloc(HYPRE_BigInt, num_cols_Ablock_offd, HYPRE_MEMORY_HOST); tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_Ablock_offd, HYPRE_MEMORY_HOST); index = 0; for (i=0; i < num_cols_Ablock_offd; i++) { while (Ablock_marker[index]==0) index++; tmp_map_offd[i] = index++; } #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i=0; i < Ablock_offd_size; i++) Ablock_offd_j[i] = hypre_BinarySearch(tmp_map_offd, Ablock_offd_j[i], num_cols_Ablock_offd); hypre_TFree(Ablock_marker, HYPRE_MEMORY_HOST); } if (num_cols_Ablock_offd) { hypre_ParCSRMatrixColMapOffd(Ablock) = col_map_offd_Ablock; hypre_CSRMatrixNumCols(Ablock_offd) = num_cols_Ablock_offd; } hypre_GetCommPkgRTFromCommPkgA(Ablock, A, fine_to_coarse, tmp_map_offd); #ifdef HYPRE_NO_GLOBAL_PARTITION /* Create the assumed partition */ if (hypre_ParCSRMatrixAssumedPartition(Ablock) == NULL) { hypre_ParCSRMatrixCreateAssumedPartition(Ablock); } #endif *A_block_ptr= Ablock; hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST); hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST); hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST); hypre_TFree(col_coarse_counter, HYPRE_MEMORY_HOST); hypre_TFree(jj_count, HYPRE_MEMORY_HOST); hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST); return(0); } /* Build A_FF matrix from A given a CF_marker array */ HYPRE_Int hypre_MGRBuildAff( hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, HYPRE_Int debug_flag, hypre_ParCSRMatrix **A_ff_ptr ) { HYPRE_Int i; HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A)); /* create a copy of the CF_marker array and 
switch C-points to F-points */ HYPRE_Int *CF_marker_copy = hypre_CTAlloc(HYPRE_Int, local_numrows, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i = 0; i < local_numrows; i++) { CF_marker_copy[i] = -CF_marker[i]; } hypre_MGRGetSubBlock(A, CF_marker_copy, CF_marker_copy, debug_flag, A_ff_ptr); /* Free copy of CF marker */ hypre_TFree(CF_marker_copy, HYPRE_MEMORY_HOST); return(0); } /********************************************************************************* * This routine assumes that the 'toVector' is larger than the 'fromVector' and * the CF_marker is of the same length as the toVector. There must be n 'point_type' * values in the CF_marker, where n is the length of the 'fromVector'. * It adds the values of the 'fromVector' to the 'toVector' where the marker is the * same as the 'point_type' *********************************************************************************/ HYPRE_Int hypre_MGRAddVectorP ( HYPRE_Int *CF_marker, HYPRE_Int point_type, HYPRE_Real a, hypre_ParVector *fromVector, HYPRE_Real b, hypre_ParVector **toVector ) { hypre_Vector *fromVectorLocal = hypre_ParVectorLocalVector(fromVector); HYPRE_Real *fromVectorData = hypre_VectorData(fromVectorLocal); hypre_Vector *toVectorLocal = hypre_ParVectorLocalVector(*toVector); HYPRE_Real *toVectorData = hypre_VectorData(toVectorLocal); HYPRE_Int n = hypre_ParVectorActualLocalSize(*toVector); HYPRE_Int i, j; j = 0; for (i = 0; i < n; i++) { if (CF_marker[i] == point_type) { toVectorData[i] = b * toVectorData[i] + a * fromVectorData[j]; j++; } } return 0; } /************************************************************************************* * This routine assumes that the 'fromVector' is larger than the 'toVector' and * the CF_marker is of the same length as the fromVector. There must be n 'point_type' * values in the CF_marker, where n is the length of the 'toVector'. 
* It adds the values of the 'fromVector' where the marker is the * same as the 'point_type' to the 'toVector' *************************************************************************************/ HYPRE_Int hypre_MGRAddVectorR ( HYPRE_Int *CF_marker, HYPRE_Int point_type, HYPRE_Real a, hypre_ParVector *fromVector, HYPRE_Real b, hypre_ParVector **toVector ) { hypre_Vector *fromVectorLocal = hypre_ParVectorLocalVector(fromVector); HYPRE_Real *fromVectorData = hypre_VectorData(fromVectorLocal); hypre_Vector *toVectorLocal = hypre_ParVectorLocalVector(*toVector); HYPRE_Real *toVectorData = hypre_VectorData(toVectorLocal); HYPRE_Int n = hypre_ParVectorActualLocalSize(fromVector); HYPRE_Int i, j; j = 0; for (i = 0; i < n; i++) { if (CF_marker[i] == point_type) { toVectorData[j] = b * toVectorData[j] + a * fromVectorData[i]; j++; } } return 0; } /* HYPRE_Int hypre_MGRBuildAffRAP( MPI_Comm comm, HYPRE_Int local_num_variables, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int *CF_marker, HYPRE_Int **coarse_dof_func_ptr, HYPRE_BigInt **coarse_pnts_global_ptr, hypre_ParCSRMatrix *A, HYPRE_Int debug_flag, hypre_ParCSRMatrix **P_f_ptr, hypre_ParCSRMatrix **A_ff_ptr ) { HYPRE_Int *CF_marker_copy = hypre_CTAlloc(HYPRE_Int, local_num_variables, HYPRE_MEMORY_HOST); HYPRE_Int i; for (i = 0; i < local_num_variables; i++) { CF_marker_copy[i] = -CF_marker[i]; } hypre_BoomerAMGCoarseParms(comm, local_num_variables, 1, NULL, CF_marker_copy, coarse_dof_func_ptr, coarse_pnts_global_ptr); hypre_MGRBuildP(A, CF_marker_copy, (*coarse_pnts_global_ptr), 0, debug_flag, P_f_ptr); hypre_BoomerAMGBuildCoarseOperator(*P_f_ptr, A, *P_f_ptr, A_ff_ptr); hypre_TFree(CF_marker_copy, HYPRE_MEMORY_HOST); return 0; } */ /* Get pointer to coarse grid matrix for MGR solver */ HYPRE_Int hypre_MGRGetCoarseGridMatrix( void *mgr_vdata, hypre_ParCSRMatrix **RAP ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; if (!mgr_data) { hypre_error_in_arg(1); return hypre_error_flag; } if (mgr_data -> RAP == NULL) { hypre_error_w_msg(HYPRE_ERROR_GENERIC," Coarse grid matrix is NULL. Please make sure MGRSetup() is called \n"); return hypre_error_flag; } *RAP = mgr_data->RAP; return hypre_error_flag; } /* Get pointer to coarse grid solution for MGR solver */ HYPRE_Int hypre_MGRGetCoarseGridSolution( void *mgr_vdata, hypre_ParVector **sol ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; if (!mgr_data) { hypre_error_in_arg(1); return hypre_error_flag; } if (mgr_data -> U_array == NULL) { hypre_error_w_msg(HYPRE_ERROR_GENERIC," MGR solution array is NULL. Please make sure MGRSetup() and MGRSolve() are called \n"); return hypre_error_flag; } *sol = mgr_data->U_array[mgr_data->num_coarse_levels]; return hypre_error_flag; } /* Get pointer to coarse grid solution for MGR solver */ HYPRE_Int hypre_MGRGetCoarseGridRHS( void *mgr_vdata, hypre_ParVector **rhs ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; if (!mgr_data) { hypre_error_in_arg(1); return hypre_error_flag; } if (mgr_data -> F_array == NULL) { hypre_error_w_msg(HYPRE_ERROR_GENERIC," MGR RHS array is NULL. 
Please make sure MGRSetup() and MGRSolve() are called \n"); return hypre_error_flag; } *rhs = mgr_data->F_array[mgr_data->num_coarse_levels]; return hypre_error_flag; } /* Print coarse grid linear system (for debugging)*/ HYPRE_Int hypre_MGRPrintCoarseSystem( void *mgr_vdata, HYPRE_Int print_flag) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; mgr_data->print_coarse_system = print_flag; return hypre_error_flag; } /* Print solver params */ HYPRE_Int hypre_MGRWriteSolverParams(void *mgr_vdata) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i, j; HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); hypre_printf("MGR Setup parameters: \n"); hypre_printf("Block size: %d\n", (mgr_data -> block_size)); hypre_printf("Max number of coarse levels: %d\n", (mgr_data -> max_num_coarse_levels)); hypre_printf("Relax type: %d\n", (mgr_data -> relax_type)); hypre_printf("Set non-Cpoints to F-points: %d\n", (mgr_data -> set_non_Cpoints_to_F)); hypre_printf("Set Cpoints method: %d\n", (mgr_data -> set_c_points_method)); for (i = 0; i < max_num_coarse_levels; i++) { hypre_printf("Lev = %d, Interpolation type: %d\n", i, (mgr_data -> interp_type)[i]); hypre_printf("Lev = %d, Restriction type: %d\n", i, (mgr_data -> restrict_type)[i]); hypre_printf("Lev = %d, F-relaxation method: %d\n", i, (mgr_data -> Frelax_method)[i]); hypre_printf("Lev = %d, Use non-Galerkin coarse grid: %d\n", i, (mgr_data -> use_non_galerkin_cg)[i]); HYPRE_Int lvl_num_coarse_points = (mgr_data -> block_num_coarse_indexes)[i]; hypre_printf("Lev = %d, Number of Cpoints: %d\n", i, lvl_num_coarse_points); hypre_printf("Cpoints indices: "); for (j = 0; j < lvl_num_coarse_points; j++) { if ((mgr_data -> block_cf_marker)[i][j] == 1) { hypre_printf("%d ", j); } } hypre_printf("\n"); } hypre_printf("Number of Reserved Cpoints: %d\n", (mgr_data -> reserved_coarse_size)); hypre_printf("Keep reserved Cpoints to level: %d\n", (mgr_data -> lvl_to_keep_cpoints)); hypre_printf("\n MGR Solver Parameters: \n"); hypre_printf("Number of relax sweeps: %d\n", (mgr_data -> num_relax_sweeps)); hypre_printf("Number of interpolation sweeps: %d\n", (mgr_data -> num_interp_sweeps)); hypre_printf("Number of restriction sweeps: %d\n", (mgr_data -> num_restrict_sweeps)); hypre_printf("Global smoother type: %d\n", (mgr_data ->global_smooth_type)); hypre_printf("Number of global smoother sweeps: %d\n", (mgr_data ->global_smooth_iters)); hypre_printf("Max number of iterations: %d\n", (mgr_data -> max_iter)); hypre_printf("Stopping tolerance: %e\n", (mgr_data -> tol)); hypre_printf("Use default coarse grid solver: %d\n", (mgr_data -> use_default_cgrid_solver)); if((mgr_data -> use_default_fsolver) >= 0) { hypre_printf("Use default AMG solver for full AMG F-relaxation: %d\n", (mgr_data -> use_default_fsolver)); } return hypre_error_flag; } #ifdef HYPRE_USING_DSUPERLU void * hypre_MGRDirectSolverCreate() { hypre_DSLUData *dslu_data = hypre_CTAlloc(hypre_DSLUData, 1, HYPRE_MEMORY_HOST); return (void *) dslu_data; } HYPRE_Int hypre_MGRDirectSolverSetup( void *solver, hypre_ParCSRMatrix *A, hypre_ParVector *f, hypre_ParVector *u ) { /* Par Data Structure variables */ HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A); MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_CSRMatrix *A_local; HYPRE_Int num_rows; HYPRE_Int num_procs, my_id; HYPRE_Int pcols=1, prows=1; HYPRE_BigInt *big_rowptr = NULL; hypre_DSLUData *dslu_data = (hypre_DSLUData *) solver; HYPRE_Int info = 0; HYPRE_Int nrhs = 0; 
hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); /* Merge diag and offd into one matrix (global ids) */ A_local = hypre_MergeDiagAndOffd(A); num_rows = hypre_CSRMatrixNumRows(A_local); /* Now convert hypre matrix to a SuperMatrix */ #ifdef HYPRE_MIXEDINT { HYPRE_Int *rowptr = NULL; HYPRE_Int i; rowptr = hypre_CSRMatrixI(A_local); big_rowptr = hypre_CTAlloc(HYPRE_BigInt, (num_rows+1), HYPRE_MEMORY_HOST); for(i=0; i<(num_rows+1); i++) { big_rowptr[i] = (HYPRE_BigInt)rowptr[i]; } } #else big_rowptr = hypre_CSRMatrixI(A_local); #endif dCreate_CompRowLoc_Matrix_dist( &(dslu_data->A_dslu),global_num_rows,global_num_rows, hypre_CSRMatrixNumNonzeros(A_local), num_rows, hypre_ParCSRMatrixFirstRowIndex(A), hypre_CSRMatrixData(A_local), hypre_CSRMatrixBigJ(A_local),big_rowptr, SLU_NR_loc, SLU_D, SLU_GE); /* DOK: SuperLU frees assigned data, so set them to null before * calling hypre_CSRMatrixdestroy on A_local to avoid memory errors. */ #ifndef HYPRE_MIXEDINT hypre_CSRMatrixI(A_local) = NULL; #endif hypre_CSRMatrixData(A_local) = NULL; hypre_CSRMatrixBigJ(A_local) = NULL; hypre_CSRMatrixDestroy(A_local); /*Create process grid */ while (prows*pcols <= num_procs) ++prows; --prows; pcols = num_procs/prows; while (prows*pcols != num_procs) { prows -= 1; pcols = num_procs/prows; } //hypre_printf(" prows %d pcols %d\n", prows, pcols); superlu_gridinit(comm, prows, pcols, &(dslu_data->dslu_data_grid)); set_default_options_dist(&(dslu_data->dslu_options)); dslu_data->dslu_options.Fact = DOFACT; dslu_data->dslu_options.PrintStat = NO; /*dslu_data->dslu_options.IterRefine = SLU_DOUBLE; dslu_data->dslu_options.ColPerm = MMD_AT_PLUS_A; dslu_data->dslu_options.DiagPivotThresh = 1.0; dslu_data->dslu_options.ReplaceTinyPivot = NO; */ dScalePermstructInit(global_num_rows, global_num_rows, &(dslu_data->dslu_ScalePermstruct)); dLUstructInit(global_num_rows, &(dslu_data->dslu_data_LU)); PStatInit(&(dslu_data->dslu_data_stat)); dslu_data->global_num_rows = global_num_rows; dslu_data->berr = hypre_CTAlloc(HYPRE_Real, 1, HYPRE_MEMORY_HOST); dslu_data->berr[0] = 0.0; pdgssvx(&(dslu_data->dslu_options), &(dslu_data->A_dslu), &(dslu_data->dslu_ScalePermstruct), NULL, num_rows, nrhs, &(dslu_data->dslu_data_grid), &(dslu_data->dslu_data_LU), &(dslu_data->dslu_solve), dslu_data->berr, &(dslu_data->dslu_data_stat), &info); dslu_data->dslu_options.Fact = FACTORED; return hypre_error_flag; } HYPRE_Int hypre_MGRDirectSolverSolve( void *solver, hypre_ParCSRMatrix *A, hypre_ParVector *f, hypre_ParVector *u ) { hypre_SLUDistSolve(solver, f, u); return hypre_error_flag; } HYPRE_Int hypre_MGRDirectSolverDestroy( void *solver ) { hypre_SLUDistDestroy(solver); return hypre_error_flag; } #endif
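/* Illustrative sketch, not part of hypre: the vector-transfer routines above,
 * hypre_MGRAddVectorP and hypre_MGRAddVectorR, walk the long vector once and
 * advance a second index only at rows whose CF_marker matches point_type.
 * The standalone helper below mirrors the hypre_MGRAddVectorP loop,
 * to[i] = b*to[i] + a*from[j], with j counting only the marked rows.
 * All names here are hypothetical and chosen for the example. */
static void add_vector_p_sketch(const int *CF_marker, int point_type,
                                double a, const double *from,
                                double b, double *to, int n /* length of 'to' */)
{
   int i, j = 0;
   for (i = 0; i < n; i++)
   {
      if (CF_marker[i] == point_type)
      {
         to[i] = b * to[i] + a * from[j];   /* scatter from the short vector */
         j++;
      }
   }
}
/* hypre_MGRAddVectorR is the mirror image: it gathers the marked entries of
 * the long vector into consecutive slots of the short one. */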
GB_binop__isge_int32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__isge_int32) // A.*B function (eWiseMult): GB (_AemultB_01__isge_int32) // A.*B function (eWiseMult): GB (_AemultB_02__isge_int32) // A.*B function (eWiseMult): GB (_AemultB_03__isge_int32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__isge_int32) // A*D function (colscale): GB (_AxD__isge_int32) // D*A function (rowscale): GB (_DxB__isge_int32) // C+=B function (dense accum): GB (_Cdense_accumB__isge_int32) // C+=b function (dense accum): GB (_Cdense_accumb__isge_int32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isge_int32) // C=scalar+B GB (_bind1st__isge_int32) // C=scalar+B' GB (_bind1st_tran__isge_int32) // C=A+scalar GB (_bind2nd__isge_int32) // C=A'+scalar GB (_bind2nd_tran__isge_int32) // C type: int32_t // A type: int32_t // B,b type: int32_t // BinaryOp: cij = (aij >= bij) #define GB_ATYPE \ int32_t #define GB_BTYPE \ int32_t #define GB_CTYPE \ int32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int32_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int32_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x >= y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISGE || GxB_NO_INT32 || GxB_NO_ISGE_INT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__isge_int32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__isge_int32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__isge_int32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int32_t int32_t bwork = (*((int32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__isge_int32) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *restrict Cx = (int32_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__isge_int32) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *restrict Cx = (int32_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__isge_int32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; 
#endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__isge_int32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__isge_int32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__isge_int32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__isge_int32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__isge_int32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return 
(GrB_NO_VALUE) ; #else int32_t *Cx = (int32_t *) Cx_output ; int32_t x = (*((int32_t *) x_input)) ; int32_t *Bx = (int32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int32_t bij = GBX (Bx, p, false) ; Cx [p] = (x >= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__isge_int32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int32_t *Cx = (int32_t *) Cx_output ; int32_t *Ax = (int32_t *) Ax_input ; int32_t y = (*((int32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int32_t aij = GBX (Ax, p, false) ; Cx [p] = (aij >= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x >= aij) ; \ } GrB_Info GB (_bind1st_tran__isge_int32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t x = (*((const int32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij >= y) ; \ } GrB_Info GB (_bind2nd_tran__isge_int32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t y = (*((const int32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
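/* Minimal standalone sketch, not the GraphBLAS kernel itself: the
 * GB(_bind2nd__isge_int32) routine above applies z = (aij >= y) entry-wise,
 * splitting the iterations across threads with an OpenMP parallel for.  The
 * sketch below shows the same pattern on a plain dense int32_t array; the
 * real kernel additionally skips entries whose bitmap bit Ab[p] is clear
 * (the GBB/GBX macros).  The function name and the dense-array assumption
 * are illustrative only. */
#include <stdint.h>

static void bind2nd_isge_int32_sketch (int32_t *Cx, const int32_t *Ax,
                                       int32_t y, int64_t anz, int nthreads)
{
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        Cx [p] = (Ax [p] >= y) ;   /* ISGE yields 0 or 1, stored as int32_t */
    }
}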
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__isge_int32) // A.*B function (eWiseMult): GB (_AemultB_01__isge_int32) // A.*B function (eWiseMult): GB (_AemultB_02__isge_int32) // A.*B function (eWiseMult): GB (_AemultB_03__isge_int32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__isge_int32) // A*D function (colscale): GB (_AxD__isge_int32) // D*A function (rowscale): GB (_DxB__isge_int32) // C+=B function (dense accum): GB (_Cdense_accumB__isge_int32) // C+=b function (dense accum): GB (_Cdense_accumb__isge_int32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isge_int32) // C=scalar+B GB (_bind1st__isge_int32) // C=scalar+B' GB (_bind1st_tran__isge_int32) // C=A+scalar GB (_bind2nd__isge_int32) // C=A'+scalar GB (_bind2nd_tran__isge_int32) // C type: int32_t // A type: int32_t // B,b type: int32_t // BinaryOp: cij = (aij >= bij) #define GB_ATYPE \ int32_t #define GB_BTYPE \ int32_t #define GB_CTYPE \ int32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int32_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int32_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x >= y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISGE || GxB_NO_INT32 || GxB_NO_ISGE_INT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__isge_int32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__isge_int32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__isge_int32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int32_t int32_t bwork = (*((int32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__isge_int32) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *restrict Cx = (int32_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__isge_int32) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *restrict Cx = (int32_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__isge_int32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; 
#endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__isge_int32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__isge_int32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__isge_int32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__isge_int32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__isge_int32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return 
(GrB_NO_VALUE) ; #else int32_t *Cx = (int32_t *) Cx_output ; int32_t x = (*((int32_t *) x_input)) ; int32_t *Bx = (int32_t *) Bx_input ; int64_t p ; for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int32_t bij = GBX (Bx, p, false) ; Cx [p] = (x >= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__isge_int32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int32_t *Cx = (int32_t *) Cx_output ; int32_t *Ax = (int32_t *) Ax_input ; int32_t y = (*((int32_t *) y_input)) ; for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int32_t aij = GBX (Ax, p, false) ; Cx [p] = (aij >= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x >= aij) ; \ } GrB_Info GB (_bind1st_tran__isge_int32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t x = (*((const int32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij >= y) ; \ } GrB_Info GB (_bind2nd_tran__isge_int32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t y = (*((const int32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
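/* This listing appears to be the same generated kernel with the
 * "#pragma omp parallel for" lines removed from the bind1st/bind2nd loops,
 * i.e. a serial variant.  For reference, a standalone sketch of the serial
 * bind1st loop, z = (x >= bij), on a dense int32_t array; names are
 * illustrative and not part of GraphBLAS. */
#include <stdint.h>

static void bind1st_isge_int32_sketch (int32_t *Cx, int32_t x,
                                       const int32_t *Bx, int64_t bnz)
{
    int64_t p ;
    for (p = 0 ; p < bnz ; p++)
    {
        Cx [p] = (x >= Bx [p]) ;
    }
}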
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__isge_int32) // A.*B function (eWiseMult): GB (_AemultB_01__isge_int32) // A.*B function (eWiseMult): GB (_AemultB_02__isge_int32) // A.*B function (eWiseMult): GB (_AemultB_03__isge_int32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__isge_int32) // A*D function (colscale): GB (_AxD__isge_int32) // D*A function (rowscale): GB (_DxB__isge_int32) // C+=B function (dense accum): GB (_Cdense_accumB__isge_int32) // C+=b function (dense accum): GB (_Cdense_accumb__isge_int32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isge_int32) // C=scalar+B GB (_bind1st__isge_int32) // C=scalar+B' GB (_bind1st_tran__isge_int32) // C=A+scalar GB (_bind2nd__isge_int32) // C=A'+scalar GB (_bind2nd_tran__isge_int32) // C type: int32_t // A type: int32_t // B,b type: int32_t // BinaryOp: cij = (aij >= bij) #define GB_ATYPE \ int32_t #define GB_BTYPE \ int32_t #define GB_CTYPE \ int32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int32_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int32_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x >= y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISGE || GxB_NO_INT32 || GxB_NO_ISGE_INT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__isge_int32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__isge_int32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__isge_int32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int32_t int32_t bwork = (*((int32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__isge_int32) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *restrict Cx = (int32_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__isge_int32) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *restrict Cx = (int32_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__isge_int32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; 
#endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__isge_int32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__isge_int32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__isge_int32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__isge_int32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__isge_int32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return 
(GrB_NO_VALUE) ; #else int32_t *Cx = (int32_t *) Cx_output ; int32_t x = (*((int32_t *) x_input)) ; int32_t *Bx = (int32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int32_t bij = GBX (Bx, p, false) ; Cx [p] = (x >= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__isge_int32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int32_t *Cx = (int32_t *) Cx_output ; int32_t *Ax = (int32_t *) Ax_input ; int32_t y = (*((int32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int32_t aij = GBX (Ax, p, false) ; Cx [p] = (aij >= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x >= aij) ; \ } GrB_Info GB (_bind1st_tran__isge_int32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t x = (*((const int32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij >= y) ; \ } GrB_Info GB (_bind2nd_tran__isge_int32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t y = (*((const int32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
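All of the generated ISGE_INT32 kernels above reduce to the same scalar operation, cij = (aij >= bij) on int32_t values, applied either over a sparsity pattern or densely. The following standalone sketch is not part of the generated file and does not use the GraphBLAS API; the function and array names are illustrative. It only shows the elementwise semantics of the GB_BINOP macro together with the OpenMP parallel-for pattern used by the bind1st/bind2nd kernels.

#include <stdint.h>

/* Sketch: C[p] = (A[p] >= B[p]) for dense int32 arrays of length n,
   mirroring the GB_BINOP macro z = (x >= y) used by the kernels above. */
static void isge_int32_dense(int32_t *C, const int32_t *A, const int32_t *B,
                             int64_t n, int nthreads)
{
    int64_t p;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0; p < n; p++)
    {
        C[p] = (A[p] >= B[p]);   /* ISGE yields 1 or 0, stored as int32_t */
    }
}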
omp_pause_resource.c
// RUN: %libomp-compile-and-run #include <stdio.h> #include "omp_testsuite.h" int test_omp_pause_resource() { int fails, nthreads, my_dev; fails = 0; nthreads = 0; my_dev = omp_get_initial_device(); #pragma omp parallel #pragma omp single nthreads = omp_get_num_threads(); if (omp_pause_resource(omp_pause_soft, my_dev)) fails++; #pragma omp parallel shared(nthreads) #pragma omp single nthreads = omp_get_num_threads(); if (nthreads == 0) fails++; if (omp_pause_resource(omp_pause_hard, my_dev)) fails++; nthreads = 0; #pragma omp parallel shared(nthreads) #pragma omp single nthreads = omp_get_num_threads(); if (nthreads == 0) fails++; if (omp_pause_resource_all(omp_pause_soft)) fails++; nthreads = 0; #pragma omp parallel shared(nthreads) #pragma omp single nthreads = omp_get_num_threads(); if (nthreads == 0) fails++; return fails == 0; } int main() { int i; int num_failed = 0; for (i = 0; i < REPETITIONS; i++) { if (!test_omp_pause_resource()) { num_failed++; } } return num_failed; }
// RUN: %libomp-compile-and-run #include <stdio.h> #include "omp_testsuite.h" int test_omp_pause_resource() { int fails, nthreads, my_dev; fails = 0; nthreads = 0; my_dev = omp_get_initial_device(); nthreads = omp_get_num_threads(); if (omp_pause_resource(omp_pause_soft, my_dev)) fails++; nthreads = omp_get_num_threads(); if (nthreads == 0) fails++; if (omp_pause_resource(omp_pause_hard, my_dev)) fails++; nthreads = 0; nthreads = omp_get_num_threads(); if (nthreads == 0) fails++; if (omp_pause_resource_all(omp_pause_soft)) fails++; nthreads = 0; nthreads = omp_get_num_threads(); if (nthreads == 0) fails++; return fails == 0; } int main() { int i; int num_failed = 0; for (i = 0; i < REPETITIONS; i++) { if (!test_omp_pause_resource()) { num_failed++; } } return num_failed; }
// RUN: %libomp-compile-and-run #include <stdio.h> #include "omp_testsuite.h" int test_omp_pause_resource() { int fails, nthreads, my_dev; fails = 0; nthreads = 0; my_dev = omp_get_initial_device(); #pragma omp parallel #pragma omp single nthreads = omp_get_num_threads(); if (omp_pause_resource(omp_pause_soft, my_dev)) fails++; #pragma omp parallel shared(nthreads) #pragma omp single nthreads = omp_get_num_threads(); if (nthreads == 0) fails++; if (omp_pause_resource(omp_pause_hard, my_dev)) fails++; nthreads = 0; #pragma omp parallel shared(nthreads) #pragma omp single nthreads = omp_get_num_threads(); if (nthreads == 0) fails++; if (omp_pause_resource_all(omp_pause_soft)) fails++; nthreads = 0; #pragma omp parallel shared(nthreads) #pragma omp single nthreads = omp_get_num_threads(); if (nthreads == 0) fails++; return fails == 0; } int main() { int i; int num_failed = 0; for (i = 0; i < REPETITIONS; i++) { if (!test_omp_pause_resource()) { num_failed++; } } return num_failed; }
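For reference, the OpenMP 5.0 entry points the test above exercises are omp_pause_resource(kind, device_num) and omp_pause_resource_all(kind), both returning zero on success, with omp_pause_soft and omp_pause_hard as the pause kinds. Below is a minimal sketch of that call pattern outside the test harness (no omp_testsuite.h, error handling reduced to a message); it requires an OpenMP 5.0 runtime.

#include <stdio.h>
#include <omp.h>

int main(void)
{
    int dev = omp_get_initial_device();

    /* Soft pause: release reusable runtime resources on the initial device. */
    if (omp_pause_resource(omp_pause_soft, dev) != 0)
        printf("omp_pause_resource(soft) failed\n");

    /* Hard pause: the runtime may also discard internal state. */
    if (omp_pause_resource(omp_pause_hard, dev) != 0)
        printf("omp_pause_resource(hard) failed\n");

    /* Pause resources on all devices, then OpenMP can still be used again. */
    if (omp_pause_resource_all(omp_pause_soft) != 0)
        printf("omp_pause_resource_all failed\n");

    #pragma omp parallel
    #pragma omp single
    printf("threads after pause: %d\n", omp_get_num_threads());

    return 0;
}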
fibo_openmp.c
#include <stdio.h> #include <math.h> /***** Begin *****/ long long fiboArry[100]; unsigned long long fibo(int n){ return 1/sqrt(5) * (pow((1+sqrt(5))/2, n) - pow((1-sqrt(5))/2, n)); } int main() { int i; int n; // long long temp1 = 1; // long long temp2 = 1; // long long next_fibo; scanf("%d", &n); fiboArry[0] = 0; fiboArry[1] = 1; #pragma omp parallel for for(i = 2; i <= n; ++i){ fiboArry[i] = fibo(i); } // printf("%d", 1); for(i = 1;i<=n;++i){ printf("%llu ", fiboArry[i]); } printf("\n"); return 0; } /***** End *****/
#include <stdio.h> #include <math.h> /***** Begin *****/ long long fiboArry[100]; unsigned long long fibo(int n) { return 1 / sqrt(5) * (pow((1 + sqrt(5)) / 2, n) - pow((1 - sqrt(5)) / 2, n)); } int main() { int i; int n; //long long temp1 = 1; //long long temp2 = 1; //long long next_fibo; scanf("%d", &n); fiboArry[0] = 0; fiboArry[1] = 1; for (i = 2; i <= n; ++i) { fiboArry[i] = fibo(i); } //printf("%d", 1); for (i = 1; i <= n; ++i) { printf("%llu ", fiboArry[i]); } printf("\n"); return 0; } /***** End *****/
#include <stdio.h> #include <math.h> /***** Begin *****/ long long fiboArry[100]; unsigned long long fibo(int n) { return 1 / sqrt(5) * (pow((1 + sqrt(5)) / 2, n) - pow((1 - sqrt(5)) / 2, n)); } int main() { int i; int n; //long long temp1 = 1; //long long temp2 = 1; //long long next_fibo; scanf("%d", &n); fiboArry[0] = 0; fiboArry[1] = 1; #pragma omp parallel for for (i = 2; i <= n; ++i) { fiboArry[i] = fibo(i); } //printf("%d", 1); for (i = 1; i <= n; ++i) { printf("%llu ", fiboArry[i]); } printf("\n"); return 0; } /***** End *****/
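The fibo_openmp.c variants above compute Fibonacci numbers through Binet's closed form in double precision, which starts to round incorrectly once the terms exceed about 2^53 (roughly n = 78). The commented-out temp1/temp2/next_fibo variables hint at the exact iterative alternative; a sketch of that version follows. Note the recurrence carries a loop dependence, so unlike the closed-form loop it is not a candidate for a plain #pragma omp parallel for.

#include <stdio.h>

/* Exact iterative Fibonacci: fills fib[1..n] without floating point.
   Values stay exact as long as they fit in unsigned long long (n <= 93). */
int main(void)
{
    unsigned long long fib[100];
    int n, i;

    if (scanf("%d", &n) != 1 || n < 1 || n > 93)
        return 1;

    fib[0] = 0;
    fib[1] = 1;
    for (i = 2; i <= n; ++i)          /* loop-carried dependence: keep serial */
        fib[i] = fib[i - 1] + fib[i - 2];

    for (i = 1; i <= n; ++i)
        printf("%llu ", fib[i]);
    printf("\n");
    return 0;
}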
io_and_allocation.h
#ifndef IO_AND_ALLOCATION #define IO_AND_ALLOCATION #include <iostream> #include <string> #include <sstream> #include <fstream> #include <vector> #include <bitset> #include <ctime> #include <algorithm> #include <numeric> #include <random> #include <cmath> // log2 //#include <omp.h> #include "config.h" #include "helper/rngpu.h" // safe division #ifndef SDIV #define SDIV(x,y)(((x)+(y)-1)/(y)) #endif using std::string; using std::vector; float getInitChance(float density, uint8_t factorDim) { float threshold; switch(INITIALIZATIONMODE) { case 1: threshold = (sqrt(1 - pow(1 - density, float(1) / factorDim))); break; case 2: threshold = (density / 100); break; case 3: threshold = (density); break; default: threshold = 0; break; } return threshold; } void generate_random_matrix(const int height, const int width, const uint8_t factorDim, const int num_kiss, vector<uint32_t> &Ab, vector<uint32_t> &Bb, vector<uint32_t> &C0b, float &density) { uint32_t bit_vector_mask = uint32_t(~0) >> (32-factorDim); Ab.clear(); Ab.resize(height, bit_vector_mask); Bb.clear(); Bb.resize(width, bit_vector_mask); uint32_t seed = 42; fast_kiss_state32_t state = get_initial_fast_kiss_state32(seed); for(int i=0; i < height; ++i) { // Ab[i] = bit_vector_mask; for(int kiss = 0; kiss < num_kiss; ++kiss) Ab[i] &= fast_kiss32(state); } for(int j=0; j < width; ++j) { // Bb[j] = bit_vector_mask; for(int kiss = 0; kiss < num_kiss; ++kiss) Bb[j] &= fast_kiss32(state); } // Malloc for C0b int padded_height_32 = SDIV(height, 32); int sizeCb = padded_height_32 * width; C0b.clear(); C0b.resize(sizeCb, 0); // Create C int nonzeroelements = 0; for(int j=0; j < width; ++j) { for(int i=0; i < height; ++i) { if(Ab[i] & Bb[j]) { // int index = j*height+i; int vecId = i / 32 * width + j; int vecLane = i % 32; C0b[vecId] |= 1 << vecLane; ++nonzeroelements; } } } density = float(nonzeroelements) / height / width; printf("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n"); printf("MATRIX CREATION COMPLETE\n"); printf("Height: %i\nWidth: %i\nNon-zero elements: %i\nDensity: %f\n", height, width, nonzeroelements, density); printf("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n"); } void generate_random_matrix(const int height, const int width, const uint8_t factorDim, const int num_kiss, vector<float> &A, vector<float> &B, vector<uint32_t> &C0b, float &density) { uint32_t bit_vector_mask = uint32_t(~0) >> (32-factorDim); A.clear(); A.resize(height * factorDim, 0); B.clear(); B.resize(width * factorDim, 0); uint32_t seed = 42; fast_kiss_state32_t state = get_initial_fast_kiss_state32(seed); for(int i=0; i < height; ++i) { uint32_t mask = bit_vector_mask; for(int kiss = 0; kiss < num_kiss; ++kiss) mask &= fast_kiss32(state); for(int k=0; k < factorDim; ++k) A[i * factorDim + k] = (mask >> k) & 1 ? 1 : 0; } for(int j=0; j < width; ++j) { uint32_t mask = bit_vector_mask; for(int kiss = 0; kiss < num_kiss; ++kiss) mask &= fast_kiss32(state); for(int k=0; k < factorDim; ++k) B[j * factorDim + k] = (mask >> k) & 1 ? 
1 : 0; } // Malloc for C0b size_t padded_height_32 = SDIV(height, 32); size_t sizeCb = padded_height_32 * width; C0b.clear(); C0b.resize(sizeCb, 0); // Create C int nonzeroelements = 0; for(int j=0; j < width; ++j) { for(int i=0; i < height; ++i) { for (int k=0; k < factorDim; ++k) { if((A[i * factorDim + k] > 0.5f) && (B[j * factorDim + k] > 0.5f)) { int vecId = i / 32 * width + j; int vecLane = i % 32; C0b[vecId] |= 1 << vecLane; ++nonzeroelements; break; } } } } density = float(nonzeroelements) / height / width; printf("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n"); printf("MATRIX CREATION COMPLETE\n"); printf("Height: %i\nWidth: %i\nNon-zero elements: %i\nDensity: %f\n", height, width, nonzeroelements, density); printf("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n"); } void readInputFileData(const string filename, vector<uint32_t> &C0b, int &height, int &width, float &density) { std::ifstream is {filename}; if(!is.good()) throw std::runtime_error{"File " + filename + " could not be opened!"}; std::uint64_t ones = 0; is >> height >> width >> ones; int padded_height_32 = SDIV(height, 32); int sizeCb = padded_height_32 * width; C0b.clear(); C0b.resize(sizeCb,0); int nonzeroelements = 0; for(; ones > 0; --ones) { std::uint64_t r, c; is >> r >> c; int vecId = r / 32 * width + c; int vecLane = r % 32; C0b[vecId] |= 1 << vecLane; nonzeroelements++; } density = float(nonzeroelements) / height / width; printf("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n"); printf("READING OF DATA FILE COMPLETE\n"); printf("Read height: %i\nRead width: %i\nNon-zero elements: %i\nDensity: %f\n", height, width, nonzeroelements, density); printf("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n"); } // https://stackoverflow.com/questions/874134/find-if-string-ends-with-another-string-in-c bool endsWith(const string& s, const string& suffix) { return s.rfind(suffix) == (s.size()-suffix.size()); } // Initialization of a factor, setting all bits of a row at once void initFactorRowwise(vector<uint32_t> &Ab, const int height, const uint8_t factorDim, const uint32_t seed, const int randDepth) { Ab.clear(); if(randDepth < 16) { const uint32_t factorMask = UINT32_MAX >> (32-factorDim); Ab.resize(height, factorMask); // int counter = 0; #pragma omp parallel //reduce(+:counter) { //fast_kiss_state32_t state = get_initial_fast_kiss_state32(seed + omp_get_thread_num()); fast_kiss_state32_t state = get_initial_fast_kiss_state32(seed); #pragma omp for for (int i = 0; i < height; i++) { for(int d = 0; d < randDepth; ++d) { Ab[i] &= fast_kiss32(state); } // if(Ab[i]) ++counter; } } // std::cout << "nonzero rows in factor: " << counter << std::endl; } else { Ab.resize(height, 0); } } // Initialization of a factor, setting every bits of a row on its own void initFactorBitwise(vector<uint32_t> &Ab, const int height, const uint8_t factorDim, const uint32_t seed, const uint32_t threshold_ui32) { Ab.clear(); Ab.resize(height, 0); // int counter = 0; #pragma omp parallel //reduce(+:counter) { //fast_kiss_state32_t state = get_initial_fast_kiss_state32(seed + omp_get_thread_num()); fast_kiss_state32_t state = get_initial_fast_kiss_state32(seed); #pragma omp for for (int i = 0; i < height; i++) { for (int j = 0; j < factorDim; j++) { if (fast_kiss32(state) < threshold_ui32) Ab[i] |= 1 << j; } // if(Ab[i]) ++counter; } } // std::cout << "nonzero rows in factor: " << counter << std::endl; } // Initialization of a 
factor, setting every bits of a row on its own void initFactorBitwise( vector<float> &A, const int height, const uint8_t factorDim, const uint32_t seed, const uint32_t threshold_ui32) { A.clear(); A.resize(height * factorDim, 0); // int counter = 0; #pragma omp parallel //reduce(+:counter) { //fast_kiss_state32_t state = get_initial_fast_kiss_state32(seed + omp_get_thread_num()); fast_kiss_state32_t state = get_initial_fast_kiss_state32(seed); #pragma omp for for (int i = 0; i < height; i++) { for (int j = 0; j < factorDim; j++) { // two possibilities: // 1) set value 0 or 1 // 2) set random value in [0,0.5] or [0.5,1] if(fast_kiss32(state) < threshold_ui32) { A[i * factorDim + j] = 1; // A[i * factorDim + j] = (fast_kiss32(state) / float(UINT32_MAX)) / 2 + 0.5f; } else { // A[i * factorDim + j] = (fast_kiss32(state) / float(UINT32_MAX)) / 2; } } } } } template <typename factor_t> void initFactor(vector<factor_t> &Ab, const int height, const uint8_t factorDim, const uint32_t seed, const float threshold) { const int randDepth = -log2(threshold)+1; // std::cout << "Init threshold: " << threshold << std::endl; // std::cout << "Init rand depth: " << randDepth << " -> " << pow(2, -randDepth) << std::endl; if(randDepth < factorDim && std::is_same<factor_t, uint32_t>::value) { initFactorRowwise(Ab, height, factorDim, seed, randDepth); } else { initFactorBitwise(Ab, height, factorDim, seed, threshold * UINT32_MAX); } } // Write result factors to file void writeFactorsToFiles(const string& filename, const vector<uint32_t>& Ab, const vector<uint32_t>& Bb, const uint8_t factorDim) { using std::stringstream; using std::bitset; using std::ofstream; time_t now = time(0); tm *ltm = localtime(&now); stringstream date; date << 1+ltm->tm_mon << '-' << ltm->tm_mday << '_' << ltm->tm_hour << ':' << ltm->tm_min << ':' << ltm->tm_sec; stringstream filename_A; filename_A << filename << "_factor_A_" << date.str() << ".data"; stringstream filename_B; filename_B << filename << "_factor_B_" << date.rdbuf() << ".data"; size_t height = Ab.size(); int nonzeroelements = 0; for (size_t i = 0; i < height; i++){ bitset<32> row(Ab[i]); nonzeroelements += row.count(); } ofstream os_A(filename_A.str()); if (os_A.good()){ os_A << height << " " << int(factorDim) << " " << nonzeroelements << "\n"; for (int i = 0; i < height; i++){ // bitset<32> row(Ab[i] >> (32 - factorDim)); // os_A << row << "\n"; for(int k=0; k < factorDim; ++k) os_A << ((Ab[i] >> k) & 1 ? 1 : 0); os_A << "\n"; } os_A.close(); } else { std::cerr << "File " << filename_A.str() << " could not be openend!" << std::endl; } size_t width = Bb.size(); nonzeroelements = 0; for (size_t j = 0; j < width; j++){ bitset<32> col(Bb[j]); nonzeroelements += col.count(); } ofstream os_B(filename_B.str()); if(os_B.good()){ os_B << width << " " << int(factorDim) << " " << nonzeroelements << "\n"; for (int j = 0; j < width; j++){ // bitset<32> col(Bb[j] >> (32 - factorDim)); // os_B << col << "\n"; for(int k=0; k < factorDim; ++k) os_B << ((Bb[j] >> k) & 1 ? 1 : 0); os_B << "\n"; } os_B.close(); } else { std::cerr << "File " << filename_B.str() << " could not be openend!" 
<< std::endl; } std::cout << "Writing to files \"" << filename_A.rdbuf() << "\" and \"" << filename_B.rdbuf() << "\" complete" << std::endl; } template<typename distance_t> void writeDistancesToFile(const string& filename, const vector<distance_t>& distances) { using std::stringstream; using std::bitset; using std::ofstream; time_t now = time(0); tm *ltm = localtime(&now); stringstream date; date << 1+ltm->tm_mon << '-' << ltm->tm_mday << '_' << ltm->tm_hour << ':' << ltm->tm_min << ':' << ltm->tm_sec; stringstream filename_d; filename_d << filename << "_distances_" << date.str() << ".txt"; ofstream os(filename_d.str()); if (os.good()){ for (size_t i = 0; i < distances.size(); i++){ if(i>0) os << "\n"; os << distances[i]; } os.close(); } else { std::cerr << "File " << filename_d.str() << " could not be openend!" << std::endl; } std::cout << "Writing to files \"" << filename_d.rdbuf() << "\" complete" << std::endl; } #endif
#ifndef IO_AND_ALLOCATION #define IO_AND_ALLOCATION #include <iostream> #include <string> #include <sstream> #include <fstream> #include <vector> #include <bitset> #include <ctime> #include <algorithm> #include <numeric> #include <random> #include <cmath> // log2 // #include <omp.h> #include "config.h" #include "helper/rngpu.h" //safe division #ifndef SDIV #define SDIV(x,y)(((x)+(y)-1)/(y)) #endif using std: :string; using std::vector; float getInitChance(float density, uint8_t factorDim) { float threshold; switch (INITIALIZATIONMODE) { case 1: threshold = (sqrt(1 - pow(1 - density, float (1) / factorDim))); break; case 2: threshold = (density / 100); break; case 3: threshold = (density); break; default: threshold = 0; break; } return threshold; } void generate_random_matrix(const int height, const int width, const uint8_t factorDim, const int num_kiss, vector < uint32_t > &Ab, vector < uint32_t > &Bb, vector < uint32_t > &C0b, float &density) { uint32_t bit_vector_mask = uint32_t(~0) >> (32 - factorDim); Ab.clear(); Ab.resize(height, bit_vector_mask); Bb.clear(); Bb.resize(width, bit_vector_mask); uint32_t seed = 42; fast_kiss_state32_t state = get_initial_fast_kiss_state32(seed); for (int i = 0; i < height; ++i) { //Ab[i] = bit_vector_mask; for (int kiss = 0; kiss < num_kiss; ++kiss) Ab[i] &= fast_kiss32(state); } for (int j = 0; j < width; ++j) { //Bb[j] = bit_vector_mask; for (int kiss = 0; kiss < num_kiss; ++kiss) Bb[j] &= fast_kiss32(state); } //Malloc for C0b int padded_height_32 = SDIV(height, 32); int sizeCb = padded_height_32 * width; C0b.clear(); C0b.resize(sizeCb, 0); //Create C int nonzeroelements = 0; for (int j = 0; j < width; ++j) { for (int i = 0; i < height; ++i) { if (Ab[i] & Bb[j]) { //int index = j * height + i; int vecId = i / 32 * width + j; int vecLane = i % 32; C0b[vecId] |= 1 << vecLane; ++nonzeroelements; } } } density = float (nonzeroelements) / height / width; printf("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n"); printf("MATRIX CREATION COMPLETE\n"); printf("Height: %i\nWidth: %i\nNon-zero elements: %i\nDensity: %f\n", height, width, nonzeroelements, density); printf("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n"); } void generate_random_matrix(const int height, const int width, const uint8_t factorDim, const int num_kiss, vector < float >&A, vector < float >&B, vector < uint32_t > &C0b, float &density) { uint32_t bit_vector_mask = uint32_t(~0) >> (32 - factorDim); A.clear(); A.resize(height * factorDim, 0); B.clear(); B.resize(width * factorDim, 0); uint32_t seed = 42; fast_kiss_state32_t state = get_initial_fast_kiss_state32(seed); for (int i = 0; i < height; ++i) { uint32_t mask = bit_vector_mask; for (int kiss = 0; kiss < num_kiss; ++kiss) mask &= fast_kiss32(state); for (int k = 0; k < factorDim; ++k) A[i * factorDim + k] = (mask >> k) & 1 ? 1 : 0; } for (int j = 0; j < width; ++j) { uint32_t mask = bit_vector_mask; for (int kiss = 0; kiss < num_kiss; ++kiss) mask &= fast_kiss32(state); for (int k = 0; k < factorDim; ++k) B[j * factorDim + k] = (mask >> k) & 1 ? 
1 : 0; } //Malloc for C0b size_t padded_height_32 = SDIV(height, 32); size_t sizeCb = padded_height_32 * width; C0b.clear(); C0b.resize(sizeCb, 0); //Create C int nonzeroelements = 0; for (int j = 0; j < width; ++j) { for (int i = 0; i < height; ++i) { for (int k = 0; k < factorDim; ++k) { if ((A[i * factorDim + k] > 0.5 f) && (B[j * factorDim + k] > 0.5 f)) { int vecId = i / 32 * width + j; int vecLane = i % 32; C0b[vecId] |= 1 << vecLane; ++nonzeroelements; break; } } } } density = float (nonzeroelements) / height / width; printf("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n"); printf("MATRIX CREATION COMPLETE\n"); printf("Height: %i\nWidth: %i\nNon-zero elements: %i\nDensity: %f\n", height, width, nonzeroelements, density); printf("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n"); } void readInputFileData(const string filename, vector < uint32_t > &C0b, int &height, int &width, float &density) { std: :ifstream is { filename }; if (!is.good()) throw std::runtime_error { "File " + filename + " could not be opened!" }; std: :uint64_t ones = 0; is >> height >> width >> ones; int padded_height_32 = SDIV(height, 32); int sizeCb = padded_height_32 * width; C0b.clear(); C0b.resize(sizeCb, 0); int nonzeroelements = 0; for (; ones > 0; --ones) { std: : uint64_t r, c; is >> r >> c; int vecId = r / 32 * width + c; int vecLane = r % 32; C0b[vecId] |= 1 << vecLane; nonzeroelements++; } density = float (nonzeroelements) / height / width; printf("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n"); printf("READING OF DATA FILE COMPLETE\n"); printf("Read height: %i\nRead width: %i\nNon-zero elements: %i\nDensity: %f\n", height, width, nonzeroelements, density); printf("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n"); } //https://stackoverflow.com / questions / 874134 / find - if -string -ends - with - another - string - in - c bool endsWith(const string & s, const string & suffix) { return s.rfind(suffix) == (s.size() - suffix.size()); } //Initialization of a factor, setting all bits of a row at once void initFactorRowwise(vector < uint32_t > &Ab, const int height, const uint8_t factorDim, const uint32_t seed, const int randDepth) { Ab.clear(); if (randDepth < 16) { const uint32_t factorMask = UINT32_MAX >> (32 - factorDim); Ab.resize(height, factorMask); //int counter = 0; //fast_kiss_state32_t state = get_initial_fast_kiss_state32(seed + omp_get_thread_num()); fast_kiss_state32_t state = get_initial_fast_kiss_state32(seed); for (int i = 0; i < height; i++) { for (int d = 0; d < randDepth; ++d) { Ab[i] &= fast_kiss32(state); } //if (Ab[i]) ++counter; } //std: : cout << "nonzero rows in factor: " << counter << std: :endl; } else { Ab.resize(height, 0); } } //Initialization of a factor, setting every bits of a row on its own void initFactorBitwise(vector < uint32_t > &Ab, const int height, const uint8_t factorDim, const uint32_t seed, const uint32_t threshold_ui32) { Ab.clear(); Ab.resize(height, 0); //int counter = 0; //fast_kiss_state32_t state = get_initial_fast_kiss_state32(seed + omp_get_thread_num()); fast_kiss_state32_t state = get_initial_fast_kiss_state32(seed); for (int i = 0; i < height; i++) { for (int j = 0; j < factorDim; j++) { if (fast_kiss32(state) < threshold_ui32) Ab[i] |= 1 << j; } //if (Ab[i]) ++counter; } //std: : cout << "nonzero rows in factor: " << counter << std: :endl; } //Initialization of a factor, setting every bits of a row on its own void 
initFactorBitwise(vector < float >&A, const int height, const uint8_t factorDim, const uint32_t seed, const uint32_t threshold_ui32) { A.clear(); A.resize(height * factorDim, 0); //int counter = 0; //fast_kiss_state32_t state = get_initial_fast_kiss_state32(seed + omp_get_thread_num()); fast_kiss_state32_t state = get_initial_fast_kiss_state32(seed); for (int i = 0; i < height; i++) { for (int j = 0; j < factorDim; j++) { //two possibilities: //1) set value 0 or 1 // 2) set random value in[0, 0.5] or[0.5, 1] if (fast_kiss32(state) < threshold_ui32) { A[i * factorDim + j] = 1; //A[i * factorDim + j] = (fast_kiss32(state) / float (UINT32_MAX))/2 + 0.5 f; } else { //A[i * factorDim + j] = (fast_kiss32(state) / float (UINT32_MAX))/2; } } } } template < typename factor_t > void initFactor(vector < factor_t > &Ab, const int height, const uint8_t factorDim, const uint32_t seed, const float threshold) { const int randDepth = -log2(threshold) + 1; //std: : cout << "Init threshold: " << threshold << std: :endl; //std: : cout << "Init rand depth: " << randDepth << " -> " << pow(2, -randDepth) << std: :endl; if (randDepth < factorDim && std: : is_same < factor_t, uint32_t >: :value) { initFactorRowwise(Ab, height, factorDim, seed, randDepth); } else { initFactorBitwise(Ab, height, factorDim, seed, threshold * UINT32_MAX); } } //Write result factors to file void writeFactorsToFiles(const string & filename, const vector < uint32_t > &Ab, const vector < uint32_t > &Bb, const uint8_t factorDim) { using std::stringstream; using std::bitset; using std::ofstream; time_t now = time(0); tm *ltm = localtime(&now); stringstream date; date << 1 + ltm->tm_mon << '-' << ltm->tm_mday << '_' << ltm->tm_hour << ':' << ltm->tm_min << ':' << ltm->tm_sec; stringstream filename_A; filename_A << filename << "_factor_A_" << date.str() << ".data"; stringstream filename_B; filename_B << filename << "_factor_B_" << date.rdbuf() << ".data"; size_t height = Ab.size(); int nonzeroelements = 0; for (size_t i = 0; i < height; i++) { bitset < 32 > row(Ab[i]); nonzeroelements += row.count(); } ofstream os_A(filename_A.str()); if (os_A.good()) { os_A << height << " " << int (factorDim) << " " << nonzeroelements << "\n"; for (int i = 0; i < height; i++) { //bitset < 32 > row(Ab[i] >> (32 - factorDim)); //os_A << row << "\n"; for (int k = 0; k < factorDim; ++k) os_A << ((Ab[i] >> k) & 1 ? 1 : 0); os_A << "\n"; } os_A.close(); } else { std: : cerr << "File " << filename_A.str() << " could not be openend!" << std: :endl; } size_t width = Bb.size(); nonzeroelements = 0; for (size_t j = 0; j < width; j++) { bitset < 32 > col(Bb[j]); nonzeroelements += col.count(); } ofstream os_B(filename_B.str()); if (os_B.good()) { os_B << width << " " << int (factorDim) << " " << nonzeroelements << "\n"; for (int j = 0; j < width; j++) { //bitset < 32 > col(Bb[j] >> (32 - factorDim)); //os_B << col << "\n"; for (int k = 0; k < factorDim; ++k) os_B << ((Bb[j] >> k) & 1 ? 1 : 0); os_B << "\n"; } os_B.close(); } else { std: : cerr << "File " << filename_B.str() << " could not be openend!" 
<< std: :endl; } std: :cout << "Writing to files \"" << filename_A.rdbuf() << "\" and \"" << filename_B.rdbuf() << "\" complete" << std: :endl; } template < typename distance_t > void writeDistancesToFile(const string & filename, const vector < distance_t > &distances) { using std::stringstream; using std::bitset; using std::ofstream; time_t now = time(0); tm *ltm = localtime(&now); stringstream date; date << 1 + ltm->tm_mon << '-' << ltm->tm_mday << '_' << ltm->tm_hour << ':' << ltm->tm_min << ':' << ltm->tm_sec; stringstream filename_d; filename_d << filename << "_distances_" << date.str() << ".txt"; ofstream os(filename_d.str()); if (os.good()) { for (size_t i = 0; i < distances.size(); i++) { if (i > 0) os << "\n"; os << distances[i]; } os.close(); } else { std: : cerr << "File " << filename_d.str() << " could not be openend!" << std: :endl; } std: : cout << "Writing to files \"" << filename_d.rdbuf() << "\" complete" << std: :endl; } #endif
#ifndef IO_AND_ALLOCATION #define IO_AND_ALLOCATION #include <iostream> #include <string> #include <sstream> #include <fstream> #include <vector> #include <bitset> #include <ctime> #include <algorithm> #include <numeric> #include <random> #include <cmath> // log2 // #include <omp.h> #include "config.h" #include "helper/rngpu.h" //safe division #ifndef SDIV #define SDIV(x,y)(((x)+(y)-1)/(y)) #endif using std: :string; using std::vector; float getInitChance(float density, uint8_t factorDim) { float threshold; switch (INITIALIZATIONMODE) { case 1: threshold = (sqrt(1 - pow(1 - density, float (1) / factorDim))); break; case 2: threshold = (density / 100); break; case 3: threshold = (density); break; default: threshold = 0; break; } return threshold; } void generate_random_matrix(const int height, const int width, const uint8_t factorDim, const int num_kiss, vector < uint32_t > &Ab, vector < uint32_t > &Bb, vector < uint32_t > &C0b, float &density) { uint32_t bit_vector_mask = uint32_t(~0) >> (32 - factorDim); Ab.clear(); Ab.resize(height, bit_vector_mask); Bb.clear(); Bb.resize(width, bit_vector_mask); uint32_t seed = 42; fast_kiss_state32_t state = get_initial_fast_kiss_state32(seed); for (int i = 0; i < height; ++i) { //Ab[i] = bit_vector_mask; for (int kiss = 0; kiss < num_kiss; ++kiss) Ab[i] &= fast_kiss32(state); } for (int j = 0; j < width; ++j) { //Bb[j] = bit_vector_mask; for (int kiss = 0; kiss < num_kiss; ++kiss) Bb[j] &= fast_kiss32(state); } //Malloc for C0b int padded_height_32 = SDIV(height, 32); int sizeCb = padded_height_32 * width; C0b.clear(); C0b.resize(sizeCb, 0); //Create C int nonzeroelements = 0; for (int j = 0; j < width; ++j) { for (int i = 0; i < height; ++i) { if (Ab[i] & Bb[j]) { //int index = j * height + i; int vecId = i / 32 * width + j; int vecLane = i % 32; C0b[vecId] |= 1 << vecLane; ++nonzeroelements; } } } density = float (nonzeroelements) / height / width; printf("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n"); printf("MATRIX CREATION COMPLETE\n"); printf("Height: %i\nWidth: %i\nNon-zero elements: %i\nDensity: %f\n", height, width, nonzeroelements, density); printf("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n"); } void generate_random_matrix(const int height, const int width, const uint8_t factorDim, const int num_kiss, vector < float >&A, vector < float >&B, vector < uint32_t > &C0b, float &density) { uint32_t bit_vector_mask = uint32_t(~0) >> (32 - factorDim); A.clear(); A.resize(height * factorDim, 0); B.clear(); B.resize(width * factorDim, 0); uint32_t seed = 42; fast_kiss_state32_t state = get_initial_fast_kiss_state32(seed); for (int i = 0; i < height; ++i) { uint32_t mask = bit_vector_mask; for (int kiss = 0; kiss < num_kiss; ++kiss) mask &= fast_kiss32(state); for (int k = 0; k < factorDim; ++k) A[i * factorDim + k] = (mask >> k) & 1 ? 1 : 0; } for (int j = 0; j < width; ++j) { uint32_t mask = bit_vector_mask; for (int kiss = 0; kiss < num_kiss; ++kiss) mask &= fast_kiss32(state); for (int k = 0; k < factorDim; ++k) B[j * factorDim + k] = (mask >> k) & 1 ? 
1 : 0; } //Malloc for C0b size_t padded_height_32 = SDIV(height, 32); size_t sizeCb = padded_height_32 * width; C0b.clear(); C0b.resize(sizeCb, 0); //Create C int nonzeroelements = 0; for (int j = 0; j < width; ++j) { for (int i = 0; i < height; ++i) { for (int k = 0; k < factorDim; ++k) { if ((A[i * factorDim + k] > 0.5 f) && (B[j * factorDim + k] > 0.5 f)) { int vecId = i / 32 * width + j; int vecLane = i % 32; C0b[vecId] |= 1 << vecLane; ++nonzeroelements; break; } } } } density = float (nonzeroelements) / height / width; printf("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n"); printf("MATRIX CREATION COMPLETE\n"); printf("Height: %i\nWidth: %i\nNon-zero elements: %i\nDensity: %f\n", height, width, nonzeroelements, density); printf("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n"); } void readInputFileData(const string filename, vector < uint32_t > &C0b, int &height, int &width, float &density) { std: :ifstream is { filename }; if (!is.good()) throw std::runtime_error { "File " + filename + " could not be opened!" }; std: :uint64_t ones = 0; is >> height >> width >> ones; int padded_height_32 = SDIV(height, 32); int sizeCb = padded_height_32 * width; C0b.clear(); C0b.resize(sizeCb, 0); int nonzeroelements = 0; for (; ones > 0; --ones) { std: : uint64_t r, c; is >> r >> c; int vecId = r / 32 * width + c; int vecLane = r % 32; C0b[vecId] |= 1 << vecLane; nonzeroelements++; } density = float (nonzeroelements) / height / width; printf("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n"); printf("READING OF DATA FILE COMPLETE\n"); printf("Read height: %i\nRead width: %i\nNon-zero elements: %i\nDensity: %f\n", height, width, nonzeroelements, density); printf("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n"); } //https://stackoverflow.com / questions / 874134 / find - if -string -ends - with - another - string - in - c bool endsWith(const string & s, const string & suffix) { return s.rfind(suffix) == (s.size() - suffix.size()); } //Initialization of a factor, setting all bits of a row at once void initFactorRowwise(vector < uint32_t > &Ab, const int height, const uint8_t factorDim, const uint32_t seed, const int randDepth) { Ab.clear(); if (randDepth < 16) { const uint32_t factorMask = UINT32_MAX >> (32 - factorDim); Ab.resize(height, factorMask); //int counter = 0; #pragma omp parallel //reduce(+:counter) { //fast_kiss_state32_t state = get_initial_fast_kiss_state32(seed + omp_get_thread_num()); fast_kiss_state32_t state = get_initial_fast_kiss_state32(seed); #pragma omp for for (int i = 0; i < height; i++) { for (int d = 0; d < randDepth; ++d) { Ab[i] &= fast_kiss32(state); } //if (Ab[i]) ++counter; } } //std: : cout << "nonzero rows in factor: " << counter << std: :endl; } else { Ab.resize(height, 0); } } //Initialization of a factor, setting every bits of a row on its own void initFactorBitwise(vector < uint32_t > &Ab, const int height, const uint8_t factorDim, const uint32_t seed, const uint32_t threshold_ui32) { Ab.clear(); Ab.resize(height, 0); //int counter = 0; #pragma omp parallel //reduce(+:counter) { //fast_kiss_state32_t state = get_initial_fast_kiss_state32(seed + omp_get_thread_num()); fast_kiss_state32_t state = get_initial_fast_kiss_state32(seed); #pragma omp for for (int i = 0; i < height; i++) { for (int j = 0; j < factorDim; j++) { if (fast_kiss32(state) < threshold_ui32) Ab[i] |= 1 << j; } //if (Ab[i]) ++counter; } } //std: : cout << "nonzero rows in 
factor: " << counter << std: :endl; } //Initialization of a factor, setting every bits of a row on its own void initFactorBitwise(vector < float >&A, const int height, const uint8_t factorDim, const uint32_t seed, const uint32_t threshold_ui32) { A.clear(); A.resize(height * factorDim, 0); //int counter = 0; #pragma omp parallel //reduce(+:counter) { //fast_kiss_state32_t state = get_initial_fast_kiss_state32(seed + omp_get_thread_num()); fast_kiss_state32_t state = get_initial_fast_kiss_state32(seed); #pragma omp for for (int i = 0; i < height; i++) { for (int j = 0; j < factorDim; j++) { //two possibilities: //1) set value 0 or 1 // 2) set random value in[0, 0.5] or[0.5, 1] if (fast_kiss32(state) < threshold_ui32) { A[i * factorDim + j] = 1; //A[i * factorDim + j] = (fast_kiss32(state) / float (UINT32_MAX))/2 + 0.5 f; } else { //A[i * factorDim + j] = (fast_kiss32(state) / float (UINT32_MAX))/2; } } } } } template < typename factor_t > void initFactor(vector < factor_t > &Ab, const int height, const uint8_t factorDim, const uint32_t seed, const float threshold) { const int randDepth = -log2(threshold) + 1; //std: : cout << "Init threshold: " << threshold << std: :endl; //std: : cout << "Init rand depth: " << randDepth << " -> " << pow(2, -randDepth) << std: :endl; if (randDepth < factorDim && std: : is_same < factor_t, uint32_t >: :value) { initFactorRowwise(Ab, height, factorDim, seed, randDepth); } else { initFactorBitwise(Ab, height, factorDim, seed, threshold * UINT32_MAX); } } //Write result factors to file void writeFactorsToFiles(const string & filename, const vector < uint32_t > &Ab, const vector < uint32_t > &Bb, const uint8_t factorDim) { using std::stringstream; using std::bitset; using std::ofstream; time_t now = time(0); tm *ltm = localtime(&now); stringstream date; date << 1 + ltm->tm_mon << '-' << ltm->tm_mday << '_' << ltm->tm_hour << ':' << ltm->tm_min << ':' << ltm->tm_sec; stringstream filename_A; filename_A << filename << "_factor_A_" << date.str() << ".data"; stringstream filename_B; filename_B << filename << "_factor_B_" << date.rdbuf() << ".data"; size_t height = Ab.size(); int nonzeroelements = 0; for (size_t i = 0; i < height; i++) { bitset < 32 > row(Ab[i]); nonzeroelements += row.count(); } ofstream os_A(filename_A.str()); if (os_A.good()) { os_A << height << " " << int (factorDim) << " " << nonzeroelements << "\n"; for (int i = 0; i < height; i++) { //bitset < 32 > row(Ab[i] >> (32 - factorDim)); //os_A << row << "\n"; for (int k = 0; k < factorDim; ++k) os_A << ((Ab[i] >> k) & 1 ? 1 : 0); os_A << "\n"; } os_A.close(); } else { std: : cerr << "File " << filename_A.str() << " could not be openend!" << std: :endl; } size_t width = Bb.size(); nonzeroelements = 0; for (size_t j = 0; j < width; j++) { bitset < 32 > col(Bb[j]); nonzeroelements += col.count(); } ofstream os_B(filename_B.str()); if (os_B.good()) { os_B << width << " " << int (factorDim) << " " << nonzeroelements << "\n"; for (int j = 0; j < width; j++) { //bitset < 32 > col(Bb[j] >> (32 - factorDim)); //os_B << col << "\n"; for (int k = 0; k < factorDim; ++k) os_B << ((Bb[j] >> k) & 1 ? 1 : 0); os_B << "\n"; } os_B.close(); } else { std: : cerr << "File " << filename_B.str() << " could not be openend!" 
<< std: :endl; } std: :cout << "Writing to files \"" << filename_A.rdbuf() << "\" and \"" << filename_B.rdbuf() << "\" complete" << std: :endl; } template < typename distance_t > void writeDistancesToFile(const string & filename, const vector < distance_t > &distances) { using std::stringstream; using std::bitset; using std::ofstream; time_t now = time(0); tm *ltm = localtime(&now); stringstream date; date << 1 + ltm->tm_mon << '-' << ltm->tm_mday << '_' << ltm->tm_hour << ':' << ltm->tm_min << ':' << ltm->tm_sec; stringstream filename_d; filename_d << filename << "_distances_" << date.str() << ".txt"; ofstream os(filename_d.str()); if (os.good()) { for (size_t i = 0; i < distances.size(); i++) { if (i > 0) os << "\n"; os << distances[i]; } os.close(); } else { std: : cerr << "File " << filename_d.str() << " could not be openend!" << std: :endl; } std: : cout << "Writing to files \"" << filename_d.rdbuf() << "\" complete" << std: :endl; } #endif
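In the formatted io_and_allocation.h variants, every thread of the #pragma omp parallel regions builds its KISS generator from the same seed, so all threads draw the identical random sequence; the commented-out get_initial_fast_kiss_state32(seed + omp_get_thread_num()) shows the per-thread seeding the author evidently considered. The sketch below is that variant only, assuming the rngpu.h helpers used above, applied to the bitwise factor initialization.

// Sketch: per-thread RNG seeding for initFactorBitwise, based on the
// commented-out seed + omp_get_thread_num() line in the original source.
#include <omp.h>
#include <vector>
#include <cstdint>
#include "helper/rngpu.h"   // fast_kiss32, get_initial_fast_kiss_state32

void initFactorBitwisePerThread(std::vector<uint32_t> &Ab, const int height,
                                const uint8_t factorDim, const uint32_t seed,
                                const uint32_t threshold_ui32)
{
    Ab.assign(height, 0);
    #pragma omp parallel
    {
        // Each thread gets its own state, seeded with its thread number.
        fast_kiss_state32_t state =
            get_initial_fast_kiss_state32(seed + omp_get_thread_num());
        #pragma omp for
        for (int i = 0; i < height; i++) {
            for (int j = 0; j < factorDim; j++) {
                if (fast_kiss32(state) < threshold_ui32)
                    Ab[i] |= 1u << j;
            }
        }
    }
}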
decorate.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % DDDD EEEEE CCCC OOO RRRR AAA TTTTT EEEEE % % D D E C O O R R A A T E % % D D EEE C O O RRRR AAAAA T EEE % % D D E C O O R R A A T E % % DDDD EEEEE CCCC OOO R R A A T EEEEE % % % % % % MagickCore Image Decoration Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/cache-view.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/decorate.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/image.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/resource_.h" #include "MagickCore/thread-private.h" #include "MagickCore/transform.h" /* Define declarations. */ #define AccentuateModulate ScaleCharToQuantum(80) #define HighlightModulate ScaleCharToQuantum(125) #define ShadowModulate ScaleCharToQuantum(135) #define DepthModulate ScaleCharToQuantum(185) #define TroughModulate ScaleCharToQuantum(110) /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % B o r d e r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % BorderImage() surrounds the image with a border of the color defined by % the bordercolor member of the image structure. The width and height % of the border are defined by the corresponding members of the border_info % structure. % % The format of the BorderImage method is: % % Image *BorderImage(const Image *image,const RectangleInfo *border_info, % const CompositeOperator compose,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o border_info: define the width and height of the border. % % o compose: the composite operator. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport Image *BorderImage(const Image *image, const RectangleInfo *border_info,const CompositeOperator compose, ExceptionInfo *exception) { Image *border_image, *clone_image; FrameInfo frame_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(border_info != (RectangleInfo *) NULL); frame_info.width=image->columns+(border_info->width << 1); frame_info.height=image->rows+(border_info->height << 1); frame_info.x=(ssize_t) border_info->width; frame_info.y=(ssize_t) border_info->height; frame_info.inner_bevel=0; frame_info.outer_bevel=0; clone_image=CloneImage(image,0,0,MagickTrue,exception); if (clone_image == (Image *) NULL) return((Image *) NULL); clone_image->matte_color=image->border_color; border_image=FrameImage(clone_image,&frame_info,compose,exception); clone_image=DestroyImage(clone_image); if (border_image != (Image *) NULL) border_image->matte_color=image->matte_color; return(border_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % F r a m e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % FrameImage() adds a simulated three-dimensional border around the image. % The color of the border is defined by the matte_color member of image. % Members width and height of frame_info specify the border width of the % vertical and horizontal sides of the frame. Members inner and outer % indicate the width of the inner and outer shadows of the frame. % % The format of the FrameImage method is: % % Image *FrameImage(const Image *image,const FrameInfo *frame_info, % const CompositeOperator compose,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o frame_info: Define the width and height of the frame and its bevels. % % o compose: the composite operator. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *FrameImage(const Image *image,const FrameInfo *frame_info, const CompositeOperator compose,ExceptionInfo *exception) { #define FrameImageTag "Frame/Image" CacheView *image_view, *frame_view; Image *frame_image; MagickBooleanType status; MagickOffsetType progress; PixelInfo accentuate, highlight, matte, shadow, trough; register ssize_t x; size_t bevel_width, height, width; ssize_t y; /* Check frame geometry. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(frame_info != (FrameInfo *) NULL); if ((frame_info->outer_bevel < 0) || (frame_info->inner_bevel < 0)) ThrowImageException(OptionError,"FrameIsLessThanImageSize"); bevel_width=(size_t) (frame_info->outer_bevel+frame_info->inner_bevel); x=(ssize_t) frame_info->width-frame_info->x-bevel_width; y=(ssize_t) frame_info->height-frame_info->y-bevel_width; if ((x < (ssize_t) image->columns) | (y < (ssize_t) image->rows)) ThrowImageException(OptionError,"FrameIsLessThanImageSize"); /* Initialize framed image attributes. 
*/ frame_image=CloneImage(image,frame_info->width,frame_info->height,MagickTrue, exception); if (frame_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(frame_image,DirectClass,exception) == MagickFalse) { frame_image=DestroyImage(frame_image); return((Image *) NULL); } if ((IsPixelInfoGray(&frame_image->border_color) == MagickFalse) && (IsGrayColorspace(frame_image->colorspace) != MagickFalse)) (void) SetImageColorspace(frame_image,sRGBColorspace,exception); if ((frame_image->matte_color.alpha_trait != UndefinedPixelTrait) && (frame_image->alpha_trait == UndefinedPixelTrait)) (void) SetImageAlpha(frame_image,OpaqueAlpha,exception); frame_image->page=image->page; if ((image->page.width != 0) && (image->page.height != 0)) { frame_image->page.width+=frame_image->columns-image->columns; frame_image->page.height+=frame_image->rows-image->rows; } /* Initialize 3D effects color. */ matte=image->matte_color; accentuate=matte; accentuate.red=(double) (QuantumScale*((QuantumRange- AccentuateModulate)*matte.red+(QuantumRange*AccentuateModulate))); accentuate.green=(double) (QuantumScale*((QuantumRange- AccentuateModulate)*matte.green+(QuantumRange*AccentuateModulate))); accentuate.blue=(double) (QuantumScale*((QuantumRange- AccentuateModulate)*matte.blue+(QuantumRange*AccentuateModulate))); accentuate.black=(double) (QuantumScale*((QuantumRange- AccentuateModulate)*matte.black+(QuantumRange*AccentuateModulate))); accentuate.alpha=matte.alpha; highlight=matte; highlight.red=(double) (QuantumScale*((QuantumRange- HighlightModulate)*matte.red+(QuantumRange*HighlightModulate))); highlight.green=(double) (QuantumScale*((QuantumRange- HighlightModulate)*matte.green+(QuantumRange*HighlightModulate))); highlight.blue=(double) (QuantumScale*((QuantumRange- HighlightModulate)*matte.blue+(QuantumRange*HighlightModulate))); highlight.black=(double) (QuantumScale*((QuantumRange- HighlightModulate)*matte.black+(QuantumRange*HighlightModulate))); highlight.alpha=matte.alpha; shadow=matte; shadow.red=QuantumScale*matte.red*ShadowModulate; shadow.green=QuantumScale*matte.green*ShadowModulate; shadow.blue=QuantumScale*matte.blue*ShadowModulate; shadow.black=QuantumScale*matte.black*ShadowModulate; shadow.alpha=matte.alpha; trough=matte; trough.red=QuantumScale*matte.red*TroughModulate; trough.green=QuantumScale*matte.green*TroughModulate; trough.blue=QuantumScale*matte.blue*TroughModulate; trough.black=QuantumScale*matte.black*TroughModulate; trough.alpha=matte.alpha; status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); frame_view=AcquireAuthenticCacheView(frame_image,exception); height=(size_t) (frame_info->outer_bevel+(frame_info->y-bevel_width)+ frame_info->inner_bevel); if (height != 0) { register ssize_t x; register Quantum *magick_restrict q; /* Draw top of ornamental border. */ q=QueueCacheViewAuthenticPixels(frame_view,0,0,frame_image->columns, height,exception); if (q != (Quantum *) NULL) { /* Draw top of ornamental border. 
*/ for (y=0; y < (ssize_t) frame_info->outer_bevel; y++) { for (x=0; x < (ssize_t) (frame_image->columns-y); x++) { if (x < y) SetPixelViaPixelInfo(frame_image,&highlight,q); else SetPixelViaPixelInfo(frame_image,&accentuate,q); q+=GetPixelChannels(frame_image); } for ( ; x < (ssize_t) frame_image->columns; x++) { SetPixelViaPixelInfo(frame_image,&shadow,q); q+=GetPixelChannels(frame_image); } } for (y=0; y < (ssize_t) (frame_info->y-bevel_width); y++) { for (x=0; x < (ssize_t) frame_info->outer_bevel; x++) { SetPixelViaPixelInfo(frame_image,&highlight,q); q+=GetPixelChannels(frame_image); } width=frame_image->columns-2*frame_info->outer_bevel; for (x=0; x < (ssize_t) width; x++) { SetPixelViaPixelInfo(frame_image,&matte,q); q+=GetPixelChannels(frame_image); } for (x=0; x < (ssize_t) frame_info->outer_bevel; x++) { SetPixelViaPixelInfo(frame_image,&shadow,q); q+=GetPixelChannels(frame_image); } } for (y=0; y < (ssize_t) frame_info->inner_bevel; y++) { for (x=0; x < (ssize_t) frame_info->outer_bevel; x++) { SetPixelViaPixelInfo(frame_image,&highlight,q); q+=GetPixelChannels(frame_image); } for (x=0; x < (ssize_t) (frame_info->x-bevel_width); x++) { SetPixelViaPixelInfo(frame_image,&matte,q); q+=GetPixelChannels(frame_image); } width=image->columns+((size_t) frame_info->inner_bevel << 1)- y; for (x=0; x < (ssize_t) width; x++) { if (x < y) SetPixelViaPixelInfo(frame_image,&shadow,q); else SetPixelViaPixelInfo(frame_image,&trough,q); q+=GetPixelChannels(frame_image); } for ( ; x < (ssize_t) (image->columns+2*frame_info->inner_bevel); x++) { SetPixelViaPixelInfo(frame_image,&highlight,q); q+=GetPixelChannels(frame_image); } width=frame_info->width-frame_info->x-image->columns-bevel_width; for (x=0; x < (ssize_t) width; x++) { SetPixelViaPixelInfo(frame_image,&matte,q); q+=GetPixelChannels(frame_image); } for (x=0; x < (ssize_t) frame_info->outer_bevel; x++) { SetPixelViaPixelInfo(frame_image,&shadow,q); q+=GetPixelChannels(frame_image); } } (void) SyncCacheViewAuthenticPixels(frame_view,exception); } } /* Draw sides of ornamental border. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_number_threads(image,frame_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register ssize_t x; register Quantum *magick_restrict q; size_t width; /* Initialize scanline with matte color. */ if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(frame_view,0,frame_info->y+y, frame_image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) frame_info->outer_bevel; x++) { SetPixelViaPixelInfo(frame_image,&highlight,q); q+=GetPixelChannels(frame_image); } for (x=0; x < (ssize_t) (frame_info->x-bevel_width); x++) { SetPixelViaPixelInfo(frame_image,&matte,q); q+=GetPixelChannels(frame_image); } for (x=0; x < (ssize_t) frame_info->inner_bevel; x++) { SetPixelViaPixelInfo(frame_image,&shadow,q); q+=GetPixelChannels(frame_image); } /* Set frame interior pixels. 
*/ for (x=0; x < (ssize_t) image->columns; x++) { SetPixelViaPixelInfo(frame_image,&frame_image->border_color,q); q+=GetPixelChannels(frame_image); } for (x=0; x < (ssize_t) frame_info->inner_bevel; x++) { SetPixelViaPixelInfo(frame_image,&highlight,q); q+=GetPixelChannels(frame_image); } width=frame_info->width-frame_info->x-image->columns-bevel_width; for (x=0; x < (ssize_t) width; x++) { SetPixelViaPixelInfo(frame_image,&matte,q); q+=GetPixelChannels(frame_image); } for (x=0; x < (ssize_t) frame_info->outer_bevel; x++) { SetPixelViaPixelInfo(frame_image,&shadow,q); q+=GetPixelChannels(frame_image); } if (SyncCacheViewAuthenticPixels(frame_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_FrameImage) #endif proceed=SetImageProgress(image,FrameImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } height=(size_t) (frame_info->inner_bevel+frame_info->height- frame_info->y-image->rows-bevel_width+frame_info->outer_bevel); if (height != 0) { register ssize_t x; register Quantum *magick_restrict q; /* Draw bottom of ornamental border. */ q=QueueCacheViewAuthenticPixels(frame_view,0,(ssize_t) (frame_image->rows- height),frame_image->columns,height,exception); if (q != (Quantum *) NULL) { /* Draw bottom of ornamental border. */ for (y=frame_info->inner_bevel-1; y >= 0; y--) { for (x=0; x < (ssize_t) frame_info->outer_bevel; x++) { SetPixelViaPixelInfo(frame_image,&highlight,q); q+=GetPixelChannels(frame_image); } for (x=0; x < (ssize_t) (frame_info->x-bevel_width); x++) { SetPixelViaPixelInfo(frame_image,&matte,q); q+=GetPixelChannels(frame_image); } for (x=0; x < y; x++) { SetPixelViaPixelInfo(frame_image,&shadow,q); q+=GetPixelChannels(frame_image); } for ( ; x < (ssize_t) (image->columns+2*frame_info->inner_bevel); x++) { if (x >= (ssize_t) (image->columns+2*frame_info->inner_bevel-y)) SetPixelViaPixelInfo(frame_image,&highlight,q); else SetPixelViaPixelInfo(frame_image,&accentuate,q); q+=GetPixelChannels(frame_image); } width=frame_info->width-frame_info->x-image->columns-bevel_width; for (x=0; x < (ssize_t) width; x++) { SetPixelViaPixelInfo(frame_image,&matte,q); q+=GetPixelChannels(frame_image); } for (x=0; x < (ssize_t) frame_info->outer_bevel; x++) { SetPixelViaPixelInfo(frame_image,&shadow,q); q+=GetPixelChannels(frame_image); } } height=frame_info->height-frame_info->y-image->rows-bevel_width; for (y=0; y < (ssize_t) height; y++) { for (x=0; x < (ssize_t) frame_info->outer_bevel; x++) { SetPixelViaPixelInfo(frame_image,&highlight,q); q+=GetPixelChannels(frame_image); } width=frame_image->columns-2*frame_info->outer_bevel; for (x=0; x < (ssize_t) width; x++) { SetPixelViaPixelInfo(frame_image,&matte,q); q+=GetPixelChannels(frame_image); } for (x=0; x < (ssize_t) frame_info->outer_bevel; x++) { SetPixelViaPixelInfo(frame_image,&shadow,q); q+=GetPixelChannels(frame_image); } } for (y=frame_info->outer_bevel-1; y >= 0; y--) { for (x=0; x < y; x++) { SetPixelViaPixelInfo(frame_image,&highlight,q); q+=GetPixelChannels(frame_image); } for ( ; x < (ssize_t) frame_image->columns; x++) { if (x >= (ssize_t) (frame_image->columns-y)) SetPixelViaPixelInfo(frame_image,&shadow,q); else SetPixelViaPixelInfo(frame_image,&trough,q); q+=GetPixelChannels(frame_image); } } (void) SyncCacheViewAuthenticPixels(frame_view,exception); } } frame_view=DestroyCacheView(frame_view); 
image_view=DestroyCacheView(image_view); x=(ssize_t) (frame_info->outer_bevel+(frame_info->x-bevel_width)+ frame_info->inner_bevel); y=(ssize_t) (frame_info->outer_bevel+(frame_info->y-bevel_width)+ frame_info->inner_bevel); if (status != MagickFalse) status=CompositeImage(frame_image,image,compose,MagickTrue,x,y, exception); if (status == MagickFalse) frame_image=DestroyImage(frame_image); return(frame_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R a i s e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RaiseImage() creates a simulated three-dimensional button-like effect % by lightening and darkening the edges of the image. Members width and % height of raise_info define the width of the vertical and horizontal % edge of the effect. % % The format of the RaiseImage method is: % % MagickBooleanType RaiseImage(const Image *image, % const RectangleInfo *raise_info,const MagickBooleanType raise, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o raise_info: Define the width and height of the raise area. % % o raise: A value other than zero creates a 3-D raise effect, % otherwise it has a lowered effect. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType RaiseImage(Image *image, const RectangleInfo *raise_info,const MagickBooleanType raise, ExceptionInfo *exception) { #define AccentuateFactor ScaleCharToQuantum(135) #define HighlightFactor ScaleCharToQuantum(190) #define ShadowFactor ScaleCharToQuantum(190) #define RaiseImageTag "Raise/Image" #define TroughFactor ScaleCharToQuantum(135) CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; Quantum foreground, background; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(raise_info != (RectangleInfo *) NULL); if ((image->columns <= (raise_info->width << 1)) || (image->rows <= (raise_info->height << 1))) ThrowBinaryException(OptionError,"ImageSizeMustExceedBevelWidth", image->filename); foreground=QuantumRange; background=(Quantum) 0; if (raise == MagickFalse) { foreground=(Quantum) 0; background=QuantumRange; } if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); /* Raise image. 
*/ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_number_threads(image,image,raise_info->height,1) #endif for (y=0; y < (ssize_t) raise_info->height; y++) { register ssize_t i, x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < y; x++) { if (GetPixelWriteMask(image,q) <= (QuantumRange/2)) { q+=GetPixelChannels(image); continue; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; q[i]=ClampToQuantum(QuantumScale*((double) q[i]*HighlightFactor+(double) foreground*(QuantumRange-HighlightFactor))); } q+=GetPixelChannels(image); } for ( ; x < (ssize_t) (image->columns-y); x++) { if (GetPixelWriteMask(image,q) <= (QuantumRange/2)) { q+=GetPixelChannels(image); continue; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; q[i]=ClampToQuantum(QuantumScale*((double) q[i]*AccentuateFactor+ (double) foreground*(QuantumRange-AccentuateFactor))); } q+=GetPixelChannels(image); } for ( ; x < (ssize_t) image->columns; x++) { if (GetPixelWriteMask(image,q) <= (QuantumRange/2)) { q+=GetPixelChannels(image); continue; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; q[i]=ClampToQuantum(QuantumScale*((double) q[i]*ShadowFactor+(double) background*(QuantumRange-ShadowFactor))); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_RaiseImage) #endif proceed=SetImageProgress(image,RaiseImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_number_threads(image,image,image->rows-2*raise_info->height,1) #endif for (y=(ssize_t) raise_info->height; y < (ssize_t) (image->rows-raise_info->height); y++) { register ssize_t i, x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) raise_info->width; x++) { if (GetPixelWriteMask(image,q) <= (QuantumRange/2)) { q+=GetPixelChannels(image); continue; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; q[i]=ClampToQuantum(QuantumScale*((double) q[i]*HighlightFactor+(double) foreground*(QuantumRange-HighlightFactor))); } q+=GetPixelChannels(image); } for ( ; x < (ssize_t) (image->columns-raise_info->width); x++) q+=GetPixelChannels(image); 
for ( ; x < (ssize_t) image->columns; x++) { if (GetPixelWriteMask(image,q) <= (QuantumRange/2)) { q+=GetPixelChannels(image); continue; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; q[i]=ClampToQuantum(QuantumScale*((double) q[i]*ShadowFactor+(double) background*(QuantumRange-ShadowFactor))); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_RaiseImage) #endif proceed=SetImageProgress(image,RaiseImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_number_threads(image,image,image->rows-raise_info->height,1) #endif for (y=(ssize_t) (image->rows-raise_info->height); y < (ssize_t) image->rows; y++) { register ssize_t i, x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) (image->rows-y); x++) { if (GetPixelWriteMask(image,q) <= (QuantumRange/2)) { q+=GetPixelChannels(image); continue; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; q[i]=ClampToQuantum(QuantumScale*((double) q[i]*HighlightFactor+(double) foreground*(QuantumRange-HighlightFactor))); } q+=GetPixelChannels(image); } for ( ; x < (ssize_t) (image->columns-(image->rows-y)); x++) { for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; q[i]=ClampToQuantum(QuantumScale*((double) q[i]*TroughFactor+ (double) background*(QuantumRange-TroughFactor))); } q+=GetPixelChannels(image); } for ( ; x < (ssize_t) image->columns; x++) { if (GetPixelWriteMask(image,q) <= (QuantumRange/2)) { q+=GetPixelChannels(image); continue; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; q[i]=ClampToQuantum(QuantumScale*((double) q[i]*ShadowFactor+(double) background*(QuantumRange-ShadowFactor))); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_RaiseImage) #endif proceed=SetImageProgress(image,RaiseImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); }
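/*
  Usage sketch for the decorators above.  This is a minimal, illustrative
  MagickCore (ImageMagick 7) driver: the file names "input.png"/"output.png"
  and the geometry values are placeholders, error handling is reduced to NULL
  checks, and BorderImage() would be called the same way as RaiseImage() with
  a RectangleInfo.  Link against MagickCore (for example the flags reported by
  pkg-config for MagickCore; the package name varies by install).
*/
#include <stdlib.h>
#include <MagickCore/MagickCore.h>

int main(int argc,char **argv)
{
  ExceptionInfo
    *exception;

  FrameInfo
    frame_info;

  Image
    *frame_image,
    *image;

  ImageInfo
    *image_info;

  RectangleInfo
    raise_info;

  (void) argc;
  MagickCoreGenesis(*argv,MagickFalse);
  exception=AcquireExceptionInfo();
  image_info=CloneImageInfo((ImageInfo *) NULL);
  (void) CopyMagickString(image_info->filename,"input.png",MagickPathExtent);
  image=ReadImage(image_info,exception);
  if (image == (Image *) NULL)
    return(EXIT_FAILURE);
  /*
    25 pixel matte border with 6 pixel inner/outer bevels; FrameImage()
    requires width >= columns+x+bevels (see the geometry check above).
  */
  frame_info.x=25;
  frame_info.y=25;
  frame_info.width=image->columns+2*25;
  frame_info.height=image->rows+2*25;
  frame_info.inner_bevel=6;
  frame_info.outer_bevel=6;
  frame_image=FrameImage(image,&frame_info,OverCompositeOp,exception);
  if (frame_image != (Image *) NULL)
    {
      /*
        8 pixel raised edge on the framed result.
      */
      raise_info.width=8;
      raise_info.height=8;
      raise_info.x=0;
      raise_info.y=0;
      (void) RaiseImage(frame_image,&raise_info,MagickTrue,exception);
      (void) CopyMagickString(frame_image->filename,"output.png",
        MagickPathExtent);
      (void) WriteImage(image_info,frame_image,exception);
      frame_image=DestroyImage(frame_image);
    }
  image=DestroyImage(image);
  image_info=DestroyImageInfo(image_info);
  exception=DestroyExceptionInfo(exception);
  MagickCoreTerminus();
  return(EXIT_SUCCESS);
}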
/* * Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/cache-view.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/decorate.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/image.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/resource_.h" #include "MagickCore/thread-private.h" #include "MagickCore/transform.h" /* * Define declarations. */ #define AccentuateModulate ScaleCharToQuantum(80) #define HighlightModulate ScaleCharToQuantum(125) #define ShadowModulate ScaleCharToQuantum(135) #define DepthModulate ScaleCharToQuantum(185) #define TroughModulate ScaleCharToQuantum(110) /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % B o r d e r I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % BorderImage() surrounds the image with a border of the color * defined by % the bordercolor member of the image structure. The width * and height % of the border are defined by the corresponding members of * the border_info % structure. % % The format of the BorderImage method * is: % % Image *BorderImage(const Image *image,const RectangleInfo * *border_info, % const CompositeOperator compose,ExceptionInfo * *exception) % % A description of each parameter follows: % % o image: * the image. % % o border_info: define the width and height of the * border. % % o compose: the composite operator. % % o exception: * return any errors or warnings in this structure. % */ MagickExport Image * BorderImage(const Image * image, const RectangleInfo * border_info, const CompositeOperator compose, ExceptionInfo * exception) { Image * border_image, *clone_image; FrameInfo frame_info; assert(image != (const Image *)NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(border_info != (RectangleInfo *) NULL); frame_info.width = image->columns + (border_info->width << 1); frame_info.height = image->rows + (border_info->height << 1); frame_info.x = (ssize_t) border_info->width; frame_info.y = (ssize_t) border_info->height; frame_info.inner_bevel = 0; frame_info.outer_bevel = 0; clone_image = CloneImage(image, 0, 0, MagickTrue, exception); if (clone_image == (Image *) NULL) return ((Image *) NULL); clone_image->matte_color = image->border_color; border_image = FrameImage(clone_image, &frame_info, compose, exception); clone_image = DestroyImage(clone_image); if (border_image != (Image *) NULL) border_image->matte_color = image->matte_color; return (border_image); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % F r a m e I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % FrameImage() adds a simulated three-dimensional border around * the image. % The color of the border is defined by the matte_color member * of image. % Members width and height of frame_info specify the border * width of the % vertical and horizontal sides of the frame. 
Members inner * and outer % indicate the width of the inner and outer shadows of the * frame. % % The format of the FrameImage method is: % % Image * *FrameImage(const Image *image,const FrameInfo *frame_info, % const * CompositeOperator compose,ExceptionInfo *exception) % % A description of * each parameter follows: % % o image: the image. % % o frame_info: * Define the width and height of the frame and its bevels. % % o compose: * the composite operator. % % o exception: return any errors or warnings * in this structure. % */ MagickExport Image * FrameImage(const Image * image, const FrameInfo * frame_info, const CompositeOperator compose, ExceptionInfo * exception) { #define FrameImageTag "Frame/Image" CacheView * image_view, *frame_view; Image * frame_image; MagickBooleanType status; MagickOffsetType progress; PixelInfo accentuate, highlight, matte, shadow, trough; register ssize_t x; size_t bevel_width, height, width; ssize_t y; /* * Check frame geometry. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(frame_info != (FrameInfo *) NULL); if ((frame_info->outer_bevel < 0) || (frame_info->inner_bevel < 0)) ThrowImageException(OptionError, "FrameIsLessThanImageSize"); bevel_width = (size_t) (frame_info->outer_bevel + frame_info->inner_bevel); x = (ssize_t) frame_info->width - frame_info->x - bevel_width; y = (ssize_t) frame_info->height - frame_info->y - bevel_width; if ((x < (ssize_t) image->columns) | (y < (ssize_t) image->rows)) ThrowImageException(OptionError, "FrameIsLessThanImageSize"); /* * Initialize framed image attributes. */ frame_image = CloneImage(image, frame_info->width, frame_info->height, MagickTrue, exception); if (frame_image == (Image *) NULL) return ((Image *) NULL); if (SetImageStorageClass(frame_image, DirectClass, exception) == MagickFalse) { frame_image = DestroyImage(frame_image); return ((Image *) NULL); } if ((IsPixelInfoGray(&frame_image->border_color) == MagickFalse) && (IsGrayColorspace(frame_image->colorspace) != MagickFalse)) (void)SetImageColorspace(frame_image, sRGBColorspace, exception); if ((frame_image->matte_color.alpha_trait != UndefinedPixelTrait) && (frame_image->alpha_trait == UndefinedPixelTrait)) (void)SetImageAlpha(frame_image, OpaqueAlpha, exception); frame_image->page = image->page; if ((image->page.width != 0) && (image->page.height != 0)) { frame_image->page.width += frame_image->columns - image->columns; frame_image->page.height += frame_image->rows - image->rows; } /* * Initialize 3D effects color. 
*/ matte = image->matte_color; accentuate = matte; accentuate.red = (double)(QuantumScale * ((QuantumRange - AccentuateModulate) * matte.red + (QuantumRange * AccentuateModulate))); accentuate.green = (double)(QuantumScale * ((QuantumRange - AccentuateModulate) * matte.green + (QuantumRange * AccentuateModulate))); accentuate.blue = (double)(QuantumScale * ((QuantumRange - AccentuateModulate) * matte.blue + (QuantumRange * AccentuateModulate))); accentuate.black = (double)(QuantumScale * ((QuantumRange - AccentuateModulate) * matte.black + (QuantumRange * AccentuateModulate))); accentuate.alpha = matte.alpha; highlight = matte; highlight.red = (double)(QuantumScale * ((QuantumRange - HighlightModulate) * matte.red + (QuantumRange * HighlightModulate))); highlight.green = (double)(QuantumScale * ((QuantumRange - HighlightModulate) * matte.green + (QuantumRange * HighlightModulate))); highlight.blue = (double)(QuantumScale * ((QuantumRange - HighlightModulate) * matte.blue + (QuantumRange * HighlightModulate))); highlight.black = (double)(QuantumScale * ((QuantumRange - HighlightModulate) * matte.black + (QuantumRange * HighlightModulate))); highlight.alpha = matte.alpha; shadow = matte; shadow.red = QuantumScale * matte.red * ShadowModulate; shadow.green = QuantumScale * matte.green * ShadowModulate; shadow.blue = QuantumScale * matte.blue * ShadowModulate; shadow.black = QuantumScale * matte.black * ShadowModulate; shadow.alpha = matte.alpha; trough = matte; trough.red = QuantumScale * matte.red * TroughModulate; trough.green = QuantumScale * matte.green * TroughModulate; trough.blue = QuantumScale * matte.blue * TroughModulate; trough.black = QuantumScale * matte.black * TroughModulate; trough.alpha = matte.alpha; status = MagickTrue; progress = 0; image_view = AcquireVirtualCacheView(image, exception); frame_view = AcquireAuthenticCacheView(frame_image, exception); height = (size_t) (frame_info->outer_bevel + (frame_info->y - bevel_width) + frame_info->inner_bevel); if (height != 0) { register ssize_t x; register Quantum * magick_restrict q; /* * Draw top of ornamental border. */ q = QueueCacheViewAuthenticPixels(frame_view, 0, 0, frame_image->columns, height, exception); if (q != (Quantum *) NULL) { /* * Draw top of ornamental border. 
*/ for (y = 0; y < (ssize_t) frame_info->outer_bevel; y++) { for (x = 0; x < (ssize_t) (frame_image->columns - y); x++) { if (x < y) SetPixelViaPixelInfo(frame_image, &highlight, q); else SetPixelViaPixelInfo(frame_image, &accentuate, q); q += GetPixelChannels(frame_image); } for (; x < (ssize_t) frame_image->columns; x++) { SetPixelViaPixelInfo(frame_image, &shadow, q); q += GetPixelChannels(frame_image); } } for (y = 0; y < (ssize_t) (frame_info->y - bevel_width); y++) { for (x = 0; x < (ssize_t) frame_info->outer_bevel; x++) { SetPixelViaPixelInfo(frame_image, &highlight, q); q += GetPixelChannels(frame_image); } width = frame_image->columns - 2 * frame_info->outer_bevel; for (x = 0; x < (ssize_t) width; x++) { SetPixelViaPixelInfo(frame_image, &matte, q); q += GetPixelChannels(frame_image); } for (x = 0; x < (ssize_t) frame_info->outer_bevel; x++) { SetPixelViaPixelInfo(frame_image, &shadow, q); q += GetPixelChannels(frame_image); } } for (y = 0; y < (ssize_t) frame_info->inner_bevel; y++) { for (x = 0; x < (ssize_t) frame_info->outer_bevel; x++) { SetPixelViaPixelInfo(frame_image, &highlight, q); q += GetPixelChannels(frame_image); } for (x = 0; x < (ssize_t) (frame_info->x - bevel_width); x++) { SetPixelViaPixelInfo(frame_image, &matte, q); q += GetPixelChannels(frame_image); } width = image->columns + ((size_t) frame_info->inner_bevel << 1) - y; for (x = 0; x < (ssize_t) width; x++) { if (x < y) SetPixelViaPixelInfo(frame_image, &shadow, q); else SetPixelViaPixelInfo(frame_image, &trough, q); q += GetPixelChannels(frame_image); } for (; x < (ssize_t) (image->columns + 2 * frame_info->inner_bevel); x++) { SetPixelViaPixelInfo(frame_image, &highlight, q); q += GetPixelChannels(frame_image); } width = frame_info->width - frame_info->x - image->columns - bevel_width; for (x = 0; x < (ssize_t) width; x++) { SetPixelViaPixelInfo(frame_image, &matte, q); q += GetPixelChannels(frame_image); } for (x = 0; x < (ssize_t) frame_info->outer_bevel; x++) { SetPixelViaPixelInfo(frame_image, &shadow, q); q += GetPixelChannels(frame_image); } } (void)SyncCacheViewAuthenticPixels(frame_view, exception); } } /* * Draw sides of ornamental border. */ for (y = 0; y < (ssize_t) image->rows; y++) { register ssize_t x; register Quantum * magick_restrict q; size_t width; /* * Initialize scanline with matte color. */ if (status == MagickFalse) continue; q = QueueCacheViewAuthenticPixels(frame_view, 0, frame_info->y + y, frame_image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) frame_info->outer_bevel; x++) { SetPixelViaPixelInfo(frame_image, &highlight, q); q += GetPixelChannels(frame_image); } for (x = 0; x < (ssize_t) (frame_info->x - bevel_width); x++) { SetPixelViaPixelInfo(frame_image, &matte, q); q += GetPixelChannels(frame_image); } for (x = 0; x < (ssize_t) frame_info->inner_bevel; x++) { SetPixelViaPixelInfo(frame_image, &shadow, q); q += GetPixelChannels(frame_image); } /* * Set frame interior pixels. 
*/ for (x = 0; x < (ssize_t) image->columns; x++) { SetPixelViaPixelInfo(frame_image, &frame_image->border_color, q); q += GetPixelChannels(frame_image); } for (x = 0; x < (ssize_t) frame_info->inner_bevel; x++) { SetPixelViaPixelInfo(frame_image, &highlight, q); q += GetPixelChannels(frame_image); } width = frame_info->width - frame_info->x - image->columns - bevel_width; for (x = 0; x < (ssize_t) width; x++) { SetPixelViaPixelInfo(frame_image, &matte, q); q += GetPixelChannels(frame_image); } for (x = 0; x < (ssize_t) frame_info->outer_bevel; x++) { SetPixelViaPixelInfo(frame_image, &shadow, q); q += GetPixelChannels(frame_image); } if (SyncCacheViewAuthenticPixels(frame_view, exception) == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed = SetImageProgress(image, FrameImageTag, progress++, image->rows); if (proceed == MagickFalse) status = MagickFalse; } } height = (size_t) (frame_info->inner_bevel + frame_info->height - frame_info->y - image->rows - bevel_width + frame_info->outer_bevel); if (height != 0) { register ssize_t x; register Quantum * magick_restrict q; /* * Draw bottom of ornamental border. */ q = QueueCacheViewAuthenticPixels(frame_view, 0, (ssize_t) (frame_image->rows - height), frame_image->columns, height, exception); if (q != (Quantum *) NULL) { /* * Draw bottom of ornamental border. */ for (y = frame_info->inner_bevel - 1; y >= 0; y--) { for (x = 0; x < (ssize_t) frame_info->outer_bevel; x++) { SetPixelViaPixelInfo(frame_image, &highlight, q); q += GetPixelChannels(frame_image); } for (x = 0; x < (ssize_t) (frame_info->x - bevel_width); x++) { SetPixelViaPixelInfo(frame_image, &matte, q); q += GetPixelChannels(frame_image); } for (x = 0; x < y; x++) { SetPixelViaPixelInfo(frame_image, &shadow, q); q += GetPixelChannels(frame_image); } for (; x < (ssize_t) (image->columns + 2 * frame_info->inner_bevel); x++) { if (x >= (ssize_t) (image->columns + 2 * frame_info->inner_bevel - y)) SetPixelViaPixelInfo(frame_image, &highlight, q); else SetPixelViaPixelInfo(frame_image, &accentuate, q); q += GetPixelChannels(frame_image); } width = frame_info->width - frame_info->x - image->columns - bevel_width; for (x = 0; x < (ssize_t) width; x++) { SetPixelViaPixelInfo(frame_image, &matte, q); q += GetPixelChannels(frame_image); } for (x = 0; x < (ssize_t) frame_info->outer_bevel; x++) { SetPixelViaPixelInfo(frame_image, &shadow, q); q += GetPixelChannels(frame_image); } } height = frame_info->height - frame_info->y - image->rows - bevel_width; for (y = 0; y < (ssize_t) height; y++) { for (x = 0; x < (ssize_t) frame_info->outer_bevel; x++) { SetPixelViaPixelInfo(frame_image, &highlight, q); q += GetPixelChannels(frame_image); } width = frame_image->columns - 2 * frame_info->outer_bevel; for (x = 0; x < (ssize_t) width; x++) { SetPixelViaPixelInfo(frame_image, &matte, q); q += GetPixelChannels(frame_image); } for (x = 0; x < (ssize_t) frame_info->outer_bevel; x++) { SetPixelViaPixelInfo(frame_image, &shadow, q); q += GetPixelChannels(frame_image); } } for (y = frame_info->outer_bevel - 1; y >= 0; y--) { for (x = 0; x < y; x++) { SetPixelViaPixelInfo(frame_image, &highlight, q); q += GetPixelChannels(frame_image); } for (; x < (ssize_t) frame_image->columns; x++) { if (x >= (ssize_t) (frame_image->columns - y)) SetPixelViaPixelInfo(frame_image, &shadow, q); else SetPixelViaPixelInfo(frame_image, &trough, q); q += GetPixelChannels(frame_image); } } (void)SyncCacheViewAuthenticPixels(frame_view, 
exception); } } frame_view = DestroyCacheView(frame_view); image_view = DestroyCacheView(image_view); x = (ssize_t) (frame_info->outer_bevel + (frame_info->x - bevel_width) + frame_info->inner_bevel); y = (ssize_t) (frame_info->outer_bevel + (frame_info->y - bevel_width) + frame_info->inner_bevel); if (status != MagickFalse) status = CompositeImage(frame_image, image, compose, MagickTrue, x, y, exception); if (status == MagickFalse) frame_image = DestroyImage(frame_image); return (frame_image); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % R a i s e I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % RaiseImage() creates a simulated three-dimensional button-like * effect % by lightening and darkening the edges of the image. Members * width and % height of raise_info define the width of the vertical and * horizontal % edge of the effect. % % The format of the RaiseImage method * is: % % MagickBooleanType RaiseImage(const Image *image, % * const RectangleInfo *raise_info,const MagickBooleanType raise, % * ExceptionInfo *exception) % % A description of each parameter follows: % * % o image: the image. % % o raise_info: Define the width and height * of the raise area. % % o raise: A value other than zero creates a 3-D * raise effect, % otherwise it has a lowered effect. % % o * exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType RaiseImage(Image * image, const RectangleInfo * raise_info, const MagickBooleanType raise, ExceptionInfo * exception) { #define AccentuateFactor ScaleCharToQuantum(135) #define HighlightFactor ScaleCharToQuantum(190) #define ShadowFactor ScaleCharToQuantum(190) #define RaiseImageTag "Raise/Image" #define TroughFactor ScaleCharToQuantum(135) CacheView * image_view; MagickBooleanType status; MagickOffsetType progress; Quantum foreground, background; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(raise_info != (RectangleInfo *) NULL); if ((image->columns <= (raise_info->width << 1)) || (image->rows <= (raise_info->height << 1))) ThrowBinaryException(OptionError, "ImageSizeMustExceedBevelWidth", image->filename); foreground = QuantumRange; background = (Quantum) 0; if (raise == MagickFalse) { foreground = (Quantum) 0; background = QuantumRange; } if (SetImageStorageClass(image, DirectClass, exception) == MagickFalse) return (MagickFalse); /* * Raise image. 
*/ status = MagickTrue; progress = 0; image_view = AcquireAuthenticCacheView(image, exception); for (y = 0; y < (ssize_t) raise_info->height; y++) { register ssize_t i, x; register Quantum * magick_restrict q; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } for (x = 0; x < y; x++) { if (GetPixelWriteMask(image, q) <= (QuantumRange / 2)) { q += GetPixelChannels(image); continue; } for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image, i); PixelTrait traits = GetPixelChannelTraits(image, channel); if ((traits & UpdatePixelTrait) == 0) continue; q[i] = ClampToQuantum(QuantumScale * ((double)q[i] * HighlightFactor + (double) foreground * (QuantumRange - HighlightFactor))); } q += GetPixelChannels(image); } for (; x < (ssize_t) (image->columns - y); x++) { if (GetPixelWriteMask(image, q) <= (QuantumRange / 2)) { q += GetPixelChannels(image); continue; } for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image, i); PixelTrait traits = GetPixelChannelTraits(image, channel); if ((traits & UpdatePixelTrait) == 0) continue; q[i] = ClampToQuantum(QuantumScale * ((double)q[i] * AccentuateFactor + (double)foreground * (QuantumRange - AccentuateFactor))); } q += GetPixelChannels(image); } for (; x < (ssize_t) image->columns; x++) { if (GetPixelWriteMask(image, q) <= (QuantumRange / 2)) { q += GetPixelChannels(image); continue; } for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image, i); PixelTrait traits = GetPixelChannelTraits(image, channel); if ((traits & UpdatePixelTrait) == 0) continue; q[i] = ClampToQuantum(QuantumScale * ((double)q[i] * ShadowFactor + (double) background * (QuantumRange - ShadowFactor))); } q += GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed = SetImageProgress(image, RaiseImageTag, progress++, image->rows); if (proceed == MagickFalse) status = MagickFalse; } } for (y = (ssize_t) raise_info->height; y < (ssize_t) (image->rows - raise_info->height); y++) { register ssize_t i, x; register Quantum * magick_restrict q; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) raise_info->width; x++) { if (GetPixelWriteMask(image, q) <= (QuantumRange / 2)) { q += GetPixelChannels(image); continue; } for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image, i); PixelTrait traits = GetPixelChannelTraits(image, channel); if ((traits & UpdatePixelTrait) == 0) continue; q[i] = ClampToQuantum(QuantumScale * ((double)q[i] * HighlightFactor + (double) foreground * (QuantumRange - HighlightFactor))); } q += GetPixelChannels(image); } for (; x < (ssize_t) (image->columns - raise_info->width); x++) q += GetPixelChannels(image); for (; x < (ssize_t) image->columns; x++) { if (GetPixelWriteMask(image, q) <= (QuantumRange / 2)) { q += GetPixelChannels(image); continue; } for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image, i); PixelTrait traits = GetPixelChannelTraits(image, 
channel); if ((traits & UpdatePixelTrait) == 0) continue; q[i] = ClampToQuantum(QuantumScale * ((double)q[i] * ShadowFactor + (double) background * (QuantumRange - ShadowFactor))); } q += GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed = SetImageProgress(image, RaiseImageTag, progress++, image->rows); if (proceed == MagickFalse) status = MagickFalse; } } for (y = (ssize_t) (image->rows - raise_info->height); y < (ssize_t) image->rows; y++) { register ssize_t i, x; register Quantum * magick_restrict q; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) (image->rows - y); x++) { if (GetPixelWriteMask(image, q) <= (QuantumRange / 2)) { q += GetPixelChannels(image); continue; } for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image, i); PixelTrait traits = GetPixelChannelTraits(image, channel); if ((traits & UpdatePixelTrait) == 0) continue; q[i] = ClampToQuantum(QuantumScale * ((double)q[i] * HighlightFactor + (double) foreground * (QuantumRange - HighlightFactor))); } q += GetPixelChannels(image); } for (; x < (ssize_t) (image->columns - (image->rows - y)); x++) { for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image, i); PixelTrait traits = GetPixelChannelTraits(image, channel); if ((traits & UpdatePixelTrait) == 0) continue; q[i] = ClampToQuantum(QuantumScale * ((double)q[i] * TroughFactor + (double)background * (QuantumRange - TroughFactor))); } q += GetPixelChannels(image); } for (; x < (ssize_t) image->columns; x++) { if (GetPixelWriteMask(image, q) <= (QuantumRange / 2)) { q += GetPixelChannels(image); continue; } for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image, i); PixelTrait traits = GetPixelChannelTraits(image, channel); if ((traits & UpdatePixelTrait) == 0) continue; q[i] = ClampToQuantum(QuantumScale * ((double)q[i] * ShadowFactor + (double) background * (QuantumRange - ShadowFactor))); } q += GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed = SetImageProgress(image, RaiseImageTag, progress++, image->rows); if (proceed == MagickFalse) status = MagickFalse; } } image_view = DestroyCacheView(image_view); return (status); }
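/*
  The per-channel shading in RaiseImage() above is a linear blend toward an
  accent color:  out = (q*factor + accent*(QuantumRange-factor))/QuantumRange,
  so HighlightFactor/ShadowFactor simply set how much of the original pixel
  survives at the edge.  A stand-alone sketch of that arithmetic, assuming a
  16-bit quantum purely for illustration (MagickCore's real QuantumRange and
  ClampToQuantum() depend on the build's quantum depth):
*/
#include <stdio.h>

#define SketchQuantumRange 65535.0            /* Q16 assumption */
#define SketchHighlightFactor (190.0*257.0)   /* ScaleCharToQuantum(190) at Q16 */

static double raise_blend(const double pixel,const double accent,
  const double factor)
{
  /* weighted average of the pixel and the accent (foreground/background) */
  return((pixel*factor+accent*(SketchQuantumRange-factor))/SketchQuantumRange);
}

int main(void)
{
  const double
    pixel = 10000.0;

  /* raised edge: blend toward white (foreground == QuantumRange) */
  (void) printf("highlight: %.1f\n",raise_blend(pixel,SketchQuantumRange,
    SketchHighlightFactor));
  /* lowered edge: blend toward black (background == 0) */
  (void) printf("shadow:    %.1f\n",raise_blend(pixel,0.0,
    SketchHighlightFactor));
  return(0);
}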
/* * Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/cache-view.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/decorate.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/image.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/resource_.h" #include "MagickCore/thread-private.h" #include "MagickCore/transform.h" /* * Define declarations. */ #define AccentuateModulate ScaleCharToQuantum(80) #define HighlightModulate ScaleCharToQuantum(125) #define ShadowModulate ScaleCharToQuantum(135) #define DepthModulate ScaleCharToQuantum(185) #define TroughModulate ScaleCharToQuantum(110) /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % B o r d e r I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % BorderImage() surrounds the image with a border of the color * defined by % the bordercolor member of the image structure. The width * and height % of the border are defined by the corresponding members of * the border_info % structure. % % The format of the BorderImage method * is: % % Image *BorderImage(const Image *image,const RectangleInfo * *border_info, % const CompositeOperator compose,ExceptionInfo * *exception) % % A description of each parameter follows: % % o image: * the image. % % o border_info: define the width and height of the * border. % % o compose: the composite operator. % % o exception: * return any errors or warnings in this structure. % */ MagickExport Image * BorderImage(const Image * image, const RectangleInfo * border_info, const CompositeOperator compose, ExceptionInfo * exception) { Image * border_image, *clone_image; FrameInfo frame_info; assert(image != (const Image *)NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(border_info != (RectangleInfo *) NULL); frame_info.width = image->columns + (border_info->width << 1); frame_info.height = image->rows + (border_info->height << 1); frame_info.x = (ssize_t) border_info->width; frame_info.y = (ssize_t) border_info->height; frame_info.inner_bevel = 0; frame_info.outer_bevel = 0; clone_image = CloneImage(image, 0, 0, MagickTrue, exception); if (clone_image == (Image *) NULL) return ((Image *) NULL); clone_image->matte_color = image->border_color; border_image = FrameImage(clone_image, &frame_info, compose, exception); clone_image = DestroyImage(clone_image); if (border_image != (Image *) NULL) border_image->matte_color = image->matte_color; return (border_image); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % F r a m e I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % FrameImage() adds a simulated three-dimensional border around * the image. % The color of the border is defined by the matte_color member * of image. % Members width and height of frame_info specify the border * width of the % vertical and horizontal sides of the frame. 
Members inner * and outer % indicate the width of the inner and outer shadows of the * frame. % % The format of the FrameImage method is: % % Image * *FrameImage(const Image *image,const FrameInfo *frame_info, % const * CompositeOperator compose,ExceptionInfo *exception) % % A description of * each parameter follows: % % o image: the image. % % o frame_info: * Define the width and height of the frame and its bevels. % % o compose: * the composite operator. % % o exception: return any errors or warnings * in this structure. % */ MagickExport Image * FrameImage(const Image * image, const FrameInfo * frame_info, const CompositeOperator compose, ExceptionInfo * exception) { #define FrameImageTag "Frame/Image" CacheView * image_view, *frame_view; Image * frame_image; MagickBooleanType status; MagickOffsetType progress; PixelInfo accentuate, highlight, matte, shadow, trough; register ssize_t x; size_t bevel_width, height, width; ssize_t y; /* * Check frame geometry. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(frame_info != (FrameInfo *) NULL); if ((frame_info->outer_bevel < 0) || (frame_info->inner_bevel < 0)) ThrowImageException(OptionError, "FrameIsLessThanImageSize"); bevel_width = (size_t) (frame_info->outer_bevel + frame_info->inner_bevel); x = (ssize_t) frame_info->width - frame_info->x - bevel_width; y = (ssize_t) frame_info->height - frame_info->y - bevel_width; if ((x < (ssize_t) image->columns) | (y < (ssize_t) image->rows)) ThrowImageException(OptionError, "FrameIsLessThanImageSize"); /* * Initialize framed image attributes. */ frame_image = CloneImage(image, frame_info->width, frame_info->height, MagickTrue, exception); if (frame_image == (Image *) NULL) return ((Image *) NULL); if (SetImageStorageClass(frame_image, DirectClass, exception) == MagickFalse) { frame_image = DestroyImage(frame_image); return ((Image *) NULL); } if ((IsPixelInfoGray(&frame_image->border_color) == MagickFalse) && (IsGrayColorspace(frame_image->colorspace) != MagickFalse)) (void)SetImageColorspace(frame_image, sRGBColorspace, exception); if ((frame_image->matte_color.alpha_trait != UndefinedPixelTrait) && (frame_image->alpha_trait == UndefinedPixelTrait)) (void)SetImageAlpha(frame_image, OpaqueAlpha, exception); frame_image->page = image->page; if ((image->page.width != 0) && (image->page.height != 0)) { frame_image->page.width += frame_image->columns - image->columns; frame_image->page.height += frame_image->rows - image->rows; } /* * Initialize 3D effects color. 
*/ matte = image->matte_color; accentuate = matte; accentuate.red = (double)(QuantumScale * ((QuantumRange - AccentuateModulate) * matte.red + (QuantumRange * AccentuateModulate))); accentuate.green = (double)(QuantumScale * ((QuantumRange - AccentuateModulate) * matte.green + (QuantumRange * AccentuateModulate))); accentuate.blue = (double)(QuantumScale * ((QuantumRange - AccentuateModulate) * matte.blue + (QuantumRange * AccentuateModulate))); accentuate.black = (double)(QuantumScale * ((QuantumRange - AccentuateModulate) * matte.black + (QuantumRange * AccentuateModulate))); accentuate.alpha = matte.alpha; highlight = matte; highlight.red = (double)(QuantumScale * ((QuantumRange - HighlightModulate) * matte.red + (QuantumRange * HighlightModulate))); highlight.green = (double)(QuantumScale * ((QuantumRange - HighlightModulate) * matte.green + (QuantumRange * HighlightModulate))); highlight.blue = (double)(QuantumScale * ((QuantumRange - HighlightModulate) * matte.blue + (QuantumRange * HighlightModulate))); highlight.black = (double)(QuantumScale * ((QuantumRange - HighlightModulate) * matte.black + (QuantumRange * HighlightModulate))); highlight.alpha = matte.alpha; shadow = matte; shadow.red = QuantumScale * matte.red * ShadowModulate; shadow.green = QuantumScale * matte.green * ShadowModulate; shadow.blue = QuantumScale * matte.blue * ShadowModulate; shadow.black = QuantumScale * matte.black * ShadowModulate; shadow.alpha = matte.alpha; trough = matte; trough.red = QuantumScale * matte.red * TroughModulate; trough.green = QuantumScale * matte.green * TroughModulate; trough.blue = QuantumScale * matte.blue * TroughModulate; trough.black = QuantumScale * matte.black * TroughModulate; trough.alpha = matte.alpha; status = MagickTrue; progress = 0; image_view = AcquireVirtualCacheView(image, exception); frame_view = AcquireAuthenticCacheView(frame_image, exception); height = (size_t) (frame_info->outer_bevel + (frame_info->y - bevel_width) + frame_info->inner_bevel); if (height != 0) { register ssize_t x; register Quantum * magick_restrict q; /* * Draw top of ornamental border. */ q = QueueCacheViewAuthenticPixels(frame_view, 0, 0, frame_image->columns, height, exception); if (q != (Quantum *) NULL) { /* * Draw top of ornamental border. 
*/ for (y = 0; y < (ssize_t) frame_info->outer_bevel; y++) { for (x = 0; x < (ssize_t) (frame_image->columns - y); x++) { if (x < y) SetPixelViaPixelInfo(frame_image, &highlight, q); else SetPixelViaPixelInfo(frame_image, &accentuate, q); q += GetPixelChannels(frame_image); } for (; x < (ssize_t) frame_image->columns; x++) { SetPixelViaPixelInfo(frame_image, &shadow, q); q += GetPixelChannels(frame_image); } } for (y = 0; y < (ssize_t) (frame_info->y - bevel_width); y++) { for (x = 0; x < (ssize_t) frame_info->outer_bevel; x++) { SetPixelViaPixelInfo(frame_image, &highlight, q); q += GetPixelChannels(frame_image); } width = frame_image->columns - 2 * frame_info->outer_bevel; for (x = 0; x < (ssize_t) width; x++) { SetPixelViaPixelInfo(frame_image, &matte, q); q += GetPixelChannels(frame_image); } for (x = 0; x < (ssize_t) frame_info->outer_bevel; x++) { SetPixelViaPixelInfo(frame_image, &shadow, q); q += GetPixelChannels(frame_image); } } for (y = 0; y < (ssize_t) frame_info->inner_bevel; y++) { for (x = 0; x < (ssize_t) frame_info->outer_bevel; x++) { SetPixelViaPixelInfo(frame_image, &highlight, q); q += GetPixelChannels(frame_image); } for (x = 0; x < (ssize_t) (frame_info->x - bevel_width); x++) { SetPixelViaPixelInfo(frame_image, &matte, q); q += GetPixelChannels(frame_image); } width = image->columns + ((size_t) frame_info->inner_bevel << 1) - y; for (x = 0; x < (ssize_t) width; x++) { if (x < y) SetPixelViaPixelInfo(frame_image, &shadow, q); else SetPixelViaPixelInfo(frame_image, &trough, q); q += GetPixelChannels(frame_image); } for (; x < (ssize_t) (image->columns + 2 * frame_info->inner_bevel); x++) { SetPixelViaPixelInfo(frame_image, &highlight, q); q += GetPixelChannels(frame_image); } width = frame_info->width - frame_info->x - image->columns - bevel_width; for (x = 0; x < (ssize_t) width; x++) { SetPixelViaPixelInfo(frame_image, &matte, q); q += GetPixelChannels(frame_image); } for (x = 0; x < (ssize_t) frame_info->outer_bevel; x++) { SetPixelViaPixelInfo(frame_image, &shadow, q); q += GetPixelChannels(frame_image); } } (void)SyncCacheViewAuthenticPixels(frame_view, exception); } } /* * Draw sides of ornamental border. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_number_threads(image,frame_image,image->rows,1) #endif for (y = 0; y < (ssize_t) image->rows; y++) { register ssize_t x; register Quantum * magick_restrict q; size_t width; /* * Initialize scanline with matte color. */ if (status == MagickFalse) continue; q = QueueCacheViewAuthenticPixels(frame_view, 0, frame_info->y + y, frame_image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) frame_info->outer_bevel; x++) { SetPixelViaPixelInfo(frame_image, &highlight, q); q += GetPixelChannels(frame_image); } for (x = 0; x < (ssize_t) (frame_info->x - bevel_width); x++) { SetPixelViaPixelInfo(frame_image, &matte, q); q += GetPixelChannels(frame_image); } for (x = 0; x < (ssize_t) frame_info->inner_bevel; x++) { SetPixelViaPixelInfo(frame_image, &shadow, q); q += GetPixelChannels(frame_image); } /* * Set frame interior pixels. 
*/ for (x = 0; x < (ssize_t) image->columns; x++) { SetPixelViaPixelInfo(frame_image, &frame_image->border_color, q); q += GetPixelChannels(frame_image); } for (x = 0; x < (ssize_t) frame_info->inner_bevel; x++) { SetPixelViaPixelInfo(frame_image, &highlight, q); q += GetPixelChannels(frame_image); } width = frame_info->width - frame_info->x - image->columns - bevel_width; for (x = 0; x < (ssize_t) width; x++) { SetPixelViaPixelInfo(frame_image, &matte, q); q += GetPixelChannels(frame_image); } for (x = 0; x < (ssize_t) frame_info->outer_bevel; x++) { SetPixelViaPixelInfo(frame_image, &shadow, q); q += GetPixelChannels(frame_image); } if (SyncCacheViewAuthenticPixels(frame_view, exception) == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_FrameImage) #endif proceed = SetImageProgress(image, FrameImageTag, progress++, image->rows); if (proceed == MagickFalse) status = MagickFalse; } } height = (size_t) (frame_info->inner_bevel + frame_info->height - frame_info->y - image->rows - bevel_width + frame_info->outer_bevel); if (height != 0) { register ssize_t x; register Quantum * magick_restrict q; /* * Draw bottom of ornamental border. */ q = QueueCacheViewAuthenticPixels(frame_view, 0, (ssize_t) (frame_image->rows - height), frame_image->columns, height, exception); if (q != (Quantum *) NULL) { /* * Draw bottom of ornamental border. */ for (y = frame_info->inner_bevel - 1; y >= 0; y--) { for (x = 0; x < (ssize_t) frame_info->outer_bevel; x++) { SetPixelViaPixelInfo(frame_image, &highlight, q); q += GetPixelChannels(frame_image); } for (x = 0; x < (ssize_t) (frame_info->x - bevel_width); x++) { SetPixelViaPixelInfo(frame_image, &matte, q); q += GetPixelChannels(frame_image); } for (x = 0; x < y; x++) { SetPixelViaPixelInfo(frame_image, &shadow, q); q += GetPixelChannels(frame_image); } for (; x < (ssize_t) (image->columns + 2 * frame_info->inner_bevel); x++) { if (x >= (ssize_t) (image->columns + 2 * frame_info->inner_bevel - y)) SetPixelViaPixelInfo(frame_image, &highlight, q); else SetPixelViaPixelInfo(frame_image, &accentuate, q); q += GetPixelChannels(frame_image); } width = frame_info->width - frame_info->x - image->columns - bevel_width; for (x = 0; x < (ssize_t) width; x++) { SetPixelViaPixelInfo(frame_image, &matte, q); q += GetPixelChannels(frame_image); } for (x = 0; x < (ssize_t) frame_info->outer_bevel; x++) { SetPixelViaPixelInfo(frame_image, &shadow, q); q += GetPixelChannels(frame_image); } } height = frame_info->height - frame_info->y - image->rows - bevel_width; for (y = 0; y < (ssize_t) height; y++) { for (x = 0; x < (ssize_t) frame_info->outer_bevel; x++) { SetPixelViaPixelInfo(frame_image, &highlight, q); q += GetPixelChannels(frame_image); } width = frame_image->columns - 2 * frame_info->outer_bevel; for (x = 0; x < (ssize_t) width; x++) { SetPixelViaPixelInfo(frame_image, &matte, q); q += GetPixelChannels(frame_image); } for (x = 0; x < (ssize_t) frame_info->outer_bevel; x++) { SetPixelViaPixelInfo(frame_image, &shadow, q); q += GetPixelChannels(frame_image); } } for (y = frame_info->outer_bevel - 1; y >= 0; y--) { for (x = 0; x < y; x++) { SetPixelViaPixelInfo(frame_image, &highlight, q); q += GetPixelChannels(frame_image); } for (; x < (ssize_t) frame_image->columns; x++) { if (x >= (ssize_t) (frame_image->columns - y)) SetPixelViaPixelInfo(frame_image, &shadow, q); else SetPixelViaPixelInfo(frame_image, &trough, q); q += 
GetPixelChannels(frame_image); } } (void)SyncCacheViewAuthenticPixels(frame_view, exception); } } frame_view = DestroyCacheView(frame_view); image_view = DestroyCacheView(image_view); x = (ssize_t) (frame_info->outer_bevel + (frame_info->x - bevel_width) + frame_info->inner_bevel); y = (ssize_t) (frame_info->outer_bevel + (frame_info->y - bevel_width) + frame_info->inner_bevel); if (status != MagickFalse) status = CompositeImage(frame_image, image, compose, MagickTrue, x, y, exception); if (status == MagickFalse) frame_image = DestroyImage(frame_image); return (frame_image); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % R a i s e I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % RaiseImage() creates a simulated three-dimensional button-like * effect % by lightening and darkening the edges of the image. Members * width and % height of raise_info define the width of the vertical and * horizontal % edge of the effect. % % The format of the RaiseImage method * is: % % MagickBooleanType RaiseImage(const Image *image, % * const RectangleInfo *raise_info,const MagickBooleanType raise, % * ExceptionInfo *exception) % % A description of each parameter follows: % * % o image: the image. % % o raise_info: Define the width and height * of the raise area. % % o raise: A value other than zero creates a 3-D * raise effect, % otherwise it has a lowered effect. % % o * exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType RaiseImage(Image * image, const RectangleInfo * raise_info, const MagickBooleanType raise, ExceptionInfo * exception) { #define AccentuateFactor ScaleCharToQuantum(135) #define HighlightFactor ScaleCharToQuantum(190) #define ShadowFactor ScaleCharToQuantum(190) #define RaiseImageTag "Raise/Image" #define TroughFactor ScaleCharToQuantum(135) CacheView * image_view; MagickBooleanType status; MagickOffsetType progress; Quantum foreground, background; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(raise_info != (RectangleInfo *) NULL); if ((image->columns <= (raise_info->width << 1)) || (image->rows <= (raise_info->height << 1))) ThrowBinaryException(OptionError, "ImageSizeMustExceedBevelWidth", image->filename); foreground = QuantumRange; background = (Quantum) 0; if (raise == MagickFalse) { foreground = (Quantum) 0; background = QuantumRange; } if (SetImageStorageClass(image, DirectClass, exception) == MagickFalse) return (MagickFalse); /* * Raise image. 
*/ status = MagickTrue; progress = 0; image_view = AcquireAuthenticCacheView(image, exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_number_threads(image,image,raise_info->height,1) #endif for (y = 0; y < (ssize_t) raise_info->height; y++) { register ssize_t i, x; register Quantum * magick_restrict q; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } for (x = 0; x < y; x++) { if (GetPixelWriteMask(image, q) <= (QuantumRange / 2)) { q += GetPixelChannels(image); continue; } for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image, i); PixelTrait traits = GetPixelChannelTraits(image, channel); if ((traits & UpdatePixelTrait) == 0) continue; q[i] = ClampToQuantum(QuantumScale * ((double)q[i] * HighlightFactor + (double) foreground * (QuantumRange - HighlightFactor))); } q += GetPixelChannels(image); } for (; x < (ssize_t) (image->columns - y); x++) { if (GetPixelWriteMask(image, q) <= (QuantumRange / 2)) { q += GetPixelChannels(image); continue; } for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image, i); PixelTrait traits = GetPixelChannelTraits(image, channel); if ((traits & UpdatePixelTrait) == 0) continue; q[i] = ClampToQuantum(QuantumScale * ((double)q[i] * AccentuateFactor + (double)foreground * (QuantumRange - AccentuateFactor))); } q += GetPixelChannels(image); } for (; x < (ssize_t) image->columns; x++) { if (GetPixelWriteMask(image, q) <= (QuantumRange / 2)) { q += GetPixelChannels(image); continue; } for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image, i); PixelTrait traits = GetPixelChannelTraits(image, channel); if ((traits & UpdatePixelTrait) == 0) continue; q[i] = ClampToQuantum(QuantumScale * ((double)q[i] * ShadowFactor + (double) background * (QuantumRange - ShadowFactor))); } q += GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_RaiseImage) #endif proceed = SetImageProgress(image, RaiseImageTag, progress++, image->rows); if (proceed == MagickFalse) status = MagickFalse; } } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_number_threads(image,image,image->rows-2*raise_info->height,1) #endif for (y = (ssize_t) raise_info->height; y < (ssize_t) (image->rows - raise_info->height); y++) { register ssize_t i, x; register Quantum * magick_restrict q; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) raise_info->width; x++) { if (GetPixelWriteMask(image, q) <= (QuantumRange / 2)) { q += GetPixelChannels(image); continue; } for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image, i); PixelTrait traits = GetPixelChannelTraits(image, channel); if ((traits & UpdatePixelTrait) == 0) continue; q[i] = ClampToQuantum(QuantumScale * ((double)q[i] * HighlightFactor + (double) foreground * (QuantumRange - 
HighlightFactor))); } q += GetPixelChannels(image); } for (; x < (ssize_t) (image->columns - raise_info->width); x++) q += GetPixelChannels(image); for (; x < (ssize_t) image->columns; x++) { if (GetPixelWriteMask(image, q) <= (QuantumRange / 2)) { q += GetPixelChannels(image); continue; } for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image, i); PixelTrait traits = GetPixelChannelTraits(image, channel); if ((traits & UpdatePixelTrait) == 0) continue; q[i] = ClampToQuantum(QuantumScale * ((double)q[i] * ShadowFactor + (double) background * (QuantumRange - ShadowFactor))); } q += GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_RaiseImage) #endif proceed = SetImageProgress(image, RaiseImageTag, progress++, image->rows); if (proceed == MagickFalse) status = MagickFalse; } } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_number_threads(image,image,image->rows-raise_info->height,1) #endif for (y = (ssize_t) (image->rows - raise_info->height); y < (ssize_t) image->rows; y++) { register ssize_t i, x; register Quantum * magick_restrict q; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) (image->rows - y); x++) { if (GetPixelWriteMask(image, q) <= (QuantumRange / 2)) { q += GetPixelChannels(image); continue; } for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image, i); PixelTrait traits = GetPixelChannelTraits(image, channel); if ((traits & UpdatePixelTrait) == 0) continue; q[i] = ClampToQuantum(QuantumScale * ((double)q[i] * HighlightFactor + (double) foreground * (QuantumRange - HighlightFactor))); } q += GetPixelChannels(image); } for (; x < (ssize_t) (image->columns - (image->rows - y)); x++) { for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image, i); PixelTrait traits = GetPixelChannelTraits(image, channel); if ((traits & UpdatePixelTrait) == 0) continue; q[i] = ClampToQuantum(QuantumScale * ((double)q[i] * TroughFactor + (double)background * (QuantumRange - TroughFactor))); } q += GetPixelChannels(image); } for (; x < (ssize_t) image->columns; x++) { if (GetPixelWriteMask(image, q) <= (QuantumRange / 2)) { q += GetPixelChannels(image); continue; } for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image, i); PixelTrait traits = GetPixelChannelTraits(image, channel); if ((traits & UpdatePixelTrait) == 0) continue; q[i] = ClampToQuantum(QuantumScale * ((double)q[i] * ShadowFactor + (double) background * (QuantumRange - ShadowFactor))); } q += GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_RaiseImage) #endif proceed = SetImageProgress(image, RaiseImageTag, progress++, image->rows); if (proceed == MagickFalse) status = MagickFalse; } } image_view = DestroyCacheView(image_view); return (status); 
}
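For context, a minimal usage sketch of the RaiseImage() entry point documented above. The umbrella include, the input/output file names, and the 10-pixel bevel are illustrative assumptions; the RaiseImage() call itself follows the signature shown in the source.

#include <MagickCore/MagickCore.h>

int main(int argc, char **argv)
{
  (void) argc;
  MagickCoreGenesis(argv[0], MagickFalse);
  ExceptionInfo *exception = AcquireExceptionInfo();
  ImageInfo *image_info = CloneImageInfo((ImageInfo *) NULL);
  (void) CopyMagickString(image_info->filename, "input.png", MagickPathExtent);
  Image *image = ReadImage(image_info, exception);
  if (image != (Image *) NULL)
    {
      RectangleInfo raise_info;

      raise_info.width = 10;    /* width of the vertical edge of the effect */
      raise_info.height = 10;   /* height of the horizontal edge of the effect */
      raise_info.x = 0;
      raise_info.y = 0;
      (void) RaiseImage(image, &raise_info, MagickTrue, exception);  /* raised (3-D) effect */
      (void) CopyMagickString(image->filename, "raised.png", MagickPathExtent);
      (void) WriteImage(image_info, image, exception);
      image = DestroyImage(image);
    }
  image_info = DestroyImageInfo(image_info);
  exception = DestroyExceptionInfo(exception);
  MagickCoreTerminus();
  return 0;
}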
3d25pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 8; tile_size[3] = 512; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. 
The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. */ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1=-1;t1<=floord(Nt-1,3);t1++) { lbp=max(ceild(t1,2),ceild(6*t1-Nt+2,6)); ubp=min(floord(4*Nt+Nz-9,24),floord(12*t1+Nz+6,24)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(max(0,ceild(3*t1,2)),ceild(24*t2-Nz+5,8)),3*t1-3*t2+1);t3<=min(min(min(floord(4*Nt+Ny-9,8),floord(12*t1+Ny+15,8)),floord(24*t2+Ny+11,8)),floord(24*t1-24*t2+Nz+Ny+13,8));t3++) { for (t4=max(max(max(max(0,ceild(3*t1-3*t2-62,64)),ceild(3*t1-126,128)),ceild(24*t2-Nz-499,512)),ceild(8*t3-Ny-499,512));t4<=min(min(min(min(floord(4*Nt+Nx-9,512),floord(12*t1+Nx+15,512)),floord(24*t2+Nx+11,512)),floord(8*t3+Nx-5,512)),floord(24*t1-24*t2+Nz+Nx+13,512));t4++) { for (t5=max(max(max(max(max(0,ceild(24*t2-Nz+5,4)),ceild(8*t3-Ny+5,4)),ceild(512*t4-Nx+5,4)),3*t1),6*t1-6*t2+1);t5<=min(min(min(min(min(floord(24*t1-24*t2+Nz+18,4),2*t3),Nt-1),3*t1+5),6*t2+4),128*t4+126);t5++) { for (t6=max(max(24*t2,4*t5+4),-24*t1+24*t2+8*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+8*t5),4*t5+Nz-5);t6++) { for (t7=max(8*t3,4*t5+4);t7<=min(8*t3+7,4*t5+Ny-5);t7++) { lbv=max(512*t4,4*t5+4); ubv=min(512*t4+511,4*t5+Nx-5); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ 
(-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]))) + (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
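The CLooG-generated loop nest above is difficult to read, so here is a hedged reference sketch of the plain, untiled update it performs at every interior point. It reuses the A[t % 2] / A[(t + 1) % 2] double buffering and the coef[0..12] numbering from the code above; the function name is illustrative and the sketch is not part of the benchmark.

static void stencil_25pt_reference(int Nz, int Ny, int Nx, int t,
                                   double ****A, double ****coef)
{
  /* Interior points only: a halo of 4 cells is left untouched on every face. */
  for (int i = 4; i <= Nz - 5; i++)
    for (int j = 4; j <= Ny - 5; j++)
      for (int k = 4; k <= Nx - 5; k++) {
        double v = coef[0][i][j][k] * A[t % 2][i][j][k];
        for (int r = 1; r <= 4; r++) {  /* radius 1..4 along z, y, x */
          v += coef[3 * r - 2][i][j][k] *
               (A[t % 2][i - r][j][k] + A[t % 2][i + r][j][k]);
          v += coef[3 * r - 1][i][j][k] *
               (A[t % 2][i][j - r][k] + A[t % 2][i][j + r][k]);
          v += coef[3 * r][i][j][k] *
               (A[t % 2][i][j][k - r] + A[t % 2][i][j][k + r]);
        }
        A[(t + 1) % 2][i][j][k] = v;
      }
}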
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* * Subtract the `struct timeval' values X and Y, storing the result in * RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* * Compute the time remaining to wait. tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1]) + 8; Ny = atoi(argv[2]) + 8; Nz = atoi(argv[3]) + 8; } if (argc > 4) Nt = atoi(argv[4]); //allocate the arrays double ****A = (double ****)malloc(sizeof(double ***) * 2); for (m = 0; m < 2; m++) { A[m] = (double ***)malloc(sizeof(double **) * Nz); for (i = 0; i < Nz; i++) { A[m][i] = (double **)malloc(sizeof(double *) * Ny); for (j = 0; j < Ny; j++) { A[m][i][j] = (double *)malloc(sizeof(double) * Nx); } } } double ****coef = (double ****)malloc(sizeof(double ***) * 13); for (m = 0; m < 13; m++) { coef[m] = (double ***)malloc(sizeof(double **) * Nz); for (i = 0; i < Nz; i++) { coef[m][i] = (double **)malloc(sizeof(double *) * Ny); for (j = 0; j < Ny; j++) { coef[m][i][j] = (double *)malloc(sizeof(double) * Nx); } } } //tile size information, including extra element to decide the list length int *tile_size = (int *)malloc(sizeof(int)); tile_size[0] = -1; //The list is modified here before source - to - source transformations tile_size = (int *)realloc((void *)tile_size, sizeof(int) * 5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 8; tile_size[3] = 512; tile_size[4] = -1; //for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff = 1.e100; const int BASE = 1024; //initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m = 0; m < 13; m++) { for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; LIKWID_MARKER_THREADINIT; LIKWID_MARKER_START("calc"); #endif int num_threads = 1; for (test = 0; test < TESTS; test++) { gettimeofday(&start, 0); //serial execution - Addition: 6 && Multiplication:2 /* * Copyright (C) 1991-2014 Free Software Foundation, Inc. This file * is part of the GNU C Library. 
* * The GNU C Library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation; either version 2.1 * of the License, or (at your option) any later version. * * The GNU C Library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with the GNU C Library; if not, see * <http://www.gnu.org/licenses/>. */ /* * This header is separate from features.h so that the compiler can * include it implicitly at the start of every compilation. It must * not itself include <features.h> or any other header that includes * <features.h> because the implicit include comes before any feature * test macros that may be defined in a source file before it first * explicitly includes a system header. GCC knows the name of this * header in order to preinclude it. */ /* * glibc's intent is to support the IEC 559 math functionality, real * and complex. If the GCC (4.9 and later) predefined macros * specifying compiler intent are available, use them to determine * whether the overall intent is to support these features; * otherwise, presume an older compiler has intent to support these * features and define these macros by default. */ /* * wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / * Unicode 6.0. */ /* We do not support C11 <threads.h>. */ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1 = -1; t1 <= floord(Nt - 1, 3); t1++) { lbp = max(ceild(t1, 2), ceild(6 * t1 - Nt + 2, 6)); ubp = min(floord(4 * Nt + Nz - 9, 24), floord(12 * t1 + Nz + 6, 24)); for (t2 = lbp; t2 <= ubp; t2++) { for (t3 = max(max(max(0, ceild(3 * t1, 2)), ceild(24 * t2 - Nz + 5, 8)), 3 * t1 - 3 * t2 + 1); t3 <= min(min(min(floord(4 * Nt + Ny - 9, 8), floord(12 * t1 + Ny + 15, 8)), floord(24 * t2 + Ny + 11, 8)), floord(24 * t1 - 24 * t2 + Nz + Ny + 13, 8)); t3++) { for (t4 = max(max(max(max(0, ceild(3 * t1 - 3 * t2 - 62, 64)), ceild(3 * t1 - 126, 128)), ceild(24 * t2 - Nz - 499, 512)), ceild(8 * t3 - Ny - 499, 512)); t4 <= min(min(min(min(floord(4 * Nt + Nx - 9, 512), floord(12 * t1 + Nx + 15, 512)), floord(24 * t2 + Nx + 11, 512)), floord(8 * t3 + Nx - 5, 512)), floord(24 * t1 - 24 * t2 + Nz + Nx + 13, 512)); t4++) { for (t5 = max(max(max(max(max(0, ceild(24 * t2 - Nz + 5, 4)), ceild(8 * t3 - Ny + 5, 4)), ceild(512 * t4 - Nx + 5, 4)), 3 * t1), 6 * t1 - 6 * t2 + 1); t5 <= min(min(min(min(min(floord(24 * t1 - 24 * t2 + Nz + 18, 4), 2 * t3), Nt - 1), 3 * t1 + 5), 6 * t2 + 4), 128 * t4 + 126); t5++) { for (t6 = max(max(24 * t2, 4 * t5 + 4), -24 * t1 + 24 * t2 + 8 * t5 - 23); t6 <= min(min(24 * t2 + 23, -24 * t1 + 24 * t2 + 8 * t5), 4 * t5 + Nz - 5); t6++) { for (t7 = max(8 * t3, 4 * t5 + 4); t7 <= min(8 * t3 + 7, 4 * t5 + Ny - 5); t7++) { lbv = max(512 * t4, 4 * t5 + 4); ubv = min(512 * t4 + 511, 4 * t5 + Nx - 5); #pragma ivdep #pragma vector always for (t8 = lbv; t8 <= ubv; t8++) { A[(t5 + 1) % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] = (((((((((((((coef[0][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)]) + (coef[1][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 
2][(-4 * t5 + t6) - 1][(-4 * t5 + t7)][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6) + 1][(-4 * t5 + t7)][(-4 * t5 + t8)]))) + (coef[2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) - 1][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) + 1][(-4 * t5 + t8)]))) + (coef[3][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) - 1] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) + 1]))) + (coef[4][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6) - 2][(-4 * t5 + t7)][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6) + 2][(-4 * t5 + t7)][(-4 * t5 + t8)]))) + (coef[5][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) - 2][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) + 2][(-4 * t5 + t8)]))) + (coef[6][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) - 2] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) + 2]))) + (coef[7][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6) - 3][(-4 * t5 + t7)][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6) + 3][(-4 * t5 + t7)][(-4 * t5 + t8)]))) + (coef[8][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) - 3][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) + 3][(-4 * t5 + t8)]))) + (coef[9][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) - 3] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) + 3]))) + (coef[10][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6) - 4][(-4 * t5 + t7)][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6) + 4][(-4 * t5 + t7)][(-4 * t5 + t8)]))) + (coef[11][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) - 4][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) + 4][(-4 * t5 + t8)]))) + (coef[12][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) - 4] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) + 4])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double)(result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON LIKWID_MARKER_STOP("calc"); LIKWID_MARKER_CLOSE; #endif //Free allocated arrays for (i = 0; i < Nz; i++) { for (j = 0; j < Ny; j++) { free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for (m = 0; m < 13; m++) { for (i = 0; i < Nz; i++) { for (j = 0; j < Ny; j++) { free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
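A hedged usage sketch for the timeval_subtract() helper defined above: it times an arbitrary piece of work with gettimeofday(), exactly as the benchmark loop does. The dummy work loop and the printed label are placeholders, and the prototype refers to the helper above, assumed to be available at link time.

#include <stdio.h>
#include <sys/time.h>

int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y);

int main(void)
{
  struct timeval start, end, result;
  double sum = 0.0;

  gettimeofday(&start, 0);
  for (long i = 0; i < 100000000L; i++)  /* stand-in for one stencil sweep */
    sum += 1.0 / (double)(i + 1);
  gettimeofday(&end, 0);

  timeval_subtract(&result, &end, &start);
  printf("elapsed: %f s (sum=%f)\n",
         (double)result.tv_sec + result.tv_usec * 1.0e-6, sum);
  return 0;
}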
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* * Subtract the `struct timeval' values X and Y, storing the result in * RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* * Compute the time remaining to wait. tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1]) + 8; Ny = atoi(argv[2]) + 8; Nz = atoi(argv[3]) + 8; } if (argc > 4) Nt = atoi(argv[4]); //allocate the arrays double ****A = (double ****)malloc(sizeof(double ***) * 2); for (m = 0; m < 2; m++) { A[m] = (double ***)malloc(sizeof(double **) * Nz); for (i = 0; i < Nz; i++) { A[m][i] = (double **)malloc(sizeof(double *) * Ny); for (j = 0; j < Ny; j++) { A[m][i][j] = (double *)malloc(sizeof(double) * Nx); } } } double ****coef = (double ****)malloc(sizeof(double ***) * 13); for (m = 0; m < 13; m++) { coef[m] = (double ***)malloc(sizeof(double **) * Nz); for (i = 0; i < Nz; i++) { coef[m][i] = (double **)malloc(sizeof(double *) * Ny); for (j = 0; j < Ny; j++) { coef[m][i][j] = (double *)malloc(sizeof(double) * Nx); } } } //tile size information, including extra element to decide the list length int *tile_size = (int *)malloc(sizeof(int)); tile_size[0] = -1; //The list is modified here before source - to - source transformations tile_size = (int *)realloc((void *)tile_size, sizeof(int) * 5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 8; tile_size[3] = 512; tile_size[4] = -1; //for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff = 1.e100; const int BASE = 1024; //initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m = 0; m < 13; m++) { for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for (test = 0; test < TESTS; test++) { gettimeofday(&start, 0); //serial execution - Addition: 6 && Multiplication:2 /* * Copyright (C) 1991-2014 Free Software Foundation, Inc. This file * is part of the GNU C Library. 
* * The GNU C Library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation; either version 2.1 * of the License, or (at your option) any later version. * * The GNU C Library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with the GNU C Library; if not, see * <http://www.gnu.org/licenses/>. */ /* * This header is separate from features.h so that the compiler can * include it implicitly at the start of every compilation. It must * not itself include <features.h> or any other header that includes * <features.h> because the implicit include comes before any feature * test macros that may be defined in a source file before it first * explicitly includes a system header. GCC knows the name of this * header in order to preinclude it. */ /* * glibc's intent is to support the IEC 559 math functionality, real * and complex. If the GCC (4.9 and later) predefined macros * specifying compiler intent are available, use them to determine * whether the overall intent is to support these features; * otherwise, presume an older compiler has intent to support these * features and define these macros by default. */ /* * wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / * Unicode 6.0. */ /* We do not support C11 <threads.h>. */ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1 = -1; t1 <= floord(Nt - 1, 3); t1++) { lbp = max(ceild(t1, 2), ceild(6 * t1 - Nt + 2, 6)); ubp = min(floord(4 * Nt + Nz - 9, 24), floord(12 * t1 + Nz + 6, 24)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2 = lbp; t2 <= ubp; t2++) { for (t3 = max(max(max(0, ceild(3 * t1, 2)), ceild(24 * t2 - Nz + 5, 8)), 3 * t1 - 3 * t2 + 1); t3 <= min(min(min(floord(4 * Nt + Ny - 9, 8), floord(12 * t1 + Ny + 15, 8)), floord(24 * t2 + Ny + 11, 8)), floord(24 * t1 - 24 * t2 + Nz + Ny + 13, 8)); t3++) { for (t4 = max(max(max(max(0, ceild(3 * t1 - 3 * t2 - 62, 64)), ceild(3 * t1 - 126, 128)), ceild(24 * t2 - Nz - 499, 512)), ceild(8 * t3 - Ny - 499, 512)); t4 <= min(min(min(min(floord(4 * Nt + Nx - 9, 512), floord(12 * t1 + Nx + 15, 512)), floord(24 * t2 + Nx + 11, 512)), floord(8 * t3 + Nx - 5, 512)), floord(24 * t1 - 24 * t2 + Nz + Nx + 13, 512)); t4++) { for (t5 = max(max(max(max(max(0, ceild(24 * t2 - Nz + 5, 4)), ceild(8 * t3 - Ny + 5, 4)), ceild(512 * t4 - Nx + 5, 4)), 3 * t1), 6 * t1 - 6 * t2 + 1); t5 <= min(min(min(min(min(floord(24 * t1 - 24 * t2 + Nz + 18, 4), 2 * t3), Nt - 1), 3 * t1 + 5), 6 * t2 + 4), 128 * t4 + 126); t5++) { for (t6 = max(max(24 * t2, 4 * t5 + 4), -24 * t1 + 24 * t2 + 8 * t5 - 23); t6 <= min(min(24 * t2 + 23, -24 * t1 + 24 * t2 + 8 * t5), 4 * t5 + Nz - 5); t6++) { for (t7 = max(8 * t3, 4 * t5 + 4); t7 <= min(8 * t3 + 7, 4 * t5 + Ny - 5); t7++) { lbv = max(512 * t4, 4 * t5 + 4); ubv = min(512 * t4 + 511, 4 * t5 + Nx - 5); #pragma ivdep #pragma vector always for (t8 = lbv; t8 <= ubv; t8++) { A[(t5 + 1) % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] = (((((((((((((coef[0][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)]) + 
(coef[1][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6) - 1][(-4 * t5 + t7)][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6) + 1][(-4 * t5 + t7)][(-4 * t5 + t8)]))) + (coef[2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) - 1][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) + 1][(-4 * t5 + t8)]))) + (coef[3][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) - 1] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) + 1]))) + (coef[4][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6) - 2][(-4 * t5 + t7)][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6) + 2][(-4 * t5 + t7)][(-4 * t5 + t8)]))) + (coef[5][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) - 2][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) + 2][(-4 * t5 + t8)]))) + (coef[6][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) - 2] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) + 2]))) + (coef[7][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6) - 3][(-4 * t5 + t7)][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6) + 3][(-4 * t5 + t7)][(-4 * t5 + t8)]))) + (coef[8][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) - 3][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) + 3][(-4 * t5 + t8)]))) + (coef[9][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) - 3] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) + 3]))) + (coef[10][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6) - 4][(-4 * t5 + t7)][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6) + 4][(-4 * t5 + t7)][(-4 * t5 + t8)]))) + (coef[11][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) - 4][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) + 4][(-4 * t5 + t8)]))) + (coef[12][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) - 4] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) + 4])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double)(result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif //Free allocated arrays for (i = 0; i < Nz; i++) { for (j = 0; j < Ny; j++) { free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for (m = 0; m < 13; m++) { for (i = 0; i < Nz; i++) { for (j = 0; j < Ny; j++) { free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
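A minimal, self-contained illustration of the two mechanisms the OpenMP variant above combines: double-buffered time stepping over the A[t % 2] / A[(t + 1) % 2] buffers, and a parallel-for over independent space updates with the loop indices private to each thread (here simply by declaring them inside the loops). The 1-D three-point stencil and its coefficients are stand-ins for the 25-point kernel; compile with -fopenmp.

#include <stdio.h>
#include <stdlib.h>

#define N  1024
#define NT 100

int main(void)
{
  double *a = (double *)malloc(2 * N * sizeof(double));
  for (int k = 0; k < N; k++)
    a[k] = (double)k;                      /* buffer 0 holds the initial state */
  for (int t = 0; t < NT; t++) {
    double *cur = a + (t % 2) * N;         /* read from the current buffer */
    double *nxt = a + ((t + 1) % 2) * N;   /* write into the other buffer  */
#pragma omp parallel for
    for (int k = 1; k < N - 1; k++)        /* interior updates are independent */
      nxt[k] = 0.5 * cur[k] + 0.25 * (cur[k - 1] + cur[k + 1]);
    nxt[0] = cur[0];                       /* carry boundary values forward */
    nxt[N - 1] = cur[N - 1];
  }
  printf("a[N/2] = %f\n", a[(NT % 2) * N + N / 2]);
  free(a);
  return 0;
}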
OpenMPClause.h
//===- OpenMPClause.h - Classes for OpenMP clauses --------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// /// \file /// \brief This file defines OpenMP AST classes for clauses. /// There are clauses for executable directives, clauses for declarative /// directives and clauses which can be used in both kinds of directives. /// //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_AST_OPENMPCLAUSE_H #define LLVM_CLANG_AST_OPENMPCLAUSE_H #include "clang/AST/Expr.h" #include "clang/AST/Stmt.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/SourceLocation.h" namespace clang { //===----------------------------------------------------------------------===// // AST classes for clauses. //===----------------------------------------------------------------------===// /// \brief This is a basic class for representing single OpenMP clause. /// class OMPClause { /// \brief Starting location of the clause (the clause keyword). SourceLocation StartLoc; /// \brief Ending location of the clause. SourceLocation EndLoc; /// \brief Kind of the clause. OpenMPClauseKind Kind; protected: OMPClause(OpenMPClauseKind K, SourceLocation StartLoc, SourceLocation EndLoc) : StartLoc(StartLoc), EndLoc(EndLoc), Kind(K) {} public: /// \brief Returns the starting location of the clause. SourceLocation getLocStart() const { return StartLoc; } /// \brief Returns the ending location of the clause. SourceLocation getLocEnd() const { return EndLoc; } /// \brief Sets the starting location of the clause. void setLocStart(SourceLocation Loc) { StartLoc = Loc; } /// \brief Sets the ending location of the clause. void setLocEnd(SourceLocation Loc) { EndLoc = Loc; } /// \brief Returns kind of OpenMP clause (private, shared, reduction, etc.). OpenMPClauseKind getClauseKind() const { return Kind; } bool isImplicit() const { return StartLoc.isInvalid(); } StmtRange children(); ConstStmtRange children() const { return const_cast<OMPClause *>(this)->children(); } static bool classof(const OMPClause *) { return true; } }; /// \brief This represents clauses with the list of variables like 'private', /// 'firstprivate', 'copyin', 'shared', or 'reduction' clauses in the /// '#pragma omp ...' directives. template <class T> class OMPVarListClause : public OMPClause { friend class OMPClauseReader; /// \brief Location of '('. SourceLocation LParenLoc; /// \brief Number of variables in the list. unsigned NumVars; protected: /// \brief Fetches list of variables associated with this clause. MutableArrayRef<Expr *> getVarRefs() { return MutableArrayRef<Expr *>( reinterpret_cast<Expr **>( reinterpret_cast<char *>(this) + llvm::RoundUpToAlignment(sizeof(T), llvm::alignOf<Expr *>())), NumVars); } /// \brief Sets the list of variables for this clause. void setVarRefs(ArrayRef<Expr *> VL) { assert(VL.size() == NumVars && "Number of variables is not the same as the preallocated buffer"); std::copy( VL.begin(), VL.end(), reinterpret_cast<Expr **>( reinterpret_cast<char *>(this) + llvm::RoundUpToAlignment(sizeof(T), llvm::alignOf<Expr *>()))); } /// \brief Build a clause with \a N variables /// /// \param K Kind of the clause. /// \param StartLoc Starting location of the clause (the clause keyword). /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. 
/// \param N Number of the variables in the clause. /// OMPVarListClause(OpenMPClauseKind K, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPClause(K, StartLoc, EndLoc), LParenLoc(LParenLoc), NumVars(N) {} public: typedef MutableArrayRef<Expr *>::iterator varlist_iterator; typedef ArrayRef<const Expr *>::iterator varlist_const_iterator; typedef llvm::iterator_range<varlist_iterator> varlist_range; typedef llvm::iterator_range<varlist_const_iterator> varlist_const_range; unsigned varlist_size() const { return NumVars; } bool varlist_empty() const { return NumVars == 0; } varlist_range varlists() { return varlist_range(varlist_begin(), varlist_end()); } varlist_const_range varlists() const { return varlist_const_range(varlist_begin(), varlist_end()); } varlist_iterator varlist_begin() { return getVarRefs().begin(); } varlist_iterator varlist_end() { return getVarRefs().end(); } varlist_const_iterator varlist_begin() const { return getVarRefs().begin(); } varlist_const_iterator varlist_end() const { return getVarRefs().end(); } /// \brief Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// \brief Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// \brief Fetches list of all variables in the clause. ArrayRef<const Expr *> getVarRefs() const { return llvm::makeArrayRef( reinterpret_cast<const Expr *const *>( reinterpret_cast<const char *>(this) + llvm::RoundUpToAlignment(sizeof(T), llvm::alignOf<const Expr *>())), NumVars); } }; /// \brief This represents 'if' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp parallel if(a > 5) /// \endcode /// In this example directive '#pragma omp parallel' has simple 'if' /// clause with condition 'a > 5'. /// class OMPIfClause : public OMPClause { friend class OMPClauseReader; /// \brief Location of '('. SourceLocation LParenLoc; /// \brief Condition of the 'if' clause. Stmt *Condition; /// \brief Set condition. /// void setCondition(Expr *Cond) { Condition = Cond; } public: /// \brief Build 'if' clause with condition \a Cond. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param Cond Condition of the clause. /// \param EndLoc Ending location of the clause. /// OMPIfClause(Expr *Cond, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(OMPC_if, StartLoc, EndLoc), LParenLoc(LParenLoc), Condition(Cond) {} /// \brief Build an empty clause. /// OMPIfClause() : OMPClause(OMPC_if, SourceLocation(), SourceLocation()), LParenLoc(SourceLocation()), Condition(nullptr) {} /// \brief Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// \brief Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// \brief Returns condition. Expr *getCondition() const { return cast_or_null<Expr>(Condition); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_if; } StmtRange children() { return StmtRange(&Condition, &Condition + 1); } }; /// \brief This represents 'final' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp task final(a > 5) /// \endcode /// In this example directive '#pragma omp task' has simple 'final' /// clause with condition 'a > 5'. /// class OMPFinalClause : public OMPClause { friend class OMPClauseReader; /// \brief Location of '('. SourceLocation LParenLoc; /// \brief Condition of the 'if' clause. 
Stmt *Condition; /// \brief Set condition. /// void setCondition(Expr *Cond) { Condition = Cond; } public: /// \brief Build 'final' clause with condition \a Cond. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param Cond Condition of the clause. /// \param EndLoc Ending location of the clause. /// OMPFinalClause(Expr *Cond, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(OMPC_final, StartLoc, EndLoc), LParenLoc(LParenLoc), Condition(Cond) {} /// \brief Build an empty clause. /// OMPFinalClause() : OMPClause(OMPC_final, SourceLocation(), SourceLocation()), LParenLoc(SourceLocation()), Condition(nullptr) {} /// \brief Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// \brief Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// \brief Returns condition. Expr *getCondition() const { return cast_or_null<Expr>(Condition); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_final; } StmtRange children() { return StmtRange(&Condition, &Condition + 1); } }; /// \brief This represents 'num_threads' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp parallel num_threads(6) /// \endcode /// In this example directive '#pragma omp parallel' has simple 'num_threads' /// clause with number of threads '6'. /// class OMPNumThreadsClause : public OMPClause { friend class OMPClauseReader; /// \brief Location of '('. SourceLocation LParenLoc; /// \brief Condition of the 'num_threads' clause. Stmt *NumThreads; /// \brief Set condition. /// void setNumThreads(Expr *NThreads) { NumThreads = NThreads; } public: /// \brief Build 'num_threads' clause with condition \a NumThreads. /// /// \param NumThreads Number of threads for the construct. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// OMPNumThreadsClause(Expr *NumThreads, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(OMPC_num_threads, StartLoc, EndLoc), LParenLoc(LParenLoc), NumThreads(NumThreads) {} /// \brief Build an empty clause. /// OMPNumThreadsClause() : OMPClause(OMPC_num_threads, SourceLocation(), SourceLocation()), LParenLoc(SourceLocation()), NumThreads(nullptr) {} /// \brief Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// \brief Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// \brief Returns number of threads. Expr *getNumThreads() const { return cast_or_null<Expr>(NumThreads); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_num_threads; } StmtRange children() { return StmtRange(&NumThreads, &NumThreads + 1); } }; /// \brief This represents 'safelen' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp simd safelen(4) /// \endcode /// In this example directive '#pragma omp simd' has clause 'safelen' /// with single expression '4'. /// If the safelen clause is used then no two iterations executed /// concurrently with SIMD instructions can have a greater distance /// in the logical iteration space than its value. The parameter of /// the safelen clause must be a constant positive integer expression. /// class OMPSafelenClause : public OMPClause { friend class OMPClauseReader; /// \brief Location of '('. 
SourceLocation LParenLoc; /// \brief Safe iteration space distance. Stmt *Safelen; /// \brief Set safelen. void setSafelen(Expr *Len) { Safelen = Len; } public: /// \brief Build 'safelen' clause. /// /// \param Len Expression associated with this clause. /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. /// OMPSafelenClause(Expr *Len, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(OMPC_safelen, StartLoc, EndLoc), LParenLoc(LParenLoc), Safelen(Len) {} /// \brief Build an empty clause. /// explicit OMPSafelenClause() : OMPClause(OMPC_safelen, SourceLocation(), SourceLocation()), LParenLoc(SourceLocation()), Safelen(nullptr) {} /// \brief Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// \brief Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// \brief Return safe iteration space distance. Expr *getSafelen() const { return cast_or_null<Expr>(Safelen); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_safelen; } StmtRange children() { return StmtRange(&Safelen, &Safelen + 1); } }; /// \brief This represents 'collapse' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp simd collapse(3) /// \endcode /// In this example directive '#pragma omp simd' has clause 'collapse' /// with single expression '3'. /// The parameter must be a constant positive integer expression, it specifies /// the number of nested loops that should be collapsed into a single iteration /// space. /// class OMPCollapseClause : public OMPClause { friend class OMPClauseReader; /// \brief Location of '('. SourceLocation LParenLoc; /// \brief Number of for-loops. Stmt *NumForLoops; /// \brief Set the number of associated for-loops. void setNumForLoops(Expr *Num) { NumForLoops = Num; } public: /// \brief Build 'collapse' clause. /// /// \param Num Expression associated with this clause. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// OMPCollapseClause(Expr *Num, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(OMPC_collapse, StartLoc, EndLoc), LParenLoc(LParenLoc), NumForLoops(Num) {} /// \brief Build an empty clause. /// explicit OMPCollapseClause() : OMPClause(OMPC_collapse, SourceLocation(), SourceLocation()), LParenLoc(SourceLocation()), NumForLoops(nullptr) {} /// \brief Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// \brief Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// \brief Return the number of associated for-loops. Expr *getNumForLoops() const { return cast_or_null<Expr>(NumForLoops); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_collapse; } StmtRange children() { return StmtRange(&NumForLoops, &NumForLoops + 1); } }; /// \brief This represents 'default' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp parallel default(shared) /// \endcode /// In this example directive '#pragma omp parallel' has simple 'default' /// clause with kind 'shared'. /// class OMPDefaultClause : public OMPClause { friend class OMPClauseReader; /// \brief Location of '('. SourceLocation LParenLoc; /// \brief A kind of the 'default' clause. OpenMPDefaultClauseKind Kind; /// \brief Start location of the kind in source code. 
SourceLocation KindKwLoc; /// \brief Set kind of the clauses. /// /// \param K Argument of clause. /// void setDefaultKind(OpenMPDefaultClauseKind K) { Kind = K; } /// \brief Set argument location. /// /// \param KLoc Argument location. /// void setDefaultKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; } public: /// \brief Build 'default' clause with argument \a A ('none' or 'shared'). /// /// \param A Argument of the clause ('none' or 'shared'). /// \param ALoc Starting location of the argument. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// OMPDefaultClause(OpenMPDefaultClauseKind A, SourceLocation ALoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(OMPC_default, StartLoc, EndLoc), LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {} /// \brief Build an empty clause. /// OMPDefaultClause() : OMPClause(OMPC_default, SourceLocation(), SourceLocation()), LParenLoc(SourceLocation()), Kind(OMPC_DEFAULT_unknown), KindKwLoc(SourceLocation()) {} /// \brief Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// \brief Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// \brief Returns kind of the clause. OpenMPDefaultClauseKind getDefaultKind() const { return Kind; } /// \brief Returns location of clause kind. SourceLocation getDefaultKindKwLoc() const { return KindKwLoc; } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_default; } StmtRange children() { return StmtRange(); } }; /// \brief This represents 'proc_bind' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp parallel proc_bind(master) /// \endcode /// In this example directive '#pragma omp parallel' has simple 'proc_bind' /// clause with kind 'master'. /// class OMPProcBindClause : public OMPClause { friend class OMPClauseReader; /// \brief Location of '('. SourceLocation LParenLoc; /// \brief A kind of the 'proc_bind' clause. OpenMPProcBindClauseKind Kind; /// \brief Start location of the kind in source code. SourceLocation KindKwLoc; /// \brief Set kind of the clause. /// /// \param K Kind of clause. /// void setProcBindKind(OpenMPProcBindClauseKind K) { Kind = K; } /// \brief Set clause kind location. /// /// \param KLoc Kind location. /// void setProcBindKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; } public: /// \brief Build 'proc_bind' clause with argument \a A ('master', 'close' or /// 'spread'). /// /// \param A Argument of the clause ('master', 'close' or 'spread'). /// \param ALoc Starting location of the argument. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// OMPProcBindClause(OpenMPProcBindClauseKind A, SourceLocation ALoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(OMPC_proc_bind, StartLoc, EndLoc), LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {} /// \brief Build an empty clause. /// OMPProcBindClause() : OMPClause(OMPC_proc_bind, SourceLocation(), SourceLocation()), LParenLoc(SourceLocation()), Kind(OMPC_PROC_BIND_unknown), KindKwLoc(SourceLocation()) {} /// \brief Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// \brief Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// \brief Returns kind of the clause. 
OpenMPProcBindClauseKind getProcBindKind() const { return Kind; } /// \brief Returns location of clause kind. SourceLocation getProcBindKindKwLoc() const { return KindKwLoc; } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_proc_bind; } StmtRange children() { return StmtRange(); } }; /// \brief This represents 'schedule' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp for schedule(static, 3) /// \endcode /// In this example directive '#pragma omp for' has 'schedule' clause with /// arguments 'static' and '3'. /// class OMPScheduleClause : public OMPClause { friend class OMPClauseReader; /// \brief Location of '('. SourceLocation LParenLoc; /// \brief A kind of the 'schedule' clause. OpenMPScheduleClauseKind Kind; /// \brief Start location of the schedule ind in source code. SourceLocation KindLoc; /// \brief Location of ',' (if any). SourceLocation CommaLoc; /// \brief Chunk size. Stmt *ChunkSize; /// \brief Set schedule kind. /// /// \param K Schedule kind. /// void setScheduleKind(OpenMPScheduleClauseKind K) { Kind = K; } /// \brief Sets the location of '('. /// /// \param Loc Location of '('. /// void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// \brief Set schedule kind start location. /// /// \param KLoc Schedule kind location. /// void setScheduleKindLoc(SourceLocation KLoc) { KindLoc = KLoc; } /// \brief Set location of ','. /// /// \param Loc Location of ','. /// void setCommaLoc(SourceLocation Loc) { CommaLoc = Loc; } /// \brief Set chunk size. /// /// \param E Chunk size. /// void setChunkSize(Expr *E) { ChunkSize = E; } public: /// \brief Build 'schedule' clause with schedule kind \a Kind and chunk size /// expression \a ChunkSize. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param KLoc Starting location of the argument. /// \param CommaLoc Location of ','. /// \param EndLoc Ending location of the clause. /// \param Kind Schedule kind. /// \param ChunkSize Chunk size. /// OMPScheduleClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KLoc, SourceLocation CommaLoc, SourceLocation EndLoc, OpenMPScheduleClauseKind Kind, Expr *ChunkSize) : OMPClause(OMPC_schedule, StartLoc, EndLoc), LParenLoc(LParenLoc), Kind(Kind), KindLoc(KLoc), CommaLoc(CommaLoc), ChunkSize(ChunkSize) {} /// \brief Build an empty clause. /// explicit OMPScheduleClause() : OMPClause(OMPC_schedule, SourceLocation(), SourceLocation()), Kind(OMPC_SCHEDULE_unknown), ChunkSize(nullptr) {} /// \brief Get kind of the clause. /// OpenMPScheduleClauseKind getScheduleKind() const { return Kind; } /// \brief Get location of '('. /// SourceLocation getLParenLoc() { return LParenLoc; } /// \brief Get kind location. /// SourceLocation getScheduleKindLoc() { return KindLoc; } /// \brief Get location of ','. /// SourceLocation getCommaLoc() { return CommaLoc; } /// \brief Get chunk size. /// Expr *getChunkSize() { return dyn_cast_or_null<Expr>(ChunkSize); } /// \brief Get chunk size. /// Expr *getChunkSize() const { return dyn_cast_or_null<Expr>(ChunkSize); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_schedule; } StmtRange children() { return StmtRange(&ChunkSize, &ChunkSize + 1); } }; /// \brief This represents 'ordered' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp for ordered /// \endcode /// In this example directive '#pragma omp for' has 'ordered' clause. 
/// class OMPOrderedClause : public OMPClause { public: /// \brief Build 'ordered' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. /// OMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_ordered, StartLoc, EndLoc) {} /// \brief Build an empty clause. /// OMPOrderedClause() : OMPClause(OMPC_ordered, SourceLocation(), SourceLocation()) {} static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_ordered; } StmtRange children() { return StmtRange(); } }; /// \brief This represents 'nowait' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp for nowait /// \endcode /// In this example directive '#pragma omp for' has 'nowait' clause. /// class OMPNowaitClause : public OMPClause { public: /// \brief Build 'nowait' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. /// OMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_nowait, StartLoc, EndLoc) {} /// \brief Build an empty clause. /// OMPNowaitClause() : OMPClause(OMPC_nowait, SourceLocation(), SourceLocation()) {} static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_nowait; } StmtRange children() { return StmtRange(); } }; /// \brief This represents 'untied' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp task untied /// \endcode /// In this example directive '#pragma omp task' has 'untied' clause. /// class OMPUntiedClause : public OMPClause { public: /// \brief Build 'untied' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. /// OMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_untied, StartLoc, EndLoc) {} /// \brief Build an empty clause. /// OMPUntiedClause() : OMPClause(OMPC_untied, SourceLocation(), SourceLocation()) {} static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_untied; } StmtRange children() { return StmtRange(); } }; /// \brief This represents 'mergeable' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp task mergeable /// \endcode /// In this example directive '#pragma omp task' has 'mergeable' clause. /// class OMPMergeableClause : public OMPClause { public: /// \brief Build 'mergeable' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. /// OMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_mergeable, StartLoc, EndLoc) {} /// \brief Build an empty clause. /// OMPMergeableClause() : OMPClause(OMPC_mergeable, SourceLocation(), SourceLocation()) {} static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_mergeable; } StmtRange children() { return StmtRange(); } }; /// \brief This represents 'read' clause in the '#pragma omp atomic' directive. /// /// \code /// #pragma omp atomic read /// \endcode /// In this example directive '#pragma omp atomic' has 'read' clause. /// class OMPReadClause : public OMPClause { public: /// \brief Build 'read' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. /// OMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_read, StartLoc, EndLoc) {} /// \brief Build an empty clause. 
/// OMPReadClause() : OMPClause(OMPC_read, SourceLocation(), SourceLocation()) {} static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_read; } StmtRange children() { return StmtRange(); } }; /// \brief This represents 'write' clause in the '#pragma omp atomic' directive. /// /// \code /// #pragma omp atomic write /// \endcode /// In this example directive '#pragma omp atomic' has 'write' clause. /// class OMPWriteClause : public OMPClause { public: /// \brief Build 'write' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. /// OMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_write, StartLoc, EndLoc) {} /// \brief Build an empty clause. /// OMPWriteClause() : OMPClause(OMPC_write, SourceLocation(), SourceLocation()) {} static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_write; } StmtRange children() { return StmtRange(); } }; /// \brief This represents 'update' clause in the '#pragma omp atomic' /// directive. /// /// \code /// #pragma omp atomic update /// \endcode /// In this example directive '#pragma omp atomic' has 'update' clause. /// class OMPUpdateClause : public OMPClause { public: /// \brief Build 'update' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. /// OMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_update, StartLoc, EndLoc) {} /// \brief Build an empty clause. /// OMPUpdateClause() : OMPClause(OMPC_update, SourceLocation(), SourceLocation()) {} static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_update; } StmtRange children() { return StmtRange(); } }; /// \brief This represents 'capture' clause in the '#pragma omp atomic' /// directive. /// /// \code /// #pragma omp atomic capture /// \endcode /// In this example directive '#pragma omp atomic' has 'capture' clause. /// class OMPCaptureClause : public OMPClause { public: /// \brief Build 'capture' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. /// OMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_capture, StartLoc, EndLoc) {} /// \brief Build an empty clause. /// OMPCaptureClause() : OMPClause(OMPC_capture, SourceLocation(), SourceLocation()) {} static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_capture; } StmtRange children() { return StmtRange(); } }; /// \brief This represents 'seq_cst' clause in the '#pragma omp atomic' /// directive. /// /// \code /// #pragma omp atomic seq_cst /// \endcode /// In this example directive '#pragma omp atomic' has 'seq_cst' clause. /// class OMPSeqCstClause : public OMPClause { public: /// \brief Build 'seq_cst' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. /// OMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_seq_cst, StartLoc, EndLoc) {} /// \brief Build an empty clause. /// OMPSeqCstClause() : OMPClause(OMPC_seq_cst, SourceLocation(), SourceLocation()) {} static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_seq_cst; } StmtRange children() { return StmtRange(); } }; /// \brief This represents clause 'private' in the '#pragma omp ...' directives. 
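/// \brief Editorial note (illustrative, not part of the original header): the
/// atomic-related clauses above ('read', 'write', 'update', 'capture',
/// 'seq_cst') carry no payload, so consumers usually dispatch on
/// \c getClauseKind() alone. A minimal sketch, assuming \c Clause comes from
/// an '#pragma omp atomic' directive:
/// \code
/// switch (Clause->getClauseKind()) {
/// case OMPC_read:    /* ... */ break;
/// case OMPC_write:   /* ... */ break;
/// case OMPC_update:  /* ... */ break;
/// case OMPC_capture: /* ... */ break;
/// case OMPC_seq_cst: /* ... */ break;
/// default: break;
/// }
/// \endcode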
/// /// \code /// #pragma omp parallel private(a,b) /// \endcode /// In this example directive '#pragma omp parallel' has clause 'private' /// with the variables 'a' and 'b'. /// class OMPPrivateClause : public OMPVarListClause<OMPPrivateClause> { friend class OMPClauseReader; /// \brief Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. /// OMPPrivateClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPPrivateClause>(OMPC_private, StartLoc, LParenLoc, EndLoc, N) {} /// \brief Build an empty clause. /// /// \param N Number of variables. /// explicit OMPPrivateClause(unsigned N) : OMPVarListClause<OMPPrivateClause>(OMPC_private, SourceLocation(), SourceLocation(), SourceLocation(), N) {} /// \brief Sets the list of references to private copies with initializers for /// new private variables. /// \param VL List of references. void setPrivateCopies(ArrayRef<Expr *> VL); /// \brief Gets the list of references to private copies with initializers for /// new private variables. MutableArrayRef<Expr *> getPrivateCopies() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getPrivateCopies() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } public: /// \brief Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. /// \param PrivateVL List of references to private copies with initializers. /// static OMPPrivateClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> PrivateVL); /// \brief Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. /// static OMPPrivateClause *CreateEmpty(const ASTContext &C, unsigned N); typedef MutableArrayRef<Expr *>::iterator private_copies_iterator; typedef ArrayRef<const Expr *>::iterator private_copies_const_iterator; typedef llvm::iterator_range<private_copies_iterator> private_copies_range; typedef llvm::iterator_range<private_copies_const_iterator> private_copies_const_range; private_copies_range private_copies() { return private_copies_range(getPrivateCopies().begin(), getPrivateCopies().end()); } private_copies_const_range private_copies() const { return private_copies_const_range(getPrivateCopies().begin(), getPrivateCopies().end()); } StmtRange children() { return StmtRange(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_private; } }; /// \brief This represents clause 'firstprivate' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp parallel firstprivate(a,b) /// \endcode /// In this example directive '#pragma omp parallel' has clause 'firstprivate' /// with the variables 'a' and 'b'. /// class OMPFirstprivateClause : public OMPVarListClause<OMPFirstprivateClause> { friend class OMPClauseReader; /// \brief Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. 
/// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. /// OMPFirstprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPFirstprivateClause>(OMPC_firstprivate, StartLoc, LParenLoc, EndLoc, N) {} /// \brief Build an empty clause. /// /// \param N Number of variables. /// explicit OMPFirstprivateClause(unsigned N) : OMPVarListClause<OMPFirstprivateClause>( OMPC_firstprivate, SourceLocation(), SourceLocation(), SourceLocation(), N) {} /// \brief Sets the list of references to private copies with initializers for /// new private variables. /// \param VL List of references. void setPrivateCopies(ArrayRef<Expr *> VL); /// \brief Gets the list of references to private copies with initializers for /// new private variables. MutableArrayRef<Expr *> getPrivateCopies() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getPrivateCopies() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } /// \brief Sets the list of references to initializer variables for new /// private variables. /// \param VL List of references. void setInits(ArrayRef<Expr *> VL); /// \brief Gets the list of references to initializer variables for new /// private variables. MutableArrayRef<Expr *> getInits() { return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size()); } ArrayRef<const Expr *> getInits() const { return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size()); } public: /// \brief Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the original variables. /// \param PrivateVL List of references to private copies with initializers. /// \param InitVL List of references to auto generated variables used for /// initialization of a single array element. Used if firstprivate variable is /// of array type. /// static OMPFirstprivateClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> PrivateVL, ArrayRef<Expr *> InitVL); /// \brief Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. 
/// static OMPFirstprivateClause *CreateEmpty(const ASTContext &C, unsigned N); typedef MutableArrayRef<Expr *>::iterator private_copies_iterator; typedef ArrayRef<const Expr *>::iterator private_copies_const_iterator; typedef llvm::iterator_range<private_copies_iterator> private_copies_range; typedef llvm::iterator_range<private_copies_const_iterator> private_copies_const_range; private_copies_range private_copies() { return private_copies_range(getPrivateCopies().begin(), getPrivateCopies().end()); } private_copies_const_range private_copies() const { return private_copies_const_range(getPrivateCopies().begin(), getPrivateCopies().end()); } typedef MutableArrayRef<Expr *>::iterator inits_iterator; typedef ArrayRef<const Expr *>::iterator inits_const_iterator; typedef llvm::iterator_range<inits_iterator> inits_range; typedef llvm::iterator_range<inits_const_iterator> inits_const_range; inits_range inits() { return inits_range(getInits().begin(), getInits().end()); } inits_const_range inits() const { return inits_const_range(getInits().begin(), getInits().end()); } StmtRange children() { return StmtRange(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_firstprivate; } }; /// \brief This represents clause 'lastprivate' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp simd lastprivate(a,b) /// \endcode /// In this example directive '#pragma omp simd' has clause 'lastprivate' /// with the variables 'a' and 'b'. class OMPLastprivateClause : public OMPVarListClause<OMPLastprivateClause> { // There are 4 additional tail-allocated arrays at the end of the class: // 1. Contains list of pseudo variables with the default initialization for // each non-firstprivate variables. Used in codegen for initialization of // lastprivate copies. // 2. List of helper expressions for proper generation of assignment operation // required for lastprivate clause. This list represents private variables // (for arrays, single array element). // 3. List of helper expressions for proper generation of assignment operation // required for lastprivate clause. This list represents original variables // (for arrays, single array element). // 4. List of helper expressions that represents assignment operation: // \code // DstExprs = SrcExprs; // \endcode // Required for proper codegen of final assignment performed by the // lastprivate clause. // friend class OMPClauseReader; /// \brief Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. /// OMPLastprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPLastprivateClause>(OMPC_lastprivate, StartLoc, LParenLoc, EndLoc, N) {} /// \brief Build an empty clause. /// /// \param N Number of variables. /// explicit OMPLastprivateClause(unsigned N) : OMPVarListClause<OMPLastprivateClause>( OMPC_lastprivate, SourceLocation(), SourceLocation(), SourceLocation(), N) {} /// \brief Get the list of helper expressions for initialization of private /// copies for lastprivate variables. 
MutableArrayRef<Expr *> getPrivateCopies() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getPrivateCopies() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } /// \brief Set list of helper expressions, required for proper codegen of the /// clause. These expressions represent private variables (for arrays, single /// array element) in the final assignment statement performed by the /// lastprivate clause. void setSourceExprs(ArrayRef<Expr *> SrcExprs); /// \brief Get the list of helper source expressions. MutableArrayRef<Expr *> getSourceExprs() { return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size()); } ArrayRef<const Expr *> getSourceExprs() const { return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size()); } /// \brief Set list of helper expressions, required for proper codegen of the /// clause. These expressions represent original variables (for arrays, single /// array element) in the final assignment statement performed by the /// lastprivate clause. void setDestinationExprs(ArrayRef<Expr *> DstExprs); /// \brief Get the list of helper destination expressions. MutableArrayRef<Expr *> getDestinationExprs() { return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size()); } ArrayRef<const Expr *> getDestinationExprs() const { return llvm::makeArrayRef(getSourceExprs().end(), varlist_size()); } /// \brief Set list of helper assignment expressions, required for proper /// codegen of the clause. These expressions are assignment expressions that /// assign private copy of the variable to original variable. void setAssignmentOps(ArrayRef<Expr *> AssignmentOps); /// \brief Get the list of helper assignment expressions. MutableArrayRef<Expr *> getAssignmentOps() { return MutableArrayRef<Expr *>(getDestinationExprs().end(), varlist_size()); } ArrayRef<const Expr *> getAssignmentOps() const { return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size()); } public: /// \brief Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. /// \param SrcExprs List of helper expressions for proper generation of /// assignment operation required for lastprivate clause. This list represents /// private variables (for arrays, single array element). /// \param DstExprs List of helper expressions for proper generation of /// assignment operation required for lastprivate clause. This list represents /// original variables (for arrays, single array element). /// \param AssignmentOps List of helper expressions that represents assignment /// operation: /// \code /// DstExprs = SrcExprs; /// \endcode /// Required for proper codegen of final assignment performed by the /// lastprivate clause. /// /// static OMPLastprivateClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs, ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps); /// \brief Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. 
/// static OMPLastprivateClause *CreateEmpty(const ASTContext &C, unsigned N); typedef MutableArrayRef<Expr *>::iterator helper_expr_iterator; typedef ArrayRef<const Expr *>::iterator helper_expr_const_iterator; typedef llvm::iterator_range<helper_expr_iterator> helper_expr_range; typedef llvm::iterator_range<helper_expr_const_iterator> helper_expr_const_range; /// \brief Set list of helper expressions, required for generation of private /// copies of original lastprivate variables. void setPrivateCopies(ArrayRef<Expr *> PrivateCopies); helper_expr_const_range private_copies() const { return helper_expr_const_range(getPrivateCopies().begin(), getPrivateCopies().end()); } helper_expr_range private_copies() { return helper_expr_range(getPrivateCopies().begin(), getPrivateCopies().end()); } helper_expr_const_range source_exprs() const { return helper_expr_const_range(getSourceExprs().begin(), getSourceExprs().end()); } helper_expr_range source_exprs() { return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end()); } helper_expr_const_range destination_exprs() const { return helper_expr_const_range(getDestinationExprs().begin(), getDestinationExprs().end()); } helper_expr_range destination_exprs() { return helper_expr_range(getDestinationExprs().begin(), getDestinationExprs().end()); } helper_expr_const_range assignment_ops() const { return helper_expr_const_range(getAssignmentOps().begin(), getAssignmentOps().end()); } helper_expr_range assignment_ops() { return helper_expr_range(getAssignmentOps().begin(), getAssignmentOps().end()); } StmtRange children() { return StmtRange(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_lastprivate; } }; /// \brief This represents clause 'shared' in the '#pragma omp ...' directives. /// /// \code /// #pragma omp parallel shared(a,b) /// \endcode /// In this example directive '#pragma omp parallel' has clause 'shared' /// with the variables 'a' and 'b'. /// class OMPSharedClause : public OMPVarListClause<OMPSharedClause> { /// \brief Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. /// OMPSharedClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPSharedClause>(OMPC_shared, StartLoc, LParenLoc, EndLoc, N) {} /// \brief Build an empty clause. /// /// \param N Number of variables. /// explicit OMPSharedClause(unsigned N) : OMPVarListClause<OMPSharedClause>(OMPC_shared, SourceLocation(), SourceLocation(), SourceLocation(), N) {} public: /// \brief Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. /// static OMPSharedClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL); /// \brief Creates an empty clause with \a N variables. /// /// \param C AST context. /// \param N The number of variables. 
/// static OMPSharedClause *CreateEmpty(const ASTContext &C, unsigned N); StmtRange children() { return StmtRange(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_shared; } }; /// \brief This represents clause 'reduction' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp parallel reduction(+:a,b) /// \endcode /// In this example directive '#pragma omp parallel' has clause 'reduction' /// with operator '+' and the variables 'a' and 'b'. /// class OMPReductionClause : public OMPVarListClause<OMPReductionClause> { friend class OMPClauseReader; /// \brief Location of ':'. SourceLocation ColonLoc; /// \brief Nested name specifier for C++. NestedNameSpecifierLoc QualifierLoc; /// \brief Name of custom operator. DeclarationNameInfo NameInfo; /// \brief Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param ColonLoc Location of ':'. /// \param N Number of the variables in the clause. /// \param QualifierLoc The nested-name qualifier with location information /// \param NameInfo The full name info for reduction identifier. /// OMPReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, unsigned N, NestedNameSpecifierLoc QualifierLoc, const DeclarationNameInfo &NameInfo) : OMPVarListClause<OMPReductionClause>(OMPC_reduction, StartLoc, LParenLoc, EndLoc, N), ColonLoc(ColonLoc), QualifierLoc(QualifierLoc), NameInfo(NameInfo) {} /// \brief Build an empty clause. /// /// \param N Number of variables. /// explicit OMPReductionClause(unsigned N) : OMPVarListClause<OMPReductionClause>(OMPC_reduction, SourceLocation(), SourceLocation(), SourceLocation(), N), ColonLoc(), QualifierLoc(), NameInfo() {} /// \brief Sets location of ':' symbol in clause. void setColonLoc(SourceLocation CL) { ColonLoc = CL; } /// \brief Sets the name info for specified reduction identifier. void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; } /// \brief Sets the nested name specifier. void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; } /// \brief Set list of helper expressions, required for proper codegen of the /// clause. These expressions represent LHS expression in the final /// reduction expression performed by the reduction clause. void setLHSExprs(ArrayRef<Expr *> LHSExprs); /// \brief Get the list of helper LHS expressions. MutableArrayRef<Expr *> getLHSExprs() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getLHSExprs() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } /// \brief Set list of helper expressions, required for proper codegen of the /// clause. These expressions represent RHS expression in the final /// reduction expression performed by the reduction clause. /// Also, variables in these expressions are used for proper initialization of /// reduction copies. void setRHSExprs(ArrayRef<Expr *> RHSExprs); /// \brief Get the list of helper destination expressions. MutableArrayRef<Expr *> getRHSExprs() { return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size()); } ArrayRef<const Expr *> getRHSExprs() const { return llvm::makeArrayRef(getLHSExprs().end(), varlist_size()); } /// \brief Set list of helper reduction expressions, required for proper /// codegen of the clause. 
These expressions are binary expressions or /// operator/custom reduction calls that calculate a new value from source /// helper expressions to destination helper expressions. void setReductionOps(ArrayRef<Expr *> ReductionOps); /// \brief Get the list of helper reduction expressions. MutableArrayRef<Expr *> getReductionOps() { return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size()); } ArrayRef<const Expr *> getReductionOps() const { return llvm::makeArrayRef(getRHSExprs().end(), varlist_size()); } public: /// \brief Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param ColonLoc Location of ':'. /// \param EndLoc Ending location of the clause. /// \param VL The variables in the clause. /// \param QualifierLoc The nested-name qualifier with location information. /// \param NameInfo The full name info for reduction identifier. /// \param LHSExprs List of helper expressions for proper generation of /// assignment operation required for reduction clause. This list represents /// LHSs of the reduction expressions. /// \param RHSExprs List of helper expressions for proper generation of /// assignment operation required for reduction clause. This list represents /// RHSs of the reduction expressions. /// Also, variables in these expressions are used for proper initialization of /// reduction copies. /// \param ReductionOps List of helper expressions that represent reduction /// expressions: /// \code /// LHSExprs binop RHSExprs; /// operator binop(LHSExpr, RHSExpr); /// <CustomReduction>(LHSExpr, RHSExpr); /// \endcode /// Required for proper codegen of final reduction operation performed by the /// reduction clause. /// static OMPReductionClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, NestedNameSpecifierLoc QualifierLoc, const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs, ArrayRef<Expr *> ReductionOps); /// \brief Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. /// static OMPReductionClause *CreateEmpty(const ASTContext &C, unsigned N); /// \brief Gets location of ':' symbol in clause. SourceLocation getColonLoc() const { return ColonLoc; } /// \brief Gets the name info for specified reduction identifier. const DeclarationNameInfo &getNameInfo() const { return NameInfo; } /// \brief Gets the nested name specifier.
NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; } typedef MutableArrayRef<Expr *>::iterator helper_expr_iterator; typedef ArrayRef<const Expr *>::iterator helper_expr_const_iterator; typedef llvm::iterator_range<helper_expr_iterator> helper_expr_range; typedef llvm::iterator_range<helper_expr_const_iterator> helper_expr_const_range; helper_expr_const_range lhs_exprs() const { return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end()); } helper_expr_range lhs_exprs() { return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end()); } helper_expr_const_range rhs_exprs() const { return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end()); } helper_expr_range rhs_exprs() { return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end()); } helper_expr_const_range reduction_ops() const { return helper_expr_const_range(getReductionOps().begin(), getReductionOps().end()); } helper_expr_range reduction_ops() { return helper_expr_range(getReductionOps().begin(), getReductionOps().end()); } StmtRange children() { return StmtRange(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_reduction; } }; /// \brief This represents clause 'linear' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp simd linear(a,b : 2) /// \endcode /// In this example directive '#pragma omp simd' has clause 'linear' /// with variables 'a', 'b' and linear step '2'. /// class OMPLinearClause : public OMPVarListClause<OMPLinearClause> { friend class OMPClauseReader; /// \brief Location of ':'. SourceLocation ColonLoc; /// \brief Sets the linear step for clause. void setStep(Expr *Step) { *(getFinals().end()) = Step; } /// \brief Sets the expression to calculate linear step for clause. void setCalcStep(Expr *CalcStep) { *(getFinals().end() + 1) = CalcStep; } /// \brief Build 'linear' clause with given number of variables \a NumVars. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param ColonLoc Location of ':'. /// \param EndLoc Ending location of the clause. /// \param NumVars Number of variables. /// OMPLinearClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, unsigned NumVars) : OMPVarListClause<OMPLinearClause>(OMPC_linear, StartLoc, LParenLoc, EndLoc, NumVars), ColonLoc(ColonLoc) {} /// \brief Build an empty clause. /// /// \param NumVars Number of variables. /// explicit OMPLinearClause(unsigned NumVars) : OMPVarListClause<OMPLinearClause>(OMPC_linear, SourceLocation(), SourceLocation(), SourceLocation(), NumVars), ColonLoc(SourceLocation()) {} /// \brief Gets the list of initial values for linear variables. /// /// There are NumVars expressions with initial values allocated after the /// varlist, they are followed by NumVars update expressions (used to update /// the linear variable's value on current iteration) and they are followed by /// NumVars final expressions (used to calculate the linear variable's /// value after the loop body). 
After these lists, there are 2 helper /// expressions - linear step and a helper to calculate it before the /// loop body (used when the linear step is not constant): /// /// { Vars[] /* in OMPVarListClause */; Inits[]; Updates[]; Finals[]; /// Step; CalcStep; } /// MutableArrayRef<Expr *> getInits() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getInits() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } /// \brief Gets the list of update expressions for linear variables. MutableArrayRef<Expr *> getUpdates() { return MutableArrayRef<Expr *>(getInits().end(), varlist_size()); } ArrayRef<const Expr *> getUpdates() const { return llvm::makeArrayRef(getInits().end(), varlist_size()); } /// \brief Gets the list of final update expressions for linear variables. MutableArrayRef<Expr *> getFinals() { return MutableArrayRef<Expr *>(getUpdates().end(), varlist_size()); } ArrayRef<const Expr *> getFinals() const { return llvm::makeArrayRef(getUpdates().end(), varlist_size()); } /// \brief Sets the list of the initial values for linear variables. /// \param IL List of expressions. void setInits(ArrayRef<Expr *> IL); public: /// \brief Creates clause with a list of variables \a VL and a linear step /// \a Step. /// /// \param C AST Context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param ColonLoc Location of ':'. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. /// \param IL List of initial values for the variables. /// \param Step Linear step. /// \param CalcStep Calculation of the linear step. static OMPLinearClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> IL, Expr *Step, Expr *CalcStep); /// \brief Creates an empty clause with the place for \a NumVars variables. /// /// \param C AST context. /// \param NumVars Number of variables. /// static OMPLinearClause *CreateEmpty(const ASTContext &C, unsigned NumVars); /// \brief Sets the location of ':'. void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; } /// \brief Returns the location of ':'. SourceLocation getColonLoc() const { return ColonLoc; } /// \brief Returns linear step. Expr *getStep() { return *(getFinals().end()); } /// \brief Returns linear step. const Expr *getStep() const { return *(getFinals().end()); } /// \brief Returns expression to calculate linear step. Expr *getCalcStep() { return *(getFinals().end() + 1); } /// \brief Returns expression to calculate linear step. const Expr *getCalcStep() const { return *(getFinals().end() + 1); } /// \brief Sets the list of update expressions for linear variables. /// \param UL List of expressions. void setUpdates(ArrayRef<Expr *> UL); /// \brief Sets the list of final update expressions for linear variables. /// \param FL List of expressions.
void setFinals(ArrayRef<Expr *> FL); typedef MutableArrayRef<Expr *>::iterator inits_iterator; typedef ArrayRef<const Expr *>::iterator inits_const_iterator; typedef llvm::iterator_range<inits_iterator> inits_range; typedef llvm::iterator_range<inits_const_iterator> inits_const_range; inits_range inits() { return inits_range(getInits().begin(), getInits().end()); } inits_const_range inits() const { return inits_const_range(getInits().begin(), getInits().end()); } typedef MutableArrayRef<Expr *>::iterator updates_iterator; typedef ArrayRef<const Expr *>::iterator updates_const_iterator; typedef llvm::iterator_range<updates_iterator> updates_range; typedef llvm::iterator_range<updates_const_iterator> updates_const_range; updates_range updates() { return updates_range(getUpdates().begin(), getUpdates().end()); } updates_const_range updates() const { return updates_const_range(getUpdates().begin(), getUpdates().end()); } typedef MutableArrayRef<Expr *>::iterator finals_iterator; typedef ArrayRef<const Expr *>::iterator finals_const_iterator; typedef llvm::iterator_range<finals_iterator> finals_range; typedef llvm::iterator_range<finals_const_iterator> finals_const_range; finals_range finals() { return finals_range(getFinals().begin(), getFinals().end()); } finals_const_range finals() const { return finals_const_range(getFinals().begin(), getFinals().end()); } StmtRange children() { return StmtRange(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(getFinals().end() + 2)); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_linear; } }; /// \brief This represents clause 'aligned' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp simd aligned(a,b : 8) /// \endcode /// In this example directive '#pragma omp simd' has clause 'aligned' /// with variables 'a', 'b' and alignment '8'. /// class OMPAlignedClause : public OMPVarListClause<OMPAlignedClause> { friend class OMPClauseReader; /// \brief Location of ':'. SourceLocation ColonLoc; /// \brief Sets the alignment for clause. void setAlignment(Expr *A) { *varlist_end() = A; } /// \brief Build 'aligned' clause with given number of variables \a NumVars. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param ColonLoc Location of ':'. /// \param EndLoc Ending location of the clause. /// \param NumVars Number of variables. /// OMPAlignedClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, unsigned NumVars) : OMPVarListClause<OMPAlignedClause>(OMPC_aligned, StartLoc, LParenLoc, EndLoc, NumVars), ColonLoc(ColonLoc) {} /// \brief Build an empty clause. /// /// \param NumVars Number of variables. /// explicit OMPAlignedClause(unsigned NumVars) : OMPVarListClause<OMPAlignedClause>(OMPC_aligned, SourceLocation(), SourceLocation(), SourceLocation(), NumVars), ColonLoc(SourceLocation()) {} public: /// \brief Creates clause with a list of variables \a VL and alignment \a A. /// /// \param C AST Context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param ColonLoc Location of ':'. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. /// \param A Alignment. 
static OMPAlignedClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, Expr *A); /// \brief Creates an empty clause with the place for \a NumVars variables. /// /// \param C AST context. /// \param NumVars Number of variables. /// static OMPAlignedClause *CreateEmpty(const ASTContext &C, unsigned NumVars); /// \brief Sets the location of ':'. void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; } /// \brief Returns the location of ':'. SourceLocation getColonLoc() const { return ColonLoc; } /// \brief Returns alignment. Expr *getAlignment() { return *varlist_end(); } /// \brief Returns alignment. const Expr *getAlignment() const { return *varlist_end(); } StmtRange children() { return StmtRange(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end() + 1)); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_aligned; } }; /// \brief This represents clause 'copyin' in the '#pragma omp ...' directives. /// /// \code /// #pragma omp parallel copyin(a,b) /// \endcode /// In this example directive '#pragma omp parallel' has clause 'copyin' /// with the variables 'a' and 'b'. /// class OMPCopyinClause : public OMPVarListClause<OMPCopyinClause> { // Class has 3 additional tail allocated arrays: // 1. List of helper expressions for proper generation of assignment operation // required for copyin clause. This list represents sources. // 2. List of helper expressions for proper generation of assignment operation // required for copyin clause. This list represents destinations. // 3. List of helper expressions that represents assignment operation: // \code // DstExprs = SrcExprs; // \endcode // Required for proper codegen of propagation of master's thread values of // threadprivate variables to local instances of that variables in other // implicit threads. friend class OMPClauseReader; /// \brief Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. /// OMPCopyinClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPCopyinClause>(OMPC_copyin, StartLoc, LParenLoc, EndLoc, N) {} /// \brief Build an empty clause. /// /// \param N Number of variables. /// explicit OMPCopyinClause(unsigned N) : OMPVarListClause<OMPCopyinClause>(OMPC_copyin, SourceLocation(), SourceLocation(), SourceLocation(), N) {} /// \brief Set list of helper expressions, required for proper codegen of the /// clause. These expressions represent source expression in the final /// assignment statement performed by the copyin clause. void setSourceExprs(ArrayRef<Expr *> SrcExprs); /// \brief Get the list of helper source expressions. MutableArrayRef<Expr *> getSourceExprs() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getSourceExprs() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } /// \brief Set list of helper expressions, required for proper codegen of the /// clause. These expressions represent destination expression in the final /// assignment statement performed by the copyin clause. void setDestinationExprs(ArrayRef<Expr *> DstExprs); /// \brief Get the list of helper destination expressions. 
MutableArrayRef<Expr *> getDestinationExprs() { return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size()); } ArrayRef<const Expr *> getDestinationExprs() const { return llvm::makeArrayRef(getSourceExprs().end(), varlist_size()); } /// \brief Set list of helper assignment expressions, required for proper /// codegen of the clause. These expressions are assignment expressions that /// assign source helper expressions to destination helper expressions /// correspondingly. void setAssignmentOps(ArrayRef<Expr *> AssignmentOps); /// \brief Get the list of helper assignment expressions. MutableArrayRef<Expr *> getAssignmentOps() { return MutableArrayRef<Expr *>(getDestinationExprs().end(), varlist_size()); } ArrayRef<const Expr *> getAssignmentOps() const { return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size()); } public: /// \brief Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. /// \param SrcExprs List of helper expressions for proper generation of /// assignment operation required for copyin clause. This list represents /// sources. /// \param DstExprs List of helper expressions for proper generation of /// assignment operation required for copyin clause. This list represents /// destinations. /// \param AssignmentOps List of helper expressions that represents assignment /// operation: /// \code /// DstExprs = SrcExprs; /// \endcode /// Required for proper codegen of propagation of master's thread values of /// threadprivate variables to local instances of that variables in other /// implicit threads. /// static OMPCopyinClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs, ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps); /// \brief Creates an empty clause with \a N variables. /// /// \param C AST context. /// \param N The number of variables. 
/// static OMPCopyinClause *CreateEmpty(const ASTContext &C, unsigned N); typedef MutableArrayRef<Expr *>::iterator helper_expr_iterator; typedef ArrayRef<const Expr *>::iterator helper_expr_const_iterator; typedef llvm::iterator_range<helper_expr_iterator> helper_expr_range; typedef llvm::iterator_range<helper_expr_const_iterator> helper_expr_const_range; helper_expr_const_range source_exprs() const { return helper_expr_const_range(getSourceExprs().begin(), getSourceExprs().end()); } helper_expr_range source_exprs() { return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end()); } helper_expr_const_range destination_exprs() const { return helper_expr_const_range(getDestinationExprs().begin(), getDestinationExprs().end()); } helper_expr_range destination_exprs() { return helper_expr_range(getDestinationExprs().begin(), getDestinationExprs().end()); } helper_expr_const_range assignment_ops() const { return helper_expr_const_range(getAssignmentOps().begin(), getAssignmentOps().end()); } helper_expr_range assignment_ops() { return helper_expr_range(getAssignmentOps().begin(), getAssignmentOps().end()); } StmtRange children() { return StmtRange(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_copyin; } }; /// \brief This represents clause 'copyprivate' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp single copyprivate(a,b) /// \endcode /// In this example directive '#pragma omp single' has clause 'copyprivate' /// with the variables 'a' and 'b'. /// class OMPCopyprivateClause : public OMPVarListClause<OMPCopyprivateClause> { friend class OMPClauseReader; /// \brief Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. /// OMPCopyprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPCopyprivateClause>(OMPC_copyprivate, StartLoc, LParenLoc, EndLoc, N) {} /// \brief Build an empty clause. /// /// \param N Number of variables. /// explicit OMPCopyprivateClause(unsigned N) : OMPVarListClause<OMPCopyprivateClause>( OMPC_copyprivate, SourceLocation(), SourceLocation(), SourceLocation(), N) {} /// \brief Set list of helper expressions, required for proper codegen of the /// clause. These expressions represent source expression in the final /// assignment statement performed by the copyprivate clause. void setSourceExprs(ArrayRef<Expr *> SrcExprs); /// \brief Get the list of helper source expressions. MutableArrayRef<Expr *> getSourceExprs() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getSourceExprs() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } /// \brief Set list of helper expressions, required for proper codegen of the /// clause. These expressions represent destination expression in the final /// assignment statement performed by the copyprivate clause. void setDestinationExprs(ArrayRef<Expr *> DstExprs); /// \brief Get the list of helper destination expressions. 
MutableArrayRef<Expr *> getDestinationExprs() { return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size()); } ArrayRef<const Expr *> getDestinationExprs() const { return llvm::makeArrayRef(getSourceExprs().end(), varlist_size()); } /// \brief Set list of helper assignment expressions, required for proper /// codegen of the clause. These expressions are assignment expressions that /// assign source helper expressions to destination helper expressions /// correspondingly. void setAssignmentOps(ArrayRef<Expr *> AssignmentOps); /// \brief Get the list of helper assignment expressions. MutableArrayRef<Expr *> getAssignmentOps() { return MutableArrayRef<Expr *>(getDestinationExprs().end(), varlist_size()); } ArrayRef<const Expr *> getAssignmentOps() const { return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size()); } public: /// \brief Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. /// \param SrcExprs List of helper expressions for proper generation of /// assignment operation required for copyprivate clause. This list represents /// sources. /// \param DstExprs List of helper expressions for proper generation of /// assignment operation required for copyprivate clause. This list represents /// destinations. /// \param AssignmentOps List of helper expressions that represents assignment /// operation: /// \code /// DstExprs = SrcExprs; /// \endcode /// Required for proper codegen of final assignment performed by the /// copyprivate clause. /// static OMPCopyprivateClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs, ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps); /// \brief Creates an empty clause with \a N variables. /// /// \param C AST context. /// \param N The number of variables. /// static OMPCopyprivateClause *CreateEmpty(const ASTContext &C, unsigned N); typedef MutableArrayRef<Expr *>::iterator helper_expr_iterator; typedef ArrayRef<const Expr *>::iterator helper_expr_const_iterator; typedef llvm::iterator_range<helper_expr_iterator> helper_expr_range; typedef llvm::iterator_range<helper_expr_const_iterator> helper_expr_const_range; helper_expr_const_range source_exprs() const { return helper_expr_const_range(getSourceExprs().begin(), getSourceExprs().end()); } helper_expr_range source_exprs() { return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end()); } helper_expr_const_range destination_exprs() const { return helper_expr_const_range(getDestinationExprs().begin(), getDestinationExprs().end()); } helper_expr_range destination_exprs() { return helper_expr_range(getDestinationExprs().begin(), getDestinationExprs().end()); } helper_expr_const_range assignment_ops() const { return helper_expr_const_range(getAssignmentOps().begin(), getAssignmentOps().end()); } helper_expr_range assignment_ops() { return helper_expr_range(getAssignmentOps().begin(), getAssignmentOps().end()); } StmtRange children() { return StmtRange(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_copyprivate; } }; /// \brief This represents implicit clause 'flush' for the '#pragma omp flush' /// directive. 
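/// \brief Editorial note (illustrative, not part of the original header): the
/// 'copyin' and 'copyprivate' clauses above keep three parallel helper arrays
/// (sources, destinations, assignment ops) that are meant to be walked in
/// lock-step. A minimal sketch, assuming \c CC is a valid OMPCopyprivateClause
/// taken from an '#pragma omp single' directive:
/// \code
/// auto Src = CC->source_exprs().begin();
/// auto Dst = CC->destination_exprs().begin();
/// for (const Expr *Assign : CC->assignment_ops()) {
///   // Codegen emits 'Assign' (DstExpr = SrcExpr) with *Src and *Dst bound
///   // to the actual source and destination of the copy.
///   ++Src; ++Dst;
/// }
/// \endcode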
/// This clause does not exist by itself, it can be only as a part of 'omp /// flush' directive. This clause is introduced to keep the original structure /// of \a OMPExecutableDirective class and its derivatives and to use the /// existing infrastructure of clauses with the list of variables. /// /// \code /// #pragma omp flush(a,b) /// \endcode /// In this example directive '#pragma omp flush' has implicit clause 'flush' /// with the variables 'a' and 'b'. /// class OMPFlushClause : public OMPVarListClause<OMPFlushClause> { /// \brief Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. /// OMPFlushClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPFlushClause>(OMPC_flush, StartLoc, LParenLoc, EndLoc, N) {} /// \brief Build an empty clause. /// /// \param N Number of variables. /// explicit OMPFlushClause(unsigned N) : OMPVarListClause<OMPFlushClause>(OMPC_flush, SourceLocation(), SourceLocation(), SourceLocation(), N) {} public: /// \brief Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. /// static OMPFlushClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL); /// \brief Creates an empty clause with \a N variables. /// /// \param C AST context. /// \param N The number of variables. /// static OMPFlushClause *CreateEmpty(const ASTContext &C, unsigned N); StmtRange children() { return StmtRange(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_flush; } }; } // end namespace clang #endif
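// ---------------------------------------------------------------------------
// Editorial usage sketch (not part of OpenMPClause.h). A minimal example of
// how client code might inspect the clause classes declared above; it would
// normally live in its own .cpp. The helper name countPrivateVars and the
// incoming Clause pointer (usually obtained from an OMPExecutableDirective)
// are hypothetical.
// ---------------------------------------------------------------------------
#include "clang/AST/OpenMPClause.h"
#include "llvm/Support/Casting.h"

// Returns the number of variables listed in a 'private(...)' clause, or 0 if
// the clause is of a different kind. The classof() defined on each clause
// class is what lets llvm::dyn_cast dispatch on the clause kind here.
static unsigned countPrivateVars(const clang::OMPClause *Clause) {
  if (const auto *PC = llvm::dyn_cast<clang::OMPPrivateClause>(Clause))
    return PC->varlist_size();
  return 0;
}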
//===- OpenMPClause.h - Classes for OpenMP clauses --------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// /// \file /// \brief This file defines OpenMP AST classes for clauses. /// There are clauses for executable directives, clauses for declarative /// directives and clauses which can be used in both kinds of directives. /// //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_AST_OPENMPCLAUSE_H #define LLVM_CLANG_AST_OPENMPCLAUSE_H #include "clang/AST/Expr.h" #include "clang/AST/Stmt.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/SourceLocation.h" namespace clang { //===----------------------------------------------------------------------===// // AST classes for clauses. //===----------------------------------------------------------------------===// /// \brief This is a basic class for representing single OpenMP clause. /// class OMPClause { /// \brief Starting location of the clause (the clause keyword). SourceLocation StartLoc; /// \brief Ending location of the clause. SourceLocation EndLoc; /// \brief Kind of the clause. OpenMPClauseKind Kind; protected: OMPClause(OpenMPClauseKind K, SourceLocation StartLoc, SourceLocation EndLoc) : StartLoc(StartLoc), EndLoc(EndLoc), Kind(K) {} public: /// \brief Returns the starting location of the clause. SourceLocation getLocStart() const { return StartLoc; } /// \brief Returns the ending location of the clause. SourceLocation getLocEnd() const { return EndLoc; } /// \brief Sets the starting location of the clause. void setLocStart(SourceLocation Loc) { StartLoc = Loc; } /// \brief Sets the ending location of the clause. void setLocEnd(SourceLocation Loc) { EndLoc = Loc; } /// \brief Returns kind of OpenMP clause (private, shared, reduction, etc.). OpenMPClauseKind getClauseKind() const { return Kind; } bool isImplicit() const { return StartLoc.isInvalid(); } StmtRange children(); ConstStmtRange children() const { return const_cast<OMPClause *>(this)->children(); } static bool classof(const OMPClause *) { return true; } }; /// \brief This represents clauses with the list of variables like 'private', /// 'firstprivate', 'copyin', 'shared', or 'reduction' clauses in the /// ' template <class T> class OMPVarListClause : public OMPClause { friend class OMPClauseReader; /// \brief Location of '('. SourceLocation LParenLoc; /// \brief Number of variables in the list. unsigned NumVars; protected: /// \brief Fetches list of variables associated with this clause. MutableArrayRef<Expr *> getVarRefs() { return MutableArrayRef<Expr *>( reinterpret_cast<Expr **>( reinterpret_cast<char *>(this) + llvm::RoundUpToAlignment(sizeof(T), llvm::alignOf<Expr *>())), NumVars); } /// \brief Sets the list of variables for this clause. void setVarRefs(ArrayRef<Expr *> VL) { assert(VL.size() == NumVars && "Number of variables is not the same as the preallocated buffer"); std::copy( VL.begin(), VL.end(), reinterpret_cast<Expr **>( reinterpret_cast<char *>(this) + llvm::RoundUpToAlignment(sizeof(T), llvm::alignOf<Expr *>()))); } /// \brief Build a clause with \a N variables /// /// \param K Kind of the clause. /// \param StartLoc Starting location of the clause (the clause keyword). /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. 
/// \param N Number of the variables in the clause. /// OMPVarListClause(OpenMPClauseKind K, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPClause(K, StartLoc, EndLoc), LParenLoc(LParenLoc), NumVars(N) {} public: typedef MutableArrayRef<Expr *>::iterator varlist_iterator; typedef ArrayRef<const Expr *>::iterator varlist_const_iterator; typedef llvm::iterator_range<varlist_iterator> varlist_range; typedef llvm::iterator_range<varlist_const_iterator> varlist_const_range; unsigned varlist_size() const { return NumVars; } bool varlist_empty() const { return NumVars == 0; } varlist_range varlists() { return varlist_range(varlist_begin(), varlist_end()); } varlist_const_range varlists() const { return varlist_const_range(varlist_begin(), varlist_end()); } varlist_iterator varlist_begin() { return getVarRefs().begin(); } varlist_iterator varlist_end() { return getVarRefs().end(); } varlist_const_iterator varlist_begin() const { return getVarRefs().begin(); } varlist_const_iterator varlist_end() const { return getVarRefs().end(); } /// \brief Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// \brief Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// \brief Fetches list of all variables in the clause. ArrayRef<const Expr *> getVarRefs() const { return llvm::makeArrayRef( reinterpret_cast<const Expr *const *>( reinterpret_cast<const char *>(this) + llvm::RoundUpToAlignment(sizeof(T), llvm::alignOf<const Expr *>())), NumVars); } }; /// \brief This represents 'if' clause in the ' /// /// \code /// /// \endcode /// In this example directive ' /// clause with condition 'a > 5'. /// class OMPIfClause : public OMPClause { friend class OMPClauseReader; /// \brief Location of '('. SourceLocation LParenLoc; /// \brief Condition of the 'if' clause. Stmt *Condition; /// \brief Set condition. /// void setCondition(Expr *Cond) { Condition = Cond; } public: /// \brief Build 'if' clause with condition \a Cond. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param Cond Condition of the clause. /// \param EndLoc Ending location of the clause. /// OMPIfClause(Expr *Cond, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(OMPC_if, StartLoc, EndLoc), LParenLoc(LParenLoc), Condition(Cond) {} /// \brief Build an empty clause. /// OMPIfClause() : OMPClause(OMPC_if, SourceLocation(), SourceLocation()), LParenLoc(SourceLocation()), Condition(nullptr) {} /// \brief Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// \brief Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// \brief Returns condition. Expr *getCondition() const { return cast_or_null<Expr>(Condition); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_if; } StmtRange children() { return StmtRange(&Condition, &Condition + 1); } }; /// \brief This represents 'final' clause in the ' /// /// \code /// /// \endcode /// In this example directive ' /// clause with condition 'a > 5'. /// class OMPFinalClause : public OMPClause { friend class OMPClauseReader; /// \brief Location of '('. SourceLocation LParenLoc; /// \brief Condition of the 'if' clause. Stmt *Condition; /// \brief Set condition. /// void setCondition(Expr *Cond) { Condition = Cond; } public: /// \brief Build 'final' clause with condition \a Cond. 
/// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param Cond Condition of the clause. /// \param EndLoc Ending location of the clause. /// OMPFinalClause(Expr *Cond, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(OMPC_final, StartLoc, EndLoc), LParenLoc(LParenLoc), Condition(Cond) {} /// \brief Build an empty clause. /// OMPFinalClause() : OMPClause(OMPC_final, SourceLocation(), SourceLocation()), LParenLoc(SourceLocation()), Condition(nullptr) {} /// \brief Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// \brief Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// \brief Returns condition. Expr *getCondition() const { return cast_or_null<Expr>(Condition); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_final; } StmtRange children() { return StmtRange(&Condition, &Condition + 1); } }; /// \brief This represents 'num_threads' clause in the ' /// directive. /// /// \code /// /// \endcode /// In this example directive ' /// clause with number of threads '6'. /// class OMPNumThreadsClause : public OMPClause { friend class OMPClauseReader; /// \brief Location of '('. SourceLocation LParenLoc; /// \brief Condition of the 'num_threads' clause. Stmt *NumThreads; /// \brief Set condition. /// void setNumThreads(Expr *NThreads) { NumThreads = NThreads; } public: /// \brief Build 'num_threads' clause with condition \a NumThreads. /// /// \param NumThreads Number of threads for the construct. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// OMPNumThreadsClause(Expr *NumThreads, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(OMPC_num_threads, StartLoc, EndLoc), LParenLoc(LParenLoc), NumThreads(NumThreads) {} /// \brief Build an empty clause. /// OMPNumThreadsClause() : OMPClause(OMPC_num_threads, SourceLocation(), SourceLocation()), LParenLoc(SourceLocation()), NumThreads(nullptr) {} /// \brief Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// \brief Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// \brief Returns number of threads. Expr *getNumThreads() const { return cast_or_null<Expr>(NumThreads); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_num_threads; } StmtRange children() { return StmtRange(&NumThreads, &NumThreads + 1); } }; /// \brief This represents 'safelen' clause in the ' /// directive. /// /// \code /// /// \endcode /// In this example directive ' /// with single expression '4'. /// If the safelen clause is used then no two iterations executed /// concurrently with SIMD instructions can have a greater distance /// in the logical iteration space than its value. The parameter of /// the safelen clause must be a constant positive integer expression. /// class OMPSafelenClause : public OMPClause { friend class OMPClauseReader; /// \brief Location of '('. SourceLocation LParenLoc; /// \brief Safe iteration space distance. Stmt *Safelen; /// \brief Set safelen. void setSafelen(Expr *Len) { Safelen = Len; } public: /// \brief Build 'safelen' clause. /// /// \param Len Expression associated with this clause. /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. 
/// OMPSafelenClause(Expr *Len, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(OMPC_safelen, StartLoc, EndLoc), LParenLoc(LParenLoc), Safelen(Len) {} /// \brief Build an empty clause. /// explicit OMPSafelenClause() : OMPClause(OMPC_safelen, SourceLocation(), SourceLocation()), LParenLoc(SourceLocation()), Safelen(nullptr) {} /// \brief Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// \brief Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// \brief Return safe iteration space distance. Expr *getSafelen() const { return cast_or_null<Expr>(Safelen); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_safelen; } StmtRange children() { return StmtRange(&Safelen, &Safelen + 1); } }; /// \brief This represents 'collapse' clause in the ' /// directive. /// /// \code /// /// \endcode /// In this example directive ' /// with single expression '3'. /// The parameter must be a constant positive integer expression, it specifies /// the number of nested loops that should be collapsed into a single iteration /// space. /// class OMPCollapseClause : public OMPClause { friend class OMPClauseReader; /// \brief Location of '('. SourceLocation LParenLoc; /// \brief Number of for-loops. Stmt *NumForLoops; /// \brief Set the number of associated for-loops. void setNumForLoops(Expr *Num) { NumForLoops = Num; } public: /// \brief Build 'collapse' clause. /// /// \param Num Expression associated with this clause. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// OMPCollapseClause(Expr *Num, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(OMPC_collapse, StartLoc, EndLoc), LParenLoc(LParenLoc), NumForLoops(Num) {} /// \brief Build an empty clause. /// explicit OMPCollapseClause() : OMPClause(OMPC_collapse, SourceLocation(), SourceLocation()), LParenLoc(SourceLocation()), NumForLoops(nullptr) {} /// \brief Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// \brief Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// \brief Return the number of associated for-loops. Expr *getNumForLoops() const { return cast_or_null<Expr>(NumForLoops); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_collapse; } StmtRange children() { return StmtRange(&NumForLoops, &NumForLoops + 1); } }; /// \brief This represents 'default' clause in the ' /// /// \code /// /// \endcode /// In this example directive ' /// clause with kind 'shared'. /// class OMPDefaultClause : public OMPClause { friend class OMPClauseReader; /// \brief Location of '('. SourceLocation LParenLoc; /// \brief A kind of the 'default' clause. OpenMPDefaultClauseKind Kind; /// \brief Start location of the kind in source code. SourceLocation KindKwLoc; /// \brief Set kind of the clauses. /// /// \param K Argument of clause. /// void setDefaultKind(OpenMPDefaultClauseKind K) { Kind = K; } /// \brief Set argument location. /// /// \param KLoc Argument location. /// void setDefaultKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; } public: /// \brief Build 'default' clause with argument \a A ('none' or 'shared'). /// /// \param A Argument of the clause ('none' or 'shared'). /// \param ALoc Starting location of the argument. /// \param StartLoc Starting location of the clause. 
/// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// OMPDefaultClause(OpenMPDefaultClauseKind A, SourceLocation ALoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(OMPC_default, StartLoc, EndLoc), LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {} /// \brief Build an empty clause. /// OMPDefaultClause() : OMPClause(OMPC_default, SourceLocation(), SourceLocation()), LParenLoc(SourceLocation()), Kind(OMPC_DEFAULT_unknown), KindKwLoc(SourceLocation()) {} /// \brief Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// \brief Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// \brief Returns kind of the clause. OpenMPDefaultClauseKind getDefaultKind() const { return Kind; } /// \brief Returns location of clause kind. SourceLocation getDefaultKindKwLoc() const { return KindKwLoc; } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_default; } StmtRange children() { return StmtRange(); } }; /// \brief This represents 'proc_bind' clause in the ' /// directive. /// /// \code /// /// \endcode /// In this example directive ' /// clause with kind 'master'. /// class OMPProcBindClause : public OMPClause { friend class OMPClauseReader; /// \brief Location of '('. SourceLocation LParenLoc; /// \brief A kind of the 'proc_bind' clause. OpenMPProcBindClauseKind Kind; /// \brief Start location of the kind in source code. SourceLocation KindKwLoc; /// \brief Set kind of the clause. /// /// \param K Kind of clause. /// void setProcBindKind(OpenMPProcBindClauseKind K) { Kind = K; } /// \brief Set clause kind location. /// /// \param KLoc Kind location. /// void setProcBindKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; } public: /// \brief Build 'proc_bind' clause with argument \a A ('master', 'close' or /// 'spread'). /// /// \param A Argument of the clause ('master', 'close' or 'spread'). /// \param ALoc Starting location of the argument. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// OMPProcBindClause(OpenMPProcBindClauseKind A, SourceLocation ALoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(OMPC_proc_bind, StartLoc, EndLoc), LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {} /// \brief Build an empty clause. /// OMPProcBindClause() : OMPClause(OMPC_proc_bind, SourceLocation(), SourceLocation()), LParenLoc(SourceLocation()), Kind(OMPC_PROC_BIND_unknown), KindKwLoc(SourceLocation()) {} /// \brief Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// \brief Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// \brief Returns kind of the clause. OpenMPProcBindClauseKind getProcBindKind() const { return Kind; } /// \brief Returns location of clause kind. SourceLocation getProcBindKindKwLoc() const { return KindKwLoc; } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_proc_bind; } StmtRange children() { return StmtRange(); } }; /// \brief This represents 'schedule' clause in the ' /// /// \code /// /// \endcode /// In this example directive ' /// arguments 'static' and '3'. /// class OMPScheduleClause : public OMPClause { friend class OMPClauseReader; /// \brief Location of '('. SourceLocation LParenLoc; /// \brief A kind of the 'schedule' clause. 
OpenMPScheduleClauseKind Kind; /// \brief Start location of the schedule ind in source code. SourceLocation KindLoc; /// \brief Location of ',' (if any). SourceLocation CommaLoc; /// \brief Chunk size. Stmt *ChunkSize; /// \brief Set schedule kind. /// /// \param K Schedule kind. /// void setScheduleKind(OpenMPScheduleClauseKind K) { Kind = K; } /// \brief Sets the location of '('. /// /// \param Loc Location of '('. /// void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// \brief Set schedule kind start location. /// /// \param KLoc Schedule kind location. /// void setScheduleKindLoc(SourceLocation KLoc) { KindLoc = KLoc; } /// \brief Set location of ','. /// /// \param Loc Location of ','. /// void setCommaLoc(SourceLocation Loc) { CommaLoc = Loc; } /// \brief Set chunk size. /// /// \param E Chunk size. /// void setChunkSize(Expr *E) { ChunkSize = E; } public: /// \brief Build 'schedule' clause with schedule kind \a Kind and chunk size /// expression \a ChunkSize. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param KLoc Starting location of the argument. /// \param CommaLoc Location of ','. /// \param EndLoc Ending location of the clause. /// \param Kind Schedule kind. /// \param ChunkSize Chunk size. /// OMPScheduleClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KLoc, SourceLocation CommaLoc, SourceLocation EndLoc, OpenMPScheduleClauseKind Kind, Expr *ChunkSize) : OMPClause(OMPC_schedule, StartLoc, EndLoc), LParenLoc(LParenLoc), Kind(Kind), KindLoc(KLoc), CommaLoc(CommaLoc), ChunkSize(ChunkSize) {} /// \brief Build an empty clause. /// explicit OMPScheduleClause() : OMPClause(OMPC_schedule, SourceLocation(), SourceLocation()), Kind(OMPC_SCHEDULE_unknown), ChunkSize(nullptr) {} /// \brief Get kind of the clause. /// OpenMPScheduleClauseKind getScheduleKind() const { return Kind; } /// \brief Get location of '('. /// SourceLocation getLParenLoc() { return LParenLoc; } /// \brief Get kind location. /// SourceLocation getScheduleKindLoc() { return KindLoc; } /// \brief Get location of ','. /// SourceLocation getCommaLoc() { return CommaLoc; } /// \brief Get chunk size. /// Expr *getChunkSize() { return dyn_cast_or_null<Expr>(ChunkSize); } /// \brief Get chunk size. /// Expr *getChunkSize() const { return dyn_cast_or_null<Expr>(ChunkSize); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_schedule; } StmtRange children() { return StmtRange(&ChunkSize, &ChunkSize + 1); } }; /// \brief This represents 'ordered' clause in the ' /// /// \code /// /// \endcode /// In this example directive ' /// class OMPOrderedClause : public OMPClause { public: /// \brief Build 'ordered' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. /// OMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_ordered, StartLoc, EndLoc) {} /// \brief Build an empty clause. /// OMPOrderedClause() : OMPClause(OMPC_ordered, SourceLocation(), SourceLocation()) {} static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_ordered; } StmtRange children() { return StmtRange(); } }; /// \brief This represents 'nowait' clause in the ' /// /// \code /// /// \endcode /// In this example directive ' /// class OMPNowaitClause : public OMPClause { public: /// \brief Build 'nowait' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. 
/// OMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_nowait, StartLoc, EndLoc) {} /// \brief Build an empty clause. /// OMPNowaitClause() : OMPClause(OMPC_nowait, SourceLocation(), SourceLocation()) {} static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_nowait; } StmtRange children() { return StmtRange(); } }; /// \brief This represents 'untied' clause in the ' /// /// \code /// /// \endcode /// In this example directive ' /// class OMPUntiedClause : public OMPClause { public: /// \brief Build 'untied' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. /// OMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_untied, StartLoc, EndLoc) {} /// \brief Build an empty clause. /// OMPUntiedClause() : OMPClause(OMPC_untied, SourceLocation(), SourceLocation()) {} static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_untied; } StmtRange children() { return StmtRange(); } }; /// \brief This represents 'mergeable' clause in the ' /// directive. /// /// \code /// /// \endcode /// In this example directive ' /// class OMPMergeableClause : public OMPClause { public: /// \brief Build 'mergeable' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. /// OMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_mergeable, StartLoc, EndLoc) {} /// \brief Build an empty clause. /// OMPMergeableClause() : OMPClause(OMPC_mergeable, SourceLocation(), SourceLocation()) {} static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_mergeable; } StmtRange children() { return StmtRange(); } }; /// \brief This represents 'read' clause in the ' /// /// \code /// /// \endcode /// In this example directive ' /// class OMPReadClause : public OMPClause { public: /// \brief Build 'read' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. /// OMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_read, StartLoc, EndLoc) {} /// \brief Build an empty clause. /// OMPReadClause() : OMPClause(OMPC_read, SourceLocation(), SourceLocation()) {} static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_read; } StmtRange children() { return StmtRange(); } }; /// \brief This represents 'write' clause in the ' /// /// \code /// /// \endcode /// In this example directive ' /// class OMPWriteClause : public OMPClause { public: /// \brief Build 'write' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. /// OMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_write, StartLoc, EndLoc) {} /// \brief Build an empty clause. /// OMPWriteClause() : OMPClause(OMPC_write, SourceLocation(), SourceLocation()) {} static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_write; } StmtRange children() { return StmtRange(); } }; /// \brief This represents 'update' clause in the ' /// directive. /// /// \code /// /// \endcode /// In this example directive ' /// class OMPUpdateClause : public OMPClause { public: /// \brief Build 'update' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. 
/// OMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_update, StartLoc, EndLoc) {} /// \brief Build an empty clause. /// OMPUpdateClause() : OMPClause(OMPC_update, SourceLocation(), SourceLocation()) {} static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_update; } StmtRange children() { return StmtRange(); } }; /// \brief This represents 'capture' clause in the ' /// directive. /// /// \code /// /// \endcode /// In this example directive ' /// class OMPCaptureClause : public OMPClause { public: /// \brief Build 'capture' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. /// OMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_capture, StartLoc, EndLoc) {} /// \brief Build an empty clause. /// OMPCaptureClause() : OMPClause(OMPC_capture, SourceLocation(), SourceLocation()) {} static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_capture; } StmtRange children() { return StmtRange(); } }; /// \brief This represents 'seq_cst' clause in the ' /// directive. /// /// \code /// /// \endcode /// In this example directive ' /// class OMPSeqCstClause : public OMPClause { public: /// \brief Build 'seq_cst' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. /// OMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_seq_cst, StartLoc, EndLoc) {} /// \brief Build an empty clause. /// OMPSeqCstClause() : OMPClause(OMPC_seq_cst, SourceLocation(), SourceLocation()) {} static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_seq_cst; } StmtRange children() { return StmtRange(); } }; /// \brief This represents clause 'private' in the ' /// /// \code /// /// \endcode /// In this example directive ' /// with the variables 'a' and 'b'. /// class OMPPrivateClause : public OMPVarListClause<OMPPrivateClause> { friend class OMPClauseReader; /// \brief Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. /// OMPPrivateClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPPrivateClause>(OMPC_private, StartLoc, LParenLoc, EndLoc, N) {} /// \brief Build an empty clause. /// /// \param N Number of variables. /// explicit OMPPrivateClause(unsigned N) : OMPVarListClause<OMPPrivateClause>(OMPC_private, SourceLocation(), SourceLocation(), SourceLocation(), N) {} /// \brief Sets the list of references to private copies with initializers for /// new private variables. /// \param VL List of references. void setPrivateCopies(ArrayRef<Expr *> VL); /// \brief Gets the list of references to private copies with initializers for /// new private variables. MutableArrayRef<Expr *> getPrivateCopies() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getPrivateCopies() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } public: /// \brief Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. 
/// \param PrivateVL List of references to private copies with initializers. /// static OMPPrivateClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> PrivateVL); /// \brief Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. /// static OMPPrivateClause *CreateEmpty(const ASTContext &C, unsigned N); typedef MutableArrayRef<Expr *>::iterator private_copies_iterator; typedef ArrayRef<const Expr *>::iterator private_copies_const_iterator; typedef llvm::iterator_range<private_copies_iterator> private_copies_range; typedef llvm::iterator_range<private_copies_const_iterator> private_copies_const_range; private_copies_range private_copies() { return private_copies_range(getPrivateCopies().begin(), getPrivateCopies().end()); } private_copies_const_range private_copies() const { return private_copies_const_range(getPrivateCopies().begin(), getPrivateCopies().end()); } StmtRange children() { return StmtRange(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_private; } }; /// \brief This represents clause 'firstprivate' in the ' /// directives. /// /// \code /// /// \endcode /// In this example directive ' /// with the variables 'a' and 'b'. /// class OMPFirstprivateClause : public OMPVarListClause<OMPFirstprivateClause> { friend class OMPClauseReader; /// \brief Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. /// OMPFirstprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPFirstprivateClause>(OMPC_firstprivate, StartLoc, LParenLoc, EndLoc, N) {} /// \brief Build an empty clause. /// /// \param N Number of variables. /// explicit OMPFirstprivateClause(unsigned N) : OMPVarListClause<OMPFirstprivateClause>( OMPC_firstprivate, SourceLocation(), SourceLocation(), SourceLocation(), N) {} /// \brief Sets the list of references to private copies with initializers for /// new private variables. /// \param VL List of references. void setPrivateCopies(ArrayRef<Expr *> VL); /// \brief Gets the list of references to private copies with initializers for /// new private variables. MutableArrayRef<Expr *> getPrivateCopies() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getPrivateCopies() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } /// \brief Sets the list of references to initializer variables for new /// private variables. /// \param VL List of references. void setInits(ArrayRef<Expr *> VL); /// \brief Gets the list of references to initializer variables for new /// private variables. MutableArrayRef<Expr *> getInits() { return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size()); } ArrayRef<const Expr *> getInits() const { return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size()); } public: /// \brief Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the original variables. 
/// \param PrivateVL List of references to private copies with initializers. /// \param InitVL List of references to auto generated variables used for /// initialization of a single array element. Used if firstprivate variable is /// of array type. /// static OMPFirstprivateClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> PrivateVL, ArrayRef<Expr *> InitVL); /// \brief Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. /// static OMPFirstprivateClause *CreateEmpty(const ASTContext &C, unsigned N); typedef MutableArrayRef<Expr *>::iterator private_copies_iterator; typedef ArrayRef<const Expr *>::iterator private_copies_const_iterator; typedef llvm::iterator_range<private_copies_iterator> private_copies_range; typedef llvm::iterator_range<private_copies_const_iterator> private_copies_const_range; private_copies_range private_copies() { return private_copies_range(getPrivateCopies().begin(), getPrivateCopies().end()); } private_copies_const_range private_copies() const { return private_copies_const_range(getPrivateCopies().begin(), getPrivateCopies().end()); } typedef MutableArrayRef<Expr *>::iterator inits_iterator; typedef ArrayRef<const Expr *>::iterator inits_const_iterator; typedef llvm::iterator_range<inits_iterator> inits_range; typedef llvm::iterator_range<inits_const_iterator> inits_const_range; inits_range inits() { return inits_range(getInits().begin(), getInits().end()); } inits_const_range inits() const { return inits_const_range(getInits().begin(), getInits().end()); } StmtRange children() { return StmtRange(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_firstprivate; } }; /// \brief This represents clause 'lastprivate' in the ' /// directives. /// /// \code /// /// \endcode /// In this example directive ' /// with the variables 'a' and 'b'. class OMPLastprivateClause : public OMPVarListClause<OMPLastprivateClause> { // There are 4 additional tail-allocated arrays at the end of the class: // 1. Contains list of pseudo variables with the default initialization for // each non-firstprivate variables. Used in codegen for initialization of // lastprivate copies. // 2. List of helper expressions for proper generation of assignment operation // required for lastprivate clause. This list represents private variables // (for arrays, single array element). // 3. List of helper expressions for proper generation of assignment operation // required for lastprivate clause. This list represents original variables // (for arrays, single array element). // 4. List of helper expressions that represents assignment operation: // \code // DstExprs = SrcExprs; // \endcode // Required for proper codegen of final assignment performed by the // lastprivate clause. // friend class OMPClauseReader; /// \brief Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. /// OMPLastprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPLastprivateClause>(OMPC_lastprivate, StartLoc, LParenLoc, EndLoc, N) {} /// \brief Build an empty clause. /// /// \param N Number of variables. 
/// explicit OMPLastprivateClause(unsigned N) : OMPVarListClause<OMPLastprivateClause>( OMPC_lastprivate, SourceLocation(), SourceLocation(), SourceLocation(), N) {} /// \brief Get the list of helper expressions for initialization of private /// copies for lastprivate variables. MutableArrayRef<Expr *> getPrivateCopies() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getPrivateCopies() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } /// \brief Set list of helper expressions, required for proper codegen of the /// clause. These expressions represent private variables (for arrays, single /// array element) in the final assignment statement performed by the /// lastprivate clause. void setSourceExprs(ArrayRef<Expr *> SrcExprs); /// \brief Get the list of helper source expressions. MutableArrayRef<Expr *> getSourceExprs() { return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size()); } ArrayRef<const Expr *> getSourceExprs() const { return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size()); } /// \brief Set list of helper expressions, required for proper codegen of the /// clause. These expressions represent original variables (for arrays, single /// array element) in the final assignment statement performed by the /// lastprivate clause. void setDestinationExprs(ArrayRef<Expr *> DstExprs); /// \brief Get the list of helper destination expressions. MutableArrayRef<Expr *> getDestinationExprs() { return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size()); } ArrayRef<const Expr *> getDestinationExprs() const { return llvm::makeArrayRef(getSourceExprs().end(), varlist_size()); } /// \brief Set list of helper assignment expressions, required for proper /// codegen of the clause. These expressions are assignment expressions that /// assign private copy of the variable to original variable. void setAssignmentOps(ArrayRef<Expr *> AssignmentOps); /// \brief Get the list of helper assignment expressions. MutableArrayRef<Expr *> getAssignmentOps() { return MutableArrayRef<Expr *>(getDestinationExprs().end(), varlist_size()); } ArrayRef<const Expr *> getAssignmentOps() const { return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size()); } public: /// \brief Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. /// \param SrcExprs List of helper expressions for proper generation of /// assignment operation required for lastprivate clause. This list represents /// private variables (for arrays, single array element). /// \param DstExprs List of helper expressions for proper generation of /// assignment operation required for lastprivate clause. This list represents /// original variables (for arrays, single array element). /// \param AssignmentOps List of helper expressions that represents assignment /// operation: /// \code /// DstExprs = SrcExprs; /// \endcode /// Required for proper codegen of final assignment performed by the /// lastprivate clause. /// /// static OMPLastprivateClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs, ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps); /// \brief Creates an empty clause with the place for \a N variables. 
/// /// \param C AST context. /// \param N The number of variables. /// static OMPLastprivateClause *CreateEmpty(const ASTContext &C, unsigned N); typedef MutableArrayRef<Expr *>::iterator helper_expr_iterator; typedef ArrayRef<const Expr *>::iterator helper_expr_const_iterator; typedef llvm::iterator_range<helper_expr_iterator> helper_expr_range; typedef llvm::iterator_range<helper_expr_const_iterator> helper_expr_const_range; /// \brief Set list of helper expressions, required for generation of private /// copies of original lastprivate variables. void setPrivateCopies(ArrayRef<Expr *> PrivateCopies); helper_expr_const_range private_copies() const { return helper_expr_const_range(getPrivateCopies().begin(), getPrivateCopies().end()); } helper_expr_range private_copies() { return helper_expr_range(getPrivateCopies().begin(), getPrivateCopies().end()); } helper_expr_const_range source_exprs() const { return helper_expr_const_range(getSourceExprs().begin(), getSourceExprs().end()); } helper_expr_range source_exprs() { return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end()); } helper_expr_const_range destination_exprs() const { return helper_expr_const_range(getDestinationExprs().begin(), getDestinationExprs().end()); } helper_expr_range destination_exprs() { return helper_expr_range(getDestinationExprs().begin(), getDestinationExprs().end()); } helper_expr_const_range assignment_ops() const { return helper_expr_const_range(getAssignmentOps().begin(), getAssignmentOps().end()); } helper_expr_range assignment_ops() { return helper_expr_range(getAssignmentOps().begin(), getAssignmentOps().end()); } StmtRange children() { return StmtRange(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_lastprivate; } }; /// \brief This represents clause 'shared' in the ' /// /// \code /// /// \endcode /// In this example directive ' /// with the variables 'a' and 'b'. /// class OMPSharedClause : public OMPVarListClause<OMPSharedClause> { /// \brief Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. /// OMPSharedClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPSharedClause>(OMPC_shared, StartLoc, LParenLoc, EndLoc, N) {} /// \brief Build an empty clause. /// /// \param N Number of variables. /// explicit OMPSharedClause(unsigned N) : OMPVarListClause<OMPSharedClause>(OMPC_shared, SourceLocation(), SourceLocation(), SourceLocation(), N) {} public: /// \brief Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. /// static OMPSharedClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL); /// \brief Creates an empty clause with \a N variables. /// /// \param C AST context. /// \param N The number of variables. 
/// static OMPSharedClause *CreateEmpty(const ASTContext &C, unsigned N); StmtRange children() { return StmtRange(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_shared; } }; /// \brief This represents clause 'reduction' in the ' /// directives. /// /// \code /// /// \endcode /// In this example directive ' /// with operator '+' and the variables 'a' and 'b'. /// class OMPReductionClause : public OMPVarListClause<OMPReductionClause> { friend class OMPClauseReader; /// \brief Location of ':'. SourceLocation ColonLoc; /// \brief Nested name specifier for C++. NestedNameSpecifierLoc QualifierLoc; /// \brief Name of custom operator. DeclarationNameInfo NameInfo; /// \brief Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param ColonLoc Location of ':'. /// \param N Number of the variables in the clause. /// \param QualifierLoc The nested-name qualifier with location information /// \param NameInfo The full name info for reduction identifier. /// OMPReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, unsigned N, NestedNameSpecifierLoc QualifierLoc, const DeclarationNameInfo &NameInfo) : OMPVarListClause<OMPReductionClause>(OMPC_reduction, StartLoc, LParenLoc, EndLoc, N), ColonLoc(ColonLoc), QualifierLoc(QualifierLoc), NameInfo(NameInfo) {} /// \brief Build an empty clause. /// /// \param N Number of variables. /// explicit OMPReductionClause(unsigned N) : OMPVarListClause<OMPReductionClause>(OMPC_reduction, SourceLocation(), SourceLocation(), SourceLocation(), N), ColonLoc(), QualifierLoc(), NameInfo() {} /// \brief Sets location of ':' symbol in clause. void setColonLoc(SourceLocation CL) { ColonLoc = CL; } /// \brief Sets the name info for specified reduction identifier. void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; } /// \brief Sets the nested name specifier. void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; } /// \brief Set list of helper expressions, required for proper codegen of the /// clause. These expressions represent LHS expression in the final /// reduction expression performed by the reduction clause. void setLHSExprs(ArrayRef<Expr *> LHSExprs); /// \brief Get the list of helper LHS expressions. MutableArrayRef<Expr *> getLHSExprs() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getLHSExprs() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } /// \brief Set list of helper expressions, required for proper codegen of the /// clause. These expressions represent RHS expression in the final /// reduction expression performed by the reduction clause. /// Also, variables in these expressions are used for proper initialization of /// reduction copies. void setRHSExprs(ArrayRef<Expr *> RHSExprs); /// \brief Get the list of helper destination expressions. MutableArrayRef<Expr *> getRHSExprs() { return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size()); } ArrayRef<const Expr *> getRHSExprs() const { return llvm::makeArrayRef(getLHSExprs().end(), varlist_size()); } /// \brief Set list of helper reduction expressions, required for proper /// codegen of the clause. 
These expressions are binary expressions or /// operator/custom reduction call that calculates new value from source /// helper expressions to destination helper expressions. void setReductionOps(ArrayRef<Expr *> ReductionOps); /// \brief Get the list of helper reduction expressions. MutableArrayRef<Expr *> getReductionOps() { return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size()); } ArrayRef<const Expr *> getReductionOps() const { return llvm::makeArrayRef(getRHSExprs().end(), varlist_size()); } public: /// \brief Creates clause with a list of variables \a VL. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param ColonLoc Location of ':'. /// \param EndLoc Ending location of the clause. /// \param VL The variables in the clause. /// \param QualifierLoc The nested-name qualifier with location information /// \param NameInfo The full name info for reduction identifier. /// \param LHSExprs List of helper expressions for proper generation of /// assignment operation required for copyprivate clause. This list represents /// LHSs of the reduction expressions. /// \param RHSExprs List of helper expressions for proper generation of /// assignment operation required for copyprivate clause. This list represents /// RHSs of the reduction expressions. /// Also, variables in these expressions are used for proper initialization of /// reduction copies. /// \param ReductionOps List of helper expressions that represents reduction /// expressions: /// \code /// LHSExprs binop RHSExprs; /// operator binop(LHSExpr, RHSExpr); /// <CutomReduction>(LHSExpr, RHSExpr); /// \endcode /// Required for proper codegen of final reduction operation performed by the /// reduction clause. /// static OMPReductionClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, NestedNameSpecifierLoc QualifierLoc, const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs, ArrayRef<Expr *> ReductionOps); /// \brief Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. /// static OMPReductionClause *CreateEmpty(const ASTContext &C, unsigned N); /// \brief Gets location of ':' symbol in clause. SourceLocation getColonLoc() const { return ColonLoc; } /// \brief Gets the name info for specified reduction identifier. const DeclarationNameInfo &getNameInfo() const { return NameInfo; } /// \brief Gets the nested name specifier. 
NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; } typedef MutableArrayRef<Expr *>::iterator helper_expr_iterator; typedef ArrayRef<const Expr *>::iterator helper_expr_const_iterator; typedef llvm::iterator_range<helper_expr_iterator> helper_expr_range; typedef llvm::iterator_range<helper_expr_const_iterator> helper_expr_const_range; helper_expr_const_range lhs_exprs() const { return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end()); } helper_expr_range lhs_exprs() { return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end()); } helper_expr_const_range rhs_exprs() const { return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end()); } helper_expr_range rhs_exprs() { return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end()); } helper_expr_const_range reduction_ops() const { return helper_expr_const_range(getReductionOps().begin(), getReductionOps().end()); } helper_expr_range reduction_ops() { return helper_expr_range(getReductionOps().begin(), getReductionOps().end()); } StmtRange children() { return StmtRange(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_reduction; } }; /// \brief This represents clause 'linear' in the ' /// directives. /// /// \code /// /// \endcode /// In this example directive ' /// with variables 'a', 'b' and linear step '2'. /// class OMPLinearClause : public OMPVarListClause<OMPLinearClause> { friend class OMPClauseReader; /// \brief Location of ':'. SourceLocation ColonLoc; /// \brief Sets the linear step for clause. void setStep(Expr *Step) { *(getFinals().end()) = Step; } /// \brief Sets the expression to calculate linear step for clause. void setCalcStep(Expr *CalcStep) { *(getFinals().end() + 1) = CalcStep; } /// \brief Build 'linear' clause with given number of variables \a NumVars. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param ColonLoc Location of ':'. /// \param EndLoc Ending location of the clause. /// \param NumVars Number of variables. /// OMPLinearClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, unsigned NumVars) : OMPVarListClause<OMPLinearClause>(OMPC_linear, StartLoc, LParenLoc, EndLoc, NumVars), ColonLoc(ColonLoc) {} /// \brief Build an empty clause. /// /// \param NumVars Number of variables. /// explicit OMPLinearClause(unsigned NumVars) : OMPVarListClause<OMPLinearClause>(OMPC_linear, SourceLocation(), SourceLocation(), SourceLocation(), NumVars), ColonLoc(SourceLocation()) {} /// \brief Gets the list of initial values for linear variables. /// /// There are NumVars expressions with initial values allocated after the /// varlist, they are followed by NumVars update expressions (used to update /// the linear variable's value on current iteration) and they are followed by /// NumVars final expressions (used to calculate the linear variable's /// value after the loop body). 
After these lists, there are 2 helper /// expressions - linear step and a helper to calculate it before the /// loop body (used when the linear step is not constant): /// /// { Vars[] /* in OMPVarListClause */; Inits[]; Updates[]; Finals[]; /// Step; CalcStep; } /// MutableArrayRef<Expr *> getInits() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getInits() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } /// \brief Sets the list of update expressions for linear variables. MutableArrayRef<Expr *> getUpdates() { return MutableArrayRef<Expr *>(getInits().end(), varlist_size()); } ArrayRef<const Expr *> getUpdates() const { return llvm::makeArrayRef(getInits().end(), varlist_size()); } /// \brief Sets the list of final update expressions for linear variables. MutableArrayRef<Expr *> getFinals() { return MutableArrayRef<Expr *>(getUpdates().end(), varlist_size()); } ArrayRef<const Expr *> getFinals() const { return llvm::makeArrayRef(getUpdates().end(), varlist_size()); } /// \brief Sets the list of the initial values for linear variables. /// \param IL List of expressions. void setInits(ArrayRef<Expr *> IL); public: /// \brief Creates clause with a list of variables \a VL and a linear step /// \a Step. /// /// \param C AST Context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param ColonLoc Location of ':'. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. /// \param IL List of initial values for the variables. /// \param Step Linear step. /// \param CalcStep Calculation of the linear step. static OMPLinearClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> IL, Expr *Step, Expr *CalcStep); /// \brief Creates an empty clause with the place for \a NumVars variables. /// /// \param C AST context. /// \param NumVars Number of variables. /// static OMPLinearClause *CreateEmpty(const ASTContext &C, unsigned NumVars); /// \brief Sets the location of ':'. void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; } /// \brief Returns the location of '('. SourceLocation getColonLoc() const { return ColonLoc; } /// \brief Returns linear step. Expr *getStep() { return *(getFinals().end()); } /// \brief Returns linear step. const Expr *getStep() const { return *(getFinals().end()); } /// \brief Returns expression to calculate linear step. Expr *getCalcStep() { return *(getFinals().end() + 1); } /// \brief Returns expression to calculate linear step. const Expr *getCalcStep() const { return *(getFinals().end() + 1); } /// \brief Sets the list of update expressions for linear variables. /// \param UL List of expressions. void setUpdates(ArrayRef<Expr *> UL); /// \brief Sets the list of final update expressions for linear variables. /// \param FL List of expressions. 
void setFinals(ArrayRef<Expr *> FL); typedef MutableArrayRef<Expr *>::iterator inits_iterator; typedef ArrayRef<const Expr *>::iterator inits_const_iterator; typedef llvm::iterator_range<inits_iterator> inits_range; typedef llvm::iterator_range<inits_const_iterator> inits_const_range; inits_range inits() { return inits_range(getInits().begin(), getInits().end()); } inits_const_range inits() const { return inits_const_range(getInits().begin(), getInits().end()); } typedef MutableArrayRef<Expr *>::iterator updates_iterator; typedef ArrayRef<const Expr *>::iterator updates_const_iterator; typedef llvm::iterator_range<updates_iterator> updates_range; typedef llvm::iterator_range<updates_const_iterator> updates_const_range; updates_range updates() { return updates_range(getUpdates().begin(), getUpdates().end()); } updates_const_range updates() const { return updates_const_range(getUpdates().begin(), getUpdates().end()); } typedef MutableArrayRef<Expr *>::iterator finals_iterator; typedef ArrayRef<const Expr *>::iterator finals_const_iterator; typedef llvm::iterator_range<finals_iterator> finals_range; typedef llvm::iterator_range<finals_const_iterator> finals_const_range; finals_range finals() { return finals_range(getFinals().begin(), getFinals().end()); } finals_const_range finals() const { return finals_const_range(getFinals().begin(), getFinals().end()); } StmtRange children() { return StmtRange(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(getFinals().end() + 2)); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_linear; } }; /// \brief This represents clause 'aligned' in the ' /// directives. /// /// \code /// /// \endcode /// In this example directive ' /// with variables 'a', 'b' and alignment '8'. /// class OMPAlignedClause : public OMPVarListClause<OMPAlignedClause> { friend class OMPClauseReader; /// \brief Location of ':'. SourceLocation ColonLoc; /// \brief Sets the alignment for clause. void setAlignment(Expr *A) { *varlist_end() = A; } /// \brief Build 'aligned' clause with given number of variables \a NumVars. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param ColonLoc Location of ':'. /// \param EndLoc Ending location of the clause. /// \param NumVars Number of variables. /// OMPAlignedClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, unsigned NumVars) : OMPVarListClause<OMPAlignedClause>(OMPC_aligned, StartLoc, LParenLoc, EndLoc, NumVars), ColonLoc(ColonLoc) {} /// \brief Build an empty clause. /// /// \param NumVars Number of variables. /// explicit OMPAlignedClause(unsigned NumVars) : OMPVarListClause<OMPAlignedClause>(OMPC_aligned, SourceLocation(), SourceLocation(), SourceLocation(), NumVars), ColonLoc(SourceLocation()) {} public: /// \brief Creates clause with a list of variables \a VL and alignment \a A. /// /// \param C AST Context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param ColonLoc Location of ':'. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. /// \param A Alignment. static OMPAlignedClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, Expr *A); /// \brief Creates an empty clause with the place for \a NumVars variables. /// /// \param C AST context. /// \param NumVars Number of variables. 
/// static OMPAlignedClause *CreateEmpty(const ASTContext &C, unsigned NumVars); /// \brief Sets the location of ':'. void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; } /// \brief Returns the location of ':'. SourceLocation getColonLoc() const { return ColonLoc; } /// \brief Returns alignment. Expr *getAlignment() { return *varlist_end(); } /// \brief Returns alignment. const Expr *getAlignment() const { return *varlist_end(); } StmtRange children() { return StmtRange(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end() + 1)); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_aligned; } }; /// \brief This represents clause 'copyin' in the ' /// /// \code /// /// \endcode /// In this example directive ' /// with the variables 'a' and 'b'. /// class OMPCopyinClause : public OMPVarListClause<OMPCopyinClause> { // Class has 3 additional tail allocated arrays: // 1. List of helper expressions for proper generation of assignment operation // required for copyin clause. This list represents sources. // 2. List of helper expressions for proper generation of assignment operation // required for copyin clause. This list represents destinations. // 3. List of helper expressions that represents assignment operation: // \code // DstExprs = SrcExprs; // \endcode // Required for proper codegen of propagation of master's thread values of // threadprivate variables to local instances of that variables in other // implicit threads. friend class OMPClauseReader; /// \brief Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. /// OMPCopyinClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPCopyinClause>(OMPC_copyin, StartLoc, LParenLoc, EndLoc, N) {} /// \brief Build an empty clause. /// /// \param N Number of variables. /// explicit OMPCopyinClause(unsigned N) : OMPVarListClause<OMPCopyinClause>(OMPC_copyin, SourceLocation(), SourceLocation(), SourceLocation(), N) {} /// \brief Set list of helper expressions, required for proper codegen of the /// clause. These expressions represent source expression in the final /// assignment statement performed by the copyin clause. void setSourceExprs(ArrayRef<Expr *> SrcExprs); /// \brief Get the list of helper source expressions. MutableArrayRef<Expr *> getSourceExprs() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getSourceExprs() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } /// \brief Set list of helper expressions, required for proper codegen of the /// clause. These expressions represent destination expression in the final /// assignment statement performed by the copyin clause. void setDestinationExprs(ArrayRef<Expr *> DstExprs); /// \brief Get the list of helper destination expressions. MutableArrayRef<Expr *> getDestinationExprs() { return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size()); } ArrayRef<const Expr *> getDestinationExprs() const { return llvm::makeArrayRef(getSourceExprs().end(), varlist_size()); } /// \brief Set list of helper assignment expressions, required for proper /// codegen of the clause. These expressions are assignment expressions that /// assign source helper expressions to destination helper expressions /// correspondingly. 
void setAssignmentOps(ArrayRef<Expr *> AssignmentOps); /// \brief Get the list of helper assignment expressions. MutableArrayRef<Expr *> getAssignmentOps() { return MutableArrayRef<Expr *>(getDestinationExprs().end(), varlist_size()); } ArrayRef<const Expr *> getAssignmentOps() const { return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size()); } public: /// \brief Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. /// \param SrcExprs List of helper expressions for proper generation of /// assignment operation required for copyin clause. This list represents /// sources. /// \param DstExprs List of helper expressions for proper generation of /// assignment operation required for copyin clause. This list represents /// destinations. /// \param AssignmentOps List of helper expressions that represents assignment /// operation: /// \code /// DstExprs = SrcExprs; /// \endcode /// Required for proper codegen of propagation of master's thread values of /// threadprivate variables to local instances of that variables in other /// implicit threads. /// static OMPCopyinClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs, ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps); /// \brief Creates an empty clause with \a N variables. /// /// \param C AST context. /// \param N The number of variables. /// static OMPCopyinClause *CreateEmpty(const ASTContext &C, unsigned N); typedef MutableArrayRef<Expr *>::iterator helper_expr_iterator; typedef ArrayRef<const Expr *>::iterator helper_expr_const_iterator; typedef llvm::iterator_range<helper_expr_iterator> helper_expr_range; typedef llvm::iterator_range<helper_expr_const_iterator> helper_expr_const_range; helper_expr_const_range source_exprs() const { return helper_expr_const_range(getSourceExprs().begin(), getSourceExprs().end()); } helper_expr_range source_exprs() { return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end()); } helper_expr_const_range destination_exprs() const { return helper_expr_const_range(getDestinationExprs().begin(), getDestinationExprs().end()); } helper_expr_range destination_exprs() { return helper_expr_range(getDestinationExprs().begin(), getDestinationExprs().end()); } helper_expr_const_range assignment_ops() const { return helper_expr_const_range(getAssignmentOps().begin(), getAssignmentOps().end()); } helper_expr_range assignment_ops() { return helper_expr_range(getAssignmentOps().begin(), getAssignmentOps().end()); } StmtRange children() { return StmtRange(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_copyin; } }; /// \brief This represents clause 'copyprivate' in the ' /// directives. /// /// \code /// /// \endcode /// In this example directive ' /// with the variables 'a' and 'b'. /// class OMPCopyprivateClause : public OMPVarListClause<OMPCopyprivateClause> { friend class OMPClauseReader; /// \brief Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. 
/// OMPCopyprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPCopyprivateClause>(OMPC_copyprivate, StartLoc, LParenLoc, EndLoc, N) {} /// \brief Build an empty clause. /// /// \param N Number of variables. /// explicit OMPCopyprivateClause(unsigned N) : OMPVarListClause<OMPCopyprivateClause>( OMPC_copyprivate, SourceLocation(), SourceLocation(), SourceLocation(), N) {} /// \brief Set list of helper expressions, required for proper codegen of the /// clause. These expressions represent source expression in the final /// assignment statement performed by the copyprivate clause. void setSourceExprs(ArrayRef<Expr *> SrcExprs); /// \brief Get the list of helper source expressions. MutableArrayRef<Expr *> getSourceExprs() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getSourceExprs() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } /// \brief Set list of helper expressions, required for proper codegen of the /// clause. These expressions represent destination expression in the final /// assignment statement performed by the copyprivate clause. void setDestinationExprs(ArrayRef<Expr *> DstExprs); /// \brief Get the list of helper destination expressions. MutableArrayRef<Expr *> getDestinationExprs() { return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size()); } ArrayRef<const Expr *> getDestinationExprs() const { return llvm::makeArrayRef(getSourceExprs().end(), varlist_size()); } /// \brief Set list of helper assignment expressions, required for proper /// codegen of the clause. These expressions are assignment expressions that /// assign source helper expressions to destination helper expressions /// correspondingly. void setAssignmentOps(ArrayRef<Expr *> AssignmentOps); /// \brief Get the list of helper assignment expressions. MutableArrayRef<Expr *> getAssignmentOps() { return MutableArrayRef<Expr *>(getDestinationExprs().end(), varlist_size()); } ArrayRef<const Expr *> getAssignmentOps() const { return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size()); } public: /// \brief Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. /// \param SrcExprs List of helper expressions for proper generation of /// assignment operation required for copyprivate clause. This list represents /// sources. /// \param DstExprs List of helper expressions for proper generation of /// assignment operation required for copyprivate clause. This list represents /// destinations. /// \param AssignmentOps List of helper expressions that represents assignment /// operation: /// \code /// DstExprs = SrcExprs; /// \endcode /// Required for proper codegen of final assignment performed by the /// copyprivate clause. /// static OMPCopyprivateClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs, ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps); /// \brief Creates an empty clause with \a N variables. /// /// \param C AST context. /// \param N The number of variables. 
/// static OMPCopyprivateClause *CreateEmpty(const ASTContext &C, unsigned N); typedef MutableArrayRef<Expr *>::iterator helper_expr_iterator; typedef ArrayRef<const Expr *>::iterator helper_expr_const_iterator; typedef llvm::iterator_range<helper_expr_iterator> helper_expr_range; typedef llvm::iterator_range<helper_expr_const_iterator> helper_expr_const_range; helper_expr_const_range source_exprs() const { return helper_expr_const_range(getSourceExprs().begin(), getSourceExprs().end()); } helper_expr_range source_exprs() { return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end()); } helper_expr_const_range destination_exprs() const { return helper_expr_const_range(getDestinationExprs().begin(), getDestinationExprs().end()); } helper_expr_range destination_exprs() { return helper_expr_range(getDestinationExprs().begin(), getDestinationExprs().end()); } helper_expr_const_range assignment_ops() const { return helper_expr_const_range(getAssignmentOps().begin(), getAssignmentOps().end()); } helper_expr_range assignment_ops() { return helper_expr_range(getAssignmentOps().begin(), getAssignmentOps().end()); } StmtRange children() { return StmtRange(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_copyprivate; } }; /// \brief This represents implicit clause 'flush' for the '#pragma omp flush' /// directive. /// This clause does not exist by itself, it can be only as a part of 'omp /// flush' directive. This clause is introduced to keep the original structure /// of \a OMPExecutableDirective class and its derivatives and to use the /// existing infrastructure of clauses with the list of variables. /// /// \code /// #pragma omp flush(a,b) /// \endcode /// In this example directive '#pragma omp flush' has implicit clause 'flush' /// with the variables 'a' and 'b'. /// class OMPFlushClause : public OMPVarListClause<OMPFlushClause> { /// \brief Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. /// OMPFlushClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPFlushClause>(OMPC_flush, StartLoc, LParenLoc, EndLoc, N) {} /// \brief Build an empty clause. /// /// \param N Number of variables. /// explicit OMPFlushClause(unsigned N) : OMPVarListClause<OMPFlushClause>(OMPC_flush, SourceLocation(), SourceLocation(), SourceLocation(), N) {} public: /// \brief Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. /// static OMPFlushClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL); /// \brief Creates an empty clause with \a N variables. /// /// \param C AST context. /// \param N The number of variables. /// static OMPFlushClause *CreateEmpty(const ASTContext &C, unsigned N); StmtRange children() { return StmtRange(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_flush; } }; } // end namespace clang #endif
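// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original header): how a consumer of the
// clause classes above typically walks a directive's clause list. This is a
// minimal, hedged example: it assumes a Clang version matching this header,
// where clang/AST/StmtOpenMP.h provides OMPExecutableDirective::clauses(), and
// the helper name dumpPrivateVars is hypothetical.
// ---------------------------------------------------------------------------
#include "clang/AST/StmtOpenMP.h"

// Dump every variable referenced by 'private' clauses of an OpenMP directive.
static void dumpPrivateVars(clang::OMPExecutableDirective &D) {
  for (clang::OMPClause *C : D.clauses()) {
    // dyn_cast dispatches on getClauseKind() via each clause's classof().
    if (auto *PC = llvm::dyn_cast_or_null<clang::OMPPrivateClause>(C)) {
      for (clang::Expr *Ref : PC->varlists())
        Ref->dump(); // each element references one variable listed in the clause
    }
  }
}
// ---------------------------------------------------------------------------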
//===- OpenMPClause.h - Classes for OpenMP clauses --------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// /// \file /// \brief This file defines OpenMP AST classes for clauses. /// There are clauses for executable directives, clauses for declarative /// directives and clauses which can be used in both kinds of directives. /// //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_AST_OPENMPCLAUSE_H #define LLVM_CLANG_AST_OPENMPCLAUSE_H #include "clang/AST/Expr.h" #include "clang/AST/Stmt.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/SourceLocation.h" namespace clang { //===----------------------------------------------------------------------===// // AST classes for clauses. //===----------------------------------------------------------------------===// /// \brief This is a basic class for representing single OpenMP clause. /// class OMPClause { /// \brief Starting location of the clause (the clause keyword). SourceLocation StartLoc; /// \brief Ending location of the clause. SourceLocation EndLoc; /// \brief Kind of the clause. OpenMPClauseKind Kind; protected: OMPClause(OpenMPClauseKind K, SourceLocation StartLoc, SourceLocation EndLoc) : StartLoc(StartLoc), EndLoc(EndLoc), Kind(K) {} public: /// \brief Returns the starting location of the clause. SourceLocation getLocStart() const { return StartLoc; } /// \brief Returns the ending location of the clause. SourceLocation getLocEnd() const { return EndLoc; } /// \brief Sets the starting location of the clause. void setLocStart(SourceLocation Loc) { StartLoc = Loc; } /// \brief Sets the ending location of the clause. void setLocEnd(SourceLocation Loc) { EndLoc = Loc; } /// \brief Returns kind of OpenMP clause (private, shared, reduction, etc.). OpenMPClauseKind getClauseKind() const { return Kind; } bool isImplicit() const { return StartLoc.isInvalid(); } StmtRange children(); ConstStmtRange children() const { return const_cast<OMPClause *>(this)->children(); } static bool classof(const OMPClause *) { return true; } }; /// \brief This represents clauses with the list of variables like 'private', /// 'firstprivate', 'copyin', 'shared', or 'reduction' clauses in the /// '#pragma omp ...' directives. template <class T> class OMPVarListClause : public OMPClause { friend class OMPClauseReader; /// \brief Location of '('. SourceLocation LParenLoc; /// \brief Number of variables in the list. unsigned NumVars; protected: /// \brief Fetches list of variables associated with this clause. MutableArrayRef<Expr *> getVarRefs() { return MutableArrayRef<Expr *>( reinterpret_cast<Expr **>( reinterpret_cast<char *>(this) + llvm::RoundUpToAlignment(sizeof(T), llvm::alignOf<Expr *>())), NumVars); } /// \brief Sets the list of variables for this clause. void setVarRefs(ArrayRef<Expr *> VL) { assert(VL.size() == NumVars && "Number of variables is not the same as the preallocated buffer"); std::copy( VL.begin(), VL.end(), reinterpret_cast<Expr **>( reinterpret_cast<char *>(this) + llvm::RoundUpToAlignment(sizeof(T), llvm::alignOf<Expr *>()))); } /// \brief Build a clause with \a N variables /// /// \param K Kind of the clause. /// \param StartLoc Starting location of the clause (the clause keyword). /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. 
/// \param N Number of the variables in the clause. /// OMPVarListClause(OpenMPClauseKind K, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPClause(K, StartLoc, EndLoc), LParenLoc(LParenLoc), NumVars(N) {} public: typedef MutableArrayRef<Expr *>::iterator varlist_iterator; typedef ArrayRef<const Expr *>::iterator varlist_const_iterator; typedef llvm::iterator_range<varlist_iterator> varlist_range; typedef llvm::iterator_range<varlist_const_iterator> varlist_const_range; unsigned varlist_size() const { return NumVars; } bool varlist_empty() const { return NumVars == 0; } varlist_range varlists() { return varlist_range(varlist_begin(), varlist_end()); } varlist_const_range varlists() const { return varlist_const_range(varlist_begin(), varlist_end()); } varlist_iterator varlist_begin() { return getVarRefs().begin(); } varlist_iterator varlist_end() { return getVarRefs().end(); } varlist_const_iterator varlist_begin() const { return getVarRefs().begin(); } varlist_const_iterator varlist_end() const { return getVarRefs().end(); } /// \brief Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// \brief Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// \brief Fetches list of all variables in the clause. ArrayRef<const Expr *> getVarRefs() const { return llvm::makeArrayRef( reinterpret_cast<const Expr *const *>( reinterpret_cast<const char *>(this) + llvm::RoundUpToAlignment(sizeof(T), llvm::alignOf<const Expr *>())), NumVars); } }; /// \brief This represents 'if' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp parallel if(a > 5) /// \endcode /// In this example directive '#pragma omp parallel' has simple 'if' /// clause with condition 'a > 5'. /// class OMPIfClause : public OMPClause { friend class OMPClauseReader; /// \brief Location of '('. SourceLocation LParenLoc; /// \brief Condition of the 'if' clause. Stmt *Condition; /// \brief Set condition. /// void setCondition(Expr *Cond) { Condition = Cond; } public: /// \brief Build 'if' clause with condition \a Cond. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param Cond Condition of the clause. /// \param EndLoc Ending location of the clause. /// OMPIfClause(Expr *Cond, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(OMPC_if, StartLoc, EndLoc), LParenLoc(LParenLoc), Condition(Cond) {} /// \brief Build an empty clause. /// OMPIfClause() : OMPClause(OMPC_if, SourceLocation(), SourceLocation()), LParenLoc(SourceLocation()), Condition(nullptr) {} /// \brief Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// \brief Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// \brief Returns condition. Expr *getCondition() const { return cast_or_null<Expr>(Condition); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_if; } StmtRange children() { return StmtRange(&Condition, &Condition + 1); } }; /// \brief This represents 'final' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp task final(a > 5) /// \endcode /// In this example directive '#pragma omp task' has simple 'final' /// clause with condition 'a > 5'. /// class OMPFinalClause : public OMPClause { friend class OMPClauseReader; /// \brief Location of '('. SourceLocation LParenLoc; /// \brief Condition of the 'if' clause. 
Stmt *Condition; /// \brief Set condition. /// void setCondition(Expr *Cond) { Condition = Cond; } public: /// \brief Build 'final' clause with condition \a Cond. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param Cond Condition of the clause. /// \param EndLoc Ending location of the clause. /// OMPFinalClause(Expr *Cond, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(OMPC_final, StartLoc, EndLoc), LParenLoc(LParenLoc), Condition(Cond) {} /// \brief Build an empty clause. /// OMPFinalClause() : OMPClause(OMPC_final, SourceLocation(), SourceLocation()), LParenLoc(SourceLocation()), Condition(nullptr) {} /// \brief Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// \brief Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// \brief Returns condition. Expr *getCondition() const { return cast_or_null<Expr>(Condition); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_final; } StmtRange children() { return StmtRange(&Condition, &Condition + 1); } }; /// \brief This represents 'num_threads' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp parallel num_threads(6) /// \endcode /// In this example directive '#pragma omp parallel' has simple 'num_threads' /// clause with number of threads '6'. /// class OMPNumThreadsClause : public OMPClause { friend class OMPClauseReader; /// \brief Location of '('. SourceLocation LParenLoc; /// \brief Condition of the 'num_threads' clause. Stmt *NumThreads; /// \brief Set condition. /// void setNumThreads(Expr *NThreads) { NumThreads = NThreads; } public: /// \brief Build 'num_threads' clause with condition \a NumThreads. /// /// \param NumThreads Number of threads for the construct. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// OMPNumThreadsClause(Expr *NumThreads, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(OMPC_num_threads, StartLoc, EndLoc), LParenLoc(LParenLoc), NumThreads(NumThreads) {} /// \brief Build an empty clause. /// OMPNumThreadsClause() : OMPClause(OMPC_num_threads, SourceLocation(), SourceLocation()), LParenLoc(SourceLocation()), NumThreads(nullptr) {} /// \brief Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// \brief Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// \brief Returns number of threads. Expr *getNumThreads() const { return cast_or_null<Expr>(NumThreads); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_num_threads; } StmtRange children() { return StmtRange(&NumThreads, &NumThreads + 1); } }; /// \brief This represents 'safelen' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp simd safelen(4) /// \endcode /// In this example directive '#pragma omp simd' has clause 'safelen' /// with single expression '4'. /// If the safelen clause is used then no two iterations executed /// concurrently with SIMD instructions can have a greater distance /// in the logical iteration space than its value. The parameter of /// the safelen clause must be a constant positive integer expression. /// class OMPSafelenClause : public OMPClause { friend class OMPClauseReader; /// \brief Location of '('. 
SourceLocation LParenLoc; /// \brief Safe iteration space distance. Stmt *Safelen; /// \brief Set safelen. void setSafelen(Expr *Len) { Safelen = Len; } public: /// \brief Build 'safelen' clause. /// /// \param Len Expression associated with this clause. /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. /// OMPSafelenClause(Expr *Len, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(OMPC_safelen, StartLoc, EndLoc), LParenLoc(LParenLoc), Safelen(Len) {} /// \brief Build an empty clause. /// explicit OMPSafelenClause() : OMPClause(OMPC_safelen, SourceLocation(), SourceLocation()), LParenLoc(SourceLocation()), Safelen(nullptr) {} /// \brief Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// \brief Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// \brief Return safe iteration space distance. Expr *getSafelen() const { return cast_or_null<Expr>(Safelen); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_safelen; } StmtRange children() { return StmtRange(&Safelen, &Safelen + 1); } }; /// \brief This represents 'collapse' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp simd collapse(3) /// \endcode /// In this example directive '#pragma omp simd' has clause 'collapse' /// with single expression '3'. /// The parameter must be a constant positive integer expression, it specifies /// the number of nested loops that should be collapsed into a single iteration /// space. /// class OMPCollapseClause : public OMPClause { friend class OMPClauseReader; /// \brief Location of '('. SourceLocation LParenLoc; /// \brief Number of for-loops. Stmt *NumForLoops; /// \brief Set the number of associated for-loops. void setNumForLoops(Expr *Num) { NumForLoops = Num; } public: /// \brief Build 'collapse' clause. /// /// \param Num Expression associated with this clause. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// OMPCollapseClause(Expr *Num, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(OMPC_collapse, StartLoc, EndLoc), LParenLoc(LParenLoc), NumForLoops(Num) {} /// \brief Build an empty clause. /// explicit OMPCollapseClause() : OMPClause(OMPC_collapse, SourceLocation(), SourceLocation()), LParenLoc(SourceLocation()), NumForLoops(nullptr) {} /// \brief Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// \brief Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// \brief Return the number of associated for-loops. Expr *getNumForLoops() const { return cast_or_null<Expr>(NumForLoops); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_collapse; } StmtRange children() { return StmtRange(&NumForLoops, &NumForLoops + 1); } }; /// \brief This represents 'default' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp parallel default(shared) /// \endcode /// In this example directive '#pragma omp parallel' has simple 'default' /// clause with kind 'shared'. /// class OMPDefaultClause : public OMPClause { friend class OMPClauseReader; /// \brief Location of '('. SourceLocation LParenLoc; /// \brief A kind of the 'default' clause. OpenMPDefaultClauseKind Kind; /// \brief Start location of the kind in source code. 
SourceLocation KindKwLoc; /// \brief Set kind of the clauses. /// /// \param K Argument of clause. /// void setDefaultKind(OpenMPDefaultClauseKind K) { Kind = K; } /// \brief Set argument location. /// /// \param KLoc Argument location. /// void setDefaultKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; } public: /// \brief Build 'default' clause with argument \a A ('none' or 'shared'). /// /// \param A Argument of the clause ('none' or 'shared'). /// \param ALoc Starting location of the argument. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// OMPDefaultClause(OpenMPDefaultClauseKind A, SourceLocation ALoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(OMPC_default, StartLoc, EndLoc), LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {} /// \brief Build an empty clause. /// OMPDefaultClause() : OMPClause(OMPC_default, SourceLocation(), SourceLocation()), LParenLoc(SourceLocation()), Kind(OMPC_DEFAULT_unknown), KindKwLoc(SourceLocation()) {} /// \brief Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// \brief Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// \brief Returns kind of the clause. OpenMPDefaultClauseKind getDefaultKind() const { return Kind; } /// \brief Returns location of clause kind. SourceLocation getDefaultKindKwLoc() const { return KindKwLoc; } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_default; } StmtRange children() { return StmtRange(); } }; /// \brief This represents 'proc_bind' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp parallel proc_bind(master) /// \endcode /// In this example directive '#pragma omp parallel' has simple 'proc_bind' /// clause with kind 'master'. /// class OMPProcBindClause : public OMPClause { friend class OMPClauseReader; /// \brief Location of '('. SourceLocation LParenLoc; /// \brief A kind of the 'proc_bind' clause. OpenMPProcBindClauseKind Kind; /// \brief Start location of the kind in source code. SourceLocation KindKwLoc; /// \brief Set kind of the clause. /// /// \param K Kind of clause. /// void setProcBindKind(OpenMPProcBindClauseKind K) { Kind = K; } /// \brief Set clause kind location. /// /// \param KLoc Kind location. /// void setProcBindKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; } public: /// \brief Build 'proc_bind' clause with argument \a A ('master', 'close' or /// 'spread'). /// /// \param A Argument of the clause ('master', 'close' or 'spread'). /// \param ALoc Starting location of the argument. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// OMPProcBindClause(OpenMPProcBindClauseKind A, SourceLocation ALoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(OMPC_proc_bind, StartLoc, EndLoc), LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {} /// \brief Build an empty clause. /// OMPProcBindClause() : OMPClause(OMPC_proc_bind, SourceLocation(), SourceLocation()), LParenLoc(SourceLocation()), Kind(OMPC_PROC_BIND_unknown), KindKwLoc(SourceLocation()) {} /// \brief Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// \brief Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// \brief Returns kind of the clause. 
OpenMPProcBindClauseKind getProcBindKind() const { return Kind; } /// \brief Returns location of clause kind. SourceLocation getProcBindKindKwLoc() const { return KindKwLoc; } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_proc_bind; } StmtRange children() { return StmtRange(); } }; /// \brief This represents 'schedule' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp for schedule(static, 3) /// \endcode /// In this example directive '#pragma omp for' has 'schedule' clause with /// arguments 'static' and '3'. /// class OMPScheduleClause : public OMPClause { friend class OMPClauseReader; /// \brief Location of '('. SourceLocation LParenLoc; /// \brief A kind of the 'schedule' clause. OpenMPScheduleClauseKind Kind; /// \brief Start location of the schedule ind in source code. SourceLocation KindLoc; /// \brief Location of ',' (if any). SourceLocation CommaLoc; /// \brief Chunk size. Stmt *ChunkSize; /// \brief Set schedule kind. /// /// \param K Schedule kind. /// void setScheduleKind(OpenMPScheduleClauseKind K) { Kind = K; } /// \brief Sets the location of '('. /// /// \param Loc Location of '('. /// void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// \brief Set schedule kind start location. /// /// \param KLoc Schedule kind location. /// void setScheduleKindLoc(SourceLocation KLoc) { KindLoc = KLoc; } /// \brief Set location of ','. /// /// \param Loc Location of ','. /// void setCommaLoc(SourceLocation Loc) { CommaLoc = Loc; } /// \brief Set chunk size. /// /// \param E Chunk size. /// void setChunkSize(Expr *E) { ChunkSize = E; } public: /// \brief Build 'schedule' clause with schedule kind \a Kind and chunk size /// expression \a ChunkSize. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param KLoc Starting location of the argument. /// \param CommaLoc Location of ','. /// \param EndLoc Ending location of the clause. /// \param Kind Schedule kind. /// \param ChunkSize Chunk size. /// OMPScheduleClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KLoc, SourceLocation CommaLoc, SourceLocation EndLoc, OpenMPScheduleClauseKind Kind, Expr *ChunkSize) : OMPClause(OMPC_schedule, StartLoc, EndLoc), LParenLoc(LParenLoc), Kind(Kind), KindLoc(KLoc), CommaLoc(CommaLoc), ChunkSize(ChunkSize) {} /// \brief Build an empty clause. /// explicit OMPScheduleClause() : OMPClause(OMPC_schedule, SourceLocation(), SourceLocation()), Kind(OMPC_SCHEDULE_unknown), ChunkSize(nullptr) {} /// \brief Get kind of the clause. /// OpenMPScheduleClauseKind getScheduleKind() const { return Kind; } /// \brief Get location of '('. /// SourceLocation getLParenLoc() { return LParenLoc; } /// \brief Get kind location. /// SourceLocation getScheduleKindLoc() { return KindLoc; } /// \brief Get location of ','. /// SourceLocation getCommaLoc() { return CommaLoc; } /// \brief Get chunk size. /// Expr *getChunkSize() { return dyn_cast_or_null<Expr>(ChunkSize); } /// \brief Get chunk size. /// Expr *getChunkSize() const { return dyn_cast_or_null<Expr>(ChunkSize); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_schedule; } StmtRange children() { return StmtRange(&ChunkSize, &ChunkSize + 1); } }; /// \brief This represents 'ordered' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp for ordered /// \endcode /// In this example directive '#pragma omp for' has 'ordered' clause. 
/// class OMPOrderedClause : public OMPClause { public: /// \brief Build 'ordered' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. /// OMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_ordered, StartLoc, EndLoc) {} /// \brief Build an empty clause. /// OMPOrderedClause() : OMPClause(OMPC_ordered, SourceLocation(), SourceLocation()) {} static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_ordered; } StmtRange children() { return StmtRange(); } }; /// \brief This represents 'nowait' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp for nowait /// \endcode /// In this example directive '#pragma omp for' has 'nowait' clause. /// class OMPNowaitClause : public OMPClause { public: /// \brief Build 'nowait' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. /// OMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_nowait, StartLoc, EndLoc) {} /// \brief Build an empty clause. /// OMPNowaitClause() : OMPClause(OMPC_nowait, SourceLocation(), SourceLocation()) {} static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_nowait; } StmtRange children() { return StmtRange(); } }; /// \brief This represents 'untied' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp task untied /// \endcode /// In this example directive '#pragma omp task' has 'untied' clause. /// class OMPUntiedClause : public OMPClause { public: /// \brief Build 'untied' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. /// OMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_untied, StartLoc, EndLoc) {} /// \brief Build an empty clause. /// OMPUntiedClause() : OMPClause(OMPC_untied, SourceLocation(), SourceLocation()) {} static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_untied; } StmtRange children() { return StmtRange(); } }; /// \brief This represents 'mergeable' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp task mergeable /// \endcode /// In this example directive '#pragma omp task' has 'mergeable' clause. /// class OMPMergeableClause : public OMPClause { public: /// \brief Build 'mergeable' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. /// OMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_mergeable, StartLoc, EndLoc) {} /// \brief Build an empty clause. /// OMPMergeableClause() : OMPClause(OMPC_mergeable, SourceLocation(), SourceLocation()) {} static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_mergeable; } StmtRange children() { return StmtRange(); } }; /// \brief This represents 'read' clause in the '#pragma omp atomic' directive. /// /// \code /// #pragma omp atomic read /// \endcode /// In this example directive '#pragma omp atomic' has 'read' clause. /// class OMPReadClause : public OMPClause { public: /// \brief Build 'read' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. /// OMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_read, StartLoc, EndLoc) {} /// \brief Build an empty clause. 
/// OMPReadClause() : OMPClause(OMPC_read, SourceLocation(), SourceLocation()) {} static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_read; } StmtRange children() { return StmtRange(); } }; /// \brief This represents 'write' clause in the '#pragma omp atomic' directive. /// /// \code /// #pragma omp atomic write /// \endcode /// In this example directive '#pragma omp atomic' has 'write' clause. /// class OMPWriteClause : public OMPClause { public: /// \brief Build 'write' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. /// OMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_write, StartLoc, EndLoc) {} /// \brief Build an empty clause. /// OMPWriteClause() : OMPClause(OMPC_write, SourceLocation(), SourceLocation()) {} static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_write; } StmtRange children() { return StmtRange(); } }; /// \brief This represents 'update' clause in the '#pragma omp atomic' /// directive. /// /// \code /// #pragma omp atomic update /// \endcode /// In this example directive '#pragma omp atomic' has 'update' clause. /// class OMPUpdateClause : public OMPClause { public: /// \brief Build 'update' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. /// OMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_update, StartLoc, EndLoc) {} /// \brief Build an empty clause. /// OMPUpdateClause() : OMPClause(OMPC_update, SourceLocation(), SourceLocation()) {} static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_update; } StmtRange children() { return StmtRange(); } }; /// \brief This represents 'capture' clause in the '#pragma omp atomic' /// directive. /// /// \code /// #pragma omp atomic capture /// \endcode /// In this example directive '#pragma omp atomic' has 'capture' clause. /// class OMPCaptureClause : public OMPClause { public: /// \brief Build 'capture' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. /// OMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_capture, StartLoc, EndLoc) {} /// \brief Build an empty clause. /// OMPCaptureClause() : OMPClause(OMPC_capture, SourceLocation(), SourceLocation()) {} static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_capture; } StmtRange children() { return StmtRange(); } }; /// \brief This represents 'seq_cst' clause in the '#pragma omp atomic' /// directive. /// /// \code /// #pragma omp atomic seq_cst /// \endcode /// In this example directive '#pragma omp atomic' has 'seq_cst' clause. /// class OMPSeqCstClause : public OMPClause { public: /// \brief Build 'seq_cst' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. /// OMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_seq_cst, StartLoc, EndLoc) {} /// \brief Build an empty clause. /// OMPSeqCstClause() : OMPClause(OMPC_seq_cst, SourceLocation(), SourceLocation()) {} static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_seq_cst; } StmtRange children() { return StmtRange(); } }; /// \brief This represents clause 'private' in the '#pragma omp ...' directives. 
/// /// \code /// #pragma omp parallel private(a,b) /// \endcode /// In this example directive '#pragma omp parallel' has clause 'private' /// with the variables 'a' and 'b'. /// class OMPPrivateClause : public OMPVarListClause<OMPPrivateClause> { friend class OMPClauseReader; /// \brief Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. /// OMPPrivateClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPPrivateClause>(OMPC_private, StartLoc, LParenLoc, EndLoc, N) {} /// \brief Build an empty clause. /// /// \param N Number of variables. /// explicit OMPPrivateClause(unsigned N) : OMPVarListClause<OMPPrivateClause>(OMPC_private, SourceLocation(), SourceLocation(), SourceLocation(), N) {} /// \brief Sets the list of references to private copies with initializers for /// new private variables. /// \param VL List of references. void setPrivateCopies(ArrayRef<Expr *> VL); /// \brief Gets the list of references to private copies with initializers for /// new private variables. MutableArrayRef<Expr *> getPrivateCopies() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getPrivateCopies() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } public: /// \brief Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. /// \param PrivateVL List of references to private copies with initializers. /// static OMPPrivateClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> PrivateVL); /// \brief Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. /// static OMPPrivateClause *CreateEmpty(const ASTContext &C, unsigned N); typedef MutableArrayRef<Expr *>::iterator private_copies_iterator; typedef ArrayRef<const Expr *>::iterator private_copies_const_iterator; typedef llvm::iterator_range<private_copies_iterator> private_copies_range; typedef llvm::iterator_range<private_copies_const_iterator> private_copies_const_range; private_copies_range private_copies() { return private_copies_range(getPrivateCopies().begin(), getPrivateCopies().end()); } private_copies_const_range private_copies() const { return private_copies_const_range(getPrivateCopies().begin(), getPrivateCopies().end()); } StmtRange children() { return StmtRange(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_private; } }; /// \brief This represents clause 'firstprivate' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp parallel firstprivate(a,b) /// \endcode /// In this example directive '#pragma omp parallel' has clause 'firstprivate' /// with the variables 'a' and 'b'. /// class OMPFirstprivateClause : public OMPVarListClause<OMPFirstprivateClause> { friend class OMPClauseReader; /// \brief Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. 
/// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. /// OMPFirstprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPFirstprivateClause>(OMPC_firstprivate, StartLoc, LParenLoc, EndLoc, N) {} /// \brief Build an empty clause. /// /// \param N Number of variables. /// explicit OMPFirstprivateClause(unsigned N) : OMPVarListClause<OMPFirstprivateClause>( OMPC_firstprivate, SourceLocation(), SourceLocation(), SourceLocation(), N) {} /// \brief Sets the list of references to private copies with initializers for /// new private variables. /// \param VL List of references. void setPrivateCopies(ArrayRef<Expr *> VL); /// \brief Gets the list of references to private copies with initializers for /// new private variables. MutableArrayRef<Expr *> getPrivateCopies() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getPrivateCopies() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } /// \brief Sets the list of references to initializer variables for new /// private variables. /// \param VL List of references. void setInits(ArrayRef<Expr *> VL); /// \brief Gets the list of references to initializer variables for new /// private variables. MutableArrayRef<Expr *> getInits() { return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size()); } ArrayRef<const Expr *> getInits() const { return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size()); } public: /// \brief Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the original variables. /// \param PrivateVL List of references to private copies with initializers. /// \param InitVL List of references to auto generated variables used for /// initialization of a single array element. Used if firstprivate variable is /// of array type. /// static OMPFirstprivateClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> PrivateVL, ArrayRef<Expr *> InitVL); /// \brief Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. 
/// static OMPFirstprivateClause *CreateEmpty(const ASTContext &C, unsigned N); typedef MutableArrayRef<Expr *>::iterator private_copies_iterator; typedef ArrayRef<const Expr *>::iterator private_copies_const_iterator; typedef llvm::iterator_range<private_copies_iterator> private_copies_range; typedef llvm::iterator_range<private_copies_const_iterator> private_copies_const_range; private_copies_range private_copies() { return private_copies_range(getPrivateCopies().begin(), getPrivateCopies().end()); } private_copies_const_range private_copies() const { return private_copies_const_range(getPrivateCopies().begin(), getPrivateCopies().end()); } typedef MutableArrayRef<Expr *>::iterator inits_iterator; typedef ArrayRef<const Expr *>::iterator inits_const_iterator; typedef llvm::iterator_range<inits_iterator> inits_range; typedef llvm::iterator_range<inits_const_iterator> inits_const_range; inits_range inits() { return inits_range(getInits().begin(), getInits().end()); } inits_const_range inits() const { return inits_const_range(getInits().begin(), getInits().end()); } StmtRange children() { return StmtRange(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_firstprivate; } }; /// \brief This represents clause 'lastprivate' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp simd lastprivate(a,b) /// \endcode /// In this example directive '#pragma omp simd' has clause 'lastprivate' /// with the variables 'a' and 'b'. class OMPLastprivateClause : public OMPVarListClause<OMPLastprivateClause> { // There are 4 additional tail-allocated arrays at the end of the class: // 1. Contains list of pseudo variables with the default initialization for // each non-firstprivate variables. Used in codegen for initialization of // lastprivate copies. // 2. List of helper expressions for proper generation of assignment operation // required for lastprivate clause. This list represents private variables // (for arrays, single array element). // 3. List of helper expressions for proper generation of assignment operation // required for lastprivate clause. This list represents original variables // (for arrays, single array element). // 4. List of helper expressions that represents assignment operation: // \code // DstExprs = SrcExprs; // \endcode // Required for proper codegen of final assignment performed by the // lastprivate clause. // friend class OMPClauseReader; /// \brief Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. /// OMPLastprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPLastprivateClause>(OMPC_lastprivate, StartLoc, LParenLoc, EndLoc, N) {} /// \brief Build an empty clause. /// /// \param N Number of variables. /// explicit OMPLastprivateClause(unsigned N) : OMPVarListClause<OMPLastprivateClause>( OMPC_lastprivate, SourceLocation(), SourceLocation(), SourceLocation(), N) {} /// \brief Get the list of helper expressions for initialization of private /// copies for lastprivate variables. 
MutableArrayRef<Expr *> getPrivateCopies() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getPrivateCopies() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } /// \brief Set list of helper expressions, required for proper codegen of the /// clause. These expressions represent private variables (for arrays, single /// array element) in the final assignment statement performed by the /// lastprivate clause. void setSourceExprs(ArrayRef<Expr *> SrcExprs); /// \brief Get the list of helper source expressions. MutableArrayRef<Expr *> getSourceExprs() { return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size()); } ArrayRef<const Expr *> getSourceExprs() const { return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size()); } /// \brief Set list of helper expressions, required for proper codegen of the /// clause. These expressions represent original variables (for arrays, single /// array element) in the final assignment statement performed by the /// lastprivate clause. void setDestinationExprs(ArrayRef<Expr *> DstExprs); /// \brief Get the list of helper destination expressions. MutableArrayRef<Expr *> getDestinationExprs() { return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size()); } ArrayRef<const Expr *> getDestinationExprs() const { return llvm::makeArrayRef(getSourceExprs().end(), varlist_size()); } /// \brief Set list of helper assignment expressions, required for proper /// codegen of the clause. These expressions are assignment expressions that /// assign private copy of the variable to original variable. void setAssignmentOps(ArrayRef<Expr *> AssignmentOps); /// \brief Get the list of helper assignment expressions. MutableArrayRef<Expr *> getAssignmentOps() { return MutableArrayRef<Expr *>(getDestinationExprs().end(), varlist_size()); } ArrayRef<const Expr *> getAssignmentOps() const { return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size()); } public: /// \brief Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. /// \param SrcExprs List of helper expressions for proper generation of /// assignment operation required for lastprivate clause. This list represents /// private variables (for arrays, single array element). /// \param DstExprs List of helper expressions for proper generation of /// assignment operation required for lastprivate clause. This list represents /// original variables (for arrays, single array element). /// \param AssignmentOps List of helper expressions that represents assignment /// operation: /// \code /// DstExprs = SrcExprs; /// \endcode /// Required for proper codegen of final assignment performed by the /// lastprivate clause. /// /// static OMPLastprivateClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs, ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps); /// \brief Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. 
/// static OMPLastprivateClause *CreateEmpty(const ASTContext &C, unsigned N); typedef MutableArrayRef<Expr *>::iterator helper_expr_iterator; typedef ArrayRef<const Expr *>::iterator helper_expr_const_iterator; typedef llvm::iterator_range<helper_expr_iterator> helper_expr_range; typedef llvm::iterator_range<helper_expr_const_iterator> helper_expr_const_range; /// \brief Set list of helper expressions, required for generation of private /// copies of original lastprivate variables. void setPrivateCopies(ArrayRef<Expr *> PrivateCopies); helper_expr_const_range private_copies() const { return helper_expr_const_range(getPrivateCopies().begin(), getPrivateCopies().end()); } helper_expr_range private_copies() { return helper_expr_range(getPrivateCopies().begin(), getPrivateCopies().end()); } helper_expr_const_range source_exprs() const { return helper_expr_const_range(getSourceExprs().begin(), getSourceExprs().end()); } helper_expr_range source_exprs() { return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end()); } helper_expr_const_range destination_exprs() const { return helper_expr_const_range(getDestinationExprs().begin(), getDestinationExprs().end()); } helper_expr_range destination_exprs() { return helper_expr_range(getDestinationExprs().begin(), getDestinationExprs().end()); } helper_expr_const_range assignment_ops() const { return helper_expr_const_range(getAssignmentOps().begin(), getAssignmentOps().end()); } helper_expr_range assignment_ops() { return helper_expr_range(getAssignmentOps().begin(), getAssignmentOps().end()); } StmtRange children() { return StmtRange(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_lastprivate; } }; /// \brief This represents clause 'shared' in the '#pragma omp ...' directives. /// /// \code /// #pragma omp parallel shared(a,b) /// \endcode /// In this example directive '#pragma omp parallel' has clause 'shared' /// with the variables 'a' and 'b'. /// class OMPSharedClause : public OMPVarListClause<OMPSharedClause> { /// \brief Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. /// OMPSharedClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPSharedClause>(OMPC_shared, StartLoc, LParenLoc, EndLoc, N) {} /// \brief Build an empty clause. /// /// \param N Number of variables. /// explicit OMPSharedClause(unsigned N) : OMPVarListClause<OMPSharedClause>(OMPC_shared, SourceLocation(), SourceLocation(), SourceLocation(), N) {} public: /// \brief Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. /// static OMPSharedClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL); /// \brief Creates an empty clause with \a N variables. /// /// \param C AST context. /// \param N The number of variables. 
/// static OMPSharedClause *CreateEmpty(const ASTContext &C, unsigned N); StmtRange children() { return StmtRange(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_shared; } }; /// \brief This represents clause 'reduction' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp parallel reduction(+:a,b) /// \endcode /// In this example directive '#pragma omp parallel' has clause 'reduction' /// with operator '+' and the variables 'a' and 'b'. /// class OMPReductionClause : public OMPVarListClause<OMPReductionClause> { friend class OMPClauseReader; /// \brief Location of ':'. SourceLocation ColonLoc; /// \brief Nested name specifier for C++. NestedNameSpecifierLoc QualifierLoc; /// \brief Name of custom operator. DeclarationNameInfo NameInfo; /// \brief Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param ColonLoc Location of ':'. /// \param N Number of the variables in the clause. /// \param QualifierLoc The nested-name qualifier with location information /// \param NameInfo The full name info for reduction identifier. /// OMPReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, unsigned N, NestedNameSpecifierLoc QualifierLoc, const DeclarationNameInfo &NameInfo) : OMPVarListClause<OMPReductionClause>(OMPC_reduction, StartLoc, LParenLoc, EndLoc, N), ColonLoc(ColonLoc), QualifierLoc(QualifierLoc), NameInfo(NameInfo) {} /// \brief Build an empty clause. /// /// \param N Number of variables. /// explicit OMPReductionClause(unsigned N) : OMPVarListClause<OMPReductionClause>(OMPC_reduction, SourceLocation(), SourceLocation(), SourceLocation(), N), ColonLoc(), QualifierLoc(), NameInfo() {} /// \brief Sets location of ':' symbol in clause. void setColonLoc(SourceLocation CL) { ColonLoc = CL; } /// \brief Sets the name info for specified reduction identifier. void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; } /// \brief Sets the nested name specifier. void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; } /// \brief Set list of helper expressions, required for proper codegen of the /// clause. These expressions represent LHS expression in the final /// reduction expression performed by the reduction clause. void setLHSExprs(ArrayRef<Expr *> LHSExprs); /// \brief Get the list of helper LHS expressions. MutableArrayRef<Expr *> getLHSExprs() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getLHSExprs() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } /// \brief Set list of helper expressions, required for proper codegen of the /// clause. These expressions represent RHS expression in the final /// reduction expression performed by the reduction clause. /// Also, variables in these expressions are used for proper initialization of /// reduction copies. void setRHSExprs(ArrayRef<Expr *> RHSExprs); /// \brief Get the list of helper destination expressions. MutableArrayRef<Expr *> getRHSExprs() { return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size()); } ArrayRef<const Expr *> getRHSExprs() const { return llvm::makeArrayRef(getLHSExprs().end(), varlist_size()); } /// \brief Set list of helper reduction expressions, required for proper /// codegen of the clause. 
These expressions are binary expressions or /// operator/custom reduction call that calculates new value from source /// helper expressions to destination helper expressions. void setReductionOps(ArrayRef<Expr *> ReductionOps); /// \brief Get the list of helper reduction expressions. MutableArrayRef<Expr *> getReductionOps() { return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size()); } ArrayRef<const Expr *> getReductionOps() const { return llvm::makeArrayRef(getRHSExprs().end(), varlist_size()); } public: /// \brief Creates clause with a list of variables \a VL. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param ColonLoc Location of ':'. /// \param EndLoc Ending location of the clause. /// \param VL The variables in the clause. /// \param QualifierLoc The nested-name qualifier with location information /// \param NameInfo The full name info for reduction identifier. /// \param LHSExprs List of helper expressions for proper generation of /// assignment operation required for reduction clause. This list represents /// LHSs of the reduction expressions. /// \param RHSExprs List of helper expressions for proper generation of /// assignment operation required for reduction clause. This list represents /// RHSs of the reduction expressions. /// Also, variables in these expressions are used for proper initialization of /// reduction copies. /// \param ReductionOps List of helper expressions that represents reduction /// expressions: /// \code /// LHSExprs binop RHSExprs; /// operator binop(LHSExpr, RHSExpr); /// <CustomReduction>(LHSExpr, RHSExpr); /// \endcode /// Required for proper codegen of final reduction operation performed by the /// reduction clause. /// static OMPReductionClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, NestedNameSpecifierLoc QualifierLoc, const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs, ArrayRef<Expr *> ReductionOps); /// \brief Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. /// static OMPReductionClause *CreateEmpty(const ASTContext &C, unsigned N); /// \brief Gets location of ':' symbol in clause. SourceLocation getColonLoc() const { return ColonLoc; } /// \brief Gets the name info for specified reduction identifier. const DeclarationNameInfo &getNameInfo() const { return NameInfo; } /// \brief Gets the nested name specifier.
NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; } typedef MutableArrayRef<Expr *>::iterator helper_expr_iterator; typedef ArrayRef<const Expr *>::iterator helper_expr_const_iterator; typedef llvm::iterator_range<helper_expr_iterator> helper_expr_range; typedef llvm::iterator_range<helper_expr_const_iterator> helper_expr_const_range; helper_expr_const_range lhs_exprs() const { return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end()); } helper_expr_range lhs_exprs() { return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end()); } helper_expr_const_range rhs_exprs() const { return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end()); } helper_expr_range rhs_exprs() { return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end()); } helper_expr_const_range reduction_ops() const { return helper_expr_const_range(getReductionOps().begin(), getReductionOps().end()); } helper_expr_range reduction_ops() { return helper_expr_range(getReductionOps().begin(), getReductionOps().end()); } StmtRange children() { return StmtRange(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_reduction; } }; /// \brief This represents clause 'linear' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp simd linear(a,b : 2) /// \endcode /// In this example directive '#pragma omp simd' has clause 'linear' /// with variables 'a', 'b' and linear step '2'. /// class OMPLinearClause : public OMPVarListClause<OMPLinearClause> { friend class OMPClauseReader; /// \brief Location of ':'. SourceLocation ColonLoc; /// \brief Sets the linear step for clause. void setStep(Expr *Step) { *(getFinals().end()) = Step; } /// \brief Sets the expression to calculate linear step for clause. void setCalcStep(Expr *CalcStep) { *(getFinals().end() + 1) = CalcStep; } /// \brief Build 'linear' clause with given number of variables \a NumVars. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param ColonLoc Location of ':'. /// \param EndLoc Ending location of the clause. /// \param NumVars Number of variables. /// OMPLinearClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, unsigned NumVars) : OMPVarListClause<OMPLinearClause>(OMPC_linear, StartLoc, LParenLoc, EndLoc, NumVars), ColonLoc(ColonLoc) {} /// \brief Build an empty clause. /// /// \param NumVars Number of variables. /// explicit OMPLinearClause(unsigned NumVars) : OMPVarListClause<OMPLinearClause>(OMPC_linear, SourceLocation(), SourceLocation(), SourceLocation(), NumVars), ColonLoc(SourceLocation()) {} /// \brief Gets the list of initial values for linear variables. /// /// There are NumVars expressions with initial values allocated after the /// varlist, they are followed by NumVars update expressions (used to update /// the linear variable's value on current iteration) and they are followed by /// NumVars final expressions (used to calculate the linear variable's /// value after the loop body). 
After these lists, there are 2 helper /// expressions - linear step and a helper to calculate it before the /// loop body (used when the linear step is not constant): /// /// { Vars[] /* in OMPVarListClause */; Inits[]; Updates[]; Finals[]; /// Step; CalcStep; } /// MutableArrayRef<Expr *> getInits() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getInits() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } /// \brief Sets the list of update expressions for linear variables. MutableArrayRef<Expr *> getUpdates() { return MutableArrayRef<Expr *>(getInits().end(), varlist_size()); } ArrayRef<const Expr *> getUpdates() const { return llvm::makeArrayRef(getInits().end(), varlist_size()); } /// \brief Sets the list of final update expressions for linear variables. MutableArrayRef<Expr *> getFinals() { return MutableArrayRef<Expr *>(getUpdates().end(), varlist_size()); } ArrayRef<const Expr *> getFinals() const { return llvm::makeArrayRef(getUpdates().end(), varlist_size()); } /// \brief Sets the list of the initial values for linear variables. /// \param IL List of expressions. void setInits(ArrayRef<Expr *> IL); public: /// \brief Creates clause with a list of variables \a VL and a linear step /// \a Step. /// /// \param C AST Context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param ColonLoc Location of ':'. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. /// \param IL List of initial values for the variables. /// \param Step Linear step. /// \param CalcStep Calculation of the linear step. static OMPLinearClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> IL, Expr *Step, Expr *CalcStep); /// \brief Creates an empty clause with the place for \a NumVars variables. /// /// \param C AST context. /// \param NumVars Number of variables. /// static OMPLinearClause *CreateEmpty(const ASTContext &C, unsigned NumVars); /// \brief Sets the location of ':'. void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; } /// \brief Returns the location of '('. SourceLocation getColonLoc() const { return ColonLoc; } /// \brief Returns linear step. Expr *getStep() { return *(getFinals().end()); } /// \brief Returns linear step. const Expr *getStep() const { return *(getFinals().end()); } /// \brief Returns expression to calculate linear step. Expr *getCalcStep() { return *(getFinals().end() + 1); } /// \brief Returns expression to calculate linear step. const Expr *getCalcStep() const { return *(getFinals().end() + 1); } /// \brief Sets the list of update expressions for linear variables. /// \param UL List of expressions. void setUpdates(ArrayRef<Expr *> UL); /// \brief Sets the list of final update expressions for linear variables. /// \param FL List of expressions. 
void setFinals(ArrayRef<Expr *> FL); typedef MutableArrayRef<Expr *>::iterator inits_iterator; typedef ArrayRef<const Expr *>::iterator inits_const_iterator; typedef llvm::iterator_range<inits_iterator> inits_range; typedef llvm::iterator_range<inits_const_iterator> inits_const_range; inits_range inits() { return inits_range(getInits().begin(), getInits().end()); } inits_const_range inits() const { return inits_const_range(getInits().begin(), getInits().end()); } typedef MutableArrayRef<Expr *>::iterator updates_iterator; typedef ArrayRef<const Expr *>::iterator updates_const_iterator; typedef llvm::iterator_range<updates_iterator> updates_range; typedef llvm::iterator_range<updates_const_iterator> updates_const_range; updates_range updates() { return updates_range(getUpdates().begin(), getUpdates().end()); } updates_const_range updates() const { return updates_const_range(getUpdates().begin(), getUpdates().end()); } typedef MutableArrayRef<Expr *>::iterator finals_iterator; typedef ArrayRef<const Expr *>::iterator finals_const_iterator; typedef llvm::iterator_range<finals_iterator> finals_range; typedef llvm::iterator_range<finals_const_iterator> finals_const_range; finals_range finals() { return finals_range(getFinals().begin(), getFinals().end()); } finals_const_range finals() const { return finals_const_range(getFinals().begin(), getFinals().end()); } StmtRange children() { return StmtRange(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(getFinals().end() + 2)); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_linear; } }; /// \brief This represents clause 'aligned' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp simd aligned(a,b : 8) /// \endcode /// In this example directive '#pragma omp simd' has clause 'aligned' /// with variables 'a', 'b' and alignment '8'. /// class OMPAlignedClause : public OMPVarListClause<OMPAlignedClause> { friend class OMPClauseReader; /// \brief Location of ':'. SourceLocation ColonLoc; /// \brief Sets the alignment for clause. void setAlignment(Expr *A) { *varlist_end() = A; } /// \brief Build 'aligned' clause with given number of variables \a NumVars. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param ColonLoc Location of ':'. /// \param EndLoc Ending location of the clause. /// \param NumVars Number of variables. /// OMPAlignedClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, unsigned NumVars) : OMPVarListClause<OMPAlignedClause>(OMPC_aligned, StartLoc, LParenLoc, EndLoc, NumVars), ColonLoc(ColonLoc) {} /// \brief Build an empty clause. /// /// \param NumVars Number of variables. /// explicit OMPAlignedClause(unsigned NumVars) : OMPVarListClause<OMPAlignedClause>(OMPC_aligned, SourceLocation(), SourceLocation(), SourceLocation(), NumVars), ColonLoc(SourceLocation()) {} public: /// \brief Creates clause with a list of variables \a VL and alignment \a A. /// /// \param C AST Context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param ColonLoc Location of ':'. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. /// \param A Alignment. 
static OMPAlignedClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, Expr *A); /// \brief Creates an empty clause with the place for \a NumVars variables. /// /// \param C AST context. /// \param NumVars Number of variables. /// static OMPAlignedClause *CreateEmpty(const ASTContext &C, unsigned NumVars); /// \brief Sets the location of ':'. void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; } /// \brief Returns the location of ':'. SourceLocation getColonLoc() const { return ColonLoc; } /// \brief Returns alignment. Expr *getAlignment() { return *varlist_end(); } /// \brief Returns alignment. const Expr *getAlignment() const { return *varlist_end(); } StmtRange children() { return StmtRange(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end() + 1)); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_aligned; } }; /// \brief This represents clause 'copyin' in the '#pragma omp ...' directives. /// /// \code /// #pragma omp parallel copyin(a,b) /// \endcode /// In this example directive '#pragma omp parallel' has clause 'copyin' /// with the variables 'a' and 'b'. /// class OMPCopyinClause : public OMPVarListClause<OMPCopyinClause> { // Class has 3 additional tail allocated arrays: // 1. List of helper expressions for proper generation of assignment operation // required for copyin clause. This list represents sources. // 2. List of helper expressions for proper generation of assignment operation // required for copyin clause. This list represents destinations. // 3. List of helper expressions that represents assignment operation: // \code // DstExprs = SrcExprs; // \endcode // Required for proper codegen of propagation of master's thread values of // threadprivate variables to local instances of that variables in other // implicit threads. friend class OMPClauseReader; /// \brief Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. /// OMPCopyinClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPCopyinClause>(OMPC_copyin, StartLoc, LParenLoc, EndLoc, N) {} /// \brief Build an empty clause. /// /// \param N Number of variables. /// explicit OMPCopyinClause(unsigned N) : OMPVarListClause<OMPCopyinClause>(OMPC_copyin, SourceLocation(), SourceLocation(), SourceLocation(), N) {} /// \brief Set list of helper expressions, required for proper codegen of the /// clause. These expressions represent source expression in the final /// assignment statement performed by the copyin clause. void setSourceExprs(ArrayRef<Expr *> SrcExprs); /// \brief Get the list of helper source expressions. MutableArrayRef<Expr *> getSourceExprs() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getSourceExprs() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } /// \brief Set list of helper expressions, required for proper codegen of the /// clause. These expressions represent destination expression in the final /// assignment statement performed by the copyin clause. void setDestinationExprs(ArrayRef<Expr *> DstExprs); /// \brief Get the list of helper destination expressions. 
MutableArrayRef<Expr *> getDestinationExprs() { return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size()); } ArrayRef<const Expr *> getDestinationExprs() const { return llvm::makeArrayRef(getSourceExprs().end(), varlist_size()); } /// \brief Set list of helper assignment expressions, required for proper /// codegen of the clause. These expressions are assignment expressions that /// assign source helper expressions to destination helper expressions /// correspondingly. void setAssignmentOps(ArrayRef<Expr *> AssignmentOps); /// \brief Get the list of helper assignment expressions. MutableArrayRef<Expr *> getAssignmentOps() { return MutableArrayRef<Expr *>(getDestinationExprs().end(), varlist_size()); } ArrayRef<const Expr *> getAssignmentOps() const { return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size()); } public: /// \brief Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. /// \param SrcExprs List of helper expressions for proper generation of /// assignment operation required for copyin clause. This list represents /// sources. /// \param DstExprs List of helper expressions for proper generation of /// assignment operation required for copyin clause. This list represents /// destinations. /// \param AssignmentOps List of helper expressions that represents assignment /// operation: /// \code /// DstExprs = SrcExprs; /// \endcode /// Required for proper codegen of propagation of master's thread values of /// threadprivate variables to local instances of that variables in other /// implicit threads. /// static OMPCopyinClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs, ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps); /// \brief Creates an empty clause with \a N variables. /// /// \param C AST context. /// \param N The number of variables. 
/// static OMPCopyinClause *CreateEmpty(const ASTContext &C, unsigned N); typedef MutableArrayRef<Expr *>::iterator helper_expr_iterator; typedef ArrayRef<const Expr *>::iterator helper_expr_const_iterator; typedef llvm::iterator_range<helper_expr_iterator> helper_expr_range; typedef llvm::iterator_range<helper_expr_const_iterator> helper_expr_const_range; helper_expr_const_range source_exprs() const { return helper_expr_const_range(getSourceExprs().begin(), getSourceExprs().end()); } helper_expr_range source_exprs() { return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end()); } helper_expr_const_range destination_exprs() const { return helper_expr_const_range(getDestinationExprs().begin(), getDestinationExprs().end()); } helper_expr_range destination_exprs() { return helper_expr_range(getDestinationExprs().begin(), getDestinationExprs().end()); } helper_expr_const_range assignment_ops() const { return helper_expr_const_range(getAssignmentOps().begin(), getAssignmentOps().end()); } helper_expr_range assignment_ops() { return helper_expr_range(getAssignmentOps().begin(), getAssignmentOps().end()); } StmtRange children() { return StmtRange(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_copyin; } }; /// \brief This represents clause 'copyprivate' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp single copyprivate(a,b) /// \endcode /// In this example directive '#pragma omp single' has clause 'copyprivate' /// with the variables 'a' and 'b'. /// class OMPCopyprivateClause : public OMPVarListClause<OMPCopyprivateClause> { friend class OMPClauseReader; /// \brief Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. /// OMPCopyprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPCopyprivateClause>(OMPC_copyprivate, StartLoc, LParenLoc, EndLoc, N) {} /// \brief Build an empty clause. /// /// \param N Number of variables. /// explicit OMPCopyprivateClause(unsigned N) : OMPVarListClause<OMPCopyprivateClause>( OMPC_copyprivate, SourceLocation(), SourceLocation(), SourceLocation(), N) {} /// \brief Set list of helper expressions, required for proper codegen of the /// clause. These expressions represent source expression in the final /// assignment statement performed by the copyprivate clause. void setSourceExprs(ArrayRef<Expr *> SrcExprs); /// \brief Get the list of helper source expressions. MutableArrayRef<Expr *> getSourceExprs() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getSourceExprs() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } /// \brief Set list of helper expressions, required for proper codegen of the /// clause. These expressions represent destination expression in the final /// assignment statement performed by the copyprivate clause. void setDestinationExprs(ArrayRef<Expr *> DstExprs); /// \brief Get the list of helper destination expressions. 
MutableArrayRef<Expr *> getDestinationExprs() { return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size()); } ArrayRef<const Expr *> getDestinationExprs() const { return llvm::makeArrayRef(getSourceExprs().end(), varlist_size()); } /// \brief Set list of helper assignment expressions, required for proper /// codegen of the clause. These expressions are assignment expressions that /// assign source helper expressions to destination helper expressions /// correspondingly. void setAssignmentOps(ArrayRef<Expr *> AssignmentOps); /// \brief Get the list of helper assignment expressions. MutableArrayRef<Expr *> getAssignmentOps() { return MutableArrayRef<Expr *>(getDestinationExprs().end(), varlist_size()); } ArrayRef<const Expr *> getAssignmentOps() const { return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size()); } public: /// \brief Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. /// \param SrcExprs List of helper expressions for proper generation of /// assignment operation required for copyprivate clause. This list represents /// sources. /// \param DstExprs List of helper expressions for proper generation of /// assignment operation required for copyprivate clause. This list represents /// destinations. /// \param AssignmentOps List of helper expressions that represents assignment /// operation: /// \code /// DstExprs = SrcExprs; /// \endcode /// Required for proper codegen of final assignment performed by the /// copyprivate clause. /// static OMPCopyprivateClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs, ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps); /// \brief Creates an empty clause with \a N variables. /// /// \param C AST context. /// \param N The number of variables. /// static OMPCopyprivateClause *CreateEmpty(const ASTContext &C, unsigned N); typedef MutableArrayRef<Expr *>::iterator helper_expr_iterator; typedef ArrayRef<const Expr *>::iterator helper_expr_const_iterator; typedef llvm::iterator_range<helper_expr_iterator> helper_expr_range; typedef llvm::iterator_range<helper_expr_const_iterator> helper_expr_const_range; helper_expr_const_range source_exprs() const { return helper_expr_const_range(getSourceExprs().begin(), getSourceExprs().end()); } helper_expr_range source_exprs() { return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end()); } helper_expr_const_range destination_exprs() const { return helper_expr_const_range(getDestinationExprs().begin(), getDestinationExprs().end()); } helper_expr_range destination_exprs() { return helper_expr_range(getDestinationExprs().begin(), getDestinationExprs().end()); } helper_expr_const_range assignment_ops() const { return helper_expr_const_range(getAssignmentOps().begin(), getAssignmentOps().end()); } helper_expr_range assignment_ops() { return helper_expr_range(getAssignmentOps().begin(), getAssignmentOps().end()); } StmtRange children() { return StmtRange(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_copyprivate; } }; /// \brief This represents implicit clause 'flush' for the '#pragma omp flush' /// directive. 
/// This clause does not exist by itself, it can be only as a part of 'omp /// flush' directive. This clause is introduced to keep the original structure /// of \a OMPExecutableDirective class and its derivatives and to use the /// existing infrastructure of clauses with the list of variables. /// /// \code /// #pragma omp flush(a,b) /// \endcode /// In this example directive '#pragma omp flush' has implicit clause 'flush' /// with the variables 'a' and 'b'. /// class OMPFlushClause : public OMPVarListClause<OMPFlushClause> { /// \brief Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. /// OMPFlushClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPFlushClause>(OMPC_flush, StartLoc, LParenLoc, EndLoc, N) {} /// \brief Build an empty clause. /// /// \param N Number of variables. /// explicit OMPFlushClause(unsigned N) : OMPVarListClause<OMPFlushClause>(OMPC_flush, SourceLocation(), SourceLocation(), SourceLocation(), N) {} public: /// \brief Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. /// static OMPFlushClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL); /// \brief Creates an empty clause with \a N variables. /// /// \param C AST context. /// \param N The number of variables. /// static OMPFlushClause *CreateEmpty(const ASTContext &C, unsigned N); StmtRange children() { return StmtRange(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_flush; } }; } // end namespace clang #endif
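The clause classes above are easiest to read next to a concrete translation unit. Below is a minimal, hypothetical C example (not taken from the Clang sources; the function and variable names are invented) in which each pragma would be parsed into the corresponding OMP*Clause node described above: reduction, aligned, copyin, copyprivate, flush, and linear.

#include <omp.h>

/* hypothetical file-scope threadprivate variable, used only to exercise
   the 'copyin' clause modeled by OMPCopyinClause */
static float scale = 1.0f;
#pragma omp threadprivate(scale)

float clause_demo(float *a, int n)
{
    float sum = 0.0f;   /* combined via a 'reduction' clause */
    int last = 0;       /* broadcast from the single region  */

    #pragma omp parallel copyin(scale)
    {
        /* reduction(+:sum) and aligned(a:8) correspond to
           OMPReductionClause and OMPAlignedClause above */
        #pragma omp for simd reduction(+:sum) aligned(a:8)
        for (int i = 0; i < n; i++)
            sum += scale * a[i];

        /* copyprivate(last) corresponds to OMPCopyprivateClause */
        #pragma omp single copyprivate(last)
        last = n - 1;
    }

    #pragma omp flush(sum)   /* the implicit 'flush' clause of 'omp flush' */

    /* linear(j:1) corresponds to OMPLinearClause with step 1 */
    int j = 0;
    #pragma omp simd linear(j:1)
    for (int i = 0; i < n; i++) {
        a[j] = a[j] + sum;
        j += 1;
    }
    return sum + (float) last;
}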
GB_unaryop__identity_int8_int32.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__identity_int8_int32 // op(A') function: GB_tran__identity_int8_int32 // C type: int8_t // A type: int32_t // cast: int8_t cij = (int8_t) aij // unaryop: cij = aij #define GB_ATYPE \ int32_t #define GB_CTYPE \ int8_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, aij) \ int8_t z = (int8_t) aij ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_INT8 || GxB_NO_INT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__identity_int8_int32 ( int8_t *Cx, // Cx and Ax may be aliased int32_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__identity_int8_int32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__identity_int8_int32 // op(A') function: GB_tran__identity_int8_int32 // C type: int8_t // A type: int32_t // cast: int8_t cij = (int8_t) aij // unaryop: cij = aij #define GB_ATYPE \ int32_t #define GB_CTYPE \ int8_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, aij) \ int8_t z = (int8_t) aij ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_INT8 || GxB_NO_INT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__identity_int8_int32 ( int8_t *Cx, // Cx and Ax may be aliased int32_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__identity_int8_int32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__identity_int8_int32 // op(A') function: GB_tran__identity_int8_int32 // C type: int8_t // A type: int32_t // cast: int8_t cij = (int8_t) aij // unaryop: cij = aij #define GB_ATYPE \ int32_t #define GB_CTYPE \ int8_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, aij) \ int8_t z = (int8_t) aij ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_INT8 || GxB_NO_INT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__identity_int8_int32 ( int8_t *Cx, // Cx and Ax may be aliased int32_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__identity_int8_int32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
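Once the GB_* macros above are substituted, GB_unop__identity_int8_int32 reduces to a typecasting copy loop. The following standalone sketch (assuming nothing from the GraphBLAS build beyond <stdint.h> and OpenMP; the function name is invented) shows the expanded form:

#include <stdint.h>
#include <omp.h>

/* sketch of what the generated function amounts to after macro expansion:
   apply the typecasting identity operator entrywise, in parallel */
static void identity_int8_int32(int8_t *Cx, int32_t *Ax,
                                int64_t anz, int nthreads)
{
    int64_t p;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0; p < anz; p++)
    {
        int32_t aij = Ax[p];          /* GB_GETA    */
        int8_t  z   = (int8_t) aij;   /* GB_CASTING */
        Cx[p] = z;                    /* GB_OP      */
    }
}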
omp_dr.h
/* * OpenMP + dag_recorder */ /* this file provides macros with which users can easily turn on/off dag recorder for your OpenMP task parallel programs. provided macros are: (i) pragma_omp_task(option, statement) (ii) pragma_omp_taskc(option, callable) (iii) pragma_omp_taskwait they are respectively translated into #pragma omp task option statement #pragma omp task option callable() #pragma omp taskwait when DAG_RECORDER is set to a number >= 2, they insert instrumentation code for dag recorder. ideally we like to instrument OpenMP programs written with the regular pragma's, but I don't know how to do it. so we ask the programmer to write OpenMP fragment such as #pragma omp task shared(x) x = foo(); as pragma_omp_task(shared(x), x = foo()); */ #pragma once #include <omp.h> #include <dag_recorder.h> #define do_pragma(x) _Pragma( #x ) #define pragma_omp(x) do_pragma(omp x) #define pragma_omp_task_no_prof(options, statement) \ pragma_omp(task options) do { statement; } while(0) #define pragma_omp_taskc_no_prof(options, callable) \ pragma_omp_task_no_prof(options, callable()) #define pragma_omp_taskwait_no_prof pragma_omp(taskwait) #define pragma_omp_task_with_prof(options, statement) do { \ dr_dag_node * __c__ = 0; \ dr_dag_node * __t__ = dr_enter_create_task(&__c__); \ pragma_omp(task options) do { \ dr_start_task(__c__); \ statement; \ dr_end_task(); \ } while(0); \ dr_return_from_create_task(__t__); \ } while (0) #define pragma_omp_taskc_with_prof(options, callable) \ pragma_omp_task_with_prof(options, callable()) #define pragma_omp_taskwait_with_prof do { \ dr_dag_node * __t__ = dr_enter_wait_tasks(); \ pragma_omp(taskwait); \ dr_return_from_wait_tasks(__t__); \ } while(0) #if DAG_RECORDER>=2 #define pragma_omp_task(options, statement) \ pragma_omp_task_with_prof(options, statement) #define pragma_omp_taskc(options, callable) \ pragma_omp_taskc_with_prof(options, callable) #define pragma_omp_taskwait pragma_omp_taskwait_with_prof #define dr_get_max_workers() (omp_in_parallel() ? omp_get_num_threads() : omp_get_max_threads()) #define dr_get_worker() omp_get_thread_num() /* when using DAG Recorder with OpenMP task parallelism, the following usual sequence needs to be instrumented #pragma omp parallel #pragma omp single S; to the following { dr_dag_node * __t__ = dr_enter_other(); #pragma omp parallel #pragma omp single { dr_return_from_other(__t__); S; __t__ = dr_enter_other(); } dr_return_from_other(__t__); } */ #define pragma_omp_parallel_single(clause, S) \ do { \ dr_dag_node * __t__ = dr_enter_other(); \ pragma_omp(parallel) { \ pragma_omp(single clause) { \ dr_return_from_other(__t__); \ S \ __t__ = dr_enter_other(); \ } \ } \ dr_return_from_other(__t__); \ } while(0) #else #define pragma_omp_task(options, statement) \ pragma_omp_task_no_prof(options, statement) #define pragma_omp_taskc(options, callable) \ pragma_omp_taskc_no_prof(options, callable) #define pragma_omp_taskwait pragma_omp_taskwait_no_prof #define pragma_omp_parallel_single(clause, S) \ do { \ pragma_omp(parallel) { \ pragma_omp(single clause) { \ S \ } \ } \ } while(0) #endif
/* * OpenMP + dag_recorder */ /* * this file provides macros with which users can easily turn on/off dag * recorder for your OpenMP task parallel programs. * * provided macros are: * * (i) pragma_omp_task(option, statement) (ii) pragma_omp_taskc(option, * callable) (iii) pragma_omp_taskwait * * they are respectively translated into statement callable() * * when DAG_RECORDER is set to a number >= 2, they insert instrumentation code * for dag recorder. * * ideally we like to instrument OpenMP programs written with the regular * pragma's, but I don't know how to do it. so we ask the programmer to * write OpenMP fragment such as x = foo(); * * as * pragma_omp_task(shared(x), x = foo()); * */ #pragma once #include <omp.h> #include <dag_recorder.h> #define do_pragma(x) _Pragma( #x ) #define pragma_omp(x) do_pragma(omp x) #define pragma_omp_task_no_prof(options, statement) \ pragma_omp(task options) do { statement; } while(0) #define pragma_omp_taskc_no_prof(options, callable) \ pragma_omp_task_no_prof(options, callable()) #define pragma_omp_taskwait_no_prof pragma_omp(taskwait) #define pragma_omp_task_with_prof(options, statement) do { \ dr_dag_node * __c__ = 0; \ dr_dag_node * __t__ = dr_enter_create_task(&__c__); \ pragma_omp(task options) do { \ dr_start_task(__c__); \ statement; \ dr_end_task(); \ } while(0); \ dr_return_from_create_task(__t__); \ } while (0) #define pragma_omp_taskc_with_prof(options, callable) \ pragma_omp_task_with_prof(options, callable()) #define pragma_omp_taskwait_with_prof do { \ dr_dag_node * __t__ = dr_enter_wait_tasks(); \ pragma_omp(taskwait); \ dr_return_from_wait_tasks(__t__); \ } while(0) #if DAG_RECORDER>=2 #define pragma_omp_task(options, statement) \ pragma_omp_task_with_prof(options, statement) #define pragma_omp_taskc(options, callable) \ pragma_omp_taskc_with_prof(options, callable) #define pragma_omp_taskwait pragma_omp_taskwait_with_prof #define dr_get_max_workers() (omp_in_parallel() ? omp_get_num_threads() : omp_get_max_threads()) #define dr_get_worker() omp_get_thread_num() /* * when using DAG Recorder with OpenMP task parallelism, the following usual * sequence needs to be instrumented * * S; * to the following { dr_dag_node * __t__ = dr_enter_other(); * dr_return_from_other(__t__); S; __t__ = dr_enter_other(); * * dr_return_from_other(__t__); } * */ #define pragma_omp_parallel_single(clause, S) \ do { \ dr_dag_node * __t__ = dr_enter_other(); \ pragma_omp(parallel) { \ pragma_omp(single clause) { \ dr_return_from_other(__t__); \ S \ __t__ = dr_enter_other(); \ } \ } \ dr_return_from_other(__t__); \ } while(0) #else #define pragma_omp_task(options, statement) \ pragma_omp_task_no_prof(options, statement) #define pragma_omp_taskc(options, callable) \ pragma_omp_taskc_no_prof(options, callable) #define pragma_omp_taskwait pragma_omp_taskwait_no_prof #define pragma_omp_parallel_single(clause, S) \ do { \ pragma_omp(parallel) { \ pragma_omp(single clause) { \ S \ } \ } \ } while(0) #endif
/* * OpenMP + dag_recorder */ /* * this file provides macros with which users can easily turn on/off dag * recorder for your OpenMP task parallel programs. * * provided macros are: * * (i) pragma_omp_task(option, statement) (ii) pragma_omp_taskc(option, * callable) (iii) pragma_omp_taskwait * * they are respectively translated into #pragma omp task option statement * * #pragma omp task option callable() * * #pragma omp taskwait * * when DAG_RECORDER is set to a number >= 2, they insert instrumentation code * for dag recorder. * * ideally we like to instrument OpenMP programs written with the regular * pragma's, but I don't know how to do it. so we ask the programmer to * write OpenMP fragment such as * * #pragma omp task shared(x) x = foo(); * * as * pragma_omp_task(shared(x), x = foo()); * */ #pragma once #include <omp.h> #include <dag_recorder.h> #define do_pragma(x) _Pragma( #x ) #define pragma_omp(x) do_pragma(omp x) #define pragma_omp_task_no_prof(options, statement) \ pragma_omp(task options) do { statement; } while(0) #define pragma_omp_taskc_no_prof(options, callable) \ pragma_omp_task_no_prof(options, callable()) #define pragma_omp_taskwait_no_prof pragma_omp(taskwait) #define pragma_omp_task_with_prof(options, statement) do { \ dr_dag_node * __c__ = 0; \ dr_dag_node * __t__ = dr_enter_create_task(&__c__); \ pragma_omp(task options) do { \ dr_start_task(__c__); \ statement; \ dr_end_task(); \ } while(0); \ dr_return_from_create_task(__t__); \ } while (0) #define pragma_omp_taskc_with_prof(options, callable) \ pragma_omp_task_with_prof(options, callable()) #define pragma_omp_taskwait_with_prof do { \ dr_dag_node * __t__ = dr_enter_wait_tasks(); \ pragma_omp(taskwait); \ dr_return_from_wait_tasks(__t__); \ } while(0) #if DAG_RECORDER>=2 #define pragma_omp_task(options, statement) \ pragma_omp_task_with_prof(options, statement) #define pragma_omp_taskc(options, callable) \ pragma_omp_taskc_with_prof(options, callable) #define pragma_omp_taskwait pragma_omp_taskwait_with_prof #define dr_get_max_workers() (omp_in_parallel() ? omp_get_num_threads() : omp_get_max_threads()) #define dr_get_worker() omp_get_thread_num() /* * when using DAG Recorder with OpenMP task parallelism, the following usual * sequence needs to be instrumented #pragma omp parallel #pragma omp single * S; * * to the following { dr_dag_node * __t__ = dr_enter_other(); #pragma omp * parallel #pragma omp single { dr_return_from_other(__t__); S; __t__ = * dr_enter_other(); } dr_return_from_other(__t__); } * */ #define pragma_omp_parallel_single(clause, S) \ do { \ dr_dag_node * __t__ = dr_enter_other(); \ pragma_omp(parallel) { \ pragma_omp(single clause) { \ dr_return_from_other(__t__); \ S \ __t__ = dr_enter_other(); \ } \ } \ dr_return_from_other(__t__); \ } while(0) #else #define pragma_omp_task(options, statement) \ pragma_omp_task_no_prof(options, statement) #define pragma_omp_taskc(options, callable) \ pragma_omp_taskc_no_prof(options, callable) #define pragma_omp_taskwait pragma_omp_taskwait_no_prof #define pragma_omp_parallel_single(clause, S) \ do { \ pragma_omp(parallel) { \ pragma_omp(single clause) { \ S \ } \ } \ } while(0) #endif
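As the comment block explains, callers write the wrapper macros instead of raw pragmas so that dag recorder instrumentation can be switched on and off with DAG_RECORDER. A hedged usage sketch (a recursive Fibonacci; the program is invented and not part of the header) could look like this:

#include "omp_dr.h"

/* recursive Fibonacci written with the wrapper macros, so the same source
   builds with or without dag recorder instrumentation */
static long fib(long n)
{
    if (n < 2) return n;
    long x, y;
    pragma_omp_task(shared(x) firstprivate(n), x = fib(n - 1));
    pragma_omp_task(shared(y) firstprivate(n), y = fib(n - 2));
    pragma_omp_taskwait;
    return x + y;
}

int main(void)
{
    long r = 0;
    /* the instrumented parallel/single entry sequence from the header */
    pragma_omp_parallel_single(nowait, { r = fib(30); });
    return (int) r;
}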
statistical_analysis.c
#include<stdio.h> #include<omp.h> #include<math.h> #define INT_MAX 9999999 #define INT_MIN -999999 int main() { int n; scanf("%d", &n); int marks[n]; for(int i=0; i<n; i++) { scanf("%d", (marks+i)); } int max = INT_MIN; int min = INT_MAX; int sum = 0; #pragma omp parallel for shared(marks) reduction (+:sum) for(int i=0; i<n; i++) { int current = marks[i]; sum+=current; #pragma omp critical { if(current>max) max = current; if(current < min) min = current; } } double average = sum/(double)n; double sqDiff = 0; #pragma omp parallel for reduction (+:sqDiff) for(int i=0; i<n; i++) { sqDiff += pow(average-(double)marks[i],2); } double sd = sqrt(sqDiff/n); printf("Maximum is : %d\n", max); printf("Minimum is : %d\n", min); printf("Mean is : %2f\n", average); printf("Standard Deviation is : %2f\n", sd); return 0; }
#include<stdio.h> #include<omp.h> #include<math.h> #define INT_MAX 9999999 #define INT_MIN -999999 int main() { int n; scanf("%d", &n); int marks[n]; for (int i = 0; i < n; i++) { scanf("%d", (marks + i)); } int max = INT_MIN; int min = INT_MAX; int sum = 0; for (int i = 0; i < n; i++) { int current = marks[i]; sum += current; if (current > max) max = current; if (current < min) min = current; } double average = sum / (double)n; double sqDiff = 0; for (int i = 0; i < n; i++) { sqDiff += pow(average - (double)marks[i], 2); } double sd = sqrt(sqDiff / n); printf("Maximum is : %d\n", max); printf("Minimum is : %d\n", min); printf("Mean is : %2f\n", average); printf("Standard Deviation is : %2f\n", sd); return 0; }
#include<stdio.h> #include<omp.h> #include<math.h> #define INT_MAX 9999999 #define INT_MIN -999999 int main() { int n; scanf("%d", &n); int marks[n]; for (int i = 0; i < n; i++) { scanf("%d", (marks + i)); } int max = INT_MIN; int min = INT_MAX; int sum = 0; #pragma omp parallel for shared(marks) reduction (+:sum) for (int i = 0; i < n; i++) { int current = marks[i]; sum += current; #pragma omp critical { if (current > max) max = current; if (current < min) min = current; } } double average = sum / (double)n; double sqDiff = 0; #pragma omp parallel for reduction (+:sqDiff) for (int i = 0; i < n; i++) { sqDiff += pow(average - (double)marks[i], 2); } double sd = sqrt(sqDiff / n); printf("Maximum is : %d\n", max); printf("Minimum is : %d\n", min); printf("Mean is : %2f\n", average); printf("Standard Deviation is : %2f\n", sd); return 0; }
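The parallel version above protects the running max and min with a critical section inside the reduction loop. Assuming an OpenMP 3.1 or later compiler, the same statistics can be computed with min/max reduction clauses and no critical section; the sketch below is an illustrative alternative with an invented helper name, not a replacement taken from the dataset:

#include <stdio.h>
#include <math.h>
#include <omp.h>

/* compute max, min, mean, and standard deviation of marks[0..n-1];
   assumes n > 0 */
void summarize(const int *marks, int n)
{
    int max = marks[0], min = marks[0];
    long sum = 0;

    /* min/max reductions (OpenMP 3.1+) replace the critical section */
    #pragma omp parallel for reduction(+:sum) reduction(max:max) reduction(min:min)
    for (int i = 0; i < n; i++) {
        sum += marks[i];
        if (marks[i] > max) max = marks[i];
        if (marks[i] < min) min = marks[i];
    }

    double mean = (double) sum / n;
    double sqDiff = 0.0;
    #pragma omp parallel for reduction(+:sqDiff)
    for (int i = 0; i < n; i++)
        sqDiff += (mean - marks[i]) * (mean - marks[i]);

    printf("Maximum is : %d\n", max);
    printf("Minimum is : %d\n", min);
    printf("Mean is : %f\n", mean);
    printf("Standard Deviation is : %f\n", sqrt(sqDiff / n));
}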
decorate.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % DDDD EEEEE CCCC OOO RRRR AAA TTTTT EEEEE % % D D E C O O R R A A T E % % D D EEE C O O RRRR AAAAA T EEE % % D D E C O O R R A A T E % % DDDD EEEEE CCCC OOO R R A A T EEEEE % % % % % % MagickCore Image Decoration Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/cache-view.h" #include "magick/channel.h" #include "magick/color-private.h" #include "magick/colorspace-private.h" #include "magick/composite.h" #include "magick/decorate.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/image.h" #include "magick/memory_.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/pixel-accessor.h" #include "magick/pixel-private.h" #include "magick/quantum.h" #include "magick/resource_.h" #include "magick/thread-private.h" #include "magick/transform.h" /* Define declarations. */ #define AccentuateModulate ScaleCharToQuantum(80) #define HighlightModulate ScaleCharToQuantum(125) #define ShadowModulate ScaleCharToQuantum(135) #define DepthModulate ScaleCharToQuantum(185) #define TroughModulate ScaleCharToQuantum(110) /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % B o r d e r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % BorderImage() surrounds the image with a border of the color defined by % the bordercolor member of the image structure. The width and height % of the border are defined by the corresponding members of the border_info % structure. % % The format of the BorderImage method is: % % Image *BorderImage(const Image *image,const RectangleInfo *border_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o border_info: Define the width and height of the border. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport Image *BorderImage(const Image *image, const RectangleInfo *border_info,ExceptionInfo *exception) { Image *border_image, *clone_image; FrameInfo frame_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(border_info != (RectangleInfo *) NULL); frame_info.width=image->columns+(border_info->width << 1); frame_info.height=image->rows+(border_info->height << 1); frame_info.x=(ssize_t) border_info->width; frame_info.y=(ssize_t) border_info->height; frame_info.inner_bevel=0; frame_info.outer_bevel=0; clone_image=CloneImage(image,0,0,MagickTrue,exception); if (clone_image == (Image *) NULL) return((Image *) NULL); clone_image->matte_color=image->border_color; border_image=FrameImage(clone_image,&frame_info,exception); clone_image=DestroyImage(clone_image); if (border_image != (Image *) NULL) border_image->matte_color=image->matte_color; return(border_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % F r a m e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % FrameImage() adds a simulated three-dimensional border around the image. % The color of the border is defined by the matte_color member of image. % Members width and height of frame_info specify the border width of the % vertical and horizontal sides of the frame. Members inner and outer % indicate the width of the inner and outer shadows of the frame. % % The format of the FrameImage method is: % % Image *FrameImage(const Image *image,const FrameInfo *frame_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o frame_info: Define the width and height of the frame and its bevels. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *FrameImage(const Image *image,const FrameInfo *frame_info, ExceptionInfo *exception) { #define FrameImageTag "Frame/Image" CacheView *image_view, *frame_view; Image *frame_image; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket accentuate, border, highlight, matte, shadow, trough; register ssize_t x; size_t bevel_width, height, width; ssize_t y; /* Check frame geometry. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(frame_info != (FrameInfo *) NULL); if ((frame_info->outer_bevel < 0) || (frame_info->inner_bevel < 0)) ThrowImageException(OptionError,"FrameIsLessThanImageSize"); bevel_width=(size_t) (frame_info->outer_bevel+frame_info->inner_bevel); x=(ssize_t) frame_info->width-frame_info->x-bevel_width; y=(ssize_t) frame_info->height-frame_info->y-bevel_width; if ((x < (ssize_t) image->columns) || (y < (ssize_t) image->rows)) ThrowImageException(OptionError,"FrameIsLessThanImageSize"); /* Initialize framed image attributes. 
*/ frame_image=CloneImage(image,frame_info->width,frame_info->height,MagickTrue, exception); if (frame_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(frame_image,DirectClass) == MagickFalse) { InheritException(exception,&frame_image->exception); frame_image=DestroyImage(frame_image); return((Image *) NULL); } if ((IsPixelGray(&frame_image->border_color) == MagickFalse) && (IsGrayColorspace(frame_image->colorspace) != MagickFalse)) (void) SetImageColorspace(frame_image,sRGBColorspace); if ((frame_image->border_color.opacity != OpaqueOpacity) && (frame_image->matte == MagickFalse)) (void) SetImageAlphaChannel(frame_image,OpaqueAlphaChannel); frame_image->page=image->page; if ((image->page.width != 0) && (image->page.height != 0)) { frame_image->page.width+=frame_image->columns-image->columns; frame_image->page.height+=frame_image->rows-image->rows; } /* Initialize 3D effects color. */ GetMagickPixelPacket(frame_image,&matte); matte.colorspace=sRGBColorspace; SetMagickPixelPacket(frame_image,&image->matte_color,(IndexPacket *) NULL, &matte); GetMagickPixelPacket(frame_image,&border); border.colorspace=sRGBColorspace; SetMagickPixelPacket(frame_image,&image->border_color,(IndexPacket *) NULL, &border); GetMagickPixelPacket(frame_image,&accentuate); accentuate.red=(MagickRealType) (QuantumScale*((QuantumRange- AccentuateModulate)*matte.red+(QuantumRange*AccentuateModulate))); accentuate.green=(MagickRealType) (QuantumScale*((QuantumRange- AccentuateModulate)*matte.green+(QuantumRange*AccentuateModulate))); accentuate.blue=(MagickRealType) (QuantumScale*((QuantumRange- AccentuateModulate)*matte.blue+(QuantumRange*AccentuateModulate))); accentuate.opacity=matte.opacity; GetMagickPixelPacket(frame_image,&highlight); highlight.red=(MagickRealType) (QuantumScale*((QuantumRange- HighlightModulate)*matte.red+(QuantumRange*HighlightModulate))); highlight.green=(MagickRealType) (QuantumScale*((QuantumRange- HighlightModulate)*matte.green+(QuantumRange*HighlightModulate))); highlight.blue=(MagickRealType) (QuantumScale*((QuantumRange- HighlightModulate)*matte.blue+(QuantumRange*HighlightModulate))); highlight.opacity=matte.opacity; GetMagickPixelPacket(frame_image,&shadow); shadow.red=QuantumScale*matte.red*ShadowModulate; shadow.green=QuantumScale*matte.green*ShadowModulate; shadow.blue=QuantumScale*matte.blue*ShadowModulate; shadow.opacity=matte.opacity; GetMagickPixelPacket(frame_image,&trough); trough.red=QuantumScale*matte.red*TroughModulate; trough.green=QuantumScale*matte.green*TroughModulate; trough.blue=QuantumScale*matte.blue*TroughModulate; trough.opacity=matte.opacity; if (image->colorspace == CMYKColorspace) { ConvertRGBToCMYK(&matte); ConvertRGBToCMYK(&border); ConvertRGBToCMYK(&accentuate); ConvertRGBToCMYK(&highlight); ConvertRGBToCMYK(&shadow); ConvertRGBToCMYK(&trough); } status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); frame_view=AcquireAuthenticCacheView(frame_image,exception); height=(size_t) (frame_info->outer_bevel+(frame_info->y-bevel_width)+ frame_info->inner_bevel); if (height != 0) { register IndexPacket *magick_restrict frame_indexes; register ssize_t x; register PixelPacket *magick_restrict q; /* Draw top of ornamental border. */ q=QueueCacheViewAuthenticPixels(frame_view,0,0,frame_image->columns, height,exception); frame_indexes=GetCacheViewAuthenticIndexQueue(frame_view); if (q != (PixelPacket *) NULL) { /* Draw top of ornamental border. 
*/ for (y=0; y < (ssize_t) frame_info->outer_bevel; y++) { for (x=0; x < (ssize_t) (frame_image->columns-y); x++) { if (x < y) SetPixelPacket(frame_image,&highlight,q,frame_indexes); else SetPixelPacket(frame_image,&accentuate,q,frame_indexes); q++; frame_indexes++; } for ( ; x < (ssize_t) frame_image->columns; x++) { SetPixelPacket(frame_image,&shadow,q,frame_indexes); q++; frame_indexes++; } } for (y=0; y < (ssize_t) (frame_info->y-bevel_width); y++) { for (x=0; x < (ssize_t) frame_info->outer_bevel; x++) { SetPixelPacket(frame_image,&highlight,q,frame_indexes); q++; frame_indexes++; } width=frame_image->columns-2*frame_info->outer_bevel; for (x=0; x < (ssize_t) width; x++) { SetPixelPacket(frame_image,&matte,q,frame_indexes); q++; frame_indexes++; } for (x=0; x < (ssize_t) frame_info->outer_bevel; x++) { SetPixelPacket(frame_image,&shadow,q,frame_indexes); q++; frame_indexes++; } } for (y=0; y < (ssize_t) frame_info->inner_bevel; y++) { for (x=0; x < (ssize_t) frame_info->outer_bevel; x++) { SetPixelPacket(frame_image,&highlight,q,frame_indexes); q++; frame_indexes++; } for (x=0; x < (ssize_t) (frame_info->x-bevel_width); x++) { SetPixelPacket(frame_image,&matte,q,frame_indexes); q++; frame_indexes++; } width=image->columns+((size_t) frame_info->inner_bevel << 1)- y; for (x=0; x < (ssize_t) width; x++) { if (x < y) SetPixelPacket(frame_image,&shadow,q,frame_indexes); else SetPixelPacket(frame_image,&trough,q,frame_indexes); q++; frame_indexes++; } for ( ; x < (ssize_t) (image->columns+2*frame_info->inner_bevel); x++) { SetPixelPacket(frame_image,&highlight,q,frame_indexes); q++; frame_indexes++; } width=frame_info->width-frame_info->x-image->columns-bevel_width; for (x=0; x < (ssize_t) width; x++) { SetPixelPacket(frame_image,&matte,q,frame_indexes); q++; frame_indexes++; } for (x=0; x < (ssize_t) frame_info->outer_bevel; x++) { SetPixelPacket(frame_image,&shadow,q,frame_indexes); q++; frame_indexes++; } } (void) SyncCacheViewAuthenticPixels(frame_view,exception); } } /* Draw sides of ornamental border. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,frame_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *magick_restrict frame_indexes; register ssize_t x; register PixelPacket *magick_restrict q; /* Initialize scanline with matte color. */ if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(frame_view,0,frame_info->y+y, frame_image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } frame_indexes=GetCacheViewAuthenticIndexQueue(frame_view); for (x=0; x < (ssize_t) frame_info->outer_bevel; x++) { SetPixelPacket(frame_image,&highlight,q,frame_indexes); q++; frame_indexes++; } for (x=0; x < (ssize_t) (frame_info->x-bevel_width); x++) { SetPixelPacket(frame_image,&matte,q,frame_indexes); q++; frame_indexes++; } for (x=0; x < (ssize_t) frame_info->inner_bevel; x++) { SetPixelPacket(frame_image,&shadow,q,frame_indexes); q++; frame_indexes++; } /* Set frame interior pixels. 
*/ for (x=0; x < (ssize_t) image->columns; x++) { SetPixelPacket(frame_image,&border,q,frame_indexes); q++; frame_indexes++; } for (x=0; x < (ssize_t) frame_info->inner_bevel; x++) { SetPixelPacket(frame_image,&highlight,q,frame_indexes); q++; frame_indexes++; } width=frame_info->width-frame_info->x-image->columns-bevel_width; for (x=0; x < (ssize_t) width; x++) { SetPixelPacket(frame_image,&matte,q,frame_indexes); q++; frame_indexes++; } for (x=0; x < (ssize_t) frame_info->outer_bevel; x++) { SetPixelPacket(frame_image,&shadow,q,frame_indexes); q++; frame_indexes++; } if (SyncCacheViewAuthenticPixels(frame_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_FrameImage) #endif proceed=SetImageProgress(image,FrameImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } height=(size_t) (frame_info->inner_bevel+frame_info->height- frame_info->y-image->rows-bevel_width+frame_info->outer_bevel); if (height != 0) { register IndexPacket *magick_restrict frame_indexes; register ssize_t x; register PixelPacket *magick_restrict q; /* Draw bottom of ornamental border. */ q=QueueCacheViewAuthenticPixels(frame_view,0,(ssize_t) (frame_image->rows- height),frame_image->columns,height,exception); if (q != (PixelPacket *) NULL) { /* Draw bottom of ornamental border. */ frame_indexes=GetCacheViewAuthenticIndexQueue(frame_view); for (y=frame_info->inner_bevel-1; y >= 0; y--) { for (x=0; x < (ssize_t) frame_info->outer_bevel; x++) { SetPixelPacket(frame_image,&highlight,q,frame_indexes); q++; frame_indexes++; } for (x=0; x < (ssize_t) (frame_info->x-bevel_width); x++) { SetPixelPacket(frame_image,&matte,q,frame_indexes); q++; frame_indexes++; } for (x=0; x < y; x++) { SetPixelPacket(frame_image,&shadow,q,frame_indexes); q++; frame_indexes++; } for ( ; x < (ssize_t) (image->columns+2*frame_info->inner_bevel); x++) { if (x >= (ssize_t) (image->columns+2*frame_info->inner_bevel-y)) SetPixelPacket(frame_image,&highlight,q,frame_indexes); else SetPixelPacket(frame_image,&accentuate,q,frame_indexes); q++; frame_indexes++; } width=frame_info->width-frame_info->x-image->columns-bevel_width; for (x=0; x < (ssize_t) width; x++) { SetPixelPacket(frame_image,&matte,q,frame_indexes); q++; frame_indexes++; } for (x=0; x < (ssize_t) frame_info->outer_bevel; x++) { SetPixelPacket(frame_image,&shadow,q,frame_indexes); q++; frame_indexes++; } } height=frame_info->height-frame_info->y-image->rows-bevel_width; for (y=0; y < (ssize_t) height; y++) { for (x=0; x < (ssize_t) frame_info->outer_bevel; x++) { SetPixelPacket(frame_image,&highlight,q,frame_indexes); q++; frame_indexes++; } width=frame_image->columns-2*frame_info->outer_bevel; for (x=0; x < (ssize_t) width; x++) { SetPixelPacket(frame_image,&matte,q,frame_indexes); q++; frame_indexes++; } for (x=0; x < (ssize_t) frame_info->outer_bevel; x++) { SetPixelPacket(frame_image,&shadow,q,frame_indexes); q++; frame_indexes++; } } for (y=frame_info->outer_bevel-1; y >= 0; y--) { for (x=0; x < y; x++) { SetPixelPacket(frame_image,&highlight,q,frame_indexes); q++; frame_indexes++; } for ( ; x < (ssize_t) frame_image->columns; x++) { if (x >= (ssize_t) (frame_image->columns-y)) SetPixelPacket(frame_image,&shadow,q,frame_indexes); else SetPixelPacket(frame_image,&trough,q,frame_indexes); q++; frame_indexes++; } } (void) SyncCacheViewAuthenticPixels(frame_view,exception); } } 
frame_view=DestroyCacheView(frame_view); image_view=DestroyCacheView(image_view); x=(ssize_t) (frame_info->outer_bevel+(frame_info->x-bevel_width)+ frame_info->inner_bevel); y=(ssize_t) (frame_info->outer_bevel+(frame_info->y-bevel_width)+ frame_info->inner_bevel); if (status != MagickFalse) status=CompositeImage(frame_image,image->compose,image,x,y); if (status == MagickFalse) frame_image=DestroyImage(frame_image); return(frame_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R a i s e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RaiseImage() creates a simulated three-dimensional button-like effect % by lightening and darkening the edges of the image. Members width and % height of raise_info define the width of the vertical and horizontal % edge of the effect. % % The format of the RaiseImage method is: % % MagickBooleanType RaiseImage(const Image *image, % const RectangleInfo *raise_info,const MagickBooleanType raise) % % A description of each parameter follows: % % o image: the image. % % o raise_info: Define the width and height of the raise area. % % o raise: A value other than zero creates a 3-D raise effect, % otherwise it has a lowered effect. % */ MagickExport MagickBooleanType RaiseImage(Image *image, const RectangleInfo *raise_info,const MagickBooleanType raise) { #define AccentuateFactor ScaleCharToQuantum(135) #define HighlightFactor ScaleCharToQuantum(190) #define ShadowFactor ScaleCharToQuantum(190) #define RaiseImageTag "Raise/Image" #define TroughFactor ScaleCharToQuantum(135) CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; Quantum foreground, background; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(raise_info != (RectangleInfo *) NULL); exception=(&image->exception); if ((image->columns <= (raise_info->width << 1)) || (image->rows <= (raise_info->height << 1))) ThrowBinaryException(OptionError,"ImageSizeMustExceedBevelWidth", image->filename); foreground=QuantumRange; background=(Quantum) 0; if (raise == MagickFalse) { foreground=(Quantum) 0; background=QuantumRange; } if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); /* Raise image. 
*/ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,raise_info->height,1) #endif for (y=0; y < (ssize_t) raise_info->height; y++) { register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < y; x++) { SetPixelRed(q,ClampToQuantum(QuantumScale*((MagickRealType) GetPixelRed(q)*HighlightFactor+(MagickRealType) foreground* (QuantumRange-HighlightFactor)))); SetPixelGreen(q,ClampToQuantum(QuantumScale*((MagickRealType) GetPixelGreen(q)*HighlightFactor+(MagickRealType) foreground* (QuantumRange-HighlightFactor)))); SetPixelBlue(q,ClampToQuantum(QuantumScale*((MagickRealType) GetPixelBlue(q)*HighlightFactor+(MagickRealType) foreground* (QuantumRange-HighlightFactor)))); q++; } for ( ; x < (ssize_t) (image->columns-y); x++) { SetPixelRed(q,ClampToQuantum(QuantumScale*((MagickRealType) GetPixelRed(q)*AccentuateFactor+(MagickRealType) foreground* (QuantumRange-AccentuateFactor)))); SetPixelGreen(q,ClampToQuantum(QuantumScale*((MagickRealType) GetPixelGreen(q)*AccentuateFactor+(MagickRealType) foreground* (QuantumRange-AccentuateFactor)))); SetPixelBlue(q,ClampToQuantum(QuantumScale*((MagickRealType) GetPixelBlue(q)*AccentuateFactor+(MagickRealType) foreground* (QuantumRange-AccentuateFactor)))); q++; } for ( ; x < (ssize_t) image->columns; x++) { SetPixelRed(q,ClampToQuantum(QuantumScale*((MagickRealType) GetPixelRed(q)*ShadowFactor+(MagickRealType) background* (QuantumRange-ShadowFactor)))); SetPixelGreen(q,ClampToQuantum(QuantumScale*((MagickRealType) GetPixelGreen(q)*ShadowFactor+(MagickRealType) background* (QuantumRange-ShadowFactor)))); SetPixelBlue(q,ClampToQuantum(QuantumScale*((MagickRealType) GetPixelBlue(q)*ShadowFactor+(MagickRealType) background* (QuantumRange-ShadowFactor)))); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_RaiseImage) #endif proceed=SetImageProgress(image,RaiseImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows-2*raise_info->height,1) #endif for (y=(ssize_t) raise_info->height; y < (ssize_t) (image->rows-raise_info->height); y++) { register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) raise_info->width; x++) { SetPixelRed(q,ClampToQuantum(QuantumScale*((MagickRealType) GetPixelRed(q)*HighlightFactor+(MagickRealType) foreground* (QuantumRange-HighlightFactor)))); SetPixelGreen(q,ClampToQuantum(QuantumScale*((MagickRealType) GetPixelGreen(q)*HighlightFactor+(MagickRealType) foreground* (QuantumRange-HighlightFactor)))); SetPixelBlue(q,ClampToQuantum(QuantumScale*((MagickRealType) GetPixelBlue(q)*HighlightFactor+(MagickRealType) foreground* (QuantumRange-HighlightFactor)))); q++; } for ( ; x < (ssize_t) 
(image->columns-raise_info->width); x++) q++; for ( ; x < (ssize_t) image->columns; x++) { SetPixelRed(q,ClampToQuantum(QuantumScale*((MagickRealType) GetPixelRed(q)*ShadowFactor+(MagickRealType) background* (QuantumRange-ShadowFactor)))); SetPixelGreen(q,ClampToQuantum(QuantumScale*((MagickRealType) GetPixelGreen(q)*ShadowFactor+(MagickRealType) background* (QuantumRange-ShadowFactor)))); SetPixelBlue(q,ClampToQuantum(QuantumScale*((MagickRealType) GetPixelBlue(q)*ShadowFactor+(MagickRealType) background* (QuantumRange-ShadowFactor)))); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_RaiseImage) #endif proceed=SetImageProgress(image,RaiseImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows-raise_info->height,1) #endif for (y=(ssize_t) (image->rows-raise_info->height); y < (ssize_t) image->rows; y++) { register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) (image->rows-y); x++) { SetPixelRed(q,ClampToQuantum(QuantumScale*((MagickRealType) GetPixelRed(q)*HighlightFactor+(MagickRealType) foreground* (QuantumRange-HighlightFactor)))); SetPixelGreen(q,ClampToQuantum(QuantumScale*((MagickRealType) GetPixelGreen(q)*HighlightFactor+(MagickRealType) foreground* (QuantumRange-HighlightFactor)))); SetPixelBlue(q,ClampToQuantum(QuantumScale*((MagickRealType) GetPixelBlue(q)*HighlightFactor+(MagickRealType) foreground* (QuantumRange-HighlightFactor)))); q++; } for ( ; x < (ssize_t) (image->columns-(image->rows-y)); x++) { SetPixelRed(q,ClampToQuantum(QuantumScale*((MagickRealType) GetPixelRed(q)*TroughFactor+(MagickRealType) background* (QuantumRange-TroughFactor)))); SetPixelGreen(q,ClampToQuantum(QuantumScale*((MagickRealType) GetPixelGreen(q)*TroughFactor+(MagickRealType) background* (QuantumRange-TroughFactor)))); SetPixelBlue(q,ClampToQuantum(QuantumScale*((MagickRealType) GetPixelBlue(q)*TroughFactor+(MagickRealType) background* (QuantumRange-TroughFactor)))); q++; } for ( ; x < (ssize_t) image->columns; x++) { SetPixelRed(q,ClampToQuantum(QuantumScale*((MagickRealType) GetPixelRed(q)*ShadowFactor+(MagickRealType) background* (QuantumRange-ShadowFactor)))); SetPixelGreen(q,ClampToQuantum(QuantumScale*((MagickRealType) GetPixelGreen(q)*ShadowFactor+(MagickRealType) background* (QuantumRange-ShadowFactor)))); SetPixelBlue(q,ClampToQuantum(QuantumScale*((MagickRealType) GetPixelBlue(q)*ShadowFactor+(MagickRealType) background* (QuantumRange-ShadowFactor)))); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_RaiseImage) #endif proceed=SetImageProgress(image,RaiseImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); }
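The doc comments above give the full public signatures of FrameImage() and RaiseImage(). As a rough illustration only (none of this appears in the source), here is a minimal sketch of driving the two together, assuming an already-loaded Image *image and an ExceptionInfo *exception; the 20/6/8-pixel sizes are arbitrary example values:

  FrameInfo
    frame_info;

  RectangleInfo
    raise_info;

  Image
    *framed;

  /*
    20-pixel frame with 6-pixel outer and inner bevels (illustrative values).
    width/height are the final framed dimensions, as in BorderImage().
  */
  frame_info.width=image->columns+(20 << 1);
  frame_info.height=image->rows+(20 << 1);
  frame_info.x=20;
  frame_info.y=20;
  frame_info.outer_bevel=6;
  frame_info.inner_bevel=6;
  framed=FrameImage(image,&frame_info,exception);
  if (framed != (Image *) NULL)
    {
      /*
        8-pixel raised-button edge on the framed result.
      */
      raise_info.width=8;
      raise_info.height=8;
      raise_info.x=0;
      raise_info.y=0;
      (void) RaiseImage(framed,&raise_info,MagickTrue);
      framed=DestroyImage(framed);
    }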
/* * Include declarations. */ #include "magick/studio.h" #include "magick/cache-view.h" #include "magick/channel.h" #include "magick/color-private.h" #include "magick/colorspace-private.h" #include "magick/composite.h" #include "magick/decorate.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/image.h" #include "magick/memory_.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/pixel-accessor.h" #include "magick/pixel-private.h" #include "magick/quantum.h" #include "magick/resource_.h" #include "magick/thread-private.h" #include "magick/transform.h" /* * Define declarations. */ #define AccentuateModulate ScaleCharToQuantum(80) #define HighlightModulate ScaleCharToQuantum(125) #define ShadowModulate ScaleCharToQuantum(135) #define DepthModulate ScaleCharToQuantum(185) #define TroughModulate ScaleCharToQuantum(110) /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % B o r d e r I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % BorderImage() surrounds the image with a border of the color * defined by % the bordercolor member of the image structure. The width * and height % of the border are defined by the corresponding members of * the border_info % structure. % % The format of the BorderImage method * is: % % Image *BorderImage(const Image *image,const RectangleInfo * *border_info, % ExceptionInfo *exception) % % A description of * each parameter follows: % % o image: the image. % % o border_info: * Define the width and height of the border. % % o exception: return any * errors or warnings in this structure. % */ MagickExport Image * BorderImage(const Image * image, const RectangleInfo * border_info, ExceptionInfo * exception) { Image * border_image, *clone_image; FrameInfo frame_info; assert(image != (const Image *)NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(border_info != (RectangleInfo *) NULL); frame_info.width = image->columns + (border_info->width << 1); frame_info.height = image->rows + (border_info->height << 1); frame_info.x = (ssize_t) border_info->width; frame_info.y = (ssize_t) border_info->height; frame_info.inner_bevel = 0; frame_info.outer_bevel = 0; clone_image = CloneImage(image, 0, 0, MagickTrue, exception); if (clone_image == (Image *) NULL) return ((Image *) NULL); clone_image->matte_color = image->border_color; border_image = FrameImage(clone_image, &frame_info, exception); clone_image = DestroyImage(clone_image); if (border_image != (Image *) NULL) border_image->matte_color = image->matte_color; return (border_image); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % F r a m e I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % FrameImage() adds a simulated three-dimensional border around * the image. % The color of the border is defined by the matte_color member * of image. % Members width and height of frame_info specify the border * width of the % vertical and horizontal sides of the frame. Members inner * and outer % indicate the width of the inner and outer shadows of the * frame. 
% % The format of the FrameImage method is: % % Image * *FrameImage(const Image *image,const FrameInfo *frame_info, % * ExceptionInfo *exception) % % A description of each parameter follows: % * % o image: the image. % % o frame_info: Define the width and height * of the frame and its bevels. % % o exception: return any errors or * warnings in this structure. % */ MagickExport Image * FrameImage(const Image * image, const FrameInfo * frame_info, ExceptionInfo * exception) { #define FrameImageTag "Frame/Image" CacheView * image_view, *frame_view; Image * frame_image; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket accentuate, border, highlight, matte, shadow, trough; register ssize_t x; size_t bevel_width, height, width; ssize_t y; /* * Check frame geometry. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(frame_info != (FrameInfo *) NULL); if ((frame_info->outer_bevel < 0) || (frame_info->inner_bevel < 0)) ThrowImageException(OptionError, "FrameIsLessThanImageSize"); bevel_width = (size_t) (frame_info->outer_bevel + frame_info->inner_bevel); x = (ssize_t) frame_info->width - frame_info->x - bevel_width; y = (ssize_t) frame_info->height - frame_info->y - bevel_width; if ((x < (ssize_t) image->columns) || (y < (ssize_t) image->rows)) ThrowImageException(OptionError, "FrameIsLessThanImageSize"); /* * Initialize framed image attributes. */ frame_image = CloneImage(image, frame_info->width, frame_info->height, MagickTrue, exception); if (frame_image == (Image *) NULL) return ((Image *) NULL); if (SetImageStorageClass(frame_image, DirectClass) == MagickFalse) { InheritException(exception, &frame_image->exception); frame_image = DestroyImage(frame_image); return ((Image *) NULL); } if ((IsPixelGray(&frame_image->border_color) == MagickFalse) && (IsGrayColorspace(frame_image->colorspace) != MagickFalse)) (void)SetImageColorspace(frame_image, sRGBColorspace); if ((frame_image->border_color.opacity != OpaqueOpacity) && (frame_image->matte == MagickFalse)) (void)SetImageAlphaChannel(frame_image, OpaqueAlphaChannel); frame_image->page = image->page; if ((image->page.width != 0) && (image->page.height != 0)) { frame_image->page.width += frame_image->columns - image->columns; frame_image->page.height += frame_image->rows - image->rows; } /* * Initialize 3D effects color. 
*/ GetMagickPixelPacket(frame_image, &matte); matte.colorspace = sRGBColorspace; SetMagickPixelPacket(frame_image, &image->matte_color, (IndexPacket *) NULL, &matte); GetMagickPixelPacket(frame_image, &border); border.colorspace = sRGBColorspace; SetMagickPixelPacket(frame_image, &image->border_color, (IndexPacket *) NULL, &border); GetMagickPixelPacket(frame_image, &accentuate); accentuate.red = (MagickRealType) (QuantumScale * ((QuantumRange - AccentuateModulate) * matte.red + (QuantumRange * AccentuateModulate))); accentuate.green = (MagickRealType) (QuantumScale * ((QuantumRange - AccentuateModulate) * matte.green + (QuantumRange * AccentuateModulate))); accentuate.blue = (MagickRealType) (QuantumScale * ((QuantumRange - AccentuateModulate) * matte.blue + (QuantumRange * AccentuateModulate))); accentuate.opacity = matte.opacity; GetMagickPixelPacket(frame_image, &highlight); highlight.red = (MagickRealType) (QuantumScale * ((QuantumRange - HighlightModulate) * matte.red + (QuantumRange * HighlightModulate))); highlight.green = (MagickRealType) (QuantumScale * ((QuantumRange - HighlightModulate) * matte.green + (QuantumRange * HighlightModulate))); highlight.blue = (MagickRealType) (QuantumScale * ((QuantumRange - HighlightModulate) * matte.blue + (QuantumRange * HighlightModulate))); highlight.opacity = matte.opacity; GetMagickPixelPacket(frame_image, &shadow); shadow.red = QuantumScale * matte.red * ShadowModulate; shadow.green = QuantumScale * matte.green * ShadowModulate; shadow.blue = QuantumScale * matte.blue * ShadowModulate; shadow.opacity = matte.opacity; GetMagickPixelPacket(frame_image, &trough); trough.red = QuantumScale * matte.red * TroughModulate; trough.green = QuantumScale * matte.green * TroughModulate; trough.blue = QuantumScale * matte.blue * TroughModulate; trough.opacity = matte.opacity; if (image->colorspace == CMYKColorspace) { ConvertRGBToCMYK(&matte); ConvertRGBToCMYK(&border); ConvertRGBToCMYK(&accentuate); ConvertRGBToCMYK(&highlight); ConvertRGBToCMYK(&shadow); ConvertRGBToCMYK(&trough); } status = MagickTrue; progress = 0; image_view = AcquireVirtualCacheView(image, exception); frame_view = AcquireAuthenticCacheView(frame_image, exception); height = (size_t) (frame_info->outer_bevel + (frame_info->y - bevel_width) + frame_info->inner_bevel); if (height != 0) { register IndexPacket * magick_restrict frame_indexes; register ssize_t x; register PixelPacket * magick_restrict q; /* * Draw top of ornamental border. */ q = QueueCacheViewAuthenticPixels(frame_view, 0, 0, frame_image->columns, height, exception); frame_indexes = GetCacheViewAuthenticIndexQueue(frame_view); if (q != (PixelPacket *) NULL) { /* * Draw top of ornamental border. 
*/ for (y = 0; y < (ssize_t) frame_info->outer_bevel; y++) { for (x = 0; x < (ssize_t) (frame_image->columns - y); x++) { if (x < y) SetPixelPacket(frame_image, &highlight, q, frame_indexes); else SetPixelPacket(frame_image, &accentuate, q, frame_indexes); q++; frame_indexes++; } for (; x < (ssize_t) frame_image->columns; x++) { SetPixelPacket(frame_image, &shadow, q, frame_indexes); q++; frame_indexes++; } } for (y = 0; y < (ssize_t) (frame_info->y - bevel_width); y++) { for (x = 0; x < (ssize_t) frame_info->outer_bevel; x++) { SetPixelPacket(frame_image, &highlight, q, frame_indexes); q++; frame_indexes++; } width = frame_image->columns - 2 * frame_info->outer_bevel; for (x = 0; x < (ssize_t) width; x++) { SetPixelPacket(frame_image, &matte, q, frame_indexes); q++; frame_indexes++; } for (x = 0; x < (ssize_t) frame_info->outer_bevel; x++) { SetPixelPacket(frame_image, &shadow, q, frame_indexes); q++; frame_indexes++; } } for (y = 0; y < (ssize_t) frame_info->inner_bevel; y++) { for (x = 0; x < (ssize_t) frame_info->outer_bevel; x++) { SetPixelPacket(frame_image, &highlight, q, frame_indexes); q++; frame_indexes++; } for (x = 0; x < (ssize_t) (frame_info->x - bevel_width); x++) { SetPixelPacket(frame_image, &matte, q, frame_indexes); q++; frame_indexes++; } width = image->columns + ((size_t) frame_info->inner_bevel << 1) - y; for (x = 0; x < (ssize_t) width; x++) { if (x < y) SetPixelPacket(frame_image, &shadow, q, frame_indexes); else SetPixelPacket(frame_image, &trough, q, frame_indexes); q++; frame_indexes++; } for (; x < (ssize_t) (image->columns + 2 * frame_info->inner_bevel); x++) { SetPixelPacket(frame_image, &highlight, q, frame_indexes); q++; frame_indexes++; } width = frame_info->width - frame_info->x - image->columns - bevel_width; for (x = 0; x < (ssize_t) width; x++) { SetPixelPacket(frame_image, &matte, q, frame_indexes); q++; frame_indexes++; } for (x = 0; x < (ssize_t) frame_info->outer_bevel; x++) { SetPixelPacket(frame_image, &shadow, q, frame_indexes); q++; frame_indexes++; } } (void)SyncCacheViewAuthenticPixels(frame_view, exception); } } /* * Draw sides of ornamental border. */ for (y = 0; y < (ssize_t) image->rows; y++) { register IndexPacket * magick_restrict frame_indexes; register ssize_t x; register PixelPacket * magick_restrict q; /* * Initialize scanline with matte color. */ if (status == MagickFalse) continue; q = QueueCacheViewAuthenticPixels(frame_view, 0, frame_info->y + y, frame_image->columns, 1, exception); if (q == (PixelPacket *) NULL) { status = MagickFalse; continue; } frame_indexes = GetCacheViewAuthenticIndexQueue(frame_view); for (x = 0; x < (ssize_t) frame_info->outer_bevel; x++) { SetPixelPacket(frame_image, &highlight, q, frame_indexes); q++; frame_indexes++; } for (x = 0; x < (ssize_t) (frame_info->x - bevel_width); x++) { SetPixelPacket(frame_image, &matte, q, frame_indexes); q++; frame_indexes++; } for (x = 0; x < (ssize_t) frame_info->inner_bevel; x++) { SetPixelPacket(frame_image, &shadow, q, frame_indexes); q++; frame_indexes++; } /* * Set frame interior pixels. 
*/ for (x = 0; x < (ssize_t) image->columns; x++) { SetPixelPacket(frame_image, &border, q, frame_indexes); q++; frame_indexes++; } for (x = 0; x < (ssize_t) frame_info->inner_bevel; x++) { SetPixelPacket(frame_image, &highlight, q, frame_indexes); q++; frame_indexes++; } width = frame_info->width - frame_info->x - image->columns - bevel_width; for (x = 0; x < (ssize_t) width; x++) { SetPixelPacket(frame_image, &matte, q, frame_indexes); q++; frame_indexes++; } for (x = 0; x < (ssize_t) frame_info->outer_bevel; x++) { SetPixelPacket(frame_image, &shadow, q, frame_indexes); q++; frame_indexes++; } if (SyncCacheViewAuthenticPixels(frame_view, exception) == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed = SetImageProgress(image, FrameImageTag, progress++, image->rows); if (proceed == MagickFalse) status = MagickFalse; } } height = (size_t) (frame_info->inner_bevel + frame_info->height - frame_info->y - image->rows - bevel_width + frame_info->outer_bevel); if (height != 0) { register IndexPacket * magick_restrict frame_indexes; register ssize_t x; register PixelPacket * magick_restrict q; /* * Draw bottom of ornamental border. */ q = QueueCacheViewAuthenticPixels(frame_view, 0, (ssize_t) (frame_image->rows - height), frame_image->columns, height, exception); if (q != (PixelPacket *) NULL) { /* * Draw bottom of ornamental border. */ frame_indexes = GetCacheViewAuthenticIndexQueue(frame_view); for (y = frame_info->inner_bevel - 1; y >= 0; y--) { for (x = 0; x < (ssize_t) frame_info->outer_bevel; x++) { SetPixelPacket(frame_image, &highlight, q, frame_indexes); q++; frame_indexes++; } for (x = 0; x < (ssize_t) (frame_info->x - bevel_width); x++) { SetPixelPacket(frame_image, &matte, q, frame_indexes); q++; frame_indexes++; } for (x = 0; x < y; x++) { SetPixelPacket(frame_image, &shadow, q, frame_indexes); q++; frame_indexes++; } for (; x < (ssize_t) (image->columns + 2 * frame_info->inner_bevel); x++) { if (x >= (ssize_t) (image->columns + 2 * frame_info->inner_bevel - y)) SetPixelPacket(frame_image, &highlight, q, frame_indexes); else SetPixelPacket(frame_image, &accentuate, q, frame_indexes); q++; frame_indexes++; } width = frame_info->width - frame_info->x - image->columns - bevel_width; for (x = 0; x < (ssize_t) width; x++) { SetPixelPacket(frame_image, &matte, q, frame_indexes); q++; frame_indexes++; } for (x = 0; x < (ssize_t) frame_info->outer_bevel; x++) { SetPixelPacket(frame_image, &shadow, q, frame_indexes); q++; frame_indexes++; } } height = frame_info->height - frame_info->y - image->rows - bevel_width; for (y = 0; y < (ssize_t) height; y++) { for (x = 0; x < (ssize_t) frame_info->outer_bevel; x++) { SetPixelPacket(frame_image, &highlight, q, frame_indexes); q++; frame_indexes++; } width = frame_image->columns - 2 * frame_info->outer_bevel; for (x = 0; x < (ssize_t) width; x++) { SetPixelPacket(frame_image, &matte, q, frame_indexes); q++; frame_indexes++; } for (x = 0; x < (ssize_t) frame_info->outer_bevel; x++) { SetPixelPacket(frame_image, &shadow, q, frame_indexes); q++; frame_indexes++; } } for (y = frame_info->outer_bevel - 1; y >= 0; y--) { for (x = 0; x < y; x++) { SetPixelPacket(frame_image, &highlight, q, frame_indexes); q++; frame_indexes++; } for (; x < (ssize_t) frame_image->columns; x++) { if (x >= (ssize_t) (frame_image->columns - y)) SetPixelPacket(frame_image, &shadow, q, frame_indexes); else SetPixelPacket(frame_image, &trough, q, frame_indexes); q++; frame_indexes++; } } 
(void)SyncCacheViewAuthenticPixels(frame_view, exception); } } frame_view = DestroyCacheView(frame_view); image_view = DestroyCacheView(image_view); x = (ssize_t) (frame_info->outer_bevel + (frame_info->x - bevel_width) + frame_info->inner_bevel); y = (ssize_t) (frame_info->outer_bevel + (frame_info->y - bevel_width) + frame_info->inner_bevel); if (status != MagickFalse) status = CompositeImage(frame_image, image->compose, image, x, y); if (status == MagickFalse) frame_image = DestroyImage(frame_image); return (frame_image); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % R a i s e I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % RaiseImage() creates a simulated three-dimensional button-like * effect % by lightening and darkening the edges of the image. Members * width and % height of raise_info define the width of the vertical and * horizontal % edge of the effect. % % The format of the RaiseImage method * is: % % MagickBooleanType RaiseImage(const Image *image, % * const RectangleInfo *raise_info,const MagickBooleanType raise) % % A * description of each parameter follows: % % o image: the image. % % o * raise_info: Define the width and height of the raise area. % % o raise: * A value other than zero creates a 3-D raise effect, % otherwise it * has a lowered effect. % */ MagickExport MagickBooleanType RaiseImage(Image * image, const RectangleInfo * raise_info, const MagickBooleanType raise) { #define AccentuateFactor ScaleCharToQuantum(135) #define HighlightFactor ScaleCharToQuantum(190) #define ShadowFactor ScaleCharToQuantum(190) #define RaiseImageTag "Raise/Image" #define TroughFactor ScaleCharToQuantum(135) CacheView * image_view; ExceptionInfo * exception; MagickBooleanType status; MagickOffsetType progress; Quantum foreground, background; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(raise_info != (RectangleInfo *) NULL); exception = (&image->exception); if ((image->columns <= (raise_info->width << 1)) || (image->rows <= (raise_info->height << 1))) ThrowBinaryException(OptionError, "ImageSizeMustExceedBevelWidth", image->filename); foreground = QuantumRange; background = (Quantum) 0; if (raise == MagickFalse) { foreground = (Quantum) 0; background = QuantumRange; } if (SetImageStorageClass(image, DirectClass) == MagickFalse) return (MagickFalse); /* * Raise image. 
*/ status = MagickTrue; progress = 0; image_view = AcquireAuthenticCacheView(image, exception); for (y = 0; y < (ssize_t) raise_info->height; y++) { register ssize_t x; register PixelPacket * magick_restrict q; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (PixelPacket *) NULL) { status = MagickFalse; continue; } for (x = 0; x < y; x++) { SetPixelRed(q, ClampToQuantum(QuantumScale * ((MagickRealType) GetPixelRed(q) * HighlightFactor + (MagickRealType) foreground * (QuantumRange - HighlightFactor)))); SetPixelGreen(q, ClampToQuantum(QuantumScale * ((MagickRealType) GetPixelGreen(q) * HighlightFactor + (MagickRealType) foreground * (QuantumRange - HighlightFactor)))); SetPixelBlue(q, ClampToQuantum(QuantumScale * ((MagickRealType) GetPixelBlue(q) * HighlightFactor + (MagickRealType) foreground * (QuantumRange - HighlightFactor)))); q++; } for (; x < (ssize_t) (image->columns - y); x++) { SetPixelRed(q, ClampToQuantum(QuantumScale * ((MagickRealType) GetPixelRed(q) * AccentuateFactor + (MagickRealType) foreground * (QuantumRange - AccentuateFactor)))); SetPixelGreen(q, ClampToQuantum(QuantumScale * ((MagickRealType) GetPixelGreen(q) * AccentuateFactor + (MagickRealType) foreground * (QuantumRange - AccentuateFactor)))); SetPixelBlue(q, ClampToQuantum(QuantumScale * ((MagickRealType) GetPixelBlue(q) * AccentuateFactor + (MagickRealType) foreground * (QuantumRange - AccentuateFactor)))); q++; } for (; x < (ssize_t) image->columns; x++) { SetPixelRed(q, ClampToQuantum(QuantumScale * ((MagickRealType) GetPixelRed(q) * ShadowFactor + (MagickRealType) background * (QuantumRange - ShadowFactor)))); SetPixelGreen(q, ClampToQuantum(QuantumScale * ((MagickRealType) GetPixelGreen(q) * ShadowFactor + (MagickRealType) background * (QuantumRange - ShadowFactor)))); SetPixelBlue(q, ClampToQuantum(QuantumScale * ((MagickRealType) GetPixelBlue(q) * ShadowFactor + (MagickRealType) background * (QuantumRange - ShadowFactor)))); q++; } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed = SetImageProgress(image, RaiseImageTag, progress++, image->rows); if (proceed == MagickFalse) status = MagickFalse; } } for (y = (ssize_t) raise_info->height; y < (ssize_t) (image->rows - raise_info->height); y++) { register ssize_t x; register PixelPacket * magick_restrict q; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (PixelPacket *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) raise_info->width; x++) { SetPixelRed(q, ClampToQuantum(QuantumScale * ((MagickRealType) GetPixelRed(q) * HighlightFactor + (MagickRealType) foreground * (QuantumRange - HighlightFactor)))); SetPixelGreen(q, ClampToQuantum(QuantumScale * ((MagickRealType) GetPixelGreen(q) * HighlightFactor + (MagickRealType) foreground * (QuantumRange - HighlightFactor)))); SetPixelBlue(q, ClampToQuantum(QuantumScale * ((MagickRealType) GetPixelBlue(q) * HighlightFactor + (MagickRealType) foreground * (QuantumRange - HighlightFactor)))); q++; } for (; x < (ssize_t) (image->columns - raise_info->width); x++) q++; for (; x < (ssize_t) image->columns; x++) { SetPixelRed(q, ClampToQuantum(QuantumScale * ((MagickRealType) GetPixelRed(q) * ShadowFactor + (MagickRealType) background * (QuantumRange - ShadowFactor)))); SetPixelGreen(q, 
ClampToQuantum(QuantumScale * ((MagickRealType) GetPixelGreen(q) * ShadowFactor + (MagickRealType) background * (QuantumRange - ShadowFactor)))); SetPixelBlue(q, ClampToQuantum(QuantumScale * ((MagickRealType) GetPixelBlue(q) * ShadowFactor + (MagickRealType) background * (QuantumRange - ShadowFactor)))); q++; } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed = SetImageProgress(image, RaiseImageTag, progress++, image->rows); if (proceed == MagickFalse) status = MagickFalse; } } for (y = (ssize_t) (image->rows - raise_info->height); y < (ssize_t) image->rows; y++) { register ssize_t x; register PixelPacket * magick_restrict q; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (PixelPacket *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) (image->rows - y); x++) { SetPixelRed(q, ClampToQuantum(QuantumScale * ((MagickRealType) GetPixelRed(q) * HighlightFactor + (MagickRealType) foreground * (QuantumRange - HighlightFactor)))); SetPixelGreen(q, ClampToQuantum(QuantumScale * ((MagickRealType) GetPixelGreen(q) * HighlightFactor + (MagickRealType) foreground * (QuantumRange - HighlightFactor)))); SetPixelBlue(q, ClampToQuantum(QuantumScale * ((MagickRealType) GetPixelBlue(q) * HighlightFactor + (MagickRealType) foreground * (QuantumRange - HighlightFactor)))); q++; } for (; x < (ssize_t) (image->columns - (image->rows - y)); x++) { SetPixelRed(q, ClampToQuantum(QuantumScale * ((MagickRealType) GetPixelRed(q) * TroughFactor + (MagickRealType) background * (QuantumRange - TroughFactor)))); SetPixelGreen(q, ClampToQuantum(QuantumScale * ((MagickRealType) GetPixelGreen(q) * TroughFactor + (MagickRealType) background * (QuantumRange - TroughFactor)))); SetPixelBlue(q, ClampToQuantum(QuantumScale * ((MagickRealType) GetPixelBlue(q) * TroughFactor + (MagickRealType) background * (QuantumRange - TroughFactor)))); q++; } for (; x < (ssize_t) image->columns; x++) { SetPixelRed(q, ClampToQuantum(QuantumScale * ((MagickRealType) GetPixelRed(q) * ShadowFactor + (MagickRealType) background * (QuantumRange - ShadowFactor)))); SetPixelGreen(q, ClampToQuantum(QuantumScale * ((MagickRealType) GetPixelGreen(q) * ShadowFactor + (MagickRealType) background * (QuantumRange - ShadowFactor)))); SetPixelBlue(q, ClampToQuantum(QuantumScale * ((MagickRealType) GetPixelBlue(q) * ShadowFactor + (MagickRealType) background * (QuantumRange - ShadowFactor)))); q++; } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed = SetImageProgress(image, RaiseImageTag, progress++, image->rows); if (proceed == MagickFalse) status = MagickFalse; } } image_view = DestroyCacheView(image_view); return (status); }
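Before allocating anything, FrameImage() rejects geometries whose interior (frame width/height minus the x/y offsets and the combined bevel width) is smaller than the image, throwing "FrameIsLessThanImageSize". A stand-alone restatement of that check, using hypothetical names that are not part of MagickCore, might look like this:

  #include <stdbool.h>
  #include <stddef.h>
  #include <sys/types.h>    /* ssize_t (POSIX) */

  /* Mirrors the "FrameIsLessThanImageSize" test in FrameImage(). */
  static bool frame_geometry_fits(size_t frame_width,size_t frame_height,
    ssize_t frame_x,ssize_t frame_y,ssize_t outer_bevel,ssize_t inner_bevel,
    size_t image_columns,size_t image_rows)
  {
    size_t
      bevel_width;

    ssize_t
      x,
      y;

    if ((outer_bevel < 0) || (inner_bevel < 0))
      return(false);
    bevel_width=(size_t) (outer_bevel+inner_bevel);
    x=(ssize_t) frame_width-frame_x-(ssize_t) bevel_width;
    y=(ssize_t) frame_height-frame_y-(ssize_t) bevel_width;
    return((x >= (ssize_t) image_columns) && (y >= (ssize_t) image_rows));
  }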
/* * Include declarations. */ #include "magick/studio.h" #include "magick/cache-view.h" #include "magick/channel.h" #include "magick/color-private.h" #include "magick/colorspace-private.h" #include "magick/composite.h" #include "magick/decorate.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/image.h" #include "magick/memory_.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/pixel-accessor.h" #include "magick/pixel-private.h" #include "magick/quantum.h" #include "magick/resource_.h" #include "magick/thread-private.h" #include "magick/transform.h" /* * Define declarations. */ #define AccentuateModulate ScaleCharToQuantum(80) #define HighlightModulate ScaleCharToQuantum(125) #define ShadowModulate ScaleCharToQuantum(135) #define DepthModulate ScaleCharToQuantum(185) #define TroughModulate ScaleCharToQuantum(110) /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % B o r d e r I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % BorderImage() surrounds the image with a border of the color * defined by % the bordercolor member of the image structure. The width * and height % of the border are defined by the corresponding members of * the border_info % structure. % % The format of the BorderImage method * is: % % Image *BorderImage(const Image *image,const RectangleInfo * *border_info, % ExceptionInfo *exception) % % A description of * each parameter follows: % % o image: the image. % % o border_info: * Define the width and height of the border. % % o exception: return any * errors or warnings in this structure. % */ MagickExport Image * BorderImage(const Image * image, const RectangleInfo * border_info, ExceptionInfo * exception) { Image * border_image, *clone_image; FrameInfo frame_info; assert(image != (const Image *)NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(border_info != (RectangleInfo *) NULL); frame_info.width = image->columns + (border_info->width << 1); frame_info.height = image->rows + (border_info->height << 1); frame_info.x = (ssize_t) border_info->width; frame_info.y = (ssize_t) border_info->height; frame_info.inner_bevel = 0; frame_info.outer_bevel = 0; clone_image = CloneImage(image, 0, 0, MagickTrue, exception); if (clone_image == (Image *) NULL) return ((Image *) NULL); clone_image->matte_color = image->border_color; border_image = FrameImage(clone_image, &frame_info, exception); clone_image = DestroyImage(clone_image); if (border_image != (Image *) NULL) border_image->matte_color = image->matte_color; return (border_image); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % F r a m e I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % FrameImage() adds a simulated three-dimensional border around * the image. % The color of the border is defined by the matte_color member * of image. % Members width and height of frame_info specify the border * width of the % vertical and horizontal sides of the frame. Members inner * and outer % indicate the width of the inner and outer shadows of the * frame. 
% % The format of the FrameImage method is: % % Image * *FrameImage(const Image *image,const FrameInfo *frame_info, % * ExceptionInfo *exception) % % A description of each parameter follows: % * % o image: the image. % % o frame_info: Define the width and height * of the frame and its bevels. % % o exception: return any errors or * warnings in this structure. % */ MagickExport Image * FrameImage(const Image * image, const FrameInfo * frame_info, ExceptionInfo * exception) { #define FrameImageTag "Frame/Image" CacheView * image_view, *frame_view; Image * frame_image; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket accentuate, border, highlight, matte, shadow, trough; register ssize_t x; size_t bevel_width, height, width; ssize_t y; /* * Check frame geometry. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(frame_info != (FrameInfo *) NULL); if ((frame_info->outer_bevel < 0) || (frame_info->inner_bevel < 0)) ThrowImageException(OptionError, "FrameIsLessThanImageSize"); bevel_width = (size_t) (frame_info->outer_bevel + frame_info->inner_bevel); x = (ssize_t) frame_info->width - frame_info->x - bevel_width; y = (ssize_t) frame_info->height - frame_info->y - bevel_width; if ((x < (ssize_t) image->columns) || (y < (ssize_t) image->rows)) ThrowImageException(OptionError, "FrameIsLessThanImageSize"); /* * Initialize framed image attributes. */ frame_image = CloneImage(image, frame_info->width, frame_info->height, MagickTrue, exception); if (frame_image == (Image *) NULL) return ((Image *) NULL); if (SetImageStorageClass(frame_image, DirectClass) == MagickFalse) { InheritException(exception, &frame_image->exception); frame_image = DestroyImage(frame_image); return ((Image *) NULL); } if ((IsPixelGray(&frame_image->border_color) == MagickFalse) && (IsGrayColorspace(frame_image->colorspace) != MagickFalse)) (void)SetImageColorspace(frame_image, sRGBColorspace); if ((frame_image->border_color.opacity != OpaqueOpacity) && (frame_image->matte == MagickFalse)) (void)SetImageAlphaChannel(frame_image, OpaqueAlphaChannel); frame_image->page = image->page; if ((image->page.width != 0) && (image->page.height != 0)) { frame_image->page.width += frame_image->columns - image->columns; frame_image->page.height += frame_image->rows - image->rows; } /* * Initialize 3D effects color. 
*/ GetMagickPixelPacket(frame_image, &matte); matte.colorspace = sRGBColorspace; SetMagickPixelPacket(frame_image, &image->matte_color, (IndexPacket *) NULL, &matte); GetMagickPixelPacket(frame_image, &border); border.colorspace = sRGBColorspace; SetMagickPixelPacket(frame_image, &image->border_color, (IndexPacket *) NULL, &border); GetMagickPixelPacket(frame_image, &accentuate); accentuate.red = (MagickRealType) (QuantumScale * ((QuantumRange - AccentuateModulate) * matte.red + (QuantumRange * AccentuateModulate))); accentuate.green = (MagickRealType) (QuantumScale * ((QuantumRange - AccentuateModulate) * matte.green + (QuantumRange * AccentuateModulate))); accentuate.blue = (MagickRealType) (QuantumScale * ((QuantumRange - AccentuateModulate) * matte.blue + (QuantumRange * AccentuateModulate))); accentuate.opacity = matte.opacity; GetMagickPixelPacket(frame_image, &highlight); highlight.red = (MagickRealType) (QuantumScale * ((QuantumRange - HighlightModulate) * matte.red + (QuantumRange * HighlightModulate))); highlight.green = (MagickRealType) (QuantumScale * ((QuantumRange - HighlightModulate) * matte.green + (QuantumRange * HighlightModulate))); highlight.blue = (MagickRealType) (QuantumScale * ((QuantumRange - HighlightModulate) * matte.blue + (QuantumRange * HighlightModulate))); highlight.opacity = matte.opacity; GetMagickPixelPacket(frame_image, &shadow); shadow.red = QuantumScale * matte.red * ShadowModulate; shadow.green = QuantumScale * matte.green * ShadowModulate; shadow.blue = QuantumScale * matte.blue * ShadowModulate; shadow.opacity = matte.opacity; GetMagickPixelPacket(frame_image, &trough); trough.red = QuantumScale * matte.red * TroughModulate; trough.green = QuantumScale * matte.green * TroughModulate; trough.blue = QuantumScale * matte.blue * TroughModulate; trough.opacity = matte.opacity; if (image->colorspace == CMYKColorspace) { ConvertRGBToCMYK(&matte); ConvertRGBToCMYK(&border); ConvertRGBToCMYK(&accentuate); ConvertRGBToCMYK(&highlight); ConvertRGBToCMYK(&shadow); ConvertRGBToCMYK(&trough); } status = MagickTrue; progress = 0; image_view = AcquireVirtualCacheView(image, exception); frame_view = AcquireAuthenticCacheView(frame_image, exception); height = (size_t) (frame_info->outer_bevel + (frame_info->y - bevel_width) + frame_info->inner_bevel); if (height != 0) { register IndexPacket * magick_restrict frame_indexes; register ssize_t x; register PixelPacket * magick_restrict q; /* * Draw top of ornamental border. */ q = QueueCacheViewAuthenticPixels(frame_view, 0, 0, frame_image->columns, height, exception); frame_indexes = GetCacheViewAuthenticIndexQueue(frame_view); if (q != (PixelPacket *) NULL) { /* * Draw top of ornamental border. 
*/ for (y = 0; y < (ssize_t) frame_info->outer_bevel; y++) { for (x = 0; x < (ssize_t) (frame_image->columns - y); x++) { if (x < y) SetPixelPacket(frame_image, &highlight, q, frame_indexes); else SetPixelPacket(frame_image, &accentuate, q, frame_indexes); q++; frame_indexes++; } for (; x < (ssize_t) frame_image->columns; x++) { SetPixelPacket(frame_image, &shadow, q, frame_indexes); q++; frame_indexes++; } } for (y = 0; y < (ssize_t) (frame_info->y - bevel_width); y++) { for (x = 0; x < (ssize_t) frame_info->outer_bevel; x++) { SetPixelPacket(frame_image, &highlight, q, frame_indexes); q++; frame_indexes++; } width = frame_image->columns - 2 * frame_info->outer_bevel; for (x = 0; x < (ssize_t) width; x++) { SetPixelPacket(frame_image, &matte, q, frame_indexes); q++; frame_indexes++; } for (x = 0; x < (ssize_t) frame_info->outer_bevel; x++) { SetPixelPacket(frame_image, &shadow, q, frame_indexes); q++; frame_indexes++; } } for (y = 0; y < (ssize_t) frame_info->inner_bevel; y++) { for (x = 0; x < (ssize_t) frame_info->outer_bevel; x++) { SetPixelPacket(frame_image, &highlight, q, frame_indexes); q++; frame_indexes++; } for (x = 0; x < (ssize_t) (frame_info->x - bevel_width); x++) { SetPixelPacket(frame_image, &matte, q, frame_indexes); q++; frame_indexes++; } width = image->columns + ((size_t) frame_info->inner_bevel << 1) - y; for (x = 0; x < (ssize_t) width; x++) { if (x < y) SetPixelPacket(frame_image, &shadow, q, frame_indexes); else SetPixelPacket(frame_image, &trough, q, frame_indexes); q++; frame_indexes++; } for (; x < (ssize_t) (image->columns + 2 * frame_info->inner_bevel); x++) { SetPixelPacket(frame_image, &highlight, q, frame_indexes); q++; frame_indexes++; } width = frame_info->width - frame_info->x - image->columns - bevel_width; for (x = 0; x < (ssize_t) width; x++) { SetPixelPacket(frame_image, &matte, q, frame_indexes); q++; frame_indexes++; } for (x = 0; x < (ssize_t) frame_info->outer_bevel; x++) { SetPixelPacket(frame_image, &shadow, q, frame_indexes); q++; frame_indexes++; } } (void)SyncCacheViewAuthenticPixels(frame_view, exception); } } /* * Draw sides of ornamental border. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,frame_image,image->rows,1) #endif for (y = 0; y < (ssize_t) image->rows; y++) { register IndexPacket * magick_restrict frame_indexes; register ssize_t x; register PixelPacket * magick_restrict q; /* * Initialize scanline with matte color. */ if (status == MagickFalse) continue; q = QueueCacheViewAuthenticPixels(frame_view, 0, frame_info->y + y, frame_image->columns, 1, exception); if (q == (PixelPacket *) NULL) { status = MagickFalse; continue; } frame_indexes = GetCacheViewAuthenticIndexQueue(frame_view); for (x = 0; x < (ssize_t) frame_info->outer_bevel; x++) { SetPixelPacket(frame_image, &highlight, q, frame_indexes); q++; frame_indexes++; } for (x = 0; x < (ssize_t) (frame_info->x - bevel_width); x++) { SetPixelPacket(frame_image, &matte, q, frame_indexes); q++; frame_indexes++; } for (x = 0; x < (ssize_t) frame_info->inner_bevel; x++) { SetPixelPacket(frame_image, &shadow, q, frame_indexes); q++; frame_indexes++; } /* * Set frame interior pixels. 
*/ for (x = 0; x < (ssize_t) image->columns; x++) { SetPixelPacket(frame_image, &border, q, frame_indexes); q++; frame_indexes++; } for (x = 0; x < (ssize_t) frame_info->inner_bevel; x++) { SetPixelPacket(frame_image, &highlight, q, frame_indexes); q++; frame_indexes++; } width = frame_info->width - frame_info->x - image->columns - bevel_width; for (x = 0; x < (ssize_t) width; x++) { SetPixelPacket(frame_image, &matte, q, frame_indexes); q++; frame_indexes++; } for (x = 0; x < (ssize_t) frame_info->outer_bevel; x++) { SetPixelPacket(frame_image, &shadow, q, frame_indexes); q++; frame_indexes++; } if (SyncCacheViewAuthenticPixels(frame_view, exception) == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_FrameImage) #endif proceed = SetImageProgress(image, FrameImageTag, progress++, image->rows); if (proceed == MagickFalse) status = MagickFalse; } } height = (size_t) (frame_info->inner_bevel + frame_info->height - frame_info->y - image->rows - bevel_width + frame_info->outer_bevel); if (height != 0) { register IndexPacket * magick_restrict frame_indexes; register ssize_t x; register PixelPacket * magick_restrict q; /* * Draw bottom of ornamental border. */ q = QueueCacheViewAuthenticPixels(frame_view, 0, (ssize_t) (frame_image->rows - height), frame_image->columns, height, exception); if (q != (PixelPacket *) NULL) { /* * Draw bottom of ornamental border. */ frame_indexes = GetCacheViewAuthenticIndexQueue(frame_view); for (y = frame_info->inner_bevel - 1; y >= 0; y--) { for (x = 0; x < (ssize_t) frame_info->outer_bevel; x++) { SetPixelPacket(frame_image, &highlight, q, frame_indexes); q++; frame_indexes++; } for (x = 0; x < (ssize_t) (frame_info->x - bevel_width); x++) { SetPixelPacket(frame_image, &matte, q, frame_indexes); q++; frame_indexes++; } for (x = 0; x < y; x++) { SetPixelPacket(frame_image, &shadow, q, frame_indexes); q++; frame_indexes++; } for (; x < (ssize_t) (image->columns + 2 * frame_info->inner_bevel); x++) { if (x >= (ssize_t) (image->columns + 2 * frame_info->inner_bevel - y)) SetPixelPacket(frame_image, &highlight, q, frame_indexes); else SetPixelPacket(frame_image, &accentuate, q, frame_indexes); q++; frame_indexes++; } width = frame_info->width - frame_info->x - image->columns - bevel_width; for (x = 0; x < (ssize_t) width; x++) { SetPixelPacket(frame_image, &matte, q, frame_indexes); q++; frame_indexes++; } for (x = 0; x < (ssize_t) frame_info->outer_bevel; x++) { SetPixelPacket(frame_image, &shadow, q, frame_indexes); q++; frame_indexes++; } } height = frame_info->height - frame_info->y - image->rows - bevel_width; for (y = 0; y < (ssize_t) height; y++) { for (x = 0; x < (ssize_t) frame_info->outer_bevel; x++) { SetPixelPacket(frame_image, &highlight, q, frame_indexes); q++; frame_indexes++; } width = frame_image->columns - 2 * frame_info->outer_bevel; for (x = 0; x < (ssize_t) width; x++) { SetPixelPacket(frame_image, &matte, q, frame_indexes); q++; frame_indexes++; } for (x = 0; x < (ssize_t) frame_info->outer_bevel; x++) { SetPixelPacket(frame_image, &shadow, q, frame_indexes); q++; frame_indexes++; } } for (y = frame_info->outer_bevel - 1; y >= 0; y--) { for (x = 0; x < y; x++) { SetPixelPacket(frame_image, &highlight, q, frame_indexes); q++; frame_indexes++; } for (; x < (ssize_t) frame_image->columns; x++) { if (x >= (ssize_t) (frame_image->columns - y)) SetPixelPacket(frame_image, &shadow, q, frame_indexes); else 
SetPixelPacket(frame_image, &trough, q, frame_indexes); q++; frame_indexes++; } } (void)SyncCacheViewAuthenticPixels(frame_view, exception); } } frame_view = DestroyCacheView(frame_view); image_view = DestroyCacheView(image_view); x = (ssize_t) (frame_info->outer_bevel + (frame_info->x - bevel_width) + frame_info->inner_bevel); y = (ssize_t) (frame_info->outer_bevel + (frame_info->y - bevel_width) + frame_info->inner_bevel); if (status != MagickFalse) status = CompositeImage(frame_image, image->compose, image, x, y); if (status == MagickFalse) frame_image = DestroyImage(frame_image); return (frame_image); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % R a i s e I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % RaiseImage() creates a simulated three-dimensional button-like * effect % by lightening and darkening the edges of the image. Members * width and % height of raise_info define the width of the vertical and * horizontal % edge of the effect. % % The format of the RaiseImage method * is: % % MagickBooleanType RaiseImage(const Image *image, % * const RectangleInfo *raise_info,const MagickBooleanType raise) % % A * description of each parameter follows: % % o image: the image. % % o * raise_info: Define the width and height of the raise area. % % o raise: * A value other than zero creates a 3-D raise effect, % otherwise it * has a lowered effect. % */ MagickExport MagickBooleanType RaiseImage(Image * image, const RectangleInfo * raise_info, const MagickBooleanType raise) { #define AccentuateFactor ScaleCharToQuantum(135) #define HighlightFactor ScaleCharToQuantum(190) #define ShadowFactor ScaleCharToQuantum(190) #define RaiseImageTag "Raise/Image" #define TroughFactor ScaleCharToQuantum(135) CacheView * image_view; ExceptionInfo * exception; MagickBooleanType status; MagickOffsetType progress; Quantum foreground, background; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(raise_info != (RectangleInfo *) NULL); exception = (&image->exception); if ((image->columns <= (raise_info->width << 1)) || (image->rows <= (raise_info->height << 1))) ThrowBinaryException(OptionError, "ImageSizeMustExceedBevelWidth", image->filename); foreground = QuantumRange; background = (Quantum) 0; if (raise == MagickFalse) { foreground = (Quantum) 0; background = QuantumRange; } if (SetImageStorageClass(image, DirectClass) == MagickFalse) return (MagickFalse); /* * Raise image. 
*/ status = MagickTrue; progress = 0; image_view = AcquireAuthenticCacheView(image, exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,raise_info->height,1) #endif for (y = 0; y < (ssize_t) raise_info->height; y++) { register ssize_t x; register PixelPacket * magick_restrict q; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (PixelPacket *) NULL) { status = MagickFalse; continue; } for (x = 0; x < y; x++) { SetPixelRed(q, ClampToQuantum(QuantumScale * ((MagickRealType) GetPixelRed(q) * HighlightFactor + (MagickRealType) foreground * (QuantumRange - HighlightFactor)))); SetPixelGreen(q, ClampToQuantum(QuantumScale * ((MagickRealType) GetPixelGreen(q) * HighlightFactor + (MagickRealType) foreground * (QuantumRange - HighlightFactor)))); SetPixelBlue(q, ClampToQuantum(QuantumScale * ((MagickRealType) GetPixelBlue(q) * HighlightFactor + (MagickRealType) foreground * (QuantumRange - HighlightFactor)))); q++; } for (; x < (ssize_t) (image->columns - y); x++) { SetPixelRed(q, ClampToQuantum(QuantumScale * ((MagickRealType) GetPixelRed(q) * AccentuateFactor + (MagickRealType) foreground * (QuantumRange - AccentuateFactor)))); SetPixelGreen(q, ClampToQuantum(QuantumScale * ((MagickRealType) GetPixelGreen(q) * AccentuateFactor + (MagickRealType) foreground * (QuantumRange - AccentuateFactor)))); SetPixelBlue(q, ClampToQuantum(QuantumScale * ((MagickRealType) GetPixelBlue(q) * AccentuateFactor + (MagickRealType) foreground * (QuantumRange - AccentuateFactor)))); q++; } for (; x < (ssize_t) image->columns; x++) { SetPixelRed(q, ClampToQuantum(QuantumScale * ((MagickRealType) GetPixelRed(q) * ShadowFactor + (MagickRealType) background * (QuantumRange - ShadowFactor)))); SetPixelGreen(q, ClampToQuantum(QuantumScale * ((MagickRealType) GetPixelGreen(q) * ShadowFactor + (MagickRealType) background * (QuantumRange - ShadowFactor)))); SetPixelBlue(q, ClampToQuantum(QuantumScale * ((MagickRealType) GetPixelBlue(q) * ShadowFactor + (MagickRealType) background * (QuantumRange - ShadowFactor)))); q++; } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_RaiseImage) #endif proceed = SetImageProgress(image, RaiseImageTag, progress++, image->rows); if (proceed == MagickFalse) status = MagickFalse; } } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows-2*raise_info->height,1) #endif for (y = (ssize_t) raise_info->height; y < (ssize_t) (image->rows - raise_info->height); y++) { register ssize_t x; register PixelPacket * magick_restrict q; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (PixelPacket *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) raise_info->width; x++) { SetPixelRed(q, ClampToQuantum(QuantumScale * ((MagickRealType) GetPixelRed(q) * HighlightFactor + (MagickRealType) foreground * (QuantumRange - HighlightFactor)))); SetPixelGreen(q, ClampToQuantum(QuantumScale * ((MagickRealType) GetPixelGreen(q) * HighlightFactor + (MagickRealType) foreground * (QuantumRange - HighlightFactor)))); SetPixelBlue(q, ClampToQuantum(QuantumScale * 
((MagickRealType) GetPixelBlue(q) * HighlightFactor + (MagickRealType) foreground * (QuantumRange - HighlightFactor)))); q++; } for (; x < (ssize_t) (image->columns - raise_info->width); x++) q++; for (; x < (ssize_t) image->columns; x++) { SetPixelRed(q, ClampToQuantum(QuantumScale * ((MagickRealType) GetPixelRed(q) * ShadowFactor + (MagickRealType) background * (QuantumRange - ShadowFactor)))); SetPixelGreen(q, ClampToQuantum(QuantumScale * ((MagickRealType) GetPixelGreen(q) * ShadowFactor + (MagickRealType) background * (QuantumRange - ShadowFactor)))); SetPixelBlue(q, ClampToQuantum(QuantumScale * ((MagickRealType) GetPixelBlue(q) * ShadowFactor + (MagickRealType) background * (QuantumRange - ShadowFactor)))); q++; } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_RaiseImage) #endif proceed = SetImageProgress(image, RaiseImageTag, progress++, image->rows); if (proceed == MagickFalse) status = MagickFalse; } } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows-raise_info->height,1) #endif for (y = (ssize_t) (image->rows - raise_info->height); y < (ssize_t) image->rows; y++) { register ssize_t x; register PixelPacket * magick_restrict q; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (PixelPacket *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) (image->rows - y); x++) { SetPixelRed(q, ClampToQuantum(QuantumScale * ((MagickRealType) GetPixelRed(q) * HighlightFactor + (MagickRealType) foreground * (QuantumRange - HighlightFactor)))); SetPixelGreen(q, ClampToQuantum(QuantumScale * ((MagickRealType) GetPixelGreen(q) * HighlightFactor + (MagickRealType) foreground * (QuantumRange - HighlightFactor)))); SetPixelBlue(q, ClampToQuantum(QuantumScale * ((MagickRealType) GetPixelBlue(q) * HighlightFactor + (MagickRealType) foreground * (QuantumRange - HighlightFactor)))); q++; } for (; x < (ssize_t) (image->columns - (image->rows - y)); x++) { SetPixelRed(q, ClampToQuantum(QuantumScale * ((MagickRealType) GetPixelRed(q) * TroughFactor + (MagickRealType) background * (QuantumRange - TroughFactor)))); SetPixelGreen(q, ClampToQuantum(QuantumScale * ((MagickRealType) GetPixelGreen(q) * TroughFactor + (MagickRealType) background * (QuantumRange - TroughFactor)))); SetPixelBlue(q, ClampToQuantum(QuantumScale * ((MagickRealType) GetPixelBlue(q) * TroughFactor + (MagickRealType) background * (QuantumRange - TroughFactor)))); q++; } for (; x < (ssize_t) image->columns; x++) { SetPixelRed(q, ClampToQuantum(QuantumScale * ((MagickRealType) GetPixelRed(q) * ShadowFactor + (MagickRealType) background * (QuantumRange - ShadowFactor)))); SetPixelGreen(q, ClampToQuantum(QuantumScale * ((MagickRealType) GetPixelGreen(q) * ShadowFactor + (MagickRealType) background * (QuantumRange - ShadowFactor)))); SetPixelBlue(q, ClampToQuantum(QuantumScale * ((MagickRealType) GetPixelBlue(q) * ShadowFactor + (MagickRealType) background * (QuantumRange - ShadowFactor)))); q++; } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical 
(MagickCore_RaiseImage) #endif proceed = SetImageProgress(image, RaiseImageTag, progress++, image->rows); if (proceed == MagickFalse) status = MagickFalse; } } image_view = DestroyCacheView(image_view); return (status); }
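The OpenMP-formatted variant above follows a fixed recipe: each scanline loop becomes a #pragma omp parallel for schedule(static), a shared status flag replaces early exit (threads simply continue once it flips), and the serialized progress callback sits inside a named critical section. Stripped of the MagickCore types, the pattern looks roughly like the generic sketch below; ProcessRows, RowFunc, and ProgressFunc are hypothetical names, not library code, and the unsynchronized write to status mirrors the library's convention of treating it as a benign flag:

  #include <stdbool.h>
  #include <stddef.h>

  typedef bool (*RowFunc)(size_t y,size_t columns,void *context);
  typedef void (*ProgressFunc)(size_t done,size_t total);

  static bool ProcessRows(size_t rows,size_t columns,RowFunc row,
    ProgressFunc progress_monitor,void *context)
  {
    bool
      status = true;

    long
      y;

    size_t
      progress = 0;

  #if defined(_OPENMP)
    #pragma omp parallel for schedule(static) shared(status,progress)
  #endif
    for (y=0; y < (long) rows; y++)
    {
      if (status == false)
        continue;                      /* threads cannot break out early */
      if (row((size_t) y,columns,context) == false)
        status=false;
      if (progress_monitor != (ProgressFunc) NULL)
        {
  #if defined(_OPENMP)
          #pragma omp critical (ProcessRows)
  #endif
          progress_monitor(++progress,rows);
        }
    }
    return(status);
  }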
GB_unop__identity_uint64_fc64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_uint64_fc64) // op(A') function: GB (_unop_tran__identity_uint64_fc64) // C type: uint64_t // A type: GxB_FC64_t // cast: uint64_t cij = GB_cast_to_uint64_t (creal (aij)) // unaryop: cij = aij #define GB_ATYPE \ GxB_FC64_t #define GB_CTYPE \ uint64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ uint64_t z = GB_cast_to_uint64_t (creal (aij)) ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC64_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ uint64_t z = GB_cast_to_uint64_t (creal (aij)) ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_UINT64 || GxB_NO_FC64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_uint64_fc64) ( uint64_t *Cx, // Cx and Ax may be aliased const GxB_FC64_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GxB_FC64_t aij = Ax [p] ; uint64_t z = GB_cast_to_uint64_t (creal (aij)) ; Cx [p] = z ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; GxB_FC64_t aij = Ax [p] ; uint64_t z = GB_cast_to_uint64_t (creal (aij)) ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_uint64_fc64) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_uint64_fc64) // op(A') function: GB (_unop_tran__identity_uint64_fc64) // C type: uint64_t // A type: GxB_FC64_t // cast: uint64_t cij = GB_cast_to_uint64_t (creal (aij)) // unaryop: cij = aij #define GB_ATYPE \ GxB_FC64_t #define GB_CTYPE \ uint64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ uint64_t z = GB_cast_to_uint64_t (creal (aij)) ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC64_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ uint64_t z = GB_cast_to_uint64_t (creal (aij)) ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_UINT64 || GxB_NO_FC64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_uint64_fc64) ( uint64_t *Cx, // Cx and Ax may be aliased const GxB_FC64_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { for (p = 0 ; p < anz ; p++) { GxB_FC64_t aij = Ax [p] ; uint64_t z = GB_cast_to_uint64_t (creal (aij)) ; Cx [p] = z ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; GxB_FC64_t aij = Ax [p] ; uint64_t z = GB_cast_to_uint64_t (creal (aij)) ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_uint64_fc64) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_uint64_fc64) // op(A') function: GB (_unop_tran__identity_uint64_fc64) // C type: uint64_t // A type: GxB_FC64_t // cast: uint64_t cij = GB_cast_to_uint64_t (creal (aij)) // unaryop: cij = aij #define GB_ATYPE \ GxB_FC64_t #define GB_CTYPE \ uint64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ uint64_t z = GB_cast_to_uint64_t (creal (aij)) ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC64_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ uint64_t z = GB_cast_to_uint64_t (creal (aij)) ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_UINT64 || GxB_NO_FC64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_uint64_fc64) ( uint64_t *Cx, // Cx and Ax may be aliased const GxB_FC64_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GxB_FC64_t aij = Ax [p] ; uint64_t z = GB_cast_to_uint64_t (creal (aij)) ; Cx [p] = z ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; GxB_FC64_t aij = Ax [p] ; uint64_t z = GB_cast_to_uint64_t (creal (aij)) ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_uint64_fc64) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
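For reference, here is a minimal, self-contained sketch (hypothetical function name, not the GraphBLAS API) of what the non-bitmap branch of GB (_unop_apply__identity_uint64_fc64) computes: take the real part of each double-complex entry, cast it to uint64_t, and split the loop statically across nthreads. A plain C cast stands in for GB_cast_to_uint64_t, which in the real library also guards against values that do not fit in uint64_t.

#include <complex.h>
#include <stdint.h>

void apply_identity_uint64_fc64(uint64_t *Cx, const double complex *Ax,
                                int64_t anz, int nthreads)
{
  int64_t p;
  #pragma omp parallel for num_threads(nthreads) schedule(static)
  for (p = 0; p < anz; p++)
  {
    double complex aij = Ax[p];     /* aij = Ax [p] */
    Cx[p] = (uint64_t) creal(aij);  /* keep the real part, drop the imaginary part */
  }
}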
GB_binop__times_int8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__times_int8) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__times_int8) // A.*B function (eWiseMult): GB (_AemultB_03__times_int8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__times_int8) // A*D function (colscale): GB (_AxD__times_int8) // D*A function (rowscale): GB (_DxB__times_int8) // C+=B function (dense accum): GB (_Cdense_accumB__times_int8) // C+=b function (dense accum): GB (_Cdense_accumb__times_int8) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__times_int8) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__times_int8) // C=scalar+B GB (_bind1st__times_int8) // C=scalar+B' GB (_bind1st_tran__times_int8) // C=A+scalar GB (_bind2nd__times_int8) // C=A'+scalar GB (_bind2nd_tran__times_int8) // C type: int8_t // A type: int8_t // B,b type: int8_t // BinaryOp: cij = (aij * bij) #define GB_ATYPE \ int8_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ int8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int8_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x * y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_TIMES || GxB_NO_INT8 || GxB_NO_TIMES_INT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__times_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__times_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__times_int8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__times_int8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int8_t int8_t bwork = (*((int8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__times_int8) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *restrict Cx = (int8_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__times_int8) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *restrict Cx = (int8_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__times_int8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return 
(GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__times_int8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__times_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__times_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__times_int8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__times_int8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *Cx = (int8_t *) Cx_output ; int8_t x = (*((int8_t *) x_input)) ; int8_t *Bx = (int8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; int8_t bij = Bx [p] ; Cx [p] = (x * bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__times_int8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int8_t *Cx = (int8_t *) Cx_output ; int8_t *Ax = (int8_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int8_t aij = Ax [p] ; Cx [p] = (aij * y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = Ax [pA] ; \ Cx [pC] = (x * aij) ; \ } GrB_Info GB (_bind1st_tran__times_int8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is 
// the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t x = (*((const int8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = Ax [pA] ; \ Cx [pC] = (aij * y) ; \ } GrB_Info GB (_bind2nd_tran__times_int8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__times_int8) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__times_int8) // A.*B function (eWiseMult): GB (_AemultB_03__times_int8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__times_int8) // A*D function (colscale): GB (_AxD__times_int8) // D*A function (rowscale): GB (_DxB__times_int8) // C+=B function (dense accum): GB (_Cdense_accumB__times_int8) // C+=b function (dense accum): GB (_Cdense_accumb__times_int8) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__times_int8) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__times_int8) // C=scalar+B GB (_bind1st__times_int8) // C=scalar+B' GB (_bind1st_tran__times_int8) // C=A+scalar GB (_bind2nd__times_int8) // C=A'+scalar GB (_bind2nd_tran__times_int8) // C type: int8_t // A type: int8_t // B,b type: int8_t // BinaryOp: cij = (aij * bij) #define GB_ATYPE \ int8_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ int8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int8_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x * y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_TIMES || GxB_NO_INT8 || GxB_NO_TIMES_INT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__times_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__times_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__times_int8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__times_int8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int8_t int8_t bwork = (*((int8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__times_int8) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *restrict Cx = (int8_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__times_int8) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *restrict Cx = (int8_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__times_int8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return 
(GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__times_int8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__times_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__times_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__times_int8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__times_int8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *Cx = (int8_t *) Cx_output ; int8_t x = (*((int8_t *) x_input)) ; int8_t *Bx = (int8_t *) Bx_input ; int64_t p ; for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; int8_t bij = Bx [p] ; Cx [p] = (x * bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__times_int8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int8_t *Cx = (int8_t *) Cx_output ; int8_t *Ax = (int8_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int8_t aij = Ax [p] ; Cx [p] = (aij * y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = Ax [pA] ; \ Cx [pC] = (x * aij) ; \ } GrB_Info GB (_bind1st_tran__times_int8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t x = (*((const int8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = Ax [pA] ; \ Cx [pC] = (aij * y) ; \ } GrB_Info GB (_bind2nd_tran__times_int8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__times_int8) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__times_int8) // A.*B function (eWiseMult): GB (_AemultB_03__times_int8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__times_int8) // A*D function (colscale): GB (_AxD__times_int8) // D*A function (rowscale): GB (_DxB__times_int8) // C+=B function (dense accum): GB (_Cdense_accumB__times_int8) // C+=b function (dense accum): GB (_Cdense_accumb__times_int8) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__times_int8) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__times_int8) // C=scalar+B GB (_bind1st__times_int8) // C=scalar+B' GB (_bind1st_tran__times_int8) // C=A+scalar GB (_bind2nd__times_int8) // C=A'+scalar GB (_bind2nd_tran__times_int8) // C type: int8_t // A type: int8_t // B,b type: int8_t // BinaryOp: cij = (aij * bij) #define GB_ATYPE \ int8_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ int8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int8_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x * y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_TIMES || GxB_NO_INT8 || GxB_NO_TIMES_INT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__times_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__times_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__times_int8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__times_int8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int8_t int8_t bwork = (*((int8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__times_int8) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *restrict Cx = (int8_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__times_int8) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *restrict Cx = (int8_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__times_int8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return 
(GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__times_int8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__times_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__times_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__times_int8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__times_int8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *Cx = (int8_t *) Cx_output ; int8_t x = (*((int8_t *) x_input)) ; int8_t *Bx = (int8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; int8_t bij = Bx [p] ; Cx [p] = (x * bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__times_int8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int8_t *Cx = (int8_t *) Cx_output ; int8_t *Ax = (int8_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int8_t aij = Ax [p] ; Cx [p] = (aij * y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = Ax [pA] ; \ Cx [pC] = (x * aij) ; \ } GrB_Info GB (_bind1st_tran__times_int8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is 
// the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t x = (*((const int8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = Ax [pA] ; \ Cx [pC] = (aij * y) ; \ } GrB_Info GB (_bind2nd_tran__times_int8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
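As a compact illustration of the bind2nd pattern used by GB (_bind2nd__times_int8) above, the sketch below (hypothetical names, not the GraphBLAS API) binds the scalar y to the second operand of TIMES, skips entries whose bitmap bit is clear, and parallelizes the loop with a static schedule. The explicit NULL test stands in for the GBB(Ab,p) macro, which treats a NULL bitmap as "all entries present".

#include <stdint.h>

void bind2nd_times_int8(int8_t *Cx, const int8_t *Ax, int8_t y,
                        const int8_t *Ab,   /* bitmap of A, or NULL if A is full */
                        int64_t anz, int nthreads)
{
  int64_t p;
  #pragma omp parallel for num_threads(nthreads) schedule(static)
  for (p = 0; p < anz; p++)
  {
    if (Ab != NULL && !Ab[p]) continue;  /* entry p is not present */
    Cx[p] = (int8_t) (Ax[p] * y);        /* cij = aij * y */
  }
}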
debug_so8_acoustic.c
#define _POSIX_C_SOURCE 200809L #include "stdlib.h" #include "math.h" #include "sys/time.h" #include "xmmintrin.h" #include "pmmintrin.h" #include <stdio.h> #include "omp.h" #define min(a, b) (((a) < (b)) ? (a) : (b)) #define max(a, b) (((a) > (b)) ? (a) : (b)) struct dataobj { void *restrict data; int *size; int *npsize; int *dsize; int *hsize; int *hofs; int *oofs; }; struct profiler { double section0; }; int Kernel(struct dataobj *restrict damp_vec, const float dt, const float h_x, const float h_y, const float h_z, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict save_src_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict usol_vec, struct dataobj *restrict vp_vec, const int sp_zi_m, const int time_M, const int time_m, struct profiler *timers, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m) { float(*restrict damp)[damp_vec->size[1]][damp_vec->size[2]] __attribute__((aligned(64))) = (float(*)[damp_vec->size[1]][damp_vec->size[2]])damp_vec->data; int(*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__((aligned(64))) = (int(*)[nnz_sp_source_mask_vec->size[1]])nnz_sp_source_mask_vec->data; float(*restrict save_src)[save_src_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_vec->size[1]])save_src_vec->data; int(*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_id_vec->size[1]][source_id_vec->size[2]])source_id_vec->data; float(*restrict source_mask)[source_mask_vec->size[1]][source_mask_vec->size[2]] __attribute__((aligned(64))) = (float(*)[source_mask_vec->size[1]][source_mask_vec->size[2]])source_mask_vec->data; int(*restrict sp_source_mask)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]])sp_source_mask_vec->data; float(*restrict usol)[usol_vec->size[1]][usol_vec->size[2]][usol_vec->size[3]] __attribute__((aligned(64))) = (float(*)[usol_vec->size[1]][usol_vec->size[2]][usol_vec->size[3]])usol_vec->data; float(*restrict vp)[vp_vec->size[1]][vp_vec->size[2]] __attribute__((aligned(64))) = (float(*)[vp_vec->size[1]][vp_vec->size[2]])vp_vec->data; /* Flush denormal numbers to zero in hardware */ _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON); _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON); int xb_size = 32; int yb_size = 32; // to fix as 8/16 etc int x0_blk0_size = 8; int y0_blk0_size = 8; int sf = 4; //int t_blk_size = time_M - time_m ; int t_blk_size = 2 * sf * (time_M - time_m); struct timeval start_section0, end_section0; gettimeofday(&start_section0, NULL); for (int t_blk = time_m; t_blk < sf * (time_M - time_m); t_blk += sf * t_blk_size) // for each t block { //printf(" Change of tblock %d \n", t_blk); for (int xb = x_m; xb <= (x_M + sf * (time_M - time_m)); xb += xb_size + 1) { //printf(" Change of outer xblock %d \n", xb); for (int yb = y_m; yb <= (y_M + sf * (time_M - time_m)); yb += yb_size + 1) { //printf(" Change of yblock %d \n", yb); for (int time = t_blk, t0 = (time + 1) % (3), t1 = (time) % (3), t2 = (time + 2) % (3); time <= 1 + min(t_blk + t_blk_size - 1, sf * (time_M - time_m)); time += sf, t0 = (((time / sf) % (time_M - time_m + 1)) + 1) % (3), t1 = (((time / sf) % (time_M - time_m + 1))) % (3), t2 = (((time / sf) % (time_M - time_m + 1)) + 2) % (3)) { int tw = ((time / sf) % (time_M - time_m + 
1)); //printf(" Change of time %d t0: %d t1: %d t2: %d \n", tw, t0, t1, t2); /* Begin section0 */ #pragma omp parallel num_threads(8) { #pragma omp for collapse(2) schedule(dynamic, 1) for (int x0_blk0 = max((x_m + time), xb); x0_blk0 <= min((x_M + time), (xb + xb_size)); x0_blk0 += x0_blk0_size) { // printf(" Change of inner xblock %d \n", x0_blk0); for (int y0_blk0 = max((y_m + time), yb); y0_blk0 <= min((y_M + time), (yb + yb_size)); y0_blk0 += y0_blk0_size) { for (int x = x0_blk0; x <= min(min((x_M + time), (xb + xb_size)), (x0_blk0 + x0_blk0_size - 1)); x++) { //printf(" time: %d , x: %d \n", time, x - time); for (int y = y0_blk0; y <= min(min((y_M + time), (yb + yb_size)), (y0_blk0 + y0_blk0_size - 1)); y++) { #pragma omp simd aligned(damp, usol, vp : 32) for (int z = z_m; z <= z_M; z += 1) { float r14 = -2.84722222F * usol[t1][x - time + 8][y - time + 8][z + 8]; float r13 = 1.0 / dt; float r12 = 1.0 / (dt * dt); float r11 = 1.0 / (vp[x - time + 8][y - time + 8][z + 8] * vp[x - time + 8][y - time + 8][z + 8]); usol[t0][x - time + 8][y - time + 8][z + 8] = (r11 * (-r12 * (-2.0F * usol[t1][x - time + 8][y - time + 8][z + 8] + usol[t2][x - time + 8][y - time + 8][z + 8])) + r13 * (damp[x - time + 1][y - time + 1][z + 1] * usol[t1][x - time + 8][y - time + 8][z + 8]) + (r14 - 1.78571429e-3F * (usol[t1][x - time + 8][y - time + 8][z + 4] + usol[t1][x - time + 8][y - time + 8][z + 12]) + 2.53968254e-2F * (usol[t1][x - time + 8][y - time + 8][z + 5] + usol[t1][x - time + 8][y - time + 8][z + 11]) - 2.0e-1F * (usol[t1][x - time + 8][y - time + 8][z + 6] + usol[t1][x - time + 8][y - time + 8][z + 10]) + 1.6F * (usol[t1][x - time + 8][y - time + 8][z + 7] + usol[t1][x - time + 8][y - time + 8][z + 9])) / ((h_z * h_z)) + (r14 - 1.78571429e-3F * (usol[t1][x - time + 8][y - time + 4][z + 8] + usol[t1][x - time + 8][y - time + 12][z + 8]) + 2.53968254e-2F * (usol[t1][x - time + 8][y - time + 5][z + 8] + usol[t1][x - time + 8][y - time + 11][z + 8]) - 2.0e-1F * (usol[t1][x - time + 8][y - time + 6][z + 8] + usol[t1][x - time + 8][y - time + 10][z + 8]) + 1.6F * (usol[t1][x - time + 8][y - time + 7][z + 8] + usol[t1][x - time + 8][y - time + 9][z + 8])) / ((h_y * h_y)) + (r14 - 1.78571429e-3F * (usol[t1][x - time + 4][y - time + 8][z + 8] + usol[t1][x - time + 12][y - time + 8][z + 8]) + 2.53968254e-2F * (usol[t1][x - time + 5][y - time + 8][z + 8] + usol[t1][x - time + 11][y - time + 8][z + 8]) - 2.0e-1F * (usol[t1][x - time + 6][y - time + 8][z + 8] + usol[t1][x - time + 10][y - time + 8][z + 8]) + 1.6F * (usol[t1][x - time + 7][y - time + 8][z + 8] + usol[t1][x - time + 9][y - time + 8][z + 8])) / ((h_x * h_x))) / (r11 * r12 + r13 * damp[x - time + 1][y - time + 1][z + 1]); } //int sp_zi_M = nnz_sp_source_mask[x + 1][y + 1]; #pragma omp simd aligned(damp, usol, vp : 32) for (int sp_zi = sp_zi_m; sp_zi <= nnz_sp_source_mask[x - time][y - time]-1; sp_zi += 1) { //printf(" sp_zi = %d \n", sp_zi); //printf(" sp_source_mask = %d \n", sp_source_mask[x + 1][y + 1][sp_zi] + 1); //int zind = sp_source_mask[x - time + 8][y - time + 8][sp_zi] + 1; //printf(" source_mask = %d \n", source_mask[x - time + 2][y - time + 2][zind]); int zind = sp_source_mask[x - time][y - time][sp_zi]; //printf(" source_id = %d \n", source_id[x + 1][y + 1][zind + 1]); //printf(" source_mask = %f \n", source_mask[x -time][y - time ][zind]); float r0 = save_src[((time / sf) % (time_M - time_m + 1))][source_id[x - time][y - time][zind]] * source_mask[x - time][y - time][zind]; //printf(" Input %f \n", r0); //printf(" time is : %d 
\n", ((time / sf) % (time_M - time_m + 1))); usol[t0][x - time + 8][y - time + 8][zind + 8] += r0; //4.49016082216644F * (vp[x - time + 8][y - time + 8][zind + 8] * vp[x - time + 8][y - time + 8][zind + 8]) * r0; } } } } } } } } } } /* End section0 */ gettimeofday(&end_section0, NULL); timers->section0 += (double)(end_section0.tv_sec - start_section0.tv_sec) + (double)(end_section0.tv_usec - start_section0.tv_usec) / 1000000; return 0; } /* Backdoor edit at Thu Jul 9 11:44:33 2020*/ /* Backdoor edit at Thu Jul 9 11:45:12 2020*/ /* Backdoor edit at Thu Jul 9 11:52:30 2020*/ /* Backdoor edit at Thu Jul 9 11:53:50 2020*/ /* Backdoor edit at Thu Jul 9 11:55:13 2020*/ /* Backdoor edit at Thu Jul 9 11:58:50 2020*/ /* Backdoor edit at Thu Jul 9 12:01:11 2020*/ /* Backdoor edit at Thu Jul 9 12:04:29 2020*/ /* Backdoor edit at Thu Jul 9 12:06:49 2020*/ /* Backdoor edit at Thu Jul 9 12:28:24 2020*/ /* Backdoor edit at Thu Jul 9 12:42:14 2020*/ /* Backdoor edit at Thu Jul 9 12:43:50 2020*/ /* Backdoor edit at Thu Jul 9 12:48:57 2020*/ /* Backdoor edit at Thu Jul 9 12:52:25 2020*/ /* Backdoor edit at Thu Jul 9 12:54:44 2020*/ /* Backdoor edit at Thu Jul 9 12:56:41 2020*/ /* Backdoor edit at Thu Jul 9 13:31:02 2020*/
#define _POSIX_C_SOURCE 200809L #include "stdlib.h" #include "math.h" #include "sys/time.h" #include "xmmintrin.h" #include "pmmintrin.h" #include <stdio.h> #include "omp.h" #define min(a, b) (((a) < (b)) ? (a) : (b)) #define max(a, b) (((a) > (b)) ? (a) : (b)) struct dataobj { void *restrict data; int *size; int *npsize; int *dsize; int *hsize; int *hofs; int *oofs; }; struct profiler { double section0; }; int Kernel(struct dataobj *restrict damp_vec, const float dt, const float h_x, const float h_y, const float h_z, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict save_src_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict usol_vec, struct dataobj *restrict vp_vec, const int sp_zi_m, const int time_M, const int time_m, struct profiler *timers, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m) { float (*restrict damp)[damp_vec->size[1]][damp_vec->size[2]] __attribute__((aligned(64))) = (float (*)[damp_vec->size[1]][damp_vec->size[2]])damp_vec->data; int (*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__((aligned(64))) = (int (*)[nnz_sp_source_mask_vec->size[1]])nnz_sp_source_mask_vec->data; float (*restrict save_src)[save_src_vec->size[1]] __attribute__((aligned(64))) = (float (*)[save_src_vec->size[1]])save_src_vec->data; int (*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__((aligned(64))) = (int (*)[source_id_vec->size[1]][source_id_vec->size[2]])source_id_vec->data; float (*restrict source_mask)[source_mask_vec->size[1]][source_mask_vec->size[2]] __attribute__((aligned(64))) = (float (*)[source_mask_vec->size[1]][source_mask_vec->size[2]])source_mask_vec->data; int (*restrict sp_source_mask)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]] __attribute__((aligned(64))) = (int (*)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]])sp_source_mask_vec->data; float (*restrict usol)[usol_vec->size[1]][usol_vec->size[2]][usol_vec->size[3]] __attribute__((aligned(64))) = (float (*)[usol_vec->size[1]][usol_vec->size[2]][usol_vec->size[3]])usol_vec->data; float (*restrict vp)[vp_vec->size[1]][vp_vec->size[2]] __attribute__((aligned(64))) = (float (*)[vp_vec->size[1]][vp_vec->size[2]])vp_vec->data; /* Flush denormal numbers to zero in hardware */ _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON); _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON); int xb_size = 32; int yb_size = 32; //to fix as 8 / 16 etc int x0_blk0_size = 8; int y0_blk0_size = 8; int sf = 4; //int t_blk_size = time_M - time_m; int t_blk_size = 2 * sf * (time_M - time_m); struct timeval start_section0, end_section0; gettimeofday(&start_section0, NULL); for (int t_blk = time_m; t_blk < sf * (time_M - time_m); t_blk += sf * t_blk_size) //for each t block { //printf(" Change of tblock %d \n", t_blk); for (int xb = x_m; xb <= (x_M + sf * (time_M - time_m)); xb += xb_size + 1) { //printf(" Change of outer xblock %d \n", xb); for (int yb = y_m; yb <= (y_M + sf * (time_M - time_m)); yb += yb_size + 1) { //printf(" Change of yblock %d \n", yb); for (int time = t_blk, t0 = (time + 1) % (3), t1 = (time) % (3), t2 = (time + 2) % (3); time <= 1 + min(t_blk + t_blk_size - 1, sf * (time_M - time_m)); time += sf, t0 = (((time / sf) % (time_M - time_m + 1)) + 1) % (3), t1 = (((time / sf) % (time_M - time_m + 1))) % (3), t2 = (((time / sf) % (time_M - time_m + 1)) + 2) % (3)) { int tw = ((time / sf) % (time_M 
- time_m + 1)); //printf(" Change of time %d t0: %d t1: %d t2: %d \n", tw, t0, t1, t2); /* Begin section0 */ for (int x0_blk0 = max((x_m + time), xb); x0_blk0 <= min((x_M + time), (xb + xb_size)); x0_blk0 += x0_blk0_size) { //printf(" Change of inner xblock %d \n", x0_blk0); for (int y0_blk0 = max((y_m + time), yb); y0_blk0 <= min((y_M + time), (yb + yb_size)); y0_blk0 += y0_blk0_size) { for (int x = x0_blk0; x <= min(min((x_M + time), (xb + xb_size)), (x0_blk0 + x0_blk0_size - 1)); x++) { //printf(" time: %d , x: %d \n", time, x - time); for (int y = y0_blk0; y <= min(min((y_M + time), (yb + yb_size)), (y0_blk0 + y0_blk0_size - 1)); y++) { for (int z = z_m; z <= z_M; z += 1) { float r14 = -2.84722222 F * usol[t1][x - time + 8][y - time + 8][z + 8]; float r13 = 1.0 / dt; float r12 = 1.0 / (dt * dt); float r11 = 1.0 / (vp[x - time + 8][y - time + 8][z + 8] * vp[x - time + 8][y - time + 8][z + 8]); usol[t0][x - time + 8][y - time + 8][z + 8] = (r11 * (-r12 * (-2.0 F * usol[t1][x - time + 8][y - time + 8][z + 8] + usol[t2][x - time + 8][y - time + 8][z + 8])) + r13 * (damp[x - time + 1][y - time + 1][z + 1] * usol[t1][x - time + 8][y - time + 8][z + 8]) + (r14 - 1.78571429e-3 F * (usol[t1][x - time + 8][y - time + 8][z + 4] + usol[t1][x - time + 8][y - time + 8][z + 12]) + 2.53968254e-2 F * (usol[t1][x - time + 8][y - time + 8][z + 5] + usol[t1][x - time + 8][y - time + 8][z + 11]) - 2.0e-1 F * (usol[t1][x - time + 8][y - time + 8][z + 6] + usol[t1][x - time + 8][y - time + 8][z + 10]) + 1.6 F * (usol[t1][x - time + 8][y - time + 8][z + 7] + usol[t1][x - time + 8][y - time + 8][z + 9])) / ((h_z * h_z)) + (r14 - 1.78571429e-3 F * (usol[t1][x - time + 8][y - time + 4][z + 8] + usol[t1][x - time + 8][y - time + 12][z + 8]) + 2.53968254e-2 F * (usol[t1][x - time + 8][y - time + 5][z + 8] + usol[t1][x - time + 8][y - time + 11][z + 8]) - 2.0e-1 F * (usol[t1][x - time + 8][y - time + 6][z + 8] + usol[t1][x - time + 8][y - time + 10][z + 8]) + 1.6 F * (usol[t1][x - time + 8][y - time + 7][z + 8] + usol[t1][x - time + 8][y - time + 9][z + 8])) / ((h_y * h_y)) + (r14 - 1.78571429e-3 F * (usol[t1][x - time + 4][y - time + 8][z + 8] + usol[t1][x - time + 12][y - time + 8][z + 8]) + 2.53968254e-2 F * (usol[t1][x - time + 5][y - time + 8][z + 8] + usol[t1][x - time + 11][y - time + 8][z + 8]) - 2.0e-1 F * (usol[t1][x - time + 6][y - time + 8][z + 8] + usol[t1][x - time + 10][y - time + 8][z + 8]) + 1.6 F * (usol[t1][x - time + 7][y - time + 8][z + 8] + usol[t1][x - time + 9][y - time + 8][z + 8])) / ((h_x * h_x))) / (r11 * r12 + r13 * damp[x - time + 1][y - time + 1][z + 1]); } //int sp_zi_M = nnz_sp_source_mask[x + 1][y + 1]; for (int sp_zi = sp_zi_m; sp_zi <= nnz_sp_source_mask[x - time][y - time] - 1; sp_zi += 1) { //printf(" sp_zi = %d \n", sp_zi); //printf(" sp_source_mask = %d \n", sp_source_mask[x + 1][y + 1][sp_zi] + 1); //int zind = sp_source_mask[x - time + 8][y - time + 8][sp_zi] + 1; //printf(" source_mask = %d \n", source_mask[x - time + 2][y - time + 2][zind]); int zind = sp_source_mask[x - time][y - time][sp_zi]; //printf(" source_id = %d \n", source_id[x + 1][y + 1][zind + 1]); //printf(" source_mask = %f \n", source_mask[x - time][y - time][zind]); float r0 = save_src[((time / sf) % (time_M - time_m + 1))][source_id[x - time][y - time][zind]] * source_mask[x - time][y - time][zind]; //printf(" Input %f \n", r0); //printf(" time is : %d \n", ((time / sf) % (time_M - time_m + 1))); usol[t0][x - time + 8][y - time + 8][zind + 8] += r0; //4.49016082216644 F * (vp[x - time + 8][y - time + 
8][zind + 8] * vp[x - time + 8][y - time + 8][zind + 8]) * r0; } } } } } } } } } /* End section0 */ gettimeofday(&end_section0, NULL); timers->section0 += (double)(end_section0.tv_sec - start_section0.tv_sec) + (double)(end_section0.tv_usec - start_section0.tv_usec) / 1000000; return 0; }
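/*
 * Minimal sketch, separate from the generated kernel above: the t0/t1/t2
 * indices implement a three-slot circular buffer over time, so only three
 * time levels of usol are ever stored. The standalone toy below rotates the
 * slots the same way; the one-point field and its update rule are
 * placeholders, not taken from the kernel.
 */
#include <stdio.h>

int main(void)
{
    float u[3] = {0.0f, 1.0f, 0.0f};      /* three time levels of a 1-point field */
    for (int tw = 0; tw < 6; tw++) {
        int t0 = (tw + 1) % 3;            /* level being written: t + dt */
        int t1 = tw % 3;                  /* current level:       t      */
        int t2 = (tw + 2) % 3;            /* previous level:      t - dt */
        u[t0] = 2.0f * u[t1] - u[t2];     /* toy update overwriting the oldest slot */
        printf("tw=%d t0=%d t1=%d t2=%d u[t0]=%g\n", tw, t0, t1, t2, u[t0]);
    }
    return 0;
}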
#define _POSIX_C_SOURCE 200809L #include "stdlib.h" #include "math.h" #include "sys/time.h" #include "xmmintrin.h" #include "pmmintrin.h" #include <stdio.h> #include "omp.h" #define min(a, b) (((a) < (b)) ? (a) : (b)) #define max(a, b) (((a) > (b)) ? (a) : (b)) struct dataobj { void *restrict data; int *size; int *npsize; int *dsize; int *hsize; int *hofs; int *oofs; }; struct profiler { double section0; }; int Kernel(struct dataobj *restrict damp_vec, const float dt, const float h_x, const float h_y, const float h_z, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict save_src_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict usol_vec, struct dataobj *restrict vp_vec, const int sp_zi_m, const int time_M, const int time_m, struct profiler *timers, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m) { float (*restrict damp)[damp_vec->size[1]][damp_vec->size[2]] __attribute__((aligned(64))) = (float (*)[damp_vec->size[1]][damp_vec->size[2]])damp_vec->data; int (*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__((aligned(64))) = (int (*)[nnz_sp_source_mask_vec->size[1]])nnz_sp_source_mask_vec->data; float (*restrict save_src)[save_src_vec->size[1]] __attribute__((aligned(64))) = (float (*)[save_src_vec->size[1]])save_src_vec->data; int (*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__((aligned(64))) = (int (*)[source_id_vec->size[1]][source_id_vec->size[2]])source_id_vec->data; float (*restrict source_mask)[source_mask_vec->size[1]][source_mask_vec->size[2]] __attribute__((aligned(64))) = (float (*)[source_mask_vec->size[1]][source_mask_vec->size[2]])source_mask_vec->data; int (*restrict sp_source_mask)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]] __attribute__((aligned(64))) = (int (*)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]])sp_source_mask_vec->data; float (*restrict usol)[usol_vec->size[1]][usol_vec->size[2]][usol_vec->size[3]] __attribute__((aligned(64))) = (float (*)[usol_vec->size[1]][usol_vec->size[2]][usol_vec->size[3]])usol_vec->data; float (*restrict vp)[vp_vec->size[1]][vp_vec->size[2]] __attribute__((aligned(64))) = (float (*)[vp_vec->size[1]][vp_vec->size[2]])vp_vec->data; /* Flush denormal numbers to zero in hardware */ _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON); _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON); int xb_size = 32; int yb_size = 32; //to fix as 8 / 16 etc int x0_blk0_size = 8; int y0_blk0_size = 8; int sf = 4; //int t_blk_size = time_M - time_m; int t_blk_size = 2 * sf * (time_M - time_m); struct timeval start_section0, end_section0; gettimeofday(&start_section0, NULL); for (int t_blk = time_m; t_blk < sf * (time_M - time_m); t_blk += sf * t_blk_size) //for each t block { //printf(" Change of tblock %d \n", t_blk); for (int xb = x_m; xb <= (x_M + sf * (time_M - time_m)); xb += xb_size + 1) { //printf(" Change of outer xblock %d \n", xb); for (int yb = y_m; yb <= (y_M + sf * (time_M - time_m)); yb += yb_size + 1) { //printf(" Change of yblock %d \n", yb); for (int time = t_blk, t0 = (time + 1) % (3), t1 = (time) % (3), t2 = (time + 2) % (3); time <= 1 + min(t_blk + t_blk_size - 1, sf * (time_M - time_m)); time += sf, t0 = (((time / sf) % (time_M - time_m + 1)) + 1) % (3), t1 = (((time / sf) % (time_M - time_m + 1))) % (3), t2 = (((time / sf) % (time_M - time_m + 1)) + 2) % (3)) { int tw = ((time / sf) % (time_M 
- time_m + 1)); //printf(" Change of time %d t0: %d t1: %d t2: %d \n", tw, t0, t1, t2); /* Begin section0 */ #pragma omp parallel num_threads(8) { #pragma omp for collapse(2) schedule(dynamic, 1) for (int x0_blk0 = max((x_m + time), xb); x0_blk0 <= min((x_M + time), (xb + xb_size)); x0_blk0 += x0_blk0_size) { //printf(" Change of inner xblock %d \n", x0_blk0); for (int y0_blk0 = max((y_m + time), yb); y0_blk0 <= min((y_M + time), (yb + yb_size)); y0_blk0 += y0_blk0_size) { for (int x = x0_blk0; x <= min(min((x_M + time), (xb + xb_size)), (x0_blk0 + x0_blk0_size - 1)); x++) { //printf(" time: %d , x: %d \n", time, x - time); for (int y = y0_blk0; y <= min(min((y_M + time), (yb + yb_size)), (y0_blk0 + y0_blk0_size - 1)); y++) { #pragma omp simd aligned(damp, usol, vp : 32) for (int z = z_m; z <= z_M; z += 1) { float r14 = -2.84722222 F * usol[t1][x - time + 8][y - time + 8][z + 8]; float r13 = 1.0 / dt; float r12 = 1.0 / (dt * dt); float r11 = 1.0 / (vp[x - time + 8][y - time + 8][z + 8] * vp[x - time + 8][y - time + 8][z + 8]); usol[t0][x - time + 8][y - time + 8][z + 8] = (r11 * (-r12 * (-2.0 F * usol[t1][x - time + 8][y - time + 8][z + 8] + usol[t2][x - time + 8][y - time + 8][z + 8])) + r13 * (damp[x - time + 1][y - time + 1][z + 1] * usol[t1][x - time + 8][y - time + 8][z + 8]) + (r14 - 1.78571429e-3 F * (usol[t1][x - time + 8][y - time + 8][z + 4] + usol[t1][x - time + 8][y - time + 8][z + 12]) + 2.53968254e-2 F * (usol[t1][x - time + 8][y - time + 8][z + 5] + usol[t1][x - time + 8][y - time + 8][z + 11]) - 2.0e-1 F * (usol[t1][x - time + 8][y - time + 8][z + 6] + usol[t1][x - time + 8][y - time + 8][z + 10]) + 1.6 F * (usol[t1][x - time + 8][y - time + 8][z + 7] + usol[t1][x - time + 8][y - time + 8][z + 9])) / ((h_z * h_z)) + (r14 - 1.78571429e-3 F * (usol[t1][x - time + 8][y - time + 4][z + 8] + usol[t1][x - time + 8][y - time + 12][z + 8]) + 2.53968254e-2 F * (usol[t1][x - time + 8][y - time + 5][z + 8] + usol[t1][x - time + 8][y - time + 11][z + 8]) - 2.0e-1 F * (usol[t1][x - time + 8][y - time + 6][z + 8] + usol[t1][x - time + 8][y - time + 10][z + 8]) + 1.6 F * (usol[t1][x - time + 8][y - time + 7][z + 8] + usol[t1][x - time + 8][y - time + 9][z + 8])) / ((h_y * h_y)) + (r14 - 1.78571429e-3 F * (usol[t1][x - time + 4][y - time + 8][z + 8] + usol[t1][x - time + 12][y - time + 8][z + 8]) + 2.53968254e-2 F * (usol[t1][x - time + 5][y - time + 8][z + 8] + usol[t1][x - time + 11][y - time + 8][z + 8]) - 2.0e-1 F * (usol[t1][x - time + 6][y - time + 8][z + 8] + usol[t1][x - time + 10][y - time + 8][z + 8]) + 1.6 F * (usol[t1][x - time + 7][y - time + 8][z + 8] + usol[t1][x - time + 9][y - time + 8][z + 8])) / ((h_x * h_x))) / (r11 * r12 + r13 * damp[x - time + 1][y - time + 1][z + 1]); } //int sp_zi_M = nnz_sp_source_mask[x + 1][y + 1]; #pragma omp simd aligned(damp, usol, vp : 32) for (int sp_zi = sp_zi_m; sp_zi <= nnz_sp_source_mask[x - time][y - time] - 1; sp_zi += 1) { //printf(" sp_zi = %d \n", sp_zi); //printf(" sp_source_mask = %d \n", sp_source_mask[x + 1][y + 1][sp_zi] + 1); //int zind = sp_source_mask[x - time + 8][y - time + 8][sp_zi] + 1; //printf(" source_mask = %d \n", source_mask[x - time + 2][y - time + 2][zind]); int zind = sp_source_mask[x - time][y - time][sp_zi]; //printf(" source_id = %d \n", source_id[x + 1][y + 1][zind + 1]); //printf(" source_mask = %f \n", source_mask[x - time][y - time][zind]); float r0 = save_src[((time / sf) % (time_M - time_m + 1))][source_id[x - time][y - time][zind]] * source_mask[x - time][y - time][zind]; //printf(" Input %f \n", r0); 
//printf(" time is : %d \n", ((time / sf) % (time_M - time_m + 1))); usol[t0][x - time + 8][y - time + 8][zind + 8] += r0; //4.49016082216644 F * (vp[x - time + 8][y - time + 8][zind + 8] * vp[x - time + 8][y - time + 8][zind + 8]) * r0; } } } } } } } } } } /* End section0 */ gettimeofday(&end_section0, NULL); timers->section0 += (double)(end_section0.tv_sec - start_section0.tv_sec) + (double)(end_section0.tv_usec - start_section0.tv_usec) / 1000000; return 0; } /* Backdoor edit at Thu Jul 9 11:44:33 2020 */ /* Backdoor edit at Thu Jul 9 11:45:12 2020 */ /* Backdoor edit at Thu Jul 9 11:52:30 2020 */ /* Backdoor edit at Thu Jul 9 11:53:50 2020 */ /* Backdoor edit at Thu Jul 9 11:55:13 2020 */ /* Backdoor edit at Thu Jul 9 11:58:50 2020 */ /* Backdoor edit at Thu Jul 9 12:01:11 2020 */ /* Backdoor edit at Thu Jul 9 12:04:29 2020 */ /* Backdoor edit at Thu Jul 9 12:06:49 2020 */ /* Backdoor edit at Thu Jul 9 12:28:24 2020 */ /* Backdoor edit at Thu Jul 9 12:42:14 2020 */ /* Backdoor edit at Thu Jul 9 12:43:50 2020 */ /* Backdoor edit at Thu Jul 9 12:48:57 2020 */ /* Backdoor edit at Thu Jul 9 12:52:25 2020 */ /* Backdoor edit at Thu Jul 9 12:54:44 2020 */ /* Backdoor edit at Thu Jul 9 12:56:41 2020 */ /* Backdoor edit at Thu Jul 9 13:31:02 2020 */
dsdd.c
/*! @copyright (c) 2017 King Abdullah University of Science and * Technology (KAUST). All rights reserved. * * STARS-H is a software package, provided by King Abdullah * University of Science and Technology (KAUST) * * @file src/backends/openmp/blrm/dsdd.c * @version 1.3.0 * @author Aleksandr Mikhalev * @date 2017-11-07 * */ #include "common.h" #include "starsh.h" int starsh_blrm__dsdd_omp(STARSH_blrm **matrix, STARSH_blrf *format, int maxrank, double tol, int onfly) //! Approximate each tile by divide-and-conquer SVD (GESDD function). /*! * @param[out] matrix: Address of pointer to @ref STARSH_blrm object. * @param[in] format: Block low-rank format. * @param[in] maxrank: Maximum possible rank. * @param[in] tol: Relative error tolerance. * @param[in] onfly: Whether not to store dense blocks. * @ingroup blrm * */ { STARSH_blrf *F = format; STARSH_problem *P = F->problem; STARSH_kernel *kernel = P->kernel; STARSH_int nblocks_far = F->nblocks_far; STARSH_int nblocks_near = F->nblocks_near; // Shortcuts to information about clusters STARSH_cluster *RC = F->row_cluster; STARSH_cluster *CC = F->col_cluster; void *RD = RC->data, *CD = CC->data; // Following values default to given block low-rank format F, but they are // changed when there are false far-field blocks. STARSH_int new_nblocks_far = nblocks_far; STARSH_int new_nblocks_near = nblocks_near; STARSH_int *block_far = F->block_far; STARSH_int *block_near = F->block_near; // Places to store low-rank factors, dense blocks and ranks Array **far_U = NULL, **far_V = NULL, **near_D = NULL; int *far_rank = NULL; double *alloc_U = NULL, *alloc_V = NULL, *alloc_D = NULL; size_t offset_U = 0, offset_V = 0, offset_D = 0; STARSH_int bi, bj = 0; // Init buffers to store low-rank factors of far-field blocks if needed if(nblocks_far > 0) { STARSH_MALLOC(far_U, nblocks_far); STARSH_MALLOC(far_V, nblocks_far); STARSH_MALLOC(far_rank, nblocks_far); size_t size_U = 0, size_V = 0; // Simple cycle over all far-field blocks for(bi = 0; bi < nblocks_far; bi++) { // Get indexes of corresponding block row and block column STARSH_int i = block_far[2*bi]; STARSH_int j = block_far[2*bi+1]; // Get corresponding sizes and minimum of them size_t nrows = RC->size[i], ncols = CC->size[j]; size_U += nrows*maxrank; size_V += ncols*maxrank; } STARSH_MALLOC(alloc_U, size_U); STARSH_MALLOC(alloc_V, size_V); for(bi = 0; bi < nblocks_far; bi++) { // Get indexes of corresponding block row and block column STARSH_int i = block_far[2*bi]; STARSH_int j = block_far[2*bi+1]; // Get corresponding sizes and minimum of them size_t nrows = RC->size[i], ncols = CC->size[j]; int shape_U[] = {nrows, maxrank}; int shape_V[] = {ncols, maxrank}; double *U = alloc_U+offset_U, *V = alloc_V+offset_V; offset_U += nrows*maxrank; offset_V += ncols*maxrank; array_from_buffer(far_U+bi, 2, shape_U, 'd', 'F', U); array_from_buffer(far_V+bi, 2, shape_V, 'd', 'F', V); } offset_U = 0; offset_V = 0; } // Work variables int info; // Simple cycle over all far-field admissible blocks #pragma omp parallel for schedule(dynamic,1) for(bi = 0; bi < nblocks_far; bi++) { int info; // Get indexes of corresponding block row and block column STARSH_int i = block_far[2*bi]; STARSH_int j = block_far[2*bi+1]; // Get corresponding sizes and minimum of them int nrows = RC->size[i]; int ncols = CC->size[j]; int mn = nrows > ncols ? 
ncols : nrows; // Get size of temporary arrays int lmn = mn, lwork = (4*lmn+8+nrows+ncols)*lmn, liwork = 8*lmn; double *D, *work; int *iwork; size_t D_size = (size_t)nrows*(size_t)ncols; // Allocate temporary arrays STARSH_PMALLOC(D, D_size, info); STARSH_PMALLOC(work, lwork, info); STARSH_PMALLOC(iwork, liwork, info); // Compute elements of a block kernel(nrows, ncols, RC->pivot+RC->start[i], CC->pivot+CC->start[j], RD, CD, D, nrows); starsh_dense_dlrsdd(nrows, ncols, D, nrows, far_U[bi]->data, nrows, far_V[bi]->data, ncols, far_rank+bi, maxrank, tol, work, lwork, iwork); // Free temporary arrays free(D); free(work); free(iwork); } // Get number of false far-field blocks STARSH_int nblocks_false_far = 0; STARSH_int *false_far = NULL; for(bi = 0; bi < nblocks_far; bi++) if(far_rank[bi] == -1) nblocks_false_far++; if(nblocks_false_far > 0) { // IMPORTANT: `false_far` must to be in ascending order for later code // to work normally STARSH_MALLOC(false_far, nblocks_false_far); bj = 0; for(bi = 0; bi < nblocks_far; bi++) if(far_rank[bi] == -1) false_far[bj++] = bi; } // Update lists of far-field and near-field blocks using previously // generated list of false far-field blocks if(nblocks_false_far > 0) { // Update list of near-field blocks new_nblocks_near = nblocks_near+nblocks_false_far; STARSH_MALLOC(block_near, 2*new_nblocks_near); // At first get all near-field blocks, assumed to be dense #pragma omp parallel for schedule(static) for(bi = 0; bi < 2*nblocks_near; bi++) block_near[bi] = F->block_near[bi]; // Add false far-field blocks #pragma omp parallel for schedule(static) for(bi = 0; bi < nblocks_false_far; bi++) { STARSH_int bj = false_far[bi]; block_near[2*(bi+nblocks_near)] = F->block_far[2*bj]; block_near[2*(bi+nblocks_near)+1] = F->block_far[2*bj+1]; } // Update list of far-field blocks new_nblocks_far = nblocks_far-nblocks_false_far; if(new_nblocks_far > 0) { STARSH_MALLOC(block_far, 2*new_nblocks_far); bj = 0; for(bi = 0; bi < nblocks_far; bi++) { // `false_far` must be in ascending order for this to work if(false_far[bj] == bi) { bj++; } else { block_far[2*(bi-bj)] = F->block_far[2*bi]; block_far[2*(bi-bj)+1] = F->block_far[2*bi+1]; } } } // Update format by creating new format STARSH_blrf *F2; info = starsh_blrf_new_from_coo(&F2, P, F->symm, RC, CC, new_nblocks_far, block_far, new_nblocks_near, block_near, F->type); // Swap internal data of formats and free unnecessary data STARSH_blrf tmp_blrf = *F; *F = *F2; *F2 = tmp_blrf; STARSH_WARNING("`F` was modified due to false far-field blocks"); starsh_blrf_free(F2); } // Compute near-field blocks if needed if(onfly == 0 && new_nblocks_near > 0) { STARSH_MALLOC(near_D, new_nblocks_near); size_t size_D = 0; // Simple cycle over all near-field blocks for(bi = 0; bi < new_nblocks_near; bi++) { // Get indexes of corresponding block row and block column STARSH_int i = block_near[2*bi]; STARSH_int j = block_near[2*bi+1]; // Get corresponding sizes and minimum of them size_t nrows = RC->size[i]; size_t ncols = CC->size[j]; // Update size_D size_D += nrows*ncols; } STARSH_MALLOC(alloc_D, size_D); // For each near-field block compute its elements #pragma omp parallel for schedule(dynamic,1) for(bi = 0; bi < new_nblocks_near; bi++) { // Get indexes of corresponding block row and block column STARSH_int i = block_near[2*bi]; STARSH_int j = block_near[2*bi+1]; // Get corresponding sizes and minimum of them int nrows = RC->size[i]; int ncols = CC->size[j]; int shape[2] = {nrows, ncols}; double *D; #pragma omp critical { D = alloc_D+offset_D; 
array_from_buffer(near_D+bi, 2, shape, 'd', 'F', D); offset_D += near_D[bi]->size; } kernel(nrows, ncols, RC->pivot+RC->start[i], CC->pivot+CC->start[j], RD, CD, D, nrows); } } // Change sizes of far_rank, far_U and far_V if there were false // far-field blocks if(nblocks_false_far > 0 && new_nblocks_far > 0) { bj = 0; for(bi = 0; bi < nblocks_far; bi++) { if(far_rank[bi] == -1) bj++; else { int shape_U[2] = {far_U[bi]->shape[0], far_rank[bi]}; int shape_V[2] = {far_V[bi]->shape[0], far_rank[bi]}; array_from_buffer(far_U+bi-bj, 2, shape_U, 'd', 'F', far_U[bi]->data); array_from_buffer(far_V+bi-bj, 2, shape_V, 'd', 'F', far_V[bi]->data); far_rank[bi-bj] = far_rank[bi]; } } STARSH_REALLOC(far_rank, new_nblocks_far); STARSH_REALLOC(far_U, new_nblocks_far); STARSH_REALLOC(far_V, new_nblocks_far); //STARSH_REALLOC(alloc_U, offset_U); //STARSH_REALLOC(alloc_V, offset_V); } // If all far-field blocks are false, then dealloc buffers if(new_nblocks_far == 0 && nblocks_far > 0) { block_far = NULL; free(far_rank); far_rank = NULL; free(far_U); far_U = NULL; free(far_V); far_V = NULL; free(alloc_U); alloc_U = NULL; free(alloc_V); alloc_V = NULL; } // Dealloc list of false far-field blocks if it is not empty if(nblocks_false_far > 0) free(false_far); // Finish with creating instance of Block Low-Rank Matrix with given // buffers return starsh_blrm_new(matrix, F, far_rank, far_U, far_V, onfly, near_D, alloc_U, alloc_V, alloc_D, '1'); }
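/*
 * Design note, a sketch under stated assumptions rather than STARS-H code:
 * in the near-field loop above, the `omp critical` block only serializes
 * threads to carve a slice of alloc_D out of the shared offset_D counter.
 * Because every block's nrows*ncols is known before the parallel loop, the
 * per-block offsets could instead be precomputed as a prefix sum, removing
 * the critical section. The names below (row_size, col_size, offsets) are
 * illustrative.
 */
#include <stddef.h>

static void compute_block_offsets(size_t nblocks, const size_t *row_size,
                                  const size_t *col_size, size_t *offsets)
{
    size_t acc = 0;
    for (size_t bi = 0; bi < nblocks; bi++) {
        offsets[bi] = acc;                   /* start of block bi inside alloc_D */
        acc += row_size[bi] * col_size[bi];
    }
    /* acc now equals the total number of doubles alloc_D must hold. */
}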
#include "common.h" #include "starsh.h" int starsh_blrm__dsdd_omp(STARSH_blrm ** matrix, STARSH_blrf * format, int maxrank, double tol, int onfly) //!Approximate each tile by divide - and - conquer SVD(GESDD function). /* * ! @param[out] matrix: Address of pointer to @ref STARSH_blrm object. * @param[in] format: Block low-rank format. @param[in] maxrank: Maximum * possible rank. @param[in] tol: Relative error tolerance. @param[in] onfly: * Whether not to store dense blocks. @ingroup blrm */ { STARSH_blrf *F = format; STARSH_problem *P = F->problem; STARSH_kernel *kernel = P->kernel; STARSH_int nblocks_far = F->nblocks_far; STARSH_int nblocks_near = F->nblocks_near; //Shortcuts to information about clusters STARSH_cluster * RC = F->row_cluster; STARSH_cluster *CC = F->col_cluster; void *RD = RC->data, *CD = CC->data; //Following values default to given block low - rank format F, but they are // changed when there are false far - field blocks. STARSH_int new_nblocks_far = nblocks_far; STARSH_int new_nblocks_near = nblocks_near; STARSH_int *block_far = F->block_far; STARSH_int *block_near = F->block_near; //Places to store low - rank factors, dense blocks and ranks Array ** far_U = NULL, **far_V = NULL, **near_D = NULL; int *far_rank = NULL; double *alloc_U = NULL, *alloc_V = NULL, *alloc_D = NULL; size_t offset_U = 0, offset_V = 0, offset_D = 0; STARSH_int bi, bj = 0; //Init buffers to store low - rank factors of far - field blocks if needed if (nblocks_far > 0) { STARSH_MALLOC(far_U, nblocks_far); STARSH_MALLOC(far_V, nblocks_far); STARSH_MALLOC(far_rank, nblocks_far); size_t size_U = 0, size_V = 0; //Simple cycle over all far - field blocks for (bi = 0; bi < nblocks_far; bi++) { //Get indexes of corresponding block row and block column STARSH_int i = block_far[2 * bi]; STARSH_int j = block_far[2 * bi + 1]; //Get corresponding sizes and minimum of them size_t nrows = RC->size[i], ncols = CC->size[j]; size_U += nrows * maxrank; size_V += ncols * maxrank; } STARSH_MALLOC(alloc_U, size_U); STARSH_MALLOC(alloc_V, size_V); for (bi = 0; bi < nblocks_far; bi++) { //Get indexes of corresponding block row and block column STARSH_int i = block_far[2 * bi]; STARSH_int j = block_far[2 * bi + 1]; //Get corresponding sizes and minimum of them size_t nrows = RC->size[i], ncols = CC->size[j]; int shape_U[] = {nrows, maxrank}; int shape_V[] = {ncols, maxrank}; double *U = alloc_U + offset_U, *V = alloc_V + offset_V; offset_U += nrows * maxrank; offset_V += ncols * maxrank; array_from_buffer(far_U + bi, 2, shape_U, 'd', 'F', U); array_from_buffer(far_V + bi, 2, shape_V, 'd', 'F', V); } offset_U = 0; offset_V = 0; } //Work variables int info; //Simple cycle over all far - field admissible blocks for (bi = 0; bi < nblocks_far; bi++) { int info; //Get indexes of corresponding block row and block column STARSH_int i = block_far[2 * bi]; STARSH_int j = block_far[2 * bi + 1]; //Get corresponding sizes and minimum of them int nrows = RC->size[i]; int ncols = CC->size[j]; int mn = nrows > ncols ? 
ncols : nrows; //Get size of temporary arrays int lmn = mn, lwork = (4 * lmn + 8 + nrows + ncols) * lmn, liwork = 8 * lmn; double *D, *work; int *iwork; size_t D_size = (size_t) nrows * (size_t) ncols; //Allocate temporary arrays STARSH_PMALLOC(D, D_size, info); STARSH_PMALLOC(work, lwork, info); STARSH_PMALLOC(iwork, liwork, info); //Compute elements of a block kernel(nrows, ncols, RC->pivot + RC->start[i], CC->pivot + CC->start[j], RD, CD, D, nrows); starsh_dense_dlrsdd(nrows, ncols, D, nrows, far_U[bi]->data, nrows, far_V[bi]->data, ncols, far_rank + bi, maxrank, tol, work, lwork, iwork); //Free temporary arrays free(D); free(work); free(iwork); } //Get number of false far - field blocks STARSH_int nblocks_false_far = 0; STARSH_int *false_far = NULL; for (bi = 0; bi < nblocks_far; bi++) if (far_rank[bi] == -1) nblocks_false_far++; if (nblocks_false_far > 0) { //IMPORTANT:`false_far ` must to be in ascending order for later code // to work normally STARSH_MALLOC(false_far, nblocks_false_far); bj = 0; for (bi = 0; bi < nblocks_far; bi++) if (far_rank[bi] == -1) false_far[bj++] = bi; } //Update lists of far - field and near - field blocks using previously // generated list of false far - field blocks if (nblocks_false_far > 0) { //Update list of near - field blocks new_nblocks_near = nblocks_near + nblocks_false_far; STARSH_MALLOC(block_near, 2 * new_nblocks_near); //At first get all near - field blocks, assumed to be dense for (bi = 0; bi < 2 * nblocks_near; bi++) block_near[bi] = F->block_near[bi]; //Add false far - field blocks for (bi = 0; bi < nblocks_false_far; bi++) { STARSH_int bj = false_far[bi]; block_near[2 * (bi + nblocks_near)] = F->block_far[2 * bj]; block_near[2 * (bi + nblocks_near) + 1] = F->block_far[2 * bj + 1]; } //Update list of far - field blocks new_nblocks_far = nblocks_far - nblocks_false_far; if (new_nblocks_far > 0) { STARSH_MALLOC(block_far, 2 * new_nblocks_far); bj = 0; for (bi = 0; bi < nblocks_far; bi++) { //`false_far ` must be in ascending order for this to work if (false_far[bj] == bi) { bj++; } else { block_far[2 * (bi - bj)] = F->block_far[2 * bi]; block_far[2 * (bi - bj) + 1] = F->block_far[2 * bi + 1]; } } } //Update format by creating new format STARSH_blrf * F2; info = starsh_blrf_new_from_coo(&F2, P, F->symm, RC, CC, new_nblocks_far, block_far, new_nblocks_near, block_near, F->type); //Swap internal data of formats and free unnecessary data STARSH_blrf tmp_blrf = *F; *F = *F2; *F2 = tmp_blrf; STARSH_WARNING("`F` was modified due to false far-field blocks"); starsh_blrf_free(F2); } //Compute near - field blocks if needed if (onfly == 0 && new_nblocks_near > 0) { STARSH_MALLOC(near_D, new_nblocks_near); size_t size_D = 0; //Simple cycle over all near - field blocks for (bi = 0; bi < new_nblocks_near; bi++) { //Get indexes of corresponding block row and block column STARSH_int i = block_near[2 * bi]; STARSH_int j = block_near[2 * bi + 1]; //Get corresponding sizes and minimum of them size_t nrows = RC->size[i]; size_t ncols = CC->size[j]; //Update size_D size_D += nrows * ncols; } STARSH_MALLOC(alloc_D, size_D); //For each near - field block compute its elements for (bi = 0; bi < new_nblocks_near; bi++) { //Get indexes of corresponding block row and block column STARSH_int i = block_near[2 * bi]; STARSH_int j = block_near[2 * bi + 1]; //Get corresponding sizes and minimum of them int nrows = RC->size[i]; int ncols = CC->size[j]; int shape[2] = {nrows, ncols}; double *D; D = alloc_D + offset_D; array_from_buffer(near_D + bi, 2, shape, 'd', 'F', D); 
offset_D += near_D[bi]->size; kernel(nrows, ncols, RC->pivot + RC->start[i], CC->pivot + CC->start[j], RD, CD, D, nrows); } } //Change sizes of far_rank, far_U and far_V if there were false // far - field blocks if (nblocks_false_far > 0 && new_nblocks_far > 0) { bj = 0; for (bi = 0; bi < nblocks_far; bi++) { if (far_rank[bi] == -1) bj++; else { int shape_U[2] = {far_U[bi]->shape[0], far_rank[bi]}; int shape_V[2] = {far_V[bi]->shape[0], far_rank[bi]}; array_from_buffer(far_U + bi - bj, 2, shape_U, 'd', 'F', far_U[bi]->data); array_from_buffer(far_V + bi - bj, 2, shape_V, 'd', 'F', far_V[bi]->data); far_rank[bi - bj] = far_rank[bi]; } } STARSH_REALLOC(far_rank, new_nblocks_far); STARSH_REALLOC(far_U, new_nblocks_far); STARSH_REALLOC(far_V, new_nblocks_far); //STARSH_REALLOC(alloc_U, offset_U); //STARSH_REALLOC(alloc_V, offset_V); } //If all far - field blocks are false, then dealloc buffers if (new_nblocks_far == 0 && nblocks_far > 0) { block_far = NULL; free(far_rank); far_rank = NULL; free(far_U); far_U = NULL; free(far_V); far_V = NULL; free(alloc_U); alloc_U = NULL; free(alloc_V); alloc_V = NULL; } //Dealloc list of false far - field blocks if it is not empty if (nblocks_false_far > 0) free(false_far); //Finish with creating instance of Block Low - Rank Matrix with given // buffers return starsh_blrm_new(matrix, F, far_rank, far_U, far_V, onfly, near_D, alloc_U, alloc_V, alloc_D, '1'); }
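/*
 * Sketch of the in-place compaction idiom used above for dropping false
 * far-field blocks: given an ascending list of indices to remove, each kept
 * (row, column) pair is shifted left by the number of removed entries seen
 * so far. Names are illustrative and a bounds check on the drop cursor is
 * added here for clarity; this is not the STARSH API.
 */
static void compact_pairs(long *pairs, long npairs,
                          const long *drop, long ndrop)
{
    long d = 0;                                  /* cursor into ascending drop[] */
    for (long bi = 0; bi < npairs; bi++) {
        if (d < ndrop && drop[d] == bi) {
            d++;                                 /* skip a dropped block */
        } else {
            pairs[2 * (bi - d)]     = pairs[2 * bi];
            pairs[2 * (bi - d) + 1] = pairs[2 * bi + 1];
        }
    }
}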
#include "common.h" #include "starsh.h" int starsh_blrm__dsdd_omp(STARSH_blrm ** matrix, STARSH_blrf * format, int maxrank, double tol, int onfly) //!Approximate each tile by divide - and - conquer SVD(GESDD function). /* * ! @param[out] matrix: Address of pointer to @ref STARSH_blrm object. * @param[in] format: Block low-rank format. @param[in] maxrank: Maximum * possible rank. @param[in] tol: Relative error tolerance. @param[in] onfly: * Whether not to store dense blocks. @ingroup blrm */ { STARSH_blrf *F = format; STARSH_problem *P = F->problem; STARSH_kernel *kernel = P->kernel; STARSH_int nblocks_far = F->nblocks_far; STARSH_int nblocks_near = F->nblocks_near; //Shortcuts to information about clusters STARSH_cluster * RC = F->row_cluster; STARSH_cluster *CC = F->col_cluster; void *RD = RC->data, *CD = CC->data; //Following values default to given block low - rank format F, but they are // changed when there are false far - field blocks. STARSH_int new_nblocks_far = nblocks_far; STARSH_int new_nblocks_near = nblocks_near; STARSH_int *block_far = F->block_far; STARSH_int *block_near = F->block_near; //Places to store low - rank factors, dense blocks and ranks Array ** far_U = NULL, **far_V = NULL, **near_D = NULL; int *far_rank = NULL; double *alloc_U = NULL, *alloc_V = NULL, *alloc_D = NULL; size_t offset_U = 0, offset_V = 0, offset_D = 0; STARSH_int bi, bj = 0; //Init buffers to store low - rank factors of far - field blocks if needed if (nblocks_far > 0) { STARSH_MALLOC(far_U, nblocks_far); STARSH_MALLOC(far_V, nblocks_far); STARSH_MALLOC(far_rank, nblocks_far); size_t size_U = 0, size_V = 0; //Simple cycle over all far - field blocks for (bi = 0; bi < nblocks_far; bi++) { //Get indexes of corresponding block row and block column STARSH_int i = block_far[2 * bi]; STARSH_int j = block_far[2 * bi + 1]; //Get corresponding sizes and minimum of them size_t nrows = RC->size[i], ncols = CC->size[j]; size_U += nrows * maxrank; size_V += ncols * maxrank; } STARSH_MALLOC(alloc_U, size_U); STARSH_MALLOC(alloc_V, size_V); for (bi = 0; bi < nblocks_far; bi++) { //Get indexes of corresponding block row and block column STARSH_int i = block_far[2 * bi]; STARSH_int j = block_far[2 * bi + 1]; //Get corresponding sizes and minimum of them size_t nrows = RC->size[i], ncols = CC->size[j]; int shape_U[] = {nrows, maxrank}; int shape_V[] = {ncols, maxrank}; double *U = alloc_U + offset_U, *V = alloc_V + offset_V; offset_U += nrows * maxrank; offset_V += ncols * maxrank; array_from_buffer(far_U + bi, 2, shape_U, 'd', 'F', U); array_from_buffer(far_V + bi, 2, shape_V, 'd', 'F', V); } offset_U = 0; offset_V = 0; } //Work variables int info; //Simple cycle over all far - field admissible blocks #pragma omp parallel for schedule(dynamic,1) for (bi = 0; bi < nblocks_far; bi++) { int info; //Get indexes of corresponding block row and block column STARSH_int i = block_far[2 * bi]; STARSH_int j = block_far[2 * bi + 1]; //Get corresponding sizes and minimum of them int nrows = RC->size[i]; int ncols = CC->size[j]; int mn = nrows > ncols ? 
ncols : nrows; //Get size of temporary arrays int lmn = mn, lwork = (4 * lmn + 8 + nrows + ncols) * lmn, liwork = 8 * lmn; double *D, *work; int *iwork; size_t D_size = (size_t) nrows * (size_t) ncols; //Allocate temporary arrays STARSH_PMALLOC(D, D_size, info); STARSH_PMALLOC(work, lwork, info); STARSH_PMALLOC(iwork, liwork, info); //Compute elements of a block kernel(nrows, ncols, RC->pivot + RC->start[i], CC->pivot + CC->start[j], RD, CD, D, nrows); starsh_dense_dlrsdd(nrows, ncols, D, nrows, far_U[bi]->data, nrows, far_V[bi]->data, ncols, far_rank + bi, maxrank, tol, work, lwork, iwork); //Free temporary arrays free(D); free(work); free(iwork); } //Get number of false far - field blocks STARSH_int nblocks_false_far = 0; STARSH_int *false_far = NULL; for (bi = 0; bi < nblocks_far; bi++) if (far_rank[bi] == -1) nblocks_false_far++; if (nblocks_false_far > 0) { //IMPORTANT:`false_far ` must to be in ascending order for later code // to work normally STARSH_MALLOC(false_far, nblocks_false_far); bj = 0; for (bi = 0; bi < nblocks_far; bi++) if (far_rank[bi] == -1) false_far[bj++] = bi; } //Update lists of far - field and near - field blocks using previously // generated list of false far - field blocks if (nblocks_false_far > 0) { //Update list of near - field blocks new_nblocks_near = nblocks_near + nblocks_false_far; STARSH_MALLOC(block_near, 2 * new_nblocks_near); //At first get all near - field blocks, assumed to be dense #pragma omp parallel for schedule(static) for (bi = 0; bi < 2 * nblocks_near; bi++) block_near[bi] = F->block_near[bi]; //Add false far - field blocks #pragma omp parallel for schedule(static) for (bi = 0; bi < nblocks_false_far; bi++) { STARSH_int bj = false_far[bi]; block_near[2 * (bi + nblocks_near)] = F->block_far[2 * bj]; block_near[2 * (bi + nblocks_near) + 1] = F->block_far[2 * bj + 1]; } //Update list of far - field blocks new_nblocks_far = nblocks_far - nblocks_false_far; if (new_nblocks_far > 0) { STARSH_MALLOC(block_far, 2 * new_nblocks_far); bj = 0; for (bi = 0; bi < nblocks_far; bi++) { //`false_far ` must be in ascending order for this to work if (false_far[bj] == bi) { bj++; } else { block_far[2 * (bi - bj)] = F->block_far[2 * bi]; block_far[2 * (bi - bj) + 1] = F->block_far[2 * bi + 1]; } } } //Update format by creating new format STARSH_blrf * F2; info = starsh_blrf_new_from_coo(&F2, P, F->symm, RC, CC, new_nblocks_far, block_far, new_nblocks_near, block_near, F->type); //Swap internal data of formats and free unnecessary data STARSH_blrf tmp_blrf = *F; *F = *F2; *F2 = tmp_blrf; STARSH_WARNING("`F` was modified due to false far-field blocks"); starsh_blrf_free(F2); } //Compute near - field blocks if needed if (onfly == 0 && new_nblocks_near > 0) { STARSH_MALLOC(near_D, new_nblocks_near); size_t size_D = 0; //Simple cycle over all near - field blocks for (bi = 0; bi < new_nblocks_near; bi++) { //Get indexes of corresponding block row and block column STARSH_int i = block_near[2 * bi]; STARSH_int j = block_near[2 * bi + 1]; //Get corresponding sizes and minimum of them size_t nrows = RC->size[i]; size_t ncols = CC->size[j]; //Update size_D size_D += nrows * ncols; } STARSH_MALLOC(alloc_D, size_D); //For each near - field block compute its elements #pragma omp parallel for schedule(dynamic,1) for (bi = 0; bi < new_nblocks_near; bi++) { //Get indexes of corresponding block row and block column STARSH_int i = block_near[2 * bi]; STARSH_int j = block_near[2 * bi + 1]; //Get corresponding sizes and minimum of them int nrows = RC->size[i]; int ncols = 
CC->size[j]; int shape[2] = {nrows, ncols}; double *D; #pragma omp critical { D = alloc_D + offset_D; array_from_buffer(near_D + bi, 2, shape, 'd', 'F', D); offset_D += near_D[bi]->size; } kernel(nrows, ncols, RC->pivot + RC->start[i], CC->pivot + CC->start[j], RD, CD, D, nrows); } } //Change sizes of far_rank, far_U and far_V if there were false // far - field blocks if (nblocks_false_far > 0 && new_nblocks_far > 0) { bj = 0; for (bi = 0; bi < nblocks_far; bi++) { if (far_rank[bi] == -1) bj++; else { int shape_U[2] = {far_U[bi]->shape[0], far_rank[bi]}; int shape_V[2] = {far_V[bi]->shape[0], far_rank[bi]}; array_from_buffer(far_U + bi - bj, 2, shape_U, 'd', 'F', far_U[bi]->data); array_from_buffer(far_V + bi - bj, 2, shape_V, 'd', 'F', far_V[bi]->data); far_rank[bi - bj] = far_rank[bi]; } } STARSH_REALLOC(far_rank, new_nblocks_far); STARSH_REALLOC(far_U, new_nblocks_far); STARSH_REALLOC(far_V, new_nblocks_far); //STARSH_REALLOC(alloc_U, offset_U); //STARSH_REALLOC(alloc_V, offset_V); } //If all far - field blocks are false, then dealloc buffers if (new_nblocks_far == 0 && nblocks_far > 0) { block_far = NULL; free(far_rank); far_rank = NULL; free(far_U); far_U = NULL; free(far_V); far_V = NULL; free(alloc_U); alloc_U = NULL; free(alloc_V); alloc_V = NULL; } //Dealloc list of false far - field blocks if it is not empty if (nblocks_false_far > 0) free(false_far); //Finish with creating instance of Block Low - Rank Matrix with given // buffers return starsh_blrm_new(matrix, F, far_rank, far_U, far_V, onfly, near_D, alloc_U, alloc_V, alloc_D, '1'); }
Searching.202007281116.only_gather_top_m.profile.h
// // Created by Zhen Peng on 7/28/2020. // #ifndef BATCH_SEARCHING_SEARCHING_H #define BATCH_SEARCHING_SEARCHING_H #include <vector> #include <boost/dynamic_bitset.hpp> //#include <boost/sort/sort.hpp> #include <iostream> #include <fstream> #include <unordered_map> #include <immintrin.h> #include <cstring> #include <unordered_set> #include <set> #include <cfloat> #include <algorithm> //#include <omp.h> #include "../include/definitions.h" //#include "../include/efanna2e/neighbor.h" #include "../include/utils.h" #include "../include/Candidate.h" #include "../include/parallelization.h" #include "../include/bitvector.h" namespace PANNS { class Searching { //private: public: idi num_v_ = 0; edgei num_e_ = 0; idi num_queries_ = 0; uint64_t dimension_ = 0; idi width_ = 0; // NSG largest degree idi ep_ = 0; // Start point // std::vector<dataf> data_load_; // std::vector<dataf> queries_load_; // std::vector< std::vector<dataf> > data_load_; // std::vector< std::vector<dataf> > queries_load_; // std::vector<distf> norms_; dataf *data_load_ = nullptr; dataf *queries_load_ = nullptr; // dataf *norms_; // std::vector< std::vector<idi> > nsg_graph_; // idi *nsg_graph_indices_; // idi *nsg_graph_out_edges_; // std::vector< std::vector<idi> > edge_list_; char *opt_nsg_graph_ = nullptr; uint64_t data_bytes_; uint64_t neighbor_bytes_; uint64_t vertex_bytes_; // For multithreads int num_threads_ = 1; // int num_real_threads_ = 1; // int num_threads_intra_query_ = 1; // int num_threads_inter_query_ = 1; dataf compute_norm( const dataf *data) const; // idi vertex_id); // const std::vector<PANNS::dataf> &data); // size_t loc_start, // idi dimension) dataf compute_distance_with_norm( const dataf *v_data, const dataf *q_data, // idi vertex_id, // idi query_id, // const std::vector<dataf> &d_data, // const std::vector<dataf> &q_data, // PANNS::idi d_start, // PANNS::idi q_start, const dataf vertex_norm) const; // idi dimension) static idi insert_into_queue( std::vector<Candidate> &c_queue, idi c_queue_top, Candidate cand); static idi add_into_queue( std::vector<PANNS::Candidate> &queue, idi &queue_top, const idi queue_size, const PANNS::Candidate &cand); static idi add_into_queue( std::vector<PANNS::Candidate> &queue, const idi queue_start, idi &queue_size, const idi queue_capacity, const PANNS::Candidate &cand); static void add_into_queue_at( const Candidate &cand, std::vector<Candidate> &queue, const idi insert_index, // The insertion location, independent with queue_start const idi queue_start, idi &queue_top, // The number of elements in queue, independent with queue_start const idi queue_size); // The maximum capacity of queue, independent with queue_start. static void insert_one_element_at( // const T &cand, // T *queue_base, const Candidate &cand, std::vector<Candidate> &queue_base, const idi insert_index, const idi queue_start, const idi queue_size); // idi insert_into_queue_nsg( // std::vector< Candidate > &c_queue, // idi c_queue_top, // Candidate cand); static idi merge_two_queues_into_1st_queue_seq_fixed( std::vector<Candidate> &queue1, const idi queue1_start, const idi queue1_size, std::vector<Candidate> &queue2, const idi queue2_start, const idi queue2_size); static void merge_two_queues_into_1st_queue_seq_incr( std::vector<Candidate> &queue1, const idi queue1_start, idi &queue1_size, // The number of element in queue1, independent with queue1_start. const idi queue1_length, // The maximum capacity of queue1, independent with queue1_start. 
std::vector<Candidate> &queue2, const idi queue2_start, const idi queue2_size); idi merge_all_queues_para_list( std::vector< std::vector<Candidate> > &local_queues_list, std::vector<idi> &local_queues_ends, std::vector<Candidate> &set_L, const idi L); // idi merge_all_queues_para_array( //// std::vector< std::vector<Candidate> > &local_queues_list, // std::vector<Candidate> &local_queues_array, // std::vector<idi> &local_queues_ends, // const idi local_queue_length, // std::vector<Candidate> &set_L, // const idi L); idi merge_all_queues_para_array( std::vector<Candidate> &set_L, // std::vector<Candidate> &local_queues_array, std::vector<idi> &local_queues_ends, const idi local_queue_length, // std::vector<Candidate> &set_L, const idi L); idi merge_all_queues_queue_base( // std::vector< std::vector<Candidate> > &local_queues_list, std::vector<Candidate> &set_L, // std::vector<Candidate> &local_queues_array, std::vector<idi> &local_queues_ends, const idi queue_base, const int real_threads, const idi local_queue_length, // std::vector<Candidate> &set_L, const idi L); void merge_two_consecutive_queues_in_place( std::vector<Candidate> &two_queues, const idi base_1, // const idi &end_1, const idi base_2, const idi &length_2); void merge_in_set_L( std::vector<Candidate> &set_L, const idi set_L_length, const idi num_queues, const idi local_queue_length); distf selecting_top_L_seq( std::vector<Candidate> &set_L, const idi global_L, // const idi local_L, const idi num_queues, const std::vector<idi> &local_queues_starts, std::vector<idi> &local_queues_sizes); void selecting_unchecked_top_M_seq( const idi query_id, const idi iter, std::vector<Candidate> &set_L, const std::vector<idi> &pointers_starts, const idi value_M, const idi num_queues, const std::vector<idi> &local_queues_starts, const std::vector<idi> &local_queues_sizes, std::vector<idi> &local_m_counts); void gather_unchecked_top_M_seq( const idi query_id, const idi iter, std::vector<Candidate> &set_L, const std::vector<idi> &pointers_starts, const idi value_M, const idi num_queues, const std::vector<idi> &local_queues_starts, const std::vector<idi> &local_queues_sizes, std::vector<idi> &top_m_candidates, idi &top_m_candidates_size, std::vector<idi> &bound_subs); // idi merge_all_queues_all_together_in_sequential( // std::vector<Candidate> &set_L, // std::vector<idi> &local_queues_ends, // const idi local_queue_length, // const idi L); // idi min_all_queues_at_heads( // const std::vector<Candidate> &set_L, // std::vector<idi> &queue_heads, // const std::vector<idi> &local_queues_ends, // const idi local_queue_length, // const idi L); public: // For Profiling // L3CacheMissRate cache_miss_kernel; uint64_t count_distance_computation_ = 0; uint64_t count_add_to_queue_ = 0; // uint64_t count_single_query_computation_ = 0; // distf dist_min_ = 0; // distf dist_max_ = 0; double time_merge_ = 0; double time_gather_ = 0; // double time_select_ = 0; // double time_select_L_ = 0.0; // double time_select_M_ = 0.0; double time_initialization_ = 0; double time_sequential_phase_ = 0; double time_parallel_phase_ = 0; double time_ending_ = 0.0; double time_assign_s_ = 0.0; double time_expand_ = 0.0; double time_pick_top_m_ = 0.0; double time_distance_computation_ = 0.0; double time_add_to_queue_ = 0.0; // double time_insert_ = 0; // double time_compare_minimum_ = 0; // double time_memmove_ = 0; // std::vector<double> time_memmove_list_; // L3CacheMissRate profile_miss_rate; // uint64_t number_local_elements_ = 0; // std::vector<idi> L_ids_; // 
std::vector<idi> M_ids_; ~Searching() { free(data_load_); data_load_ = nullptr; // free(queries_load_); // _mm_free(data_load_); free(queries_load_); queries_load_ = nullptr; // free(norms_); // free(nsg_graph_indices_); // free(nsg_graph_out_edges_); free(opt_nsg_graph_); opt_nsg_graph_ = nullptr; } void load_data_load(char *filename); void load_queries_load(char *filename); void load_nsg_graph(char *filename); // void build_opt_graph(); void prepare_init_ids( std::vector<unsigned> &init_ids, const unsigned L) const; // void prepare_candidate_queue_list( // const float *query_load, // std::vector<std::vector<efanna2e::Neighbor> > &retset_list, // std::vector<boost::dynamic_bitset<> > &is_visited_list, // const std::vector<unsigned> &init_ids, // const boost::dynamic_bitset<> &flags, // unsigned batch_start, // unsigned batch_size, // unsigned L); // void search_in_batch( //// const float *query_load, // size_t K, // size_t L, // unsigned batch_start, // unsigned batch_size, // std::vector< std::vector<Candidate> > &set_L_list, // std::vector< boost::dynamic_bitset<> > &is_visited_list, // const std::vector<idi> &init_ids, // const boost::dynamic_bitset<> &is_visited, // std::vector<std::vector<idi> > &set_K_list); void search_in_sequential( idi query_id, idi K, idi L, std::vector<Candidate> &set_L, // boost::dynamic_bitset<> &is_visited, // boost::dynamic_bitset<> is_visited, // std::vector<idi> &init_ids, const std::vector<idi> &init_ids, std::vector<idi> &set_K); // void search_in_sequential_BitVector( // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K); // idi get_out_degree(idi v_id) const // { // if (v_id < num_v_ - 1) { // return nsg_graph_indices_[v_id + 1] - nsg_graph_indices_[v_id]; // } else { // return num_e_ - nsg_graph_indices_[v_id]; // } // } void search_with_top_m( idi M, idi query_id, idi K, idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K); // std::vector< std::vector<idi> > &top_m_list); void search_with_top_m_scale_m( const PANNS::idi value_M_max, const PANNS::idi query_id, const PANNS::idi K, const PANNS::idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K, std::vector<idi> &top_m_candidates, boost::dynamic_bitset<> &is_visited); // void search_with_top_m_myths_M( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K); // void search_with_top_m_to_get_distance_range( // const PANNS::idi M, // const PANNS::idi query_id, //// const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids); // void search_with_top_m_profile_bit_CAS( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K); // void search_with_top_m_no_local_arrays( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // boost::dynamic_bitset<> &is_visited); void search_with_top_m_in_batch( PANNS::idi M, PANNS::idi batch_start, PANNS::idi batch_size, PANNS::idi K, PANNS::idi L, std::vector< std::vector<Candidate> > &set_L_list, const 
std::vector<idi> &init_ids, std::vector< std::vector<idi> > &set_K_list); // void para_search_with_top_m_critical_area( // idi M, // idi query_id, // idi K, // idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K); // void para_search_with_top_m_critical_area_no_omp( // idi M, // idi query_id, // idi K, // idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K); // void para_search_with_top_m_critical_area_yes_omp( // idi M, // idi query_id, // idi K, // idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K); // void para_search_with_top_m_visited_array( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // std::vector<uint8_t> &is_visited); // void para_search_with_top_m_merge_queues( // const idi M, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K); // void para_search_with_top_m_queues_seq_merge( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K); // void para_search_with_top_m_merge_queues_no_CAS( // const idi M, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // std::vector< std::vector<Candidate> > &local_queues_list, // std::vector<idi> &local_queues_ends, //// std::vector<uint8_t> &is_visited); // boost::dynamic_bitset<> &is_visited); // void para_search_with_top_m_merge_queues_in_array( // void para_search_with_top_m_merge_queues_new_threshold( // const idi M, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue //// std::vector< std::vector<Candidate> > &local_queues_list, // std::vector<Candidate> &local_queues_array, // std::vector<idi> &local_queues_ends, // Sizes of local queue // BitVector &is_visited); // void para_search_with_top_m_merge_queues_by_sort( // const idi M, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue //// std::vector<Candidate> &local_queues_array, // std::vector<idi> &local_queues_ends, // Sizes of local queue // std::vector<idi> &dest_offsets, // const std::vector<idi> &offsets_load_set_L, // Offsets for store into set_L. 
// BitVector &is_visited); // void para_search_with_top_m_merge_queues_better_merge_v0( // const idi M, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // std::vector<idi> &local_queues_ends, // Sizes of local queue //// std::vector<Candidate> &top_m_candidates, // std::vector<idi> &top_m_candidates, //// std::vector<uint8_t> &is_visited); // boost::dynamic_bitset<> &is_visited); //// BitVector &is_visited); // void para_search_with_top_m_merge_queues_better_merge_v2( // const idi M, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // std::vector<idi> &local_queues_ends, // Sizes of local queue //// std::vector<Candidate> &top_m_candidates, // std::vector<idi> &top_m_candidates, //// std::vector<uint8_t> &is_visited) // boost::dynamic_bitset<> &is_visited, // std::vector<distf> &local_thresholds); //// BitVector &is_visited) // void para_search_with_top_m_merge_queues_better_merge_v1( // const idi M, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // std::vector<idi> &local_queues_ends, // Sizes of local queue // std::vector<Candidate> &top_m_candidates, //// std::vector<idi> &top_m_candidates, //// std::vector<uint8_t> &is_visited); // boost::dynamic_bitset<> &is_visited); //// BitVector &is_visited); // void para_search_with_top_m_merge_queues_better_merge_v0_0( // const idi M, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // std::vector<idi> &local_queues_ends, // Sizes of local queue //// std::vector<Candidate> &top_m_candidates, // std::vector<idi> &top_m_candidates, //// std::vector<uint8_t> &is_visited) // boost::dynamic_bitset<> &is_visited); //// BitVector &is_visited) // void para_search_with_top_m_merge_queues_less_merge( // const idi M, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // std::vector<idi> &local_queues_ends, // Sizes of local queue //// std::vector<Candidate> &top_m_candidates, // std::vector<idi> &top_m_candidates, //// std::vector<uint8_t> &is_visited) // boost::dynamic_bitset<> &is_visited, // std::vector<distf> &local_thresholds); //// BitVector &is_visited) // void para_search_with_top_m_merge_queues_no_merge( // const idi M, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // std::vector<idi> &local_queues_ends, // Sizes of local queue //// std::vector<Candidate> &top_m_candidates, // std::vector<idi> &top_m_candidates, //// std::vector<uint8_t> &is_visited) // boost::dynamic_bitset<> &is_visited, // std::vector<distf> &local_thresholds, // const uint64_t computation_threshold); // void para_search_with_top_m_merge_queues_scale_m_v0( // const idi 
value_M_max, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // std::vector<idi> &local_queues_ends, // Sizes of local queue //// std::vector<Candidate> &top_m_candidates, // std::vector<idi> &top_m_candidates, //// std::vector<uint8_t> &is_visited); // boost::dynamic_bitset<> &is_visited); void para_search_with_top_m_merge_queues_middle_m( const idi value_M_middle, const idi value_M_max, const idi query_id, const idi K, const idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K, const idi local_queue_length, // Maximum size of local queue const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; std::vector<idi> &local_queues_ends, // Sizes of local queue // std::vector<Candidate> &top_m_candidates, std::vector<idi> &top_m_candidates, // std::vector<uint8_t> &is_visited) boost::dynamic_bitset<> &is_visited); // std::vector<distf> &local_thresholds); // BitVector &is_visited) // void para_search_with_top_m_merge_queues_scale_m_v2( // const idi value_M_min, // const idi value_M_max, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; // std::vector<idi> &local_queues_ends, // Sizes of local queue //// std::vector<Candidate> &top_m_candidates, // std::vector<idi> &top_m_candidates, //// std::vector<uint8_t> &is_visited) // boost::dynamic_bitset<> &is_visited); // void para_search_with_top_m_merge_queues_scale_m_v3( // const idi value_M_middle, // const idi value_M_max, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; // std::vector<idi> &local_queues_ends, // Sizes of local queue //// std::vector<Candidate> &top_m_candidates, // std::vector<idi> &top_m_candidates, //// std::vector<uint8_t> &is_visited) // boost::dynamic_bitset<> &is_visited); void para_search_with_top_m_merge_queues_middle_m_no_merge( const uint64_t computation_threshold, const idi value_M_middle, const idi value_M_max, const idi query_id, const idi K, const idi L, const idi init_size, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K, const idi local_queue_length, // Maximum size of local queue const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; std::vector<idi> &local_queues_ends, // Sizes of local queue std::vector<idi> &top_m_candidates, boost::dynamic_bitset<> &is_visited); void para_search_with_top_m_merge_queues_sequential_merge( const idi value_M_middle, const idi value_M_max, const idi query_id, const idi K, const idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K, const idi local_queue_length, // Maximum size of local queue const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; std::vector<idi> &local_queues_ends, // Sizes of local queue std::vector<idi> &top_m_candidates, boost::dynamic_bitset<> &is_visited); void para_search_with_top_m_nested_para( const idi 
batch_start, const idi batch_size, const idi value_M_middle, const idi value_M_max, const idi K, const idi L, std::vector< std::vector<Candidate> > &set_L_list, const std::vector<idi> &init_ids, std::vector< std::vector<idi> > &set_K_list, const idi local_queue_length, // Maximum size of local queue const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; std::vector< std::vector<idi> > &local_queues_ends_list, // Sizes of local queue std::vector< std::vector<idi> > &top_m_candidates_list, std::vector< boost::dynamic_bitset<> > &is_visited_list); void subsearch_with_top_m( const idi value_M_max, const idi query_id, const idi local_L, std::vector<Candidate> &set_L, const idi set_L_start, idi &set_L_size, std::vector<idi> &local_top_m_candidates, boost::dynamic_bitset<> &is_visited, uint64_t &local_count_distance_computation); void subsearch_top_m_for_one_iteration( const idi iter, idi &k_uc, const idi value_M, const idi query_id, const dataf *query_data, const idi L, std::vector<Candidate> &set_L, const idi set_L_start, idi &set_L_size, std::vector<idi> &top_m_candidates, boost::dynamic_bitset<> &is_visited, uint64_t &count_distance_computation); // void subsearch_top_m_for_one_iteration_lth( // const distf bound_lth, // const idi iter, // idi &k_uc, // const idi value_M, // const idi query_id, // const dataf *query_data, // const idi L, // std::vector<Candidate> &set_L, // const idi set_L_start, // idi &set_L_size, // std::vector<idi> &top_m_candidates, // boost::dynamic_bitset<> &is_visited, // uint64_t &count_distance_computation); void subsearch_top_m_for_one_iteration_lth_mth( const distf bound_lth, // const idi top_m_position, const idi iter, idi &k_uc, const idi local_m_count, const idi query_id, const dataf *query_data, const idi L, std::vector<Candidate> &set_L, const idi set_L_start, idi &set_L_size, std::vector<idi> &top_m_candidates, boost::dynamic_bitset<> &is_visited, uint64_t &count_distance_computation, double &time_pick_top_m, uint64_t &count_add_to_queue, double &time_distance_computation, double &time_add_to_queue); // void para_search_with_top_m_subsearch_v3( // const idi local_M_max, // const idi local_M_middle, // const idi query_id, // const idi K, // const idi global_L, // const idi local_L, //// const idi total_L, //// const idi init_queue_size, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const std::vector<idi> &local_queues_starts, // std::vector<idi> &local_queues_sizes, // std::vector<idi> &local_m_counts, // std::vector< std::vector<idi> > &top_m_candidates_list, // boost::dynamic_bitset<> &is_visited); // void para_search_with_top_m_subsearch_v4( // const idi local_M_max, // const idi local_M_middle, // const idi query_id, // const idi K, // const idi global_L, // const idi local_L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const std::vector<idi> &local_queues_starts, // std::vector<idi> &local_queues_sizes, // std::vector<idi> &local_m_counts, // std::vector<idi> &top_m_candidates, // boost::dynamic_bitset<> &is_visited); void para_search_with_top_m_subsearch_v5( const idi local_M_max, const idi local_M_middle, const idi query_id, const idi K, const idi global_L, const idi local_L, // const idi total_L, // const idi init_queue_size, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K, const std::vector<idi> &local_queues_starts, std::vector<idi> &local_queues_sizes, // std::vector<idi> 
&local_m_counts, std::vector<idi> &top_m_candidates, // std::vector< std::vector<idi> > &top_m_candidates_list, boost::dynamic_bitset<> &is_visited); void subsearch_for_simple_search( const idi query_id, const idi local_L, std::vector<Candidate> &set_L, const idi base_set_L, idi &set_L_end, // std::vector<uint8_t> &is_visited, boost::dynamic_bitset<> &is_visited, uint64_t &local_count_distance_computation); void para_simple_search_subsearch( const idi query_id, const idi K, const idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K, // std::vector<uint8_t> &is_visited); boost::dynamic_bitset<> &is_visited); // void para_search_with_top_m_merge_queues_global_threshold( // const idi value_M_middle, // const idi value_M_max, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; // std::vector<idi> &local_queues_ends, // Sizes of local queue // std::vector<idi> &top_m_candidates, // boost::dynamic_bitset<> &is_visited); // void para_search_with_top_m_merge_queues_distance_threshold_m( //// const idi value_M_middle, //// const idi value_M_max, // const distf relative_dist_threshold, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; // std::vector<idi> &local_queues_ends, // Sizes of local queue //// std::vector<Candidate> &top_m_candidates, // std::vector<idi> &top_m_candidates, //// std::vector<uint8_t> &is_visited) // boost::dynamic_bitset<> &is_visited); //// std::vector<distf> &local_thresholds) //// BitVector &is_visited) // void para_search_with_top_m_merge_queues_distance_threshold_m_middle_iteration( //// const idi value_M_middle, //// const idi value_M_max, // const distf relative_dist_threshold, // const idi middle_iteration, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; // std::vector<idi> &local_queues_ends, // Sizes of local queue //// std::vector<Candidate> &top_m_candidates, // std::vector<idi> &top_m_candidates, //// std::vector<uint8_t> &is_visited) // boost::dynamic_bitset<> &is_visited); // void para_search_with_top_m_merge_queues_collectors( // const idi value_M_middle, // const idi value_M_max, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; // std::vector<idi> &local_queues_ends, // Sizes of local queue //// std::vector<Candidate> &top_m_candidates, // std::vector<idi> &top_m_candidates, //// std::vector<uint8_t> &is_visited) // boost::dynamic_bitset<> &is_visited); // void para_search_with_top_m_merge_queues_selecting( // const idi value_M_middle, // const idi value_M_max, // const idi query_id, // const idi K, // const idi L, // 
std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; // std::vector<idi> &local_queues_ends, // Sizes of local queue //// std::vector<Candidate> &top_m_candidates, // std::vector<idi> &top_m_candidates, //// std::vector<uint8_t> &is_visited) // boost::dynamic_bitset<> &is_visited); // void para_search_with_top_m_merge_queues_myths( // const idi M, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue //// std::vector< std::vector<Candidate> > &local_queues_list, // std::vector<Candidate> &local_queues_array, // std::vector<idi> &local_queues_ends, // Sizes of local queue // BitVector &is_visited); //// std::vector<uint8_t> &is_visited); //// boost::dynamic_bitset<> &is_visited); //// void para_prepare_init_ids( //// std::vector<unsigned> &init_ids, //// unsigned L) const; // void para_search_with_top_m_in_batch_embarassing_para( // const PANNS::idi M, // const PANNS::idi batch_start, // const PANNS::idi batch_size, // const PANNS::idi K, // const PANNS::idi L, // std::vector< std::vector<Candidate> > &set_L_list, // const std::vector<idi> &init_ids, // std::vector< std::vector<idi> > &set_K_list, // std::vector< boost::dynamic_bitset<> > &is_visited_list); // void test_neighbors_distance_to_father( // const idi num_selected) const; // void test_neighbors_normalized_distance_to_father( // const idi num_selected) const; void load_true_NN( const char *filename, std::vector< std::vector<idi> > &true_nn_list); void get_recall_for_all_queries( const std::vector< std::vector<idi> > &true_nn_list, const std::vector<std::vector<unsigned>> &set_K_list, std::unordered_map<unsigned, double> &recalls) const; }; // Class Searching /** * Input the data from the file. * @param filename */ inline void Searching::load_data_load(char *filename) { auto old_d = dimension_; DiskIO::load_data( filename, data_load_, num_v_, dimension_); if (old_d) { if (old_d != dimension_) { std::cerr << "Error: data dimension " << dimension_ << " is not equal to query dimension " << old_d << "." << std::endl; exit(EXIT_FAILURE); } } } /** * Input queries from the file. * @param filename */ inline void Searching::load_queries_load(char *filename) { auto old_d = dimension_; DiskIO::load_data( filename, queries_load_, num_queries_, dimension_); if (old_d) { if (old_d != dimension_) { std::cerr << "Error: query dimension " << dimension_ << " is not equal to data dimension " << old_d << "." << std::endl; exit(EXIT_FAILURE); } } } /** * Input the NSG graph from the file. * Reference: https://github.com/ZJULearning/nsg/blob/master/src/index_nsg.cpp * @param filename */ inline void Searching::load_nsg_graph(char *filename) { std::ifstream fin(filename); if (!fin.is_open()) { std::cerr << "Error: cannot read file " << filename << " ." << std::endl; exit(EXIT_FAILURE); } fin.read(reinterpret_cast<char *>(&width_), sizeof(unsigned)); fin.read(reinterpret_cast<char *>(&ep_), sizeof(unsigned)); data_bytes_ = (1 + dimension_) * sizeof(dataf); neighbor_bytes_ = (1 + width_) * sizeof(idi); vertex_bytes_ = data_bytes_ + neighbor_bytes_; opt_nsg_graph_ = (char *) malloc(num_v_ * vertex_bytes_); if (!opt_nsg_graph_) { std::cerr << "Error: no enough memory for opt_nsg_graph_." 
<< std::endl; exit(EXIT_FAILURE); } idi v_id = 0; num_e_ = 0; char *base_location = opt_nsg_graph_; while (true) { idi degree; fin.read(reinterpret_cast<char *>(&degree), sizeof(unsigned)); if (fin.eof()) { break; } num_e_ += degree; // std::vector<idi> tmp_ngbrs(degree); // fin.read(reinterpret_cast<char *>(tmp_ngbrs.data()), degree * sizeof(unsigned)); // Norm and data distf norm = compute_norm(data_load_ + v_id * dimension_); // distf norm = compute_norm(v_id); std::memcpy(base_location, &norm, sizeof(distf)); // Norm memcpy(base_location + sizeof(distf), data_load_ + v_id * dimension_, dimension_ * sizeof(dataf)); // Data base_location += data_bytes_; // Neighbors memcpy(base_location, &degree, sizeof(idi)); // Number of neighbors fin.read(base_location + sizeof(idi), degree * sizeof(unsigned)); // Neighbors // memcpy(location + sizeof(idi), tmp_ngbrs.data(), degree * sizeof(unsigned)); base_location += neighbor_bytes_; ++v_id; } if (v_id != num_v_) { std::cerr << "Error: NSG data has " << v_id << " vertices, but origin data has " << num_v_ << " vertices." << std::endl; exit(EXIT_FAILURE); } free(data_load_); data_load_ = nullptr; // //////////////////////// // idi v_id = 0; // num_e_ = 0; // while (true) { // idi degree; // fin.read(reinterpret_cast<char *>(&degree), sizeof(unsigned)); // if (fin.eof()) { // break; // } // num_e_ += degree; // // std::vector<idi> ngbrs(degree); // fin.read(reinterpret_cast<char *>(ngbrs.data()), degree * sizeof(unsigned)); //// nsg_graph_.push_back(ngbrs); //// tmp_edge_list.push_back(ngbrs); // edge_list_.push_back(ngbrs); // ++v_id; // } // if (v_id != num_v_) { // std::cerr << "Error: NSG data has " << v_id // << " vertices, but origin data has " << num_v_ << " vertices." << std::endl; // exit(EXIT_FAILURE); // } } /** * Load those true top-K neighbors (ground truth) of queries * @param filename * @param[out] true_nn_list */ inline void Searching::load_true_NN( const char *filename, std::vector< std::vector<idi> > &true_nn_list) // unsigned &t_K) { std::ifstream fin(filename); if (!fin.is_open()) { fprintf(stderr, "Error: cannot open file %s\n", filename); exit(EXIT_FAILURE); } idi t_query_num; idi t_K; // unsigned t_K; fin.read(reinterpret_cast<char *>(&t_query_num), sizeof(t_query_num)); fin.read(reinterpret_cast<char *>(&t_K), sizeof(t_K)); // if (t_query_num != query_num) { // fprintf(stderr, "Error: query_num %u is not equal to the record %u in true-NN file %s\n", // query_num, t_query_num, filename); // exit(EXIT_FAILURE); // } if (t_query_num < num_queries_) { fprintf(stderr, "Error: t_query_num %u is smaller than num_queries_ %u\n", t_query_num, num_queries_); exit(EXIT_FAILURE); } if (t_K < 100) { fprintf(stderr, "Error: t_K %u is smaller than 100.\n", t_K); exit(EXIT_FAILURE); } // data = new unsigned[(size_t) t_query_num * (size_t) t_K]; true_nn_list.resize(t_query_num); for (idi q_i = 0; q_i < t_query_num; ++q_i) { true_nn_list[q_i].resize(t_K); } for (unsigned q_i = 0; q_i < t_query_num; ++q_i) { // size_t offset = q_i * t_K; for (unsigned n_i = 0; n_i < t_K; ++n_i) { unsigned id; float dist; fin.read(reinterpret_cast<char *>(&id), sizeof(id)); fin.read(reinterpret_cast<char *>(&dist), sizeof(dist)); // data[offset + n_i] = id; true_nn_list[q_i][n_i] = id; } } fin.close(); } inline void Searching::get_recall_for_all_queries( const std::vector< std::vector<idi> > &true_nn_list, const std::vector<std::vector<unsigned>> &set_K_list, std::unordered_map<unsigned, double> &recalls) const { // if (t_K < 100) { // fprintf(stderr, "Error: t_K 
%u is smaller than 100.\n", t_K); // exit(EXIT_FAILURE); // } if (true_nn_list[0].size() < 100) { fprintf(stderr, "Error: Number of true nearest neighbors of a query is smaller than 100.\n"); exit(EXIT_FAILURE); } recalls[1] = 0.0; recalls[5] = 0.0; recalls[10] = 0.0; recalls[20] = 0.0; recalls[50] = 0.0; recalls[100] = 0.0; for (unsigned q_i = 0; q_i < num_queries_; ++q_i) { // size_t offset = q_i * t_K; for (unsigned top_i = 0; top_i < 100; ++top_i) { unsigned true_id = true_nn_list[q_i][top_i]; for (unsigned n_i = 0; n_i < 100; ++n_i) { if (set_K_list[q_i][n_i] == true_id) { if (n_i < 1) recalls[1] += 1; if (n_i < 5) recalls[5] += 1; if (n_i < 10) recalls[10] += 1; if (n_i < 20) recalls[20] += 1; if (n_i < 50) recalls[50] += 1; if (n_i < 100) recalls[100] += 1; } } } } recalls[1] /= 1.0 * num_queries_; recalls[5] /= 5.0 * num_queries_; recalls[10] /= 10.0 * num_queries_; recalls[20] /= 20.0 * num_queries_; recalls[50] /= 50.0 * num_queries_; recalls[100] /= 100.0 * num_queries_; } inline void Searching::search_in_sequential( const idi query_id, const idi K, const idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K) { // {//test // printf("Iteration: Relative_Distance:\n"); //// printf("Iteration: Relative_Distance:\n"); //// printf("----query: %u----\n", query_id); // } boost::dynamic_bitset<> is_visited(num_v_); for (idi v_i = 0; v_i < L; ++v_i) { is_visited[init_ids[v_i]] = true; } const dataf *query_data = queries_load_ + query_id * dimension_; for (idi v_i = 0; v_i < L; ++v_i) { idi v_id = init_ids[v_i]; _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); } // Get the distances of all candidates, store in the set set_L. for (unsigned i = 0; i < L; i++) { unsigned v_id = init_ids[i]; auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); dataf norm = *v_data++; ++count_distance_computation_; distf dist = compute_distance_with_norm(v_data, query_data, norm); set_L[i] = Candidate(v_id, dist, false); // False means not checked. } std::sort(set_L.begin(), set_L.begin() + L); idi k = 0; // Index of every queue's first unchecked candidate. idi tmp_count = 0; // for debug // {// Print relative distance //// distf top_dist = set_L[0].distance_; // for (idi i_l = 0; i_l < L; ++i_l) { // printf("%u %f\n", // tmp_count, set_L[i_l].distance_); //// tmp_count, set_L[i_l].distance_ - top_dist); // } // } while (k < L) { Candidate &top_cand = set_L[k]; unsigned nk = L; if (!top_cand.is_checked_) { ++tmp_count; top_cand.is_checked_ = true; idi v_id = top_cand.id_; // Vertex ID. 
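                // Prefetch v_id's adjacency list (laid out right after its norm + data block),
                // then prefetch every neighbor's vertex block before the distance computations below.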
_mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); idi *out_edges = (idi *) (opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_); idi out_degree = *out_edges++; for (idi n_i = 0; n_i < out_degree; ++n_i) { _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); } // Traverse v_id's all neighbors, pushing them into the queue for (idi e_i = 0; e_i < out_degree; ++e_i) { idi nb_id = out_edges[e_i]; if (is_visited[nb_id]) { continue; } is_visited[nb_id] = true; auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); dataf norm = *nb_data++; // Compute the distance ++count_distance_computation_; distf dist = compute_distance_with_norm(nb_data, query_data, norm); if (dist > set_L[L-1].distance_) { continue; } // if (dist >= set_L[L-1].distance_) { // continue; // } Candidate cand(nb_id, dist, false); // Insert into the queue idi r = insert_into_queue(set_L, L, cand); if (r < nk) { nk = r; } } // {// Print relative distance //// distf top_dist = set_L[0].distance_; // for (idi i_l = 0; i_l < L; ++i_l) { // printf("%u %f\n", // tmp_count, set_L[i_l].distance_); //// tmp_count, set_L[i_l].distance_ - top_dist); // } // } } if (nk <= k) { k = nk; } else { ++k; } } // cache_miss_kernel.measure_stop(); for (size_t k_i = 0; k_i < K; ++k_i) { set_K[k_i] = set_L[k_i].id_; } // {//test // if (0 == query_id) { // exit(1); // } // } } //inline void Searching::search_in_sequential_BitVector( // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K) //{ //// boost::dynamic_bitset<> is_visited(num_v_); // BitVector is_visited(num_v_); // //#pragma omp parallel for // for (idi v_i = 0; v_i < L; ++v_i) { //// is_visited[init_ids[v_i]] = true; // is_visited.atomic_set_bit(init_ids[v_i]); // } // // const dataf *query_data = queries_load_ + query_id * dimension_; // //#pragma omp parallel for // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. //#pragma omp parallel for // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // std::sort(set_L.begin(), set_L.begin() + L); // idi k = 0; // Index of every queue's first unchecked candidate. // while (k < L) { // Candidate &top_cand = set_L[k]; // unsigned nk = L; // if (!top_cand.is_checked_) { // top_cand.is_checked_ = true; // idi v_id = top_cand.id_; // Vertex ID. 
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // // Traverse v_id's all neighbors, pushing them into the queue // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; //// if (is_visited[nb_id]) { //// continue; //// } //// is_visited[nb_id] = true; // // {// Self-defined BitVector // if (is_visited.atomic_is_bit_set(nb_id)) { // continue; // } // is_visited.atomic_set_bit(nb_id); // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; // // Compute the distance // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } //// if (dist >= set_L[L-1].distance_) { //// continue; //// } // Candidate cand(nb_id, dist, false); // // Insert into the queue // idi r = insert_into_queue(set_L, L, cand); // if (r < nk) { // nk = r; // } // } // } // if (nk <= k) { // k = nk; // } else { // ++k; // } // } //// cache_miss_kernel.measure_stop(); //#pragma omp parallel for // for (size_t k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } //} /** * Prepare init_ids and flags, as they are constant for all queries. * @param[out] init_ids * @param L */ inline void Searching::prepare_init_ids( std::vector<unsigned int> &init_ids, const unsigned L) const { // idi num_ngbrs = get_out_degree(ep_); // edgei edge_start = nsg_graph_indices_[ep_]; // // Store ep_'s neighbors as candidates // idi tmp_l = 0; // for (; tmp_l < L && tmp_l < num_ngbrs; tmp_l++) { // init_ids[tmp_l] = nsg_graph_out_edges_[edge_start + tmp_l]; // } // std::unordered_set<idi> visited_ids; boost::dynamic_bitset<> is_selected(num_v_); idi *out_edges = (idi *) (opt_nsg_graph_ + ep_ * vertex_bytes_ + data_bytes_); idi out_degree = *out_edges++; idi init_ids_end = 0; // for (; tmp_l < L && tmp_l < out_degree; tmp_l++) { for (idi e_i = 0; e_i < out_degree && init_ids_end < L; ++e_i) { // idi v_id = out_edges[tmp_l]; idi v_id = out_edges[e_i]; if(is_selected[v_id]) { continue; } is_selected[v_id] = true; // init_ids[tmp_l] = v_id; init_ids[init_ids_end++] = v_id; // init_ids[tmp_l] = out_edges[tmp_l]; // visited_ids.insert(init_ids[tmp_l]); } // for (idi i = 0; i < tmp_l; ++i) { // is_visited[init_ids[i]] = true; // } // If ep_'s neighbors are not enough, add other random vertices idi tmp_id = ep_ + 1; // use tmp_id to replace rand(). 
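    // Scan vertex IDs starting from ep_ + 1, wrapping around num_v_, and take vertices that
    // have not been selected yet until init_ids holds L entries.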
while (init_ids_end < L) { tmp_id %= num_v_; idi v_id = tmp_id++; if (is_selected[v_id]) { continue; } // if (visited_ids.find(id) != visited_ids.end()) { // continue; // } is_selected[v_id] = true; // visited_ids.insert(id); init_ids[init_ids_end++] = v_id; // tmp_l++; } } // TODO: re-code in AVX-512 inline dataf Searching::compute_norm( const dataf *data) const // idi vertex_id) // const std::vector<PANNS::dataf> &data) // size_t loc_start, // idi dimension) { // const dataf *a = data.data() + loc_start; // const dataf *a = data_load_ + vertex_id * dimension_; // idi size = dimension_; dataf result = 0; //#define AVX_L2NORM(addr, dest, tmp) \ // tmp = _mm256_load_ps(addr); \ // tmp = _mm256_mul_ps(tmp, tmp); \ // dest = _mm256_add_ps(dest, tmp); #define AVX_L2NORM(addr, dest, tmp) \ tmp = _mm256_loadu_ps(addr); \ tmp = _mm256_mul_ps(tmp, tmp); \ dest = _mm256_add_ps(dest, tmp); __m256 sum; __m256 l0, l1; unsigned D = (dimension_ + 7) & ~7U; unsigned DR = D % 16; unsigned DD = D - DR; const float *l = data; const float *e_l = l + DD; float unpack[8] __attribute__ ((aligned (32))) = {0, 0, 0, 0, 0, 0, 0, 0}; sum = _mm256_load_ps(unpack); // sum = _mm256_loadu_ps(unpack); if (DR) { AVX_L2NORM(e_l, sum, l0); } for (unsigned i = 0; i < DD; i += 16, l += 16) { AVX_L2NORM(l, sum, l0); AVX_L2NORM(l + 8, sum, l1); } _mm256_store_ps(unpack, sum); // _mm256_storeu_ps(unpack, sum); result = unpack[0] + unpack[1] + unpack[2] + unpack[3] + unpack[4] + unpack[5] + unpack[6] + unpack[7]; return result; } inline dataf Searching::compute_distance_with_norm( const dataf *v_data, const dataf *q_data, // idi vertex_id, // idi query_id, // const std::vector<PANNS::dataf> &d_data, // const std::vector<PANNS::dataf> &q_data, // PANNS::idi d_start, // PANNS::idi q_start, const dataf vertex_norm) const // idi dimension) { // idi size = dimension_; float result = 0; //#define AVX_DOT(addr1, addr2, dest, tmp1, tmp2) \ // tmp1 = _mm256_load_ps(addr1);\ // tmp2 = _mm256_load_ps(addr2);\ // tmp1 = _mm256_mul_ps(tmp1, tmp2); \ // dest = _mm256_add_ps(dest, tmp1); #define AVX_DOT(addr1, addr2, dest, tmp1, tmp2) \ tmp1 = _mm256_loadu_ps(addr1);\ tmp2 = _mm256_loadu_ps(addr2);\ tmp1 = _mm256_mul_ps(tmp1, tmp2); \ dest = _mm256_add_ps(dest, tmp1); __m256 sum; __m256 l0, l1; __m256 r0, r1; unsigned D = (dimension_ + 7) & ~7U; unsigned DR = D % 16; unsigned DD = D - DR; const float *l = v_data; const float *r = q_data; // const float *l = (float *) (opt_nsg_graph_ + vertex_id * vertex_bytes_ + sizeof(distf)); // const float *r = queries_load_ + query_id * dimension_; const float *e_l = l + DD; const float *e_r = r + DD; float unpack[8] __attribute__ ((aligned (32))) = {0, 0, 0, 0, 0, 0, 0, 0}; sum = _mm256_load_ps(unpack); // sum = _mm256_loadu_ps(unpack); if (DR) { AVX_DOT(e_l, e_r, sum, l0, r0); } for (unsigned i = 0; i < DD; i += 16, l += 16, r += 16) { AVX_DOT(l, r, sum, l0, r0); AVX_DOT(l + 8, r + 8, sum, l1, r1); } _mm256_store_ps(unpack, sum); // _mm256_storeu_ps(unpack, sum); result = unpack[0] + unpack[1] + unpack[2] + unpack[3] + unpack[4] + unpack[5] + unpack[6] + unpack[7]; result = -2 * result + vertex_norm; return result; } //// DEPRECATED. // The difference from insert_into_queue is that add_into_queue will increase the queue size by 1. 
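/*
 * Note on the distance convention used by compute_distance_with_norm() above: it returns
 * ||v||^2 - 2 * <v, q>, i.e. the query's own norm ||q||^2 is dropped. Since ||q||^2 is the
 * same for every candidate of one query, this value ranks candidates exactly like the true
 * squared Euclidean distance ||v - q||^2 = ||v||^2 - 2<v, q> + ||q||^2. compute_norm() above
 * likewise returns the squared L2 norm (no sqrt).
 * Minimal illustration (2-D vectors assumed for the example only, not part of the original code):
 *   v = (1, 2), q = (3, 4): ||v||^2 = 5, <v, q> = 11, so the returned value is 5 - 22 = -17,
 *   while ||v - q||^2 = 8 = -17 + 25.
 */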
//inline idi Searching::add_into_queue( // std::vector<PANNS::Candidate> &queue, // idi &queue_top, // const idi queue_size, // const PANNS::Candidate &cand) //{ // assert(queue_size > 1); // if (0 == queue_top) { // queue[queue_top++] = cand; // return 0; // } else if (1 == queue_top) { // if (queue[0] < cand) { // queue[queue_top++] = cand; // return 1; // } else { // queue[++queue_top] = queue[0]; // queue[0] = cand; // return 0; // } // } // // if (queue[queue_top - 1] < cand) { // if (queue_top < queue_size) { // queue[queue_top++] = cand; // } // return queue_top; // } // // idi r = insert_into_queue( // queue, // queue_top - 1, // cand); //// {//test //// printf("r: %u" //// "queue_top: %u " //// "queue_size: %u\n", //// r, //// queue_top, //// queue_size); //// } // return r; // //// ///////////////////////////////////////////////////////////// //// // Find the insert location //// auto it_loc = std::lower_bound(queue.begin(), queue.begin() + queue_top, cand); //// idi insert_loc = it_loc - queue.begin(); //// if (insert_loc == queue_size) { //// return queue_size; //// } //// //// // Insert ////// if (queue_top == queue_size) { ////// // If full already ////// --queue_top; ////// } //// memmove(reinterpret_cast<char *>(queue.data() + insert_loc + 1), //// reinterpret_cast<char *>(queue.data() + insert_loc), //// (queue_top - insert_loc) * sizeof(Candidate)); ////// for (idi q_i = queue_top; q_i > insert_loc; --q_i) { ////// queue.at(q_i) = queue.at(q_i - 1); ////// } //// queue[insert_loc] = cand; //// ++queue_top; //// return insert_loc; //} // The difference from insert_into_queue is that add_into_queue will increase the queue size by 1. inline idi Searching::add_into_queue( std::vector<PANNS::Candidate> &queue, idi &queue_top, const idi queue_size, const PANNS::Candidate &cand) { if (0 == queue_top) { queue[queue_top++] = cand; return 0; } // Find the insert location auto it_loc = std::lower_bound(queue.begin(), queue.begin() + queue_top, cand); idi insert_loc = it_loc - queue.begin(); if (insert_loc == queue_size) { return queue_size; } // Insert if (queue_top == queue_size) { // If full already --queue_top; } memmove(reinterpret_cast<char *>(queue.data() + insert_loc + 1), reinterpret_cast<char *>(queue.data() + insert_loc), (queue_top - insert_loc) * sizeof(Candidate)); // for (idi q_i = queue_top; q_i > insert_loc; --q_i) { // queue.at(q_i) = queue.at(q_i - 1); // } queue[insert_loc] = cand; ++queue_top; return insert_loc; } // The difference from insert_into_queue is that add_into_queue will increase the queue size by 1. // add_into_queue with a queue_start inline idi Searching::add_into_queue( std::vector<PANNS::Candidate> &queue, const idi queue_start, idi &queue_size, // The insertion location starting from queue_start const idi queue_capacity, // The maximum capacity of queue, independent with queue_start. const PANNS::Candidate &cand) { if (0 == queue_size) { queue[queue_start + queue_size++] = cand; return 0; } idi queue_end = queue_start + queue_size; // Find the insert location const auto it_loc = std::lower_bound(queue.begin() + queue_start, queue.begin() + queue_end, cand); // auto it_loc = std::lower_bound(queue.begin(), queue.begin() + queue_size, cand); idi insert_loc = it_loc - queue.begin(); if (insert_loc != queue_end) { if (cand.id_ == it_loc->id_) { // Duplicate return queue_capacity; } if (queue_size >= queue_capacity) { // Queue is full --queue_size; --queue_end; } } else { // insert_loc == queue_end, insert at the end? 
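        // The candidate ranks after everything currently in the queue: append it if there is
        // still room; otherwise return queue_capacity to signal that nothing was inserted.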
        if (queue_size < queue_capacity) { // Queue is not full
            // Insert at the end
            queue[insert_loc] = cand;
            ++queue_size;
            return queue_size - 1;
        } else { // Queue is full
            return queue_capacity;
        }
    }

    // Add into queue
    memmove(reinterpret_cast<char *>(queue.data() + insert_loc + 1),
            reinterpret_cast<char *>(queue.data() + insert_loc),
            (queue_end - insert_loc) * sizeof(Candidate));
    queue[insert_loc] = cand;
    ++queue_size;
    return insert_loc - queue_start;
}

inline void Searching::add_into_queue_at(
        const Candidate &cand,
        std::vector<Candidate> &queue,
        const idi insert_index, // The insertion location, independent of queue_start
        const idi queue_start,
        idi &queue_size, // The number of elements in queue, independent of queue_start
        const idi queue_length) // The maximum capacity of queue, independent of queue_start.
{
    const idi dest_index = queue_start + insert_index;
    if (queue_size == queue_length) {
        --queue_size;
    }
    memmove(reinterpret_cast<char *>(queue.data() + dest_index + 1),
            reinterpret_cast<char *>(queue.data() + dest_index),
            (queue_size - insert_index) * sizeof(Candidate));
    queue[dest_index] = cand;
    ++queue_size;
}

inline void Searching::insert_one_element_at(
//        const T &cand,
//        T *queue_base,
        const Candidate &cand,
        std::vector<Candidate> &queue,
        const idi insert_index,
        const idi queue_start,
        const idi queue_size)
{
    const idi dest_index = queue_start + insert_index;
    memmove(reinterpret_cast<char *>(queue.data() + dest_index + 1),
            reinterpret_cast<char *>(queue.data() + dest_index),
            (queue_size - insert_index - 1) * sizeof(Candidate));
    queue[dest_index] = cand;
//    memmove(reinterpret_cast<char *>(queue_base + dest_index + 1),
//            reinterpret_cast<char *>(queue_base + dest_index),
//            (queue_size - insert_index - 1) * sizeof(T));
//    for (idi q_i = queue_size - 1; q_i > insert_index; --q_i) {
//        queue_base.at(q_i + queue_start) = queue_base.at(q_i - 1 + queue_start);
//    }
//    queue_base[dest_index] = cand;
}

/**
 * PANNS version of InsertIntoPool(): binary-search to find the insert place and then move.
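 * For example, with c_queue = {(id 2, 0.1), (id 5, 0.4), (id 9, 0.8)} and c_queue_top = 3,
 * inserting a candidate with distance 0.3 shifts the last two entries one slot right, stores
 * the candidate at position 1, and returns 1.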
* @param[out] c_queue * @param c_queue_top * @param cand * @return */ inline idi Searching::insert_into_queue( std::vector<PANNS::Candidate> &c_queue, PANNS::idi c_queue_top, PANNS::Candidate cand) { if (c_queue[0].distance_ > cand.distance_) { // If the first memmove(reinterpret_cast<char *>(c_queue.data() + 1), reinterpret_cast<char *>(c_queue.data()), c_queue_top * sizeof(Candidate)); c_queue[0] = cand; return 0; } else if (c_queue[c_queue_top - 1].distance_ == cand.distance_) { // If the last if (c_queue[c_queue_top - 1].id_ > cand.id_) { // Use ID as the second metrics for ordering c_queue[c_queue_top - 1] = cand; return c_queue_top - 1; } else { return c_queue_top; } } idi left = 0; idi right = c_queue_top; while (left < right) { idi mid = (right - left) / 2 + left; if (c_queue[mid].distance_ > cand.distance_) { right = mid; } else { left = mid + 1; } } // If the distance is the same if (0 != left && c_queue[left - 1].distance_ != cand.distance_) { ; } else { while (0 != left && c_queue[left - 1].distance_ == cand.distance_ && c_queue[left - 1].id_ > cand.id_) { // Use ID as the second metrics for ordering --left; } } // Insert to left memmove(reinterpret_cast<char *>(c_queue.data() + left + 1), reinterpret_cast<char *>(c_queue.data() + left), (c_queue_top - left) * sizeof(Candidate)); c_queue[left] = cand; return left; } //inline void Searching::cand_pushes_ngbrs_into_queue( // idi cand_id, // const dataf *query_data, // idi L, // idi &new_k, // boost::dynamic_bitset<> &is_visited, // std::vector<Candidate> &set_L) //{ // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = true; // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist >= set_L[L-1].distance_) { // continue; // } // Candidate cand(nb_id, dist, false); // idi r = insert_into_queue(set_L, L, cand); // if (r < nk) { // nk = r; // } // } //} //inline void Searching::search_in_sequential( // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K) const //{ // boost::dynamic_bitset<> is_visited(num_v_); // // for (idi v_i = 0; v_i < L; ++v_i) { // is_visited[init_ids[v_i]] = true; // } // const dataf *query_data = queries_load_ + query_id * dimension_; // // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // std::sort(set_L.begin(), set_L.begin() + L); // idi k = 0; // Index of every queue's first unchecked candidate. 
// while (k < L) { // Candidate &top_cand = set_L[k]; // unsigned nk = L; // if (!top_cand.is_checked_) { // top_cand.is_checked_ = true; // idi v_id = top_cand.id_; // Vertex ID. // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // // Traverse v_id's all neighbors, pushing them into the queue // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = true; // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; // // Compute the distance // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } // Candidate cand(nb_id, dist, false); // // Insert into the queue // idi r = insert_into_queue(set_L, L, cand); // if (r < nk) { // nk = r; // } // } // } // if (nk <= k) { // k = nk; // } else { // ++k; // } // } // // for (size_t k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } //} // Deprecated: cannot use std::set, because its element is constant. //inline void Searching::search_in_sequential( // const idi query_id, // const idi K, // const idi L, //// std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K) const //{ // std::set<Candidate> set_L; // boost::dynamic_bitset<> is_visited(num_v_); // // for (idi v_i = 0; v_i < L; ++v_i) { // is_visited[init_ids[v_i]] = true; // } // const dataf *query_data = queries_load_ + query_id * dimension_; // // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; // distf dist = compute_distance_with_norm(v_data, query_data, norm); //// set_L[i] = Candidate(v_id, dist, false); // False means not checked. // set_L.emplace(v_id, dist, false); // } //// std::sort(set_L.begin(), set_L.begin() + L); // idi k = 0; // Index of every queue's first unchecked candidate. // while (k < L) { //// Candidate &top_cand = set_L[k]; // std::set<Candidate>::iterator top_cand = std::next(set_L.begin(), k); // unsigned nk = L; // if (!top_cand->is_checked_) { // top_cand->is_checked_ = true; // idi v_id = top_cand.id_; // Vertex ID. 
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // // Traverse v_id's all neighbors, pushing them into the queue // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = true; // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; // // Compute the distance // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } // Candidate cand(nb_id, dist, false); // // Insert into the queue // idi r = insert_into_queue(set_L, L, cand); // if (r < nk) { // nk = r; // } // } // } // if (nk <= k) { // k = nk; // } else { // ++k; // } // } // // for (size_t k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } //} /* Function: * queue1_size is fixed. */ inline idi Searching::merge_two_queues_into_1st_queue_seq_fixed( std::vector<Candidate> &queue1, const idi queue1_start, const idi queue1_size, std::vector<Candidate> &queue2, const idi queue2_start, const idi queue2_size) // const idi limit_size) { assert(queue1_size && queue2_size); // Record the lowest insert location. auto it_loc = std::lower_bound( queue1.begin() + queue1_start, queue1.begin() + queue1_start + queue1_size, queue2[queue2_start]); idi insert_index = it_loc - (queue1.begin() + queue1_start); if (insert_index == queue1_size) { return insert_index; } else if (insert_index == queue1_size - 1) { queue1[queue1_start + insert_index] = queue2[queue2_start]; return insert_index; } // Insert the 1st of queue2 if (queue2[queue2_start].id_ != it_loc->id_) { // Not Duplicate insert_one_element_at( queue2[queue2_start], queue1, insert_index, queue1_start, queue1_size); } if (queue2_size == 1) { return insert_index; } // Insert idi q_i_1 = insert_index + 1 + queue1_start; idi q_i_2 = queue2_start + 1; const idi q_i_1_bound = queue1_start + queue1_size; const idi q_i_2_bound = queue2_start + queue2_size; // const idi insert_i_bound = queue1_start + limit_size; for (idi insert_i = insert_index + 1; insert_i < queue1_size; ++insert_i) { if (q_i_1 >= q_i_1_bound || q_i_2 >= q_i_2_bound) { // queue1 or queue2 finished traverse. Rest o break; } else if (queue1[q_i_1] < queue2[q_i_2]) { ++q_i_1; } else if (queue2[q_i_2] < queue1[q_i_1]) { // Insert queue2[q_i_2] into queue1 insert_one_element_at( queue2[q_i_2++], queue1, insert_i, queue1_start, queue1_size); ++q_i_1; } else { // Duplicate ++q_i_2; ++q_i_1; } } return insert_index; } /* Function: * queue1_size should be updated. * queue1_length should be provided. */ inline void Searching::merge_two_queues_into_1st_queue_seq_incr( std::vector<Candidate> &queue1, const idi queue1_start, idi &queue1_size, // The number of element in queue1, independent with queue1_start. const idi queue1_length, // The maximum capacity of queue1, independent with queue1_start. std::vector<Candidate> &queue2, const idi queue2_start, const idi queue2_size) // const idi limit_size) { assert(queue1_size && queue2_size); // Record the lowest insert location. 
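    // lower_bound finds where queue2's first (smallest) element would go in queue1; every entry
    // of queue1 before insert_index precedes all of queue2 in sorted order, so the merge only
    // needs to start from insert_index.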
auto it_loc = std::lower_bound( queue1.begin() + queue1_start, queue1.begin() + queue1_start + queue1_size, queue2[queue2_start]); idi insert_index = it_loc - (queue1.begin() + queue1_start); if (insert_index == queue1_size) { idi copy_count = (queue1_size + queue2_size > queue1_length) ? queue1_length - queue1_size : queue2_size; memmove(queue1.data() + queue1_start + queue1_size, queue2.data() + queue2_start, copy_count * sizeof(Candidate)); queue1_size += copy_count; return; } if (queue2[queue2_start].id_ != it_loc->id_) { // Not Duplicate add_into_queue_at( queue2[queue2_start], queue1, insert_index, queue1_start, queue1_size, queue1_length); } if (queue2_size == 1) { return; } // Insert idi q_i_1 = insert_index + 1 + queue1_start; idi q_i_2 = queue2_start + 1; idi q_i_1_bound = queue1_start + queue1_size; // When queue1_size is updated, so should be q_i_1_bound. const idi q_i_2_bound = queue2_start + queue2_size; // idi insert_i; for (idi insert_i = insert_index + 1; insert_i < queue1_length; ++insert_i) { if (q_i_1 >= q_i_1_bound) { queue1_size += std::min(queue1_length - insert_i, q_i_2_bound - q_i_2); for ( ; insert_i < queue1_size; ++insert_i) { queue1[queue1_start + insert_i] = queue2[q_i_2++]; } break; } else if (q_i_2 >= q_i_2_bound) { break; } else if (queue1[q_i_1] < queue2[q_i_2]) { ++q_i_1; } else if (queue2[q_i_2] < queue1[q_i_1]) { add_into_queue_at( queue2[q_i_2++], queue1, insert_i, queue1_start, queue1_size, queue1_length); ++q_i_1; q_i_1_bound = queue1_start + queue1_size; } else { // Duplicate ++q_i_2; ++q_i_1; } } } inline idi Searching::merge_all_queues_para_list( std::vector< std::vector<Candidate> > &local_queues_list, std::vector<idi> &local_queues_ends, std::vector<Candidate> &set_L, const idi L) { int size = 1 << (static_cast<idi>(log2(num_threads_))); idi log2size = static_cast<idi>(log2(size)); for (idi d = 0; d < log2size; ++d) { uint32_t by = 1 << (d + 1); #pragma omp parallel for for (int i = 0; i < size; i += by) { idi ai = i + (1 << (d + 1)) - 1; // i + 2^(d+1) - 1 idi bi = i + (1 << d) - 1; // i + 2^d - 1 if (0 == local_queues_ends[bi]) { continue; } if (local_queues_ends[ai] == 0) { local_queues_list[ai].swap(local_queues_list[bi]); std::swap(local_queues_ends[ai], local_queues_ends[bi]); continue; } // else if (local_queues_ends[ai] < L && local_queues_ends[bi] >= L) { // local_queues_list[ai].swap(local_queues_list[bi]); // std::swap(local_queues_ends[ai], local_queues_ends[bi]); // } // merge_two_queues_into_1st_queue_seq( // local_queues_list[ai], // 0, // local_queues_ends[ai], // local_queues_list[bi], // 0, // local_queues_ends[bi]); idi tmp_length = local_queues_ends[ai] + local_queues_ends[bi]; std::vector<Candidate> tmp_queue(tmp_length); std::merge( local_queues_list[ai].begin(), local_queues_list[ai].begin() + local_queues_ends[ai], local_queues_list[bi].begin(), local_queues_list[bi].begin() + local_queues_ends[bi], tmp_queue.begin()); if (tmp_length > L) { tmp_queue.resize(L); tmp_length = L; } else if (tmp_length < L) { tmp_queue.resize(L); } local_queues_list[ai].swap(tmp_queue); local_queues_ends[ai] = tmp_length; // {// Print queue a // printf("d: %u " // "i: %u " // "ai: %u " // "local_queues_ends[%d]: %d\n", // d, // i, // ai, // ai, // local_queues_ends[ai]); // for (idi i_q = 0; i_q < local_queues_ends[ai]; ++i_q) { // printf("[%u]: " // "id: %u " // "dist: %f\n", // i_q, // local_queues_list[ai][i_q].id_, // local_queues_list[ai][i_q].distance_); // } // } } } // Remain, prefix-sum-like merge if (size != num_threads_) { for (int 
i = size; i < num_threads_; ++i) { idi ai = i; idi bi = i - 1; if (0 == local_queues_ends[bi]) { continue; } if (local_queues_ends[ai] == 0) { local_queues_list[ai].swap(local_queues_list[bi]); std::swap(local_queues_ends[ai], local_queues_ends[bi]); continue; } // else if (local_queues_ends[ai] < L && local_queues_ends[bi] >= L) { // local_queues_list[ai].swap(local_queues_list[bi]); // std::swap(local_queues_ends[ai], local_queues_ends[bi]); // } // merge_two_queues_into_1st_queue_seq( // local_queues_list[ai], // 0, // local_queues_ends[ai], // local_queues_list[bi], // 0, // local_queues_ends[bi]); idi tmp_length = local_queues_ends[ai] + local_queues_ends[bi]; std::vector<Candidate> tmp_queue(tmp_length); std::merge( local_queues_list[ai].begin(), local_queues_list[ai].begin() + local_queues_ends[ai], local_queues_list[bi].begin(), local_queues_list[bi].begin() + local_queues_ends[bi], tmp_queue.begin()); if (tmp_length > L) { tmp_queue.resize(L); tmp_length = L; } else if (tmp_length < L) { tmp_queue.resize(L); } local_queues_list[ai].swap(tmp_queue); local_queues_ends[ai] = tmp_length; } } // Merge into set_L idi r = L; if (local_queues_ends[num_threads_ - 1]) { r = merge_two_queues_into_1st_queue_seq_fixed( set_L, 0, L, local_queues_list[num_threads_ - 1], 0, local_queues_ends[num_threads_ - 1]); } // Reset local_queues_ends std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0); return r; } /* Function: * Use large local_queues_array as a concatenation of all queues */ inline idi Searching::merge_all_queues_para_array( std::vector<Candidate> &set_L, std::vector<idi> &local_queues_ends, const idi local_queue_length, const idi L) { const int num_queues = num_threads_; idi nk = L; int size = 1 << (static_cast<idi>(log2(num_queues))); idi log2size = static_cast<idi>(log2(size)); for (idi d = 0; d < log2size; ++d) { uint32_t by = 1 << (d + 1); #pragma omp parallel for for (int i = 0; i < size; i += by) { idi ai = i + (1 << (d + 1)) - 1; // i + 2^(d+1) - 1 idi a_start = ai * local_queue_length; idi bi = i + (1 << d) - 1; // i + 2^d - 1 idi b_start = bi * local_queue_length; if (0 == local_queues_ends[bi]) { continue; } if (local_queues_ends[ai] == 0) { std::copy(set_L.begin() + b_start, set_L.begin() + b_start + local_queues_ends[bi], set_L.begin() + a_start); // Copy bi to ai local_queues_ends[ai] = local_queues_ends[bi]; local_queues_ends[bi] = 0; continue; } if (ai != static_cast<idi>(num_queues - 1)) { merge_two_queues_into_1st_queue_seq_incr( set_L, a_start, local_queues_ends[ai], local_queue_length, set_L, b_start, local_queues_ends[bi]); } else { idi r = merge_two_queues_into_1st_queue_seq_fixed( set_L, a_start, L, set_L, b_start, local_queues_ends[bi]); if (r < nk) { nk = r; } } } } // Remain, prefix-sum-like merge if (size != num_queues) { for (int i = size; i < num_queues; ++i) { idi ai = i; idi a_start = ai * local_queue_length; idi bi = i - 1; idi b_start = bi * local_queue_length; if (0 == local_queues_ends[bi]) { continue; } if (local_queues_ends[ai] == 0) { std::copy(set_L.begin() + b_start, set_L.begin() + b_start + local_queues_ends[bi], set_L.begin() + a_start); // Copy bi to ai local_queues_ends[ai] = local_queues_ends[bi]; local_queues_ends[bi] = 0; continue; } if (ai != static_cast<idi>(num_queues - 1)) { merge_two_queues_into_1st_queue_seq_incr( set_L, a_start, local_queues_ends[ai], local_queue_length, set_L, b_start, local_queues_ends[bi]); } else { idi r = merge_two_queues_into_1st_queue_seq_fixed( set_L, a_start, L, set_L, b_start, 
local_queues_ends[bi]); if (r < nk) { nk = r; } } } } // Reset local_queues_ends // Not do this for Collector Idea or Selecting Idea std::fill(local_queues_ends.begin(), local_queues_ends.end() - 1, 0); // std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0); return nk; // return r; } /* Function: * When merge all queues (in an array, and [num_threads_ - 1] is the global queue), * the starting local is at [queue_base] */ inline idi Searching::merge_all_queues_queue_base( // std::vector< std::vector<Candidate> > &local_queues_list, std::vector<Candidate> &set_L, // std::vector<Candidate> &local_queues_array, std::vector<idi> &local_queues_ends, const idi queue_base, const int real_threads, const idi local_queue_length, // std::vector<Candidate> &set_L, const idi L) { idi nk = L; int size = 1 << (static_cast<idi>(log2(real_threads))); // int size = 1 << (static_cast<idi>(log2(num_threads_))); idi log2size = static_cast<idi>(log2(size)); for (idi d = 0; d < log2size; ++d) { idi by = 1 << (d + 1); idi i_bound = size + queue_base; #pragma omp parallel for num_threads(real_threads) for (idi i = queue_base; i < i_bound; i += by) { // for (int i = 0; i < size; i += by) { // idi ai = i + (1 << (d + 1)) - 1 + queue_base; // i + 2^(d+1) - 1 idi ai = i + (1 << (d + 1)) - 1; // i + 2^(d+1) - 1 idi a_start = ai * local_queue_length; // idi bi = i + (1 << d) - 1 + queue_base; // i + 2^d - 1 idi bi = i + (1 << d) - 1; // i + 2^d - 1 idi b_start = bi * local_queue_length; if (0 == local_queues_ends[bi]) { continue; } if (local_queues_ends[ai] == 0) { // local_queues_list[ai].swap(local_queues_list[bi]); std::copy(set_L.begin() + b_start, set_L.begin() + b_start + local_queues_ends[bi], set_L.begin() + a_start); // Copy bi to ai local_queues_ends[ai] = local_queues_ends[bi]; local_queues_ends[bi] = 0; continue; } if (ai != static_cast<idi>(num_threads_ - 1)) { merge_two_queues_into_1st_queue_seq_incr( set_L, a_start, local_queues_ends[ai], local_queue_length, set_L, b_start, local_queues_ends[bi]); } else { idi r = merge_two_queues_into_1st_queue_seq_fixed( set_L, a_start, L, set_L, b_start, local_queues_ends[bi]); if (r < nk) { nk = r; } } } } // Remain, prefix-sum-like merge if (size != real_threads) { // if (size != num_threads_) { for (int i = size + queue_base; i < num_threads_; ++i) { // for (int i = size; i < num_threads_; ++i) { idi ai = i; idi a_start = ai * local_queue_length; idi bi = i - 1; idi b_start = bi * local_queue_length; if (0 == local_queues_ends[bi]) { continue; } if (local_queues_ends[ai] == 0) { std::copy(set_L.begin() + b_start, set_L.begin() + b_start + local_queues_ends[bi], set_L.begin() + a_start); // Copy bi to ai local_queues_ends[ai] = local_queues_ends[bi]; local_queues_ends[bi] = 0; continue; } if (ai != static_cast<idi>(num_threads_ - 1)) { merge_two_queues_into_1st_queue_seq_incr( set_L, a_start, local_queues_ends[ai], local_queue_length, set_L, b_start, local_queues_ends[bi]); } else { idi r = merge_two_queues_into_1st_queue_seq_fixed( set_L, a_start, L, set_L, b_start, local_queues_ends[bi]); if (r < nk) { nk = r; } } } } // Reset local_queues_ends std::fill(local_queues_ends.begin(), local_queues_ends.end() - 1, 0); // std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0); return nk; // return r; } inline void Searching::merge_two_consecutive_queues_in_place( std::vector<Candidate> &two_queues, const idi base_1, // const idi &end_1, const idi base_2, const idi &length_2) { // idi tid = omp_get_thread_num(); idi index_1 = base_1; idi index_2 = 
base_2; const idi bound_2 = base_2 + length_2; while (index_1 < index_2 && index_2 < bound_2) { Candidate e_1 = two_queues[index_1]; Candidate e_2 = two_queues[index_2]; if (e_1 < e_2) { ++index_1; } else if (e_2 < e_1) { // time_memmove_list_[tid] -= WallTimer::get_time_mark(); std::memmove(two_queues.data() + index_1 + 1, two_queues.data() + index_1, (index_2 - index_1) * sizeof(Candidate)); // time_memmove_list_[tid] += WallTimer::get_time_mark(); two_queues[index_1] = e_2; ++index_1; ++index_2; } else { // Duplicate, but have no idea what to do right now // time_memmove_list_[tid] -= WallTimer::get_time_mark(); std::memmove(two_queues.data() + index_1 + 1, two_queues.data() + index_1, (index_2 - index_1) * sizeof(Candidate)); // time_memmove_list_[tid] += WallTimer::get_time_mark(); index_1 += 2; ++index_2; } } } ///* Function: // * Merge all queues to the global queue, in a two-queue-merge way // */ //inline idi Searching::merge_all_queues_all_together_in_sequential( // std::vector<Candidate> &set_L, // std::vector<idi> &local_queues_ends, // const idi local_queue_length, // const idi L) //{ // const idi num_queues = num_threads_; // const idi global_queue_base = (num_queues - 1) * local_queue_length; // std::vector<idi> queue_heads(num_queues, 0); // idi queue_id_min; // //// bool is_finished = false; // bool is_1st_selected = true; // idi nk = L; // The highest location of insertion. // { // for (idi q_i = 0; q_i < num_queues; ++q_i) { // if (0 == local_queues_ends[q_i]) { // continue; // } // _mm_prefetch(set_L.data() + q_i * local_queue_length, _MM_HINT_T0); // } // } // while (queue_heads[num_queues - 1] < L) { //// time_compare_minimum_ -= WallTimer::get_time_mark(); // queue_id_min = min_all_queues_at_heads( // set_L, // queue_heads, // local_queues_ends, // local_queue_length, // L); //// time_compare_minimum_ += WallTimer::get_time_mark(); // if (queue_id_min != num_queues - 1) { // Not in the global queue //// time_insert_ -= WallTimer::get_time_mark(); // insert_one_element_at( // set_L[queue_heads[queue_id_min] + queue_id_min * local_queue_length], // set_L, // queue_heads[num_queues - 1], // global_queue_base, // L); //// time_insert_ += WallTimer::get_time_mark(); // if (is_1st_selected) { // Get the highest inserting location // is_1st_selected = false; // nk = queue_heads[num_queues - 1]; // } // ++queue_heads[queue_id_min]; // } // ++queue_heads[num_queues - 1]; // } // // // Reset local_queues_ends // std::fill(local_queues_ends.begin(), local_queues_ends.end() - 1, 0); // return nk; //} ///* Function: // * Find the minimum among queues at their head locations // */ //inline idi Searching::min_all_queues_at_heads( // const std::vector<Candidate> &set_L, // std::vector<idi> &queue_heads, // const std::vector<idi> &local_queues_ends, // const idi local_queue_length, // const idi L) //{ // const idi num_queues = num_threads_; // idi min_queue_id = num_queues - 1; // Candidate min_candidate = set_L[queue_heads[min_queue_id] + min_queue_id * local_queue_length]; // // for (idi q_i = 0; q_i < num_queues - 1; ++q_i) { // if (queue_heads[q_i] >= local_queues_ends[q_i]) { // q_i finished // continue; // } // const Candidate &ele = set_L[queue_heads[q_i] + q_i * local_queue_length]; // if (ele < min_candidate) { // min_candidate = ele; // min_queue_id = q_i; // } else if (ele.id_ == min_candidate.id_) { // Redundant element // ++queue_heads[q_i]; // } // } // // return min_queue_id; //} inline void Searching::merge_in_set_L( std::vector<Candidate> &set_L, const idi 
set_L_length, const idi num_queues, const idi local_queue_length) { idi size = 1 << (static_cast<idi>(log2(num_queues))); idi log2size = static_cast<idi>(log2(size)); for (idi d = 0; d < log2size; ++d) { const idi merge_length = (local_queue_length << d); idi by = 1 << (d + 1); // Parallel for #pragma omp parallel for for (idi i = 0; i < size; i += by) { // idi a = i + (1 << d) - 1; // idi b = i + (1 << (d + 1)) - 1; idi a = i; idi b = i + (1 << d); idi base_a = a * local_queue_length; idi base_b = b * local_queue_length; if (base_a >= set_L_length || base_b >= set_L_length) { continue; } idi length_b; if (a + by < size) { length_b = merge_length; } else { // The last one if (size == num_queues) { length_b = set_L_length - base_b; } else { length_b = merge_length; } } // printf("a: %u b: %u " // "base_a: %u base_b: %u length_b: %u\n", // a, b, // base_a, base_b, length_b); merge_two_consecutive_queues_in_place( set_L, base_a, base_b, length_b); } } if (size != num_queues) { for (idi i = size; i < num_queues; ++i) { idi a = 0; idi b = i; idi base_a = a; idi base_b = b * local_queue_length; if (base_b >= set_L_length) { continue; } idi length_b; if (b != num_queues - 1) { length_b = local_queue_length; } else { length_b = set_L_length - base_b; } // printf("a: %u b: %u " // "base_a: %u base_b: %u length_b: %u\n", // a, b, // base_a, base_b, length_b); merge_two_consecutive_queues_in_place( set_L, base_a, base_b, length_b); } } } /* * 7/5/2020-20:27 * Every queue keeps only elements which can be ordered in the top-L globally. * local_queues_lengths records the end location for all queues */ inline distf Searching::selecting_top_L_seq( std::vector<Candidate> &set_L, const idi global_L, const idi num_queues, const std::vector<idi> &local_queues_starts, std::vector<idi> &local_queues_sizes) { std::vector<idi> pointers(num_queues, 0); distf bound_lth; idi rank = 0; bool is_finished = false; distf min_dist = FLT_MAX; idi min_q_i; idi min_id; while (rank < global_L) { is_finished = true; min_dist = FLT_MAX; for (idi q_i = 0; q_i < num_queues; ++q_i) { if (pointers[q_i] >= local_queues_sizes[q_i]) { // q_i is finished continue; } is_finished = false; idi sub = pointers[q_i] + local_queues_starts[q_i]; distf tmp_dist = set_L[sub].distance_; idi tmp_id = set_L[sub].id_; if (tmp_dist < min_dist) { min_dist = tmp_dist; min_id = tmp_id; min_q_i = q_i; } else if (tmp_dist == min_dist && tmp_id < min_id) { min_id = tmp_id; min_q_i = q_i; } } if (is_finished) { {//test printf("Error: selecting_top_L_seq: only found %u elements but global_L is %u.\n", rank, global_L); } break; } bound_lth = min_dist; ++pointers[min_q_i]; ++rank; } std::copy(pointers.begin(), pointers.end(), local_queues_sizes.begin()); return bound_lth; } /* * 7/24/2020-10:08 * Record for every queue the position that contains the top-M unchecked vertices. * So the total expanded vertices should still be M, which means the computation should * be the same with merging idea. 
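 * pointers_starts gives the per-queue scan start positions; on return, local_m_counts[q_i]
 * holds how many of the selected top-M unchecked candidates came from queue q_i.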
*/ inline void Searching::selecting_unchecked_top_M_seq( const idi query_id, const idi iter, std::vector<Candidate> &set_L, const std::vector<idi> &pointers_starts, const idi value_M, const idi num_queues, const std::vector<idi> &local_queues_starts, const std::vector<idi> &local_queues_sizes, std::vector<idi> &local_m_counts) { std::vector<idi> pointers(pointers_starts); // std::vector<idi> pointers(num_queues, 0); std::fill(local_m_counts.begin(), local_m_counts.end(), 0); idi rank = 0; bool is_finished = true; distf min_dist = FLT_MAX; idi min_q_i; idi min_id; while (rank < value_M) { min_dist = FLT_MAX; for (idi q_i = 0; q_i < num_queues; ++q_i) { idi &pointer = pointers[q_i]; idi sub = pointer + local_queues_starts[q_i]; // {//test // if (133 == query_id && // 3 == iter && // 321341 == set_L[sub].id_) { // printf("(%u %f)\n", // set_L[sub].id_, set_L[sub].distance_); // } // } while (pointer < local_queues_sizes[q_i] && set_L[sub].is_checked_) { ++pointer; ++sub; } if (pointer >= local_queues_sizes[q_i]) { // q_i is finished continue; } is_finished = false; distf tmp_dist = set_L[sub].distance_; idi tmp_id = set_L[sub].id_; if (tmp_dist < min_dist) { min_dist = tmp_dist; min_id = tmp_id; min_q_i = q_i; } else if (tmp_dist == min_dist && tmp_id < min_id) { min_id = tmp_id; min_q_i = q_i; } } if (!is_finished) { is_finished = true; ++pointers[min_q_i]; ++rank; ++local_m_counts[min_q_i]; } else { break; } } // std::copy(pointers.begin(), pointers.end(), local_top_m_positions.begin()); } /* * 7/27/2020-15:41 * Gather the top-M unchecked vertices from local queues. */ inline void Searching::gather_unchecked_top_M_seq( const idi query_id, const idi iter, std::vector<Candidate> &set_L, const std::vector<idi> &pointers_starts, const idi value_M, const idi num_queues, const std::vector<idi> &local_queues_starts, const std::vector<idi> &local_queues_sizes, std::vector<idi> &top_m_candidates, idi &top_m_candidates_size, std::vector<idi> &bound_subs) { std::vector<idi> pointers(pointers_starts); // std::vector<idi> pointers(num_queues, 0); // std::fill(local_m_counts.begin(), local_m_counts.end(), 0); // idi rank = 0; bool is_finished = true; distf min_dist = FLT_MAX; idi min_q_i; idi min_id; while (top_m_candidates_size < value_M) { min_dist = FLT_MAX; for (idi q_i = 0; q_i < num_queues; ++q_i) { idi &pointer = pointers[q_i]; idi sub = pointer + local_queues_starts[q_i]; while (pointer < local_queues_sizes[q_i] && set_L[sub].is_checked_) { ++pointer; ++sub; } if (pointer >= local_queues_sizes[q_i]) { // q_i is finished continue; } is_finished = false; distf tmp_dist = set_L[sub].distance_; idi tmp_id = set_L[sub].id_; if (tmp_dist < min_dist) { min_dist = tmp_dist; min_id = tmp_id; min_q_i = q_i; } else if (tmp_dist == min_dist && tmp_id < min_id) { min_id = tmp_id; min_q_i = q_i; } } if (!is_finished) { is_finished = true; idi sub = local_queues_starts[min_q_i] + pointers[min_q_i]; top_m_candidates[top_m_candidates_size++] = set_L[sub].id_; set_L[sub].is_checked_ = true; // Checked ++pointers[min_q_i]; // ++rank; // ++local_m_counts[min_q_i]; } else { break; } } // std::copy(pointers.begin(), pointers.end(), local_top_m_positions.begin()); std::copy(pointers.begin(), pointers.end(), bound_subs.begin()); } inline void Searching::search_with_top_m( const PANNS::idi M, const PANNS::idi query_id, const PANNS::idi K, const PANNS::idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K) { boost::dynamic_bitset<> is_visited(num_v_); { for (idi c_i = 0; c_i < L; 
++c_i) { is_visited[init_ids[c_i]] = true; } } const dataf *query_data = queries_load_ + query_id * dimension_; for (idi v_i = 0; v_i < L; ++v_i) { idi v_id = init_ids[v_i]; _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); } // Get the distances of all candidates, store in the set set_L. for (unsigned i = 0; i < L; i++) { unsigned v_id = init_ids[i]; auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); dataf norm = *v_data++; ++count_distance_computation_; distf dist = compute_distance_with_norm(v_data, query_data, norm); set_L[i] = Candidate(v_id, dist, false); // False means not checked. } std::sort(set_L.begin(), set_L.begin() + L); std::vector<idi> top_m_candidates(M); idi top_m_candidates_end = 0; idi k = 0; // Index of first unchecked candidate. idi tmp_count = 0; // for debug while (k < L) { ++tmp_count; unsigned nk = L; // Select M candidates idi last_k = L; for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { if (set_L[c_i].is_checked_) { continue; } last_k = c_i; // Record the location of the last candidate selected. set_L[c_i].is_checked_ = true; top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; } // Push M candidates' neighbors into the queue. for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { idi cand_id = top_m_candidates[c_i]; _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); idi out_degree = *out_edges++; for (idi n_i = 0; n_i < out_degree; ++n_i) { _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); } for (idi e_i = 0; e_i < out_degree; ++e_i) { idi nb_id = out_edges[e_i]; if (is_visited[nb_id]) { continue; } is_visited[nb_id] = true; auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); dataf norm = *nb_data++; ++count_distance_computation_; distf dist = compute_distance_with_norm(nb_data, query_data, norm); if (dist > set_L[L-1].distance_) { continue; } Candidate cand(nb_id, dist, false); idi r = insert_into_queue(set_L, L, cand); if (r < nk) { nk = r; } } } top_m_candidates_end = 0; // Clear top_m_candidates if (nk <= last_k) { k = nk; } else { k = last_k + 1; } } for (idi k_i = 0; k_i < K; ++k_i) { set_K[k_i] = set_L[k_i].id_; } } inline void Searching::search_with_top_m_scale_m( const PANNS::idi value_M_max, const PANNS::idi query_id, const PANNS::idi K, const PANNS::idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K, std::vector<idi> &top_m_candidates, boost::dynamic_bitset<> &is_visited) { // boost::dynamic_bitset<> is_visited(num_v_); { for (idi c_i = 0; c_i < L; ++c_i) { is_visited[init_ids[c_i]] = true; } } const dataf *query_data = queries_load_ + query_id * dimension_; for (idi v_i = 0; v_i < L; ++v_i) { idi v_id = init_ids[v_i]; _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); } // Get the distances of all candidates, store in the set set_L. for (unsigned i = 0; i < L; i++) { unsigned v_id = init_ids[i]; auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); dataf norm = *v_data++; ++count_distance_computation_; distf dist = compute_distance_with_norm(v_data, query_data, norm); set_L[i] = Candidate(v_id, dist, false); // False means not checked. } std::sort(set_L.begin(), set_L.begin() + L); // std::vector<idi> top_m_candidates(M); idi top_m_candidates_end = 0; idi k = 0; // Index of first unchecked candidate. 
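    // A note on the M schedule in the loop below (a reading aid, not part of the
    // original logic): this variant starts with M = 1 and doubles M at the end of
    // every iteration until it reaches value_M_max, so the expansion widths follow
    // 1, 2, 4, 8, ... For example, with value_M_max = 8 the widths per iteration are
    //   iteration: 1 2 3 4 5 ...
    //   M:         1 2 4 8 8 ...
    // Early iterations therefore behave like greedy best-first search, while later
    // iterations expand candidates in bulk like the fixed-M search_with_top_m above.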
idi tmp_count = 0; // for debug idi M = 1; while (k < L) { ++tmp_count; unsigned nk = L; // Select M candidates idi last_k = L; for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { if (set_L[c_i].is_checked_) { continue; } last_k = c_i; // Record the location of the last candidate selected. set_L[c_i].is_checked_ = true; top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; } // Push M candidates' neighbors into the queue. for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { idi cand_id = top_m_candidates[c_i]; _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); idi out_degree = *out_edges++; for (idi n_i = 0; n_i < out_degree; ++n_i) { _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); } for (idi e_i = 0; e_i < out_degree; ++e_i) { idi nb_id = out_edges[e_i]; if (is_visited[nb_id]) { continue; } is_visited[nb_id] = true; auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); dataf norm = *nb_data++; ++count_distance_computation_; distf dist = compute_distance_with_norm(nb_data, query_data, norm); if (dist > set_L[L-1].distance_) { continue; } Candidate cand(nb_id, dist, false); idi r = insert_into_queue(set_L, L, cand); if (r < nk) { nk = r; } } } top_m_candidates_end = 0; // Clear top_m_candidates if (nk <= last_k) { k = nk; } else { k = last_k + 1; } if (M < value_M_max) { M <<= 1; } } for (idi k_i = 0; k_i < K; ++k_i) { set_K[k_i] = set_L[k_i].id_; } {// Reset is_visited.reset(); } } ////void Searching::search_with_top_m( //inline void Searching::search_with_top_m_to_get_distance_range( // const PANNS::idi M, // const PANNS::idi query_id, //// const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids) //// std::vector<idi> &set_K) //{ // dist_max_ = -FLT_MAX; // dist_min_ = FLT_MAX; // boost::dynamic_bitset<> is_visited(num_v_); // // { // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = true; // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. //// {// For distance range //// if (dist > dist_max_) { //// dist_max_ = dist; //// } //// if (dist < dist_min_) { //// dist_min_ = dist; //// } //// } // } // std::sort(set_L.begin(), set_L.begin() + L); // // std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. // idi tmp_count = 0; // for debug // while (k < L) { // ++tmp_count; // // unsigned nk = L; // // // Select M candidates // idi last_k = L; // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // if (set_L[c_i].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. 
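// The search loops above and below rely on insert_into_queue(set_L, L, cand), which is
// declared elsewhere in this header and not shown in this section. The sketch below is
// only an assumption about the behavior those loops depend on, not the actual
// implementation: insert cand into the sorted prefix set_L[0, L) and return the
// position it lands at, so the caller can lower nk to the highest location that changed.
inline idi insert_into_sorted_prefix_sketch(
        std::vector<Candidate> &set_L,
        const idi L,
        const Candidate &cand)
{
    // Find the first position whose element should come after cand.
    idi loc = 0;
    while (loc < L && !(cand < set_L[loc])) {
        ++loc;
    }
    if (loc >= L) {
        return L; // Not better than the current L-th candidate: nothing inserted.
    }
    // Shift the tail down by one, dropping the former last element, and place cand.
    for (idi i = L - 1; i > loc; --i) {
        set_L[i] = set_L[i - 1];
    }
    set_L[loc] = cand;
    return loc;
}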
// set_L[c_i].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; // } // // // Push M candidates' neighbors into the queue. // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = true; // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } //// if (dist >= set_L[L-1].distance_) { //// continue; //// } // Candidate cand(nb_id, dist, false); // idi r = insert_into_queue(set_L, L, cand); // if (r < nk) { // nk = r; // } //// {// For distance range //// if (dist > dist_max_) { //// dist_max_ = dist; //// } //// if (dist < dist_min_) { //// dist_min_ = dist; //// } //// } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // {// For histogram // for (idi i_l = 0; i_l < L; ++i_l) { // distf dist = set_L[i_l].distance_; // {// For distance range // if (dist > dist_max_) { // dist_max_ = dist; // } // if (dist < dist_min_) { // dist_min_ = dist; // } // } // } // } // } // //// for (idi k_i = 0; k_i < K; ++k_i) { //// set_K[k_i] = set_L[k_i].id_; //// } //} // ////void Searching::search_with_top_m( //inline void Searching::search_with_top_m_myths_M( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K) //{ //// {//test //// printf("query_id: %u\n", query_id); //// } // const idi loc_range = L / 3; // // // boost::dynamic_bitset<> is_visited(num_v_); // // { // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = true; // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // std::sort(set_L.begin(), set_L.begin() + L); // //// {// For histogram //// const distf dist_range = dist_max_ - dist_min_; //// printf("iter:%u\n", 0); //// for (idi i_l = 0; i_l < L; ++i_l) { //// printf("%f\n", (set_L[i_l].distance_ - dist_min_) / dist_range * 100.0); //// } //// } // // std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. 
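// The "Select M candidates" step shared by all of the search variants in this file:
// scan from the first unchecked index k, mark up to M unchecked candidates as checked,
// collect their ids, and remember last_k, the index of the last candidate taken. This
// standalone sketch restates that step for reference; it assumes the Candidate fields
// (id_, is_checked_) defined earlier in this header and is not one of the member
// functions themselves.
inline idi select_top_m_unchecked_sketch(
        std::vector<Candidate> &set_L,
        const idi k,
        const idi L,
        const idi M,
        std::vector<idi> &top_m_candidates,
        idi &top_m_candidates_end)
{
    idi last_k = L; // L means "nothing was selected".
    for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
        if (set_L[c_i].is_checked_) {
            continue;
        }
        last_k = c_i; // Location of the last candidate selected.
        set_L[c_i].is_checked_ = true;
        top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
    }
    return last_k;
}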
// idi tmp_count = 0; // for debug // while (k < L) { // std::vector<idi> range_count(3, 0); // idi zero_inserted_count = 0; //// {//test //// printf("tmp_count: %u\n", tmp_count); //// } // ++tmp_count; // // unsigned nk = L; // // // Select M candidates // idi last_k = L; // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // if (set_L[c_i].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[c_i].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; // } //// {//test //// printf("top_m_candidates_ends: %u\n", top_m_candidates_end); //// } // { // if (0 == top_m_candidates_end) { // break; // } // } // // // uint64_t count_neighbors = 0; // uint64_t count_inserted = 0; // std::vector<idi> locs_to_count(M); // // Push M candidates' neighbors into the queue. // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // // count_neighbors += out_degree; // idi num_inserted = 0; // // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = true; // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } //// if (dist >= set_L[L-1].distance_) { //// continue; //// } // ++num_inserted; // Candidate cand(nb_id, dist, false); // idi r = insert_into_queue(set_L, L, cand); //// { //// printf("c_i: %u " //// "count: %u " //// "loc_inserted: %u\n", //// c_i, //// num_inserted, //// r); //// } // if (r < nk) { // nk = r; // } // { // ++range_count[r / loc_range]; // } // } // { // if (0 == num_inserted) { // ++zero_inserted_count; // } // locs_to_count[c_i] = num_inserted; // count_inserted += num_inserted; // } //// { //// printf("c_i: %u " //// "num_inserted: %u\n", //// c_i, //// num_inserted); //// } // } //// { //// for (idi c_i = top_m_candidates_end; c_i < M; ++c_i) { //// locs_to_count[c_i] = 0; //// } //// printf("iter:%u\n", tmp_count); //// for (idi c_i = 0; c_i < M; ++c_i) { //// printf("%u %u\n", c_i, locs_to_count[c_i]); //// } //// } //// {//test //// idi sum = 0; //// for (const idi ct : range_count) sum += ct; //// printf("tmp_count: %u " //// "k: %u " //// "actual_M: %u %.1f%% " //// "zero_ins: %u %.1f%% " //// "1/3: %u %.1f%% " //// "2/3: %u %.1f%% " //// "3/3: %u %.1f%%\n", //// tmp_count, //// k, //// top_m_candidates_end, 100.0 * top_m_candidates_end / M, //// zero_inserted_count, 100.0 * zero_inserted_count / top_m_candidates_end, //// range_count[0], 100.0 * range_count[0] / sum, //// range_count[1], 100.0 * range_count[1] / sum, //// range_count[2], 100.0 * range_count[2] / sum); //// } // top_m_candidates_end = 0; // Clear top_m_candidates // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // { // printf("query:%uiter: %u " // "#neighbors: %lu " // "#inserted: %lu " // "ratio: %.2f%%\n", // query_id, tmp_count, // count_neighbors, // count_inserted, // 100.0 * count_inserted / 
count_neighbors); // } //// {// For histogram ////// const auto it_min = std::min_element(set_L.begin(), set_L.end()); ////// const auto it_max = std::max_element(set_L.begin(), set_L.end()); ////// const distf dist_min = it_min->distance_; ////// const distf dist_max = it_max->distance_; ////// const distf dist_min = it_min->distance_ - 1.0; ////// const distf dist_max = it_max->distance_ + 1.0; //// const distf dist_range = dist_max_ - dist_min_; ////// const distf dist_range = dist_max - dist_min; ////// { ////// printf("it_min->distance_: %f dist_min: %f\n", ////// it_min->distance_, dist_min); ////// } ////// const distf dist_range = it_max->distance_ - it_min->distance_; //// printf("iter:%u\n", tmp_count); //// for (idi i_l = 0; i_l < L; ++i_l) { ////// printf("%f\n", set_L[i_l].distance_); ////// printf("%f\n", (set_L[i_l].distance_ - dist_min) / dist_range * 100.0); //// printf("%f\n", (set_L[i_l].distance_ - dist_min_) / dist_range * 100.0); ////// printf("%.2f\n", (set_L[i_l].distance_ - it_min->distance_) / dist_range * 100.0); //// } //// } // } // // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } // if (query_id == 3) { // exit(1); // } //} // //// Sequential Top-M algorithm for profiling purpose: byte array, CAS, and OpenMP ////void Searching::search_with_top_m( //inline void Searching::search_with_top_m_profile_bit_CAS( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K) //{ //// std::vector<uint8_t> is_visited(num_v_, 0); // Byte array //// boost::dynamic_bitset<> is_visited(num_v_); // Bit array // BitVector is_visited(num_v_); // // { //#pragma omp parallel for // for (idi c_i = 0; c_i < L; ++c_i) { //// is_visited[init_ids[c_i]] = true; // is_visited.atomic_set_bit(init_ids[c_i]); // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; //#pragma omp parallel for // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. //#pragma omp parallel for // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // std::sort(set_L.begin(), set_L.begin() + L); // // std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. // idi tmp_count = 0; // for debug // while (k < L) { // ++tmp_count; // // unsigned nk = L; // // // Select M candidates // idi last_k = L; // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // if (set_L[c_i].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[c_i].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; // } // // // Push M candidates' neighbors into the queue. 
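// The commented-out profiling variant below claims each neighbor exactly once before
// computing its distance, using either AtomicOps::CAS on a byte array or a BitVector
// with atomic bit operations; neither helper is defined in this section. The sketch
// below shows the same "claim once" pattern with std::atomic (an assumption about what
// those helpers provide, not their implementation); <atomic> is assumed to be included.
inline bool claim_visited_sketch(
        std::vector<std::atomic<uint8_t>> &is_visited,
        const idi vertex_id)
{
    uint8_t expected = 0;
    // Only the one thread that flips the flag from 0 to 1 gets true back, so each
    // vertex's distance is computed by exactly one thread per query.
    return is_visited[vertex_id].compare_exchange_strong(expected, 1);
}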
//#pragma omp parallel for // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; //// if (is_visited[nb_id]) { //// continue; //// } //// is_visited[nb_id] = true; // //// if (!AtomicOps::CAS(is_visited.data() + nb_id, //// static_cast<uint8_t>(0), //// static_cast<uint8_t>(1))) { //// continue; //// } // {// Self-defined BitVector // if (is_visited.atomic_is_bit_set(nb_id)) { // continue; // } // is_visited.atomic_set_bit(nb_id); // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } //// if (dist >= set_L[L-1].distance_) { //// continue; //// } // Candidate cand(nb_id, dist, false); // idi r = insert_into_queue(set_L, L, cand); // if (r < nk) { // nk = r; // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // } // //#pragma omp parallel for // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } //// //// {//test //// for (idi k_i = 0; k_i < K; ++k_i) { //// printf("%u: %u: %u %f\n", //// query_id, //// k_i, set_L[k_i].id_, set_L[k_i].distance_); //// } //// exit(1); //// } //} ///// Backup //inline void Searching::search_with_top_m( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K) //// std::vector< std::vector<idi> > &top_m_list) //{ // boost::dynamic_bitset<> is_visited(num_v_); // // { // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = true; // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // std::sort(set_L.begin(), set_L.begin() + L); // // std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. // idi tmp_count = 0; // for debug // while (k < L) { // ++tmp_count; // // unsigned nk = L; // // // Select M candidates // idi last_k = L; // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // if (set_L[c_i].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[c_i].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; // } // // // Push M candidates' neighbors into the queue. 
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = true; // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } //// if (dist >= set_L[L-1].distance_) { //// continue; //// } // Candidate cand(nb_id, dist, false); // idi r = insert_into_queue(set_L, L, cand); // if (r < nk) { // nk = r; // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // } // // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } //// //// {//test //// for (idi k_i = 0; k_i < K; ++k_i) { //// printf("%u: %u: %u %f\n", //// query_id, //// k_i, set_L[k_i].id_, set_L[k_i].distance_); //// } //// exit(1); //// } //} // ////// DEPRECATED: the is_visited array cannot be shared among threads. //inline void Searching::search_with_top_m_no_local_arrays( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // boost::dynamic_bitset<> &is_visited) //// std::vector< std::vector<idi> > &top_m_list) //{ //// boost::dynamic_bitset<> is_visited(num_v_); // // { // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = true; // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // std::sort(set_L.begin(), set_L.begin() + L); // // std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. // idi tmp_count = 0; // for debug // while (k < L) { // ++tmp_count; // // unsigned nk = L; // // // Select M candidates // idi last_k = L; // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // if (set_L[c_i].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[c_i].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; // } // // // Push M candidates' neighbors into the queue. 
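// Why every variant resumes at k = nk or k = last_k + 1 after an iteration: nk is the
// highest (smallest-index) queue position where a new candidate was inserted, and
// last_k is the position of the last candidate expanded. If an insertion landed at or
// above last_k (nk <= last_k), the already-scanned prefix changed, so the scan restarts
// from nk; otherwise everything up to last_k is checked and the scan continues from
// last_k + 1. Small example with L = 8: if the candidates at positions 2 and 5 were
// expanded (last_k = 5) and the best insertion landed at position 3 (nk = 3), the next
// iteration resumes at k = 3; if the best insertion landed at position 7, it resumes
// at k = 6.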
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = true; // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } //// if (dist >= set_L[L-1].distance_) { //// continue; //// } // Candidate cand(nb_id, dist, false); // idi r = insert_into_queue(set_L, L, cand); // if (r < nk) { // nk = r; // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // } // // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } //// //// {//test //// for (idi k_i = 0; k_i < K; ++k_i) { //// printf("%u: %u: %u %f\n", //// query_id, //// k_i, set_L[k_i].id_, set_L[k_i].distance_); //// } //// exit(1); //// } //} inline void Searching::search_with_top_m_in_batch( const PANNS::idi M, const PANNS::idi batch_start, const PANNS::idi batch_size, const PANNS::idi K, const PANNS::idi L, std::vector< std::vector<Candidate> > &set_L_list, const std::vector<idi> &init_ids, std::vector< std::vector<idi> > &set_K_list) { std::vector< boost::dynamic_bitset<> > is_visited_list(batch_size, boost::dynamic_bitset<> (num_v_)); // Prepare the init_ids { //#pragma omp parallel for for (idi q_i = 0; q_i < batch_size; ++q_i) { auto &is_visited = is_visited_list[q_i]; for (idi c_i = 0; c_i < L; ++c_i) { is_visited[init_ids[c_i]] = true; } } } // Initialize set_L_list { //#pragma omp parallel for for (idi q_i = 0; q_i < batch_size; ++q_i) { const dataf *query_data = queries_load_ + (q_i + batch_start) * dimension_; for (idi i = 0; i < L; i++) { idi v_id = init_ids[i]; auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); dataf norm = *v_data++; // ++count_distance_computation_; distf dist = compute_distance_with_norm(v_data, query_data, norm); set_L_list[q_i][i] = Candidate(v_id, dist, false); // False means not checked. } std::sort(set_L_list[q_i].begin(), set_L_list[q_i].begin() + L); } } { std::vector<idi> joint_queue(M * batch_size); // Joint queue for all shared top-M candidates idi joint_queue_end = 0; boost::dynamic_bitset<> is_in_joint_queue(num_v_); // std::vector< std::vector<idi> > cands_query_ids(num_v_, std::vector<idi>(batch_size)); // If candidate cand_id is selected by query q_i, q_i should be in cands_query_ids[cand_id]. // std::vector<idi> cands_query_ids_ends(num_v_, 0); std::unordered_map< idi, std::vector<idi> > cands_query_ids(batch_size * M); std::vector<idi> ks(batch_size, 0); // Indices of every queue's first unchecked candidate. 
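        // Per-query bookkeeping for the batch, mirroring the single-query loop above:
        // ks holds each query's resume position, nks the highest insertion position seen
        // in the current iteration, and last_ks the position of the last candidate that
        // query expanded. After every iteration the usual rule is applied per query
        // (ks[q] = nks[q] if nks[q] <= last_ks[q], otherwise last_ks[q] + 1), and a
        // query stays in queries_not_finished only while ks[q] < L.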
        std::vector<idi> nks(batch_size, L); // Indices of highest candidate inserted
        std::vector<idi> last_ks(batch_size, L); // Indices of lowest candidate unchecked
        std::vector<idi> queries_not_finished(batch_size);
        idi queries_not_finished_end = batch_size;
        for (idi q_i = 0; q_i < batch_size; ++q_i) {
            queries_not_finished[q_i] = q_i;
        }
        bool is_finished = false;

        idi counter_for_debug = 0;

        while (!is_finished) {
            ++counter_for_debug;
            // Build the new joint queue
            // Traverse every query's queue
            for (idi q_i = 0; q_i < queries_not_finished_end; ++q_i) {
                idi q_local_id = queries_not_finished[q_i];
//                last_ks[q_local_id] = L;
                auto &set_L = set_L_list[q_local_id];
                idi top_m_count = 0;
                for (idi c_i = ks[q_local_id]; c_i < L && top_m_count < M; ++c_i) {
                    if (set_L[c_i].is_checked_) {
                        continue;
                    }
                    set_L[c_i].is_checked_ = true;
                    last_ks[q_local_id] = c_i;
                    ++top_m_count;
                    idi cand_id = set_L[c_i].id_;
                    // Record which query selected cand_id
                    auto tmp_c = cands_query_ids.find(cand_id);
                    if (tmp_c != cands_query_ids.end()) {
                        tmp_c->second.push_back(q_local_id);
                    } else {
                        cands_query_ids.emplace(cand_id, std::vector<idi>());
                        cands_query_ids[cand_id].reserve(batch_size);
                        cands_query_ids[cand_id].push_back(q_local_id);
                    }
//                    cands_query_ids[cand_id][cands_query_ids_ends[cand_id]++] = q_local_id;
                    // Add candidate cand_id into the joint queue
                    if (is_in_joint_queue[cand_id]) {
                        continue;
                    }
                    is_in_joint_queue[cand_id] = true;
                    joint_queue[joint_queue_end++] = cand_id;
                }
            }
            queries_not_finished_end = 0; // Clear queries_not_finished

            // Traverse every shared candidate
            for (idi c_i = 0; c_i < joint_queue_end; ++c_i) {
                idi cand_id = joint_queue[c_i];
                is_in_joint_queue[cand_id] = false; // Reset is_in_joint_queue
                idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
                idi out_degree = *out_edges++;
                const auto &query_local_ids = cands_query_ids[cand_id];
                // Push neighbors to every queue of the queries that selected cand_id.
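                // Note on the joint queue: a candidate selected by several queries in
                // the batch appears in joint_queue only once, so its out-degree and edge
                // pointer are decoded once and all interested queries process it
                // back-to-back while its neighborhood is hot in cache. The per-query
                // work below (visited checks and distance computations) is unchanged.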
// Traverse cand_id's neighbors // idi &q_i_bound = cands_query_ids_ends[cand_id]; // for (idi q_i = 0; q_i < q_i_bound; ++q_i) { // idi q_local_id = query_local_ids[q_i]; for (idi q_local_id : query_local_ids) { dataf *query_data = queries_load_ + (q_local_id + batch_start) * dimension_; auto &is_visited = is_visited_list[q_local_id]; auto &set_L = set_L_list[q_local_id]; // // Traverse cand_id's neighbors for (idi e_i = 0; e_i < out_degree; ++e_i) { idi nb_id = out_edges[e_i]; if (is_visited[nb_id]) { continue; } is_visited[nb_id] = true; auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); dataf norm = *nb_data++; // ++count_distance_computation_; distf dist = compute_distance_with_norm(nb_data, query_data, norm); if (dist > set_L[L-1].distance_) { continue; } // if (dist >= set_L[L-1].distance_) { // continue; // } Candidate new_cand(nb_id, dist, false); idi insert_loc = insert_into_queue(set_L, L, new_cand); if (insert_loc < nks[q_local_id]) { nks[q_local_id] = insert_loc; } } } cands_query_ids.erase(cand_id); // q_i_bound = 0; // Clear cands_query_ids[cand_id] } joint_queue_end = 0; // Clear joint_queue for (idi q_local_id = 0; q_local_id < batch_size; ++q_local_id) { if (nks[q_local_id] <= last_ks[q_local_id]) { ks[q_local_id] = nks[q_local_id]; } else { ks[q_local_id] = last_ks[q_local_id] + 1; } nks[q_local_id] = L; last_ks[q_local_id] = L; if (ks[q_local_id] < L) { queries_not_finished[queries_not_finished_end++] = q_local_id; } } if (!queries_not_finished_end) { is_finished = true; } } } { for (idi q_i = 0; q_i < batch_size; ++q_i) { for (idi c_i = 0; c_i < K && c_i < L; ++c_i) { set_K_list[q_i + batch_start][c_i] = set_L_list[q_i][c_i].id_; } } } //// // {//test // for (idi q_i = 0; q_i < batch_size; ++q_i) { // printf("query: %u\n", q_i + batch_start); // for (idi c_i = 0; c_i < K; ++c_i) { // printf("%u: %u %f\n", c_i, set_L_list[q_i][c_i].id_, set_L_list[q_i][c_i].distance_); // } // } // } } //inline void Searching::para_search_with_top_m_critical_area( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K) //// std::vector< std::vector<idi> > &top_m_list) //{ // std::vector<uint8_t> is_visited(num_v_, 0); //// boost::dynamic_bitset<> is_visited(num_v_); // // { ////#pragma omp parallel for // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = 1; // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. ////#pragma omp parallel for // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; // ++count_distance_computation_; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // std::sort(set_L.begin(), set_L.begin() + L); // // std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. 
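// The para_search_with_top_m_critical_area* variants kept commented out below
// parallelize the expansion of the top-M candidates and serialize only the insertion
// into the shared queue. This is a minimal sketch of that structure under stated
// assumptions: the adjacency lists and distance callback are hypothetical parameters,
// and the insertion reuses insert_into_sorted_prefix_sketch above instead of the real
// insert_into_queue. The real variants additionally guard the visited array with a CAS
// so each neighbor is scored once.
template <typename DistFn>
inline void expand_top_m_with_critical_sketch(
        const std::vector<std::vector<idi>> &neighbors, // hypothetical adjacency lists
        const std::vector<idi> &top_m_candidates,
        const idi top_m_candidates_end,
        DistFn compute_dist,                            // hypothetical distance callback
        std::vector<Candidate> &set_L,
        const idi L,
        idi &nk)
{
#pragma omp parallel for
    for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
        for (const idi nb_id : neighbors[top_m_candidates[c_i]]) {
            const distf dist = compute_dist(nb_id); // the expensive part, done in parallel
            // Cheap filter against the current worst entry (read outside the critical
            // section, as in the original variants).
            if (dist > set_L[L - 1].distance_) {
                continue;
            }
            Candidate cand(nb_id, dist, false);
#pragma omp critical
            {
                const idi r = insert_into_sorted_prefix_sketch(set_L, L, cand);
                if (r < nk) {
                    nk = r;
                }
            }
        }
    }
}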
// idi tmp_count = 0; // for debug // while (k < L) { // ++tmp_count; // // unsigned nk = L; //// int nk = L; // // // Select M candidates // idi last_k = L; // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // if (set_L[c_i].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[c_i].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; // } // // // Push M candidates' neighbors into the queue. // // OpenMP reduction(min : nk) has a problem if nk is unsigned. nk might end up with being MAX_UINT. ////#pragma omp parallel for ////#pragma omp parallel for reduction(min : nk) // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; //// if (is_visited[nb_id]) { //// continue; //// } //// is_visited[nb_id] = 1; // // if (!AtomicOps::CAS(is_visited.data() + nb_id, // static_cast<uint8_t>(0), // static_cast<uint8_t>(1))) { // continue; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; // ++count_distance_computation_; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } //// if (dist >= set_L[L-1].distance_) { //// continue; //// } // Candidate cand(nb_id, dist, false); // idi r; ////#pragma omp critical // { // r = insert_into_queue(set_L, L, cand); // if (r < nk) { // nk = r; // } // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // } // ////#pragma omp parallel for // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } //} // //inline void Searching::para_search_with_top_m_critical_area_no_omp( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K) //// std::vector< std::vector<idi> > &top_m_list) //{ // std::vector<uint8_t> is_visited(num_v_, 0); //// boost::dynamic_bitset<> is_visited(num_v_); // // { ////#pragma omp parallel for // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = 1; // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. ////#pragma omp parallel for // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; // ++count_distance_computation_; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // std::sort(set_L.begin(), set_L.begin() + L); // // std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. 
// idi tmp_count = 0; // for debug // while (k < L) { // ++tmp_count; // // unsigned nk = L; //// int nk = L; // // // Select M candidates // idi last_k = L; // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // if (set_L[c_i].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[c_i].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; // } // // // Push M candidates' neighbors into the queue. // // OpenMP reduction(min : nk) has a problem if nk is unsigned. nk might end up with being MAX_UINT. ////#pragma omp parallel for ////#pragma omp parallel for reduction(min : nk) // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; //// if (is_visited[nb_id]) { //// continue; //// } //// is_visited[nb_id] = 1; // // if (!AtomicOps::CAS(is_visited.data() + nb_id, // static_cast<uint8_t>(0), // static_cast<uint8_t>(1))) { // continue; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; // ++count_distance_computation_; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } //// if (dist >= set_L[L-1].distance_) { //// continue; //// } // Candidate cand(nb_id, dist, false); // idi r; ////#pragma omp critical // { // r = insert_into_queue(set_L, L, cand); // if (r < nk) { // nk = r; // } // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // } // ////#pragma omp parallel for // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } //} // //inline void Searching::para_search_with_top_m_critical_area_yes_omp( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K) //// std::vector< std::vector<idi> > &top_m_list) //{ // std::vector<uint8_t> is_visited(num_v_, 0); //// boost::dynamic_bitset<> is_visited(num_v_); // // { //#pragma omp parallel for // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = 1; // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. ////#pragma omp parallel for // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; // ++count_distance_computation_; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // std::sort(set_L.begin(), set_L.begin() + L); // // std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. 
// idi tmp_count = 0; // for debug // while (k < L) { // ++tmp_count; // // unsigned nk = L; //// int nk = L; // // // Select M candidates // idi last_k = L; // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // if (set_L[c_i].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[c_i].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; // } // // // Push M candidates' neighbors into the queue. // // OpenMP reduction(min : nk) has a problem if nk is unsigned. nk might end up with being MAX_UINT. ////#pragma omp parallel for ////#pragma omp parallel for reduction(min : nk) // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; //// if (is_visited[nb_id]) { //// continue; //// } //// is_visited[nb_id] = 1; // // if (!AtomicOps::CAS(is_visited.data() + nb_id, // static_cast<uint8_t>(0), // static_cast<uint8_t>(1))) { // continue; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; // ++count_distance_computation_; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } //// if (dist >= set_L[L-1].distance_) { //// continue; //// } // Candidate cand(nb_id, dist, false); // idi r; ////#pragma omp critical // { // r = insert_into_queue(set_L, L, cand); // if (r < nk) { // nk = r; // } // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // } // ////#pragma omp parallel for // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } //} // //inline void Searching::para_search_with_top_m_visited_array( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // std::vector<uint8_t> &is_visited) //// std::vector< std::vector<idi> > &top_m_list) //{ //// uint64_t count_visited = 0; // //// std::vector<uint8_t> is_visited(num_v_, 0); //// boost::dynamic_bitset<> is_visited(num_v_); // // { ////#pragma omp parallel for // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = 1; //// ++count_visited; // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. ////#pragma omp parallel for // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; // ++count_distance_computation_; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. 
// } // std::sort(set_L.begin(), set_L.begin() + L); // // std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. // idi tmp_count = 0; // for debug // while (k < L) { // ++tmp_count; // // unsigned nk = L; //// int nk = L; // // // Select M candidates // idi last_k = L; // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // if (set_L[c_i].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[c_i].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; // } // // // Push M candidates' neighbors into the queue. // // OpenMP reduction(min : nk) has a problem if nk is unsigned. nk might end up with being MAX_UINT. ////#pragma omp parallel for ////#pragma omp parallel for reduction(min : nk) // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; //// if (is_visited[nb_id]) { //// continue; //// } //// is_visited[nb_id] = 1; // // if (!AtomicOps::CAS(is_visited.data() + nb_id, // static_cast<uint8_t>(0), // static_cast<uint8_t>(1))) { // continue; // } //// ++count_visited; // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; // ++count_distance_computation_; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } //// if (dist >= set_L[L-1].distance_) { //// continue; //// } // Candidate cand(nb_id, dist, false); // idi r; ////#pragma omp critical // { // r = insert_into_queue(set_L, L, cand); // if (r < nk) { // nk = r; // } // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // } // ////#pragma omp parallel for // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } // //// { //// printf("query_id: %u " //// "count_visited: %lu %f%%\n", //// query_id, //// count_visited, //// 100.0 * count_visited / num_v_); //// } //} // //inline void Searching::para_search_with_top_m_merge_queues( // const idi M, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K) //{ //// {//test //// printf("query_id: %u\n", query_id); //// } //// const idi local_queue_length = ((M - 1) / num_threads_ + 1) * width_; // const idi local_queue_length = L; // std::vector< std::vector<Candidate> > local_queues_list(num_threads_, std::vector<Candidate>(local_queue_length)); // std::vector<idi> local_queues_ends(num_threads_, 0); // std::vector<uint8_t> is_visited(num_v_, 0); //// boost::dynamic_bitset<> is_visited(num_v_); // // { //#pragma omp parallel for // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = 1; // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; //#pragma omp parallel for // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); 
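// The commented-out merge-queues variants in this region give each thread a
// fixed-capacity local queue and call
// add_into_queue(local_queues_list[tid], local_queues_ends[tid], local_queue_length, cand),
// which is declared elsewhere in this header. The sketch below is only an assumption
// about the behavior relied on here (keep the local queue sorted, grow until full,
// then replace the current worst); it does not handle duplicate ids.
inline void add_into_local_queue_sketch(
        std::vector<Candidate> &queue,
        idi &queue_end,
        const idi queue_capacity,
        const Candidate &cand)
{
    if (queue_end == queue_capacity && !(cand < queue[queue_end - 1])) {
        return; // Full and not better than the current worst: drop it.
    }
    // Find the insertion position in the sorted prefix [0, queue_end).
    idi loc = 0;
    while (loc < queue_end && !(cand < queue[loc])) {
        ++loc;
    }
    const idi new_end = (queue_end < queue_capacity) ? queue_end + 1 : queue_capacity;
    for (idi i = new_end - 1; i > loc; --i) {
        queue[i] = queue[i - 1];
    }
    queue[loc] = cand;
    queue_end = new_end;
}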
// } // // Get the distances of all candidates, store in the set set_L. //#pragma omp parallel for // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // std::sort(set_L.begin(), set_L.begin() + L); // // std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. // idi tmp_count = 0; // for debug // while (k < L) { // ++tmp_count; //// {//test //// printf("tmp_count: %d\n", tmp_count); //// } // // // Select M candidates // idi last_k = L; //// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // if (set_L[c_i].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[c_i].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; // } // // // Push M candidates' neighbors into the queue. //#pragma omp parallel for // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // int tid = omp_get_thread_num(); // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; //// if (is_visited[nb_id]) { //// continue; //// } //// is_visited[nb_id] = 1; // // if (!AtomicOps::CAS(is_visited.data() + nb_id, // static_cast<uint8_t>(0), // static_cast<uint8_t>(1))) { // continue; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } //// if (dist >= set_L[L-1].distance_) { //// continue; //// } // Candidate cand(nb_id, dist, false); // // Add to the local queue. // add_into_queue(local_queues_list[tid], local_queues_ends[tid], local_queue_length, cand); // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // // idi nk = L; //// // Merge. Parallel merging in every two queues. 
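// The merge step in this variant folds each thread's sorted local queue into the sorted
// global queue set_L[0, L) and reports the highest (smallest-index) position that
// changed, so the caller can lower nk. merge_two_queues_into_1st_queue_seq_fixed and
// merge_two_queues_into_1st_queue_para are declared elsewhere in this header; the
// sketch below is a simplified stand-in (scratch-buffer merge via std::merge, with
// <algorithm> assumed to be included), not their actual implementation.
inline idi merge_local_into_global_sketch(
        std::vector<Candidate> &set_L,
        const idi L,
        const std::vector<Candidate> &local_queue,
        const idi local_queue_end)
{
    // Merge the two sorted ranges into a scratch buffer, then keep the best L entries.
    std::vector<Candidate> merged(L + local_queue_end);
    std::merge(set_L.begin(), set_L.begin() + L,
               local_queue.begin(), local_queue.begin() + local_queue_end,
               merged.begin());
    // Find the first position whose occupant changed before overwriting set_L.
    idi first_changed = L;
    for (idi i = 0; i < L; ++i) {
        if (set_L[i].id_ != merged[i].id_) {
            first_changed = i;
            break;
        }
    }
    std::copy(merged.begin(), merged.begin() + L, set_L.begin());
    return first_changed;
}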
//// { //// for (int tid = 0; tid < num_threads_; ++tid) { //// if (0 == local_queues_ends[tid]) continue; //// idi r = merge_two_queues_into_1st_queue_para( //// set_L, //// 0, //// L, //// local_queues_list[tid], //// 0, //// local_queues_ends[tid]); ////// idi r = merge_two_queues_into_1st_queue_seq( ////// set_L, ////// 0, ////// L, ////// local_queues_list[tid], ////// 0, ////// local_queues_ends[tid]); //// local_queues_ends[tid] = 0; // Reset the local queue //// if (r < nk) { //// nk = r; //// } //// } //// } //// {// text //// if (query_id == 4 && //// tmp_count == 5) { //// // Print local queues //// for (int t_i = 0; t_i < num_threads_; ++t_i) { ////// idi start_i = t_i * local_queue_length; //// for (idi q_i = 0; q_i < local_queues_ends[t_i]; ++q_i) { //// printf("t[%u][%u]: " //// "id: %u " //// "dist: %f\n", //// t_i, q_i, //// local_queues_list[t_i][q_i].id_, //// local_queues_list[t_i][q_i].distance_); //// } //// } //// printf("----------\n"); //// for (idi i = 0; i < L; ++i) { //// printf("set_L[%u]: " //// "id: %u " //// "dist: %f\n", //// i, //// set_L[i].id_, //// set_L[i].distance_); //// } //// printf("----------\n"); //// } //// } // // Merge. Merge all queues in parallel. // { // if (num_threads_ > 1) { // idi r = merge_all_queues_para_list( // local_queues_list, // local_queues_ends, // set_L, // L); // if (r < nk) { // nk = r; // } // } else { // if (local_queues_ends[0]) { // idi r = merge_two_queues_into_1st_queue_seq_fixed( // set_L, // 0, // L, // local_queues_list[0], // 0, // local_queues_ends[0]); // local_queues_ends[0] = 0; // if (r < nk) { // nk = r; // } // } // } // } //// {//test //// if (query_id == 4) { //// for (idi i = 0; i < L; ++i) { //// printf("tmp_count: %u " //// "set_L[%u]: " //// "id: %u " //// "dist: %f\n", //// tmp_count, //// i, //// set_L[i].id_, //// set_L[i].distance_); //// } //// } //// //// } // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // } // //#pragma omp parallel for // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } //// { //// exit(1); //// } //// {//test //// ////// if (query_id == 4) { //// for (idi i = 0; i < L; ++i) { //// printf("set_L[%u]: " //// "id: %u " //// "dist: %f\n", //// i, //// set_L[i].id_, //// set_L[i].distance_); //// } ////// exit(1); ////// } //// } //} // ////// Using local queue and then sequential merge. //inline void Searching::para_search_with_top_m_queues_seq_merge( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K) //// std::vector< std::vector<idi> > &top_m_list) //{ //// const idi local_queue_length = ((L - 1) / num_threads_ + 1) * width_; // const idi local_queue_length = L; // std::vector< std::vector<Candidate> > local_queues_list(num_threads_, std::vector<Candidate>(local_queue_length)); // std::vector<idi> local_queues_ends(num_threads_, 0); // std::vector<uint8_t> is_visited(num_v_, 0); //// boost::dynamic_bitset<> is_visited(num_v_); // // { //#pragma omp parallel for // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = 1; // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; //// for (idi v_i = 0; v_i < L; ++v_i) { //// idi v_id = init_ids[v_i]; //// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); //// } // // Get the distances of all candidates, store in the set set_L. 
//#pragma omp parallel for // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // std::sort(set_L.begin(), set_L.begin() + L); // // std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. // idi tmp_count = 0; // for debug // while (k < L) { // ++tmp_count; //// { //// printf("tmp_count: %u " //// "k: %u\n", //// tmp_count, //// k); //// } // //// unsigned nk = L; //// int nk = L; // // // Select M candidates // idi last_k = L; // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // if (set_L[c_i].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[c_i].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; // } // // // Push M candidates' neighbors into the queue. //#pragma omp parallel for // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // int tid = omp_get_thread_num(); // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; //// if (is_visited[nb_id]) { //// continue; //// } //// is_visited[nb_id] = 1; // // if (!AtomicOps::CAS(is_visited.data() + nb_id, // static_cast<uint8_t>(0), // static_cast<uint8_t>(1))) { // continue; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } //// if (dist >= set_L[L-1].distance_) { //// continue; //// } // Candidate cand(nb_id, dist, false); //// idi r; ////#pragma omp critical //// { //// r = insert_into_queue(set_L, L, cand); //// if (r < nk) { //// nk = r; //// } //// } // // Add to the local queue. 
// add_into_queue(local_queues_list[tid], local_queues_ends[tid], local_queue_length, cand); // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // // idi nk = L; // // Merge // { // for (int tid = 0; tid < num_threads_; ++tid) { // if (0 == local_queues_ends[tid]) continue; // idi r = merge_two_queues_into_1st_queue_seq_fixed( // set_L, // 0, // L, // local_queues_list[tid], // 0, // local_queues_ends[tid]); //// L + 1); // local_queues_ends[tid] = 0; // Reset the local queue // if (r < nk) { // nk = r; // } // } // } // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // } // //#pragma omp parallel for // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } //// //// {//test //// for (idi k_i = 0; k_i < K; ++k_i) { //// printf("%u: %u: %u %f\n", //// query_id, //// k_i, set_L[k_i].id_, set_L[k_i].distance_); //// } //// exit(1); //// } //} // //inline void Searching::para_search_with_top_m_merge_queues_no_CAS( // const idi M, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // std::vector< std::vector<Candidate> > &local_queues_list, // std::vector<idi> &local_queues_ends, //// std::vector<uint8_t> &is_visited) // boost::dynamic_bitset<> &is_visited) //{ ////// const idi local_queue_length = ((M - 1) / num_threads_ + 1) * width_; //// const idi local_queue_length = L; //// std::vector< std::vector<Candidate> > local_queues_list(num_threads_, std::vector<Candidate>(local_queue_length)); //// std::vector<idi> local_queues_ends(num_threads_, 0); ////// std::vector<uint8_t> is_visited(num_v_, 0); //// boost::dynamic_bitset<> is_visited(num_v_); // // { //#pragma omp parallel for // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = 1; // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; //#pragma omp parallel for // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. //#pragma omp parallel for // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // std::sort(set_L.begin(), set_L.begin() + L); // // std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. // idi tmp_count = 0; // for debug // while (k < L) { // ++tmp_count; // // // Select M candidates // idi last_k = L; //// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // if (set_L[c_i].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[c_i].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; // } // // // Push M candidates' neighbors into the queue. 
//#pragma omp parallel for // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // int tid = omp_get_thread_num(); // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = 1; // //// if (!AtomicOps::CAS(is_visited.data() + nb_id, //// static_cast<uint8_t>(0), //// static_cast<uint8_t>(1))) { //// continue; //// } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } //// if (dist >= set_L[L-1].distance_) { //// continue; //// } // Candidate cand(nb_id, dist, false); // // Add to the local queue. // add_into_queue(local_queues_list[tid], local_queues_ends[tid], local_queue_length, cand); // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // // idi nk = L; //// // Merge. Parallel merging in every two queues. //// { //// for (int tid = 0; tid < num_threads_; ++tid) { //// if (0 == local_queues_ends[tid]) continue; //// idi r = merge_two_queues_into_1st_queue_para( //// set_L, //// 0, //// L, //// local_queues_list[tid], //// 0, //// local_queues_ends[tid]); ////// idi r = merge_two_queues_into_1st_queue_seq( ////// set_L, ////// 0, ////// L, ////// local_queues_list[tid], ////// 0, ////// local_queues_ends[tid]); //// local_queues_ends[tid] = 0; // Reset the local queue //// if (r < nk) { //// nk = r; //// } //// } //// } //// // Merge. Merge all queues in parallel. 
//// { //// if (num_threads_ > 1) { //// idi r = merge_all_queues_para( //// local_queues_list, //// local_queues_ends, //// set_L, //// L); //// if (r < nk) { //// nk = r; //// } //// } else { //// if (local_queues_ends[0]) { //// idi r = merge_two_queues_into_1st_queue_seq( //// set_L, //// 0, //// L, //// local_queues_list[0], //// 0, //// local_queues_ends[0]); //// local_queues_ends[0] = 0; //// if (r < nk) { //// nk = r; //// } //// } //// } //// } // // Merge // { // for (int tid = 0; tid < num_threads_; ++tid) { // if (0 == local_queues_ends[tid]) continue; // idi r = merge_two_queues_into_1st_queue_seq_fixed( // set_L, // 0, // L, // local_queues_list[tid], // 0, // local_queues_ends[tid]); //// L + 1); // local_queues_ends[tid] = 0; // Reset the local queue // if (r < nk) { // nk = r; // } // } // } // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // } // //#pragma omp parallel for // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } // // {// Reset // is_visited.reset(); //// std::fill(is_visited.begin(), is_visited.end(), 0); // std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0); // } //} //inline void Searching::para_search_with_top_m_merge_queues_in_array( //inline void Searching::para_search_with_top_m_merge_queues_new_threshold( // const idi M, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue //// std::vector< std::vector<Candidate> > &local_queues_list, // std::vector<Candidate> &local_queues_array, // std::vector<idi> &local_queues_ends, // Sizes of local queue // BitVector &is_visited) //// std::vector<uint8_t> &is_visited) //// boost::dynamic_bitset<> &is_visited) //{ // { //#pragma omp parallel for // for (idi c_i = 0; c_i < L; ++c_i) { //// is_visited[init_ids[c_i]] = 1; // is_visited.atomic_set_bit(init_ids[c_i]); // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; //#pragma omp parallel for // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. //#pragma omp parallel for // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // std::sort(set_L.begin(), set_L.begin() + L); // // idi min_index = L - 1; // distf min_1st = set_L[min_index].distance_; // // std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. // idi tmp_count = 0; // for debug // while (k < L) { // ++tmp_count; //// {//test //// printf("tmp_count: %d\n", tmp_count); //// } // // // Select M candidates // idi last_k = L; //// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // if (set_L[c_i].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. 
// set_L[c_i].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; // } // // // Push M candidates' neighbors into the queue. //#pragma omp parallel for // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // int tid = omp_get_thread_num(); // const idi local_queue_start = tid * local_queue_length; // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; //// { // Sequential edition //// if (is_visited[nb_id]) { //// continue; //// } //// is_visited[nb_id] = 1; //// } //// { // __ATOMIC_SEQ_CST edition //// if (!AtomicOps::CAS(is_visited.data() + nb_id, //// static_cast<uint8_t>(0), //// static_cast<uint8_t>(1))) { //// continue; //// } //// } //// {// Acquire and Release edition //// if (__atomic_load_n(is_visited.data() + nb_id, __ATOMIC_ACQUIRE)) { //// continue; //// } //// __atomic_store_n(is_visited.data() + nb_id, 1, __ATOMIC_RELEASE); //// } // {// Self-defined BitVector // if (is_visited.atomic_is_bit_set(nb_id)) { // continue; // } // is_visited.atomic_set_bit(nb_id); // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // // if (dist > min_1st) { // continue; // } else if (min_index > 0) { // // Inserted, so min_1st needs update // if (dist > set_L[min_index - 1].distance_) { // min_1st = dist; // if (min_index < L - 1) { // ++min_index; // } // } else { // min_1st = set_L[--min_index].distance_; // } //// min_1st = set_L[--min_index].distance_; // } // //// if (dist > set_L[L-1].distance_) { //// continue; //// } // // Candidate cand(nb_id, dist, false); // // Add to the local queue. // add_into_queue(local_queues_array, local_queue_start, local_queues_ends[tid], local_queue_length, cand); // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // // idi nk = L; //// // Merge. Parallel merging in every two queues. //// { //// for (int tid = 0; tid < num_threads_; ++tid) { //// if (0 == local_queues_ends[tid]) continue; //// idi r = merge_two_queues_into_1st_queue_para( //// set_L, //// 0, //// L, //// local_queues_list[tid], //// 0, //// local_queues_ends[tid]); ////// idi r = merge_two_queues_into_1st_queue_seq( ////// set_L, ////// 0, ////// L, ////// local_queues_list[tid], ////// 0, ////// local_queues_ends[tid]); //// local_queues_ends[tid] = 0; // Reset the local queue //// if (r < nk) { //// nk = r; //// } //// } //// } // // Merge. Merge all queues in parallel. 
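/*
 * Note on the four "editions" of visited-flag marking enumerated above
 * (sequential, __ATOMIC_SEQ_CST CAS, acquire/release load+store, and the
 * self-defined BitVector): each is a test-and-set on one byte or bit per
 * vertex. A minimal sketch of the CAS edition, assuming a plain
 * std::vector<uint8_t> flag array and the GCC/Clang atomic builtins
 * (AtomicOps::CAS in this codebase is assumed to wrap something equivalent):
 *
 *     inline bool try_mark_visited(uint8_t *flag)
 *     {
 *         uint8_t expected = 0;
 *         // True only for the first thread that flips this flag 0 -> 1.
 *         return __atomic_compare_exchange_n(
 *                 flag, &expected, static_cast<uint8_t>(1),
 *                 false, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE);
 *     }
 *
 * The non-atomic editions allow two threads to claim the same neighbor; the
 * cost is an occasional redundant distance computation (and possibly a
 * duplicate queue entry), not a crash.
 */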
// { // if (num_threads_ > 1) { // idi r = merge_all_queues_para_array( //// local_queues_list, // local_queues_array, // local_queues_ends, // local_queue_length, // set_L, // L); // if (r < nk) { // nk = r; // } // } else { // if (local_queues_ends[0]) { // idi r = merge_two_queues_into_1st_queue_seq_fixed( // set_L, // 0, // L, //// local_queues_list[0], // local_queues_array, // 0, // local_queues_ends[0]); // local_queues_ends[0] = 0; // if (r < nk) { // nk = r; // } // } // } // } //// // Merge Sequentially //// { //// for (int tid = 0; tid < num_threads_; ++tid) { //// if (0 == local_queues_ends[tid]) continue; //// idi r = merge_two_queues_into_1st_queue_seq_fixed( //// set_L, //// 0, //// L, ////// local_queues_list[tid], ////// 0, //// local_queues_array, //// tid * local_queue_length, //// local_queues_ends[tid]); ////// L + 1); //// local_queues_ends[tid] = 0; // Reset the local queue //// if (r < nk) { //// nk = r; //// } //// } //// } // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // } // //#pragma omp parallel for // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } // // {// Reset //// is_visited.reset(); //// std::fill(is_visited.begin(), is_visited.end(), 0); // is_visited.clear_all(); // std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0); // } //} /* * 5/7/2020-15:14 * Use 1 threads to scale M until the value_M_middle. * Then use multiple threads. */ inline void Searching::para_search_with_top_m_merge_queues_middle_m( const idi value_M_middle, const idi value_M_max, const idi query_id, const idi K, const idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K, const idi local_queue_length, // Maximum size of local queue const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; std::vector<idi> &local_queues_ends, // Sizes of local queue std::vector<idi> &top_m_candidates, boost::dynamic_bitset<> &is_visited) { time_initialization_ -= WallTimer::get_time_mark(); // const idi base_set_L = (num_threads_ - 1) * local_queue_length; //#pragma omp parallel for for (idi c_i = 0; c_i < L; ++c_i) { is_visited[init_ids[c_i]] = 1; } const dataf *query_data = queries_load_ + query_id * dimension_; #pragma omp parallel for for (idi v_i = 0; v_i < L; ++v_i) { idi v_id = init_ids[v_i]; _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); } uint64_t tmp_count_computation = 0; // Get the distances of all candidates, store in the set set_L. //#pragma omp parallel for #pragma omp parallel for reduction(+ : tmp_count_computation) for (unsigned i = 0; i < L; i++) { unsigned v_id = init_ids[i]; auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); dataf norm = *v_data++; ++tmp_count_computation; distf dist = compute_distance_with_norm(v_data, query_data, norm); set_L[i + base_set_L] = Candidate(v_id, dist, false); // False means not checked. } count_distance_computation_ += tmp_count_computation; tmp_count_computation = 0; // std::sort(set_L.begin(), set_L.begin() + L); std::sort( set_L.begin() + base_set_L, set_L.begin() + base_set_L + L); local_queues_ends[num_threads_ - 1] = L; time_initialization_ += WallTimer::get_time_mark(); idi top_m_candidates_end = 0; idi k = 0; // Index of first unchecked candidate. 
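/*
 * How M scales in the two phases below (worked example with hypothetical
 * values value_M_middle = 16 and value_M_max = 64): the single-threaded loop
 * runs iterations with M = 1, 2, 4, 8; once M reaches value_M_middle it
 * exits, and the multi-threaded loop continues with M = 16, 32, 64 and then
 * stays at 64 (value_M_max) until the queue settles. Doubling keeps the
 * early, cheap iterations on one thread and only enters the parallel phase
 * once the frontier is wide enough to keep all threads busy.
 */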
idi tmp_count = 0; // for debug idi M = 1; time_sequential_phase_ -= WallTimer::get_time_mark(); { // Single thread while (k < L && M < value_M_middle) { ++tmp_count; // {//test // printf("tmp_count: %d\n", tmp_count); // } // Select M candidates idi last_k = L; // Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { idi index_set_L = c_i + base_set_L; if (set_L[index_set_L].is_checked_) { continue; } last_k = c_i; // Record the location of the last candidate selected. set_L[index_set_L].is_checked_ = true; top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; } idi nk = L; // Push M candidates' neighbors into the queue. for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { idi cand_id = top_m_candidates[c_i]; _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); idi out_degree = *out_edges++; for (idi n_i = 0; n_i < out_degree; ++n_i) { _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); } for (idi e_i = 0; e_i < out_degree; ++e_i) { idi nb_id = out_edges[e_i]; { // Sequential edition if (is_visited[nb_id]) { continue; } is_visited[nb_id] = 1; } auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); dataf norm = *nb_data++; ++tmp_count_computation; distf dist = compute_distance_with_norm(nb_data, query_data, norm); if (dist > set_L[L - 1 + base_set_L].distance_) { continue; } Candidate cand(nb_id, dist, false); // Thread 0 maintains the "global" queue idi r = add_into_queue( set_L, base_set_L, local_queues_ends[num_threads_ - 1], L, cand); if (r < nk) { nk = r; } } } top_m_candidates_end = 0; // Clear top_m_candidates count_distance_computation_ += tmp_count_computation; tmp_count_computation = 0; if (nk <= last_k) { k = nk; } else { k = last_k + 1; } {// Scale M if (M < value_M_max) { M <<= 1; } else { M = value_M_max; } } } } time_sequential_phase_ += WallTimer::get_time_mark(); time_parallel_phase_ -= WallTimer::get_time_mark(); uint64_t tmp_count_add_to_queue = 0; double tmp_time_pick_top_m = 0; double tmp_time_distance_computation = 0; double tmp_time_add_to_queue = 0.0; { // Multiple Threads while (k < L) { time_expand_ -= WallTimer::get_time_mark(); ++tmp_count; // {//test // printf("tmp_count: %d\n", tmp_count); // } // Select M candidates idi last_k = L; time_pick_top_m_ -= WallTimer::get_time_mark(); // Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { idi index_set_L = c_i + base_set_L; if (set_L[index_set_L].is_checked_) { continue; } last_k = c_i; // Record the location of the last candidate selected. set_L[index_set_L].is_checked_ = true; top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; } time_pick_top_m_ += WallTimer::get_time_mark(); idi nk = L; // Push M candidates' neighbors into the queue. 
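/*
 * Layout of set_L used by the parallel loop below (illustrative; it follows
 * from the note that base_set_L = (num_threads_ - 1) * local_queue_length):
 *
 *     [ thread 1 local queue | thread 2 local queue | ...
 *       | thread (num_threads_ - 1) local queue | global queue (thread 0) ]
 *       ^ offset 0                                ^ offset base_set_L
 *
 * A thread with tid != 0 inserts into its own segment starting at
 * (tid - 1) * local_queue_length with capacity local_queue_length; thread 0
 * inserts directly into the global queue of capacity L. local_queues_ends[]
 * records how full each segment is, with slot num_threads_ - 1 tracking the
 * global queue.
 */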
#pragma omp parallel for reduction(+ : tmp_count_computation) \ reduction(+ : tmp_count_add_to_queue) \ reduction(+ : tmp_time_pick_top_m) \ reduction(+ : tmp_time_distance_computation) \ reduction(+ : tmp_time_add_to_queue) // for (int tid = 0; tid < num_threads_; ++tid) { for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { tmp_time_pick_top_m -= WallTimer::get_time_mark(); int tid = omp_get_thread_num(); idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } tmp_time_pick_top_m += WallTimer::get_time_mark(); for (idi e_i = 0; e_i < out_degree; ++e_i) { tmp_time_distance_computation -= WallTimer::get_time_mark(); idi nb_id = out_edges[e_i]; { // Sequential edition if (is_visited[nb_id]) { tmp_time_distance_computation += WallTimer::get_time_mark(); continue; } is_visited[nb_id] = 1; } auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); dataf norm = *nb_data++; ++tmp_count_computation; distf dist = compute_distance_with_norm(nb_data, query_data, norm); tmp_time_distance_computation += WallTimer::get_time_mark(); if (dist > set_L[L - 1 + base_set_L].distance_) { continue; } ++tmp_count_add_to_queue; Candidate cand(nb_id, dist, false); // Add to the local queue. // tmp_time_pick_top_m -= WallTimer::get_time_mark(); tmp_time_add_to_queue -= WallTimer::get_time_mark(); if (0 != tid) { // Non-Master threads using local queues add_into_queue( set_L, (tid - 1) * local_queue_length, local_queues_ends[tid - 1], local_queue_length, cand); } else { // Thread 0 maintains the "global" queue idi r = add_into_queue( set_L, base_set_L, local_queues_ends[num_threads_ - 1], L, cand); if (r < nk) { nk = r; } } tmp_time_add_to_queue += WallTimer::get_time_mark(); // tmp_time_pick_top_m += WallTimer::get_time_mark(); } } time_add_to_queue_ += tmp_time_add_to_queue; tmp_time_add_to_queue = 0; // } time_distance_computation_ += tmp_time_distance_computation; tmp_time_distance_computation = 0; time_pick_top_m_ += tmp_time_pick_top_m; tmp_time_pick_top_m = 0; top_m_candidates_end = 0; // Clear top_m_candidates count_add_to_queue_ += tmp_count_add_to_queue; tmp_count_add_to_queue = 0; count_distance_computation_ += tmp_count_computation; tmp_count_computation = 0; time_expand_ += WallTimer::get_time_mark(); // // Merge. Merge all queues in parallel. 
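/*
 * The merge below folds every non-empty local queue back into the global
 * queue; its return value is consumed like an insertion position, i.e. the
 * lowest index of the global queue that received a new candidate (an
 * assumption from how `r` is used here, not a documented contract). nk then
 * holds the lowest position improved during this whole iteration, and k is
 * rewound accordingly:
 *
 *     if (nk <= last_k)  k = nk;          // something better appeared at or
 *                                         // before the last expanded slot,
 *                                         // so re-scan from there
 *     else               k = last_k + 1;  // nothing landed ahead of us,
 *                                         // continue past the last pick
 *
 * Worked example: with last_k = 7, an insertion at position 3 gives nk = 3,
 * so the next iteration re-examines positions 3 onward; if every insertion
 * landed at positions greater than 7, k simply advances to 8.
 */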
{ time_merge_ -= WallTimer::get_time_mark(); if (num_threads_ > 1) { idi r = merge_all_queues_para_array( set_L, local_queues_ends, local_queue_length, L); if (r < nk) { nk = r; } } time_merge_ += WallTimer::get_time_mark(); } if (nk <= last_k) { k = nk; } else { k = last_k + 1; } {// Scale M if (M < value_M_max) { M <<= 1; } else { M = value_M_max; } } } } time_parallel_phase_ += WallTimer::get_time_mark(); time_ending_ -= WallTimer::get_time_mark(); #pragma omp parallel for for (idi k_i = 0; k_i < K; ++k_i) { set_K[k_i] = set_L[k_i + base_set_L].id_; // set_K[k_i] = set_L[k_i].id_; } {// Reset // std::fill(is_visited.begin(), is_visited.end(), 0); is_visited.reset(); // is_visited.clear_all(); std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0); } time_ending_ += WallTimer::get_time_mark(); // {//test // if (3 == query_id) { // exit(1); // } // } } inline void Searching::para_search_with_top_m_merge_queues_middle_m_no_merge( const uint64_t computation_threshold, const idi value_M_middle, const idi value_M_max, const idi query_id, const idi K, const idi L, const idi init_size, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K, const idi local_queue_length, // Maximum size of local queue const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; std::vector<idi> &local_queues_ends, // Sizes of local queue std::vector<idi> &top_m_candidates, boost::dynamic_bitset<> &is_visited) { uint64_t count_single_query_computation = 0; uint64_t count_init_computation = 0; uint64_t count_seq_computation = 0; uint64_t count_par_computation = 0; // {//test // printf("query_id: %u\n", query_id); // } // time_initialization_ -= WallTimer::get_time_mark(); // const idi base_set_L = (num_threads_ - 1) * local_queue_length; { #pragma omp parallel for for (idi c_i = 0; c_i < init_size; ++c_i) { // for (idi c_i = 0; c_i < L; ++c_i) { is_visited[init_ids[c_i]] = 1; // is_visited.atomic_set_bit(init_ids[c_i]); } } const dataf *query_data = queries_load_ + query_id * dimension_; #pragma omp parallel for for (idi v_i = 0; v_i < init_size; ++v_i) { // for (idi v_i = 0; v_i < L; ++v_i) { idi v_id = init_ids[v_i]; _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); } uint64_t tmp_count_computation = 0; // Get the distances of all candidates, store in the set set_L. //#pragma omp parallel for #pragma omp parallel for reduction(+ : tmp_count_computation) for (unsigned i = 0; i < init_size; i++) { // for (unsigned i = 0; i < L; i++) { unsigned v_id = init_ids[i]; auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); dataf norm = *v_data++; // ++count_distance_computation_; ++tmp_count_computation; distf dist = compute_distance_with_norm(v_data, query_data, norm); set_L[i + base_set_L] = Candidate(v_id, dist, false); // False means not checked. // set_L[i] = Candidate(v_id, dist, false); // False means not checked. 
} count_distance_computation_ += tmp_count_computation; count_init_computation += tmp_count_computation; count_single_query_computation += tmp_count_computation; tmp_count_computation = 0; // std::sort(set_L.begin(), set_L.begin() + L); std::sort( set_L.begin() + base_set_L, set_L.begin() + base_set_L + init_size); // set_L.begin() + base_set_L + L); local_queues_ends[num_threads_ - 1] = init_size; // local_queues_ends[num_threads_ - 1] = L; // time_initialization_ += WallTimer::get_time_mark(); // time_sequential_phase_ -= WallTimer::get_time_mark(); // std::vector<idi> top_m_candidates(M); idi &global_queue_size = local_queues_ends[num_threads_ - 1]; idi top_m_candidates_end = 0; idi k = 0; // Index of first unchecked candidate. idi tmp_count = 0; // for debug idi M = 1; { // Single thread while (k < L && M < value_M_middle && count_single_query_computation <= computation_threshold) { ++tmp_count; // {//test // printf("tmp_count: %d\n", tmp_count); // } // int real_threads = std::min(static_cast<int>(M), num_threads_); // idi queue_base = num_threads_ - real_threads; // Select M candidates idi last_k = L; // Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. for (idi c_i = k; c_i < global_queue_size && top_m_candidates_end < M; ++c_i) { // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { idi index_set_L = c_i + base_set_L; if (set_L[index_set_L].is_checked_) { continue; } last_k = c_i; // Record the location of the last candidate selected. set_L[index_set_L].is_checked_ = true; top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; } idi nk = L; // Push M candidates' neighbors into the queue. for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { idi cand_id = top_m_candidates[c_i]; _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); idi out_degree = *out_edges++; for (idi n_i = 0; n_i < out_degree; ++n_i) { _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); } for (idi e_i = 0; e_i < out_degree; ++e_i) { idi nb_id = out_edges[e_i]; { // Sequential edition if (is_visited[nb_id]) { continue; } is_visited[nb_id] = 1; } auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); dataf norm = *nb_data++; // ++count_distance_computation_; ++tmp_count_computation; distf dist = compute_distance_with_norm(nb_data, query_data, norm); if (dist > set_L[global_queue_size - 1 + base_set_L].distance_) { // if (dist > set_L[L - 1 + base_set_L].distance_) { continue; } Candidate cand(nb_id, dist, false); // Thread 0 maintains the "global" queue idi r = add_into_queue( set_L, base_set_L, global_queue_size, // local_queues_ends[num_threads_ - 1], L, cand); if (r < nk) { nk = r; } } } top_m_candidates_end = 0; // Clear top_m_candidates count_distance_computation_ += tmp_count_computation; count_seq_computation += tmp_count_computation; count_single_query_computation += tmp_count_computation; tmp_count_computation = 0; // {// Local queues' ends // printf("query%u:iter: %u", query_id, tmp_count); // for (int i_t = 0; i_t < num_threads_; ++i_t) { // printf(" [%u]: %u", i_t, local_queues_ends[i_t]); // } // printf("\n"); // } if (nk <= last_k) { k = nk; } else { k = last_k + 1; } {// Scale M if (M < value_M_max) { M <<= 1; } else { M = value_M_max; } } } } // time_sequential_phase_ += WallTimer::get_time_mark(); // time_parallel_phase_ -= WallTimer::get_time_mark(); { // Multiple Threads 
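/*
 * Both the single-threaded loop above and the multi-threaded loop below are
 * additionally bounded by computation_threshold: count_single_query_computation
 * accumulates every distance computation for this query (split into
 * count_init_computation, count_seq_computation and count_par_computation for
 * the commented-out breakdown printout near the end), and each loop stops as
 * soon as the budget is exceeded, even if k has not yet reached L. For
 * example, with a hypothetical budget of 100000, a query whose initialization
 * alone cost 100001 computations skips both search loops entirely.
 */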
while (k < L and count_single_query_computation <= computation_threshold) { // while (k < L) { ++tmp_count; // {//test // printf("tmp_count: %d " // "k: %u " // "global_queue_size: %u\n", // tmp_count, // k, // global_queue_size); // } // int real_threads = std::min(static_cast<int>(M), num_threads_); // idi queue_base = num_threads_ - real_threads; // Select M candidates idi last_k = L; // Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. for (idi c_i = k; c_i < global_queue_size && top_m_candidates_end < M; ++c_i) { // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { idi index_set_L = c_i + base_set_L; if (set_L[index_set_L].is_checked_) { continue; } last_k = c_i; // Record the location of the last candidate selected. set_L[index_set_L].is_checked_ = true; top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; } idi nk = L; // Push M candidates' neighbors into the queue. //#pragma omp parallel for reduction(+ : tmp_count_computation) num_threads(real_threads) #pragma omp parallel for reduction(+ : tmp_count_computation) for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { int tid = omp_get_thread_num(); idi cand_id = top_m_candidates[c_i]; _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); idi out_degree = *out_edges++; for (idi n_i = 0; n_i < out_degree; ++n_i) { _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); } for (idi e_i = 0; e_i < out_degree; ++e_i) { idi nb_id = out_edges[e_i]; { // Sequential edition if (is_visited[nb_id]) { continue; } is_visited[nb_id] = 1; } auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); dataf norm = *nb_data++; // ++count_distance_computation_; ++tmp_count_computation; distf dist = compute_distance_with_norm(nb_data, query_data, norm); if (dist > set_L[global_queue_size - 1 + base_set_L].distance_) { // if (dist > set_L[L - 1 + base_set_L].distance_) { continue; } Candidate cand(nb_id, dist, false); // Add to the local queue. if (0 != tid) { // Non-Master threads using local queues add_into_queue( set_L, (tid - 1) * local_queue_length, local_queues_ends[tid - 1], local_queue_length, cand); } else { // Thread 0 maintains the "global" queue idi r = add_into_queue( set_L, base_set_L, global_queue_size, // local_queues_ends[num_threads_ - 1], L, cand); if (r < nk) { nk = r; } } } } top_m_candidates_end = 0; // Clear top_m_candidates count_distance_computation_ += tmp_count_computation; count_par_computation += tmp_count_computation; count_single_query_computation += tmp_count_computation; tmp_count_computation = 0; // {// Local queues' ends // printf("query%u:iter: %u", query_id, tmp_count); // for (int i_t = 0; i_t < num_threads_; ++i_t) { // printf(" [%u]: %u", i_t, local_queues_ends[i_t]); // } // printf("\n"); // } // Merge. Merge all queues in parallel. 
{ if (num_threads_ > 1) { // idi r = merge_all_queues_queue_base( // set_L, // local_queues_ends, // queue_base, // real_threads, // local_queue_length, // L); idi r = merge_all_queues_para_array( set_L, local_queues_ends, local_queue_length, L); if (r < nk) { nk = r; } } } if (nk <= last_k) { k = nk; } else { k = last_k + 1; } {// Scale M if (M < value_M_max) { M <<= 1; } else { M = value_M_max; } } // {// Print relative distance //// distf top_dist = set_L[base_set_L].distance_; // for (idi i_l = 0; i_l < L; ++i_l) { // printf("%u %f\n", // tmp_count, set_L[i_l + base_set_L].distance_); //// tmp_count, set_L[i_l + base_set_L].distance_ - top_dist); // } // } } } // time_parallel_phase_ += WallTimer::get_time_mark(); #pragma omp parallel for for (idi k_i = 0; k_i < K; ++k_i) { set_K[k_i] = set_L[k_i + base_set_L].id_; // set_K[k_i] = set_L[k_i].id_; } {// Reset // std::fill(is_visited.begin(), is_visited.end(), 0); is_visited.reset(); // is_visited.clear_all(); std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0); } // {//test // if (3 == query_id) { // exit(1); // } // } // {//test // printf("count_single: %lu " // "ct_init: %lu " // "ct_seq: %lu " // "ct_par: %lu\n", // count_single_query_computation, // count_init_computation, // count_seq_computation, // count_par_computation); // } } ///* // * 6/15/2020-14:40 // * Queues merging together to the global queue // */ //inline void Searching::para_search_with_top_m_merge_queues_sequential_merge( // const idi value_M_middle, // const idi value_M_max, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; // std::vector<idi> &local_queues_ends, // Sizes of local queue // std::vector<idi> &top_m_candidates, // boost::dynamic_bitset<> &is_visited) //{ //// const idi base_set_L = (num_threads_ - 1) * local_queue_length; // { //#pragma omp parallel for // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = 1; //// is_visited.atomic_set_bit(init_ids[c_i]); // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; //#pragma omp parallel for // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // uint64_t tmp_count_computation = 0; // // Get the distances of all candidates, store in the set set_L. ////#pragma omp parallel for //#pragma omp parallel for reduction(+ : tmp_count_computation) // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; //// ++count_distance_computation_; // ++tmp_count_computation; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i + base_set_L] = Candidate(v_id, dist, false); // False means not checked. //// set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; //// std::sort(set_L.begin(), set_L.begin() + L); // std::sort( // set_L.begin() + base_set_L, // set_L.begin() + base_set_L + L); // local_queues_ends[num_threads_ - 1] = L; // //// std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. 
// idi tmp_count = 0; // for debug // idi M = 1; // // { // Single thread // while (k < L && M < value_M_middle) { // ++tmp_count; //// {//test //// printf("tmp_count: %d\n", tmp_count); //// } // // // Select M candidates // idi last_k = L; //// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // idi index_set_L = c_i + base_set_L; // if (set_L[index_set_L].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[index_set_L].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; // } // // // idi nk = L; // // Push M candidates' neighbors into the queue. // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // { // Sequential edition // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = 1; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation_; // ++tmp_count_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L - 1 + base_set_L].distance_) { // continue; // } // // Candidate cand(nb_id, dist, false); // // Thread 0 maintains the "global" queue // idi r = add_into_queue( // set_L, // base_set_L, // local_queues_ends[num_threads_ - 1], // L, // cand); // if (r < nk) { // nk = r; // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // // {// Scale M // if (M < value_M_max) { // M <<= 1; // } else { // M = value_M_max; // } // } // } // } // // { // Multiple Threads // while (k < L) { // ++tmp_count; //// {//test //// if (num_threads_ == 2) { //// printf("tmp_count: %d " //// "k: %u\n", //// tmp_count, //// k); //// } //// } // // Select M candidates // idi last_k = L; //// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // idi index_set_L = c_i + base_set_L; // if (set_L[index_set_L].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[index_set_L].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; // } // // // idi nk = L; // // Push M candidates' neighbors into the queue. 
////#pragma omp parallel for reduction(+ : tmp_count_computation) num_threads(real_threads) //#pragma omp parallel for reduction(+ : tmp_count_computation) // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // int tid = omp_get_thread_num(); // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // { // Sequential edition // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = 1; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation_; // ++tmp_count_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L - 1 + base_set_L].distance_) { // continue; // } // // Candidate cand(nb_id, dist, false); // // Add to the local queue. // if (0 != tid) { // // Non-Master threads using local queues // add_into_queue( // set_L, // (tid - 1) * local_queue_length, // local_queues_ends[tid - 1], // local_queue_length, // cand); // } else { // // Thread 0 maintains the "global" queue // idi r = add_into_queue( // set_L, // base_set_L, // local_queues_ends[num_threads_ - 1], // L, // cand); // if (r < nk) { // nk = r; // } // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; // //// // Merge. Merge all queues in parallel. 
// { //// {//test //// for (idi q_i = 0; q_i < num_threads_; ++q_i) { //// if (0 == local_queues_ends[q_i]) { //// continue; //// } //// for (idi e_i = 0; e_i < local_queues_ends[q_i]; ++e_i) { //// printf("tmp_count: %u " //// "q_i: %u " //// "[%u]: (%u, %f)\n", //// tmp_count, //// q_i, //// e_i, set_L[q_i * local_queue_length + e_i].id_, set_L[q_i * local_queue_length + e_i].distance_); //// } //// } //// } //// time_merge_ -= WallTimer::get_time_mark(); // if (num_threads_ > 1) { // idi r = merge_all_queues_all_together_in_sequential( // set_L, // local_queues_ends, // local_queue_length, // L); //// idi r = merge_all_queues_para_array( //// set_L, //// local_queues_ends, //// local_queue_length, //// L); // if (r < nk) { // nk = r; // } //// {//test //// printf("tmp_count: %u " //// "r: %u " //// "last_k: %u\n", //// tmp_count, //// r, //// last_k); //// for (idi l_i = 0; l_i < L; ++l_i) { //// printf("tmp_count: %u " //// "[%u]: (%u, %f)\n", //// tmp_count, //// l_i, set_L[l_i + base_set_L].id_, set_L[l_i + base_set_L].distance_); //// } //// } // } // //// time_merge_ += WallTimer::get_time_mark(); // } // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // {// Scale M // if (M < value_M_max) { // M <<= 1; // } else { // M = value_M_max; // } // } // } // } // //#pragma omp parallel for // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i + base_set_L].id_; //// set_K[k_i] = set_L[k_i].id_; // } // // {// Reset //// std::fill(is_visited.begin(), is_visited.end(), 0); // is_visited.reset(); //// is_visited.clear_all(); // std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0); // } // //// {//test //// if (0 == query_id) { //// exit(1); //// } //// } //} ///* // * 6/19/2020: // * Intra-query + Inter-query // */ //inline void Searching::para_search_with_top_m_nested_para( // const idi batch_start, // const idi batch_size, // const idi value_M_middle, // const idi value_M_max, // const idi K, // const idi L, // std::vector< std::vector<Candidate> > &set_L_list, // const std::vector<idi> &init_ids, // std::vector< std::vector<idi> > &set_K_list, // const idi local_queue_length, // Maximum size of local queue // const idi base_set_L, // base_set_L = (num_threads_intra_query_ - 1) * local_queue_length; // std::vector< std::vector<idi> > &local_queues_ends_list, // Sizes of local queue // std::vector< std::vector<idi> > &top_m_candidates_list, // std::vector< boost::dynamic_bitset<> > &is_visited_list) //{ // {// Initialize is_visited flag array //#pragma omp parallel for num_threads(num_threads_inter_query_) // for (idi q_i = 0; q_i < batch_size; ++q_i) { // auto &is_visited = is_visited_list[q_i]; //#pragma omp parallel for num_threads(num_threads_intra_query_) // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = 1; // } // } // } // //#pragma omp parallel for // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // uint64_t tmp_count_total_computation = 0; //#pragma omp parallel for num_threads(num_threads_inter_query_) reduction(+ : tmp_count_total_computation) // for (idi q_i = 0; q_i < batch_size; ++q_i) { // idi query_id = batch_start + q_i; // auto &set_L = set_L_list[q_i]; // auto &local_queues_ends = local_queues_ends_list[q_i]; // auto &is_visited = is_visited_list[q_i]; // // const dataf *query_data = queries_load_ + query_id * dimension_; ////#pragma omp parallel for //// for (idi v_i = 0; v_i < L; ++v_i) { //// idi v_id = 
init_ids[v_i]; //// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); //// } // uint64_t tmp_count_computation = 0; // // Get the distances of all candidates, store in the set set_L. ////#pragma omp parallel for //#pragma omp parallel for reduction(+ : tmp_count_computation) num_threads(num_threads_intra_query_) // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; //// ++count_distance_computation_; // ++tmp_count_computation; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i + base_set_L] = Candidate(v_id, dist, false); // False means not checked. //// set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } //// count_distance_computation_ += tmp_count_computation; // tmp_count_total_computation += tmp_count_computation; // tmp_count_computation = 0; //// std::sort(set_L.begin(), set_L.begin() + L); // std::sort( // set_L.begin() + base_set_L, // set_L.begin() + base_set_L + L); // local_queues_ends[num_threads_intra_query_ - 1] = L; // //// std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. // idi tmp_count = 0; // for debug // idi M = 1; // // auto &top_m_candidates = top_m_candidates_list[q_i]; // { // Single thread // while (k < L && M < value_M_middle) { // ++tmp_count; //// {//test //// printf("tmp_count: %d\n", tmp_count); //// } // // // Select M candidates // idi last_k = L; //// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // idi index_set_L = c_i + base_set_L; // if (set_L[index_set_L].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[index_set_L].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; // } // // idi nk = L; // // Push M candidates' neighbors into the queue. 
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // { // Sequential edition // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = 1; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation_; // ++tmp_count_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); //// {//test //// if (391655 == nb_id) { //// printf("tmp_count: %u " //// "nb_id: %u " //// "distf: %f\n", //// tmp_count, //// nb_id, //// dist); //// } //// } // if (dist > set_L[L - 1 + base_set_L].distance_) { // continue; // } // // Candidate cand(nb_id, dist, false); // // Thread 0 maintains the "global" queue // idi r = add_into_queue( // set_L, // base_set_L, // local_queues_ends[num_threads_intra_query_ - 1], // L, // cand); // if (r < nk) { // nk = r; // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates //// count_distance_computation_ += tmp_count_computation; // tmp_count_total_computation += tmp_count_computation; // tmp_count_computation = 0; // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // // {// Scale M // if (M < value_M_max) { // M <<= 1; // } else { // M = value_M_max; // } // } // } // } // // { // Multiple Threads // while (k < L) { // ++tmp_count; //// {//test //// printf("tmp_count: %d\n", tmp_count); //// } // // Select M candidates // idi last_k = L; //// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // idi index_set_L = c_i + base_set_L; // if (set_L[index_set_L].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[index_set_L].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; // } // // idi nk = L; // // Push M candidates' neighbors into the queue. 
//#pragma omp parallel for reduction(+ : tmp_count_computation) num_threads(num_threads_intra_query_) // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // int tid = omp_get_thread_num(); // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // { // Sequential edition // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = 1; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation_; // ++tmp_count_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); //// {//test //// if (391655 == nb_id) { //// printf("tmp_count: %u " //// "nb_id: %u " //// "distf: %f\n", //// tmp_count, //// nb_id, //// dist); //// } //// } // if (dist > set_L[L - 1 + base_set_L].distance_) { // continue; // } // // Candidate cand(nb_id, dist, false); // // Add to the local queue. // if (0 != tid) { // // Non-Master threads using local queues // add_into_queue( // set_L, // (tid - 1) * local_queue_length, // local_queues_ends[tid - 1], // local_queue_length, // cand); // } else { // // Thread 0 maintains the "global" queue // idi r = add_into_queue( // set_L, // base_set_L, // local_queues_ends[num_threads_intra_query_ - 1], // L, // cand); // if (r < nk) { // nk = r; // } // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates //// count_distance_computation_ += tmp_count_computation; // tmp_count_total_computation += tmp_count_computation; // tmp_count_computation = 0; // //// // Merge. Merge all queues in parallel. // { //// time_merge_ -= WallTimer::get_time_mark(); // if (num_threads_intra_query_ > 1) { // idi r = merge_all_queues_para_array( // set_L, // local_queues_ends, // local_queue_length, // L); // if (r < nk) { // nk = r; // } // } //// time_merge_ += WallTimer::get_time_mark(); // } // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // {// Scale M // if (M < value_M_max) { // M <<= 1; // } else { // M = value_M_max; // } // } // } // } // count_distance_computation_ += tmp_count_total_computation; // tmp_count_total_computation = 0; // // auto &set_K = set_K_list[query_id]; // //#pragma omp parallel for num_threads(num_threads_intra_query_) // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i + base_set_L].id_; //// set_K[k_i] = set_L[k_i].id_; // } // // {// Reset //// std::fill(is_visited.begin(), is_visited.end(), 0); // is_visited.reset(); //// is_visited.clear_all(); // std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0); // } // } // //// {//test //// if (3 == query_id) { //// exit(1); //// } //// } //// { //// for (idi k_i = 0; k_i < K; ++k_i) { //// printf("%u: (%u %f)\n", //// k_i, set_L_list[0][k_i].id_, set_L_list[0][k_i].distance_); //// } //// if (0 == batch_start) { //// exit(1); //// } //// } //} /* * 6/22/2020-21:30 * Do searching on the local_set_L * local_set_L is already sorted * is_visited is already set up. 
*/ inline void Searching::subsearch_with_top_m( const idi value_M_max, const idi query_id, const idi local_L, std::vector<Candidate> &set_L, const idi set_L_start, idi &set_L_size, std::vector<idi> &local_top_m_candidates, boost::dynamic_bitset<> &is_visited, uint64_t &local_count_distance_computation) { const dataf *query_data = queries_load_ + query_id * dimension_; // idi local_top_m_candidates_end = 0; idi k = 0; // Index of first unchecked candidate. idi iter = 0; idi M = 1; // value of M while (k < local_L) { ++iter; subsearch_top_m_for_one_iteration( iter, k, M, query_id, query_data, local_L, set_L, set_L_start, set_L_size, local_top_m_candidates, is_visited, local_count_distance_computation); {// Scale M if (M < value_M_max) { M <<= 1; } else { M = value_M_max; } } } // {//test // printf("set_L_start: %u " // "local_count_distance_computation: %lu\n", // set_L_start, // local_count_distance_computation); // } } //// Backup //inline void Searching::subsearch_with_top_m( // const idi value_M_max, // const idi query_id, // const idi local_L, // std::vector<Candidate> &set_L, // const idi base_set_L, // idi &set_L_end, // std::vector<idi> &local_top_m_candidates, // boost::dynamic_bitset<> &is_visited, // uint64_t &local_count_distance_computation) //{ // const dataf *query_data = queries_load_ + query_id * dimension_; // idi local_top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. // idi iter = 0; // idi M = 1; // value of M // // while (k < local_L) { // ++iter; // // Select M candidates // idi last_k = local_L; //// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. // for (idi c_i = k; c_i < set_L_end && local_top_m_candidates_end < M; ++c_i) { // idi index_set_L = c_i + base_set_L; // if (set_L[index_set_L].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[index_set_L].is_checked_ = true; // local_top_m_candidates[local_top_m_candidates_end++] = set_L[index_set_L].id_; // } // // idi nk = local_L; // // Push M candidates' neighbors into the queue. 
// for (idi c_i = 0; c_i < local_top_m_candidates_end; ++c_i) { // idi cand_id = local_top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // { // Sequential edition // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = 1; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; // ++local_count_distance_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[set_L_end - 1 + base_set_L].distance_) { // continue; // } // // Candidate cand(nb_id, dist, false); // // Thread 0 maintains the "global" queue // idi r = add_into_queue( // set_L, // base_set_L, // set_L_end, // local_L, // cand); // if (r < nk) { // nk = r; // } // } // } // local_top_m_candidates_end = 0; // Clear top_m_candidates // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // // {// Scale M // if (M < value_M_max) { // M <<= 1; // } else { // M = value_M_max; // } // } // } //} /* * 7/6/2020-23:17 * Subsearch only 1 iteration using top-m */ inline void Searching::subsearch_top_m_for_one_iteration( const idi iter, idi &k_uc, const idi value_M, const idi query_id, const dataf *query_data, const idi L, std::vector<Candidate> &set_L, const idi set_L_start, idi &set_L_size, std::vector<idi> &top_m_candidates, boost::dynamic_bitset<> &is_visited, uint64_t &count_distance_computation) { // Select M candidates idi top_m_candidates_end = 0; idi last_k = L; // Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. for (idi c_i = k_uc; c_i < set_L_size && top_m_candidates_end < value_M; ++c_i) { idi index_set_L = c_i + set_L_start; if (set_L[index_set_L].is_checked_) { continue; } last_k = c_i; // Record the location of the last candidate selected. set_L[index_set_L].is_checked_ = true; top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; // {//test // M_ids_.push_back(set_L[index_set_L].id_); // } } idi nk = L; // Push M candidates' neighbors into the queue. 
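/*
 * The loop below relies on add_into_queue() keeping
 * set_L[set_L_start .. set_L_start + set_L_size) sorted by distance.
 * A minimal sketch of the assumed behavior (the real member function is
 * defined elsewhere in this class; this reference copy only illustrates the
 * contract used here: insert into a sorted, fixed-capacity segment and
 * return the insertion position, or the capacity if the candidate did not
 * qualify):
 *
 *     idi add_into_queue_sketch(std::vector<Candidate> &queue,
 *                               const idi start,
 *                               idi &size,            // valid entries
 *                               const idi capacity,
 *                               const Candidate &cand)
 *     {
 *         if (size == capacity &&
 *                 cand.distance_ >= queue[start + size - 1].distance_) {
 *             return capacity;                        // not competitive
 *         }
 *         idi pos = start + size;
 *         while (pos > start && cand.distance_ < queue[pos - 1].distance_) {
 *             if (pos < start + capacity) {
 *                 queue[pos] = queue[pos - 1];        // shift larger entries
 *             }
 *             --pos;
 *         }
 *         queue[pos] = cand;
 *         if (size < capacity) {
 *             ++size;
 *         }
 *         return pos - start;                         // position in the queue
 *     }
 */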
for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { idi cand_id = top_m_candidates[c_i]; _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); idi out_degree = *out_edges++; for (idi n_i = 0; n_i < out_degree; ++n_i) { _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); } for (idi e_i = 0; e_i < out_degree; ++e_i) { idi nb_id = out_edges[e_i]; { // Sequential edition if (is_visited[nb_id]) { continue; } is_visited[nb_id] = 1; } auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); dataf norm = *nb_data++; ++count_distance_computation; distf dist = compute_distance_with_norm(nb_data, query_data, norm); if (dist > set_L[set_L_size - 1 + set_L_start].distance_) { continue; } Candidate cand(nb_id, dist, false); idi r = add_into_queue( set_L, set_L_start, set_L_size, L, cand); if (r < nk) { nk = r; } } } // top_m_candidates_end = 0; // Clear top_m_candidates if (nk <= last_k) { k_uc = nk; } else { k_uc = last_k + 1; } // {//test // for (idi l_i = 0; l_i < set_L_size; ++l_i) { // L_ids_.push_back(set_L[set_L_start + l_i].id_); // } // std::sort(L_ids_.begin(), L_ids_.end()); // std::sort(M_ids_.begin(), M_ids_.end()); // for (idi m_i = 0; m_i < M_ids_.size(); ++m_i) { // printf("query_id: %u " // "iter: %u " // "M[%u]: " // "%u\n", // query_id, // iter, // m_i, // M_ids_[m_i]); // } // M_ids_.clear(); // for (idi l_i = 0; l_i < L_ids_.size(); ++l_i) { // printf("query_id: %u " // "iter: %u " // "L[%u]: " // "%u\n", // query_id, // iter, // l_i, // L_ids_[l_i]); // } // L_ids_.clear(); // } } ///* // * One more parameter for distance bound // */ //inline void Searching::subsearch_top_m_for_one_iteration_lth( // const distf bound_lth, // const idi iter, // idi &k_uc, // const idi value_M, // const idi query_id, // const dataf *query_data, // const idi L, // std::vector<Candidate> &set_L, // const idi set_L_start, // idi &set_L_size, // std::vector<idi> &top_m_candidates, // boost::dynamic_bitset<> &is_visited, // uint64_t &count_distance_computation) //{ // // Select M candidates // idi top_m_candidates_end = 0; // idi last_k = L; //// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. // for (idi c_i = k_uc; c_i < set_L_size && top_m_candidates_end < value_M; ++c_i) { // idi index_set_L = c_i + set_L_start; // if (set_L[index_set_L].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[index_set_L].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; // } // // idi nk = L; // // Push M candidates' neighbors into the queue. 
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // { // Sequential edition // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = 1; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; // ++count_distance_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > bound_lth) { // continue; // } // // Candidate cand(nb_id, dist, false); // idi r = add_into_queue( // set_L, // set_L_start, // set_L_size, // L, // cand); // if (r < nk) { // nk = r; // } // } // } // // if (nk <= last_k) { // k_uc = nk; // } else { // k_uc = last_k + 1; // } //} /* * 7/24/2020-10:53 * Subsearch for one iteration, with the global L-th value as the bound, * and the top_m_position indicates the bound for local top-M vertices. */ inline void Searching::subsearch_top_m_for_one_iteration_lth_mth( const distf bound_lth, // const idi top_m_position, const idi iter, idi &k_uc, const idi local_m_count, const idi query_id, const dataf *query_data, const idi L, std::vector<Candidate> &set_L, const idi set_L_start, idi &set_L_size, std::vector<idi> &top_m_candidates, boost::dynamic_bitset<> &is_visited, uint64_t &count_distance_computation, double &time_pick_top_m, uint64_t &count_add_to_queue, double &time_distance_computation, double &time_add_to_queue) { // {//test // printf("query_id: %u " // "iter: %u " // "tid: %u \n", // query_id, // iter, // omp_get_thread_num()); // } // Select M candidates idi top_m_candidates_end = 0; idi last_k = L; // Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. // for (idi c_i = k_uc; c_i < top_m_position; ++c_i) { time_pick_top_m -= WallTimer::get_time_mark(); for (idi c_i = k_uc; c_i < set_L_size && top_m_candidates_end < local_m_count; ++c_i) { idi index_set_L = c_i + set_L_start; if (set_L[index_set_L].is_checked_) { continue; } last_k = c_i; // Record the location of the last candidate selected. set_L[index_set_L].is_checked_ = true; top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; // {//test // M_ids_.push_back(set_L[index_set_L].id_); // } } time_pick_top_m += WallTimer::get_time_mark(); idi nk = L; // Push M candidates' neighbors into the queue. 
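    // Same expansion as in subsearch_top_m_for_one_iteration, but instrumented:
    // time_pick_top_m, time_distance_computation and time_add_to_queue bracket
    // the corresponding phases with WallTimer::get_time_mark(), and
    // count_add_to_queue counts successful insertions. Although a global
    // bound_lth is passed in, the active pruning test below compares against
    // this queue's own current worst entry; the bound_lth comparison is kept
    // only as the commented-out alternative.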
for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { time_pick_top_m -= WallTimer::get_time_mark(); idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } time_pick_top_m += WallTimer::get_time_mark(); for (idi e_i = 0; e_i < out_degree; ++e_i) { time_distance_computation -= WallTimer::get_time_mark(); idi nb_id = out_edges[e_i]; { // Sequential edition if (is_visited[nb_id]) { time_distance_computation += WallTimer::get_time_mark(); continue; } is_visited[nb_id] = 1; } auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); dataf norm = *nb_data++; ++count_distance_computation; distf dist = compute_distance_with_norm(nb_data, query_data, norm); time_distance_computation += WallTimer::get_time_mark(); if (dist > set_L[set_L_start + set_L_size - 1].distance_) { // if (dist > bound_lth) { continue; } ++count_add_to_queue; Candidate cand(nb_id, dist, false); // time_pick_top_m -= WallTimer::get_time_mark(); time_add_to_queue -= WallTimer::get_time_mark(); idi r = add_into_queue( set_L, set_L_start, set_L_size, L, cand); if (r < nk) { nk = r; } time_add_to_queue += WallTimer::get_time_mark(); // time_pick_top_m += WallTimer::get_time_mark(); } } if (nk <= last_k) { k_uc = nk; } else { k_uc = last_k + 1; } } ///* // * 7/26/2020-15:41 // * L-th and M-th Selection. // * Seq-Par Phases: when M is 1 and 2, do sequential searching; // * When M is equal and larger than 4, do parallel searching. // * It's for load-balance issue. // */ //inline void Searching::para_search_with_top_m_subsearch_v3( // const idi local_M_max, // const idi local_M_middle, // const idi query_id, // const idi K, // const idi global_L, // const idi local_L, //// const idi total_L, //// const idi init_queue_size, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const std::vector<idi> &local_queues_starts, // std::vector<idi> &local_queues_sizes, // std::vector<idi> &local_m_counts, // std::vector< std::vector<idi> > &top_m_candidates_list, // boost::dynamic_bitset<> &is_visited) //{ // time_initialization_ -= WallTimer::get_time_mark(); // uint64_t tmp_count_computation = 0; // {// Initialization // // is_visited flag array ////#pragma omp parallel for //// Cannot use OMP for bit array is_visited! // for (idi c_i = 0; c_i < global_L; ++c_i) { // is_visited[init_ids[c_i]] = 1; // } // // const dataf *query_data = queries_load_ + query_id * dimension_; //#pragma omp parallel for // for (idi v_i = 0; v_i < global_L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // // Get the distances of all candidates, store in the set set_L. //#pragma omp parallel for reduction(+ : tmp_count_computation) // for (idi id_i = 0; id_i < global_L; ++id_i) { // idi v_id = init_ids[id_i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; // ++tmp_count_computation; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[id_i] = Candidate(v_id, dist, false); // False means not checked. 
// } // local_queues_sizes[0] = global_L; // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; // std::sort(set_L.begin(), set_L.begin() + global_L); // } // time_initialization_ += WallTimer::get_time_mark(); // // // Searching // if (num_threads_ == 1) { // Single threads //// std::sort( //// set_L.begin(), //// set_L.end()); // subsearch_with_top_m( // local_M_max, // query_id, // local_L, // set_L, // 0, // local_queues_sizes[0], // top_m_candidates_list[0], // is_visited, // tmp_count_computation); // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; // } else { // Multiple threads // const dataf *query_data = queries_load_ + query_id * dimension_; // const idi num_queues = num_threads_; // idi local_M = 1; // idi iter = 0; // std::vector<idi> ks(num_queues, 0); // // time_sequential_phase_ -= WallTimer::get_time_mark(); // {// Sequential Search for M = 1, 2. // idi &k = ks[0]; // while (k < global_L && local_M < local_M_middle) { // ++iter; // subsearch_top_m_for_one_iteration( // iter, // k, // local_M, // query_id, // query_data, // global_L, // set_L, // 0, // local_queues_sizes[0], // top_m_candidates_list[0], // is_visited, // tmp_count_computation); // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; // // {// Double M // if (local_M < local_M_max) { // local_M <<= 1; // } // } // } // } // time_sequential_phase_ += WallTimer::get_time_mark(); // // time_parallel_phase_ -= WallTimer::get_time_mark(); // distf bound_lth = set_L[global_L - 1].distance_; // {// Parallel Search for M >= 4, or local_M_middle // time_assign_s_ -=WallTimer::get_time_mark(); // {// Assign elements from Queue[0] to others // idi dst_i = 1; // for (idi e_i = 1; e_i < global_L; ++e_i) { // idi dest_sub = e_i % num_queues; // if (0 == dest_sub) { // set_L[dst_i++] = set_L[e_i]; // } else { // set_L[local_queues_starts[dest_sub] + local_queues_sizes[dest_sub]++] = set_L[e_i]; // } // } // local_queues_sizes[0] = dst_i; // } // std::fill(ks.begin(), ks.end(), 0); // // // selecting_unchecked_top_M_seq( // query_id, // iter, // set_L, // ks, // local_M, // num_queues, // local_queues_starts, // local_queues_sizes, // local_m_counts); // time_assign_s_ +=WallTimer::get_time_mark(); // // double tmp_time_pick_top_m = 0; // uint64_t tmp_count_add_to_queue = 0; // uint8_t not_finished = 1; // double tmp_time_distance_computation = 0; // double tmp_time_add_to_queue = 0; // while (true) { // time_expand_ -= WallTimer::get_time_mark(); // not_finished = 0; // ++iter; //#pragma omp parallel for reduction(+ : tmp_count_computation) \ // reduction(+ : tmp_time_pick_top_m) \ // reduction(+ : tmp_count_add_to_queue) \ // reduction(+ : tmp_time_distance_computation) \ // reduction(+ : tmp_time_add_to_queue) // for (idi q_i = 0; q_i < num_queues; ++q_i) { // tmp_time_pick_top_m -= WallTimer::get_time_mark(); // idi L_value = q_i == 0 ? 
global_L : local_L; // idi &k = ks[q_i]; // idi &local_queue_size = local_queues_sizes[q_i]; // auto &local_top_m_candidates = top_m_candidates_list[q_i]; // idi local_m_count = local_m_counts[q_i]; //// if (local_M < num_queues && !local_m_count) { //// local_m_count = 1; //// } // tmp_time_pick_top_m += WallTimer::get_time_mark(); // if (!local_m_count) { // continue; // } // not_finished = 1; // const idi local_queue_start = local_queues_starts[q_i]; // // subsearch_top_m_for_one_iteration_lth_mth( // bound_lth, // iter, // k, // local_m_count, // query_id, // query_data, // L_value, // set_L, // local_queue_start, // local_queue_size, // local_top_m_candidates, // is_visited, // tmp_count_computation, // tmp_time_pick_top_m, // tmp_count_add_to_queue, // tmp_time_distance_computation, // tmp_time_add_to_queue); // } // time_add_to_queue_ += tmp_time_add_to_queue; // tmp_time_add_to_queue = 0; // time_distance_computation_ += tmp_time_distance_computation; // tmp_time_distance_computation = 0; // count_add_to_queue_ += tmp_count_add_to_queue; // tmp_count_add_to_queue = 0; // time_pick_top_m_ += tmp_time_pick_top_m; // tmp_time_pick_top_m = 0; // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; // time_expand_ += WallTimer::get_time_mark(); // if (!not_finished) { // break; // } // {// Scale M // if (local_M < local_M_max) { // local_M <<= 1; // } //// else { //// local_M = value_M_max; //// } // } // time_select_ -= WallTimer::get_time_mark(); //#pragma omp parallel sections // { //#pragma omp section // {// Setecting and update local_queues_lengths //// time_select_L_ -= WallTimer::get_time_mark(); // bound_lth = selecting_top_L_seq( // set_L, // global_L, //// local_L, // num_queues, // local_queues_starts, // local_queues_sizes); //// time_select_L_ += WallTimer::get_time_mark(); // } //#pragma omp section // { //// time_select_M_ -= WallTimer::get_time_mark(); // selecting_unchecked_top_M_seq( // query_id, // iter, // set_L, // ks, // local_M, // num_queues, // local_queues_starts, // local_queues_sizes, // local_m_counts); //// time_select_M_ += WallTimer::get_time_mark(); // } // } // time_select_ += WallTimer::get_time_mark(); //// {//test //// printf("query_id: %u " //// "iter: %u", //// query_id, //// iter); //// printf(" local_queues_sizes:"); //// for (idi i = 0; i < num_queues; ++i) { //// printf(" %u", local_queues_sizes[i]); //// } //// printf(" local_m_counts:"); //// for (idi i = 0; i < num_queues; ++i) { //// printf(" %u", local_m_counts[i]); //// } //// printf(" ks:"); //// for (idi i = 0; i < num_queues; ++i) { //// printf(" %u", ks[i]); //// } //// printf("\n"); //// } // } // } // time_parallel_phase_ += WallTimer::get_time_mark(); // } // //// time_merge_ -= WallTimer::get_time_mark(); // time_ending_ -= WallTimer::get_time_mark(); // {// Return the results to set_K // std::vector<idi> pointer(num_threads_, 0); // // get the first // distf min_dist = FLT_MAX; // idi min_q_i; // idi min_id; // idi min_sub; // idi last_id; // for (int q_i = 0; q_i < num_threads_; ++q_i) { // if (pointer[q_i] >= local_queues_sizes[q_i]) { // continue; // } // idi sub = pointer[q_i] + local_queues_starts[q_i]; // distf tmp_dist = set_L[sub].distance_; // idi tmp_id = set_L[sub].id_; // if (tmp_dist < min_dist) { // min_dist = tmp_dist; // min_id = tmp_id; // min_q_i = q_i; // min_sub = sub; // } else if (tmp_dist == min_dist && tmp_id < min_id) { // min_id = tmp_id; // min_q_i = q_i; // min_sub = sub; // } // } // set_K[0] = set_L[min_sub].id_; //// 
{//test //// printf("query_id: %u " //// "[%u]: " //// "(%u, %f)\n", //// query_id, //// 0, //// set_L[min_sub].id_, set_L[min_sub].distance_); //// } // ++pointer[min_q_i]; // last_id = set_K[0]; // // bool is_finished = false; // idi k_i = 1; // while (k_i < K && !is_finished) { // is_finished = true; // min_dist = FLT_MAX; // for (int q_i = 0; q_i < num_threads_; ++q_i) { // const idi local_queue_size = local_queues_sizes[q_i]; // idi sub = pointer[q_i] + local_queues_starts[q_i]; // // while (pointer[q_i] < local_queue_size // && set_L[sub].id_ == last_id) { // ++pointer[q_i]; // ++sub; // } // if (pointer[q_i] >= local_queue_size) { // continue; // } // is_finished = false; // distf tmp_dist = set_L[sub].distance_; // idi tmp_id = set_L[sub].id_; // if (tmp_dist < min_dist) { // min_dist = tmp_dist; // min_id = tmp_id; // min_q_i = q_i; // min_sub = sub; // } else if (tmp_dist == min_dist && tmp_id < min_id) { // min_id = tmp_id; // min_q_i = q_i; // min_sub = sub; // } // } // set_K[k_i] = set_L[min_sub].id_; //// {//test //// printf("query_id: %u " //// "[%u]: " //// "(%u, %f)\n", //// query_id, //// k_i, //// set_L[min_sub].id_, set_L[min_sub].distance_); //// } // ++pointer[min_q_i]; // ++k_i; // } // } //// time_merge_ += WallTimer::get_time_mark(); // // {// Reset //// std::fill(is_visited.begin(), is_visited.end(), 0); // is_visited.reset(); // std::fill(local_queues_sizes.begin() + 1, local_queues_sizes.end(), 0); // } // // time_ending_ += WallTimer::get_time_mark(); //// {//test //// if (3 == query_id) { //// exit(1); //// } //// } //} // ///* // * 7/27/2020-15:33 // * Same with v3, but gather top-m vertices together // */ //inline void Searching::para_search_with_top_m_subsearch_v4( // const idi local_M_max, // const idi local_M_middle, // const idi query_id, // const idi K, // const idi global_L, // const idi local_L, //// const idi total_L, //// const idi init_queue_size, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const std::vector<idi> &local_queues_starts, // std::vector<idi> &local_queues_sizes, // std::vector<idi> &local_m_counts, // std::vector<idi> &top_m_candidates, //// std::vector< std::vector<idi> > &top_m_candidates_list, // boost::dynamic_bitset<> &is_visited) //{ // time_initialization_ -= WallTimer::get_time_mark(); // uint64_t tmp_count_computation = 0; // {// Initialization // // is_visited flag array ////#pragma omp parallel for //// Cannot use OMP for bit array is_visited! // for (idi c_i = 0; c_i < global_L; ++c_i) { // is_visited[init_ids[c_i]] = 1; // } // // const dataf *query_data = queries_load_ + query_id * dimension_; //#pragma omp parallel for // for (idi v_i = 0; v_i < global_L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // // Get the distances of all candidates, store in the set set_L. //#pragma omp parallel for reduction(+ : tmp_count_computation) // for (idi id_i = 0; id_i < global_L; ++id_i) { // idi v_id = init_ids[id_i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; // ++tmp_count_computation; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[id_i] = Candidate(v_id, dist, false); // False means not checked. 
// } // local_queues_sizes[0] = global_L; // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; // std::sort(set_L.begin(), set_L.begin() + global_L); // } // time_initialization_ += WallTimer::get_time_mark(); // // // Searching // if (num_threads_ == 1) { // Single threads //// std::sort( //// set_L.begin(), //// set_L.end()); // subsearch_with_top_m( // local_M_max, // query_id, // local_L, // set_L, // 0, // local_queues_sizes[0], // top_m_candidates, // is_visited, // tmp_count_computation); // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; // } else { // Multiple threads // const dataf *query_data = queries_load_ + query_id * dimension_; // const idi num_queues = num_threads_; // idi local_M = 1; // idi iter = 0; //// std::vector<idi> ks(num_queues, 0); // // time_sequential_phase_ -= WallTimer::get_time_mark(); // {// Sequential Search for M = 1, 2. // idi k = 0; //// idi &k = ks[0]; // while (k < global_L && local_M < local_M_middle) { // ++iter; // subsearch_top_m_for_one_iteration( // iter, // k, // local_M, // query_id, // query_data, // global_L, // set_L, // 0, // local_queues_sizes[0], // top_m_candidates, // is_visited, // tmp_count_computation); // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; // // {// Double M // if (local_M < local_M_max) { // local_M <<= 1; // } // } // } // } // time_sequential_phase_ += WallTimer::get_time_mark(); // // time_parallel_phase_ -= WallTimer::get_time_mark(); // distf bound_lth = set_L[global_L - 1].distance_; // {// Parallel Search for M >= 4, or local_M_middle // time_assign_s_ -=WallTimer::get_time_mark(); // {// Assign elements from Queue[0] to others // idi dst_i = 1; // for (idi e_i = 1; e_i < global_L; ++e_i) { // idi dest_sub = e_i % num_queues; // if (0 == dest_sub) { // set_L[dst_i++] = set_L[e_i]; // } else { // set_L[local_queues_starts[dest_sub] + local_queues_sizes[dest_sub]++] = set_L[e_i]; // } // } // local_queues_sizes[0] = dst_i; // } //// std::fill(ks.begin(), ks.end(), 0); // // idi top_m_candidates_size = 0; //// selecting_unchecked_top_M_seq( //// query_id, //// iter, //// set_L, //// ks, //// local_M, //// num_queues, //// local_queues_starts, //// local_queues_sizes, //// local_m_counts); // time_assign_s_ +=WallTimer::get_time_mark(); // // std::vector<idi> ks(num_queues, 0); // std::vector<idi> nks(num_queues); // std::vector<idi> bound_ks(num_queues); // double tmp_time_pick_top_m = 0; // uint64_t tmp_count_add_to_queue = 0; // uint8_t not_finished = 1; // double tmp_time_distance_computation = 0; // double tmp_time_add_to_queue = 0; // while (true) { // time_expand_ -= WallTimer::get_time_mark(); // not_finished = 0; // ++iter; // // // Gather top-M vertices // time_pick_top_m_ -= WallTimer::get_time_mark(); // gather_unchecked_top_M_seq( // query_id, // iter, // set_L, // ks, // local_M, // num_queues, // local_queues_starts, // local_queues_sizes, // top_m_candidates, // top_m_candidates_size, // bound_ks); // time_pick_top_m_ += WallTimer::get_time_mark(); // if (!top_m_candidates_size) { // time_expand_ += WallTimer::get_time_mark(); // break; // } // std::fill(nks.begin(), nks.end(), global_L); // // // Expand top-M vertices //#pragma omp parallel for schedule(static, 1) \ // reduction(+ : tmp_count_computation) \ // reduction(+ : tmp_count_add_to_queue) \ // reduction(+ : tmp_time_distance_computation) \ // reduction(+ : tmp_time_pick_top_m) \ // reduction(+ : tmp_time_add_to_queue) // for (idi c_i 
= 0; c_i < top_m_candidates_size; ++c_i) { // tmp_time_pick_top_m -= WallTimer::get_time_mark(); // idi tid = omp_get_thread_num(); // const idi set_L_start = local_queues_starts[tid]; // idi &set_L_size = local_queues_sizes[tid]; // idi &nk = nks[tid]; // idi L_value = tid == 0 ? global_L : local_L; // idi cand_id = top_m_candidates[c_i]; //// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; //// for (idi n_i = 0; n_i < out_degree; ++n_i) { //// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); //// } // tmp_time_pick_top_m += WallTimer::get_time_mark(); // // Expand cand_id's neighbors // for (idi e_i = 0; e_i < out_degree; ++e_i) { // tmp_time_distance_computation -= WallTimer::get_time_mark(); // idi nb_id = out_edges[e_i]; // { // Sequential edition // if (is_visited[nb_id]) { // tmp_time_distance_computation += WallTimer::get_time_mark(); // continue; // } // is_visited[nb_id] = 1; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; // ++tmp_count_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // tmp_time_distance_computation += WallTimer::get_time_mark(); // if (dist > set_L[set_L_start + set_L_size - 1].distance_) { //// if (dist > bound_lth) { // continue; // } // ++tmp_count_add_to_queue; // Candidate cand(nb_id, dist, false); // tmp_time_add_to_queue -= WallTimer::get_time_mark(); // idi r = add_into_queue( // set_L, // set_L_start, // set_L_size, // L_value, // cand); // if (r < nk) { // nk = r; // } // tmp_time_add_to_queue += WallTimer::get_time_mark(); // } // } // top_m_candidates_size = 0; // time_add_to_queue_ += tmp_time_add_to_queue; // tmp_time_add_to_queue = 0; // time_distance_computation_ += tmp_time_distance_computation; // tmp_time_distance_computation = 0; // count_add_to_queue_ += tmp_count_add_to_queue; // tmp_count_add_to_queue = 0; // time_pick_top_m_ += tmp_time_pick_top_m; // tmp_time_pick_top_m = 0; // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; // for (idi q_i = 0; q_i < num_queues; ++q_i) { // if (nks[q_i] < bound_ks[q_i]) { // ks[q_i] = nks[q_i]; // } else { // ks[q_i] = bound_ks[q_i]; // } // } // time_expand_ += WallTimer::get_time_mark(); // // time_select_ -= WallTimer::get_time_mark(); // {// Select L-th // bound_lth = selecting_top_L_seq( // set_L, // global_L, // num_queues, // local_queues_starts, // local_queues_sizes); // } // time_select_ += WallTimer::get_time_mark(); // {// Scale M // if (local_M < local_M_max) { // local_M <<= 1; // } // } //// {//test //// printf("query_id: %u " //// "iter: %u", //// query_id, //// iter); //// printf(" local_queues_sizes:"); //// for (idi i = 0; i < num_queues; ++i) { //// printf(" %u", local_queues_sizes[i]); //// } //// printf(" local_m_counts:"); //// for (idi i = 0; i < num_queues; ++i) { //// printf(" %u", local_m_counts[i]); //// } //// printf(" ks:"); //// for (idi i = 0; i < num_queues; ++i) { //// printf(" %u", ks[i]); //// } //// printf("\n"); //// } // } // } // time_parallel_phase_ += WallTimer::get_time_mark(); // } // //// time_merge_ -= WallTimer::get_time_mark(); // time_ending_ -= WallTimer::get_time_mark(); // {// Return the results to set_K // std::vector<idi> pointer(num_threads_, 0); // // get the first // distf min_dist = FLT_MAX; // idi min_q_i; // idi min_id; 
// idi min_sub; // idi last_id; // for (int q_i = 0; q_i < num_threads_; ++q_i) { // if (pointer[q_i] >= local_queues_sizes[q_i]) { // continue; // } // idi sub = pointer[q_i] + local_queues_starts[q_i]; // distf tmp_dist = set_L[sub].distance_; // idi tmp_id = set_L[sub].id_; // if (tmp_dist < min_dist) { // min_dist = tmp_dist; // min_id = tmp_id; // min_q_i = q_i; // min_sub = sub; // } else if (tmp_dist == min_dist && tmp_id < min_id) { // min_id = tmp_id; // min_q_i = q_i; // min_sub = sub; // } // } // set_K[0] = set_L[min_sub].id_; //// {//test //// printf("query_id: %u " //// "[%u]: " //// "(%u, %f)\n", //// query_id, //// 0, //// set_L[min_sub].id_, set_L[min_sub].distance_); //// } // ++pointer[min_q_i]; // last_id = set_K[0]; // // bool is_finished = false; // idi k_i = 1; // while (k_i < K && !is_finished) { // is_finished = true; // min_dist = FLT_MAX; // for (int q_i = 0; q_i < num_threads_; ++q_i) { // const idi local_queue_size = local_queues_sizes[q_i]; // idi sub = pointer[q_i] + local_queues_starts[q_i]; // // while (pointer[q_i] < local_queue_size // && set_L[sub].id_ == last_id) { // ++pointer[q_i]; // ++sub; // } // if (pointer[q_i] >= local_queue_size) { // continue; // } // is_finished = false; // distf tmp_dist = set_L[sub].distance_; // idi tmp_id = set_L[sub].id_; // if (tmp_dist < min_dist) { // min_dist = tmp_dist; // min_id = tmp_id; // min_q_i = q_i; // min_sub = sub; // } else if (tmp_dist == min_dist && tmp_id < min_id) { // min_id = tmp_id; // min_q_i = q_i; // min_sub = sub; // } // } // set_K[k_i] = set_L[min_sub].id_; //// {//test //// printf("query_id: %u " //// "[%u]: " //// "(%u, %f)\n", //// query_id, //// k_i, //// set_L[min_sub].id_, set_L[min_sub].distance_); //// } // ++pointer[min_q_i]; // ++k_i; // } // } //// time_merge_ += WallTimer::get_time_mark(); // // {// Reset //// std::fill(is_visited.begin(), is_visited.end(), 0); // is_visited.reset(); // std::fill(local_queues_sizes.begin() + 1, local_queues_sizes.end(), 0); // } // // time_ending_ += WallTimer::get_time_mark(); //// {//test //// if (3 == query_id) { //// exit(1); //// } //// } //} /* * 7/28/2020-11:25 * Same with V4, but only gather top-m vertices, but not select top-L. */ inline void Searching::para_search_with_top_m_subsearch_v5( const idi local_M_max, const idi local_M_middle, const idi query_id, const idi K, const idi global_L, const idi local_L, // const idi total_L, // const idi init_queue_size, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K, const std::vector<idi> &local_queues_starts, std::vector<idi> &local_queues_sizes, // std::vector<idi> &local_m_counts, std::vector<idi> &top_m_candidates, // std::vector< std::vector<idi> > &top_m_candidates_list, boost::dynamic_bitset<> &is_visited) { time_initialization_ -= WallTimer::get_time_mark(); uint64_t tmp_count_computation = 0; {// Initialization // is_visited flag array //#pragma omp parallel for // Cannot use OMP for bit array is_visited! for (idi c_i = 0; c_i < global_L; ++c_i) { is_visited[init_ids[c_i]] = 1; } const dataf *query_data = queries_load_ + query_id * dimension_; #pragma omp parallel for for (idi v_i = 0; v_i < global_L; ++v_i) { idi v_id = init_ids[v_i]; _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); } // Get the distances of all candidates, store in the set set_L. 
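        // Distance initialization of para_search_with_top_m_subsearch_v5: score
        // all init_ids against the query in parallel, each thread folding its
        // count into tmp_count_computation through the OpenMP reduction, then
        // sort the first global_L entries once. Unlike the commented-out v3/v4
        // variants above, v5 never re-selects a global L-th bound; its flow is
        // this initialization, a sequential top-M phase while local_M is below
        // local_M_middle, and a parallel phase that gathers unchecked top-M
        // vertices across the per-thread queues and expands them under
        // "#pragma omp parallel for schedule(static, 1)".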
#pragma omp parallel for reduction(+ : tmp_count_computation) for (idi id_i = 0; id_i < global_L; ++id_i) { idi v_id = init_ids[id_i]; auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); dataf norm = *v_data++; ++tmp_count_computation; distf dist = compute_distance_with_norm(v_data, query_data, norm); set_L[id_i] = Candidate(v_id, dist, false); // False means not checked. } local_queues_sizes[0] = global_L; count_distance_computation_ += tmp_count_computation; tmp_count_computation = 0; std::sort(set_L.begin(), set_L.begin() + global_L); } time_initialization_ += WallTimer::get_time_mark(); // Searching if (num_threads_ == 1) { // Single threads // std::sort( // set_L.begin(), // set_L.end()); subsearch_with_top_m( local_M_max, query_id, local_L, set_L, 0, local_queues_sizes[0], top_m_candidates, is_visited, tmp_count_computation); count_distance_computation_ += tmp_count_computation; tmp_count_computation = 0; } else { // Multiple threads const dataf *query_data = queries_load_ + query_id * dimension_; const idi num_queues = num_threads_; idi local_M = 1; idi iter = 0; // std::vector<idi> ks(num_queues, 0); time_sequential_phase_ -= WallTimer::get_time_mark(); {// Sequential Search for M = 1, 2. idi k = 0; // idi &k = ks[0]; while (k < global_L && local_M < local_M_middle) { ++iter; subsearch_top_m_for_one_iteration( iter, k, local_M, query_id, query_data, global_L, set_L, 0, local_queues_sizes[0], top_m_candidates, is_visited, tmp_count_computation); count_distance_computation_ += tmp_count_computation; tmp_count_computation = 0; {// Double M if (local_M < local_M_max) { local_M <<= 1; } } } } time_sequential_phase_ += WallTimer::get_time_mark(); time_parallel_phase_ -= WallTimer::get_time_mark(); // distf bound_lth = set_L[global_L - 1].distance_; {// Parallel Search for M >= 4, or local_M_middle time_assign_s_ -=WallTimer::get_time_mark(); {// Assign elements from Queue[0] to others idi dst_i = 1; for (idi e_i = 1; e_i < global_L; ++e_i) { idi dest_sub = e_i % num_queues; if (0 == dest_sub) { set_L[dst_i++] = set_L[e_i]; } else { set_L[local_queues_starts[dest_sub] + local_queues_sizes[dest_sub]++] = set_L[e_i]; } } local_queues_sizes[0] = dst_i; } // std::fill(ks.begin(), ks.end(), 0); idi top_m_candidates_size = 0; time_assign_s_ +=WallTimer::get_time_mark(); std::vector<idi> ks(num_queues, 0); std::vector<idi> nks(num_queues); std::vector<idi> bound_ks(num_queues); double tmp_time_pick_top_m = 0; uint64_t tmp_count_add_to_queue = 0; double tmp_time_distance_computation = 0; double tmp_time_add_to_queue = 0; while (true) { time_expand_ -= WallTimer::get_time_mark(); ++iter; // Gather top-M vertices time_gather_ -= WallTimer::get_time_mark(); gather_unchecked_top_M_seq( query_id, iter, set_L, ks, local_M, num_queues, local_queues_starts, local_queues_sizes, top_m_candidates, top_m_candidates_size, bound_ks); time_gather_ += WallTimer::get_time_mark(); {//test printf("query_id: %u " "iter: %u", query_id, iter); printf(" local_queues_sizes:"); for (idi i = 0; i < num_queues; ++i) { printf(" %u", local_queues_sizes[i]); } // printf(" local_m_counts:"); // for (idi i = 0; i < num_queues; ++i) { // printf(" %u", local_m_counts[i]); // } // printf(" ks:"); // for (idi i = 0; i < num_queues; ++i) { // printf(" %u", ks[i]); // } printf(" gathered:"); for (idi i = 0; i < num_queues; ++i) { printf(" %u", bound_ks[i] - ks[i]); } printf("\n"); } if (!top_m_candidates_size) { time_expand_ += WallTimer::get_time_mark(); break; } std::fill(nks.begin(), nks.end(), 
global_L); // Expand top-M vertices #pragma omp parallel for schedule(static, 1) \ reduction(+ : tmp_count_computation) \ reduction(+ : tmp_count_add_to_queue) \ reduction(+ : tmp_time_distance_computation) \ reduction(+ : tmp_time_pick_top_m) \ reduction(+ : tmp_time_add_to_queue) for (idi c_i = 0; c_i < top_m_candidates_size; ++c_i) { tmp_time_pick_top_m -= WallTimer::get_time_mark(); idi tid = omp_get_thread_num(); const idi set_L_start = local_queues_starts[tid]; idi &set_L_size = local_queues_sizes[tid]; idi &nk = nks[tid]; // idi L_value = tid == 0 ? global_L : local_L; idi L_value = local_L; idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } tmp_time_pick_top_m += WallTimer::get_time_mark(); // Expand cand_id's neighbors for (idi e_i = 0; e_i < out_degree; ++e_i) { tmp_time_distance_computation -= WallTimer::get_time_mark(); idi nb_id = out_edges[e_i]; { // Sequential edition if (is_visited[nb_id]) { tmp_time_distance_computation += WallTimer::get_time_mark(); continue; } is_visited[nb_id] = 1; } auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); dataf norm = *nb_data++; ++tmp_count_computation; distf dist = compute_distance_with_norm(nb_data, query_data, norm); tmp_time_distance_computation += WallTimer::get_time_mark(); if (dist > set_L[set_L_start + set_L_size - 1].distance_) { if (set_L_size < L_value) { ++tmp_count_add_to_queue; set_L[set_L_start + set_L_size] = Candidate(nb_id, dist, false); if (set_L_size < nk) { nk = set_L_size; } ++set_L_size; } continue; } // if (dist > set_L[set_L_start + set_L_size - 1].distance_) { //// if (dist > bound_lth) { // continue; // } ++tmp_count_add_to_queue; Candidate cand(nb_id, dist, false); tmp_time_add_to_queue -= WallTimer::get_time_mark(); idi r = add_into_queue( set_L, set_L_start, set_L_size, L_value, cand); if (r < nk) { nk = r; } tmp_time_add_to_queue += WallTimer::get_time_mark(); } } top_m_candidates_size = 0; time_add_to_queue_ += tmp_time_add_to_queue; tmp_time_add_to_queue = 0; time_distance_computation_ += tmp_time_distance_computation; tmp_time_distance_computation = 0; count_add_to_queue_ += tmp_count_add_to_queue; tmp_count_add_to_queue = 0; time_pick_top_m_ += tmp_time_pick_top_m; tmp_time_pick_top_m = 0; count_distance_computation_ += tmp_count_computation; tmp_count_computation = 0; for (idi q_i = 0; q_i < num_queues; ++q_i) { if (nks[q_i] < bound_ks[q_i]) { ks[q_i] = nks[q_i]; } else { ks[q_i] = bound_ks[q_i]; } } time_expand_ += WallTimer::get_time_mark(); // time_select_ -= WallTimer::get_time_mark(); // {// Select L-th // bound_lth = selecting_top_L_seq( // set_L, // global_L, // num_queues, // local_queues_starts, // local_queues_sizes); // } // time_select_ += WallTimer::get_time_mark(); {// Scale M if (local_M < local_M_max) { local_M <<= 1; } } // {//test // printf("query_id: %u " // "iter: %u", // query_id, // iter); // printf(" local_queues_sizes:"); // for (idi i = 0; i < num_queues; ++i) { // printf(" %u", local_queues_sizes[i]); // } //// printf(" local_m_counts:"); //// for (idi i = 0; i < num_queues; ++i) { //// printf(" %u", local_m_counts[i]); //// } //// printf(" ks:"); //// for (idi i = 0; i < num_queues; ++i) { //// printf(" %u", ks[i]); //// } // printf(" 
bound_ks:"); // for (idi i = 0; i < num_queues; ++i) { // printf(" %u", bound_ks[i]); // } // printf("\n"); // } } } time_parallel_phase_ += WallTimer::get_time_mark(); } // time_merge_ -= WallTimer::get_time_mark(); time_ending_ -= WallTimer::get_time_mark(); {// Return the results to set_K std::vector<idi> pointer(num_threads_, 0); // get the first distf min_dist = FLT_MAX; idi min_q_i; idi min_id; idi min_sub; idi last_id; for (int q_i = 0; q_i < num_threads_; ++q_i) { if (pointer[q_i] >= local_queues_sizes[q_i]) { continue; } idi sub = pointer[q_i] + local_queues_starts[q_i]; distf tmp_dist = set_L[sub].distance_; idi tmp_id = set_L[sub].id_; if (tmp_dist < min_dist) { min_dist = tmp_dist; min_id = tmp_id; min_q_i = q_i; min_sub = sub; } else if (tmp_dist == min_dist && tmp_id < min_id) { min_id = tmp_id; min_q_i = q_i; min_sub = sub; } } set_K[0] = set_L[min_sub].id_; // {//test // printf("query_id: %u " // "[%u]: " // "(%u, %f)\n", // query_id, // 0, // set_L[min_sub].id_, set_L[min_sub].distance_); // } ++pointer[min_q_i]; last_id = set_K[0]; bool is_finished = false; idi k_i = 1; while (k_i < K && !is_finished) { is_finished = true; min_dist = FLT_MAX; for (int q_i = 0; q_i < num_threads_; ++q_i) { const idi local_queue_size = local_queues_sizes[q_i]; idi sub = pointer[q_i] + local_queues_starts[q_i]; while (pointer[q_i] < local_queue_size && set_L[sub].id_ == last_id) { ++pointer[q_i]; ++sub; } if (pointer[q_i] >= local_queue_size) { continue; } is_finished = false; distf tmp_dist = set_L[sub].distance_; idi tmp_id = set_L[sub].id_; if (tmp_dist < min_dist) { min_dist = tmp_dist; min_id = tmp_id; min_q_i = q_i; min_sub = sub; } else if (tmp_dist == min_dist && tmp_id < min_id) { min_id = tmp_id; min_q_i = q_i; min_sub = sub; } } set_K[k_i] = set_L[min_sub].id_; // {//test // printf("query_id: %u " // "[%u]: " // "(%u, %f)\n", // query_id, // k_i, // set_L[min_sub].id_, set_L[min_sub].distance_); // } ++pointer[min_q_i]; ++k_i; } } // time_merge_ += WallTimer::get_time_mark(); {// Reset // std::fill(is_visited.begin(), is_visited.end(), 0); is_visited.reset(); std::fill(local_queues_sizes.begin() + 1, local_queues_sizes.end(), 0); } time_ending_ += WallTimer::get_time_mark(); {//test if (3 == query_id) { exit(1); } } } /* * 6/27/2020-12:33 * Do searching on the local_set_L * local_set_L is already sorted * is_visited is already set up. */ inline void Searching::subsearch_for_simple_search( const idi query_id, const idi local_L, std::vector<Candidate> &set_L, const idi base_set_L, idi &set_L_end, // std::vector<uint8_t> &is_visited, boost::dynamic_bitset<> &is_visited, uint64_t &local_count_distance_computation) { const dataf *query_data = queries_load_ + query_id * dimension_; // idi local_top_m_candidates_end = 0; idi k = 0; // Index of first unchecked candidate. idi iter = 0; // idi M = 1; // value of M while (k < local_L) { ++iter; // {//test // printf("query_id: %u " // "iter: %u\n", // query_id, // iter); // } // Select the top-1 unchecked candidate idi top_1; idi last_k = local_L; // Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. for (idi c_i = k; c_i < set_L_end; ++c_i) { idi index_set_L = c_i + base_set_L; if (set_L[index_set_L].is_checked_) { continue; } top_1 = set_L[index_set_L].id_; last_k = c_i; // Record the location of the last candidate selected. 
set_L[index_set_L].is_checked_ = true; // local_top_m_candidates[local_top_m_candidates_end++] = set_L[index_set_L].id_; break; } if (last_k == local_L) { break; } idi nk = local_L; // Push top-1' neighbors into the queue. idi cand_id = top_1; _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); idi out_degree = *out_edges++; for (idi n_i = 0; n_i < out_degree; ++n_i) { _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); } for (idi e_i = 0; e_i < out_degree; ++e_i) { idi nb_id = out_edges[e_i]; { // Sequential edition if (is_visited[nb_id]) { continue; } is_visited[nb_id] = 1; } // {// Critical edition // if (!AtomicOps::CAS(is_visited.data() + nb_id, // static_cast<uint8_t>(0), // static_cast<uint8_t>(1))) { // continue; // } // } auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); dataf norm = *nb_data++; ++local_count_distance_computation; distf dist = compute_distance_with_norm(nb_data, query_data, norm); // { // if (0 == query_id // && (785802 == nb_id // || 180955 == nb_id // || 240996 == nb_id // || 813701 == nb_id // || 708177 == nb_id // || 87578 == nb_id // || 561813 == nb_id // || 701258 == nb_id // || 872728 == nb_id)) { //// && 180955 == nb_id) { // printf("parent: %u " // "nb_id: %u " // "dist: %f " // "base_set_L: %u " // "set_L_end: %u\n", // cand_id, // nb_id, // dist, // base_set_L, // set_L_end); // } // } if (dist > set_L[set_L_end - 1 + base_set_L].distance_) { continue; } Candidate cand(nb_id, dist, false); // Thread 0 maintains the "global" queue idi r = add_into_queue( set_L, base_set_L, set_L_end, local_L, cand); if (r < nk) { nk = r; } } if (nk <= last_k) { k = nk; } else { k = last_k + 1; } } } /* * 6/27/2020-12:26 * Is is good to use subsearch by every thread it self? */ inline void Searching::para_simple_search_subsearch( const idi query_id, const idi K, const idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K, // std::vector<uint8_t> &is_visited) boost::dynamic_bitset<> &is_visited) { uint64_t tmp_count_computation = 0; {// Initialization // is_visited flag array //#pragma omp parallel for // Cannot use OMP for bit array is_visited! for (idi c_i = 0; c_i < L; ++c_i) { is_visited[init_ids[c_i]] = 1; } const dataf *query_data = queries_load_ + query_id * dimension_; #pragma omp parallel for for (idi v_i = 0; v_i < L; ++v_i) { idi v_id = init_ids[v_i]; _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); } // Get the distances of all candidates, store in the set set_L. //#pragma omp parallel for #pragma omp parallel for reduction(+ : tmp_count_computation) for (unsigned i = 0; i < L; i++) { unsigned v_id = init_ids[i]; auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); dataf norm = *v_data++; ++tmp_count_computation; distf dist = compute_distance_with_norm(v_data, query_data, norm); set_L[i] = Candidate(v_id, dist, false); // False means not checked. 
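        // Note: unlike the merge-queue variants, set_L is filled from index 0
        // with no per-thread base offset; in the multi-threaded branch below the
        // queue is simply cut into num_threads_ contiguous sub-queues of length
        // (L - 1) / num_threads_ + 1, each sorted and subsearched independently
        // before merge_in_set_L() combines them.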
} count_distance_computation_ += tmp_count_computation; tmp_count_computation = 0; // std::sort( // set_L.begin(), // set_L.begin() + L); } idi queue_end = L; // Searching if (num_threads_ == 1) { // Single threads std::sort( set_L.begin(), set_L.end()); subsearch_for_simple_search( query_id, L, set_L, 0, queue_end, is_visited, tmp_count_computation); count_distance_computation_ += tmp_count_computation; // { //// {//test //// for (idi i = 0; i < queue_end; ++i) { //// printf("start: " //// "query_id: %u " //// "set_L[%u]: " //// "(%u %f)\n", //// query_id, //// i, //// set_L[i].id_, set_L[i].distance_); //// } //// } // // idi half_length = queue_end / 2; // std::sort( // set_L.begin(), // set_L.begin() + half_length); //// {//test //// for (idi i = 0; i < half_length; ++i) { //// printf("sorted: " //// "query_id: %u " //// "set_L[%u]: " //// "(%u %f)\n", //// query_id, //// i, //// set_L[i].id_, set_L[i].distance_); //// } //// } // // subsearch_for_simple_search( // query_id, // half_length, // local_L // set_L, // 0, // base_set_L // half_length, // set_L_end // is_visited, // tmp_count_computation); // //// {//test //// for (idi i = 0; i < half_length; ++i) { //// printf("subsearched: " //// "query_id: %u " //// "set_L[%u]: " //// "(%u %f)\n", //// query_id, //// i, //// set_L[i].id_, set_L[i].distance_); //// } //// } // // std::sort( // set_L.begin() + half_length, // set_L.end()); // //// {//test //// for (idi i = half_length; i < queue_end; ++i) { //// printf("sorted: " //// "query_id: %u " //// "set_L[%u]: " //// "(%u %f)\n", //// query_id, //// i, //// set_L[i].id_, set_L[i].distance_); //// } //// } // // subsearch_for_simple_search( // query_id, // half_length, // local_L // set_L, // half_length, // base_set_L // half_length, // set_L_end // is_visited, // tmp_count_computation); //// {//test //// for (idi i = half_length; i < queue_end; ++i) { //// printf("subsearched: " //// "query_id: %u " //// "set_L[%u]: " //// "(%u %f)\n", //// query_id, //// i, //// set_L[i].id_, set_L[i].distance_); //// } //// } //// {//test //// for (idi i = 0; i < queue_end; ++i) { //// printf("explored: " //// "query_id: %u " //// "set_L[%u]: " //// "(%u %f)\n", //// query_id, //// i, //// set_L[i].id_, set_L[i].distance_); //// } //// } // count_distance_computation_ += tmp_count_computation; // // std::vector <Candidate> tmp_set_L(L); // std::merge(set_L.begin(), set_L.begin() + half_length, // set_L.begin() + half_length, set_L.end(), // tmp_set_L.begin()); // std::copy(tmp_set_L.begin(), tmp_set_L.end(), set_L.begin()); //// {//test //// for (idi i = 0; i < queue_end; ++i) { //// printf("merged: " //// "query_id: %u " //// "set_L[%u]: " //// "(%u %f)\n", //// query_id, //// i, //// set_L[i].id_, set_L[i].distance_); //// } //// } // } } else { // Multiple threads const idi num_queues = num_threads_; const idi local_queue_length = (L - 1) / num_queues + 1; // Parallel for #pragma omp parallel for reduction(+ : tmp_count_computation) for (idi q_i = 0; q_i < num_queues; ++q_i) { idi local_queue_base = q_i * local_queue_length; if (local_queue_base >= L) { continue; } idi local_queue_end = local_queue_length; if (local_queue_base + local_queue_end > L) { local_queue_end = L - local_queue_base; } std::sort( set_L.begin() + local_queue_base, set_L.begin() + local_queue_base + local_queue_end); subsearch_for_simple_search( query_id, local_queue_end, // local_L set_L, local_queue_base, // base_set_L local_queue_end, // set_L_end is_visited, tmp_count_computation); } count_distance_computation_ += 
tmp_count_computation; // Merge // time_merge_ -= WallTimer::get_time_mark(); merge_in_set_L( set_L, L, num_queues, local_queue_length); // time_merge_ += WallTimer::get_time_mark(); } {// Return the results to set_K // How to deal with duplicate? idi last_id = set_L[0].id_; set_K[0] = last_id; idi k_i = 1; idi l_i = 1; while (k_i < K && l_i < L) { if (last_id == set_L[l_i].id_) { ++l_i; continue; } last_id = set_L[l_i++].id_; set_K[k_i++] = last_id; } //#pragma omp parallel for // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; //// set_K[k_i] = set_L[k_i].id_; // } } {// Reset // std::fill(is_visited.begin(), is_visited.end(), 0); is_visited.reset(); // is_visited.clear_all(); } // {//test // if (0 == query_id) { // exit(1); // } // } } ///* // * 6/22/2020-09:38 // * A synchronized last element as the sentinel // */ //inline void Searching::para_search_with_top_m_merge_queues_global_threshold( // const idi value_M_middle, // const idi value_M_max, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; // std::vector<idi> &local_queues_ends, // Sizes of local queue // std::vector<idi> &top_m_candidates, // boost::dynamic_bitset<> &is_visited) //{ //// const idi base_set_L = (num_threads_ - 1) * local_queue_length; // { //#pragma omp parallel for // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = 1; // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; //#pragma omp parallel for // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // uint64_t tmp_count_computation = 0; // // Get the distances of all candidates, store in the set set_L. ////#pragma omp parallel for //#pragma omp parallel for reduction(+ : tmp_count_computation) // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; // ++tmp_count_computation; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i + base_set_L] = Candidate(v_id, dist, false); // False means not checked. // } // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; //// std::sort(set_L.begin(), set_L.begin() + L); // std::sort( // set_L.begin() + base_set_L, // set_L.begin() + base_set_L + L); // local_queues_ends[num_threads_ - 1] = L; // // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. // idi tmp_count = 0; // for debug // idi M = 1; // // { // Single thread // while (k < L && M < value_M_middle) { // ++tmp_count; //// {//test //// printf("tmp_count: %d\n", tmp_count); //// } // // // Select M candidates // idi last_k = L; //// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // idi index_set_L = c_i + base_set_L; // if (set_L[index_set_L].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[index_set_L].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; // } // // idi nk = L; // // Push M candidates' neighbors into the queue. 
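//            // Note on this commented-out variant: per its header comment it keeps a
//            // single synchronized sentinel, i.e. every thread prunes against the
//            // global queue's L-th element owned by thread 0 rather than a per-thread
//            // threshold. It also follows the same M schedule as the active functions:
//            // a single-threaded loop while M < value_M_middle, then the multi-threaded
//            // loop, doubling M each iteration and clamping it at value_M_max:
//            //
//            //     if (M < value_M_max) { M <<= 1; } else { M = value_M_max; }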
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // { // Sequential edition // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = 1; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; // ++tmp_count_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L - 1 + base_set_L].distance_) { // continue; // } // // Candidate cand(nb_id, dist, false); // // Thread 0 maintains the "global" queue // idi r = add_into_queue( // set_L, // base_set_L, // local_queues_ends[num_threads_ - 1], // L, // cand); // if (r < nk) { // nk = r; // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // // {// Scale M // if (M < value_M_max) { // M <<= 1; // } else { // M = value_M_max; // } // } // // } // } // // { // Multiple Threads // while (k < L) { // ++tmp_count; //// {//test //// printf("tmp_count: %d\n", tmp_count); //// } // // Select M candidates // idi last_k = L; //// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // idi index_set_L = c_i + base_set_L; // if (set_L[index_set_L].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[index_set_L].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; // } // // // idi nk = L; // // Push M candidates' neighbors into the queue. ////#pragma omp parallel for reduction(+ : tmp_count_computation) num_threads(real_threads) //#pragma omp parallel for reduction(+ : tmp_count_computation) // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // int tid = omp_get_thread_num(); // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // { // Sequential edition // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = 1; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; // ++tmp_count_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L - 1 + base_set_L].distance_) { // continue; // } // // Candidate cand(nb_id, dist, false); // // Add to the local queue. 
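//                    // Queue routing in this commented-out variant: the master thread
//                    // (tid 0) inserts into the "global" queue starting at base_set_L
//                    // with capacity L, while every other thread collects into its own
//                    // local queue at (tid - 1) * local_queue_length; the local queues
//                    // are reconciled afterwards by merge_all_queues_para_array().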
// if (0 != tid) { // // Non-Master threads using local queues // add_into_queue( // set_L, // (tid - 1) * local_queue_length, // local_queues_ends[tid - 1], // local_queue_length, // cand); // } else { // // Thread 0 maintains the "global" queue // idi r = add_into_queue( // set_L, // base_set_L, // local_queues_ends[num_threads_ - 1], // L, // cand); // if (r < nk) { // nk = r; // } // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; // // {// Local queues' ends //// printf("query%u:iter: %u", query_id, tmp_count); // idi total_elements = 0; // for (int i_t = 0; i_t < num_threads_ - 1; ++i_t) { // total_elements += local_queues_ends[i_t]; // } // number_local_elements_ += total_elements; //// printf(" total_elements: %u+%u\n", total_elements - local_queues_ends[num_threads_ - 1], local_queues_ends[num_threads_ - 1]); //// for (int i_t = 0; i_t < num_threads_; ++i_t) { //// printf(" [%u]: %u", i_t, local_queues_ends[i_t]); //// } //// printf("\n"); // } // //// // Merge. Merge all queues in parallel. // { // time_merge_ -= WallTimer::get_time_mark(); // if (num_threads_ > 1) { // idi r = merge_all_queues_para_array( // set_L, // local_queues_ends, // local_queue_length, // L); // if (r < nk) { // nk = r; // } // } // time_merge_ += WallTimer::get_time_mark(); // } // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // {// Scale M // if (M < value_M_max) { // M <<= 1; // } else { // M = value_M_max; // } // } // // } // } // // //#pragma omp parallel for // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i + base_set_L].id_; //// set_K[k_i] = set_L[k_i].id_; // } // // {// Reset //// std::fill(is_visited.begin(), is_visited.end(), 0); // is_visited.reset(); //// is_visited.clear_all(); // std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0); // } // //// {//test //// if (0 == query_id) { //// exit(1); //// } //// } //} ///* // * 6/7/2020-16:55 // * Use 1 threads to scale M until the value_M_middle. // * Then use multiple threads. // * Except for Thread 0, other threads are collectors. They collect, but do not merge. // * Only merge once after Thread 0 stops. // */ //inline void Searching::para_search_with_top_m_merge_queues_collectors( // const idi value_M_middle, // const idi value_M_max, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; // std::vector<idi> &local_queues_ends, // Sizes of local queue //// std::vector<Candidate> &top_m_candidates, // std::vector<idi> &top_m_candidates, //// std::vector<uint8_t> &is_visited) // boost::dynamic_bitset<> &is_visited) //// std::vector<distf> &local_thresholds) //// BitVector &is_visited) //{ // { //#pragma omp parallel for // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = 1; //// is_visited.atomic_set_bit(init_ids[c_i]); // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; //#pragma omp parallel for // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // uint64_t tmp_count_computation = 0; // // Get the distances of all candidates, store in the set set_L. 
////#pragma omp parallel for //#pragma omp parallel for reduction(+ : tmp_count_computation) // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; //// ++count_distance_computation_; // ++tmp_count_computation; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i + base_set_L] = Candidate(v_id, dist, false); // False means not checked. //// set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; //// std::sort(set_L.begin(), set_L.begin() + L); // std::sort( // set_L.begin() + base_set_L, // set_L.begin() + base_set_L + L); //// boost::sort::block_indirect_sort( //// set_L.begin() + base_set_L, //// set_L.begin() + base_set_L + L, //// num_threads_); // local_queues_ends[num_threads_ - 1] = L; // //// std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. // idi tmp_count = 0; // for debug // idi M = 1; // // // Single thread // { // while (k < L && M < value_M_middle) { // ++tmp_count; //// {//test //// printf("tmp_count: %d\n", tmp_count); //// } // //// int real_threads = std::min(static_cast<int>(M), num_threads_); //// idi queue_base = num_threads_ - real_threads; // // Select M candidates // idi last_k = L; //// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // idi index_set_L = c_i + base_set_L; // if (set_L[index_set_L].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[index_set_L].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; // } // // idi nk = L; // // Push M candidates' neighbors into the queue. 
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // { // Sequential edition // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = 1; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation_; // ++tmp_count_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L - 1 + base_set_L].distance_) { // continue; // } // // Candidate cand(nb_id, dist, false); // // Thread 0 maintains the "global" queue // idi r = add_into_queue( // set_L, // base_set_L, // local_queues_ends[num_threads_ - 1], // L, // cand); // if (r < nk) { // nk = r; // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // // {// Scale M // if (M < value_M_max) { // M <<= 1; // } else { // M = value_M_max; // } // } // } // } // // // Multiple Threads // { //// while (k < L/num_threads_/2) { // while (k < L) { // ++tmp_count; //// {//test //// printf("tmp_count: %d\n", tmp_count); //// } //// int real_threads = std::min(static_cast<int>(M), num_threads_); //// idi queue_base = num_threads_ - real_threads; // // Select M candidates // idi last_k = L; //// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // idi index_set_L = c_i + base_set_L; // if (set_L[index_set_L].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[index_set_L].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; // } // // idi chunk_size; // if (num_threads_ <= top_m_candidates_end) { // chunk_size = (top_m_candidates_end - 1) / num_threads_ + 1; // } else { // chunk_size = 1; // } // idi nk = L; // // Push M candidates' neighbors into the queue. 
////#pragma omp parallel for reduction(+ : tmp_count_computation) num_threads(real_threads) ////#pragma omp parallel for reduction(+ : tmp_count_computation) //#pragma omp parallel for reduction(+ : tmp_count_computation) schedule(static, chunk_size) // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // int tid = omp_get_thread_num(); //// { //// if (c_i < chunk_size && tid != 0) { //// printf("query_id: %u " //// "tmp_count: %u " //// "chunk_size: %u " //// "c_i: %u " //// "tid: %u\n", //// query_id, //// tmp_count, //// chunk_size, //// c_i, //// tid); //// } //// } // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // { // Sequential edition // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = 1; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation_; // ++tmp_count_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L - 1 + base_set_L].distance_) { // continue; // } // // Candidate cand(nb_id, dist, false); // // Add to the local queue. // if (0 != tid) { // // Non-Master threads using local queues // add_into_queue( // set_L, // (tid - 1) * local_queue_length, // local_queues_ends[tid - 1], // local_queue_length, // cand); // } else { // // Thread 0 maintains the "global" queue // idi r = add_into_queue( // set_L, // base_set_L, // local_queues_ends[num_threads_ - 1], // L, // cand); // if (r < nk) { // nk = r; // } // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; // ////// // Merge. Merge all queues in parallel. //// { //// time_merge_ -= WallTimer::get_time_mark(); //// if (num_threads_ > 1) { ////// idi r = merge_all_queues_queue_base( ////// set_L, ////// local_queues_ends, ////// queue_base, ////// real_threads, ////// local_queue_length, ////// L); //// idi r = merge_all_queues_para_array( //// set_L, //// local_queues_ends, //// local_queue_length, //// L); //// if (r < nk) { //// nk = r; //// } //// } //// time_merge_ += WallTimer::get_time_mark(); //// } // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // {// Scale M // if (M < value_M_max) { // M <<= 1; // } else { // M = value_M_max; // } // } // } // //// // Merge only once after Master Thread stops. 
//// { //// time_merge_ -= WallTimer::get_time_mark(); //// if (num_threads_ > 1) { ////// idi r = merge_all_queues_queue_base( ////// set_L, ////// local_queues_ends, ////// queue_base, ////// real_threads, ////// local_queue_length, ////// L); //// merge_all_queues_para_array( //// set_L, //// local_queues_ends, //// local_queue_length, //// L); //// } //// time_merge_ += WallTimer::get_time_mark(); //// } // } // // //#pragma omp parallel for // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i + base_set_L].id_; //// set_K[k_i] = set_L[k_i].id_; // } // // {// Reset //// std::fill(is_visited.begin(), is_visited.end(), 0); // is_visited.reset(); //// is_visited.clear_all(); // std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0); // } // //// {//test //// printf("tmp_count: %u\n", tmp_count); //// if (3 == query_id) { //// exit(1); //// } //// } //} ///* // * 6/8/2020-16:39 // * Selecting rather than merging // */ //inline void Searching::para_search_with_top_m_merge_queues_selecting( // const idi value_M_middle, // const idi value_M_max, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; // std::vector<idi> &local_queues_ends, // Sizes of local queue //// std::vector<Candidate> &top_m_candidates, // std::vector<idi> &top_m_candidates, //// std::vector<uint8_t> &is_visited) // boost::dynamic_bitset<> &is_visited) //{ // { //#pragma omp parallel for // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = 1; //// is_visited.atomic_set_bit(init_ids[c_i]); // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; //#pragma omp parallel for // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // uint64_t tmp_count_computation = 0; // // Get the distances of all candidates, store in the set set_L. ////#pragma omp parallel for //#pragma omp parallel for reduction(+ : tmp_count_computation) // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; //// ++count_distance_computation_; // ++tmp_count_computation; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i + base_set_L] = Candidate(v_id, dist, false); // False means not checked. //// set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; //// std::sort(set_L.begin(), set_L.begin() + L); // std::sort( // set_L.begin() + base_set_L, // set_L.begin() + base_set_L + L); //// boost::sort::block_indirect_sort( //// set_L.begin() + base_set_L, //// set_L.begin() + base_set_L + L, //// num_threads_); // local_queues_ends[num_threads_ - 1] = L; // //// std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. 
// idi tmp_count = 0; // for debug // idi M = 1; // // // Single thread // { // while (k < L && M < value_M_middle) { // ++tmp_count; //// {//test //// printf("tmp_count: %d\n", tmp_count); //// } // //// int real_threads = std::min(static_cast<int>(M), num_threads_); //// idi queue_base = num_threads_ - real_threads; // // Select M candidates // idi last_k = L; //// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // idi index_set_L = c_i + base_set_L; // if (set_L[index_set_L].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[index_set_L].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; // } // // idi nk = L; // // Push M candidates' neighbors into the queue. // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // { // Sequential edition // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = 1; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation_; // ++tmp_count_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L - 1 + base_set_L].distance_) { // continue; // } // // Candidate cand(nb_id, dist, false); // // Thread 0 maintains the "global" queue // idi r = add_into_queue( // set_L, // base_set_L, // local_queues_ends[num_threads_ - 1], // L, // cand); // if (r < nk) { // nk = r; // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // // {// Scale M // if (M < value_M_max) { // M <<= 1; // } else { // M = value_M_max; // } // } // } // } // // // Multiple Threads // { //// while (k < L/num_threads_/2) { //// while (k < L) { // while (true) { // ++tmp_count; //// {//test //// printf("tmp_count: %d\n", tmp_count); //// } //// // Select M candidates //// idi last_k = L; ////// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. //// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { //// idi index_set_L = c_i + base_set_L; //// if (set_L[index_set_L].is_checked_) { //// continue; //// } //// last_k = c_i; // Record the location of the last candidate selected. //// set_L[index_set_L].is_checked_ = true; //// top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; //// } // // // Select M candidates // { // idi traverse_count = 0; // idi bound_sub = L; // This is not always true! 
// for (idi sub = 0; sub < bound_sub && top_m_candidates_end < M && traverse_count < L; ++sub) { // for (int tid = 0; tid < num_threads_ && top_m_candidates_end < M && traverse_count < L; ++tid) { // if (sub >= local_queues_ends[tid]) { // continue; // } // idi index_set_L = tid * local_queue_length + sub; // if (set_L[index_set_L].is_checked_) { // continue; // } // set_L[index_set_L].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; // } // } // // if (0 == top_m_candidates_end) { // break; // } // } // //// idi nk = L; // // Push M candidates' neighbors into the queue. ////#pragma omp parallel for reduction(+ : tmp_count_computation) num_threads(real_threads) //#pragma omp parallel for reduction(+ : tmp_count_computation) // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // int tid = omp_get_thread_num(); // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // { // Sequential edition // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = 1; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation_; // ++tmp_count_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L - 1 + base_set_L].distance_) { // continue; // } // // Candidate cand(nb_id, dist, false); // // Add to the local queue. // if (0 != tid) { // // Non-Master threads using local queues // add_into_queue( // set_L, // (tid - 1) * local_queue_length, // local_queues_ends[tid - 1], // local_queue_length, // cand); // } else { // // Thread 0 maintains the "global" queue //// idi r = // add_into_queue( // set_L, // base_set_L, // local_queues_ends[num_threads_ - 1], // L, // cand); //// if (r < nk) { //// nk = r; //// } // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; // //// // Merge. Merge all queues in parallel. 
// { // time_merge_ -= WallTimer::get_time_mark(); // if (num_threads_ > 1) { //// idi r = merge_all_queues_queue_base( //// set_L, //// local_queues_ends, //// queue_base, //// real_threads, //// local_queue_length, //// L); //// idi r = // merge_all_queues_para_array( // set_L, // local_queues_ends, // local_queue_length, // L); //// if (r < nk) { //// nk = r; //// } // } // time_merge_ += WallTimer::get_time_mark(); // } //// if (nk <= last_k) { //// k = nk; //// } else { //// k = last_k + 1; //// } // {// Scale M // if (M < value_M_max) { // M <<= 1; // } else { // M = value_M_max; // } // } // } // } // // ////#pragma omp parallel for //// for (idi k_i = 0; k_i < K; ++k_i) { //// set_K[k_i] = set_L[k_i + base_set_L].id_; ////// set_K[k_i] = set_L[k_i].id_; //// } // // { // idi k_i = 0; // idi bound_sub = K / num_threads_; // for (idi sub = 0; sub < bound_sub; ++sub) { // for (int tid = 0; tid < num_threads_; ++tid) { // idi index_set_L = tid * local_queue_length + sub; // set_K[k_i++] = set_L[index_set_L].id_; // } // } // idi remain = K - k_i; // if (remain) { // for (int tid = 0; tid < remain; ++tid) { // idi index_set_L = tid * local_queue_length + bound_sub; // set_K[k_i++] = set_L[index_set_L].id_; // } // } // } // // {// Reset //// std::fill(is_visited.begin(), is_visited.end(), 0); // is_visited.reset(); //// is_visited.clear_all(); // std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0); // } // //// {//test //// printf("tmp_count: %u\n", tmp_count); //// if (3 == query_id) { //// exit(1); //// } //// } //} } // namespace PANNS #endif //BATCH_SEARCHING_SEARCHING_H
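
// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original sources): a minimal driver
// showing how the Searching class in this header might be exercised for a
// plain sequential query pass. The include path, the file names, the set_L
// capacity of L + 1, and the assumption that PANNS::idi is an alias for
// unsigned are all hypothetical; the project's own benchmark/app code is the
// authoritative reference for real usage.
// ---------------------------------------------------------------------------
#include <unordered_map>
#include <vector>
#include "searching.h" // hypothetical path to this header

int main()
{
    PANNS::Searching engine;
    engine.load_data_load(const_cast<char *>("base.fvecs"));     // hypothetical file
    engine.load_queries_load(const_cast<char *>("query.fvecs")); // hypothetical file
    engine.load_nsg_graph(const_cast<char *>("index.nsg"));      // hypothetical file

    const unsigned K = 100;
    const unsigned L = 100; // get_recall_for_all_queries() expects at least 100 results per query.
    std::vector<unsigned> init_ids(L);
    engine.prepare_init_ids(init_ids, L);

    std::vector<std::vector<unsigned>> set_K_list(engine.num_queries_, std::vector<unsigned>(K));
    for (unsigned q_i = 0; q_i < engine.num_queries_; ++q_i) {
        // One slot beyond L so the ordered insertion never writes out of bounds.
        std::vector<PANNS::Candidate> set_L(L + 1, PANNS::Candidate(0, 0.0f, false));
        engine.search_in_sequential(q_i, K, L, set_L, init_ids, set_K_list[q_i]);
    }

    std::vector<std::vector<unsigned>> true_nn_list;
    engine.load_true_NN("true_nn.bin", true_nn_list); // hypothetical ground-truth file
    std::unordered_map<unsigned, double> recalls;
    engine.get_recall_for_all_queries(true_nn_list, set_K_list, recalls);
    return 0;
}
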
// // Created by Zhen Peng on 7/28/2020. // #ifndef BATCH_SEARCHING_SEARCHING_H #define BATCH_SEARCHING_SEARCHING_H #include <vector> #include <boost/dynamic_bitset.hpp> //#include <boost/sort/sort.hpp> #include <iostream> #include <fstream> #include <unordered_map> #include <immintrin.h> #include <cstring> #include <unordered_set> #include <set> #include <cfloat> #include <algorithm> //#include <omp.h> #include "../include/definitions.h" //#include "../include/efanna2e/neighbor.h" #include "../include/utils.h" #include "../include/Candidate.h" #include "../include/parallelization.h" #include "../include/bitvector.h" namespace PANNS { class Searching { //private: public: idi num_v_ = 0; edgei num_e_ = 0; idi num_queries_ = 0; uint64_t dimension_ = 0; idi width_ = 0; // NSG largest degree idi ep_ = 0; // Start point // std::vector<dataf> data_load_; // std::vector<dataf> queries_load_; // std::vector< std::vector<dataf> > data_load_; // std::vector< std::vector<dataf> > queries_load_; // std::vector<distf> norms_; dataf *data_load_ = nullptr; dataf *queries_load_ = nullptr; // dataf *norms_; // std::vector< std::vector<idi> > nsg_graph_; // idi *nsg_graph_indices_; // idi *nsg_graph_out_edges_; // std::vector< std::vector<idi> > edge_list_; char *opt_nsg_graph_ = nullptr; uint64_t data_bytes_; uint64_t neighbor_bytes_; uint64_t vertex_bytes_; // For multithreads int num_threads_ = 1; // int num_real_threads_ = 1; // int num_threads_intra_query_ = 1; // int num_threads_inter_query_ = 1; dataf compute_norm( const dataf *data) const; // idi vertex_id); // const std::vector<PANNS::dataf> &data); // size_t loc_start, // idi dimension) dataf compute_distance_with_norm( const dataf *v_data, const dataf *q_data, // idi vertex_id, // idi query_id, // const std::vector<dataf> &d_data, // const std::vector<dataf> &q_data, // PANNS::idi d_start, // PANNS::idi q_start, const dataf vertex_norm) const; // idi dimension) static idi insert_into_queue( std::vector<Candidate> &c_queue, idi c_queue_top, Candidate cand); static idi add_into_queue( std::vector<PANNS::Candidate> &queue, idi &queue_top, const idi queue_size, const PANNS::Candidate &cand); static idi add_into_queue( std::vector<PANNS::Candidate> &queue, const idi queue_start, idi &queue_size, const idi queue_capacity, const PANNS::Candidate &cand); static void add_into_queue_at( const Candidate &cand, std::vector<Candidate> &queue, const idi insert_index, // The insertion location, independent with queue_start const idi queue_start, idi &queue_top, // The number of elements in queue, independent with queue_start const idi queue_size); // The maximum capacity of queue, independent with queue_start. static void insert_one_element_at( // const T &cand, // T *queue_base, const Candidate &cand, std::vector<Candidate> &queue_base, const idi insert_index, const idi queue_start, const idi queue_size); // idi insert_into_queue_nsg( // std::vector< Candidate > &c_queue, // idi c_queue_top, // Candidate cand); static idi merge_two_queues_into_1st_queue_seq_fixed( std::vector<Candidate> &queue1, const idi queue1_start, const idi queue1_size, std::vector<Candidate> &queue2, const idi queue2_start, const idi queue2_size); static void merge_two_queues_into_1st_queue_seq_incr( std::vector<Candidate> &queue1, const idi queue1_start, idi &queue1_size, // The number of element in queue1, independent with queue1_start. const idi queue1_length, // The maximum capacity of queue1, independent with queue1_start. 
std::vector<Candidate> &queue2, const idi queue2_start, const idi queue2_size); idi merge_all_queues_para_list( std::vector< std::vector<Candidate> > &local_queues_list, std::vector<idi> &local_queues_ends, std::vector<Candidate> &set_L, const idi L); // idi merge_all_queues_para_array( //// std::vector< std::vector<Candidate> > &local_queues_list, // std::vector<Candidate> &local_queues_array, // std::vector<idi> &local_queues_ends, // const idi local_queue_length, // std::vector<Candidate> &set_L, // const idi L); idi merge_all_queues_para_array( std::vector<Candidate> &set_L, // std::vector<Candidate> &local_queues_array, std::vector<idi> &local_queues_ends, const idi local_queue_length, // std::vector<Candidate> &set_L, const idi L); idi merge_all_queues_queue_base( // std::vector< std::vector<Candidate> > &local_queues_list, std::vector<Candidate> &set_L, // std::vector<Candidate> &local_queues_array, std::vector<idi> &local_queues_ends, const idi queue_base, const int real_threads, const idi local_queue_length, // std::vector<Candidate> &set_L, const idi L); void merge_two_consecutive_queues_in_place( std::vector<Candidate> &two_queues, const idi base_1, // const idi &end_1, const idi base_2, const idi &length_2); void merge_in_set_L( std::vector<Candidate> &set_L, const idi set_L_length, const idi num_queues, const idi local_queue_length); distf selecting_top_L_seq( std::vector<Candidate> &set_L, const idi global_L, // const idi local_L, const idi num_queues, const std::vector<idi> &local_queues_starts, std::vector<idi> &local_queues_sizes); void selecting_unchecked_top_M_seq( const idi query_id, const idi iter, std::vector<Candidate> &set_L, const std::vector<idi> &pointers_starts, const idi value_M, const idi num_queues, const std::vector<idi> &local_queues_starts, const std::vector<idi> &local_queues_sizes, std::vector<idi> &local_m_counts); void gather_unchecked_top_M_seq( const idi query_id, const idi iter, std::vector<Candidate> &set_L, const std::vector<idi> &pointers_starts, const idi value_M, const idi num_queues, const std::vector<idi> &local_queues_starts, const std::vector<idi> &local_queues_sizes, std::vector<idi> &top_m_candidates, idi &top_m_candidates_size, std::vector<idi> &bound_subs); // idi merge_all_queues_all_together_in_sequential( // std::vector<Candidate> &set_L, // std::vector<idi> &local_queues_ends, // const idi local_queue_length, // const idi L); // idi min_all_queues_at_heads( // const std::vector<Candidate> &set_L, // std::vector<idi> &queue_heads, // const std::vector<idi> &local_queues_ends, // const idi local_queue_length, // const idi L); public: // For Profiling // L3CacheMissRate cache_miss_kernel; uint64_t count_distance_computation_ = 0; uint64_t count_add_to_queue_ = 0; // uint64_t count_single_query_computation_ = 0; // distf dist_min_ = 0; // distf dist_max_ = 0; double time_merge_ = 0; double time_gather_ = 0; // double time_select_ = 0; // double time_select_L_ = 0.0; // double time_select_M_ = 0.0; double time_initialization_ = 0; double time_sequential_phase_ = 0; double time_parallel_phase_ = 0; double time_ending_ = 0.0; double time_assign_s_ = 0.0; double time_expand_ = 0.0; double time_pick_top_m_ = 0.0; double time_distance_computation_ = 0.0; double time_add_to_queue_ = 0.0; // double time_insert_ = 0; // double time_compare_minimum_ = 0; // double time_memmove_ = 0; // std::vector<double> time_memmove_list_; // L3CacheMissRate profile_miss_rate; // uint64_t number_local_elements_ = 0; // std::vector<idi> L_ids_; // 
std::vector<idi> M_ids_; ~Searching() { free(data_load_); data_load_ = nullptr; // free(queries_load_); // _mm_free(data_load_); free(queries_load_); queries_load_ = nullptr; // free(norms_); // free(nsg_graph_indices_); // free(nsg_graph_out_edges_); free(opt_nsg_graph_); opt_nsg_graph_ = nullptr; } void load_data_load(char *filename); void load_queries_load(char *filename); void load_nsg_graph(char *filename); // void build_opt_graph(); void prepare_init_ids( std::vector<unsigned> &init_ids, const unsigned L) const; // void prepare_candidate_queue_list( // const float *query_load, // std::vector<std::vector<efanna2e::Neighbor> > &retset_list, // std::vector<boost::dynamic_bitset<> > &is_visited_list, // const std::vector<unsigned> &init_ids, // const boost::dynamic_bitset<> &flags, // unsigned batch_start, // unsigned batch_size, // unsigned L); // void search_in_batch( //// const float *query_load, // size_t K, // size_t L, // unsigned batch_start, // unsigned batch_size, // std::vector< std::vector<Candidate> > &set_L_list, // std::vector< boost::dynamic_bitset<> > &is_visited_list, // const std::vector<idi> &init_ids, // const boost::dynamic_bitset<> &is_visited, // std::vector<std::vector<idi> > &set_K_list); void search_in_sequential( idi query_id, idi K, idi L, std::vector<Candidate> &set_L, // boost::dynamic_bitset<> &is_visited, // boost::dynamic_bitset<> is_visited, // std::vector<idi> &init_ids, const std::vector<idi> &init_ids, std::vector<idi> &set_K); // void search_in_sequential_BitVector( // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K); // idi get_out_degree(idi v_id) const // { // if (v_id < num_v_ - 1) { // return nsg_graph_indices_[v_id + 1] - nsg_graph_indices_[v_id]; // } else { // return num_e_ - nsg_graph_indices_[v_id]; // } // } void search_with_top_m( idi M, idi query_id, idi K, idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K); // std::vector< std::vector<idi> > &top_m_list); void search_with_top_m_scale_m( const PANNS::idi value_M_max, const PANNS::idi query_id, const PANNS::idi K, const PANNS::idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K, std::vector<idi> &top_m_candidates, boost::dynamic_bitset<> &is_visited); // void search_with_top_m_myths_M( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K); // void search_with_top_m_to_get_distance_range( // const PANNS::idi M, // const PANNS::idi query_id, //// const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids); // void search_with_top_m_profile_bit_CAS( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K); // void search_with_top_m_no_local_arrays( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // boost::dynamic_bitset<> &is_visited); void search_with_top_m_in_batch( PANNS::idi M, PANNS::idi batch_start, PANNS::idi batch_size, PANNS::idi K, PANNS::idi L, std::vector< std::vector<Candidate> > &set_L_list, const 
std::vector<idi> &init_ids, std::vector< std::vector<idi> > &set_K_list); // void para_search_with_top_m_critical_area( // idi M, // idi query_id, // idi K, // idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K); // void para_search_with_top_m_critical_area_no_omp( // idi M, // idi query_id, // idi K, // idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K); // void para_search_with_top_m_critical_area_yes_omp( // idi M, // idi query_id, // idi K, // idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K); // void para_search_with_top_m_visited_array( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // std::vector<uint8_t> &is_visited); // void para_search_with_top_m_merge_queues( // const idi M, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K); // void para_search_with_top_m_queues_seq_merge( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K); // void para_search_with_top_m_merge_queues_no_CAS( // const idi M, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // std::vector< std::vector<Candidate> > &local_queues_list, // std::vector<idi> &local_queues_ends, //// std::vector<uint8_t> &is_visited); // boost::dynamic_bitset<> &is_visited); // void para_search_with_top_m_merge_queues_in_array( // void para_search_with_top_m_merge_queues_new_threshold( // const idi M, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue //// std::vector< std::vector<Candidate> > &local_queues_list, // std::vector<Candidate> &local_queues_array, // std::vector<idi> &local_queues_ends, // Sizes of local queue // BitVector &is_visited); // void para_search_with_top_m_merge_queues_by_sort( // const idi M, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue //// std::vector<Candidate> &local_queues_array, // std::vector<idi> &local_queues_ends, // Sizes of local queue // std::vector<idi> &dest_offsets, // const std::vector<idi> &offsets_load_set_L, // Offsets for store into set_L. 
// BitVector &is_visited); // void para_search_with_top_m_merge_queues_better_merge_v0( // const idi M, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // std::vector<idi> &local_queues_ends, // Sizes of local queue //// std::vector<Candidate> &top_m_candidates, // std::vector<idi> &top_m_candidates, //// std::vector<uint8_t> &is_visited); // boost::dynamic_bitset<> &is_visited); //// BitVector &is_visited); // void para_search_with_top_m_merge_queues_better_merge_v2( // const idi M, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // std::vector<idi> &local_queues_ends, // Sizes of local queue //// std::vector<Candidate> &top_m_candidates, // std::vector<idi> &top_m_candidates, //// std::vector<uint8_t> &is_visited) // boost::dynamic_bitset<> &is_visited, // std::vector<distf> &local_thresholds); //// BitVector &is_visited) // void para_search_with_top_m_merge_queues_better_merge_v1( // const idi M, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // std::vector<idi> &local_queues_ends, // Sizes of local queue // std::vector<Candidate> &top_m_candidates, //// std::vector<idi> &top_m_candidates, //// std::vector<uint8_t> &is_visited); // boost::dynamic_bitset<> &is_visited); //// BitVector &is_visited); // void para_search_with_top_m_merge_queues_better_merge_v0_0( // const idi M, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // std::vector<idi> &local_queues_ends, // Sizes of local queue //// std::vector<Candidate> &top_m_candidates, // std::vector<idi> &top_m_candidates, //// std::vector<uint8_t> &is_visited) // boost::dynamic_bitset<> &is_visited); //// BitVector &is_visited) // void para_search_with_top_m_merge_queues_less_merge( // const idi M, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // std::vector<idi> &local_queues_ends, // Sizes of local queue //// std::vector<Candidate> &top_m_candidates, // std::vector<idi> &top_m_candidates, //// std::vector<uint8_t> &is_visited) // boost::dynamic_bitset<> &is_visited, // std::vector<distf> &local_thresholds); //// BitVector &is_visited) // void para_search_with_top_m_merge_queues_no_merge( // const idi M, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // std::vector<idi> &local_queues_ends, // Sizes of local queue //// std::vector<Candidate> &top_m_candidates, // std::vector<idi> &top_m_candidates, //// std::vector<uint8_t> &is_visited) // boost::dynamic_bitset<> &is_visited, // std::vector<distf> &local_thresholds, // const uint64_t computation_threshold); // void para_search_with_top_m_merge_queues_scale_m_v0( // const idi 
value_M_max, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // std::vector<idi> &local_queues_ends, // Sizes of local queue //// std::vector<Candidate> &top_m_candidates, // std::vector<idi> &top_m_candidates, //// std::vector<uint8_t> &is_visited); // boost::dynamic_bitset<> &is_visited); void para_search_with_top_m_merge_queues_middle_m( const idi value_M_middle, const idi value_M_max, const idi query_id, const idi K, const idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K, const idi local_queue_length, // Maximum size of local queue const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; std::vector<idi> &local_queues_ends, // Sizes of local queue // std::vector<Candidate> &top_m_candidates, std::vector<idi> &top_m_candidates, // std::vector<uint8_t> &is_visited) boost::dynamic_bitset<> &is_visited); // std::vector<distf> &local_thresholds); // BitVector &is_visited) // void para_search_with_top_m_merge_queues_scale_m_v2( // const idi value_M_min, // const idi value_M_max, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; // std::vector<idi> &local_queues_ends, // Sizes of local queue //// std::vector<Candidate> &top_m_candidates, // std::vector<idi> &top_m_candidates, //// std::vector<uint8_t> &is_visited) // boost::dynamic_bitset<> &is_visited); // void para_search_with_top_m_merge_queues_scale_m_v3( // const idi value_M_middle, // const idi value_M_max, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; // std::vector<idi> &local_queues_ends, // Sizes of local queue //// std::vector<Candidate> &top_m_candidates, // std::vector<idi> &top_m_candidates, //// std::vector<uint8_t> &is_visited) // boost::dynamic_bitset<> &is_visited); void para_search_with_top_m_merge_queues_middle_m_no_merge( const uint64_t computation_threshold, const idi value_M_middle, const idi value_M_max, const idi query_id, const idi K, const idi L, const idi init_size, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K, const idi local_queue_length, // Maximum size of local queue const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; std::vector<idi> &local_queues_ends, // Sizes of local queue std::vector<idi> &top_m_candidates, boost::dynamic_bitset<> &is_visited); void para_search_with_top_m_merge_queues_sequential_merge( const idi value_M_middle, const idi value_M_max, const idi query_id, const idi K, const idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K, const idi local_queue_length, // Maximum size of local queue const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; std::vector<idi> &local_queues_ends, // Sizes of local queue std::vector<idi> &top_m_candidates, boost::dynamic_bitset<> &is_visited); void para_search_with_top_m_nested_para( const idi 
batch_start, const idi batch_size, const idi value_M_middle, const idi value_M_max, const idi K, const idi L, std::vector< std::vector<Candidate> > &set_L_list, const std::vector<idi> &init_ids, std::vector< std::vector<idi> > &set_K_list, const idi local_queue_length, // Maximum size of local queue const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; std::vector< std::vector<idi> > &local_queues_ends_list, // Sizes of local queue std::vector< std::vector<idi> > &top_m_candidates_list, std::vector< boost::dynamic_bitset<> > &is_visited_list); void subsearch_with_top_m( const idi value_M_max, const idi query_id, const idi local_L, std::vector<Candidate> &set_L, const idi set_L_start, idi &set_L_size, std::vector<idi> &local_top_m_candidates, boost::dynamic_bitset<> &is_visited, uint64_t &local_count_distance_computation); void subsearch_top_m_for_one_iteration( const idi iter, idi &k_uc, const idi value_M, const idi query_id, const dataf *query_data, const idi L, std::vector<Candidate> &set_L, const idi set_L_start, idi &set_L_size, std::vector<idi> &top_m_candidates, boost::dynamic_bitset<> &is_visited, uint64_t &count_distance_computation); // void subsearch_top_m_for_one_iteration_lth( // const distf bound_lth, // const idi iter, // idi &k_uc, // const idi value_M, // const idi query_id, // const dataf *query_data, // const idi L, // std::vector<Candidate> &set_L, // const idi set_L_start, // idi &set_L_size, // std::vector<idi> &top_m_candidates, // boost::dynamic_bitset<> &is_visited, // uint64_t &count_distance_computation); void subsearch_top_m_for_one_iteration_lth_mth( const distf bound_lth, // const idi top_m_position, const idi iter, idi &k_uc, const idi local_m_count, const idi query_id, const dataf *query_data, const idi L, std::vector<Candidate> &set_L, const idi set_L_start, idi &set_L_size, std::vector<idi> &top_m_candidates, boost::dynamic_bitset<> &is_visited, uint64_t &count_distance_computation, double &time_pick_top_m, uint64_t &count_add_to_queue, double &time_distance_computation, double &time_add_to_queue); // void para_search_with_top_m_subsearch_v3( // const idi local_M_max, // const idi local_M_middle, // const idi query_id, // const idi K, // const idi global_L, // const idi local_L, //// const idi total_L, //// const idi init_queue_size, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const std::vector<idi> &local_queues_starts, // std::vector<idi> &local_queues_sizes, // std::vector<idi> &local_m_counts, // std::vector< std::vector<idi> > &top_m_candidates_list, // boost::dynamic_bitset<> &is_visited); // void para_search_with_top_m_subsearch_v4( // const idi local_M_max, // const idi local_M_middle, // const idi query_id, // const idi K, // const idi global_L, // const idi local_L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const std::vector<idi> &local_queues_starts, // std::vector<idi> &local_queues_sizes, // std::vector<idi> &local_m_counts, // std::vector<idi> &top_m_candidates, // boost::dynamic_bitset<> &is_visited); void para_search_with_top_m_subsearch_v5( const idi local_M_max, const idi local_M_middle, const idi query_id, const idi K, const idi global_L, const idi local_L, // const idi total_L, // const idi init_queue_size, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K, const std::vector<idi> &local_queues_starts, std::vector<idi> &local_queues_sizes, // std::vector<idi> 
&local_m_counts, std::vector<idi> &top_m_candidates, // std::vector< std::vector<idi> > &top_m_candidates_list, boost::dynamic_bitset<> &is_visited); void subsearch_for_simple_search( const idi query_id, const idi local_L, std::vector<Candidate> &set_L, const idi base_set_L, idi &set_L_end, // std::vector<uint8_t> &is_visited, boost::dynamic_bitset<> &is_visited, uint64_t &local_count_distance_computation); void para_simple_search_subsearch( const idi query_id, const idi K, const idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K, // std::vector<uint8_t> &is_visited); boost::dynamic_bitset<> &is_visited); // void para_search_with_top_m_merge_queues_global_threshold( // const idi value_M_middle, // const idi value_M_max, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; // std::vector<idi> &local_queues_ends, // Sizes of local queue // std::vector<idi> &top_m_candidates, // boost::dynamic_bitset<> &is_visited); // void para_search_with_top_m_merge_queues_distance_threshold_m( //// const idi value_M_middle, //// const idi value_M_max, // const distf relative_dist_threshold, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; // std::vector<idi> &local_queues_ends, // Sizes of local queue //// std::vector<Candidate> &top_m_candidates, // std::vector<idi> &top_m_candidates, //// std::vector<uint8_t> &is_visited) // boost::dynamic_bitset<> &is_visited); //// std::vector<distf> &local_thresholds) //// BitVector &is_visited) // void para_search_with_top_m_merge_queues_distance_threshold_m_middle_iteration( //// const idi value_M_middle, //// const idi value_M_max, // const distf relative_dist_threshold, // const idi middle_iteration, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; // std::vector<idi> &local_queues_ends, // Sizes of local queue //// std::vector<Candidate> &top_m_candidates, // std::vector<idi> &top_m_candidates, //// std::vector<uint8_t> &is_visited) // boost::dynamic_bitset<> &is_visited); // void para_search_with_top_m_merge_queues_collectors( // const idi value_M_middle, // const idi value_M_max, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; // std::vector<idi> &local_queues_ends, // Sizes of local queue //// std::vector<Candidate> &top_m_candidates, // std::vector<idi> &top_m_candidates, //// std::vector<uint8_t> &is_visited) // boost::dynamic_bitset<> &is_visited); // void para_search_with_top_m_merge_queues_selecting( // const idi value_M_middle, // const idi value_M_max, // const idi query_id, // const idi K, // const idi L, // 
//            std::vector<Candidate> &set_L,
//            const std::vector<idi> &init_ids,
//            std::vector<idi> &set_K,
//            const idi local_queue_length, // Maximum size of local queue
//            const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
//            std::vector<idi> &local_queues_ends, // Sizes of local queue
////            std::vector<Candidate> &top_m_candidates,
//            std::vector<idi> &top_m_candidates,
////            std::vector<uint8_t> &is_visited)
//            boost::dynamic_bitset<> &is_visited);
//    void para_search_with_top_m_merge_queues_myths(
//            const idi M,
//            const idi query_id,
//            const idi K,
//            const idi L,
//            std::vector<Candidate> &set_L,
//            const std::vector<idi> &init_ids,
//            std::vector<idi> &set_K,
//            const idi local_queue_length, // Maximum size of local queue
////            std::vector< std::vector<Candidate> > &local_queues_list,
//            std::vector<Candidate> &local_queues_array,
//            std::vector<idi> &local_queues_ends, // Sizes of local queue
//            BitVector &is_visited);
////            std::vector<uint8_t> &is_visited);
////            boost::dynamic_bitset<> &is_visited);
////    void para_prepare_init_ids(
////            std::vector<unsigned> &init_ids,
////            unsigned L) const;
//    void para_search_with_top_m_in_batch_embarassing_para(
//            const PANNS::idi M,
//            const PANNS::idi batch_start,
//            const PANNS::idi batch_size,
//            const PANNS::idi K,
//            const PANNS::idi L,
//            std::vector< std::vector<Candidate> > &set_L_list,
//            const std::vector<idi> &init_ids,
//            std::vector< std::vector<idi> > &set_K_list,
//            std::vector< boost::dynamic_bitset<> > &is_visited_list);

//    void test_neighbors_distance_to_father(
//            const idi num_selected) const;
//    void test_neighbors_normalized_distance_to_father(
//            const idi num_selected) const;

    void load_true_NN(
            const char *filename,
            std::vector< std::vector<idi> > &true_nn_list);

    void get_recall_for_all_queries(
            const std::vector< std::vector<idi> > &true_nn_list,
            const std::vector<std::vector<unsigned>> &set_K_list,
            std::unordered_map<unsigned, double> &recalls) const;
}; // Class Searching

/**
 * Input the data from the file.
 * @param filename
 */
inline void Searching::load_data_load(char *filename)
{
    auto old_d = dimension_;
    DiskIO::load_data(
            filename,
            data_load_,
            num_v_,
            dimension_);
    if (old_d) {
        if (old_d != dimension_) {
            std::cerr << "Error: data dimension " << dimension_
                      << " is not equal to query dimension " << old_d << "." << std::endl;
            exit(EXIT_FAILURE);
        }
    }
}

/**
 * Input queries from the file.
 * @param filename
 */
inline void Searching::load_queries_load(char *filename)
{
    auto old_d = dimension_;
    DiskIO::load_data(
            filename,
            queries_load_,
            num_queries_,
            dimension_);
    if (old_d) {
        if (old_d != dimension_) {
            std::cerr << "Error: query dimension " << dimension_
                      << " is not equal to data dimension " << old_d << "." << std::endl;
            exit(EXIT_FAILURE);
        }
    }
}

/**
 * Input the NSG graph from the file.
 * Reference: https://github.com/ZJULearning/nsg/blob/master/src/index_nsg.cpp
 * @param filename
 */
inline void Searching::load_nsg_graph(char *filename)
{
    std::ifstream fin(filename);
    if (!fin.is_open()) {
        std::cerr << "Error: cannot read file " << filename << "." << std::endl;
        exit(EXIT_FAILURE);
    }
    fin.read(reinterpret_cast<char *>(&width_), sizeof(unsigned));
    fin.read(reinterpret_cast<char *>(&ep_), sizeof(unsigned));

    data_bytes_ = (1 + dimension_) * sizeof(dataf);
    neighbor_bytes_ = (1 + width_) * sizeof(idi);
    vertex_bytes_ = data_bytes_ + neighbor_bytes_;
    opt_nsg_graph_ = (char *) malloc(num_v_ * vertex_bytes_);
    if (!opt_nsg_graph_) {
        std::cerr << "Error: not enough memory for opt_nsg_graph_."
<< std::endl; exit(EXIT_FAILURE); } idi v_id = 0; num_e_ = 0; char *base_location = opt_nsg_graph_; while (true) { idi degree; fin.read(reinterpret_cast<char *>(&degree), sizeof(unsigned)); if (fin.eof()) { break; } num_e_ += degree; // std::vector<idi> tmp_ngbrs(degree); // fin.read(reinterpret_cast<char *>(tmp_ngbrs.data()), degree * sizeof(unsigned)); // Norm and data distf norm = compute_norm(data_load_ + v_id * dimension_); // distf norm = compute_norm(v_id); std::memcpy(base_location, &norm, sizeof(distf)); // Norm memcpy(base_location + sizeof(distf), data_load_ + v_id * dimension_, dimension_ * sizeof(dataf)); // Data base_location += data_bytes_; // Neighbors memcpy(base_location, &degree, sizeof(idi)); // Number of neighbors fin.read(base_location + sizeof(idi), degree * sizeof(unsigned)); // Neighbors // memcpy(location + sizeof(idi), tmp_ngbrs.data(), degree * sizeof(unsigned)); base_location += neighbor_bytes_; ++v_id; } if (v_id != num_v_) { std::cerr << "Error: NSG data has " << v_id << " vertices, but origin data has " << num_v_ << " vertices." << std::endl; exit(EXIT_FAILURE); } free(data_load_); data_load_ = nullptr; // //////////////////////// // idi v_id = 0; // num_e_ = 0; // while (true) { // idi degree; // fin.read(reinterpret_cast<char *>(&degree), sizeof(unsigned)); // if (fin.eof()) { // break; // } // num_e_ += degree; // // std::vector<idi> ngbrs(degree); // fin.read(reinterpret_cast<char *>(ngbrs.data()), degree * sizeof(unsigned)); //// nsg_graph_.push_back(ngbrs); //// tmp_edge_list.push_back(ngbrs); // edge_list_.push_back(ngbrs); // ++v_id; // } // if (v_id != num_v_) { // std::cerr << "Error: NSG data has " << v_id // << " vertices, but origin data has " << num_v_ << " vertices." << std::endl; // exit(EXIT_FAILURE); // } } /** * Load those true top-K neighbors (ground truth) of queries * @param filename * @param[out] true_nn_list */ inline void Searching::load_true_NN( const char *filename, std::vector< std::vector<idi> > &true_nn_list) // unsigned &t_K) { std::ifstream fin(filename); if (!fin.is_open()) { fprintf(stderr, "Error: cannot open file %s\n", filename); exit(EXIT_FAILURE); } idi t_query_num; idi t_K; // unsigned t_K; fin.read(reinterpret_cast<char *>(&t_query_num), sizeof(t_query_num)); fin.read(reinterpret_cast<char *>(&t_K), sizeof(t_K)); // if (t_query_num != query_num) { // fprintf(stderr, "Error: query_num %u is not equal to the record %u in true-NN file %s\n", // query_num, t_query_num, filename); // exit(EXIT_FAILURE); // } if (t_query_num < num_queries_) { fprintf(stderr, "Error: t_query_num %u is smaller than num_queries_ %u\n", t_query_num, num_queries_); exit(EXIT_FAILURE); } if (t_K < 100) { fprintf(stderr, "Error: t_K %u is smaller than 100.\n", t_K); exit(EXIT_FAILURE); } // data = new unsigned[(size_t) t_query_num * (size_t) t_K]; true_nn_list.resize(t_query_num); for (idi q_i = 0; q_i < t_query_num; ++q_i) { true_nn_list[q_i].resize(t_K); } for (unsigned q_i = 0; q_i < t_query_num; ++q_i) { // size_t offset = q_i * t_K; for (unsigned n_i = 0; n_i < t_K; ++n_i) { unsigned id; float dist; fin.read(reinterpret_cast<char *>(&id), sizeof(id)); fin.read(reinterpret_cast<char *>(&dist), sizeof(dist)); // data[offset + n_i] = id; true_nn_list[q_i][n_i] = id; } } fin.close(); } inline void Searching::get_recall_for_all_queries( const std::vector< std::vector<idi> > &true_nn_list, const std::vector<std::vector<unsigned>> &set_K_list, std::unordered_map<unsigned, double> &recalls) const { // if (t_K < 100) { // fprintf(stderr, "Error: t_K 
%u is smaller than 100.\n", t_K); // exit(EXIT_FAILURE); // } if (true_nn_list[0].size() < 100) { fprintf(stderr, "Error: Number of true nearest neighbors of a query is smaller than 100.\n"); exit(EXIT_FAILURE); } recalls[1] = 0.0; recalls[5] = 0.0; recalls[10] = 0.0; recalls[20] = 0.0; recalls[50] = 0.0; recalls[100] = 0.0; for (unsigned q_i = 0; q_i < num_queries_; ++q_i) { // size_t offset = q_i * t_K; for (unsigned top_i = 0; top_i < 100; ++top_i) { unsigned true_id = true_nn_list[q_i][top_i]; for (unsigned n_i = 0; n_i < 100; ++n_i) { if (set_K_list[q_i][n_i] == true_id) { if (n_i < 1) recalls[1] += 1; if (n_i < 5) recalls[5] += 1; if (n_i < 10) recalls[10] += 1; if (n_i < 20) recalls[20] += 1; if (n_i < 50) recalls[50] += 1; if (n_i < 100) recalls[100] += 1; } } } } recalls[1] /= 1.0 * num_queries_; recalls[5] /= 5.0 * num_queries_; recalls[10] /= 10.0 * num_queries_; recalls[20] /= 20.0 * num_queries_; recalls[50] /= 50.0 * num_queries_; recalls[100] /= 100.0 * num_queries_; } inline void Searching::search_in_sequential( const idi query_id, const idi K, const idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K) { // {//test // printf("Iteration: Relative_Distance:\n"); //// printf("Iteration: Relative_Distance:\n"); //// printf("----query: %u----\n", query_id); // } boost::dynamic_bitset<> is_visited(num_v_); for (idi v_i = 0; v_i < L; ++v_i) { is_visited[init_ids[v_i]] = true; } const dataf *query_data = queries_load_ + query_id * dimension_; for (idi v_i = 0; v_i < L; ++v_i) { idi v_id = init_ids[v_i]; _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); } // Get the distances of all candidates, store in the set set_L. for (unsigned i = 0; i < L; i++) { unsigned v_id = init_ids[i]; auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); dataf norm = *v_data++; ++count_distance_computation_; distf dist = compute_distance_with_norm(v_data, query_data, norm); set_L[i] = Candidate(v_id, dist, false); // False means not checked. } std::sort(set_L.begin(), set_L.begin() + L); idi k = 0; // Index of every queue's first unchecked candidate. idi tmp_count = 0; // for debug // {// Print relative distance //// distf top_dist = set_L[0].distance_; // for (idi i_l = 0; i_l < L; ++i_l) { // printf("%u %f\n", // tmp_count, set_L[i_l].distance_); //// tmp_count, set_L[i_l].distance_ - top_dist); // } // } while (k < L) { Candidate &top_cand = set_L[k]; unsigned nk = L; if (!top_cand.is_checked_) { ++tmp_count; top_cand.is_checked_ = true; idi v_id = top_cand.id_; // Vertex ID. 
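            // Expand the chosen candidate: prefetch its adjacency list, then compute the
            // distance to every unvisited neighbor and insert those that beat the current
            // L-th best into the ordered queue set_L.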
_mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); idi *out_edges = (idi *) (opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_); idi out_degree = *out_edges++; for (idi n_i = 0; n_i < out_degree; ++n_i) { _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); } // Traverse v_id's all neighbors, pushing them into the queue for (idi e_i = 0; e_i < out_degree; ++e_i) { idi nb_id = out_edges[e_i]; if (is_visited[nb_id]) { continue; } is_visited[nb_id] = true; auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); dataf norm = *nb_data++; // Compute the distance ++count_distance_computation_; distf dist = compute_distance_with_norm(nb_data, query_data, norm); if (dist > set_L[L-1].distance_) { continue; } // if (dist >= set_L[L-1].distance_) { // continue; // } Candidate cand(nb_id, dist, false); // Insert into the queue idi r = insert_into_queue(set_L, L, cand); if (r < nk) { nk = r; } } // {// Print relative distance //// distf top_dist = set_L[0].distance_; // for (idi i_l = 0; i_l < L; ++i_l) { // printf("%u %f\n", // tmp_count, set_L[i_l].distance_); //// tmp_count, set_L[i_l].distance_ - top_dist); // } // } } if (nk <= k) { k = nk; } else { ++k; } } // cache_miss_kernel.measure_stop(); for (size_t k_i = 0; k_i < K; ++k_i) { set_K[k_i] = set_L[k_i].id_; } // {//test // if (0 == query_id) { // exit(1); // } // } } //inline void Searching::search_in_sequential_BitVector( // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K) //{ //// boost::dynamic_bitset<> is_visited(num_v_); // BitVector is_visited(num_v_); // // // for (idi v_i = 0; v_i < L; ++v_i) { //// is_visited[init_ids[v_i]] = true; // is_visited.atomic_set_bit(init_ids[v_i]); // } // // const dataf *query_data = queries_load_ + query_id * dimension_; // // // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. // // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // std::sort(set_L.begin(), set_L.begin() + L); // idi k = 0; // Index of every queue's first unchecked candidate. // while (k < L) { // Candidate &top_cand = set_L[k]; // unsigned nk = L; // if (!top_cand.is_checked_) { // top_cand.is_checked_ = true; // idi v_id = top_cand.id_; // Vertex ID. 
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // // Traverse v_id's all neighbors, pushing them into the queue // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; //// if (is_visited[nb_id]) { //// continue; //// } //// is_visited[nb_id] = true; // // {// Self-defined BitVector // if (is_visited.atomic_is_bit_set(nb_id)) { // continue; // } // is_visited.atomic_set_bit(nb_id); // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; // // Compute the distance // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } //// if (dist >= set_L[L-1].distance_) { //// continue; //// } // Candidate cand(nb_id, dist, false); // // Insert into the queue // idi r = insert_into_queue(set_L, L, cand); // if (r < nk) { // nk = r; // } // } // } // if (nk <= k) { // k = nk; // } else { // ++k; // } // } //// cache_miss_kernel.measure_stop(); // // for (size_t k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } //} /** * Prepare init_ids and flags, as they are constant for all queries. * @param[out] init_ids * @param L */ inline void Searching::prepare_init_ids( std::vector<unsigned int> &init_ids, const unsigned L) const { // idi num_ngbrs = get_out_degree(ep_); // edgei edge_start = nsg_graph_indices_[ep_]; // // Store ep_'s neighbors as candidates // idi tmp_l = 0; // for (; tmp_l < L && tmp_l < num_ngbrs; tmp_l++) { // init_ids[tmp_l] = nsg_graph_out_edges_[edge_start + tmp_l]; // } // std::unordered_set<idi> visited_ids; boost::dynamic_bitset<> is_selected(num_v_); idi *out_edges = (idi *) (opt_nsg_graph_ + ep_ * vertex_bytes_ + data_bytes_); idi out_degree = *out_edges++; idi init_ids_end = 0; // for (; tmp_l < L && tmp_l < out_degree; tmp_l++) { for (idi e_i = 0; e_i < out_degree && init_ids_end < L; ++e_i) { // idi v_id = out_edges[tmp_l]; idi v_id = out_edges[e_i]; if(is_selected[v_id]) { continue; } is_selected[v_id] = true; // init_ids[tmp_l] = v_id; init_ids[init_ids_end++] = v_id; // init_ids[tmp_l] = out_edges[tmp_l]; // visited_ids.insert(init_ids[tmp_l]); } // for (idi i = 0; i < tmp_l; ++i) { // is_visited[init_ids[i]] = true; // } // If ep_'s neighbors are not enough, add other random vertices idi tmp_id = ep_ + 1; // use tmp_id to replace rand(). 
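    // The loop below deterministically emulates the random filling: advance tmp_id,
    // wrap it around num_v_, skip vertices already selected, and append the rest
    // until init_ids holds L entries.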
while (init_ids_end < L) { tmp_id %= num_v_; idi v_id = tmp_id++; if (is_selected[v_id]) { continue; } // if (visited_ids.find(id) != visited_ids.end()) { // continue; // } is_selected[v_id] = true; // visited_ids.insert(id); init_ids[init_ids_end++] = v_id; // tmp_l++; } } // TODO: re-code in AVX-512 inline dataf Searching::compute_norm( const dataf *data) const // idi vertex_id) // const std::vector<PANNS::dataf> &data) // size_t loc_start, // idi dimension) { // const dataf *a = data.data() + loc_start; // const dataf *a = data_load_ + vertex_id * dimension_; // idi size = dimension_; dataf result = 0; //#define AVX_L2NORM(addr, dest, tmp) \ // tmp = _mm256_load_ps(addr); \ // tmp = _mm256_mul_ps(tmp, tmp); \ // dest = _mm256_add_ps(dest, tmp); #define AVX_L2NORM(addr, dest, tmp) \ tmp = _mm256_loadu_ps(addr); \ tmp = _mm256_mul_ps(tmp, tmp); \ dest = _mm256_add_ps(dest, tmp); __m256 sum; __m256 l0, l1; unsigned D = (dimension_ + 7) & ~7U; unsigned DR = D % 16; unsigned DD = D - DR; const float *l = data; const float *e_l = l + DD; float unpack[8] __attribute__ ((aligned (32))) = {0, 0, 0, 0, 0, 0, 0, 0}; sum = _mm256_load_ps(unpack); // sum = _mm256_loadu_ps(unpack); if (DR) { AVX_L2NORM(e_l, sum, l0); } for (unsigned i = 0; i < DD; i += 16, l += 16) { AVX_L2NORM(l, sum, l0); AVX_L2NORM(l + 8, sum, l1); } _mm256_store_ps(unpack, sum); // _mm256_storeu_ps(unpack, sum); result = unpack[0] + unpack[1] + unpack[2] + unpack[3] + unpack[4] + unpack[5] + unpack[6] + unpack[7]; return result; } inline dataf Searching::compute_distance_with_norm( const dataf *v_data, const dataf *q_data, // idi vertex_id, // idi query_id, // const std::vector<PANNS::dataf> &d_data, // const std::vector<PANNS::dataf> &q_data, // PANNS::idi d_start, // PANNS::idi q_start, const dataf vertex_norm) const // idi dimension) { // idi size = dimension_; float result = 0; //#define AVX_DOT(addr1, addr2, dest, tmp1, tmp2) \ // tmp1 = _mm256_load_ps(addr1);\ // tmp2 = _mm256_load_ps(addr2);\ // tmp1 = _mm256_mul_ps(tmp1, tmp2); \ // dest = _mm256_add_ps(dest, tmp1); #define AVX_DOT(addr1, addr2, dest, tmp1, tmp2) \ tmp1 = _mm256_loadu_ps(addr1);\ tmp2 = _mm256_loadu_ps(addr2);\ tmp1 = _mm256_mul_ps(tmp1, tmp2); \ dest = _mm256_add_ps(dest, tmp1); __m256 sum; __m256 l0, l1; __m256 r0, r1; unsigned D = (dimension_ + 7) & ~7U; unsigned DR = D % 16; unsigned DD = D - DR; const float *l = v_data; const float *r = q_data; // const float *l = (float *) (opt_nsg_graph_ + vertex_id * vertex_bytes_ + sizeof(distf)); // const float *r = queries_load_ + query_id * dimension_; const float *e_l = l + DD; const float *e_r = r + DD; float unpack[8] __attribute__ ((aligned (32))) = {0, 0, 0, 0, 0, 0, 0, 0}; sum = _mm256_load_ps(unpack); // sum = _mm256_loadu_ps(unpack); if (DR) { AVX_DOT(e_l, e_r, sum, l0, r0); } for (unsigned i = 0; i < DD; i += 16, l += 16, r += 16) { AVX_DOT(l, r, sum, l0, r0); AVX_DOT(l + 8, r + 8, sum, l1, r1); } _mm256_store_ps(unpack, sum); // _mm256_storeu_ps(unpack, sum); result = unpack[0] + unpack[1] + unpack[2] + unpack[3] + unpack[4] + unpack[5] + unpack[6] + unpack[7]; result = -2 * result + vertex_norm; return result; } //// DEPRECATED. // The difference from insert_into_queue is that add_into_queue will increase the queue size by 1. 
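// Note: the fully commented-out add_into_queue() below is the earlier, deprecated
// variant mentioned above. The maintained overloads that follow keep the queue sorted
// by locating the insert position with std::lower_bound and shifting the tail with
// memmove, growing the queue by one element per insertion up to its capacity.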
//inline idi Searching::add_into_queue( // std::vector<PANNS::Candidate> &queue, // idi &queue_top, // const idi queue_size, // const PANNS::Candidate &cand) //{ // assert(queue_size > 1); // if (0 == queue_top) { // queue[queue_top++] = cand; // return 0; // } else if (1 == queue_top) { // if (queue[0] < cand) { // queue[queue_top++] = cand; // return 1; // } else { // queue[++queue_top] = queue[0]; // queue[0] = cand; // return 0; // } // } // // if (queue[queue_top - 1] < cand) { // if (queue_top < queue_size) { // queue[queue_top++] = cand; // } // return queue_top; // } // // idi r = insert_into_queue( // queue, // queue_top - 1, // cand); //// {//test //// printf("r: %u" //// "queue_top: %u " //// "queue_size: %u\n", //// r, //// queue_top, //// queue_size); //// } // return r; // //// ///////////////////////////////////////////////////////////// //// // Find the insert location //// auto it_loc = std::lower_bound(queue.begin(), queue.begin() + queue_top, cand); //// idi insert_loc = it_loc - queue.begin(); //// if (insert_loc == queue_size) { //// return queue_size; //// } //// //// // Insert ////// if (queue_top == queue_size) { ////// // If full already ////// --queue_top; ////// } //// memmove(reinterpret_cast<char *>(queue.data() + insert_loc + 1), //// reinterpret_cast<char *>(queue.data() + insert_loc), //// (queue_top - insert_loc) * sizeof(Candidate)); ////// for (idi q_i = queue_top; q_i > insert_loc; --q_i) { ////// queue.at(q_i) = queue.at(q_i - 1); ////// } //// queue[insert_loc] = cand; //// ++queue_top; //// return insert_loc; //} // The difference from insert_into_queue is that add_into_queue will increase the queue size by 1. inline idi Searching::add_into_queue( std::vector<PANNS::Candidate> &queue, idi &queue_top, const idi queue_size, const PANNS::Candidate &cand) { if (0 == queue_top) { queue[queue_top++] = cand; return 0; } // Find the insert location auto it_loc = std::lower_bound(queue.begin(), queue.begin() + queue_top, cand); idi insert_loc = it_loc - queue.begin(); if (insert_loc == queue_size) { return queue_size; } // Insert if (queue_top == queue_size) { // If full already --queue_top; } memmove(reinterpret_cast<char *>(queue.data() + insert_loc + 1), reinterpret_cast<char *>(queue.data() + insert_loc), (queue_top - insert_loc) * sizeof(Candidate)); // for (idi q_i = queue_top; q_i > insert_loc; --q_i) { // queue.at(q_i) = queue.at(q_i - 1); // } queue[insert_loc] = cand; ++queue_top; return insert_loc; } // The difference from insert_into_queue is that add_into_queue will increase the queue size by 1. // add_into_queue with a queue_start inline idi Searching::add_into_queue( std::vector<PANNS::Candidate> &queue, const idi queue_start, idi &queue_size, // The insertion location starting from queue_start const idi queue_capacity, // The maximum capacity of queue, independent with queue_start. const PANNS::Candidate &cand) { if (0 == queue_size) { queue[queue_start + queue_size++] = cand; return 0; } idi queue_end = queue_start + queue_size; // Find the insert location const auto it_loc = std::lower_bound(queue.begin() + queue_start, queue.begin() + queue_end, cand); // auto it_loc = std::lower_bound(queue.begin(), queue.begin() + queue_size, cand); idi insert_loc = it_loc - queue.begin(); if (insert_loc != queue_end) { if (cand.id_ == it_loc->id_) { // Duplicate return queue_capacity; } if (queue_size >= queue_capacity) { // Queue is full --queue_size; --queue_end; } } else { // insert_loc == queue_end, insert at the end? 
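        // cand is not better than any element currently in the queue:
        // append it only if there is spare capacity, otherwise drop it
        // (returning queue_capacity signals that nothing was inserted).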
if (queue_size < queue_capacity) { // Queue is not full // Insert at the end queue[insert_loc] = cand; ++queue_size; return queue_size - 1; } else { // Queue is full return queue_capacity; } } // Add into queue memmove(reinterpret_cast<char *>(queue.data() + insert_loc + 1), reinterpret_cast<char *>(queue.data() + insert_loc), (queue_end - insert_loc) * sizeof(Candidate)); queue[insert_loc] = cand; ++queue_size; return insert_loc - queue_start; } inline void Searching::add_into_queue_at( const Candidate &cand, std::vector<Candidate> &queue, const idi insert_index, // The insertion location, independent with queue_start const idi queue_start, idi &queue_size, // The number of elements in queue, independent with queue_start const idi queue_length) // The maximum capacity of queue, independent with queue_start. { const idi dest_index = queue_start + insert_index; if (queue_size == queue_length) { --queue_size; } memmove(reinterpret_cast<char *>(queue.data() + dest_index + 1), reinterpret_cast<char *>(queue.data() + dest_index), (queue_size - insert_index) * sizeof(Candidate)); queue[dest_index] = cand; ++queue_size; } inline void Searching::insert_one_element_at( // const T &cand, // T *queue_base, const Candidate &cand, std::vector<Candidate> &queue, const idi insert_index, const idi queue_start, const idi queue_size) { const idi dest_index = queue_start + insert_index; memmove(reinterpret_cast<char *>(queue.data() + dest_index + 1), reinterpret_cast<char *>(queue.data() + dest_index), (queue_size - insert_index - 1) * sizeof(Candidate)); queue[dest_index] = cand; // memmove(reinterpret_cast<char *>(queue_base + dest_index + 1), // reinterpret_cast<char *>(queue_base + dest_index), // (queue_size - insert_index - 1) * sizeof(T)); // for (idi q_i = queue_size - 1; q_i > insert_index; --q_i) { // queue_base.at(q_i + queue_start) = queue_base.at(q_i - 1 + queue_start); // } // queue_base[dest_index] = cand; } /** * PANNS version of InsertIntoPool(): binary-search to find the insert place and then move. 
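 * Candidates with equal distance are ordered by vertex ID, smaller ID first.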
* @param[out] c_queue * @param c_queue_top * @param cand * @return */ inline idi Searching::insert_into_queue( std::vector<PANNS::Candidate> &c_queue, PANNS::idi c_queue_top, PANNS::Candidate cand) { if (c_queue[0].distance_ > cand.distance_) { // If the first memmove(reinterpret_cast<char *>(c_queue.data() + 1), reinterpret_cast<char *>(c_queue.data()), c_queue_top * sizeof(Candidate)); c_queue[0] = cand; return 0; } else if (c_queue[c_queue_top - 1].distance_ == cand.distance_) { // If the last if (c_queue[c_queue_top - 1].id_ > cand.id_) { // Use ID as the second metrics for ordering c_queue[c_queue_top - 1] = cand; return c_queue_top - 1; } else { return c_queue_top; } } idi left = 0; idi right = c_queue_top; while (left < right) { idi mid = (right - left) / 2 + left; if (c_queue[mid].distance_ > cand.distance_) { right = mid; } else { left = mid + 1; } } // If the distance is the same if (0 != left && c_queue[left - 1].distance_ != cand.distance_) { ; } else { while (0 != left && c_queue[left - 1].distance_ == cand.distance_ && c_queue[left - 1].id_ > cand.id_) { // Use ID as the second metrics for ordering --left; } } // Insert to left memmove(reinterpret_cast<char *>(c_queue.data() + left + 1), reinterpret_cast<char *>(c_queue.data() + left), (c_queue_top - left) * sizeof(Candidate)); c_queue[left] = cand; return left; } //inline void Searching::cand_pushes_ngbrs_into_queue( // idi cand_id, // const dataf *query_data, // idi L, // idi &new_k, // boost::dynamic_bitset<> &is_visited, // std::vector<Candidate> &set_L) //{ // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = true; // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist >= set_L[L-1].distance_) { // continue; // } // Candidate cand(nb_id, dist, false); // idi r = insert_into_queue(set_L, L, cand); // if (r < nk) { // nk = r; // } // } //} //inline void Searching::search_in_sequential( // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K) const //{ // boost::dynamic_bitset<> is_visited(num_v_); // // for (idi v_i = 0; v_i < L; ++v_i) { // is_visited[init_ids[v_i]] = true; // } // const dataf *query_data = queries_load_ + query_id * dimension_; // // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // std::sort(set_L.begin(), set_L.begin() + L); // idi k = 0; // Index of every queue's first unchecked candidate. 
// while (k < L) { // Candidate &top_cand = set_L[k]; // unsigned nk = L; // if (!top_cand.is_checked_) { // top_cand.is_checked_ = true; // idi v_id = top_cand.id_; // Vertex ID. // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // // Traverse v_id's all neighbors, pushing them into the queue // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = true; // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; // // Compute the distance // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } // Candidate cand(nb_id, dist, false); // // Insert into the queue // idi r = insert_into_queue(set_L, L, cand); // if (r < nk) { // nk = r; // } // } // } // if (nk <= k) { // k = nk; // } else { // ++k; // } // } // // for (size_t k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } //} // Deprecated: cannot use std::set, because its element is constant. //inline void Searching::search_in_sequential( // const idi query_id, // const idi K, // const idi L, //// std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K) const //{ // std::set<Candidate> set_L; // boost::dynamic_bitset<> is_visited(num_v_); // // for (idi v_i = 0; v_i < L; ++v_i) { // is_visited[init_ids[v_i]] = true; // } // const dataf *query_data = queries_load_ + query_id * dimension_; // // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; // distf dist = compute_distance_with_norm(v_data, query_data, norm); //// set_L[i] = Candidate(v_id, dist, false); // False means not checked. // set_L.emplace(v_id, dist, false); // } //// std::sort(set_L.begin(), set_L.begin() + L); // idi k = 0; // Index of every queue's first unchecked candidate. // while (k < L) { //// Candidate &top_cand = set_L[k]; // std::set<Candidate>::iterator top_cand = std::next(set_L.begin(), k); // unsigned nk = L; // if (!top_cand->is_checked_) { // top_cand->is_checked_ = true; // idi v_id = top_cand.id_; // Vertex ID. 
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // // Traverse v_id's all neighbors, pushing them into the queue // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = true; // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; // // Compute the distance // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } // Candidate cand(nb_id, dist, false); // // Insert into the queue // idi r = insert_into_queue(set_L, L, cand); // if (r < nk) { // nk = r; // } // } // } // if (nk <= k) { // k = nk; // } else { // ++k; // } // } // // for (size_t k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } //} /* Function: * queue1_size is fixed. */ inline idi Searching::merge_two_queues_into_1st_queue_seq_fixed( std::vector<Candidate> &queue1, const idi queue1_start, const idi queue1_size, std::vector<Candidate> &queue2, const idi queue2_start, const idi queue2_size) // const idi limit_size) { assert(queue1_size && queue2_size); // Record the lowest insert location. auto it_loc = std::lower_bound( queue1.begin() + queue1_start, queue1.begin() + queue1_start + queue1_size, queue2[queue2_start]); idi insert_index = it_loc - (queue1.begin() + queue1_start); if (insert_index == queue1_size) { return insert_index; } else if (insert_index == queue1_size - 1) { queue1[queue1_start + insert_index] = queue2[queue2_start]; return insert_index; } // Insert the 1st of queue2 if (queue2[queue2_start].id_ != it_loc->id_) { // Not Duplicate insert_one_element_at( queue2[queue2_start], queue1, insert_index, queue1_start, queue1_size); } if (queue2_size == 1) { return insert_index; } // Insert idi q_i_1 = insert_index + 1 + queue1_start; idi q_i_2 = queue2_start + 1; const idi q_i_1_bound = queue1_start + queue1_size; const idi q_i_2_bound = queue2_start + queue2_size; // const idi insert_i_bound = queue1_start + limit_size; for (idi insert_i = insert_index + 1; insert_i < queue1_size; ++insert_i) { if (q_i_1 >= q_i_1_bound || q_i_2 >= q_i_2_bound) { // queue1 or queue2 finished traverse. Rest o break; } else if (queue1[q_i_1] < queue2[q_i_2]) { ++q_i_1; } else if (queue2[q_i_2] < queue1[q_i_1]) { // Insert queue2[q_i_2] into queue1 insert_one_element_at( queue2[q_i_2++], queue1, insert_i, queue1_start, queue1_size); ++q_i_1; } else { // Duplicate ++q_i_2; ++q_i_1; } } return insert_index; } /* Function: * queue1_size should be updated. * queue1_length should be provided. */ inline void Searching::merge_two_queues_into_1st_queue_seq_incr( std::vector<Candidate> &queue1, const idi queue1_start, idi &queue1_size, // The number of element in queue1, independent with queue1_start. const idi queue1_length, // The maximum capacity of queue1, independent with queue1_start. std::vector<Candidate> &queue2, const idi queue2_start, const idi queue2_size) // const idi limit_size) { assert(queue1_size && queue2_size); // Record the lowest insert location. 
auto it_loc = std::lower_bound( queue1.begin() + queue1_start, queue1.begin() + queue1_start + queue1_size, queue2[queue2_start]); idi insert_index = it_loc - (queue1.begin() + queue1_start); if (insert_index == queue1_size) { idi copy_count = (queue1_size + queue2_size > queue1_length) ? queue1_length - queue1_size : queue2_size; memmove(queue1.data() + queue1_start + queue1_size, queue2.data() + queue2_start, copy_count * sizeof(Candidate)); queue1_size += copy_count; return; } if (queue2[queue2_start].id_ != it_loc->id_) { // Not Duplicate add_into_queue_at( queue2[queue2_start], queue1, insert_index, queue1_start, queue1_size, queue1_length); } if (queue2_size == 1) { return; } // Insert idi q_i_1 = insert_index + 1 + queue1_start; idi q_i_2 = queue2_start + 1; idi q_i_1_bound = queue1_start + queue1_size; // When queue1_size is updated, so should be q_i_1_bound. const idi q_i_2_bound = queue2_start + queue2_size; // idi insert_i; for (idi insert_i = insert_index + 1; insert_i < queue1_length; ++insert_i) { if (q_i_1 >= q_i_1_bound) { queue1_size += std::min(queue1_length - insert_i, q_i_2_bound - q_i_2); for ( ; insert_i < queue1_size; ++insert_i) { queue1[queue1_start + insert_i] = queue2[q_i_2++]; } break; } else if (q_i_2 >= q_i_2_bound) { break; } else if (queue1[q_i_1] < queue2[q_i_2]) { ++q_i_1; } else if (queue2[q_i_2] < queue1[q_i_1]) { add_into_queue_at( queue2[q_i_2++], queue1, insert_i, queue1_start, queue1_size, queue1_length); ++q_i_1; q_i_1_bound = queue1_start + queue1_size; } else { // Duplicate ++q_i_2; ++q_i_1; } } } inline idi Searching::merge_all_queues_para_list( std::vector< std::vector<Candidate> > &local_queues_list, std::vector<idi> &local_queues_ends, std::vector<Candidate> &set_L, const idi L) { int size = 1 << (static_cast<idi>(log2(num_threads_))); idi log2size = static_cast<idi>(log2(size)); for (idi d = 0; d < log2size; ++d) { uint32_t by = 1 << (d + 1); for (int i = 0; i < size; i += by) { idi ai = i + (1 << (d + 1)) - 1; // i + 2^(d+1) - 1 idi bi = i + (1 << d) - 1; // i + 2^d - 1 if (0 == local_queues_ends[bi]) { continue; } if (local_queues_ends[ai] == 0) { local_queues_list[ai].swap(local_queues_list[bi]); std::swap(local_queues_ends[ai], local_queues_ends[bi]); continue; } // else if (local_queues_ends[ai] < L && local_queues_ends[bi] >= L) { // local_queues_list[ai].swap(local_queues_list[bi]); // std::swap(local_queues_ends[ai], local_queues_ends[bi]); // } // merge_two_queues_into_1st_queue_seq( // local_queues_list[ai], // 0, // local_queues_ends[ai], // local_queues_list[bi], // 0, // local_queues_ends[bi]); idi tmp_length = local_queues_ends[ai] + local_queues_ends[bi]; std::vector<Candidate> tmp_queue(tmp_length); std::merge( local_queues_list[ai].begin(), local_queues_list[ai].begin() + local_queues_ends[ai], local_queues_list[bi].begin(), local_queues_list[bi].begin() + local_queues_ends[bi], tmp_queue.begin()); if (tmp_length > L) { tmp_queue.resize(L); tmp_length = L; } else if (tmp_length < L) { tmp_queue.resize(L); } local_queues_list[ai].swap(tmp_queue); local_queues_ends[ai] = tmp_length; // {// Print queue a // printf("d: %u " // "i: %u " // "ai: %u " // "local_queues_ends[%d]: %d\n", // d, // i, // ai, // ai, // local_queues_ends[ai]); // for (idi i_q = 0; i_q < local_queues_ends[ai]; ++i_q) { // printf("[%u]: " // "id: %u " // "dist: %f\n", // i_q, // local_queues_list[ai][i_q].id_, // local_queues_list[ai][i_q].distance_); // } // } } } // Remain, prefix-sum-like merge if (size != num_threads_) { for (int i = size; i < 
num_threads_; ++i) { idi ai = i; idi bi = i - 1; if (0 == local_queues_ends[bi]) { continue; } if (local_queues_ends[ai] == 0) { local_queues_list[ai].swap(local_queues_list[bi]); std::swap(local_queues_ends[ai], local_queues_ends[bi]); continue; } // else if (local_queues_ends[ai] < L && local_queues_ends[bi] >= L) { // local_queues_list[ai].swap(local_queues_list[bi]); // std::swap(local_queues_ends[ai], local_queues_ends[bi]); // } // merge_two_queues_into_1st_queue_seq( // local_queues_list[ai], // 0, // local_queues_ends[ai], // local_queues_list[bi], // 0, // local_queues_ends[bi]); idi tmp_length = local_queues_ends[ai] + local_queues_ends[bi]; std::vector<Candidate> tmp_queue(tmp_length); std::merge( local_queues_list[ai].begin(), local_queues_list[ai].begin() + local_queues_ends[ai], local_queues_list[bi].begin(), local_queues_list[bi].begin() + local_queues_ends[bi], tmp_queue.begin()); if (tmp_length > L) { tmp_queue.resize(L); tmp_length = L; } else if (tmp_length < L) { tmp_queue.resize(L); } local_queues_list[ai].swap(tmp_queue); local_queues_ends[ai] = tmp_length; } } // Merge into set_L idi r = L; if (local_queues_ends[num_threads_ - 1]) { r = merge_two_queues_into_1st_queue_seq_fixed( set_L, 0, L, local_queues_list[num_threads_ - 1], 0, local_queues_ends[num_threads_ - 1]); } // Reset local_queues_ends std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0); return r; } /* Function: * Use large local_queues_array as a concatenation of all queues */ inline idi Searching::merge_all_queues_para_array( std::vector<Candidate> &set_L, std::vector<idi> &local_queues_ends, const idi local_queue_length, const idi L) { const int num_queues = num_threads_; idi nk = L; int size = 1 << (static_cast<idi>(log2(num_queues))); idi log2size = static_cast<idi>(log2(size)); for (idi d = 0; d < log2size; ++d) { uint32_t by = 1 << (d + 1); for (int i = 0; i < size; i += by) { idi ai = i + (1 << (d + 1)) - 1; // i + 2^(d+1) - 1 idi a_start = ai * local_queue_length; idi bi = i + (1 << d) - 1; // i + 2^d - 1 idi b_start = bi * local_queue_length; if (0 == local_queues_ends[bi]) { continue; } if (local_queues_ends[ai] == 0) { std::copy(set_L.begin() + b_start, set_L.begin() + b_start + local_queues_ends[bi], set_L.begin() + a_start); // Copy bi to ai local_queues_ends[ai] = local_queues_ends[bi]; local_queues_ends[bi] = 0; continue; } if (ai != static_cast<idi>(num_queues - 1)) { merge_two_queues_into_1st_queue_seq_incr( set_L, a_start, local_queues_ends[ai], local_queue_length, set_L, b_start, local_queues_ends[bi]); } else { idi r = merge_two_queues_into_1st_queue_seq_fixed( set_L, a_start, L, set_L, b_start, local_queues_ends[bi]); if (r < nk) { nk = r; } } } } // Remain, prefix-sum-like merge if (size != num_queues) { for (int i = size; i < num_queues; ++i) { idi ai = i; idi a_start = ai * local_queue_length; idi bi = i - 1; idi b_start = bi * local_queue_length; if (0 == local_queues_ends[bi]) { continue; } if (local_queues_ends[ai] == 0) { std::copy(set_L.begin() + b_start, set_L.begin() + b_start + local_queues_ends[bi], set_L.begin() + a_start); // Copy bi to ai local_queues_ends[ai] = local_queues_ends[bi]; local_queues_ends[bi] = 0; continue; } if (ai != static_cast<idi>(num_queues - 1)) { merge_two_queues_into_1st_queue_seq_incr( set_L, a_start, local_queues_ends[ai], local_queue_length, set_L, b_start, local_queues_ends[bi]); } else { idi r = merge_two_queues_into_1st_queue_seq_fixed( set_L, a_start, L, set_L, b_start, local_queues_ends[bi]); if (r < nk) { nk = r; } } } } // 
Reset local_queues_ends // Not do this for Collector Idea or Selecting Idea std::fill(local_queues_ends.begin(), local_queues_ends.end() - 1, 0); // std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0); return nk; // return r; } /* Function: * When merge all queues (in an array, and [num_threads_ - 1] is the global queue), * the starting local is at [queue_base] */ inline idi Searching::merge_all_queues_queue_base( // std::vector< std::vector<Candidate> > &local_queues_list, std::vector<Candidate> &set_L, // std::vector<Candidate> &local_queues_array, std::vector<idi> &local_queues_ends, const idi queue_base, const int real_threads, const idi local_queue_length, // std::vector<Candidate> &set_L, const idi L) { idi nk = L; int size = 1 << (static_cast<idi>(log2(real_threads))); // int size = 1 << (static_cast<idi>(log2(num_threads_))); idi log2size = static_cast<idi>(log2(size)); for (idi d = 0; d < log2size; ++d) { idi by = 1 << (d + 1); idi i_bound = size + queue_base; for (idi i = queue_base; i < i_bound; i += by) { // for (int i = 0; i < size; i += by) { // idi ai = i + (1 << (d + 1)) - 1 + queue_base; // i + 2^(d+1) - 1 idi ai = i + (1 << (d + 1)) - 1; // i + 2^(d+1) - 1 idi a_start = ai * local_queue_length; // idi bi = i + (1 << d) - 1 + queue_base; // i + 2^d - 1 idi bi = i + (1 << d) - 1; // i + 2^d - 1 idi b_start = bi * local_queue_length; if (0 == local_queues_ends[bi]) { continue; } if (local_queues_ends[ai] == 0) { // local_queues_list[ai].swap(local_queues_list[bi]); std::copy(set_L.begin() + b_start, set_L.begin() + b_start + local_queues_ends[bi], set_L.begin() + a_start); // Copy bi to ai local_queues_ends[ai] = local_queues_ends[bi]; local_queues_ends[bi] = 0; continue; } if (ai != static_cast<idi>(num_threads_ - 1)) { merge_two_queues_into_1st_queue_seq_incr( set_L, a_start, local_queues_ends[ai], local_queue_length, set_L, b_start, local_queues_ends[bi]); } else { idi r = merge_two_queues_into_1st_queue_seq_fixed( set_L, a_start, L, set_L, b_start, local_queues_ends[bi]); if (r < nk) { nk = r; } } } } // Remain, prefix-sum-like merge if (size != real_threads) { // if (size != num_threads_) { for (int i = size + queue_base; i < num_threads_; ++i) { // for (int i = size; i < num_threads_; ++i) { idi ai = i; idi a_start = ai * local_queue_length; idi bi = i - 1; idi b_start = bi * local_queue_length; if (0 == local_queues_ends[bi]) { continue; } if (local_queues_ends[ai] == 0) { std::copy(set_L.begin() + b_start, set_L.begin() + b_start + local_queues_ends[bi], set_L.begin() + a_start); // Copy bi to ai local_queues_ends[ai] = local_queues_ends[bi]; local_queues_ends[bi] = 0; continue; } if (ai != static_cast<idi>(num_threads_ - 1)) { merge_two_queues_into_1st_queue_seq_incr( set_L, a_start, local_queues_ends[ai], local_queue_length, set_L, b_start, local_queues_ends[bi]); } else { idi r = merge_two_queues_into_1st_queue_seq_fixed( set_L, a_start, L, set_L, b_start, local_queues_ends[bi]); if (r < nk) { nk = r; } } } } // Reset local_queues_ends std::fill(local_queues_ends.begin(), local_queues_ends.end() - 1, 0); // std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0); return nk; // return r; } inline void Searching::merge_two_consecutive_queues_in_place( std::vector<Candidate> &two_queues, const idi base_1, // const idi &end_1, const idi base_2, const idi &length_2) { // idi tid = omp_get_thread_num(); idi index_1 = base_1; idi index_2 = base_2; const idi bound_2 = base_2 + length_2; while (index_1 < index_2 && index_2 < bound_2) { Candidate e_1 = 
two_queues[index_1]; Candidate e_2 = two_queues[index_2]; if (e_1 < e_2) { ++index_1; } else if (e_2 < e_1) { // time_memmove_list_[tid] -= WallTimer::get_time_mark(); std::memmove(two_queues.data() + index_1 + 1, two_queues.data() + index_1, (index_2 - index_1) * sizeof(Candidate)); // time_memmove_list_[tid] += WallTimer::get_time_mark(); two_queues[index_1] = e_2; ++index_1; ++index_2; } else { // Duplicate, but have no idea what to do right now // time_memmove_list_[tid] -= WallTimer::get_time_mark(); std::memmove(two_queues.data() + index_1 + 1, two_queues.data() + index_1, (index_2 - index_1) * sizeof(Candidate)); // time_memmove_list_[tid] += WallTimer::get_time_mark(); index_1 += 2; ++index_2; } } } ///* Function: // * Merge all queues to the global queue, in a two-queue-merge way // */ //inline idi Searching::merge_all_queues_all_together_in_sequential( // std::vector<Candidate> &set_L, // std::vector<idi> &local_queues_ends, // const idi local_queue_length, // const idi L) //{ // const idi num_queues = num_threads_; // const idi global_queue_base = (num_queues - 1) * local_queue_length; // std::vector<idi> queue_heads(num_queues, 0); // idi queue_id_min; // //// bool is_finished = false; // bool is_1st_selected = true; // idi nk = L; // The highest location of insertion. // { // for (idi q_i = 0; q_i < num_queues; ++q_i) { // if (0 == local_queues_ends[q_i]) { // continue; // } // _mm_prefetch(set_L.data() + q_i * local_queue_length, _MM_HINT_T0); // } // } // while (queue_heads[num_queues - 1] < L) { //// time_compare_minimum_ -= WallTimer::get_time_mark(); // queue_id_min = min_all_queues_at_heads( // set_L, // queue_heads, // local_queues_ends, // local_queue_length, // L); //// time_compare_minimum_ += WallTimer::get_time_mark(); // if (queue_id_min != num_queues - 1) { // Not in the global queue //// time_insert_ -= WallTimer::get_time_mark(); // insert_one_element_at( // set_L[queue_heads[queue_id_min] + queue_id_min * local_queue_length], // set_L, // queue_heads[num_queues - 1], // global_queue_base, // L); //// time_insert_ += WallTimer::get_time_mark(); // if (is_1st_selected) { // Get the highest inserting location // is_1st_selected = false; // nk = queue_heads[num_queues - 1]; // } // ++queue_heads[queue_id_min]; // } // ++queue_heads[num_queues - 1]; // } // // // Reset local_queues_ends // std::fill(local_queues_ends.begin(), local_queues_ends.end() - 1, 0); // return nk; //} ///* Function: // * Find the minimum among queues at their head locations // */ //inline idi Searching::min_all_queues_at_heads( // const std::vector<Candidate> &set_L, // std::vector<idi> &queue_heads, // const std::vector<idi> &local_queues_ends, // const idi local_queue_length, // const idi L) //{ // const idi num_queues = num_threads_; // idi min_queue_id = num_queues - 1; // Candidate min_candidate = set_L[queue_heads[min_queue_id] + min_queue_id * local_queue_length]; // // for (idi q_i = 0; q_i < num_queues - 1; ++q_i) { // if (queue_heads[q_i] >= local_queues_ends[q_i]) { // q_i finished // continue; // } // const Candidate &ele = set_L[queue_heads[q_i] + q_i * local_queue_length]; // if (ele < min_candidate) { // min_candidate = ele; // min_queue_id = q_i; // } else if (ele.id_ == min_candidate.id_) { // Redundant element // ++queue_heads[q_i]; // } // } // // return min_queue_id; //} inline void Searching::merge_in_set_L( std::vector<Candidate> &set_L, const idi set_L_length, const idi num_queues, const idi local_queue_length) { idi size = 1 << (static_cast<idi>(log2(num_queues))); 
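    // size is the largest power of two not exceeding num_queues; the pairwise
    // in-place merges below cover these queues first, and any remaining queues
    // are then merged one by one into the prefix that starts at queue 0.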
idi log2size = static_cast<idi>(log2(size)); for (idi d = 0; d < log2size; ++d) { const idi merge_length = (local_queue_length << d); idi by = 1 << (d + 1); // Parallel for for (idi i = 0; i < size; i += by) { // idi a = i + (1 << d) - 1; // idi b = i + (1 << (d + 1)) - 1; idi a = i; idi b = i + (1 << d); idi base_a = a * local_queue_length; idi base_b = b * local_queue_length; if (base_a >= set_L_length || base_b >= set_L_length) { continue; } idi length_b; if (a + by < size) { length_b = merge_length; } else { // The last one if (size == num_queues) { length_b = set_L_length - base_b; } else { length_b = merge_length; } } // printf("a: %u b: %u " // "base_a: %u base_b: %u length_b: %u\n", // a, b, // base_a, base_b, length_b); merge_two_consecutive_queues_in_place( set_L, base_a, base_b, length_b); } } if (size != num_queues) { for (idi i = size; i < num_queues; ++i) { idi a = 0; idi b = i; idi base_a = a; idi base_b = b * local_queue_length; if (base_b >= set_L_length) { continue; } idi length_b; if (b != num_queues - 1) { length_b = local_queue_length; } else { length_b = set_L_length - base_b; } // printf("a: %u b: %u " // "base_a: %u base_b: %u length_b: %u\n", // a, b, // base_a, base_b, length_b); merge_two_consecutive_queues_in_place( set_L, base_a, base_b, length_b); } } } /* * 7/5/2020-20:27 * Every queue keeps only elements which can be ordered in the top-L globally. * local_queues_lengths records the end location for all queues */ inline distf Searching::selecting_top_L_seq( std::vector<Candidate> &set_L, const idi global_L, const idi num_queues, const std::vector<idi> &local_queues_starts, std::vector<idi> &local_queues_sizes) { std::vector<idi> pointers(num_queues, 0); distf bound_lth; idi rank = 0; bool is_finished = false; distf min_dist = FLT_MAX; idi min_q_i; idi min_id; while (rank < global_L) { is_finished = true; min_dist = FLT_MAX; for (idi q_i = 0; q_i < num_queues; ++q_i) { if (pointers[q_i] >= local_queues_sizes[q_i]) { // q_i is finished continue; } is_finished = false; idi sub = pointers[q_i] + local_queues_starts[q_i]; distf tmp_dist = set_L[sub].distance_; idi tmp_id = set_L[sub].id_; if (tmp_dist < min_dist) { min_dist = tmp_dist; min_id = tmp_id; min_q_i = q_i; } else if (tmp_dist == min_dist && tmp_id < min_id) { min_id = tmp_id; min_q_i = q_i; } } if (is_finished) { {//test printf("Error: selecting_top_L_seq: only found %u elements but global_L is %u.\n", rank, global_L); } break; } bound_lth = min_dist; ++pointers[min_q_i]; ++rank; } std::copy(pointers.begin(), pointers.end(), local_queues_sizes.begin()); return bound_lth; } /* * 7/24/2020-10:08 * Record for every queue the position that contains the top-M unchecked vertices. * So the total expanded vertices should still be M, which means the computation should * be the same with merging idea. 
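 * Each queue keeps a pointer that skips already-checked candidates; the
 * globally closest unchecked candidate is taken up to value_M times in total,
 * and local_m_counts[q] records how many of them came from queue q.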
*/ inline void Searching::selecting_unchecked_top_M_seq( const idi query_id, const idi iter, std::vector<Candidate> &set_L, const std::vector<idi> &pointers_starts, const idi value_M, const idi num_queues, const std::vector<idi> &local_queues_starts, const std::vector<idi> &local_queues_sizes, std::vector<idi> &local_m_counts) { std::vector<idi> pointers(pointers_starts); // std::vector<idi> pointers(num_queues, 0); std::fill(local_m_counts.begin(), local_m_counts.end(), 0); idi rank = 0; bool is_finished = true; distf min_dist = FLT_MAX; idi min_q_i; idi min_id; while (rank < value_M) { min_dist = FLT_MAX; for (idi q_i = 0; q_i < num_queues; ++q_i) { idi &pointer = pointers[q_i]; idi sub = pointer + local_queues_starts[q_i]; // {//test // if (133 == query_id && // 3 == iter && // 321341 == set_L[sub].id_) { // printf("(%u %f)\n", // set_L[sub].id_, set_L[sub].distance_); // } // } while (pointer < local_queues_sizes[q_i] && set_L[sub].is_checked_) { ++pointer; ++sub; } if (pointer >= local_queues_sizes[q_i]) { // q_i is finished continue; } is_finished = false; distf tmp_dist = set_L[sub].distance_; idi tmp_id = set_L[sub].id_; if (tmp_dist < min_dist) { min_dist = tmp_dist; min_id = tmp_id; min_q_i = q_i; } else if (tmp_dist == min_dist && tmp_id < min_id) { min_id = tmp_id; min_q_i = q_i; } } if (!is_finished) { is_finished = true; ++pointers[min_q_i]; ++rank; ++local_m_counts[min_q_i]; } else { break; } } // std::copy(pointers.begin(), pointers.end(), local_top_m_positions.begin()); } /* * 7/27/2020-15:41 * Gather the top-M unchecked vertices from local queues. */ inline void Searching::gather_unchecked_top_M_seq( const idi query_id, const idi iter, std::vector<Candidate> &set_L, const std::vector<idi> &pointers_starts, const idi value_M, const idi num_queues, const std::vector<idi> &local_queues_starts, const std::vector<idi> &local_queues_sizes, std::vector<idi> &top_m_candidates, idi &top_m_candidates_size, std::vector<idi> &bound_subs) { std::vector<idi> pointers(pointers_starts); // std::vector<idi> pointers(num_queues, 0); // std::fill(local_m_counts.begin(), local_m_counts.end(), 0); // idi rank = 0; bool is_finished = true; distf min_dist = FLT_MAX; idi min_q_i; idi min_id; while (top_m_candidates_size < value_M) { min_dist = FLT_MAX; for (idi q_i = 0; q_i < num_queues; ++q_i) { idi &pointer = pointers[q_i]; idi sub = pointer + local_queues_starts[q_i]; while (pointer < local_queues_sizes[q_i] && set_L[sub].is_checked_) { ++pointer; ++sub; } if (pointer >= local_queues_sizes[q_i]) { // q_i is finished continue; } is_finished = false; distf tmp_dist = set_L[sub].distance_; idi tmp_id = set_L[sub].id_; if (tmp_dist < min_dist) { min_dist = tmp_dist; min_id = tmp_id; min_q_i = q_i; } else if (tmp_dist == min_dist && tmp_id < min_id) { min_id = tmp_id; min_q_i = q_i; } } if (!is_finished) { is_finished = true; idi sub = local_queues_starts[min_q_i] + pointers[min_q_i]; top_m_candidates[top_m_candidates_size++] = set_L[sub].id_; set_L[sub].is_checked_ = true; // Checked ++pointers[min_q_i]; // ++rank; // ++local_m_counts[min_q_i]; } else { break; } } // std::copy(pointers.begin(), pointers.end(), local_top_m_positions.begin()); std::copy(pointers.begin(), pointers.end(), bound_subs.begin()); } inline void Searching::search_with_top_m( const PANNS::idi M, const PANNS::idi query_id, const PANNS::idi K, const PANNS::idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K) { boost::dynamic_bitset<> is_visited(num_v_); { for (idi c_i = 0; c_i < L; 
++c_i) { is_visited[init_ids[c_i]] = true; } } const dataf *query_data = queries_load_ + query_id * dimension_; for (idi v_i = 0; v_i < L; ++v_i) { idi v_id = init_ids[v_i]; _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); } // Get the distances of all candidates, store in the set set_L. for (unsigned i = 0; i < L; i++) { unsigned v_id = init_ids[i]; auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); dataf norm = *v_data++; ++count_distance_computation_; distf dist = compute_distance_with_norm(v_data, query_data, norm); set_L[i] = Candidate(v_id, dist, false); // False means not checked. } std::sort(set_L.begin(), set_L.begin() + L); std::vector<idi> top_m_candidates(M); idi top_m_candidates_end = 0; idi k = 0; // Index of first unchecked candidate. idi tmp_count = 0; // for debug while (k < L) { ++tmp_count; unsigned nk = L; // Select M candidates idi last_k = L; for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { if (set_L[c_i].is_checked_) { continue; } last_k = c_i; // Record the location of the last candidate selected. set_L[c_i].is_checked_ = true; top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; } // Push M candidates' neighbors into the queue. for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { idi cand_id = top_m_candidates[c_i]; _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); idi out_degree = *out_edges++; for (idi n_i = 0; n_i < out_degree; ++n_i) { _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); } for (idi e_i = 0; e_i < out_degree; ++e_i) { idi nb_id = out_edges[e_i]; if (is_visited[nb_id]) { continue; } is_visited[nb_id] = true; auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); dataf norm = *nb_data++; ++count_distance_computation_; distf dist = compute_distance_with_norm(nb_data, query_data, norm); if (dist > set_L[L-1].distance_) { continue; } Candidate cand(nb_id, dist, false); idi r = insert_into_queue(set_L, L, cand); if (r < nk) { nk = r; } } } top_m_candidates_end = 0; // Clear top_m_candidates if (nk <= last_k) { k = nk; } else { k = last_k + 1; } } for (idi k_i = 0; k_i < K; ++k_i) { set_K[k_i] = set_L[k_i].id_; } } inline void Searching::search_with_top_m_scale_m( const PANNS::idi value_M_max, const PANNS::idi query_id, const PANNS::idi K, const PANNS::idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K, std::vector<idi> &top_m_candidates, boost::dynamic_bitset<> &is_visited) { // boost::dynamic_bitset<> is_visited(num_v_); { for (idi c_i = 0; c_i < L; ++c_i) { is_visited[init_ids[c_i]] = true; } } const dataf *query_data = queries_load_ + query_id * dimension_; for (idi v_i = 0; v_i < L; ++v_i) { idi v_id = init_ids[v_i]; _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); } // Get the distances of all candidates, store in the set set_L. for (unsigned i = 0; i < L; i++) { unsigned v_id = init_ids[i]; auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); dataf norm = *v_data++; ++count_distance_computation_; distf dist = compute_distance_with_norm(v_data, query_data, norm); set_L[i] = Candidate(v_id, dist, false); // False means not checked. } std::sort(set_L.begin(), set_L.begin() + L); // std::vector<idi> top_m_candidates(M); idi top_m_candidates_end = 0; idi k = 0; // Index of first unchecked candidate. 
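    // Unlike search_with_top_m(), the expansion width M starts at 1 and is
    // doubled after every iteration until it reaches value_M_max.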
idi tmp_count = 0; // for debug idi M = 1; while (k < L) { ++tmp_count; unsigned nk = L; // Select M candidates idi last_k = L; for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { if (set_L[c_i].is_checked_) { continue; } last_k = c_i; // Record the location of the last candidate selected. set_L[c_i].is_checked_ = true; top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; } // Push M candidates' neighbors into the queue. for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { idi cand_id = top_m_candidates[c_i]; _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); idi out_degree = *out_edges++; for (idi n_i = 0; n_i < out_degree; ++n_i) { _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); } for (idi e_i = 0; e_i < out_degree; ++e_i) { idi nb_id = out_edges[e_i]; if (is_visited[nb_id]) { continue; } is_visited[nb_id] = true; auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); dataf norm = *nb_data++; ++count_distance_computation_; distf dist = compute_distance_with_norm(nb_data, query_data, norm); if (dist > set_L[L-1].distance_) { continue; } Candidate cand(nb_id, dist, false); idi r = insert_into_queue(set_L, L, cand); if (r < nk) { nk = r; } } } top_m_candidates_end = 0; // Clear top_m_candidates if (nk <= last_k) { k = nk; } else { k = last_k + 1; } if (M < value_M_max) { M <<= 1; } } for (idi k_i = 0; k_i < K; ++k_i) { set_K[k_i] = set_L[k_i].id_; } {// Reset is_visited.reset(); } } ////void Searching::search_with_top_m( //inline void Searching::search_with_top_m_to_get_distance_range( // const PANNS::idi M, // const PANNS::idi query_id, //// const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids) //// std::vector<idi> &set_K) //{ // dist_max_ = -FLT_MAX; // dist_min_ = FLT_MAX; // boost::dynamic_bitset<> is_visited(num_v_); // // { // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = true; // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. //// {// For distance range //// if (dist > dist_max_) { //// dist_max_ = dist; //// } //// if (dist < dist_min_) { //// dist_min_ = dist; //// } //// } // } // std::sort(set_L.begin(), set_L.begin() + L); // // std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. // idi tmp_count = 0; // for debug // while (k < L) { // ++tmp_count; // // unsigned nk = L; // // // Select M candidates // idi last_k = L; // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // if (set_L[c_i].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. 
// set_L[c_i].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; // } // // // Push M candidates' neighbors into the queue. // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = true; // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } //// if (dist >= set_L[L-1].distance_) { //// continue; //// } // Candidate cand(nb_id, dist, false); // idi r = insert_into_queue(set_L, L, cand); // if (r < nk) { // nk = r; // } //// {// For distance range //// if (dist > dist_max_) { //// dist_max_ = dist; //// } //// if (dist < dist_min_) { //// dist_min_ = dist; //// } //// } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // {// For histogram // for (idi i_l = 0; i_l < L; ++i_l) { // distf dist = set_L[i_l].distance_; // {// For distance range // if (dist > dist_max_) { // dist_max_ = dist; // } // if (dist < dist_min_) { // dist_min_ = dist; // } // } // } // } // } // //// for (idi k_i = 0; k_i < K; ++k_i) { //// set_K[k_i] = set_L[k_i].id_; //// } //} // ////void Searching::search_with_top_m( //inline void Searching::search_with_top_m_myths_M( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K) //{ //// {//test //// printf("query_id: %u\n", query_id); //// } // const idi loc_range = L / 3; // // // boost::dynamic_bitset<> is_visited(num_v_); // // { // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = true; // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // std::sort(set_L.begin(), set_L.begin() + L); // //// {// For histogram //// const distf dist_range = dist_max_ - dist_min_; //// printf("iter:%u\n", 0); //// for (idi i_l = 0; i_l < L; ++i_l) { //// printf("%f\n", (set_L[i_l].distance_ - dist_min_) / dist_range * 100.0); //// } //// } // // std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. 
// idi tmp_count = 0; // for debug // while (k < L) { // std::vector<idi> range_count(3, 0); // idi zero_inserted_count = 0; //// {//test //// printf("tmp_count: %u\n", tmp_count); //// } // ++tmp_count; // // unsigned nk = L; // // // Select M candidates // idi last_k = L; // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // if (set_L[c_i].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[c_i].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; // } //// {//test //// printf("top_m_candidates_ends: %u\n", top_m_candidates_end); //// } // { // if (0 == top_m_candidates_end) { // break; // } // } // // // uint64_t count_neighbors = 0; // uint64_t count_inserted = 0; // std::vector<idi> locs_to_count(M); // // Push M candidates' neighbors into the queue. // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // // count_neighbors += out_degree; // idi num_inserted = 0; // // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = true; // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } //// if (dist >= set_L[L-1].distance_) { //// continue; //// } // ++num_inserted; // Candidate cand(nb_id, dist, false); // idi r = insert_into_queue(set_L, L, cand); //// { //// printf("c_i: %u " //// "count: %u " //// "loc_inserted: %u\n", //// c_i, //// num_inserted, //// r); //// } // if (r < nk) { // nk = r; // } // { // ++range_count[r / loc_range]; // } // } // { // if (0 == num_inserted) { // ++zero_inserted_count; // } // locs_to_count[c_i] = num_inserted; // count_inserted += num_inserted; // } //// { //// printf("c_i: %u " //// "num_inserted: %u\n", //// c_i, //// num_inserted); //// } // } //// { //// for (idi c_i = top_m_candidates_end; c_i < M; ++c_i) { //// locs_to_count[c_i] = 0; //// } //// printf("iter:%u\n", tmp_count); //// for (idi c_i = 0; c_i < M; ++c_i) { //// printf("%u %u\n", c_i, locs_to_count[c_i]); //// } //// } //// {//test //// idi sum = 0; //// for (const idi ct : range_count) sum += ct; //// printf("tmp_count: %u " //// "k: %u " //// "actual_M: %u %.1f%% " //// "zero_ins: %u %.1f%% " //// "1/3: %u %.1f%% " //// "2/3: %u %.1f%% " //// "3/3: %u %.1f%%\n", //// tmp_count, //// k, //// top_m_candidates_end, 100.0 * top_m_candidates_end / M, //// zero_inserted_count, 100.0 * zero_inserted_count / top_m_candidates_end, //// range_count[0], 100.0 * range_count[0] / sum, //// range_count[1], 100.0 * range_count[1] / sum, //// range_count[2], 100.0 * range_count[2] / sum); //// } // top_m_candidates_end = 0; // Clear top_m_candidates // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // { // printf("query:%uiter: %u " // "#neighbors: %lu " // "#inserted: %lu " // "ratio: %.2f%%\n", // query_id, tmp_count, // count_neighbors, // count_inserted, // 100.0 * count_inserted / 
count_neighbors); // } //// {// For histogram ////// const auto it_min = std::min_element(set_L.begin(), set_L.end()); ////// const auto it_max = std::max_element(set_L.begin(), set_L.end()); ////// const distf dist_min = it_min->distance_; ////// const distf dist_max = it_max->distance_; ////// const distf dist_min = it_min->distance_ - 1.0; ////// const distf dist_max = it_max->distance_ + 1.0; //// const distf dist_range = dist_max_ - dist_min_; ////// const distf dist_range = dist_max - dist_min; ////// { ////// printf("it_min->distance_: %f dist_min: %f\n", ////// it_min->distance_, dist_min); ////// } ////// const distf dist_range = it_max->distance_ - it_min->distance_; //// printf("iter:%u\n", tmp_count); //// for (idi i_l = 0; i_l < L; ++i_l) { ////// printf("%f\n", set_L[i_l].distance_); ////// printf("%f\n", (set_L[i_l].distance_ - dist_min) / dist_range * 100.0); //// printf("%f\n", (set_L[i_l].distance_ - dist_min_) / dist_range * 100.0); ////// printf("%.2f\n", (set_L[i_l].distance_ - it_min->distance_) / dist_range * 100.0); //// } //// } // } // // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } // if (query_id == 3) { // exit(1); // } //} // //// Sequential Top-M algorithm for profiling purpose: byte array, CAS, and OpenMP ////void Searching::search_with_top_m( //inline void Searching::search_with_top_m_profile_bit_CAS( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K) //{ //// std::vector<uint8_t> is_visited(num_v_, 0); // Byte array //// boost::dynamic_bitset<> is_visited(num_v_); // Bit array // BitVector is_visited(num_v_); // // { // // for (idi c_i = 0; c_i < L; ++c_i) { //// is_visited[init_ids[c_i]] = true; // is_visited.atomic_set_bit(init_ids[c_i]); // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; // // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. // // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // std::sort(set_L.begin(), set_L.begin() + L); // // std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. // idi tmp_count = 0; // for debug // while (k < L) { // ++tmp_count; // // unsigned nk = L; // // // Select M candidates // idi last_k = L; // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // if (set_L[c_i].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[c_i].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; // } // // // Push M candidates' neighbors into the queue. 
// // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; //// if (is_visited[nb_id]) { //// continue; //// } //// is_visited[nb_id] = true; // //// if (!AtomicOps::CAS(is_visited.data() + nb_id, //// static_cast<uint8_t>(0), //// static_cast<uint8_t>(1))) { //// continue; //// } // {// Self-defined BitVector // if (is_visited.atomic_is_bit_set(nb_id)) { // continue; // } // is_visited.atomic_set_bit(nb_id); // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } //// if (dist >= set_L[L-1].distance_) { //// continue; //// } // Candidate cand(nb_id, dist, false); // idi r = insert_into_queue(set_L, L, cand); // if (r < nk) { // nk = r; // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // } // // // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } //// //// {//test //// for (idi k_i = 0; k_i < K; ++k_i) { //// printf("%u: %u: %u %f\n", //// query_id, //// k_i, set_L[k_i].id_, set_L[k_i].distance_); //// } //// exit(1); //// } //} ///// Backup //inline void Searching::search_with_top_m( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K) //// std::vector< std::vector<idi> > &top_m_list) //{ // boost::dynamic_bitset<> is_visited(num_v_); // // { // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = true; // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // std::sort(set_L.begin(), set_L.begin() + L); // // std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. // idi tmp_count = 0; // for debug // while (k < L) { // ++tmp_count; // // unsigned nk = L; // // // Select M candidates // idi last_k = L; // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // if (set_L[c_i].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[c_i].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; // } // // // Push M candidates' neighbors into the queue. 
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = true; // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } //// if (dist >= set_L[L-1].distance_) { //// continue; //// } // Candidate cand(nb_id, dist, false); // idi r = insert_into_queue(set_L, L, cand); // if (r < nk) { // nk = r; // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // } // // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } //// //// {//test //// for (idi k_i = 0; k_i < K; ++k_i) { //// printf("%u: %u: %u %f\n", //// query_id, //// k_i, set_L[k_i].id_, set_L[k_i].distance_); //// } //// exit(1); //// } //} // ////// DEPRECATED: the is_visited array cannot be shared among threads. //inline void Searching::search_with_top_m_no_local_arrays( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // boost::dynamic_bitset<> &is_visited) //// std::vector< std::vector<idi> > &top_m_list) //{ //// boost::dynamic_bitset<> is_visited(num_v_); // // { // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = true; // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // std::sort(set_L.begin(), set_L.begin() + L); // // std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. // idi tmp_count = 0; // for debug // while (k < L) { // ++tmp_count; // // unsigned nk = L; // // // Select M candidates // idi last_k = L; // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // if (set_L[c_i].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[c_i].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; // } // // // Push M candidates' neighbors into the queue. 
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = true; // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } //// if (dist >= set_L[L-1].distance_) { //// continue; //// } // Candidate cand(nb_id, dist, false); // idi r = insert_into_queue(set_L, L, cand); // if (r < nk) { // nk = r; // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // } // // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } //// //// {//test //// for (idi k_i = 0; k_i < K; ++k_i) { //// printf("%u: %u: %u %f\n", //// query_id, //// k_i, set_L[k_i].id_, set_L[k_i].distance_); //// } //// exit(1); //// } //} inline void Searching::search_with_top_m_in_batch( const PANNS::idi M, const PANNS::idi batch_start, const PANNS::idi batch_size, const PANNS::idi K, const PANNS::idi L, std::vector< std::vector<Candidate> > &set_L_list, const std::vector<idi> &init_ids, std::vector< std::vector<idi> > &set_K_list) { std::vector< boost::dynamic_bitset<> > is_visited_list(batch_size, boost::dynamic_bitset<> (num_v_)); // Prepare the init_ids { // for (idi q_i = 0; q_i < batch_size; ++q_i) { auto &is_visited = is_visited_list[q_i]; for (idi c_i = 0; c_i < L; ++c_i) { is_visited[init_ids[c_i]] = true; } } } // Initialize set_L_list { // for (idi q_i = 0; q_i < batch_size; ++q_i) { const dataf *query_data = queries_load_ + (q_i + batch_start) * dimension_; for (idi i = 0; i < L; i++) { idi v_id = init_ids[i]; auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); dataf norm = *v_data++; // ++count_distance_computation_; distf dist = compute_distance_with_norm(v_data, query_data, norm); set_L_list[q_i][i] = Candidate(v_id, dist, false); // False means not checked. } std::sort(set_L_list[q_i].begin(), set_L_list[q_i].begin() + L); } } { std::vector<idi> joint_queue(M * batch_size); // Joint queue for all shared top-M candidates idi joint_queue_end = 0; boost::dynamic_bitset<> is_in_joint_queue(num_v_); // std::vector< std::vector<idi> > cands_query_ids(num_v_, std::vector<idi>(batch_size)); // If candidate cand_id is selected by query q_i, q_i should be in cands_query_ids[cand_id]. // std::vector<idi> cands_query_ids_ends(num_v_, 0); std::unordered_map< idi, std::vector<idi> > cands_query_ids(batch_size * M); std::vector<idi> ks(batch_size, 0); // Indices of every queue's first unchecked candidate. 
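        // Per-query bookkeeping for the shared top-M expansion below: nks and last_ks
        // (declared next) record, for each query, the highest queue position updated and
        // the last position scanned in an iteration; together they decide where that
        // query's next scan resumes.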
std::vector<idi> nks(batch_size, L); // Indices of highest candidate inserted std::vector<idi> last_ks(batch_size, L); // Indices of lowest candidate unchecked std::vector<idi> queries_not_finished(batch_size); idi queries_not_finished_end = batch_size; for (idi q_i = 0; q_i < batch_size; ++q_i) { queries_not_finished[q_i] = q_i; } bool is_finished = false; idi counter_for_debug = 0; while (!is_finished) { ++counter_for_debug; // Build the new joint queue // Traverse every query's queue for(idi q_i = 0; q_i < queries_not_finished_end; ++q_i) { idi q_local_id = queries_not_finished[q_i]; // last_ks[q_local_id] = L; auto &set_L = set_L_list[q_local_id]; idi top_m_count = 0; for (idi c_i = ks[q_local_id]; c_i < L && top_m_count < M; ++c_i) { if (set_L[c_i].is_checked_) { continue; } set_L[c_i].is_checked_ = true; last_ks[q_local_id] = c_i; ++top_m_count; idi cand_id = set_L[c_i].id_; // Record which query selected cand_id auto tmp_c = cands_query_ids.find(cand_id); if (tmp_c != cands_query_ids.end()) { tmp_c->second.push_back(q_local_id); } else { cands_query_ids.emplace(cand_id, std::vector<idi>()); cands_query_ids[cand_id].reserve(batch_size); cands_query_ids[cand_id].push_back(q_local_id); } // cands_query_ids[cand_id][cands_query_ids_ends[cand_id]++] = q_local_id; // Add candidate cand_id into the joint queue if (is_in_joint_queue[cand_id]) { continue; } is_in_joint_queue[cand_id] = true; joint_queue[joint_queue_end++] = cand_id; } } queries_not_finished_end = 0; // Clear queries_not_finished // Traverse every shared candidate for (idi c_i = 0; c_i < joint_queue_end; ++c_i) { idi cand_id = joint_queue[c_i]; is_in_joint_queue[cand_id] = false; // Reset is_in_joint_queue idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); idi out_degree = *out_edges++; const auto &query_local_ids = cands_query_ids[cand_id]; // Push neighbors to every queue of the queries that selected cand_id. 
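                // The adjacency list (out_edges / out_degree) read above is shared by all
                // queries in the batch that selected cand_id, so it is fetched once per
                // iteration instead of once per query.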
// Traverse cand_id's neighbors // idi &q_i_bound = cands_query_ids_ends[cand_id]; // for (idi q_i = 0; q_i < q_i_bound; ++q_i) { // idi q_local_id = query_local_ids[q_i]; for (idi q_local_id : query_local_ids) { dataf *query_data = queries_load_ + (q_local_id + batch_start) * dimension_; auto &is_visited = is_visited_list[q_local_id]; auto &set_L = set_L_list[q_local_id]; // // Traverse cand_id's neighbors for (idi e_i = 0; e_i < out_degree; ++e_i) { idi nb_id = out_edges[e_i]; if (is_visited[nb_id]) { continue; } is_visited[nb_id] = true; auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); dataf norm = *nb_data++; // ++count_distance_computation_; distf dist = compute_distance_with_norm(nb_data, query_data, norm); if (dist > set_L[L-1].distance_) { continue; } // if (dist >= set_L[L-1].distance_) { // continue; // } Candidate new_cand(nb_id, dist, false); idi insert_loc = insert_into_queue(set_L, L, new_cand); if (insert_loc < nks[q_local_id]) { nks[q_local_id] = insert_loc; } } } cands_query_ids.erase(cand_id); // q_i_bound = 0; // Clear cands_query_ids[cand_id] } joint_queue_end = 0; // Clear joint_queue for (idi q_local_id = 0; q_local_id < batch_size; ++q_local_id) { if (nks[q_local_id] <= last_ks[q_local_id]) { ks[q_local_id] = nks[q_local_id]; } else { ks[q_local_id] = last_ks[q_local_id] + 1; } nks[q_local_id] = L; last_ks[q_local_id] = L; if (ks[q_local_id] < L) { queries_not_finished[queries_not_finished_end++] = q_local_id; } } if (!queries_not_finished_end) { is_finished = true; } } } { for (idi q_i = 0; q_i < batch_size; ++q_i) { for (idi c_i = 0; c_i < K && c_i < L; ++c_i) { set_K_list[q_i + batch_start][c_i] = set_L_list[q_i][c_i].id_; } } } //// // {//test // for (idi q_i = 0; q_i < batch_size; ++q_i) { // printf("query: %u\n", q_i + batch_start); // for (idi c_i = 0; c_i < K; ++c_i) { // printf("%u: %u %f\n", c_i, set_L_list[q_i][c_i].id_, set_L_list[q_i][c_i].distance_); // } // } // } } //inline void Searching::para_search_with_top_m_critical_area( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K) //// std::vector< std::vector<idi> > &top_m_list) //{ // std::vector<uint8_t> is_visited(num_v_, 0); //// boost::dynamic_bitset<> is_visited(num_v_); // // { //// // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = 1; // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. //// // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; // ++count_distance_computation_; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // std::sort(set_L.begin(), set_L.begin() + L); // // std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. 
// idi tmp_count = 0; // for debug // while (k < L) { // ++tmp_count; // // unsigned nk = L; //// int nk = L; // // // Select M candidates // idi last_k = L; // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // if (set_L[c_i].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[c_i].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; // } // // // Push M candidates' neighbors into the queue. // // OpenMP reduction(min : nk) has a problem if nk is unsigned. nk might end up with being MAX_UINT. //// //// // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; //// if (is_visited[nb_id]) { //// continue; //// } //// is_visited[nb_id] = 1; // // if (!AtomicOps::CAS(is_visited.data() + nb_id, // static_cast<uint8_t>(0), // static_cast<uint8_t>(1))) { // continue; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; // ++count_distance_computation_; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } //// if (dist >= set_L[L-1].distance_) { //// continue; //// } // Candidate cand(nb_id, dist, false); // idi r; //// // { // r = insert_into_queue(set_L, L, cand); // if (r < nk) { // nk = r; // } // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // } // //// // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } //} // //inline void Searching::para_search_with_top_m_critical_area_no_omp( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K) //// std::vector< std::vector<idi> > &top_m_list) //{ // std::vector<uint8_t> is_visited(num_v_, 0); //// boost::dynamic_bitset<> is_visited(num_v_); // // { //// // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = 1; // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. //// // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; // ++count_distance_computation_; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // std::sort(set_L.begin(), set_L.begin() + L); // // std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. 
// idi tmp_count = 0; // for debug // while (k < L) { // ++tmp_count; // // unsigned nk = L; //// int nk = L; // // // Select M candidates // idi last_k = L; // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // if (set_L[c_i].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[c_i].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; // } // // // Push M candidates' neighbors into the queue. // // OpenMP reduction(min : nk) has a problem if nk is unsigned. nk might end up with being MAX_UINT. //// //// // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; //// if (is_visited[nb_id]) { //// continue; //// } //// is_visited[nb_id] = 1; // // if (!AtomicOps::CAS(is_visited.data() + nb_id, // static_cast<uint8_t>(0), // static_cast<uint8_t>(1))) { // continue; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; // ++count_distance_computation_; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } //// if (dist >= set_L[L-1].distance_) { //// continue; //// } // Candidate cand(nb_id, dist, false); // idi r; //// // { // r = insert_into_queue(set_L, L, cand); // if (r < nk) { // nk = r; // } // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // } // //// // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } //} // //inline void Searching::para_search_with_top_m_critical_area_yes_omp( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K) //// std::vector< std::vector<idi> > &top_m_list) //{ // std::vector<uint8_t> is_visited(num_v_, 0); //// boost::dynamic_bitset<> is_visited(num_v_); // // { // // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = 1; // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. //// // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; // ++count_distance_computation_; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // std::sort(set_L.begin(), set_L.begin() + L); // // std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. 
// idi tmp_count = 0; // for debug // while (k < L) { // ++tmp_count; // // unsigned nk = L; //// int nk = L; // // // Select M candidates // idi last_k = L; // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // if (set_L[c_i].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[c_i].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; // } // // // Push M candidates' neighbors into the queue. // // OpenMP reduction(min : nk) has a problem if nk is unsigned. nk might end up with being MAX_UINT. //// //// // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; //// if (is_visited[nb_id]) { //// continue; //// } //// is_visited[nb_id] = 1; // // if (!AtomicOps::CAS(is_visited.data() + nb_id, // static_cast<uint8_t>(0), // static_cast<uint8_t>(1))) { // continue; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; // ++count_distance_computation_; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } //// if (dist >= set_L[L-1].distance_) { //// continue; //// } // Candidate cand(nb_id, dist, false); // idi r; //// // { // r = insert_into_queue(set_L, L, cand); // if (r < nk) { // nk = r; // } // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // } // //// // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } //} // //inline void Searching::para_search_with_top_m_visited_array( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // std::vector<uint8_t> &is_visited) //// std::vector< std::vector<idi> > &top_m_list) //{ //// uint64_t count_visited = 0; // //// std::vector<uint8_t> is_visited(num_v_, 0); //// boost::dynamic_bitset<> is_visited(num_v_); // // { //// // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = 1; //// ++count_visited; // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. //// // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; // ++count_distance_computation_; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // std::sort(set_L.begin(), set_L.begin() + L); // // std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. 
// idi tmp_count = 0; // for debug // while (k < L) { // ++tmp_count; // // unsigned nk = L; //// int nk = L; // // // Select M candidates // idi last_k = L; // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // if (set_L[c_i].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[c_i].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; // } // // // Push M candidates' neighbors into the queue. // // OpenMP reduction(min : nk) has a problem if nk is unsigned. nk might end up with being MAX_UINT. //// //// // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; //// if (is_visited[nb_id]) { //// continue; //// } //// is_visited[nb_id] = 1; // // if (!AtomicOps::CAS(is_visited.data() + nb_id, // static_cast<uint8_t>(0), // static_cast<uint8_t>(1))) { // continue; // } //// ++count_visited; // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; // ++count_distance_computation_; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } //// if (dist >= set_L[L-1].distance_) { //// continue; //// } // Candidate cand(nb_id, dist, false); // idi r; //// // { // r = insert_into_queue(set_L, L, cand); // if (r < nk) { // nk = r; // } // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // } // //// // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } // //// { //// printf("query_id: %u " //// "count_visited: %lu %f%%\n", //// query_id, //// count_visited, //// 100.0 * count_visited / num_v_); //// } //} // //inline void Searching::para_search_with_top_m_merge_queues( // const idi M, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K) //{ //// {//test //// printf("query_id: %u\n", query_id); //// } //// const idi local_queue_length = ((M - 1) / num_threads_ + 1) * width_; // const idi local_queue_length = L; // std::vector< std::vector<Candidate> > local_queues_list(num_threads_, std::vector<Candidate>(local_queue_length)); // std::vector<idi> local_queues_ends(num_threads_, 0); // std::vector<uint8_t> is_visited(num_v_, 0); //// boost::dynamic_bitset<> is_visited(num_v_); // // { // // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = 1; // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; // // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. 
// // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // std::sort(set_L.begin(), set_L.begin() + L); // // std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. // idi tmp_count = 0; // for debug // while (k < L) { // ++tmp_count; //// {//test //// printf("tmp_count: %d\n", tmp_count); //// } // // // Select M candidates // idi last_k = L; //// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // if (set_L[c_i].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[c_i].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; // } // // // Push M candidates' neighbors into the queue. // // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // int tid = omp_get_thread_num(); // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; //// if (is_visited[nb_id]) { //// continue; //// } //// is_visited[nb_id] = 1; // // if (!AtomicOps::CAS(is_visited.data() + nb_id, // static_cast<uint8_t>(0), // static_cast<uint8_t>(1))) { // continue; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } //// if (dist >= set_L[L-1].distance_) { //// continue; //// } // Candidate cand(nb_id, dist, false); // // Add to the local queue. // add_into_queue(local_queues_list[tid], local_queues_ends[tid], local_queue_length, cand); // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // // idi nk = L; //// // Merge. Parallel merging in every two queues. 
//// { //// for (int tid = 0; tid < num_threads_; ++tid) { //// if (0 == local_queues_ends[tid]) continue; //// idi r = merge_two_queues_into_1st_queue_para( //// set_L, //// 0, //// L, //// local_queues_list[tid], //// 0, //// local_queues_ends[tid]); ////// idi r = merge_two_queues_into_1st_queue_seq( ////// set_L, ////// 0, ////// L, ////// local_queues_list[tid], ////// 0, ////// local_queues_ends[tid]); //// local_queues_ends[tid] = 0; // Reset the local queue //// if (r < nk) { //// nk = r; //// } //// } //// } //// {// text //// if (query_id == 4 && //// tmp_count == 5) { //// // Print local queues //// for (int t_i = 0; t_i < num_threads_; ++t_i) { ////// idi start_i = t_i * local_queue_length; //// for (idi q_i = 0; q_i < local_queues_ends[t_i]; ++q_i) { //// printf("t[%u][%u]: " //// "id: %u " //// "dist: %f\n", //// t_i, q_i, //// local_queues_list[t_i][q_i].id_, //// local_queues_list[t_i][q_i].distance_); //// } //// } //// printf("----------\n"); //// for (idi i = 0; i < L; ++i) { //// printf("set_L[%u]: " //// "id: %u " //// "dist: %f\n", //// i, //// set_L[i].id_, //// set_L[i].distance_); //// } //// printf("----------\n"); //// } //// } // // Merge. Merge all queues in parallel. // { // if (num_threads_ > 1) { // idi r = merge_all_queues_para_list( // local_queues_list, // local_queues_ends, // set_L, // L); // if (r < nk) { // nk = r; // } // } else { // if (local_queues_ends[0]) { // idi r = merge_two_queues_into_1st_queue_seq_fixed( // set_L, // 0, // L, // local_queues_list[0], // 0, // local_queues_ends[0]); // local_queues_ends[0] = 0; // if (r < nk) { // nk = r; // } // } // } // } //// {//test //// if (query_id == 4) { //// for (idi i = 0; i < L; ++i) { //// printf("tmp_count: %u " //// "set_L[%u]: " //// "id: %u " //// "dist: %f\n", //// tmp_count, //// i, //// set_L[i].id_, //// set_L[i].distance_); //// } //// } //// //// } // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // } // // // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } //// { //// exit(1); //// } //// {//test //// ////// if (query_id == 4) { //// for (idi i = 0; i < L; ++i) { //// printf("set_L[%u]: " //// "id: %u " //// "dist: %f\n", //// i, //// set_L[i].id_, //// set_L[i].distance_); //// } ////// exit(1); ////// } //// } //} // ////// Using local queue and then sequential merge. //inline void Searching::para_search_with_top_m_queues_seq_merge( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K) //// std::vector< std::vector<idi> > &top_m_list) //{ //// const idi local_queue_length = ((L - 1) / num_threads_ + 1) * width_; // const idi local_queue_length = L; // std::vector< std::vector<Candidate> > local_queues_list(num_threads_, std::vector<Candidate>(local_queue_length)); // std::vector<idi> local_queues_ends(num_threads_, 0); // std::vector<uint8_t> is_visited(num_v_, 0); //// boost::dynamic_bitset<> is_visited(num_v_); // // { // // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = 1; // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; //// for (idi v_i = 0; v_i < L; ++v_i) { //// idi v_id = init_ids[v_i]; //// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); //// } // // Get the distances of all candidates, store in the set set_L. 
// // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // std::sort(set_L.begin(), set_L.begin() + L); // // std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. // idi tmp_count = 0; // for debug // while (k < L) { // ++tmp_count; //// { //// printf("tmp_count: %u " //// "k: %u\n", //// tmp_count, //// k); //// } // //// unsigned nk = L; //// int nk = L; // // // Select M candidates // idi last_k = L; // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // if (set_L[c_i].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[c_i].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; // } // // // Push M candidates' neighbors into the queue. // // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // int tid = omp_get_thread_num(); // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; //// if (is_visited[nb_id]) { //// continue; //// } //// is_visited[nb_id] = 1; // // if (!AtomicOps::CAS(is_visited.data() + nb_id, // static_cast<uint8_t>(0), // static_cast<uint8_t>(1))) { // continue; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } //// if (dist >= set_L[L-1].distance_) { //// continue; //// } // Candidate cand(nb_id, dist, false); //// idi r; //// //// { //// r = insert_into_queue(set_L, L, cand); //// if (r < nk) { //// nk = r; //// } //// } // // Add to the local queue. 
// add_into_queue(local_queues_list[tid], local_queues_ends[tid], local_queue_length, cand); // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // // idi nk = L; // // Merge // { // for (int tid = 0; tid < num_threads_; ++tid) { // if (0 == local_queues_ends[tid]) continue; // idi r = merge_two_queues_into_1st_queue_seq_fixed( // set_L, // 0, // L, // local_queues_list[tid], // 0, // local_queues_ends[tid]); //// L + 1); // local_queues_ends[tid] = 0; // Reset the local queue // if (r < nk) { // nk = r; // } // } // } // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // } // // // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } //// //// {//test //// for (idi k_i = 0; k_i < K; ++k_i) { //// printf("%u: %u: %u %f\n", //// query_id, //// k_i, set_L[k_i].id_, set_L[k_i].distance_); //// } //// exit(1); //// } //} // //inline void Searching::para_search_with_top_m_merge_queues_no_CAS( // const idi M, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // std::vector< std::vector<Candidate> > &local_queues_list, // std::vector<idi> &local_queues_ends, //// std::vector<uint8_t> &is_visited) // boost::dynamic_bitset<> &is_visited) //{ ////// const idi local_queue_length = ((M - 1) / num_threads_ + 1) * width_; //// const idi local_queue_length = L; //// std::vector< std::vector<Candidate> > local_queues_list(num_threads_, std::vector<Candidate>(local_queue_length)); //// std::vector<idi> local_queues_ends(num_threads_, 0); ////// std::vector<uint8_t> is_visited(num_v_, 0); //// boost::dynamic_bitset<> is_visited(num_v_); // // { // // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = 1; // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; // // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. // // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // std::sort(set_L.begin(), set_L.begin() + L); // // std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. // idi tmp_count = 0; // for debug // while (k < L) { // ++tmp_count; // // // Select M candidates // idi last_k = L; //// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // if (set_L[c_i].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[c_i].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; // } // // // Push M candidates' neighbors into the queue. 
// // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // int tid = omp_get_thread_num(); // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = 1; // //// if (!AtomicOps::CAS(is_visited.data() + nb_id, //// static_cast<uint8_t>(0), //// static_cast<uint8_t>(1))) { //// continue; //// } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } //// if (dist >= set_L[L-1].distance_) { //// continue; //// } // Candidate cand(nb_id, dist, false); // // Add to the local queue. // add_into_queue(local_queues_list[tid], local_queues_ends[tid], local_queue_length, cand); // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // // idi nk = L; //// // Merge. Parallel merging in every two queues. //// { //// for (int tid = 0; tid < num_threads_; ++tid) { //// if (0 == local_queues_ends[tid]) continue; //// idi r = merge_two_queues_into_1st_queue_para( //// set_L, //// 0, //// L, //// local_queues_list[tid], //// 0, //// local_queues_ends[tid]); ////// idi r = merge_two_queues_into_1st_queue_seq( ////// set_L, ////// 0, ////// L, ////// local_queues_list[tid], ////// 0, ////// local_queues_ends[tid]); //// local_queues_ends[tid] = 0; // Reset the local queue //// if (r < nk) { //// nk = r; //// } //// } //// } //// // Merge. Merge all queues in parallel. 
//// { //// if (num_threads_ > 1) { //// idi r = merge_all_queues_para( //// local_queues_list, //// local_queues_ends, //// set_L, //// L); //// if (r < nk) { //// nk = r; //// } //// } else { //// if (local_queues_ends[0]) { //// idi r = merge_two_queues_into_1st_queue_seq( //// set_L, //// 0, //// L, //// local_queues_list[0], //// 0, //// local_queues_ends[0]); //// local_queues_ends[0] = 0; //// if (r < nk) { //// nk = r; //// } //// } //// } //// } // // Merge // { // for (int tid = 0; tid < num_threads_; ++tid) { // if (0 == local_queues_ends[tid]) continue; // idi r = merge_two_queues_into_1st_queue_seq_fixed( // set_L, // 0, // L, // local_queues_list[tid], // 0, // local_queues_ends[tid]); //// L + 1); // local_queues_ends[tid] = 0; // Reset the local queue // if (r < nk) { // nk = r; // } // } // } // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // } // // // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } // // {// Reset // is_visited.reset(); //// std::fill(is_visited.begin(), is_visited.end(), 0); // std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0); // } //} //inline void Searching::para_search_with_top_m_merge_queues_in_array( //inline void Searching::para_search_with_top_m_merge_queues_new_threshold( // const idi M, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue //// std::vector< std::vector<Candidate> > &local_queues_list, // std::vector<Candidate> &local_queues_array, // std::vector<idi> &local_queues_ends, // Sizes of local queue // BitVector &is_visited) //// std::vector<uint8_t> &is_visited) //// boost::dynamic_bitset<> &is_visited) //{ // { // // for (idi c_i = 0; c_i < L; ++c_i) { //// is_visited[init_ids[c_i]] = 1; // is_visited.atomic_set_bit(init_ids[c_i]); // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; // // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. // // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // std::sort(set_L.begin(), set_L.begin() + L); // // idi min_index = L - 1; // distf min_1st = set_L[min_index].distance_; // // std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. // idi tmp_count = 0; // for debug // while (k < L) { // ++tmp_count; //// {//test //// printf("tmp_count: %d\n", tmp_count); //// } // // // Select M candidates // idi last_k = L; //// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // if (set_L[c_i].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[c_i].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; // } // // // Push M candidates' neighbors into the queue. 
// // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // int tid = omp_get_thread_num(); // const idi local_queue_start = tid * local_queue_length; // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; //// { // Sequential edition //// if (is_visited[nb_id]) { //// continue; //// } //// is_visited[nb_id] = 1; //// } //// { // __ATOMIC_SEQ_CST edition //// if (!AtomicOps::CAS(is_visited.data() + nb_id, //// static_cast<uint8_t>(0), //// static_cast<uint8_t>(1))) { //// continue; //// } //// } //// {// Acquire and Release edition //// if (__atomic_load_n(is_visited.data() + nb_id, __ATOMIC_ACQUIRE)) { //// continue; //// } //// __atomic_store_n(is_visited.data() + nb_id, 1, __ATOMIC_RELEASE); //// } // {// Self-defined BitVector // if (is_visited.atomic_is_bit_set(nb_id)) { // continue; // } // is_visited.atomic_set_bit(nb_id); // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // // if (dist > min_1st) { // continue; // } else if (min_index > 0) { // // Inserted, so min_1st needs update // if (dist > set_L[min_index - 1].distance_) { // min_1st = dist; // if (min_index < L - 1) { // ++min_index; // } // } else { // min_1st = set_L[--min_index].distance_; // } //// min_1st = set_L[--min_index].distance_; // } // //// if (dist > set_L[L-1].distance_) { //// continue; //// } // // Candidate cand(nb_id, dist, false); // // Add to the local queue. // add_into_queue(local_queues_array, local_queue_start, local_queues_ends[tid], local_queue_length, cand); // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // // idi nk = L; //// // Merge. Parallel merging in every two queues. //// { //// for (int tid = 0; tid < num_threads_; ++tid) { //// if (0 == local_queues_ends[tid]) continue; //// idi r = merge_two_queues_into_1st_queue_para( //// set_L, //// 0, //// L, //// local_queues_list[tid], //// 0, //// local_queues_ends[tid]); ////// idi r = merge_two_queues_into_1st_queue_seq( ////// set_L, ////// 0, ////// L, ////// local_queues_list[tid], ////// 0, ////// local_queues_ends[tid]); //// local_queues_ends[tid] = 0; // Reset the local queue //// if (r < nk) { //// nk = r; //// } //// } //// } // // Merge. Merge all queues in parallel. 
// { // if (num_threads_ > 1) { // idi r = merge_all_queues_para_array( //// local_queues_list, // local_queues_array, // local_queues_ends, // local_queue_length, // set_L, // L); // if (r < nk) { // nk = r; // } // } else { // if (local_queues_ends[0]) { // idi r = merge_two_queues_into_1st_queue_seq_fixed( // set_L, // 0, // L, //// local_queues_list[0], // local_queues_array, // 0, // local_queues_ends[0]); // local_queues_ends[0] = 0; // if (r < nk) { // nk = r; // } // } // } // } //// // Merge Sequentially //// { //// for (int tid = 0; tid < num_threads_; ++tid) { //// if (0 == local_queues_ends[tid]) continue; //// idi r = merge_two_queues_into_1st_queue_seq_fixed( //// set_L, //// 0, //// L, ////// local_queues_list[tid], ////// 0, //// local_queues_array, //// tid * local_queue_length, //// local_queues_ends[tid]); ////// L + 1); //// local_queues_ends[tid] = 0; // Reset the local queue //// if (r < nk) { //// nk = r; //// } //// } //// } // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // } // // // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } // // {// Reset //// is_visited.reset(); //// std::fill(is_visited.begin(), is_visited.end(), 0); // is_visited.clear_all(); // std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0); // } //} /* * 5/7/2020-15:14 * Use 1 threads to scale M until the value_M_middle. * Then use multiple threads. */ inline void Searching::para_search_with_top_m_merge_queues_middle_m( const idi value_M_middle, const idi value_M_max, const idi query_id, const idi K, const idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K, const idi local_queue_length, // Maximum size of local queue const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; std::vector<idi> &local_queues_ends, // Sizes of local queue std::vector<idi> &top_m_candidates, boost::dynamic_bitset<> &is_visited) { time_initialization_ -= WallTimer::get_time_mark(); // const idi base_set_L = (num_threads_ - 1) * local_queue_length; // for (idi c_i = 0; c_i < L; ++c_i) { is_visited[init_ids[c_i]] = 1; } const dataf *query_data = queries_load_ + query_id * dimension_; for (idi v_i = 0; v_i < L; ++v_i) { idi v_id = init_ids[v_i]; _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); } uint64_t tmp_count_computation = 0; // Get the distances of all candidates, store in the set set_L. // for (unsigned i = 0; i < L; i++) { unsigned v_id = init_ids[i]; auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); dataf norm = *v_data++; ++tmp_count_computation; distf dist = compute_distance_with_norm(v_data, query_data, norm); set_L[i + base_set_L] = Candidate(v_id, dist, false); // False means not checked. } count_distance_computation_ += tmp_count_computation; tmp_count_computation = 0; // std::sort(set_L.begin(), set_L.begin() + L); std::sort( set_L.begin() + base_set_L, set_L.begin() + base_set_L + L); local_queues_ends[num_threads_ - 1] = L; time_initialization_ += WallTimer::get_time_mark(); idi top_m_candidates_end = 0; idi k = 0; // Index of first unchecked candidate. idi tmp_count = 0; // for debug idi M = 1; time_sequential_phase_ -= WallTimer::get_time_mark(); { // Single thread while (k < L && M < value_M_middle) { ++tmp_count; // {//test // printf("tmp_count: %d\n", tmp_count); // } // Select M candidates idi last_k = L; // Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. 
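            // Collect up to M unchecked candidates from the global queue; last_k records how
            // far the scan reached so the next iteration knows where to resume.
            // M doubles every iteration (capped at value_M_max); e.g. with the illustrative
            // values value_M_middle = 16 and value_M_max = 512, this single-thread phase runs
            // with M = 1, 2, 4, 8 and the parallel phase below continues from M = 16 upward.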
for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { idi index_set_L = c_i + base_set_L; if (set_L[index_set_L].is_checked_) { continue; } last_k = c_i; // Record the location of the last candidate selected. set_L[index_set_L].is_checked_ = true; top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; } idi nk = L; // Push M candidates' neighbors into the queue. for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { idi cand_id = top_m_candidates[c_i]; _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); idi out_degree = *out_edges++; for (idi n_i = 0; n_i < out_degree; ++n_i) { _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); } for (idi e_i = 0; e_i < out_degree; ++e_i) { idi nb_id = out_edges[e_i]; { // Sequential edition if (is_visited[nb_id]) { continue; } is_visited[nb_id] = 1; } auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); dataf norm = *nb_data++; ++tmp_count_computation; distf dist = compute_distance_with_norm(nb_data, query_data, norm); if (dist > set_L[L - 1 + base_set_L].distance_) { continue; } Candidate cand(nb_id, dist, false); // Thread 0 maintains the "global" queue idi r = add_into_queue( set_L, base_set_L, local_queues_ends[num_threads_ - 1], L, cand); if (r < nk) { nk = r; } } } top_m_candidates_end = 0; // Clear top_m_candidates count_distance_computation_ += tmp_count_computation; tmp_count_computation = 0; if (nk <= last_k) { k = nk; } else { k = last_k + 1; } {// Scale M if (M < value_M_max) { M <<= 1; } else { M = value_M_max; } } } } time_sequential_phase_ += WallTimer::get_time_mark(); time_parallel_phase_ -= WallTimer::get_time_mark(); uint64_t tmp_count_add_to_queue = 0; double tmp_time_pick_top_m = 0; double tmp_time_distance_computation = 0; double tmp_time_add_to_queue = 0.0; { // Multiple Threads while (k < L) { time_expand_ -= WallTimer::get_time_mark(); ++tmp_count; // {//test // printf("tmp_count: %d\n", tmp_count); // } // Select M candidates idi last_k = L; time_pick_top_m_ -= WallTimer::get_time_mark(); // Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { idi index_set_L = c_i + base_set_L; if (set_L[index_set_L].is_checked_) { continue; } last_k = c_i; // Record the location of the last candidate selected. set_L[index_set_L].is_checked_ = true; top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; } time_pick_top_m_ += WallTimer::get_time_mark(); idi nk = L; // Push M candidates' neighbors into the queue. 
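            // The M selected candidates are expanded in parallel: each non-master thread
            // appends its discoveries to its own slice of set_L (a fixed-capacity local
            // queue), while thread 0 inserts directly into the "global" queue at base_set_L.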
            // NOTE: reconstructed parallel-for directive; only its reduction clauses survived
            // in this copy, and tmp_count_computation also needs a sum reduction here.
#pragma omp parallel for \
                    reduction(+ : tmp_count_computation) \
                    reduction(+ : tmp_count_add_to_queue) \
                    reduction(+ : tmp_time_pick_top_m) \
                    reduction(+ : tmp_time_distance_computation) \
                    reduction(+ : tmp_time_add_to_queue)
//            for (int tid = 0; tid < num_threads_; ++tid) {
            for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
                tmp_time_pick_top_m -= WallTimer::get_time_mark();
                int tid = omp_get_thread_num();
                idi cand_id = top_m_candidates[c_i];
//                _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
                idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
                idi out_degree = *out_edges++;
//                for (idi n_i = 0; n_i < out_degree; ++n_i) {
//                    _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
//                }
                tmp_time_pick_top_m += WallTimer::get_time_mark();
                for (idi e_i = 0; e_i < out_degree; ++e_i) {
                    tmp_time_distance_computation -= WallTimer::get_time_mark();
                    idi nb_id = out_edges[e_i];
                    { // Sequential edition
                        if (is_visited[nb_id]) {
                            tmp_time_distance_computation += WallTimer::get_time_mark();
                            continue;
                        }
                        is_visited[nb_id] = 1;
                    }
                    auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                    dataf norm = *nb_data++;
                    ++tmp_count_computation;
                    distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                    tmp_time_distance_computation += WallTimer::get_time_mark();
                    if (dist > set_L[L - 1 + base_set_L].distance_) {
                        continue;
                    }
                    ++tmp_count_add_to_queue;
                    Candidate cand(nb_id, dist, false);
                    // Add to the local queue.
//                    tmp_time_pick_top_m -= WallTimer::get_time_mark();
                    tmp_time_add_to_queue -= WallTimer::get_time_mark();
                    if (0 != tid) { // Non-Master threads using local queues
                        add_into_queue(
                                set_L,
                                (tid - 1) * local_queue_length,
                                local_queues_ends[tid - 1],
                                local_queue_length,
                                cand);
                    } else { // Thread 0 maintains the "global" queue
                        idi r = add_into_queue(
                                set_L,
                                base_set_L,
                                local_queues_ends[num_threads_ - 1],
                                L,
                                cand);
                        if (r < nk) {
                            nk = r;
                        }
                    }
                    tmp_time_add_to_queue += WallTimer::get_time_mark();
//                    tmp_time_pick_top_m += WallTimer::get_time_mark();
                }
            }
            time_add_to_queue_ += tmp_time_add_to_queue;
            tmp_time_add_to_queue = 0;
//            }
            time_distance_computation_ += tmp_time_distance_computation;
            tmp_time_distance_computation = 0;
            time_pick_top_m_ += tmp_time_pick_top_m;
            tmp_time_pick_top_m = 0;
            top_m_candidates_end = 0; // Clear top_m_candidates
            count_add_to_queue_ += tmp_count_add_to_queue;
            tmp_count_add_to_queue = 0;
            count_distance_computation_ += tmp_count_computation;
            tmp_count_computation = 0;
            time_expand_ += WallTimer::get_time_mark();
//
            // Merge. Merge all queues in parallel.
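            // merge_all_queues_para_array folds every thread-local queue back into the global
            // queue stored at base_set_L; its return value is used (via nk) as the position
            // from which the next top-M scan resumes.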
{ time_merge_ -= WallTimer::get_time_mark(); if (num_threads_ > 1) { idi r = merge_all_queues_para_array( set_L, local_queues_ends, local_queue_length, L); if (r < nk) { nk = r; } } time_merge_ += WallTimer::get_time_mark(); } if (nk <= last_k) { k = nk; } else { k = last_k + 1; } {// Scale M if (M < value_M_max) { M <<= 1; } else { M = value_M_max; } } } } time_parallel_phase_ += WallTimer::get_time_mark(); time_ending_ -= WallTimer::get_time_mark(); for (idi k_i = 0; k_i < K; ++k_i) { set_K[k_i] = set_L[k_i + base_set_L].id_; // set_K[k_i] = set_L[k_i].id_; } {// Reset // std::fill(is_visited.begin(), is_visited.end(), 0); is_visited.reset(); // is_visited.clear_all(); std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0); } time_ending_ += WallTimer::get_time_mark(); // {//test // if (3 == query_id) { // exit(1); // } // } } inline void Searching::para_search_with_top_m_merge_queues_middle_m_no_merge( const uint64_t computation_threshold, const idi value_M_middle, const idi value_M_max, const idi query_id, const idi K, const idi L, const idi init_size, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K, const idi local_queue_length, // Maximum size of local queue const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; std::vector<idi> &local_queues_ends, // Sizes of local queue std::vector<idi> &top_m_candidates, boost::dynamic_bitset<> &is_visited) { uint64_t count_single_query_computation = 0; uint64_t count_init_computation = 0; uint64_t count_seq_computation = 0; uint64_t count_par_computation = 0; // {//test // printf("query_id: %u\n", query_id); // } // time_initialization_ -= WallTimer::get_time_mark(); // const idi base_set_L = (num_threads_ - 1) * local_queue_length; { for (idi c_i = 0; c_i < init_size; ++c_i) { // for (idi c_i = 0; c_i < L; ++c_i) { is_visited[init_ids[c_i]] = 1; // is_visited.atomic_set_bit(init_ids[c_i]); } } const dataf *query_data = queries_load_ + query_id * dimension_; for (idi v_i = 0; v_i < init_size; ++v_i) { // for (idi v_i = 0; v_i < L; ++v_i) { idi v_id = init_ids[v_i]; _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); } uint64_t tmp_count_computation = 0; // Get the distances of all candidates, store in the set set_L. // for (unsigned i = 0; i < init_size; i++) { // for (unsigned i = 0; i < L; i++) { unsigned v_id = init_ids[i]; auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); dataf norm = *v_data++; // ++count_distance_computation_; ++tmp_count_computation; distf dist = compute_distance_with_norm(v_data, query_data, norm); set_L[i + base_set_L] = Candidate(v_id, dist, false); // False means not checked. // set_L[i] = Candidate(v_id, dist, false); // False means not checked. } count_distance_computation_ += tmp_count_computation; count_init_computation += tmp_count_computation; count_single_query_computation += tmp_count_computation; tmp_count_computation = 0; // std::sort(set_L.begin(), set_L.begin() + L); std::sort( set_L.begin() + base_set_L, set_L.begin() + base_set_L + init_size); // set_L.begin() + base_set_L + L); local_queues_ends[num_threads_ - 1] = init_size; // local_queues_ends[num_threads_ - 1] = L; // time_initialization_ += WallTimer::get_time_mark(); // time_sequential_phase_ -= WallTimer::get_time_mark(); // std::vector<idi> top_m_candidates(M); idi &global_queue_size = local_queues_ends[num_threads_ - 1]; idi top_m_candidates_end = 0; idi k = 0; // Index of first unchecked candidate. 
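    // count_single_query_computation accumulates every distance computation charged to this
    // query (initialization, sequential phase, parallel phase); both search loops below stop
    // once it exceeds computation_threshold.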
idi tmp_count = 0; // for debug idi M = 1; { // Single thread while (k < L && M < value_M_middle && count_single_query_computation <= computation_threshold) { ++tmp_count; // {//test // printf("tmp_count: %d\n", tmp_count); // } // int real_threads = std::min(static_cast<int>(M), num_threads_); // idi queue_base = num_threads_ - real_threads; // Select M candidates idi last_k = L; // Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. for (idi c_i = k; c_i < global_queue_size && top_m_candidates_end < M; ++c_i) { // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { idi index_set_L = c_i + base_set_L; if (set_L[index_set_L].is_checked_) { continue; } last_k = c_i; // Record the location of the last candidate selected. set_L[index_set_L].is_checked_ = true; top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; } idi nk = L; // Push M candidates' neighbors into the queue. for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { idi cand_id = top_m_candidates[c_i]; _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); idi out_degree = *out_edges++; for (idi n_i = 0; n_i < out_degree; ++n_i) { _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); } for (idi e_i = 0; e_i < out_degree; ++e_i) { idi nb_id = out_edges[e_i]; { // Sequential edition if (is_visited[nb_id]) { continue; } is_visited[nb_id] = 1; } auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); dataf norm = *nb_data++; // ++count_distance_computation_; ++tmp_count_computation; distf dist = compute_distance_with_norm(nb_data, query_data, norm); if (dist > set_L[global_queue_size - 1 + base_set_L].distance_) { // if (dist > set_L[L - 1 + base_set_L].distance_) { continue; } Candidate cand(nb_id, dist, false); // Thread 0 maintains the "global" queue idi r = add_into_queue( set_L, base_set_L, global_queue_size, // local_queues_ends[num_threads_ - 1], L, cand); if (r < nk) { nk = r; } } } top_m_candidates_end = 0; // Clear top_m_candidates count_distance_computation_ += tmp_count_computation; count_seq_computation += tmp_count_computation; count_single_query_computation += tmp_count_computation; tmp_count_computation = 0; // {// Local queues' ends // printf("query%u:iter: %u", query_id, tmp_count); // for (int i_t = 0; i_t < num_threads_; ++i_t) { // printf(" [%u]: %u", i_t, local_queues_ends[i_t]); // } // printf("\n"); // } if (nk <= last_k) { k = nk; } else { k = last_k + 1; } {// Scale M if (M < value_M_max) { M <<= 1; } else { M = value_M_max; } } } } // time_sequential_phase_ += WallTimer::get_time_mark(); // time_parallel_phase_ -= WallTimer::get_time_mark(); { // Multiple Threads while (k < L and count_single_query_computation <= computation_threshold) { // while (k < L) { ++tmp_count; // {//test // printf("tmp_count: %d " // "k: %u " // "global_queue_size: %u\n", // tmp_count, // k, // global_queue_size); // } // int real_threads = std::min(static_cast<int>(M), num_threads_); // idi queue_base = num_threads_ - real_threads; // Select M candidates idi last_k = L; // Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. 
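            // The scan is bounded by global_queue_size rather than L: the master queue starts
            // with only init_size entries and may still hold fewer than L candidates here.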
for (idi c_i = k; c_i < global_queue_size && top_m_candidates_end < M; ++c_i) { // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { idi index_set_L = c_i + base_set_L; if (set_L[index_set_L].is_checked_) { continue; } last_k = c_i; // Record the location of the last candidate selected. set_L[index_set_L].is_checked_ = true; top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; } idi nk = L; // Push M candidates' neighbors into the queue. // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { int tid = omp_get_thread_num(); idi cand_id = top_m_candidates[c_i]; _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); idi out_degree = *out_edges++; for (idi n_i = 0; n_i < out_degree; ++n_i) { _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); } for (idi e_i = 0; e_i < out_degree; ++e_i) { idi nb_id = out_edges[e_i]; { // Sequential edition if (is_visited[nb_id]) { continue; } is_visited[nb_id] = 1; } auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); dataf norm = *nb_data++; // ++count_distance_computation_; ++tmp_count_computation; distf dist = compute_distance_with_norm(nb_data, query_data, norm); if (dist > set_L[global_queue_size - 1 + base_set_L].distance_) { // if (dist > set_L[L - 1 + base_set_L].distance_) { continue; } Candidate cand(nb_id, dist, false); // Add to the local queue. if (0 != tid) { // Non-Master threads using local queues add_into_queue( set_L, (tid - 1) * local_queue_length, local_queues_ends[tid - 1], local_queue_length, cand); } else { // Thread 0 maintains the "global" queue idi r = add_into_queue( set_L, base_set_L, global_queue_size, // local_queues_ends[num_threads_ - 1], L, cand); if (r < nk) { nk = r; } } } } top_m_candidates_end = 0; // Clear top_m_candidates count_distance_computation_ += tmp_count_computation; count_par_computation += tmp_count_computation; count_single_query_computation += tmp_count_computation; tmp_count_computation = 0; // {// Local queues' ends // printf("query%u:iter: %u", query_id, tmp_count); // for (int i_t = 0; i_t < num_threads_; ++i_t) { // printf(" [%u]: %u", i_t, local_queues_ends[i_t]); // } // printf("\n"); // } // Merge. Merge all queues in parallel. 
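                // Each non-master thread has been filling its own local queue during the
                // expansion above; merge_all_queues_para_array() folds those local queues into
                // the master queue at set_L[base_set_L .. base_set_L + L). The returned value r
                // lowers nk, the earliest position that may have changed, so the next iteration
                // resumes from nk if nk <= last_k, otherwise from last_k + 1.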
{ if (num_threads_ > 1) { // idi r = merge_all_queues_queue_base( // set_L, // local_queues_ends, // queue_base, // real_threads, // local_queue_length, // L); idi r = merge_all_queues_para_array( set_L, local_queues_ends, local_queue_length, L); if (r < nk) { nk = r; } } } if (nk <= last_k) { k = nk; } else { k = last_k + 1; } {// Scale M if (M < value_M_max) { M <<= 1; } else { M = value_M_max; } } // {// Print relative distance //// distf top_dist = set_L[base_set_L].distance_; // for (idi i_l = 0; i_l < L; ++i_l) { // printf("%u %f\n", // tmp_count, set_L[i_l + base_set_L].distance_); //// tmp_count, set_L[i_l + base_set_L].distance_ - top_dist); // } // } } } // time_parallel_phase_ += WallTimer::get_time_mark(); for (idi k_i = 0; k_i < K; ++k_i) { set_K[k_i] = set_L[k_i + base_set_L].id_; // set_K[k_i] = set_L[k_i].id_; } {// Reset // std::fill(is_visited.begin(), is_visited.end(), 0); is_visited.reset(); // is_visited.clear_all(); std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0); } // {//test // if (3 == query_id) { // exit(1); // } // } // {//test // printf("count_single: %lu " // "ct_init: %lu " // "ct_seq: %lu " // "ct_par: %lu\n", // count_single_query_computation, // count_init_computation, // count_seq_computation, // count_par_computation); // } } ///* // * 6/15/2020-14:40 // * Queues merging together to the global queue // */ //inline void Searching::para_search_with_top_m_merge_queues_sequential_merge( // const idi value_M_middle, // const idi value_M_max, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; // std::vector<idi> &local_queues_ends, // Sizes of local queue // std::vector<idi> &top_m_candidates, // boost::dynamic_bitset<> &is_visited) //{ //// const idi base_set_L = (num_threads_ - 1) * local_queue_length; // { // // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = 1; //// is_visited.atomic_set_bit(init_ids[c_i]); // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; // // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // uint64_t tmp_count_computation = 0; // // Get the distances of all candidates, store in the set set_L. //// // // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; //// ++count_distance_computation_; // ++tmp_count_computation; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i + base_set_L] = Candidate(v_id, dist, false); // False means not checked. //// set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; //// std::sort(set_L.begin(), set_L.begin() + L); // std::sort( // set_L.begin() + base_set_L, // set_L.begin() + base_set_L + L); // local_queues_ends[num_threads_ - 1] = L; // //// std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. 
// idi tmp_count = 0; // for debug // idi M = 1; // // { // Single thread // while (k < L && M < value_M_middle) { // ++tmp_count; //// {//test //// printf("tmp_count: %d\n", tmp_count); //// } // // // Select M candidates // idi last_k = L; //// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // idi index_set_L = c_i + base_set_L; // if (set_L[index_set_L].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[index_set_L].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; // } // // // idi nk = L; // // Push M candidates' neighbors into the queue. // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // { // Sequential edition // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = 1; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation_; // ++tmp_count_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L - 1 + base_set_L].distance_) { // continue; // } // // Candidate cand(nb_id, dist, false); // // Thread 0 maintains the "global" queue // idi r = add_into_queue( // set_L, // base_set_L, // local_queues_ends[num_threads_ - 1], // L, // cand); // if (r < nk) { // nk = r; // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // // {// Scale M // if (M < value_M_max) { // M <<= 1; // } else { // M = value_M_max; // } // } // } // } // // { // Multiple Threads // while (k < L) { // ++tmp_count; //// {//test //// if (num_threads_ == 2) { //// printf("tmp_count: %d " //// "k: %u\n", //// tmp_count, //// k); //// } //// } // // Select M candidates // idi last_k = L; //// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // idi index_set_L = c_i + base_set_L; // if (set_L[index_set_L].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[index_set_L].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; // } // // // idi nk = L; // // Push M candidates' neighbors into the queue. 
//// // // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // int tid = omp_get_thread_num(); // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // { // Sequential edition // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = 1; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation_; // ++tmp_count_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L - 1 + base_set_L].distance_) { // continue; // } // // Candidate cand(nb_id, dist, false); // // Add to the local queue. // if (0 != tid) { // // Non-Master threads using local queues // add_into_queue( // set_L, // (tid - 1) * local_queue_length, // local_queues_ends[tid - 1], // local_queue_length, // cand); // } else { // // Thread 0 maintains the "global" queue // idi r = add_into_queue( // set_L, // base_set_L, // local_queues_ends[num_threads_ - 1], // L, // cand); // if (r < nk) { // nk = r; // } // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; // //// // Merge. Merge all queues in parallel. // { //// {//test //// for (idi q_i = 0; q_i < num_threads_; ++q_i) { //// if (0 == local_queues_ends[q_i]) { //// continue; //// } //// for (idi e_i = 0; e_i < local_queues_ends[q_i]; ++e_i) { //// printf("tmp_count: %u " //// "q_i: %u " //// "[%u]: (%u, %f)\n", //// tmp_count, //// q_i, //// e_i, set_L[q_i * local_queue_length + e_i].id_, set_L[q_i * local_queue_length + e_i].distance_); //// } //// } //// } //// time_merge_ -= WallTimer::get_time_mark(); // if (num_threads_ > 1) { // idi r = merge_all_queues_all_together_in_sequential( // set_L, // local_queues_ends, // local_queue_length, // L); //// idi r = merge_all_queues_para_array( //// set_L, //// local_queues_ends, //// local_queue_length, //// L); // if (r < nk) { // nk = r; // } //// {//test //// printf("tmp_count: %u " //// "r: %u " //// "last_k: %u\n", //// tmp_count, //// r, //// last_k); //// for (idi l_i = 0; l_i < L; ++l_i) { //// printf("tmp_count: %u " //// "[%u]: (%u, %f)\n", //// tmp_count, //// l_i, set_L[l_i + base_set_L].id_, set_L[l_i + base_set_L].distance_); //// } //// } // } // //// time_merge_ += WallTimer::get_time_mark(); // } // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // {// Scale M // if (M < value_M_max) { // M <<= 1; // } else { // M = value_M_max; // } // } // } // } // // // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i + base_set_L].id_; //// set_K[k_i] = set_L[k_i].id_; // } // // {// Reset //// std::fill(is_visited.begin(), is_visited.end(), 0); // is_visited.reset(); //// is_visited.clear_all(); // std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0); // } // //// {//test //// if (0 == query_id) { //// exit(1); //// } //// } //} ///* // * 6/19/2020: // * Intra-query + Inter-query // */ //inline void Searching::para_search_with_top_m_nested_para( // const idi batch_start, // const idi batch_size, // const idi value_M_middle, 
// const idi value_M_max, // const idi K, // const idi L, // std::vector< std::vector<Candidate> > &set_L_list, // const std::vector<idi> &init_ids, // std::vector< std::vector<idi> > &set_K_list, // const idi local_queue_length, // Maximum size of local queue // const idi base_set_L, // base_set_L = (num_threads_intra_query_ - 1) * local_queue_length; // std::vector< std::vector<idi> > &local_queues_ends_list, // Sizes of local queue // std::vector< std::vector<idi> > &top_m_candidates_list, // std::vector< boost::dynamic_bitset<> > &is_visited_list) //{ // {// Initialize is_visited flag array // // for (idi q_i = 0; q_i < batch_size; ++q_i) { // auto &is_visited = is_visited_list[q_i]; // // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = 1; // } // } // } // // // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // uint64_t tmp_count_total_computation = 0; // // for (idi q_i = 0; q_i < batch_size; ++q_i) { // idi query_id = batch_start + q_i; // auto &set_L = set_L_list[q_i]; // auto &local_queues_ends = local_queues_ends_list[q_i]; // auto &is_visited = is_visited_list[q_i]; // // const dataf *query_data = queries_load_ + query_id * dimension_; //// //// for (idi v_i = 0; v_i < L; ++v_i) { //// idi v_id = init_ids[v_i]; //// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); //// } // uint64_t tmp_count_computation = 0; // // Get the distances of all candidates, store in the set set_L. //// // // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; //// ++count_distance_computation_; // ++tmp_count_computation; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i + base_set_L] = Candidate(v_id, dist, false); // False means not checked. //// set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } //// count_distance_computation_ += tmp_count_computation; // tmp_count_total_computation += tmp_count_computation; // tmp_count_computation = 0; //// std::sort(set_L.begin(), set_L.begin() + L); // std::sort( // set_L.begin() + base_set_L, // set_L.begin() + base_set_L + L); // local_queues_ends[num_threads_intra_query_ - 1] = L; // //// std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. // idi tmp_count = 0; // for debug // idi M = 1; // // auto &top_m_candidates = top_m_candidates_list[q_i]; // { // Single thread // while (k < L && M < value_M_middle) { // ++tmp_count; //// {//test //// printf("tmp_count: %d\n", tmp_count); //// } // // // Select M candidates // idi last_k = L; //// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // idi index_set_L = c_i + base_set_L; // if (set_L[index_set_L].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[index_set_L].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; // } // // idi nk = L; // // Push M candidates' neighbors into the queue. 
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // { // Sequential edition // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = 1; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation_; // ++tmp_count_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); //// {//test //// if (391655 == nb_id) { //// printf("tmp_count: %u " //// "nb_id: %u " //// "distf: %f\n", //// tmp_count, //// nb_id, //// dist); //// } //// } // if (dist > set_L[L - 1 + base_set_L].distance_) { // continue; // } // // Candidate cand(nb_id, dist, false); // // Thread 0 maintains the "global" queue // idi r = add_into_queue( // set_L, // base_set_L, // local_queues_ends[num_threads_intra_query_ - 1], // L, // cand); // if (r < nk) { // nk = r; // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates //// count_distance_computation_ += tmp_count_computation; // tmp_count_total_computation += tmp_count_computation; // tmp_count_computation = 0; // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // // {// Scale M // if (M < value_M_max) { // M <<= 1; // } else { // M = value_M_max; // } // } // } // } // // { // Multiple Threads // while (k < L) { // ++tmp_count; //// {//test //// printf("tmp_count: %d\n", tmp_count); //// } // // Select M candidates // idi last_k = L; //// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // idi index_set_L = c_i + base_set_L; // if (set_L[index_set_L].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[index_set_L].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; // } // // idi nk = L; // // Push M candidates' neighbors into the queue. 
// // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // int tid = omp_get_thread_num(); // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // { // Sequential edition // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = 1; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation_; // ++tmp_count_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); //// {//test //// if (391655 == nb_id) { //// printf("tmp_count: %u " //// "nb_id: %u " //// "distf: %f\n", //// tmp_count, //// nb_id, //// dist); //// } //// } // if (dist > set_L[L - 1 + base_set_L].distance_) { // continue; // } // // Candidate cand(nb_id, dist, false); // // Add to the local queue. // if (0 != tid) { // // Non-Master threads using local queues // add_into_queue( // set_L, // (tid - 1) * local_queue_length, // local_queues_ends[tid - 1], // local_queue_length, // cand); // } else { // // Thread 0 maintains the "global" queue // idi r = add_into_queue( // set_L, // base_set_L, // local_queues_ends[num_threads_intra_query_ - 1], // L, // cand); // if (r < nk) { // nk = r; // } // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates //// count_distance_computation_ += tmp_count_computation; // tmp_count_total_computation += tmp_count_computation; // tmp_count_computation = 0; // //// // Merge. Merge all queues in parallel. // { //// time_merge_ -= WallTimer::get_time_mark(); // if (num_threads_intra_query_ > 1) { // idi r = merge_all_queues_para_array( // set_L, // local_queues_ends, // local_queue_length, // L); // if (r < nk) { // nk = r; // } // } //// time_merge_ += WallTimer::get_time_mark(); // } // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // {// Scale M // if (M < value_M_max) { // M <<= 1; // } else { // M = value_M_max; // } // } // } // } // count_distance_computation_ += tmp_count_total_computation; // tmp_count_total_computation = 0; // // auto &set_K = set_K_list[query_id]; // // // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i + base_set_L].id_; //// set_K[k_i] = set_L[k_i].id_; // } // // {// Reset //// std::fill(is_visited.begin(), is_visited.end(), 0); // is_visited.reset(); //// is_visited.clear_all(); // std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0); // } // } // //// {//test //// if (3 == query_id) { //// exit(1); //// } //// } //// { //// for (idi k_i = 0; k_i < K; ++k_i) { //// printf("%u: (%u %f)\n", //// k_i, set_L_list[0][k_i].id_, set_L_list[0][k_i].distance_); //// } //// if (0 == batch_start) { //// exit(1); //// } //// } //} /* * 6/22/2020-21:30 * Do searching on the local_set_L * local_set_L is already sorted * is_visited is already set up. 
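 *
 * Note: the routine below repeatedly calls subsearch_top_m_for_one_iteration() on the
 * sub-queue set_L[set_L_start .. set_L_start + set_L_size), doubling M after every
 * iteration until it reaches value_M_max, and stops once the first-unchecked index k
 * reaches local_L. Distance computations are accumulated into
 * local_count_distance_computation.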
*/ inline void Searching::subsearch_with_top_m( const idi value_M_max, const idi query_id, const idi local_L, std::vector<Candidate> &set_L, const idi set_L_start, idi &set_L_size, std::vector<idi> &local_top_m_candidates, boost::dynamic_bitset<> &is_visited, uint64_t &local_count_distance_computation) { const dataf *query_data = queries_load_ + query_id * dimension_; // idi local_top_m_candidates_end = 0; idi k = 0; // Index of first unchecked candidate. idi iter = 0; idi M = 1; // value of M while (k < local_L) { ++iter; subsearch_top_m_for_one_iteration( iter, k, M, query_id, query_data, local_L, set_L, set_L_start, set_L_size, local_top_m_candidates, is_visited, local_count_distance_computation); {// Scale M if (M < value_M_max) { M <<= 1; } else { M = value_M_max; } } } // {//test // printf("set_L_start: %u " // "local_count_distance_computation: %lu\n", // set_L_start, // local_count_distance_computation); // } } //// Backup //inline void Searching::subsearch_with_top_m( // const idi value_M_max, // const idi query_id, // const idi local_L, // std::vector<Candidate> &set_L, // const idi base_set_L, // idi &set_L_end, // std::vector<idi> &local_top_m_candidates, // boost::dynamic_bitset<> &is_visited, // uint64_t &local_count_distance_computation) //{ // const dataf *query_data = queries_load_ + query_id * dimension_; // idi local_top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. // idi iter = 0; // idi M = 1; // value of M // // while (k < local_L) { // ++iter; // // Select M candidates // idi last_k = local_L; //// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. // for (idi c_i = k; c_i < set_L_end && local_top_m_candidates_end < M; ++c_i) { // idi index_set_L = c_i + base_set_L; // if (set_L[index_set_L].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[index_set_L].is_checked_ = true; // local_top_m_candidates[local_top_m_candidates_end++] = set_L[index_set_L].id_; // } // // idi nk = local_L; // // Push M candidates' neighbors into the queue. 
// for (idi c_i = 0; c_i < local_top_m_candidates_end; ++c_i) { // idi cand_id = local_top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // { // Sequential edition // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = 1; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; // ++local_count_distance_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[set_L_end - 1 + base_set_L].distance_) { // continue; // } // // Candidate cand(nb_id, dist, false); // // Thread 0 maintains the "global" queue // idi r = add_into_queue( // set_L, // base_set_L, // set_L_end, // local_L, // cand); // if (r < nk) { // nk = r; // } // } // } // local_top_m_candidates_end = 0; // Clear top_m_candidates // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // // {// Scale M // if (M < value_M_max) { // M <<= 1; // } else { // M = value_M_max; // } // } // } //} /* * 7/6/2020-23:17 * Subsearch only 1 iteration using top-m */ inline void Searching::subsearch_top_m_for_one_iteration( const idi iter, idi &k_uc, const idi value_M, const idi query_id, const dataf *query_data, const idi L, std::vector<Candidate> &set_L, const idi set_L_start, idi &set_L_size, std::vector<idi> &top_m_candidates, boost::dynamic_bitset<> &is_visited, uint64_t &count_distance_computation) { // Select M candidates idi top_m_candidates_end = 0; idi last_k = L; // Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. for (idi c_i = k_uc; c_i < set_L_size && top_m_candidates_end < value_M; ++c_i) { idi index_set_L = c_i + set_L_start; if (set_L[index_set_L].is_checked_) { continue; } last_k = c_i; // Record the location of the last candidate selected. set_L[index_set_L].is_checked_ = true; top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; // {//test // M_ids_.push_back(set_L[index_set_L].id_); // } } idi nk = L; // Push M candidates' neighbors into the queue. 
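    // Expansion step: for each selected candidate, read its adjacency list from the flat
    // opt_nsg_graph_ blob (out-degree followed by neighbor ids), compute the distance of
    // every unvisited neighbor to the query, and insert neighbors that beat the current
    // last (L-th) distance via add_into_queue(). nk keeps the smallest insertion position
    // so the caller can restart its scan from there.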
for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { idi cand_id = top_m_candidates[c_i]; _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); idi out_degree = *out_edges++; for (idi n_i = 0; n_i < out_degree; ++n_i) { _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); } for (idi e_i = 0; e_i < out_degree; ++e_i) { idi nb_id = out_edges[e_i]; { // Sequential edition if (is_visited[nb_id]) { continue; } is_visited[nb_id] = 1; } auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); dataf norm = *nb_data++; ++count_distance_computation; distf dist = compute_distance_with_norm(nb_data, query_data, norm); if (dist > set_L[set_L_size - 1 + set_L_start].distance_) { continue; } Candidate cand(nb_id, dist, false); idi r = add_into_queue( set_L, set_L_start, set_L_size, L, cand); if (r < nk) { nk = r; } } } // top_m_candidates_end = 0; // Clear top_m_candidates if (nk <= last_k) { k_uc = nk; } else { k_uc = last_k + 1; } // {//test // for (idi l_i = 0; l_i < set_L_size; ++l_i) { // L_ids_.push_back(set_L[set_L_start + l_i].id_); // } // std::sort(L_ids_.begin(), L_ids_.end()); // std::sort(M_ids_.begin(), M_ids_.end()); // for (idi m_i = 0; m_i < M_ids_.size(); ++m_i) { // printf("query_id: %u " // "iter: %u " // "M[%u]: " // "%u\n", // query_id, // iter, // m_i, // M_ids_[m_i]); // } // M_ids_.clear(); // for (idi l_i = 0; l_i < L_ids_.size(); ++l_i) { // printf("query_id: %u " // "iter: %u " // "L[%u]: " // "%u\n", // query_id, // iter, // l_i, // L_ids_[l_i]); // } // L_ids_.clear(); // } } ///* // * One more parameter for distance bound // */ //inline void Searching::subsearch_top_m_for_one_iteration_lth( // const distf bound_lth, // const idi iter, // idi &k_uc, // const idi value_M, // const idi query_id, // const dataf *query_data, // const idi L, // std::vector<Candidate> &set_L, // const idi set_L_start, // idi &set_L_size, // std::vector<idi> &top_m_candidates, // boost::dynamic_bitset<> &is_visited, // uint64_t &count_distance_computation) //{ // // Select M candidates // idi top_m_candidates_end = 0; // idi last_k = L; //// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. // for (idi c_i = k_uc; c_i < set_L_size && top_m_candidates_end < value_M; ++c_i) { // idi index_set_L = c_i + set_L_start; // if (set_L[index_set_L].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[index_set_L].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; // } // // idi nk = L; // // Push M candidates' neighbors into the queue. 
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // { // Sequential edition // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = 1; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; // ++count_distance_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > bound_lth) { // continue; // } // // Candidate cand(nb_id, dist, false); // idi r = add_into_queue( // set_L, // set_L_start, // set_L_size, // L, // cand); // if (r < nk) { // nk = r; // } // } // } // // if (nk <= last_k) { // k_uc = nk; // } else { // k_uc = last_k + 1; // } //} /* * 7/24/2020-10:53 * Subsearch for one iteration, with the global L-th value as the bound, * and the top_m_position indicates the bound for local top-M vertices. */ inline void Searching::subsearch_top_m_for_one_iteration_lth_mth( const distf bound_lth, // const idi top_m_position, const idi iter, idi &k_uc, const idi local_m_count, const idi query_id, const dataf *query_data, const idi L, std::vector<Candidate> &set_L, const idi set_L_start, idi &set_L_size, std::vector<idi> &top_m_candidates, boost::dynamic_bitset<> &is_visited, uint64_t &count_distance_computation, double &time_pick_top_m, uint64_t &count_add_to_queue, double &time_distance_computation, double &time_add_to_queue) { // {//test // printf("query_id: %u " // "iter: %u " // "tid: %u \n", // query_id, // iter, // omp_get_thread_num()); // } // Select M candidates idi top_m_candidates_end = 0; idi last_k = L; // Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. // for (idi c_i = k_uc; c_i < top_m_position; ++c_i) { time_pick_top_m -= WallTimer::get_time_mark(); for (idi c_i = k_uc; c_i < set_L_size && top_m_candidates_end < local_m_count; ++c_i) { idi index_set_L = c_i + set_L_start; if (set_L[index_set_L].is_checked_) { continue; } last_k = c_i; // Record the location of the last candidate selected. set_L[index_set_L].is_checked_ = true; top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; // {//test // M_ids_.push_back(set_L[index_set_L].id_); // } } time_pick_top_m += WallTimer::get_time_mark(); idi nk = L; // Push M candidates' neighbors into the queue. 
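    // Same expansion as subsearch_top_m_for_one_iteration(), but instrumented: wall-clock
    // deltas are accumulated into time_pick_top_m, time_distance_computation, and
    // time_add_to_queue, and count_add_to_queue counts insertions. Note that the active
    // check bounds insertions by the local queue's current last distance; the bound_lth
    // parameter is only referenced by the commented-out variant of that check.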
for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { time_pick_top_m -= WallTimer::get_time_mark(); idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } time_pick_top_m += WallTimer::get_time_mark(); for (idi e_i = 0; e_i < out_degree; ++e_i) { time_distance_computation -= WallTimer::get_time_mark(); idi nb_id = out_edges[e_i]; { // Sequential edition if (is_visited[nb_id]) { time_distance_computation += WallTimer::get_time_mark(); continue; } is_visited[nb_id] = 1; } auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); dataf norm = *nb_data++; ++count_distance_computation; distf dist = compute_distance_with_norm(nb_data, query_data, norm); time_distance_computation += WallTimer::get_time_mark(); if (dist > set_L[set_L_start + set_L_size - 1].distance_) { // if (dist > bound_lth) { continue; } ++count_add_to_queue; Candidate cand(nb_id, dist, false); // time_pick_top_m -= WallTimer::get_time_mark(); time_add_to_queue -= WallTimer::get_time_mark(); idi r = add_into_queue( set_L, set_L_start, set_L_size, L, cand); if (r < nk) { nk = r; } time_add_to_queue += WallTimer::get_time_mark(); // time_pick_top_m += WallTimer::get_time_mark(); } } if (nk <= last_k) { k_uc = nk; } else { k_uc = last_k + 1; } } ///* // * 7/26/2020-15:41 // * L-th and M-th Selection. // * Seq-Par Phases: when M is 1 and 2, do sequential searching; // * When M is equal and larger than 4, do parallel searching. // * It's for load-balance issue. // */ //inline void Searching::para_search_with_top_m_subsearch_v3( // const idi local_M_max, // const idi local_M_middle, // const idi query_id, // const idi K, // const idi global_L, // const idi local_L, //// const idi total_L, //// const idi init_queue_size, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const std::vector<idi> &local_queues_starts, // std::vector<idi> &local_queues_sizes, // std::vector<idi> &local_m_counts, // std::vector< std::vector<idi> > &top_m_candidates_list, // boost::dynamic_bitset<> &is_visited) //{ // time_initialization_ -= WallTimer::get_time_mark(); // uint64_t tmp_count_computation = 0; // {// Initialization // // is_visited flag array //// //// Cannot use OMP for bit array is_visited! // for (idi c_i = 0; c_i < global_L; ++c_i) { // is_visited[init_ids[c_i]] = 1; // } // // const dataf *query_data = queries_load_ + query_id * dimension_; // // for (idi v_i = 0; v_i < global_L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // // Get the distances of all candidates, store in the set set_L. // // for (idi id_i = 0; id_i < global_L; ++id_i) { // idi v_id = init_ids[id_i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; // ++tmp_count_computation; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[id_i] = Candidate(v_id, dist, false); // False means not checked. 
// } // local_queues_sizes[0] = global_L; // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; // std::sort(set_L.begin(), set_L.begin() + global_L); // } // time_initialization_ += WallTimer::get_time_mark(); // // // Searching // if (num_threads_ == 1) { // Single threads //// std::sort( //// set_L.begin(), //// set_L.end()); // subsearch_with_top_m( // local_M_max, // query_id, // local_L, // set_L, // 0, // local_queues_sizes[0], // top_m_candidates_list[0], // is_visited, // tmp_count_computation); // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; // } else { // Multiple threads // const dataf *query_data = queries_load_ + query_id * dimension_; // const idi num_queues = num_threads_; // idi local_M = 1; // idi iter = 0; // std::vector<idi> ks(num_queues, 0); // // time_sequential_phase_ -= WallTimer::get_time_mark(); // {// Sequential Search for M = 1, 2. // idi &k = ks[0]; // while (k < global_L && local_M < local_M_middle) { // ++iter; // subsearch_top_m_for_one_iteration( // iter, // k, // local_M, // query_id, // query_data, // global_L, // set_L, // 0, // local_queues_sizes[0], // top_m_candidates_list[0], // is_visited, // tmp_count_computation); // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; // // {// Double M // if (local_M < local_M_max) { // local_M <<= 1; // } // } // } // } // time_sequential_phase_ += WallTimer::get_time_mark(); // // time_parallel_phase_ -= WallTimer::get_time_mark(); // distf bound_lth = set_L[global_L - 1].distance_; // {// Parallel Search for M >= 4, or local_M_middle // time_assign_s_ -=WallTimer::get_time_mark(); // {// Assign elements from Queue[0] to others // idi dst_i = 1; // for (idi e_i = 1; e_i < global_L; ++e_i) { // idi dest_sub = e_i % num_queues; // if (0 == dest_sub) { // set_L[dst_i++] = set_L[e_i]; // } else { // set_L[local_queues_starts[dest_sub] + local_queues_sizes[dest_sub]++] = set_L[e_i]; // } // } // local_queues_sizes[0] = dst_i; // } // std::fill(ks.begin(), ks.end(), 0); // // // selecting_unchecked_top_M_seq( // query_id, // iter, // set_L, // ks, // local_M, // num_queues, // local_queues_starts, // local_queues_sizes, // local_m_counts); // time_assign_s_ +=WallTimer::get_time_mark(); // // double tmp_time_pick_top_m = 0; // uint64_t tmp_count_add_to_queue = 0; // uint8_t not_finished = 1; // double tmp_time_distance_computation = 0; // double tmp_time_add_to_queue = 0; // while (true) { // time_expand_ -= WallTimer::get_time_mark(); // not_finished = 0; // ++iter; // // reduction(+ : tmp_time_pick_top_m) \ // reduction(+ : tmp_count_add_to_queue) \ // reduction(+ : tmp_time_distance_computation) \ // reduction(+ : tmp_time_add_to_queue) // for (idi q_i = 0; q_i < num_queues; ++q_i) { // tmp_time_pick_top_m -= WallTimer::get_time_mark(); // idi L_value = q_i == 0 ? 
global_L : local_L; // idi &k = ks[q_i]; // idi &local_queue_size = local_queues_sizes[q_i]; // auto &local_top_m_candidates = top_m_candidates_list[q_i]; // idi local_m_count = local_m_counts[q_i]; //// if (local_M < num_queues && !local_m_count) { //// local_m_count = 1; //// } // tmp_time_pick_top_m += WallTimer::get_time_mark(); // if (!local_m_count) { // continue; // } // not_finished = 1; // const idi local_queue_start = local_queues_starts[q_i]; // // subsearch_top_m_for_one_iteration_lth_mth( // bound_lth, // iter, // k, // local_m_count, // query_id, // query_data, // L_value, // set_L, // local_queue_start, // local_queue_size, // local_top_m_candidates, // is_visited, // tmp_count_computation, // tmp_time_pick_top_m, // tmp_count_add_to_queue, // tmp_time_distance_computation, // tmp_time_add_to_queue); // } // time_add_to_queue_ += tmp_time_add_to_queue; // tmp_time_add_to_queue = 0; // time_distance_computation_ += tmp_time_distance_computation; // tmp_time_distance_computation = 0; // count_add_to_queue_ += tmp_count_add_to_queue; // tmp_count_add_to_queue = 0; // time_pick_top_m_ += tmp_time_pick_top_m; // tmp_time_pick_top_m = 0; // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; // time_expand_ += WallTimer::get_time_mark(); // if (!not_finished) { // break; // } // {// Scale M // if (local_M < local_M_max) { // local_M <<= 1; // } //// else { //// local_M = value_M_max; //// } // } // time_select_ -= WallTimer::get_time_mark(); // // { // // {// Setecting and update local_queues_lengths //// time_select_L_ -= WallTimer::get_time_mark(); // bound_lth = selecting_top_L_seq( // set_L, // global_L, //// local_L, // num_queues, // local_queues_starts, // local_queues_sizes); //// time_select_L_ += WallTimer::get_time_mark(); // } // // { //// time_select_M_ -= WallTimer::get_time_mark(); // selecting_unchecked_top_M_seq( // query_id, // iter, // set_L, // ks, // local_M, // num_queues, // local_queues_starts, // local_queues_sizes, // local_m_counts); //// time_select_M_ += WallTimer::get_time_mark(); // } // } // time_select_ += WallTimer::get_time_mark(); //// {//test //// printf("query_id: %u " //// "iter: %u", //// query_id, //// iter); //// printf(" local_queues_sizes:"); //// for (idi i = 0; i < num_queues; ++i) { //// printf(" %u", local_queues_sizes[i]); //// } //// printf(" local_m_counts:"); //// for (idi i = 0; i < num_queues; ++i) { //// printf(" %u", local_m_counts[i]); //// } //// printf(" ks:"); //// for (idi i = 0; i < num_queues; ++i) { //// printf(" %u", ks[i]); //// } //// printf("\n"); //// } // } // } // time_parallel_phase_ += WallTimer::get_time_mark(); // } // //// time_merge_ -= WallTimer::get_time_mark(); // time_ending_ -= WallTimer::get_time_mark(); // {// Return the results to set_K // std::vector<idi> pointer(num_threads_, 0); // // get the first // distf min_dist = FLT_MAX; // idi min_q_i; // idi min_id; // idi min_sub; // idi last_id; // for (int q_i = 0; q_i < num_threads_; ++q_i) { // if (pointer[q_i] >= local_queues_sizes[q_i]) { // continue; // } // idi sub = pointer[q_i] + local_queues_starts[q_i]; // distf tmp_dist = set_L[sub].distance_; // idi tmp_id = set_L[sub].id_; // if (tmp_dist < min_dist) { // min_dist = tmp_dist; // min_id = tmp_id; // min_q_i = q_i; // min_sub = sub; // } else if (tmp_dist == min_dist && tmp_id < min_id) { // min_id = tmp_id; // min_q_i = q_i; // min_sub = sub; // } // } // set_K[0] = set_L[min_sub].id_; //// {//test //// printf("query_id: %u " //// "[%u]: " //// "(%u, 
%f)\n", //// query_id, //// 0, //// set_L[min_sub].id_, set_L[min_sub].distance_); //// } // ++pointer[min_q_i]; // last_id = set_K[0]; // // bool is_finished = false; // idi k_i = 1; // while (k_i < K && !is_finished) { // is_finished = true; // min_dist = FLT_MAX; // for (int q_i = 0; q_i < num_threads_; ++q_i) { // const idi local_queue_size = local_queues_sizes[q_i]; // idi sub = pointer[q_i] + local_queues_starts[q_i]; // // while (pointer[q_i] < local_queue_size // && set_L[sub].id_ == last_id) { // ++pointer[q_i]; // ++sub; // } // if (pointer[q_i] >= local_queue_size) { // continue; // } // is_finished = false; // distf tmp_dist = set_L[sub].distance_; // idi tmp_id = set_L[sub].id_; // if (tmp_dist < min_dist) { // min_dist = tmp_dist; // min_id = tmp_id; // min_q_i = q_i; // min_sub = sub; // } else if (tmp_dist == min_dist && tmp_id < min_id) { // min_id = tmp_id; // min_q_i = q_i; // min_sub = sub; // } // } // set_K[k_i] = set_L[min_sub].id_; //// {//test //// printf("query_id: %u " //// "[%u]: " //// "(%u, %f)\n", //// query_id, //// k_i, //// set_L[min_sub].id_, set_L[min_sub].distance_); //// } // ++pointer[min_q_i]; // ++k_i; // } // } //// time_merge_ += WallTimer::get_time_mark(); // // {// Reset //// std::fill(is_visited.begin(), is_visited.end(), 0); // is_visited.reset(); // std::fill(local_queues_sizes.begin() + 1, local_queues_sizes.end(), 0); // } // // time_ending_ += WallTimer::get_time_mark(); //// {//test //// if (3 == query_id) { //// exit(1); //// } //// } //} // ///* // * 7/27/2020-15:33 // * Same with v3, but gather top-m vertices together // */ //inline void Searching::para_search_with_top_m_subsearch_v4( // const idi local_M_max, // const idi local_M_middle, // const idi query_id, // const idi K, // const idi global_L, // const idi local_L, //// const idi total_L, //// const idi init_queue_size, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const std::vector<idi> &local_queues_starts, // std::vector<idi> &local_queues_sizes, // std::vector<idi> &local_m_counts, // std::vector<idi> &top_m_candidates, //// std::vector< std::vector<idi> > &top_m_candidates_list, // boost::dynamic_bitset<> &is_visited) //{ // time_initialization_ -= WallTimer::get_time_mark(); // uint64_t tmp_count_computation = 0; // {// Initialization // // is_visited flag array //// //// Cannot use OMP for bit array is_visited! // for (idi c_i = 0; c_i < global_L; ++c_i) { // is_visited[init_ids[c_i]] = 1; // } // // const dataf *query_data = queries_load_ + query_id * dimension_; // // for (idi v_i = 0; v_i < global_L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // // Get the distances of all candidates, store in the set set_L. // // for (idi id_i = 0; id_i < global_L; ++id_i) { // idi v_id = init_ids[id_i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; // ++tmp_count_computation; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[id_i] = Candidate(v_id, dist, false); // False means not checked. 
// } // local_queues_sizes[0] = global_L; // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; // std::sort(set_L.begin(), set_L.begin() + global_L); // } // time_initialization_ += WallTimer::get_time_mark(); // // // Searching // if (num_threads_ == 1) { // Single threads //// std::sort( //// set_L.begin(), //// set_L.end()); // subsearch_with_top_m( // local_M_max, // query_id, // local_L, // set_L, // 0, // local_queues_sizes[0], // top_m_candidates, // is_visited, // tmp_count_computation); // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; // } else { // Multiple threads // const dataf *query_data = queries_load_ + query_id * dimension_; // const idi num_queues = num_threads_; // idi local_M = 1; // idi iter = 0; //// std::vector<idi> ks(num_queues, 0); // // time_sequential_phase_ -= WallTimer::get_time_mark(); // {// Sequential Search for M = 1, 2. // idi k = 0; //// idi &k = ks[0]; // while (k < global_L && local_M < local_M_middle) { // ++iter; // subsearch_top_m_for_one_iteration( // iter, // k, // local_M, // query_id, // query_data, // global_L, // set_L, // 0, // local_queues_sizes[0], // top_m_candidates, // is_visited, // tmp_count_computation); // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; // // {// Double M // if (local_M < local_M_max) { // local_M <<= 1; // } // } // } // } // time_sequential_phase_ += WallTimer::get_time_mark(); // // time_parallel_phase_ -= WallTimer::get_time_mark(); // distf bound_lth = set_L[global_L - 1].distance_; // {// Parallel Search for M >= 4, or local_M_middle // time_assign_s_ -=WallTimer::get_time_mark(); // {// Assign elements from Queue[0] to others // idi dst_i = 1; // for (idi e_i = 1; e_i < global_L; ++e_i) { // idi dest_sub = e_i % num_queues; // if (0 == dest_sub) { // set_L[dst_i++] = set_L[e_i]; // } else { // set_L[local_queues_starts[dest_sub] + local_queues_sizes[dest_sub]++] = set_L[e_i]; // } // } // local_queues_sizes[0] = dst_i; // } //// std::fill(ks.begin(), ks.end(), 0); // // idi top_m_candidates_size = 0; //// selecting_unchecked_top_M_seq( //// query_id, //// iter, //// set_L, //// ks, //// local_M, //// num_queues, //// local_queues_starts, //// local_queues_sizes, //// local_m_counts); // time_assign_s_ +=WallTimer::get_time_mark(); // // std::vector<idi> ks(num_queues, 0); // std::vector<idi> nks(num_queues); // std::vector<idi> bound_ks(num_queues); // double tmp_time_pick_top_m = 0; // uint64_t tmp_count_add_to_queue = 0; // uint8_t not_finished = 1; // double tmp_time_distance_computation = 0; // double tmp_time_add_to_queue = 0; // while (true) { // time_expand_ -= WallTimer::get_time_mark(); // not_finished = 0; // ++iter; // // // Gather top-M vertices // time_pick_top_m_ -= WallTimer::get_time_mark(); // gather_unchecked_top_M_seq( // query_id, // iter, // set_L, // ks, // local_M, // num_queues, // local_queues_starts, // local_queues_sizes, // top_m_candidates, // top_m_candidates_size, // bound_ks); // time_pick_top_m_ += WallTimer::get_time_mark(); // if (!top_m_candidates_size) { // time_expand_ += WallTimer::get_time_mark(); // break; // } // std::fill(nks.begin(), nks.end(), global_L); // // // Expand top-M vertices // // reduction(+ : tmp_count_computation) \ // reduction(+ : tmp_count_add_to_queue) \ // reduction(+ : tmp_time_distance_computation) \ // reduction(+ : tmp_time_pick_top_m) \ // reduction(+ : tmp_time_add_to_queue) // for (idi c_i = 0; c_i < top_m_candidates_size; ++c_i) { // 
tmp_time_pick_top_m -= WallTimer::get_time_mark(); // idi tid = omp_get_thread_num(); // const idi set_L_start = local_queues_starts[tid]; // idi &set_L_size = local_queues_sizes[tid]; // idi &nk = nks[tid]; // idi L_value = tid == 0 ? global_L : local_L; // idi cand_id = top_m_candidates[c_i]; //// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; //// for (idi n_i = 0; n_i < out_degree; ++n_i) { //// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); //// } // tmp_time_pick_top_m += WallTimer::get_time_mark(); // // Expand cand_id's neighbors // for (idi e_i = 0; e_i < out_degree; ++e_i) { // tmp_time_distance_computation -= WallTimer::get_time_mark(); // idi nb_id = out_edges[e_i]; // { // Sequential edition // if (is_visited[nb_id]) { // tmp_time_distance_computation += WallTimer::get_time_mark(); // continue; // } // is_visited[nb_id] = 1; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; // ++tmp_count_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // tmp_time_distance_computation += WallTimer::get_time_mark(); // if (dist > set_L[set_L_start + set_L_size - 1].distance_) { //// if (dist > bound_lth) { // continue; // } // ++tmp_count_add_to_queue; // Candidate cand(nb_id, dist, false); // tmp_time_add_to_queue -= WallTimer::get_time_mark(); // idi r = add_into_queue( // set_L, // set_L_start, // set_L_size, // L_value, // cand); // if (r < nk) { // nk = r; // } // tmp_time_add_to_queue += WallTimer::get_time_mark(); // } // } // top_m_candidates_size = 0; // time_add_to_queue_ += tmp_time_add_to_queue; // tmp_time_add_to_queue = 0; // time_distance_computation_ += tmp_time_distance_computation; // tmp_time_distance_computation = 0; // count_add_to_queue_ += tmp_count_add_to_queue; // tmp_count_add_to_queue = 0; // time_pick_top_m_ += tmp_time_pick_top_m; // tmp_time_pick_top_m = 0; // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; // for (idi q_i = 0; q_i < num_queues; ++q_i) { // if (nks[q_i] < bound_ks[q_i]) { // ks[q_i] = nks[q_i]; // } else { // ks[q_i] = bound_ks[q_i]; // } // } // time_expand_ += WallTimer::get_time_mark(); // // time_select_ -= WallTimer::get_time_mark(); // {// Select L-th // bound_lth = selecting_top_L_seq( // set_L, // global_L, // num_queues, // local_queues_starts, // local_queues_sizes); // } // time_select_ += WallTimer::get_time_mark(); // {// Scale M // if (local_M < local_M_max) { // local_M <<= 1; // } // } //// {//test //// printf("query_id: %u " //// "iter: %u", //// query_id, //// iter); //// printf(" local_queues_sizes:"); //// for (idi i = 0; i < num_queues; ++i) { //// printf(" %u", local_queues_sizes[i]); //// } //// printf(" local_m_counts:"); //// for (idi i = 0; i < num_queues; ++i) { //// printf(" %u", local_m_counts[i]); //// } //// printf(" ks:"); //// for (idi i = 0; i < num_queues; ++i) { //// printf(" %u", ks[i]); //// } //// printf("\n"); //// } // } // } // time_parallel_phase_ += WallTimer::get_time_mark(); // } // //// time_merge_ -= WallTimer::get_time_mark(); // time_ending_ -= WallTimer::get_time_mark(); // {// Return the results to set_K // std::vector<idi> pointer(num_threads_, 0); // // get the first // distf min_dist = FLT_MAX; // idi min_q_i; // idi min_id; // idi min_sub; // idi last_id; // for (int q_i 
= 0; q_i < num_threads_; ++q_i) { // if (pointer[q_i] >= local_queues_sizes[q_i]) { // continue; // } // idi sub = pointer[q_i] + local_queues_starts[q_i]; // distf tmp_dist = set_L[sub].distance_; // idi tmp_id = set_L[sub].id_; // if (tmp_dist < min_dist) { // min_dist = tmp_dist; // min_id = tmp_id; // min_q_i = q_i; // min_sub = sub; // } else if (tmp_dist == min_dist && tmp_id < min_id) { // min_id = tmp_id; // min_q_i = q_i; // min_sub = sub; // } // } // set_K[0] = set_L[min_sub].id_; //// {//test //// printf("query_id: %u " //// "[%u]: " //// "(%u, %f)\n", //// query_id, //// 0, //// set_L[min_sub].id_, set_L[min_sub].distance_); //// } // ++pointer[min_q_i]; // last_id = set_K[0]; // // bool is_finished = false; // idi k_i = 1; // while (k_i < K && !is_finished) { // is_finished = true; // min_dist = FLT_MAX; // for (int q_i = 0; q_i < num_threads_; ++q_i) { // const idi local_queue_size = local_queues_sizes[q_i]; // idi sub = pointer[q_i] + local_queues_starts[q_i]; // // while (pointer[q_i] < local_queue_size // && set_L[sub].id_ == last_id) { // ++pointer[q_i]; // ++sub; // } // if (pointer[q_i] >= local_queue_size) { // continue; // } // is_finished = false; // distf tmp_dist = set_L[sub].distance_; // idi tmp_id = set_L[sub].id_; // if (tmp_dist < min_dist) { // min_dist = tmp_dist; // min_id = tmp_id; // min_q_i = q_i; // min_sub = sub; // } else if (tmp_dist == min_dist && tmp_id < min_id) { // min_id = tmp_id; // min_q_i = q_i; // min_sub = sub; // } // } // set_K[k_i] = set_L[min_sub].id_; //// {//test //// printf("query_id: %u " //// "[%u]: " //// "(%u, %f)\n", //// query_id, //// k_i, //// set_L[min_sub].id_, set_L[min_sub].distance_); //// } // ++pointer[min_q_i]; // ++k_i; // } // } //// time_merge_ += WallTimer::get_time_mark(); // // {// Reset //// std::fill(is_visited.begin(), is_visited.end(), 0); // is_visited.reset(); // std::fill(local_queues_sizes.begin() + 1, local_queues_sizes.end(), 0); // } // // time_ending_ += WallTimer::get_time_mark(); //// {//test //// if (3 == query_id) { //// exit(1); //// } //// } //} /* * 7/28/2020-11:25 * Same with V4, but only gather top-m vertices, but not select top-L. */ inline void Searching::para_search_with_top_m_subsearch_v5( const idi local_M_max, const idi local_M_middle, const idi query_id, const idi K, const idi global_L, const idi local_L, // const idi total_L, // const idi init_queue_size, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K, const std::vector<idi> &local_queues_starts, std::vector<idi> &local_queues_sizes, // std::vector<idi> &local_m_counts, std::vector<idi> &top_m_candidates, // std::vector< std::vector<idi> > &top_m_candidates_list, boost::dynamic_bitset<> &is_visited) { time_initialization_ -= WallTimer::get_time_mark(); uint64_t tmp_count_computation = 0; {// Initialization // is_visited flag array // // Cannot use OMP for bit array is_visited! for (idi c_i = 0; c_i < global_L; ++c_i) { is_visited[init_ids[c_i]] = 1; } const dataf *query_data = queries_load_ + query_id * dimension_; for (idi v_i = 0; v_i < global_L; ++v_i) { idi v_id = init_ids[v_i]; _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); } // Get the distances of all candidates, store in the set set_L. 
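        // All global_L initial candidates are scored against the query and stored in
        // set_L[0 .. global_L), which serves as queue 0; they are sorted ascending by
        // distance before the sequential and parallel phases begin.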
for (idi id_i = 0; id_i < global_L; ++id_i) { idi v_id = init_ids[id_i]; auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); dataf norm = *v_data++; ++tmp_count_computation; distf dist = compute_distance_with_norm(v_data, query_data, norm); set_L[id_i] = Candidate(v_id, dist, false); // False means not checked. } local_queues_sizes[0] = global_L; count_distance_computation_ += tmp_count_computation; tmp_count_computation = 0; std::sort(set_L.begin(), set_L.begin() + global_L); } time_initialization_ += WallTimer::get_time_mark(); // Searching if (num_threads_ == 1) { // Single threads // std::sort( // set_L.begin(), // set_L.end()); subsearch_with_top_m( local_M_max, query_id, local_L, set_L, 0, local_queues_sizes[0], top_m_candidates, is_visited, tmp_count_computation); count_distance_computation_ += tmp_count_computation; tmp_count_computation = 0; } else { // Multiple threads const dataf *query_data = queries_load_ + query_id * dimension_; const idi num_queues = num_threads_; idi local_M = 1; idi iter = 0; // std::vector<idi> ks(num_queues, 0); time_sequential_phase_ -= WallTimer::get_time_mark(); {// Sequential Search for M = 1, 2. idi k = 0; // idi &k = ks[0]; while (k < global_L && local_M < local_M_middle) { ++iter; subsearch_top_m_for_one_iteration( iter, k, local_M, query_id, query_data, global_L, set_L, 0, local_queues_sizes[0], top_m_candidates, is_visited, tmp_count_computation); count_distance_computation_ += tmp_count_computation; tmp_count_computation = 0; {// Double M if (local_M < local_M_max) { local_M <<= 1; } } } } time_sequential_phase_ += WallTimer::get_time_mark(); time_parallel_phase_ -= WallTimer::get_time_mark(); // distf bound_lth = set_L[global_L - 1].distance_; {// Parallel Search for M >= 4, or local_M_middle time_assign_s_ -=WallTimer::get_time_mark(); {// Assign elements from Queue[0] to others idi dst_i = 1; for (idi e_i = 1; e_i < global_L; ++e_i) { idi dest_sub = e_i % num_queues; if (0 == dest_sub) { set_L[dst_i++] = set_L[e_i]; } else { set_L[local_queues_starts[dest_sub] + local_queues_sizes[dest_sub]++] = set_L[e_i]; } } local_queues_sizes[0] = dst_i; } // std::fill(ks.begin(), ks.end(), 0); idi top_m_candidates_size = 0; time_assign_s_ +=WallTimer::get_time_mark(); std::vector<idi> ks(num_queues, 0); std::vector<idi> nks(num_queues); std::vector<idi> bound_ks(num_queues); double tmp_time_pick_top_m = 0; uint64_t tmp_count_add_to_queue = 0; double tmp_time_distance_computation = 0; double tmp_time_add_to_queue = 0; while (true) { time_expand_ -= WallTimer::get_time_mark(); ++iter; // Gather top-M vertices time_gather_ -= WallTimer::get_time_mark(); gather_unchecked_top_M_seq( query_id, iter, set_L, ks, local_M, num_queues, local_queues_starts, local_queues_sizes, top_m_candidates, top_m_candidates_size, bound_ks); time_gather_ += WallTimer::get_time_mark(); {//test printf("query_id: %u " "iter: %u", query_id, iter); printf(" local_queues_sizes:"); for (idi i = 0; i < num_queues; ++i) { printf(" %u", local_queues_sizes[i]); } // printf(" local_m_counts:"); // for (idi i = 0; i < num_queues; ++i) { // printf(" %u", local_m_counts[i]); // } // printf(" ks:"); // for (idi i = 0; i < num_queues; ++i) { // printf(" %u", ks[i]); // } printf(" gathered:"); for (idi i = 0; i < num_queues; ++i) { printf(" %u", bound_ks[i] - ks[i]); } printf("\n"); } if (!top_m_candidates_size) { time_expand_ += WallTimer::get_time_mark(); break; } std::fill(nks.begin(), nks.end(), global_L); // Expand top-M vertices reduction(+ : 
tmp_count_computation) \ reduction(+ : tmp_count_add_to_queue) \ reduction(+ : tmp_time_distance_computation) \ reduction(+ : tmp_time_pick_top_m) \ reduction(+ : tmp_time_add_to_queue) for (idi c_i = 0; c_i < top_m_candidates_size; ++c_i) { tmp_time_pick_top_m -= WallTimer::get_time_mark(); idi tid = omp_get_thread_num(); const idi set_L_start = local_queues_starts[tid]; idi &set_L_size = local_queues_sizes[tid]; idi &nk = nks[tid]; // idi L_value = tid == 0 ? global_L : local_L; idi L_value = local_L; idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } tmp_time_pick_top_m += WallTimer::get_time_mark(); // Expand cand_id's neighbors for (idi e_i = 0; e_i < out_degree; ++e_i) { tmp_time_distance_computation -= WallTimer::get_time_mark(); idi nb_id = out_edges[e_i]; { // Sequential edition if (is_visited[nb_id]) { tmp_time_distance_computation += WallTimer::get_time_mark(); continue; } is_visited[nb_id] = 1; } auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); dataf norm = *nb_data++; ++tmp_count_computation; distf dist = compute_distance_with_norm(nb_data, query_data, norm); tmp_time_distance_computation += WallTimer::get_time_mark(); if (dist > set_L[set_L_start + set_L_size - 1].distance_) { if (set_L_size < L_value) { ++tmp_count_add_to_queue; set_L[set_L_start + set_L_size] = Candidate(nb_id, dist, false); if (set_L_size < nk) { nk = set_L_size; } ++set_L_size; } continue; } // if (dist > set_L[set_L_start + set_L_size - 1].distance_) { //// if (dist > bound_lth) { // continue; // } ++tmp_count_add_to_queue; Candidate cand(nb_id, dist, false); tmp_time_add_to_queue -= WallTimer::get_time_mark(); idi r = add_into_queue( set_L, set_L_start, set_L_size, L_value, cand); if (r < nk) { nk = r; } tmp_time_add_to_queue += WallTimer::get_time_mark(); } } top_m_candidates_size = 0; time_add_to_queue_ += tmp_time_add_to_queue; tmp_time_add_to_queue = 0; time_distance_computation_ += tmp_time_distance_computation; tmp_time_distance_computation = 0; count_add_to_queue_ += tmp_count_add_to_queue; tmp_count_add_to_queue = 0; time_pick_top_m_ += tmp_time_pick_top_m; tmp_time_pick_top_m = 0; count_distance_computation_ += tmp_count_computation; tmp_count_computation = 0; for (idi q_i = 0; q_i < num_queues; ++q_i) { if (nks[q_i] < bound_ks[q_i]) { ks[q_i] = nks[q_i]; } else { ks[q_i] = bound_ks[q_i]; } } time_expand_ += WallTimer::get_time_mark(); // time_select_ -= WallTimer::get_time_mark(); // {// Select L-th // bound_lth = selecting_top_L_seq( // set_L, // global_L, // num_queues, // local_queues_starts, // local_queues_sizes); // } // time_select_ += WallTimer::get_time_mark(); {// Scale M if (local_M < local_M_max) { local_M <<= 1; } } // {//test // printf("query_id: %u " // "iter: %u", // query_id, // iter); // printf(" local_queues_sizes:"); // for (idi i = 0; i < num_queues; ++i) { // printf(" %u", local_queues_sizes[i]); // } //// printf(" local_m_counts:"); //// for (idi i = 0; i < num_queues; ++i) { //// printf(" %u", local_m_counts[i]); //// } //// printf(" ks:"); //// for (idi i = 0; i < num_queues; ++i) { //// printf(" %u", ks[i]); //// } // printf(" bound_ks:"); // for (idi i = 0; i < num_queues; ++i) { // printf(" %u", bound_ks[i]); // } // 
printf("\n"); // } } } time_parallel_phase_ += WallTimer::get_time_mark(); } // time_merge_ -= WallTimer::get_time_mark(); time_ending_ -= WallTimer::get_time_mark(); {// Return the results to set_K std::vector<idi> pointer(num_threads_, 0); // get the first distf min_dist = FLT_MAX; idi min_q_i; idi min_id; idi min_sub; idi last_id; for (int q_i = 0; q_i < num_threads_; ++q_i) { if (pointer[q_i] >= local_queues_sizes[q_i]) { continue; } idi sub = pointer[q_i] + local_queues_starts[q_i]; distf tmp_dist = set_L[sub].distance_; idi tmp_id = set_L[sub].id_; if (tmp_dist < min_dist) { min_dist = tmp_dist; min_id = tmp_id; min_q_i = q_i; min_sub = sub; } else if (tmp_dist == min_dist && tmp_id < min_id) { min_id = tmp_id; min_q_i = q_i; min_sub = sub; } } set_K[0] = set_L[min_sub].id_; // {//test // printf("query_id: %u " // "[%u]: " // "(%u, %f)\n", // query_id, // 0, // set_L[min_sub].id_, set_L[min_sub].distance_); // } ++pointer[min_q_i]; last_id = set_K[0]; bool is_finished = false; idi k_i = 1; while (k_i < K && !is_finished) { is_finished = true; min_dist = FLT_MAX; for (int q_i = 0; q_i < num_threads_; ++q_i) { const idi local_queue_size = local_queues_sizes[q_i]; idi sub = pointer[q_i] + local_queues_starts[q_i]; while (pointer[q_i] < local_queue_size && set_L[sub].id_ == last_id) { ++pointer[q_i]; ++sub; } if (pointer[q_i] >= local_queue_size) { continue; } is_finished = false; distf tmp_dist = set_L[sub].distance_; idi tmp_id = set_L[sub].id_; if (tmp_dist < min_dist) { min_dist = tmp_dist; min_id = tmp_id; min_q_i = q_i; min_sub = sub; } else if (tmp_dist == min_dist && tmp_id < min_id) { min_id = tmp_id; min_q_i = q_i; min_sub = sub; } } set_K[k_i] = set_L[min_sub].id_; // {//test // printf("query_id: %u " // "[%u]: " // "(%u, %f)\n", // query_id, // k_i, // set_L[min_sub].id_, set_L[min_sub].distance_); // } ++pointer[min_q_i]; ++k_i; } } // time_merge_ += WallTimer::get_time_mark(); {// Reset // std::fill(is_visited.begin(), is_visited.end(), 0); is_visited.reset(); std::fill(local_queues_sizes.begin() + 1, local_queues_sizes.end(), 0); } time_ending_ += WallTimer::get_time_mark(); {//test if (3 == query_id) { exit(1); } } } /* * 6/27/2020-12:33 * Do searching on the local_set_L * local_set_L is already sorted * is_visited is already set up. */ inline void Searching::subsearch_for_simple_search( const idi query_id, const idi local_L, std::vector<Candidate> &set_L, const idi base_set_L, idi &set_L_end, // std::vector<uint8_t> &is_visited, boost::dynamic_bitset<> &is_visited, uint64_t &local_count_distance_computation) { const dataf *query_data = queries_load_ + query_id * dimension_; // idi local_top_m_candidates_end = 0; idi k = 0; // Index of first unchecked candidate. idi iter = 0; // idi M = 1; // value of M while (k < local_L) { ++iter; // {//test // printf("query_id: %u " // "iter: %u\n", // query_id, // iter); // } // Select the top-1 unchecked candidate idi top_1; idi last_k = local_L; // Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. for (idi c_i = k; c_i < set_L_end; ++c_i) { idi index_set_L = c_i + base_set_L; if (set_L[index_set_L].is_checked_) { continue; } top_1 = set_L[index_set_L].id_; last_k = c_i; // Record the location of the last candidate selected. set_L[index_set_L].is_checked_ = true; // local_top_m_candidates[local_top_m_candidates_end++] = set_L[index_set_L].id_; break; } if (last_k == local_L) { break; } idi nk = local_L; // Push top-1' neighbors into the queue. 
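        // Adjacency layout: the out-edge list of cand_id starts at
        // opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_; the first idi stored there is
        // the out-degree, followed by the neighbor ids. Each unvisited neighbor is marked in
        // is_visited, its distance to the query is computed, and it is inserted with
        // add_into_queue() unless it is farther than the current last element of the queue.
        // add_into_queue() returns the insertion position; nk keeps the smallest such position
        // so the outer loop can resume scanning from the earliest improved entry rather than
        // from the top of the queue.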
idi cand_id = top_1; _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); idi out_degree = *out_edges++; for (idi n_i = 0; n_i < out_degree; ++n_i) { _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); } for (idi e_i = 0; e_i < out_degree; ++e_i) { idi nb_id = out_edges[e_i]; { // Sequential edition if (is_visited[nb_id]) { continue; } is_visited[nb_id] = 1; } // {// Critical edition // if (!AtomicOps::CAS(is_visited.data() + nb_id, // static_cast<uint8_t>(0), // static_cast<uint8_t>(1))) { // continue; // } // } auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); dataf norm = *nb_data++; ++local_count_distance_computation; distf dist = compute_distance_with_norm(nb_data, query_data, norm); // { // if (0 == query_id // && (785802 == nb_id // || 180955 == nb_id // || 240996 == nb_id // || 813701 == nb_id // || 708177 == nb_id // || 87578 == nb_id // || 561813 == nb_id // || 701258 == nb_id // || 872728 == nb_id)) { //// && 180955 == nb_id) { // printf("parent: %u " // "nb_id: %u " // "dist: %f " // "base_set_L: %u " // "set_L_end: %u\n", // cand_id, // nb_id, // dist, // base_set_L, // set_L_end); // } // } if (dist > set_L[set_L_end - 1 + base_set_L].distance_) { continue; } Candidate cand(nb_id, dist, false); // Thread 0 maintains the "global" queue idi r = add_into_queue( set_L, base_set_L, set_L_end, local_L, cand); if (r < nk) { nk = r; } } if (nk <= last_k) { k = nk; } else { k = last_k + 1; } } } /* * 6/27/2020-12:26 * Is is good to use subsearch by every thread it self? */ inline void Searching::para_simple_search_subsearch( const idi query_id, const idi K, const idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K, // std::vector<uint8_t> &is_visited) boost::dynamic_bitset<> &is_visited) { uint64_t tmp_count_computation = 0; {// Initialization // is_visited flag array // // Cannot use OMP for bit array is_visited! for (idi c_i = 0; c_i < L; ++c_i) { is_visited[init_ids[c_i]] = 1; } const dataf *query_data = queries_load_ + query_id * dimension_; for (idi v_i = 0; v_i < L; ++v_i) { idi v_id = init_ids[v_i]; _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); } // Get the distances of all candidates, store in the set set_L. // for (unsigned i = 0; i < L; i++) { unsigned v_id = init_ids[i]; auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); dataf norm = *v_data++; ++tmp_count_computation; distf dist = compute_distance_with_norm(v_data, query_data, norm); set_L[i] = Candidate(v_id, dist, false); // False means not checked. 
} count_distance_computation_ += tmp_count_computation; tmp_count_computation = 0; // std::sort( // set_L.begin(), // set_L.begin() + L); } idi queue_end = L; // Searching if (num_threads_ == 1) { // Single threads std::sort( set_L.begin(), set_L.end()); subsearch_for_simple_search( query_id, L, set_L, 0, queue_end, is_visited, tmp_count_computation); count_distance_computation_ += tmp_count_computation; // { //// {//test //// for (idi i = 0; i < queue_end; ++i) { //// printf("start: " //// "query_id: %u " //// "set_L[%u]: " //// "(%u %f)\n", //// query_id, //// i, //// set_L[i].id_, set_L[i].distance_); //// } //// } // // idi half_length = queue_end / 2; // std::sort( // set_L.begin(), // set_L.begin() + half_length); //// {//test //// for (idi i = 0; i < half_length; ++i) { //// printf("sorted: " //// "query_id: %u " //// "set_L[%u]: " //// "(%u %f)\n", //// query_id, //// i, //// set_L[i].id_, set_L[i].distance_); //// } //// } // // subsearch_for_simple_search( // query_id, // half_length, // local_L // set_L, // 0, // base_set_L // half_length, // set_L_end // is_visited, // tmp_count_computation); // //// {//test //// for (idi i = 0; i < half_length; ++i) { //// printf("subsearched: " //// "query_id: %u " //// "set_L[%u]: " //// "(%u %f)\n", //// query_id, //// i, //// set_L[i].id_, set_L[i].distance_); //// } //// } // // std::sort( // set_L.begin() + half_length, // set_L.end()); // //// {//test //// for (idi i = half_length; i < queue_end; ++i) { //// printf("sorted: " //// "query_id: %u " //// "set_L[%u]: " //// "(%u %f)\n", //// query_id, //// i, //// set_L[i].id_, set_L[i].distance_); //// } //// } // // subsearch_for_simple_search( // query_id, // half_length, // local_L // set_L, // half_length, // base_set_L // half_length, // set_L_end // is_visited, // tmp_count_computation); //// {//test //// for (idi i = half_length; i < queue_end; ++i) { //// printf("subsearched: " //// "query_id: %u " //// "set_L[%u]: " //// "(%u %f)\n", //// query_id, //// i, //// set_L[i].id_, set_L[i].distance_); //// } //// } //// {//test //// for (idi i = 0; i < queue_end; ++i) { //// printf("explored: " //// "query_id: %u " //// "set_L[%u]: " //// "(%u %f)\n", //// query_id, //// i, //// set_L[i].id_, set_L[i].distance_); //// } //// } // count_distance_computation_ += tmp_count_computation; // // std::vector <Candidate> tmp_set_L(L); // std::merge(set_L.begin(), set_L.begin() + half_length, // set_L.begin() + half_length, set_L.end(), // tmp_set_L.begin()); // std::copy(tmp_set_L.begin(), tmp_set_L.end(), set_L.begin()); //// {//test //// for (idi i = 0; i < queue_end; ++i) { //// printf("merged: " //// "query_id: %u " //// "set_L[%u]: " //// "(%u %f)\n", //// query_id, //// i, //// set_L[i].id_, set_L[i].distance_); //// } //// } // } } else { // Multiple threads const idi num_queues = num_threads_; const idi local_queue_length = (L - 1) / num_queues + 1; // Parallel for for (idi q_i = 0; q_i < num_queues; ++q_i) { idi local_queue_base = q_i * local_queue_length; if (local_queue_base >= L) { continue; } idi local_queue_end = local_queue_length; if (local_queue_base + local_queue_end > L) { local_queue_end = L - local_queue_base; } std::sort( set_L.begin() + local_queue_base, set_L.begin() + local_queue_base + local_queue_end); subsearch_for_simple_search( query_id, local_queue_end, // local_L set_L, local_queue_base, // base_set_L local_queue_end, // set_L_end is_visited, tmp_count_computation); } count_distance_computation_ += tmp_count_computation; // Merge // time_merge_ -= 
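            // The per-thread queues (each occupying a local_queue_length-sized slot of set_L)
            // are merged back so the best candidates sit at the front of set_L; the
            // duplicate-skipping loop below then copies the top-K distinct ids into set_K.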
WallTimer::get_time_mark(); merge_in_set_L( set_L, L, num_queues, local_queue_length); // time_merge_ += WallTimer::get_time_mark(); } {// Return the results to set_K // How to deal with duplicate? idi last_id = set_L[0].id_; set_K[0] = last_id; idi k_i = 1; idi l_i = 1; while (k_i < K && l_i < L) { if (last_id == set_L[l_i].id_) { ++l_i; continue; } last_id = set_L[l_i++].id_; set_K[k_i++] = last_id; } // // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; //// set_K[k_i] = set_L[k_i].id_; // } } {// Reset // std::fill(is_visited.begin(), is_visited.end(), 0); is_visited.reset(); // is_visited.clear_all(); } // {//test // if (0 == query_id) { // exit(1); // } // } } ///* // * 6/22/2020-09:38 // * A synchronized last element as the sentinel // */ //inline void Searching::para_search_with_top_m_merge_queues_global_threshold( // const idi value_M_middle, // const idi value_M_max, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; // std::vector<idi> &local_queues_ends, // Sizes of local queue // std::vector<idi> &top_m_candidates, // boost::dynamic_bitset<> &is_visited) //{ //// const idi base_set_L = (num_threads_ - 1) * local_queue_length; // { // // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = 1; // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; // // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // uint64_t tmp_count_computation = 0; // // Get the distances of all candidates, store in the set set_L. //// // // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; // ++tmp_count_computation; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i + base_set_L] = Candidate(v_id, dist, false); // False means not checked. // } // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; //// std::sort(set_L.begin(), set_L.begin() + L); // std::sort( // set_L.begin() + base_set_L, // set_L.begin() + base_set_L + L); // local_queues_ends[num_threads_ - 1] = L; // // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. // idi tmp_count = 0; // for debug // idi M = 1; // // { // Single thread // while (k < L && M < value_M_middle) { // ++tmp_count; //// {//test //// printf("tmp_count: %d\n", tmp_count); //// } // // // Select M candidates // idi last_k = L; //// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // idi index_set_L = c_i + base_set_L; // if (set_L[index_set_L].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[index_set_L].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; // } // // idi nk = L; // // Push M candidates' neighbors into the queue. 
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // { // Sequential edition // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = 1; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; // ++tmp_count_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L - 1 + base_set_L].distance_) { // continue; // } // // Candidate cand(nb_id, dist, false); // // Thread 0 maintains the "global" queue // idi r = add_into_queue( // set_L, // base_set_L, // local_queues_ends[num_threads_ - 1], // L, // cand); // if (r < nk) { // nk = r; // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // // {// Scale M // if (M < value_M_max) { // M <<= 1; // } else { // M = value_M_max; // } // } // // } // } // // { // Multiple Threads // while (k < L) { // ++tmp_count; //// {//test //// printf("tmp_count: %d\n", tmp_count); //// } // // Select M candidates // idi last_k = L; //// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // idi index_set_L = c_i + base_set_L; // if (set_L[index_set_L].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[index_set_L].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; // } // // // idi nk = L; // // Push M candidates' neighbors into the queue. //// // // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // int tid = omp_get_thread_num(); // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // { // Sequential edition // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = 1; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; // ++tmp_count_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L - 1 + base_set_L].distance_) { // continue; // } // // Candidate cand(nb_id, dist, false); // // Add to the local queue. 
// if (0 != tid) { // // Non-Master threads using local queues // add_into_queue( // set_L, // (tid - 1) * local_queue_length, // local_queues_ends[tid - 1], // local_queue_length, // cand); // } else { // // Thread 0 maintains the "global" queue // idi r = add_into_queue( // set_L, // base_set_L, // local_queues_ends[num_threads_ - 1], // L, // cand); // if (r < nk) { // nk = r; // } // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; // // {// Local queues' ends //// printf("query%u:iter: %u", query_id, tmp_count); // idi total_elements = 0; // for (int i_t = 0; i_t < num_threads_ - 1; ++i_t) { // total_elements += local_queues_ends[i_t]; // } // number_local_elements_ += total_elements; //// printf(" total_elements: %u+%u\n", total_elements - local_queues_ends[num_threads_ - 1], local_queues_ends[num_threads_ - 1]); //// for (int i_t = 0; i_t < num_threads_; ++i_t) { //// printf(" [%u]: %u", i_t, local_queues_ends[i_t]); //// } //// printf("\n"); // } // //// // Merge. Merge all queues in parallel. // { // time_merge_ -= WallTimer::get_time_mark(); // if (num_threads_ > 1) { // idi r = merge_all_queues_para_array( // set_L, // local_queues_ends, // local_queue_length, // L); // if (r < nk) { // nk = r; // } // } // time_merge_ += WallTimer::get_time_mark(); // } // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // {// Scale M // if (M < value_M_max) { // M <<= 1; // } else { // M = value_M_max; // } // } // // } // } // // // // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i + base_set_L].id_; //// set_K[k_i] = set_L[k_i].id_; // } // // {// Reset //// std::fill(is_visited.begin(), is_visited.end(), 0); // is_visited.reset(); //// is_visited.clear_all(); // std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0); // } // //// {//test //// if (0 == query_id) { //// exit(1); //// } //// } //} ///* // * 6/7/2020-16:55 // * Use 1 threads to scale M until the value_M_middle. // * Then use multiple threads. // * Except for Thread 0, other threads are collectors. They collect, but do not merge. // * Only merge once after Thread 0 stops. // */ //inline void Searching::para_search_with_top_m_merge_queues_collectors( // const idi value_M_middle, // const idi value_M_max, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; // std::vector<idi> &local_queues_ends, // Sizes of local queue //// std::vector<Candidate> &top_m_candidates, // std::vector<idi> &top_m_candidates, //// std::vector<uint8_t> &is_visited) // boost::dynamic_bitset<> &is_visited) //// std::vector<distf> &local_thresholds) //// BitVector &is_visited) //{ // { // // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = 1; //// is_visited.atomic_set_bit(init_ids[c_i]); // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; // // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // uint64_t tmp_count_computation = 0; // // Get the distances of all candidates, store in the set set_L. 
//// // // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; //// ++count_distance_computation_; // ++tmp_count_computation; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i + base_set_L] = Candidate(v_id, dist, false); // False means not checked. //// set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; //// std::sort(set_L.begin(), set_L.begin() + L); // std::sort( // set_L.begin() + base_set_L, // set_L.begin() + base_set_L + L); //// boost::sort::block_indirect_sort( //// set_L.begin() + base_set_L, //// set_L.begin() + base_set_L + L, //// num_threads_); // local_queues_ends[num_threads_ - 1] = L; // //// std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. // idi tmp_count = 0; // for debug // idi M = 1; // // // Single thread // { // while (k < L && M < value_M_middle) { // ++tmp_count; //// {//test //// printf("tmp_count: %d\n", tmp_count); //// } // //// int real_threads = std::min(static_cast<int>(M), num_threads_); //// idi queue_base = num_threads_ - real_threads; // // Select M candidates // idi last_k = L; //// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // idi index_set_L = c_i + base_set_L; // if (set_L[index_set_L].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[index_set_L].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; // } // // idi nk = L; // // Push M candidates' neighbors into the queue. 
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // { // Sequential edition // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = 1; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation_; // ++tmp_count_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L - 1 + base_set_L].distance_) { // continue; // } // // Candidate cand(nb_id, dist, false); // // Thread 0 maintains the "global" queue // idi r = add_into_queue( // set_L, // base_set_L, // local_queues_ends[num_threads_ - 1], // L, // cand); // if (r < nk) { // nk = r; // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // // {// Scale M // if (M < value_M_max) { // M <<= 1; // } else { // M = value_M_max; // } // } // } // } // // // Multiple Threads // { //// while (k < L/num_threads_/2) { // while (k < L) { // ++tmp_count; //// {//test //// printf("tmp_count: %d\n", tmp_count); //// } //// int real_threads = std::min(static_cast<int>(M), num_threads_); //// idi queue_base = num_threads_ - real_threads; // // Select M candidates // idi last_k = L; //// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // idi index_set_L = c_i + base_set_L; // if (set_L[index_set_L].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[index_set_L].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; // } // // idi chunk_size; // if (num_threads_ <= top_m_candidates_end) { // chunk_size = (top_m_candidates_end - 1) / num_threads_ + 1; // } else { // chunk_size = 1; // } // idi nk = L; // // Push M candidates' neighbors into the queue. 
//// //// // // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // int tid = omp_get_thread_num(); //// { //// if (c_i < chunk_size && tid != 0) { //// printf("query_id: %u " //// "tmp_count: %u " //// "chunk_size: %u " //// "c_i: %u " //// "tid: %u\n", //// query_id, //// tmp_count, //// chunk_size, //// c_i, //// tid); //// } //// } // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // { // Sequential edition // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = 1; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation_; // ++tmp_count_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L - 1 + base_set_L].distance_) { // continue; // } // // Candidate cand(nb_id, dist, false); // // Add to the local queue. // if (0 != tid) { // // Non-Master threads using local queues // add_into_queue( // set_L, // (tid - 1) * local_queue_length, // local_queues_ends[tid - 1], // local_queue_length, // cand); // } else { // // Thread 0 maintains the "global" queue // idi r = add_into_queue( // set_L, // base_set_L, // local_queues_ends[num_threads_ - 1], // L, // cand); // if (r < nk) { // nk = r; // } // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; // ////// // Merge. Merge all queues in parallel. //// { //// time_merge_ -= WallTimer::get_time_mark(); //// if (num_threads_ > 1) { ////// idi r = merge_all_queues_queue_base( ////// set_L, ////// local_queues_ends, ////// queue_base, ////// real_threads, ////// local_queue_length, ////// L); //// idi r = merge_all_queues_para_array( //// set_L, //// local_queues_ends, //// local_queue_length, //// L); //// if (r < nk) { //// nk = r; //// } //// } //// time_merge_ += WallTimer::get_time_mark(); //// } // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // {// Scale M // if (M < value_M_max) { // M <<= 1; // } else { // M = value_M_max; // } // } // } // //// // Merge only once after Master Thread stops. 
//// { //// time_merge_ -= WallTimer::get_time_mark(); //// if (num_threads_ > 1) { ////// idi r = merge_all_queues_queue_base( ////// set_L, ////// local_queues_ends, ////// queue_base, ////// real_threads, ////// local_queue_length, ////// L); //// merge_all_queues_para_array( //// set_L, //// local_queues_ends, //// local_queue_length, //// L); //// } //// time_merge_ += WallTimer::get_time_mark(); //// } // } // // // // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i + base_set_L].id_; //// set_K[k_i] = set_L[k_i].id_; // } // // {// Reset //// std::fill(is_visited.begin(), is_visited.end(), 0); // is_visited.reset(); //// is_visited.clear_all(); // std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0); // } // //// {//test //// printf("tmp_count: %u\n", tmp_count); //// if (3 == query_id) { //// exit(1); //// } //// } //} ///* // * 6/8/2020-16:39 // * Selecting rather than merging // */ //inline void Searching::para_search_with_top_m_merge_queues_selecting( // const idi value_M_middle, // const idi value_M_max, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; // std::vector<idi> &local_queues_ends, // Sizes of local queue //// std::vector<Candidate> &top_m_candidates, // std::vector<idi> &top_m_candidates, //// std::vector<uint8_t> &is_visited) // boost::dynamic_bitset<> &is_visited) //{ // { // // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = 1; //// is_visited.atomic_set_bit(init_ids[c_i]); // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; // // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // uint64_t tmp_count_computation = 0; // // Get the distances of all candidates, store in the set set_L. //// // // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; //// ++count_distance_computation_; // ++tmp_count_computation; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i + base_set_L] = Candidate(v_id, dist, false); // False means not checked. //// set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; //// std::sort(set_L.begin(), set_L.begin() + L); // std::sort( // set_L.begin() + base_set_L, // set_L.begin() + base_set_L + L); //// boost::sort::block_indirect_sort( //// set_L.begin() + base_set_L, //// set_L.begin() + base_set_L + L, //// num_threads_); // local_queues_ends[num_threads_ - 1] = L; // //// std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. // idi tmp_count = 0; // for debug // idi M = 1; // // // Single thread // { // while (k < L && M < value_M_middle) { // ++tmp_count; //// {//test //// printf("tmp_count: %d\n", tmp_count); //// } // //// int real_threads = std::min(static_cast<int>(M), num_threads_); //// idi queue_base = num_threads_ - real_threads; // // Select M candidates // idi last_k = L; //// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. 
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // idi index_set_L = c_i + base_set_L; // if (set_L[index_set_L].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[index_set_L].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; // } // // idi nk = L; // // Push M candidates' neighbors into the queue. // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // { // Sequential edition // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = 1; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation_; // ++tmp_count_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L - 1 + base_set_L].distance_) { // continue; // } // // Candidate cand(nb_id, dist, false); // // Thread 0 maintains the "global" queue // idi r = add_into_queue( // set_L, // base_set_L, // local_queues_ends[num_threads_ - 1], // L, // cand); // if (r < nk) { // nk = r; // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // // {// Scale M // if (M < value_M_max) { // M <<= 1; // } else { // M = value_M_max; // } // } // } // } // // // Multiple Threads // { //// while (k < L/num_threads_/2) { //// while (k < L) { // while (true) { // ++tmp_count; //// {//test //// printf("tmp_count: %d\n", tmp_count); //// } //// // Select M candidates //// idi last_k = L; ////// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. //// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { //// idi index_set_L = c_i + base_set_L; //// if (set_L[index_set_L].is_checked_) { //// continue; //// } //// last_k = c_i; // Record the location of the last candidate selected. //// set_L[index_set_L].is_checked_ = true; //// top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; //// } // // // Select M candidates // { // idi traverse_count = 0; // idi bound_sub = L; // This is not always true! // for (idi sub = 0; sub < bound_sub && top_m_candidates_end < M && traverse_count < L; ++sub) { // for (int tid = 0; tid < num_threads_ && top_m_candidates_end < M && traverse_count < L; ++tid) { // if (sub >= local_queues_ends[tid]) { // continue; // } // idi index_set_L = tid * local_queue_length + sub; // if (set_L[index_set_L].is_checked_) { // continue; // } // set_L[index_set_L].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; // } // } // // if (0 == top_m_candidates_end) { // break; // } // } // //// idi nk = L; // // Push M candidates' neighbors into the queue. 
//// // // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // int tid = omp_get_thread_num(); // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // { // Sequential edition // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = 1; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation_; // ++tmp_count_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L - 1 + base_set_L].distance_) { // continue; // } // // Candidate cand(nb_id, dist, false); // // Add to the local queue. // if (0 != tid) { // // Non-Master threads using local queues // add_into_queue( // set_L, // (tid - 1) * local_queue_length, // local_queues_ends[tid - 1], // local_queue_length, // cand); // } else { // // Thread 0 maintains the "global" queue //// idi r = // add_into_queue( // set_L, // base_set_L, // local_queues_ends[num_threads_ - 1], // L, // cand); //// if (r < nk) { //// nk = r; //// } // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; // //// // Merge. Merge all queues in parallel. // { // time_merge_ -= WallTimer::get_time_mark(); // if (num_threads_ > 1) { //// idi r = merge_all_queues_queue_base( //// set_L, //// local_queues_ends, //// queue_base, //// real_threads, //// local_queue_length, //// L); //// idi r = // merge_all_queues_para_array( // set_L, // local_queues_ends, // local_queue_length, // L); //// if (r < nk) { //// nk = r; //// } // } // time_merge_ += WallTimer::get_time_mark(); // } //// if (nk <= last_k) { //// k = nk; //// } else { //// k = last_k + 1; //// } // {// Scale M // if (M < value_M_max) { // M <<= 1; // } else { // M = value_M_max; // } // } // } // } // // //// //// for (idi k_i = 0; k_i < K; ++k_i) { //// set_K[k_i] = set_L[k_i + base_set_L].id_; ////// set_K[k_i] = set_L[k_i].id_; //// } // // { // idi k_i = 0; // idi bound_sub = K / num_threads_; // for (idi sub = 0; sub < bound_sub; ++sub) { // for (int tid = 0; tid < num_threads_; ++tid) { // idi index_set_L = tid * local_queue_length + sub; // set_K[k_i++] = set_L[index_set_L].id_; // } // } // idi remain = K - k_i; // if (remain) { // for (int tid = 0; tid < remain; ++tid) { // idi index_set_L = tid * local_queue_length + bound_sub; // set_K[k_i++] = set_L[index_set_L].id_; // } // } // } // // {// Reset //// std::fill(is_visited.begin(), is_visited.end(), 0); // is_visited.reset(); //// is_visited.clear_all(); // std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0); // } // //// {//test //// printf("tmp_count: %u\n", tmp_count); //// if (3 == query_id) { //// exit(1); //// } //// } //} } // namespace PANNS #endif //BATCH_SEARCHING_SEARCHING_H
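/*
 * Minimal driver sketch for para_search_with_top_m_subsearch_v5(). It assumes the per-thread
 * queue layout used above: queue q_i lives in set_L starting at local_queues_starts[q_i],
 * every slot is large enough for local_L candidates, and slot 0 is also large enough to hold
 * the initial global_L candidates. The concrete sizes and the names local_queue_capacity,
 * M_max and M_middle below are illustrative placeholders, not values taken from this code.
 *
 *   using namespace PANNS;
 *   Searching searcher;                      // data/queries/graph loaded via load_data_load(),
 *                                            // load_queries_load(), load_nsg_graph()
 *   const idi K = 100, global_L = 200, local_L = 120;
 *   const idi M_max = 32, M_middle = 4;
 *   const idi num_queues = searcher.num_threads_;
 *   const idi local_queue_capacity = std::max(global_L, local_L);
 *   std::vector<idi> starts(num_queues), sizes(num_queues, 0);
 *   for (idi q_i = 0; q_i < num_queues; ++q_i) {
 *       starts[q_i] = q_i * local_queue_capacity;
 *   }
 *   std::vector<Candidate> set_L(num_queues * local_queue_capacity);
 *   std::vector<idi> init_ids(global_L);
 *   std::vector<idi> set_K(K);
 *   std::vector<idi> top_m_candidates(M_max * num_queues);  // generously sized gather buffer
 *   boost::dynamic_bitset<> is_visited(searcher.num_v_);
 *   searcher.prepare_init_ids(init_ids, global_L);
 *   for (idi q_id = 0; q_id < searcher.num_queries_; ++q_id) {
 *       searcher.para_search_with_top_m_subsearch_v5(
 *               M_max, M_middle, q_id, K, global_L, local_L,
 *               set_L, init_ids, set_K,
 *               starts, sizes, top_m_candidates, is_visited);
 *       // consume set_K here; the function resets is_visited and the sizes of all
 *       // queues except queue 0 before it returns.
 *   }
 */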
// // Created by Zhen Peng on 7/28/2020. // #ifndef BATCH_SEARCHING_SEARCHING_H #define BATCH_SEARCHING_SEARCHING_H #include <vector> #include <boost/dynamic_bitset.hpp> //#include <boost/sort/sort.hpp> #include <iostream> #include <fstream> #include <unordered_map> #include <immintrin.h> #include <cstring> #include <unordered_set> #include <set> #include <cfloat> #include <algorithm> //#include <omp.h> #include "../include/definitions.h" //#include "../include/efanna2e/neighbor.h" #include "../include/utils.h" #include "../include/Candidate.h" #include "../include/parallelization.h" #include "../include/bitvector.h" namespace PANNS { class Searching { //private: public: idi num_v_ = 0; edgei num_e_ = 0; idi num_queries_ = 0; uint64_t dimension_ = 0; idi width_ = 0; // NSG largest degree idi ep_ = 0; // Start point // std::vector<dataf> data_load_; // std::vector<dataf> queries_load_; // std::vector< std::vector<dataf> > data_load_; // std::vector< std::vector<dataf> > queries_load_; // std::vector<distf> norms_; dataf *data_load_ = nullptr; dataf *queries_load_ = nullptr; // dataf *norms_; // std::vector< std::vector<idi> > nsg_graph_; // idi *nsg_graph_indices_; // idi *nsg_graph_out_edges_; // std::vector< std::vector<idi> > edge_list_; char *opt_nsg_graph_ = nullptr; uint64_t data_bytes_; uint64_t neighbor_bytes_; uint64_t vertex_bytes_; // For multithreads int num_threads_ = 1; // int num_real_threads_ = 1; // int num_threads_intra_query_ = 1; // int num_threads_inter_query_ = 1; dataf compute_norm( const dataf *data) const; // idi vertex_id); // const std::vector<PANNS::dataf> &data); // size_t loc_start, // idi dimension) dataf compute_distance_with_norm( const dataf *v_data, const dataf *q_data, // idi vertex_id, // idi query_id, // const std::vector<dataf> &d_data, // const std::vector<dataf> &q_data, // PANNS::idi d_start, // PANNS::idi q_start, const dataf vertex_norm) const; // idi dimension) static idi insert_into_queue( std::vector<Candidate> &c_queue, idi c_queue_top, Candidate cand); static idi add_into_queue( std::vector<PANNS::Candidate> &queue, idi &queue_top, const idi queue_size, const PANNS::Candidate &cand); static idi add_into_queue( std::vector<PANNS::Candidate> &queue, const idi queue_start, idi &queue_size, const idi queue_capacity, const PANNS::Candidate &cand); static void add_into_queue_at( const Candidate &cand, std::vector<Candidate> &queue, const idi insert_index, // The insertion location, independent with queue_start const idi queue_start, idi &queue_top, // The number of elements in queue, independent with queue_start const idi queue_size); // The maximum capacity of queue, independent with queue_start. static void insert_one_element_at( // const T &cand, // T *queue_base, const Candidate &cand, std::vector<Candidate> &queue_base, const idi insert_index, const idi queue_start, const idi queue_size); // idi insert_into_queue_nsg( // std::vector< Candidate > &c_queue, // idi c_queue_top, // Candidate cand); static idi merge_two_queues_into_1st_queue_seq_fixed( std::vector<Candidate> &queue1, const idi queue1_start, const idi queue1_size, std::vector<Candidate> &queue2, const idi queue2_start, const idi queue2_size); static void merge_two_queues_into_1st_queue_seq_incr( std::vector<Candidate> &queue1, const idi queue1_start, idi &queue1_size, // The number of element in queue1, independent with queue1_start. const idi queue1_length, // The maximum capacity of queue1, independent with queue1_start. 
std::vector<Candidate> &queue2, const idi queue2_start, const idi queue2_size); idi merge_all_queues_para_list( std::vector< std::vector<Candidate> > &local_queues_list, std::vector<idi> &local_queues_ends, std::vector<Candidate> &set_L, const idi L); // idi merge_all_queues_para_array( //// std::vector< std::vector<Candidate> > &local_queues_list, // std::vector<Candidate> &local_queues_array, // std::vector<idi> &local_queues_ends, // const idi local_queue_length, // std::vector<Candidate> &set_L, // const idi L); idi merge_all_queues_para_array( std::vector<Candidate> &set_L, // std::vector<Candidate> &local_queues_array, std::vector<idi> &local_queues_ends, const idi local_queue_length, // std::vector<Candidate> &set_L, const idi L); idi merge_all_queues_queue_base( // std::vector< std::vector<Candidate> > &local_queues_list, std::vector<Candidate> &set_L, // std::vector<Candidate> &local_queues_array, std::vector<idi> &local_queues_ends, const idi queue_base, const int real_threads, const idi local_queue_length, // std::vector<Candidate> &set_L, const idi L); void merge_two_consecutive_queues_in_place( std::vector<Candidate> &two_queues, const idi base_1, // const idi &end_1, const idi base_2, const idi &length_2); void merge_in_set_L( std::vector<Candidate> &set_L, const idi set_L_length, const idi num_queues, const idi local_queue_length); distf selecting_top_L_seq( std::vector<Candidate> &set_L, const idi global_L, // const idi local_L, const idi num_queues, const std::vector<idi> &local_queues_starts, std::vector<idi> &local_queues_sizes); void selecting_unchecked_top_M_seq( const idi query_id, const idi iter, std::vector<Candidate> &set_L, const std::vector<idi> &pointers_starts, const idi value_M, const idi num_queues, const std::vector<idi> &local_queues_starts, const std::vector<idi> &local_queues_sizes, std::vector<idi> &local_m_counts); void gather_unchecked_top_M_seq( const idi query_id, const idi iter, std::vector<Candidate> &set_L, const std::vector<idi> &pointers_starts, const idi value_M, const idi num_queues, const std::vector<idi> &local_queues_starts, const std::vector<idi> &local_queues_sizes, std::vector<idi> &top_m_candidates, idi &top_m_candidates_size, std::vector<idi> &bound_subs); // idi merge_all_queues_all_together_in_sequential( // std::vector<Candidate> &set_L, // std::vector<idi> &local_queues_ends, // const idi local_queue_length, // const idi L); // idi min_all_queues_at_heads( // const std::vector<Candidate> &set_L, // std::vector<idi> &queue_heads, // const std::vector<idi> &local_queues_ends, // const idi local_queue_length, // const idi L); public: // For Profiling // L3CacheMissRate cache_miss_kernel; uint64_t count_distance_computation_ = 0; uint64_t count_add_to_queue_ = 0; // uint64_t count_single_query_computation_ = 0; // distf dist_min_ = 0; // distf dist_max_ = 0; double time_merge_ = 0; double time_gather_ = 0; // double time_select_ = 0; // double time_select_L_ = 0.0; // double time_select_M_ = 0.0; double time_initialization_ = 0; double time_sequential_phase_ = 0; double time_parallel_phase_ = 0; double time_ending_ = 0.0; double time_assign_s_ = 0.0; double time_expand_ = 0.0; double time_pick_top_m_ = 0.0; double time_distance_computation_ = 0.0; double time_add_to_queue_ = 0.0; // double time_insert_ = 0; // double time_compare_minimum_ = 0; // double time_memmove_ = 0; // std::vector<double> time_memmove_list_; // L3CacheMissRate profile_miss_rate; // uint64_t number_local_elements_ = 0; // std::vector<idi> L_ids_; // 
std::vector<idi> M_ids_; ~Searching() { free(data_load_); data_load_ = nullptr; // free(queries_load_); // _mm_free(data_load_); free(queries_load_); queries_load_ = nullptr; // free(norms_); // free(nsg_graph_indices_); // free(nsg_graph_out_edges_); free(opt_nsg_graph_); opt_nsg_graph_ = nullptr; } void load_data_load(char *filename); void load_queries_load(char *filename); void load_nsg_graph(char *filename); // void build_opt_graph(); void prepare_init_ids( std::vector<unsigned> &init_ids, const unsigned L) const; // void prepare_candidate_queue_list( // const float *query_load, // std::vector<std::vector<efanna2e::Neighbor> > &retset_list, // std::vector<boost::dynamic_bitset<> > &is_visited_list, // const std::vector<unsigned> &init_ids, // const boost::dynamic_bitset<> &flags, // unsigned batch_start, // unsigned batch_size, // unsigned L); // void search_in_batch( //// const float *query_load, // size_t K, // size_t L, // unsigned batch_start, // unsigned batch_size, // std::vector< std::vector<Candidate> > &set_L_list, // std::vector< boost::dynamic_bitset<> > &is_visited_list, // const std::vector<idi> &init_ids, // const boost::dynamic_bitset<> &is_visited, // std::vector<std::vector<idi> > &set_K_list); void search_in_sequential( idi query_id, idi K, idi L, std::vector<Candidate> &set_L, // boost::dynamic_bitset<> &is_visited, // boost::dynamic_bitset<> is_visited, // std::vector<idi> &init_ids, const std::vector<idi> &init_ids, std::vector<idi> &set_K); // void search_in_sequential_BitVector( // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K); // idi get_out_degree(idi v_id) const // { // if (v_id < num_v_ - 1) { // return nsg_graph_indices_[v_id + 1] - nsg_graph_indices_[v_id]; // } else { // return num_e_ - nsg_graph_indices_[v_id]; // } // } void search_with_top_m( idi M, idi query_id, idi K, idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K); // std::vector< std::vector<idi> > &top_m_list); void search_with_top_m_scale_m( const PANNS::idi value_M_max, const PANNS::idi query_id, const PANNS::idi K, const PANNS::idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K, std::vector<idi> &top_m_candidates, boost::dynamic_bitset<> &is_visited); // void search_with_top_m_myths_M( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K); // void search_with_top_m_to_get_distance_range( // const PANNS::idi M, // const PANNS::idi query_id, //// const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids); // void search_with_top_m_profile_bit_CAS( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K); // void search_with_top_m_no_local_arrays( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // boost::dynamic_bitset<> &is_visited); void search_with_top_m_in_batch( PANNS::idi M, PANNS::idi batch_start, PANNS::idi batch_size, PANNS::idi K, PANNS::idi L, std::vector< std::vector<Candidate> > &set_L_list, const 
std::vector<idi> &init_ids, std::vector< std::vector<idi> > &set_K_list); // void para_search_with_top_m_critical_area( // idi M, // idi query_id, // idi K, // idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K); // void para_search_with_top_m_critical_area_no_omp( // idi M, // idi query_id, // idi K, // idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K); // void para_search_with_top_m_critical_area_yes_omp( // idi M, // idi query_id, // idi K, // idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K); // void para_search_with_top_m_visited_array( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // std::vector<uint8_t> &is_visited); // void para_search_with_top_m_merge_queues( // const idi M, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K); // void para_search_with_top_m_queues_seq_merge( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K); // void para_search_with_top_m_merge_queues_no_CAS( // const idi M, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // std::vector< std::vector<Candidate> > &local_queues_list, // std::vector<idi> &local_queues_ends, //// std::vector<uint8_t> &is_visited); // boost::dynamic_bitset<> &is_visited); // void para_search_with_top_m_merge_queues_in_array( // void para_search_with_top_m_merge_queues_new_threshold( // const idi M, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue //// std::vector< std::vector<Candidate> > &local_queues_list, // std::vector<Candidate> &local_queues_array, // std::vector<idi> &local_queues_ends, // Sizes of local queue // BitVector &is_visited); // void para_search_with_top_m_merge_queues_by_sort( // const idi M, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue //// std::vector<Candidate> &local_queues_array, // std::vector<idi> &local_queues_ends, // Sizes of local queue // std::vector<idi> &dest_offsets, // const std::vector<idi> &offsets_load_set_L, // Offsets for store into set_L. 
// BitVector &is_visited); // void para_search_with_top_m_merge_queues_better_merge_v0( // const idi M, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // std::vector<idi> &local_queues_ends, // Sizes of local queue //// std::vector<Candidate> &top_m_candidates, // std::vector<idi> &top_m_candidates, //// std::vector<uint8_t> &is_visited); // boost::dynamic_bitset<> &is_visited); //// BitVector &is_visited); // void para_search_with_top_m_merge_queues_better_merge_v2( // const idi M, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // std::vector<idi> &local_queues_ends, // Sizes of local queue //// std::vector<Candidate> &top_m_candidates, // std::vector<idi> &top_m_candidates, //// std::vector<uint8_t> &is_visited) // boost::dynamic_bitset<> &is_visited, // std::vector<distf> &local_thresholds); //// BitVector &is_visited) // void para_search_with_top_m_merge_queues_better_merge_v1( // const idi M, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // std::vector<idi> &local_queues_ends, // Sizes of local queue // std::vector<Candidate> &top_m_candidates, //// std::vector<idi> &top_m_candidates, //// std::vector<uint8_t> &is_visited); // boost::dynamic_bitset<> &is_visited); //// BitVector &is_visited); // void para_search_with_top_m_merge_queues_better_merge_v0_0( // const idi M, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // std::vector<idi> &local_queues_ends, // Sizes of local queue //// std::vector<Candidate> &top_m_candidates, // std::vector<idi> &top_m_candidates, //// std::vector<uint8_t> &is_visited) // boost::dynamic_bitset<> &is_visited); //// BitVector &is_visited) // void para_search_with_top_m_merge_queues_less_merge( // const idi M, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // std::vector<idi> &local_queues_ends, // Sizes of local queue //// std::vector<Candidate> &top_m_candidates, // std::vector<idi> &top_m_candidates, //// std::vector<uint8_t> &is_visited) // boost::dynamic_bitset<> &is_visited, // std::vector<distf> &local_thresholds); //// BitVector &is_visited) // void para_search_with_top_m_merge_queues_no_merge( // const idi M, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // std::vector<idi> &local_queues_ends, // Sizes of local queue //// std::vector<Candidate> &top_m_candidates, // std::vector<idi> &top_m_candidates, //// std::vector<uint8_t> &is_visited) // boost::dynamic_bitset<> &is_visited, // std::vector<distf> &local_thresholds, // const uint64_t computation_threshold); // void para_search_with_top_m_merge_queues_scale_m_v0( // const idi 
value_M_max, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // std::vector<idi> &local_queues_ends, // Sizes of local queue //// std::vector<Candidate> &top_m_candidates, // std::vector<idi> &top_m_candidates, //// std::vector<uint8_t> &is_visited); // boost::dynamic_bitset<> &is_visited); void para_search_with_top_m_merge_queues_middle_m( const idi value_M_middle, const idi value_M_max, const idi query_id, const idi K, const idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K, const idi local_queue_length, // Maximum size of local queue const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; std::vector<idi> &local_queues_ends, // Sizes of local queue // std::vector<Candidate> &top_m_candidates, std::vector<idi> &top_m_candidates, // std::vector<uint8_t> &is_visited) boost::dynamic_bitset<> &is_visited); // std::vector<distf> &local_thresholds); // BitVector &is_visited) // void para_search_with_top_m_merge_queues_scale_m_v2( // const idi value_M_min, // const idi value_M_max, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; // std::vector<idi> &local_queues_ends, // Sizes of local queue //// std::vector<Candidate> &top_m_candidates, // std::vector<idi> &top_m_candidates, //// std::vector<uint8_t> &is_visited) // boost::dynamic_bitset<> &is_visited); // void para_search_with_top_m_merge_queues_scale_m_v3( // const idi value_M_middle, // const idi value_M_max, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; // std::vector<idi> &local_queues_ends, // Sizes of local queue //// std::vector<Candidate> &top_m_candidates, // std::vector<idi> &top_m_candidates, //// std::vector<uint8_t> &is_visited) // boost::dynamic_bitset<> &is_visited); void para_search_with_top_m_merge_queues_middle_m_no_merge( const uint64_t computation_threshold, const idi value_M_middle, const idi value_M_max, const idi query_id, const idi K, const idi L, const idi init_size, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K, const idi local_queue_length, // Maximum size of local queue const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; std::vector<idi> &local_queues_ends, // Sizes of local queue std::vector<idi> &top_m_candidates, boost::dynamic_bitset<> &is_visited); void para_search_with_top_m_merge_queues_sequential_merge( const idi value_M_middle, const idi value_M_max, const idi query_id, const idi K, const idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K, const idi local_queue_length, // Maximum size of local queue const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; std::vector<idi> &local_queues_ends, // Sizes of local queue std::vector<idi> &top_m_candidates, boost::dynamic_bitset<> &is_visited); void para_search_with_top_m_nested_para( const idi 
batch_start, const idi batch_size, const idi value_M_middle, const idi value_M_max, const idi K, const idi L, std::vector< std::vector<Candidate> > &set_L_list, const std::vector<idi> &init_ids, std::vector< std::vector<idi> > &set_K_list, const idi local_queue_length, // Maximum size of local queue const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; std::vector< std::vector<idi> > &local_queues_ends_list, // Sizes of local queue std::vector< std::vector<idi> > &top_m_candidates_list, std::vector< boost::dynamic_bitset<> > &is_visited_list); void subsearch_with_top_m( const idi value_M_max, const idi query_id, const idi local_L, std::vector<Candidate> &set_L, const idi set_L_start, idi &set_L_size, std::vector<idi> &local_top_m_candidates, boost::dynamic_bitset<> &is_visited, uint64_t &local_count_distance_computation); void subsearch_top_m_for_one_iteration( const idi iter, idi &k_uc, const idi value_M, const idi query_id, const dataf *query_data, const idi L, std::vector<Candidate> &set_L, const idi set_L_start, idi &set_L_size, std::vector<idi> &top_m_candidates, boost::dynamic_bitset<> &is_visited, uint64_t &count_distance_computation); // void subsearch_top_m_for_one_iteration_lth( // const distf bound_lth, // const idi iter, // idi &k_uc, // const idi value_M, // const idi query_id, // const dataf *query_data, // const idi L, // std::vector<Candidate> &set_L, // const idi set_L_start, // idi &set_L_size, // std::vector<idi> &top_m_candidates, // boost::dynamic_bitset<> &is_visited, // uint64_t &count_distance_computation); void subsearch_top_m_for_one_iteration_lth_mth( const distf bound_lth, // const idi top_m_position, const idi iter, idi &k_uc, const idi local_m_count, const idi query_id, const dataf *query_data, const idi L, std::vector<Candidate> &set_L, const idi set_L_start, idi &set_L_size, std::vector<idi> &top_m_candidates, boost::dynamic_bitset<> &is_visited, uint64_t &count_distance_computation, double &time_pick_top_m, uint64_t &count_add_to_queue, double &time_distance_computation, double &time_add_to_queue); // void para_search_with_top_m_subsearch_v3( // const idi local_M_max, // const idi local_M_middle, // const idi query_id, // const idi K, // const idi global_L, // const idi local_L, //// const idi total_L, //// const idi init_queue_size, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const std::vector<idi> &local_queues_starts, // std::vector<idi> &local_queues_sizes, // std::vector<idi> &local_m_counts, // std::vector< std::vector<idi> > &top_m_candidates_list, // boost::dynamic_bitset<> &is_visited); // void para_search_with_top_m_subsearch_v4( // const idi local_M_max, // const idi local_M_middle, // const idi query_id, // const idi K, // const idi global_L, // const idi local_L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const std::vector<idi> &local_queues_starts, // std::vector<idi> &local_queues_sizes, // std::vector<idi> &local_m_counts, // std::vector<idi> &top_m_candidates, // boost::dynamic_bitset<> &is_visited); void para_search_with_top_m_subsearch_v5( const idi local_M_max, const idi local_M_middle, const idi query_id, const idi K, const idi global_L, const idi local_L, // const idi total_L, // const idi init_queue_size, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K, const std::vector<idi> &local_queues_starts, std::vector<idi> &local_queues_sizes, // std::vector<idi> 
&local_m_counts, std::vector<idi> &top_m_candidates, // std::vector< std::vector<idi> > &top_m_candidates_list, boost::dynamic_bitset<> &is_visited); void subsearch_for_simple_search( const idi query_id, const idi local_L, std::vector<Candidate> &set_L, const idi base_set_L, idi &set_L_end, // std::vector<uint8_t> &is_visited, boost::dynamic_bitset<> &is_visited, uint64_t &local_count_distance_computation); void para_simple_search_subsearch( const idi query_id, const idi K, const idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K, // std::vector<uint8_t> &is_visited); boost::dynamic_bitset<> &is_visited); // void para_search_with_top_m_merge_queues_global_threshold( // const idi value_M_middle, // const idi value_M_max, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; // std::vector<idi> &local_queues_ends, // Sizes of local queue // std::vector<idi> &top_m_candidates, // boost::dynamic_bitset<> &is_visited); // void para_search_with_top_m_merge_queues_distance_threshold_m( //// const idi value_M_middle, //// const idi value_M_max, // const distf relative_dist_threshold, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; // std::vector<idi> &local_queues_ends, // Sizes of local queue //// std::vector<Candidate> &top_m_candidates, // std::vector<idi> &top_m_candidates, //// std::vector<uint8_t> &is_visited) // boost::dynamic_bitset<> &is_visited); //// std::vector<distf> &local_thresholds) //// BitVector &is_visited) // void para_search_with_top_m_merge_queues_distance_threshold_m_middle_iteration( //// const idi value_M_middle, //// const idi value_M_max, // const distf relative_dist_threshold, // const idi middle_iteration, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; // std::vector<idi> &local_queues_ends, // Sizes of local queue //// std::vector<Candidate> &top_m_candidates, // std::vector<idi> &top_m_candidates, //// std::vector<uint8_t> &is_visited) // boost::dynamic_bitset<> &is_visited); // void para_search_with_top_m_merge_queues_collectors( // const idi value_M_middle, // const idi value_M_max, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; // std::vector<idi> &local_queues_ends, // Sizes of local queue //// std::vector<Candidate> &top_m_candidates, // std::vector<idi> &top_m_candidates, //// std::vector<uint8_t> &is_visited) // boost::dynamic_bitset<> &is_visited); // void para_search_with_top_m_merge_queues_selecting( // const idi value_M_middle, // const idi value_M_max, // const idi query_id, // const idi K, // const idi L, // 
std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; // std::vector<idi> &local_queues_ends, // Sizes of local queue //// std::vector<Candidate> &top_m_candidates, // std::vector<idi> &top_m_candidates, //// std::vector<uint8_t> &is_visited) // boost::dynamic_bitset<> &is_visited); // void para_search_with_top_m_merge_queues_myths( // const idi M, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue //// std::vector< std::vector<Candidate> > &local_queues_list, // std::vector<Candidate> &local_queues_array, // std::vector<idi> &local_queues_ends, // Sizes of local queue // BitVector &is_visited); //// std::vector<uint8_t> &is_visited); //// boost::dynamic_bitset<> &is_visited); //// void para_prepare_init_ids( //// std::vector<unsigned> &init_ids, //// unsigned L) const; // void para_search_with_top_m_in_batch_embarassing_para( // const PANNS::idi M, // const PANNS::idi batch_start, // const PANNS::idi batch_size, // const PANNS::idi K, // const PANNS::idi L, // std::vector< std::vector<Candidate> > &set_L_list, // const std::vector<idi> &init_ids, // std::vector< std::vector<idi> > &set_K_list, // std::vector< boost::dynamic_bitset<> > &is_visited_list); // void test_neighbors_distance_to_father( // const idi num_selected) const; // void test_neighbors_normalized_distance_to_father( // const idi num_selected) const; void load_true_NN( const char *filename, std::vector< std::vector<idi> > &true_nn_list); void get_recall_for_all_queries( const std::vector< std::vector<idi> > &true_nn_list, const std::vector<std::vector<unsigned>> &set_K_list, std::unordered_map<unsigned, double> &recalls) const; }; // Class Searching /** * Load the data from the file. * @param filename */ inline void Searching::load_data_load(char *filename) { auto old_d = dimension_; DiskIO::load_data( filename, data_load_, num_v_, dimension_); if (old_d) { if (old_d != dimension_) { std::cerr << "Error: data dimension " << dimension_ << " is not equal to query dimension " << old_d << "." << std::endl; exit(EXIT_FAILURE); } } } /** * Load the queries from the file. * @param filename */ inline void Searching::load_queries_load(char *filename) { auto old_d = dimension_; DiskIO::load_data( filename, queries_load_, num_queries_, dimension_); if (old_d) { if (old_d != dimension_) { std::cerr << "Error: query dimension " << dimension_ << " is not equal to data dimension " << old_d << "." << std::endl; exit(EXIT_FAILURE); } } } /** * Load the NSG graph from the file. * Reference: https://github.com/ZJULearning/nsg/blob/master/src/index_nsg.cpp * @param filename */ inline void Searching::load_nsg_graph(char *filename) { std::ifstream fin(filename); if (!fin.is_open()) { std::cerr << "Error: cannot open file " << filename << "." << std::endl; exit(EXIT_FAILURE); } fin.read(reinterpret_cast<char *>(&width_), sizeof(unsigned)); fin.read(reinterpret_cast<char *>(&ep_), sizeof(unsigned)); data_bytes_ = (1 + dimension_) * sizeof(dataf); neighbor_bytes_ = (1 + width_) * sizeof(idi); vertex_bytes_ = data_bytes_ + neighbor_bytes_; opt_nsg_graph_ = (char *) malloc(num_v_ * vertex_bytes_); if (!opt_nsg_graph_) { std::cerr << "Error: not enough memory for opt_nsg_graph_."
<< std::endl; exit(EXIT_FAILURE); } idi v_id = 0; num_e_ = 0; char *base_location = opt_nsg_graph_; while (true) { idi degree; fin.read(reinterpret_cast<char *>(&degree), sizeof(unsigned)); if (fin.eof()) { break; } num_e_ += degree; // std::vector<idi> tmp_ngbrs(degree); // fin.read(reinterpret_cast<char *>(tmp_ngbrs.data()), degree * sizeof(unsigned)); // Norm and data distf norm = compute_norm(data_load_ + v_id * dimension_); // distf norm = compute_norm(v_id); std::memcpy(base_location, &norm, sizeof(distf)); // Norm memcpy(base_location + sizeof(distf), data_load_ + v_id * dimension_, dimension_ * sizeof(dataf)); // Data base_location += data_bytes_; // Neighbors memcpy(base_location, &degree, sizeof(idi)); // Number of neighbors fin.read(base_location + sizeof(idi), degree * sizeof(unsigned)); // Neighbors // memcpy(location + sizeof(idi), tmp_ngbrs.data(), degree * sizeof(unsigned)); base_location += neighbor_bytes_; ++v_id; } if (v_id != num_v_) { std::cerr << "Error: NSG data has " << v_id << " vertices, but origin data has " << num_v_ << " vertices." << std::endl; exit(EXIT_FAILURE); } free(data_load_); data_load_ = nullptr; // //////////////////////// // idi v_id = 0; // num_e_ = 0; // while (true) { // idi degree; // fin.read(reinterpret_cast<char *>(&degree), sizeof(unsigned)); // if (fin.eof()) { // break; // } // num_e_ += degree; // // std::vector<idi> ngbrs(degree); // fin.read(reinterpret_cast<char *>(ngbrs.data()), degree * sizeof(unsigned)); //// nsg_graph_.push_back(ngbrs); //// tmp_edge_list.push_back(ngbrs); // edge_list_.push_back(ngbrs); // ++v_id; // } // if (v_id != num_v_) { // std::cerr << "Error: NSG data has " << v_id // << " vertices, but origin data has " << num_v_ << " vertices." << std::endl; // exit(EXIT_FAILURE); // } } /** * Load those true top-K neighbors (ground truth) of queries * @param filename * @param[out] true_nn_list */ inline void Searching::load_true_NN( const char *filename, std::vector< std::vector<idi> > &true_nn_list) // unsigned &t_K) { std::ifstream fin(filename); if (!fin.is_open()) { fprintf(stderr, "Error: cannot open file %s\n", filename); exit(EXIT_FAILURE); } idi t_query_num; idi t_K; // unsigned t_K; fin.read(reinterpret_cast<char *>(&t_query_num), sizeof(t_query_num)); fin.read(reinterpret_cast<char *>(&t_K), sizeof(t_K)); // if (t_query_num != query_num) { // fprintf(stderr, "Error: query_num %u is not equal to the record %u in true-NN file %s\n", // query_num, t_query_num, filename); // exit(EXIT_FAILURE); // } if (t_query_num < num_queries_) { fprintf(stderr, "Error: t_query_num %u is smaller than num_queries_ %u\n", t_query_num, num_queries_); exit(EXIT_FAILURE); } if (t_K < 100) { fprintf(stderr, "Error: t_K %u is smaller than 100.\n", t_K); exit(EXIT_FAILURE); } // data = new unsigned[(size_t) t_query_num * (size_t) t_K]; true_nn_list.resize(t_query_num); for (idi q_i = 0; q_i < t_query_num; ++q_i) { true_nn_list[q_i].resize(t_K); } for (unsigned q_i = 0; q_i < t_query_num; ++q_i) { // size_t offset = q_i * t_K; for (unsigned n_i = 0; n_i < t_K; ++n_i) { unsigned id; float dist; fin.read(reinterpret_cast<char *>(&id), sizeof(id)); fin.read(reinterpret_cast<char *>(&dist), sizeof(dist)); // data[offset + n_i] = id; true_nn_list[q_i][n_i] = id; } } fin.close(); } inline void Searching::get_recall_for_all_queries( const std::vector< std::vector<idi> > &true_nn_list, const std::vector<std::vector<unsigned>> &set_K_list, std::unordered_map<unsigned, double> &recalls) const { // if (t_K < 100) { // fprintf(stderr, "Error: t_K 
%u is smaller than 100.\n", t_K); // exit(EXIT_FAILURE); // } if (true_nn_list[0].size() < 100) { fprintf(stderr, "Error: Number of true nearest neighbors of a query is smaller than 100.\n"); exit(EXIT_FAILURE); } recalls[1] = 0.0; recalls[5] = 0.0; recalls[10] = 0.0; recalls[20] = 0.0; recalls[50] = 0.0; recalls[100] = 0.0; for (unsigned q_i = 0; q_i < num_queries_; ++q_i) { // size_t offset = q_i * t_K; for (unsigned top_i = 0; top_i < 100; ++top_i) { unsigned true_id = true_nn_list[q_i][top_i]; for (unsigned n_i = 0; n_i < 100; ++n_i) { if (set_K_list[q_i][n_i] == true_id) { if (n_i < 1) recalls[1] += 1; if (n_i < 5) recalls[5] += 1; if (n_i < 10) recalls[10] += 1; if (n_i < 20) recalls[20] += 1; if (n_i < 50) recalls[50] += 1; if (n_i < 100) recalls[100] += 1; } } } } recalls[1] /= 1.0 * num_queries_; recalls[5] /= 5.0 * num_queries_; recalls[10] /= 10.0 * num_queries_; recalls[20] /= 20.0 * num_queries_; recalls[50] /= 50.0 * num_queries_; recalls[100] /= 100.0 * num_queries_; } inline void Searching::search_in_sequential( const idi query_id, const idi K, const idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K) { // {//test // printf("Iteration: Relative_Distance:\n"); //// printf("Iteration: Relative_Distance:\n"); //// printf("----query: %u----\n", query_id); // } boost::dynamic_bitset<> is_visited(num_v_); for (idi v_i = 0; v_i < L; ++v_i) { is_visited[init_ids[v_i]] = true; } const dataf *query_data = queries_load_ + query_id * dimension_; for (idi v_i = 0; v_i < L; ++v_i) { idi v_id = init_ids[v_i]; _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); } // Get the distances of all candidates, store in the set set_L. for (unsigned i = 0; i < L; i++) { unsigned v_id = init_ids[i]; auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); dataf norm = *v_data++; ++count_distance_computation_; distf dist = compute_distance_with_norm(v_data, query_data, norm); set_L[i] = Candidate(v_id, dist, false); // False means not checked. } std::sort(set_L.begin(), set_L.begin() + L); idi k = 0; // Index of every queue's first unchecked candidate. idi tmp_count = 0; // for debug // {// Print relative distance //// distf top_dist = set_L[0].distance_; // for (idi i_l = 0; i_l < L; ++i_l) { // printf("%u %f\n", // tmp_count, set_L[i_l].distance_); //// tmp_count, set_L[i_l].distance_ - top_dist); // } // } while (k < L) { Candidate &top_cand = set_L[k]; unsigned nk = L; if (!top_cand.is_checked_) { ++tmp_count; top_cand.is_checked_ = true; idi v_id = top_cand.id_; // Vertex ID. 
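// Prefetch v_id's adjacency list, then expand every unvisited neighbor: compute its distance to the query and try to insert it into set_L.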
_mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); idi *out_edges = (idi *) (opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_); idi out_degree = *out_edges++; for (idi n_i = 0; n_i < out_degree; ++n_i) { _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); } // Traverse v_id's all neighbors, pushing them into the queue for (idi e_i = 0; e_i < out_degree; ++e_i) { idi nb_id = out_edges[e_i]; if (is_visited[nb_id]) { continue; } is_visited[nb_id] = true; auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); dataf norm = *nb_data++; // Compute the distance ++count_distance_computation_; distf dist = compute_distance_with_norm(nb_data, query_data, norm); if (dist > set_L[L-1].distance_) { continue; } // if (dist >= set_L[L-1].distance_) { // continue; // } Candidate cand(nb_id, dist, false); // Insert into the queue idi r = insert_into_queue(set_L, L, cand); if (r < nk) { nk = r; } } // {// Print relative distance //// distf top_dist = set_L[0].distance_; // for (idi i_l = 0; i_l < L; ++i_l) { // printf("%u %f\n", // tmp_count, set_L[i_l].distance_); //// tmp_count, set_L[i_l].distance_ - top_dist); // } // } } if (nk <= k) { k = nk; } else { ++k; } } // cache_miss_kernel.measure_stop(); for (size_t k_i = 0; k_i < K; ++k_i) { set_K[k_i] = set_L[k_i].id_; } // {//test // if (0 == query_id) { // exit(1); // } // } } //inline void Searching::search_in_sequential_BitVector( // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K) //{ //// boost::dynamic_bitset<> is_visited(num_v_); // BitVector is_visited(num_v_); // //#pragma omp parallel for // for (idi v_i = 0; v_i < L; ++v_i) { //// is_visited[init_ids[v_i]] = true; // is_visited.atomic_set_bit(init_ids[v_i]); // } // // const dataf *query_data = queries_load_ + query_id * dimension_; // //#pragma omp parallel for // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. //#pragma omp parallel for // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // std::sort(set_L.begin(), set_L.begin() + L); // idi k = 0; // Index of every queue's first unchecked candidate. // while (k < L) { // Candidate &top_cand = set_L[k]; // unsigned nk = L; // if (!top_cand.is_checked_) { // top_cand.is_checked_ = true; // idi v_id = top_cand.id_; // Vertex ID. 
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // // Traverse v_id's all neighbors, pushing them into the queue // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; //// if (is_visited[nb_id]) { //// continue; //// } //// is_visited[nb_id] = true; // // {// Self-defined BitVector // if (is_visited.atomic_is_bit_set(nb_id)) { // continue; // } // is_visited.atomic_set_bit(nb_id); // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; // // Compute the distance // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } //// if (dist >= set_L[L-1].distance_) { //// continue; //// } // Candidate cand(nb_id, dist, false); // // Insert into the queue // idi r = insert_into_queue(set_L, L, cand); // if (r < nk) { // nk = r; // } // } // } // if (nk <= k) { // k = nk; // } else { // ++k; // } // } //// cache_miss_kernel.measure_stop(); //#pragma omp parallel for // for (size_t k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } //} /** * Prepare init_ids and flags, as they are constant for all queries. * @param[out] init_ids * @param L */ inline void Searching::prepare_init_ids( std::vector<unsigned int> &init_ids, const unsigned L) const { // idi num_ngbrs = get_out_degree(ep_); // edgei edge_start = nsg_graph_indices_[ep_]; // // Store ep_'s neighbors as candidates // idi tmp_l = 0; // for (; tmp_l < L && tmp_l < num_ngbrs; tmp_l++) { // init_ids[tmp_l] = nsg_graph_out_edges_[edge_start + tmp_l]; // } // std::unordered_set<idi> visited_ids; boost::dynamic_bitset<> is_selected(num_v_); idi *out_edges = (idi *) (opt_nsg_graph_ + ep_ * vertex_bytes_ + data_bytes_); idi out_degree = *out_edges++; idi init_ids_end = 0; // for (; tmp_l < L && tmp_l < out_degree; tmp_l++) { for (idi e_i = 0; e_i < out_degree && init_ids_end < L; ++e_i) { // idi v_id = out_edges[tmp_l]; idi v_id = out_edges[e_i]; if(is_selected[v_id]) { continue; } is_selected[v_id] = true; // init_ids[tmp_l] = v_id; init_ids[init_ids_end++] = v_id; // init_ids[tmp_l] = out_edges[tmp_l]; // visited_ids.insert(init_ids[tmp_l]); } // for (idi i = 0; i < tmp_l; ++i) { // is_visited[init_ids[i]] = true; // } // If ep_'s neighbors are not enough, add other random vertices idi tmp_id = ep_ + 1; // use tmp_id to replace rand(). 
while (init_ids_end < L) { tmp_id %= num_v_; idi v_id = tmp_id++; if (is_selected[v_id]) { continue; } // if (visited_ids.find(id) != visited_ids.end()) { // continue; // } is_selected[v_id] = true; // visited_ids.insert(id); init_ids[init_ids_end++] = v_id; // tmp_l++; } } // TODO: re-code in AVX-512 inline dataf Searching::compute_norm( const dataf *data) const // idi vertex_id) // const std::vector<PANNS::dataf> &data) // size_t loc_start, // idi dimension) { // const dataf *a = data.data() + loc_start; // const dataf *a = data_load_ + vertex_id * dimension_; // idi size = dimension_; dataf result = 0; //#define AVX_L2NORM(addr, dest, tmp) \ // tmp = _mm256_load_ps(addr); \ // tmp = _mm256_mul_ps(tmp, tmp); \ // dest = _mm256_add_ps(dest, tmp); #define AVX_L2NORM(addr, dest, tmp) \ tmp = _mm256_loadu_ps(addr); \ tmp = _mm256_mul_ps(tmp, tmp); \ dest = _mm256_add_ps(dest, tmp); __m256 sum; __m256 l0, l1; unsigned D = (dimension_ + 7) & ~7U; unsigned DR = D % 16; unsigned DD = D - DR; const float *l = data; const float *e_l = l + DD; float unpack[8] __attribute__ ((aligned (32))) = {0, 0, 0, 0, 0, 0, 0, 0}; sum = _mm256_load_ps(unpack); // sum = _mm256_loadu_ps(unpack); if (DR) { AVX_L2NORM(e_l, sum, l0); } for (unsigned i = 0; i < DD; i += 16, l += 16) { AVX_L2NORM(l, sum, l0); AVX_L2NORM(l + 8, sum, l1); } _mm256_store_ps(unpack, sum); // _mm256_storeu_ps(unpack, sum); result = unpack[0] + unpack[1] + unpack[2] + unpack[3] + unpack[4] + unpack[5] + unpack[6] + unpack[7]; return result; } inline dataf Searching::compute_distance_with_norm( const dataf *v_data, const dataf *q_data, // idi vertex_id, // idi query_id, // const std::vector<PANNS::dataf> &d_data, // const std::vector<PANNS::dataf> &q_data, // PANNS::idi d_start, // PANNS::idi q_start, const dataf vertex_norm) const // idi dimension) { // idi size = dimension_; float result = 0; //#define AVX_DOT(addr1, addr2, dest, tmp1, tmp2) \ // tmp1 = _mm256_load_ps(addr1);\ // tmp2 = _mm256_load_ps(addr2);\ // tmp1 = _mm256_mul_ps(tmp1, tmp2); \ // dest = _mm256_add_ps(dest, tmp1); #define AVX_DOT(addr1, addr2, dest, tmp1, tmp2) \ tmp1 = _mm256_loadu_ps(addr1);\ tmp2 = _mm256_loadu_ps(addr2);\ tmp1 = _mm256_mul_ps(tmp1, tmp2); \ dest = _mm256_add_ps(dest, tmp1); __m256 sum; __m256 l0, l1; __m256 r0, r1; unsigned D = (dimension_ + 7) & ~7U; unsigned DR = D % 16; unsigned DD = D - DR; const float *l = v_data; const float *r = q_data; // const float *l = (float *) (opt_nsg_graph_ + vertex_id * vertex_bytes_ + sizeof(distf)); // const float *r = queries_load_ + query_id * dimension_; const float *e_l = l + DD; const float *e_r = r + DD; float unpack[8] __attribute__ ((aligned (32))) = {0, 0, 0, 0, 0, 0, 0, 0}; sum = _mm256_load_ps(unpack); // sum = _mm256_loadu_ps(unpack); if (DR) { AVX_DOT(e_l, e_r, sum, l0, r0); } for (unsigned i = 0; i < DD; i += 16, l += 16, r += 16) { AVX_DOT(l, r, sum, l0, r0); AVX_DOT(l + 8, r + 8, sum, l1, r1); } _mm256_store_ps(unpack, sum); // _mm256_storeu_ps(unpack, sum); result = unpack[0] + unpack[1] + unpack[2] + unpack[3] + unpack[4] + unpack[5] + unpack[6] + unpack[7]; result = -2 * result + vertex_norm; return result; } //// DEPRECATED. // The difference from insert_into_queue is that add_into_queue will increase the queue size by 1. 
//inline idi Searching::add_into_queue( // std::vector<PANNS::Candidate> &queue, // idi &queue_top, // const idi queue_size, // const PANNS::Candidate &cand) //{ // assert(queue_size > 1); // if (0 == queue_top) { // queue[queue_top++] = cand; // return 0; // } else if (1 == queue_top) { // if (queue[0] < cand) { // queue[queue_top++] = cand; // return 1; // } else { // queue[++queue_top] = queue[0]; // queue[0] = cand; // return 0; // } // } // // if (queue[queue_top - 1] < cand) { // if (queue_top < queue_size) { // queue[queue_top++] = cand; // } // return queue_top; // } // // idi r = insert_into_queue( // queue, // queue_top - 1, // cand); //// {//test //// printf("r: %u" //// "queue_top: %u " //// "queue_size: %u\n", //// r, //// queue_top, //// queue_size); //// } // return r; // //// ///////////////////////////////////////////////////////////// //// // Find the insert location //// auto it_loc = std::lower_bound(queue.begin(), queue.begin() + queue_top, cand); //// idi insert_loc = it_loc - queue.begin(); //// if (insert_loc == queue_size) { //// return queue_size; //// } //// //// // Insert ////// if (queue_top == queue_size) { ////// // If full already ////// --queue_top; ////// } //// memmove(reinterpret_cast<char *>(queue.data() + insert_loc + 1), //// reinterpret_cast<char *>(queue.data() + insert_loc), //// (queue_top - insert_loc) * sizeof(Candidate)); ////// for (idi q_i = queue_top; q_i > insert_loc; --q_i) { ////// queue.at(q_i) = queue.at(q_i - 1); ////// } //// queue[insert_loc] = cand; //// ++queue_top; //// return insert_loc; //} // The difference from insert_into_queue is that add_into_queue will increase the queue size by 1. inline idi Searching::add_into_queue( std::vector<PANNS::Candidate> &queue, idi &queue_top, const idi queue_size, const PANNS::Candidate &cand) { if (0 == queue_top) { queue[queue_top++] = cand; return 0; } // Find the insert location auto it_loc = std::lower_bound(queue.begin(), queue.begin() + queue_top, cand); idi insert_loc = it_loc - queue.begin(); if (insert_loc == queue_size) { return queue_size; } // Insert if (queue_top == queue_size) { // If full already --queue_top; } memmove(reinterpret_cast<char *>(queue.data() + insert_loc + 1), reinterpret_cast<char *>(queue.data() + insert_loc), (queue_top - insert_loc) * sizeof(Candidate)); // for (idi q_i = queue_top; q_i > insert_loc; --q_i) { // queue.at(q_i) = queue.at(q_i - 1); // } queue[insert_loc] = cand; ++queue_top; return insert_loc; } // The difference from insert_into_queue is that add_into_queue will increase the queue size by 1. // add_into_queue with a queue_start inline idi Searching::add_into_queue( std::vector<PANNS::Candidate> &queue, const idi queue_start, idi &queue_size, // The insertion location starting from queue_start const idi queue_capacity, // The maximum capacity of queue, independent with queue_start. const PANNS::Candidate &cand) { if (0 == queue_size) { queue[queue_start + queue_size++] = cand; return 0; } idi queue_end = queue_start + queue_size; // Find the insert location const auto it_loc = std::lower_bound(queue.begin() + queue_start, queue.begin() + queue_end, cand); // auto it_loc = std::lower_bound(queue.begin(), queue.begin() + queue_size, cand); idi insert_loc = it_loc - queue.begin(); if (insert_loc != queue_end) { if (cand.id_ == it_loc->id_) { // Duplicate return queue_capacity; } if (queue_size >= queue_capacity) { // Queue is full --queue_size; --queue_end; } } else { // insert_loc == queue_end, insert at the end? 
if (queue_size < queue_capacity) { // Queue is not full // Insert at the end queue[insert_loc] = cand; ++queue_size; return queue_size - 1; } else { // Queue is full return queue_capacity; } } // Add into queue memmove(reinterpret_cast<char *>(queue.data() + insert_loc + 1), reinterpret_cast<char *>(queue.data() + insert_loc), (queue_end - insert_loc) * sizeof(Candidate)); queue[insert_loc] = cand; ++queue_size; return insert_loc - queue_start; } inline void Searching::add_into_queue_at( const Candidate &cand, std::vector<Candidate> &queue, const idi insert_index, // The insertion location, independent with queue_start const idi queue_start, idi &queue_size, // The number of elements in queue, independent with queue_start const idi queue_length) // The maximum capacity of queue, independent with queue_start. { const idi dest_index = queue_start + insert_index; if (queue_size == queue_length) { --queue_size; } memmove(reinterpret_cast<char *>(queue.data() + dest_index + 1), reinterpret_cast<char *>(queue.data() + dest_index), (queue_size - insert_index) * sizeof(Candidate)); queue[dest_index] = cand; ++queue_size; } inline void Searching::insert_one_element_at( // const T &cand, // T *queue_base, const Candidate &cand, std::vector<Candidate> &queue, const idi insert_index, const idi queue_start, const idi queue_size) { const idi dest_index = queue_start + insert_index; memmove(reinterpret_cast<char *>(queue.data() + dest_index + 1), reinterpret_cast<char *>(queue.data() + dest_index), (queue_size - insert_index - 1) * sizeof(Candidate)); queue[dest_index] = cand; // memmove(reinterpret_cast<char *>(queue_base + dest_index + 1), // reinterpret_cast<char *>(queue_base + dest_index), // (queue_size - insert_index - 1) * sizeof(T)); // for (idi q_i = queue_size - 1; q_i > insert_index; --q_i) { // queue_base.at(q_i + queue_start) = queue_base.at(q_i - 1 + queue_start); // } // queue_base[dest_index] = cand; } /** * PANNS version of InsertIntoPool(): binary-search to find the insert place and then move. 
* @param[out] c_queue * @param c_queue_top * @param cand * @return */ inline idi Searching::insert_into_queue( std::vector<PANNS::Candidate> &c_queue, PANNS::idi c_queue_top, PANNS::Candidate cand) { if (c_queue[0].distance_ > cand.distance_) { // If the first memmove(reinterpret_cast<char *>(c_queue.data() + 1), reinterpret_cast<char *>(c_queue.data()), c_queue_top * sizeof(Candidate)); c_queue[0] = cand; return 0; } else if (c_queue[c_queue_top - 1].distance_ == cand.distance_) { // If the last if (c_queue[c_queue_top - 1].id_ > cand.id_) { // Use ID as the second metrics for ordering c_queue[c_queue_top - 1] = cand; return c_queue_top - 1; } else { return c_queue_top; } } idi left = 0; idi right = c_queue_top; while (left < right) { idi mid = (right - left) / 2 + left; if (c_queue[mid].distance_ > cand.distance_) { right = mid; } else { left = mid + 1; } } // If the distance is the same if (0 != left && c_queue[left - 1].distance_ != cand.distance_) { ; } else { while (0 != left && c_queue[left - 1].distance_ == cand.distance_ && c_queue[left - 1].id_ > cand.id_) { // Use ID as the second metrics for ordering --left; } } // Insert to left memmove(reinterpret_cast<char *>(c_queue.data() + left + 1), reinterpret_cast<char *>(c_queue.data() + left), (c_queue_top - left) * sizeof(Candidate)); c_queue[left] = cand; return left; } //inline void Searching::cand_pushes_ngbrs_into_queue( // idi cand_id, // const dataf *query_data, // idi L, // idi &new_k, // boost::dynamic_bitset<> &is_visited, // std::vector<Candidate> &set_L) //{ // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = true; // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist >= set_L[L-1].distance_) { // continue; // } // Candidate cand(nb_id, dist, false); // idi r = insert_into_queue(set_L, L, cand); // if (r < nk) { // nk = r; // } // } //} //inline void Searching::search_in_sequential( // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K) const //{ // boost::dynamic_bitset<> is_visited(num_v_); // // for (idi v_i = 0; v_i < L; ++v_i) { // is_visited[init_ids[v_i]] = true; // } // const dataf *query_data = queries_load_ + query_id * dimension_; // // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // std::sort(set_L.begin(), set_L.begin() + L); // idi k = 0; // Index of every queue's first unchecked candidate. 
// while (k < L) { // Candidate &top_cand = set_L[k]; // unsigned nk = L; // if (!top_cand.is_checked_) { // top_cand.is_checked_ = true; // idi v_id = top_cand.id_; // Vertex ID. // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // // Traverse v_id's all neighbors, pushing them into the queue // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = true; // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; // // Compute the distance // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } // Candidate cand(nb_id, dist, false); // // Insert into the queue // idi r = insert_into_queue(set_L, L, cand); // if (r < nk) { // nk = r; // } // } // } // if (nk <= k) { // k = nk; // } else { // ++k; // } // } // // for (size_t k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } //} // Deprecated: cannot use std::set, because its element is constant. //inline void Searching::search_in_sequential( // const idi query_id, // const idi K, // const idi L, //// std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K) const //{ // std::set<Candidate> set_L; // boost::dynamic_bitset<> is_visited(num_v_); // // for (idi v_i = 0; v_i < L; ++v_i) { // is_visited[init_ids[v_i]] = true; // } // const dataf *query_data = queries_load_ + query_id * dimension_; // // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; // distf dist = compute_distance_with_norm(v_data, query_data, norm); //// set_L[i] = Candidate(v_id, dist, false); // False means not checked. // set_L.emplace(v_id, dist, false); // } //// std::sort(set_L.begin(), set_L.begin() + L); // idi k = 0; // Index of every queue's first unchecked candidate. // while (k < L) { //// Candidate &top_cand = set_L[k]; // std::set<Candidate>::iterator top_cand = std::next(set_L.begin(), k); // unsigned nk = L; // if (!top_cand->is_checked_) { // top_cand->is_checked_ = true; // idi v_id = top_cand.id_; // Vertex ID. 
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // // Traverse v_id's all neighbors, pushing them into the queue // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = true; // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; // // Compute the distance // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } // Candidate cand(nb_id, dist, false); // // Insert into the queue // idi r = insert_into_queue(set_L, L, cand); // if (r < nk) { // nk = r; // } // } // } // if (nk <= k) { // k = nk; // } else { // ++k; // } // } // // for (size_t k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } //} /* Function: * queue1_size is fixed. */ inline idi Searching::merge_two_queues_into_1st_queue_seq_fixed( std::vector<Candidate> &queue1, const idi queue1_start, const idi queue1_size, std::vector<Candidate> &queue2, const idi queue2_start, const idi queue2_size) // const idi limit_size) { assert(queue1_size && queue2_size); // Record the lowest insert location. auto it_loc = std::lower_bound( queue1.begin() + queue1_start, queue1.begin() + queue1_start + queue1_size, queue2[queue2_start]); idi insert_index = it_loc - (queue1.begin() + queue1_start); if (insert_index == queue1_size) { return insert_index; } else if (insert_index == queue1_size - 1) { queue1[queue1_start + insert_index] = queue2[queue2_start]; return insert_index; } // Insert the 1st of queue2 if (queue2[queue2_start].id_ != it_loc->id_) { // Not Duplicate insert_one_element_at( queue2[queue2_start], queue1, insert_index, queue1_start, queue1_size); } if (queue2_size == 1) { return insert_index; } // Insert idi q_i_1 = insert_index + 1 + queue1_start; idi q_i_2 = queue2_start + 1; const idi q_i_1_bound = queue1_start + queue1_size; const idi q_i_2_bound = queue2_start + queue2_size; // const idi insert_i_bound = queue1_start + limit_size; for (idi insert_i = insert_index + 1; insert_i < queue1_size; ++insert_i) { if (q_i_1 >= q_i_1_bound || q_i_2 >= q_i_2_bound) { // queue1 or queue2 finished traverse. Rest o break; } else if (queue1[q_i_1] < queue2[q_i_2]) { ++q_i_1; } else if (queue2[q_i_2] < queue1[q_i_1]) { // Insert queue2[q_i_2] into queue1 insert_one_element_at( queue2[q_i_2++], queue1, insert_i, queue1_start, queue1_size); ++q_i_1; } else { // Duplicate ++q_i_2; ++q_i_1; } } return insert_index; } /* Function: * queue1_size should be updated. * queue1_length should be provided. */ inline void Searching::merge_two_queues_into_1st_queue_seq_incr( std::vector<Candidate> &queue1, const idi queue1_start, idi &queue1_size, // The number of element in queue1, independent with queue1_start. const idi queue1_length, // The maximum capacity of queue1, independent with queue1_start. std::vector<Candidate> &queue2, const idi queue2_start, const idi queue2_size) // const idi limit_size) { assert(queue1_size && queue2_size); // Record the lowest insert location. 
auto it_loc = std::lower_bound( queue1.begin() + queue1_start, queue1.begin() + queue1_start + queue1_size, queue2[queue2_start]); idi insert_index = it_loc - (queue1.begin() + queue1_start); if (insert_index == queue1_size) { idi copy_count = (queue1_size + queue2_size > queue1_length) ? queue1_length - queue1_size : queue2_size; memmove(queue1.data() + queue1_start + queue1_size, queue2.data() + queue2_start, copy_count * sizeof(Candidate)); queue1_size += copy_count; return; } if (queue2[queue2_start].id_ != it_loc->id_) { // Not Duplicate add_into_queue_at( queue2[queue2_start], queue1, insert_index, queue1_start, queue1_size, queue1_length); } if (queue2_size == 1) { return; } // Insert idi q_i_1 = insert_index + 1 + queue1_start; idi q_i_2 = queue2_start + 1; idi q_i_1_bound = queue1_start + queue1_size; // When queue1_size is updated, so should be q_i_1_bound. const idi q_i_2_bound = queue2_start + queue2_size; // idi insert_i; for (idi insert_i = insert_index + 1; insert_i < queue1_length; ++insert_i) { if (q_i_1 >= q_i_1_bound) { queue1_size += std::min(queue1_length - insert_i, q_i_2_bound - q_i_2); for ( ; insert_i < queue1_size; ++insert_i) { queue1[queue1_start + insert_i] = queue2[q_i_2++]; } break; } else if (q_i_2 >= q_i_2_bound) { break; } else if (queue1[q_i_1] < queue2[q_i_2]) { ++q_i_1; } else if (queue2[q_i_2] < queue1[q_i_1]) { add_into_queue_at( queue2[q_i_2++], queue1, insert_i, queue1_start, queue1_size, queue1_length); ++q_i_1; q_i_1_bound = queue1_start + queue1_size; } else { // Duplicate ++q_i_2; ++q_i_1; } } } inline idi Searching::merge_all_queues_para_list( std::vector< std::vector<Candidate> > &local_queues_list, std::vector<idi> &local_queues_ends, std::vector<Candidate> &set_L, const idi L) { int size = 1 << (static_cast<idi>(log2(num_threads_))); idi log2size = static_cast<idi>(log2(size)); for (idi d = 0; d < log2size; ++d) { uint32_t by = 1 << (d + 1); #pragma omp parallel for for (int i = 0; i < size; i += by) { idi ai = i + (1 << (d + 1)) - 1; // i + 2^(d+1) - 1 idi bi = i + (1 << d) - 1; // i + 2^d - 1 if (0 == local_queues_ends[bi]) { continue; } if (local_queues_ends[ai] == 0) { local_queues_list[ai].swap(local_queues_list[bi]); std::swap(local_queues_ends[ai], local_queues_ends[bi]); continue; } // else if (local_queues_ends[ai] < L && local_queues_ends[bi] >= L) { // local_queues_list[ai].swap(local_queues_list[bi]); // std::swap(local_queues_ends[ai], local_queues_ends[bi]); // } // merge_two_queues_into_1st_queue_seq( // local_queues_list[ai], // 0, // local_queues_ends[ai], // local_queues_list[bi], // 0, // local_queues_ends[bi]); idi tmp_length = local_queues_ends[ai] + local_queues_ends[bi]; std::vector<Candidate> tmp_queue(tmp_length); std::merge( local_queues_list[ai].begin(), local_queues_list[ai].begin() + local_queues_ends[ai], local_queues_list[bi].begin(), local_queues_list[bi].begin() + local_queues_ends[bi], tmp_queue.begin()); if (tmp_length > L) { tmp_queue.resize(L); tmp_length = L; } else if (tmp_length < L) { tmp_queue.resize(L); } local_queues_list[ai].swap(tmp_queue); local_queues_ends[ai] = tmp_length; // {// Print queue a // printf("d: %u " // "i: %u " // "ai: %u " // "local_queues_ends[%d]: %d\n", // d, // i, // ai, // ai, // local_queues_ends[ai]); // for (idi i_q = 0; i_q < local_queues_ends[ai]; ++i_q) { // printf("[%u]: " // "id: %u " // "dist: %f\n", // i_q, // local_queues_list[ai][i_q].id_, // local_queues_list[ai][i_q].distance_); // } // } } } // Remain, prefix-sum-like merge if (size != num_threads_) { for (int 
i = size; i < num_threads_; ++i) { idi ai = i; idi bi = i - 1; if (0 == local_queues_ends[bi]) { continue; } if (local_queues_ends[ai] == 0) { local_queues_list[ai].swap(local_queues_list[bi]); std::swap(local_queues_ends[ai], local_queues_ends[bi]); continue; } // else if (local_queues_ends[ai] < L && local_queues_ends[bi] >= L) { // local_queues_list[ai].swap(local_queues_list[bi]); // std::swap(local_queues_ends[ai], local_queues_ends[bi]); // } // merge_two_queues_into_1st_queue_seq( // local_queues_list[ai], // 0, // local_queues_ends[ai], // local_queues_list[bi], // 0, // local_queues_ends[bi]); idi tmp_length = local_queues_ends[ai] + local_queues_ends[bi]; std::vector<Candidate> tmp_queue(tmp_length); std::merge( local_queues_list[ai].begin(), local_queues_list[ai].begin() + local_queues_ends[ai], local_queues_list[bi].begin(), local_queues_list[bi].begin() + local_queues_ends[bi], tmp_queue.begin()); if (tmp_length > L) { tmp_queue.resize(L); tmp_length = L; } else if (tmp_length < L) { tmp_queue.resize(L); } local_queues_list[ai].swap(tmp_queue); local_queues_ends[ai] = tmp_length; } } // Merge into set_L idi r = L; if (local_queues_ends[num_threads_ - 1]) { r = merge_two_queues_into_1st_queue_seq_fixed( set_L, 0, L, local_queues_list[num_threads_ - 1], 0, local_queues_ends[num_threads_ - 1]); } // Reset local_queues_ends std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0); return r; } /* Function: * Use large local_queues_array as a concatenation of all queues */ inline idi Searching::merge_all_queues_para_array( std::vector<Candidate> &set_L, std::vector<idi> &local_queues_ends, const idi local_queue_length, const idi L) { const int num_queues = num_threads_; idi nk = L; int size = 1 << (static_cast<idi>(log2(num_queues))); idi log2size = static_cast<idi>(log2(size)); for (idi d = 0; d < log2size; ++d) { uint32_t by = 1 << (d + 1); #pragma omp parallel for for (int i = 0; i < size; i += by) { idi ai = i + (1 << (d + 1)) - 1; // i + 2^(d+1) - 1 idi a_start = ai * local_queue_length; idi bi = i + (1 << d) - 1; // i + 2^d - 1 idi b_start = bi * local_queue_length; if (0 == local_queues_ends[bi]) { continue; } if (local_queues_ends[ai] == 0) { std::copy(set_L.begin() + b_start, set_L.begin() + b_start + local_queues_ends[bi], set_L.begin() + a_start); // Copy bi to ai local_queues_ends[ai] = local_queues_ends[bi]; local_queues_ends[bi] = 0; continue; } if (ai != static_cast<idi>(num_queues - 1)) { merge_two_queues_into_1st_queue_seq_incr( set_L, a_start, local_queues_ends[ai], local_queue_length, set_L, b_start, local_queues_ends[bi]); } else { idi r = merge_two_queues_into_1st_queue_seq_fixed( set_L, a_start, L, set_L, b_start, local_queues_ends[bi]); if (r < nk) { nk = r; } } } } // Remain, prefix-sum-like merge if (size != num_queues) { for (int i = size; i < num_queues; ++i) { idi ai = i; idi a_start = ai * local_queue_length; idi bi = i - 1; idi b_start = bi * local_queue_length; if (0 == local_queues_ends[bi]) { continue; } if (local_queues_ends[ai] == 0) { std::copy(set_L.begin() + b_start, set_L.begin() + b_start + local_queues_ends[bi], set_L.begin() + a_start); // Copy bi to ai local_queues_ends[ai] = local_queues_ends[bi]; local_queues_ends[bi] = 0; continue; } if (ai != static_cast<idi>(num_queues - 1)) { merge_two_queues_into_1st_queue_seq_incr( set_L, a_start, local_queues_ends[ai], local_queue_length, set_L, b_start, local_queues_ends[bi]); } else { idi r = merge_two_queues_into_1st_queue_seq_fixed( set_L, a_start, L, set_L, b_start, 
local_queues_ends[bi]); if (r < nk) { nk = r; } } } } // Reset local_queues_ends // Not do this for Collector Idea or Selecting Idea std::fill(local_queues_ends.begin(), local_queues_ends.end() - 1, 0); // std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0); return nk; // return r; } /* Function: * When merge all queues (in an array, and [num_threads_ - 1] is the global queue), * the starting local is at [queue_base] */ inline idi Searching::merge_all_queues_queue_base( // std::vector< std::vector<Candidate> > &local_queues_list, std::vector<Candidate> &set_L, // std::vector<Candidate> &local_queues_array, std::vector<idi> &local_queues_ends, const idi queue_base, const int real_threads, const idi local_queue_length, // std::vector<Candidate> &set_L, const idi L) { idi nk = L; int size = 1 << (static_cast<idi>(log2(real_threads))); // int size = 1 << (static_cast<idi>(log2(num_threads_))); idi log2size = static_cast<idi>(log2(size)); for (idi d = 0; d < log2size; ++d) { idi by = 1 << (d + 1); idi i_bound = size + queue_base; #pragma omp parallel for num_threads(real_threads) for (idi i = queue_base; i < i_bound; i += by) { // for (int i = 0; i < size; i += by) { // idi ai = i + (1 << (d + 1)) - 1 + queue_base; // i + 2^(d+1) - 1 idi ai = i + (1 << (d + 1)) - 1; // i + 2^(d+1) - 1 idi a_start = ai * local_queue_length; // idi bi = i + (1 << d) - 1 + queue_base; // i + 2^d - 1 idi bi = i + (1 << d) - 1; // i + 2^d - 1 idi b_start = bi * local_queue_length; if (0 == local_queues_ends[bi]) { continue; } if (local_queues_ends[ai] == 0) { // local_queues_list[ai].swap(local_queues_list[bi]); std::copy(set_L.begin() + b_start, set_L.begin() + b_start + local_queues_ends[bi], set_L.begin() + a_start); // Copy bi to ai local_queues_ends[ai] = local_queues_ends[bi]; local_queues_ends[bi] = 0; continue; } if (ai != static_cast<idi>(num_threads_ - 1)) { merge_two_queues_into_1st_queue_seq_incr( set_L, a_start, local_queues_ends[ai], local_queue_length, set_L, b_start, local_queues_ends[bi]); } else { idi r = merge_two_queues_into_1st_queue_seq_fixed( set_L, a_start, L, set_L, b_start, local_queues_ends[bi]); if (r < nk) { nk = r; } } } } // Remain, prefix-sum-like merge if (size != real_threads) { // if (size != num_threads_) { for (int i = size + queue_base; i < num_threads_; ++i) { // for (int i = size; i < num_threads_; ++i) { idi ai = i; idi a_start = ai * local_queue_length; idi bi = i - 1; idi b_start = bi * local_queue_length; if (0 == local_queues_ends[bi]) { continue; } if (local_queues_ends[ai] == 0) { std::copy(set_L.begin() + b_start, set_L.begin() + b_start + local_queues_ends[bi], set_L.begin() + a_start); // Copy bi to ai local_queues_ends[ai] = local_queues_ends[bi]; local_queues_ends[bi] = 0; continue; } if (ai != static_cast<idi>(num_threads_ - 1)) { merge_two_queues_into_1st_queue_seq_incr( set_L, a_start, local_queues_ends[ai], local_queue_length, set_L, b_start, local_queues_ends[bi]); } else { idi r = merge_two_queues_into_1st_queue_seq_fixed( set_L, a_start, L, set_L, b_start, local_queues_ends[bi]); if (r < nk) { nk = r; } } } } // Reset local_queues_ends std::fill(local_queues_ends.begin(), local_queues_ends.end() - 1, 0); // std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0); return nk; // return r; } inline void Searching::merge_two_consecutive_queues_in_place( std::vector<Candidate> &two_queues, const idi base_1, // const idi &end_1, const idi base_2, const idi &length_2) { // idi tid = omp_get_thread_num(); idi index_1 = base_1; idi index_2 = 
base_2; const idi bound_2 = base_2 + length_2; while (index_1 < index_2 && index_2 < bound_2) { Candidate e_1 = two_queues[index_1]; Candidate e_2 = two_queues[index_2]; if (e_1 < e_2) { ++index_1; } else if (e_2 < e_1) { // time_memmove_list_[tid] -= WallTimer::get_time_mark(); std::memmove(two_queues.data() + index_1 + 1, two_queues.data() + index_1, (index_2 - index_1) * sizeof(Candidate)); // time_memmove_list_[tid] += WallTimer::get_time_mark(); two_queues[index_1] = e_2; ++index_1; ++index_2; } else { // Duplicate, but have no idea what to do right now // time_memmove_list_[tid] -= WallTimer::get_time_mark(); std::memmove(two_queues.data() + index_1 + 1, two_queues.data() + index_1, (index_2 - index_1) * sizeof(Candidate)); // time_memmove_list_[tid] += WallTimer::get_time_mark(); index_1 += 2; ++index_2; } } } ///* Function: // * Merge all queues to the global queue, in a two-queue-merge way // */ //inline idi Searching::merge_all_queues_all_together_in_sequential( // std::vector<Candidate> &set_L, // std::vector<idi> &local_queues_ends, // const idi local_queue_length, // const idi L) //{ // const idi num_queues = num_threads_; // const idi global_queue_base = (num_queues - 1) * local_queue_length; // std::vector<idi> queue_heads(num_queues, 0); // idi queue_id_min; // //// bool is_finished = false; // bool is_1st_selected = true; // idi nk = L; // The highest location of insertion. // { // for (idi q_i = 0; q_i < num_queues; ++q_i) { // if (0 == local_queues_ends[q_i]) { // continue; // } // _mm_prefetch(set_L.data() + q_i * local_queue_length, _MM_HINT_T0); // } // } // while (queue_heads[num_queues - 1] < L) { //// time_compare_minimum_ -= WallTimer::get_time_mark(); // queue_id_min = min_all_queues_at_heads( // set_L, // queue_heads, // local_queues_ends, // local_queue_length, // L); //// time_compare_minimum_ += WallTimer::get_time_mark(); // if (queue_id_min != num_queues - 1) { // Not in the global queue //// time_insert_ -= WallTimer::get_time_mark(); // insert_one_element_at( // set_L[queue_heads[queue_id_min] + queue_id_min * local_queue_length], // set_L, // queue_heads[num_queues - 1], // global_queue_base, // L); //// time_insert_ += WallTimer::get_time_mark(); // if (is_1st_selected) { // Get the highest inserting location // is_1st_selected = false; // nk = queue_heads[num_queues - 1]; // } // ++queue_heads[queue_id_min]; // } // ++queue_heads[num_queues - 1]; // } // // // Reset local_queues_ends // std::fill(local_queues_ends.begin(), local_queues_ends.end() - 1, 0); // return nk; //} ///* Function: // * Find the minimum among queues at their head locations // */ //inline idi Searching::min_all_queues_at_heads( // const std::vector<Candidate> &set_L, // std::vector<idi> &queue_heads, // const std::vector<idi> &local_queues_ends, // const idi local_queue_length, // const idi L) //{ // const idi num_queues = num_threads_; // idi min_queue_id = num_queues - 1; // Candidate min_candidate = set_L[queue_heads[min_queue_id] + min_queue_id * local_queue_length]; // // for (idi q_i = 0; q_i < num_queues - 1; ++q_i) { // if (queue_heads[q_i] >= local_queues_ends[q_i]) { // q_i finished // continue; // } // const Candidate &ele = set_L[queue_heads[q_i] + q_i * local_queue_length]; // if (ele < min_candidate) { // min_candidate = ele; // min_queue_id = q_i; // } else if (ele.id_ == min_candidate.id_) { // Redundant element // ++queue_heads[q_i]; // } // } // // return min_queue_id; //} inline void Searching::merge_in_set_L( std::vector<Candidate> &set_L, const idi 
set_L_length, const idi num_queues, const idi local_queue_length) { idi size = 1 << (static_cast<idi>(log2(num_queues))); idi log2size = static_cast<idi>(log2(size)); for (idi d = 0; d < log2size; ++d) { const idi merge_length = (local_queue_length << d); idi by = 1 << (d + 1); // Parallel for #pragma omp parallel for for (idi i = 0; i < size; i += by) { // idi a = i + (1 << d) - 1; // idi b = i + (1 << (d + 1)) - 1; idi a = i; idi b = i + (1 << d); idi base_a = a * local_queue_length; idi base_b = b * local_queue_length; if (base_a >= set_L_length || base_b >= set_L_length) { continue; } idi length_b; if (a + by < size) { length_b = merge_length; } else { // The last one if (size == num_queues) { length_b = set_L_length - base_b; } else { length_b = merge_length; } } // printf("a: %u b: %u " // "base_a: %u base_b: %u length_b: %u\n", // a, b, // base_a, base_b, length_b); merge_two_consecutive_queues_in_place( set_L, base_a, base_b, length_b); } } if (size != num_queues) { for (idi i = size; i < num_queues; ++i) { idi a = 0; idi b = i; idi base_a = a; idi base_b = b * local_queue_length; if (base_b >= set_L_length) { continue; } idi length_b; if (b != num_queues - 1) { length_b = local_queue_length; } else { length_b = set_L_length - base_b; } // printf("a: %u b: %u " // "base_a: %u base_b: %u length_b: %u\n", // a, b, // base_a, base_b, length_b); merge_two_consecutive_queues_in_place( set_L, base_a, base_b, length_b); } } } /* * 7/5/2020-20:27 * Every queue keeps only elements which can be ordered in the top-L globally. * local_queues_lengths records the end location for all queues */ inline distf Searching::selecting_top_L_seq( std::vector<Candidate> &set_L, const idi global_L, const idi num_queues, const std::vector<idi> &local_queues_starts, std::vector<idi> &local_queues_sizes) { std::vector<idi> pointers(num_queues, 0); distf bound_lth; idi rank = 0; bool is_finished = false; distf min_dist = FLT_MAX; idi min_q_i; idi min_id; while (rank < global_L) { is_finished = true; min_dist = FLT_MAX; for (idi q_i = 0; q_i < num_queues; ++q_i) { if (pointers[q_i] >= local_queues_sizes[q_i]) { // q_i is finished continue; } is_finished = false; idi sub = pointers[q_i] + local_queues_starts[q_i]; distf tmp_dist = set_L[sub].distance_; idi tmp_id = set_L[sub].id_; if (tmp_dist < min_dist) { min_dist = tmp_dist; min_id = tmp_id; min_q_i = q_i; } else if (tmp_dist == min_dist && tmp_id < min_id) { min_id = tmp_id; min_q_i = q_i; } } if (is_finished) { {//test printf("Error: selecting_top_L_seq: only found %u elements but global_L is %u.\n", rank, global_L); } break; } bound_lth = min_dist; ++pointers[min_q_i]; ++rank; } std::copy(pointers.begin(), pointers.end(), local_queues_sizes.begin()); return bound_lth; } /* * 7/24/2020-10:08 * Record for every queue the position that contains the top-M unchecked vertices. * So the total expanded vertices should still be M, which means the computation should * be the same with merging idea. 
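 * (Sketch of the idea, as implemented below: walk all queues simultaneously, always
 * advancing the queue whose head is the closest still-unchecked vertex, until value_M
 * vertices have been selected; local_m_counts records how many of them each queue contributed.)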
*/ inline void Searching::selecting_unchecked_top_M_seq( const idi query_id, const idi iter, std::vector<Candidate> &set_L, const std::vector<idi> &pointers_starts, const idi value_M, const idi num_queues, const std::vector<idi> &local_queues_starts, const std::vector<idi> &local_queues_sizes, std::vector<idi> &local_m_counts) { std::vector<idi> pointers(pointers_starts); // std::vector<idi> pointers(num_queues, 0); std::fill(local_m_counts.begin(), local_m_counts.end(), 0); idi rank = 0; bool is_finished = true; distf min_dist = FLT_MAX; idi min_q_i; idi min_id; while (rank < value_M) { min_dist = FLT_MAX; for (idi q_i = 0; q_i < num_queues; ++q_i) { idi &pointer = pointers[q_i]; idi sub = pointer + local_queues_starts[q_i]; // {//test // if (133 == query_id && // 3 == iter && // 321341 == set_L[sub].id_) { // printf("(%u %f)\n", // set_L[sub].id_, set_L[sub].distance_); // } // } while (pointer < local_queues_sizes[q_i] && set_L[sub].is_checked_) { ++pointer; ++sub; } if (pointer >= local_queues_sizes[q_i]) { // q_i is finished continue; } is_finished = false; distf tmp_dist = set_L[sub].distance_; idi tmp_id = set_L[sub].id_; if (tmp_dist < min_dist) { min_dist = tmp_dist; min_id = tmp_id; min_q_i = q_i; } else if (tmp_dist == min_dist && tmp_id < min_id) { min_id = tmp_id; min_q_i = q_i; } } if (!is_finished) { is_finished = true; ++pointers[min_q_i]; ++rank; ++local_m_counts[min_q_i]; } else { break; } } // std::copy(pointers.begin(), pointers.end(), local_top_m_positions.begin()); } /* * 7/27/2020-15:41 * Gather the top-M unchecked vertices from local queues. */ inline void Searching::gather_unchecked_top_M_seq( const idi query_id, const idi iter, std::vector<Candidate> &set_L, const std::vector<idi> &pointers_starts, const idi value_M, const idi num_queues, const std::vector<idi> &local_queues_starts, const std::vector<idi> &local_queues_sizes, std::vector<idi> &top_m_candidates, idi &top_m_candidates_size, std::vector<idi> &bound_subs) { std::vector<idi> pointers(pointers_starts); // std::vector<idi> pointers(num_queues, 0); // std::fill(local_m_counts.begin(), local_m_counts.end(), 0); // idi rank = 0; bool is_finished = true; distf min_dist = FLT_MAX; idi min_q_i; idi min_id; while (top_m_candidates_size < value_M) { min_dist = FLT_MAX; for (idi q_i = 0; q_i < num_queues; ++q_i) { idi &pointer = pointers[q_i]; idi sub = pointer + local_queues_starts[q_i]; while (pointer < local_queues_sizes[q_i] && set_L[sub].is_checked_) { ++pointer; ++sub; } if (pointer >= local_queues_sizes[q_i]) { // q_i is finished continue; } is_finished = false; distf tmp_dist = set_L[sub].distance_; idi tmp_id = set_L[sub].id_; if (tmp_dist < min_dist) { min_dist = tmp_dist; min_id = tmp_id; min_q_i = q_i; } else if (tmp_dist == min_dist && tmp_id < min_id) { min_id = tmp_id; min_q_i = q_i; } } if (!is_finished) { is_finished = true; idi sub = local_queues_starts[min_q_i] + pointers[min_q_i]; top_m_candidates[top_m_candidates_size++] = set_L[sub].id_; set_L[sub].is_checked_ = true; // Checked ++pointers[min_q_i]; // ++rank; // ++local_m_counts[min_q_i]; } else { break; } } // std::copy(pointers.begin(), pointers.end(), local_top_m_positions.begin()); std::copy(pointers.begin(), pointers.end(), bound_subs.begin()); } inline void Searching::search_with_top_m( const PANNS::idi M, const PANNS::idi query_id, const PANNS::idi K, const PANNS::idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K) { boost::dynamic_bitset<> is_visited(num_v_); { for (idi c_i = 0; c_i < L; 
++c_i) { is_visited[init_ids[c_i]] = true; } } const dataf *query_data = queries_load_ + query_id * dimension_; for (idi v_i = 0; v_i < L; ++v_i) { idi v_id = init_ids[v_i]; _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); } // Get the distances of all candidates, store in the set set_L. for (unsigned i = 0; i < L; i++) { unsigned v_id = init_ids[i]; auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); dataf norm = *v_data++; ++count_distance_computation_; distf dist = compute_distance_with_norm(v_data, query_data, norm); set_L[i] = Candidate(v_id, dist, false); // False means not checked. } std::sort(set_L.begin(), set_L.begin() + L); std::vector<idi> top_m_candidates(M); idi top_m_candidates_end = 0; idi k = 0; // Index of first unchecked candidate. idi tmp_count = 0; // for debug while (k < L) { ++tmp_count; unsigned nk = L; // Select M candidates idi last_k = L; for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { if (set_L[c_i].is_checked_) { continue; } last_k = c_i; // Record the location of the last candidate selected. set_L[c_i].is_checked_ = true; top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; } // Push M candidates' neighbors into the queue. for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { idi cand_id = top_m_candidates[c_i]; _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); idi out_degree = *out_edges++; for (idi n_i = 0; n_i < out_degree; ++n_i) { _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); } for (idi e_i = 0; e_i < out_degree; ++e_i) { idi nb_id = out_edges[e_i]; if (is_visited[nb_id]) { continue; } is_visited[nb_id] = true; auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); dataf norm = *nb_data++; ++count_distance_computation_; distf dist = compute_distance_with_norm(nb_data, query_data, norm); if (dist > set_L[L-1].distance_) { continue; } Candidate cand(nb_id, dist, false); idi r = insert_into_queue(set_L, L, cand); if (r < nk) { nk = r; } } } top_m_candidates_end = 0; // Clear top_m_candidates if (nk <= last_k) { k = nk; } else { k = last_k + 1; } } for (idi k_i = 0; k_i < K; ++k_i) { set_K[k_i] = set_L[k_i].id_; } } inline void Searching::search_with_top_m_scale_m( const PANNS::idi value_M_max, const PANNS::idi query_id, const PANNS::idi K, const PANNS::idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K, std::vector<idi> &top_m_candidates, boost::dynamic_bitset<> &is_visited) { // boost::dynamic_bitset<> is_visited(num_v_); { for (idi c_i = 0; c_i < L; ++c_i) { is_visited[init_ids[c_i]] = true; } } const dataf *query_data = queries_load_ + query_id * dimension_; for (idi v_i = 0; v_i < L; ++v_i) { idi v_id = init_ids[v_i]; _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); } // Get the distances of all candidates, store in the set set_L. for (unsigned i = 0; i < L; i++) { unsigned v_id = init_ids[i]; auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); dataf norm = *v_data++; ++count_distance_computation_; distf dist = compute_distance_with_norm(v_data, query_data, norm); set_L[i] = Candidate(v_id, dist, false); // False means not checked. } std::sort(set_L.begin(), set_L.begin() + L); // std::vector<idi> top_m_candidates(M); idi top_m_candidates_end = 0; idi k = 0; // Index of first unchecked candidate. 
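    // The main difference from search_with_top_m() above: M starts at 1 and is doubled
    // after every iteration until it reaches value_M_max, so early iterations expand few
    // candidates. E.g. with value_M_max = 16 the per-iteration M values are 1, 2, 4, 8, 16, 16, ...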
idi tmp_count = 0; // for debug idi M = 1; while (k < L) { ++tmp_count; unsigned nk = L; // Select M candidates idi last_k = L; for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { if (set_L[c_i].is_checked_) { continue; } last_k = c_i; // Record the location of the last candidate selected. set_L[c_i].is_checked_ = true; top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; } // Push M candidates' neighbors into the queue. for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { idi cand_id = top_m_candidates[c_i]; _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); idi out_degree = *out_edges++; for (idi n_i = 0; n_i < out_degree; ++n_i) { _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); } for (idi e_i = 0; e_i < out_degree; ++e_i) { idi nb_id = out_edges[e_i]; if (is_visited[nb_id]) { continue; } is_visited[nb_id] = true; auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); dataf norm = *nb_data++; ++count_distance_computation_; distf dist = compute_distance_with_norm(nb_data, query_data, norm); if (dist > set_L[L-1].distance_) { continue; } Candidate cand(nb_id, dist, false); idi r = insert_into_queue(set_L, L, cand); if (r < nk) { nk = r; } } } top_m_candidates_end = 0; // Clear top_m_candidates if (nk <= last_k) { k = nk; } else { k = last_k + 1; } if (M < value_M_max) { M <<= 1; } } for (idi k_i = 0; k_i < K; ++k_i) { set_K[k_i] = set_L[k_i].id_; } {// Reset is_visited.reset(); } } ////void Searching::search_with_top_m( //inline void Searching::search_with_top_m_to_get_distance_range( // const PANNS::idi M, // const PANNS::idi query_id, //// const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids) //// std::vector<idi> &set_K) //{ // dist_max_ = -FLT_MAX; // dist_min_ = FLT_MAX; // boost::dynamic_bitset<> is_visited(num_v_); // // { // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = true; // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. //// {// For distance range //// if (dist > dist_max_) { //// dist_max_ = dist; //// } //// if (dist < dist_min_) { //// dist_min_ = dist; //// } //// } // } // std::sort(set_L.begin(), set_L.begin() + L); // // std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. // idi tmp_count = 0; // for debug // while (k < L) { // ++tmp_count; // // unsigned nk = L; // // // Select M candidates // idi last_k = L; // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // if (set_L[c_i].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. 
// set_L[c_i].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; // } // // // Push M candidates' neighbors into the queue. // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = true; // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } //// if (dist >= set_L[L-1].distance_) { //// continue; //// } // Candidate cand(nb_id, dist, false); // idi r = insert_into_queue(set_L, L, cand); // if (r < nk) { // nk = r; // } //// {// For distance range //// if (dist > dist_max_) { //// dist_max_ = dist; //// } //// if (dist < dist_min_) { //// dist_min_ = dist; //// } //// } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // {// For histogram // for (idi i_l = 0; i_l < L; ++i_l) { // distf dist = set_L[i_l].distance_; // {// For distance range // if (dist > dist_max_) { // dist_max_ = dist; // } // if (dist < dist_min_) { // dist_min_ = dist; // } // } // } // } // } // //// for (idi k_i = 0; k_i < K; ++k_i) { //// set_K[k_i] = set_L[k_i].id_; //// } //} // ////void Searching::search_with_top_m( //inline void Searching::search_with_top_m_myths_M( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K) //{ //// {//test //// printf("query_id: %u\n", query_id); //// } // const idi loc_range = L / 3; // // // boost::dynamic_bitset<> is_visited(num_v_); // // { // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = true; // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // std::sort(set_L.begin(), set_L.begin() + L); // //// {// For histogram //// const distf dist_range = dist_max_ - dist_min_; //// printf("iter:%u\n", 0); //// for (idi i_l = 0; i_l < L; ++i_l) { //// printf("%f\n", (set_L[i_l].distance_ - dist_min_) / dist_range * 100.0); //// } //// } // // std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. 
// idi tmp_count = 0; // for debug // while (k < L) { // std::vector<idi> range_count(3, 0); // idi zero_inserted_count = 0; //// {//test //// printf("tmp_count: %u\n", tmp_count); //// } // ++tmp_count; // // unsigned nk = L; // // // Select M candidates // idi last_k = L; // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // if (set_L[c_i].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[c_i].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; // } //// {//test //// printf("top_m_candidates_ends: %u\n", top_m_candidates_end); //// } // { // if (0 == top_m_candidates_end) { // break; // } // } // // // uint64_t count_neighbors = 0; // uint64_t count_inserted = 0; // std::vector<idi> locs_to_count(M); // // Push M candidates' neighbors into the queue. // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // // count_neighbors += out_degree; // idi num_inserted = 0; // // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = true; // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } //// if (dist >= set_L[L-1].distance_) { //// continue; //// } // ++num_inserted; // Candidate cand(nb_id, dist, false); // idi r = insert_into_queue(set_L, L, cand); //// { //// printf("c_i: %u " //// "count: %u " //// "loc_inserted: %u\n", //// c_i, //// num_inserted, //// r); //// } // if (r < nk) { // nk = r; // } // { // ++range_count[r / loc_range]; // } // } // { // if (0 == num_inserted) { // ++zero_inserted_count; // } // locs_to_count[c_i] = num_inserted; // count_inserted += num_inserted; // } //// { //// printf("c_i: %u " //// "num_inserted: %u\n", //// c_i, //// num_inserted); //// } // } //// { //// for (idi c_i = top_m_candidates_end; c_i < M; ++c_i) { //// locs_to_count[c_i] = 0; //// } //// printf("iter:%u\n", tmp_count); //// for (idi c_i = 0; c_i < M; ++c_i) { //// printf("%u %u\n", c_i, locs_to_count[c_i]); //// } //// } //// {//test //// idi sum = 0; //// for (const idi ct : range_count) sum += ct; //// printf("tmp_count: %u " //// "k: %u " //// "actual_M: %u %.1f%% " //// "zero_ins: %u %.1f%% " //// "1/3: %u %.1f%% " //// "2/3: %u %.1f%% " //// "3/3: %u %.1f%%\n", //// tmp_count, //// k, //// top_m_candidates_end, 100.0 * top_m_candidates_end / M, //// zero_inserted_count, 100.0 * zero_inserted_count / top_m_candidates_end, //// range_count[0], 100.0 * range_count[0] / sum, //// range_count[1], 100.0 * range_count[1] / sum, //// range_count[2], 100.0 * range_count[2] / sum); //// } // top_m_candidates_end = 0; // Clear top_m_candidates // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // { // printf("query:%uiter: %u " // "#neighbors: %lu " // "#inserted: %lu " // "ratio: %.2f%%\n", // query_id, tmp_count, // count_neighbors, // count_inserted, // 100.0 * count_inserted / 
count_neighbors); // } //// {// For histogram ////// const auto it_min = std::min_element(set_L.begin(), set_L.end()); ////// const auto it_max = std::max_element(set_L.begin(), set_L.end()); ////// const distf dist_min = it_min->distance_; ////// const distf dist_max = it_max->distance_; ////// const distf dist_min = it_min->distance_ - 1.0; ////// const distf dist_max = it_max->distance_ + 1.0; //// const distf dist_range = dist_max_ - dist_min_; ////// const distf dist_range = dist_max - dist_min; ////// { ////// printf("it_min->distance_: %f dist_min: %f\n", ////// it_min->distance_, dist_min); ////// } ////// const distf dist_range = it_max->distance_ - it_min->distance_; //// printf("iter:%u\n", tmp_count); //// for (idi i_l = 0; i_l < L; ++i_l) { ////// printf("%f\n", set_L[i_l].distance_); ////// printf("%f\n", (set_L[i_l].distance_ - dist_min) / dist_range * 100.0); //// printf("%f\n", (set_L[i_l].distance_ - dist_min_) / dist_range * 100.0); ////// printf("%.2f\n", (set_L[i_l].distance_ - it_min->distance_) / dist_range * 100.0); //// } //// } // } // // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } // if (query_id == 3) { // exit(1); // } //} // //// Sequential Top-M algorithm for profiling purpose: byte array, CAS, and OpenMP ////void Searching::search_with_top_m( //inline void Searching::search_with_top_m_profile_bit_CAS( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K) //{ //// std::vector<uint8_t> is_visited(num_v_, 0); // Byte array //// boost::dynamic_bitset<> is_visited(num_v_); // Bit array // BitVector is_visited(num_v_); // // { //#pragma omp parallel for // for (idi c_i = 0; c_i < L; ++c_i) { //// is_visited[init_ids[c_i]] = true; // is_visited.atomic_set_bit(init_ids[c_i]); // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; //#pragma omp parallel for // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. //#pragma omp parallel for // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // std::sort(set_L.begin(), set_L.begin() + L); // // std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. // idi tmp_count = 0; // for debug // while (k < L) { // ++tmp_count; // // unsigned nk = L; // // // Select M candidates // idi last_k = L; // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // if (set_L[c_i].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[c_i].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; // } // // // Push M candidates' neighbors into the queue. 
//#pragma omp parallel for // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; //// if (is_visited[nb_id]) { //// continue; //// } //// is_visited[nb_id] = true; // //// if (!AtomicOps::CAS(is_visited.data() + nb_id, //// static_cast<uint8_t>(0), //// static_cast<uint8_t>(1))) { //// continue; //// } // {// Self-defined BitVector // if (is_visited.atomic_is_bit_set(nb_id)) { // continue; // } // is_visited.atomic_set_bit(nb_id); // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } //// if (dist >= set_L[L-1].distance_) { //// continue; //// } // Candidate cand(nb_id, dist, false); // idi r = insert_into_queue(set_L, L, cand); // if (r < nk) { // nk = r; // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // } // //#pragma omp parallel for // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } //// //// {//test //// for (idi k_i = 0; k_i < K; ++k_i) { //// printf("%u: %u: %u %f\n", //// query_id, //// k_i, set_L[k_i].id_, set_L[k_i].distance_); //// } //// exit(1); //// } //} ///// Backup //inline void Searching::search_with_top_m( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K) //// std::vector< std::vector<idi> > &top_m_list) //{ // boost::dynamic_bitset<> is_visited(num_v_); // // { // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = true; // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // std::sort(set_L.begin(), set_L.begin() + L); // // std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. // idi tmp_count = 0; // for debug // while (k < L) { // ++tmp_count; // // unsigned nk = L; // // // Select M candidates // idi last_k = L; // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // if (set_L[c_i].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[c_i].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; // } // // // Push M candidates' neighbors into the queue. 
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = true; // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } //// if (dist >= set_L[L-1].distance_) { //// continue; //// } // Candidate cand(nb_id, dist, false); // idi r = insert_into_queue(set_L, L, cand); // if (r < nk) { // nk = r; // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // } // // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } //// //// {//test //// for (idi k_i = 0; k_i < K; ++k_i) { //// printf("%u: %u: %u %f\n", //// query_id, //// k_i, set_L[k_i].id_, set_L[k_i].distance_); //// } //// exit(1); //// } //} // ////// DEPRECATED: the is_visited array cannot be shared among threads. //inline void Searching::search_with_top_m_no_local_arrays( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // boost::dynamic_bitset<> &is_visited) //// std::vector< std::vector<idi> > &top_m_list) //{ //// boost::dynamic_bitset<> is_visited(num_v_); // // { // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = true; // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // std::sort(set_L.begin(), set_L.begin() + L); // // std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. // idi tmp_count = 0; // for debug // while (k < L) { // ++tmp_count; // // unsigned nk = L; // // // Select M candidates // idi last_k = L; // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // if (set_L[c_i].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[c_i].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; // } // // // Push M candidates' neighbors into the queue. 
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = true; // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } //// if (dist >= set_L[L-1].distance_) { //// continue; //// } // Candidate cand(nb_id, dist, false); // idi r = insert_into_queue(set_L, L, cand); // if (r < nk) { // nk = r; // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // } // // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } //// //// {//test //// for (idi k_i = 0; k_i < K; ++k_i) { //// printf("%u: %u: %u %f\n", //// query_id, //// k_i, set_L[k_i].id_, set_L[k_i].distance_); //// } //// exit(1); //// } //} inline void Searching::search_with_top_m_in_batch( const PANNS::idi M, const PANNS::idi batch_start, const PANNS::idi batch_size, const PANNS::idi K, const PANNS::idi L, std::vector< std::vector<Candidate> > &set_L_list, const std::vector<idi> &init_ids, std::vector< std::vector<idi> > &set_K_list) { std::vector< boost::dynamic_bitset<> > is_visited_list(batch_size, boost::dynamic_bitset<> (num_v_)); // Prepare the init_ids { //#pragma omp parallel for for (idi q_i = 0; q_i < batch_size; ++q_i) { auto &is_visited = is_visited_list[q_i]; for (idi c_i = 0; c_i < L; ++c_i) { is_visited[init_ids[c_i]] = true; } } } // Initialize set_L_list { //#pragma omp parallel for for (idi q_i = 0; q_i < batch_size; ++q_i) { const dataf *query_data = queries_load_ + (q_i + batch_start) * dimension_; for (idi i = 0; i < L; i++) { idi v_id = init_ids[i]; auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); dataf norm = *v_data++; // ++count_distance_computation_; distf dist = compute_distance_with_norm(v_data, query_data, norm); set_L_list[q_i][i] = Candidate(v_id, dist, false); // False means not checked. } std::sort(set_L_list[q_i].begin(), set_L_list[q_i].begin() + L); } } { std::vector<idi> joint_queue(M * batch_size); // Joint queue for all shared top-M candidates idi joint_queue_end = 0; boost::dynamic_bitset<> is_in_joint_queue(num_v_); // std::vector< std::vector<idi> > cands_query_ids(num_v_, std::vector<idi>(batch_size)); // If candidate cand_id is selected by query q_i, q_i should be in cands_query_ids[cand_id]. // std::vector<idi> cands_query_ids_ends(num_v_, 0); std::unordered_map< idi, std::vector<idi> > cands_query_ids(batch_size * M); std::vector<idi> ks(batch_size, 0); // Indices of every queue's first unchecked candidate. 
std::vector<idi> nks(batch_size, L); // Indices of highest candidate inserted std::vector<idi> last_ks(batch_size, L); // Indices of lowest candidate unchecked std::vector<idi> queries_not_finished(batch_size); idi queries_not_finished_end = batch_size; for (idi q_i = 0; q_i < batch_size; ++q_i) { queries_not_finished[q_i] = q_i; } bool is_finished = false; idi counter_for_debug = 0; while (!is_finished) { ++counter_for_debug; // Build the new joint queue // Traverse every query's queue for(idi q_i = 0; q_i < queries_not_finished_end; ++q_i) { idi q_local_id = queries_not_finished[q_i]; // last_ks[q_local_id] = L; auto &set_L = set_L_list[q_local_id]; idi top_m_count = 0; for (idi c_i = ks[q_local_id]; c_i < L && top_m_count < M; ++c_i) { if (set_L[c_i].is_checked_) { continue; } set_L[c_i].is_checked_ = true; last_ks[q_local_id] = c_i; ++top_m_count; idi cand_id = set_L[c_i].id_; // Record which query selected cand_id auto tmp_c = cands_query_ids.find(cand_id); if (tmp_c != cands_query_ids.end()) { tmp_c->second.push_back(q_local_id); } else { cands_query_ids.emplace(cand_id, std::vector<idi>()); cands_query_ids[cand_id].reserve(batch_size); cands_query_ids[cand_id].push_back(q_local_id); } // cands_query_ids[cand_id][cands_query_ids_ends[cand_id]++] = q_local_id; // Add candidate cand_id into the joint queue if (is_in_joint_queue[cand_id]) { continue; } is_in_joint_queue[cand_id] = true; joint_queue[joint_queue_end++] = cand_id; } } queries_not_finished_end = 0; // Clear queries_not_finished // Traverse every shared candidate for (idi c_i = 0; c_i < joint_queue_end; ++c_i) { idi cand_id = joint_queue[c_i]; is_in_joint_queue[cand_id] = false; // Reset is_in_joint_queue idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); idi out_degree = *out_edges++; const auto &query_local_ids = cands_query_ids[cand_id]; // Push neighbors to every queue of the queries that selected cand_id. 
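                // (This is where the batch sharing pays off: cand_id's adjacency list was read
                //  once above and is now reused for every query in the batch that selected it,
                //  instead of being fetched again per query.)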
// Traverse cand_id's neighbors // idi &q_i_bound = cands_query_ids_ends[cand_id]; // for (idi q_i = 0; q_i < q_i_bound; ++q_i) { // idi q_local_id = query_local_ids[q_i]; for (idi q_local_id : query_local_ids) { dataf *query_data = queries_load_ + (q_local_id + batch_start) * dimension_; auto &is_visited = is_visited_list[q_local_id]; auto &set_L = set_L_list[q_local_id]; // // Traverse cand_id's neighbors for (idi e_i = 0; e_i < out_degree; ++e_i) { idi nb_id = out_edges[e_i]; if (is_visited[nb_id]) { continue; } is_visited[nb_id] = true; auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); dataf norm = *nb_data++; // ++count_distance_computation_; distf dist = compute_distance_with_norm(nb_data, query_data, norm); if (dist > set_L[L-1].distance_) { continue; } // if (dist >= set_L[L-1].distance_) { // continue; // } Candidate new_cand(nb_id, dist, false); idi insert_loc = insert_into_queue(set_L, L, new_cand); if (insert_loc < nks[q_local_id]) { nks[q_local_id] = insert_loc; } } } cands_query_ids.erase(cand_id); // q_i_bound = 0; // Clear cands_query_ids[cand_id] } joint_queue_end = 0; // Clear joint_queue for (idi q_local_id = 0; q_local_id < batch_size; ++q_local_id) { if (nks[q_local_id] <= last_ks[q_local_id]) { ks[q_local_id] = nks[q_local_id]; } else { ks[q_local_id] = last_ks[q_local_id] + 1; } nks[q_local_id] = L; last_ks[q_local_id] = L; if (ks[q_local_id] < L) { queries_not_finished[queries_not_finished_end++] = q_local_id; } } if (!queries_not_finished_end) { is_finished = true; } } } { for (idi q_i = 0; q_i < batch_size; ++q_i) { for (idi c_i = 0; c_i < K && c_i < L; ++c_i) { set_K_list[q_i + batch_start][c_i] = set_L_list[q_i][c_i].id_; } } } //// // {//test // for (idi q_i = 0; q_i < batch_size; ++q_i) { // printf("query: %u\n", q_i + batch_start); // for (idi c_i = 0; c_i < K; ++c_i) { // printf("%u: %u %f\n", c_i, set_L_list[q_i][c_i].id_, set_L_list[q_i][c_i].distance_); // } // } // } } //inline void Searching::para_search_with_top_m_critical_area( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K) //// std::vector< std::vector<idi> > &top_m_list) //{ // std::vector<uint8_t> is_visited(num_v_, 0); //// boost::dynamic_bitset<> is_visited(num_v_); // // { ////#pragma omp parallel for // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = 1; // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. ////#pragma omp parallel for // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; // ++count_distance_computation_; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // std::sort(set_L.begin(), set_L.begin() + L); // // std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. 
// idi tmp_count = 0; // for debug // while (k < L) { // ++tmp_count; // // unsigned nk = L; //// int nk = L; // // // Select M candidates // idi last_k = L; // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // if (set_L[c_i].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[c_i].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; // } // // // Push M candidates' neighbors into the queue. // // OpenMP reduction(min : nk) has a problem if nk is unsigned. nk might end up with being MAX_UINT. ////#pragma omp parallel for ////#pragma omp parallel for reduction(min : nk) // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; //// if (is_visited[nb_id]) { //// continue; //// } //// is_visited[nb_id] = 1; // // if (!AtomicOps::CAS(is_visited.data() + nb_id, // static_cast<uint8_t>(0), // static_cast<uint8_t>(1))) { // continue; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; // ++count_distance_computation_; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } //// if (dist >= set_L[L-1].distance_) { //// continue; //// } // Candidate cand(nb_id, dist, false); // idi r; ////#pragma omp critical // { // r = insert_into_queue(set_L, L, cand); // if (r < nk) { // nk = r; // } // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // } // ////#pragma omp parallel for // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } //} // //inline void Searching::para_search_with_top_m_critical_area_no_omp( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K) //// std::vector< std::vector<idi> > &top_m_list) //{ // std::vector<uint8_t> is_visited(num_v_, 0); //// boost::dynamic_bitset<> is_visited(num_v_); // // { ////#pragma omp parallel for // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = 1; // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. ////#pragma omp parallel for // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; // ++count_distance_computation_; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // std::sort(set_L.begin(), set_L.begin() + L); // // std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. 
// idi tmp_count = 0; // for debug // while (k < L) { // ++tmp_count; // // unsigned nk = L; //// int nk = L; // // // Select M candidates // idi last_k = L; // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // if (set_L[c_i].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[c_i].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; // } // // // Push M candidates' neighbors into the queue. // // OpenMP reduction(min : nk) has a problem if nk is unsigned. nk might end up with being MAX_UINT. ////#pragma omp parallel for ////#pragma omp parallel for reduction(min : nk) // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; //// if (is_visited[nb_id]) { //// continue; //// } //// is_visited[nb_id] = 1; // // if (!AtomicOps::CAS(is_visited.data() + nb_id, // static_cast<uint8_t>(0), // static_cast<uint8_t>(1))) { // continue; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; // ++count_distance_computation_; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } //// if (dist >= set_L[L-1].distance_) { //// continue; //// } // Candidate cand(nb_id, dist, false); // idi r; ////#pragma omp critical // { // r = insert_into_queue(set_L, L, cand); // if (r < nk) { // nk = r; // } // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // } // ////#pragma omp parallel for // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } //} // //inline void Searching::para_search_with_top_m_critical_area_yes_omp( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K) //// std::vector< std::vector<idi> > &top_m_list) //{ // std::vector<uint8_t> is_visited(num_v_, 0); //// boost::dynamic_bitset<> is_visited(num_v_); // // { //#pragma omp parallel for // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = 1; // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. ////#pragma omp parallel for // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; // ++count_distance_computation_; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // std::sort(set_L.begin(), set_L.begin() + L); // // std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. 
// idi tmp_count = 0; // for debug // while (k < L) { // ++tmp_count; // // unsigned nk = L; //// int nk = L; // // // Select M candidates // idi last_k = L; // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // if (set_L[c_i].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[c_i].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; // } // // // Push M candidates' neighbors into the queue. // // OpenMP reduction(min : nk) has a problem if nk is unsigned. nk might end up with being MAX_UINT. ////#pragma omp parallel for ////#pragma omp parallel for reduction(min : nk) // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; //// if (is_visited[nb_id]) { //// continue; //// } //// is_visited[nb_id] = 1; // // if (!AtomicOps::CAS(is_visited.data() + nb_id, // static_cast<uint8_t>(0), // static_cast<uint8_t>(1))) { // continue; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; // ++count_distance_computation_; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } //// if (dist >= set_L[L-1].distance_) { //// continue; //// } // Candidate cand(nb_id, dist, false); // idi r; ////#pragma omp critical // { // r = insert_into_queue(set_L, L, cand); // if (r < nk) { // nk = r; // } // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // } // ////#pragma omp parallel for // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } //} // //inline void Searching::para_search_with_top_m_visited_array( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // std::vector<uint8_t> &is_visited) //// std::vector< std::vector<idi> > &top_m_list) //{ //// uint64_t count_visited = 0; // //// std::vector<uint8_t> is_visited(num_v_, 0); //// boost::dynamic_bitset<> is_visited(num_v_); // // { ////#pragma omp parallel for // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = 1; //// ++count_visited; // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. ////#pragma omp parallel for // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; // ++count_distance_computation_; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. 
// } // std::sort(set_L.begin(), set_L.begin() + L); // // std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. // idi tmp_count = 0; // for debug // while (k < L) { // ++tmp_count; // // unsigned nk = L; //// int nk = L; // // // Select M candidates // idi last_k = L; // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // if (set_L[c_i].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[c_i].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; // } // // // Push M candidates' neighbors into the queue. // // OpenMP reduction(min : nk) has a problem if nk is unsigned. nk might end up with being MAX_UINT. ////#pragma omp parallel for ////#pragma omp parallel for reduction(min : nk) // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; //// if (is_visited[nb_id]) { //// continue; //// } //// is_visited[nb_id] = 1; // // if (!AtomicOps::CAS(is_visited.data() + nb_id, // static_cast<uint8_t>(0), // static_cast<uint8_t>(1))) { // continue; // } //// ++count_visited; // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; // ++count_distance_computation_; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } //// if (dist >= set_L[L-1].distance_) { //// continue; //// } // Candidate cand(nb_id, dist, false); // idi r; ////#pragma omp critical // { // r = insert_into_queue(set_L, L, cand); // if (r < nk) { // nk = r; // } // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // } // ////#pragma omp parallel for // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } // //// { //// printf("query_id: %u " //// "count_visited: %lu %f%%\n", //// query_id, //// count_visited, //// 100.0 * count_visited / num_v_); //// } //} // //inline void Searching::para_search_with_top_m_merge_queues( // const idi M, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K) //{ //// {//test //// printf("query_id: %u\n", query_id); //// } //// const idi local_queue_length = ((M - 1) / num_threads_ + 1) * width_; // const idi local_queue_length = L; // std::vector< std::vector<Candidate> > local_queues_list(num_threads_, std::vector<Candidate>(local_queue_length)); // std::vector<idi> local_queues_ends(num_threads_, 0); // std::vector<uint8_t> is_visited(num_v_, 0); //// boost::dynamic_bitset<> is_visited(num_v_); // // { //#pragma omp parallel for // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = 1; // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; //#pragma omp parallel for // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); 
// } // // Get the distances of all candidates, store in the set set_L. //#pragma omp parallel for // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // std::sort(set_L.begin(), set_L.begin() + L); // // std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. // idi tmp_count = 0; // for debug // while (k < L) { // ++tmp_count; //// {//test //// printf("tmp_count: %d\n", tmp_count); //// } // // // Select M candidates // idi last_k = L; //// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // if (set_L[c_i].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[c_i].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; // } // // // Push M candidates' neighbors into the queue. //#pragma omp parallel for // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // int tid = omp_get_thread_num(); // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; //// if (is_visited[nb_id]) { //// continue; //// } //// is_visited[nb_id] = 1; // // if (!AtomicOps::CAS(is_visited.data() + nb_id, // static_cast<uint8_t>(0), // static_cast<uint8_t>(1))) { // continue; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } //// if (dist >= set_L[L-1].distance_) { //// continue; //// } // Candidate cand(nb_id, dist, false); // // Add to the local queue. // add_into_queue(local_queues_list[tid], local_queues_ends[tid], local_queue_length, cand); // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // // idi nk = L; //// // Merge. Parallel merging in every two queues. 
//// { //// for (int tid = 0; tid < num_threads_; ++tid) { //// if (0 == local_queues_ends[tid]) continue; //// idi r = merge_two_queues_into_1st_queue_para( //// set_L, //// 0, //// L, //// local_queues_list[tid], //// 0, //// local_queues_ends[tid]); ////// idi r = merge_two_queues_into_1st_queue_seq( ////// set_L, ////// 0, ////// L, ////// local_queues_list[tid], ////// 0, ////// local_queues_ends[tid]); //// local_queues_ends[tid] = 0; // Reset the local queue //// if (r < nk) { //// nk = r; //// } //// } //// } //// {// text //// if (query_id == 4 && //// tmp_count == 5) { //// // Print local queues //// for (int t_i = 0; t_i < num_threads_; ++t_i) { ////// idi start_i = t_i * local_queue_length; //// for (idi q_i = 0; q_i < local_queues_ends[t_i]; ++q_i) { //// printf("t[%u][%u]: " //// "id: %u " //// "dist: %f\n", //// t_i, q_i, //// local_queues_list[t_i][q_i].id_, //// local_queues_list[t_i][q_i].distance_); //// } //// } //// printf("----------\n"); //// for (idi i = 0; i < L; ++i) { //// printf("set_L[%u]: " //// "id: %u " //// "dist: %f\n", //// i, //// set_L[i].id_, //// set_L[i].distance_); //// } //// printf("----------\n"); //// } //// } // // Merge. Merge all queues in parallel. // { // if (num_threads_ > 1) { // idi r = merge_all_queues_para_list( // local_queues_list, // local_queues_ends, // set_L, // L); // if (r < nk) { // nk = r; // } // } else { // if (local_queues_ends[0]) { // idi r = merge_two_queues_into_1st_queue_seq_fixed( // set_L, // 0, // L, // local_queues_list[0], // 0, // local_queues_ends[0]); // local_queues_ends[0] = 0; // if (r < nk) { // nk = r; // } // } // } // } //// {//test //// if (query_id == 4) { //// for (idi i = 0; i < L; ++i) { //// printf("tmp_count: %u " //// "set_L[%u]: " //// "id: %u " //// "dist: %f\n", //// tmp_count, //// i, //// set_L[i].id_, //// set_L[i].distance_); //// } //// } //// //// } // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // } // //#pragma omp parallel for // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } //// { //// exit(1); //// } //// {//test //// ////// if (query_id == 4) { //// for (idi i = 0; i < L; ++i) { //// printf("set_L[%u]: " //// "id: %u " //// "dist: %f\n", //// i, //// set_L[i].id_, //// set_L[i].distance_); //// } ////// exit(1); ////// } //// } //} // ////// Using local queue and then sequential merge. //inline void Searching::para_search_with_top_m_queues_seq_merge( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K) //// std::vector< std::vector<idi> > &top_m_list) //{ //// const idi local_queue_length = ((L - 1) / num_threads_ + 1) * width_; // const idi local_queue_length = L; // std::vector< std::vector<Candidate> > local_queues_list(num_threads_, std::vector<Candidate>(local_queue_length)); // std::vector<idi> local_queues_ends(num_threads_, 0); // std::vector<uint8_t> is_visited(num_v_, 0); //// boost::dynamic_bitset<> is_visited(num_v_); // // { //#pragma omp parallel for // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = 1; // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; //// for (idi v_i = 0; v_i < L; ++v_i) { //// idi v_id = init_ids[v_i]; //// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); //// } // // Get the distances of all candidates, store in the set set_L. 
//#pragma omp parallel for // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // std::sort(set_L.begin(), set_L.begin() + L); // // std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. // idi tmp_count = 0; // for debug // while (k < L) { // ++tmp_count; //// { //// printf("tmp_count: %u " //// "k: %u\n", //// tmp_count, //// k); //// } // //// unsigned nk = L; //// int nk = L; // // // Select M candidates // idi last_k = L; // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // if (set_L[c_i].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[c_i].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; // } // // // Push M candidates' neighbors into the queue. //#pragma omp parallel for // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // int tid = omp_get_thread_num(); // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; //// if (is_visited[nb_id]) { //// continue; //// } //// is_visited[nb_id] = 1; // // if (!AtomicOps::CAS(is_visited.data() + nb_id, // static_cast<uint8_t>(0), // static_cast<uint8_t>(1))) { // continue; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } //// if (dist >= set_L[L-1].distance_) { //// continue; //// } // Candidate cand(nb_id, dist, false); //// idi r; ////#pragma omp critical //// { //// r = insert_into_queue(set_L, L, cand); //// if (r < nk) { //// nk = r; //// } //// } // // Add to the local queue. 
// add_into_queue(local_queues_list[tid], local_queues_ends[tid], local_queue_length, cand); // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // // idi nk = L; // // Merge // { // for (int tid = 0; tid < num_threads_; ++tid) { // if (0 == local_queues_ends[tid]) continue; // idi r = merge_two_queues_into_1st_queue_seq_fixed( // set_L, // 0, // L, // local_queues_list[tid], // 0, // local_queues_ends[tid]); //// L + 1); // local_queues_ends[tid] = 0; // Reset the local queue // if (r < nk) { // nk = r; // } // } // } // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // } // //#pragma omp parallel for // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } //// //// {//test //// for (idi k_i = 0; k_i < K; ++k_i) { //// printf("%u: %u: %u %f\n", //// query_id, //// k_i, set_L[k_i].id_, set_L[k_i].distance_); //// } //// exit(1); //// } //} // //inline void Searching::para_search_with_top_m_merge_queues_no_CAS( // const idi M, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // std::vector< std::vector<Candidate> > &local_queues_list, // std::vector<idi> &local_queues_ends, //// std::vector<uint8_t> &is_visited) // boost::dynamic_bitset<> &is_visited) //{ ////// const idi local_queue_length = ((M - 1) / num_threads_ + 1) * width_; //// const idi local_queue_length = L; //// std::vector< std::vector<Candidate> > local_queues_list(num_threads_, std::vector<Candidate>(local_queue_length)); //// std::vector<idi> local_queues_ends(num_threads_, 0); ////// std::vector<uint8_t> is_visited(num_v_, 0); //// boost::dynamic_bitset<> is_visited(num_v_); // // { //#pragma omp parallel for // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = 1; // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; //#pragma omp parallel for // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. //#pragma omp parallel for // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // std::sort(set_L.begin(), set_L.begin() + L); // // std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. // idi tmp_count = 0; // for debug // while (k < L) { // ++tmp_count; // // // Select M candidates // idi last_k = L; //// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // if (set_L[c_i].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[c_i].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; // } // // // Push M candidates' neighbors into the queue. 
//#pragma omp parallel for // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // int tid = omp_get_thread_num(); // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = 1; // //// if (!AtomicOps::CAS(is_visited.data() + nb_id, //// static_cast<uint8_t>(0), //// static_cast<uint8_t>(1))) { //// continue; //// } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } //// if (dist >= set_L[L-1].distance_) { //// continue; //// } // Candidate cand(nb_id, dist, false); // // Add to the local queue. // add_into_queue(local_queues_list[tid], local_queues_ends[tid], local_queue_length, cand); // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // // idi nk = L; //// // Merge. Parallel merging in every two queues. //// { //// for (int tid = 0; tid < num_threads_; ++tid) { //// if (0 == local_queues_ends[tid]) continue; //// idi r = merge_two_queues_into_1st_queue_para( //// set_L, //// 0, //// L, //// local_queues_list[tid], //// 0, //// local_queues_ends[tid]); ////// idi r = merge_two_queues_into_1st_queue_seq( ////// set_L, ////// 0, ////// L, ////// local_queues_list[tid], ////// 0, ////// local_queues_ends[tid]); //// local_queues_ends[tid] = 0; // Reset the local queue //// if (r < nk) { //// nk = r; //// } //// } //// } //// // Merge. Merge all queues in parallel. 
//// { //// if (num_threads_ > 1) { //// idi r = merge_all_queues_para( //// local_queues_list, //// local_queues_ends, //// set_L, //// L); //// if (r < nk) { //// nk = r; //// } //// } else { //// if (local_queues_ends[0]) { //// idi r = merge_two_queues_into_1st_queue_seq( //// set_L, //// 0, //// L, //// local_queues_list[0], //// 0, //// local_queues_ends[0]); //// local_queues_ends[0] = 0; //// if (r < nk) { //// nk = r; //// } //// } //// } //// } // // Merge // { // for (int tid = 0; tid < num_threads_; ++tid) { // if (0 == local_queues_ends[tid]) continue; // idi r = merge_two_queues_into_1st_queue_seq_fixed( // set_L, // 0, // L, // local_queues_list[tid], // 0, // local_queues_ends[tid]); //// L + 1); // local_queues_ends[tid] = 0; // Reset the local queue // if (r < nk) { // nk = r; // } // } // } // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // } // //#pragma omp parallel for // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } // // {// Reset // is_visited.reset(); //// std::fill(is_visited.begin(), is_visited.end(), 0); // std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0); // } //} //inline void Searching::para_search_with_top_m_merge_queues_in_array( //inline void Searching::para_search_with_top_m_merge_queues_new_threshold( // const idi M, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue //// std::vector< std::vector<Candidate> > &local_queues_list, // std::vector<Candidate> &local_queues_array, // std::vector<idi> &local_queues_ends, // Sizes of local queue // BitVector &is_visited) //// std::vector<uint8_t> &is_visited) //// boost::dynamic_bitset<> &is_visited) //{ // { //#pragma omp parallel for // for (idi c_i = 0; c_i < L; ++c_i) { //// is_visited[init_ids[c_i]] = 1; // is_visited.atomic_set_bit(init_ids[c_i]); // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; //#pragma omp parallel for // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. //#pragma omp parallel for // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // std::sort(set_L.begin(), set_L.begin() + L); // // idi min_index = L - 1; // distf min_1st = set_L[min_index].distance_; // // std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. // idi tmp_count = 0; // for debug // while (k < L) { // ++tmp_count; //// {//test //// printf("tmp_count: %d\n", tmp_count); //// } // // // Select M candidates // idi last_k = L; //// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // if (set_L[c_i].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. 
// set_L[c_i].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; // } // // // Push M candidates' neighbors into the queue. //#pragma omp parallel for // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // int tid = omp_get_thread_num(); // const idi local_queue_start = tid * local_queue_length; // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; //// { // Sequential edition //// if (is_visited[nb_id]) { //// continue; //// } //// is_visited[nb_id] = 1; //// } //// { // __ATOMIC_SEQ_CST edition //// if (!AtomicOps::CAS(is_visited.data() + nb_id, //// static_cast<uint8_t>(0), //// static_cast<uint8_t>(1))) { //// continue; //// } //// } //// {// Acquire and Release edition //// if (__atomic_load_n(is_visited.data() + nb_id, __ATOMIC_ACQUIRE)) { //// continue; //// } //// __atomic_store_n(is_visited.data() + nb_id, 1, __ATOMIC_RELEASE); //// } // {// Self-defined BitVector // if (is_visited.atomic_is_bit_set(nb_id)) { // continue; // } // is_visited.atomic_set_bit(nb_id); // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // // if (dist > min_1st) { // continue; // } else if (min_index > 0) { // // Inserted, so min_1st needs update // if (dist > set_L[min_index - 1].distance_) { // min_1st = dist; // if (min_index < L - 1) { // ++min_index; // } // } else { // min_1st = set_L[--min_index].distance_; // } //// min_1st = set_L[--min_index].distance_; // } // //// if (dist > set_L[L-1].distance_) { //// continue; //// } // // Candidate cand(nb_id, dist, false); // // Add to the local queue. // add_into_queue(local_queues_array, local_queue_start, local_queues_ends[tid], local_queue_length, cand); // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // // idi nk = L; //// // Merge. Parallel merging in every two queues. //// { //// for (int tid = 0; tid < num_threads_; ++tid) { //// if (0 == local_queues_ends[tid]) continue; //// idi r = merge_two_queues_into_1st_queue_para( //// set_L, //// 0, //// L, //// local_queues_list[tid], //// 0, //// local_queues_ends[tid]); ////// idi r = merge_two_queues_into_1st_queue_seq( ////// set_L, ////// 0, ////// L, ////// local_queues_list[tid], ////// 0, ////// local_queues_ends[tid]); //// local_queues_ends[tid] = 0; // Reset the local queue //// if (r < nk) { //// nk = r; //// } //// } //// } // // Merge. Merge all queues in parallel. 
// { // if (num_threads_ > 1) { // idi r = merge_all_queues_para_array( //// local_queues_list, // local_queues_array, // local_queues_ends, // local_queue_length, // set_L, // L); // if (r < nk) { // nk = r; // } // } else { // if (local_queues_ends[0]) { // idi r = merge_two_queues_into_1st_queue_seq_fixed( // set_L, // 0, // L, //// local_queues_list[0], // local_queues_array, // 0, // local_queues_ends[0]); // local_queues_ends[0] = 0; // if (r < nk) { // nk = r; // } // } // } // } //// // Merge Sequentially //// { //// for (int tid = 0; tid < num_threads_; ++tid) { //// if (0 == local_queues_ends[tid]) continue; //// idi r = merge_two_queues_into_1st_queue_seq_fixed( //// set_L, //// 0, //// L, ////// local_queues_list[tid], ////// 0, //// local_queues_array, //// tid * local_queue_length, //// local_queues_ends[tid]); ////// L + 1); //// local_queues_ends[tid] = 0; // Reset the local queue //// if (r < nk) { //// nk = r; //// } //// } //// } // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // } // //#pragma omp parallel for // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } // // {// Reset //// is_visited.reset(); //// std::fill(is_visited.begin(), is_visited.end(), 0); // is_visited.clear_all(); // std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0); // } //} /* * 5/7/2020-15:14 * Use 1 threads to scale M until the value_M_middle. * Then use multiple threads. */ inline void Searching::para_search_with_top_m_merge_queues_middle_m( const idi value_M_middle, const idi value_M_max, const idi query_id, const idi K, const idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K, const idi local_queue_length, // Maximum size of local queue const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; std::vector<idi> &local_queues_ends, // Sizes of local queue std::vector<idi> &top_m_candidates, boost::dynamic_bitset<> &is_visited) { time_initialization_ -= WallTimer::get_time_mark(); // const idi base_set_L = (num_threads_ - 1) * local_queue_length; //#pragma omp parallel for for (idi c_i = 0; c_i < L; ++c_i) { is_visited[init_ids[c_i]] = 1; } const dataf *query_data = queries_load_ + query_id * dimension_; #pragma omp parallel for for (idi v_i = 0; v_i < L; ++v_i) { idi v_id = init_ids[v_i]; _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); } uint64_t tmp_count_computation = 0; // Get the distances of all candidates, store in the set set_L. //#pragma omp parallel for #pragma omp parallel for reduction(+ : tmp_count_computation) for (unsigned i = 0; i < L; i++) { unsigned v_id = init_ids[i]; auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); dataf norm = *v_data++; ++tmp_count_computation; distf dist = compute_distance_with_norm(v_data, query_data, norm); set_L[i + base_set_L] = Candidate(v_id, dist, false); // False means not checked. } count_distance_computation_ += tmp_count_computation; tmp_count_computation = 0; // std::sort(set_L.begin(), set_L.begin() + L); std::sort( set_L.begin() + base_set_L, set_L.begin() + base_set_L + L); local_queues_ends[num_threads_ - 1] = L; time_initialization_ += WallTimer::get_time_mark(); idi top_m_candidates_end = 0; idi k = 0; // Index of first unchecked candidate. 
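    // Layout of set_L, as implied by the indexing below (a sketch, not authoritative):
    //   [0, (num_threads_ - 1) * local_queue_length)  -- local queues of threads 1..num_threads_-1;
    //                                                    thread t writes at offset (t - 1) * local_queue_length.
    //   [base_set_L, base_set_L + L)                   -- the "global" queue maintained by thread 0.
    // The search runs in two phases: a single-threaded phase while M < value_M_middle,
    // then a parallel phase; M doubles each iteration until it reaches value_M_max.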
idi tmp_count = 0; // for debug idi M = 1; time_sequential_phase_ -= WallTimer::get_time_mark(); { // Single thread while (k < L && M < value_M_middle) { ++tmp_count; // {//test // printf("tmp_count: %d\n", tmp_count); // } // Select M candidates idi last_k = L; // Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { idi index_set_L = c_i + base_set_L; if (set_L[index_set_L].is_checked_) { continue; } last_k = c_i; // Record the location of the last candidate selected. set_L[index_set_L].is_checked_ = true; top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; } idi nk = L; // Push M candidates' neighbors into the queue. for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { idi cand_id = top_m_candidates[c_i]; _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); idi out_degree = *out_edges++; for (idi n_i = 0; n_i < out_degree; ++n_i) { _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); } for (idi e_i = 0; e_i < out_degree; ++e_i) { idi nb_id = out_edges[e_i]; { // Sequential edition if (is_visited[nb_id]) { continue; } is_visited[nb_id] = 1; } auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); dataf norm = *nb_data++; ++tmp_count_computation; distf dist = compute_distance_with_norm(nb_data, query_data, norm); if (dist > set_L[L - 1 + base_set_L].distance_) { continue; } Candidate cand(nb_id, dist, false); // Thread 0 maintains the "global" queue idi r = add_into_queue( set_L, base_set_L, local_queues_ends[num_threads_ - 1], L, cand); if (r < nk) { nk = r; } } } top_m_candidates_end = 0; // Clear top_m_candidates count_distance_computation_ += tmp_count_computation; tmp_count_computation = 0; if (nk <= last_k) { k = nk; } else { k = last_k + 1; } {// Scale M if (M < value_M_max) { M <<= 1; } else { M = value_M_max; } } } } time_sequential_phase_ += WallTimer::get_time_mark(); time_parallel_phase_ -= WallTimer::get_time_mark(); uint64_t tmp_count_add_to_queue = 0; double tmp_time_pick_top_m = 0; double tmp_time_distance_computation = 0; double tmp_time_add_to_queue = 0.0; { // Multiple Threads while (k < L) { time_expand_ -= WallTimer::get_time_mark(); ++tmp_count; // {//test // printf("tmp_count: %d\n", tmp_count); // } // Select M candidates idi last_k = L; time_pick_top_m_ -= WallTimer::get_time_mark(); // Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { idi index_set_L = c_i + base_set_L; if (set_L[index_set_L].is_checked_) { continue; } last_k = c_i; // Record the location of the last candidate selected. set_L[index_set_L].is_checked_ = true; top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; } time_pick_top_m_ += WallTimer::get_time_mark(); idi nk = L; // Push M candidates' neighbors into the queue. 
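            // Parallel expansion of the selected top-M candidates: each OpenMP thread takes
            // some candidates, walks their out-edges, computes distances, and keeps only
            // neighbors whose distance beats the current L-th entry of the global queue.
            // Thread 0 inserts directly into the global queue (updating nk); the other
            // threads push into their own local queues and rely on the merge step below.
            // Note that the visited-flag check/set here is non-atomic, so concurrent threads
            // may occasionally expand the same neighbor twice; that appears to be tolerated
            // rather than prevented.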
#pragma omp parallel for reduction(+ : tmp_count_computation) \ reduction(+ : tmp_count_add_to_queue) \ reduction(+ : tmp_time_pick_top_m) \ reduction(+ : tmp_time_distance_computation) \ reduction(+ : tmp_time_add_to_queue) // for (int tid = 0; tid < num_threads_; ++tid) { for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { tmp_time_pick_top_m -= WallTimer::get_time_mark(); int tid = omp_get_thread_num(); idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } tmp_time_pick_top_m += WallTimer::get_time_mark(); for (idi e_i = 0; e_i < out_degree; ++e_i) { tmp_time_distance_computation -= WallTimer::get_time_mark(); idi nb_id = out_edges[e_i]; { // Sequential edition if (is_visited[nb_id]) { tmp_time_distance_computation += WallTimer::get_time_mark(); continue; } is_visited[nb_id] = 1; } auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); dataf norm = *nb_data++; ++tmp_count_computation; distf dist = compute_distance_with_norm(nb_data, query_data, norm); tmp_time_distance_computation += WallTimer::get_time_mark(); if (dist > set_L[L - 1 + base_set_L].distance_) { continue; } ++tmp_count_add_to_queue; Candidate cand(nb_id, dist, false); // Add to the local queue. // tmp_time_pick_top_m -= WallTimer::get_time_mark(); tmp_time_add_to_queue -= WallTimer::get_time_mark(); if (0 != tid) { // Non-Master threads using local queues add_into_queue( set_L, (tid - 1) * local_queue_length, local_queues_ends[tid - 1], local_queue_length, cand); } else { // Thread 0 maintains the "global" queue idi r = add_into_queue( set_L, base_set_L, local_queues_ends[num_threads_ - 1], L, cand); if (r < nk) { nk = r; } } tmp_time_add_to_queue += WallTimer::get_time_mark(); // tmp_time_pick_top_m += WallTimer::get_time_mark(); } } time_add_to_queue_ += tmp_time_add_to_queue; tmp_time_add_to_queue = 0; // } time_distance_computation_ += tmp_time_distance_computation; tmp_time_distance_computation = 0; time_pick_top_m_ += tmp_time_pick_top_m; tmp_time_pick_top_m = 0; top_m_candidates_end = 0; // Clear top_m_candidates count_add_to_queue_ += tmp_count_add_to_queue; tmp_count_add_to_queue = 0; count_distance_computation_ += tmp_count_computation; tmp_count_computation = 0; time_expand_ += WallTimer::get_time_mark(); // // Merge. Merge all queues in parallel. 
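            // Merge phase: fold every non-empty local queue into the global queue
            // [base_set_L, base_set_L + L). merge_all_queues_para_array() returns the lowest
            // position at which an element was inserted; nk keeps the minimum of those
            // positions so that k can be rewound to the first entry that may now hold a new,
            // unchecked candidate (otherwise k simply advances past last_k).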
{ time_merge_ -= WallTimer::get_time_mark(); if (num_threads_ > 1) { idi r = merge_all_queues_para_array( set_L, local_queues_ends, local_queue_length, L); if (r < nk) { nk = r; } } time_merge_ += WallTimer::get_time_mark(); } if (nk <= last_k) { k = nk; } else { k = last_k + 1; } {// Scale M if (M < value_M_max) { M <<= 1; } else { M = value_M_max; } } } } time_parallel_phase_ += WallTimer::get_time_mark(); time_ending_ -= WallTimer::get_time_mark(); #pragma omp parallel for for (idi k_i = 0; k_i < K; ++k_i) { set_K[k_i] = set_L[k_i + base_set_L].id_; // set_K[k_i] = set_L[k_i].id_; } {// Reset // std::fill(is_visited.begin(), is_visited.end(), 0); is_visited.reset(); // is_visited.clear_all(); std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0); } time_ending_ += WallTimer::get_time_mark(); // {//test // if (3 == query_id) { // exit(1); // } // } } inline void Searching::para_search_with_top_m_merge_queues_middle_m_no_merge( const uint64_t computation_threshold, const idi value_M_middle, const idi value_M_max, const idi query_id, const idi K, const idi L, const idi init_size, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K, const idi local_queue_length, // Maximum size of local queue const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; std::vector<idi> &local_queues_ends, // Sizes of local queue std::vector<idi> &top_m_candidates, boost::dynamic_bitset<> &is_visited) { uint64_t count_single_query_computation = 0; uint64_t count_init_computation = 0; uint64_t count_seq_computation = 0; uint64_t count_par_computation = 0; // {//test // printf("query_id: %u\n", query_id); // } // time_initialization_ -= WallTimer::get_time_mark(); // const idi base_set_L = (num_threads_ - 1) * local_queue_length; { #pragma omp parallel for for (idi c_i = 0; c_i < init_size; ++c_i) { // for (idi c_i = 0; c_i < L; ++c_i) { is_visited[init_ids[c_i]] = 1; // is_visited.atomic_set_bit(init_ids[c_i]); } } const dataf *query_data = queries_load_ + query_id * dimension_; #pragma omp parallel for for (idi v_i = 0; v_i < init_size; ++v_i) { // for (idi v_i = 0; v_i < L; ++v_i) { idi v_id = init_ids[v_i]; _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); } uint64_t tmp_count_computation = 0; // Get the distances of all candidates, store in the set set_L. //#pragma omp parallel for #pragma omp parallel for reduction(+ : tmp_count_computation) for (unsigned i = 0; i < init_size; i++) { // for (unsigned i = 0; i < L; i++) { unsigned v_id = init_ids[i]; auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); dataf norm = *v_data++; // ++count_distance_computation_; ++tmp_count_computation; distf dist = compute_distance_with_norm(v_data, query_data, norm); set_L[i + base_set_L] = Candidate(v_id, dist, false); // False means not checked. // set_L[i] = Candidate(v_id, dist, false); // False means not checked. 
} count_distance_computation_ += tmp_count_computation; count_init_computation += tmp_count_computation; count_single_query_computation += tmp_count_computation; tmp_count_computation = 0; // std::sort(set_L.begin(), set_L.begin() + L); std::sort( set_L.begin() + base_set_L, set_L.begin() + base_set_L + init_size); // set_L.begin() + base_set_L + L); local_queues_ends[num_threads_ - 1] = init_size; // local_queues_ends[num_threads_ - 1] = L; // time_initialization_ += WallTimer::get_time_mark(); // time_sequential_phase_ -= WallTimer::get_time_mark(); // std::vector<idi> top_m_candidates(M); idi &global_queue_size = local_queues_ends[num_threads_ - 1]; idi top_m_candidates_end = 0; idi k = 0; // Index of first unchecked candidate. idi tmp_count = 0; // for debug idi M = 1; { // Single thread while (k < L && M < value_M_middle && count_single_query_computation <= computation_threshold) { ++tmp_count; // {//test // printf("tmp_count: %d\n", tmp_count); // } // int real_threads = std::min(static_cast<int>(M), num_threads_); // idi queue_base = num_threads_ - real_threads; // Select M candidates idi last_k = L; // Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. for (idi c_i = k; c_i < global_queue_size && top_m_candidates_end < M; ++c_i) { // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { idi index_set_L = c_i + base_set_L; if (set_L[index_set_L].is_checked_) { continue; } last_k = c_i; // Record the location of the last candidate selected. set_L[index_set_L].is_checked_ = true; top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; } idi nk = L; // Push M candidates' neighbors into the queue. for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { idi cand_id = top_m_candidates[c_i]; _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); idi out_degree = *out_edges++; for (idi n_i = 0; n_i < out_degree; ++n_i) { _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); } for (idi e_i = 0; e_i < out_degree; ++e_i) { idi nb_id = out_edges[e_i]; { // Sequential edition if (is_visited[nb_id]) { continue; } is_visited[nb_id] = 1; } auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); dataf norm = *nb_data++; // ++count_distance_computation_; ++tmp_count_computation; distf dist = compute_distance_with_norm(nb_data, query_data, norm); if (dist > set_L[global_queue_size - 1 + base_set_L].distance_) { // if (dist > set_L[L - 1 + base_set_L].distance_) { continue; } Candidate cand(nb_id, dist, false); // Thread 0 maintains the "global" queue idi r = add_into_queue( set_L, base_set_L, global_queue_size, // local_queues_ends[num_threads_ - 1], L, cand); if (r < nk) { nk = r; } } } top_m_candidates_end = 0; // Clear top_m_candidates count_distance_computation_ += tmp_count_computation; count_seq_computation += tmp_count_computation; count_single_query_computation += tmp_count_computation; tmp_count_computation = 0; // {// Local queues' ends // printf("query%u:iter: %u", query_id, tmp_count); // for (int i_t = 0; i_t < num_threads_; ++i_t) { // printf(" [%u]: %u", i_t, local_queues_ends[i_t]); // } // printf("\n"); // } if (nk <= last_k) { k = nk; } else { k = last_k + 1; } {// Scale M if (M < value_M_max) { M <<= 1; } else { M = value_M_max; } } } } // time_sequential_phase_ += WallTimer::get_time_mark(); // time_parallel_phase_ -= WallTimer::get_time_mark(); { // Multiple Threads 
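        // Parallel phase of the budgeted variant: the same expand/merge loop as above,
        // except that every while-condition also checks count_single_query_computation
        // against computation_threshold, so the query stops early once its
        // distance-computation budget is spent (counted separately for the initialization,
        // sequential, and parallel phases).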
while (k < L and count_single_query_computation <= computation_threshold) { // while (k < L) { ++tmp_count; // {//test // printf("tmp_count: %d " // "k: %u " // "global_queue_size: %u\n", // tmp_count, // k, // global_queue_size); // } // int real_threads = std::min(static_cast<int>(M), num_threads_); // idi queue_base = num_threads_ - real_threads; // Select M candidates idi last_k = L; // Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. for (idi c_i = k; c_i < global_queue_size && top_m_candidates_end < M; ++c_i) { // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { idi index_set_L = c_i + base_set_L; if (set_L[index_set_L].is_checked_) { continue; } last_k = c_i; // Record the location of the last candidate selected. set_L[index_set_L].is_checked_ = true; top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; } idi nk = L; // Push M candidates' neighbors into the queue. //#pragma omp parallel for reduction(+ : tmp_count_computation) num_threads(real_threads) #pragma omp parallel for reduction(+ : tmp_count_computation) for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { int tid = omp_get_thread_num(); idi cand_id = top_m_candidates[c_i]; _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); idi out_degree = *out_edges++; for (idi n_i = 0; n_i < out_degree; ++n_i) { _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); } for (idi e_i = 0; e_i < out_degree; ++e_i) { idi nb_id = out_edges[e_i]; { // Sequential edition if (is_visited[nb_id]) { continue; } is_visited[nb_id] = 1; } auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); dataf norm = *nb_data++; // ++count_distance_computation_; ++tmp_count_computation; distf dist = compute_distance_with_norm(nb_data, query_data, norm); if (dist > set_L[global_queue_size - 1 + base_set_L].distance_) { // if (dist > set_L[L - 1 + base_set_L].distance_) { continue; } Candidate cand(nb_id, dist, false); // Add to the local queue. if (0 != tid) { // Non-Master threads using local queues add_into_queue( set_L, (tid - 1) * local_queue_length, local_queues_ends[tid - 1], local_queue_length, cand); } else { // Thread 0 maintains the "global" queue idi r = add_into_queue( set_L, base_set_L, global_queue_size, // local_queues_ends[num_threads_ - 1], L, cand); if (r < nk) { nk = r; } } } } top_m_candidates_end = 0; // Clear top_m_candidates count_distance_computation_ += tmp_count_computation; count_par_computation += tmp_count_computation; count_single_query_computation += tmp_count_computation; tmp_count_computation = 0; // {// Local queues' ends // printf("query%u:iter: %u", query_id, tmp_count); // for (int i_t = 0; i_t < num_threads_; ++i_t) { // printf(" [%u]: %u", i_t, local_queues_ends[i_t]); // } // printf("\n"); // } // Merge. Merge all queues in parallel. 
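            // (The commented-out merge_all_queues_queue_base() call below was presumably an
            // experiment that merged only the queues of the real_threads actually used in
            // this iteration; the array-based merge over all local queues is what is used.)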
{ if (num_threads_ > 1) { // idi r = merge_all_queues_queue_base( // set_L, // local_queues_ends, // queue_base, // real_threads, // local_queue_length, // L); idi r = merge_all_queues_para_array( set_L, local_queues_ends, local_queue_length, L); if (r < nk) { nk = r; } } } if (nk <= last_k) { k = nk; } else { k = last_k + 1; } {// Scale M if (M < value_M_max) { M <<= 1; } else { M = value_M_max; } } // {// Print relative distance //// distf top_dist = set_L[base_set_L].distance_; // for (idi i_l = 0; i_l < L; ++i_l) { // printf("%u %f\n", // tmp_count, set_L[i_l + base_set_L].distance_); //// tmp_count, set_L[i_l + base_set_L].distance_ - top_dist); // } // } } } // time_parallel_phase_ += WallTimer::get_time_mark(); #pragma omp parallel for for (idi k_i = 0; k_i < K; ++k_i) { set_K[k_i] = set_L[k_i + base_set_L].id_; // set_K[k_i] = set_L[k_i].id_; } {// Reset // std::fill(is_visited.begin(), is_visited.end(), 0); is_visited.reset(); // is_visited.clear_all(); std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0); } // {//test // if (3 == query_id) { // exit(1); // } // } // {//test // printf("count_single: %lu " // "ct_init: %lu " // "ct_seq: %lu " // "ct_par: %lu\n", // count_single_query_computation, // count_init_computation, // count_seq_computation, // count_par_computation); // } } ///* // * 6/15/2020-14:40 // * Queues merging together to the global queue // */ //inline void Searching::para_search_with_top_m_merge_queues_sequential_merge( // const idi value_M_middle, // const idi value_M_max, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; // std::vector<idi> &local_queues_ends, // Sizes of local queue // std::vector<idi> &top_m_candidates, // boost::dynamic_bitset<> &is_visited) //{ //// const idi base_set_L = (num_threads_ - 1) * local_queue_length; // { //#pragma omp parallel for // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = 1; //// is_visited.atomic_set_bit(init_ids[c_i]); // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; //#pragma omp parallel for // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // uint64_t tmp_count_computation = 0; // // Get the distances of all candidates, store in the set set_L. ////#pragma omp parallel for //#pragma omp parallel for reduction(+ : tmp_count_computation) // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; //// ++count_distance_computation_; // ++tmp_count_computation; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i + base_set_L] = Candidate(v_id, dist, false); // False means not checked. //// set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; //// std::sort(set_L.begin(), set_L.begin() + L); // std::sort( // set_L.begin() + base_set_L, // set_L.begin() + base_set_L + L); // local_queues_ends[num_threads_ - 1] = L; // //// std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. 
// idi tmp_count = 0; // for debug // idi M = 1; // // { // Single thread // while (k < L && M < value_M_middle) { // ++tmp_count; //// {//test //// printf("tmp_count: %d\n", tmp_count); //// } // // // Select M candidates // idi last_k = L; //// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // idi index_set_L = c_i + base_set_L; // if (set_L[index_set_L].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[index_set_L].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; // } // // // idi nk = L; // // Push M candidates' neighbors into the queue. // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // { // Sequential edition // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = 1; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation_; // ++tmp_count_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L - 1 + base_set_L].distance_) { // continue; // } // // Candidate cand(nb_id, dist, false); // // Thread 0 maintains the "global" queue // idi r = add_into_queue( // set_L, // base_set_L, // local_queues_ends[num_threads_ - 1], // L, // cand); // if (r < nk) { // nk = r; // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // // {// Scale M // if (M < value_M_max) { // M <<= 1; // } else { // M = value_M_max; // } // } // } // } // // { // Multiple Threads // while (k < L) { // ++tmp_count; //// {//test //// if (num_threads_ == 2) { //// printf("tmp_count: %d " //// "k: %u\n", //// tmp_count, //// k); //// } //// } // // Select M candidates // idi last_k = L; //// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // idi index_set_L = c_i + base_set_L; // if (set_L[index_set_L].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[index_set_L].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; // } // // // idi nk = L; // // Push M candidates' neighbors into the queue. 
////#pragma omp parallel for reduction(+ : tmp_count_computation) num_threads(real_threads) //#pragma omp parallel for reduction(+ : tmp_count_computation) // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // int tid = omp_get_thread_num(); // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // { // Sequential edition // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = 1; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation_; // ++tmp_count_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L - 1 + base_set_L].distance_) { // continue; // } // // Candidate cand(nb_id, dist, false); // // Add to the local queue. // if (0 != tid) { // // Non-Master threads using local queues // add_into_queue( // set_L, // (tid - 1) * local_queue_length, // local_queues_ends[tid - 1], // local_queue_length, // cand); // } else { // // Thread 0 maintains the "global" queue // idi r = add_into_queue( // set_L, // base_set_L, // local_queues_ends[num_threads_ - 1], // L, // cand); // if (r < nk) { // nk = r; // } // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; // //// // Merge. Merge all queues in parallel. 
// { //// {//test //// for (idi q_i = 0; q_i < num_threads_; ++q_i) { //// if (0 == local_queues_ends[q_i]) { //// continue; //// } //// for (idi e_i = 0; e_i < local_queues_ends[q_i]; ++e_i) { //// printf("tmp_count: %u " //// "q_i: %u " //// "[%u]: (%u, %f)\n", //// tmp_count, //// q_i, //// e_i, set_L[q_i * local_queue_length + e_i].id_, set_L[q_i * local_queue_length + e_i].distance_); //// } //// } //// } //// time_merge_ -= WallTimer::get_time_mark(); // if (num_threads_ > 1) { // idi r = merge_all_queues_all_together_in_sequential( // set_L, // local_queues_ends, // local_queue_length, // L); //// idi r = merge_all_queues_para_array( //// set_L, //// local_queues_ends, //// local_queue_length, //// L); // if (r < nk) { // nk = r; // } //// {//test //// printf("tmp_count: %u " //// "r: %u " //// "last_k: %u\n", //// tmp_count, //// r, //// last_k); //// for (idi l_i = 0; l_i < L; ++l_i) { //// printf("tmp_count: %u " //// "[%u]: (%u, %f)\n", //// tmp_count, //// l_i, set_L[l_i + base_set_L].id_, set_L[l_i + base_set_L].distance_); //// } //// } // } // //// time_merge_ += WallTimer::get_time_mark(); // } // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // {// Scale M // if (M < value_M_max) { // M <<= 1; // } else { // M = value_M_max; // } // } // } // } // //#pragma omp parallel for // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i + base_set_L].id_; //// set_K[k_i] = set_L[k_i].id_; // } // // {// Reset //// std::fill(is_visited.begin(), is_visited.end(), 0); // is_visited.reset(); //// is_visited.clear_all(); // std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0); // } // //// {//test //// if (0 == query_id) { //// exit(1); //// } //// } //} ///* // * 6/19/2020: // * Intra-query + Inter-query // */ //inline void Searching::para_search_with_top_m_nested_para( // const idi batch_start, // const idi batch_size, // const idi value_M_middle, // const idi value_M_max, // const idi K, // const idi L, // std::vector< std::vector<Candidate> > &set_L_list, // const std::vector<idi> &init_ids, // std::vector< std::vector<idi> > &set_K_list, // const idi local_queue_length, // Maximum size of local queue // const idi base_set_L, // base_set_L = (num_threads_intra_query_ - 1) * local_queue_length; // std::vector< std::vector<idi> > &local_queues_ends_list, // Sizes of local queue // std::vector< std::vector<idi> > &top_m_candidates_list, // std::vector< boost::dynamic_bitset<> > &is_visited_list) //{ // {// Initialize is_visited flag array //#pragma omp parallel for num_threads(num_threads_inter_query_) // for (idi q_i = 0; q_i < batch_size; ++q_i) { // auto &is_visited = is_visited_list[q_i]; //#pragma omp parallel for num_threads(num_threads_intra_query_) // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = 1; // } // } // } // //#pragma omp parallel for // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // uint64_t tmp_count_total_computation = 0; //#pragma omp parallel for num_threads(num_threads_inter_query_) reduction(+ : tmp_count_total_computation) // for (idi q_i = 0; q_i < batch_size; ++q_i) { // idi query_id = batch_start + q_i; // auto &set_L = set_L_list[q_i]; // auto &local_queues_ends = local_queues_ends_list[q_i]; // auto &is_visited = is_visited_list[q_i]; // // const dataf *query_data = queries_load_ + query_id * dimension_; ////#pragma omp parallel for //// for (idi v_i = 0; v_i < L; ++v_i) { //// idi v_id = 
init_ids[v_i]; //// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); //// } // uint64_t tmp_count_computation = 0; // // Get the distances of all candidates, store in the set set_L. ////#pragma omp parallel for //#pragma omp parallel for reduction(+ : tmp_count_computation) num_threads(num_threads_intra_query_) // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; //// ++count_distance_computation_; // ++tmp_count_computation; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i + base_set_L] = Candidate(v_id, dist, false); // False means not checked. //// set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } //// count_distance_computation_ += tmp_count_computation; // tmp_count_total_computation += tmp_count_computation; // tmp_count_computation = 0; //// std::sort(set_L.begin(), set_L.begin() + L); // std::sort( // set_L.begin() + base_set_L, // set_L.begin() + base_set_L + L); // local_queues_ends[num_threads_intra_query_ - 1] = L; // //// std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. // idi tmp_count = 0; // for debug // idi M = 1; // // auto &top_m_candidates = top_m_candidates_list[q_i]; // { // Single thread // while (k < L && M < value_M_middle) { // ++tmp_count; //// {//test //// printf("tmp_count: %d\n", tmp_count); //// } // // // Select M candidates // idi last_k = L; //// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // idi index_set_L = c_i + base_set_L; // if (set_L[index_set_L].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[index_set_L].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; // } // // idi nk = L; // // Push M candidates' neighbors into the queue. 
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // { // Sequential edition // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = 1; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation_; // ++tmp_count_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); //// {//test //// if (391655 == nb_id) { //// printf("tmp_count: %u " //// "nb_id: %u " //// "distf: %f\n", //// tmp_count, //// nb_id, //// dist); //// } //// } // if (dist > set_L[L - 1 + base_set_L].distance_) { // continue; // } // // Candidate cand(nb_id, dist, false); // // Thread 0 maintains the "global" queue // idi r = add_into_queue( // set_L, // base_set_L, // local_queues_ends[num_threads_intra_query_ - 1], // L, // cand); // if (r < nk) { // nk = r; // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates //// count_distance_computation_ += tmp_count_computation; // tmp_count_total_computation += tmp_count_computation; // tmp_count_computation = 0; // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // // {// Scale M // if (M < value_M_max) { // M <<= 1; // } else { // M = value_M_max; // } // } // } // } // // { // Multiple Threads // while (k < L) { // ++tmp_count; //// {//test //// printf("tmp_count: %d\n", tmp_count); //// } // // Select M candidates // idi last_k = L; //// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // idi index_set_L = c_i + base_set_L; // if (set_L[index_set_L].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[index_set_L].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; // } // // idi nk = L; // // Push M candidates' neighbors into the queue. 
//#pragma omp parallel for reduction(+ : tmp_count_computation) num_threads(num_threads_intra_query_) // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // int tid = omp_get_thread_num(); // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // { // Sequential edition // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = 1; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation_; // ++tmp_count_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); //// {//test //// if (391655 == nb_id) { //// printf("tmp_count: %u " //// "nb_id: %u " //// "distf: %f\n", //// tmp_count, //// nb_id, //// dist); //// } //// } // if (dist > set_L[L - 1 + base_set_L].distance_) { // continue; // } // // Candidate cand(nb_id, dist, false); // // Add to the local queue. // if (0 != tid) { // // Non-Master threads using local queues // add_into_queue( // set_L, // (tid - 1) * local_queue_length, // local_queues_ends[tid - 1], // local_queue_length, // cand); // } else { // // Thread 0 maintains the "global" queue // idi r = add_into_queue( // set_L, // base_set_L, // local_queues_ends[num_threads_intra_query_ - 1], // L, // cand); // if (r < nk) { // nk = r; // } // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates //// count_distance_computation_ += tmp_count_computation; // tmp_count_total_computation += tmp_count_computation; // tmp_count_computation = 0; // //// // Merge. Merge all queues in parallel. // { //// time_merge_ -= WallTimer::get_time_mark(); // if (num_threads_intra_query_ > 1) { // idi r = merge_all_queues_para_array( // set_L, // local_queues_ends, // local_queue_length, // L); // if (r < nk) { // nk = r; // } // } //// time_merge_ += WallTimer::get_time_mark(); // } // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // {// Scale M // if (M < value_M_max) { // M <<= 1; // } else { // M = value_M_max; // } // } // } // } // count_distance_computation_ += tmp_count_total_computation; // tmp_count_total_computation = 0; // // auto &set_K = set_K_list[query_id]; // //#pragma omp parallel for num_threads(num_threads_intra_query_) // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i + base_set_L].id_; //// set_K[k_i] = set_L[k_i].id_; // } // // {// Reset //// std::fill(is_visited.begin(), is_visited.end(), 0); // is_visited.reset(); //// is_visited.clear_all(); // std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0); // } // } // //// {//test //// if (3 == query_id) { //// exit(1); //// } //// } //// { //// for (idi k_i = 0; k_i < K; ++k_i) { //// printf("%u: (%u %f)\n", //// k_i, set_L_list[0][k_i].id_, set_L_list[0][k_i].distance_); //// } //// if (0 == batch_start) { //// exit(1); //// } //// } //} /* * 6/22/2020-21:30 * Do searching on the local_set_L * local_set_L is already sorted * is_visited is already set up. 
*/ inline void Searching::subsearch_with_top_m( const idi value_M_max, const idi query_id, const idi local_L, std::vector<Candidate> &set_L, const idi set_L_start, idi &set_L_size, std::vector<idi> &local_top_m_candidates, boost::dynamic_bitset<> &is_visited, uint64_t &local_count_distance_computation) { const dataf *query_data = queries_load_ + query_id * dimension_; // idi local_top_m_candidates_end = 0; idi k = 0; // Index of first unchecked candidate. idi iter = 0; idi M = 1; // value of M while (k < local_L) { ++iter; subsearch_top_m_for_one_iteration( iter, k, M, query_id, query_data, local_L, set_L, set_L_start, set_L_size, local_top_m_candidates, is_visited, local_count_distance_computation); {// Scale M if (M < value_M_max) { M <<= 1; } else { M = value_M_max; } } } // {//test // printf("set_L_start: %u " // "local_count_distance_computation: %lu\n", // set_L_start, // local_count_distance_computation); // } } //// Backup //inline void Searching::subsearch_with_top_m( // const idi value_M_max, // const idi query_id, // const idi local_L, // std::vector<Candidate> &set_L, // const idi base_set_L, // idi &set_L_end, // std::vector<idi> &local_top_m_candidates, // boost::dynamic_bitset<> &is_visited, // uint64_t &local_count_distance_computation) //{ // const dataf *query_data = queries_load_ + query_id * dimension_; // idi local_top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. // idi iter = 0; // idi M = 1; // value of M // // while (k < local_L) { // ++iter; // // Select M candidates // idi last_k = local_L; //// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. // for (idi c_i = k; c_i < set_L_end && local_top_m_candidates_end < M; ++c_i) { // idi index_set_L = c_i + base_set_L; // if (set_L[index_set_L].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[index_set_L].is_checked_ = true; // local_top_m_candidates[local_top_m_candidates_end++] = set_L[index_set_L].id_; // } // // idi nk = local_L; // // Push M candidates' neighbors into the queue. 
// for (idi c_i = 0; c_i < local_top_m_candidates_end; ++c_i) { // idi cand_id = local_top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // { // Sequential edition // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = 1; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; // ++local_count_distance_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[set_L_end - 1 + base_set_L].distance_) { // continue; // } // // Candidate cand(nb_id, dist, false); // // Thread 0 maintains the "global" queue // idi r = add_into_queue( // set_L, // base_set_L, // set_L_end, // local_L, // cand); // if (r < nk) { // nk = r; // } // } // } // local_top_m_candidates_end = 0; // Clear top_m_candidates // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // // {// Scale M // if (M < value_M_max) { // M <<= 1; // } else { // M = value_M_max; // } // } // } //} /* * 7/6/2020-23:17 * Subsearch only 1 iteration using top-m */ inline void Searching::subsearch_top_m_for_one_iteration( const idi iter, idi &k_uc, const idi value_M, const idi query_id, const dataf *query_data, const idi L, std::vector<Candidate> &set_L, const idi set_L_start, idi &set_L_size, std::vector<idi> &top_m_candidates, boost::dynamic_bitset<> &is_visited, uint64_t &count_distance_computation) { // Select M candidates idi top_m_candidates_end = 0; idi last_k = L; // Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. for (idi c_i = k_uc; c_i < set_L_size && top_m_candidates_end < value_M; ++c_i) { idi index_set_L = c_i + set_L_start; if (set_L[index_set_L].is_checked_) { continue; } last_k = c_i; // Record the location of the last candidate selected. set_L[index_set_L].is_checked_ = true; top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; // {//test // M_ids_.push_back(set_L[index_set_L].id_); // } } idi nk = L; // Push M candidates' neighbors into the queue. 
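    // Expand the selected candidates: for each out-neighbor that has not been visited,
    // compute its distance to the query, drop it if it does not beat the current last
    // entry (set_L_size - 1) of this sub-queue, otherwise insert it with add_into_queue().
    // nk records the lowest insertion position so that the caller's cursor k_uc can be
    // rewound to the first possibly-unchecked entry.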
for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { idi cand_id = top_m_candidates[c_i]; _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); idi out_degree = *out_edges++; for (idi n_i = 0; n_i < out_degree; ++n_i) { _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); } for (idi e_i = 0; e_i < out_degree; ++e_i) { idi nb_id = out_edges[e_i]; { // Sequential edition if (is_visited[nb_id]) { continue; } is_visited[nb_id] = 1; } auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); dataf norm = *nb_data++; ++count_distance_computation; distf dist = compute_distance_with_norm(nb_data, query_data, norm); if (dist > set_L[set_L_size - 1 + set_L_start].distance_) { continue; } Candidate cand(nb_id, dist, false); idi r = add_into_queue( set_L, set_L_start, set_L_size, L, cand); if (r < nk) { nk = r; } } } // top_m_candidates_end = 0; // Clear top_m_candidates if (nk <= last_k) { k_uc = nk; } else { k_uc = last_k + 1; } // {//test // for (idi l_i = 0; l_i < set_L_size; ++l_i) { // L_ids_.push_back(set_L[set_L_start + l_i].id_); // } // std::sort(L_ids_.begin(), L_ids_.end()); // std::sort(M_ids_.begin(), M_ids_.end()); // for (idi m_i = 0; m_i < M_ids_.size(); ++m_i) { // printf("query_id: %u " // "iter: %u " // "M[%u]: " // "%u\n", // query_id, // iter, // m_i, // M_ids_[m_i]); // } // M_ids_.clear(); // for (idi l_i = 0; l_i < L_ids_.size(); ++l_i) { // printf("query_id: %u " // "iter: %u " // "L[%u]: " // "%u\n", // query_id, // iter, // l_i, // L_ids_[l_i]); // } // L_ids_.clear(); // } } ///* // * One more parameter for distance bound // */ //inline void Searching::subsearch_top_m_for_one_iteration_lth( // const distf bound_lth, // const idi iter, // idi &k_uc, // const idi value_M, // const idi query_id, // const dataf *query_data, // const idi L, // std::vector<Candidate> &set_L, // const idi set_L_start, // idi &set_L_size, // std::vector<idi> &top_m_candidates, // boost::dynamic_bitset<> &is_visited, // uint64_t &count_distance_computation) //{ // // Select M candidates // idi top_m_candidates_end = 0; // idi last_k = L; //// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. // for (idi c_i = k_uc; c_i < set_L_size && top_m_candidates_end < value_M; ++c_i) { // idi index_set_L = c_i + set_L_start; // if (set_L[index_set_L].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[index_set_L].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; // } // // idi nk = L; // // Push M candidates' neighbors into the queue. 
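// (This commented-out _lth variant differs from the active one-iteration search above only
// in the pruning test: new distances are compared against the caller-supplied bound_lth
// instead of against the current tail of the local queue.)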
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // { // Sequential edition // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = 1; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; // ++count_distance_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > bound_lth) { // continue; // } // // Candidate cand(nb_id, dist, false); // idi r = add_into_queue( // set_L, // set_L_start, // set_L_size, // L, // cand); // if (r < nk) { // nk = r; // } // } // } // // if (nk <= last_k) { // k_uc = nk; // } else { // k_uc = last_k + 1; // } //} /* * 7/24/2020-10:53 * Subsearch for one iteration, with the global L-th value as the bound, * and the top_m_position indicates the bound for local top-M vertices. */ inline void Searching::subsearch_top_m_for_one_iteration_lth_mth( const distf bound_lth, // const idi top_m_position, const idi iter, idi &k_uc, const idi local_m_count, const idi query_id, const dataf *query_data, const idi L, std::vector<Candidate> &set_L, const idi set_L_start, idi &set_L_size, std::vector<idi> &top_m_candidates, boost::dynamic_bitset<> &is_visited, uint64_t &count_distance_computation, double &time_pick_top_m, uint64_t &count_add_to_queue, double &time_distance_computation, double &time_add_to_queue) { // {//test // printf("query_id: %u " // "iter: %u " // "tid: %u \n", // query_id, // iter, // omp_get_thread_num()); // } // Select M candidates idi top_m_candidates_end = 0; idi last_k = L; // Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. // for (idi c_i = k_uc; c_i < top_m_position; ++c_i) { time_pick_top_m -= WallTimer::get_time_mark(); for (idi c_i = k_uc; c_i < set_L_size && top_m_candidates_end < local_m_count; ++c_i) { idi index_set_L = c_i + set_L_start; if (set_L[index_set_L].is_checked_) { continue; } last_k = c_i; // Record the location of the last candidate selected. set_L[index_set_L].is_checked_ = true; top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; // {//test // M_ids_.push_back(set_L[index_set_L].id_); // } } time_pick_top_m += WallTimer::get_time_mark(); idi nk = L; // Push M candidates' neighbors into the queue. 
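// Expansion step with per-phase instrumentation: time_pick_top_m also covers adjacency
// decoding, time_distance_computation covers the visited check plus the distance
// evaluation, and time_add_to_queue covers queue insertion. Note that the pruning below
// uses the tail of the local queue (set_L[set_L_start + set_L_size - 1]); the comparison
// against the bound_lth parameter is currently commented out.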
for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { time_pick_top_m -= WallTimer::get_time_mark(); idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } time_pick_top_m += WallTimer::get_time_mark(); for (idi e_i = 0; e_i < out_degree; ++e_i) { time_distance_computation -= WallTimer::get_time_mark(); idi nb_id = out_edges[e_i]; { // Sequential edition if (is_visited[nb_id]) { time_distance_computation += WallTimer::get_time_mark(); continue; } is_visited[nb_id] = 1; } auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); dataf norm = *nb_data++; ++count_distance_computation; distf dist = compute_distance_with_norm(nb_data, query_data, norm); time_distance_computation += WallTimer::get_time_mark(); if (dist > set_L[set_L_start + set_L_size - 1].distance_) { // if (dist > bound_lth) { continue; } ++count_add_to_queue; Candidate cand(nb_id, dist, false); // time_pick_top_m -= WallTimer::get_time_mark(); time_add_to_queue -= WallTimer::get_time_mark(); idi r = add_into_queue( set_L, set_L_start, set_L_size, L, cand); if (r < nk) { nk = r; } time_add_to_queue += WallTimer::get_time_mark(); // time_pick_top_m += WallTimer::get_time_mark(); } } if (nk <= last_k) { k_uc = nk; } else { k_uc = last_k + 1; } } ///* // * 7/26/2020-15:41 // * L-th and M-th Selection. // * Seq-Par Phases: when M is 1 and 2, do sequential searching; // * When M is equal and larger than 4, do parallel searching. // * It's for load-balance issue. // */ //inline void Searching::para_search_with_top_m_subsearch_v3( // const idi local_M_max, // const idi local_M_middle, // const idi query_id, // const idi K, // const idi global_L, // const idi local_L, //// const idi total_L, //// const idi init_queue_size, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const std::vector<idi> &local_queues_starts, // std::vector<idi> &local_queues_sizes, // std::vector<idi> &local_m_counts, // std::vector< std::vector<idi> > &top_m_candidates_list, // boost::dynamic_bitset<> &is_visited) //{ // time_initialization_ -= WallTimer::get_time_mark(); // uint64_t tmp_count_computation = 0; // {// Initialization // // is_visited flag array ////#pragma omp parallel for //// Cannot use OMP for bit array is_visited! // for (idi c_i = 0; c_i < global_L; ++c_i) { // is_visited[init_ids[c_i]] = 1; // } // // const dataf *query_data = queries_load_ + query_id * dimension_; //#pragma omp parallel for // for (idi v_i = 0; v_i < global_L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // // Get the distances of all candidates, store in the set set_L. //#pragma omp parallel for reduction(+ : tmp_count_computation) // for (idi id_i = 0; id_i < global_L; ++id_i) { // idi v_id = init_ids[id_i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; // ++tmp_count_computation; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[id_i] = Candidate(v_id, dist, false); // False means not checked. 
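// (After this commented-out initialization, queue 0 holds all global_L candidates; v3
// sorts them and then deals them round-robin (e_i % num_queues) into the per-thread
// queues before the parallel phase, in which every queue expands its own local top-M
// selection each iteration.)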
// } // local_queues_sizes[0] = global_L; // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; // std::sort(set_L.begin(), set_L.begin() + global_L); // } // time_initialization_ += WallTimer::get_time_mark(); // // // Searching // if (num_threads_ == 1) { // Single threads //// std::sort( //// set_L.begin(), //// set_L.end()); // subsearch_with_top_m( // local_M_max, // query_id, // local_L, // set_L, // 0, // local_queues_sizes[0], // top_m_candidates_list[0], // is_visited, // tmp_count_computation); // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; // } else { // Multiple threads // const dataf *query_data = queries_load_ + query_id * dimension_; // const idi num_queues = num_threads_; // idi local_M = 1; // idi iter = 0; // std::vector<idi> ks(num_queues, 0); // // time_sequential_phase_ -= WallTimer::get_time_mark(); // {// Sequential Search for M = 1, 2. // idi &k = ks[0]; // while (k < global_L && local_M < local_M_middle) { // ++iter; // subsearch_top_m_for_one_iteration( // iter, // k, // local_M, // query_id, // query_data, // global_L, // set_L, // 0, // local_queues_sizes[0], // top_m_candidates_list[0], // is_visited, // tmp_count_computation); // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; // // {// Double M // if (local_M < local_M_max) { // local_M <<= 1; // } // } // } // } // time_sequential_phase_ += WallTimer::get_time_mark(); // // time_parallel_phase_ -= WallTimer::get_time_mark(); // distf bound_lth = set_L[global_L - 1].distance_; // {// Parallel Search for M >= 4, or local_M_middle // time_assign_s_ -=WallTimer::get_time_mark(); // {// Assign elements from Queue[0] to others // idi dst_i = 1; // for (idi e_i = 1; e_i < global_L; ++e_i) { // idi dest_sub = e_i % num_queues; // if (0 == dest_sub) { // set_L[dst_i++] = set_L[e_i]; // } else { // set_L[local_queues_starts[dest_sub] + local_queues_sizes[dest_sub]++] = set_L[e_i]; // } // } // local_queues_sizes[0] = dst_i; // } // std::fill(ks.begin(), ks.end(), 0); // // // selecting_unchecked_top_M_seq( // query_id, // iter, // set_L, // ks, // local_M, // num_queues, // local_queues_starts, // local_queues_sizes, // local_m_counts); // time_assign_s_ +=WallTimer::get_time_mark(); // // double tmp_time_pick_top_m = 0; // uint64_t tmp_count_add_to_queue = 0; // uint8_t not_finished = 1; // double tmp_time_distance_computation = 0; // double tmp_time_add_to_queue = 0; // while (true) { // time_expand_ -= WallTimer::get_time_mark(); // not_finished = 0; // ++iter; //#pragma omp parallel for reduction(+ : tmp_count_computation) \ // reduction(+ : tmp_time_pick_top_m) \ // reduction(+ : tmp_count_add_to_queue) \ // reduction(+ : tmp_time_distance_computation) \ // reduction(+ : tmp_time_add_to_queue) // for (idi q_i = 0; q_i < num_queues; ++q_i) { // tmp_time_pick_top_m -= WallTimer::get_time_mark(); // idi L_value = q_i == 0 ? 
global_L : local_L; // idi &k = ks[q_i]; // idi &local_queue_size = local_queues_sizes[q_i]; // auto &local_top_m_candidates = top_m_candidates_list[q_i]; // idi local_m_count = local_m_counts[q_i]; //// if (local_M < num_queues && !local_m_count) { //// local_m_count = 1; //// } // tmp_time_pick_top_m += WallTimer::get_time_mark(); // if (!local_m_count) { // continue; // } // not_finished = 1; // const idi local_queue_start = local_queues_starts[q_i]; // // subsearch_top_m_for_one_iteration_lth_mth( // bound_lth, // iter, // k, // local_m_count, // query_id, // query_data, // L_value, // set_L, // local_queue_start, // local_queue_size, // local_top_m_candidates, // is_visited, // tmp_count_computation, // tmp_time_pick_top_m, // tmp_count_add_to_queue, // tmp_time_distance_computation, // tmp_time_add_to_queue); // } // time_add_to_queue_ += tmp_time_add_to_queue; // tmp_time_add_to_queue = 0; // time_distance_computation_ += tmp_time_distance_computation; // tmp_time_distance_computation = 0; // count_add_to_queue_ += tmp_count_add_to_queue; // tmp_count_add_to_queue = 0; // time_pick_top_m_ += tmp_time_pick_top_m; // tmp_time_pick_top_m = 0; // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; // time_expand_ += WallTimer::get_time_mark(); // if (!not_finished) { // break; // } // {// Scale M // if (local_M < local_M_max) { // local_M <<= 1; // } //// else { //// local_M = value_M_max; //// } // } // time_select_ -= WallTimer::get_time_mark(); //#pragma omp parallel sections // { //#pragma omp section // {// Setecting and update local_queues_lengths //// time_select_L_ -= WallTimer::get_time_mark(); // bound_lth = selecting_top_L_seq( // set_L, // global_L, //// local_L, // num_queues, // local_queues_starts, // local_queues_sizes); //// time_select_L_ += WallTimer::get_time_mark(); // } //#pragma omp section // { //// time_select_M_ -= WallTimer::get_time_mark(); // selecting_unchecked_top_M_seq( // query_id, // iter, // set_L, // ks, // local_M, // num_queues, // local_queues_starts, // local_queues_sizes, // local_m_counts); //// time_select_M_ += WallTimer::get_time_mark(); // } // } // time_select_ += WallTimer::get_time_mark(); //// {//test //// printf("query_id: %u " //// "iter: %u", //// query_id, //// iter); //// printf(" local_queues_sizes:"); //// for (idi i = 0; i < num_queues; ++i) { //// printf(" %u", local_queues_sizes[i]); //// } //// printf(" local_m_counts:"); //// for (idi i = 0; i < num_queues; ++i) { //// printf(" %u", local_m_counts[i]); //// } //// printf(" ks:"); //// for (idi i = 0; i < num_queues; ++i) { //// printf(" %u", ks[i]); //// } //// printf("\n"); //// } // } // } // time_parallel_phase_ += WallTimer::get_time_mark(); // } // //// time_merge_ -= WallTimer::get_time_mark(); // time_ending_ -= WallTimer::get_time_mark(); // {// Return the results to set_K // std::vector<idi> pointer(num_threads_, 0); // // get the first // distf min_dist = FLT_MAX; // idi min_q_i; // idi min_id; // idi min_sub; // idi last_id; // for (int q_i = 0; q_i < num_threads_; ++q_i) { // if (pointer[q_i] >= local_queues_sizes[q_i]) { // continue; // } // idi sub = pointer[q_i] + local_queues_starts[q_i]; // distf tmp_dist = set_L[sub].distance_; // idi tmp_id = set_L[sub].id_; // if (tmp_dist < min_dist) { // min_dist = tmp_dist; // min_id = tmp_id; // min_q_i = q_i; // min_sub = sub; // } else if (tmp_dist == min_dist && tmp_id < min_id) { // min_id = tmp_id; // min_q_i = q_i; // min_sub = sub; // } // } // set_K[0] = set_L[min_sub].id_; //// 
{//test //// printf("query_id: %u " //// "[%u]: " //// "(%u, %f)\n", //// query_id, //// 0, //// set_L[min_sub].id_, set_L[min_sub].distance_); //// } // ++pointer[min_q_i]; // last_id = set_K[0]; // // bool is_finished = false; // idi k_i = 1; // while (k_i < K && !is_finished) { // is_finished = true; // min_dist = FLT_MAX; // for (int q_i = 0; q_i < num_threads_; ++q_i) { // const idi local_queue_size = local_queues_sizes[q_i]; // idi sub = pointer[q_i] + local_queues_starts[q_i]; // // while (pointer[q_i] < local_queue_size // && set_L[sub].id_ == last_id) { // ++pointer[q_i]; // ++sub; // } // if (pointer[q_i] >= local_queue_size) { // continue; // } // is_finished = false; // distf tmp_dist = set_L[sub].distance_; // idi tmp_id = set_L[sub].id_; // if (tmp_dist < min_dist) { // min_dist = tmp_dist; // min_id = tmp_id; // min_q_i = q_i; // min_sub = sub; // } else if (tmp_dist == min_dist && tmp_id < min_id) { // min_id = tmp_id; // min_q_i = q_i; // min_sub = sub; // } // } // set_K[k_i] = set_L[min_sub].id_; //// {//test //// printf("query_id: %u " //// "[%u]: " //// "(%u, %f)\n", //// query_id, //// k_i, //// set_L[min_sub].id_, set_L[min_sub].distance_); //// } // ++pointer[min_q_i]; // ++k_i; // } // } //// time_merge_ += WallTimer::get_time_mark(); // // {// Reset //// std::fill(is_visited.begin(), is_visited.end(), 0); // is_visited.reset(); // std::fill(local_queues_sizes.begin() + 1, local_queues_sizes.end(), 0); // } // // time_ending_ += WallTimer::get_time_mark(); //// {//test //// if (3 == query_id) { //// exit(1); //// } //// } //} // ///* // * 7/27/2020-15:33 // * Same with v3, but gather top-m vertices together // */ //inline void Searching::para_search_with_top_m_subsearch_v4( // const idi local_M_max, // const idi local_M_middle, // const idi query_id, // const idi K, // const idi global_L, // const idi local_L, //// const idi total_L, //// const idi init_queue_size, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const std::vector<idi> &local_queues_starts, // std::vector<idi> &local_queues_sizes, // std::vector<idi> &local_m_counts, // std::vector<idi> &top_m_candidates, //// std::vector< std::vector<idi> > &top_m_candidates_list, // boost::dynamic_bitset<> &is_visited) //{ // time_initialization_ -= WallTimer::get_time_mark(); // uint64_t tmp_count_computation = 0; // {// Initialization // // is_visited flag array ////#pragma omp parallel for //// Cannot use OMP for bit array is_visited! // for (idi c_i = 0; c_i < global_L; ++c_i) { // is_visited[init_ids[c_i]] = 1; // } // // const dataf *query_data = queries_load_ + query_id * dimension_; //#pragma omp parallel for // for (idi v_i = 0; v_i < global_L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // // Get the distances of all candidates, store in the set set_L. //#pragma omp parallel for reduction(+ : tmp_count_computation) // for (idi id_i = 0; id_i < global_L; ++id_i) { // idi v_id = init_ids[id_i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; // ++tmp_count_computation; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[id_i] = Candidate(v_id, dist, false); // False means not checked. 
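// (v4 keeps v3's overall structure, but in each parallel iteration it gathers the
// unchecked top-M vertices from all queues into one shared top_m_candidates array via
// gather_unchecked_top_M_seq and expands them under schedule(static, 1), so candidates
// are dealt to threads one at a time instead of each queue expanding its own top-M.)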
// } // local_queues_sizes[0] = global_L; // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; // std::sort(set_L.begin(), set_L.begin() + global_L); // } // time_initialization_ += WallTimer::get_time_mark(); // // // Searching // if (num_threads_ == 1) { // Single threads //// std::sort( //// set_L.begin(), //// set_L.end()); // subsearch_with_top_m( // local_M_max, // query_id, // local_L, // set_L, // 0, // local_queues_sizes[0], // top_m_candidates, // is_visited, // tmp_count_computation); // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; // } else { // Multiple threads // const dataf *query_data = queries_load_ + query_id * dimension_; // const idi num_queues = num_threads_; // idi local_M = 1; // idi iter = 0; //// std::vector<idi> ks(num_queues, 0); // // time_sequential_phase_ -= WallTimer::get_time_mark(); // {// Sequential Search for M = 1, 2. // idi k = 0; //// idi &k = ks[0]; // while (k < global_L && local_M < local_M_middle) { // ++iter; // subsearch_top_m_for_one_iteration( // iter, // k, // local_M, // query_id, // query_data, // global_L, // set_L, // 0, // local_queues_sizes[0], // top_m_candidates, // is_visited, // tmp_count_computation); // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; // // {// Double M // if (local_M < local_M_max) { // local_M <<= 1; // } // } // } // } // time_sequential_phase_ += WallTimer::get_time_mark(); // // time_parallel_phase_ -= WallTimer::get_time_mark(); // distf bound_lth = set_L[global_L - 1].distance_; // {// Parallel Search for M >= 4, or local_M_middle // time_assign_s_ -=WallTimer::get_time_mark(); // {// Assign elements from Queue[0] to others // idi dst_i = 1; // for (idi e_i = 1; e_i < global_L; ++e_i) { // idi dest_sub = e_i % num_queues; // if (0 == dest_sub) { // set_L[dst_i++] = set_L[e_i]; // } else { // set_L[local_queues_starts[dest_sub] + local_queues_sizes[dest_sub]++] = set_L[e_i]; // } // } // local_queues_sizes[0] = dst_i; // } //// std::fill(ks.begin(), ks.end(), 0); // // idi top_m_candidates_size = 0; //// selecting_unchecked_top_M_seq( //// query_id, //// iter, //// set_L, //// ks, //// local_M, //// num_queues, //// local_queues_starts, //// local_queues_sizes, //// local_m_counts); // time_assign_s_ +=WallTimer::get_time_mark(); // // std::vector<idi> ks(num_queues, 0); // std::vector<idi> nks(num_queues); // std::vector<idi> bound_ks(num_queues); // double tmp_time_pick_top_m = 0; // uint64_t tmp_count_add_to_queue = 0; // uint8_t not_finished = 1; // double tmp_time_distance_computation = 0; // double tmp_time_add_to_queue = 0; // while (true) { // time_expand_ -= WallTimer::get_time_mark(); // not_finished = 0; // ++iter; // // // Gather top-M vertices // time_pick_top_m_ -= WallTimer::get_time_mark(); // gather_unchecked_top_M_seq( // query_id, // iter, // set_L, // ks, // local_M, // num_queues, // local_queues_starts, // local_queues_sizes, // top_m_candidates, // top_m_candidates_size, // bound_ks); // time_pick_top_m_ += WallTimer::get_time_mark(); // if (!top_m_candidates_size) { // time_expand_ += WallTimer::get_time_mark(); // break; // } // std::fill(nks.begin(), nks.end(), global_L); // // // Expand top-M vertices //#pragma omp parallel for schedule(static, 1) \ // reduction(+ : tmp_count_computation) \ // reduction(+ : tmp_count_add_to_queue) \ // reduction(+ : tmp_time_distance_computation) \ // reduction(+ : tmp_time_pick_top_m) \ // reduction(+ : tmp_time_add_to_queue) // for (idi c_i 
= 0; c_i < top_m_candidates_size; ++c_i) { // tmp_time_pick_top_m -= WallTimer::get_time_mark(); // idi tid = omp_get_thread_num(); // const idi set_L_start = local_queues_starts[tid]; // idi &set_L_size = local_queues_sizes[tid]; // idi &nk = nks[tid]; // idi L_value = tid == 0 ? global_L : local_L; // idi cand_id = top_m_candidates[c_i]; //// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; //// for (idi n_i = 0; n_i < out_degree; ++n_i) { //// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); //// } // tmp_time_pick_top_m += WallTimer::get_time_mark(); // // Expand cand_id's neighbors // for (idi e_i = 0; e_i < out_degree; ++e_i) { // tmp_time_distance_computation -= WallTimer::get_time_mark(); // idi nb_id = out_edges[e_i]; // { // Sequential edition // if (is_visited[nb_id]) { // tmp_time_distance_computation += WallTimer::get_time_mark(); // continue; // } // is_visited[nb_id] = 1; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; // ++tmp_count_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // tmp_time_distance_computation += WallTimer::get_time_mark(); // if (dist > set_L[set_L_start + set_L_size - 1].distance_) { //// if (dist > bound_lth) { // continue; // } // ++tmp_count_add_to_queue; // Candidate cand(nb_id, dist, false); // tmp_time_add_to_queue -= WallTimer::get_time_mark(); // idi r = add_into_queue( // set_L, // set_L_start, // set_L_size, // L_value, // cand); // if (r < nk) { // nk = r; // } // tmp_time_add_to_queue += WallTimer::get_time_mark(); // } // } // top_m_candidates_size = 0; // time_add_to_queue_ += tmp_time_add_to_queue; // tmp_time_add_to_queue = 0; // time_distance_computation_ += tmp_time_distance_computation; // tmp_time_distance_computation = 0; // count_add_to_queue_ += tmp_count_add_to_queue; // tmp_count_add_to_queue = 0; // time_pick_top_m_ += tmp_time_pick_top_m; // tmp_time_pick_top_m = 0; // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; // for (idi q_i = 0; q_i < num_queues; ++q_i) { // if (nks[q_i] < bound_ks[q_i]) { // ks[q_i] = nks[q_i]; // } else { // ks[q_i] = bound_ks[q_i]; // } // } // time_expand_ += WallTimer::get_time_mark(); // // time_select_ -= WallTimer::get_time_mark(); // {// Select L-th // bound_lth = selecting_top_L_seq( // set_L, // global_L, // num_queues, // local_queues_starts, // local_queues_sizes); // } // time_select_ += WallTimer::get_time_mark(); // {// Scale M // if (local_M < local_M_max) { // local_M <<= 1; // } // } //// {//test //// printf("query_id: %u " //// "iter: %u", //// query_id, //// iter); //// printf(" local_queues_sizes:"); //// for (idi i = 0; i < num_queues; ++i) { //// printf(" %u", local_queues_sizes[i]); //// } //// printf(" local_m_counts:"); //// for (idi i = 0; i < num_queues; ++i) { //// printf(" %u", local_m_counts[i]); //// } //// printf(" ks:"); //// for (idi i = 0; i < num_queues; ++i) { //// printf(" %u", ks[i]); //// } //// printf("\n"); //// } // } // } // time_parallel_phase_ += WallTimer::get_time_mark(); // } // //// time_merge_ -= WallTimer::get_time_mark(); // time_ending_ -= WallTimer::get_time_mark(); // {// Return the results to set_K // std::vector<idi> pointer(num_threads_, 0); // // get the first // distf min_dist = FLT_MAX; // idi min_q_i; // idi min_id; 
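// (Result merge, same scheme as the active v5 below: keep one cursor per thread queue,
// repeatedly emit the queue head with the smallest (distance, id) pair into set_K, and
// advance cursors past entries whose id equals the last emitted one so that duplicates
// shared across queues are skipped.)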
// idi min_sub; // idi last_id; // for (int q_i = 0; q_i < num_threads_; ++q_i) { // if (pointer[q_i] >= local_queues_sizes[q_i]) { // continue; // } // idi sub = pointer[q_i] + local_queues_starts[q_i]; // distf tmp_dist = set_L[sub].distance_; // idi tmp_id = set_L[sub].id_; // if (tmp_dist < min_dist) { // min_dist = tmp_dist; // min_id = tmp_id; // min_q_i = q_i; // min_sub = sub; // } else if (tmp_dist == min_dist && tmp_id < min_id) { // min_id = tmp_id; // min_q_i = q_i; // min_sub = sub; // } // } // set_K[0] = set_L[min_sub].id_; //// {//test //// printf("query_id: %u " //// "[%u]: " //// "(%u, %f)\n", //// query_id, //// 0, //// set_L[min_sub].id_, set_L[min_sub].distance_); //// } // ++pointer[min_q_i]; // last_id = set_K[0]; // // bool is_finished = false; // idi k_i = 1; // while (k_i < K && !is_finished) { // is_finished = true; // min_dist = FLT_MAX; // for (int q_i = 0; q_i < num_threads_; ++q_i) { // const idi local_queue_size = local_queues_sizes[q_i]; // idi sub = pointer[q_i] + local_queues_starts[q_i]; // // while (pointer[q_i] < local_queue_size // && set_L[sub].id_ == last_id) { // ++pointer[q_i]; // ++sub; // } // if (pointer[q_i] >= local_queue_size) { // continue; // } // is_finished = false; // distf tmp_dist = set_L[sub].distance_; // idi tmp_id = set_L[sub].id_; // if (tmp_dist < min_dist) { // min_dist = tmp_dist; // min_id = tmp_id; // min_q_i = q_i; // min_sub = sub; // } else if (tmp_dist == min_dist && tmp_id < min_id) { // min_id = tmp_id; // min_q_i = q_i; // min_sub = sub; // } // } // set_K[k_i] = set_L[min_sub].id_; //// {//test //// printf("query_id: %u " //// "[%u]: " //// "(%u, %f)\n", //// query_id, //// k_i, //// set_L[min_sub].id_, set_L[min_sub].distance_); //// } // ++pointer[min_q_i]; // ++k_i; // } // } //// time_merge_ += WallTimer::get_time_mark(); // // {// Reset //// std::fill(is_visited.begin(), is_visited.end(), 0); // is_visited.reset(); // std::fill(local_queues_sizes.begin() + 1, local_queues_sizes.end(), 0); // } // // time_ending_ += WallTimer::get_time_mark(); //// {//test //// if (3 == query_id) { //// exit(1); //// } //// } //} /* * 7/28/2020-11:25 * Same with V4, but only gather top-m vertices, but not select top-L. */ inline void Searching::para_search_with_top_m_subsearch_v5( const idi local_M_max, const idi local_M_middle, const idi query_id, const idi K, const idi global_L, const idi local_L, // const idi total_L, // const idi init_queue_size, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K, const std::vector<idi> &local_queues_starts, std::vector<idi> &local_queues_sizes, // std::vector<idi> &local_m_counts, std::vector<idi> &top_m_candidates, // std::vector< std::vector<idi> > &top_m_candidates_list, boost::dynamic_bitset<> &is_visited) { time_initialization_ -= WallTimer::get_time_mark(); uint64_t tmp_count_computation = 0; {// Initialization // is_visited flag array //#pragma omp parallel for // Cannot use OMP for bit array is_visited! for (idi c_i = 0; c_i < global_L; ++c_i) { is_visited[init_ids[c_i]] = 1; } const dataf *query_data = queries_load_ + query_id * dimension_; #pragma omp parallel for for (idi v_i = 0; v_i < global_L; ++v_i) { idi v_id = init_ids[v_i]; _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); } // Get the distances of all candidates, store in the set set_L. 
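// The loop below runs in parallel; each thread accumulates its distance computations in
// tmp_count_computation through the OpenMP reduction, and the total is folded into
// count_distance_computation_ after the loop. is_visited is set sequentially above
// because the boost::dynamic_bitset is not safe for concurrent bit writes. Once the
// distances are in, the first global_L entries of set_L are sorted to form queue 0.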
#pragma omp parallel for reduction(+ : tmp_count_computation) for (idi id_i = 0; id_i < global_L; ++id_i) { idi v_id = init_ids[id_i]; auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); dataf norm = *v_data++; ++tmp_count_computation; distf dist = compute_distance_with_norm(v_data, query_data, norm); set_L[id_i] = Candidate(v_id, dist, false); // False means not checked. } local_queues_sizes[0] = global_L; count_distance_computation_ += tmp_count_computation; tmp_count_computation = 0; std::sort(set_L.begin(), set_L.begin() + global_L); } time_initialization_ += WallTimer::get_time_mark(); // Searching if (num_threads_ == 1) { // Single threads // std::sort( // set_L.begin(), // set_L.end()); subsearch_with_top_m( local_M_max, query_id, local_L, set_L, 0, local_queues_sizes[0], top_m_candidates, is_visited, tmp_count_computation); count_distance_computation_ += tmp_count_computation; tmp_count_computation = 0; } else { // Multiple threads const dataf *query_data = queries_load_ + query_id * dimension_; const idi num_queues = num_threads_; idi local_M = 1; idi iter = 0; // std::vector<idi> ks(num_queues, 0); time_sequential_phase_ -= WallTimer::get_time_mark(); {// Sequential Search for M = 1, 2. idi k = 0; // idi &k = ks[0]; while (k < global_L && local_M < local_M_middle) { ++iter; subsearch_top_m_for_one_iteration( iter, k, local_M, query_id, query_data, global_L, set_L, 0, local_queues_sizes[0], top_m_candidates, is_visited, tmp_count_computation); count_distance_computation_ += tmp_count_computation; tmp_count_computation = 0; {// Double M if (local_M < local_M_max) { local_M <<= 1; } } } } time_sequential_phase_ += WallTimer::get_time_mark(); time_parallel_phase_ -= WallTimer::get_time_mark(); // distf bound_lth = set_L[global_L - 1].distance_; {// Parallel Search for M >= 4, or local_M_middle time_assign_s_ -=WallTimer::get_time_mark(); {// Assign elements from Queue[0] to others idi dst_i = 1; for (idi e_i = 1; e_i < global_L; ++e_i) { idi dest_sub = e_i % num_queues; if (0 == dest_sub) { set_L[dst_i++] = set_L[e_i]; } else { set_L[local_queues_starts[dest_sub] + local_queues_sizes[dest_sub]++] = set_L[e_i]; } } local_queues_sizes[0] = dst_i; } // std::fill(ks.begin(), ks.end(), 0); idi top_m_candidates_size = 0; time_assign_s_ +=WallTimer::get_time_mark(); std::vector<idi> ks(num_queues, 0); std::vector<idi> nks(num_queues); std::vector<idi> bound_ks(num_queues); double tmp_time_pick_top_m = 0; uint64_t tmp_count_add_to_queue = 0; double tmp_time_distance_computation = 0; double tmp_time_add_to_queue = 0; while (true) { time_expand_ -= WallTimer::get_time_mark(); ++iter; // Gather top-M vertices time_gather_ -= WallTimer::get_time_mark(); gather_unchecked_top_M_seq( query_id, iter, set_L, ks, local_M, num_queues, local_queues_starts, local_queues_sizes, top_m_candidates, top_m_candidates_size, bound_ks); time_gather_ += WallTimer::get_time_mark(); {//test printf("query_id: %u " "iter: %u", query_id, iter); printf(" local_queues_sizes:"); for (idi i = 0; i < num_queues; ++i) { printf(" %u", local_queues_sizes[i]); } // printf(" local_m_counts:"); // for (idi i = 0; i < num_queues; ++i) { // printf(" %u", local_m_counts[i]); // } // printf(" ks:"); // for (idi i = 0; i < num_queues; ++i) { // printf(" %u", ks[i]); // } printf(" gathered:"); for (idi i = 0; i < num_queues; ++i) { printf(" %u", bound_ks[i] - ks[i]); } printf("\n"); } if (!top_m_candidates_size) { time_expand_ += WallTimer::get_time_mark(); break; } std::fill(nks.begin(), nks.end(), 
global_L); // Expand top-M vertices #pragma omp parallel for schedule(static, 1) \ reduction(+ : tmp_count_computation) \ reduction(+ : tmp_count_add_to_queue) \ reduction(+ : tmp_time_distance_computation) \ reduction(+ : tmp_time_pick_top_m) \ reduction(+ : tmp_time_add_to_queue) for (idi c_i = 0; c_i < top_m_candidates_size; ++c_i) { tmp_time_pick_top_m -= WallTimer::get_time_mark(); idi tid = omp_get_thread_num(); const idi set_L_start = local_queues_starts[tid]; idi &set_L_size = local_queues_sizes[tid]; idi &nk = nks[tid]; // idi L_value = tid == 0 ? global_L : local_L; idi L_value = local_L; idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } tmp_time_pick_top_m += WallTimer::get_time_mark(); // Expand cand_id's neighbors for (idi e_i = 0; e_i < out_degree; ++e_i) { tmp_time_distance_computation -= WallTimer::get_time_mark(); idi nb_id = out_edges[e_i]; { // Sequential edition if (is_visited[nb_id]) { tmp_time_distance_computation += WallTimer::get_time_mark(); continue; } is_visited[nb_id] = 1; } auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); dataf norm = *nb_data++; ++tmp_count_computation; distf dist = compute_distance_with_norm(nb_data, query_data, norm); tmp_time_distance_computation += WallTimer::get_time_mark(); if (dist > set_L[set_L_start + set_L_size - 1].distance_) { if (set_L_size < L_value) { ++tmp_count_add_to_queue; set_L[set_L_start + set_L_size] = Candidate(nb_id, dist, false); if (set_L_size < nk) { nk = set_L_size; } ++set_L_size; } continue; } // if (dist > set_L[set_L_start + set_L_size - 1].distance_) { //// if (dist > bound_lth) { // continue; // } ++tmp_count_add_to_queue; Candidate cand(nb_id, dist, false); tmp_time_add_to_queue -= WallTimer::get_time_mark(); idi r = add_into_queue( set_L, set_L_start, set_L_size, L_value, cand); if (r < nk) { nk = r; } tmp_time_add_to_queue += WallTimer::get_time_mark(); } } top_m_candidates_size = 0; time_add_to_queue_ += tmp_time_add_to_queue; tmp_time_add_to_queue = 0; time_distance_computation_ += tmp_time_distance_computation; tmp_time_distance_computation = 0; count_add_to_queue_ += tmp_count_add_to_queue; tmp_count_add_to_queue = 0; time_pick_top_m_ += tmp_time_pick_top_m; tmp_time_pick_top_m = 0; count_distance_computation_ += tmp_count_computation; tmp_count_computation = 0; for (idi q_i = 0; q_i < num_queues; ++q_i) { if (nks[q_i] < bound_ks[q_i]) { ks[q_i] = nks[q_i]; } else { ks[q_i] = bound_ks[q_i]; } } time_expand_ += WallTimer::get_time_mark(); // time_select_ -= WallTimer::get_time_mark(); // {// Select L-th // bound_lth = selecting_top_L_seq( // set_L, // global_L, // num_queues, // local_queues_starts, // local_queues_sizes); // } // time_select_ += WallTimer::get_time_mark(); {// Scale M if (local_M < local_M_max) { local_M <<= 1; } } // {//test // printf("query_id: %u " // "iter: %u", // query_id, // iter); // printf(" local_queues_sizes:"); // for (idi i = 0; i < num_queues; ++i) { // printf(" %u", local_queues_sizes[i]); // } //// printf(" local_m_counts:"); //// for (idi i = 0; i < num_queues; ++i) { //// printf(" %u", local_m_counts[i]); //// } //// printf(" ks:"); //// for (idi i = 0; i < num_queues; ++i) { //// printf(" %u", ks[i]); //// } // printf(" 
bound_ks:"); // for (idi i = 0; i < num_queues; ++i) { // printf(" %u", bound_ks[i]); // } // printf("\n"); // } } } time_parallel_phase_ += WallTimer::get_time_mark(); } // time_merge_ -= WallTimer::get_time_mark(); time_ending_ -= WallTimer::get_time_mark(); {// Return the results to set_K std::vector<idi> pointer(num_threads_, 0); // get the first distf min_dist = FLT_MAX; idi min_q_i; idi min_id; idi min_sub; idi last_id; for (int q_i = 0; q_i < num_threads_; ++q_i) { if (pointer[q_i] >= local_queues_sizes[q_i]) { continue; } idi sub = pointer[q_i] + local_queues_starts[q_i]; distf tmp_dist = set_L[sub].distance_; idi tmp_id = set_L[sub].id_; if (tmp_dist < min_dist) { min_dist = tmp_dist; min_id = tmp_id; min_q_i = q_i; min_sub = sub; } else if (tmp_dist == min_dist && tmp_id < min_id) { min_id = tmp_id; min_q_i = q_i; min_sub = sub; } } set_K[0] = set_L[min_sub].id_; // {//test // printf("query_id: %u " // "[%u]: " // "(%u, %f)\n", // query_id, // 0, // set_L[min_sub].id_, set_L[min_sub].distance_); // } ++pointer[min_q_i]; last_id = set_K[0]; bool is_finished = false; idi k_i = 1; while (k_i < K && !is_finished) { is_finished = true; min_dist = FLT_MAX; for (int q_i = 0; q_i < num_threads_; ++q_i) { const idi local_queue_size = local_queues_sizes[q_i]; idi sub = pointer[q_i] + local_queues_starts[q_i]; while (pointer[q_i] < local_queue_size && set_L[sub].id_ == last_id) { ++pointer[q_i]; ++sub; } if (pointer[q_i] >= local_queue_size) { continue; } is_finished = false; distf tmp_dist = set_L[sub].distance_; idi tmp_id = set_L[sub].id_; if (tmp_dist < min_dist) { min_dist = tmp_dist; min_id = tmp_id; min_q_i = q_i; min_sub = sub; } else if (tmp_dist == min_dist && tmp_id < min_id) { min_id = tmp_id; min_q_i = q_i; min_sub = sub; } } set_K[k_i] = set_L[min_sub].id_; // {//test // printf("query_id: %u " // "[%u]: " // "(%u, %f)\n", // query_id, // k_i, // set_L[min_sub].id_, set_L[min_sub].distance_); // } ++pointer[min_q_i]; ++k_i; } } // time_merge_ += WallTimer::get_time_mark(); {// Reset // std::fill(is_visited.begin(), is_visited.end(), 0); is_visited.reset(); std::fill(local_queues_sizes.begin() + 1, local_queues_sizes.end(), 0); } time_ending_ += WallTimer::get_time_mark(); {//test if (3 == query_id) { exit(1); } } } /* * 6/27/2020-12:33 * Do searching on the local_set_L * local_set_L is already sorted * is_visited is already set up. */ inline void Searching::subsearch_for_simple_search( const idi query_id, const idi local_L, std::vector<Candidate> &set_L, const idi base_set_L, idi &set_L_end, // std::vector<uint8_t> &is_visited, boost::dynamic_bitset<> &is_visited, uint64_t &local_count_distance_computation) { const dataf *query_data = queries_load_ + query_id * dimension_; // idi local_top_m_candidates_end = 0; idi k = 0; // Index of first unchecked candidate. idi iter = 0; // idi M = 1; // value of M while (k < local_L) { ++iter; // {//test // printf("query_id: %u " // "iter: %u\n", // query_id, // iter); // } // Select the top-1 unchecked candidate idi top_1; idi last_k = local_L; // Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. for (idi c_i = k; c_i < set_L_end; ++c_i) { idi index_set_L = c_i + base_set_L; if (set_L[index_set_L].is_checked_) { continue; } top_1 = set_L[index_set_L].id_; last_k = c_i; // Record the location of the last candidate selected. 
set_L[index_set_L].is_checked_ = true; // local_top_m_candidates[local_top_m_candidates_end++] = set_L[index_set_L].id_; break; } if (last_k == local_L) { break; } idi nk = local_L; // Push top-1' neighbors into the queue. idi cand_id = top_1; _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); idi out_degree = *out_edges++; for (idi n_i = 0; n_i < out_degree; ++n_i) { _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); } for (idi e_i = 0; e_i < out_degree; ++e_i) { idi nb_id = out_edges[e_i]; { // Sequential edition if (is_visited[nb_id]) { continue; } is_visited[nb_id] = 1; } // {// Critical edition // if (!AtomicOps::CAS(is_visited.data() + nb_id, // static_cast<uint8_t>(0), // static_cast<uint8_t>(1))) { // continue; // } // } auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); dataf norm = *nb_data++; ++local_count_distance_computation; distf dist = compute_distance_with_norm(nb_data, query_data, norm); // { // if (0 == query_id // && (785802 == nb_id // || 180955 == nb_id // || 240996 == nb_id // || 813701 == nb_id // || 708177 == nb_id // || 87578 == nb_id // || 561813 == nb_id // || 701258 == nb_id // || 872728 == nb_id)) { //// && 180955 == nb_id) { // printf("parent: %u " // "nb_id: %u " // "dist: %f " // "base_set_L: %u " // "set_L_end: %u\n", // cand_id, // nb_id, // dist, // base_set_L, // set_L_end); // } // } if (dist > set_L[set_L_end - 1 + base_set_L].distance_) { continue; } Candidate cand(nb_id, dist, false); // Thread 0 maintains the "global" queue idi r = add_into_queue( set_L, base_set_L, set_L_end, local_L, cand); if (r < nk) { nk = r; } } if (nk <= last_k) { k = nk; } else { k = last_k + 1; } } } /* * 6/27/2020-12:26 * Is is good to use subsearch by every thread it self? */ inline void Searching::para_simple_search_subsearch( const idi query_id, const idi K, const idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K, // std::vector<uint8_t> &is_visited) boost::dynamic_bitset<> &is_visited) { uint64_t tmp_count_computation = 0; {// Initialization // is_visited flag array //#pragma omp parallel for // Cannot use OMP for bit array is_visited! for (idi c_i = 0; c_i < L; ++c_i) { is_visited[init_ids[c_i]] = 1; } const dataf *query_data = queries_load_ + query_id * dimension_; #pragma omp parallel for for (idi v_i = 0; v_i < L; ++v_i) { idi v_id = init_ids[v_i]; _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); } // Get the distances of all candidates, store in the set set_L. //#pragma omp parallel for #pragma omp parallel for reduction(+ : tmp_count_computation) for (unsigned i = 0; i < L; i++) { unsigned v_id = init_ids[i]; auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); dataf norm = *v_data++; ++tmp_count_computation; distf dist = compute_distance_with_norm(v_data, query_data, norm); set_L[i] = Candidate(v_id, dist, false); // False means not checked. 
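// The multi-threaded branch below splits set_L into num_queues contiguous sub-queues.
// Illustrative arithmetic only (this snippet is not called anywhere in this file):
//   idi local_queue_length = (L - 1) / num_queues + 1;  // e.g. L = 100, 4 queues -> 25
//   idi local_queue_base   = q_i * local_queue_length;  // start of sub-queue q_i
//   idi local_queue_end    = std::min(local_queue_length, L - local_queue_base);
// Each thread sorts and subsearches its own sub-queue, and merge_in_set_L() combines
// the sub-queues afterwards.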
} count_distance_computation_ += tmp_count_computation; tmp_count_computation = 0; // std::sort( // set_L.begin(), // set_L.begin() + L); } idi queue_end = L; // Searching if (num_threads_ == 1) { // Single threads std::sort( set_L.begin(), set_L.end()); subsearch_for_simple_search( query_id, L, set_L, 0, queue_end, is_visited, tmp_count_computation); count_distance_computation_ += tmp_count_computation; // { //// {//test //// for (idi i = 0; i < queue_end; ++i) { //// printf("start: " //// "query_id: %u " //// "set_L[%u]: " //// "(%u %f)\n", //// query_id, //// i, //// set_L[i].id_, set_L[i].distance_); //// } //// } // // idi half_length = queue_end / 2; // std::sort( // set_L.begin(), // set_L.begin() + half_length); //// {//test //// for (idi i = 0; i < half_length; ++i) { //// printf("sorted: " //// "query_id: %u " //// "set_L[%u]: " //// "(%u %f)\n", //// query_id, //// i, //// set_L[i].id_, set_L[i].distance_); //// } //// } // // subsearch_for_simple_search( // query_id, // half_length, // local_L // set_L, // 0, // base_set_L // half_length, // set_L_end // is_visited, // tmp_count_computation); // //// {//test //// for (idi i = 0; i < half_length; ++i) { //// printf("subsearched: " //// "query_id: %u " //// "set_L[%u]: " //// "(%u %f)\n", //// query_id, //// i, //// set_L[i].id_, set_L[i].distance_); //// } //// } // // std::sort( // set_L.begin() + half_length, // set_L.end()); // //// {//test //// for (idi i = half_length; i < queue_end; ++i) { //// printf("sorted: " //// "query_id: %u " //// "set_L[%u]: " //// "(%u %f)\n", //// query_id, //// i, //// set_L[i].id_, set_L[i].distance_); //// } //// } // // subsearch_for_simple_search( // query_id, // half_length, // local_L // set_L, // half_length, // base_set_L // half_length, // set_L_end // is_visited, // tmp_count_computation); //// {//test //// for (idi i = half_length; i < queue_end; ++i) { //// printf("subsearched: " //// "query_id: %u " //// "set_L[%u]: " //// "(%u %f)\n", //// query_id, //// i, //// set_L[i].id_, set_L[i].distance_); //// } //// } //// {//test //// for (idi i = 0; i < queue_end; ++i) { //// printf("explored: " //// "query_id: %u " //// "set_L[%u]: " //// "(%u %f)\n", //// query_id, //// i, //// set_L[i].id_, set_L[i].distance_); //// } //// } // count_distance_computation_ += tmp_count_computation; // // std::vector <Candidate> tmp_set_L(L); // std::merge(set_L.begin(), set_L.begin() + half_length, // set_L.begin() + half_length, set_L.end(), // tmp_set_L.begin()); // std::copy(tmp_set_L.begin(), tmp_set_L.end(), set_L.begin()); //// {//test //// for (idi i = 0; i < queue_end; ++i) { //// printf("merged: " //// "query_id: %u " //// "set_L[%u]: " //// "(%u %f)\n", //// query_id, //// i, //// set_L[i].id_, set_L[i].distance_); //// } //// } // } } else { // Multiple threads const idi num_queues = num_threads_; const idi local_queue_length = (L - 1) / num_queues + 1; // Parallel for #pragma omp parallel for reduction(+ : tmp_count_computation) for (idi q_i = 0; q_i < num_queues; ++q_i) { idi local_queue_base = q_i * local_queue_length; if (local_queue_base >= L) { continue; } idi local_queue_end = local_queue_length; if (local_queue_base + local_queue_end > L) { local_queue_end = L - local_queue_base; } std::sort( set_L.begin() + local_queue_base, set_L.begin() + local_queue_base + local_queue_end); subsearch_for_simple_search( query_id, local_queue_end, // local_L set_L, local_queue_base, // base_set_L local_queue_end, // set_L_end is_visited, tmp_count_computation); } count_distance_computation_ += 
tmp_count_computation; // Merge // time_merge_ -= WallTimer::get_time_mark(); merge_in_set_L( set_L, L, num_queues, local_queue_length); // time_merge_ += WallTimer::get_time_mark(); } {// Return the results to set_K // How to deal with duplicate? idi last_id = set_L[0].id_; set_K[0] = last_id; idi k_i = 1; idi l_i = 1; while (k_i < K && l_i < L) { if (last_id == set_L[l_i].id_) { ++l_i; continue; } last_id = set_L[l_i++].id_; set_K[k_i++] = last_id; } //#pragma omp parallel for // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; //// set_K[k_i] = set_L[k_i].id_; // } } {// Reset // std::fill(is_visited.begin(), is_visited.end(), 0); is_visited.reset(); // is_visited.clear_all(); } // {//test // if (0 == query_id) { // exit(1); // } // } } ///* // * 6/22/2020-09:38 // * A synchronized last element as the sentinel // */ //inline void Searching::para_search_with_top_m_merge_queues_global_threshold( // const idi value_M_middle, // const idi value_M_max, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; // std::vector<idi> &local_queues_ends, // Sizes of local queue // std::vector<idi> &top_m_candidates, // boost::dynamic_bitset<> &is_visited) //{ //// const idi base_set_L = (num_threads_ - 1) * local_queue_length; // { //#pragma omp parallel for // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = 1; // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; //#pragma omp parallel for // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // uint64_t tmp_count_computation = 0; // // Get the distances of all candidates, store in the set set_L. ////#pragma omp parallel for //#pragma omp parallel for reduction(+ : tmp_count_computation) // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; // ++tmp_count_computation; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i + base_set_L] = Candidate(v_id, dist, false); // False means not checked. // } // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; //// std::sort(set_L.begin(), set_L.begin() + L); // std::sort( // set_L.begin() + base_set_L, // set_L.begin() + base_set_L + L); // local_queues_ends[num_threads_ - 1] = L; // // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. // idi tmp_count = 0; // for debug // idi M = 1; // // { // Single thread // while (k < L && M < value_M_middle) { // ++tmp_count; //// {//test //// printf("tmp_count: %d\n", tmp_count); //// } // // // Select M candidates // idi last_k = L; //// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // idi index_set_L = c_i + base_set_L; // if (set_L[index_set_L].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[index_set_L].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; // } // // idi nk = L; // // Push M candidates' neighbors into the queue. 
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // { // Sequential edition // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = 1; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; // ++tmp_count_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L - 1 + base_set_L].distance_) { // continue; // } // // Candidate cand(nb_id, dist, false); // // Thread 0 maintains the "global" queue // idi r = add_into_queue( // set_L, // base_set_L, // local_queues_ends[num_threads_ - 1], // L, // cand); // if (r < nk) { // nk = r; // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // // {// Scale M // if (M < value_M_max) { // M <<= 1; // } else { // M = value_M_max; // } // } // // } // } // // { // Multiple Threads // while (k < L) { // ++tmp_count; //// {//test //// printf("tmp_count: %d\n", tmp_count); //// } // // Select M candidates // idi last_k = L; //// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // idi index_set_L = c_i + base_set_L; // if (set_L[index_set_L].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[index_set_L].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; // } // // // idi nk = L; // // Push M candidates' neighbors into the queue. ////#pragma omp parallel for reduction(+ : tmp_count_computation) num_threads(real_threads) //#pragma omp parallel for reduction(+ : tmp_count_computation) // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // int tid = omp_get_thread_num(); // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // { // Sequential edition // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = 1; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; // ++tmp_count_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L - 1 + base_set_L].distance_) { // continue; // } // // Candidate cand(nb_id, dist, false); // // Add to the local queue. 
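// (Queue layout used by these commented-out merge-queue variants: a worker thread with
// tid > 0 inserts into its private slice starting at (tid - 1) * local_queue_length,
// while thread 0 inserts into the "global" queue at base_set_L, i.e.
// (num_threads_ - 1) * local_queue_length, and only those insertions update nk.)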
// if (0 != tid) { // // Non-Master threads using local queues // add_into_queue( // set_L, // (tid - 1) * local_queue_length, // local_queues_ends[tid - 1], // local_queue_length, // cand); // } else { // // Thread 0 maintains the "global" queue // idi r = add_into_queue( // set_L, // base_set_L, // local_queues_ends[num_threads_ - 1], // L, // cand); // if (r < nk) { // nk = r; // } // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; // // {// Local queues' ends //// printf("query%u:iter: %u", query_id, tmp_count); // idi total_elements = 0; // for (int i_t = 0; i_t < num_threads_ - 1; ++i_t) { // total_elements += local_queues_ends[i_t]; // } // number_local_elements_ += total_elements; //// printf(" total_elements: %u+%u\n", total_elements - local_queues_ends[num_threads_ - 1], local_queues_ends[num_threads_ - 1]); //// for (int i_t = 0; i_t < num_threads_; ++i_t) { //// printf(" [%u]: %u", i_t, local_queues_ends[i_t]); //// } //// printf("\n"); // } // //// // Merge. Merge all queues in parallel. // { // time_merge_ -= WallTimer::get_time_mark(); // if (num_threads_ > 1) { // idi r = merge_all_queues_para_array( // set_L, // local_queues_ends, // local_queue_length, // L); // if (r < nk) { // nk = r; // } // } // time_merge_ += WallTimer::get_time_mark(); // } // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // {// Scale M // if (M < value_M_max) { // M <<= 1; // } else { // M = value_M_max; // } // } // // } // } // // //#pragma omp parallel for // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i + base_set_L].id_; //// set_K[k_i] = set_L[k_i].id_; // } // // {// Reset //// std::fill(is_visited.begin(), is_visited.end(), 0); // is_visited.reset(); //// is_visited.clear_all(); // std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0); // } // //// {//test //// if (0 == query_id) { //// exit(1); //// } //// } //} ///* // * 6/7/2020-16:55 // * Use 1 threads to scale M until the value_M_middle. // * Then use multiple threads. // * Except for Thread 0, other threads are collectors. They collect, but do not merge. // * Only merge once after Thread 0 stops. // */ //inline void Searching::para_search_with_top_m_merge_queues_collectors( // const idi value_M_middle, // const idi value_M_max, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; // std::vector<idi> &local_queues_ends, // Sizes of local queue //// std::vector<Candidate> &top_m_candidates, // std::vector<idi> &top_m_candidates, //// std::vector<uint8_t> &is_visited) // boost::dynamic_bitset<> &is_visited) //// std::vector<distf> &local_thresholds) //// BitVector &is_visited) //{ // { //#pragma omp parallel for // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = 1; //// is_visited.atomic_set_bit(init_ids[c_i]); // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; //#pragma omp parallel for // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // uint64_t tmp_count_computation = 0; // // Get the distances of all candidates, store in the set set_L. 
////#pragma omp parallel for //#pragma omp parallel for reduction(+ : tmp_count_computation) // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; //// ++count_distance_computation_; // ++tmp_count_computation; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i + base_set_L] = Candidate(v_id, dist, false); // False means not checked. //// set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; //// std::sort(set_L.begin(), set_L.begin() + L); // std::sort( // set_L.begin() + base_set_L, // set_L.begin() + base_set_L + L); //// boost::sort::block_indirect_sort( //// set_L.begin() + base_set_L, //// set_L.begin() + base_set_L + L, //// num_threads_); // local_queues_ends[num_threads_ - 1] = L; // //// std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. // idi tmp_count = 0; // for debug // idi M = 1; // // // Single thread // { // while (k < L && M < value_M_middle) { // ++tmp_count; //// {//test //// printf("tmp_count: %d\n", tmp_count); //// } // //// int real_threads = std::min(static_cast<int>(M), num_threads_); //// idi queue_base = num_threads_ - real_threads; // // Select M candidates // idi last_k = L; //// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // idi index_set_L = c_i + base_set_L; // if (set_L[index_set_L].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[index_set_L].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; // } // // idi nk = L; // // Push M candidates' neighbors into the queue. 
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // { // Sequential edition // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = 1; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation_; // ++tmp_count_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L - 1 + base_set_L].distance_) { // continue; // } // // Candidate cand(nb_id, dist, false); // // Thread 0 maintains the "global" queue // idi r = add_into_queue( // set_L, // base_set_L, // local_queues_ends[num_threads_ - 1], // L, // cand); // if (r < nk) { // nk = r; // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // // {// Scale M // if (M < value_M_max) { // M <<= 1; // } else { // M = value_M_max; // } // } // } // } // // // Multiple Threads // { //// while (k < L/num_threads_/2) { // while (k < L) { // ++tmp_count; //// {//test //// printf("tmp_count: %d\n", tmp_count); //// } //// int real_threads = std::min(static_cast<int>(M), num_threads_); //// idi queue_base = num_threads_ - real_threads; // // Select M candidates // idi last_k = L; //// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // idi index_set_L = c_i + base_set_L; // if (set_L[index_set_L].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[index_set_L].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; // } // // idi chunk_size; // if (num_threads_ <= top_m_candidates_end) { // chunk_size = (top_m_candidates_end - 1) / num_threads_ + 1; // } else { // chunk_size = 1; // } // idi nk = L; // // Push M candidates' neighbors into the queue. 
////#pragma omp parallel for reduction(+ : tmp_count_computation) num_threads(real_threads) ////#pragma omp parallel for reduction(+ : tmp_count_computation) //#pragma omp parallel for reduction(+ : tmp_count_computation) schedule(static, chunk_size) // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // int tid = omp_get_thread_num(); //// { //// if (c_i < chunk_size && tid != 0) { //// printf("query_id: %u " //// "tmp_count: %u " //// "chunk_size: %u " //// "c_i: %u " //// "tid: %u\n", //// query_id, //// tmp_count, //// chunk_size, //// c_i, //// tid); //// } //// } // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // { // Sequential edition // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = 1; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation_; // ++tmp_count_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L - 1 + base_set_L].distance_) { // continue; // } // // Candidate cand(nb_id, dist, false); // // Add to the local queue. // if (0 != tid) { // // Non-Master threads using local queues // add_into_queue( // set_L, // (tid - 1) * local_queue_length, // local_queues_ends[tid - 1], // local_queue_length, // cand); // } else { // // Thread 0 maintains the "global" queue // idi r = add_into_queue( // set_L, // base_set_L, // local_queues_ends[num_threads_ - 1], // L, // cand); // if (r < nk) { // nk = r; // } // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; // ////// // Merge. Merge all queues in parallel. //// { //// time_merge_ -= WallTimer::get_time_mark(); //// if (num_threads_ > 1) { ////// idi r = merge_all_queues_queue_base( ////// set_L, ////// local_queues_ends, ////// queue_base, ////// real_threads, ////// local_queue_length, ////// L); //// idi r = merge_all_queues_para_array( //// set_L, //// local_queues_ends, //// local_queue_length, //// L); //// if (r < nk) { //// nk = r; //// } //// } //// time_merge_ += WallTimer::get_time_mark(); //// } // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // {// Scale M // if (M < value_M_max) { // M <<= 1; // } else { // M = value_M_max; // } // } // } // //// // Merge only once after Master Thread stops. 
//// { //// time_merge_ -= WallTimer::get_time_mark(); //// if (num_threads_ > 1) { ////// idi r = merge_all_queues_queue_base( ////// set_L, ////// local_queues_ends, ////// queue_base, ////// real_threads, ////// local_queue_length, ////// L); //// merge_all_queues_para_array( //// set_L, //// local_queues_ends, //// local_queue_length, //// L); //// } //// time_merge_ += WallTimer::get_time_mark(); //// } // } // // //#pragma omp parallel for // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i + base_set_L].id_; //// set_K[k_i] = set_L[k_i].id_; // } // // {// Reset //// std::fill(is_visited.begin(), is_visited.end(), 0); // is_visited.reset(); //// is_visited.clear_all(); // std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0); // } // //// {//test //// printf("tmp_count: %u\n", tmp_count); //// if (3 == query_id) { //// exit(1); //// } //// } //} ///* // * 6/8/2020-16:39 // * Selecting rather than merging // */ //inline void Searching::para_search_with_top_m_merge_queues_selecting( // const idi value_M_middle, // const idi value_M_max, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; // std::vector<idi> &local_queues_ends, // Sizes of local queue //// std::vector<Candidate> &top_m_candidates, // std::vector<idi> &top_m_candidates, //// std::vector<uint8_t> &is_visited) // boost::dynamic_bitset<> &is_visited) //{ // { //#pragma omp parallel for // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = 1; //// is_visited.atomic_set_bit(init_ids[c_i]); // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; //#pragma omp parallel for // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // uint64_t tmp_count_computation = 0; // // Get the distances of all candidates, store in the set set_L. ////#pragma omp parallel for //#pragma omp parallel for reduction(+ : tmp_count_computation) // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; //// ++count_distance_computation_; // ++tmp_count_computation; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i + base_set_L] = Candidate(v_id, dist, false); // False means not checked. //// set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; //// std::sort(set_L.begin(), set_L.begin() + L); // std::sort( // set_L.begin() + base_set_L, // set_L.begin() + base_set_L + L); //// boost::sort::block_indirect_sort( //// set_L.begin() + base_set_L, //// set_L.begin() + base_set_L + L, //// num_threads_); // local_queues_ends[num_threads_ - 1] = L; // //// std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. 
// idi tmp_count = 0; // for debug // idi M = 1; // // // Single thread // { // while (k < L && M < value_M_middle) { // ++tmp_count; //// {//test //// printf("tmp_count: %d\n", tmp_count); //// } // //// int real_threads = std::min(static_cast<int>(M), num_threads_); //// idi queue_base = num_threads_ - real_threads; // // Select M candidates // idi last_k = L; //// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // idi index_set_L = c_i + base_set_L; // if (set_L[index_set_L].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[index_set_L].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; // } // // idi nk = L; // // Push M candidates' neighbors into the queue. // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // { // Sequential edition // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = 1; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation_; // ++tmp_count_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L - 1 + base_set_L].distance_) { // continue; // } // // Candidate cand(nb_id, dist, false); // // Thread 0 maintains the "global" queue // idi r = add_into_queue( // set_L, // base_set_L, // local_queues_ends[num_threads_ - 1], // L, // cand); // if (r < nk) { // nk = r; // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // // {// Scale M // if (M < value_M_max) { // M <<= 1; // } else { // M = value_M_max; // } // } // } // } // // // Multiple Threads // { //// while (k < L/num_threads_/2) { //// while (k < L) { // while (true) { // ++tmp_count; //// {//test //// printf("tmp_count: %d\n", tmp_count); //// } //// // Select M candidates //// idi last_k = L; ////// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. //// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { //// idi index_set_L = c_i + base_set_L; //// if (set_L[index_set_L].is_checked_) { //// continue; //// } //// last_k = c_i; // Record the location of the last candidate selected. //// set_L[index_set_L].is_checked_ = true; //// top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; //// } // // // Select M candidates // { // idi traverse_count = 0; // idi bound_sub = L; // This is not always true! 
// for (idi sub = 0; sub < bound_sub && top_m_candidates_end < M && traverse_count < L; ++sub) { // for (int tid = 0; tid < num_threads_ && top_m_candidates_end < M && traverse_count < L; ++tid) { // if (sub >= local_queues_ends[tid]) { // continue; // } // idi index_set_L = tid * local_queue_length + sub; // if (set_L[index_set_L].is_checked_) { // continue; // } // set_L[index_set_L].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; // } // } // // if (0 == top_m_candidates_end) { // break; // } // } // //// idi nk = L; // // Push M candidates' neighbors into the queue. ////#pragma omp parallel for reduction(+ : tmp_count_computation) num_threads(real_threads) //#pragma omp parallel for reduction(+ : tmp_count_computation) // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // int tid = omp_get_thread_num(); // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // { // Sequential edition // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = 1; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation_; // ++tmp_count_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L - 1 + base_set_L].distance_) { // continue; // } // // Candidate cand(nb_id, dist, false); // // Add to the local queue. // if (0 != tid) { // // Non-Master threads using local queues // add_into_queue( // set_L, // (tid - 1) * local_queue_length, // local_queues_ends[tid - 1], // local_queue_length, // cand); // } else { // // Thread 0 maintains the "global" queue //// idi r = // add_into_queue( // set_L, // base_set_L, // local_queues_ends[num_threads_ - 1], // L, // cand); //// if (r < nk) { //// nk = r; //// } // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; // //// // Merge. Merge all queues in parallel. 
// { // time_merge_ -= WallTimer::get_time_mark(); // if (num_threads_ > 1) { //// idi r = merge_all_queues_queue_base( //// set_L, //// local_queues_ends, //// queue_base, //// real_threads, //// local_queue_length, //// L); //// idi r = // merge_all_queues_para_array( // set_L, // local_queues_ends, // local_queue_length, // L); //// if (r < nk) { //// nk = r; //// } // } // time_merge_ += WallTimer::get_time_mark(); // } //// if (nk <= last_k) { //// k = nk; //// } else { //// k = last_k + 1; //// } // {// Scale M // if (M < value_M_max) { // M <<= 1; // } else { // M = value_M_max; // } // } // } // } // // ////#pragma omp parallel for //// for (idi k_i = 0; k_i < K; ++k_i) { //// set_K[k_i] = set_L[k_i + base_set_L].id_; ////// set_K[k_i] = set_L[k_i].id_; //// } // // { // idi k_i = 0; // idi bound_sub = K / num_threads_; // for (idi sub = 0; sub < bound_sub; ++sub) { // for (int tid = 0; tid < num_threads_; ++tid) { // idi index_set_L = tid * local_queue_length + sub; // set_K[k_i++] = set_L[index_set_L].id_; // } // } // idi remain = K - k_i; // if (remain) { // for (int tid = 0; tid < remain; ++tid) { // idi index_set_L = tid * local_queue_length + bound_sub; // set_K[k_i++] = set_L[index_set_L].id_; // } // } // } // // {// Reset //// std::fill(is_visited.begin(), is_visited.end(), 0); // is_visited.reset(); //// is_visited.clear_all(); // std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0); // } // //// {//test //// printf("tmp_count: %u\n", tmp_count); //// if (3 == query_id) { //// exit(1); //// } //// } //} } // namespace PANNS #endif //BATCH_SEARCHING_SEARCHING_H
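The two commented-out search variants above share one queue layout: worker threads push newly discovered candidates into bounded per-thread local queues (offset by local_queue_length), thread 0 owns the "global" queue of length L, and the local queues are merged or drained afterwards. Below is a minimal standalone sketch of that pattern, not the original PANNS implementation; Cand, add_into_queue_sketch, and the capacities are simplified stand-ins for the Candidate/add_into_queue machinery used in the source.

// Minimal sketch (not the original PANNS code): every thread inserts
// candidates into its own bounded, sorted local queue; merging into the
// global queue happens once, after the parallel loop, mirroring
// merge_all_queues_para_array in the commented-out source.
#include <omp.h>
#include <algorithm>
#include <cstdint>
#include <vector>

struct Cand { uint32_t id; float dist; };

// Bounded sorted insert; q must be pre-sized to 'capacity'.
// Returns the insertion position, or 'capacity' if the candidate was dropped.
static size_t add_into_queue_sketch(std::vector<Cand> &q, size_t &end,
                                    size_t capacity, Cand c)
{
    auto it = std::lower_bound(q.begin(), q.begin() + end, c,
        [](const Cand &a, const Cand &b) { return a.dist < b.dist; });
    size_t pos = static_cast<size_t>(it - q.begin());
    if (pos >= capacity) return capacity;      // worse than every kept element
    if (end < capacity) ++end;                 // grow until the queue is full
    std::copy_backward(q.begin() + pos, q.begin() + end - 1, q.begin() + end);
    q[pos] = c;
    return pos;
}

// local_queues must hold at least omp_get_max_threads() queues,
// each pre-sized to local_capacity.
void expand_candidates_sketch(const std::vector<Cand> &new_cands,
                              std::vector<std::vector<Cand>> &local_queues,
                              std::vector<size_t> &local_ends,
                              size_t local_capacity)
{
    #pragma omp parallel for
    for (long c_i = 0; c_i < static_cast<long>(new_cands.size()); ++c_i) {
        int tid = omp_get_thread_num();        // each thread touches only its own queue
        add_into_queue_sketch(local_queues[tid], local_ends[tid],
                              local_capacity, new_cands[c_i]);
    }
    // A subsequent single-threaded (or pairwise-parallel) merge step would
    // combine the local queues into the global top-L queue.
}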
SSBOStreamer.h
/* * SSBOStreamer.h * * Copyright (C) 2018 by VISUS (Universitaet Stuttgart) * Alle Rechte vorbehalten. */ #ifndef MEGAMOLCORE_SSBOSTREAMER_H_INCLUDED #define MEGAMOLCORE_SSBOSTREAMER_H_INCLUDED #if (defined(_MSC_VER) && (_MSC_VER > 1000)) #pragma once #endif /* (defined(_MSC_VER) && (_MSC_VER > 1000)) */ #include "vislib/graphics/gl/IncludeAllGL.h" #include "vislib/graphics/gl/GLSLShader.h" #include <vector> #include <algorithm> #include <cinttypes> #include "mmcore/api/MegaMolCore.std.h" #include <omp.h> namespace megamol { namespace core { namespace utility { /// A class that helps you stream some memory to a persistently mapped /// buffer that can be used as a SSBO. Abstracts some micro-management /// like items/chunk and the sync objects. You can align multiple streamers /// by giving the first a desired buffer size and make all others follow /// the resulting GetMaxNumItemsPerChunk to set their buffer sizes automatically. /// See NGSphereRenderer for a usage example. class MEGAMOLCORE_API SSBOStreamer { public: SSBOStreamer(const std::string& debugLabel = std::string()); ~SSBOStreamer(); /// @param data the pointer to the original data /// @param srcStride the size of a single data item in the original data /// @param dstStride the size of a single data item that will be uploaded /// and must not be split across buffers /// @param numItems the length of the original data in multiples of stride /// @param numBuffers how long the ring buffer should be /// @param bufferSize the size of a ring buffer in bytes /// @returns number of chunks GLuint SetDataWithSize(const void *data, GLuint srcStride, GLuint dstStride, size_t numItems, GLuint numBuffers, GLuint bufferSize); /// @param data the pointer to the original data /// @param srcStride the size of a single data item in the original data /// @param dstStride the size of a single data item that will be uploaded /// and must not be split across buffers /// @param numItems the length of the original data in multiples of stride /// @param numBuffers how long the ring buffer should be /// @param numChunks how many chunks you want to upload /// @returns the size of a ring buffer in bytes GLuint SetDataWithItems(const void *data, GLuint srcStride, GLuint dstStride, size_t numItems, GLuint numBuffers, GLuint numChunks); /// @param idx the chunk to upload [0..SetData()-1] /// @param numItems returns the number of items in this chunk /// (last one is probably shorter than bufferSize) /// @param sync returns the internal ID of a sync object abstraction /// @param dstOffset the buffer offset required for binding the buffer range /// @param dstLength the buffer length required for binding the buffer range void UploadChunk(unsigned int idx, GLuint& numItems, unsigned int& sync, GLsizeiptr& dstOffset, GLsizeiptr& dstLength); /// use this uploader if you want to add a per-item transformation /// that will be executed inside an omp parallel for /// @param idx the chunk to upload [0..SetData()-1] /// @param copyOp the lambda you want to execute. 
A really hacky subset-changing /// one could be: /// [vertStride](const char *src, char *dst) -> void { /// memcpy(dst, src, vertStride); /// *reinterpret_cast<float *>(dst + 4) = /// *reinterpret_cast<const float *>(src + 4) - 100.0f; /// } /// @param numItems returns the number of items in this chunk /// (last one is probably shorter than bufferSize) /// @param sync returns the internal ID of a sync object abstraction /// @param dstOffset the buffer offset required for binding the buffer range /// @param dstLength the buffer length required for binding the buffer range template<class fun> void UploadChunk(unsigned int idx, fun copyOp, GLuint& numItems, unsigned int& sync, GLsizeiptr& dstOffset, GLsizeiptr& dstLength); /// @param sync the abstract sync object to signal as done void SignalCompletion(unsigned int sync); /// @param numItemsPerChunk the minimum number of items per chunk /// @param up rounds up if true, otherwise rounds down. /// @returns the alignment-friendly (rounded) number of items per chunk GLuint GetNumItemsPerChunkAligned(GLuint numItemsPerChunk, bool up = false) const; GLuint GetHandle(void) const { return theSSBO; } GLuint GetNumChunks(void) const { return numChunks; } GLuint GetMaxNumItemsPerChunk(void) const { return numItemsPerChunk; } private: static void queueSignal(GLsync &syncObj); static void waitSignal(GLsync &syncObj); void genBufferAndMap(GLuint numBuffers, GLuint bufferSize); GLuint theSSBO; /// in bytes! GLuint bufferSize; GLuint numBuffers; GLuint srcStride; GLuint dstStride; const void* theData; void* mappedMem; size_t numItems; GLuint numChunks; GLuint numItemsPerChunk; /// which ring element we upload to next GLuint currIdx; std::vector<GLsync> fences; int numThr; std::string debugLabel; int offsetAlignment = 0; }; template<class fun> void SSBOStreamer::UploadChunk(unsigned int idx, fun copyOp, GLuint& numItems, unsigned int& sync, GLsizeiptr& dstOffset, GLsizeiptr& dstLength) { if (theData == nullptr || idx > this->numChunks - 1) return; // we did not succeed doing anything yet numItems = sync = 0; dstOffset = this->bufferSize * this->currIdx; GLsizeiptr srcOffset = this->numItemsPerChunk * this->srcStride * idx; char *dst = static_cast<char*>(this->mappedMem) + dstOffset; const char *src = static_cast<const char*>(this->theData) + srcOffset; const size_t itemsThisTime = std::min<unsigned int>( this->numItems - idx * this->numItemsPerChunk, this->numItemsPerChunk); dstLength = itemsThisTime * this->dstStride; const void *srcEnd = src + itemsThisTime * srcStride; //printf("going to upload %llu x %u bytes to offset %lld from %lld\n", itemsThisTime, // this->dstStride, dstOffset, srcOffset); waitSignal(this->fences[currIdx]); #pragma omp parallel for for (INT64 i = 0; i < itemsThisTime; ++i) { copyOp(src + i * this->srcStride, dst + i * this->dstStride); } glFlushMappedNamedBufferRange(this->theSSBO, this->bufferSize * this->currIdx, itemsThisTime * this->dstStride); numItems = itemsThisTime; sync = currIdx; currIdx = (currIdx + 1) % this->numBuffers; } } /* end namespace utility */ } /* end namespace core */ } /* end namespace megamol */ #endif /* MEGAMOLCORE_SSBOSTREAMER_H_INCLUDED */
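SSBOStreamer keeps one GLsync fence per ring slot (the fences vector), and the declarations above only hint at how queueSignal/waitSignal use them: wait before the CPU rewrites a mapped slot, signal after the GL command that consumes it has been issued. The helpers' bodies are not part of this header, so the following is only a hedged sketch of the standard fence idiom such helpers typically wrap; the function names here are illustrative, not the class's actual implementation.

// Hedged sketch of the usual GLsync fence idiom behind per-slot helpers
// like queueSignal()/waitSignal(); not taken from the real SSBOStreamer.cpp.
#include "vislib/graphics/gl/IncludeAllGL.h"

// Called after issuing the GL command that reads the current ring slot.
static void queue_signal_sketch(GLsync &syncObj)
{
    if (syncObj) glDeleteSync(syncObj);        // replace any stale fence
    syncObj = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
}

// Called before the CPU writes into a ring slot again.
static void wait_signal_sketch(GLsync &syncObj)
{
    if (!syncObj) return;                      // slot has never been used
    for (;;) {
        GLenum r = glClientWaitSync(syncObj, GL_SYNC_FLUSH_COMMANDS_BIT,
                                    1000000);  // 1 ms per attempt
        if (r == GL_ALREADY_SIGNALED || r == GL_CONDITION_SATISFIED ||
            r == GL_WAIT_FAILED) {
            break;                             // done (or give up on error)
        }
    }
    glDeleteSync(syncObj);
    syncObj = nullptr;
}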
#ifndef MEGAMOLCORE_SSBOSTREAMER_H_INCLUDED #define MEGAMOLCORE_SSBOSTREAMER_H_INCLUDED #if (defined(_MSC_VER) && (_MSC_VER > 1000)) #pragma once #endif /* (defined(_MSC_VER) && (_MSC_VER > 1000)) */ #include "vislib/graphics/gl/IncludeAllGL.h" #include "vislib/graphics/gl/GLSLShader.h" #include <vector> #include <algorithm> #include <cinttypes> #include "mmcore/api/MegaMolCore.std.h" #include <omp.h> namespace megamol { namespace core { namespace utility { ///A class that helps you stream some memory to a persistently mapped /// buffer that can be used as a SSBO.Abstracts some micro - management /// like items / chunk and the sync objects.You can align multiple streamers /// by giving the first a desired buffer size and make all others follow /// the resulting GetMaxNumItemsPerChunk to set their buffer sizes automatically. /// See NGSphereRenderer for a usage example. class MEGAMOLCORE_API SSBOStreamer { public: SSBOStreamer(const std::string & debugLabel = std::string()); ~SSBOStreamer(); ///@param data the pointer to the original data /// @param srcStride the size of a single data item in the original data /// @param dstStride the size of a single data item that will be uploaded /// and must not be split across buffers /// @param numItems the length of the original data in multiples of stride /// @param numBuffers how long the ring buffer should be /// @param bufferSize the size of a ring buffer in bytes /// @returns number of chunks GLuint SetDataWithSize(const void *data, GLuint srcStride, GLuint dstStride, size_t numItems, GLuint numBuffers, GLuint bufferSize); ///@param data the pointer to the original data /// @param srcStride the size of a single data item in the original data /// @param dstStride the size of a single data item that will be uploaded /// and must not be split across buffers /// @param numItems the length of the original data in multiples of stride /// @param numBuffers how long the ring buffer should be /// @param numChunks how many chunks you want to upload /// @returns the size of a ring buffer in bytes GLuint SetDataWithItems(const void *data, GLuint srcStride, GLuint dstStride, size_t numItems, GLuint numBuffers, GLuint numChunks); ///@param idx the chunk to upload[0..SetData() - 1] /// @param numItems returns the number of items in this chunk /// (last one is probably shorter than bufferSize) /// @param sync returns the internal ID of a sync object abstraction /// @param dstOffset the buffer offset required for binding the buffer range /// @param dstLength the buffer length required for binding the buffer range void UploadChunk(unsigned int idx, GLuint & numItems, unsigned int &sync, GLsizeiptr & dstOffset, GLsizeiptr & dstLength); ///use this uploader if you want to add a per - item transformation /// that will be executed inside an omp parallel for ///@param idx the chunk to upload[0..SetData() - 1] ///@param copyOp the lambda you want to execute.A really hacky subset - changing /// one could be: ///[vertStride] (const char *src, char *dst)->void { ///memcpy(dst, src, vertStride); ///*reinterpret_cast < float *>(dst + 4) = ///*reinterpret_cast < const float *>(src + 4) - 100.0 f; /// } ///@param numItems returns the number of items in this chunk /// (last one is probably shorter than bufferSize) /// @param sync returns the internal ID of a sync object abstraction /// @param dstOffset the buffer offset required for binding the buffer range /// @param dstLength the buffer length required for binding the buffer range template < class fun > void 
UploadChunk(unsigned int idx, fun copyOp, GLuint & numItems, unsigned int &sync, GLsizeiptr & dstOffset, GLsizeiptr & dstLength); ///@param sync the abstract sync object to signal as done void SignalCompletion(unsigned int sync); ///@param numItemsPerChunk the minimum number of items per chunk /// @param up rounds up if true, otherwise rounds down. /// @returns the alignment - friendly(rounded) number of items per chunk GLuint GetNumItemsPerChunkAligned(GLuint numItemsPerChunk, bool up = false) const; GLuint GetHandle(void)const { return theSSBO; } GLuint GetNumChunks(void)const { return numChunks; } GLuint GetMaxNumItemsPerChunk(void)const { return numItemsPerChunk; } private: static void queueSignal(GLsync & syncObj); static void waitSignal(GLsync & syncObj); void genBufferAndMap(GLuint numBuffers, GLuint bufferSize); GLuint theSSBO; ///in bytes ! GLuint bufferSize; GLuint numBuffers; GLuint srcStride; GLuint dstStride; const void *theData; void *mappedMem; size_t numItems; GLuint numChunks; GLuint numItemsPerChunk; ///which ring element we upload to next GLuint currIdx; std: : vector < GLsync > fences; int numThr; std: : string debugLabel; int offsetAlignment = 0; }; template < class fun > void SSBOStreamer::UploadChunk(unsigned int idx, fun copyOp, GLuint & numItems, unsigned int &sync, GLsizeiptr & dstOffset, GLsizeiptr & dstLength) { if (theData == nullptr || idx > this->numChunks - 1) return; //we did not succeed doing anything yet numItems = sync = 0; dstOffset = this->bufferSize * this->currIdx; GLsizeiptr srcOffset = this->numItemsPerChunk * this->srcStride * idx; char *dst = static_cast < char *>(this->mappedMem) + dstOffset; const char *src = static_cast < const char *>(this->theData) + srcOffset; const size_t itemsThisTime = std::min < unsigned int >( this->numItems - idx * this->numItemsPerChunk, this->numItemsPerChunk); dstLength = itemsThisTime * this->dstStride; const void *srcEnd = src + itemsThisTime * srcStride; //printf("going to upload %llu x %u bytes to offset %lld from %lld\n", itemsThisTime, //this->dstStride, dstOffset, srcOffset); waitSignal(this->fences[currIdx]); for (INT64 i = 0; i < itemsThisTime; ++i) { copyOp(src + i * this->srcStride, dst + i * this->dstStride); } glFlushMappedNamedBufferRange(this->theSSBO, this->bufferSize * this->currIdx, itemsThisTime * this->dstStride); numItems = itemsThisTime; sync = currIdx; currIdx = (currIdx + 1) % this->numBuffers; } } /* end namespace utility */ } /* end namespace core */ } /* end namespace megamol */
#ifndef MEGAMOLCORE_SSBOSTREAMER_H_INCLUDED #define MEGAMOLCORE_SSBOSTREAMER_H_INCLUDED #if (defined(_MSC_VER) && (_MSC_VER > 1000)) #pragma once #endif /* (defined(_MSC_VER) && (_MSC_VER > 1000)) */ #include "vislib/graphics/gl/IncludeAllGL.h" #include "vislib/graphics/gl/GLSLShader.h" #include <vector> #include <algorithm> #include <cinttypes> #include "mmcore/api/MegaMolCore.std.h" #include <omp.h> namespace megamol { namespace core { namespace utility { ///A class that helps you stream some memory to a persistently mapped /// buffer that can be used as a SSBO.Abstracts some micro - management /// like items / chunk and the sync objects.You can align multiple streamers /// by giving the first a desired buffer size and make all others follow /// the resulting GetMaxNumItemsPerChunk to set their buffer sizes automatically. /// See NGSphereRenderer for a usage example. class MEGAMOLCORE_API SSBOStreamer { public: SSBOStreamer(const std::string & debugLabel = std::string()); ~SSBOStreamer(); ///@param data the pointer to the original data /// @param srcStride the size of a single data item in the original data /// @param dstStride the size of a single data item that will be uploaded /// and must not be split across buffers /// @param numItems the length of the original data in multiples of stride /// @param numBuffers how long the ring buffer should be /// @param bufferSize the size of a ring buffer in bytes /// @returns number of chunks GLuint SetDataWithSize(const void *data, GLuint srcStride, GLuint dstStride, size_t numItems, GLuint numBuffers, GLuint bufferSize); ///@param data the pointer to the original data /// @param srcStride the size of a single data item in the original data /// @param dstStride the size of a single data item that will be uploaded /// and must not be split across buffers /// @param numItems the length of the original data in multiples of stride /// @param numBuffers how long the ring buffer should be /// @param numChunks how many chunks you want to upload /// @returns the size of a ring buffer in bytes GLuint SetDataWithItems(const void *data, GLuint srcStride, GLuint dstStride, size_t numItems, GLuint numBuffers, GLuint numChunks); ///@param idx the chunk to upload[0..SetData() - 1] /// @param numItems returns the number of items in this chunk /// (last one is probably shorter than bufferSize) /// @param sync returns the internal ID of a sync object abstraction /// @param dstOffset the buffer offset required for binding the buffer range /// @param dstLength the buffer length required for binding the buffer range void UploadChunk(unsigned int idx, GLuint & numItems, unsigned int &sync, GLsizeiptr & dstOffset, GLsizeiptr & dstLength); ///use this uploader if you want to add a per - item transformation /// that will be executed inside an omp parallel for ///@param idx the chunk to upload[0..SetData() - 1] ///@param copyOp the lambda you want to execute.A really hacky subset - changing /// one could be: ///[vertStride] (const char *src, char *dst)->void { ///memcpy(dst, src, vertStride); ///*reinterpret_cast < float *>(dst + 4) = ///*reinterpret_cast < const float *>(src + 4) - 100.0 f; /// } ///@param numItems returns the number of items in this chunk /// (last one is probably shorter than bufferSize) /// @param sync returns the internal ID of a sync object abstraction /// @param dstOffset the buffer offset required for binding the buffer range /// @param dstLength the buffer length required for binding the buffer range template < class fun > void 
UploadChunk(unsigned int idx, fun copyOp, GLuint & numItems, unsigned int &sync, GLsizeiptr & dstOffset, GLsizeiptr & dstLength); ///@param sync the abstract sync object to signal as done void SignalCompletion(unsigned int sync); ///@param numItemsPerChunk the minimum number of items per chunk /// @param up rounds up if true, otherwise rounds down. /// @returns the alignment - friendly(rounded) number of items per chunk GLuint GetNumItemsPerChunkAligned(GLuint numItemsPerChunk, bool up = false) const; GLuint GetHandle(void)const { return theSSBO; } GLuint GetNumChunks(void)const { return numChunks; } GLuint GetMaxNumItemsPerChunk(void)const { return numItemsPerChunk; } private: static void queueSignal(GLsync & syncObj); static void waitSignal(GLsync & syncObj); void genBufferAndMap(GLuint numBuffers, GLuint bufferSize); GLuint theSSBO; ///in bytes ! GLuint bufferSize; GLuint numBuffers; GLuint srcStride; GLuint dstStride; const void *theData; void *mappedMem; size_t numItems; GLuint numChunks; GLuint numItemsPerChunk; ///which ring element we upload to next GLuint currIdx; std: : vector < GLsync > fences; int numThr; std: : string debugLabel; int offsetAlignment = 0; }; template < class fun > void SSBOStreamer::UploadChunk(unsigned int idx, fun copyOp, GLuint & numItems, unsigned int &sync, GLsizeiptr & dstOffset, GLsizeiptr & dstLength) { if (theData == nullptr || idx > this->numChunks - 1) return; //we did not succeed doing anything yet numItems = sync = 0; dstOffset = this->bufferSize * this->currIdx; GLsizeiptr srcOffset = this->numItemsPerChunk * this->srcStride * idx; char *dst = static_cast < char *>(this->mappedMem) + dstOffset; const char *src = static_cast < const char *>(this->theData) + srcOffset; const size_t itemsThisTime = std::min < unsigned int >( this->numItems - idx * this->numItemsPerChunk, this->numItemsPerChunk); dstLength = itemsThisTime * this->dstStride; const void *srcEnd = src + itemsThisTime * srcStride; //printf("going to upload %llu x %u bytes to offset %lld from %lld\n", itemsThisTime, //this->dstStride, dstOffset, srcOffset); waitSignal(this->fences[currIdx]); #pragma omp parallel for for (INT64 i = 0; i < itemsThisTime; ++i) { copyOp(src + i * this->srcStride, dst + i * this->dstStride); } glFlushMappedNamedBufferRange(this->theSSBO, this->bufferSize * this->currIdx, itemsThisTime * this->dstStride); numItems = itemsThisTime; sync = currIdx; currIdx = (currIdx + 1) % this->numBuffers; } } /* end namespace utility */ } /* end namespace core */ } /* end namespace megamol */
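For reference, a typical caller drives the streamer the way the doc comments describe: set the data once, then per chunk upload with a per-item lambda, bind the returned buffer range, draw, and signal completion so the ring slot's fence can be recycled. This is a hedged usage sketch; only the SSBOStreamer member calls come from the header above, while the include path, the 32 MiB buffer size, and drawChunk() are illustrative placeholders.

// Hedged usage sketch of SSBOStreamer::UploadChunk with a per-item copyOp.
#include <cstring>
#include "mmcore/utility/SSBOStreamer.h"      // assumed include path

void streamAndDraw(megamol::core::utility::SSBOStreamer &streamer,
                   const char *particles, GLuint srcStride, GLuint dstStride,
                   size_t numItems, void (*drawChunk)(GLuint count))
{
    // 3 ring buffers of 32 MiB each (placeholder sizes).
    const GLuint numChunks = streamer.SetDataWithSize(
        particles, srcStride, dstStride, numItems, 3, 32u * 1024u * 1024u);

    for (GLuint chunk = 0; chunk < numChunks; ++chunk) {
        GLuint itemsThisChunk = 0;
        unsigned int sync = 0;
        GLsizeiptr dstOff = 0, dstLen = 0;

        // The lambda runs inside UploadChunk's parallel copy loop, once per item.
        streamer.UploadChunk(chunk,
            [dstStride](const char *src, char *dst) {
                std::memcpy(dst, src, dstStride);   // plain copy; assumes dstStride <= srcStride
            },
            itemsThisChunk, sync, dstOff, dstLen);

        glBindBufferRange(GL_SHADER_STORAGE_BUFFER, 0, streamer.GetHandle(),
                          dstOff, dstLen);
        drawChunk(itemsThisChunk);                  // hypothetical draw call
        streamer.SignalCompletion(sync);            // lets this ring slot be reused
    }
}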
9717.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #define EXTRALARGE_DATASET #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "correlation.h" /* Array initialization. */ static void init_array (int m, int n, DATA_TYPE *float_n, DATA_TYPE POLYBENCH_2D(data,M,N,m,n)) { int i, j; *float_n = 1.2; for (i = 0; i < m; i++) for (j = 0; j < n; j++) data[i][j] = ((DATA_TYPE) i*j) / M; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int m, DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m)) { int i, j; for (i = 0; i < m; i++) for (j = 0; j < m; j++) { fprintf (stderr, DATA_PRINTF_MODIFIER, symmat[i][j]); if ((i * m + j) % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_correlation(int m, int n, DATA_TYPE float_n, DATA_TYPE POLYBENCH_2D(data,M,N,m,n), DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m), DATA_TYPE POLYBENCH_1D(mean,M,m), DATA_TYPE POLYBENCH_1D(stddev,M,m)) { int i, j, j1, j2; DATA_TYPE eps = 0.1f; #define sqrt_of_array_cell(x,j) sqrt(x[j]) #pragma scop /* Determine mean of column vectors of input data matrix */ #pragma omp parallel private(i, j, j2) num_threads(2) { #pragma omp for schedule(static, 16) for (j = 0; j < _PB_M; j++) { mean[j] = 0.0; for (i = 0; i < _PB_N; i++) mean[j] += data[i][j]; mean[j] /= float_n; } /* Determine standard deviations of column vectors of data matrix. */ #pragma omp for schedule(static, 16) for (j = 0; j < _PB_M; j++) { stddev[j] = 0.0; for (i = 0; i < _PB_N; i++) stddev[j] += (data[i][j] - mean[j]) * (data[i][j] - mean[j]); stddev[j] /= float_n; stddev[j] = sqrt_of_array_cell(stddev, j); /* The following in an inelegant but usual way to handle near-zero std. dev. values, which below would cause a zero- divide. */ stddev[j] = stddev[j] <= eps ? 1.0 : stddev[j]; } /* Center and reduce the column vectors. */ #pragma omp for schedule(static, 16) for (i = 0; i < _PB_N; i++) for (j = 0; j < _PB_M; j++) { data[i][j] -= mean[j]; data[i][j] /= sqrt(float_n) * stddev[j]; } /* Calculate the m * m correlation matrix. */ #pragma omp for schedule(static, 16) for (j1 = 0; j1 < _PB_M-1; j1++) { symmat[j1][j1] = 1.0; for (j2 = j1+1; j2 < _PB_M; j2++) { symmat[j1][j2] = 0.0; for (i = 0; i < _PB_N; i++) symmat[j1][j2] += (data[i][j1] * data[i][j2]); symmat[j2][j1] = symmat[j1][j2]; } } } #pragma endscop symmat[_PB_M-1][_PB_M-1] = 1.0; } int main(int argc, char** argv) { /* Retrieve problem size. */ int n = N; int m = M; /* Variable declaration/allocation. */ DATA_TYPE float_n; POLYBENCH_2D_ARRAY_DECL(data,DATA_TYPE,M,N,m,n); POLYBENCH_2D_ARRAY_DECL(symmat,DATA_TYPE,M,M,m,m); POLYBENCH_1D_ARRAY_DECL(mean,DATA_TYPE,M,m); POLYBENCH_1D_ARRAY_DECL(stddev,DATA_TYPE,M,m); /* Initialize array(s). */ init_array (m, n, &float_n, POLYBENCH_ARRAY(data)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_correlation (m, n, float_n, POLYBENCH_ARRAY(data), POLYBENCH_ARRAY(symmat), POLYBENCH_ARRAY(mean), POLYBENCH_ARRAY(stddev)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. 
All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(m, POLYBENCH_ARRAY(symmat))); /* Be clean. */ POLYBENCH_FREE_ARRAY(data); POLYBENCH_FREE_ARRAY(symmat); POLYBENCH_FREE_ARRAY(mean); POLYBENCH_FREE_ARRAY(stddev); return 0; }
#define EXTRALARGE_DATASET #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "correlation.h" /* Array initialization. */ static void init_array(int m, int n, DATA_TYPE * float_n, DATA_TYPE POLYBENCH_2D(data, M, N, m, n)) { int i, j; *float_n = 1.2; for (i = 0; i < m; i++) for (j = 0; j < n; j++) data[i][j] = ((DATA_TYPE) i * j) / M; } /* * DCE code. Must scan the entire live-out data. Can be used also to check * the correctness of the output. */ static void print_array(int m, DATA_TYPE POLYBENCH_2D(symmat, M, M, m, m)) { int i, j; for (i = 0; i < m; i++) for (j = 0; j < m; j++) { fprintf(stderr, DATA_PRINTF_MODIFIER, symmat[i][j]); if ((i * m + j) % 20 == 0) fprintf(stderr, "\n"); } fprintf(stderr, "\n"); } /* * Main computational kernel. The whole function will be timed, including the * call and return. */ static void kernel_correlation(int m, int n, DATA_TYPE float_n, DATA_TYPE POLYBENCH_2D(data, M, N, m, n), DATA_TYPE POLYBENCH_2D(symmat, M, M, m, m), DATA_TYPE POLYBENCH_1D(mean, M, m), DATA_TYPE POLYBENCH_1D(stddev, M, m)) { int i, j, j1, j2; DATA_TYPE eps = 0.1 f; #define sqrt_of_array_cell(x,j) sqrt(x[j]) #pragma scop /* Determine mean of column vectors of input data matrix */ for (j = 0; j < _PB_M; j++) { mean[j] = 0.0; for (i = 0; i < _PB_N; i++) mean[j] += data[i][j]; mean[j] /= float_n; } /* Determine standard deviations of column vectors of data matrix. */ for (j = 0; j < _PB_M; j++) { stddev[j] = 0.0; for (i = 0; i < _PB_N; i++) stddev[j] += (data[i][j] - mean[j]) * (data[i][j] - mean[j]); stddev[j] /= float_n; stddev[j] = sqrt_of_array_cell(stddev, j); /* * The following in an inelegant but usual way to handle near-zero * std. dev. values, which below would cause a zero- divide. */ stddev[j] = stddev[j] <= eps ? 1.0 : stddev[j]; } /* Center and reduce the column vectors. */ for (i = 0; i < _PB_N; i++) for (j = 0; j < _PB_M; j++) { data[i][j] -= mean[j]; data[i][j] /= sqrt(float_n) * stddev[j]; } /* Calculate the m * m correlation matrix. */ for (j1 = 0; j1 < _PB_M - 1; j1++) { symmat[j1][j1] = 1.0; for (j2 = j1 + 1; j2 < _PB_M; j2++) { symmat[j1][j2] = 0.0; for (i = 0; i < _PB_N; i++) symmat[j1][j2] += (data[i][j1] * data[i][j2]); symmat[j2][j1] = symmat[j1][j2]; } } #pragma endscop symmat[_PB_M - 1][_PB_M - 1] = 1.0; } int main(int argc, char **argv) { /* Retrieve problem size. */ int n = N; int m = M; /* Variable declaration/allocation. */ DATA_TYPE float_n; POLYBENCH_2D_ARRAY_DECL(data, DATA_TYPE, M, N, m, n); POLYBENCH_2D_ARRAY_DECL(symmat, DATA_TYPE, M, M, m, m); POLYBENCH_1D_ARRAY_DECL(mean, DATA_TYPE, M, m); POLYBENCH_1D_ARRAY_DECL(stddev, DATA_TYPE, M, m); /* Initialize array(s). */ init_array(m, n, &float_n, POLYBENCH_ARRAY(data)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_correlation(m, n, float_n, POLYBENCH_ARRAY(data), POLYBENCH_ARRAY(symmat), POLYBENCH_ARRAY(mean), POLYBENCH_ARRAY(stddev)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* * Prevent dead-code elimination. All live-out data must be printed by * the function call in argument. */ polybench_prevent_dce(print_array(m, POLYBENCH_ARRAY(symmat))); /* Be clean. */ POLYBENCH_FREE_ARRAY(data); POLYBENCH_FREE_ARRAY(symmat); POLYBENCH_FREE_ARRAY(mean); POLYBENCH_FREE_ARRAY(stddev); return 0; }
#define EXTRALARGE_DATASET #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "correlation.h" /* Array initialization. */ static void init_array(int m, int n, DATA_TYPE * float_n, DATA_TYPE POLYBENCH_2D(data, M, N, m, n)) { int i, j; *float_n = 1.2; for (i = 0; i < m; i++) for (j = 0; j < n; j++) data[i][j] = ((DATA_TYPE) i * j) / M; } /* * DCE code. Must scan the entire live-out data. Can be used also to check * the correctness of the output. */ static void print_array(int m, DATA_TYPE POLYBENCH_2D(symmat, M, M, m, m)) { int i, j; for (i = 0; i < m; i++) for (j = 0; j < m; j++) { fprintf(stderr, DATA_PRINTF_MODIFIER, symmat[i][j]); if ((i * m + j) % 20 == 0) fprintf(stderr, "\n"); } fprintf(stderr, "\n"); } /* * Main computational kernel. The whole function will be timed, including the * call and return. */ static void kernel_correlation(int m, int n, DATA_TYPE float_n, DATA_TYPE POLYBENCH_2D(data, M, N, m, n), DATA_TYPE POLYBENCH_2D(symmat, M, M, m, m), DATA_TYPE POLYBENCH_1D(mean, M, m), DATA_TYPE POLYBENCH_1D(stddev, M, m)) { int i, j, j1, j2; DATA_TYPE eps = 0.1 f; #define sqrt_of_array_cell(x,j) sqrt(x[j]) #pragma scop /* Determine mean of column vectors of input data matrix */ #pragma omp parallel private(i, j, j2) num_threads(2) { #pragma omp for schedule(static, 16) for (j = 0; j < _PB_M; j++) { mean[j] = 0.0; for (i = 0; i < _PB_N; i++) mean[j] += data[i][j]; mean[j] /= float_n; } /* Determine standard deviations of column vectors of data matrix. */ #pragma omp for schedule(static, 16) for (j = 0; j < _PB_M; j++) { stddev[j] = 0.0; for (i = 0; i < _PB_N; i++) stddev[j] += (data[i][j] - mean[j]) * (data[i][j] - mean[j]); stddev[j] /= float_n; stddev[j] = sqrt_of_array_cell(stddev, j); /* * The following in an inelegant but usual way to handle * near-zero std. dev. values, which below would cause a zero- * divide. */ stddev[j] = stddev[j] <= eps ? 1.0 : stddev[j]; } /* Center and reduce the column vectors. */ #pragma omp for schedule(static, 16) for (i = 0; i < _PB_N; i++) for (j = 0; j < _PB_M; j++) { data[i][j] -= mean[j]; data[i][j] /= sqrt(float_n) * stddev[j]; } /* Calculate the m * m correlation matrix. */ #pragma omp for schedule(static, 16) for (j1 = 0; j1 < _PB_M - 1; j1++) { symmat[j1][j1] = 1.0; for (j2 = j1 + 1; j2 < _PB_M; j2++) { symmat[j1][j2] = 0.0; for (i = 0; i < _PB_N; i++) symmat[j1][j2] += (data[i][j1] * data[i][j2]); symmat[j2][j1] = symmat[j1][j2]; } } } #pragma endscop symmat[_PB_M - 1][_PB_M - 1] = 1.0; } int main(int argc, char **argv) { /* Retrieve problem size. */ int n = N; int m = M; /* Variable declaration/allocation. */ DATA_TYPE float_n; POLYBENCH_2D_ARRAY_DECL(data, DATA_TYPE, M, N, m, n); POLYBENCH_2D_ARRAY_DECL(symmat, DATA_TYPE, M, M, m, m); POLYBENCH_1D_ARRAY_DECL(mean, DATA_TYPE, M, m); POLYBENCH_1D_ARRAY_DECL(stddev, DATA_TYPE, M, m); /* Initialize array(s). */ init_array(m, n, &float_n, POLYBENCH_ARRAY(data)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_correlation(m, n, float_n, POLYBENCH_ARRAY(data), POLYBENCH_ARRAY(symmat), POLYBENCH_ARRAY(mean), POLYBENCH_ARRAY(stddev)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* * Prevent dead-code elimination. All live-out data must be printed by * the function call in argument. 
*/ polybench_prevent_dce(print_array(m, POLYBENCH_ARRAY(symmat))); /* Be clean. */ POLYBENCH_FREE_ARRAY(data); POLYBENCH_FREE_ARRAY(symmat); POLYBENCH_FREE_ARRAY(mean); POLYBENCH_FREE_ARRAY(stddev); return 0; }
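The OpenMP variant of kernel_correlation above keeps all four phases inside a single #pragma omp parallel region and gives each phase its own #pragma omp for; the implicit barrier at the end of each worksharing loop is what orders mean -> stddev -> center -> correlate, and the loop counters declared before the region must be listed in private(i, j, j2). A minimal sketch of the same structure on a simpler two-phase kernel:

/* Minimal sketch of the single-parallel-region / multiple-"omp for"
 * structure used by kernel_correlation above: phase 1 computes column
 * means, phase 2 centers the columns.  The implicit barrier after each
 * "omp for" keeps phase 2 from reading mean[] before it is complete. */
#include <omp.h>

#define NROWS 1000
#define NCOLS 800

static void center_columns(double data[NROWS][NCOLS], double mean[NCOLS])
{
    int i, j;
#pragma omp parallel private(i, j)
    {
        /* Phase 1: column means. */
#pragma omp for schedule(static, 16)
        for (j = 0; j < NCOLS; j++) {
            mean[j] = 0.0;
            for (i = 0; i < NROWS; i++)
                mean[j] += data[i][j];
            mean[j] /= NROWS;
        }
        /* Phase 2: center each column (safe after the implicit barrier). */
#pragma omp for schedule(static, 16)
        for (i = 0; i < NROWS; i++)
            for (j = 0; j < NCOLS; j++)
                data[i][j] -= mean[j];
    }
}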
GB_binop__rminus_int32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__rminus_int32 // A.*B function (eWiseMult): GB_AemultB__rminus_int32 // A*D function (colscale): GB_AxD__rminus_int32 // D*A function (rowscale): GB_DxB__rminus_int32 // C+=B function (dense accum): GB_Cdense_accumB__rminus_int32 // C+=b function (dense accum): GB_Cdense_accumb__rminus_int32 // C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__rminus_int32 // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__rminus_int32 // C=scalar+B GB_bind1st__rminus_int32 // C=scalar+B' GB_bind1st_tran__rminus_int32 // C=A+scalar GB_bind2nd__rminus_int32 // C=A'+scalar GB_bind2nd_tran__rminus_int32 // C type: int32_t // A type: int32_t // B,b type: int32_t // BinaryOp: cij = (bij - aij) #define GB_ATYPE \ int32_t #define GB_BTYPE \ int32_t #define GB_CTYPE \ int32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int32_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (y - x) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_RMINUS || GxB_NO_INT32 || GxB_NO_RMINUS_INT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB_Cdense_ewise3_accum__rminus_int32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__rminus_int32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__rminus_int32 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__rminus_int32 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int32_t int32_t bwork = (*((int32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__rminus_int32 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *GB_RESTRICT Cx = (int32_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__rminus_int32 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *GB_RESTRICT Cx = (int32_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__rminus_int32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool 
Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__rminus_int32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__rminus_int32 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *Cx = (int32_t *) Cx_output ; int32_t x = (*((int32_t *) x_input)) ; int32_t *Bx = (int32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; int32_t bij = Bx [p] ; Cx [p] = (bij - x) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__rminus_int32 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int32_t *Cx = (int32_t *) Cx_output ; int32_t *Ax = (int32_t *) Ax_input ; int32_t y = (*((int32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int32_t aij = Ax [p] ; Cx [p] = (y - aij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = Ax [pA] ; \ Cx [pC] = (aij - x) ; \ } GrB_Info GB_bind1st_tran__rminus_int32 ( 
GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t x = (*((const int32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = Ax [pA] ; \ Cx [pC] = (y - aij) ; \ } GrB_Info GB_bind2nd_tran__rminus_int32 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t y = (*((const int32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
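Stripped of the GraphBLAS macros, the bind1st/bind2nd kernels above are just an OpenMP parallel map over the entry array with one operand of the RMINUS rule z = y - x bound to a scalar (bind1st computes Bx[p] - x, bind2nd computes y - Ax[p]). A standalone sketch of the bind2nd case, without the Ab/GBB bitmap handling and not the library's actual code path:

/* Standalone sketch of the rminus bind2nd kernel: Cx [p] = y - Ax [p],
 * parallelized the same way as GB_bind2nd__rminus_int32 above but with
 * the GraphBLAS bitmap (Ab/GBB) machinery omitted. */
#include <stdint.h>

void rminus_bind2nd_sketch(int32_t *Cx, const int32_t *Ax, int32_t y,
                           int64_t anz, int nthreads)
{
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        Cx [p] = y - Ax [p] ;   /* RMINUS: z = y - x, with y bound to a scalar */
    }
}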
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__rminus_int32 // A.*B function (eWiseMult): GB_AemultB__rminus_int32 // A*D function (colscale): GB_AxD__rminus_int32 // D*A function (rowscale): GB_DxB__rminus_int32 // C+=B function (dense accum): GB_Cdense_accumB__rminus_int32 // C+=b function (dense accum): GB_Cdense_accumb__rminus_int32 // C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__rminus_int32 // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__rminus_int32 // C=scalar+B GB_bind1st__rminus_int32 // C=scalar+B' GB_bind1st_tran__rminus_int32 // C=A+scalar GB_bind2nd__rminus_int32 // C=A'+scalar GB_bind2nd_tran__rminus_int32 // C type: int32_t // A type: int32_t // B,b type: int32_t // BinaryOp: cij = (bij - aij) #define GB_ATYPE \ int32_t #define GB_BTYPE \ int32_t #define GB_CTYPE \ int32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int32_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (y - x) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_RMINUS || GxB_NO_INT32 || GxB_NO_RMINUS_INT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB_Cdense_ewise3_accum__rminus_int32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__rminus_int32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__rminus_int32 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__rminus_int32 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int32_t int32_t bwork = (*((int32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__rminus_int32 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *GB_RESTRICT Cx = (int32_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__rminus_int32 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *GB_RESTRICT Cx = (int32_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__rminus_int32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool 
Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__rminus_int32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__rminus_int32 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *Cx = (int32_t *) Cx_output ; int32_t x = (*((int32_t *) x_input)) ; int32_t *Bx = (int32_t *) Bx_input ; int64_t p ; for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; int32_t bij = Bx [p] ; Cx [p] = (bij - x) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__rminus_int32 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int32_t *Cx = (int32_t *) Cx_output ; int32_t *Ax = (int32_t *) Ax_input ; int32_t y = (*((int32_t *) y_input)) ; for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int32_t aij = Ax [p] ; Cx [p] = (y - aij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = Ax [pA] ; \ Cx [pC] = (aij - x) ; \ } GrB_Info GB_bind1st_tran__rminus_int32 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, 
int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t x = (*((const int32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = Ax [pA] ; \ Cx [pC] = (y - aij) ; \ } GrB_Info GB_bind2nd_tran__rminus_int32 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t y = (*((const int32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__rminus_int32 // A.*B function (eWiseMult): GB_AemultB__rminus_int32 // A*D function (colscale): GB_AxD__rminus_int32 // D*A function (rowscale): GB_DxB__rminus_int32 // C+=B function (dense accum): GB_Cdense_accumB__rminus_int32 // C+=b function (dense accum): GB_Cdense_accumb__rminus_int32 // C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__rminus_int32 // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__rminus_int32 // C=scalar+B GB_bind1st__rminus_int32 // C=scalar+B' GB_bind1st_tran__rminus_int32 // C=A+scalar GB_bind2nd__rminus_int32 // C=A'+scalar GB_bind2nd_tran__rminus_int32 // C type: int32_t // A type: int32_t // B,b type: int32_t // BinaryOp: cij = (bij - aij) #define GB_ATYPE \ int32_t #define GB_BTYPE \ int32_t #define GB_CTYPE \ int32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int32_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (y - x) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_RMINUS || GxB_NO_INT32 || GxB_NO_RMINUS_INT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB_Cdense_ewise3_accum__rminus_int32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__rminus_int32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__rminus_int32 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__rminus_int32 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int32_t int32_t bwork = (*((int32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__rminus_int32 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *GB_RESTRICT Cx = (int32_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__rminus_int32 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *GB_RESTRICT Cx = (int32_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__rminus_int32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool 
Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__rminus_int32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__rminus_int32 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *Cx = (int32_t *) Cx_output ; int32_t x = (*((int32_t *) x_input)) ; int32_t *Bx = (int32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; int32_t bij = Bx [p] ; Cx [p] = (bij - x) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__rminus_int32 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int32_t *Cx = (int32_t *) Cx_output ; int32_t *Ax = (int32_t *) Ax_input ; int32_t y = (*((int32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int32_t aij = Ax [p] ; Cx [p] = (y - aij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = Ax [pA] ; \ Cx [pC] = (aij - x) ; \ } GrB_Info GB_bind1st_tran__rminus_int32 ( 
GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t x = (*((const int32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = Ax [pA] ; \ Cx [pC] = (y - aij) ; \ } GrB_Info GB_bind2nd_tran__rminus_int32 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t y = (*((const int32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
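The rminus_int32 variants above all implement the same scalar operator, cij = (bij - aij); the bind1st and bind2nd kernels differ only in which operand is the bound scalar. A minimal standalone sketch of those two loops, with the GraphBLAS macros and bitmap checks stripped out (the array name, size, and values here are illustrative, not part of the generated code):

#include <stdio.h>
#include <stdint.h>

/* rminus: z = f(x,y) = y - x (operands reversed relative to minus) */
static inline int32_t rminus_int32 (int32_t x, int32_t y) { return (y - x) ; }

int main (void)
{
    int32_t A [4] = { 10, 20, 30, 40 } ;
    int32_t C [4] ;
    int32_t s = 3 ;

    /* bind1st: the scalar is x, so C [p] = A [p] - s  (prints 7 17 27 37) */
    for (int p = 0 ; p < 4 ; p++) C [p] = rminus_int32 (s, A [p]) ;
    printf ("bind1st: %d %d %d %d\n", C [0], C [1], C [2], C [3]) ;

    /* bind2nd: the scalar is y, so C [p] = s - A [p]  (prints -7 -17 -27 -37) */
    for (int p = 0 ; p < 4 ; p++) C [p] = rminus_int32 (A [p], s) ;
    printf ("bind2nd: %d %d %d %d\n", C [0], C [1], C [2], C [3]) ;
    return (0) ;
}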
loop_multiple_variables_omp.c
#include <stdio.h> #include <stdlib.h> #include <omp.h> int main() { int* a = (int*)malloc(sizeof(int)*4); int* b = (int*)malloc(sizeof(int)*4); a[0] = 0; a[1] = 1; a[2] = 2; a[3] = 3; #pragma omp parallel for { for(int i = 0; i < 4; i++) { b[i] = a[i]; } } printf("[%d,%d,%d,%d]\n", b[0], b[1], b[2], b[3]); free(a); free(b); }
#include <stdio.h> #include <stdlib.h> #include <omp.h> int main() { int *a = (int *)malloc(sizeof(int) * 4); int *b = (int *)malloc(sizeof(int) * 4); a[0] = 0; a[1] = 1; a[2] = 2; a[3] = 3; for (int i = 0; i < 4; i++) { b[i] = a[i]; } printf("[%d,%d,%d,%d]\n", b[0], b[1], b[2], b[3]); free(a); free(b); }
#include <stdio.h> #include <stdlib.h> #include <omp.h> int main() { int *a = (int *)malloc(sizeof(int) * 4); int *b = (int *)malloc(sizeof(int) * 4); a[0] = 0; a[1] = 1; a[2] = 2; a[3] = 3; #pragma omp parallel for for (int i = 0; i < 4; i++) { b[i] = a[i]; } printf("[%d,%d,%d,%d]\n", b[0], b[1], b[2], b[3]); free(a); free(b); }
cross_correlate_2d.c
// MIT License // // Copyright (c) 2021 Florian // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. #include <stdio.h> #include <stdlib.h> void cross_correlate_2d( const size_t *s_i, const double *input, const size_t *s_k, const double *kernel, const size_t *s_o, double *output) { #pragma omp parallel #pragma omp for for (int i = 0; i < s_o[0]; ++i) { for (int j = 0; j < s_o[1]; ++j) { output[i * s_o[1] + j] = 0; for (int k = 0; k < s_k[0]; ++k) { for (int l = 0; l < s_k[1]; ++l) { output[i * s_o[1] + j] += input[(i + k) * s_i[1] + j + l] * kernel[k * s_k[1] + l]; } } } } }
// MIT License // // Copyright (c) 2021 Florian // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. #include <stdio.h> #include <stdlib.h> void cross_correlate_2d( const size_t *s_i, const double *input, const size_t *s_k, const double *kernel, const size_t *s_o, double *output) { for (int i = 0; i < s_o[0]; ++i) { for (int j = 0; j < s_o[1]; ++j) { output[i * s_o[1] + j] = 0; for (int k = 0; k < s_k[0]; ++k) { for (int l = 0; l < s_k[1]; ++l) { output[i * s_o[1] + j] += input[(i + k) * s_i[1] + j + l] * kernel[k * s_k[1] + l]; } } } } }
// MIT License // // Copyright (c) 2021 Florian // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. #include <stdio.h> #include <stdlib.h> void cross_correlate_2d( const size_t *s_i, const double *input, const size_t *s_k, const double *kernel, const size_t *s_o, double *output) { #pragma omp parallel #pragma omp for for (int i = 0; i < s_o[0]; ++i) { for (int j = 0; j < s_o[1]; ++j) { output[i * s_o[1] + j] = 0; for (int k = 0; k < s_k[0]; ++k) { for (int l = 0; l < s_k[1]; ++l) { output[i * s_o[1] + j] += input[(i + k) * s_i[1] + j + l] * kernel[k * s_k[1] + l]; } } } } }
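The loops above implement a "valid" 2D cross-correlation, so the output shape follows s_o[d] = s_i[d] - s_k[d] + 1. A small driver makes the indexing convention concrete; it is a sketch only, assuming the cross_correlate_2d translation unit above is compiled and linked alongside it, and the sizes and values are purely illustrative:

#include <stdio.h>
#include <stdlib.h>

void cross_correlate_2d(const size_t *s_i, const double *input,
                        const size_t *s_k, const double *kernel,
                        const size_t *s_o, double *output);

int main(void) {
    /* 3x3 input, 2x2 kernel -> 2x2 output (valid correlation) */
    size_t s_i[2] = {3, 3}, s_k[2] = {2, 2}, s_o[2] = {2, 2};
    double input[9]  = {1, 2, 3,
                        4, 5, 6,
                        7, 8, 9};
    double kernel[4] = {1, 0,
                        0, 1};   /* picks input[i][j] + input[i+1][j+1] */
    double output[4];

    cross_correlate_2d(s_i, input, s_k, kernel, s_o, output);

    for (int i = 0; i < 2; i++) {
        for (int j = 0; j < 2; j++) printf("%6.1f ", output[i * 2 + j]);
        printf("\n");
    }
    /* expected: 6 8 / 12 14  (e.g. 1+5, 2+6, 4+8, 5+9) */
    return 0;
}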
GB_unaryop__lnot_uint16_int8.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__lnot_uint16_int8 // op(A') function: GB_tran__lnot_uint16_int8 // C type: uint16_t // A type: int8_t // cast: uint16_t cij = (uint16_t) aij // unaryop: cij = !(aij != 0) #define GB_ATYPE \ int8_t #define GB_CTYPE \ uint16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = !(x != 0) ; // casting #define GB_CASTING(z, x) \ uint16_t z = (uint16_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LNOT || GxB_NO_UINT16 || GxB_NO_INT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__lnot_uint16_int8 ( uint16_t *restrict Cx, const int8_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__lnot_uint16_int8 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__lnot_uint16_int8 // op(A') function: GB_tran__lnot_uint16_int8 // C type: uint16_t // A type: int8_t // cast: uint16_t cij = (uint16_t) aij // unaryop: cij = !(aij != 0) #define GB_ATYPE \ int8_t #define GB_CTYPE \ uint16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = !(x != 0) ; // casting #define GB_CASTING(z, x) \ uint16_t z = (uint16_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LNOT || GxB_NO_UINT16 || GxB_NO_INT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__lnot_uint16_int8 ( uint16_t *restrict Cx, const int8_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__lnot_uint16_int8 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__lnot_uint16_int8 // op(A') function: GB_tran__lnot_uint16_int8 // C type: uint16_t // A type: int8_t // cast: uint16_t cij = (uint16_t) aij // unaryop: cij = !(aij != 0) #define GB_ATYPE \ int8_t #define GB_CTYPE \ uint16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = !(x != 0) ; // casting #define GB_CASTING(z, x) \ uint16_t z = (uint16_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LNOT || GxB_NO_UINT16 || GxB_NO_INT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__lnot_uint16_int8 ( uint16_t *restrict Cx, const int8_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__lnot_uint16_int8 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
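Stripped of the GraphBLAS macros, the generated GB_unop__lnot_uint16_int8 kernel reduces to a cast followed by a logical not, cij = !(aij != 0). A minimal standalone sketch of that elementwise loop (array contents and size are illustrative only):

#include <stdio.h>
#include <stdint.h>

int main (void)
{
    int8_t   Ax [5] = { -3, 0, 1, 0, 7 } ;
    uint16_t Cx [5] ;
    int64_t  anz = 5 ;

    for (int64_t p = 0 ; p < anz ; p++)
    {
        /* casting: uint16_t x = (uint16_t) Ax [p] ; unaryop: Cx [p] = !(x != 0) */
        uint16_t x = (uint16_t) Ax [p] ;
        Cx [p] = !(x != 0) ;
    }

    for (int64_t p = 0 ; p < anz ; p++) printf ("%u ", (unsigned) Cx [p]) ;
    printf ("\n") ;    /* prints: 0 1 0 1 0 */
    return (0) ;
}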
scratch.c
#include <omp.h> #include <stdio.h> #include <stdlib.h> static void fun() { int tid; tid = omp_get_thread_num(); printf("Hi from thread %d\n", tid); } int main (int argc, char *argv[]) { #pragma omp parallel { fun(); } /* All threads join master thread, barrier and disband */ return 0; }
#include <omp.h> #include <stdio.h> #include <stdlib.h> static void fun() { int tid; tid = omp_get_thread_num(); printf("Hi from thread %d\n", tid); } int main(int argc, char *argv[]) { fun(); /* All threads join master thread, barrier and disband */ return 0; }
#include <omp.h> #include <stdio.h> #include <stdlib.h> static void fun() { int tid; tid = omp_get_thread_num(); printf("Hi from thread %d\n", tid); } int main(int argc, char *argv[]) { #pragma omp parallel { fun(); } /* All threads join master thread, barrier * and disband */ return 0; }
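The scratch.c example above prints one line per thread, in whatever order the runtime schedules them. A small variation, assuming an OpenMP-enabled compiler, shows how the team size can be requested explicitly with the num_threads clause and reported from inside the region:

#include <omp.h>
#include <stdio.h>

static void fun() {
  int tid = omp_get_thread_num();
  printf("Hi from thread %d of %d\n", tid, omp_get_num_threads());
}

int main(int argc, char *argv[]) {
  /* Request four threads for this parallel region; the runtime may grant fewer. */
  #pragma omp parallel num_threads(4)
  {
    fun();
  }
  return 0;
}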
Tutorial.h
//================================================================================================= /*! // \file blaze/Tutorial.h // \brief Tutorial of the Blaze library // // Copyright (C) 2012-2018 Klaus Iglberger - All Rights Reserved // // This file is part of the Blaze library. You can redistribute it and/or modify it under // the terms of the New (Revised) BSD License. Redistribution and use in source and binary // forms, with or without modification, are permitted provided that the following conditions // are met: // // 1. Redistributions of source code must retain the above copyright notice, this list of // conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, this list // of conditions and the following disclaimer in the documentation and/or other materials // provided with the distribution. // 3. Neither the names of the Blaze development group nor the names of its contributors // may be used to endorse or promote products derived from this software without specific // prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT // SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED // TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR // BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN // ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH // DAMAGE. */ //================================================================================================= #ifndef _BLAZE_TUTORIAL_H_ #define _BLAZE_TUTORIAL_H_ //================================================================================================= // // BLAZE TUTORIAL // //================================================================================================= //**Mainpage*************************************************************************************** /*!\mainpage // // \image html blaze300x150.jpg // // This is the API for the \b Blaze high performance C++ math library. It gives a complete // overview of the individual features and sublibraries of \b Blaze. To get a first impression // on \b Blaze, the short \ref getting_started tutorial is a good place to start. Afterwards, // the following long tutorial covers the most important aspects of the \b Blaze math library. 
// The tabs at the top of the page allow a direct access to the individual modules, namespaces, // classes, and files of the \b Blaze library.\n\n // // \section table_of_content Table of Contents // // <ul> // <li> \ref configuration_and_installation </li> // <li> \ref getting_started </li> // <li> \ref vectors // <ul> // <li> \ref vector_types </li> // <li> \ref vector_operations </li> // </ul> // </li> // <li> \ref matrices // <ul> // <li> \ref matrix_types </li> // <li> \ref matrix_operations </li> // </ul> // </li> // <li> \ref adaptors // <ul> // <li> \ref adaptors_symmetric_matrices </li> // <li> \ref adaptors_hermitian_matrices </li> // <li> \ref adaptors_triangular_matrices </li> // </ul> // </li> // <li> \ref views // <ul> // <li> \ref views_subvectors </li> // <li> \ref views_element_selections </li> // <li> \ref views_submatrices </li> // <li> \ref views_rows </li> // <li> \ref views_row_selections </li> // <li> \ref views_columns </li> // <li> \ref views_column_selections </li> // <li> \ref views_bands </li> // </ul> // </li> // <li> \ref arithmetic_operations // <ul> // <li> \ref addition </li> // <li> \ref subtraction </li> // <li> \ref scalar_multiplication </li> // <li> \ref vector_vector_multiplication // <ul> // <li> \ref componentwise_multiplication </li> // <li> \ref inner_product </li> // <li> \ref outer_product </li> // <li> \ref cross_product </li> // </ul> // </li> // <li> \ref vector_vector_division </li> // <li> \ref matrix_vector_multiplication </li> // <li> \ref matrix_matrix_multiplication // <ul> // <li> \ref schur_product </li> // <li> \ref matrix_product </li> // </ul> // </li> // </ul> // </li> // <li> \ref shared_memory_parallelization // <ul> // <li> \ref hpx_parallelization </li> // <li> \ref cpp_threads_parallelization </li> // <li> \ref boost_threads_parallelization </li> // <li> \ref openmp_parallelization </li> // <li> \ref serial_execution </li> // </ul> // </li> // <li> \ref serialization // <ul> // <li> \ref vector_serialization </li> // <li> \ref matrix_serialization </li> // </ul> // </li> // <li> \ref customization // <ul> // <li> \ref configuration_files </li> // <li> \ref vector_and_matrix_customization // <ul> // <li> \ref custom_data_members </li> // <li> \ref custom_operations </li> // <li> \ref custom_data_types </li> // </ul> // </li> // <li> \ref error_reporting_customization </li> // </ul> // </li> // <li> \ref blas_functions </li> // <li> \ref lapack_functions </li> // <li> \ref block_vectors_and_matrices </li> // <li> \ref intra_statement_optimization </li> // <li> \ref faq </li> // <li> \ref issue_creation_guidelines </li> // <li> \ref blaze_references </li> // </ul> */ //************************************************************************************************* //**Configuration and Installation***************************************************************** /*!\page configuration_and_installation Configuration and Installation // // \tableofcontents // // // Since \b Blaze is a header-only library, setting up the \b Blaze library on a particular system // is a fairly easy two step process. In the following, this two step process is explained in // detail, preceded only by a short summary of the requirements. 
// // // \n \section requirements Requirements // <hr> // // For maximum performance the \b Blaze library expects you to have a BLAS library installed // (<a href="http://software.intel.com/en-us/articles/intel-mkl/">Intel MKL</a>, // <a href="http://developer.amd.com/libraries/acml/">ACML</a>, // <a href="http://math-atlas.sourceforge.net">Atlas</a>, // <a href="http://www.tacc.utexas.edu/tacc-projects/gotoblas2">Goto</a>, ...). If you don't // have a BLAS library installed on your system, \b Blaze will still work and will not be reduced // in functionality, but performance may be limited. Thus it is strongly recommended to install a // BLAS library. // // Additionally, for computing the determinant of a dense matrix, for the decomposition of dense // matrices, for the dense matrix inversion, and for the computation of eigenvalues and singular // values \b Blaze requires <a href="https://en.wikipedia.org/wiki/LAPACK">LAPACK</a>. When either // of these features is used it is necessary to link the LAPACK library to the final executable. // If no LAPACK library is available the use of these features will result in a linker error. // // Furthermore, it is possible to use Boost threads to run numeric operations in parallel. In this // case the Boost library is required to be installed on your system. It is recommended to use the // newest Boost library available, but \b Blaze requires at minimum the Boost version 1.54.0. If // you don't have Boost installed on your system, you can download it for free from // <a href="http://www.boost.org">www.boost.org</a>. // // // \n \section step_1_installation Step 1: Installation // <hr> // // \subsection step_1_cmake Installation via CMake // // The first step is the installation of the \b Blaze header files. The most convenient way // to do this is via <a href="https://cmake.org">CMake</a>. Linux and macOS users can use the // following two lines to copy the \b Blaze headers in the <tt>./blaze</tt> subdirectory to // the directory \c ${CMAKE_INSTALL_PREFIX}/include and the package configuration files to // \c ${CMAKE_INSTALL_PREFIX}/share/blaze/cmake. \code cmake -DCMAKE_INSTALL_PREFIX=/usr/local/ sudo make install \endcode // Windows users can do the same via the cmake-gui. Alternatively, it is possible to include // \b Blaze by adding the following lines in any \c CMakeLists.txt file: \code find_package( blaze ) if( blaze_FOUND ) add_library( blaze_target INTERFACE ) target_link_libraries( blaze_target INTERFACE blaze::blaze ) endif() \endcode // \n \subsection step_1_vcpkg Installation via the VC++ Packaging Tool // // An alternate way to install \b Blaze for Windows users is Microsoft's // <a href="https://github.com/Microsoft/vcpkg">VC++ Packaging Tool (vcpkg)</a>. \b Blaze can // be installed via the command line: \code C:\src\vcpkg> .\vcpkg install blaze \endcode // The tool automatically downloads the latest \b Blaze release and copies the header files to // the common include directory. Please note that since \b Blaze is a header-only library the // attempt to install any static or dynamic library will fail! 
// // \n \subsection step_1_installation_unix Manual Installation on Linux/macOS // // Since \b Blaze only consists of header files, the <tt>./blaze</tt> subdirectory can be simply // copied to a standard include directory (note that this requires root privileges): \code cp -r ./blaze /usr/local/include \endcode // Alternatively, on Unix-based machines (which includes Linux and Mac OS X) the // \c CPLUS_INCLUDE_PATH environment variable can be set. The specified directory will be // searched after any directories specified on the command line with the option \c -I and // before the standard default directories (such as \c /usr/local/include and \c /usr/include). // Assuming a user named 'Jon', the environment variable can be set as follows: \code CPLUS_INCLUDE_PATH=/usr/home/jon/blaze export CPLUS_INCLUDE_PATH \endcode // Last but not least, the <tt>./blaze</tt> subdirectory can be explicitly specified on the // command line. The following example demonstrates this by means of the GNU C++ compiler: \code g++ -I/usr/home/jon/blaze -o BlazeTest BlazeTest.cpp \endcode // \n \subsection step_1_installation_windows Manual Installation on Windows // // Windows doesn't have a standard include directory. Therefore the \b Blaze header files can be // copied to any other directory or simply left in the default \b Blaze directory. However, the // chosen include directory has to be explicitly specified as include path. In Visual Studio, // this is done via the project property pages, configuration properties, C/C++, General settings. // Here the additional include directories can be specified. // // // \n \section step_2_configuration Step 2: Configuration // <hr> // // The second step is the configuration and customization of the \b Blaze library. Many aspects // of \b Blaze can be adapted to specific requirements, environments and architectures. The most // convenient way to configure \b Blaze is to modify the headers in the <tt>./blaze/config/</tt> // subdirectory by means of <a href="https://cmake.org">CMake</a>. Alternatively these header // files can be customized manually. In both cases, however, the files are modified. If this is // not an option it is possible to configure \b Blaze via the command line (see the tutorial // section \ref configuration_files or the documentation in the configuration files). // // Since the default settings are reasonable for most systems this step can also be skipped. // However, in order to achieve maximum performance a customization of at least the following // configuration files is required: // // - <b><tt><blaze/config/BLAS.h></tt></b>: Via this configuration file \b Blaze can be enabled // to use a third-party BLAS library for several basic linear algebra functions (such as for // instance dense matrix multiplications). In case no BLAS library is used, all linear algebra // functions use the default implementations of the \b Blaze library and therefore BLAS is not a // requirement for the compilation process. However, please note that performance may be limited. // - <b><tt><blaze/config/CacheSize.h></tt></b>: This file contains the hardware specific cache // settings. \b Blaze uses this information to optimize its cache usage. For maximum performance // it is recommended to adapt these setting to a specific target architecture. // - <b><tt><blaze/config/Thresholds.h></tt></b>: This file contains all thresholds for the // customization of the \b Blaze compute kernels. 
In order to tune the kernels for a specific // architecture and to maximize performance it can be necessary to adjust the thresholds, // especially for a parallel execution (see \ref shared_memory_parallelization). // // For an overview of other customization options and more details, please see the section // \ref configuration_files. // // // \n \section blaze_version Blaze Version // <hr> // // The current major and minor version number of the \b Blaze library can be found in the // <b><tt><blaze/system/Version.h></tt></b> header file. It is automatically included via the // <b><tt><blaze/Blaze.h></tt></b> header file. The file contains the two following macros, // which can for instance be used for conditional compilation: \code #define BLAZE_MAJOR_VERSION 3 #define BLAZE_MINOR_VERSION 2 \endcode // \n Next: \ref getting_started */ //************************************************************************************************* //**Getting Started******************************************************************************** /*!\page getting_started Getting Started // // This short tutorial serves the purpose to give a quick overview of the way mathematical // expressions have to be formulated in \b Blaze. Starting with \ref vector_types, the following // long tutorial covers the most important aspects of the \b Blaze math library. // // // \n \section getting_started_vector_example A First Example // // \b Blaze is written such that using mathematical expressions is as close to mathematical // textbooks as possible and therefore as intuitive as possible. In nearly all cases the seemingly // easiest solution is the right solution and most users experience no problems when trying to // use \b Blaze in the most natural way. The following example gives a first impression of the // formulation of a vector addition in \b Blaze: \code #include <iostream> #include <blaze/Math.h> using blaze::StaticVector; using blaze::DynamicVector; // Instantiation of a static 3D column vector. The vector is directly initialized as // ( 4 -2 5 ) StaticVector<int,3UL> a{ 4, -2, 5 }; // Instantiation of a dynamic 3D column vector. Via the subscript operator the values are set to // ( 2 5 -3 ) DynamicVector<int> b( 3UL ); b[0] = 2; b[1] = 5; b[2] = -3; // Adding the vectors a and b DynamicVector<int> c = a + b; // Printing the result of the vector addition std::cout << "c =\n" << c << "\n"; \endcode // Note that the entire \b Blaze math library can be included via the \c blaze/Math.h header // file. Alternatively, the entire \b Blaze library, including both the math and the entire // utility module, can be included via the \c blaze/Blaze.h header file. Also note that all // classes and functions of \b Blaze are contained in the blaze namespace.\n\n // // Assuming that this program resides in a source file called \c FirstExample.cpp, it can be // compiled for instance via the GNU C++ compiler: \code g++ -ansi -O3 -DNDEBUG -mavx -o FirstExample FirstExample.cpp \endcode // Note the definition of the \c NDEBUG preprocessor symbol. In order to achieve maximum // performance, it is necessary to compile the program in release mode, which deactivates // all debugging functionality inside \b Blaze. It is also strongly recommended to specify // the available architecture specific instruction set (as for instance the AVX instruction // set, which if available can be activated via the \c -mavx flag). 
This allows \b Blaze // to optimize computations via vectorization.\n\n // // When running the resulting executable \c FirstExample, the output of the last line of // this small program is \code c = 6 3 2 \endcode // \n \section getting_started_matrix_example An Example Involving Matrices // // Similarly easy and intuitive are expressions involving matrices: \code #include <blaze/Math.h> using namespace blaze; // Instantiating a dynamic 3D column vector DynamicVector<int> x{ 4, -1, 3 }; // Instantiating a dynamic 2x3 row-major matrix, preinitialized with 0. Via the function call // operator three values of the matrix are explicitly set to get the matrix // ( 1 0 4 ) // ( 0 -2 0 ) DynamicMatrix<int> A( 2UL, 3UL, 0 ); A(0,0) = 1; A(0,2) = 4; A(1,1) = -2; // Performing a matrix/vector multiplication DynamicVector<int> y = A * x; // Printing the resulting vector std::cout << "y =\n" << y << "\n"; // Instantiating a static column-major matrix. The matrix is directly initialized as // ( 3 -1 ) // ( 0 2 ) // ( -1 0 ) StaticMatrix<int,3UL,2UL,columnMajor> B{ { 3, -1 }, { 0, 2 }, { -1, 0 } }; // Performing a matrix/matrix multiplication DynamicMatrix<int> C = A * B; // Printing the resulting matrix std::cout << "C =\n" << C << "\n"; \endcode // The output of this program is \code y = 16 2 C = ( -1 -1 ) ( 0 -4 ) \endcode // \n \section getting_started_complex_example A Complex Example // // The following example is much more sophisticated. It shows the implementation of the Conjugate // Gradient (CG) algorithm (http://en.wikipedia.org/wiki/Conjugate_gradient) by means of the // \b Blaze library: // // \image html cg.jpg // // In this example it is not important to understand the CG algorithm itself, but to see the // advantage of the API of the \b Blaze library. In the \b Blaze implementation we will use a // sparse matrix/dense vector multiplication for a 2D Poisson equation using \f$ N \times N \f$ // unknowns. It becomes apparent that the core of the algorithm is very close to the mathematical // formulation and therefore has huge advantages in terms of readability and maintainability, // while the performance of the code is close to the expected theoretical peak performance: \code const size_t NN( N*N ); blaze::CompressedMatrix<double,rowMajor> A( NN, NN ); blaze::DynamicVector<double,columnVector> x( NN, 1.0 ), b( NN, 0.0 ), r( NN ), p( NN ), Ap( NN ); double alpha, beta, delta; // ... Initializing the sparse matrix A // Performing the CG algorithm r = b - A * x; p = r; delta = (r,r); for( size_t iteration=0UL; iteration<iterations; ++iteration ) { Ap = A * p; alpha = delta / (p,Ap); x += alpha * p; r -= alpha * Ap; beta = (r,r); if( std::sqrt( beta ) < 1E-8 ) break; p = r + ( beta / delta ) * p; delta = beta; } \endcode // \n Hopefully this short tutorial gives a good first impression of how mathematical expressions // are formulated with \b Blaze. The following long tutorial, starting with \ref vector_types, // will cover all aspects of the \b Blaze math library, i.e. it will introduce all vector and // matrix types, all possible operations on vectors and matrices, and of course all possible // mathematical expressions. 
// // \n Previous: \ref configuration_and_installation &nbsp; &nbsp; Next: \ref vectors */ //************************************************************************************************* //**Vectors**************************************************************************************** /*!\page vectors Vectors // // \tableofcontents // // // \n \section vectors_general General Concepts // <hr> // // The \b Blaze library currently offers four dense vector types (\ref vector_types_static_vector, // \ref vector_types_dynamic_vector, \ref vector_types_hybrid_vector, and \ref vector_types_custom_vector) // and one sparse vector type (\ref vector_types_compressed_vector). All vectors can be specified // as either column vectors or row vectors: \code using blaze::DynamicVector; using blaze::columnVector; using blaze::rowVector; // Setup of the 3-dimensional dense column vector // // ( 1 ) // ( 2 ) // ( 3 ) // DynamicVector<int,columnVector> a{ 1, 2, 3 }; // Setup of the 3-dimensional dense row vector // // ( 4 5 6 ) // DynamicVector<int,rowVector> b{ 4, 5, 6 }; \endcode // Per default, all vectors in \b Blaze are column vectors: \code // Instantiation of a 3-dimensional column vector blaze::DynamicVector<int> c( 3UL ); \endcode // \n \section vectors_details Vector Details // <hr> // // - \ref vector_types // - \ref vector_operations // // // \n \section vectors_examples Examples // <hr> \code using blaze::StaticVector; using blaze::DynamicVector; using blaze::CompressedVector; using blaze::rowVector; using blaze::columnVector; StaticVector<int,6UL> a; // Instantiation of a 6-dimensional static column vector CompressedVector<int,rowVector> b; // Instantiation of a compressed row vector DynamicVector<int,columnVector> c; // Instantiation of a dynamic column vector // ... Resizing and initialization c = a + trans( b ); \endcode // \n Previous: \ref getting_started &nbsp; &nbsp; Next: \ref vector_types */ //************************************************************************************************* //**Vector Types*********************************************************************************** /*!\page vector_types Vector Types // // \tableofcontents // // // \n \section vector_types_static_vector StaticVector // <hr> // // The blaze::StaticVector class template is the representation of a fixed size vector with // statically allocated elements of arbitrary type. It can be included via the header file \code #include <blaze/math/StaticVector.h> \endcode // The type of the elements, the number of elements, and the transpose flag of the vector can // be specified via the three template parameters: \code template< typename Type, size_t N, bool TF > class StaticVector; \endcode // - \c Type: specifies the type of the vector elements. StaticVector can be used with any // non-cv-qualified, non-reference, non-pointer element type. // - \c N : specifies the total number of vector elements. It is expected that StaticVector is // only used for tiny and small vectors. // - \c TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column // vector (\c blaze::columnVector). The default value is \c blaze::columnVector. 
// // The blaze::StaticVector is perfectly suited for small to medium vectors whose size is known at // compile time: \code // Definition of a 3-dimensional integral column vector blaze::StaticVector<int,3UL> a; // Definition of a 4-dimensional single precision column vector blaze::StaticVector<float,4UL,blaze::columnVector> b; // Definition of a 6-dimensional double precision row vector blaze::StaticVector<double,6UL,blaze::rowVector> c; \endcode // \n \section vector_types_dynamic_vector DynamicVector // <hr> // // The blaze::DynamicVector class template is the representation of an arbitrary sized vector // with dynamically allocated elements of arbitrary type. It can be included via the header file \code #include <blaze/math/DynamicVector.h> \endcode // The type of the elements and the transpose flag of the vector can be specified via the two // template parameters: \code template< typename Type, bool TF > class DynamicVector; \endcode // - \c Type: specifies the type of the vector elements. DynamicVector can be used with any // non-cv-qualified, non-reference, non-pointer element type. // - \c TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column // vector (\c blaze::columnVector). The default value is \c blaze::columnVector. // // The blaze::DynamicVector is the default choice for all kinds of dense vectors and the best // choice for medium to large vectors. Its size can be modified at runtime: \code // Definition of a 3-dimensional integral column vector blaze::DynamicVector<int> a( 3UL ); // Definition of a 4-dimensional single precision column vector blaze::DynamicVector<float,blaze::columnVector> b( 4UL ); // Definition of a double precision row vector with size 0 blaze::DynamicVector<double,blaze::rowVector> c; \endcode // \n \section vector_types_hybrid_vector HybridVector // <hr> // // The blaze::HybridVector class template combines the advantages of the blaze::StaticVector and // the blaze::DynamicVector class templates. It represents a fixed size vector with statically // allocated elements, but still can be dynamically resized (within the bounds of the available // memory). It can be included via the header file \code #include <blaze/math/HybridVector.h> \endcode // The type of the elements, the number of elements, and the transpose flag of the vector can // be specified via the three template parameters: \code template< typename Type, size_t N, bool TF > class HybridVector; \endcode // - \c Type: specifies the type of the vector elements. HybridVector can be used with any // non-cv-qualified, non-reference, non-pointer element type. // - \c N : specifies the maximum number of vector elements. It is expected that HybridVector // is only used for tiny and small vectors. // - \c TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column // vector (\c blaze::columnVector). The default value is \c blaze::columnVector. 
// // The blaze::HybridVector is a suitable choice for small to medium vectors, whose size is not // known at compile time or not fixed at runtime, but whose maximum size is known at compile // time: \code // Definition of a 3-dimensional integral column vector with a maximum size of 6 blaze::HybridVector<int,6UL> a( 3UL ); // Definition of a 4-dimensional single precision column vector with a maximum size of 16 blaze::HybridVector<float,16UL,blaze::columnVector> b( 4UL ); // Definition of a double precision row vector with size 0 and a maximum size of 6 blaze::HybridVector<double,6UL,blaze::rowVector> c; \endcode // \n \section vector_types_custom_vector CustomVector // <hr> // // The blaze::CustomVector class template provides the functionality to represent an external // array of elements of arbitrary type and a fixed size as a native \b Blaze dense vector data // structure. Thus in contrast to all other dense vector types a custom vector does not perform // any kind of memory allocation by itself, but it is provided with an existing array of element // during construction. A custom vector can therefore be considered an alias to the existing // array. It can be included via the header file \code #include <blaze/math/CustomVector.h> \endcode // The type of the elements, the properties of the given array of elements and the transpose // flag of the vector can be specified via the following four template parameters: \code template< typename Type, bool AF, bool PF, bool TF > class CustomVector; \endcode // - Type: specifies the type of the vector elements. blaze::CustomVector can be used with // any non-cv-qualified, non-reference, non-pointer element type. // - AF : specifies whether the represented, external arrays are properly aligned with // respect to the available instruction set (SSE, AVX, ...) or not. // - PF : specified whether the represented, external arrays are properly padded with // respect to the available instruction set (SSE, AVX, ...) or not. // - TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column // vector (\c blaze::columnVector). The default value is \c blaze::columnVector. 
// // The blaze::CustomVector is the right choice if any external array needs to be represented as // a \b Blaze dense vector data structure or if a custom memory allocation strategy needs to be // realized: \code using blaze::CustomVector; using blaze::Deallocate; using blaze::aligned; using blaze::unaligned; using blaze::padded; using blaze::unpadded; // Definition of an unmanaged custom column vector for unaligned, unpadded integer arrays using UnalignedUnpadded = CustomVector<int,unaligned,unpadded,columnVector>; std::vector<int> vec( 7UL ); UnalignedUnpadded a( &vec[0], 7UL ); // Definition of a managed custom column vector for unaligned but padded 'float' arrays using UnalignedPadded = CustomVector<float,unaligned,padded,columnVector>; std::unique_ptr<float[]> memory1( new float[16] ); UnalignedPadded b( memory1.get(), 9UL, 16UL ); // Definition of a managed custom row vector for aligned, unpadded 'double' arrays using AlignedUnpadded = CustomVector<double,aligned,unpadded,rowVector>; std::unique_ptr<double[],Deallocate> memory2( blaze::allocate<double>( 7UL ) ); AlignedUnpadded c( memory2.get(), 7UL ); // Definition of a managed custom row vector for aligned, padded 'complex<double>' arrays using cplx = complex<double>; using AlignedPadded = CustomVector<cplx,aligned,padded,columnVector>; std::unique_ptr<cplx[],Deallocate> memory3( allocate<cplx>( 8UL ) ); AlignedPadded d( memory3.get(), 5UL, 8UL ); \endcode // In comparison with the remaining \b Blaze dense vector types blaze::CustomVector has several // special characteristics. All of these result from the fact that a custom vector is not // performing any kind of memory allocation, but instead is given an existing array of elements. // The following sections discuss all of these characteristics: // // -# <b>\ref vector_types_custom_vector_memory_management</b> // -# <b>\ref vector_types_custom_vector_copy_operations</b> // -# <b>\ref vector_types_custom_vector_alignment</b> // -# <b>\ref vector_types_custom_vector_padding</b> // // \n \subsection vector_types_custom_vector_memory_management Memory Management // // The blaze::CustomVector class template acts as an adaptor for an existing array of elements. As // such it provides everything that is required to use the array just like a native \b Blaze dense // vector data structure. However, this flexibility comes with the price that the user of a custom // vector is responsible for the resource management. // // The following examples give an impression of several possible types of custom vectors: \code using blaze::CustomVector; using blaze::Deallocate; using blaze::allocate; using blaze::aligned; using blaze::unaligned; using blaze::padded; using blaze::unpadded; // Definition of a 3-dimensional custom vector with unaligned, unpadded and externally // managed integer array. Note that the std::vector must be guaranteed to outlive the // custom vector! std::vector<int> vec( 3UL ); CustomVector<int,unaligned,unpadded> a( &vec[0], 3UL ); // Definition of a custom vector with size 3 and capacity 16 with aligned, padded and // externally managed integer array. Note that the std::unique_ptr must be guaranteed // to outlive the custom vector! 
std::unique_ptr<int[],Deallocate> memory( allocate<int>( 16UL ) ); CustomVector<int,aligned,padded> b( memory.get(), 3UL, 16UL ); \endcode // \n \subsection vector_types_custom_vector_copy_operations Copy Operations // // As with all dense vectors it is possible to copy construct a custom vector: \code using blaze::CustomVector; using blaze::unaligned; using blaze::unpadded; using CustomType = CustomVector<int,unaligned,unpadded>; std::vector<int> vec( 5UL, 10 ); // Vector of 5 integers of the value 10 CustomType a( &vec[0], 5UL ); // Represent the std::vector as Blaze dense vector a[1] = 20; // Also modifies the std::vector CustomType b( a ); // Creating a copy of vector a b[2] = 20; // Also affects vector a and the std::vector \endcode // It is important to note that a custom vector acts as a reference to the specified array. Thus // the result of the copy constructor is a new custom vector that is referencing and representing // the same array as the original custom vector. // // In contrast to copy construction, just as with references, copy assignment does not change // which array is referenced by the custom vector, but modifies the values of the array: \code std::vector<int> vec2( 5UL, 4 ); // Vector of 5 integers of the value 4 CustomType c( &vec2[0], 5UL ); // Represent the std::vector as Blaze dense vector a = c; // Copy assignment: Set all values of vector a and b to 4. \endcode // \n \subsection vector_types_custom_vector_alignment Alignment // // In case the custom vector is specified as \c aligned the passed array must be guaranteed to // be aligned according to the requirements of the used instruction set (SSE, AVX, ...). For // instance, if AVX is active an array of integers must be 32-byte aligned: \code using blaze::CustomVector; using blaze::Deallocate; using blaze::allocate; using blaze::aligned; using blaze::unpadded; // Allocation of 32-byte aligned memory std::unique_ptr<int[],Deallocate> memory( allocate<int>( 5UL ) ); CustomVector<int,aligned,unpadded> a( memory.get(), 5UL ); \endcode // In case the alignment requirements are violated, a \c std::invalid_argument exception is // thrown. // // \n \subsection vector_types_custom_vector_padding Padding // // Adding padding elements to the end of an array can have a significant impact on the performance. // For instance, assuming that AVX is available, then two aligned, padded, 3-dimensional vectors // of double precision values can be added via a single SIMD addition operation: \code using blaze::CustomVector; using blaze::Deallocate; using blaze::allocate; using blaze::aligned; using blaze::padded; using CustomType = CustomVector<double,aligned,padded>; std::unique_ptr<double[],Deallocate> memory1( allocate<double>( 4UL ) ); std::unique_ptr<double[],Deallocate> memory2( allocate<double>( 4UL ) ); std::unique_ptr<double[],Deallocate> memory3( allocate<double>( 4UL ) ); // Creating padded custom vectors of size 3 and a capacity of 4 CustomType a( memory1.get(), 3UL, 4UL ); CustomType b( memory2.get(), 3UL, 4UL ); CustomType c( memory3.get(), 3UL, 4UL ); // ... Initialization c = a + b; // AVX-based vector addition \endcode // In this example, maximum performance is possible. 
However, in case no padding elements are // inserted, a scalar addition has to be used: \code using blaze::CustomVector; using blaze::Deallocate; using blaze::allocate; using blaze::aligned; using blaze::unpadded; using CustomType = CustomVector<double,aligned,unpadded>; std::unique_ptr<double[],Deallocate> memory1( allocate<double>( 3UL ) ); std::unique_ptr<double[],Deallocate> memory2( allocate<double>( 3UL ) ); std::unique_ptr<double[],Deallocate> memory3( allocate<double>( 3UL ) ); // Creating unpadded custom vectors of size 3 CustomType a( memory1.get(), 3UL ); CustomType b( memory2.get(), 3UL ); CustomType c( memory3.get(), 3UL ); // ... Initialization c = a + b; // Scalar vector addition \endcode // Note the different number of constructor parameters for unpadded and padded custom vectors: // In contrast to unpadded vectors, where during the construction only the size of the array // has to be specified, during the construction of a padded custom vector it is additionally // necessary to explicitly specify the capacity of the array. // // The number of padding elements is required to be sufficient with respect to the available // instruction set: In case of an aligned padded custom vector the added padding elements must // guarantee that the capacity is greater than or equal to the size and a multiple of the SIMD vector // width. In case of unaligned padded vectors the number of padding elements can be greater than or // equal to the number of padding elements of an aligned padded custom vector. In case the padding // is insufficient with respect to the available instruction set, a \c std::invalid_argument // exception is thrown. // // Please also note that \b Blaze will zero initialize the padding elements in order to achieve // maximum performance! // // // \n \section vector_types_compressed_vector CompressedVector // <hr> // // The blaze::CompressedVector class is the representation of an arbitrarily sized sparse // vector, which stores only non-zero elements of arbitrary type. It can be included via the // header file \code #include <blaze/math/CompressedVector.h> \endcode // The type of the elements and the transpose flag of the vector can be specified via the two // template parameters: \code template< typename Type, bool TF > class CompressedVector; \endcode // - \c Type: specifies the type of the vector elements. CompressedVector can be used with any // non-cv-qualified, non-reference, non-pointer element type. // - \c TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column // vector (\c blaze::columnVector). The default value is \c blaze::columnVector. 
// // The blaze::CompressedVector is the right choice for all kinds of sparse vectors: \code // Definition of a 3-dimensional integral column vector blaze::CompressedVector<int> a( 3UL ); // Definition of a 4-dimensional single precision column vector with capacity for 3 non-zero elements blaze::CompressedVector<float,blaze::columnVector> b( 4UL, 3UL ); // Definition of a double precision row vector with size 0 blaze::CompressedVector<double,blaze::rowVector> c; \endcode // \n Previous: \ref vectors &nbsp; &nbsp; Next: \ref vector_operations */ //************************************************************************************************* //**Vector Operations****************************************************************************** /*!\page vector_operations Vector Operations // // \tableofcontents // // // \n \section vector_operations_constructors Constructors // <hr> // // Instantiating and setting up a vector is very easy and intuitive. However, there are a few // rules to take care of: // - In case the last template parameter (the transpose flag) is omitted, the vector is per // default a column vector. // - The elements of a \c StaticVector or \c HybridVector are default initialized (i.e. built-in // data types are initialized to 0, class types are initialized via the default constructor). // - Newly allocated elements of a \c DynamicVector or \c CompressedVector remain uninitialized // if they are of built-in type and are default constructed if they are of class type. // // \n \subsection vector_operations_default_construction Default Construction \code using blaze::StaticVector; using blaze::DynamicVector; using blaze::CompressedVector; // All vectors can be default constructed. Whereas the size // of StaticVectors is fixed via the second template parameter, // the initial size of a default constructed DynamicVector or // CompressedVector is 0. StaticVector<int,2UL> v1; // Instantiation of a 2D integer column vector. // All elements are initialized to 0. StaticVector<long,3UL,columnVector> v2; // Instantiation of a 3D long integer column vector. // Again, all elements are initialized to 0L. DynamicVector<float> v3; // Instantiation of a dynamic single precision column // vector of size 0. DynamicVector<double,rowVector> v4; // Instantiation of a dynamic double precision row // vector of size 0. CompressedVector<int> v5; // Instantiation of a compressed integer column // vector of size 0. CompressedVector<double,rowVector> v6; // Instantiation of a compressed double precision row // vector of size 0. \endcode // \n \subsection vector_operations_size_construction Construction with Specific Size // // The \c DynamicVector, \c HybridVector and \c CompressedVector classes offer a constructor that // allows to immediately give the vector the required size. Whereas both dense vectors (i.e. // \c DynamicVector and \c HybridVector) use this information to allocate memory for all vector // elements, \c CompressedVector merely acquires the size but remains empty. \code DynamicVector<int,columnVector> v7( 9UL ); // Instantiation of an integer dynamic column vector // of size 9. The elements are NOT initialized! HybridVector< complex<float>, 5UL > v8( 2UL ); // Instantiation of a column vector with two single // precision complex values. The elements are // default constructed. CompressedVector<int,rowVector> v9( 10UL ); // Instantiation of a compressed row vector with // size 10. Initially, the vector provides no // capacity for non-zero elements. 
\endcode // \n \subsection vector_operations_initialization_constructors Initialization Constructors // // All dense vector classes offer a constructor that allows for a direct, homogeneous initialization // of all vector elements. In contrast, for sparse vectors the predicted number of non-zero elements // can be specified: \code StaticVector<int,3UL,rowVector> v10( 2 ); // Instantiation of a 3D integer row vector. // All elements are initialized to 2. DynamicVector<float> v11( 3UL, 7.0F ); // Instantiation of a dynamic single precision // column vector of size 3. All elements are // set to 7.0F. CompressedVector<float,rowVector> v12( 15UL, 3UL ); // Instantiation of a single precision column // vector of size 15, which provides enough // space for at least 3 non-zero elements. \endcode // \n \subsection vector_operations_array_construction Array Construction // // Alternatively, all dense vector classes offer a constructor for an initialization with a dynamic // or static array. If the vector is initialized from a dynamic array, the constructor expects the // actual size of the array as the first argument and the array as the second argument. In case of a static // array, the fixed size of the array is used: \code const std::unique_ptr<double[]> array1( new double[2] ); // ... Initialization of the dynamic array blaze::StaticVector<double,2UL> v13( 2UL, array1.get() ); int array2[4] = { 4, -5, -6, 7 }; blaze::StaticVector<int,4UL> v14( array2 ); \endcode // \n \subsection vector_operations_initializer_list_construction Initializer List Construction // // In addition, all dense and sparse vector classes can be directly initialized by means of an // initializer list: \code blaze::DynamicVector<float> v15{ 1.0F, 2.0F, 3.0F, 4.0F }; blaze::CompressedVector<int> v16{ 0, 2, 0, 0, 5, 0, 7, 0 }; \endcode // In case of sparse vectors, only the non-zero elements are used to initialize the vector. // // \n \subsection vector_operations_copy_construction Copy Construction // // All dense and sparse vectors can be created as the copy of any other dense or sparse vector // with the same transpose flag (i.e. blaze::rowVector or blaze::columnVector). \code StaticVector<int,9UL,columnVector> v17( v7 ); // Instantiation of the dense column vector v17 // as copy of the dense column vector v7. DynamicVector<int,rowVector> v18( v9 ); // Instantiation of the dense row vector v18 as // copy of the sparse row vector v9. CompressedVector<int,columnVector> v19( v1 ); // Instantiation of the sparse column vector v19 // as copy of the dense column vector v1. CompressedVector<float,rowVector> v20( v12 ); // Instantiation of the sparse row vector v20 as // copy of the row vector v12. \endcode // Note that it is not possible to create a \c StaticVector as a copy of a vector with a different // size: \code StaticVector<int,5UL,columnVector> v21( v7 ); // Runtime error: Size does not match! StaticVector<int,4UL,rowVector> v22( v10 ); // Compile time error: Size does not match! \endcode // \n \section vector_operations_assignment Assignment // <hr> // // There are several types of assignment to dense and sparse vectors: // \ref vector_operations_homogeneous_assignment, \ref vector_operations_array_assignment, // \ref vector_operations_copy_assignment, and \ref vector_operations_compound_assignment. // // \n \subsection vector_operations_homogeneous_assignment Homogeneous Assignment // // Sometimes it may be necessary to assign the same value to all elements of a dense vector. 
// For this purpose, the assignment operator can be used: \code blaze::StaticVector<int,3UL> v1; blaze::DynamicVector<double> v2; // Setting all integer elements of the StaticVector to 2 v1 = 2; // Setting all double precision elements of the DynamicVector to 5.0 v2 = 5.0; \endcode // \n \subsection vector_operations_array_assignment Array Assignment // // Dense vectors can also be assigned a static array: \code blaze::StaticVector<float,2UL> v1; blaze::DynamicVector<double,rowVector> v2; float array1[2] = { 1.0F, 2.0F }; double array2[5] = { 2.1, 4.0, -1.7, 8.6, -7.2 }; v1 = array1; v2 = array2; \endcode // \n \subsection vector_operations_initializer_list_assignment Initializer List Assignment // // Alternatively, it is possible to directly assign an initializer list to a dense or sparse // vector: \code blaze::DynamicVector<float> v1; blaze::CompressedVector<double,rowVector> v2; v1 = { 1.0F, 2.0F }; v2 = { 2.1, 0.0, -1.7, 0.0, -7.2 }; \endcode // In case of sparse vectors, only the non-zero elements are considered. // // \n \subsection vector_operations_copy_assignment Copy Assignment // // For all vector types it is generally possible to assign another vector with the same transpose // flag (i.e. blaze::columnVector or blaze::rowVector). Note that in case of \c StaticVectors, the // assigned vector is required to have the same size as the \c StaticVector since the size of a // \c StaticVector cannot be adapted! \code blaze::StaticVector<int,3UL,columnVector> v1; blaze::DynamicVector<int,columnVector> v2( 3UL ); blaze::DynamicVector<float,columnVector> v3( 5UL ); blaze::CompressedVector<int,columnVector> v4( 3UL ); blaze::CompressedVector<float,rowVector> v5( 3UL ); // ... Initialization of the vectors v1 = v2; // OK: Assignment of a 3D dense column vector to another 3D dense column vector v1 = v4; // OK: Assignment of a 3D sparse column vector to a 3D dense column vector v1 = v3; // Runtime error: Cannot assign a 5D vector to a 3D static vector v1 = v5; // Compilation error: Cannot assign a row vector to a column vector \endcode // \n \subsection vector_operations_compound_assignment Compound Assignment // // Next to plain assignment, it is also possible to use addition assignment, subtraction // assignment, and multiplication assignment. Note, however, that in contrast to plain assignment // the size and the transpose flag of the vectors have to be equal in order to be able to perform a // compound assignment. \code blaze::StaticVector<int,5UL,columnVector> v1; blaze::DynamicVector<int,columnVector> v2( 5UL ); blaze::CompressedVector<float,columnVector> v3( 7UL ); blaze::DynamicVector<float,rowVector> v4( 7UL ); blaze::CompressedVector<float,rowVector> v5( 7UL ); // ... Initialization of the vectors v1 += v2; // OK: Addition assignment between two column vectors of the same size v1 += v3; // Runtime error: No compound assignment between vectors of different size v1 -= v4; // Compilation error: No compound assignment between vectors of different transpose flag v4 *= v5; // OK: Multiplication assignment between two row vectors of the same size \endcode // \n \section vector_operations_element_access Element Access // <hr> // // \n \subsection vector_operations_subscript_operator_1 Subscript Operator // // The easiest and most intuitive way to access a dense or sparse vector is via the subscript // operator. The indices to access a vector are zero-based: \code blaze::DynamicVector<int> v1( 5UL ); v1[0] = 1; v1[1] = 3; // ... 
blaze::CompressedVector<float> v2( 5UL ); v2[2] = 7.3F; v2[4] = -1.4F; \endcode // Whereas using the subscript operator on a dense vector only accesses the already existing // element, accessing an element of a sparse vector via the subscript operator potentially // inserts the element into the vector and may therefore be more expensive. Consider the // following example: \code blaze::CompressedVector<int> v1( 10UL ); for( size_t i=0UL; i<v1.size(); ++i ) { ... = v1[i]; } \endcode // Although the compressed vector is only used for read access within the for loop, using the // subscript operator temporarily inserts 10 non-zero elements into the vector. Therefore the // preferred way to traverse the non-zero elements of a sparse vector is to use iterators. // // \n \subsection vector_operations_iterators Iterators // // All vectors (sparse as well as dense) offer an alternate way via the \c begin(), \c cbegin(), // \c end(), and \c cend() functions to traverse the currently contained elements by iterators. // In case of non-const vectors, \c begin() and \c end() return an \c Iterator, which allows a // manipulation of the non-zero value; in case of a constant vector or in case \c cbegin() or // \c cend() are used, a \c ConstIterator is returned: \code using blaze::CompressedVector; CompressedVector<int> v1( 10UL ); // ... Initialization of the vector // Traversing the vector by Iterator for( CompressedVector<int>::Iterator it=v1.begin(); it!=v1.end(); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the non-zero element. } // Traversing the vector by ConstIterator for( CompressedVector<int>::ConstIterator it=v1.cbegin(); it!=v1.cend(); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the non-zero element. } \endcode // Note that \c begin(), \c cbegin(), \c end(), and \c cend() are also available as free functions: \code for( CompressedVector<int>::Iterator it=begin( v1 ); it!=end( v1 ); ++it ) { // ... } for( CompressedVector<int>::ConstIterator it=cbegin( v1 ); it!=cend( v1 ); ++it ) { // ... } \endcode // \n \section vector_operations_element_insertion Element Insertion // <hr> // // In contrast to dense vectors, which store all elements independent of their value and which // offer direct access to all elements, sparse vectors only store the non-zero elements contained // in the vector. Therefore it is necessary to explicitly add elements to the vector. // // \n \subsection vector_operations_subscript_operator_2 Subscript Operator // // The first option to add elements to a sparse vector is the subscript operator: \code using blaze::CompressedVector; CompressedVector<int> v1( 3UL ); v1[1] = 2; \endcode // In case the element at the given index is not yet contained in the vector, it is automatically // inserted. Otherwise the old value is replaced by the new value 2. The operator returns a // reference to the sparse vector element. 
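// Since a reference to the element is returned, the subscript operator can also be used to
// update an element in place. The following lines are a minimal sketch building on the previous
// example (they are illustrative only and not part of the original example set):

   \code
   v1[1] += 3;  // Updates the element inserted above from 2 to 5
   v1[2]  = 4;  // Inserts a new element with the value 4 at index 2
   \endcode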
// // \n \subsection vector_operations_set .set() // // An alternative to the subscript operator is the \c set() function: In case the element is not // yet contained in the vector the element is inserted, else the element's value is modified: \code // Insert or modify the value at index 3 v1.set( 3, 1 ); \endcode // \n \subsection vector_operations_insert .insert() // // The insertion of elements can be better controlled via the \c insert() function. In contrast to // the subscript operator and the \c set() function it emits an exception in case the element is // already contained in the vector. In order to check for this case, the \c find() function can be // used: \code // In case the element at index 4 is not yet contained in the vector it is inserted // with a value of 6. if( v1.find( 4 ) == v1.end() ) v1.insert( 4, 6 ); \endcode // \n \subsection vector_operations_append .append() // // Although the \c insert() function is very flexible, due to performance reasons it is not suited // for the setup of large sparse vectors. A very efficient, yet also very low-level way to fill // a sparse vector is the \c append() function. It requires the sparse vector to provide enough // capacity to insert a new element. Additionally, the index of the new element must be larger // than the index of the previous element. Violating these conditions results in undefined // behavior! \code v1.reserve( 10 ); // Reserving space for 10 non-zero elements v1.append( 5, -2 ); // Appending the element -2 at index 5 v1.append( 6, 4 ); // Appending the element 4 at index 6 // ... \endcode // \n \section vector_operations_element_removal Element Removal // <hr> // // \subsection vector_operations_erase .erase() // // The \c erase() member functions can be used to remove elements from a sparse vector. The // following example gives an impression of the five different flavors of \c erase(): \code using blaze::CompressedVector; CompressedVector<int> v( 42 ); // ... Initialization of the vector // Erasing the element at index 21 v.erase( 21 ); // Erasing a single element via iterator v.erase( v.find( 4 ) ); // Erasing all non-zero elements in the range [7..24] v.erase( v.lowerBound( 7 ), v.upperBound( 24 ) ); // Erasing all non-zero elements with a value larger than 9 by passing a unary predicate v.erase( []( int i ){ return i > 9; } ); // Erasing all non-zero elements in the range [30..40] with a value larger than 5 v.erase( v.lowerBound( 30 ), v.upperBound( 40 ), []( int i ){ return i > 5; } ); \endcode // \n \section vector_operations_element_lookup Element Lookup // <hr> // // A sparse vector only stores the non-zero elements contained in the vector. Therefore, whenever // accessing a vector element at a specific index a lookup operation is required. Whereas the // subscript operator is performing this lookup automatically, it is also possible to use the // \c find(), \c lowerBound(), and \c upperBound() member functions for a manual lookup. // // \n \subsection vector_operations_find .find() // // The \c find() function can be used to check whether a specific element is contained in a sparse // vector. It specifically searches for the element at the given index. In case the element is // found, the function returns an iterator to the element. Otherwise an iterator just past the // last non-zero element of the compressed vector (the \c end() iterator) is returned. 
Note that // the returned iterator is subject to invalidation due to inserting operations via the subscript // operator, the \c set() function or the \c insert() function! \code using blaze::CompressedVector; CompressedVector<int> a( 42 ); // ... Initialization of the vector // Searching the element at index 7. In case the element is not // contained in the vector, the end() iterator is returned. CompressedVector<int>::Iterator pos( a.find( 7 ) ); if( pos != a.end() ) { // ... } \endcode // \n \subsection vector_operations_lowerbound .lowerBound() // // The \c lowerBound() function returns an iterator to the first element with an index not less // than the given index. In combination with the \c upperBound() function this function can be // used to create a pair of iterators specifying a range of indices. Note that the returned // iterator is subject to invalidation due to inserting operations via the subscript operator, // the \c set() function or the \c insert() function! \code using blaze::CompressedVector; CompressedVector<int> a( 42 ); // ... Initialization of the vector // Searching the lower bound of index 17. CompressedVector<int>::Iterator pos1( a.lowerBound( 17 ) ); // Searching the upper bound of index 28 CompressedVector<int>::Iterator pos2( a.upperBound( 28 ) ); // Erasing all elements in the specified range a.erase( pos1, pos2 ); \endcode // \n \subsection vector_operations_upperbound .upperBound() // // The \c upperBound() function returns an iterator to the first element with an index greater than // the given index. In combination with the \c lowerBound() function this function can be used to // create a pair of iterators specifying a range of indices. Note that the returned iterator is // subject to invalidation due to inserting operations via the subscript operator, the \c set() // function or the \c insert() function! \code using blaze::CompressedVector; CompressedVector<int> a( 42 ); // ... Initialization of the vector // Searching the lower bound of index 17. CompressedVector<int>::Iterator pos1( a.lowerBound( 17 ) ); // Searching the upper bound of index 28 CompressedVector<int>::Iterator pos2( a.upperBound( 28 ) ); // Erasing all elements in the specified range a.erase( pos1, pos2 ); \endcode // \n \section vector_operations_non_modifying_operations Non-Modifying Operations // <hr> // // \subsection vector_operations_size .size() / size() // // Via the \c size() member function, the current size of a dense or sparse vector can be queried: \code // Instantiating a dynamic vector with size 10 blaze::DynamicVector<int> v1( 10UL ); v1.size(); // Returns 10 // Instantiating a compressed vector with size 12 and capacity for 3 non-zero elements blaze::CompressedVector<double> v2( 12UL, 3UL ); v2.size(); // Returns 12 \endcode // Alternatively, the free function \c size() can be used to query the current size of a vector. // In contrast to the member function, the free function can also be used to query the size of // vector expressions: \code size( v1 ); // Returns 10, i.e. has the same effect as the member function size( v2 ); // Returns 12, i.e. has the same effect as the member function blaze::DynamicMatrix<int> A( 15UL, 12UL ); size( A * v2 ); // Returns 15, i.e. the size of the resulting vector \endcode // \n \subsection vector_operations_capacity .capacity() / capacity() // // Via the \c capacity() (member) function the internal capacity of a dense or sparse vector // can be queried. 
Note that the capacity of a vector doesn't have to be equal to the size // of a vector. In case of a dense vector the capacity will always be greater than or equal to // the size of the vector; in case of a sparse vector the capacity may even be less than // the size. \code v1.capacity(); // Returns at least 10 \endcode // For symmetry reasons, there is also a free function \c capacity() available that can be used // to query the capacity: \code capacity( v1 ); // Returns at least 10, i.e. has the same effect as the member function \endcode // Note, however, that it is not possible to query the capacity of a vector expression: \code capacity( A * v1 ); // Compilation error! \endcode // \n \subsection vector_operations_nonzeros .nonZeros() / nonZeros() // // For both dense and sparse vectors the number of non-zero elements can be determined via the // \c nonZeros() member function. Sparse vectors directly return their number of non-zero // elements, dense vectors traverse their elements and count the number of non-zero elements. \code v1.nonZeros(); // Returns the number of non-zero elements in the dense vector v2.nonZeros(); // Returns the number of non-zero elements in the sparse vector \endcode // There is also a free function \c nonZeros() available to query the current number of non-zero // elements: \code nonZeros( v1 ); // Returns the number of non-zero elements in the dense vector nonZeros( v2 ); // Returns the number of non-zero elements in the sparse vector \endcode // The free \c nonZeros() function can also be used to query the number of non-zero elements in // a vector expression. However, the result is not the exact number of non-zero elements, but // may be a rough estimation: \code nonZeros( A * v1 ); // Estimates the number of non-zero elements in the vector expression \endcode // \n \subsection vector_operations_isempty isEmpty() // // The \c isEmpty() function returns whether the total number of elements of the vector is zero: \code blaze::DynamicVector<int> a; // Create an empty vector isEmpty( a ); // Returns true a.resize( 10 ); // Resize to 10 elements isEmpty( a ); // Returns false \endcode // \n \subsection vector_operations_isnan isnan() // // The \c isnan() function provides the means to check a dense or sparse vector for not-a-number // elements: \code blaze::DynamicVector<double> a; // ... Resizing and initialization if( isnan( a ) ) { ... } \endcode \code blaze::CompressedVector<double> a; // ... Resizing and initialization if( isnan( a ) ) { ... } \endcode // If at least one element of the vector is not-a-number, the function returns \c true, otherwise // it returns \c false. Please note that this function only works for vectors with floating point // elements. The attempt to use it for a vector with a non-floating point element type results in // a compile time error. // // // \n \subsection vector_operations_isdefault isDefault() // // The \c isDefault() function returns whether the given dense or sparse vector is in default state: \code blaze::HybridVector<int,20UL> a; // ... Resizing and initialization if( isDefault( a ) ) { ... } \endcode // A vector is in default state if it appears to just have been default constructed. All resizable // vectors (\c HybridVector, \c DynamicVector, or \c CompressedVector) and \c CustomVector are // in default state if their size is equal to zero. A non-resizable vector (\c StaticVector, all // subvectors, element selections, rows, and columns) is in default state if all its elements are // in default state. 
For instance, in case the vector is instantiated for a built-in integral or // floating point data type, the function returns \c true in case all vector elements are 0 and // \c false in case any vector element is not 0. // // // \n \subsection vector_operations_isUniform isUniform() // // In order to check if all vector elements are identical, the \c isUniform function can be used: \code blaze::DynamicVector<int> a; // ... Resizing and initialization if( isUniform( a ) ) { ... } \endcode // Note that in case of sparse vectors also the zero elements are also taken into account! // // // \n \subsection vector_operations_length length() / sqrLength() // // In order to calculate the length (magnitude) of a dense or sparse vector, both the \c length() // and \c sqrLength() function can be used: \code blaze::StaticVector<float,3UL,rowVector> v{ -1.2F, 2.7F, -2.3F }; const float len = length ( v ); // Computes the current length of the vector const float sqrlen = sqrLength( v ); // Computes the square length of the vector \endcode // Note that both functions can only be used for vectors with built-in or complex element type! // // // \n \subsection vector_operations_vector_trans trans() // // As already mentioned, vectors can either be column vectors (blaze::columnVector) or row vectors // (blaze::rowVector). A column vector cannot be assigned to a row vector and vice versa. However, // vectors can be transposed via the \c trans() function: \code blaze::DynamicVector<int,columnVector> v1( 4UL ); blaze::CompressedVector<int,rowVector> v2( 4UL ); v1 = v2; // Compilation error: Cannot assign a row vector to a column vector v1 = trans( v2 ); // OK: Transposing the row vector to a column vector and assigning it // to the column vector v1 v2 = trans( v1 ); // OK: Transposing the column vector v1 and assigning it to the row vector v2 v1 += trans( v2 ); // OK: Addition assignment of two column vectors \endcode // \n \subsection vector_operations_ctrans ctrans() // // It is also possible to compute the conjugate transpose of a vector. This operation is available // via the \c ctrans() function: \code blaze::CompressedVector< complex<float>, rowVector > v1( 4UL ); blaze::DynamicVector< complex<float>, columnVector > v2( 4UL ); v1 = ctrans( v2 ); // Compute the conjugate transpose vector \endcode // Note that the \c ctrans() function has the same effect as manually applying the \c conj() and // \c trans() function in any order: \code v1 = trans( conj( v2 ) ); // Computing the conjugate transpose vector v1 = conj( trans( v2 ) ); // Computing the conjugate transpose vector \endcode // \n \subsection vector_operations_evaluate eval() / evaluate() // // The \c evaluate() function forces an evaluation of the given vector expression and enables // an automatic deduction of the correct result type of an operation. The following code example // demonstrates its intended use for the multiplication of a dense and a sparse vector: \code using blaze::DynamicVector; using blaze::CompressedVector; blaze::DynamicVector<double> a; blaze::CompressedVector<double> b; // ... Resizing and initialization auto c = evaluate( a * b ); \endcode // In this scenario, the \c evaluate() function assists in deducing the exact result type of // the operation via the \c auto keyword. Please note that if \c evaluate() is used in this // way, no temporary vector is created and no copy operation is performed. Instead, the result // is directly written to the target vector due to the return value optimization (RVO). 
However, // if \c evaluate() is used in combination with an explicit target type, a temporary will be // created and a copy operation will be performed if the used type differs from the type // returned from the function: \code CompressedVector<double> d( a * b ); // No temporary & no copy operation DynamicVector<double> e( a * b ); // Temporary & copy operation d = evaluate( a * b ); // Temporary & copy operation \endcode // Sometimes it might be desirable to explicitly evaluate a sub-expression within a larger // expression. However, please note that \c evaluate() is not intended to be used for this // purpose. This task is more elegantly and efficiently handled by the \c eval() function: \code blaze::DynamicVector<double> a, b, c, d; d = a + evaluate( b * c ); // Unnecessary creation of a temporary vector d = a + eval( b * c ); // No creation of a temporary vector \endcode // In contrast to the \c evaluate() function, \c eval() can take the complete expression // into account and therefore can guarantee the most efficient way to evaluate it (see also // \ref intra_statement_optimization). // // // \n \section vector_operations_modifying_operations Modifying Operations // <hr> // // \subsection vector_operations_resize_reserve .resize() / .reserve() // // The size of a \c StaticVector is fixed by the second template parameter and a \c CustomVector // cannot be resized. In contrast, the size of \c DynamicVectors, \c HybridVectors as well as // \c CompressedVectors can be changed via the \c resize() function: \code using blaze::DynamicVector; using blaze::CompressedVector; DynamicVector<int,columnVector> v1; CompressedVector<int,rowVector> v2( 4 ); v2[1] = -2; v2[3] = 11; // Adapting the size of the dynamic and compressed vectors. The (optional) second parameter // specifies whether the existing elements should be preserved. Per default, the existing // elements are preserved. v1.resize( 5UL ); // Resizing vector v1 to 5 elements. Elements of built-in type remain // uninitialized, elements of class type are default constructed. v1.resize( 3UL, false ); // Resizing vector v1 to 3 elements. The old elements are lost, the // new elements are NOT initialized! v2.resize( 8UL, true ); // Resizing vector v2 to 8 elements. The old elements are preserved. v2.resize( 5UL, false ); // Resizing vector v2 to 5 elements. The old elements are lost. \endcode // Note that resizing a vector invalidates all existing views (see e.g. \ref views_subvectors) // on the vector: \code blaze::DynamicVector<int,rowVector> v1( 10UL ); // Creating a dynamic vector of size 10 auto sv = subvector( v1, 2UL, 5UL ); // Creating a view on the range [2..6] v1.resize( 6UL ); // Resizing the vector invalidates the view \endcode // When the internal capacity of a vector is no longer sufficient, the allocation of a larger // chunk of memory is triggered. In order to avoid frequent reallocations, the \c reserve() // function can be used up front to set the internal capacity: \code blaze::DynamicVector<int> v1; v1.reserve( 100 ); v1.size(); // Returns 0 v1.capacity(); // Returns at least 100 \endcode // Note that the size of the vector remains unchanged, but only the internal capacity is set // according to the specified value! // // \n \subsection vector_operations_shrinkToFit .shrinkToFit() // // The internal capacity of vectors with dynamic memory is preserved in order to minimize the // number of reallocations. For that reason, the \c resize() and \c reserve() functions can lead // to memory overhead. 
The \c shrinkToFit() member function can be used to minimize the internal // capacity: \code blaze::DynamicVector<int> v1( 1000UL ); // Create a vector of 1000 integers v1.resize( 10UL ); // Resize to 10, but the capacity is preserved v1.shrinkToFit(); // Remove the unused capacity \endcode // Please note that due to padding the capacity might not be reduced exactly to \c size(). Please // also note that in case a reallocation occurs, all iterators (including \c end() iterators), all // pointers and references to elements of the vector are invalidated. // // \subsection vector_operations_reset_clear reset() / clear() // // In order to reset all elements of a vector, the \c reset() function can be used: \code // Setup of a single precision column vector, whose elements are initialized with 2.0F. blaze::DynamicVector<float> v1( 3UL, 2.0F ); // Resetting all elements to 0.0F. Only the elements are reset, the size of the vector is unchanged. reset( v1 ); // Resetting all elements v1.size(); // Returns 3: size and capacity remain unchanged \endcode // In order to return a vector to its default state (i.e. the state of a default constructed // vector), the \c clear() function can be used: \code // Setup of a single precision column vector, whose elements are initialized with -1.0F. blaze::DynamicVector<float> v1( 5, -1.0F ); // Resetting the entire vector. clear( v1 ); // Resetting the entire vector v1.size(); // Returns 0: size is reset, but capacity remains unchanged \endcode // Note that resetting or clearing both dense and sparse vectors does not change the capacity // of the vectors. // // // \n \subsection vector_operations_swap swap() // // Via the \c swap() function it is possible to completely swap the contents of two vectors of // the same type: \code blaze::DynamicVector<int,columnVector> v1( 10UL ); blaze::DynamicVector<int,columnVector> v2( 20UL ); swap( v1, v2 ); // Swapping the contents of v1 and v2 \endcode // \n \section vector_operations_arithmetic_operations Arithmetic Operations // <hr> // // \subsection vector_operations_normalize normalize() // // The \c normalize() function can be used to scale any non-zero vector to a length of 1. In // case the vector does not contain a single non-zero element (i.e. is a zero vector), the // \c normalize() function returns a zero vector. \code blaze::DynamicVector<float,columnVector> v1( 10UL ); blaze::CompressedVector<double,columnVector> v2( 12UL ); v1 = normalize( v1 ); // Normalizing the dense vector v1 length( v1 ); // Returns 1 (or 0 in case of a zero vector) v1 = normalize( v2 ); // Assigning v1 the normalized vector v2 length( v1 ); // Returns 1 (or 0 in case of a zero vector) \endcode // Note that the \c normalize() function only works for floating point vectors. The attempt to // use it for an integral vector results in a compile time error. // // // \n \subsection vector_operations_min_max min() / max() // // The \c min() and \c max() functions can be used for a single vector or multiple vectors. 
If // passed a single vector, the functions return the smallest and largest element of the given // dense vector or the smallest and largest non-zero element of the given sparse vector, // respectively: \code blaze::StaticVector<int,4UL,rowVector> a{ -5, 2, 7, -4 }; min( a ); // Returns -5 max( a ); // Returns 7 \endcode \code blaze::CompressedVector<int> b{ 1, 0, 3, 0 }; min( b ); // Returns 1 max( b ); // Returns 3 \endcode // For more information on the unary \c min() and \c max() reduction operations see the // \ref vector_operations_reduction_operations section. // // If passed two or more dense vectors, the \c min() and \c max() functions compute the // componentwise minimum or maximum of the given vectors, respectively: \code blaze::StaticVector<int,4UL,rowVector> c{ -5, 1, -7, 4 }; blaze::StaticVector<int,4UL,rowVector> d{ -5, 3, 0, 2 }; min( a, c ); // Results in the vector ( -5, 1, -7, -4 ) max( a, c, d ); // Results in the vector ( -5, 3, 7, 4 ) \endcode // Please note that sparse vectors can only be used in the unary \c min() and \c max() functions. // Also note that all forms of the \c min() and \c max() functions can be used to compute the // smallest and largest element of a vector expression: \code min( a + b + c ); // Returns -9, i.e. the smallest value of the resulting vector max( a - b - c ); // Returns 11, i.e. the largest value of the resulting vector min( a + c, c - d ); // Results in ( -10 -2 -7 0 ) max( a - c, c + d ); // Results in ( 0 4 14 6 ) \endcode // \n \subsection vector_operators_softmax softmax() // // The <a href="https://en.wikipedia.org/wiki/Softmax_function">softmax function</a>, also called // the normalized exponential function, of a given dense vector can be computed via \c softmax(). // The resulting dense vector consists of real values in the range (0..1], which add up to 1. \code blaze::StaticVector<double,7UL,rowVector> x{ 1.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0 }; blaze::StaticVector<double,7UL,rowVector> y; // Evaluating the softmax function y = softmax( x ); // Results in ( 0.024 0.064 0.175 0.475 0.024 0.064 0.175 ) double s = sum( y ); // Results in 1 \endcode // \n \subsection vector_operators_abs abs() // // The \c abs() function can be used to compute the absolute values of each element of a vector. // For instance, the following computation \code blaze::StaticVector<int,3UL,rowVector> a{ -1, 2, -3 }; blaze::StaticVector<int,3UL,rowVector> b( abs( a ) ); \endcode // results in the vector \f$ b = \left(\begin{array}{*{1}{c}} 1 \\ 2 \\ 3 \\ \end{array}\right)\f$ // \n \subsection vector_operators_sign sign() // // The \c sign() function can be used to evaluate the sign of each element of a vector \a a. For // each element \c i the corresponding result is 1 if \a a[i] is greater than zero, 0 if \a a[i] // is zero, and -1 if \a a[i] is less than zero. 
For instance, the following use of the \c sign() // function \code blaze::StaticVector<int,3UL,rowVector> a{ -1, 2, 0 }; blaze::StaticVector<int,3UL,rowVector> b( sign( a ) ); \endcode // results in the vector \f$ b = \left(\begin{array}{*{1}{c}} -1 \\ 1 \\ 0 \\ \end{array}\right)\f$ // \n \subsection vector_operations_rounding_functions floor() / ceil() / trunc() / round() // // The \c floor(), \c ceil(), \c trunc(), and \c round() functions can be used to round down/up // each element of a vector, respectively: \code blaze::StaticVector<double,3UL,rowVector> a, b; b = floor( a ); // Rounding down each element of the vector b = ceil ( a ); // Rounding up each element of the vector b = trunc( a ); // Truncating each element of the vector b = round( a ); // Rounding each element of the vector \endcode // \n \subsection vector_operators_conj conj() // // The \c conj() function can be applied on a dense or sparse vector to compute the complex // conjugate of each element of the vector: \code using blaze::StaticVector; using cplx = std::complex<double>; // Creating the vector // ( (-2,-1) ) // ( ( 1, 1) ) StaticVector<cplx,2UL> a{ cplx(-2.0,-1.0), cplx(1.0,1.0) }; // Computing the vector of complex conjugates // ( (-2, 1) ) // ( ( 1,-1) ) StaticVector<cplx,2UL> b; b = conj( a ); \endcode // Additionally, vectors can be conjugated in-place via the \c conjugate() function: \code blaze::DynamicVector<cplx> c( 5UL ); conjugate( c ); // In-place conjugate operation. c = conj( c ); // Same as above \endcode // \n \subsection vector_operators_real real() // // The \c real() function can be used on a dense or sparse vector to extract the real part of // each element of the vector: \code using blaze::StaticVector; using cplx = std::complex<double>; // Creating the vector // ( (-2,-1) ) // ( ( 1, 1) ) StaticVector<cplx,2UL> a{ cplx(-2.0,-1.0), cplx(1.0,1.0) }; // Extracting the real part of each vector element // ( -2 ) // ( 1 ) StaticVector<double,2UL> b; b = real( a ); \endcode // \n \subsection vector_operators_imag imag() // // The \c imag() function can be used on a dense or sparse vector to extract the imaginary part // of each element of the vector: \code using blaze::StaticVector; using cplx = std::complex<double>; // Creating the vector // ( (-2,-1) ) // ( ( 1, 1) ) StaticVector<cplx,2UL> a{ cplx(-2.0,-1.0), cplx(1.0,1.0) }; // Extracting the imaginary part of each vector element // ( -1 ) // ( 1 ) StaticVector<double,2UL> b; b = imag( a ); \endcode // \n \subsection vector_operations_sqrt sqrt() / invsqrt() // // Via the \c sqrt() and \c invsqrt() functions the (inverse) square root of each element of a // vector can be computed: \code blaze::DynamicVector<double> a, b, c; b = sqrt( a ); // Computes the square root of each element c = invsqrt( a ); // Computes the inverse square root of each element \endcode // Note that in case of sparse vectors only the non-zero elements are taken into account! // // // \n \subsection vector_operations_cbrt cbrt() / invcbrt() // // The \c cbrt() and \c invcbrt() functions can be used to compute the the (inverse) cubic root // of each element of a vector: \code blaze::HybridVector<double,3UL> a, b, c; b = cbrt( a ); // Computes the cubic root of each element c = invcbrt( a ); // Computes the inverse cubic root of each element \endcode // Note that in case of sparse vectors only the non-zero elements are taken into account! 
// // // \n \subsection vector_operations_hypot hypot() // // The \c hypot() function can be used to compute the componentwise hypotenuse for a pair of // dense vectors: \code blaze::StaticVector<double,3UL> a, b, c; c = hypot( a, b ); // Computes the componentwise hypotenuse \endcode // \n \subsection vector_operations_clamp clamp() // // The \c clamp() function can be used to restrict all elements of a vector to a specific range: \code blaze::DynamicVector<double> a, b; b = clamp( a, -1.0, 1.0 ); // Restrict all elements to the range [-1..1] \endcode // Note that in case of sparse vectors only the non-zero elements are taken into account! // // // \n \subsection vector_operations_pow pow() // // The \c pow() function can be used to compute the exponential value of each element of a vector. // If passed a vector and a numeric exponent, the function computes the exponential value of each // element of the vector using the same exponent. If passed a second vector, the function computes // the componentwise exponential value: \code blaze::StaticVector<double,3UL> a, b, c; c = pow( a, 1.2 ); // Computes the exponential value of each element c = pow( a, b ); // Computes the componentwise exponential value \endcode // \n \subsection vector_operations_exp exp() / exp2() / exp10() // // \c exp(), \c exp2() and \c exp10() compute the base e/2/10 exponential of each element of a // vector, respectively: \code blaze::DynamicVector<double> a, b; b = exp( a ); // Computes the base e exponential of each element b = exp2( a ); // Computes the base 2 exponential of each element b = exp10( a ); // Computes the base 10 exponential of each element \endcode // Note that in case of sparse vectors only the non-zero elements are taken into account! // // // \n \subsection vector_operations_log log() / log2() / log10() // // The \c log(), \c log2() and \c log10() functions can be used to compute the natural, binary // and common logarithm of each element of a vector: \code blaze::StaticVector<double,3UL> a, b; b = log( a ); // Computes the natural logarithm of each element b = log2( a ); // Computes the binary logarithm of each element b = log10( a ); // Computes the common logarithm of each element \endcode // \n \subsection vector_operations_trigonometric_functions sin() / cos() / tan() / asin() / acos() / atan() // // The following trigonometric functions are available for both dense and sparse vectors: \code blaze::DynamicVector<double> a, b; b = sin( a ); // Computes the sine of each element of the vector b = cos( a ); // Computes the cosine of each element of the vector b = tan( a ); // Computes the tangent of each element of the vector b = asin( a ); // Computes the inverse sine of each element of the vector b = acos( a ); // Computes the inverse cosine of each element of the vector b = atan( a ); // Computes the inverse tangent of each element of the vector \endcode // Note that in case of sparse vectors only the non-zero elements are taken into account! 
// // // \n \subsection vector_operations_hyperbolic_functions sinh() / cosh() / tanh() / asinh() / acosh() / atanh() // // The following hyperbolic functions are available for both dense and sparse vectors: \code blaze::DynamicVector<double> a, b; b = sinh( a ); // Computes the hyperbolic sine of each element of the vector b = cosh( a ); // Computes the hyperbolic cosine of each element of the vector b = tanh( a ); // Computes the hyperbolic tangent of each element of the vector b = asinh( a ); // Computes the inverse hyperbolic sine of each element of the vector b = acosh( a ); // Computes the inverse hyperbolic cosine of each element of the vector b = atanh( a ); // Computes the inverse hyperbolic tangent of each element of the vector \endcode // Note that in case of sparse vectors only the non-zero elements are taken into account! // // // \n \subsection vector_operations_atan2 atan2() // // The multi-valued inverse tangent is available for a pair of dense vectors: \code blaze::DynamicVector<double> a, b, c; c = atan2( a, b ); // Computes the componentwise multi-valued inverse tangent \endcode // \n \subsection vector_operations_erf erf() / erfc() // // The \c erf() and \c erfc() functions compute the (complementary) error function of each // element of a vector: \code blaze::StaticVector<double,3UL,rowVector> a, b; b = erf( a ); // Computes the error function of each element b = erfc( a ); // Computes the complementary error function of each element \endcode // Note that in case of sparse vectors only the non-zero elements are taken into account! // // // \n \subsection vector_operations_map map() / forEach() // // Via the unary and binary \c map() functions it is possible to execute componentwise custom // operations on vectors. The unary \c map() function can be used to apply a custom operation // on each element of a dense or sparse vector. For instance, the following example demonstrates // a custom square root computation via a lambda: \code blaze::DynamicVector<double> a, b; b = map( a, []( double d ) { return std::sqrt( d ); } ); \endcode // The binary \c map() function can be used to apply an operation pairwise to the elements of // two dense vectors. The following example demonstrates the merging of two vectors of double // precision values into a vector of double precision complex numbers: \code blaze::DynamicVector<double> real{ 2.1, -4.2, 1.0, 0.6 }; blaze::DynamicVector<double> imag{ 0.3, 1.4, 2.9, -3.4 }; blaze::DynamicVector< complex<double> > cplx; // Creating the vector // ( ( 2.1, 0.3) ) // ( (-4.2, 1.4) ) // ( ( 1.0, 2.9) ) // ( ( 0.6, -3.4) ) cplx = map( real, imag, []( double r, double i ){ return complex<double>( r, i ); } ); \endcode // Although the computation can be parallelized it is not vectorized and thus cannot perform at // peak performance. However, it is also possible to create vectorized custom operations. See // \ref custom_operations for a detailed overview of the possibilities of custom operations. // // Please note that unary custom operations on vectors have been introduced in \b Blaze 3.0 in // form of the \c forEach() function. With the introduction of binary custom functions, the // \c forEach() function has been renamed to \c map(). The \c forEach() function can still be // used (even for binary custom operations), but the function might be deprecated in future // releases of \b Blaze. 
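// As a minimal sketch of this legacy interface, the unary square root example from above can
// equivalently be written with \c forEach(); the two calls are assumed to be interchangeable:

   \code
   blaze::DynamicVector<double> a, b;

   b = forEach( a, []( double d ) { return std::sqrt( d ); } );  // Same result as map()
   \endcode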
// // // \n \section vector_operations_reduction_operations Reduction Operations // <hr> // // \subsection vector_operations_reduction_operations_reduce reduce() // // The \c reduce() function performs a total reduction of the elements of the given dense vector // or the non-zero elements of the given sparse vector. The following examples demonstrate the // total reduction of a dense and sparse vector: \code blaze::DynamicVector<double> a; // ... Resizing and initialization const double totalsum1 = reduce( a, blaze::Add() ); const double totalsum2 = reduce( a, []( double a, double b ){ return a + b; } ); \endcode \code blaze::CompressedVector<double> a; // ... Resizing and initialization const double totalmin1 = reduce( a, blaze::Min() ); const double totalmin2 = reduce( a, []( double a, double b ){ return blaze::min( a, b ); } ); \endcode // As demonstrated in the examples it is possible to pass any binary callable as custom reduction // operation. However, for instance in the case of lambdas the vectorization of the reduction // operation is compiler dependent and might not perform at peak performance. However, it is also // possible to create vectorized custom operations. See \ref custom_operations for a detailed // overview of the possibilities of custom operations. // // Please note that the evaluation order of the \c reduce() function is unspecified. Thus the // behavior is non-deterministic if the given reduction operation is not associative or not // commutative. Also, the operation is undefined if the given reduction operation modifies the // values. // // \n \subsection vector_operations_reduction_operations_sum sum() // // The \c sum() function reduces the elements of the given dense vector or the non-zero elements // of the given sparse vector by means of addition: \code blaze::DynamicVector<int> a{ 1, 2, 3, 4 }; const int totalsum = sum( a ); // Results in 10 \endcode \code blaze::CompressedVector<int> a{ 1, 2, 3, 4 }; const int totalsum = sum( a ); // Results in 10 \endcode // Please note that the evaluation order of the \c sum() function is unspecified. // // \n \subsection vector_operations_reduction_operations_prod prod() // // The \c prod() function reduces the elements of the given dense vector or the non-zero elements // of the given sparse vector by means of multiplication: \code blaze::DynamicVector<int> a{ 1, 2, 3, 4 }; const int totalprod = prod( a ); // Results in 24 \endcode \code blaze::CompressedVector<int> a{ 1, 2, 3, 4 }; const int totalprod = prod( a ); // Results in 24 \endcode // \n \subsection vector_operations_reduction_operations_min min() // // The unary \c min() function returns the smallest element of the given dense vector or the // smallest non-zero element of the given sparse vector. It can only be used for element types // that support the smaller-than relationship. In case the given vector currently has a size // of 0, the returned value is the default value (e.g. 0 in case of fundamental data types). \code blaze::DynamicVector<int> a{ 1, -2, 3, 0 }; const int totalmin = min( a ); // Results in -2 \endcode \code blaze::CompressedVector<int> a{ 1, 0, 3, 0 }; const int totalmin = min( a ); // Results in 1 \endcode // \note In case the sparse vector is not completely filled, the implicit zero elements are NOT // taken into account. In the previous example the compressed vector has only 2 non-zero elements. // However, the minimum of the vector is 1. 
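//
// Returning to the \c reduce() function from above, the following sketch (with hypothetical
// values) shows a reduction operation that is neither associative nor commutative. Since the
// evaluation order of \c reduce() is unspecified, the result of such a reduction is unspecified
// as well and may differ between \b Blaze releases, compiler settings, or target architectures:

   \code
   blaze::DynamicVector<double> a{ 1.0, 2.0, 3.0, 4.0 };

   // The value of 'r' is unspecified, since subtraction is not associative
   const double r = reduce( a, []( double x, double y ){ return x - y; } );
   \endcode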
// // \n \subsection vector_operations_reduction_operations_max max() // // The unary \c max() function returns the largest element of the given dense vector or the // largest non-zero element of the given sparse vector. It can only be used for element types // that support the smaller-than relationship. In case the given vector currently has a size // of 0, the returned value is the default value (e.g. 0 in case of fundamental data types). \code blaze::DynamicVector<int> a{ 1, -2, 3, 0 }; const int totalmax = max( a ); // Results in 3 \endcode \code blaze::CompressedVector<int> a{ -1, 0, -3, 0 }; const int totalmax = max( a ); // Results in -1 \endcode // \note In case the sparse vector is not completely filled, the implicit zero elements are NOT // taken into account. In the previous example the compressed vector has only 2 non-zero elements. // However, the maximum of the vector is -1. // // // \n \section vector_operations_norms Norms // <hr> // // \subsection vector_operations_norms_norm norm() // // The \c norm() function computes the L2 norm of the given dense or sparse vector: \code blaze::DynamicVector<double> a; // ... Resizing and initialization const double l2 = norm( a ); \endcode // \n \subsection vector_operations_norms_sqrnorm sqrNorm() // // The \c sqrNorm() function computes the squared L2 norm of the given dense or sparse vector: \code blaze::DynamicVector<double> a; // ... Resizing and initialization const double l2 = sqrNorm( a ); \endcode // \n \subsection vector_operations_norms_l1norm l1Norm() // // The \c l1Norm() function computes the L1 norm of the given dense or sparse vector: \code blaze::DynamicVector<double> a; // ... Resizing and initialization const double l1 = l1Norm( a ); \endcode // \n \subsection vector_operations_norms_l2norm l2Norm() // // The \c l2Norm() function computes the L2 norm of the given dense or sparse vector: \code blaze::DynamicVector<double> a; // ... Resizing and initialization const double l2 = l2Norm( a ); \endcode // \n \subsection vector_operations_norms_l3norm l3Norm() // // The \c l3Norm() function computes the L3 norm of the given dense or sparse vector: \code blaze::DynamicVector<double> a; // ... Resizing and initialization const double l3 = l3Norm( a ); \endcode // \n \subsection vector_operations_norms_l4norm l4Norm() // // The \c l4Norm() function computes the L4 norm of the given dense or sparse vector: \code blaze::DynamicVector<double> a; // ... Resizing and initialization const double l4 = l4Norm( a ); \endcode // \n \subsection vector_operations_norms_lpnorm lpNorm() // // The \c lpNorm() function computes the general Lp norm of the given dense or sparse vector, // where the norm is specified by either a compile time or a runtime argument: \code blaze::DynamicVector<double> a; // ... Resizing and initialization const double lp1 = lpNorm<2>( a ); // Compile time argument const double lp2 = lpNorm( a, 2.3 ); // Runtime argument \endcode // \n \subsection vector_operations_norms_maxnorm maxNorm() // // The \c maxNorm() function computes the maximum norm of the given dense or sparse vector: \code blaze::DynamicVector<double> a; // ...
Resizing and initialization const double max = maxNorm( a ); \endcode // \n Previous: \ref vector_types &nbsp; &nbsp; Next: \ref matrices */ //************************************************************************************************* //**Matrices*************************************************************************************** /*!\page matrices Matrices // // \tableofcontents // // // \n \section matrices_general General Concepts // <hr> // // The \b Blaze library currently offers four dense matrix types (\ref matrix_types_static_matrix, // \ref matrix_types_dynamic_matrix, \ref matrix_types_hybrid_matrix, and \ref matrix_types_custom_matrix) // and one sparse matrix type (\ref matrix_types_compressed_matrix). All matrices can either be // stored as row-major matrices or column-major matrices: \code using blaze::DynamicMatrix; using blaze::rowMajor; using blaze::columnMajor; // Setup of the 2x3 row-major dense matrix // // ( 1 2 3 ) // ( 4 5 6 ) // DynamicMatrix<int,rowMajor> A{ { 1, 2, 3 }, { 4, 5, 6 } }; // Setup of the 3x2 column-major dense matrix // // ( 1 4 ) // ( 2 5 ) // ( 3 6 ) // DynamicMatrix<int,columnMajor> B{ { 1, 4 }, { 2, 5 }, { 3, 6 } }; \endcode // Per default, all matrices in \b Blaze are row-major matrices: \code // Instantiation of a 3x3 row-major matrix blaze::DynamicMatrix<int> C( 3UL, 3UL ); \endcode // \n \section matrices_details Matrix Details // <hr> // // - \ref matrix_types // - \ref matrix_operations // // // \n \section matrices_examples Examples // <hr> \code using blaze::StaticMatrix; using blaze::DynamicMatrix; using blaze::CompressedMatrix; using blaze::rowMajor; using blaze::columnMajor; StaticMatrix<double,6UL,20UL> A; // Instantiation of a 6x20 row-major static matrix CompressedMatrix<double,rowMajor> B; // Instantiation of a row-major compressed matrix DynamicMatrix<double,columnMajor> C; // Instantiation of a column-major dynamic matrix // ... Resizing and initialization C = A * B; \endcode // \n Previous: \ref vector_operations &nbsp; &nbsp; Next: \ref matrix_types */ //************************************************************************************************* //**Matrix Types*********************************************************************************** /*!\page matrix_types Matrix Types // // \tableofcontents // // // \n \section matrix_types_static_matrix StaticMatrix // <hr> // // The blaze::StaticMatrix class template is the representation of a fixed size matrix with // statically allocated elements of arbitrary type. It can be included via the header file \code #include <blaze/math/StaticMatrix.h> \endcode // The type of the elements, the number of rows and columns, and the storage order of the matrix // can be specified via the four template parameters: \code template< typename Type, size_t M, size_t N, bool SO > class StaticMatrix; \endcode // - \c Type: specifies the type of the matrix elements. StaticMatrix can be used with any // non-cv-qualified, non-reference element type. // - \c M : specifies the total number of rows of the matrix. // - \c N : specifies the total number of columns of the matrix. Note that it is expected // that StaticMatrix is only used for tiny and small matrices. // - \c SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix. // The default value is blaze::rowMajor. 
// // The blaze::StaticMatrix is perfectly suited for small to medium matrices whose dimensions are // known at compile time: \code // Definition of a 3x4 integral row-major matrix blaze::StaticMatrix<int,3UL,4UL> A; // Definition of a 4x6 single precision row-major matrix blaze::StaticMatrix<float,4UL,6UL,blaze::rowMajor> B; // Definition of a 6x4 double precision column-major matrix blaze::StaticMatrix<double,6UL,4UL,blaze::columnMajor> C; \endcode // \n \section matrix_types_dynamic_matrix DynamicMatrix // <hr> // // The blaze::DynamicMatrix class template is the representation of an arbitrary sized matrix // with \f$ M \cdot N \f$ dynamically allocated elements of arbitrary type. It can be included // via the header file \code #include <blaze/math/DynamicMatrix.h> \endcode // The type of the elements and the storage order of the matrix can be specified via the two // template parameters: \code template< typename Type, bool SO > class DynamicMatrix; \endcode // - \c Type: specifies the type of the matrix elements. DynamicMatrix can be used with any // non-cv-qualified, non-reference element type. // - \c SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix. // The default value is blaze::rowMajor. // // The blaze::DynamicMatrix is the default choice for all kinds of dense matrices and the best // choice for medium to large matrices. The number of rows and columns can be modified at runtime: \code // Definition of a 3x4 integral row-major matrix blaze::DynamicMatrix<int> A( 3UL, 4UL ); // Definition of a 4x6 single precision row-major matrix blaze::DynamicMatrix<float,blaze::rowMajor> B( 4UL, 6UL ); // Definition of a double precision column-major matrix with 0 rows and columns blaze::DynamicMatrix<double,blaze::columnMajor> C; \endcode // \n \section matrix_types_hybrid_matrix HybridMatrix // <hr> // // The HybridMatrix class template combines the flexibility of a dynamically sized matrix with // the efficiency and performance of a fixed size matrix. It is implemented as a crossing between // the blaze::StaticMatrix and the blaze::DynamicMatrix class templates: Similar to the static // matrix it uses static stack memory instead of dynamically allocated memory and similar to the // dynamic matrix it can be resized (within the extend of the static memory). It can be included // via the header file \code #include <blaze/math/HybridMatrix.h> \endcode // The type of the elements, the maximum number of rows and columns and the storage order of the // matrix can be specified via the four template parameters: \code template< typename Type, size_t M, size_t N, bool SO > class HybridMatrix; \endcode // - Type: specifies the type of the matrix elements. HybridMatrix can be used with any // non-cv-qualified, non-reference, non-pointer element type. // - M : specifies the maximum number of rows of the matrix. // - N : specifies the maximum number of columns of the matrix. Note that it is expected // that HybridMatrix is only used for tiny and small matrices. // - SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix. // The default value is blaze::rowMajor. 
// // The blaze::HybridMatrix is a suitable choice for small to medium matrices, whose dimensions // are not known at compile time or not fixed at runtime, but whose maximum dimensions are known // at compile time: \code // Definition of a 3x4 integral row-major matrix with maximum dimensions of 6x8 blaze::HybridMatrix<int,6UL,8UL> A( 3UL, 4UL ); // Definition of a 4x6 single precision row-major matrix with maximum dimensions of 12x16 blaze::HybridMatrix<float,12UL,16UL,blaze::rowMajor> B( 4UL, 6UL ); // Definition of a 0x0 double precision column-major matrix and maximum dimensions of 6x6 blaze::HybridMatrix<double,6UL,6UL,blaze::columnMajor> C; \endcode // \n \section matrix_types_custom_matrix CustomMatrix // <hr> // // The blaze::CustomMatrix class template provides the functionality to represent an external // array of elements of arbitrary type and a fixed size as a native \b Blaze dense matrix data // structure. Thus in contrast to all other dense matrix types a custom matrix does not perform // any kind of memory allocation by itself, but it is provided with an existing array of element // during construction. A custom matrix can therefore be considered an alias to the existing // array. It can be included via the header file \code #include <blaze/math/CustomMatrix.h> \endcode // The type of the elements, the properties of the given array of elements and the storage order // of the matrix can be specified via the following four template parameters: \code template< typename Type, bool AF, bool PF, bool SO > class CustomMatrix; \endcode // - Type: specifies the type of the matrix elements. blaze::CustomMatrix can be used with // any non-cv-qualified, non-reference, non-pointer element type. // - AF : specifies whether the represented, external arrays are properly aligned with // respect to the available instruction set (SSE, AVX, ...) or not. // - PF : specified whether the represented, external arrays are properly padded with // respect to the available instruction set (SSE, AVX, ...) or not. // - SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix. // The default value is blaze::rowMajor. 
// // The blaze::CustomMatrix is the right choice if any external array needs to be represented as // a \b Blaze dense matrix data structure or if a custom memory allocation strategy needs to be // realized: \code using blaze::CustomMatrix; using blaze::Deallocate; using blaze::aligned; using blaze::unaligned; using blaze::padded; using blaze::unpadded; // Definition of an unmanaged 3x4 custom matrix for unaligned, unpadded integer arrays using UnalignedUnpadded = CustomMatrix<int,unaligned,unpadded,rowMajor>; std::vector<int> vec( 12UL ); UnalignedUnpadded A( &vec[0], 3UL, 4UL ); // Definition of a managed 5x6 custom matrix for unaligned but padded 'float' arrays using UnalignedPadded = CustomMatrix<float,unaligned,padded,columnMajor>; std::unique_ptr<float[]> memory1( new float[40] ); UnalignedPadded B( memory1.get(), 5UL, 6UL, 8UL ); // Definition of a managed 12x13 custom matrix for aligned, unpadded 'double' arrays using AlignedUnpadded = CustomMatrix<double,aligned,unpadded,rowMajor>; std::unique_ptr<double[],Deallocate> memory2( blaze::allocate<double>( 192UL ) ); AlignedUnpadded C( memory2.get(), 12UL, 13UL, 16UL ); // Definition of a 7x14 custom matrix for aligned, padded 'complex<double>' arrays using cplx = complex<double>; using AlignedPadded = CustomMatrix<cplx,aligned,padded,columnMajor>; std::unique_ptr<cplx[],Deallocate> memory3( blaze::allocate<cplx>( 112UL ) ); AlignedPadded D( memory3.get(), 7UL, 14UL, 16UL ); \endcode // In comparison with the remaining \b Blaze dense matrix types blaze::CustomMatrix has several // special characteristics. All of these result from the fact that a custom matrix is not // performing any kind of memory allocation, but instead is given an existing array of elements. // The following sections discuss all of these characteristics: // // -# <b>\ref matrix_types_custom_matrix_memory_management</b> // -# <b>\ref matrix_types_custom_matrix_copy_operations</b> // -# <b>\ref matrix_types_custom_matrix_alignment</b> // -# <b>\ref matrix_types_custom_matrix_padding</b> // // \n \subsection matrix_types_custom_matrix_memory_management Memory Management // // The blaze::CustomMatrix class template acts as an adaptor for an existing array of elements. As // such it provides everything that is required to use the array just like a native \b Blaze dense // matrix data structure. However, this flexibility comes with the price that the user of a custom // matrix is responsible for the resource management. // // The following examples give an impression of several possible types of custom matrices: \code using blaze::CustomMatrix; using blaze::Deallocate; using blaze::allocate; using blaze::aligned; using blaze::unaligned; using blaze::padded; using blaze::unpadded; // Definition of a 3x4 custom row-major matrix with unaligned, unpadded and externally // managed integer array. Note that the std::vector must be guaranteed to outlive the // custom matrix! std::vector<int> vec( 12UL ); CustomMatrix<int,unaligned,unpadded> A( &vec[0], 3UL, 4UL ); // Definition of a custom 8x12 matrix for an aligned and padded integer array of // capacity 128 (including 8 padding elements per row). Note that the std::unique_ptr // must be guaranteed to outlive the custom matrix!
std::unique_ptr<int[],Deallocate> memory( allocate<int>( 128UL ) ); CustomMatrix<int,aligned,padded> B( memory.get(), 8UL, 12UL, 16UL ); \endcode // \n \subsection matrix_types_custom_matrix_copy_operations Copy Operations // // As with all dense matrices it is possible to copy construct a custom matrix: \code using blaze::CustomMatrix; using blaze::unaligned; using blaze::unpadded; using CustomType = CustomMatrix<int,unaligned,unpadded>; std::vector<int> vec( 6UL, 10 ); // Vector of 6 integers of the value 10 CustomType A( &vec[0], 2UL, 3UL ); // Represent the std::vector as Blaze dense matrix A(0,1) = 20; // Also modifies the std::vector CustomType B( A ); // Creating a copy of matrix A B(0,2) = 20; // Also affects matrix A and the std::vector \endcode // It is important to note that a custom matrix acts as a reference to the specified array. Thus // the result of the copy constructor is a new custom matrix that is referencing and representing // the same array as the original custom matrix. // // In contrast to copy construction, just as with references, copy assignment does not change // which array is referenced by the custom matrices, but modifies the values of the array: \code std::vector<int> vec2( 6UL, 4 ); // Vector of 6 integers of the value 4 CustomType C( &vec2[0], 2UL, 3UL ); // Represent the std::vector as Blaze dense matrix A = C; // Copy assignment: Set all values of matrix A and B to 4. \endcode // \n \subsection matrix_types_custom_matrix_alignment Alignment // // In case the custom matrix is specified as \c aligned the passed array must adhere to some // alignment restrictions based on the alignment requirements of the used data type and the // used instruction set (SSE, AVX, ...). The restriction applies to the first element of each // row/column: In case of a row-major matrix the first element of each row must be properly // aligned, in case of a column-major matrix the first element of each column must be properly // aligned. For instance, if a row-major matrix is used and AVX is active the first element of // each row must be 32-byte aligned: \code using blaze::CustomMatrix; using blaze::Deallocate; using blaze::allocate; using blaze::aligned; using blaze::padded; using blaze::rowMajor; // Allocation of 32-byte aligned memory std::unique_ptr<int[],Deallocate> memory( allocate<int>( 40UL ) ); CustomMatrix<int,aligned,padded,rowMajor> A( memory.get(), 5UL, 6UL, 8UL ); \endcode // In the example, the row-major matrix has six columns. However, since with AVX eight integer // values are loaded together the matrix is padded with two additional elements. This guarantees // that the first element of each row is 32-byte aligned. In case the alignment requirements are // violated, a \c std::invalid_argument exception is thrown. // // \n \subsection matrix_types_custom_matrix_padding Padding // // Adding padding elements to the end of each row/column can have a significant impact on the // performance.
For instance, assuming that AVX is available, then two aligned, padded, 3x3 double // precision matrices can be added via three SIMD addition operations: \code using blaze::CustomMatrix; using blaze::Deallocate; using blaze::allocate; using blaze::aligned; using blaze::padded; using CustomType = CustomMatrix<double,aligned,padded>; std::unique_ptr<double[],Deallocate> memory1( allocate<double>( 12UL ) ); std::unique_ptr<double[],Deallocate> memory2( allocate<double>( 12UL ) ); std::unique_ptr<double[],Deallocate> memory3( allocate<double>( 12UL ) ); // Creating padded custom 3x3 matrix with an additional padding element in each row CustomType A( memory1.get(), 3UL, 3UL, 4UL ); CustomType B( memory2.get(), 3UL, 3UL, 4UL ); CustomType C( memory3.get(), 3UL, 3UL, 4UL ); // ... Initialization C = A + B; // AVX-based matrix addition \endcode // In this example, maximum performance is possible. However, in case no padding elements are // inserted a scalar addition has to be used: \code using blaze::CustomMatrix; using blaze::Deallocate; using blaze::allocate; using blaze::aligned; using blaze::unpadded; using CustomType = CustomMatrix<double,aligned,unpadded>; std::unique_ptr<double[],Deallocate> memory1( allocate<double>( 9UL ) ); std::unique_ptr<double[],Deallocate> memory2( allocate<double>( 9UL ) ); std::unique_ptr<double[],Deallocate> memory3( allocate<double>( 9UL ) ); // Creating unpadded custom 3x3 matrix CustomType A( memory1.get(), 3UL, 3UL ); CustomType B( memory2.get(), 3UL, 3UL ); CustomType C( memory3.get(), 3UL, 3UL ); // ... Initialization C = A + B; // Scalar matrix addition \endcode // Note that the construction of padded and unpadded aligned matrices looks identical. However, // in case of padded matrices, \b Blaze will zero initialize the padding elements and use them // in all computations in order to achieve maximum performance. In case of an unpadded matrix // \b Blaze cannot use any padding elements, with the downside that it is not possible to load a complete // row to an AVX register, which makes it necessary to fall back to a scalar addition. // // The number of padding elements is required to be sufficient with respect to the available // instruction set: In case of an aligned padded custom matrix the added padding elements must // guarantee that the total number of elements in each row/column is a multiple of the SIMD // vector width. In case of an unaligned padded matrix the number of padding elements can be // greater than or equal to the number of padding elements of an aligned padded custom matrix. In case // the padding is insufficient with respect to the available instruction set, a // \c std::invalid_argument exception is thrown. // // // \n \section matrix_types_compressed_matrix CompressedMatrix // <hr> // // The blaze::CompressedMatrix class template is the representation of an arbitrary sized sparse // matrix with \f$ M \cdot N \f$ dynamically allocated elements of arbitrary type. It can be // included via the header file \code #include <blaze/math/CompressedMatrix.h> \endcode // The type of the elements and the storage order of the matrix can be specified via the two // template parameters: \code template< typename Type, bool SO > class CompressedMatrix; \endcode // - \c Type: specifies the type of the matrix elements. CompressedMatrix can be used with // any non-cv-qualified, non-reference, non-pointer element type. // - \c SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix. // The default value is blaze::rowMajor.
// // The blaze::CompressedMatrix is the right choice for all kinds of sparse matrices: \code // Definition of a 3x4 integral row-major matrix blaze::CompressedMatrix<int> A( 3UL, 4UL ); // Definition of a 4x6 single precision row-major matrix blaze::CompressedMatrix<float,blaze::rowMajor> B( 4UL, 6UL ); // Definition of a double precision column-major matrix with 0 rows and columns blaze::CompressedMatrix<double,blaze::columnMajor> C; \endcode // \n \section matrix_types_identity_matrix IdentityMatrix // <hr> // // The blaze::IdentityMatrix class template is the representation of an immutable, arbitrary // sized identity matrix with \f$ N \cdot N \f$ elements of arbitrary type. It can be included // via the header file \code #include <blaze/math/IdentityMatrix.h> \endcode // The type of the elements and the storage order of the matrix can be specified via the two // template parameters: \code template< typename Type, bool SO > class IdentityMatrix; \endcode // - Type: specifies the type of the matrix elements. IdentityMatrix can be used with any // non-cv-qualified, non-reference, non-pointer element type. // - SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix. // The default value is blaze::rowMajor. // // The blaze::IdentityMatrix is the perfect choice to represent an identity matrix: \code // Definition of a 3x3 integral row-major identity matrix blaze::IdentityMatrix<int> A( 3UL ); // Definition of a 6x6 single precision row-major identity matrix blaze::IdentityMatrix<float,blaze::rowMajor> B( 6UL ); // Definition of a double precision column-major identity matrix with 0 rows and columns blaze::IdentityMatrix<double,blaze::columnMajor> C; \endcode // \n Previous: \ref matrices &nbsp; &nbsp; Next: \ref matrix_operations */ //************************************************************************************************* //**Matrix Operations****************************************************************************** /*!\page matrix_operations Matrix Operations // // \tableofcontents // // // \n \section matrix_operations_constructors Constructors // <hr> // // Matrices are just as easy and intuitive to create as vectors. Still, there are a few rules // to be aware of: // - In case the last template parameter (the storage order) is omitted, the matrix is per // default stored in row-major order. // - The elements of a \c StaticMatrix or \c HybridMatrix are default initialized (i.e. built-in // data types are initialized to 0, class types are initialized via the default constructor). // - Newly allocated elements of a \c DynamicMatrix or \c CompressedMatrix remain uninitialized // if they are of built-in type and are default constructed if they are of class type. // // \n \subsection matrix_operations_default_construction Default Construction \code using blaze::StaticMatrix; using blaze::DynamicMatrix; using blaze::CompressedMatrix; // All matrices can be default constructed. Whereas the size of // a StaticMatrix is fixed via the second and third template // parameter, the initial size of a constructed DynamicMatrix // or CompressedMatrix is 0. StaticMatrix<int,2UL,2UL> M1; // Instantiation of a 2x2 integer row-major // matrix. All elements are initialized to 0. DynamicMatrix<float> M2; // Instantiation of a single precision dynamic // row-major matrix with 0 rows and 0 columns. DynamicMatrix<double,columnMajor> M3; // Instantiation of a double precision dynamic // column-major matrix with 0 rows and 0 columns. 
CompressedMatrix<int> M4; // Instantiation of a compressed integer // row-major matrix of size 0x0. CompressedMatrix<double,columnMajor> M5; // Instantiation of a compressed double precision // column-major matrix of size 0x0. \endcode // \n \subsection matrix_operations_size_construction Construction with Specific Size // // The \c DynamicMatrix, \c HybridMatrix, and \c CompressedMatrix classes offer a constructor // that allows to immediately give the matrices a specific number of rows and columns: \code DynamicMatrix<int> M6( 5UL, 4UL ); // Instantiation of a 5x4 dynamic row-major // matrix. The elements are not initialized. HybridMatrix<double,5UL,9UL> M7( 3UL, 7UL ); // Instantiation of a 3x7 hybrid row-major // matrix. The elements are not initialized. CompressedMatrix<float,columnMajor> M8( 8UL, 6UL ); // Instantiation of an empty 8x6 compressed // column-major matrix. \endcode // Note that dense matrices (in this case \c DynamicMatrix and \c HybridMatrix) immediately // allocate enough capacity for all matrix elements. Sparse matrices on the other hand (in this // example \c CompressedMatrix) merely acquire the size, but don't necessarily allocate memory. // // // \n \subsection matrix_operations_initialization_constructors Initialization Constructors // // All dense matrix classes offer a constructor for a direct, homogeneous initialization of all // matrix elements. In contrast, for sparse matrices the predicted number of non-zero elements // can be specified. \code StaticMatrix<int,4UL,3UL,columnMajor> M9( 7 ); // Instantiation of a 4x3 integer column-major // matrix. All elements are initialized to 7. DynamicMatrix<float> M10( 2UL, 5UL, 2.0F ); // Instantiation of a 2x5 single precision row-major // matrix. All elements are initialized to 2.0F. CompressedMatrix<int> M11( 3UL, 4UL, 4 ); // Instantiation of a 3x4 integer row-major // matrix with capacity for 4 non-zero elements. \endcode // \n \subsection matrix_operations_array_construction Array Construction // // Alternatively, all dense matrix classes offer a constructor for an initialization with a // dynamic or static array. If the matrix is initialized from a dynamic array, the constructor // expects the dimensions of values provided by the array as first and second argument, and the // array as third argument. In case of a static array, the fixed size of the array is used: \code const std::unique_ptr<double[]> array1( new double[6] ); // ... Initialization of the dynamic array blaze::StaticMatrix<double,2UL,3UL> M12( 2UL, 3UL, array1.get() ); int array2[2][2] = { { 4, -5 }, { -6, 7 } }; blaze::StaticMatrix<int,2UL,2UL,rowMajor> M13( array2 ); \endcode // \n \subsection matrix_operations_initializer_list_construction Initializer List Construction // // In addition, all dense and sparse matrix classes can be directly initialized by means of an // initializer list: \code blaze::DynamicMatrix<float,columnMajor> M14{ { 3.1F, 6.4F }, { -0.9F, -1.2F }, { 4.8F, 0.6F } }; blaze::CompressedMatrix<int,rowMajor> M15{ { 3 }, { 1 }, { 0, 2 } }; \endcode // In case of sparse matrices, only the non-zero elements are used to initialize the matrix. // Missing values are considered to be default values. // // \n \subsection matrix_operations_copy_construction Copy Construction // // All dense and sparse matrices can be created as a copy of another dense or sparse matrix. \code StaticMatrix<int,5UL,4UL,rowMajor> M16( M6 ); // Instantiation of the dense row-major matrix M16 // as copy of the dense row-major matrix M6.
DynamicMatrix<float,columnMajor> M17( M8 ); // Instantiation of the dense column-major matrix M17 // as copy of the sparse column-major matrix M8. CompressedMatrix<double,columnMajor> M18( M7 ); // Instantiation of the compressed column-major matrix // M18 as copy of the dense row-major matrix M7. CompressedMatrix<float,rowMajor> M19( M8 ); // Instantiation of the compressed row-major matrix // M19 as copy of the compressed column-major matrix M8. \endcode // Note that it is not possible to create a \c StaticMatrix as a copy of a matrix with a different // number of rows and/or columns: \code StaticMatrix<int,4UL,5UL,rowMajor> M20( M6 ); // Runtime error: Number of rows and columns // does not match! StaticMatrix<int,4UL,4UL,columnMajor> M21( M9 ); // Compile time error: Number of columns does // not match! \endcode // \n \section matrix_operations_assignment Assignment // <hr> // // There are several types of assignment to dense and sparse matrices: // \ref matrix_operations_homogeneous_assignment, \ref matrix_operations_array_assignment, // \ref matrix_operations_copy_assignment, and \ref matrix_operations_compound_assignment. // // // \n \subsection matrix_operations_homogeneous_assignment Homogeneous Assignment // // It is possible to assign the same value to all elements of a dense matrix. All dense matrix // classes provide an according assignment operator: \code blaze::StaticMatrix<int,3UL,2UL> M1; blaze::DynamicMatrix<double> M2; // Setting all integer elements of the StaticMatrix to 4 M1 = 4; // Setting all double precision elements of the DynamicMatrix to 3.5 M2 = 3.5; \endcode // \n \subsection matrix_operations_array_assignment Array Assignment // // Dense matrices can also be assigned a static array: \code blaze::StaticMatrix<int,2UL,2UL,rowMajor> M1; blaze::StaticMatrix<int,2UL,2UL,columnMajor> M2; blaze::DynamicMatrix<double> M3; int array1[2][2] = { { 1, 2 }, { 3, 4 } }; double array2[3][2] = { { 3.1, 6.4 }, { -0.9, -1.2 }, { 4.8, 0.6 } }; M1 = array1; M2 = array1; M3 = array2; \endcode // Note that the dimensions of the static array have to match the size of a \c StaticMatrix, // whereas a \c DynamicMatrix is resized according to the array dimensions: \f$ M3 = \left(\begin{array}{*{2}{c}} 3.1 & 6.4 \\ -0.9 & -1.2 \\ 4.8 & 0.6 \\ \end{array}\right)\f$ // \n \subsection matrix_operations_initializer_list_assignment Initializer List Assignment // // Alternatively, it is possible to directly assign an initializer list to a dense or sparse // matrix: \code blaze::DynamicMatrix<double> M1; blaze::CompressedMatrix<int> M2; M1 = { { 3.1, 6.4 }, { -0.9, -1.2 }, { 4.8, 0.6 } }; M2 = { { 1, 0 }, {}, { 0, 1 }, { 2 } }; \endcode // In case of sparse matrices, only the non-zero elements are considered. Missing values are // considered to be default values. // // \n \subsection matrix_operations_copy_assignment Copy Assignment // // All kinds of matrices can be assigned to each other. The only restriction is that since a // \c StaticMatrix cannot change its size, the assigned matrix must match both in the number of // rows and in the number of columns. \code blaze::StaticMatrix<int,3UL,2UL,rowMajor> M1; blaze::DynamicMatrix<int,rowMajor> M2( 3UL, 2UL ); blaze::DynamicMatrix<float,rowMajor> M3( 5UL, 2UL ); blaze::CompressedMatrix<int,rowMajor> M4( 3UL, 2UL ); blaze::CompressedMatrix<float,columnMajor> M5( 3UL, 2UL ); // ...
Initialization of the matrices M1 = M2; // OK: Assignment of a 3x2 dense row-major matrix to another 3x2 dense row-major matrix M1 = M4; // OK: Assignment of a 3x2 sparse row-major matrix to a 3x2 dense row-major matrix M1 = M3; // Runtime error: Cannot assign a 5x2 matrix to a 3x2 static matrix M1 = M5; // OK: Assignment of a 3x2 sparse column-major matrix to a 3x2 dense row-major matrix \endcode // \n \subsection matrix_operations_compound_assignment Compound Assignment // // Compound assignment is also available for matrices: addition assignment, subtraction assignment, // and multiplication assignment. In contrast to plain assignment, however, the number of rows // and columns of the two operands have to match according to the arithmetic operation. \code blaze::StaticMatrix<int,2UL,3UL,rowMajor> M1; blaze::DynamicMatrix<int,rowMajor> M2( 2UL, 3UL ); blaze::CompressedMatrix<float,columnMajor> M3( 2UL, 3UL ); blaze::CompressedMatrix<float,rowMajor> M4( 2UL, 4UL ); blaze::StaticMatrix<float,2UL,4UL,rowMajor> M5; blaze::CompressedMatrix<float,rowMajor> M6( 3UL, 2UL ); // ... Initialization of the matrices M1 += M2; // OK: Addition assignment between two row-major matrices of the same dimensions M1 -= M3; // OK: Subtraction assignment between between a row-major and a column-major matrix M1 += M4; // Runtime error: No compound assignment between matrices of different size M1 -= M5; // Compilation error: No compound assignment between matrices of different size M2 *= M6; // OK: Multiplication assignment between two row-major matrices \endcode // Note that the multiplication assignment potentially changes the number of columns of the // target matrix: \f$\left(\begin{array}{*{3}{c}} 2 & 0 & 1 \\ 0 & 3 & 2 \\ \end{array}\right) \times \left(\begin{array}{*{2}{c}} 4 & 0 \\ 1 & 0 \\ 0 & 3 \\ \end{array}\right) = \left(\begin{array}{*{2}{c}} 8 & 3 \\ 3 & 6 \\ \end{array}\right)\f$ // Since a \c StaticMatrix cannot change its size, only a square StaticMatrix can be used in a // multiplication assignment with other square matrices of the same dimensions. // // // \n \section matrix_operations_element_access Element Access // <hr> // // \n \subsection matrix_operations_function_call_operator_1 Function Call Operator // // The easiest way to access a specific dense or sparse matrix element is via the function call // operator. The indices to access a matrix are zero-based: \code blaze::DynamicMatrix<int> M1( 4UL, 6UL ); M1(0,0) = 1; M1(0,1) = 3; // ... blaze::CompressedMatrix<double> M2( 5UL, 3UL ); M2(0,2) = 4.1; M2(1,1) = -6.3; \endcode // Since dense matrices allocate enough memory for all contained elements, using the function // call operator on a dense matrix directly returns a reference to the accessed value. In case // of a sparse matrix, if the accessed value is currently not contained in the matrix, the // value is inserted into the matrix prior to returning a reference to the value, which can // be much more expensive than the direct access to a dense matrix. Consider the following // example: \code blaze::CompressedMatrix<int> M1( 4UL, 4UL ); for( size_t i=0UL; i<M1.rows(); ++i ) { for( size_t j=0UL; j<M1.columns(); ++j ) { ... = M1(i,j); } } \endcode // Although the compressed matrix is only used for read access within the for loop, using the // function call operator temporarily inserts 16 non-zero elements into the matrix. Therefore // the preferred way to traverse the non-zero elements of a sparse matrix is to use iterators. 
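//
// The following minimal sketch (with hypothetical dimensions) makes this fill-in effect visible
// by querying the number of stored elements via the \c nonZeros() member function before and
// after the traversal:

   \code
   blaze::CompressedMatrix<int> M1( 4UL, 4UL );

   M1.nonZeros();  // Returns 0; the matrix does not contain any stored element yet

   int sum( 0 );
   for( size_t i=0UL; i<M1.rows(); ++i ) {
      for( size_t j=0UL; j<M1.columns(); ++j ) {
         sum += M1(i,j);  // Read access via the function call operator inserts elements
      }
   }

   M1.nonZeros();  // Returns 16; every accessed element has been inserted into the matrix
   \endcode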
// // \n \subsection matrix_operations_iterators Iterators // // All matrices (sparse as well as dense) offer an alternate way via the \c begin(), \c cbegin(), // \c end() and \c cend() functions to traverse all contained elements by iterator. Note that // it is not possible to traverse all elements of the matrix, but that it is only possible to // traverse elements in a row/column-wise fashion. In case of a non-const matrix, \c begin() and // \c end() return an \c Iterator, which allows a manipulation of the non-zero value; in case of // a constant matrix or in case \c cbegin() or \c cend() are used, a \c ConstIterator is returned: \code using blaze::CompressedMatrix; CompressedMatrix<int,rowMajor> A( 4UL, 6UL ); // Traversing the matrix by Iterator for( size_t i=0UL; i<A.rows(); ++i ) { for( CompressedMatrix<int,rowMajor>::Iterator it=A.begin(i); it!=A.end(i); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the non-zero element. } } // Traversing the matrix by ConstIterator for( size_t i=0UL; i<A.rows(); ++i ) { for( CompressedMatrix<int,rowMajor>::ConstIterator it=A.cbegin(i); it!=A.cend(i); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the non-zero element. } } \endcode // Note that \c begin(), \c cbegin(), \c end(), and \c cend() are also available as free functions: \code for( size_t i=0UL; i<A.rows(); ++i ) { for( CompressedMatrix<int,rowMajor>::Iterator it=begin( A, i ); it!=end( A, i ); ++it ) { // ... } } for( size_t i=0UL; i<A.rows(); ++i ) { for( CompressedMatrix<int,rowMajor>::ConstIterator it=cbegin( A, i ); it!=cend( A, i ); ++it ) { // ... } } \endcode // \n \section matrix_operations_element_insertion Element Insertion // <hr> // // Whereas a dense matrix always provides enough capacity to store all matrix elements, a sparse // matrix only stores the non-zero elements. Therefore it is necessary to explicitly add elements // to the matrix. // // \n \subsection matrix_operations_function_call_operator_2 Function Call Operator // // The first possibility to add elements to a sparse matrix is the function call operator: \code using blaze::CompressedMatrix; CompressedMatrix<int> M1( 3UL, 4UL ); M1(1,2) = 9; \endcode // In case the element at the given position is not yet contained in the sparse matrix, it is // automatically inserted. Otherwise the old value is replaced by the new value 9. The operator // returns a reference to the sparse matrix element. // // \n \subsection matrix_operations_set .set() // // An alternative to the function call operator is the \c set() function: In case the element is // not yet contained in the matrix the element is inserted, else the element's value is modified: \code // Insert or modify the value at position (2,0) M1.set( 2, 0, 1 ); \endcode // \n \subsection matrix_operations_insert .insert() // The insertion of elements can be better controlled via the \c insert() function.
In contrast // to the function call operator and the \c set() function it emits an exception in case the // element is already contained in the matrix. In order to check for this case, the \c find() // function can be used: \code // In case the element at position (2,3) is not yet contained in the matrix it is inserted // with a value of 4. if( M1.find( 2, 3 ) == M1.end( 2 ) ) M1.insert( 2, 3, 4 ); \endcode // \n \subsection matrix_operations_append .append() // // Although the \c insert() function is very flexible, due to performance reasons it is not // suited for the setup of large sparse matrices. A very efficient, yet also very low-level // way to fill a sparse matrix is the \c append() function. It requires the sparse matrix to // provide enough capacity to insert a new element in the specified row/column. Additionally, // the index of the new element must be larger than the index of the previous element in the // same row/column. Violating these conditions results in undefined behavior! \code M1.reserve( 0, 3 ); // Reserving space for three non-zero elements in row 0 M1.append( 0, 1, 2 ); // Appending the element 2 in row 0 at column index 1 M1.append( 0, 2, -4 ); // Appending the element -4 in row 0 at column index 2 // ... \endcode // The most efficient way to fill a sparse matrix with elements, however, is a combination of // \c reserve(), \c append(), and the \c finalize() function: \code // Setup of the compressed row-major matrix // // ( 0 1 0 2 0 ) // A = ( 0 0 0 0 0 ) // ( 3 0 0 0 0 ) // blaze::CompressedMatrix<int> M1( 3UL, 5UL ); M1.reserve( 3 ); // Reserving enough space for 3 non-zero elements M1.append( 0, 1, 1 ); // Appending the value 1 in row 0 with column index 1 M1.append( 0, 3, 2 ); // Appending the value 2 in row 0 with column index 3 M1.finalize( 0 ); // Finalizing row 0 M1.finalize( 1 ); // Finalizing the empty row 1 to prepare row 2 M1.append( 2, 0, 3 ); // Appending the value 3 in row 2 with column index 0 M1.finalize( 2 ); // Finalizing row 2 \endcode // \note The \c finalize() function has to be explicitly called for each row or column, even // for empty ones! // \note Although \c append() does not allocate new memory, it still invalidates all iterators // returned by the \c end() functions! // // // \n \section matrix_operations_element_removal Element Removal // <hr> // // \subsection matrix_operations_erase .erase() // // The \c erase() member functions can be used to remove elements from a sparse matrix. The // following example gives an impression of the five different flavors of \c erase(): \code using blaze::CompressedMatrix; CompressedMatrix<int,rowMajor> A( 42, 53 ); // ... Initialization of the matrix // Erasing the element at position (21,23) A.erase( 21, 23 ); // Erasing a single element in row 17 via iterator A.erase( 17, A.find( 17, 4 ) ); // Erasing all non-zero elements in the range [7..24] of row 33 A.erase( 33, A.lowerBound( 33, 7 ), A.upperBound( 33, 24 ) ); // Erasing all non-zero elements with a value larger than 9 by passing a unary predicate A.erase( []( int i ){ return i > 9; } ); // Erasing all non-zero elements in the range [30..40] of row 37 with a value larger than 5 CompressedMatrix<int,rowMajor>::Iterator pos1( A.lowerBound( 37, 30 ) ); CompressedMatrix<int,rowMajor>::Iterator pos2( A.upperBound( 37, 40 ) ); A.erase( 37, pos1, pos2, []( int i ){ return i > 5; } ); \endcode // \n \section matrix_operations_element_lookup Element Lookup // <hr> // // A sparse matrix only stores the non-zero elements contained in the matrix.
Therefore, whenever // accessing a matrix element at a specific position a lookup operation is required. Whereas the // function call operator is performing this lookup automatically, it is also possible to use the // \c find(), \c lowerBound(), and \c upperBound() member functions for a manual lookup. // // \n \subsection matrix_operations_find .find() // // The \c find() function can be used to check whether a specific element is contained in the // sparse matrix. It specifically searches for the element at the specified position. In case // the element is found, the function returns an iterator to the element. Otherwise an iterator // just past the last non-zero element of the according row or column (the \c end() iterator) // is returned. Note that the returned iterator is subject to invalidation due to inserting // operations via the function call operator, the \c set() function or the \c insert() function! \code using blaze::CompressedMatrix; CompressedMatrix<int,rowMajor> A( 42, 53 ); // ... Initialization of the matrix // Searching the element at position (7,17). In case the element is not // contained in the matrix, the end() iterator of row 7 is returned. CompressedMatrix<int,rowMajor>::Iterator pos( A.find( 7, 17 ) ); if( pos != A.end( 7 ) ) { // ... } \endcode // \n \subsection matrix_operations_lowerbound .lowerBound() // // In case of a row-major matrix, this function returns a row iterator to the first element with // an index not less than the given column index. In case of a column-major matrix, the function // returns a column iterator to the first element with an index not less than the given row // index. In combination with the \c upperBound() function this function can be used to create a // pair of iterators specifying a range of indices. Note that the returned iterator is subject // to invalidation due to inserting operations via the function call operator, the \c set() // function or the \c insert() function! \code using blaze::CompressedMatrix; CompressedMatrix<int,rowMajor> A( 42, 53 ); // ... Initialization of the matrix // Searching the lower bound of column index 17 in row 7. CompressedMatrix<int,rowMajor>::Iterator pos1( A.lowerBound( 7, 17 ) ); // Searching the upper bound of column index 28 in row 7 CompressedMatrix<int,rowMajor>::Iterator pos2( A.upperBound( 7, 28 ) ); // Erasing all elements in the specified range A.erase( 7, pos1, pos2 ); \endcode // \n \subsection matrix_operations_upperbound .upperBound() // // In case of a row-major matrix, this function returns a row iterator to the first element with // an index greater than the given column index. In case of a column-major matrix, the function // returns a column iterator to the first element with an index greater than the given row // index. In combination with the \c lowerBound() function this function can be used to create a // pair of iterators specifying a range of indices. Note that the returned iterator is subject // to invalidation due to inserting operations via the function call operator, the \c set() // function or the \c insert() function! \code using blaze::CompressedMatrix; CompressedMatrix<int,columnMajor> A( 42, 53 ); // ... Initialization of the matrix // Searching the lower bound of row index 17 in column 9.
CompressedMatrix<int,columnMajor>::Iterator pos1( A.lowerBound( 17, 9 ) ); // Searching the upper bound of row index 28 in column 9 CompressedMatrix<int,columnMajor>::Iterator pos2( A.upperBound( 28, 9 ) ); // Erasing all elements in the specified range A.erase( 9, pos1, pos2 ); \endcode // \n \section matrix_operations_non_modifying_operations Non-Modifying Operations // <hr> // // \subsection matrix_operations_rows .rows() / rows() // // The current number of rows of a matrix can be acquired via the \c rows() member function: \code // Instantiating a dynamic matrix with 10 rows and 8 columns blaze::DynamicMatrix<int> M1( 10UL, 8UL ); M1.rows(); // Returns 10 // Instantiating a compressed matrix with 8 rows and 12 columns blaze::CompressedMatrix<double> M2( 8UL, 12UL ); M2.rows(); // Returns 8 \endcode // Alternatively, the free functions \c rows() can be used to query the current number of rows of // a matrix. In contrast to the member function, the free function can also be used to query the // number of rows of a matrix expression: \code rows( M1 ); // Returns 10, i.e. has the same effect as the member function rows( M2 ); // Returns 8, i.e. has the same effect as the member function rows( M1 * M2 ); // Returns 10, i.e. the number of rows of the resulting matrix \endcode // \n \subsection matrix_operations_columns .columns() / columns() // // The current number of columns of a matrix can be acquired via the \c columns() member function: \code // Instantiating a dynamic matrix with 6 rows and 8 columns blaze::DynamicMatrix<int> M1( 6UL, 8UL ); M1.columns(); // Returns 8 // Instantiating a compressed matrix with 8 rows and 7 columns blaze::CompressedMatrix<double> M2( 8UL, 7UL ); M2.columns(); // Returns 7 \endcode // There is also a free function \c columns() available, which can also be used to query the number // of columns of a matrix expression: \code columns( M1 ); // Returns 8, i.e. has the same effect as the member function columns( M2 ); // Returns 7, i.e. has the same effect as the member function columns( M1 * M2 ); // Returns 7, i.e. the number of columns of the resulting matrix \endcode // \subsection matrix_operations_size size() // // The \c size() function returns the total number of elements of a matrix: \code // Instantiating a dynamic matrix with 6 rows and 8 columns blaze::DynamicMatrix<int> M1( 6UL, 8UL ); size( M1 ); // Returns 48 // Instantiating a compressed matrix with 8 rows and 7 columns blaze::CompressedMatrix<double> M2( 8UL, 7UL ); size( M2 ); // Returns 56 \endcode // \subsection matrix_operations_spacing .spacing() / spacing() // // The total number of elements of a row or column of a dense matrix, including potential padding // elements, can be acquired via the \c spacing member function. In case of a row-major matrix // (i.e. in case the storage order is set to blaze::rowMajor) the function returns the spacing // between two rows, in case of a column-major matrix (i.e. 
in case the storage flag is set to // blaze::columnMajor) the function returns the spacing between two columns: \code // Instantiating a row-major dynamic matrix with 7 rows and 8 columns blaze::DynamicMatrix<int,blaze::rowMajor> M1( 7UL, 8UL ); M1.spacing(); // Returns the total number of elements in a row // Instantiating a column-major dynamic matrix with 8 rows and 12 columns blaze::DynamicMatrix<double,blaze::columnMajor> M2( 8UL, 12UL ); M2.spacing(); // Returns the total number of elements in a column \endcode // Alternatively, the free function \c spacing() can be used to query the current number of // elements in a row/column. \code spacing( M1 ); // Returns the total number of elements in a row spacing( M2 ); // Returns the total number of elements in a column \endcode // \n \subsection matrix_operations_capacity .capacity() / capacity() // // The \c capacity() member function returns the internal capacity of a dense or sparse matrix. // Note that the capacity of a matrix doesn't have to be equal to the size of a matrix. In case of // a dense matrix the capacity will always be greater than or equal to the total number of elements // of the matrix. In case of a sparse matrix, the capacity will usually be much less than the // total number of elements. \code blaze::DynamicMatrix<float> M1( 5UL, 7UL ); blaze::StaticMatrix<float,7UL,4UL> M2; M1.capacity(); // Returns at least 35 M2.capacity(); // Returns at least 28 \endcode // There is also a free function \c capacity() available to query the capacity. However, please // note that this function cannot be used to query the capacity of a matrix expression: \code capacity( M1 ); // Returns at least 35, i.e. has the same effect as the member function capacity( M2 ); // Returns at least 28, i.e. has the same effect as the member function capacity( M1 * M2 ); // Compilation error! \endcode // \n \subsection matrix_operations_nonzeros .nonZeros() / nonZeros() // // For both dense and sparse matrices the current number of non-zero elements can be queried // via the \c nonZeros() member function. In case of matrices there are two flavors of the // \c nonZeros() function: One returns the total number of non-zero elements in the matrix, // the second returns the number of non-zero elements in a specific row (in case of a row-major // matrix) or column (in case of a column-major matrix). Sparse matrices directly return their // number of non-zero elements, dense matrices traverse their elements and count the number of // non-zero elements. \code blaze::DynamicMatrix<int,rowMajor> M1( 3UL, 5UL ); // ... Initializing the dense matrix M1.nonZeros(); // Returns the total number of non-zero elements in the dense matrix M1.nonZeros( 2 ); // Returns the number of non-zero elements in row 2 \endcode \code blaze::CompressedMatrix<double,columnMajor> M2( 4UL, 7UL ); // ... Initializing the sparse matrix M2.nonZeros(); // Returns the total number of non-zero elements in the sparse matrix M2.nonZeros( 3 ); // Returns the number of non-zero elements in column 3 \endcode // The free \c nonZeros() function can also be used to query the number of non-zero elements in a // matrix expression.
However, the result is not the exact number of non-zero elements, but may be // a rough estimation: \code nonZeros( M1 ); // Has the same effect as the member function nonZeros( M1, 2 ); // Has the same effect as the member function nonZeros( M2 ); // Has the same effect as the member function nonZeros( M2, 3 ); // Has the same effect as the member function nonZeros( M1 * M2 ); // Estimates the number of non-zero elements in the matrix expression \endcode // \n \subsection matrix_operations_isempty isEmpty() // // The \c isEmpty() function returns whether the total number of elements of the matrix is zero: \code blaze::DynamicMatrix<int> A; // Create an empty matrix isEmpty( A ); // Returns true A.resize( 5UL, 0UL ); // Resize to a 5x0 matrix isEmpty( A ); // Returns true A.resize( 5UL, 3UL ); // Resize to a 5x3 matrix isEmpty( A ); // Returns false \endcode // \n \subsection matrix_operations_isnan isnan() // // The \c isnan() function provides the means to check a dense or sparse matrix for not-a-number // (NaN) elements: \code blaze::DynamicMatrix<double> A( 3UL, 4UL ); // ... Initialization if( isnan( A ) ) { ... } \endcode \code blaze::CompressedMatrix<double> A( 3UL, 4UL ); // ... Initialization if( isnan( A ) ) { ... } \endcode // If at least one element of the matrix is not-a-number, the function returns \c true, otherwise // it returns \c false. Please note that this function only works for matrices with floating point // elements. The attempt to use it for a matrix with a non-floating point element type results in // a compile time error. // // // \n \subsection matrix_operations_isdefault isDefault() // // The \c isDefault() function returns whether the given dense or sparse matrix is in default state: \code blaze::HybridMatrix<int,5UL,4UL> A; // ... Resizing and initialization if( isDefault( A ) ) { ... } \endcode // A matrix is in default state if it appears to just have been default constructed. All resizable // matrices (\c HybridMatrix, \c DynamicMatrix, or \c CompressedMatrix) and \c CustomMatrix are in // default state if their size is equal to zero. A non-resizable matrix (\c StaticMatrix and all // submatrices) is in default state if all its elements are in default state. For instance, in case // the matrix is instantiated for a built-in integral or floating point data type, the function // returns \c true in case all matrix elements are 0 and \c false in case any matrix element is // not 0. // // // \n \subsection matrix_operations_isSquare isSquare() // // Whether a dense or sparse matrix is a square matrix (i.e. if the number of rows is equal to the // number of columns) can be checked via the \c isSquare() function: \code blaze::DynamicMatrix<double> A; // ... Resizing and initialization if( isSquare( A ) ) { ... } \endcode // \n \subsection matrix_operations_issymmetric isSymmetric() // // Via the \c isSymmetric() function it is possible to check whether a dense or sparse matrix // is symmetric: \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( isSymmetric( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be symmetric! // // // \n \subsection matrix_operations_isUniform isUniform() // // In order to check if all matrix elements are identical, the \c isUniform function can be used: \code blaze::DynamicMatrix<int> A; // ... Resizing and initialization if( isUniform( A ) ) { ... } \endcode // Note that in case of a sparse matrix the zero elements are also taken into account!
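//
// The following sketch (with hypothetical values) illustrates this for a sparse matrix: although
// all stored elements share the same value, the implicit zero elements cause \c isUniform() to
// return \c false:

   \code
   blaze::CompressedMatrix<int> A( 2UL, 2UL );
   A(0,0) = 5;
   A(1,1) = 5;

   isUniform( A );  // Returns false, since the implicit zero elements differ from the value 5
   \endcode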
// // // \n \subsection matrix_operations_islower isLower() // // Via the \c isLower() function it is possible to check whether a dense or sparse matrix is // lower triangular: \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( isLower( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be lower triangular! // // // \n \subsection matrix_operations_isunilower isUniLower() // // Via the \c isUniLower() function it is possible to check whether a dense or sparse matrix is // lower unitriangular: \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( isUniLower( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be lower unitriangular! // // // \n \subsection matrix_operations_isstrictlylower isStrictlyLower() // // Via the \c isStrictlyLower() function it is possible to check whether a dense or sparse matrix // is strictly lower triangular: \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( isStrictlyLower( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be strictly lower triangular! // // // \n \subsection matrix_operations_isUpper isUpper() // // Via the \c isUpper() function it is possible to check whether a dense or sparse matrix is // upper triangular: \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( isUpper( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be upper triangular! // // // \n \subsection matrix_operations_isuniupper isUniUpper() // // Via the \c isUniUpper() function it is possible to check whether a dense or sparse matrix is // upper unitriangular: \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( isUniUpper( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be upper unitriangular! // // // \n \subsection matrix_operations_isstrictlyupper isStrictlyUpper() // // Via the \c isStrictlyUpper() function it is possible to check whether a dense or sparse matrix // is strictly upper triangular: \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( isStrictlyUpper( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be strictly upper triangular! // // // \n \subsection matrix_operations_isdiagonal isDiagonal() // // The \c isDiagonal() function checks if the given dense or sparse matrix is a diagonal matrix, // i.e. if it has only elements on its diagonal and if the non-diagonal elements are default // elements: \code blaze::CompressedMatrix<float> A; // ... Resizing and initialization if( isDiagonal( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be diagonal! // // // \n \subsection matrix_operations_isidentity isIdentity() // // The \c isIdentity() function checks if the given dense or sparse matrix is an identity matrix, // i.e. if all diagonal elements are 1 and all non-diagonal elements are 0: \code blaze::CompressedMatrix<float> A; // ... Resizing and initialization if( isIdentity( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be identity matrices! // // // \n \subsection matrix_operations_matrix_determinant det() // // The determinant of a square dense matrix can be computed by means of the \c det() function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... 
Resizing and initialization double d = det( A ); // Compute the determinant of A \endcode // In case the given dense matrix is not a square matrix, a \c std::invalid_argument exception is // thrown. // // \note The \c det() function can only be used for dense matrices with \c float, \c double, // \c complex<float> or \c complex<double> element type. The attempt to call the function with // matrices of any other element type or with a sparse matrix results in a compile time error! // // \note The function depends on LAPACK kernels. Thus the function can only be used if a // fitting LAPACK library is available and linked to the executable. Otherwise a linker error // will be created. // // // \n \subsection matrix_operations_matrix_trans trans() // // Matrices can be transposed via the \c trans() function. Row-major matrices are transposed into // a column-major matrix and vice versa: \code blaze::DynamicMatrix<int,rowMajor> M1( 5UL, 2UL ); blaze::CompressedMatrix<int,columnMajor> M2( 3UL, 7UL ); M1 = M2; // Assigning a column-major matrix to a row-major matrix M1 = trans( M2 ); // Assigning the transpose of M2 (i.e. a row-major matrix) to M1 M1 += trans( M2 ); // Addition assignment of two row-major matrices \endcode // \n \subsection matrix_operations_ctrans ctrans() // // The conjugate transpose of a dense or sparse matrix (also called adjoint matrix, Hermitian // conjugate, or transjugate) can be computed via the \c ctrans() function: \code blaze::DynamicMatrix< complex<float>, rowMajor > M1( 5UL, 2UL ); blaze::CompressedMatrix< complex<float>, columnMajor > M2( 2UL, 5UL ); M1 = ctrans( M2 ); // Compute the conjugate transpose matrix \endcode // Note that the \c ctrans() function has the same effect as manually applying the \c conj() and // \c trans() functions in any order: \code M1 = trans( conj( M2 ) ); // Computing the conjugate transpose matrix M1 = conj( trans( M2 ) ); // Computing the conjugate transpose matrix \endcode // \n \subsection matrix_operations_matrix_evaluate eval() / evaluate() // // The \c evaluate() function forces an evaluation of the given matrix expression and enables // an automatic deduction of the correct result type of an operation. The following code example // demonstrates its intended use for the multiplication of a lower and a strictly lower dense // matrix: \code using blaze::DynamicMatrix; using blaze::LowerMatrix; using blaze::StrictlyLowerMatrix; LowerMatrix< DynamicMatrix<double> > A; StrictlyLowerMatrix< DynamicMatrix<double> > B; // ... Resizing and initialization auto C = evaluate( A * B ); \endcode // In this scenario, the \c evaluate() function assists in deducing the exact result type of // the operation via the \c auto keyword. Please note that if \c evaluate() is used in this // way, no temporary matrix is created and no copy operation is performed. Instead, the result // is directly written to the target matrix due to the return value optimization (RVO).
However, // if \c evaluate() is used in combination with an explicit target type, a temporary will be // created and a copy operation will be performed if the used type differs from the type // returned from the function: \code StrictlyLowerMatrix< DynamicMatrix<double> > D( A * B ); // No temporary & no copy operation LowerMatrix< DynamicMatrix<double> > E( A * B ); // Temporary & copy operation DynamicMatrix<double> F( A * B ); // Temporary & copy operation D = evaluate( A * B ); // Temporary & copy operation \endcode // Sometimes it might be desirable to explicitly evaluate a sub-expression within a larger // expression. However, please note that \c evaluate() is not intended to be used for this // purpose. This task is more elegantly and efficiently handled by the \c eval() function: \code blaze::DynamicMatrix<double> A, B, C, D; D = A + evaluate( B * C ); // Unnecessary creation of a temporary matrix D = A + eval( B * C ); // No creation of a temporary matrix \endcode // In contrast to the \c evaluate() function, \c eval() can take the complete expression // into account and therefore can guarantee the most efficient way to evaluate it (see also // \ref intra_statement_optimization). // // // \n \section matrix_operations_modifying_operations Modifying Operations // <hr> // // \subsection matrix_operations_resize_reserve .resize() / .reserve() // // The dimensions of a \c StaticMatrix are fixed at compile time by the second and third template // parameter and a \c CustomMatrix cannot be resized. In contrast, the number of rows and columns // of \c DynamicMatrix, \c HybridMatrix, and \c CompressedMatrix can be changed at runtime: \code using blaze::DynamicMatrix; using blaze::CompressedMatrix; DynamicMatrix<int,rowMajor> M1; CompressedMatrix<int,columnMajor> M2( 3UL, 2UL ); // Adapting the number of rows and columns via the resize() function. The (optional) // third parameter specifies whether the existing elements should be preserved. Per // default, the existing elements are preserved. M1.resize( 2UL, 2UL ); // Resizing matrix M1 to 2x2 elements. Elements of built-in type // remain uninitialized, elements of class type are default // constructed. M1.resize( 3UL, 1UL, false ); // Resizing M1 to 3x1 elements. The old elements are lost, the // new elements are NOT initialized! M2.resize( 5UL, 7UL, true ); // Resizing M2 to 5x7 elements. The old elements are preserved. M2.resize( 3UL, 2UL, false ); // Resizing M2 to 3x2 elements. The old elements are lost. \endcode // Note that resizing a matrix invalidates all existing views (see e.g. \ref views_submatrices) // on the matrix: \code blaze::DynamicMatrix<int,rowMajor> M1( 10UL, 20UL ); // Creating a 10x20 matrix auto row8 = row( M1, 8UL ); // Creating a view on the 8th row of the matrix M1.resize( 6UL, 20UL ); // Resizing the matrix invalidates the view \endcode // When the internal capacity of a matrix is no longer sufficient, the allocation of a larger // chunk of memory is triggered.
In order to avoid frequent reallocations, the \c reserve() // function can be used up front to set the internal capacity: \code blaze::DynamicMatrix<int> M1; M1.reserve( 100 ); M1.rows(); // Returns 0 M1.capacity(); // Returns at least 100 \endcode // Additionally it is possible to reserve memory in a specific row (for a row-major matrix) or // column (for a column-major matrix): \code blaze::CompressedMatrix<int> M1( 4UL, 6UL ); M1.reserve( 1, 4 ); // Reserving enough space for four non-zero elements in row 1 \endcode // \n \subsection matrix_operations_shrinkToFit .shrinkToFit() // // The internal capacity of matrices with dynamic memory is preserved in order to minimize the // number of reallocations. For that reason, the \c resize() and \c reserve() functions can lead // to memory overhead. The \c shrinkToFit() member function can be used to minimize the internal // capacity: \code blaze::DynamicMatrix<int> M1( 100UL, 100UL ); // Create a 100x100 integer matrix M1.resize( 10UL, 10UL ); // Resize to 10x10, but the capacity is preserved M1.shrinkToFit(); // Remove the unused capacity \endcode // Please note that due to padding the capacity might not be reduced exactly to \c rows() times // \c columns(). Please also note that in case a reallocation occurs, all iterators (including // \c end() iterators), all pointers and references to elements of this matrix are invalidated. // // // \subsection matrix_operations_reset_clear reset() / clear() // // In order to reset all elements of a dense or sparse matrix, the \c reset() function can be // used. The number of rows and columns of the matrix are preserved: \code // Setting up a single precision row-major matrix, whose elements are initialized with 2.0F. blaze::DynamicMatrix<float> M1( 4UL, 5UL, 2.0F ); // Resetting all elements to 0.0F. reset( M1 ); // Resetting all elements M1.rows(); // Returns 4: size and capacity remain unchanged \endcode // Alternatively, only a single row or column of the matrix can be reset: \code blaze::DynamicMatrix<int,blaze::rowMajor> M1( 7UL, 6UL, 5 ); // Setup of a row-major matrix blaze::DynamicMatrix<int,blaze::columnMajor> M2( 4UL, 5UL, 4 ); // Setup of a column-major matrix reset( M1, 2UL ); // Resetting the 2nd row of the row-major matrix reset( M2, 3UL ); // Resetting the 3rd column of the column-major matrix \endcode // In order to reset a row of a column-major matrix or a column of a row-major matrix, use a // row or column view (see \ref views_rows and \ref views_columns). // // In order to return a matrix to its default state (i.e. the state of a default constructed // matrix), the \c clear() function can be used: \code // Setting up a single precision row-major matrix, whose elements are initialized with 2.0F. blaze::DynamicMatrix<float> M1( 4UL, 5UL, 2.0F ); // Clearing the matrix. clear( M1 ); // Resetting the entire matrix M1.rows(); // Returns 0: size is reset, but capacity remains unchanged \endcode // \n \subsection matrix_operations_matrix_transpose transpose() // // In addition to the non-modifying \c trans() function, matrices can be transposed in-place via // the \c transpose() function: \code blaze::DynamicMatrix<int,rowMajor> M( 5UL, 2UL ); transpose( M ); // In-place transpose operation. M = trans( M ); // Same as above \endcode // Note however that the transpose operation fails if ... // // - ... the given matrix has a fixed size and is non-square; // - ... the given matrix is a triangular matrix; // - ...
the given submatrix affects the restricted parts of a triangular matrix; // - ... the given submatrix would cause non-deterministic results in a symmetric/Hermitian matrix. // // // \n \subsection matrix_operations_ctranspose ctranspose() // // The \c ctranspose() function can be used to perform an in-place conjugate transpose operation: \code blaze::DynamicMatrix<int,rowMajor> M( 5UL, 2UL ); ctranspose( M ); // In-place conjugate transpose operation. M = ctrans( M ); // Same as above \endcode // Note however that the conjugate transpose operation fails if ... // // - ... the given matrix has a fixed size and is non-square; // - ... the given matrix is a triangular matrix; // - ... the given submatrix affects the restricted parts of a triangular matrix; // - ... the given submatrix would cause non-deterministic results in a symmetric/Hermitian matrix. // // // \n \subsection matrix_operations_swap swap() // // Via the \c swap() function it is possible to completely swap the contents of two matrices // of the same type: \code blaze::DynamicMatrix<int,blaze::rowMajor> M1( 10UL, 15UL ); blaze::DynamicMatrix<int,blaze::rowMajor> M2( 20UL, 10UL ); swap( M1, M2 ); // Swapping the contents of M1 and M2 \endcode // \n \section matrix_operations_arithmetic_operations Arithmetic Operations // <hr> // // \subsection matrix_operations_min_max min() / max() // // The \c min() and \c max() functions can be used for a single matrix or multiple matrices. If // passed a single matrix, the functions return the smallest and largest element of the given // dense matrix or the smallest and largest non-zero element of the given sparse matrix, // respectively: \code blaze::StaticMatrix<int,2UL,3UL> A{ { -5, 2, 7 }, { -4, 0, 1 } }; min( A ); // Returns -5 max( A ); // Returns 7 \endcode \code blaze::CompressedMatrix<int> B{ { 1, 0, 3 }, { 0, 0, 0 } }; min( B ); // Returns 1 max( B ); // Returns 3 \endcode // For more information on the unary \c min() and \c max() reduction operations see the // \ref matrix_operations_reduction_operations section. // // If passed two or more dense matrices, the \c min() and \c max() functions compute the // componentwise minimum or maximum of the given matrices, respectively: \code blaze::StaticMatrix<int,2UL,3UL,rowMajor> C{ { -5, 1, -7 }, { 4, 1, 0 } }; blaze::StaticMatrix<int,2UL,3UL,rowMajor> D{ { -5, 3, 0 }, { 2, 2, -2 } }; min( A, C ); // Results in the matrix ( -5, 1, -7 ) ( -4, 0, 0 ) max( A, C, D ); // Results in the matrix ( -5, 3, 7 ) ( 4, 2, 1 ) \endcode // Please note that sparse matrices can only be used in the unary \c min() and \c max() functions. // Also note that all forms of the \c min() and \c max() functions can be used to compute the // smallest and largest element of a matrix expression: \code min( A + B + C ); // Returns -9, i.e. the smallest value of the resulting matrix max( A - B - C ); // Returns 11, i.e. the largest value of the resulting matrix \endcode // \n \subsection matrix_operators_softmax softmax() // // The <a href="https://en.wikipedia.org/wiki/Softmax_function">softmax function</a>, also called // the normalized exponential function, of a given dense matrix can be computed via \c softmax(). // The resulting dense matrix consists of real values in the range (0..1], which add up to 1.
\code blaze::StaticMatrix<double,3UL,3UL> A{ { 1.0, 2.0, 3.0 } , { 4.0, 1.0, 2.0 } , { 3.0, 4.0, 1.0 } }; blaze::StaticMatrix<double,3UL,3UL> B; // Evaluating the softmax function B = softmax( A ); // Results in ( 0.0157764 0.0428847 0.116573 ) // ( 0.316878 0.0157764 0.0428847 ) // ( 0.116573 0.316878 0.0157764 ) double s = sum( B ); // Results in 1 \endcode // \n \subsection matrix_operators_trace trace() // // The \c trace() function sums the diagonal elements of a square dense or sparse matrix: \code blaze::StaticMatrix<int,3UL,3UL> A{ { -1, 2, -3 } , { -4, -5, 6 } , { 7, -8, -9 } }; trace( A ); // Returns the sum of the diagonal elements, i.e. -15 \endcode // In case the given matrix is not a square matrix, a \c std::invalid_argument exception is // thrown. // // // \n \subsection matrix_operators_abs abs() // // The \c abs() function can be used to compute the absolute values of each element of a matrix. // For instance, the following computation \code blaze::StaticMatrix<int,2UL,3UL,rowMajor> A{ { -1, 2, -3 }, { 4, -5, 6 } }; blaze::StaticMatrix<int,2UL,3UL,rowMajor> B( abs( A ) ); \endcode // results in the matrix \f$ B = \left(\begin{array}{*{3}{c}} 1 & 2 & 3 \\ 4 & 5 & 6 \\ \end{array}\right)\f$ // \n \subsection matrix_operators_sign sign() // // The \c sign() function can be used to evaluate the sign of each element of a matrix \a A. For // each element \c (i,j) the corresponding result is 1 if \a A(i,j) is greater than zero, 0 if // \a A(i,j) is zero, and -1 if \a A(i,j) is less than zero. For instance, the following use of // the \c sign() function \code blaze::StaticMatrix<int,2UL,3UL,rowMajor> A{ { -1, 2, 0 }, { 4, 0, -6 } }; blaze::StaticMatrix<int,2UL,3UL,rowMajor> B( sign( A ) ); \endcode // results in the matrix \f$ B = \left(\begin{array}{*{3}{c}} -1 & 1 & 0 \\ 1 & 0 & -1 \\ \end{array}\right)\f$ // \n \subsection matrix_operators_rounding_functions floor() / ceil() / trunc() / round() // // The \c floor(), \c ceil(), \c trunc(), and \c round() functions can be used to round down/up // each element of a matrix, respectively: \code blaze::StaticMatrix<double,3UL,3UL> A, B; B = floor( A ); // Rounding down each element of the matrix B = ceil ( A ); // Rounding up each element of the matrix B = trunc( A ); // Truncating each element of the matrix B = round( A ); // Rounding each element of the matrix \endcode // \n \subsection matrix_operators_conj conj() // // The \c conj() function can be applied on a dense or sparse matrix to compute the complex // conjugate of each element of the matrix: \code using blaze::StaticMatrix; using cplx = std::complex<double>; // Creating the matrix // ( (1,0) (-2,-1) ) // ( (1,1) ( 0, 1) ) StaticMatrix<cplx,2UL,2UL> A{ { cplx( 1.0, 0.0 ), cplx( -2.0, -1.0 ) }, { cplx( 1.0, 1.0 ), cplx( 0.0, 1.0 ) } }; // Computing the matrix of conjugate values // ( (1, 0) (-2, 1) ) // ( (1,-1) ( 0,-1) ) StaticMatrix<cplx,2UL,2UL> B; B = conj( A ); \endcode // Additionally, matrices can be conjugated in-place via the \c conjugate() function: \code blaze::DynamicMatrix<cplx> C( 5UL, 2UL ); conjugate( C ); // In-place conjugate operation. 
C = conj( C ); // Same as above \endcode // \n \subsection matrix_operators_real real() // // The \c real() function can be used on a dense or sparse matrix to extract the real part of // each element of the matrix: \code using blaze::StaticMatrix; using cplx = std::complex<double>; // Creating the matrix // ( (1,0) (-2,-1) ) // ( (1,1) ( 0, 1) ) StaticMatrix<cplx,2UL,2UL> A{ { cplx( 1.0, 0.0 ), cplx( -2.0, -1.0 ) }, { cplx( 1.0, 1.0 ), cplx( 0.0, 1.0 ) } }; // Extracting the real part of each matrix element // ( 1 -2 ) // ( 1 0 ) StaticMatrix<double,2UL,2UL> B; B = real( A ); \endcode // \n \subsection matrix_operators_imag imag() // // The \c imag() function can be used on a dense or sparse matrix to extract the imaginary part // of each element of the matrix: \code using blaze::StaticMatrix; using cplx = std::complex<double>; // Creating the matrix // ( (1,0) (-2,-1) ) // ( (1,1) ( 0, 1) ) StaticMatrix<cplx,2UL,2UL> A{ { cplx( 1.0, 0.0 ), cplx( -2.0, -1.0 ) }, { cplx( 1.0, 1.0 ), cplx( 0.0, 1.0 ) } }; // Extracting the imaginary part of each matrix element // ( 0 -1 ) // ( 1 1 ) StaticMatrix<double,2UL,2UL> B; B = imag( A ); \endcode // \n \subsection matrix_operators_sqrt sqrt() / invsqrt() // // Via the \c sqrt() and \c invsqrt() functions the (inverse) square root of each element of a // matrix can be computed: \code blaze::StaticMatrix<double,3UL,3UL> A, B, C; B = sqrt( A ); // Computes the square root of each element C = invsqrt( A ); // Computes the inverse square root of each element \endcode // Note that in case of sparse matrices only the non-zero elements are taken into account! // // // \n \subsection matrix_operators_cbrt cbrt() / invcbrt() // // The \c cbrt() and \c invcbrt() functions can be used to compute the (inverse) cubic root // of each element of a matrix: \code blaze::DynamicMatrix<double> A, B, C; B = cbrt( A ); // Computes the cubic root of each element C = invcbrt( A ); // Computes the inverse cubic root of each element \endcode // Note that in case of sparse matrices only the non-zero elements are taken into account! // // // \n \subsection matrix_operations_hypot hypot() // // The \c hypot() function can be used to compute the componentwise hypotenuse for a pair of // dense matrices: \code blaze::StaticMatrix<double,3UL,3UL> A, B, C; C = hypot( A, B ); // Computes the componentwise hypotenuse \endcode // \n \subsection matrix_operators_clamp clamp() // // The \c clamp() function can be used to restrict all elements of a matrix to a specific range: \code blaze::DynamicMatrix<double> A, B; B = clamp( A, -1.0, 1.0 ); // Restrict all elements to the range [-1..1] \endcode // Note that in case of sparse matrices only the non-zero elements are taken into account! // // // \n \subsection matrix_operators_pow pow() // // The \c pow() function can be used to compute the exponential value of each element of a matrix. // If passed a matrix and a numeric exponent, the function computes the exponential value of each // element of the matrix using the same exponent.
If passed a second matrix, the function computes // the componentwise exponential value: \code blaze::StaticMatrix<double,3UL,3UL> A, B, C; C = pow( A, 1.2 ); // Computes the exponential value of each element C = pow( A, B ); // Computes the componentwise exponential value \endcode // \n \subsection matrix_operators_exp exp() // // \c exp(), \c exp2() and \c exp10() compute the base e/2/10 exponential of each element of a // matrix, respectively: \code blaze::HybridMatrix<double,3UL,3UL> A, B; B = exp( A ); // Computes the base e exponential of each element B = exp2( A ); // Computes the base 2 exponential of each element B = exp10( A ); // Computes the base 10 exponential of each element \endcode // Note that in case of sparse matrices only the non-zero elements are taken into account! // // // \n \subsection matrix_operators_log log() / log2() / log10() // // The \c log(), \c log2() and \c log10() functions can be used to compute the natural, binary // and common logarithm of each element of a matrix: \code blaze::StaticMatrix<double,3UL,3UL> A, B; B = log( A ); // Computes the natural logarithm of each element B = log2( A ); // Computes the binary logarithm of each element B = log10( A ); // Computes the common logarithm of each element \endcode // \n \subsection matrix_operators_trigonometric_functions sin() / cos() / tan() / asin() / acos() / atan() // // The following trigonometric functions are available for both dense and sparse matrices: \code blaze::DynamicMatrix<double> A, B; B = sin( A ); // Computes the sine of each element of the matrix B = cos( A ); // Computes the cosine of each element of the matrix B = tan( A ); // Computes the tangent of each element of the matrix B = asin( A ); // Computes the inverse sine of each element of the matrix B = acos( A ); // Computes the inverse cosine of each element of the matrix B = atan( A ); // Computes the inverse tangent of each element of the matrix \endcode // Note that in case of sparse matrices only the non-zero elements are taken into account! // // // \n \subsection matrix_operators_hyperbolic_functions sinh() / cosh() / tanh() / asinh() / acosh() / atanh() // // The following hyperbolic functions are available for both dense and sparse matrices: \code blaze::DynamicMatrix<double> A, B; B = sinh( A ); // Computes the hyperbolic sine of each element of the matrix B = cosh( A ); // Computes the hyperbolic cosine of each element of the matrix B = tanh( A ); // Computes the hyperbolic tangent of each element of the matrix B = asinh( A ); // Computes the inverse hyperbolic sine of each element of the matrix B = acosh( A ); // Computes the inverse hyperbolic cosine of each element of the matrix B = atanh( A ); // Computes the inverse hyperbolic tangent of each element of the matrix \endcode // \n \subsection matrix_operations_atan2 atan2() // // The multi-valued inverse tangent is available for a pair of dense matrices: \code blaze::DynamicMatrix<double> A, B, C; C = atan2( A, B ); // Computes the componentwise multi-valued inverse tangent \endcode // \n \subsection matrix_operators_erf erf() / erfc() // // The \c erf() and \c erfc() functions compute the (complementary) error function of each // element of a matrix: \code blaze::StaticMatrix<double,3UL,3UL> A, B; B = erf( A ); // Computes the error function of each element B = erfc( A ); // Computes the complementary error function of each element \endcode // Note that in case of sparse matrices only the non-zero elements are taken into account! 
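// To illustrate the preceding notes on sparse matrices, the following minimal sketch (values
// chosen arbitrarily) shows that such an elementwise function is applied to the stored non-zero
// elements only; the implicit zeros are left untouched and are not turned into \c exp(0) = 1:

   \code
   blaze::CompressedMatrix<double> A( 2UL, 2UL );
   A(0,0) = 1.0;  // Single stored element; all other elements are implicit zeros

   blaze::CompressedMatrix<double> B( exp( A ) );

   B.nonZeros();  // Still returns 1: only the stored element was passed through exp()
   \endcode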
// // // \n \subsection matrix_operations_map map() / forEach() // // Via the unary and binary \c map() functions it is possible to execute componentwise custom // operations on matrices. The unary \c map() function can be used to apply a custom operation // on each element of a dense or sparse matrix. For instance, the following example demonstrates // a custom square root computation via a lambda: \code blaze::DynamicMatrix<double> A, B; B = map( A, []( double d ) { return std::sqrt( d ); } ); \endcode // The binary \c map() function can be used to apply an operation pairwise to the elements of // two dense matrices. The following example demonstrates the merging of two matrices of double // precision values into a matrix of double precision complex numbers: \code blaze::DynamicMatrix<double> real{ { 2.1, -4.2 }, { 1.0, 0.6 } }; blaze::DynamicMatrix<double> imag{ { 0.3, 1.4 }, { 2.9, -3.4 } }; blaze::DynamicMatrix< complex<double> > cplx; // Creating the matrix // ( ( 2.1, 0.3) (-4.2, 1.4) ) // ( ( 1.0, 2.9) ( 0.6, -3.4) ) cplx = map( real, imag, []( double r, double i ){ return complex<double>( r, i ); } ); \endcode // Although the computation can be parallelized it is not vectorized and thus cannot perform at // peak performance. However, it is also possible to create vectorized custom operations. See // \ref custom_operations for a detailed overview of the possibilities of custom operations. // // Please note that unary custom operations on matrices have been introduced in \b Blaze 3.0 in // form of the \c forEach() function. With the introduction of binary custom functions, the // \c forEach() function has been renamed to \c map(). The \c forEach() function can still be // used (even for binary custom operations), but the function might be deprecated in future // releases of \b Blaze. // // // \n \section matrix_operations_reduction_operations Reduction Operations // <hr> // // \subsection matrix_operations_reduction_operations_reduce reduce() // // The \c reduce() function performs either a total reduction, a rowwise reduction or a columnwise // reduction of the elements of the given dense matrix or the non-zero elements of the given sparse // matrix. The following examples demonstrate the total reduction of a dense and sparse matrix: \code blaze::DynamicMatrix<double> A; // ... Resizing and initialization const double totalsum1 = reduce( A, blaze::Add() ); const double totalsum2 = reduce( A, []( double a, double b ){ return a + b; } ); \endcode \code blaze::CompressedMatrix<double> A; // ... Resizing and initialization const double totalsum1 = reduce( A, blaze::Add() ); const double totalsum2 = reduce( A, []( double a, double b ){ return a + b; } ); \endcode // By specifying \c blaze::columnwise or \c blaze::rowwise the \c reduce() function performs a // column-wise or row-wise reduction, respectively. In case \c blaze::columnwise is specified, the // (non-zero) elements of the matrix are reduced column-wise and the result is a row vector. In // case \c blaze::rowwise is specified, the (non-zero) elements of the matrix are reduced row-wise // and the result is a column vector: \code blaze::DynamicMatrix<double> A; blaze::CompressedMatrix<double> B; blaze::DynamicVector<double,rowVector> colsum1, colsum2; // ...
Resizing and initialization colsum1 = reduce<columnwise>( A, blaze::Add() ); colsum2 = reduce<columnwise>( B, []( double a, double b ){ return a + b; } ); \endcode \code blaze::DynamicMatrix<double> A; blaze::CompressedMatrix<double> B; blaze::DynamicVector<double,columnVector> rowsum1, rowsum2; // ... Resizing and initialization rowsum1 = reduce<rowwise>( A, blaze::Add() ); rowsum2 = reduce<rowwise>( B, []( double a, double b ){ return a + b; } ); \endcode // As demonstrated in the examples it is possible to pass any binary callable as custom reduction // operation. However, for instance in the case of lambdas the vectorization of the reduction // operation is compiler dependent and might not perform at peak performance. However, it is also // possible to create vectorized custom operations. See \ref custom_operations for a detailed // overview of the possibilities of custom operations. // // Please note that the evaluation order of the \c reduce() function is unspecified. Thus the // behavior is non-deterministic if the given reduction operation is not associative or not // commutative. Also, the operation is undefined if the given reduction operation modifies the // values. // // \n \subsection matrix_operations_reduction_operations_sum sum() // // The \c sum() function reduces the elements of the given dense matrix or the non-zero elements // of the given sparse matrix by means of addition: \code blaze::DynamicMatrix<int> A{ { 1, 2 }, { 3, 4 } }; const int totalsum = sum( A ); // Results in 10 \endcode \code blaze::CompressedMatrix<int> A{ { 1, 2 }, { 3, 4 } }; const int totalsum = sum( A ); // Results in 10 \endcode // By specifying \c blaze::columnwise or \c blaze::rowwise the \c sum() function performs a // column-wise or row-wise summation, respectively. In case \c blaze::columnwise is specified, // the (non-zero) elements of the matrix are summed up column-wise and the result is a row vector. // In case \c blaze::rowwise is specified, the (non-zero) elements of the matrix are summed up // row-wise and the result is a column vector: \code using blaze::columnwise; blaze::DynamicMatrix<int> A{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::CompressedMatrix<int> B{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::DynamicVector<int,rowVector> colsum1, colsum2; colsum1 = sum<columnwise>( A ); // Results in ( 2, 3, 6 ) colsum2 = sum<columnwise>( B ); // Same result \endcode \code using blaze::rowwise; blaze::DynamicMatrix<int> A{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::CompressedMatrix<int> B{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::DynamicVector<int,columnVector> rowsum1, rowsum2; rowsum1 = sum<rowwise>( A ); // Results in ( 3, 8 ) rowsum2 = sum<rowwise>( B ); // Same result \endcode // Please note that the evaluation order of the \c sum() function is unspecified. // // \n \subsection matrix_operations_reduction_operations_prod prod() // // The \c prod() function reduces the elements of the given dense matrix or the non-zero elements // of the given sparse matrix by means of multiplication: \code blaze::DynamicMatrix<int> A{ { 1, 2 }, { 3, 4 } }; const int totalprod = prod( A ); // Results in 24 \endcode \code blaze::CompressedMatrix<int> A{ { 1, 2 }, { 3, 4 } }; const int totalprod = prod( A ); // Results in 24 \endcode // By specifying \c blaze::columnwise or \c blaze::rowwise the \c prod() function performs a // column-wise or row-wise multiplication, respectively. In case \c blaze::columnwise is specified, // the (non-zero) elements of the matrix are multiplied column-wise and the result is a row vector.
// In case \c blaze::rowwise is specified, the (non-zero) elements of the matrix are multiplied // row-wise and the result is a column vector: \code using blaze::columnwise; blaze::DynamicMatrix<int> A{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::CompressedMatrix<int> B{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::DynamicVector<int,rowVector> colprod1, colprod2; colprod1 = prod<columnwise>( A ); // Results in ( 1, 0, 8 ) colprod2 = prod<columnwise>( B ); // Results in ( 1, 3, 8 ) \endcode \code using blaze::rowwise; blaze::DynamicMatrix<int> A{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::CompressedMatrix<int> B{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::DynamicVector<int,columnVector> rowprod1, rowprod2; rowprod1 = prod<rowwise>( A ); // Results in ( 0, 12 ) rowprod2 = prod<rowwise>( B ); // Results in ( 2, 12 ) \endcode // Please note that the evaluation order of the \c prod() function is unspecified. // // \n \subsection matrix_operations_reduction_operations_min min() // // The unary \c min() function returns the smallest element of the given dense matrix or the // smallest non-zero element of the given sparse matrix. This function can only be used for // element types that support the smaller-than relationship. In case the given matrix currently // has either 0 rows or 0 columns, the returned value is the default value (e.g. 0 in case of // fundamental data types). \code blaze::DynamicMatrix<int> A{ { 1, 2 }, { 3, 4 } }; const int totalmin = min( A ); // Results in 1 \endcode \code blaze::CompressedMatrix<int> A{ { 1, 0 }, { 3, 0 } }; const int totalmin = min( A ); // Results in 1 \endcode // \note In case the sparse matrix is not completely filled, the implicit zero elements are NOT // taken into account. In the previous example the compressed matrix has only 2 non-zero elements. // However, the minimum of this matrix is 1. // // By specifying \c blaze::columnwise or \c blaze::rowwise the \c min() function determines the // smallest (non-zero) element in each column or row, respectively. In case \c blaze::columnwise // is specified, the smallest (non-zero) element of each column is determined and the result is // a row vector. In case \c blaze::rowwise is specified, the smallest (non-zero) element of each // row is determined and the result is a column vector. \code using blaze::columnwise; blaze::DynamicMatrix<int> A{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::CompressedMatrix<int> B{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::DynamicVector<int,rowVector> colmin1, colmin2; colmin1 = min<columnwise>( A ); // Results in ( 1, 0, 2 ) colmin2 = min<columnwise>( B ); // Results in ( 1, 3, 2 ) \endcode \code using blaze::rowwise; blaze::DynamicMatrix<int> A{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::CompressedMatrix<int> B{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::DynamicVector<int,columnVector> rowmin1, rowmin2; rowmin1 = min<rowwise>( A ); // Results in ( 0, 1 ) rowmin2 = min<rowwise>( B ); // Results in ( 1, 1 ) \endcode // \note In case the sparse matrix is not completely filled, the implicit zero elements are NOT // taken into account. // // \n \subsection matrix_operations_reduction_operations_max max() // // The unary \c max() function returns the largest element of the given dense matrix or the // largest non-zero element of the given sparse matrix. This function can only be used for // element types that support the smaller-than relationship. In case the given matrix currently // has either 0 rows or 0 columns, the returned value is the default value (e.g. 0 in case of // fundamental data types).
\code blaze::DynamicMatrix<int> A{ { 1, 2 }, { 3, 4 } }; const int totalmax = max( A ); // Results in 4 \endcode \code blaze::CompressedMatrix<int> A{ { -1, 0 }, { -3, 0 } }; const int totalmax = max( A ); // Results in -1 \endcode // \note In case the sparse matrix is not completely filled, the implicit zero elements are NOT // taken into account. In the previous example the compressed matrix has only 2 non-zero elements. // However, the maximum of this matrix is -1. // // By specifying \c blaze::columnwise or \c blaze::rowwise the \c max() function determines the // largest (non-zero) element in each column or row, respectively. In case \c blaze::columnwise // is specified, the largest (non-zero) element of each column is determined and the result is // a row vector. In case \c blaze::rowwise is specified, the largest (non-zero) element of each // row is determined and the result is a column vector. \code using blaze::columnwise; blaze::DynamicMatrix<int> A{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::CompressedMatrix<int> B{ { -1, 0, -2 }, { -1, -3, -4 } }; blaze::DynamicVector<int,rowVector> colmax1, colmax2; colmax1 = max<columnwise>( A ); // Results in ( 1, 3, 4 ) colmax2 = max<columnwise>( B ); // Results in ( -1, -3, -2 ) \endcode \code using blaze::rowwise; blaze::DynamicMatrix<int> A{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::CompressedMatrix<int> B{ { -1, 0, -2 }, { -1, -3, -4 } }; blaze::DynamicVector<int,columnVector> rowmax1, rowmax2; rowmax1 = max<rowwise>( A ); // Results in ( 2, 4 ) rowmax2 = max<rowwise>( B ); // Results in ( -1, -1 ) \endcode // \note In case the sparse matrix is not completely filled, the implicit zero elements are NOT // taken into account. // // // \n \section matrix_operations_norms Norms // <hr> // // \subsection matrix_operations_norms_norm norm() // // The \c norm() function computes the L2 norm of the given dense or sparse matrix: \code blaze::DynamicMatrix<double> A; // ... Resizing and initialization const double l2 = norm( A ); \endcode // \n \subsection matrix_operations_norms_sqrnorm sqrNorm() // // The \c sqrNorm() function computes the squared L2 norm of the given dense or sparse matrix: \code blaze::DynamicMatrix<double> A; // ... Resizing and initialization const double l2 = sqrNorm( A ); \endcode // \n \subsection matrix_operations_norms_l1norm l1Norm() // // The \c l1Norm() function computes the L1 norm of the given dense or sparse matrix: \code blaze::DynamicMatrix<double> A; // ... Resizing and initialization const double l1 = l1Norm( A ); \endcode // \n \subsection matrix_operations_norms_l2norm l2Norm() // // The \c l2Norm() function computes the L2 norm of the given dense or sparse matrix: \code blaze::DynamicMatrix<double> A; // ... Resizing and initialization const double l2 = l2Norm( A ); \endcode // \n \subsection matrix_operations_norms_l3norm l3Norm() // // The \c l3Norm() function computes the L3 norm of the given dense or sparse matrix: \code blaze::DynamicMatrix<double> A; // ... Resizing and initialization const double l3 = l3Norm( A ); \endcode // \n \subsection matrix_operations_norms_l4norm l4Norm() // // The \c l4Norm() function computes the L4 norm of the given dense or sparse matrix: \code blaze::DynamicMatrix<double> A; // ...
Resizing and initialization const double l4 = l4Norm( A ); \endcode // \n \subsection matrix_operations_norms_lpnorm lpNorm() // // The \c lpNorm() function computes the general Lp norm of the given dense or sparse matrix, // where the norm is specified by either a compile time or a runtime argument: \code blaze::DynamicMatrix<double> A; // ... Resizing and initialization const double lp1 = lpNorm<2>( A ); // Compile time argument const double lp2 = lpNorm( A, 2.3 ); // Runtime argument \endcode // \n \subsection matrix_operations_norms_maxnorm maxNorm() // // The \c maxNorm() function computes the maximum norm of the given dense or sparse matrix: \code blaze::DynamicMatrix<double> A; // ... Resizing and initialization const double max = maxNorm( A ); \endcode // \n \section matrix_operations_declaration_operations Declaration Operations // <hr> // // \subsection matrix_operations_declsym declsym() // // The \c declsym() operation can be used to explicitly declare any matrix or matrix expression // as symmetric: \code blaze::DynamicMatrix<double> A, B; // ... Resizing and initialization B = declsym( A ); \endcode // Any matrix or matrix expression that has been declared as symmetric via \c declsym() will // gain all the benefits of a symmetric matrix, which range from reduced runtime checking to // a considerable speed-up in computations: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; DynamicMatrix<double> A, B, C; SymmetricMatrix< DynamicMatrix<double> > S; // ... Resizing and initialization isSymmetric( declsym( A ) ); // Will always return true without runtime effort S = declsym( A ); // Omit any runtime check for symmetry C = declsym( A * B ); // Declare the result of the matrix multiplication as symmetric, // i.e. perform an optimized matrix multiplication \endcode // \warning The \c declsym() operation has the semantics of a cast: The caller is completely // responsible and the system trusts the given information. Declaring a non-symmetric matrix or // matrix expression as symmetric via the \c declsym() operation leads to undefined behavior // (which can be violated invariants or wrong computation results)! // // // \n \subsection matrix_operations_declherm declherm() // // The \c declherm() operation can be used to explicitly declare any matrix or matrix expression // as Hermitian: \code blaze::DynamicMatrix<double> A, B; // ... Resizing and initialization B = declherm( A ); \endcode // Any matrix or matrix expression that has been declared as Hermitian via \c declherm() will // gain all the benefits of an Hermitian matrix, which range from reduced runtime checking to // a considerable speed-up in computations: \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; DynamicMatrix<double> A, B, C; HermitianMatrix< DynamicMatrix<double> > S; // ... Resizing and initialization isHermitian( declherm( A ) ); // Will always return true without runtime effort S = declherm( A ); // Omit any runtime check for Hermitian symmetry C = declherm( A * B ); // Declare the result of the matrix multiplication as Hermitian, // i.e. perform an optimized matrix multiplication \endcode // \warning The \c declherm() operation has the semantics of a cast: The caller is completely // responsible and the system trusts the given information. Declaring a non-Hermitian matrix or // matrix expression as Hermitian via the \c declherm() operation leads to undefined behavior // (which can be violated invariants or wrong computation results)! 
// // // \n \subsection matrix_operations_decllow decllow() // // The \c decllow() operation can be used to explicitly declare any matrix or matrix expression // as lower triangular: \code blaze::DynamicMatrix<double> A, B; // ... Resizing and initialization B = decllow( A ); \endcode // Any matrix or matrix expression that has been declared as lower triangular via \c decllow() // will gain all the benefits of a lower triangular matrix, which range from reduced runtime // checking to a considerable speed-up in computations: \code using blaze::DynamicMatrix; using blaze::LowerMatrix; DynamicMatrix<double> A, B, C; LowerMatrix< DynamicMatrix<double> > L; // ... Resizing and initialization isLower( decllow( A ) ); // Will always return true without runtime effort L = decllow( A ); // Omit any runtime check for A being a lower matrix C = decllow( A * B ); // Declare the result of the matrix multiplication as lower triangular, // i.e. perform an optimized matrix multiplication \endcode // \warning The \c decllow() operation has the semantics of a cast: The caller is completely // responsible and the system trusts the given information. Declaring a non-lower matrix or // matrix expression as lower triangular via the \c decllow() operation leads to undefined // behavior (which can be violated invariants or wrong computation results)! // // // \n \subsection matrix_operations_declupp declupp() // // The \c declupp() operation can be used to explicitly declare any matrix or matrix expression // as upper triangular: \code blaze::DynamicMatrix<double> A, B; // ... Resizing and initialization B = declupp( A ); \endcode // Any matrix or matrix expression that has been declared as upper triangular via \c declupp() // will gain all the benefits of a upper triangular matrix, which range from reduced runtime // checking to a considerable speed-up in computations: \code using blaze::DynamicMatrix; using blaze::UpperMatrix; DynamicMatrix<double> A, B, C; UpperMatrix< DynamicMatrix<double> > U; // ... Resizing and initialization isUpper( declupp( A ) ); // Will always return true without runtime effort U = declupp( A ); // Omit any runtime check for A being a upper matrix C = declupp( A * B ); // Declare the result of the matrix multiplication as upper triangular, // i.e. perform an optimized matrix multiplication \endcode // \warning The \c declupp() operation has the semantics of a cast: The caller is completely // responsible and the system trusts the given information. Declaring a non-upper matrix or // matrix expression as upper triangular via the \c declupp() operation leads to undefined // behavior (which can be violated invariants or wrong computation results)! // // // \n \subsection matrix_operations_decldiag decldiag() // // The \c decldiag() operation can be used to explicitly declare any matrix or matrix expression // as diagonal: \code blaze::DynamicMatrix<double> A, B; // ... Resizing and initialization B = decldiag( A ); \endcode // Any matrix or matrix expression that has been declared as diagonal via \c decldiag() will // gain all the benefits of a diagonal matrix, which range from reduced runtime checking to // a considerable speed-up in computations: \code using blaze::DynamicMatrix; using blaze::DiagonalMatrix; DynamicMatrix<double> A, B, C; DiagonalMatrix< DynamicMatrix<double> > D; // ... 
Resizing and initialization isDiagonal( decldiag( A ) ); // Will always return true without runtime effort D = decldiag( A ); // Omit any runtime check for A being a diagonal matrix C = decldiag( A * B ); // Declare the result of the matrix multiplication as diagonal, // i.e. perform an optimized matrix multiplication \endcode // \warning The \c decldiag() operation has the semantics of a cast: The caller is completely // responsible and the system trusts the given information. Declaring a non-diagonal matrix // or matrix expression as diagonal via the \c decldiag() operation leads to undefined // behavior (which can be violated invariants or wrong computation results)! // // // \n \subsection matrix_operations_declid declid() // // The \c declid() operation can be used to explicitly declare any matrix or matrix expression // as identity matrix: \code blaze::DynamicMatrix<double> A, B; // ... Resizing and initialization B = declid( A ); \endcode // Any matrix or matrix expression that has been declared as identity matrix via \c declid() will // gain all the benefits of an identity matrix, which range from reduced runtime checking to a // considerable speed-up in computations: \code using blaze::DynamicMatrix; using blaze::DiagonalMatrix; DynamicMatrix<double> A, B, C; DiagonalMatrix< DynamicMatrix<double> > D; // ... Resizing and initialization isIdentity( declid( A ) ); // Will always return true without runtime effort D = declid( A ); // Omit any runtime check for A being a diagonal matrix C = declid( A ) * B; // Declare the left operand of the matrix multiplication as an // identity matrix, i.e. perform an optimized matrix multiplication \endcode // \warning The \c declid() operation has the semantics of a cast: The caller is completely // responsible and the system trusts the given information. Declaring a non-identity matrix // or matrix expression as identity matrix via the \c declid() operation leads to undefined // behavior (which can be violated invariants or wrong computation results)! // // // \n \section matrix_operations_matrix_inversion Matrix Inversion // <hr> // // The inverse of a square dense matrix can be computed via the \c inv() function: \code blaze::DynamicMatrix<float,blaze::rowMajor> A, B; // ... Resizing and initialization B = inv( A ); // Compute the inverse of A \endcode // Alternatively, an in-place inversion of a dense matrix can be performed via the \c invert() // function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization invert( A ); // In-place matrix inversion \endcode // Both the \c inv() and the \c invert() functions will automatically select the most suited matrix // inversion algorithm depending on the size and type of the given matrix. For small matrices of // up to 6x6, both functions use manually optimized kernels for maximum performance. For matrices // larger than 6x6 the inversion is performed by means of the most suited matrix decomposition // method: In case of a general matrix the LU decomposition is used, for symmetric matrices the // LDLT decomposition is applied, for Hermitian matrices the LDLH decomposition is performed, and // for triangular matrices the inverse is computed via a forward or back substitution. 
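// As a minimal sketch of this automatic selection (assuming a matrix that has been declared
// symmetric via the \c SymmetricMatrix adaptor and properly initialized), no manual hint is
// required to trigger the LDLT-based kernel:

   \code
   blaze::SymmetricMatrix< blaze::DynamicMatrix<double> > S( 100UL );
   // ... Initialization

   invert( S );  // The symmetric structure is known at compile time, so the LDLT-based
                 // inversion is selected automatically
   \endcode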
// // In case the type of the matrix does not provide additional compile time information about its // structure (symmetric, lower, upper, diagonal, ...), the information can be provided manually // when calling the \c invert() function: \code using blaze::asGeneral; using blaze::asSymmetric; using blaze::asHermitian; using blaze::asLower; using blaze::asUniLower; using blaze::asUpper; using blaze::asUniUpper; using blaze::asDiagonal; invert<asGeneral> ( A ); // In-place inversion of a general matrix invert<asSymmetric>( A ); // In-place inversion of a symmetric matrix invert<asHermitian>( A ); // In-place inversion of a Hermitian matrix invert<asLower> ( A ); // In-place inversion of a lower triangular matrix invert<asUniLower> ( A ); // In-place inversion of a lower unitriangular matrix invert<asUpper> ( A ); // In-place inversion of a upper triangular matrix invert<asUniUpper> ( A ); // In-place inversion of a upper unitriangular matrix invert<asDiagonal> ( A ); // In-place inversion of a diagonal matrix \endcode // Alternatively, via the \c invert() function it is possible to explicitly specify the inversion // algorithm: \code using blaze::byLU; using blaze::byLDLT; using blaze::byLDLH; using blaze::byLLH; // In-place inversion of a general matrix by means of an LU decomposition invert<byLU>( A ); // In-place inversion of a symmetric indefinite matrix by means of a Bunch-Kaufman decomposition invert<byLDLT>( A ); // In-place inversion of a Hermitian indefinite matrix by means of a Bunch-Kaufman decomposition invert<byLDLH>( A ); // In-place inversion of a positive definite matrix by means of a Cholesky decomposition invert<byLLH>( A ); \endcode // Whereas the inversion by means of an LU decomposition works for every general square matrix, // the inversion by LDLT only works for symmetric indefinite matrices, the inversion by LDLH is // restricted to Hermitian indefinite matrices and the Cholesky decomposition (LLH) only works // for Hermitian positive definite matrices. Please note that it is in the responsibility of the // function caller to guarantee that the selected algorithm is suited for the given matrix. In // case this precondition is violated the result can be wrong and might not represent the inverse // of the given matrix! // // For both the \c inv() and \c invert() function the matrix inversion fails if ... // // - ... the given matrix is not a square matrix; // - ... the given matrix is singular and not invertible. // // In all failure cases either a compilation error is created if the failure can be predicted at // compile time or a \c std::invalid_argument exception is thrown. // // \note The matrix inversion can only be used for dense matrices with \c float, \c double, // \c complex<float> or \c complex<double> element type. The attempt to call the function with // matrices of any other element type or with a sparse matrix results in a compile time error! // // \note The functions invert the dense matrix by means of LAPACK kernels. Thus the functions can // only be used if a fitting LAPACK library is available and linked to the executable. Otherwise // a linker error will be created. // // \note It is not possible to use any kind of view on the expression object returned by the // \c inv() function. Also, it is not possible to access individual elements via the function call // operator on the expression object: \code row( inv( A ), 2UL ); // Compilation error: Views cannot be used on an inv() expression! 
inv( A )(1,2); // Compilation error: It is not possible to access individual elements! \endcode // \note The inversion functions do not provide any exception safety guarantee, i.e. in case an // exception is thrown the matrix may already have been modified. // // // \n \section matrix_operations_decomposition Matrix Decomposition // <hr> // // \note All decomposition functions can only be used for dense matrices with \c float, \c double, // \c complex<float> or \c complex<double> element type. The attempt to call the function with // matrices of any other element type or with a sparse matrix results in a compile time error! // // \note The functions decompose a dense matrix by means of LAPACK kernels. Thus the functions can // only be used if a fitting LAPACK library is available and linked to the executable. Otherwise // a linker error will be created. // // \subsection matrix_operations_decomposition_lu LU Decomposition // // The LU decomposition of a dense matrix can be computed via the \c lu() function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> L, U, P; lu( A, L, U, P ); // LU decomposition of a row-major matrix assert( A == L * U * P ); \endcode \code blaze::DynamicMatrix<double,blaze::columnMajor> A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::columnMajor> L, U, P; lu( A, L, U, P ); // LU decomposition of a column-major matrix assert( A == P * L * U ); \endcode // The function works for both \c rowMajor and \c columnMajor matrices. Note, however, that the // three matrices \c A, \c L and \c U are required to have the same storage order. Also, please // note that the way the permutation matrix \c P needs to be applied differs between row-major and // column-major matrices, since the algorithm uses column interchanges for row-major matrices and // row interchanges for column-major matrices. // // Furthermore, \c lu() can be used with adaptors. For instance, the following example demonstrates // the LU decomposition of a symmetric matrix into a lower and upper triangular matrix: \code blaze::SymmetricMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > A; // ... Resizing and initialization blaze::LowerMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > L; blaze::UpperMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > U; blaze::DynamicMatrix<double,blaze::columnMajor> P; lu( A, L, U, P ); // LU decomposition of A \endcode // \n \subsection matrix_operations_decomposition_llh Cholesky Decomposition // // The Cholesky (LLH) decomposition of a dense matrix can be computed via the \c llh() function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> L; llh( A, L ); // LLH decomposition of a row-major matrix assert( A == L * ctrans( L ) ); \endcode // The function works for both \c rowMajor and \c columnMajor matrices and the two matrices \c A // and \c L can have any storage order. // // Furthermore, \c llh() can be used with adaptors. For instance, the following example demonstrates // the LLH decomposition of a symmetric matrix into a lower triangular matrix: \code blaze::SymmetricMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > A; // ... 
Resizing and initialization blaze::LowerMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > L; llh( A, L ); // Cholesky decomposition of A \endcode // \n \subsection matrix_operations_decomposition_qr QR Decomposition // // The QR decomposition of a dense matrix can be computed via the \c qr() function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::columnMajor> Q; blaze::DynamicMatrix<double,blaze::rowMajor> R; qr( A, Q, R ); // QR decomposition of a row-major matrix assert( A == Q * R ); \endcode // The function works for both \c rowMajor and \c columnMajor matrices and the three matrices // \c A, \c Q and \c R can have any storage order. // // Furthermore, \c qr() can be used with adaptors. For instance, the following example demonstrates // the QR decomposition of a symmetric matrix into a general matrix and an upper triangular matrix: \code blaze::SymmetricMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> Q; blaze::UpperMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > R; qr( A, Q, R ); // QR decomposition of A \endcode // \n \subsection matrix_operations_decomposition_rq RQ Decomposition // // Similar to the QR decomposition, the RQ decomposition of a dense matrix can be computed via // the \c rq() function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> R; blaze::DynamicMatrix<double,blaze::columnMajor> Q; rq( A, R, Q ); // RQ decomposition of a row-major matrix assert( A == R * Q ); \endcode // The function works for both \c rowMajor and \c columnMajor matrices and the three matrices // \c A, \c R and \c Q can have any storage order. // // Also the \c rq() function can be used in combination with matrix adaptors. For instance, the // following example demonstrates the RQ decomposition of an Hermitian matrix into a general // matrix and an upper triangular matrix: \code blaze::HermitianMatrix< blaze::DynamicMatrix<complex<double>,blaze::columnMajor> > A; // ... Resizing and initialization blaze::UpperMatrix< blaze::DynamicMatrix<complex<double>,blaze::columnMajor> > R; blaze::DynamicMatrix<complex<double>,blaze::rowMajor> Q; rq( A, R, Q ); // RQ decomposition of A \endcode // \n \subsection matrix_operations_decomposition_ql QL Decomposition // // The QL decomposition of a dense matrix can be computed via the \c ql() function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> Q; blaze::DynamicMatrix<double,blaze::columnMajor> L; ql( A, Q, L ); // QL decomposition of a row-major matrix assert( A == Q * L ); \endcode // The function works for both \c rowMajor and \c columnMajor matrices and the three matrices // \c A, \c Q and \c L can have any storage order. // // Also the \c ql() function can be used in combination with matrix adaptors. For instance, the // following example demonstrates the QL decomposition of a symmetric matrix into a general // matrix and a lower triangular matrix: \code blaze::SymmetricMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > A; // ... 
Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> Q; blaze::LowerMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > L; ql( A, Q, L ); // QL decomposition of A \endcode // \n \subsection matrix_operations_decomposition_lq LQ Decomposition // // The LQ decomposition of a dense matrix can be computed via the \c lq() function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> L; blaze::DynamicMatrix<double,blaze::columnMajor> Q; lq( A, L, Q ); // LQ decomposition of a row-major matrix assert( A == L * Q ); \endcode // The function works for both \c rowMajor and \c columnMajor matrices and the three matrices // \c A, \c L and \c Q can have any storage order. // // Furthermore, \c lq() can be used with adaptors. For instance, the following example demonstrates // the LQ decomposition of an Hermitian matrix into a lower triangular matrix and a general matrix: \code blaze::HermitianMatrix< blaze::DynamicMatrix<complex<double>,blaze::columnMajor> > A; // ... Resizing and initialization blaze::LowerMatrix< blaze::DynamicMatrix<complex<double>,blaze::columnMajor> > L; blaze::DynamicMatrix<complex<double>,blaze::rowMajor> Q; lq( A, L, Q ); // LQ decomposition of A \endcode // \n \section matrix_operations_eigenvalues Eigenvalues/Eigenvectors // <hr> // // The eigenvalues and eigenvectors of a dense matrix can be computed via the \c eigen() functions: \code namespace blaze { template< typename MT, bool SO, typename VT, bool TF > void eigen( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w ); template< typename MT1, bool SO1, typename VT, bool TF, typename MT2, bool SO2 > void eigen( const DenseMatrix<MT1,SO1>& A, DenseVector<VT,TF>& w, DenseMatrix<MT2,SO2>& V ); } // namespace blaze \endcode // The first function computes only the eigenvalues of the given \a n-by-\a n matrix, the second // function additionally computes the eigenvectors. The eigenvalues are returned in the given vector // \a w and the eigenvectors are returned in the given matrix \a V, which are both resized to the // correct dimensions (if possible and necessary). // // Depending on the given matrix type, the resulting eigenvalues are either of floating point // or complex type: In case the given matrix is either a compile time symmetric matrix with // floating point elements or an Hermitian matrix with complex elements, the resulting eigenvalues // will be of floating point type and therefore the elements of the given eigenvalue vector are // expected to be of floating point type. In all other cases they are expected to be of complex // type. Please note that for complex eigenvalues no order of eigenvalues can be assumed, except // that complex conjugate pairs of eigenvalues appear consecutively with the eigenvalue having // the positive imaginary part first. // // In case \a A is a row-major matrix, the left eigenvectors are returned in the rows of \a V, // in case \a A is a column-major matrix, the right eigenvectors are returned in the columns of // \a V. In case the given matrix is a compile time symmetric matrix with floating point elements, // the resulting eigenvectors will be of floating point type and therefore the elements of the // given eigenvector matrix are expected to be of floating point type. In all other cases they // are expected to be of complex type. 
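// As a minimal sketch of the first, eigenvalues-only overload shown above (using the same general double-precision matrix as in the following examples), the call reduces to: \code blaze::DynamicMatrix<double,blaze::rowMajor> A( 5UL, 5UL ); // The general matrix A // ... Initialization blaze::DynamicVector<std::complex<double>,blaze::columnVector> w( 5UL ); // The vector for the complex eigenvalues eigen( A, w ); // Computes only the eigenvalues of A \endcode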
// // The following examples give an impression of the computation of eigenvalues and eigenvectors // for a general, a symmetric, and an Hermitian matrix: \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::rowMajor; using blaze::columnVector; DynamicMatrix<double,rowMajor> A( 5UL, 5UL ); // The general matrix A // ... Initialization DynamicVector<complex<double>,columnVector> w( 5UL ); // The vector for the complex eigenvalues DynamicMatrix<complex<double>,rowMajor> V( 5UL, 5UL ); // The matrix for the left eigenvectors eigen( A, w, V ); \endcode \code using blaze::SymmetricMatrix; using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::rowMajor; using blaze::columnVector; SymmetricMatrix< DynamicMatrix<double,rowMajor> > A( 5UL, 5UL ); // The symmetric matrix A // ... Initialization DynamicVector<double,columnVector> w( 5UL ); // The vector for the real eigenvalues DynamicMatrix<double,rowMajor> V( 5UL, 5UL ); // The matrix for the left eigenvectors eigen( A, w, V ); \endcode \code using blaze::HermitianMatrix; using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::rowMajor; using blaze::columnVector; HermitianMatrix< DynamicMatrix<complex<double>,rowMajor> > A( 5UL, 5UL ); // The Hermitian matrix A // ... Initialization DynamicVector<double,columnVector> w( 5UL ); // The vector for the real eigenvalues DynamicMatrix<complex<double>,rowMajor> V( 5UL, 5UL ); // The matrix for the left eigenvectors eigen( A, w, V ); \endcode // The functions fail if ... // // - ... the given matrix \a A is not a square matrix; // - ... the given vector \a w is a fixed size vector and the size doesn't match; // - ... the given matrix \a V is a fixed size matrix and the dimensions don't match; // - ... the eigenvalue computation fails. // // In all failure cases an exception is thrown. // // \note All \c eigen() functions can only be used for dense matrices with \c float, \c double, // \c complex<float> or \c complex<double> element type. The attempt to call the function with // matrices of any other element type or with a sparse matrix results in a compile time error! // // \note The functions compute the eigenvalues and/or eigenvectors of a dense matrix by means of // LAPACK kernels. Thus the functions can only be used if a fitting LAPACK library is available // and linked to the executable. Otherwise a linker error will be created. // // // \n \section matrix_operations_singularvalues Singular Values/Singular Vectors // <hr> // // The singular value decomposition (SVD) of a dense matrix can be computed via the \c svd() // functions: \code namespace blaze { template< typename MT, bool SO, typename VT, bool TF > void svd( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& s ); template< typename MT1, bool SO, typename VT, bool TF, typename MT2, typename MT3 > void svd( const DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, DenseMatrix<MT3,SO>& V ); template< typename MT, bool SO, typename VT, bool TF, typename ST > size_t svd( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& s, ST low, ST upp ); template< typename MT1, bool SO, typename VT, bool TF, typename MT2, typename MT3, typename ST > size_t svd( const DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, DenseMatrix<MT3,SO>& V, ST low, ST upp ); } // namespace blaze \endcode // The first and third function compute only singular values of the given general \a m-by-\a n // matrix, the second and fourth function additionally compute singular vectors. 
The resulting // singular values are returned in the given vector \a s, the left singular vectors are returned // in the given matrix \a U, and the right singular vectors are returned in the matrix \a V. \a s, // \a U, and \a V are resized to the correct dimensions (if possible and necessary). // // The third and fourth function allow for the specification of a subset of singular values and/or // vectors. The number of singular values and vectors to be computed is specified by the lower // bound \a low and the upper bound \a upp, which either form an integral or a floating point // range. // // In case \a low and \a upp are of integral type, the function computes all singular values // in the index range \f$[low..upp]\f$. The \a num resulting real and non-negative singular values // (where \a num = \a upp - \a low + 1) are stored in descending order in the given vector \a s, which is either resized (if possible) // or expected to be a \a num-dimensional vector. The resulting left singular vectors are stored // in the given matrix \a U, which is either resized (if possible) or expected to be a // \a m-by-\a num matrix. The resulting right singular vectors are stored in the given matrix \a V, // which is either resized (if possible) or expected to be a \a num-by-\a n matrix. // // In case \a low and \a upp are of floating point type, the function computes all singular values // in the half-open interval \f$(low..upp]\f$. The resulting real and non-negative singular values // are stored in descending order in the given vector \a s, which is either resized (if possible) // or expected to be a min(\a m,\a n)-dimensional vector. The resulting left singular vectors are // stored in the given matrix \a U, which is either resized (if possible) or expected to be a // \a m-by-min(\a m,\a n) matrix. The resulting right singular vectors are stored in the given // matrix \a V, which is either resized (if possible) or expected to be a min(\a m,\a n)-by-\a n // matrix. // // The functions fail if ... // // - ... the given matrix \a U is a fixed size matrix and the dimensions don't match; // - ... the given vector \a s is a fixed size vector and the size doesn't match; // - ... the given matrix \a V is a fixed size matrix and the dimensions don't match; // - ... the given scalar values don't form a proper range; // - ... the singular value decomposition fails. // // In all failure cases an exception is thrown. // // Examples: \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::rowMajor; using blaze::columnVector; DynamicMatrix<double,rowMajor> A( 5UL, 8UL ); // The general matrix A // ... Initialization DynamicMatrix<double,rowMajor> U; // The matrix for the left singular vectors DynamicVector<double,columnVector> s; // The vector for the singular values DynamicMatrix<double,rowMajor> V; // The matrix for the right singular vectors svd( A, U, s, V ); \endcode \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::rowMajor; using blaze::columnVector; DynamicMatrix<complex<double>,rowMajor> A( 5UL, 8UL ); // The general matrix A // ... Initialization DynamicMatrix<complex<double>,rowMajor> U; // The matrix for the left singular vectors DynamicVector<double,columnVector> s; // The vector for the singular values DynamicMatrix<complex<double>,rowMajor> V; // The matrix for the right singular vectors svd( A, U, s, V, 0, 2 ); \endcode // \note All \c svd() functions can only be used for dense matrices with \c float, \c double, // \c complex<float> or \c complex<double> element type.
The attempt to call the function with // matrices of any other element type or with a sparse matrix results in a compile time error! // // \note The functions compute the singular values and/or singular vectors of a dense matrix by // means of LAPACK kernels. Thus the functions can only be used if a fitting LAPACK library is // available and linked to the executable. Otherwise a linker error will be created. // // // \n Previous: \ref matrix_types &nbsp; &nbsp; Next: \ref adaptors */ //************************************************************************************************* //**Adaptors*************************************************************************************** /*!\page adaptors Adaptors // // \tableofcontents // // // \section adaptors_general General Concepts // <hr> // // Adaptors act as wrappers around the general \ref matrix_types. They adapt the interface of the // matrices such that certain invariants are preserved. Due to this adaptors can provide a compile // time guarantee of certain properties, which can be exploited for optimized performance. // // The \b Blaze library provides a total of 9 different adaptors: // // <ul> // <li> \ref adaptors_symmetric_matrices </li> // <li> \ref adaptors_hermitian_matrices </li> // <li> \ref adaptors_triangular_matrices // <ul> // <li> \ref adaptors_triangular_matrices "Lower Triangular Matrices" // <ul> // <li> \ref adaptors_triangular_matrices_lowermatrix </li> // <li> \ref adaptors_triangular_matrices_unilowermatrix </li> // <li> \ref adaptors_triangular_matrices_strictlylowermatrix </li> // </ul> // </li> // <li> \ref adaptors_triangular_matrices "Upper Triangular Matrices" // <ul> // <li> \ref adaptors_triangular_matrices_uppermatrix </li> // <li> \ref adaptors_triangular_matrices_uniuppermatrix </li> // <li> \ref adaptors_triangular_matrices_strictlyuppermatrix </li> // </ul> // </li> // <li> \ref adaptors_triangular_matrices "Diagonal Matrices" // <ul> // <li> \ref adaptors_triangular_matrices_diagonalmatrix </li> // </ul> // </li> // </ul> // </li> // </ul> // // In combination with the general matrix types, \b Blaze provides a total of 40 different matrix // types that make it possible to exactly adapt the type of matrix to every specific problem. // // // \n \section adaptors_examples Examples // <hr> // // The following code examples give an impression on the use of adaptors. The first example shows // the multiplication between two lower matrices: \code using blaze::DynamicMatrix; using blaze::LowerMatrix; using blaze::rowMajor; using blaze::columnMajor; LowerMatrix< DynamicMatrix<double,rowMajor> > A; LowerMatrix< DynamicMatrix<double,columnMajor> > B; DynamicMatrix<double,columnMajor> C; // ... Resizing and initialization C = A * B; \endcode // When multiplying two matrices, at least one of which is triangular, \b Blaze can exploit the // fact that either the lower or upper part of the matrix contains only default elements and // restrict the algorithm to the non-zero elements. Thus the adaptor provides a significant // performance advantage in comparison to a general matrix multiplication, especially for large // matrices. 
// // The second example shows the \c SymmetricMatrix adaptor in a row-major dense matrix/sparse // vector multiplication: \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::CompressedVector; using blaze::rowMajor; using blaze::columnVector; SymmetricMatrix< DynamicMatrix<double,rowMajor> > A; CompressedVector<double,columnVector> x; DynamicVector<double,columnVector> y; // ... Resizing and initialization y = A * x; \endcode // In this example it is not intuitively apparent that using a row-major matrix is not the best // possible choice in terms of performance since the computation cannot be vectorized. Choosing // a column-major matrix instead, however, would enable a vectorized computation. Therefore // \b Blaze exploits the fact that \c A is symmetric, selects the best suited storage order and // evaluates the multiplication as \code y = trans( A ) * x; \endcode // which significantly increases the performance. // // \n Previous: \ref matrix_operations &nbsp; &nbsp; Next: \ref adaptors_symmetric_matrices */ //************************************************************************************************* //**Symmetric Matrices***************************************************************************** /*!\page adaptors_symmetric_matrices Symmetric Matrices // // \tableofcontents // // // \n \section adaptors_symmetric_matrices_general Symmetric Matrices // <hr> // // In contrast to general matrices, which have no restriction in their number of rows and columns // and whose elements can have any value, symmetric matrices provide the compile time guarantee // to be square matrices with pair-wise identical values. Mathematically, this means that a // symmetric matrix is always equal to its transpose (\f$ A = A^T \f$) and that all non-diagonal // values have an identical counterpart (\f$ a_{ij} == a_{ji} \f$). This symmetry property can // be exploited to provide higher efficiency and/or lower memory consumption. Within the \b Blaze // library, symmetric matrices are realized by the \ref adaptors_symmetric_matrices_symmetricmatrix // class template. // // // \n \section adaptors_symmetric_matrices_symmetricmatrix SymmetricMatrix // <hr> // // The SymmetricMatrix class template is an adapter for existing dense and sparse matrix types. // It inherits the properties and the interface of the given matrix type \c MT and extends it // by enforcing the additional invariant of symmetry (i.e. the matrix is always equal to its // transpose \f$ A = A^T \f$). It can be included via the header file \code #include <blaze/math/SymmetricMatrix.h> \endcode // The type of the adapted matrix can be specified via template parameter: \code template< typename MT > class SymmetricMatrix; \endcode // \c MT specifies the type of the matrix to be adapted. SymmetricMatrix can be used with any // non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Note // that the given matrix type must be either resizable (as for instance blaze::HybridMatrix or // blaze::DynamicMatrix) or must be square at compile time (as for instance blaze::StaticMatrix). 
// // The following examples give an impression of several possible symmetric matrices: \code using blaze::unaligned; using blaze::unpadded; using blaze::rowMajor; using blaze::columnMajor; // Definition of a 3x3 row-major dense symmetric matrix with static memory blaze::SymmetricMatrix< blaze::StaticMatrix<int,3UL,3UL,rowMajor> > A; // Definition of a resizable column-major dense symmetric matrix based on HybridMatrix blaze::SymmetricMatrix< blaze::HybridMatrix<float,4UL,4UL,columnMajor> > B; // Definition of a resizable row-major dense symmetric matrix based on DynamicMatrix blaze::SymmetricMatrix< blaze::DynamicMatrix<double,rowMajor> > C; // Definition of a fixed size row-major dense symmetric matrix based on CustomMatrix blaze::SymmetricMatrix< blaze::CustomMatrix<double,unaligned,unpadded,rowMajor> > D; // Definition of a compressed row-major single precision symmetric matrix blaze::SymmetricMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > E; \endcode // The storage order of a symmetric matrix depends on the storage order of the adapted matrix // type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. is specified as // blaze::rowMajor), the symmetric matrix will also be a row-major matrix. Otherwise, if the // adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the symmetric matrix // will also be a column-major matrix. // // // \n \section adaptors_symmetric_matrices_special_properties Special Properties of Symmetric Matrices // <hr> // // A symmetric matrix is used exactly like a matrix of the underlying, adapted matrix type \c MT. // It also provides (nearly) the same interface as the underlying matrix type. However, there are // some important exceptions resulting from the symmetry constraint: // // -# <b>\ref adaptors_symmetric_matrices_square</b> // -# <b>\ref adaptors_symmetric_matrices_symmetry</b> // -# <b>\ref adaptors_symmetric_matrices_initialization</b> // // \n \subsection adaptors_symmetric_matrices_square Symmetric Matrices Must Always be Square! // // In case a resizable matrix is used (as for instance blaze::HybridMatrix, blaze::DynamicMatrix, // or blaze::CompressedMatrix), this means that the according constructors, the \c resize() and // the \c extend() functions only expect a single parameter, which specifies both the number of // rows and columns, instead of two (one for the number of rows and one for the number of columns): \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; using blaze::rowMajor; // Default constructed, default initialized, row-major 3x3 symmetric dynamic matrix SymmetricMatrix< DynamicMatrix<double,rowMajor> > A( 3 ); // Resizing the matrix to 5x5 A.resize( 5 ); // Extending the number of rows and columns by 2, resulting in a 7x7 matrix A.extend( 2 ); \endcode // In case a matrix with a fixed size is used (as for instance blaze::StaticMatrix), the number // of rows and number of columns must be specified equally: \code using blaze::StaticMatrix; using blaze::SymmetricMatrix; using blaze::columnMajor; // Correct setup of a fixed size column-major 3x3 symmetric static matrix SymmetricMatrix< StaticMatrix<int,3UL,3UL,columnMajor> > A; // Compilation error: the provided matrix type is not a square matrix type SymmetricMatrix< StaticMatrix<int,3UL,4UL,columnMajor> > B; \endcode // \n \subsection adaptors_symmetric_matrices_symmetry The Symmetric Property is Always Enforced!
// // This means that modifying the element \f$ a_{ij} \f$ of a symmetric matrix also modifies its // counterpart element \f$ a_{ji} \f$. Also, it is only possible to assign matrices that are // symmetric themselves: \code using blaze::CompressedMatrix; using blaze::DynamicMatrix; using blaze::StaticMatrix; using blaze::SymmetricMatrix; using blaze::rowMajor; // Default constructed, row-major 3x3 symmetric compressed matrix SymmetricMatrix< CompressedMatrix<double,rowMajor> > A( 3 ); // Initializing three elements via the function call operator A(0,0) = 1.0; // Initialization of the diagonal element (0,0) A(0,2) = 2.0; // Initialization of the elements (0,2) and (2,0) // Inserting three more elements via the insert() function A.insert( 1, 1, 3.0 ); // Inserting the diagonal element (1,1) A.insert( 1, 2, 4.0 ); // Inserting the elements (1,2) and (2,1) // Access via a non-const iterator *A.begin(1UL) = 10.0; // Modifies both elements (1,0) and (0,1) // Erasing elements via the erase() function A.erase( 0, 0 ); // Erasing the diagonal element (0,0) A.erase( 0, 2 ); // Erasing the elements (0,2) and (2,0) // Construction from a symmetric dense matrix StaticMatrix<double,3UL,3UL> B{ { 3.0, 8.0, -2.0 }, { 8.0, 0.0, -1.0 }, { -2.0, -1.0, 4.0 } }; SymmetricMatrix< DynamicMatrix<double,rowMajor> > C( B ); // OK // Assignment of a non-symmetric dense matrix StaticMatrix<double,3UL,3UL> D{ { 3.0, 7.0, -2.0 }, { 8.0, 0.0, -1.0 }, { -2.0, -1.0, 4.0 } }; C = D; // Throws an exception; symmetric invariant would be violated! \endcode // The same restriction also applies to the \c append() function for sparse matrices: Appending // the element \f$ a_{ij} \f$ additionally inserts the element \f$ a_{ji} \f$ into the matrix. // Despite the additional insertion, the \c append() function still provides the most efficient // way to set up a symmetric sparse matrix. 
In order to achieve the maximum efficiency, the // capacity of the individual rows/columns of the matrix should be specifically prepared with // \c reserve() calls: \code using blaze::CompressedMatrix; using blaze::SymmetricMatrix; using blaze::rowMajor; // Setup of the symmetric matrix // // ( 0 1 3 ) // A = ( 1 2 0 ) // ( 3 0 0 ) // SymmetricMatrix< CompressedMatrix<double,rowMajor> > A( 3 ); A.reserve( 5 ); // Reserving enough space for 5 non-zero elements A.reserve( 0, 2 ); // Reserving two non-zero elements in the first row A.reserve( 1, 2 ); // Reserving two non-zero elements in the second row A.reserve( 2, 1 ); // Reserving a single non-zero element in the third row A.append( 0, 1, 1.0 ); // Appending the value 1 at position (0,1) and (1,0) A.append( 1, 1, 2.0 ); // Appending the value 2 at position (1,1) A.append( 2, 0, 3.0 ); // Appending the value 3 at position (2,0) and (0,2) \endcode // The symmetry property is also enforced for symmetric custom matrices: In case the given array // of elements does not represent a symmetric matrix, a \c std::invalid_argument exception is // thrown: \code using blaze::CustomMatrix; using blaze::SymmetricMatrix; using blaze::unaligned; using blaze::unpadded; using blaze::rowMajor; using CustomSymmetric = SymmetricMatrix< CustomMatrix<double,unaligned,unpadded,rowMajor> >; // Creating a 3x3 symmetric custom matrix from a properly initialized array double array[9] = { 1.0, 2.0, 4.0, 2.0, 3.0, 5.0, 4.0, 5.0, 6.0 }; CustomSymmetric A( array, 3UL ); // OK // Attempt to create a second 3x3 symmetric custom matrix from an uninitialized array std::unique_ptr<double[]> memory( new double[9UL] ); CustomSymmetric B( memory.get(), 3UL ); // Throws an exception \endcode // Finally, the symmetry property is enforced for views (rows, columns, submatrices, ...) on the // symmetric matrix. The following example demonstrates that modifying the elements of an entire // row of the symmetric matrix also affects the counterpart elements in the according column of // the matrix: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; // Setup of the symmetric matrix // // ( 0 1 0 2 ) // A = ( 1 3 4 0 ) // ( 0 4 0 5 ) // ( 2 0 5 0 ) // SymmetricMatrix< DynamicMatrix<int> > A( 4 ); A(0,1) = 1; A(0,3) = 2; A(1,1) = 3; A(1,2) = 4; A(2,3) = 5; // Setting all elements in the 1st row to 0 results in the matrix // // ( 0 0 0 2 ) // A = ( 0 0 0 0 ) // ( 0 0 0 5 ) // ( 2 0 5 0 ) // row( A, 1 ) = 0; \endcode // The next example demonstrates the (compound) assignment to submatrices of symmetric matrices. // Since the modification of element \f$ a_{ij} \f$ of a symmetric matrix also modifies the // element \f$ a_{ji} \f$, the matrix to be assigned must be structured such that the symmetry // of the symmetric matrix is preserved. Otherwise a \c std::invalid_argument exception is // thrown: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; // Setup of two default 4x4 symmetric matrices SymmetricMatrix< DynamicMatrix<int> > A1( 4 ), A2( 4 ); // Setup of the 3x2 dynamic matrix // // ( 1 2 ) // B = ( 3 4 ) // ( 5 6 ) // DynamicMatrix<int> B{ { 1, 2 }, { 3, 4 }, { 5, 6 } }; // OK: Assigning B to a submatrix of A1 such that the symmetry can be preserved // // ( 0 0 1 2 ) // A1 = ( 0 0 3 4 ) // ( 1 3 5 6 ) // ( 2 4 6 0 ) // submatrix( A1, 0UL, 2UL, 3UL, 2UL ) = B; // OK // Error: Assigning B to a submatrix of A2 such that the symmetry cannot be preserved! // The elements marked with X cannot be assigned unambiguously!
// // ( 0 1 2 0 ) // A2 = ( 1 3 X 0 ) // ( 2 X 6 0 ) // ( 0 0 0 0 ) // submatrix( A2, 0UL, 1UL, 3UL, 2UL ) = B; // Assignment throws an exception! \endcode // \n \subsection adaptors_symmetric_matrices_initialization The Elements of a Dense Symmetric Matrix are Always Default Initialized! // // Although this results in a small loss of efficiency (especially in case all default values are // overridden afterwards), this property is important since otherwise the symmetric property of // dense symmetric matrices could not be guaranteed: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; // Uninitialized, 5x5 row-major dynamic matrix DynamicMatrix<int,rowMajor> A( 5, 5 ); // Default initialized, 5x5 row-major symmetric dynamic matrix SymmetricMatrix< DynamicMatrix<int,rowMajor> > B( 5 ); \endcode // \n \section adaptors_symmetric_matrices_arithmetic_operations Arithmetic Operations // <hr> // // A SymmetricMatrix can participate in numerical operations in any way any other dense // or sparse matrix can. It can also be combined with any other dense or sparse vector // or matrix. The following code example gives an impression of the use of SymmetricMatrix within // arithmetic operations: \code using blaze::SymmetricMatrix; using blaze::DynamicMatrix; using blaze::HybridMatrix; using blaze::StaticMatrix; using blaze::CompressedMatrix; using blaze::rowMajor; using blaze::columnMajor; DynamicMatrix<double,rowMajor> A( 3, 3 ); CompressedMatrix<double,rowMajor> B( 3, 3 ); SymmetricMatrix< DynamicMatrix<double,rowMajor> > C( 3 ); SymmetricMatrix< CompressedMatrix<double,rowMajor> > D( 3 ); SymmetricMatrix< HybridMatrix<float,3UL,3UL,rowMajor> > E; SymmetricMatrix< StaticMatrix<float,3UL,3UL,columnMajor> > F; E = A + B; // Matrix addition and assignment to a row-major symmetric matrix (includes runtime check) F = C - D; // Matrix subtraction and assignment to a column-major symmetric matrix (only compile time check) F = A * D; // Matrix multiplication between a dense and a sparse matrix (includes runtime check) C *= 2.0; // In-place scaling of matrix C E = 2.0 * B; // Scaling of matrix B (includes runtime check) F = C * 2.0; // Scaling of matrix C (only compile time check) E += A - B; // Addition assignment (includes runtime check) F -= C + D; // Subtraction assignment (only compile time check) F *= A * D; // Multiplication assignment (includes runtime check) \endcode // Note that it is possible to assign any kind of matrix to a symmetric matrix. In case the matrix // to be assigned is not symmetric at compile time, a runtime check is performed. // // // \n \section adaptors_symmetric_matrices_block_matrices Symmetric Block Matrices // <hr> // // It is also possible to use symmetric block matrices: \code using blaze::CompressedMatrix; using blaze::StaticMatrix; using blaze::SymmetricMatrix; // Definition of a 3x3 symmetric block matrix based on CompressedMatrix SymmetricMatrix< CompressedMatrix< StaticMatrix<int,3UL,3UL> > > A( 3 ); \endcode // Also in this case, the SymmetricMatrix class template enforces the invariant of symmetry and // guarantees that a modification of element \f$ a_{ij} \f$ of the adapted matrix is also // applied to element \f$ a_{ji} \f$: \code // Inserting the elements (2,4) and (4,2) A.insert( 2, 4, StaticMatrix<int,3UL,3UL>{ { 1, -4, 5 }, { 6, 8, -3 }, { 2, -1, 2 } } ); // Manipulating the elements (2,4) and (4,2) A(2,4)(1,1) = -5; \endcode // For more information on block matrices, see the tutorial on \ref block_vectors_and_matrices.
// // // \n \section adaptors_symmetric_matrices_performance Performance Considerations // <hr> // // When the symmetric property of a matrix is known beforehand, using the SymmetricMatrix adaptor // instead of a general matrix can be a considerable performance advantage. The \b Blaze library // tries to exploit the properties of symmetric matrices whenever possible. However, there are // also situations when using a symmetric matrix introduces some overhead. The following examples // demonstrate several situations where symmetric matrices can positively or negatively impact // performance. // // \n \subsection adaptors_symmetric_matrices_matrix_matrix_multiplication Positive Impact: Matrix/Matrix Multiplication // // When multiplying two matrices, at least one of which is symmetric, \b Blaze can exploit the fact // that \f$ A = A^T \f$ and choose the fastest and most suited combination of storage orders for the // multiplication. The following example demonstrates this by means of a dense matrix/sparse matrix // multiplication: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; using blaze::rowMajor; using blaze::columnMajor; SymmetricMatrix< DynamicMatrix<double,rowMajor> > A; SymmetricMatrix< CompressedMatrix<double,columnMajor> > B; DynamicMatrix<double,columnMajor> C; // ... Resizing and initialization C = A * B; \endcode // Intuitively, the chosen combination of a row-major and a column-major matrix is the most suited // for maximum performance. However, \b Blaze evaluates the multiplication as \code C = A * trans( B ); \endcode // which significantly increases the performance since in contrast to the original formulation the // optimized form can be vectorized. Therefore, in the context of matrix multiplications, using the // SymmetricMatrix adaptor is obviously an advantage. // // \n \subsection adaptors_symmetric_matrices_matrix_vector_multiplication Positive Impact: Matrix/Vector Multiplication // // A similar optimization is possible in case of matrix/vector multiplications: \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::CompressedVector; using blaze::rowMajor; using blaze::columnVector; SymmetricMatrix< DynamicMatrix<double,rowMajor> > A; CompressedVector<double,columnVector> x; DynamicVector<double,columnVector> y; // ... Resizing and initialization y = A * x; \endcode // In this example it is not intuitively apparent that using a row-major matrix is not the best // possible choice in terms of performance since the computation cannot be vectorized. Choosing // a column-major matrix instead, however, would enable a vectorized computation. Therefore // \b Blaze exploits the fact that \c A is symmetric, selects the best suited storage order and // evaluates the multiplication as \code y = trans( A ) * x; \endcode // which also significantly increases the performance. // // \n \subsection adaptors_symmetric_matrices_views Positive Impact: Row/Column Views on Column/Row-Major Matrices // // Another example is the optimization of a row view on a column-major symmetric matrix: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; using blaze::columnMajor; SymmetricMatrix< DynamicMatrix<double,columnMajor> > A( 10UL ); auto row5 = row( A, 5UL ); \endcode // Usually, a row view on a column-major matrix results in a considerable performance decrease in // comparison to a row view on a row-major matrix due to the non-contiguous storage of the matrix // elements.
However, in case of symmetric matrices, \b Blaze instead uses the according column of // the matrix, which provides the same performance as if the matrix would be row-major. Note that // this also works for column views on row-major matrices, where \b Blaze can use the according // row instead of a column in order to provide maximum performance. // // \n \subsection adaptors_symmetric_matrices_assignment Negative Impact: Assignment of a General Matrix // // In contrast to using a symmetric matrix on the right-hand side of an assignment (i.e. for read // access), which introduces absolutely no performance penalty, using a symmetric matrix on the // left-hand side of an assignment (i.e. for write access) may introduce additional overhead when // it is assigned a general matrix, which is not symmetric at compile time: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; SymmetricMatrix< DynamicMatrix<double> > A, C; DynamicMatrix<double> B; B = A; // Only read-access to the symmetric matrix; no performance penalty C = A; // Assignment of a symmetric matrix to another symmetric matrix; no runtime overhead C = B; // Assignment of a general matrix to a symmetric matrix; some runtime overhead \endcode // When assigning a general, potentially not symmetric matrix to a symmetric matrix it is necessary // to check whether the matrix is symmetric at runtime in order to guarantee the symmetry property // of the symmetric matrix. In case it turns out to be symmetric, it is assigned as efficiently as // possible, if it is not, an exception is thrown. In order to prevent this runtime overhead it is // therefore generally advisable to assign symmetric matrices to other symmetric matrices.\n // In this context it is especially noteworthy that in contrast to additions and subtractions the // multiplication of two symmetric matrices does not necessarily result in another symmetric matrix: \code SymmetricMatrix< DynamicMatrix<double> > A, B, C; C = A + B; // Results in a symmetric matrix; no runtime overhead C = A - B; // Results in a symmetric matrix; no runtime overhead C = A * B; // Is not guaranteed to result in a symmetric matrix; some runtime overhead \endcode // \n Previous: \ref adaptors &nbsp; &nbsp; Next: \ref adaptors_hermitian_matrices */ //************************************************************************************************* //**Hermitian Matrices***************************************************************************** /*!\page adaptors_hermitian_matrices Hermitian Matrices // // \tableofcontents // // // \n \section adaptors_hermitian_matrices_general Hermitian Matrices // <hr> // // In addition to symmetric matrices, \b Blaze also provides an adaptor for Hermitian matrices. // Hermitian matrices provide the compile time guarantee to be square matrices with pair-wise // conjugate complex values. Mathematically, this means that an Hermitian matrix is always equal // to its conjugate transpose (\f$ A = \overline{A^T} \f$) and that all non-diagonal values have // a complex conjugate counterpart (\f$ a_{ij} == \overline{a_{ji}} \f$). Within the \b Blaze // library, Hermitian matrices are realized by the \ref adaptors_hermitian_matrices_hermitianmatrix // class template. // // // \n \section adaptors_hermitian_matrices_hermitianmatrix HermitianMatrix // <hr> // // The HermitianMatrix class template is an adapter for existing dense and sparse matrix types. 
// It inherits the properties and the interface of the given matrix type \c MT and extends it by // enforcing the additional invariant of Hermitian symmetry (i.e. the matrix is always equal to // its conjugate transpose \f$ A = \overline{A^T} \f$). It can be included via the header file \code #include <blaze/math/HermitianMatrix.h> \endcode // The type of the adapted matrix can be specified via template parameter: \code template< typename MT > class HermitianMatrix; \endcode // \c MT specifies the type of the matrix to be adapted. HermitianMatrix can be used with any // non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Also, // the given matrix type must have numeric element types (i.e. all integral types except \c bool, // floating point and complex types). Note that the given matrix type must be either resizable (as // for instance blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as // for instance blaze::StaticMatrix). // // The following examples give an impression of several possible Hermitian matrices: \code using blaze::unaligned; using blaze::unpadded; using blaze::rowMajor; using blaze::columnMajor; // Definition of a 3x3 row-major dense Hermitian matrix with static memory blaze::HermitianMatrix< blaze::StaticMatrix<int,3UL,3UL,rowMajor> > A; // Definition of a resizable column-major dense Hermitian matrix based on HybridMatrix blaze::HermitianMatrix< blaze::HybridMatrix<float,4UL,4UL,columnMajor> > B; // Definition of a resizable row-major dense Hermitian matrix based on DynamicMatrix blaze::HermitianMatrix< blaze::DynamicMatrix<std::complex<double>,rowMajor> > C; // Definition of a fixed size row-major dense Hermitian matrix based on CustomMatrix blaze::HermitianMatrix< blaze::CustomMatrix<double,unaligned,unpadded,rowMajor> > D; // Definition of a compressed row-major single precision complex Hermitian matrix blaze::HermitianMatrix< blaze::CompressedMatrix<std::complex<float>,rowMajor> > E; \endcode // The storage order of a Hermitian matrix depends on the storage order of the adapted matrix // type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. is specified as // blaze::rowMajor), the Hermitian matrix will also be a row-major matrix. Otherwise, if the // adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the Hermitian matrix // will also be a column-major matrix. // // // \n \section adaptors_hermitian_matrices_vs_symmetric_matrices Hermitian Matrices vs. Symmetric Matrices // // The blaze::HermitianMatrix adaptor and the blaze::SymmetricMatrix adaptor share several traits. // However, there are a couple of differences, both from a mathematical point of view as well as // from an implementation point of view. // // From a mathematical point of view, a matrix is called symmetric when it is equal to its // transpose (\f$ A = A^T \f$) and it is called Hermitian when it is equal to its conjugate // transpose (\f$ A = \overline{A^T} \f$). For matrices of real values, however, these two // conditions coincide, which means that symmetric matrices of real values are also Hermitian // and Hermitian matrices of real values are also symmetric. // // From an implementation point of view, \b Blaze restricts Hermitian matrices to numeric data // types (i.e. all integral types except \c bool, floating point and complex types), whereas // symmetric matrices can also be block matrices (i.e. can have vector or matrix elements).
// For built-in element types, the HermitianMatrix adaptor behaves exactly like the according // SymmetricMatrix implementation. For complex element types, however, the Hermitian property // is enforced (see also \ref adaptors_hermitian_matrices_hermitian). \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::HermitianMatrix; using blaze::SymmetricMatrix; // The following two matrices provide an identical experience (including performance) HermitianMatrix< DynamicMatrix<double> > A; // Both Hermitian and symmetric SymmetricMatrix< DynamicMatrix<double> > B; // Both Hermitian and symmetric // The following two matrices will behave differently HermitianMatrix< DynamicMatrix< complex<double> > > C; // Only Hermitian SymmetricMatrix< DynamicMatrix< complex<double> > > D; // Only symmetric // Hermitian block matrices are not allowed HermitianMatrix< DynamicMatrix< DynamicVector<double> > > E; // Compilation error! SymmetricMatrix< DynamicMatrix< DynamicVector<double> > > F; // Symmetric block matrix \endcode // \n \section adaptors_hermitian_matrices_special_properties Special Properties of Hermitian Matrices // <hr> // // A Hermitian matrix is used exactly like a matrix of the underlying, adapted matrix type \c MT. // It also provides (nearly) the same interface as the underlying matrix type. However, there are // some important exceptions resulting from the Hermitian symmetry constraint: // // -# <b>\ref adaptors_hermitian_matrices_square</b> // -# <b>\ref adaptors_hermitian_matrices_hermitian</b> // -# <b>\ref adaptors_hermitian_matrices_initialization</b> // // \n \subsection adaptors_hermitian_matrices_square Hermitian Matrices Must Always be Square! // // In case a resizable matrix is used (as for instance blaze::HybridMatrix, blaze::DynamicMatrix, // or blaze::CompressedMatrix), this means that the according constructors, the \c resize() and // the \c extend() functions only expect a single parameter, which specifies both the number of // rows and columns, instead of two (one for the number of rows and one for the number of columns): \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; using blaze::rowMajor; // Default constructed, default initialized, row-major 3x3 Hermitian dynamic matrix HermitianMatrix< DynamicMatrix<std::complex<double>,rowMajor> > A( 3 ); // Resizing the matrix to 5x5 A.resize( 5 ); // Extending the number of rows and columns by 2, resulting in a 7x7 matrix A.extend( 2 ); \endcode // In case a matrix with a fixed size is used (as for instance blaze::StaticMatrix), the number // of rows and number of columns must be specified equally: \code using blaze::StaticMatrix; using blaze::HermitianMatrix; using blaze::columnMajor; // Correct setup of a fixed size column-major 3x3 Hermitian static matrix HermitianMatrix< StaticMatrix<std::complex<float>,3UL,3UL,columnMajor> > A; // Compilation error: the provided matrix type is not a square matrix type HermitianMatrix< StaticMatrix<std::complex<float>,3UL,4UL,columnMajor> > B; \endcode // \n \subsection adaptors_hermitian_matrices_hermitian The Hermitian Property is Always Enforced! // // This means that the following properties of a Hermitian matrix are always guaranteed: // // - The diagonal elements are real numbers, i.e. the imaginary part is zero // - Element \f$ a_{ij} \f$ is always the complex conjugate of element \f$ a_{ji} \f$ // // Thus modifying the element \f$ a_{ij} \f$ of a Hermitian matrix also modifies its // counterpart element \f$ a_{ji} \f$. 
Also, it is only possible to assign matrices that // are Hermitian themselves: \code using blaze::CompressedMatrix; using blaze::DynamicMatrix; using blaze::StaticMatrix; using blaze::HermitianMatrix; using blaze::rowMajor; using cplx = std::complex<double>; // Default constructed, row-major 3x3 Hermitian compressed matrix HermitianMatrix< CompressedMatrix<cplx,rowMajor> > A( 3 ); // Initializing the matrix via the function call operator // // ( (1, 0) (0,0) (2,1) ) // ( (0, 0) (0,0) (0,0) ) // ( (2,-1) (0,0) (0,0) ) // A(0,0) = cplx( 1.0, 0.0 ); // Initialization of the diagonal element (0,0) A(0,2) = cplx( 2.0, 1.0 ); // Initialization of the elements (0,2) and (2,0) // Inserting three more elements via the insert() function // // ( (1,-3) (0,0) (2, 1) ) // ( (0, 0) (2,0) (4,-2) ) // ( (2,-1) (4,2) (0, 0) ) // A.insert( 1, 1, cplx( 2.0, 0.0 ) ); // Inserting the diagonal element (1,1) A.insert( 1, 2, cplx( 4.0, -2.0 ) ); // Inserting the elements (1,2) and (2,1) // Access via a non-const iterator // // ( (1,-3) (8,1) (2, 1) ) // ( (8,-1) (2,0) (4,-2) ) // ( (2,-1) (4,2) (0, 0) ) // *A.begin(1UL) = cplx( 8.0, -1.0 ); // Modifies both elements (1,0) and (0,1) // Erasing elements via the erase() function // // ( (0, 0) (8,1) (0, 0) ) // ( (8,-1) (2,0) (4,-2) ) // ( (0, 0) (4,2) (0, 0) ) // A.erase( 0, 0 ); // Erasing the diagonal element (0,0) A.erase( 0, 2 ); // Erasing the elements (0,2) and (2,0) // Construction from a Hermitian dense matrix StaticMatrix<cplx,3UL,3UL> B{ { cplx( 3.0, 0.0 ), cplx( 8.0, 2.0 ), cplx( -2.0, 2.0 ) }, { cplx( 8.0, 1.0 ), cplx( 0.0, 0.0 ), cplx( -1.0, -1.0 ) }, { cplx( -2.0, -2.0 ), cplx( -1.0, 1.0 ), cplx( 4.0, 0.0 ) } }; HermitianMatrix< DynamicMatrix<double,rowMajor> > C( B ); // OK // Assignment of a non-Hermitian dense matrix StaticMatrix<cplx,3UL,3UL> D{ { cplx( 3.0, 0.0 ), cplx( 7.0, 2.0 ), cplx( 3.0, 2.0 ) }, { cplx( 8.0, 1.0 ), cplx( 0.0, 0.0 ), cplx( 6.0, 4.0 ) }, { cplx( -2.0, 2.0 ), cplx( -1.0, 1.0 ), cplx( 4.0, 0.0 ) } }; C = D; // Throws an exception; Hermitian invariant would be violated! \endcode // The same restriction also applies to the \c append() function for sparse matrices: Appending // the element \f$ a_{ij} \f$ additionally inserts the element \f$ a_{ji} \f$ into the matrix. // Despite the additional insertion, the \c append() function still provides the most efficient // way to set up a Hermitian sparse matrix. 
In order to achieve the maximum efficiency, the // capacity of the individual rows/columns of the matrix should be specifically prepared with // \c reserve() calls: \code using blaze::CompressedMatrix; using blaze::HermitianMatrix; using blaze::rowMajor; using cplx = std::complex<double>; // Setup of the Hermitian matrix // // ( (0, 0) (1,2) (3,-4) ) // A = ( (1,-2) (2,0) (0, 0) ) // ( (3, 4) (0,0) (0, 0) ) // HermitianMatrix< CompressedMatrix<cplx,rowMajor> > A( 3 ); A.reserve( 5 ); // Reserving enough space for 5 non-zero elements A.reserve( 0, 2 ); // Reserving two non-zero elements in the first row A.reserve( 1, 2 ); // Reserving two non-zero elements in the second row A.reserve( 2, 1 ); // Reserving a single non-zero element in the third row A.append( 0, 1, cplx( 1.0, 2.0 ) ); // Appending an element at position (0,1) and (1,0) A.append( 1, 1, cplx( 2.0, 0.0 ) ); // Appending an element at position (1,1) A.append( 2, 0, cplx( 3.0, 4.0 ) ); // Appending an element at position (2,0) and (0,2) \endcode // The Hermitian property is also enforced for Hermitian custom matrices: In case the given array // of elements does not represent a Hermitian matrix, a \c std::invalid_argument exception is // thrown: \code using blaze::CustomMatrix; using blaze::HermitianMatrix; using blaze::unaligned; using blaze::unpadded; using blaze::rowMajor; using CustomHermitian = HermitianMatrix< CustomMatrix<double,unaligned,unpadded,rowMajor> >; // Creating a 3x3 Hermitian custom matrix from a properly initialized array double array[9] = { 1.0, 2.0, 4.0, 2.0, 3.0, 5.0, 4.0, 5.0, 6.0 }; CustomHermitian A( array, 3UL ); // OK // Attempt to create a second 3x3 Hermitian custom matrix from an uninitialized array std::unique_ptr<double[]> memory( new double[9UL] ); CustomHermitian B( memory.get(), 3UL ); // Throws an exception \endcode // Finally, the Hermitian property is enforced for views (rows, columns, submatrices, ...) on the // Hermitian matrix. The following example demonstrates that modifying the elements of an entire // row of the Hermitian matrix also affects the counterpart elements in the according column of // the matrix: \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; using cplx = std::complex<double>; // Setup of the Hermitian matrix // // ( (0, 0) (1,-1) (0,0) (2, 1) ) // A = ( (1, 1) (3, 0) (4,2) (0, 0) ) // ( (0, 0) (4,-2) (0,0) (5,-3) ) // ( (2,-1) (0, 0) (5,3) (0, 0) ) // HermitianMatrix< DynamicMatrix<cplx> > A( 4 ); A(0,1) = cplx( 1.0, -1.0 ); A(0,3) = cplx( 2.0, 1.0 ); A(1,1) = cplx( 3.0, 0.0 ); A(1,2) = cplx( 4.0, 2.0 ); A(2,3) = cplx( 5.0, 3.0 ); // Setting all elements in the 1st row to 0 results in the matrix // // ( (0, 0) (0,0) (0,0) (2, 1) ) // A = ( (0, 0) (0,0) (0,0) (0, 0) ) // ( (0, 0) (0,0) (0,0) (5,-3) ) // ( (2,-1) (0,0) (5,3) (0, 0) ) // row( A, 1 ) = cplx( 0.0, 0.0 ); \endcode // The next example demonstrates the (compound) assignment to submatrices of Hermitian matrices. // Since the modification of element \f$ a_{ij} \f$ of a Hermitian matrix also modifies the // element \f$ a_{ji} \f$, the matrix to be assigned must be structured such that the Hermitian // symmetry of the matrix is preserved.
Otherwise a \c std::invalid_argument exception is thrown: \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; using cplx = std::complex<double>; // Setup of two default 4x4 Hermitian matrices HermitianMatrix< DynamicMatrix<cplx> > A1( 4 ), A2( 4 ); // Setup of the 3x2 dynamic matrix // // ( (1,-1) (2, 5) ) // B = ( (3, 0) (4,-6) ) // ( (5, 0) (6, 0) ) // DynamicMatrix<cplx> B( 3UL, 2UL ); B(0,0) = cplx( 1.0, -1.0 ); B(0,1) = cplx( 2.0, 5.0 ); B(1,0) = cplx( 3.0, 0.0 ); B(1,1) = cplx( 4.0, -6.0 ); B(2,0) = cplx( 5.0, 0.0 ); B(2,1) = cplx( 6.0, 0.0 ); // OK: Assigning B to a submatrix of A1 such that the Hermitian property is preserved // // ( (0, 0) (0, 0) (1,-1) (2, 5) ) // A1 = ( (0, 0) (0, 0) (3, 0) (4,-6) ) // ( (1, 1) (3, 0) (5, 0) (6, 0) ) // ( (2,-5) (4, 6) (6, 0) (0, 0) ) // submatrix( A1, 0UL, 2UL, 3UL, 2UL ) = B; // OK // Error: Assigning B to a submatrix of A2 such that the Hermitian property isn't preserved! // The elements marked with X cannot be assigned unambiguously! // // ( (0, 0) (1,-1) (2,5) (0,0) ) // A2 = ( (1, 1) (3, 0) (X,X) (0,0) ) // ( (2,-5) (X, X) (6,0) (0,0) ) // ( (0, 0) (0, 0) (0,0) (0,0) ) // submatrix( A2, 0UL, 1UL, 3UL, 2UL ) = B; // Assignment throws an exception! \endcode // \n \subsection adaptors_hermitian_matrices_initialization The Elements of a Dense Hermitian Matrix are Always Default Initialized! // // Although this results in a small loss of efficiency (especially in case all default values are // overridden afterwards), this property is important since otherwise the Hermitian property of // dense Hermitian matrices could not be guaranteed: \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; // Uninitialized, 5x5 row-major dynamic matrix DynamicMatrix<int,rowMajor> A( 5, 5 ); // Default initialized, 5x5 row-major Hermitian dynamic matrix HermitianMatrix< DynamicMatrix<int,rowMajor> > B( 5 ); \endcode // \n \section adaptors_hermitian_matrices_arithmetic_operations Arithmetic Operations // <hr> // // A HermitianMatrix can be used within all numerical operations in any way any other dense or // sparse matrix can be used. It can also be combined with any other dense or sparse vector or // matrix.
The following code example gives an impression of the use of HermitianMatrix within // arithmetic operations: \code using blaze::HermitianMatrix; using blaze::DynamicMatrix; using blaze::HybridMatrix; using blaze::StaticMatrix; using blaze::CompressedMatrix; using blaze::rowMajor; using blaze::columnMajor; using cplx = std::complex<float>; DynamicMatrix<cplx,rowMajor> A( 3, 3 ); CompressedMatrix<cplx,rowMajor> B( 3, 3 ); HermitianMatrix< DynamicMatrix<cplx,rowMajor> > C( 3 ); HermitianMatrix< CompressedMatrix<cplx,rowMajor> > D( 3 ); HermitianMatrix< HybridMatrix<cplx,3UL,3UL,rowMajor> > E; HermitianMatrix< StaticMatrix<cplx,3UL,3UL,columnMajor> > F; E = A + B; // Matrix addition and assignment to a row-major Hermitian matrix (includes runtime check) F = C - D; // Matrix subtraction and assignment to a column-major Hermitian matrix (only compile time check) F = A * D; // Matrix multiplication between a dense and a sparse matrix (includes runtime check) C *= 2.0; // In-place scaling of matrix C E = 2.0 * B; // Scaling of matrix B (includes runtime check) F = C * 2.0; // Scaling of matrix C (only compile time check) E += A - B; // Addition assignment (includes runtime check) F -= C + D; // Subtraction assignment (only compile time check) F *= A * D; // Multiplication assignment (includes runtime check) \endcode // Note that it is possible to assign any kind of matrix to a Hermitian matrix. In case the matrix // to be assigned is not Hermitian at compile time, a runtime check is performed. // // // \n \section adaptors_hermitian_matrices_performance Performance Considerations // <hr> // // When the Hermitian property of a matrix is known beforehand, using the HermitianMatrix adaptor // instead of a general matrix can be a considerable performance advantage. This is particularly // true in case the Hermitian matrix is also symmetric (i.e. has built-in element types). The // \b Blaze library tries to exploit the properties of Hermitian (symmetric) matrices whenever // possible. However, there are also situations when using a Hermitian matrix introduces some // overhead. The following examples demonstrate several situations where Hermitian matrices can // positively or negatively impact performance. // // \n \subsection adaptors_hermitian_matrices_matrix_matrix_multiplication Positive Impact: Matrix/Matrix Multiplication // // When multiplying two matrices, at least one of which is symmetric, \b Blaze can exploit the fact // that \f$ A = A^T \f$ and choose the fastest and most suited combination of storage orders for the // multiplication. The following example demonstrates this by means of a dense matrix/sparse matrix // multiplication: \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; using blaze::rowMajor; using blaze::columnMajor; HermitianMatrix< DynamicMatrix<double,rowMajor> > A; // Both Hermitian and symmetric HermitianMatrix< CompressedMatrix<double,columnMajor> > B; // Both Hermitian and symmetric DynamicMatrix<double,columnMajor> C; // ... Resizing and initialization C = A * B; \endcode // Intuitively, the chosen combination of a row-major and a column-major matrix is the most suited // for maximum performance. However, \b Blaze evaluates the multiplication as \code C = A * trans( B ); \endcode // which significantly increases the performance since in contrast to the original formulation the // optimized form can be vectorized. Therefore, in the context of matrix multiplications, using a // symmetric matrix is obviously an advantage.
// // \n \subsection adaptors_hermitian_matrices_matrix_vector_multiplication Positive Impact: Matrix/Vector Multiplication // // A similar optimization is possible in case of matrix/vector multiplications: \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::CompressedVector; using blaze::HermitianMatrix; using blaze::rowMajor; using blaze::columnVector; HermitianMatrix< DynamicMatrix<double,rowMajor> > A; // Hermitian and symmetric CompressedVector<double,columnVector> x; DynamicVector<double,columnVector> y; // ... Resizing and initialization y = A * x; \endcode // In this example it is not intuitively apparent that using a row-major matrix is not the best // possible choice in terms of performance since the computation cannot be vectorized. Choosing // a column-major matrix instead, however, would enable a vectorized computation. Therefore // \b Blaze exploits the fact that \c A is symmetric, selects the best suited storage order and // evaluates the multiplication as \code y = trans( A ) * x; \endcode // which also significantly increases the performance. // // \n \subsection adaptors_hermitian_matrices_views Positive Impact: Row/Column Views on Column/Row-Major Matrices // // Another example is the optimization of a row view on a column-major symmetric matrix: \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; using blaze::columnMajor; HermitianMatrix< DynamicMatrix<double,columnMajor> > A( 10UL ); // Both Hermitian and symmetric auto row5 = row( A, 5UL ); \endcode // Usually, a row view on a column-major matrix results in a considerable performance decrease in // comparison to a row view on a row-major matrix due to the non-contiguous storage of the matrix // elements. However, in case of symmetric matrices, \b Blaze instead uses the according column of // the matrix, which provides the same performance as if the matrix would be row-major. Note that // this also works for column views on row-major matrices, where \b Blaze can use the according // row instead of a column in order to provide maximum performance. // // \n \subsection adaptors_hermitian_matrices_assignment Negative Impact: Assignment of a General Matrix // // In contrast to using a Hermitian matrix on the right-hand side of an assignment (i.e. for read // access), which introduces absolutely no performance penalty, using a Hermitian matrix on the // left-hand side of an assignment (i.e. for write access) may introduce additional overhead when // it is assigned a general matrix, which is not Hermitian at compile time: \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; HermitianMatrix< DynamicMatrix< complex<double> > > A, C; DynamicMatrix<double> B; B = A; // Only read-access to the Hermitian matrix; no performance penalty C = A; // Assignment of a Hermitian matrix to another Hermitian matrix; no runtime overhead C = B; // Assignment of a general matrix to a Hermitian matrix; some runtime overhead \endcode // When assigning a general, potentially not Hermitian matrix to a Hermitian matrix it is necessary // to check whether the matrix is Hermitian at runtime in order to guarantee the Hermitian property // of the Hermitian matrix. In case it turns out to be Hermitian, it is assigned as efficiently as // possible, if it is not, an exception is thrown. 
In order to prevent this runtime overhead it is // therefore generally advisable to assign Hermitian matrices to other Hermitian matrices.\n // In this context it is especially noteworthy that in contrast to additions and subtractions the // multiplication of two Hermitian matrices does not necessarily result in another Hermitian matrix: \code HermitianMatrix< DynamicMatrix<double> > A, B, C; C = A + B; // Results in a Hermitian matrix; no runtime overhead C = A - B; // Results in a Hermitian matrix; no runtime overhead C = A * B; // Is not guaranteed to result in a Hermitian matrix; some runtime overhead \endcode // \n Previous: \ref adaptors_symmetric_matrices &nbsp; &nbsp; Next: \ref adaptors_triangular_matrices */ //************************************************************************************************* //**Triangular Matrices**************************************************************************** /*!\page adaptors_triangular_matrices Triangular Matrices // // \tableofcontents // // // \n \section adaptors_triangular_matrices_general Triangular Matrices // <hr> // // Triangular matrices come in three flavors: Lower triangular matrices provide the compile time // guarantee to be square matrices and that the upper part of the matrix contains only default // elements that cannot be modified. Upper triangular matrices on the other hand provide the // compile time guarantee to be square and that the lower part of the matrix contains only fixed // default elements. Finally, diagonal matrices provide the compile time guarantee to be square // and that both the lower and upper part of the matrix contain only immutable default elements. // These properties can be exploited to gain higher performance and/or to save memory. Within the // \b Blaze library, several kinds of lower and upper triangular and diagonal matrices are realized // by the following class templates: // // Lower triangular matrices: // - <b>\ref adaptors_triangular_matrices_lowermatrix</b> // - <b>\ref adaptors_triangular_matrices_unilowermatrix</b> // - <b>\ref adaptors_triangular_matrices_strictlylowermatrix</b> // // Upper triangular matrices: // - <b>\ref adaptors_triangular_matrices_uppermatrix</b> // - <b>\ref adaptors_triangular_matrices_uniuppermatrix</b> // - <b>\ref adaptors_triangular_matrices_strictlyuppermatrix</b> // // Diagonal matrices // - <b>\ref adaptors_triangular_matrices_diagonalmatrix</b> // // // \n \section adaptors_triangular_matrices_lowermatrix LowerMatrix // <hr> // // The blaze::LowerMatrix class template is an adapter for existing dense and sparse matrix types. // It inherits the properties and the interface of the given matrix type \c MT and extends it by // enforcing the additional invariant that all matrix elements above the diagonal are 0 (lower // triangular matrix): \f[\left(\begin{array}{*{5}{c}} l_{0,0} & 0 & 0 & \cdots & 0 \\ l_{1,0} & l_{1,1} & 0 & \cdots & 0 \\ l_{2,0} & l_{2,1} & l_{2,2} & \cdots & 0 \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ l_{N,0} & l_{N,1} & l_{N,2} & \cdots & l_{N,N} \\ \end{array}\right).\f] // It can be included via the header file \code #include <blaze/math/LowerMatrix.h> \endcode // The type of the adapted matrix can be specified via the first template parameter: \code template< typename MT > class LowerMatrix; \endcode // \c MT specifies the type of the matrix to be adapted. blaze::LowerMatrix can be used with any // non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. 
Note // that the given matrix type must be either resizable (as for instance blaze::HybridMatrix or // blaze::DynamicMatrix) or must be square at compile time (as for instance blaze::StaticMatrix). // // The following examples give an impression of several possible lower matrices: \code using blaze::unaligned; using blaze::unpadded; using blaze::rowMajor; using blaze::columnMajor; // Definition of a 3x3 row-major dense lower matrix with static memory blaze::LowerMatrix< blaze::StaticMatrix<int,3UL,3UL,rowMajor> > A; // Definition of a resizable column-major dense lower matrix based on HybridMatrix blaze::LowerMatrix< blaze::HybridMatrix<float,4UL,4UL,columnMajor> > B; // Definition of a resizable row-major dense lower matrix based on DynamicMatrix blaze::LowerMatrix< blaze::DynamicMatrix<double,rowMajor> > C; // Definition of a fixed size row-major dense lower matrix based on CustomMatrix blaze::LowerMatrix< blaze::CustomMatrix<double,unaligned,unpadded,rowMajor> > D; // Definition of a compressed row-major single precision lower matrix blaze::LowerMatrix< blaze::CompressedMatrix<float,rowMajor> > E; \endcode // The storage order of a lower matrix depends on the storage order of the adapted matrix // type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. is specified // as blaze::rowMajor), the lower matrix will also be a row-major matrix. Otherwise, if the // adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the lower matrix // will also be a column-major matrix. // // // \n \section adaptors_triangular_matrices_unilowermatrix UniLowerMatrix // <hr> // // The blaze::UniLowerMatrix class template is an adapter for existing dense and sparse matrix // types. It inherits the properties and the interface of the given matrix type \c MT and extends // it by enforcing the additional invariant that all diagonal matrix elements are 1 and all matrix // elements above the diagonal are 0 (lower unitriangular matrix): \f[\left(\begin{array}{*{5}{c}} 1 & 0 & 0 & \cdots & 0 \\ l_{1,0} & 1 & 0 & \cdots & 0 \\ l_{2,0} & l_{2,1} & 1 & \cdots & 0 \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ l_{N,0} & l_{N,1} & l_{N,2} & \cdots & 1 \\ \end{array}\right).\f] // It can be included via the header file \code #include <blaze/math/UniLowerMatrix.h> \endcode // The type of the adapted matrix can be specified via the first template parameter: \code template< typename MT > class UniLowerMatrix; \endcode // \c MT specifies the type of the matrix to be adapted. blaze::UniLowerMatrix can be used with any // non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Also, // the given matrix type must have numeric element types (i.e. all integral types except \c bool, // floating point and complex types). Note that the given matrix type must be either resizable (as // for instance blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as // for instance blaze::StaticMatrix).
// // The following examples give an impression of several possible lower unitriangular matrices: \code // Definition of a 3x3 row-major dense unilower matrix with static memory blaze::UniLowerMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A; // Definition of a resizable column-major dense unilower matrix based on HybridMatrix blaze::UniLowerMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B; // Definition of a resizable row-major dense unilower matrix based on DynamicMatrix blaze::UniLowerMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C; // Definition of a compressed row-major single precision unilower matrix blaze::UniLowerMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D; \endcode // The storage order of a lower unitriangular matrix depends on the storage order of the // adapted matrix type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. // is specified as blaze::rowMajor), the unilower matrix will also be a row-major matrix. // Otherwise, if the adapted matrix is column-major (i.e. is specified as blaze::columnMajor), // the unilower matrix will also be a column-major matrix. // // // \n \section adaptors_triangular_matrices_strictlylowermatrix StrictlyLowerMatrix // <hr> // // The blaze::StrictlyLowerMatrix class template is an adapter for existing dense and sparse matrix // types. It inherits the properties and the interface of the given matrix type \c MT and extends // it by enforcing the additional invariant that all diagonal matrix elements and all matrix // elements above the diagonal are 0 (strictly lower triangular matrix): \f[\left(\begin{array}{*{5}{c}} 0 & 0 & 0 & \cdots & 0 \\ l_{1,0} & 0 & 0 & \cdots & 0 \\ l_{2,0} & l_{2,1} & 0 & \cdots & 0 \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ l_{N,0} & l_{N,1} & l_{N,2} & \cdots & 0 \\ \end{array}\right).\f] // It can be included via the header file \code #include <blaze/math/StrictlyLowerMatrix.h> \endcode // The type of the adapted matrix can be specified via the first template parameter: \code template< typename MT > class StrictlyLowerMatrix; \endcode // \c MT specifies the type of the matrix to be adapted. blaze::StrictlyLowerMatrix can be used // with any non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix // type. Note that the given matrix type must be either resizable (as for instance // blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as for instance // blaze::StaticMatrix). // // The following examples give an impression of several possible strictly lower triangular matrices: \code // Definition of a 3x3 row-major dense strictly lower matrix with static memory blaze::StrictlyLowerMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A; // Definition of a resizable column-major dense strictly lower matrix based on HybridMatrix blaze::StrictlyLowerMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B; // Definition of a resizable row-major dense strictly lower matrix based on DynamicMatrix blaze::StrictlyLowerMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C; // Definition of a compressed row-major single precision strictly lower matrix blaze::StrictlyLowerMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D; \endcode // The storage order of a strictly lower triangular matrix depends on the storage order of // the adapted matrix type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e.
// is specified as blaze::rowMajor), the strictly lower matrix will also be a row-major matrix. // Otherwise, if the adapted matrix is column-major (i.e. is specified as blaze::columnMajor), // the strictly lower matrix will also be a column-major matrix. // // // \n \section adaptors_triangular_matrices_uppermatrix UpperMatrix // <hr> // // The blaze::UpperMatrix class template is an adapter for existing dense and sparse matrix types. // It inherits the properties and the interface of the given matrix type \c MT and extends it by // enforcing the additional invariant that all matrix elements below the diagonal are 0 (upper // triangular matrix): \f[\left(\begin{array}{*{5}{c}} u_{0,0} & u_{0,1} & u_{0,2} & \cdots & u_{0,N} \\ 0 & u_{1,1} & u_{1,2} & \cdots & u_{1,N} \\ 0 & 0 & u_{2,2} & \cdots & u_{2,N} \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ 0 & 0 & 0 & \cdots & u_{N,N} \\ \end{array}\right).\f] // It can be included via the header file \code #include <blaze/math/UpperMatrix.h> \endcode // The type of the adapted matrix can be specified via the first template parameter: \code template< typename MT > class UpperMatrix; \endcode // \c MT specifies the type of the matrix to be adapted. blaze::UpperMatrix can be used with any // non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Note // that the given matrix type must be either resizable (as for instance blaze::HybridMatrix or // blaze::DynamicMatrix) or must be square at compile time (as for instance blaze::StaticMatrix). // // The following examples give an impression of several possible upper matrices: \code // Definition of a 3x3 row-major dense upper matrix with static memory blaze::UpperMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A; // Definition of a resizable column-major dense upper matrix based on HybridMatrix blaze::UpperMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B; // Definition of a resizable row-major dense upper matrix based on DynamicMatrix blaze::UpperMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C; // Definition of a compressed row-major single precision upper matrix blaze::UpperMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D; \endcode // The storage order of an upper matrix depends on the storage order of the adapted matrix // type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. is specified // as blaze::rowMajor), the upper matrix will also be a row-major matrix. Otherwise, if the // adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the upper matrix // will also be a column-major matrix. // // // \n \section adaptors_triangular_matrices_uniuppermatrix UniUpperMatrix // <hr> // // The blaze::UniUpperMatrix class template is an adapter for existing dense and sparse matrix // types.
It inherits the properties and the interface of the given matrix type \c MT and extends // it by enforcing the additional invariant that all diagonal matrix elements are 1 and all matrix // elements below the diagonal are 0 (upper unitriangular matrix): \f[\left(\begin{array}{*{5}{c}} 1 & u_{0,1} & u_{0,2} & \cdots & u_{0,N} \\ 0 & 1 & u_{1,2} & \cdots & u_{1,N} \\ 0 & 0 & 1 & \cdots & u_{2,N} \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ 0 & 0 & 0 & \cdots & 1 \\ \end{array}\right).\f] // It can be included via the header file \code #include <blaze/math/UniUpperMatrix.h> \endcode // The type of the adapted matrix can be specified via the first template parameter: \code template< typename MT > class UniUpperMatrix; \endcode // \c MT specifies the type of the matrix to be adapted. blaze::UniUpperMatrix can be used with any // non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Also, // the given matrix type must have numeric element types (i.e. all integral types except \c bool, // floating point and complex types). Note that the given matrix type must be either resizable (as // for instance blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as // for instance blaze::StaticMatrix). // // The following examples give an impression of several possible upper unitriangular matrices: \code // Definition of a 3x3 row-major dense uniupper matrix with static memory blaze::UniUpperMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A; // Definition of a resizable column-major dense uniupper matrix based on HybridMatrix blaze::UniUpperMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B; // Definition of a resizable row-major dense uniupper matrix based on DynamicMatrix blaze::UniUpperMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C; // Definition of a compressed row-major single precision uniupper matrix blaze::UniUpperMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D; \endcode // The storage order of an upper unitriangular matrix depends on the storage order of the // adapted matrix type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. // is specified as blaze::rowMajor), the uniupper matrix will also be a row-major matrix. // Otherwise, if the adapted matrix is column-major (i.e. is specified as blaze::columnMajor), // the uniupper matrix will also be a column-major matrix. // // // \n \section adaptors_triangular_matrices_strictlyuppermatrix StrictlyUpperMatrix // <hr> // // The blaze::StrictlyUpperMatrix class template is an adapter for existing dense and sparse matrix // types. It inherits the properties and the interface of the given matrix type \c MT and extends // it by enforcing the additional invariant that all diagonal matrix elements and all matrix // elements below the diagonal are 0 (strictly upper triangular matrix): \f[\left(\begin{array}{*{5}{c}} 0 & u_{0,1} & u_{0,2} & \cdots & u_{0,N} \\ 0 & 0 & u_{1,2} & \cdots & u_{1,N} \\ 0 & 0 & 0 & \cdots & u_{2,N} \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ 0 & 0 & 0 & \cdots & 0 \\ \end{array}\right).\f] // It can be included via the header file \code #include <blaze/math/StrictlyUpperMatrix.h> \endcode // The type of the adapted matrix can be specified via the first template parameter: \code template< typename MT > class StrictlyUpperMatrix; \endcode // \c MT specifies the type of the matrix to be adapted.
blaze::StrictlyUpperMatrix can be used // with any non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix // type. Note that the given matrix type must be either resizable (as for instance // blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as for instance // blaze::StaticMatrix). // // The following examples give an impression of several possible strictly upper triangular matrices: \code // Definition of a 3x3 row-major dense strictly upper matrix with static memory blaze::StrictlyUpperMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A; // Definition of a resizable column-major dense strictly upper matrix based on HybridMatrix blaze::StrictlyUpperMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B; // Definition of a resizable row-major dense strictly upper matrix based on DynamicMatrix blaze::StrictlyUpperMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C; // Definition of a compressed row-major single precision strictly upper matrix blaze::StrictlyUpperMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D; \endcode // The storage order of a strictly upper triangular matrix depends on the storage order of // the adapted matrix type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. // is specified as blaze::rowMajor), the strictly upper matrix will also be a row-major matrix. // Otherwise, if the adapted matrix is column-major (i.e. is specified as blaze::columnMajor), // the strictly upper matrix will also be a column-major matrix. // // // \n \section adaptors_triangular_matrices_diagonalmatrix DiagonalMatrix // <hr> // // The blaze::DiagonalMatrix class template is an adapter for existing dense and sparse matrix // types. It inherits the properties and the interface of the given matrix type \c MT and extends // it by enforcing the additional invariant that all matrix elements above and below the diagonal // are 0 (diagonal matrix): \f[\left(\begin{array}{*{5}{c}} l_{0,0} & 0 & 0 & \cdots & 0 \\ 0 & l_{1,1} & 0 & \cdots & 0 \\ 0 & 0 & l_{2,2} & \cdots & 0 \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ 0 & 0 & 0 & \cdots & l_{N,N} \\ \end{array}\right).\f] // It can be included via the header file \code #include <blaze/math/DiagonalMatrix.h> \endcode // The type of the adapted matrix can be specified via the first template parameter: \code template< typename MT > class DiagonalMatrix; \endcode // \c MT specifies the type of the matrix to be adapted. blaze::DiagonalMatrix can be used with any // non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Note // that the given matrix type must be either resizable (as for instance blaze::HybridMatrix or // blaze::DynamicMatrix) or must be square at compile time (as for instance blaze::StaticMatrix).
// // The following examples give an impression of several possible diagonal matrices: \code // Definition of a 3x3 row-major dense diagonal matrix with static memory blaze::DiagonalMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A; // Definition of a resizable column-major dense diagonal matrix based on HybridMatrix blaze::DiagonalMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B; // Definition of a resizable row-major dense diagonal matrix based on DynamicMatrix blaze::DiagonalMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C; // Definition of a compressed row-major single precision diagonal matrix blaze::DiagonalMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D; \endcode // The storage order of a diagonal matrix depends on the storage order of the adapted matrix // type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. is specified // as blaze::rowMajor), the diagonal matrix will also be a row-major matrix. Otherwise, if the // adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the diagonal matrix // will also be a column-major matrix. // // // \n \section adaptors_triangular_matrices_special_properties Special Properties of Triangular Matrices // <hr> // // A triangular matrix is used exactly like a matrix of the underlying, adapted matrix type \c MT. // It also provides (nearly) the same interface as the underlying matrix type. However, there are // some important exceptions resulting from the triangular matrix constraint: // // -# <b>\ref adaptors_triangular_matrices_square</b> // -# <b>\ref adaptors_triangular_matrices_triangular</b> // -# <b>\ref adaptors_triangular_matrices_initialization</b> // -# <b>\ref adaptors_triangular_matrices_storage</b> // -# <b>\ref adaptors_triangular_matrices_scaling</b> // // \n \subsection adaptors_triangular_matrices_square Triangular Matrices Must Always be Square! // // In case a resizable matrix is used (as for instance blaze::HybridMatrix, blaze::DynamicMatrix, // or blaze::CompressedMatrix), this means that the according constructors, the \c resize() and // the \c extend() functions only expect a single parameter, which specifies both the number of // rows and columns, instead of two (one for the number of rows and one for the number of columns): \code using blaze::DynamicMatrix; using blaze::LowerMatrix; using blaze::rowMajor; // Default constructed, default initialized, row-major 3x3 lower dynamic matrix LowerMatrix< DynamicMatrix<double,rowMajor> > A( 3 ); // Resizing the matrix to 5x5 A.resize( 5 ); // Extending the number of rows and columns by 2, resulting in a 7x7 matrix A.extend( 2 ); \endcode // In case a matrix with a fixed size is used (as for instance blaze::StaticMatrix), the number // of rows and number of columns must be specified equally: \code using blaze::StaticMatrix; using blaze::LowerMatrix; using blaze::columnMajor; // Correct setup of a fixed size column-major 3x3 lower static matrix LowerMatrix< StaticMatrix<int,3UL,3UL,columnMajor> > A; // Compilation error: the provided matrix type is not a square matrix type LowerMatrix< StaticMatrix<int,3UL,4UL,columnMajor> > B; \endcode // \n \subsection adaptors_triangular_matrices_triangular The Triangular Property is Always Enforced! // // This means that it is only allowed to modify elements in the lower part or the diagonal of // a lower triangular matrix and in the upper part or the diagonal of an upper triangular matrix.
// Unitriangular and strictly triangular matrices are even more restrictive and don't allow the // modification of diagonal elements. Also, triangular matrices can only be assigned matrices that // don't violate their triangular property. The following example demonstrates this restriction // by means of the blaze::LowerMatrix adaptor. For examples with other triangular matrix types // see the according class documentations. \code using blaze::CompressedMatrix; using blaze::DynamicMatrix; using blaze::StaticMatrix; using blaze::LowerMatrix; using blaze::rowMajor; using CompressedLower = LowerMatrix< CompressedMatrix<double,rowMajor> >; // Default constructed, row-major 3x3 lower compressed matrix CompressedLower A( 3 ); // Initializing elements via the function call operator A(0,0) = 1.0; // Initialization of the diagonal element (0,0) A(2,0) = 2.0; // Initialization of the lower element (2,0) A(1,2) = 9.0; // Throws an exception; invalid modification of upper element // Inserting two more elements via the insert() function A.insert( 1, 0, 3.0 ); // Inserting the lower element (1,0) A.insert( 2, 1, 4.0 ); // Inserting the lower element (2,1) A.insert( 0, 2, 9.0 ); // Throws an exception; invalid insertion of upper element // Appending an element via the append() function A.reserve( 1, 3 ); // Reserving enough capacity in row 1 A.append( 1, 1, 5.0 ); // Appending the diagonal element (1,1) A.append( 1, 2, 9.0 ); // Throws an exception; appending an element in the upper part // Access via a non-const iterator CompressedLower::Iterator it = A.begin(1); *it = 6.0; // Modifies the lower element (1,0) ++it; *it = 9.0; // Modifies the diagonal element (1,1) // Erasing elements via the erase() function A.erase( 0, 0 ); // Erasing the diagonal element (0,0) A.erase( 2, 0 ); // Erasing the lower element (2,0) // Construction from a lower dense matrix StaticMatrix<double,3UL,3UL> B{ { 3.0, 0.0, 0.0 }, { 8.0, 0.0, 0.0 }, { -2.0, -1.0, 4.0 } }; LowerMatrix< DynamicMatrix<double,rowMajor> > C( B ); // OK // Assignment of a non-lower dense matrix StaticMatrix<double,3UL,3UL> D{ { 3.0, 0.0, -2.0 }, { 8.0, 0.0, 0.0 }, { -2.0, -1.0, 4.0 } }; C = D; // Throws an exception; lower matrix invariant would be violated! \endcode // The triangular property is also enforced during the construction of triangular custom matrices: // In case the given array of elements does not represent the according triangular matrix type, a // \c std::invalid_argument exception is thrown: \code using blaze::CustomMatrix; using blaze::LowerMatrix; using blaze::unaligned; using blaze::unpadded; using blaze::rowMajor; using CustomLower = LowerMatrix< CustomMatrix<double,unaligned,unpadded,rowMajor> >; // Creating a 3x3 lower custom matrix from a properly initialized array double array[9] = { 1.0, 0.0, 0.0, 2.0, 3.0, 0.0, 4.0, 5.0, 6.0 }; CustomLower A( array, 3UL ); // OK // Attempt to create a second 3x3 lower custom matrix from an uninitialized array std::unique_ptr<double[]> memory( new double[9UL] ); CustomLower B( memory.get(), 3UL ); // Throws an exception \endcode // Finally, the triangular matrix property is enforced for views (rows, columns, submatrices, ...) // on the triangular matrix. The following example demonstrates that modifying the elements of an // entire row and submatrix of a lower matrix only affects the lower and diagonal matrix elements. // Again, this example uses blaze::LowerMatrix, for examples with other triangular matrix types // see the according class documentations. 
\code using blaze::DynamicMatrix; using blaze::LowerMatrix; // Setup of the lower matrix // // ( 0 0 0 0 ) // A = ( 1 2 0 0 ) // ( 0 3 0 0 ) // ( 4 0 5 0 ) // LowerMatrix< DynamicMatrix<int> > A( 4 ); A(1,0) = 1; A(1,1) = 2; A(2,1) = 3; A(3,0) = 4; A(3,2) = 5; // Setting the lower and diagonal elements in the 2nd row to 9 results in the matrix // // ( 0 0 0 0 ) // A = ( 1 2 0 0 ) // ( 9 9 9 0 ) // ( 4 0 5 0 ) // row( A, 2 ) = 9; // Setting the lower and diagonal elements in the 1st and 2nd column to 7 results in // // ( 0 0 0 0 ) // A = ( 1 7 0 0 ) // ( 9 7 7 0 ) // ( 4 7 7 0 ) // submatrix( A, 0, 1, 4, 2 ) = 7; \endcode // The next example demonstrates the (compound) assignment to rows/columns and submatrices of // triangular matrices. Since only lower/upper and potentially diagonal elements may be modified // the matrix to be assigned must be structured such that the triangular matrix invariant of the // matrix is preserved. Otherwise a \c std::invalid_argument exception is thrown: \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::LowerMatrix; using blaze::rowVector; // Setup of two default 4x4 lower matrices LowerMatrix< DynamicMatrix<int> > A1( 4 ), A2( 4 ); // Setup of a 4-dimensional vector // // v = ( 1 2 3 0 ) // DynamicVector<int,rowVector> v{ 1, 2, 3, 0 }; // OK: Assigning v to the 2nd row of A1 preserves the lower matrix invariant // // ( 0 0 0 0 ) // A1 = ( 0 0 0 0 ) // ( 1 2 3 0 ) // ( 0 0 0 0 ) // row( A1, 2 ) = v; // OK // Error: Assigning v to the 1st row of A1 violates the lower matrix invariant! The element // marked with X cannot be assigned and triggers an exception. // // ( 0 0 0 0 ) // A1 = ( 1 2 X 0 ) // ( 1 2 3 0 ) // ( 0 0 0 0 ) // row( A1, 1 ) = v; // Assignment throws an exception! // Setup of the 3x2 dynamic matrix // // ( 0 0 ) // B = ( 7 0 ) // ( 8 9 ) // DynamicMatrix<int> B( 3UL, 2UL, 0 ); B(1,0) = 7; B(2,0) = 8; B(2,1) = 9; // OK: Assigning B to a submatrix of A2 such that the lower matrix invariant can be preserved // // ( 0 0 0 0 ) // A2 = ( 0 7 0 0 ) // ( 0 8 9 0 ) // ( 0 0 0 0 ) // submatrix( A2, 0UL, 1UL, 3UL, 2UL ) = B; // OK // Error: Assigning B to a submatrix of A2 such that the lower matrix invariant cannot be // preserved! The elements marked with X cannot be assigned without violating the invariant! // // ( 0 0 0 0 ) // A2 = ( 0 7 X 0 ) // ( 0 8 8 X ) // ( 0 0 0 0 ) // submatrix( A2, 0UL, 2UL, 3UL, 2UL ) = B; // Assignment throws an exception! \endcode // \n \subsection adaptors_triangular_matrices_initialization The Elements of a Dense Triangular Matrix are Always Default Initialized! // // Although this results in a small loss of efficiency during the creation of a dense lower or // upper matrix this initialization is important since otherwise the lower/upper matrix property // of dense lower matrices would not be guaranteed: \code using blaze::DynamicMatrix; using blaze::LowerMatrix; using blaze::UpperMatrix; // Uninitialized, 5x5 row-major dynamic matrix DynamicMatrix<int,rowMajor> A( 5, 5 ); // 5x5 row-major lower dynamic matrix with default initialized upper matrix LowerMatrix< DynamicMatrix<int,rowMajor> > B( 5 ); // 7x7 column-major upper dynamic matrix with default initialized lower matrix UpperMatrix< DynamicMatrix<int,columnMajor> > C( 7 ); // 3x3 row-major diagonal dynamic matrix with default initialized lower and upper matrix DiagonalMatrix< DynamicMatrix<int,rowMajor> > D( 3 ); \endcode // \n \subsection adaptors_triangular_matrices_storage Dense Triangular Matrices Store All Elements! 
// // All dense triangular matrices store all \f$ N \times N \f$ elements, including the immutable // elements in the lower or upper part, respectively. Therefore dense triangular matrices don't // provide any kind of memory reduction! There are two main reasons for this: First, storing also // the zero elements guarantees maximum performance for many algorithms that perform vectorized // operations on the triangular matrices, which is especially true for small dense matrices. // Second, conceptually all triangular adaptors merely restrict the interface to the matrix type // \c MT and do not change the data layout or the underlying matrix type. // // This property matters most for diagonal matrices. In order to achieve the perfect combination // of performance and memory consumption for a diagonal matrix it is recommended to use dense // matrices for small diagonal matrices and sparse matrices for large diagonal matrices: \code // Recommendation 1: use dense matrices for small diagonal matrices using SmallDiagonalMatrix = blaze::DiagonalMatrix< blaze::StaticMatrix<float,3UL,3UL> >; // Recommendation 2: use sparse matrices for large diagonal matrices using LargeDiagonalMatrix = blaze::DiagonalMatrix< blaze::CompressedMatrix<float> >; \endcode // \n \subsection adaptors_triangular_matrices_scaling Unitriangular Matrices Cannot Be Scaled! // // Since the diagonal elements of a unitriangular matrix have a fixed value of 1 it is not possible // to self-scale such a matrix: \code using blaze::DynamicMatrix; using blaze::UniLowerMatrix; UniLowerMatrix< DynamicMatrix<int> > A( 4 ); A *= 2; // Compilation error; Scale operation is not available on an unilower matrix A /= 2; // Compilation error; Scale operation is not available on an unilower matrix A.scale( 2 ); // Compilation error; Scale function is not available on an unilower matrix A = A * 2; // Throws an exception; Invalid assignment of non-unilower matrix A = A / 2; // Throws an exception; Invalid assignment of non-unilower matrix \endcode // \n \section adaptors_triangular_matrices_arithmetic_operations Arithmetic Operations // <hr> // // A lower and upper triangular matrix can participate in numerical operations in any way any other // dense or sparse matrix can participate. It can also be combined with any other dense or sparse // vector or matrix. 
The following code example gives an impression of the use of blaze::LowerMatrix // within arithmetic operations: \code using blaze::LowerMatrix; using blaze::DynamicMatrix; using blaze::HybridMatrix; using blaze::StaticMatrix; using blaze::CompressedMatrix; using blaze::rowMajor; using blaze::columnMajor; DynamicMatrix<double,rowMajor> A( 3, 3 ); CompressedMatrix<double,rowMajor> B( 3, 3 ); LowerMatrix< DynamicMatrix<double,rowMajor> > C( 3 ); LowerMatrix< CompressedMatrix<double,rowMajor> > D( 3 ); LowerMatrix< HybridMatrix<float,3UL,3UL,rowMajor> > E; LowerMatrix< StaticMatrix<float,3UL,3UL,columnMajor> > F; E = A + B; // Matrix addition and assignment to a row-major lower matrix (includes runtime check) F = C - D; // Matrix subtraction and assignment to a column-major lower matrix (only compile time check) F = A * D; // Matrix multiplication between a dense and a sparse matrix (includes runtime check) C *= 2.0; // In-place scaling of matrix C E = 2.0 * B; // Scaling of matrix B (includes runtime check) F = C * 2.0; // Scaling of matrix C (only compile time check) E += A - B; // Addition assignment (includes runtime check) F -= C + D; // Subtraction assignment (only compile time check) F *= A * D; // Multiplication assignment (includes runtime check) \endcode // Note that it is possible to assign any kind of matrix to a triangular matrix. In case the // matrix to be assigned does not satisfy the invariants of the triangular matrix at compile // time, a runtime check is performed. Also note that upper triangular, diagonal, unitriangular // and strictly triangular matrix types can be used in the same way, but may pose some additional // restrictions (see the according class documentations). // // // \n \section adaptors_triangular_matrices_block_matrices Triangular Block Matrices // <hr> // // It is also possible to use triangular block matrices: \code using blaze::CompressedMatrix; using blaze::DynamicMatrix; using blaze::StaticMatrix; using blaze::LowerMatrix; using blaze::UpperMatrix; // Definition of a 5x5 lower block matrix based on DynamicMatrix LowerMatrix< DynamicMatrix< StaticMatrix<int,3UL,3UL> > > A( 5 ); // Definition of a 7x7 upper block matrix based on CompressedMatrix UpperMatrix< CompressedMatrix< StaticMatrix<int,3UL,3UL> > > B( 7 ); \endcode // Also in this case the triangular matrix invariant is enforced, i.e. it is not possible to // manipulate elements in the upper part (lower triangular matrix) or the lower part (upper // triangular matrix) of the matrix: \code const StaticMatrix<int,3UL,3UL> C{ { 1, -4, 5 }, { 6, 8, -3 }, { 2, -1, 2 } }; A(2,4)(1,1) = -5; // Invalid manipulation of upper matrix element; Results in an exception B.insert( 4, 2, C ); // Invalid insertion of the elements (4,2); Results in an exception \endcode // Note that unitriangular matrices are restricted to numeric element types and therefore cannot // be used for block matrices: \code using blaze::CompressedMatrix; using blaze::DynamicMatrix; using blaze::StaticMatrix; using blaze::UniLowerMatrix; using blaze::UniUpperMatrix; // Compilation error: lower unitriangular matrices are restricted to numeric element types UniLowerMatrix< DynamicMatrix< StaticMatrix<int,3UL,3UL> > > A( 5 ); // Compilation error: upper unitriangular matrices are restricted to numeric element types UniUpperMatrix< CompressedMatrix< StaticMatrix<int,3UL,3UL> > > B( 7 ); \endcode // For more information on block matrices, see the tutorial on \ref block_vectors_and_matrices. 
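// // Conversely, blocks in the lower part and on the diagonal of a lower triangular block matrix can be manipulated freely. The following minimal sketch is not taken from the original manual; it merely illustrates valid accesses on the block matrix type defined above, using the same element access syntax as the previous example: \code
   using blaze::DynamicMatrix;
   using blaze::StaticMatrix;
   using blaze::LowerMatrix;

   LowerMatrix< DynamicMatrix< StaticMatrix<int,3UL,3UL> > > A( 5 );

   // Assigning a complete block to an element in the strictly lower part
   A(3,1) = StaticMatrix<int,3UL,3UL>{ { 1, 0, 0 }, { 2, 3, 0 }, { 4, 5, 6 } };

   // Modifying a single value inside a diagonal block
   A(2,2)(0,0) = 7;
   \endcode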
// // // \n \section adaptors_triangular_matrices_performance Performance Considerations // <hr> // // The \b Blaze library tries to exploit the properties of lower and upper triangular matrices // whenever and wherever possible. Therefore using triangular matrices instead of general // matrices can result in a considerable performance improvement. However, there are also // situations when using a triangular matrix introduces some overhead. The following examples // demonstrate several common situations where triangular matrices can positively or negatively // impact performance. // // \n \subsection adaptors_triangular_matrices_matrix_matrix_multiplication Positive Impact: Matrix/Matrix Multiplication // // When multiplying two matrices, at least one of which is triangular, \b Blaze can exploit the // fact that either the lower or upper part of the matrix contains only default elements and // restrict the algorithm to the non-zero elements. The following example demonstrates this by // means of a dense matrix/dense matrix multiplication with lower triangular matrices: \code using blaze::DynamicMatrix; using blaze::LowerMatrix; using blaze::rowMajor; using blaze::columnMajor; LowerMatrix< DynamicMatrix<double,rowMajor> > A; LowerMatrix< DynamicMatrix<double,columnMajor> > B; DynamicMatrix<double,columnMajor> C; // ... Resizing and initialization C = A * B; \endcode // In comparison to a general matrix multiplication, the performance advantage is significant, // especially for large matrices. Therefore it is highly recommended to use the blaze::LowerMatrix // and blaze::UpperMatrix adaptors when a matrix is known to be lower or upper triangular, // respectively. Note however that the performance advantage is most pronounced for dense matrices // and much less so for sparse matrices. // // \n \subsection adaptors_triangular_matrices_matrix_vector_multiplication Positive Impact: Matrix/Vector Multiplication // // A similar performance improvement can be gained when using a triangular matrix in a matrix/vector // multiplication: \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::LowerMatrix; using blaze::rowMajor; using blaze::columnVector; LowerMatrix< DynamicMatrix<double,rowMajor> > A; DynamicVector<double,columnVector> x, y; // ... Resizing and initialization y = A * x; \endcode // In this example, \b Blaze also exploits the structure of the matrix and approx. halves the // runtime of the multiplication. Also in case of matrix/vector multiplications the performance // improvement is most pronounced for dense matrices and much less so for sparse matrices. // // \n \subsection adaptors_triangular_matrices_assignment Negative Impact: Assignment of a General Matrix // // In contrast to using a triangular matrix on the right-hand side of an assignment (i.e. for // read access), which introduces absolutely no performance penalty, using a triangular matrix // on the left-hand side of an assignment (i.e.
for write access) may introduce additional // overhead when it is assigned a general matrix, which is not triangular at compile time: \code using blaze::DynamicMatrix; using blaze::LowerMatrix; LowerMatrix< DynamicMatrix<double> > A, C; DynamicMatrix<double> B; B = A; // Only read-access to the lower matrix; no performance penalty C = A; // Assignment of a lower matrix to another lower matrix; no runtime overhead C = B; // Assignment of a general matrix to a lower matrix; some runtime overhead \endcode // When assigning a general (potentially not lower triangular) matrix to a lower matrix or a // general (potentially not upper triangular) matrix to an upper matrix it is necessary to check // whether the matrix is lower or upper at runtime in order to guarantee the triangular property // of the matrix. In case it turns out to be lower or upper, respectively, it is assigned as // efficiently as possible; if it is not, an exception is thrown. In order to prevent this runtime // overhead it is therefore generally advisable to assign lower or upper triangular matrices to // other lower or upper triangular matrices.\n // In this context it is especially noteworthy that the addition, subtraction, and multiplication // of two triangular matrices of the same structure always results in another triangular matrix: \code LowerMatrix< DynamicMatrix<double> > A, B, C; C = A + B; // Results in a lower matrix; no runtime overhead C = A - B; // Results in a lower matrix; no runtime overhead C = A * B; // Results in a lower matrix; no runtime overhead \endcode \code UpperMatrix< DynamicMatrix<double> > A, B, C; C = A + B; // Results in an upper matrix; no runtime overhead C = A - B; // Results in an upper matrix; no runtime overhead C = A * B; // Results in an upper matrix; no runtime overhead \endcode // \n Previous: \ref adaptors_hermitian_matrices &nbsp; &nbsp; Next: \ref views */ //************************************************************************************************* //**Views****************************************************************************************** /*!\page views Views // // \tableofcontents // // // \section views_general General Concepts // <hr> // // Views represent parts of a vector or matrix, such as a subvector, a submatrix, or a specific // row, column, or band of a matrix. As such, views act as a reference to specific elements of // a vector or matrix. This reference is valid and can be used in every way as any other vector // or matrix can be used as long as the referenced vector or matrix is not resized or entirely // destroyed. Views also act as an alias to the elements of the vector or matrix: Changes made to the // elements (e.g. modifying values, inserting or erasing elements) via the view are immediately // visible in the vector or matrix and changes made via the vector or matrix are immediately // visible in the view. // // It is also possible to create nested views (compound views), such as for instance bands of // submatrices or row selections on column selections. A compound view also acts as a reference // to specific elements of the underlying vector or matrix and is valid as long as the underlying, // referenced vector or matrix is not resized or entirely destroyed.
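// // The following minimal sketch (not taken from the sections below) illustrates two such compound views; it assumes that the according view headers (for instance <blaze/math/Submatrix.h>, <blaze/math/Band.h>, <blaze/math/Rows.h>, and <blaze/math/Columns.h>) are included: \code
   using blaze::DynamicMatrix;

   DynamicMatrix<int> A;
   // ... Resizing and initialization

   // Band of a submatrix: the main diagonal of the 4x4 block starting at (2,2)
   auto sm = submatrix( A, 2UL, 2UL, 4UL, 4UL );
   auto d  = band( sm, 0L );

   // Row selection on a column selection
   auto cs = columns( A, { 1UL, 3UL, 5UL } );
   auto rs = rows( cs, { 0UL, 2UL } );
   \endcode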
// // The \b Blaze library provides the following views on vectors and matrices: // // Vector views: // - \ref views_subvectors // - \ref views_element_selections // // Matrix views: // - \ref views_submatrices // - \ref views_rows // - \ref views_row_selections // - \ref views_columns // - \ref views_column_selections // - \ref views_bands // // // \n \section views_examples Examples \code using blaze::DynamicMatrix; using blaze::StaticVector; // Setup of the 3x5 row-major matrix DynamicMatrix<int> A{ { 1, 0, -2, 3, 0 }, { 0, 2, 5, -1, -1 }, { 1, 0, 0, 2, 1 } }; // Setup of the 2-dimensional row vector StaticVector<int,2UL,rowVector> vec{ 18, 19 }; // Assigning to the elements (1,2) and (1,3) via a subvector of a row // // ( 1 0 -2 3 0 ) // ( 0 2 18 19 -1 ) // ( 1 0 0 2 1 ) // subvector( row( A, 1UL ), 2UL, 2UL ) = vec; // Switching rows 0 and 2 of A // // ( 1 0 0 2 1 ) // ( 0 2 18 19 -1 ) // ( 1 0 -2 3 0 ) // rows<0,2>( A ) = rows<2,0>( A ); // Warning: It is the programmer's responsibility to ensure the view does not outlive // the viewed vector or matrix (dangling reference)! auto row1 = row<1UL>( DynamicMatrix<int>{ { 1, 2, 3 }, { 4, 5, 6 } } ); \endcode // \n Previous: \ref adaptors_triangular_matrices &nbsp; &nbsp; Next: \ref views_subvectors */ //************************************************************************************************* //**Subvectors************************************************************************************* /*!\page views_subvectors Subvectors // // \tableofcontents // // // Subvectors provide views on a specific part of a dense or sparse vector. As such, subvectors // act as a reference to a specific range within a vector. This reference is valid and can be // used in every way any other dense or sparse vector can be used as long as the vector containing // the subvector is not resized or entirely destroyed. The subvector also acts as an alias to the // vector elements in the specified range: Changes made to the elements (e.g. modifying values, // inserting or erasing elements) are immediately visible in the vector and changes made via the // vector are immediately visible in the subvector. // // // \n \section views_subvectors_setup Setup of Subvectors // <hr> // // A view on a dense or sparse subvector can be created very conveniently via the \c subvector() // function. It can be included via the header file \code #include <blaze/math/Subvector.h> \endcode // The first parameter specifies the offset of the subvector within the underlying dense or sparse // vector, the second parameter specifies the size of the subvector. The two parameters can be // specified either at compile time or at runtime: \code blaze::DynamicVector<double,blaze::rowVector> x; // ... Resizing and initialization // Create a subvector from index 4 with a size of 12 (i.e. in the range [4..15]) (compile time arguments) auto sv1 = subvector<4UL,12UL>( x ); // Create a subvector from index 8 with a size of 16 (i.e. in the range [8..23]) (runtime arguments) auto sv2 = subvector( x, 8UL, 16UL ); \endcode // The \c subvector() function returns an expression representing the subvector view. The type of // this expression depends on the given subvector arguments, primarily the type of the vector and // the compile time arguments. 
If the type is required, it can be determined via the \c decltype // specifier: \code using VectorType = blaze::DynamicVector<int>; using SubvectorType = decltype( blaze::subvector<4UL,12UL>( std::declval<VectorType>() ) ); \endcode // The resulting view can be treated as any other dense or sparse vector, i.e. it can be assigned // to, it can be copied from, and it can be used in arithmetic operations. A subvector created // from a row vector can be used as any other row vector, a subvector created from a column vector // can be used as any other column vector. The view can also be used on both sides of an assignment: // The subvector can either be used as an alias to grant write access to a specific subvector of a // vector primitive on the left-hand side of an assignment or to grant read-access to a specific // subvector of a vector primitive or expression on the right-hand side of an assignment. The // following example demonstrates this in detail: \code blaze::DynamicVector<double,blaze::rowVector> x; blaze::CompressedVector<double,blaze::rowVector> y; blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization // Create a subvector from index 0 with a size of 10 (i.e. in the range [0..9]) auto sv = subvector( x, 0UL, 10UL ); // Setting the first ten elements of x to the 2nd row of matrix A sv = row( A, 2UL ); // Setting the second ten elements of x to y subvector( x, 10UL, 10UL ) = y; // Setting the 3rd row of A to a subvector of x row( A, 3UL ) = subvector( x, 3UL, 10UL ); // Setting x to a subvector of the result of the addition between y and the 1st row of A x = subvector( y + row( A, 1UL ), 2UL, 5UL ); \endcode // \warning It is the programmer's responsibility to ensure the subvector does not outlive the // viewed vector: \code // Creating a subvector on a temporary vector; results in a dangling reference! auto sv = subvector<1UL,3UL>( DynamicVector<int>{ 1, 2, 3, 4, 5 } ); \endcode // \n \section views_subvectors_element_access Element Access // <hr> // // The elements of a subvector can be directly accessed via the subscript operator: \code blaze::DynamicVector<double,blaze::rowVector> v; // ... Resizing and initialization // Creating an 8-dimensional subvector, starting from index 4 auto sv = subvector( v, 4UL, 8UL ); // Setting the 1st element of the subvector, which corresponds to // the element at index 5 in vector v sv[1] = 2.0; \endcode // The numbering of the subvector elements is \f[\left(\begin{array}{*{5}{c}} 0 & 1 & 2 & \cdots & N-1 \\ \end{array}\right),\f] // where N is the specified size of the subvector. Alternatively, the elements of a subvector can // be traversed via iterators. Just as with vectors, in case of non-const subvectors, \c begin() // and \c end() return an iterator, which allows to manipulate the elements, in case of constant // subvectors an iterator to immutable elements is returned: \code blaze::DynamicVector<int,blaze::rowVector> v( 256UL ); // ... Resizing and initialization // Creating a reference to a specific subvector of vector v auto sv = subvector( v, 16UL, 64UL ); // Traversing the elements via iterators to non-const elements for( auto it=sv.begin(); it!=sv.end(); ++it ) { *it = ...; // OK: Write access to the dense subvector value. ... = *it; // OK: Read access to the dense subvector value. } // Traversing the elements via iterators to const elements for( auto it=sv.cbegin(); it!=sv.cend(); ++it ) { *it = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid. ... 
= *it; // OK: Read access to the dense subvector value. } \endcode \code blaze::CompressedVector<int,blaze::rowVector> v( 256UL ); // ... Resizing and initialization // Creating a reference to a specific subvector of vector v auto sv = subvector( v, 16UL, 64UL ); // Traversing the elements via iterators to non-const elements for( auto it=sv.begin(); it!=sv.end(); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } // Traversing the elements via iterators to const elements for( auto it=sv.cbegin(); it!=sv.cend(); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } \endcode // \n \section views_subvectors_element_insertion Element Insertion // <hr> // // Inserting/accessing elements in a sparse subvector can be done by several alternative functions. // The following example demonstrates all options: \code blaze::CompressedVector<double,blaze::rowVector> v( 256UL ); // Non-initialized vector of size 256 auto sv = subvector( v, 10UL, 60UL ); // View on the range [10..69] of v // The subscript operator provides access to all possible elements of the sparse subvector, // including the zero elements. In case the subscript operator is used to access an element // that is currently not stored in the sparse subvector, the element is inserted into the // subvector. sv[42] = 2.0; // The second operation for inserting elements is the set() function. In case the element is // not contained in the subvector it is inserted into the subvector, if it is already contained // in the subvector its value is modified. sv.set( 45UL, -1.2 ); // An alternative for inserting elements into the subvector is the insert() function. However, // it inserts the element only in case the element is not already contained in the subvector. sv.insert( 50UL, 3.7 ); // Just as in case of vectors, elements can also be inserted via the append() function. In // case of subvectors, append() also requires that the appended element's index is strictly // larger than the currently largest non-zero index of the subvector and that the subvector's // capacity is large enough to hold the new element. Note however that due to the nature of // a subvector, which may be an alias to the middle of a sparse vector, the append() function // does not work as efficiently for a subvector as it does for a vector. sv.reserve( 10UL ); sv.append( 51UL, -2.1 ); \endcode // \n \section views_subvectors_common_operations Common Operations // <hr> // // A subvector view can be used like any other dense or sparse vector. This means that with // only a few exceptions all \ref vector_operations and \ref arithmetic_operations can be used. // For instance, the current number of elements can be obtained via the \c size() function, the // current capacity via the \c capacity() function, and the number of non-zero elements via the // \c nonZeros() function. However, since subvectors are references to a specific range of a // vector, several operations are not possible, such as resizing and swapping. 
The following // example shows this by means of a dense subvector view: \code blaze::DynamicVector<int,blaze::rowVector> v( 42UL ); // ... Resizing and initialization // Creating a view on the range [5..15] of vector v auto sv = subvector( v, 5UL, 10UL ); sv.size(); // Returns the number of elements in the subvector sv.capacity(); // Returns the capacity of the subvector sv.nonZeros(); // Returns the number of non-zero elements contained in the subvector sv.resize( 84UL ); // Compilation error: Cannot resize a subvector of a vector auto sv2 = subvector( v, 15UL, 10UL ); swap( sv, sv2 ); // Compilation error: Swap operation not allowed \endcode // \n \section views_subvectors_arithmetic_operations Arithmetic Operations // <hr> // // Both dense and sparse subvectors can be used in all arithmetic operations that any other dense // or sparse vector can be used in. The following example gives an impression of the use of dense // subvectors within arithmetic operations. All operations (addition, subtraction, multiplication, // scaling, ...) can be performed on all possible combinations of dense and sparse subvectors with // fitting element types: \code blaze::DynamicVector<double,blaze::rowVector> d1, d2, d3; blaze::CompressedVector<double,blaze::rowVector> s1, s2; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> A; auto sv( subvector( d1, 0UL, 10UL ) ); // View on the range [0..9] of vector d1 sv = d2; // Dense vector initialization of the range [0..9] subvector( d1, 10UL, 10UL ) = s1; // Sparse vector initialization of the range [10..19] d3 = sv + d2; // Dense vector/dense vector addition s2 = s1 + subvector( d1, 10UL, 10UL ); // Sparse vector/dense vector addition d2 = sv * subvector( d1, 20UL, 10UL ); // Component-wise vector multiplication subvector( d1, 3UL, 4UL ) *= 2.0; // In-place scaling of the range [3..6] d2 = subvector( d1, 7UL, 3UL ) * 2.0; // Scaling of the range [7..9] d2 = 2.0 * subvector( d1, 7UL, 3UL ); // Scaling of the range [7..9] subvector( d1, 0UL , 10UL ) += d2; // Addition assignment subvector( d1, 10UL, 10UL ) -= s2; // Subtraction assignment subvector( d1, 20UL, 10UL ) *= sv; // Multiplication assignment double scalar = subvector( d1, 5UL, 10UL ) * trans( s1 ); // Scalar/dot/inner product between two vectors A = trans( s1 ) * subvector( d1, 4UL, 16UL ); // Outer product between two vectors \endcode // \n \section views_aligned_subvectors Aligned Subvectors // <hr> // // Usually subvectors can be defined anywhere within a vector. They may start at any position and // may have an arbitrary size (only restricted by the size of the underlying vector). However, in // contrast to vectors themselves, which are always properly aligned in memory and therefore can // provide maximum performance, this means that subvectors in general have to be considered to be // unaligned. This can be made explicit by the \c blaze::unaligned flag: \code using blaze::unaligned; blaze::DynamicVector<double,blaze::rowVector> x; // ... Resizing and initialization // Identical creations of an unaligned subvector in the range [8..23] auto sv1 = subvector ( x, 8UL, 16UL ); auto sv2 = subvector<unaligned>( x, 8UL, 16UL ); auto sv3 = subvector<8UL,16UL> ( x ); auto sv4 = subvector<unaligned,8UL,16UL>( x ); \endcode // All of these calls to the \c subvector() function are identical. Whether the alignment flag is // explicitly specified or not, it always returns an unaligned subvector. 
Whereas this may provide // full flexibility in the creation of subvectors, this might result in performance disadvantages // in comparison to vector primitives (even in case the specified subvector could be aligned). // Whereas vector primitives are guaranteed to be properly aligned and therefore provide maximum // performance in all operations, a general view on a vector might not be properly aligned. This // may cause a performance penalty on some platforms and/or for some operations. // // However, it is also possible to create aligned subvectors. Aligned subvectors are identical to // unaligned subvectors in all aspects, except that they may pose additional alignment restrictions // and therefore have less flexibility during creation, but don't suffer from performance penalties // and provide the same performance as the underlying vector. Aligned subvectors are created by // explicitly specifying the \c blaze::aligned flag: \code using blaze::aligned; // Creating an aligned subvector in the range [8..23] auto sv1 = subvector<aligned>( x, 8UL, 16UL ); auto sv2 = subvector<aligned,8UL,16UL>( x ); \endcode // The alignment restrictions refer to system dependent address restrictions for the used element // type and the available vectorization mode (SSE, AVX, ...). In order to be properly aligned the // first element of the subvector must be aligned. The following source code gives some examples // for a double precision dynamic vector, assuming that AVX is available, which packs 4 \c double // values into a SIMD vector: \code using blaze::aligned; blaze::DynamicVector<double,blaze::columnVector> d( 17UL ); // ... Resizing and initialization // OK: Starts at the beginning, i.e. the first element is aligned auto dsv1 = subvector<aligned>( d, 0UL, 13UL ); // OK: Start index is a multiple of 4, i.e. the first element is aligned auto dsv2 = subvector<aligned>( d, 4UL, 7UL ); // OK: The start index is a multiple of 4 and the subvector includes the last element auto dsv3 = subvector<aligned>( d, 8UL, 9UL ); // Error: Start index is not a multiple of 4, i.e. the first element is not aligned auto dsv4 = subvector<aligned>( d, 5UL, 8UL ); \endcode // Note that the discussed alignment restrictions are only valid for aligned dense subvectors. // In contrast, aligned sparse subvectors at this time don't pose any additional restrictions. // Therefore aligned and unaligned sparse subvectors are truly fully identical. Still, in case // the \c blaze::aligned flag is specified during setup, an aligned subvector is created: \code using blaze::aligned; blaze::CompressedVector<double,blaze::rowVector> x; // ... Resizing and initialization // Creating an aligned subvector in the range [8..23] auto sv1 = subvector<aligned>( x, 8UL, 16UL ); auto sv2 = subvector<aligned,8UL,16UL>( x ); \endcode // \n Previous: \ref views &nbsp; &nbsp; Next: \ref views_element_selections */ //************************************************************************************************* //**Element Selections***************************************************************************** /*!\page views_element_selections Element Selections // // \tableofcontents // // // Element selections provide views on arbitrary compositions of elements of dense and sparse // vectors. These views act as a reference to the selected elements and represent them as another // dense or sparse vector. 
This reference is valid and can be used in every way any other dense // or sparse vector can be used as long as the vector containing the elements is not resized or // entirely destroyed. The element selection also acts as an alias to the vector elements in the // specified range: Changes made to the elements (e.g. modifying values, inserting or erasing // elements) are immediately visible in the vector and changes made via the vector are immediately // visible in the elements. // // // \n \section views_element_selections_setup Setup of Element Selections // // An element selection can be created very conveniently via the \c elements() function. It can // be included via the header file \code #include <blaze/math/Elements.h> \endcode // The indices of the elements to be selected can be specified either at compile time or at runtime // (by means of an initializer list, array or vector): \code blaze::DynamicVector<double,blaze::rowVector> x; // ... Resizing and initialization // Selecting the elements 4, 6, 8, and 10 (compile time arguments) auto e1 = elements<4UL,6UL,8UL,10UL>( x ); // Selecting the elements 3, 2, and 1 (runtime arguments via an initializer list) const std::initializer_list<size_t> list{ 3UL, 2UL, 1UL }; auto e2 = elements( x, { 3UL, 2UL, 1UL } ); auto e3 = elements( x, list ); // Selecting the elements 1, 2, 3, 3, 2, and 1 (runtime arguments via a std::array) const std::array<size_t,6UL> array{ 1UL, 2UL, 3UL, 3UL, 2UL, 1UL }; auto e4 = elements( x, array ); auto e5 = elements( x, array.data(), array.size() ); // Selecting the element 4 five times (runtime arguments via a std::vector) const std::vector<size_t> vector{ 4UL, 4UL, 4UL, 4UL, 4UL }; auto e6 = elements( x, vector ); auto e7 = elements( x, vector.data(), vector.size() ); \endcode // Note that it is possible to alias the elements of the underlying vector in any order. Also note // that it is possible to use the same index multiple times. The \c elements() function returns an // expression representing the view on the selected elements. The type of this expression depends // on the given arguments, primarily the type of the vector and the compile time arguments. If the // type is required, it can be determined via the \c decltype specifier: \code using VectorType = blaze::DynamicVector<int>; using ElementsType = decltype( blaze::elements<4UL,12UL>( std::declval<VectorType>() ) ); \endcode // The resulting view can be treated as any other dense or sparse vector, i.e. it can be assigned // to, it can be copied from, and it can be used in arithmetic operations. An element selection // created from a row vector can be used as any other row vector, an element selection created // from a column vector can be used as any other column vector. The view can also be used on both // sides of an assignment: It can either be used as an alias to grant write access to specific // elements of a vector primitive on the left-hand side of an assignment or to grant read-access // to specific elements of a vector primitive or expression on the right-hand side of an assignment. // The following example demonstrates this in detail: \code blaze::DynamicVector<double,blaze::rowVector> x; blaze::CompressedVector<double,blaze::rowVector> y; blaze::DynamicMatrix<double,blaze::rowMajor> A; // ...
Resizing and initialization // Selecting the elements 1, 3, 5, and 7 auto e = elements( x, { 1UL, 3UL, 5UL, 7UL } ); // Setting the elements 1, 3, 5, and 7 of x to the 2nd row of matrix A e = row( A, 2UL ); // Setting the elements 2, 4, 6, and 8 of x to y elements( x, { 2UL, 4UL, 6UL, 8UL } ) = y; // Setting the 3rd row of A to the elements 5, 4, 3, and 2 of x row( A, 3UL ) = elements( x, { 5UL, 4UL, 3UL, 2UL } ); // Rotating the result of the addition between y and the 1st row of A x = elements( y + row( A, 1UL ), { 2UL, 3UL, 0UL, 1UL } ) \endcode // Please note that using an element selection, which refers to an index multiple times, on the // left-hand side of an assignment leads to undefined behavior: \code blaze::DynamicVector<int,blaze::rowVector> a{ 1, 2, 3 }; blaze::DynamicVector<int,blaze::rowVector> b{ 1, 2, 3, 4 }; auto e = elements( a, { 1, 1, 1, 1 } ); // Selecting the element 1 four times e = b; // Undefined behavior \endcode // In this example both vectors have the same size, which results in a correct vector assignment, // but the final value of the element at index 1 is unspecified. // // \warning It is the programmer's responsibility to ensure the element selection does not outlive // the viewed vector: \code // Creating an element selection on a temporary vector; results in a dangling reference! auto e = elements<1UL,3UL>( DynamicVector<int>{ 1, 2, 3, 4, 5 } ); \endcode // \n \section views_element_selections_element_access Element Access // // The elements of an element selection can be directly accessed via the subscript operator: \code blaze::DynamicVector<double,blaze::rowVector> v; // ... Resizing and initialization // Selecting the elements 2, 4, 6, and 8 auto e = elements( v, { 2UL, 4UL, 6UL, 8UL } ); // Setting the 1st element of the element selection, which corresponds to // the element at index 4 in vector v e[1] = 2.0; \endcode // The numbering of the selected elements is \f[\left(\begin{array}{*{5}{c}} 0 & 1 & 2 & \cdots & N-1 \\ \end{array}\right),\f] // where N is the number of selected elements. Alternatively, the elements of an element selection // can be traversed via iterators. Just as with vectors, in case of non-const element selections, // \c begin() and \c end() return an iterator, which allows to manipulate the elements, in case of // constant element selections an iterator to immutable elements is returned: \code blaze::DynamicVector<int,blaze::rowVector> v( 256UL ); // ... Resizing and initialization // Creating an element selection including specific elements of dense vector v auto e = elements( v, { 0UL, 3UL, 6UL, 9UL, 12UL } ); // Traversing the elements via iterators to non-const elements for( auto it=e.begin(); it!=e.end(); ++it ) { *it = ...; // OK: Write access to the dense vector value. ... = *it; // OK: Read access to the dense vector value. } // Traversing the elements via iterators to const elements for( auto it=e.cbegin(); it!=e.cend(); ++it ) { *it = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid. ... = *it; // OK: Read access to the dense vector value. } \endcode \code blaze::CompressedVector<int,blaze::rowVector> v( 256UL ); // ... Resizing and initialization // Creating an element selection including specific elements of sparse vector v auto e = elements( v, { 0UL, 3UL, 6UL, 9UL, 12UL } ); // Traversing the elements via iterators to non-const elements for( auto it=e.begin(); it!=e.end(); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... 
= it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } // Traversing the elements via iterators to const elements for( auto it=e.cbegin(); it!=e.cend(); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } \endcode // \n \section views_element_selections_element_insertion Element Insertion // // Inserting/accessing elements in a sparse element selection can be done by several alternative // functions. The following example demonstrates all options: \code blaze::CompressedVector<double,blaze::rowVector> v( 256UL ); // Non-initialized vector of size 256 std::vector<size_t> indices; // ... Selecting indices of the sparse vector auto e = elements( v, indices ); // The subscript operator provides access to the selected elements of the sparse vector, // including the zero elements. In case the subscript operator is used to access an element // that is currently not stored in the sparse vector, the element is inserted. e[42] = 2.0; // The second operation for inserting elements via the element selection is the set() function. // In case the element is not contained in the vector it is inserted into the vector, if it is // already contained in the vector its value is modified. e.set( 45UL, -1.2 ); // An alternative for inserting elements into the vector is the insert() function. However, it // inserts the element only in case the element is not already contained in the vector. e.insert( 50UL, 3.7 ); // Just as in case of vectors, elements can also be inserted via the append() function. In case // of element selections, append() also requires that the appended element's index is strictly // larger than the currently largest non-zero index of the selection and that the selections's // capacity is large enough to hold the new element. Note however that due to the nature of an // element selection, which is an alias to arbitrary elements of a sparse vector, the append() // function does not work as efficiently for an element selection as it does for a vector. e.reserve( 10UL ); e.append( 51UL, -2.1 ); \endcode // \n \section views_element_selections_common_operations Common Operations // // An element selection can be used like any other dense or sparse vector. For instance, the // number of selected elements can be obtained via the \c size() function, the current capacity // via the \c capacity() function, and the number of non-zero elements via the \c nonZeros() // function. However, since element selections are references to a specific range of a vector, // several operations are not possible, such as resizing and swapping. The following example // shows this by means of an element selection on a dense vector: \code blaze::DynamicVector<int,blaze::rowVector> v( 42UL ); // ... 
Resizing and initialization // Selecting the elements 5 and 10 auto e = elements( v, { 5UL, 10UL } ); e.size(); // Returns the number of elements in the element selection e.capacity(); // Returns the capacity of the element selection e.nonZeros(); // Returns the number of non-zero elements contained in the element selection e.resize( 84UL ); // Compilation error: Cannot resize an element selection auto e2 = elements( v, { 15UL, 10UL } ); swap( e, e2 ); // Compilation error: Swap operation not allowed \endcode // \n \section views_element_selections_arithmetic_operations Arithmetic Operations // // Both dense and sparse element selections can be used in all arithmetic operations that any other // dense or sparse vector can be used in. The following example gives an impression of the use of // dense element selections within arithmetic operations. All operations (addition, subtraction, // multiplication, scaling, ...) can be performed on all possible combinations of dense and sparse // element selections with fitting element types: \code blaze::DynamicVector<double,blaze::rowVector> d1, d2, d3; blaze::CompressedVector<double,blaze::rowVector> s1, s2; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> A; std::initializer_list<size_t> indices1{ 0UL, 3UL, 6UL, 9UL, 12UL, 15UL, 18UL, 21UL }; std::initializer_list<size_t> indices2{ 1UL, 4UL, 7UL, 10UL, 13UL, 16UL, 19UL, 22UL }; std::initializer_list<size_t> indices3{ 2UL, 5UL, 8UL, 11UL, 14UL, 17UL, 20UL, 23UL }; auto e( elements( d1, indices1 ) ); // Selecting every third element of d1 in the range [0..21] e = d2; // Dense vector assignment to the selected elements elements( d1, indices2 ) = s1; // Sparse vector assignment to the selected elements d3 = e + d2; // Dense vector/dense vector addition s2 = s1 + elements( d1, indices2 ); // Sparse vector/dense vector addition d2 = e * elements( d1, indices3 ); // Component-wise vector multiplication elements( d1, indices2 ) *= 2.0; // In-place scaling of the second selection of elements d2 = elements( d1, indices3 ) * 2.0; // Scaling of the elements in the third selection of elements d2 = 2.0 * elements( d1, indices3 ); // Scaling of the elements in the third selection of elements elements( d1, indices1 ) += d2; // Addition assignment elements( d1, indices2 ) -= s2; // Subtraction assignment elements( d1, indices3 ) *= e; // Multiplication assignment double scalar = elements( d1, indices2 ) * trans( s1 ); // Scalar/dot/inner product between two vectors A = trans( s1 ) * elements( d1, { 3UL, 6UL } ); // Outer product between two vectors \endcode // \n Previous: \ref views_subvectors &nbsp; &nbsp; Next: \ref views_submatrices */ //************************************************************************************************* //**Submatrices************************************************************************************ /*!\page views_submatrices Submatrices // // \tableofcontents // // // Submatrices provide views on a specific part of a dense or sparse matrix just as subvectors // provide views on specific parts of vectors. As such, submatrices act as a reference to a // specific block within a matrix. This reference is valid and can be used in every way any // other dense or sparse matrix can be used as long as the matrix containing the submatrix is // not resized or entirely destroyed. The submatrix also acts as an alias to the matrix elements // in the specified block: Changes made to the elements (e.g.
modifying values, inserting or // erasing elements) are immediately visible in the matrix and changes made via the matrix are // immediately visible in the submatrix. // // // \n \section views_submatrices_setup Setup of Submatrices // <hr> // // A view on a dense or sparse submatrix can be created very conveniently via the \c submatrix() // function. It can be included via the header file \code #include <blaze/math/Submatrix.h> \endcode // The first and second parameter specify the row and column of the first element of the submatrix. // The third and fourth parameter specify the number of rows and columns, respectively. The four // parameters can be specified either at compile time or at runtime: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization // Creating a dense submatrix of size 4x8, starting in row 3 and column 0 (compile time arguments) auto sm1 = submatrix<3UL,0UL,4UL,8UL>( A ); // Creating a dense submatrix of size 8x16, starting in row 0 and column 4 (runtime arguments) auto sm2 = submatrix( A, 0UL, 4UL, 8UL, 16UL ); \endcode // The \c submatrix() function returns an expression representing the submatrix view. The type of // this expression depends on the given submatrix arguments, primarily the type of the matrix and // the compile time arguments. If the type is required, it can be determined via the \c decltype // specifier: \code using MatrixType = blaze::DynamicMatrix<int>; using SubmatrixType = decltype( blaze::submatrix<3UL,0UL,4UL,8UL>( std::declval<MatrixType>() ) ); \endcode // The resulting view can be treated as any other dense or sparse matrix, i.e. it can be assigned // to, it can be copied from, and it can be used in arithmetic operations. A submatrix created from // a row-major matrix will itself be a row-major matrix, a submatrix created from a column-major // matrix will be a column-major matrix. The view can also be used on both sides of an assignment: // The submatrix can either be used as an alias to grant write access to a specific submatrix // of a matrix primitive on the left-hand side of an assignment or to grant read-access to // a specific submatrix of a matrix primitive or expression on the right-hand side of an // assignment. The following example demonstrates this in detail: \code blaze::DynamicMatrix<double,blaze::columnMajor> A, B; blaze::CompressedMatrix<double,blaze::rowMajor> C; // ... Resizing and initialization // Creating a dense submatrix of size 8x4, starting in row 0 and column 2 auto sm = submatrix( A, 0UL, 2UL, 8UL, 4UL ); // Setting the submatrix of A to a 8x4 submatrix of B sm = submatrix( B, 0UL, 0UL, 8UL, 4UL ); // Copying the sparse matrix C into another 8x4 submatrix of A submatrix( A, 8UL, 2UL, 8UL, 4UL ) = C; // Assigning part of the result of a matrix addition to the first submatrix sm = submatrix( B + C, 0UL, 0UL, 8UL, 4UL ); \endcode // \warning It is the programmer's responsibility to ensure the submatrix does not outlive the // viewed matrix: \code // Creating a submatrix on a temporary matrix; results in a dangling reference! auto sm = submatrix<1UL,0UL,2UL,3UL>( DynamicMatrix<int>{ { 1, 2, 3 }, { 4, 5, 6 }, { 7, 8, 9 } } ); \endcode // \n \section views_submatrices_element_access Element Access // <hr> // // The elements of a submatrix can be directly accessed with the function call operator: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... 
Resizing and initialization // Creating an 8x8 submatrix, starting from position (4,4) auto sm = submatrix( A, 4UL, 4UL, 8UL, 8UL ); // Setting the element (0,0) of the submatrix, which corresponds to // the element at position (4,4) in matrix A sm(0,0) = 2.0; \endcode // Alternatively, the elements of a submatrix can be traversed via (const) iterators. Just as // with matrices, in case of non-const submatrices, \c begin() and \c end() return an iterator, // which allows to manipulate the elements, in case of constant submatrices an iterator to // immutable elements is returned: \code blaze::DynamicMatrix<int,blaze::rowMajor> A( 256UL, 512UL ); // ... Resizing and initialization // Creating a reference to a specific submatrix of matrix A auto sm = submatrix( A, 16UL, 16UL, 64UL, 128UL ); // Traversing the elements of the 0th row via iterators to non-const elements for( auto it=sm.begin(0); it!=sm.end(0); ++it ) { *it = ...; // OK: Write access to the dense submatrix value. ... = *it; // OK: Read access to the dense submatrix value. } // Traversing the elements of the 1st row via iterators to const elements for( auto it=sm.cbegin(1); it!=sm.cend(1); ++it ) { *it = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid. ... = *it; // OK: Read access to the dense submatrix value. } \endcode \code blaze::CompressedMatrix<int,blaze::rowMajor> A( 256UL, 512UL ); // ... Resizing and initialization // Creating a reference to a specific submatrix of matrix A auto sm = submatrix( A, 16UL, 16UL, 64UL, 128UL ); // Traversing the elements of the 0th row via iterators to non-const elements for( auto it=sm.begin(0); it!=sm.end(0); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } // Traversing the elements of the 1st row via iterators to const elements for( auto it=sm.cbegin(1); it!=sm.cend(1); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } \endcode // \n \section views_submatrices_element_insertion Element Insertion // <hr> // // Inserting/accessing elements in a sparse submatrix can be done by several alternative functions. // The following example demonstrates all options: \code blaze::CompressedMatrix<double,blaze::rowMajor> A( 256UL, 512UL ); // Non-initialized matrix of size 256x512 auto sm = submatrix( A, 10UL, 10UL, 16UL, 16UL ); // View on a 16x16 submatrix of A // The function call operator provides access to all possible elements of the sparse submatrix, // including the zero elements. In case the function call operator is used to access an element // that is currently not stored in the sparse submatrix, the element is inserted into the // submatrix. sm(2,4) = 2.0; // The second operation for inserting elements is the set() function. In case the element is // not contained in the submatrix it is inserted into the submatrix, if it is already contained // in the submatrix its value is modified.
sm.set( 2UL, 5UL, -1.2 ); // An alternative for inserting elements into the submatrix is the insert() function. However, // it inserts the element only in case the element is not already contained in the submatrix. sm.insert( 2UL, 6UL, 3.7 ); // Just as in the case of sparse matrices, elements can also be inserted via the append() // function. In case of submatrices, append() also requires that the appended element's // index is strictly larger than the currently largest non-zero index in the according row // or column of the submatrix and that the according row's or column's capacity is large // enough to hold the new element. Note however that due to the nature of a submatrix, which // may be an alias to the middle of a sparse matrix, the append() function does not work as // efficiently for a submatrix as it does for a matrix. sm.reserve( 2UL, 10UL ); sm.append( 2UL, 10UL, -2.1 ); \endcode // \n \section views_submatrices_common_operations Common Operations // <hr> // // A submatrix view can be used like any other dense or sparse matrix. This means that with only // a few exceptions all \ref matrix_operations and \ref arithmetic_operations can be used. For // instance, the current size of the matrix, i.e. the number of rows or columns can be obtained // via the \c rows() and \c columns() functions, the current total capacity via the \c capacity() // function, and the number of non-zero elements via the \c nonZeros() function. However, since // submatrices are views on a specific submatrix of a matrix, several operations are not possible, // such as resizing and swapping: \code blaze::DynamicMatrix<int,blaze::rowMajor> A( 42UL, 42UL ); // ... Resizing and initialization // Creating a view on an 8x12 submatrix of matrix A auto sm = submatrix( A, 0UL, 0UL, 8UL, 12UL ); sm.rows(); // Returns the number of rows of the submatrix sm.columns(); // Returns the number of columns of the submatrix sm.capacity(); // Returns the capacity of the submatrix sm.nonZeros(); // Returns the number of non-zero elements contained in the submatrix sm.resize( 10UL, 8UL ); // Compilation error: Cannot resize a submatrix of a matrix auto sm2 = submatrix( A, 8UL, 0UL, 12UL, 8UL ); swap( sm, sm2 ); // Compilation error: Swap operation not allowed \endcode // \n \section views_submatrices_arithmetic_operations Arithmetic Operations // <hr> // // Both dense and sparse submatrices can be used in all arithmetic operations that any other dense // or sparse matrix can be used in. The following example gives an impression of the use of dense // submatrices within arithmetic operations. All operations (addition, subtraction, multiplication, // scaling, ...) can be performed on all possible combinations of dense and sparse matrices with // fitting element types: \code blaze::DynamicMatrix<double,blaze::rowMajor> D1, D2, D3; blaze::CompressedMatrix<double,blaze::rowMajor> S1, S2; blaze::CompressedVector<double,blaze::columnVector> a, b; // ...
Resizing and initialization auto sm = submatrix( D1, 0UL, 0UL, 8UL, 8UL ); // View on the 8x8 submatrix of matrix D1 // starting from row 0 and column 0 submatrix( D1, 0UL, 8UL, 8UL, 8UL ) = D2; // Dense matrix initialization of the 8x8 submatrix // starting in row 0 and column 8 sm = S1; // Sparse matrix initialization of the second 8x8 submatrix D3 = sm + D2; // Dense matrix/dense matrix addition S2 = S1 - submatrix( D1, 8UL, 0UL, 8UL, 8UL ); // Sparse matrix/dense matrix subtraction D2 = sm * submatrix( D1, 8UL, 8UL, 8UL, 8UL ); // Dense matrix/dense matrix multiplication submatrix( D1, 8UL, 0UL, 8UL, 8UL ) *= 2.0; // In-place scaling of a submatrix of D1 D2 = submatrix( D1, 8UL, 8UL, 8UL, 8UL ) * 2.0; // Scaling of a submatrix of D1 D2 = 2.0 * sm; // Scaling of a submatrix of D1 submatrix( D1, 0UL, 8UL, 8UL, 8UL ) += D2; // Addition assignment submatrix( D1, 8UL, 0UL, 8UL, 8UL ) -= S1; // Subtraction assignment submatrix( D1, 8UL, 8UL, 8UL, 8UL ) *= sm; // Multiplication assignment a = submatrix( D1, 4UL, 4UL, 8UL, 8UL ) * b; // Dense matrix/sparse vector multiplication \endcode // \n \section views_aligned_submatrices Aligned Submatrices // <hr> // // Usually submatrices can be defined anywhere within a matrix. They may start at any position and // may have an arbitrary extension (only restricted by the extension of the underlying matrix). // However, in contrast to matrices themselves, which are always properly aligned in memory and // therefore can provide maximum performance, this means that submatrices in general have to be // considered to be unaligned. This can be made explicit by the \c blaze::unaligned flag: \code using blaze::unaligned; blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization // Identical creations of an unaligned submatrix of size 8x8, starting in row 0 and column 0 auto sm1 = submatrix ( A, 0UL, 0UL, 8UL, 8UL ); auto sm2 = submatrix<unaligned>( A, 0UL, 0UL, 8UL, 8UL ); auto sm3 = submatrix<0UL,0UL,8UL,8UL> ( A ); auto sm4 = submatrix<unaligned,0UL,0UL,8UL,8UL>( A ); \endcode // All of these calls to the \c submatrix() function are identical. Whether the alignment flag is // explicitly specified or not, it always returns an unaligned submatrix. Whereas this may provide // full flexibility in the creation of submatrices, this might result in performance disadvantages // in comparison to matrix primitives (even in case the specified submatrix could be aligned). // Whereas matrix primitives are guaranteed to be properly aligned and therefore provide maximum // performance in all operations, a general view on a matrix might not be properly aligned. This // may cause a performance penalty on some platforms and/or for some operations. // // However, it is also possible to create aligned submatrices. Aligned submatrices are identical to // unaligned submatrices in all aspects, except that they may pose additional alignment restrictions // and therefore have less flexibility during creation, but don't suffer from performance penalties // and provide the same performance as the underlying matrix.
Aligned submatrices are created by // explicitly specifying the \c blaze::aligned flag: \code using blaze::aligned; // Creating an aligned submatrix of size 8x8, starting in row 0 and column 0 auto sv1 = submatrix<aligned>( A, 0UL, 0UL, 8UL, 8UL ); auto sv2 = submatrix<aligned,0UL,0UL,8UL,8UL>( A ); \endcode // The alignment restrictions refer to system dependent address restrictions for the used element // type and the available vectorization mode (SSE, AVX, ...). In order to be properly aligned the // first element of each row/column of the submatrix must be aligned. The following source code // gives some examples for a double precision row-major dynamic matrix, assuming that padding is // enabled and that AVX is available, which packs 4 \c double values into a SIMD vector: \code using blaze::aligned; blaze::DynamicMatrix<double,blaze::rowMajor> D( 13UL, 17UL ); // ... Resizing and initialization // OK: Starts at position (0,0), i.e. the first element of each row is aligned (due to padding) auto dsm1 = submatrix<aligned>( D, 0UL, 0UL, 7UL, 11UL ); // OK: First column is a multiple of 4, i.e. the first element of each row is aligned (due to padding) auto dsm2 = submatrix<aligned>( D, 3UL, 12UL, 8UL, 16UL ); // OK: First column is a multiple of 4 and the submatrix includes the last row and column auto dsm3 = submatrix<aligned>( D, 4UL, 0UL, 9UL, 17UL ); // Error: First column is not a multiple of 4, i.e. the first element is not aligned auto dsm4 = submatrix<aligned>( D, 2UL, 3UL, 12UL, 12UL ); \endcode // Note that the discussed alignment restrictions are only valid for aligned dense submatrices. // In contrast, aligned sparse submatrices at this time don't pose any additional restrictions. // Therefore aligned and unaligned sparse submatrices are truly fully identical. Still, in case // the \c blaze::aligned flag is specified during setup, an aligned submatrix is created: \code using blaze::aligned; blaze::CompressedMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization // Creating an aligned submatrix of size 8x8, starting in row 0 and column 0 auto sv = submatrix<aligned>( A, 0UL, 0UL, 8UL, 8UL ); \endcode // \n \section views_submatrices_on_symmetric_matrices Submatrices on Symmetric Matrices // // Submatrices can also be created on symmetric matrices (see the \c SymmetricMatrix class template): \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; // Setup of a 16x16 symmetric matrix SymmetricMatrix< DynamicMatrix<int> > A( 16UL ); // Creating a dense submatrix of size 8x12, starting in row 2 and column 4 auto sm = submatrix( A, 2UL, 4UL, 8UL, 12UL ); \endcode // It is important to note, however, that (compound) assignments to such submatrices have a // special restriction: The symmetry of the underlying symmetric matrix must not be broken! // Since the modification of element \f$ a_{ij} \f$ of a symmetric matrix also modifies the // element \f$ a_{ji} \f$, the matrix to be assigned must be structured such that the symmetry // of the symmetric matrix is preserved. 
Otherwise a \a std::invalid_argument exception is // thrown: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; // Setup of two default 4x4 symmetric matrices SymmetricMatrix< DynamicMatrix<int> > A1( 4 ), A2( 4 ); // Setup of the 3x2 dynamic matrix // // ( 1 2 ) // B = ( 3 4 ) // ( 5 6 ) // DynamicMatrix<int> B{ { 1, 2 }, { 3, 4 }, { 5, 6 } }; // OK: Assigning B to a submatrix of A1 such that the symmetry can be preserved // // ( 0 0 1 2 ) // A1 = ( 0 0 3 4 ) // ( 1 3 5 6 ) // ( 2 4 6 0 ) // submatrix( A1, 0UL, 2UL, 3UL, 2UL ) = B; // OK // Error: Assigning B to a submatrix of A2 such that the symmetry cannot be preserved! // The elements marked with X cannot be assigned unambiguously! // // ( 0 1 2 0 ) // A2 = ( 1 3 X 0 ) // ( 2 X 6 0 ) // ( 0 0 0 0 ) // submatrix( A2, 0UL, 1UL, 3UL, 2UL ) = B; // Assignment throws an exception! \endcode // \n Previous: \ref views_element_selections &nbsp; &nbsp; Next: \ref views_rows */ //************************************************************************************************* //**Rows******************************************************************************************* /*!\page views_rows Rows // // \tableofcontents // // // Rows provide views on a specific row of a dense or sparse matrix. As such, rows act as a // reference to a specific row. This reference is valid and can be used in every way any other // row vector can be used as long as the matrix containing the row is not resized or entirely // destroyed. The row also acts as an alias to the row elements: Changes made to the elements // (e.g. modifying values, inserting or erasing elements) are immediately visible in the matrix // and changes made via the matrix are immediately visible in the row. // // // \n \section views_rows_setup Setup of Rows // <hr> // // \image html row.png // \image latex row.eps "Row view" width=250pt // // A reference to a dense or sparse row can be created very conveniently via the \c row() function. // It can be included via the header file \code #include <blaze/math/Row.h> \endcode // The row index must be in the range from \f$[0..M-1]\f$, where \c M is the total number of rows // of the matrix, and can be specified both at compile time or at runtime: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization // Creating a reference to the 1st row of matrix A (compile time index) auto row1 = row<1UL>( A ); // Creating a reference to the 2nd row of matrix A (runtime index) auto row2 = row( A, 2UL ); \endcode // The \c row() function returns an expression representing the row view. The type of this // expression depends on the given row arguments, primarily the type of the matrix and the compile // time arguments. If the type is required, it can be determined via the \c decltype specifier: \code using MatrixType = blaze::DynamicMatrix<int>; using RowType = decltype( blaze::row<1UL>( std::declval<MatrixType>() ) ); \endcode // The resulting view can be treated as any other row vector, i.e. it can be assigned to, it can // be copied from, and it can be used in arithmetic operations. The reference can also be used on // both sides of an assignment: The row can either be used as an alias to grant write access to a // specific row of a matrix primitive on the left-hand side of an assignment or to grant read-access // to a specific row of a matrix primitive or expression on the right-hand side of an assignment. 
// The following example demonstrates this in detail: \code blaze::DynamicVector<double,blaze::rowVector> x; blaze::CompressedVector<double,blaze::rowVector> y; blaze::DynamicMatrix<double,blaze::rowMajor> A, B; blaze::CompressedMatrix<double,blaze::rowMajor> C, D; // ... Resizing and initialization // Setting the 2nd row of matrix A to x auto row2 = row( A, 2UL ); row2 = x; // Setting the 3rd row of matrix B to y row( B, 3UL ) = y; // Setting x to the 4th row of the result of the matrix multiplication x = row( A * B, 4UL ); // Setting y to the 2nd row of the result of the sparse matrix multiplication y = row( C * D, 2UL ); \endcode // \warning It is the programmer's responsibility to ensure the row does not outlive the viewed // matrix: \code // Creating a row on a temporary matrix; results in a dangling reference! auto row1 = row<1UL>( DynamicMatrix<int>{ { 1, 2, 3 }, { 4, 5, 6 }, { 7, 8, 9 } } ); \endcode // \n \section views_rows_element_access Element Access // <hr> // // The elements of a row can be directly accessed with the subscript operator: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization // Creating a view on the 4th row of matrix A auto row4 = row( A, 4UL ); // Setting the 1st element of the dense row, which corresponds // to the 1st element in the 4th row of matrix A row4[1] = 2.0; \endcode // The numbering of the row elements is \f[\left(\begin{array}{*{5}{c}} 0 & 1 & 2 & \cdots & N-1 \\ \end{array}\right),\f] // where N is the number of columns of the referenced matrix. Alternatively, the elements of a // row can be traversed via iterators. Just as with vectors, in case of non-const rows, \c begin() // and \c end() return an iterator, which allows to manipulate the elements, in case of constant // rows an iterator to immutable elements is returned: \code blaze::DynamicMatrix<int,blaze::rowMajor> A( 128UL, 256UL ); // ... Resizing and initialization // Creating a reference to the 31st row of matrix A auto row31 = row( A, 31UL ); // Traversing the elements via iterators to non-const elements for( auto it=row31.begin(); it!=row31.end(); ++it ) { *it = ...; // OK; Write access to the dense row value ... = *it; // OK: Read access to the dense row value. } // Traversing the elements via iterators to const elements for( auto it=row31.cbegin(); it!=row31.cend(); ++it ) { *it = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid. ... = *it; // OK: Read access to the dense row value. } \endcode \code blaze::CompressedMatrix<int,blaze::rowMajor> A( 128UL, 256UL ); // ... Resizing and initialization // Creating a reference to the 31st row of matrix A auto row31 = row( A, 31UL ); // Traversing the elements via iterators to non-const elements for( auto it=row31.begin(); it!=row31.end(); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } // Traversing the elements via iterators to const elements for( auto it=row31.cbegin(); it!=row31.cend(); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... 
= it->index(); // OK: Read access to the index of the sparse element. } \endcode // \n \section views_rows_element_insertion Element Insertion // <hr> // // Inserting/accessing elements in a sparse row can be done by several alternative functions. // The following example demonstrates all options: \code blaze::CompressedMatrix<double,blaze::rowMajor> A( 10UL, 100UL ); // Non-initialized 10x100 matrix auto row0( row( A, 0UL ) ); // Reference to the 0th row of A // The subscript operator provides access to all possible elements of the sparse row, // including the zero elements. In case the subscript operator is used to access an element // that is currently not stored in the sparse row, the element is inserted into the row. row0[42] = 2.0; // The second operation for inserting elements is the set() function. In case the element // is not contained in the row it is inserted into the row, if it is already contained in // the row its value is modified. row0.set( 45UL, -1.2 ); // An alternative for inserting elements into the row is the insert() function. However, // it inserts the element only in case the element is not already contained in the row. row0.insert( 50UL, 3.7 ); // A very efficient way to add new elements to a sparse row is the append() function. // Note that append() requires that the appended element's index is strictly larger than // the currently largest non-zero index of the row and that the row's capacity is large // enough to hold the new element. row0.reserve( 10UL ); row0.append( 51UL, -2.1 ); \endcode // \n \section views_rows_common_operations Common Operations // <hr> // // A row view can be used like any other row vector. This means that with only a few exceptions // all \ref vector_operations and \ref arithmetic_operations can be used. For instance, the // current number of elements can be obtained via the \c size() function, the current capacity // via the \c capacity() function, and the number of non-zero elements via the \c nonZeros() // function. However, since rows are references to specific rows of a matrix, several operations // are not possible on views, such as resizing and swapping. The following example shows this by // means of a dense row view: \code blaze::DynamicMatrix<int,blaze::rowMajor> A( 42UL, 42UL ); // ... Resizing and initialization // Creating a reference to the 2nd row of matrix A auto row2 = row( A, 2UL ); row2.size(); // Returns the number of elements in the row row2.capacity(); // Returns the capacity of the row row2.nonZeros(); // Returns the number of non-zero elements contained in the row row2.resize( 84UL ); // Compilation error: Cannot resize a single row of a matrix auto row3 = row( A, 3UL ); swap( row2, row3 ); // Compilation error: Swap operation not allowed \endcode // \n \section views_rows_arithmetic_operations Arithmetic Operations // <hr> // // Both dense and sparse rows can be used in all arithmetic operations that any other dense or // sparse row vector can be used in. The following example gives an impression of the use of // dense rows within arithmetic operations. All operations (addition, subtraction, multiplication, // scaling, ...) 
can be performed on all possible combinations of dense and sparse rows with // fitting element types: \code blaze::DynamicVector<double,blaze::rowVector> a( 2UL, 2.0 ), b; blaze::CompressedVector<double,blaze::rowVector> c( 2UL ); c[1] = 3.0; blaze::DynamicMatrix<double,blaze::rowMajor> A( 4UL, 2UL ); // Non-initialized 4x2 matrix auto row0( row( A, 0UL ) ); // Reference to the 0th row of A row0[0] = 0.0; // Manual initialization of the 0th row of A row0[1] = 0.0; row( A, 1UL ) = 1.0; // Homogeneous initialization of the 1st row of A row( A, 2UL ) = a; // Dense vector initialization of the 2nd row of A row( A, 3UL ) = c; // Sparse vector initialization of the 3rd row of A b = row0 + a; // Dense vector/dense vector addition b = c + row( A, 1UL ); // Sparse vector/dense vector addition b = row0 * row( A, 2UL ); // Component-wise vector multiplication row( A, 1UL ) *= 2.0; // In-place scaling of the 1st row b = row( A, 1UL ) * 2.0; // Scaling of the 1st row b = 2.0 * row( A, 1UL ); // Scaling of the 1st row row( A, 2UL ) += a; // Addition assignment row( A, 2UL ) -= c; // Subtraction assignment row( A, 2UL ) *= row( A, 0UL ); // Multiplication assignment double scalar = row( A, 1UL ) * trans( c ); // Scalar/dot/inner product between two vectors A = trans( c ) * row( A, 1UL ); // Outer product between two vectors \endcode // \n \section views_rows_non_fitting_storage_order Views on Matrices with Non-Fitting Storage Order // <hr> // // Especially noteworthy is that row views can be created for both row-major and column-major // matrices. Whereas the interface of a row-major matrix only allows to traverse a row directly // and the interface of a column-major matrix only allows to traverse a column, via views it is // possible to traverse a row of a column-major matrix or a column of a row-major matrix. For // instance: \code blaze::DynamicMatrix<int,blaze::columnMajor> A( 64UL, 32UL ); // ... Resizing and initialization // Creating a reference to the 1st row of a column-major matrix A auto row1 = row( A, 1UL ); for( auto it=row1.begin(); it!=row1.end(); ++it ) { // ... } \endcode // However, please note that creating a row view on a matrix stored in a column-major fashion // can result in a considerable performance decrease in comparison to a row view on a matrix // with row-major storage format. This is due to the non-contiguous storage of the matrix // elements. Therefore care has to be taken in the choice of the most suitable storage order: \code // Setup of two column-major matrices blaze::DynamicMatrix<double,blaze::columnMajor> A( 128UL, 128UL ); blaze::DynamicMatrix<double,blaze::columnMajor> B( 128UL, 128UL ); // ... Resizing and initialization // The computation of the 15th row of the multiplication between A and B ... blaze::DynamicVector<double,blaze::rowVector> x = row( A * B, 15UL ); // ... is essentially the same as the following computation, which multiplies // the 15th row of the column-major matrix A with B. blaze::DynamicVector<double,blaze::rowVector> x = row( A, 15UL ) * B; \endcode // Although \b Blaze performs the resulting vector/matrix multiplication as efficiently as possible, // using a row-major storage order for matrix \c A would result in a more efficient evaluation.
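//
// As a purely illustrative sketch of this guideline (the helper \c sumViaRows() below is
// hypothetical and not part of \b Blaze), the same row-wise traversal code can be applied to
// both storage orders, but only the row-major variant walks contiguous memory:

   \code
   // Hypothetical helper: accumulates all elements of a matrix via row views. For a
   // row-major matrix each row( M, i ) traverses contiguous memory; for a column-major
   // matrix the identical loop touches strided locations and is potentially slower.
   template< typename MT >
   double sumViaRows( const MT& M )
   {
      double sum( 0.0 );
      for( size_t i=0UL; i<M.rows(); ++i ) {
         for( double value : row( M, i ) )
            sum += value;
      }
      return sum;
   }

   blaze::DynamicMatrix<double,blaze::rowMajor>    A( 128UL, 128UL );  // Row views are contiguous
   blaze::DynamicMatrix<double,blaze::columnMajor> B( 128UL, 128UL );  // Row views are strided
   // ... Resizing and initialization

   const double sumA = sumViaRows( A );  // Cache-friendly traversal
   const double sumB = sumViaRows( B );  // Same result, but potentially slower access pattern
   \endcode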
// // \n Previous: \ref views_submatrices &nbsp; &nbsp; Next: \ref views_row_selections */ //************************************************************************************************* //**Row Selections********************************************************************************* /*!\page views_row_selections Row Selections // // \tableofcontents // // // Row selections provide views on arbitrary compositions of rows of dense and sparse matrices. // These views act as a reference to the selected rows and represent them as another dense or // sparse matrix. This reference is valid and can be used in every way any other dense or sparse // matrix can be used as long as the matrix containing the rows is not resized or entirely // destroyed. The row selection also acts as an alias to the matrix elements in the specified // range: Changes made to the rows (e.g. modifying values, inserting or erasing elements) are // immediately visible in the matrix and changes made via the matrix are immediately visible // in the rows. // // // \n \section views_row_selections_setup Setup of Row Selections // // A row selection can be created very conveniently via the \c rows() function. It can be included // via the header file \code #include <blaze/math/Rows.h> \endcode // The indices of the rows to be selected can be specified either at compile time or at runtime // (by means of an initializer list, array or vector): \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization // Selecting the rows 4, 6, 8, and 10 (compile time arguments) auto rs1 = rows<4UL,6UL,8UL,10UL>( A ); // Selecting the rows 3, 2, and 1 (runtime arguments via an initializer list) const std::initializer_list<size_t> list{ 3UL, 2UL, 1UL }; auto rs2 = rows( A, { 3UL, 2UL, 1UL } ); auto rs3 = rows( A, list ); // Selecting the rows 1, 2, 3, 3, 2, and 1 (runtime arguments via a std::array) const std::array<size_t,6UL> array{ 1UL, 2UL, 3UL, 3UL, 2UL, 1UL }; auto rs4 = rows( A, array ); auto rs5 = rows( A, array.data(), array.size() ); // Selecting the row 4 five times (runtime arguments via a std::vector) const std::vector<size_t> vector{ 4UL, 4UL, 4UL, 4UL, 4UL }; auto rs6 = rows( A, vector ); auto rs7 = rows( A, vector.data(), vector.size() ); \endcode // Note that it is possible to alias the rows of the underlying matrix in any order. Also note // that it is possible to use the same index multiple times. The \c rows() function returns an // expression representing the view on the selected rows. The type of this expression depends // on the given arguments, primarily the type of the matrix and the compile time arguments. If // the type is required, it can be determined via the \c decltype specifier: \code using MatrixType = blaze::DynamicMatrix<int>; using RowsType = decltype( blaze::rows<3UL,0UL,4UL,8UL>( std::declval<MatrixType>() ) ); \endcode // The resulting view can be treated as any other dense or sparse matrix, i.e. it can be assigned // to, it can be copied from, and it can be used in arithmetic operations. Note, however, that a // row selection will always be treated as a row-major matrix, regardless of the storage order of // the matrix containing the rows. The view can also be used on both sides of an assignment: It // can either be used as an alias to grant write access to specific rows of a matrix primitive // on the left-hand side of an assignment or to grant read-access to specific rows of a matrix // primitive or expression on the right-hand side of an assignment.
The following example // demonstrates this in detail: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; blaze::DynamicMatrix<double,blaze::columnMajor> B; blaze::CompressedMatrix<double,blaze::rowMajor> C; // ... Resizing and initialization // Selecting the rows 1, 3, 5, and 7 of A auto rs = rows( A, { 1UL, 3UL, 5UL, 7UL } ); // Setting rows 1, 3, 5, and 7 of A to row 4 of B rs = rows( B, { 4UL, 4UL, 4UL, 4UL } ); // Setting the rows 2, 4, 6, and 8 of A to C rows( A, { 2UL, 4UL, 6UL, 8UL } ) = C; // Setting the first 4 rows of A to the rows 5, 4, 3, and 2 of C submatrix( A, 0UL, 0UL, 4UL, A.columns() ) = rows( C, { 5UL, 4UL, 3UL, 2UL } ); // Rotating the result of the addition between rows 1, 3, 5, and 7 of A and C B = rows( rs + C, { 2UL, 3UL, 0UL, 1UL } ); \endcode // \warning It is the programmer's responsibility to ensure the row selection does not outlive the // viewed matrix: \code // Creating a row selection on a temporary matrix; results in a dangling reference! auto rs = rows<2UL,0UL>( DynamicMatrix<int>{ { 1, 2, 3 }, { 4, 5, 6 }, { 7, 8, 9 } } ); \endcode // \n \section views_row_selections_element_access Element Access // // The elements of a row selection can be directly accessed via the function call operator: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization // Creating a view on the first four rows of A in reverse order auto rs = rows( A, { 3UL, 2UL, 1UL, 0UL } ); // Setting the element (0,0) of the row selection, which corresponds // to the element at position (3,0) in matrix A rs(0,0) = 2.0; \endcode // Alternatively, the elements of a row selection can be traversed via (const) iterators. Just as // with matrices, in case of non-const row selections, \c begin() and \c end() return an iterator, // which allows to manipulate the elements, in case of constant row selections an iterator to // immutable elements is returned: \code blaze::DynamicMatrix<int,blaze::rowMajor> A( 256UL, 512UL ); // ... Resizing and initialization // Creating a reference to a selection of rows of matrix A auto rs = rows( A, { 16UL, 32UL, 64UL, 128UL } ); // Traversing the elements of the 0th row via iterators to non-const elements for( auto it=rs.begin(0); it!=rs.end(0); ++it ) { *it = ...; // OK: Write access to the dense value. ... = *it; // OK: Read access to the dense value. } // Traversing the elements of the 1st row via iterators to const elements for( auto it=rs.cbegin(1); it!=rs.cend(1); ++it ) { *it = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid. ... = *it; // OK: Read access to the dense value. } \endcode \code blaze::CompressedMatrix<int,blaze::rowMajor> A( 256UL, 512UL ); // ... Resizing and initialization // Creating a reference to a selection of rows of matrix A auto rs = rows( A, { 16UL, 32UL, 64UL, 128UL } ); // Traversing the elements of the 0th row via iterators to non-const elements for( auto it=rs.begin(0); it!=rs.end(0); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } // Traversing the elements of the 1st row via iterators to const elements for( auto it=rs.cbegin(1); it!=rs.cend(1); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid. ...
= it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } \endcode // \n \section views_row_selections_element_insertion Element Insertion // // Inserting/accessing elements in a sparse row selection can be done by several alternative // functions. The following example demonstrates all options: \code blaze::CompressedMatrix<double,blaze::rowMajor> A( 256UL, 512UL ); // Non-initialized matrix of size 256x512 auto rs = rows( A, { 10UL, 20UL, 30UL, 40UL } ); // View on the rows 10, 20, 30, and 40 of A // The function call operator provides access to all possible elements of the sparse row // selection, including the zero elements. In case the function call operator is used to // access an element that is currently not stored in the sparse row selection, the element // is inserted into the row selection. rs(2,4) = 2.0; // The second operation for inserting elements is the set() function. In case the element is // not contained in the row selection it is inserted into the row selection, if it is already // contained in the row selection its value is modified. rs.set( 2UL, 5UL, -1.2 ); // An alternative for inserting elements into the row selection is the insert() function. // However, it inserts the element only in case the element is not already contained in the // row selection. rs.insert( 2UL, 6UL, 3.7 ); // Just as in the case of sparse matrices, elements can also be inserted via the append() // function. In case of row selections, append() also requires that the appended element's // index is strictly larger than the currently largest non-zero index in the according row // of the row selection and that the according row's capacity is large enough to hold the new // element. Note however that due to the nature of a row selection, which may be an alias to // an arbitrary collection of rows, the append() function does not work as efficiently for // a row selection as it does for a matrix. rs.reserve( 2UL, 10UL ); rs.append( 2UL, 10UL, -2.1 ); \endcode // \n \section views_row_selections_common_operations Common Operations // // A view on specific rows of a matrix can be used like any other dense or sparse matrix. For // instance, the current size of the matrix, i.e. the number of rows or columns can be obtained // via the \c rows() and \c columns() functions, the current total capacity via the \c capacity() // function, and the number of non-zero elements via the \c nonZeros() function. However, since // row selections are views on specific rows of a matrix, several operations are not possible, // such as resizing and swapping: \code blaze::DynamicMatrix<int,blaze::rowMajor> A( 42UL, 42UL ); // ... 
Resizing and initialization // Creating a view on the rows 8, 16, 24, and 32 of matrix A auto rs = rows( A, { 8UL, 16UL, 24UL, 32UL } ); rs.rows(); // Returns the number of rows of the row selection rs.columns(); // Returns the number of columns of the row selection rs.capacity(); // Returns the capacity of the row selection rs.nonZeros(); // Returns the number of non-zero elements contained in the row selection rs.resize( 10UL, 8UL ); // Compilation error: Cannot resize a row selection auto rs2 = rows( A, { 9UL, 17UL, 25UL, 33UL } ); swap( rs, rs2 ); // Compilation error: Swap operation not allowed \endcode // \n \section views_row_selections_arithmetic_operations Arithmetic Operations // // Both dense and sparse row selections can be used in all arithmetic operations that any other // dense or sparse matrix can be used in. The following example gives an impression of the use // of dense row selections within arithmetic operations. All operations (addition, subtraction, // multiplication, scaling, ...) can be performed on all possible combinations of dense and // sparse matrices with fitting element types: \code blaze::DynamicMatrix<double,blaze::rowMajor> D1, D2, D3; blaze::CompressedMatrix<double,blaze::rowMajor> S1, S2; blaze::CompressedVector<double,blaze::columnVector> a, b; // ... Resizing and initialization std::initializer_list<size_t> indices1{ 0UL, 3UL, 6UL, 9UL, 12UL, 15UL, 18UL, 21UL }; std::initializer_list<size_t> indices2{ 1UL, 4UL, 7UL, 10UL, 13UL, 16UL, 19UL, 22UL }; std::initializer_list<size_t> indices3{ 2UL, 5UL, 8UL, 11UL, 14UL, 17UL, 20UL, 23UL }; auto rs = rows( D1, indices1 ); // Selecting every third row of D1 in the range [0..21] rs = D2; // Dense matrix assignment to the selected rows rows( D1, indices2 ) = S1; // Sparse matrix assignment to the selected rows D3 = rs + D2; // Dense matrix/dense matrix addition S2 = S1 - rows( D1, indices2 ); // Sparse matrix/dense matrix subtraction D2 = rs % rows( D1, indices3 ); // Dense matrix/dense matrix Schur product D2 = rows( D1, indices2 ) * D1; // Dense matrix/dense matrix multiplication rows( D1, indices2 ) *= 2.0; // In-place scaling of the second selection of rows D2 = rows( D1, indices3 ) * 2.0; // Scaling of the elements in the third selection of rows D2 = 2.0 * rows( D1, indices3 ); // Scaling of the elements in the third selection of rows rows( D1, indices1 ) += D2; // Addition assignment rows( D1, indices2 ) -= S1; // Subtraction assignment rows( D1, indices3 ) %= rs; // Schur product assignment a = rows( D1, indices1 ) * b; // Dense matrix/sparse vector multiplication \endcode // \n \section views_row_selections_on_column_major_matrix Row Selections on Column-Major Matrices // // Especially noteworthy is that row selections can be created for both row-major and column-major // matrices. Whereas the interface of a row-major matrix only allows to traverse a row directly // and the interface of a column-major matrix only allows to traverse a column, via views it is // possible to traverse a row of a column-major matrix or a column of a row-major matrix. For // instance: \code blaze::DynamicMatrix<int,blaze::columnMajor> A( 64UL, 32UL ); // ... Resizing and initialization // Creating a reference to the 1st and 3rd row of a column-major matrix A auto rs = rows( A, { 1UL, 3UL } ); // Traversing row 0 of the selection, which corresponds to the 1st row of matrix A for( auto it=rs.begin( 0UL ); it!=rs.end( 0UL ); ++it ) { // ...
} \endcode // However, please note that creating a row selection on a matrix stored in a column-major fashion // can result in a considerable performance decrease in comparison to a row selection on a matrix // with row-major storage format. This is due to the non-contiguous storage of the matrix elements. // Therefore care has to be taken in the choice of the most suitable storage order: \code // Setup of two column-major matrices blaze::DynamicMatrix<double,blaze::columnMajor> A( 128UL, 128UL ); blaze::DynamicMatrix<double,blaze::columnMajor> B( 128UL, 128UL ); // ... Resizing and initialization // The computation of the 15th, 30th, and 45th row of the multiplication between A and B ... blaze::DynamicMatrix<double,blaze::rowMajor> x = rows( A * B, { 15UL, 30UL, 45UL } ); // ... is essentially the same as the following computation, which multiplies // the 15th, 30th, and 45th row of the column-major matrix A with B. blaze::DynamicMatrix<double,blaze::rowMajor> x = rows( A, { 15UL, 30UL, 45UL } ) * B; \endcode // Although \b Blaze performs the resulting matrix/matrix multiplication as efficiently as possible, // using a row-major storage order for matrix \c A would result in a more efficient evaluation. // // \n Previous: \ref views_rows &nbsp; &nbsp; Next: \ref views_columns */ //************************************************************************************************* //**Columns**************************************************************************************** /*!\page views_columns Columns // // \tableofcontents // // // Just as rows provide a view on a specific row of a matrix, columns provide views on a specific // column of a dense or sparse matrix. As such, columns act as a reference to a specific column. // This reference is valid and can be used in every way any other column vector can be used as long // as the matrix containing the column is not resized or entirely destroyed. Changes made to the // elements (e.g. modifying values, inserting or erasing elements) are immediately visible in the // matrix and changes made via the matrix are immediately visible in the column. // // // \n \section views_colums_setup Setup of Columns // <hr> // // \image html column.png // \image latex column.eps "Column view" width=250pt // // A reference to a dense or sparse column can be created very conveniently via the \c column() // function. It can be included via the header file \code #include <blaze/math/Column.h> \endcode // The column index must be in the range from \f$[0..N-1]\f$, where \c N is the total number of // columns of the matrix, and can be specified either at compile time or at runtime: \code blaze::DynamicMatrix<double,blaze::columnMajor> A; // ... Resizing and initialization // Creating a reference to the 1st column of matrix A (compile time index) auto col1 = column<1UL>( A ); // Creating a reference to the 2nd column of matrix A (runtime index) auto col2 = column( A, 2UL ); \endcode // The \c column() function returns an expression representing the column view. The type of this // expression depends on the given column arguments, primarily the type of the matrix and the // compile time arguments. If the type is required, it can be determined via the \c decltype // specifier: \code using MatrixType = blaze::DynamicMatrix<int>; using ColumnType = decltype( blaze::column<1UL>( std::declval<MatrixType>() ) ); \endcode // The resulting view can be treated as any other column vector, i.e.
it can be assigned to, it // can be copied from, and it can be used in arithmetic operations. The reference can also be used // on both sides of an assignment: The column can either be used as an alias to grant write access // to a specific column of a matrix primitive on the left-hand side of an assignment or to grant // read-access to a specific column of a matrix primitive or expression on the right-hand side // of an assignment. The following example demonstrates this in detail: \code blaze::DynamicVector<double,blaze::columnVector> x; blaze::CompressedVector<double,blaze::columnVector> y; blaze::DynamicMatrix<double,blaze::columnMajor> A, B; blaze::CompressedMatrix<double,blaze::columnMajor> C, D; // ... Resizing and initialization // Setting the 1st column of matrix A to x auto col1 = column( A, 1UL ); col1 = x; // Setting the 4th column of matrix B to y column( B, 4UL ) = y; // Setting x to the 2nd column of the result of the matrix multiplication x = column( A * B, 2UL ); // Setting y to the 2nd column of the result of the sparse matrix multiplication y = column( C * D, 2UL ); \endcode // \warning It is the programmer's responsibility to ensure the column does not outlive the // viewed matrix: \code // Creating a column on a temporary matrix; results in a dangling reference! auto col1 = column<1UL>( DynamicMatrix<int>{ { 1, 2, 3 }, { 4, 5, 6 }, { 7, 8, 9 } } ); \endcode // \n \section views_columns_element_access Element Access // <hr> // // The elements of a column can be directly accessed with the subscript operator. \code blaze::DynamicMatrix<double,blaze::columnMajor> A; // ... Resizing and initialization // Creating a view on the 4th column of matrix A auto col4 = column( A, 4UL ); // Setting the 1st element of the dense column, which corresponds // to the 1st element in the 4th column of matrix A col4[1] = 2.0; \endcode // The numbering of the column elements is \f[\left(\begin{array}{*{5}{c}} 0 & 1 & 2 & \cdots & N-1 \\ \end{array}\right),\f] // where N is the number of rows of the referenced matrix. Alternatively, the elements of a column // can be traversed via iterators. Just as with vectors, in case of non-const columns, \c begin() // and \c end() return an iterator, which allows to manipulate the elements, in case of constant // columns an iterator to immutable elements is returned: \code blaze::DynamicMatrix<int,blaze::columnMajor> A( 128UL, 256UL ); // ... Resizing and initialization // Creating a reference to the 31st column of matrix A auto col31 = column( A, 31UL ); // Traversing the elements via iterators to non-const elements for( auto it=col31.begin(); it!=col31.end(); ++it ) { *it = ...; // OK; Write access to the dense column value ... = *it; // OK: Read access to the dense column value. } // Traversing the elements via iterators to const elements for( auto it=col31.cbegin(); it!=col31.cend(); ++it ) { *it = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid. ... = *it; // OK: Read access to the dense column value. } \endcode \code blaze::CompressedMatrix<int,blaze::columnMajor> A( 128UL, 256UL ); // ... Resizing and initialization // Creating a reference to the 31st column of matrix A auto col31 = column( A, 31UL ); // Traversing the elements via iterators to non-const elements for( auto it=col31.begin(); it!=col31.end(); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. 
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } // Traversing the elements via iterators to const elements for( auto it=col31.cbegin(); it!=col31.cend(); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } \endcode // \n \section views_columns_element_insertion Element Insertion // <hr> // // Inserting/accessing elements in a sparse column can be done by several alternative functions. // The following example demonstrates all options: \code blaze::CompressedMatrix<double,blaze::columnMajor> A( 100UL, 10UL ); // Non-initialized 100x10 matrix auto col0( column( A, 0UL ) ); // Reference to the 0th column of A // The subscript operator provides access to all possible elements of the sparse column, // including the zero elements. In case the subscript operator is used to access an element // that is currently not stored in the sparse column, the element is inserted into the column. col0[42] = 2.0; // The second operation for inserting elements is the set() function. In case the element // is not contained in the column it is inserted into the column, if it is already contained // in the column its value is modified. col0.set( 45UL, -1.2 ); // An alternative for inserting elements into the column is the insert() function. However, // it inserts the element only in case the element is not already contained in the column. col0.insert( 50UL, 3.7 ); // A very efficient way to add new elements to a sparse column is the append() function. // Note that append() requires that the appended element's index is strictly larger than // the currently largest non-zero index of the column and that the column's capacity is // large enough to hold the new element. col0.reserve( 10UL ); col0.append( 51UL, -2.1 ); \endcode // \n \section views_columns_common_operations Common Operations // <hr> // // A column view can be used like any other column vector. This means that with only a few // exceptions all \ref vector_operations and \ref arithmetic_operations can be used. For instance, // the current number of elements can be obtained via the \c size() function, the current capacity // via the \c capacity() function, and the number of non-zero elements via the \c nonZeros() // function. However, since columns are references to specific columns of a matrix, several // operations are not possible on views, such as resizing and swapping. The following example // shows this by means of a dense column view: \code blaze::DynamicMatrix<int,blaze::columnMajor> A( 42UL, 42UL ); // ... 
Resizing and initialization // Creating a reference to the 2nd column of matrix A auto col2 = column( A, 2UL ); col2.size(); // Returns the number of elements in the column col2.capacity(); // Returns the capacity of the column col2.nonZeros(); // Returns the number of non-zero elements contained in the column col2.resize( 84UL ); // Compilation error: Cannot resize a single column of a matrix auto col3 = column( A, 3UL ); swap( col2, col3 ); // Compilation error: Swap operation not allowed \endcode // \n \section views_columns_arithmetic_operations Arithmetic Operations // <hr> // // Both dense and sparse columns can be used in all arithmetic operations that any other dense or // sparse column vector can be used in. The following example gives an impression of the use of // dense columns within arithmetic operations. All operations (addition, subtraction, multiplication, // scaling, ...) can be performed on all possible combinations of dense and sparse columns with // fitting element types: \code blaze::DynamicVector<double,blaze::columnVector> a( 2UL, 2.0 ), b; blaze::CompressedVector<double,blaze::columnVector> c( 2UL ); c[1] = 3.0; blaze::DynamicMatrix<double,blaze::columnMajor> A( 2UL, 4UL ); // Non-initialized 2x4 matrix auto col0( column( A, 0UL ) ); // Reference to the 0th column of A col0[0] = 0.0; // Manual initialization of the 0th column of A col0[1] = 0.0; column( A, 1UL ) = 1.0; // Homogeneous initialization of the 1st column of A column( A, 2UL ) = a; // Dense vector initialization of the 2nd column of A column( A, 3UL ) = c; // Sparse vector initialization of the 3rd column of A b = col0 + a; // Dense vector/dense vector addition b = c + column( A, 1UL ); // Sparse vector/dense vector addition b = col0 * column( A, 2UL ); // Component-wise vector multiplication column( A, 1UL ) *= 2.0; // In-place scaling of the 1st column b = column( A, 1UL ) * 2.0; // Scaling of the 1st column b = 2.0 * column( A, 1UL ); // Scaling of the 1st column column( A, 2UL ) += a; // Addition assignment column( A, 2UL ) -= c; // Subtraction assignment column( A, 2UL ) *= column( A, 0UL ); // Multiplication assignment double scalar = trans( c ) * column( A, 1UL ); // Scalar/dot/inner product between two vectors A = column( A, 1UL ) * trans( c ); // Outer product between two vectors \endcode // \n \section views_columns_non_fitting_storage_order Views on Matrices with Non-Fitting Storage Order // <hr> // // Especially noteworthy is that column views can be created for both row-major and column-major // matrices. Whereas the interface of a row-major matrix only allows to traverse a row directly // and the interface of a column-major matrix only allows to traverse a column, via views it is // possible to traverse a row of a column-major matrix or a column of a row-major matrix. For // instance: \code blaze::DynamicMatrix<int,blaze::rowMajor> A( 64UL, 32UL ); // ... Resizing and initialization // Creating a reference to the 1st column of a column-major matrix A auto col1 = column( A, 1UL ); for( auto it=col1.begin(); it!=col1.end(); ++it ) { // ... } \endcode // However, please note that creating a column view on a matrix stored in a row-major fashion // can result in a considerable performance decrease in comparison to a column view on a matrix // with column-major storage format. This is due to the non-contiguous storage of the matrix // elements. 
Therefore care has to be taken in the choice of the most suitable storage order:

   \code
   // Setup of two row-major matrices
   blaze::DynamicMatrix<double,blaze::rowMajor> A( 128UL, 128UL );
   blaze::DynamicMatrix<double,blaze::rowMajor> B( 128UL, 128UL );
   // ... Resizing and initialization

   // The computation of the 15th column of the multiplication between A and B ...
   blaze::DynamicVector<double,blaze::columnVector> x = column( A * B, 15UL );

   // ... is essentially the same as the following computation, which multiplies
   // A with the 15th column of the row-major matrix B.
   blaze::DynamicVector<double,blaze::columnVector> x = A * column( B, 15UL );
   \endcode

// Although \b Blaze performs the resulting matrix/vector multiplication as efficiently as possible,
// using a column-major storage order for matrix \c B would result in a more efficient evaluation.
//
// \n Previous: \ref views_row_selections &nbsp; &nbsp; Next: \ref views_column_selections
*/
//*************************************************************************************************


//**Column Selections******************************************************************************
/*!\page views_column_selections Column Selections
//
// \tableofcontents
//
//
// Column selections provide views on arbitrary compositions of columns of dense and sparse
// matrices. These views act as a reference to the selected columns and represent them as another
// dense or sparse matrix. This reference is valid and can be used in every way any other dense
// or sparse matrix can be used as long as the matrix containing the columns is not resized or
// entirely destroyed. The column selection also acts as an alias to the matrix elements in the
// specified range: Changes made to the columns (e.g. modifying values, inserting or erasing
// elements) are immediately visible in the matrix and changes made via the matrix are immediately
// visible in the columns.
//
//
// \n \section views_column_selections_setup Setup of Column Selections
//
// A column selection can be created very conveniently via the \c columns() function. It can be
// included via the header file

   \code
   #include <blaze/math/Columns.h>
   \endcode

// The indices of the columns to be selected can be specified either at compile time or at runtime
// (by means of an initializer list, array or vector):

   \code
   blaze::DynamicMatrix<double,blaze::columnMajor> A;
   // ... Resizing and initialization

   // Selecting the columns 4, 6, 8, and 10 (compile time arguments)
   auto cs1 = columns<4UL,6UL,8UL,10UL>( A );

   // Selecting the columns 3, 2, and 1 (runtime arguments via an initializer list)
   const std::initializer_list<size_t> list{ 3UL, 2UL, 1UL };
   auto cs2 = columns( A, { 3UL, 2UL, 1UL } );
   auto cs3 = columns( A, list );

   // Selecting the columns 1, 2, 3, 3, 2, and 1 (runtime arguments via a std::array)
   const std::array<size_t,6UL> array{ 1UL, 2UL, 3UL, 3UL, 2UL, 1UL };
   auto cs4 = columns( A, array );
   auto cs5 = columns( A, array.data(), array.size() );

   // Selecting the column 4 five times (runtime arguments via a std::vector)
   const std::vector<size_t> vector{ 4UL, 4UL, 4UL, 4UL, 4UL };
   auto cs6 = columns( A, vector );
   auto cs7 = columns( A, vector.data(), vector.size() );
   \endcode

// Note that it is possible to alias the columns of the underlying matrix in any order. Also note
// that it is possible to use the same index multiple times. The \c columns() function returns an
// expression representing the view on the selected columns.
The type of this expression depends // on the given arguments, primarily the type of the matrix and the compile time arguments. If // the type is required, it can be determined via the \c decltype specifier: \code using MatrixType = blaze::DynamicMatrix<int>; using ColumnsType = decltype( blaze::columns<3UL,0UL,4UL,8UL>( std::declval<MatrixType>() ) ); \endcode // The resulting view can be treated as any other dense or sparse matrix, i.e. it can be assigned // to, it can be copied from, and it can be used in arithmetic operations. Note, however, that a // column selection will always be treated as a column-major matrix, regardless of the storage // order of the matrix containing the columns. The view can also be used on both sides of an // assignment: It can either be used as an alias to grant write access to specific columns of a // matrix primitive on the left-hand side of an assignment or to grant read-access to specific // columns of a matrix primitive or expression on the right-hand side of an assignment. The // following example demonstrates this in detail: \code blaze::DynamicMatrix<double,blaze::columnMajor> A; blaze::DynamicMatrix<double,blaze::rowMajor> B; blaze::CompressedMatrix<double,blaze::columnMajor> C; // ... Resizing and initialization // Selecting the columns 1, 3, 5, and 7 of A auto cs = columns( A, { 1UL, 3UL, 5UL, 7UL } ); // Setting columns 1, 3, 5, and 7 of A to column 4 of B cs = columns( B, { 4UL, 4UL, 4UL, 4UL } ); // Setting the columns 2, 4, 6, and 8 of A to C columns( A, { 2UL, 4UL, 6UL, 8UL } ) = C; // Setting the first 4 columns of A to the columns 5, 4, 3, and 2 of C submatrix( A, 0UL, 0UL, A.rows(), 4UL ) = columns( C, { 5UL, 4UL, 3UL, 2UL } ); // Rotating the result of the addition between columns 1, 3, 5, and 7 of A and C B = columns( cs + C, { 2UL, 3UL, 0UL, 1UL } ); \endcode // \warning It is the programmer's responsibility to ensure the column selection does not outlive // the viewed matrix: \code // Creating a column selection on a temporary matrix; results in a dangling reference! auto cs = columns<2UL,0UL>( DynamicMatrix<int>{ { 1, 2, 3 }, { 4, 5, 6 }, { 7, 8, 9 } } ); \endcode // \n \section views_column_selections_element_access Element Access // // The elements of a column selection can be directly accessed via the function call operator: \code blaze::DynamicMatrix<double,blaze::columnMajor> A; // ... Resizing and initialization // Creating a view on the first four columns of A in reverse order auto cs = columns( A, { 3UL, 2UL, 1UL, 0UL } ); // Setting the element (0,0) of the column selection, which corresponds // to the element at position (0,3) in matrix A cs(0,0) = 2.0; \endcode // Alternatively, the elements of a column selection can be traversed via (const) iterators. // Just as with matrices, in case of non-const column selection, \c begin() and \c end() return // an iterator, which allows to manipuate the elements, in case of constant column selection an // iterator to immutable elements is returned: \code blaze::DynamicMatrix<int,blaze::columnMajor> A( 512UL, 256UL ); // ... Resizing and initialization // Creating a reference to a selection of columns of matrix A auto cs = columns( A, { 16UL, 32UL, 64UL, 128UL } ); // Traversing the elements of the 0th column via iterators to non-const elements for( auto it=cs.begin(0); it!=cs.end(0); ++it ) { *it = ...; // OK: Write access to the dense value. ... = *it; // OK: Read access to the dense value. 
} // Traversing the elements of the 1st column via iterators to const elements for( auto it=cs.cbegin(1); it!=cs.cend(1); ++it ) { *it = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid. ... = *it; // OK: Read access to the dense value. } \endcode \code blaze::CompressedMatrix<int,blaze::columnMajor> A( 512UL, 256UL ); // ... Resizing and initialization // Creating a reference to a selection of columns of matrix A auto cs = columns( A, { 16UL, 32UL, 64UL, 128UL } ); // Traversing the elements of the 0th column via iterators to non-const elements for( auto it=cs.begin(0); it!=cs.end(0); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } // Traversing the elements of the 1st column via iterators to const elements for( auto it=cs.cbegin(1); it!=cs.cend(1); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } \endcode // \n \section views_column_selections_element_insertion Element Insertion // // Inserting/accessing elements in a sparse column selection can be done by several alternative // functions. The following example demonstrates all options: \code blaze::CompressedMatrix<double,blaze::columnMajor> A( 512UL, 256UL ); // Non-initialized matrix of size 512x256 auto cs = columns( A, { 10UL, 20UL, 30UL, 40UL } ); // View on the columns 10, 20, 30, and 40 of A // The function call operator provides access to all possible elements of the sparse column // selection, including the zero elements. In case the function call operator is used to // access an element that is currently not stored in the sparse column selection, the element // is inserted into the column selection. cs(2,4) = 2.0; // The second operation for inserting elements is the set() function. In case the element is // not contained in the column selection it is inserted into the column selection, if it is // already contained in the column selection its value is modified. cs.set( 2UL, 5UL, -1.2 ); // An alternative for inserting elements into the column selection is the insert() function. // However, it inserts the element only in case the element is not already contained in the // column selection. cs.insert( 2UL, 6UL, 3.7 ); // Just as in the case of sparse matrices, elements can also be inserted via the append() // function. In case of column selections, append() also requires that the appended element's // index is strictly larger than the currently largest non-zero index in the according column // of the column selection and that the according column's capacity is large enough to hold the // new element. Note however that due to the nature of a column selection, which may be an alias // to an arbitrary collection of columns, the append() function does not work as efficiently // for a column selection as it does for a matrix. 
cs.reserve( 2UL, 10UL ); cs.append( 2UL, 10UL, -2.1 ); \endcode // \n \section views_column_selections_common_operations Common Operations // // A view on specific columns of a matrix can be used like any other dense or sparse matrix. For // instance, the current size of the matrix, i.e. the number of rows or columns can be obtained // via the \c rows() and \c columns() functions, the current total capacity via the \c capacity() // function, and the number of non-zero elements via the \c nonZeros() function. However, since // column selections are views on specific columns of a matrix, several operations are not possible, // such as resizing and swapping: \code blaze::DynamicMatrix<int,blaze::columnMajor> A( 42UL, 42UL ); // ... Resizing and initialization // Creating a view on the columns 8, 16, 24, and 32 of matrix A auto cs = columns( A, { 8UL, 16UL, 24UL, 32UL } ); cs.rows(); // Returns the number of rows of the column selection cs.columns(); // Returns the number of columns of the column selection cs.capacity(); // Returns the capacity of the column selection cs.nonZeros(); // Returns the number of non-zero elements contained in the column selection cs.resize( 10UL, 8UL ); // Compilation error: Cannot resize a column selection auto cs2 = columns( A, 9UL, 17UL, 25UL, 33UL ); swap( cs, cs2 ); // Compilation error: Swap operation not allowed \endcode // \n \section views_column_selections_arithmetic_operations Arithmetic Operations // // Both dense and sparse column selections can be used in all arithmetic operations that any other // dense or sparse matrix can be used in. The following example gives an impression of the use of // dense column selctions within arithmetic operations. All operations (addition, subtraction, // multiplication, scaling, ...) can be performed on all possible combinations of dense and // sparse matrices with fitting element types: \code blaze::DynamicMatrix<double,blaze::columnMajor> D1, D2, D3; blaze::CompressedMatrix<double,blaze::columnMajor> S1, S2; blaze::CompressedVector<double,blaze::columnVector> a, b; // ... 
Resizing and initialization

   std::initializer_list<size_t> indices1{ 0UL, 3UL, 6UL, 9UL, 12UL, 15UL, 18UL, 21UL };
   std::initializer_list<size_t> indices2{ 1UL, 4UL, 7UL, 10UL, 13UL, 16UL, 19UL, 22UL };
   std::initializer_list<size_t> indices3{ 2UL, 5UL, 8UL, 11UL, 14UL, 17UL, 20UL, 23UL };

   auto cs = columns( D1, indices1 );  // Selecting every third column of D1 in the range [0..21]

   cs = D2;                       // Dense matrix assignment to the selected columns
   columns( D1, indices2 ) = S1;  // Sparse matrix assignment to the selected columns

   D3 = cs + D2;                       // Dense matrix/dense matrix addition
   S2 = S1 - columns( D1, indices2 );  // Sparse matrix/dense matrix subtraction
   D2 = cs % columns( D1, indices3 );  // Dense matrix/dense matrix Schur product
   D2 = columns( D1, indices2 ) * D1;  // Dense matrix/dense matrix multiplication

   columns( D1, indices2 ) *= 2.0;      // In-place scaling of the second selection of columns
   D2 = columns( D1, indices3 ) * 2.0;  // Scaling of the elements in the third selection of columns
   D2 = 2.0 * columns( D1, indices3 );  // Scaling of the elements in the third selection of columns

   columns( D1, indices1 ) += D2;  // Addition assignment
   columns( D1, indices2 ) -= S1;  // Subtraction assignment
   columns( D1, indices3 ) %= cs;  // Schur product assignment

   a = columns( D1, indices1 ) * b;  // Dense matrix/sparse vector multiplication
   \endcode

// \n \section views_column_selections_on_row_major_matrix Column Selections on a Row-Major Matrix
//
// Especially noteworthy is that column selections can be created for both row-major and
// column-major matrices. Whereas the interface of a row-major matrix only allows to traverse a
// row directly and the interface of a column-major matrix only allows to traverse a column, via
// views it is possible to traverse a row of a column-major matrix or a column of a row-major
// matrix. For instance:

   \code
   blaze::DynamicMatrix<int,blaze::rowMajor> A( 64UL, 32UL );
   // ... Resizing and initialization

   // Creating a reference to the 1st and 3rd column of the row-major matrix A
   auto cs = columns( A, { 1UL, 3UL } );

   // Traversing column 0 of the selection, which corresponds to the 1st column of matrix A
   for( auto it=cs.begin( 0UL ); it!=cs.end( 0UL ); ++it ) {
      // ...
   }
   \endcode

// However, please note that creating a column selection on a matrix stored in a row-major fashion
// can result in a considerable performance decrease in comparison to a column selection on a
// matrix with column-major storage format. This is due to the non-contiguous storage of the
// matrix elements. Therefore care has to be taken in the choice of the most suitable storage
// order:

   \code
   // Setup of two row-major matrices
   blaze::DynamicMatrix<double,blaze::rowMajor> A( 128UL, 128UL );
   blaze::DynamicMatrix<double,blaze::rowMajor> B( 128UL, 128UL );
   // ... Resizing and initialization

   // The computation of the 15th, 30th, and 45th column of the multiplication between A and B ...
   blaze::DynamicMatrix<double,blaze::columnMajor> x = columns( A * B, { 15UL, 30UL, 45UL } );

   // ... is essentially the same as the following computation, which multiplies
   // A with the 15th, 30th, and 45th column of the row-major matrix B.
   blaze::DynamicMatrix<double,blaze::columnMajor> x = A * columns( B, { 15UL, 30UL, 45UL } );
   \endcode

// Although \b Blaze performs the resulting matrix/matrix multiplication as efficiently as possible,
// using a column-major storage order for matrix \c B would result in a more efficient evaluation.
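//
// As a minimal sketch of this advice (the explicit copy below is an assumed workaround, not part
// of the interface description above): if a row-major matrix has to be used, the selected columns
// can be copied into a column-major matrix once, so that all subsequent column-wise traversals
// access contiguous memory. Whether the extra copy pays off depends on how often the selection is
// traversed afterwards.

   \code
   blaze::DynamicMatrix<double,blaze::rowMajor> A( 128UL, 128UL );
   // ... Resizing and initialization

   // Materializing the selected columns in a column-major matrix (explicit copy)
   blaze::DynamicMatrix<double,blaze::columnMajor> C( columns( A, { 15UL, 30UL, 45UL } ) );

   // Column-wise traversals of C now run over contiguous memory
   \endcode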
// // \n Previous: \ref views_columns &nbsp; &nbsp; Next: \ref views_bands */ //************************************************************************************************* //**Bands****************************************************************************************** /*!\page views_bands Bands // // \tableofcontents // // // Bands provide views on a specific band of a dense or sparse matrix (e.g. the diagonal, the // subdiagonal, ...). As such, bands act as a reference to a specific band. This reference // is valid and can be used in every way any other vector can be used as long as the matrix // containing the band is not resized or entirely destroyed. The band also acts as an alias to // the band elements: Changes made to the elements (e.g. modifying values, inserting or erasing // elements) are immediately visible in the matrix and changes made via the matrix are immediately // visible in the band. // // // \n \section views_bands_setup Setup of Bands // <hr> // // \image html band.png // \image latex band.eps "Band view" width=250pt // // A reference to a dense or sparse band can be created very conveniently via the \c band() // function. It can be included via the header file \code #include <blaze/math/Band.h> \endcode // The band index must be in the range from \f$[min(0,1-M)..max(0,N-1)]\f$, where \c M is the // total number of rows and \c N is the total number of columns, and can be specified both at // compile time or at runtime: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization // Creating a reference to the 1st lower band of matrix A (compile time index) auto band1 = band<-1L>( A ); // Creating a reference to the 2nd upper band of matrix A (runtime index) auto band2 = band( A, 2L ); \endcode // In addition, the \c diagonal() function provides a convenient shortcut for the setup of a view // on the diagonal of a dense or sparse matrix. It has the same effect as calling the \c band() // function with a compile time index of 0: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization // Creating a reference to the diagonal of matrix A via the band() and diagonal() functions auto diag1 = band<0L>( A ); auto diag2 = diagonal( A ); static_assert( blaze::IsSame< decltype(diag1), decltype(diag2) >::value, "Non-identical types detected" ); \endcode // Both the \c band() and the \c diagonal() function return an expression representing the band // view. The type of this expression depends on the given arguments, primarily the type of the // matrix and the compile time arguments. If the type is required, it can be determined via // \c decltype specifier: \code using MatrixType = blaze::DynamicMatrix<int>; using BandType = decltype( blaze::band<1L>( std::declval<MatrixType>() ) ); using DiagonalType = decltype( blaze::diagonal( std::declval<MatrixType>() ) ); \endcode // This resulting view can be treated as any other vector, i.e. it can be assigned to, it can // be copied from, and it can be used in arithmetic operations. By default, bands are considered // column vectors, but this setting can be changed via the \c defaultTransposeFlag switch. The // reference can also be used on both sides of an assignment: The band can either be used as an // alias to grant write access to a specific band of a matrix primitive on the left-hand side of // an assignment or to grant read-access to a specific band of a matrix primitive or expression // on the right-hand side of an assignment. 
The following example demonstrates this in detail: \code blaze::DynamicVector<double,blaze::rowVector> x; blaze::CompressedVector<double,blaze::rowVector> y; blaze::DynamicMatrix<double,blaze::rowMajor> A, B; blaze::CompressedMatrix<double,blaze::rowMajor> C, D; // ... Resizing and initialization // Setting the 2nd upper band of matrix A to x auto band2 = band( A, 2L ); band2 = x; // Setting the 3rd upper band of matrix B to y band( B, 3L ) = y; // Setting x to the 2nd lower band of the result of the matrix multiplication x = band( A * B, -2L ); // Setting y to the 2nd upper band of the result of the sparse matrix multiplication y = band( C * D, 2L ); \endcode // \warning It is the programmer's responsibility to ensure the band does not outlive the viewed // matrix: \code // Creating a band on a temporary matrix; results in a dangling reference! auto band1 = band<1L>( DynamicMatrix<int>{ { 1, 2, 3 }, { 4, 5, 6 }, { 7, 8, 9 } } ); \endcode // \n \section views_bands_element_access Element Access // <hr> // // The elements of a band can be directly accessed with the subscript operator: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization // Creating a view on the 4th upper band of matrix A auto band4 = band( A, 4L ); // Setting the 1st element of the dense band, which corresponds // to the 1st element in the 4th upper band of matrix A band4[1] = 2.0; \endcode // The numbering of the band elements is \f[\left(\begin{array}{*{5}{c}} 0 & 1 & 2 & \cdots & N-1 \\ \end{array}\right),\f] // where N is the number of elements of the referenced band. Alternatively, the elements of a band // can be traversed via iterators. Just as with vectors, in case of non-const band, \c begin() and // \c end() return an iterator, which allows to manipulate the elements, in case of constant bands // an iterator to immutable elements is returned: \code blaze::DynamicMatrix<int,blaze::rowMajor> A( 128UL, 256UL ); // ... Resizing and initialization // Creating a reference to the 5th upper band of matrix A auto band5 = band( A, 5L ); // Traversing the elements via iterators to non-const elements for( auto it=band5.begin(); it!=band5.end(); ++it ) { *it = ...; // OK; Write access to the dense band value ... = *it; // OK: Read access to the dense band value. } // Traversing the elements via iterators to const elements for( auto it=band5.cbegin(); it!=band5.cend(); ++it ) { *it = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid. ... = *it; // OK: Read access to the dense band value. } \endcode \code blaze::CompressedMatrix<int,blaze::rowMajor> A( 128UL, 256UL ); // ... Resizing and initialization // Creating a reference to the 5th band of matrix A auto band5 = band( A, 5L ); // Traversing the elements via iterators to non-const elements for( auto it=band5.begin(); it!=band5.end(); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } // Traversing the elements via iterators to const elements for( auto it=band5.cbegin(); it!=band5.cend(); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. 
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } \endcode // \n \section views_bands_element_insertion Element Insertion // <hr> // // Inserting/accessing elements in a sparse band can be done by several alternative functions. // The following example demonstrates all options: \code blaze::CompressedMatrix<double,blaze::rowMajor> A( 10UL, 100UL ); // Non-initialized 10x100 matrix auto diag( band( A, 0L ) ); // Reference to the diagonal of A // The subscript operator provides access to all possible elements of the sparse band, // including the zero elements. In case the subscript operator is used to access an element // that is currently not stored in the sparse band, the element is inserted into the band. diag[42] = 2.0; // The second operation for inserting elements is the set() function. In case the element // is not contained in the band it is inserted into the band, if it is already contained in // the band its value is modified. diag.set( 45UL, -1.2 ); // An alternative for inserting elements into the band is the insert() function. However, // it inserts the element only in case the element is not already contained in the band. diag.insert( 50UL, 3.7 ); \endcode // \n \section views_bands_common_operations Common Operations // <hr> // // A band view can be used like any other column vector. This means that with only a few // exceptions all \ref vector_operations and \ref arithmetic_operations can be used. For instance, // the current number of band elements can be obtained via the \c size() function, the current // capacity via the \c capacity() function, and the number of non-zero elements via the // \c nonZeros() function. However, since bands are references to specific bands of a matrix, // several operations are not possible, such as resizing and swapping. The following example // shows this by means of a dense band view: \code blaze::DynamicMatrix<int,blaze::rowMajor> A( 42UL, 42UL ); // ... Resizing and initialization // Creating a reference to the 2nd upper band of matrix A auto band2 = band( A, 2L ); band2.size(); // Returns the number of elements in the band band2.capacity(); // Returns the capacity of the band band2.nonZeros(); // Returns the number of non-zero elements contained in the band band2.resize( 84UL ); // Compilation error: Cannot resize a single band of a matrix auto band3 = band( A, 3L ); swap( band2, band3 ); // Compilation error: Swap operation not allowed \endcode // \n \section views_bands_arithmetic_operations Arithmetic Operations // <hr> // // Both dense and sparse bands can be used in all arithmetic operations that any other dense or // sparse vector can be used in. The following example gives an impression of the use of dense // bands within arithmetic operations. All operations (addition, subtraction, multiplication, // scaling, ...) 
can be performed on all possible combinations of dense and sparse bands with // fitting element types: \code blaze::DynamicVector<double,blaze::columnVector> a( 2UL, 2.0 ), b; blaze::CompressedVector<double,blaze::columnVector> c( 2UL ); c[1] = 3.0; blaze::DynamicMatrix<double,blaze::rowMajor> A( 4UL, 2UL ); // Non-initialized 4x2 matrix auto band1( band( A, 1L ) ); // Reference to the 1st upper band of A auto diag ( band( A, 0L ) ); // Reference to the diagonal of A band1[0] = 0.0; // Manual initialization of the 1st upper band of A diag = 1.0; // Homogeneous initialization of the diagonal of A band( A, -1L ) = a; // Dense vector initialization of the 1st lower band of A band( A, -2L ) = c; // Sparse vector initialization of the 2nd lower band of A b = diag + a; // Dense vector/dense vector addition b = c + band( A, -1L ); // Sparse vector/dense vector addition b = diag * band( A, -2L ); // Component-wise vector multiplication band( A, -1L ) *= 2.0; // In-place scaling of the 1st upper band b = band( A, -1L ) * 2.0; // Scaling of the 1st upper band b = 2.0 * band( A, -1L ); // Scaling of the 1st upper band band( A, -2L ) += a; // Addition assignment band( A, -2L ) -= c; // Subtraction assignment band( A, -2L ) *= band( A, 0L ); // Multiplication assignment double scalar = trans( c ) * band( A, -1L ); // Scalar/dot/inner product between two vectors A = band( A, -1L ) * trans( c ); // Outer product between two vectors \endcode // \n Previous: \ref views_column_selections &nbsp; &nbsp; Next: \ref arithmetic_operations */ //************************************************************************************************* //**Arithmetic Operations************************************************************************** /*!\page arithmetic_operations Arithmetic Operations // // \tableofcontents // // // \b Blaze provides the following arithmetic operations for vectors and matrices: // // <ul> // <li> \ref addition </li> // <li> \ref subtraction </li> // <li> \ref scalar_multiplication </li> // <li> \ref vector_vector_multiplication // <ul> // <li> \ref componentwise_multiplication </li> // <li> \ref inner_product </li> // <li> \ref outer_product </li> // <li> \ref cross_product </li> // </ul> // </li> // <li> \ref vector_vector_division </li> // <li> \ref matrix_vector_multiplication </li> // <li> \ref matrix_matrix_multiplication </li> // </ul> // // \n Previous: \ref views_bands &nbsp; &nbsp; Next: \ref addition */ //************************************************************************************************* //**Addition*************************************************************************************** /*!\page addition Addition // // The addition of vectors and matrices is as intuitive as the addition of scalar values. For both // the vector addition as well as the matrix addition the addition operator can be used. It even // enables the addition of dense and sparse vectors as well as the addition of dense and sparse // matrices: \code blaze::DynamicVector<int> v1( 5UL ), v3; blaze::CompressedVector<float> v2( 5UL ); // ... Initializing the vectors v3 = v1 + v2; // Addition of a two column vectors of different data type \endcode \code blaze::DynamicMatrix<float,rowMajor> M1( 7UL, 3UL ); blaze::CompressedMatrix<size_t,columnMajor> M2( 7UL, 3UL ), M3; // ... Initializing the matrices M3 = M1 + M2; // Addition of a row-major and a column-major matrix of different data type \endcode // Note that it is necessary that both operands have exactly the same dimensions. 
Violating this // precondition results in an exception. Also note that in case of vectors it is only possible to // add vectors with the same transpose flag: \code blaze::DynamicVector<int,columnVector> v1( 5UL ); blaze::CompressedVector<float,rowVector> v2( 5UL ); v1 + v2; // Compilation error: Cannot add a column vector and a row vector v1 + trans( v2 ); // OK: Addition of two column vectors \endcode // In case of matrices, however, it is possible to add row-major and column-major matrices. Note // however that in favor of performance the addition of two matrices with the same storage order // is favorable. The same argument holds for the element type: In case two vectors or matrices // with the same element type are added, the performance can be much higher due to vectorization // of the operation. \code blaze::DynamicVector<double>v1( 100UL ), v2( 100UL ), v3; // ... Initialization of the vectors v3 = v1 + v2; // Vectorized addition of two double precision vectors \endcode \code blaze::DynamicMatrix<float> M1( 50UL, 70UL ), M2( 50UL, 70UL ), M3; // ... Initialization of the matrices M3 = M1 + M2; // Vectorized addition of two row-major, single precision dense matrices \endcode // \n Previous: \ref arithmetic_operations &nbsp; &nbsp; Next: \ref subtraction */ //************************************************************************************************* //**Subtraction************************************************************************************ /*!\page subtraction Subtraction // // The subtraction of vectors and matrices works exactly as intuitive as the addition, but with // the subtraction operator. For both the vector subtraction as well as the matrix subtraction // the subtraction operator can be used. It also enables the subtraction of dense and sparse // vectors as well as the subtraction of dense and sparse matrices: \code blaze::DynamicVector<int> v1( 5UL ), v3; blaze::CompressedVector<float> v2( 5UL ); // ... Initializing the vectors v3 = v1 - v2; // Subtraction of a two column vectors of different data type blaze::DynamicMatrix<float,rowMajor> M1( 7UL, 3UL ); blaze::CompressedMatrix<size_t,columnMajor> M2( 7UL, 3UL ), M3; // ... Initializing the matrices M3 = M1 - M2; // Subtraction of a row-major and a column-major matrix of different data type \endcode // Note that it is necessary that both operands have exactly the same dimensions. Violating this // precondition results in an exception. Also note that in case of vectors it is only possible to // subtract vectors with the same transpose flag: \code blaze::DynamicVector<int,columnVector> v1( 5UL ); blaze::CompressedVector<float,rowVector> v2( 5UL ); v1 - v2; // Compilation error: Cannot subtract a row vector from a column vector v1 - trans( v2 ); // OK: Subtraction of two column vectors \endcode // In case of matrices, however, it is possible to subtract row-major and column-major matrices. // Note however that in favor of performance the subtraction of two matrices with the same storage // order is favorable. The same argument holds for the element type: In case two vectors or matrices // with the same element type are added, the performance can be much higher due to vectorization // of the operation. \code blaze::DynamicVector<double>v1( 100UL ), v2( 100UL ), v3; // ... Initialization of the vectors v3 = v1 - v2; // Vectorized subtraction of two double precision vectors blaze::DynamicMatrix<float> M1( 50UL, 70UL ), M2( 50UL, 70UL ), M3; // ... 
Initialization of the matrices M3 = M1 - M2; // Vectorized subtraction of two row-major, single precision dense matrices \endcode // \n Previous: \ref addition &nbsp; &nbsp; Next: \ref scalar_multiplication */ //************************************************************************************************* //**Scalar Multiplication************************************************************************** /*!\page scalar_multiplication Scalar Multiplication // // The scalar multiplication is the multiplication of a scalar value with a vector or a matrix. // In \b Blaze it is possible to use all built-in/fundamental data types except bool as scalar // values. Additionally, it is possible to use std::complex values with the same built-in data // types as element type. \code blaze::StaticVector<int,3UL> v1{ 1, 2, 3 }; blaze::DynamicVector<double> v2 = v1 * 1.2; blaze::CompressedVector<float> v3 = -0.3F * v1; \endcode \code blaze::StaticMatrix<int,3UL,2UL> M1{ { 1, 2 }, { 3, 4 }, { 5, 6 } }; blaze::DynamicMatrix<double> M2 = M1 * 1.2; blaze::CompressedMatrix<float> M3 = -0.3F * M1; \endcode // Vectors and matrices cannot be used for as scalar value for scalar multiplications (see the // following example). However, each vector and matrix provides the \c scale() function, which // can be used to scale a vector or matrix element-wise with arbitrary scalar data types: \code blaze::CompressedMatrix< blaze::StaticMatrix<int,3UL,3UL> > M1; blaze::StaticMatrix<int,3UL,3UL> scalar; M1 * scalar; // No scalar multiplication, but matrix/matrix multiplication M1.scale( scalar ); // Scalar multiplication \endcode // \n Previous: \ref subtraction &nbsp; &nbsp; Next: \ref componentwise_multiplication */ //************************************************************************************************* //**Vector/Vector Multiplication******************************************************************* /*!\page vector_vector_multiplication Vector/Vector Multiplication // // \n \section componentwise_multiplication Componentwise Multiplication // <hr> // // Multiplying two vectors with the same transpose flag (i.e. either blaze::columnVector or // blaze::rowVector) via the multiplication operator results in a componentwise multiplication // of the two vectors: \code using blaze::DynamicVector; using blaze::CompressedVector; CompressedVector<int,columnVector> v1( 17UL ); DynamicVector<int,columnVector> v2( 17UL ); StaticVector<double,10UL,rowVector> v3; DynamicVector<double,rowVector> v4( 10UL ); // ... Initialization of the vectors CompressedVector<int,columnVector> v5( v1 * v2 ); // Componentwise multiplication of a sparse and // a dense column vector. The result is a sparse // column vector. DynamicVector<double,rowVector> v6( v3 * v4 ); // Componentwise multiplication of two dense row // vectors. The result is a dense row vector. 
\endcode

// \n \section inner_product Inner Product / Scalar Product / Dot Product
// <hr>
//
// The multiplication between a row vector and a column vector results in an inner product between
// the two vectors:

   \code
   blaze::StaticVector<int,3UL,rowVector> v1{ 2, 5, -1 };
   blaze::DynamicVector<int,columnVector> v2{ -1, 3, -2 };

   int result = v1 * v2;  // Results in the value 15
   \endcode

// The \c trans() function can be used to transpose a vector as necessary:

   \code
   blaze::StaticVector<int,3UL,rowVector> v1{ 2, 5, -1 };
   blaze::StaticVector<int,3UL,rowVector> v2{ -1, 3, -2 };

   int result = v1 * trans( v2 );  // Also results in the value 15
   \endcode

// Alternatively, either the \c inner() function, the \c dot() function or the comma operator can
// be used for any combination of vectors (row or column vectors) to perform an inner product:

   \code
   blaze::StaticVector<int,3UL,columnVector> v1{ 2, 5, -1 };
   blaze::StaticVector<int,3UL,rowVector> v2{ -1, 3, -2 };

   // All alternatives for the inner product between a column vector and a row vector
   int result1 = trans( v1 ) * trans( v2 );
   int result2 = inner( v1, v2 );
   int result3 = dot( v1, v2 );
   int result4 = (v1,v2);
   \endcode

// When using the comma operator, please note the brackets embracing the inner product expression.
// Due to the low precedence of the comma operator (lower even than the assignment operator) these
// brackets are strictly required for a correct evaluation of the inner product.
//
//
// \n \section outer_product Outer Product
// <hr>
//
// The multiplication between a column vector and a row vector results in the outer product of
// the two vectors:

   \code
   blaze::StaticVector<int,3UL,columnVector> v1{ 2, 5, -1 };
   blaze::DynamicVector<int,rowVector> v2{ -1, 3, -2 };

   StaticMatrix<int,3UL,3UL> M1 = v1 * v2;
   \endcode

// The \c trans() function can be used to transpose a vector as necessary:

   \code
   blaze::StaticVector<int,3UL,rowVector> v1{ 2, 5, -1 };
   blaze::StaticVector<int,3UL,rowVector> v2{ -1, 3, -2 };

   StaticMatrix<int,3UL,3UL> M1 = trans( v1 ) * v2;  // Outer product; the result is a matrix, not a scalar
   \endcode

// Alternatively, the \c outer() function can be used for any combination of vectors (row or column
// vectors) to perform an outer product:

   \code
   blaze::StaticVector<int,3UL,rowVector> v1{ 2, 5, -1 };
   blaze::StaticVector<int,3UL,rowVector> v2{ -1, 3, -2 };

   StaticMatrix<int,3UL,3UL> M1 = outer( v1, v2 );  // Outer product between two row vectors
   \endcode

// \n \section cross_product Cross Product
// <hr>
//
// Two vectors with the same transpose flag can be multiplied via the cross product. The cross
// product between two vectors \f$ a \f$ and \f$ b \f$ is defined as

                     \f[
                     \left(\begin{array}{*{1}{c}}
                     c_0 \\
                     c_1 \\
                     c_2 \\
                     \end{array}\right)
                     =
                     \left(\begin{array}{*{1}{c}}
                     a_1 b_2 - a_2 b_1 \\
                     a_2 b_0 - a_0 b_2 \\
                     a_0 b_1 - a_1 b_0 \\
                     \end{array}\right).
                     \f]

// Due to the absence of a \f$ \times \f$ operator in the C++ language, the cross product is
// realized via the \c cross() function. Alternatively, the modulo operator (i.e. \c operator%)
// can be used in case infix notation is required:

   \code
   blaze::StaticVector<int,3UL,columnVector> v1{ 2, 5, -1 };
   blaze::DynamicVector<int,columnVector> v2{ -1, 3, -2 };

   blaze::StaticVector<int,3UL,columnVector> v3( cross( v1, v2 ) );
   blaze::StaticVector<int,3UL,columnVector> v4( v1 % v2 );
   \endcode

// Please note that the cross product is restricted to three dimensional (dense and sparse)
// column vectors.
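//
// For the two vectors used in the example above, the cross product evaluates to

                     \f[
                     \left(\begin{array}{*{1}{c}}
                     2 \\
                     5 \\
                     -1 \\
                     \end{array}\right)
                     \times
                     \left(\begin{array}{*{1}{c}}
                     -1 \\
                     3 \\
                     -2 \\
                     \end{array}\right)
                     =
                     \left(\begin{array}{*{1}{c}}
                     5 \cdot (-2) - (-1) \cdot 3 \\
                     (-1) \cdot (-1) - 2 \cdot (-2) \\
                     2 \cdot 3 - 5 \cdot (-1) \\
                     \end{array}\right)
                     =
                     \left(\begin{array}{*{1}{c}}
                     -7 \\
                     5 \\
                     11 \\
                     \end{array}\right),
                     \f]

// i.e. both \c v3 and \c v4 contain the elements (-7,5,11).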
// // \n Previous: \ref scalar_multiplication &nbsp; &nbsp; Next: \ref vector_vector_division */ //************************************************************************************************* //**Vector/Vector Division************************************************************************* /*!\page vector_vector_division Vector/Vector Division // // \n \section componentwise_division Componentwise Division // <hr> // // Dividing a vector by a dense vector with the same transpose flag (i.e. either blaze::columnVector // or blaze::rowVector) via the division operator results in a componentwise division: \code using blaze::DynamicVector; using blaze::CompressedVector; CompressedVector<int,columnVector> v1( 17UL ); DynamicVector<int,columnVector> v2( 17UL ); StaticVector<double,10UL,rowVector> v3; DynamicVector<double,rowVector> v4( 10UL ); // ... Initialization of the vectors CompressedVector<int,columnVector> v5( v1 / v2 ); // Componentwise division of a sparse and a // dense column vector. The result is a sparse // column vector. DynamicVector<double,rowVector> v6( v3 / v4 ); // Componentwise division of two dense row // vectors. The result is a dense row vector. \endcode // Note that all values of the divisor must be non-zero and that no checks are performed to assert // this precondition! // // \n Previous: \ref vector_vector_multiplication &nbsp; &nbsp; Next: \ref matrix_vector_multiplication */ //************************************************************************************************* //**Matrix/Vector Multiplication******************************************************************* /*!\page matrix_vector_multiplication Matrix/Vector Multiplication // // In \b Blaze matrix/vector multiplications can be as intuitively formulated as in mathematical // textbooks. Just as in textbooks there are two different multiplications between a matrix and // a vector: a matrix/column vector multiplication and a row vector/matrix multiplication: \code using blaze::StaticVector; using blaze::DynamicVector; using blaze::DynamicMatrix; DynamicMatrix<int> M1( 39UL, 12UL ); StaticVector<int,12UL,columnVector> v1; // ... Initialization of the matrix and the vector DynamicVector<int,columnVector> v2 = M1 * v1; // Matrix/column vector multiplication DynamicVector<int,rowVector> v3 = trans( v1 ) * M1; // Row vector/matrix multiplication \endcode // Note that the storage order of the matrix poses no restrictions on the operation. Also note, // that the highest performance for a multiplication between a dense matrix and a dense vector can // be achieved if both the matrix and the vector have the same scalar element type. // // \n Previous: \ref vector_vector_division &nbsp; &nbsp; Next: \ref matrix_matrix_multiplication */ //************************************************************************************************* //**Matrix/Matrix Multiplication******************************************************************* /*!\page matrix_matrix_multiplication Matrix/Matrix Multiplication // // \n \section schur_product Componentwise Multiplication / Schur Product // <hr> // // Multiplying two matrices with the same dimensions (i.e. the same number of rows and columns) // via the modulo operator results in a componentwise multiplication (Schur product) of the two // matrices: \code using blaze::DynamicMatrix; using blaze::CompressedMatrix; DynamicMatrix<double> M1( 28UL, 35UL ); CompressedMatrix<float> M2( 28UL, 35UL ); // ... 
Initialization of the matrices DynamicMatrix<double> M3 = M1 % M2; \endcode // \n \section matrix_product Matrix Product // <hr> // // The matrix/matrix product can be formulated exactly as in mathematical textbooks: \code using blaze::DynamicMatrix; using blaze::CompressedMatrix; DynamicMatrix<double> M1( 45UL, 85UL ); CompressedMatrix<float> M2( 85UL, 37UL ); // ... Initialization of the matrices DynamicMatrix<double> M3 = M1 * M2; \endcode // The storage order of the two matrices poses no restrictions on the operation, all variations // are possible. It is also possible to multiply two matrices with different element type, as // long as the element types themselves can be multiplied and added. Note however that the // highest performance for a multiplication between two matrices can be expected for two // matrices with the same scalar element type. // // In case the resulting matrix is known to be symmetric, Hermitian, lower triangular, upper // triangular, or diagonal, the computation can be optimized by explicitly declaring the // multiplication as symmetric, Hermitian, lower triangular, upper triangular, or diagonal by // means of the \ref matrix_operations_declaration_operations : \code using blaze::DynamicMatrix; DynamicMatrix<double> M1, M2, M3; // ... Initialization of the square matrices M3 = declsym ( M1 * M2 ); // Declare the result of the matrix multiplication as symmetric M3 = declherm( M1 * M2 ); // Declare the result of the matrix multiplication as Hermitian M3 = decllow ( M1 * M2 ); // Declare the result of the matrix multiplication as lower triangular M3 = declupp ( M1 * M2 ); // Declare the result of the matrix multiplication as upper triangular M3 = decldiag( M1 * M2 ); // Declare the result of the matrix multiplication as diagonal \endcode // Using a declaration operation on the a multiplication expression can speed up the computation // by a factor of 2. Note however that the caller of the according declaration operation takes // full responsibility for the correctness of the declaration. Falsely declaring a multiplication // as symmetric, Hermitian, lower triangular, upper triangular, or diagonal leads to undefined // behavior! // // \n Previous: \ref matrix_vector_multiplication &nbsp; &nbsp; Next: \ref shared_memory_parallelization */ //************************************************************************************************* //**Shared Memory Parallelization****************************************************************** /*!\page shared_memory_parallelization Shared Memory Parallelization // // For all possible operations \b Blaze tries to achieve maximum performance on a single CPU // core. However, today's CPUs are not single core anymore, but provide several (homogeneous // or heterogeneous) compute cores. In order to fully exploit the performance potential of a // multicore CPU, computations have to be parallelized across all available cores of a CPU. // For this purpose, \b Blaze provides four different shared memory parallelization techniques: // // - \ref hpx_parallelization // - \ref cpp_threads_parallelization // - \ref boost_threads_parallelization // - \ref openmp_parallelization // // When any of the shared memory parallelization techniques is activated, all arithmetic // operations on dense vectors and matrices (including additions, subtractions, multiplications, // divisions, and all componentwise arithmetic operations) and most operations on sparse vectors // and matrices are automatically run in parallel. 
However, in addition, \b Blaze provides means // to enforce the serial execution of specific operations: // // - \ref serial_execution // // \n Previous: \ref matrix_matrix_multiplication &nbsp; &nbsp; Next: \ref hpx_parallelization */ //************************************************************************************************* //**HPX Parallelization**************************************************************************** /*!\page hpx_parallelization HPX Parallelization // // \tableofcontents // // // The first shared memory parallelization provided with \b Blaze is based on // <a href="http://stellar.cct.lsu.edu/projects/hpx/">HPX</a>. // // // \n \section hpx_setup HPX Setup // <hr> // // In order to enable the HPX-based parallelization, the following steps have to be taken: First, // the \c BLAZE_USE_HPX_THREADS command line argument has to be explicitly specified during // compilation: \code ... -DBLAZE_USE_HPX_THREADS ... \endcode // Second, the HPX library and depending libraries such as Boost, hwloc, etc. have to be linked. // And third, the HPX threads have to be initialized by a call to the \c hpx::init() function (see // the <a href="http://stellar.cct.lsu.edu/files/hpx_0.9.0/docs/hpx/tutorial.html">HPX tutorial</a> // for further details). These three actions will cause the \b Blaze library to automatically try // to run all operations in parallel with the specified number of HPX threads. // // Note that the HPX-based parallelization has priority over the OpenMP-based, C++11 thread-based, // and Boost thread-based parallelizations, i.e. is preferred in case multiple parallelizations // are enabled in combination with the HPX thread parallelization. // // The number of threads used by the HPX backend has to be specified via the command line: \code ... --hpx:threads 4 ... \endcode // Please note that the \b Blaze library does not limit the available number of threads. Therefore // it is in YOUR responsibility to choose an appropriate number of threads. The best performance, // though, can be expected if the specified number of threads matches the available number of // cores. // // In order to query the number of threads used for the parallelization of operations, the // \c getNumThreads() function can be used: \code const size_t threads = blaze::getNumThreads(); \endcode // In the context of HPX threads, the function will return the actual number of threads used by // the HPX subsystem. // // // \n \section hpx_configuration HPX Configuration // <hr> // // As in case of the other shared memory parallelizations \b Blaze is not unconditionally running // an operation in parallel (see for instance \ref openmp_parallelization). Only in case a given // operation is large enough and exceeds a certain threshold the operation is executed in parallel. // All thresholds related to the HPX-based parallelization are contained within the configuration // file <tt><blaze/config/Thresholds.h></tt>. // // Please note that these thresholds are highly sensitiv to the used system architecture and // the shared memory parallelization technique. Therefore the default values cannot guarantee // maximum performance for all possible situations and configurations. They merely provide a // reasonable standard for the current CPU generation. Also note that the provided defaults // have been determined using the OpenMP parallelization and require individual adaption for // the HPX-based parallelization. 
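//
// Putting the setup steps above together, a minimal program sketch could look as follows. Note
// that \c hpx_main() and \c hpx::finalize() belong to the HPX API (see the HPX tutorial linked
// above), not to \b Blaze, and the exact initialization idiom should be taken from the HPX
// documentation of the installed version. The program would be compiled with
// \c -DBLAZE_USE_HPX_THREADS and started with \c --hpx:threads as shown above:

   \code
   #include <hpx/hpx_init.hpp>
   #include <blaze/Blaze.h>

   int hpx_main( int argc, char* argv[] )
   {
      blaze::DynamicVector<double> a( 1000000UL, 1.0 ), b( 1000000UL, 2.0 ), c;
      c = a + b;  // Executed in parallel by the HPX backend if the operation exceeds the threshold

      return hpx::finalize();
   }

   int main( int argc, char* argv[] )
   {
      return hpx::init( argc, argv );  // Initializes the HPX threads as described above
   }
   \endcode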
// // \n Previous: \ref shared_memory_parallelization &nbsp; &nbsp; Next: \ref cpp_threads_parallelization */ //************************************************************************************************* //**C++11 Thread Parallelization******************************************************************* /*!\page cpp_threads_parallelization C++11 Thread Parallelization // // \tableofcontents // // // In addition to the HPX-based shared memory parallelization, starting with \b Blaze 2.1, // \b Blaze also provides a shared memory parallelization based on C++11 threads. // // // \n \section cpp_threads_setup C++11 Thread Setup // <hr> // // In order to enable the C++11 thread-based parallelization, first the according C++11-specific // compiler flags have to be used and second the \c BLAZE_USE_CPP_THREADS command line argument // has to be explicitly specified. For instance, in case of the GNU C++ and Clang compilers the // compiler flags have to be extended by \code ... -std=c++11 -DBLAZE_USE_CPP_THREADS ... \endcode // This simple action will cause the \b Blaze library to automatically try to run all operations // in parallel with the specified number of C++11 threads. Note that in case both HPX and C++11 // threads are enabled on the command line, the HPX-based parallelization has priority and is // preferred. // // The number of threads can be either specified via the environment variable \c BLAZE_NUM_THREADS \code export BLAZE_NUM_THREADS=4 // Unix systems set BLAZE_NUM_THREADS=4 // Windows systems \endcode // or alternatively via the \c setNumThreads() function provided by the \b Blaze library: \code blaze::setNumThreads( 4 ); \endcode // Please note that the \b Blaze library does not limit the available number of threads. Therefore // it is YOUR responsibility to choose an appropriate number of threads. The best performance, // though, can be expected if the specified number of threads matches the available number of // cores. // // In order to query the number of threads used for the parallelization of operations, the // \c getNumThreads() function can be used: \code const size_t threads = blaze::getNumThreads(); \endcode // In the context of C++11 threads, the function will return the previously specified number of // threads. // // // \n \section cpp_threads_configuration C++11 Thread Configuration // <hr> // // As in case of the OpenMP-based parallelization \b Blaze is not unconditionally running an // operation in parallel. In case \b Blaze deems the parallel execution as counterproductive for // the overall performance, the operation is executed serially. One of the main reasons for not // executing an operation in parallel is the size of the operands. For instance, a vector addition // is only executed in parallel if the size of both vector operands exceeds a certain threshold. // Otherwise, the performance could seriously decrease due to the overhead caused by the thread // setup. However, in order to be able to adjust the \b Blaze library to a specific system, it // is possible to configure these thresholds manually. All thresholds are contained within the // configuration file <tt><blaze/config/Thresholds.h></tt>. // // Please note that these thresholds are highly sensitive to the used system architecture and // the shared memory parallelization technique. Therefore the default values cannot guarantee // maximum performance for all possible situations and configurations. They merely provide a // reasonable standard for the current CPU generation.
Also note that the provided defaults // have been determined using the OpenMP parallelization and require individual adaptation for // the C++11 thread parallelization. // // // \n \section cpp_threads_known_issues Known Issues // <hr> // // There is a known issue in Visual Studio 2012 and 2013 that may cause C++11 threads to hang // if their destructor is executed after the \c main() function: // // http://connect.microsoft.com/VisualStudio/feedback/details/747145 // // Unfortunately, the C++11 parallelization of the \b Blaze library is affected by this bug. // In order to circumvent this problem, \b Blaze provides the \c shutDownThreads() function, // which can be used to manually destroy all threads at the end of the \c main() function: \code int main() { // ... Using the C++11 thread parallelization of Blaze shutDownThreads(); } \endcode // Please note that this function may only be used at the end of the \c main() function. After // this function no further computation may be executed! Also note that this function has an // effect for Visual Studio compilers only and doesn't need to be used with any other compiler. // // \n Previous: \ref hpx_parallelization &nbsp; &nbsp; Next: \ref boost_threads_parallelization */ //************************************************************************************************* //**Boost Thread Parallelization******************************************************************* /*!\page boost_threads_parallelization Boost Thread Parallelization // // \tableofcontents // // // The third available shared memory parallelization provided with \b Blaze is based // on <a href="https://www.boost.org/doc/libs/1_68_0/doc/html/thread.html">Boost threads</a>. // // // \n \section boost_threads_setup Boost Thread Setup // <hr> // // In order to enable the Boost thread-based parallelization, two steps have to be taken: First, // the \c BLAZE_USE_BOOST_THREADS command line argument has to be explicitly specified during // compilation: \code ... -DBLAZE_USE_BOOST_THREADS ... \endcode // Second, the according Boost libraries have to be linked. These two simple actions will cause // the \b Blaze library to automatically try to run all operations in parallel with the specified // number of Boost threads. Note that the HPX-based and C++11 thread-based parallelizations have // priority, i.e. are preferred in case either is enabled in combination with the Boost thread // parallelization. // // The number of threads can be either specified via the environment variable \c BLAZE_NUM_THREADS \code export BLAZE_NUM_THREADS=4 // Unix systems set BLAZE_NUM_THREADS=4 // Windows systems \endcode // or alternatively via the \c setNumThreads() function provided by the \b Blaze library: \code blaze::setNumThreads( 4 ); \endcode // Please note that the \b Blaze library does not limit the available number of threads. Therefore // it is YOUR responsibility to choose an appropriate number of threads. The best performance, // though, can be expected if the specified number of threads matches the available number of // cores. // // In order to query the number of threads used for the parallelization of operations, the // \c getNumThreads() function can be used: \code const size_t threads = blaze::getNumThreads(); \endcode // In the context of Boost threads, the function will return the previously specified number of // threads.
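// The following minimal sketch summarizes the thread-based setup (a sketch, assuming the translation unit has been compiled with \c -DBLAZE_USE_BOOST_THREADS or \c -DBLAZE_USE_CPP_THREADS and, in the Boost case, linked against the Boost thread library):

\code
#include <blaze/Blaze.h>

int main()
{
   blaze::setNumThreads( 4 );  // Requests four threads for all subsequent parallel operations

   blaze::DynamicMatrix<double> A( 1000UL, 1000UL ), B( 1000UL, 1000UL ), C;
   // ... Initialization of the matrices A and B

   C = A * B;  // Executed in parallel with the specified number of threads

   const size_t threads = blaze::getNumThreads();  // Returns 4, i.e. the previously specified number of threads
   // ...
}
\endcode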
// // // \n \section boost_threads_configuration Boost Thread Configuration // <hr> // // As in case of the other shared memory parallelizations \b Blaze is not unconditionally running // an operation in parallel (see \ref openmp_parallelization or \ref cpp_threads_parallelization). // All thresholds related to the Boost thread parallelization are also contained within the // configuration file <tt><blaze/config/Thresholds.h></tt>. // // Please note that these thresholds are highly sensitive to the used system architecture and // the shared memory parallelization technique. Therefore the default values cannot guarantee // maximum performance for all possible situations and configurations. They merely provide a // reasonable standard for the current CPU generation. Also note that the provided defaults // have been determined using the OpenMP parallelization and require individual adaptation for // the Boost thread parallelization. // // \n Previous: \ref cpp_threads_parallelization &nbsp; &nbsp; Next: \ref openmp_parallelization */ //************************************************************************************************* //**OpenMP Parallelization************************************************************************* /*!\page openmp_parallelization OpenMP Parallelization // // \tableofcontents // // // The fourth and final shared memory parallelization provided with \b Blaze is based on // <a href="https://www.openmp.org">OpenMP</a>. // // // \n \section openmp_setup OpenMP Setup // <hr> // // To enable the OpenMP-based parallelization, all that needs to be done is to explicitly specify // the use of OpenMP on the command line: \code -fopenmp // GNU/Clang C++ compiler -openmp // Intel C++ compiler /openmp // Visual Studio \endcode // This simple action will cause the \b Blaze library to automatically try to run all operations // in parallel with the specified number of threads. Note however that the HPX-based, the C++11 // thread-based, and the Boost thread-based parallelizations have priority, i.e. are preferred in // case any of them is enabled in combination with the OpenMP thread parallelization. // // As common for OpenMP, the number of threads can be specified either via an environment variable \code export OMP_NUM_THREADS=4 // Unix systems set OMP_NUM_THREADS=4 // Windows systems \endcode // or via an explicit call to the \c omp_set_num_threads() function: \code omp_set_num_threads( 4 ); \endcode // Alternatively, the number of threads can also be specified via the \c setNumThreads() function // provided by the \b Blaze library: \code blaze::setNumThreads( 4 ); \endcode // Please note that the \b Blaze library does not limit the available number of threads. Therefore // it is YOUR responsibility to choose an appropriate number of threads. The best performance, // though, can be expected if the specified number of threads matches the available number of // cores. // // In order to query the number of threads used for the parallelization of operations, the // \c getNumThreads() function can be used: \code const size_t threads = blaze::getNumThreads(); \endcode // In the context of OpenMP, the function returns the maximum number of threads OpenMP will use // within a parallel region and is therefore equivalent to the \c omp_get_max_threads() function. // // // \n \section openmp_configuration OpenMP Configuration // <hr> // // Note that \b Blaze is not unconditionally running an operation in parallel.
In case \b Blaze // deems the parallel execution as counterproductive for the overall performance, the operation // is executed serially. One of the main reasons for not executing an operation in parallel is // the size of the operands. For instance, a vector addition is only executed in parallel if the // size of both vector operands exceeds a certain threshold. Otherwise, the performance could // seriously decrease due to the overhead caused by the thread setup. However, in order to be // able to adjust the \b Blaze library to a specific system, it is possible to configure these // thresholds manually. All shared memory thresholds are contained within the configuration file // <tt><blaze/config/Thresholds.h></tt>. // // Please note that these thresholds are highly sensitive to the used system architecture and // the shared memory parallelization technique (see also \ref cpp_threads_parallelization and // \ref boost_threads_parallelization). Therefore the default values cannot guarantee maximum // performance for all possible situations and configurations. They merely provide a reasonable // standard for the current CPU generation. // // // \n \section openmp_first_touch First Touch Policy // <hr> // // So far the \b Blaze library does not (yet) automatically initialize dynamic memory according // to the first touch principle. Consider for instance the following vector triad example: \code using blaze::columnVector; const size_t N( 1000000UL ); blaze::DynamicVector<double,columnVector> a( N ), b( N ), c( N ), d( N ); // Initialization of the vectors b, c, and d for( size_t i=0UL; i<N; ++i ) { b[i] = rand<double>(); c[i] = rand<double>(); d[i] = rand<double>(); } // Performing a vector triad a = b + c * d; \endcode // If this code, which is prototypical for many OpenMP applications that have not been optimized // for ccNUMA architectures, is run across several locality domains (LD), it will not scale // beyond the maximum performance achievable on a single LD if the working set does not fit into // the cache. This is because the initialization loop is executed by a single thread, writing to // \c b, \c c, and \c d for the first time. Hence, all memory pages belonging to those arrays will // be mapped into a single LD. // // As mentioned above, this problem can be solved by performing vector initialization in parallel: \code // ... // Initialization of the vectors b, c, and d #pragma omp parallel for for( size_t i=0UL; i<N; ++i ) { b[i] = rand<double>(); c[i] = rand<double>(); d[i] = rand<double>(); } // ... \endcode // This simple modification makes a huge difference on ccNUMA in memory-bound situations (as for // instance in all BLAS level 1 operations and partially BLAS level 2 operations). Therefore, in // order to achieve the maximum possible performance, it is imperative to initialize the memory // according to the later use of the data structures. // // // \n \section openmp_limitations Limitations of the OpenMP Parallelization // <hr> // // There are a few important limitations to the current \b Blaze OpenMP parallelization. The first // one involves the explicit use of an OpenMP parallel region (see \ref openmp_parallel), the // other one the OpenMP \c sections directive (see \ref openmp_sections).
// // // \n \subsection openmp_parallel The Parallel Directive // // In OpenMP, threads are explicitly spawned via an OpenMP parallel directive: \code // Serial region, executed by a single thread #pragma omp parallel { // Parallel region, executed by the specified number of threads } // Serial region, executed by a single thread \endcode // Conceptually, the specified number of threads (see \ref openmp_setup) is created every time a // parallel directive is encountered. Therefore, from a performance point of view, it seems to be // beneficial to use a single OpenMP parallel directive for several operations: \code blaze::DynamicVector<double> x, y1, y2; blaze::DynamicMatrix<double> A, B; #pragma omp parallel { y1 = A * x; y2 = B * x; } \endcode // Unfortunately, this optimization approach is not allowed within the \b Blaze library. More // explicitly, it is not allowed to put an operation into a parallel region. The reason is that // the entire code contained within a parallel region is executed by all threads. Although this // appears to just comprise the contained computations, a computation (or more specifically the // assignment of an expression to a vector or matrix) can contain additional logic that must not // be handled by multiple threads (as for instance memory allocations, setup of temporaries, etc.). // Therefore it is not possible to manually start a parallel region for several operations, but // \b Blaze will spawn threads automatically, depending on the specifics of the operation at hand // and the given operands. // // \n \subsection openmp_sections The Sections Directive // // OpenMP provides several work-sharing constructs to distribute work among threads. One of these // constructs is the \c sections directive: \code blaze::DynamicVector<double> x, y1, y2; blaze::DynamicMatrix<double> A, B; // ... Resizing and initialization #pragma omp sections { #pragma omp section y1 = A * x; #pragma omp section y2 = B * x; } \endcode // In this example, two threads are used to compute two distinct matrix/vector multiplications // concurrently. Thereby each of the \c sections is executed by exactly one thread. // // Unfortunately \b Blaze does not support concurrent parallel computations and therefore this // approach does not work with any of the \b Blaze parallelization techniques. All techniques // (including the C++11 and Boost thread parallelizations; see \ref cpp_threads_parallelization // and \ref boost_threads_parallelization) are optimized for the parallel computation of an // operation within a single thread of execution. This means that \b Blaze tries to use all // available threads to compute the result of a single operation as efficiently as possible. // Therefore, for this special case, it is advisable to disable all \b Blaze parallelizations // and to let \b Blaze compute all operations within a \c sections directive in serial. This can // be done by either completely disabling the \b Blaze parallelization (see \ref serial_execution) // or by selectively serializing all operations within a \c sections directive via the \c serial() // function: \code blaze::DynamicVector<double> x, y1, y2; blaze::DynamicMatrix<double> A, B; // ... Resizing and initialization #pragma omp sections { #pragma omp section y1 = serial( A * x ); #pragma omp section y2 = serial( B * x ); } \endcode // Please note that the use of the \c BLAZE_SERIAL_SECTION (see also \ref serial_execution) does // NOT work in this context!
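// In many cases the simplest alternative (a sketch based on the observation above that \b Blaze already uses all available threads for a single operation) is to drop the \c sections directive entirely and to execute the operations one after another, letting \b Blaze parallelize each of them internally:

\code
blaze::DynamicVector<double> x, y1, y2;
blaze::DynamicMatrix<double> A, B;
// ... Resizing and initialization

y1 = A * x;  // Parallelized internally by Blaze
y2 = B * x;  // Parallelized internally by Blaze
\endcode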
// // \n Previous: \ref boost_threads_parallelization &nbsp; &nbsp; Next: \ref serial_execution */ //************************************************************************************************* //**Serial Execution******************************************************************************* /*!\page serial_execution Serial Execution // // Sometimes it may be necessary to enforce the serial execution of specific operations. For this // purpose, the \b Blaze library offers three possible options: the serialization of a single // expression via the \c serial() function, the serialization of a block of expressions via the // \c BLAZE_SERIAL_SECTION, and the general deactivation of the parallel execution. // // // \n \section serial_execution_serial_expression Option 1: Serialization of a Single Expression // <hr> // // The first option is the serialization of a specific operation via the \c serial() function: \code blaze::DynamicMatrix<double> A, B, C; // ... Resizing and initialization C = serial( A + B ); \endcode // \c serial() enforces the serial evaluation of the enclosed expression. It can be used on any // kind of dense or sparse vector or matrix expression. // // // \n \section serial_execution_serial_section Option 2: Serialization of Multiple Expressions // <hr> // // The second option is the temporary and local enforcement of a serial execution via the // \c BLAZE_SERIAL_SECTION: \code using blaze::rowMajor; using blaze::columnVector; blaze::DynamicMatrix<double,rowMajor> A; blaze::DynamicVector<double,columnVector> b, c, d, x, y, z; // ... Resizing and initialization // Parallel execution // If possible and beneficial for performance the following operation is executed in parallel. x = A * b; // Serial execution // All operations executed within the serial section are guaranteed to be executed in // serial (even if a parallel execution would be possible and/or beneficial). BLAZE_SERIAL_SECTION { y = A * c; z = A * d; } // Parallel execution continued // ... \endcode // Within the scope of the \c BLAZE_SERIAL_SECTION, all operations are guaranteed to run in serial. // Outside the scope of the serial section, all operations are run in parallel (if beneficial for // the performance). // // Note that the \c BLAZE_SERIAL_SECTION must only be used within a single thread of execution. // The use of the serial section within several concurrent threads will result in undefined behavior! // // // \n \section serial_execution_deactivate_parallelism Option 3: Deactivation of Parallel Execution // <hr> // // The third option is the general deactivation of the parallel execution (even in case OpenMP is // enabled on the command line). This can be achieved via the \c BLAZE_USE_SHARED_MEMORY_PARALLELIZATION // switch in the <tt>./blaze/config/SMP.h</tt> configuration file: \code #define BLAZE_USE_SHARED_MEMORY_PARALLELIZATION 1 \endcode // In case the \c BLAZE_USE_SHARED_MEMORY_PARALLELIZATION switch is set to 0, the shared memory // parallelization is deactivated altogether. // // \n Previous: \ref openmp_parallelization &nbsp; &nbsp; Next: \ref serialization */ //************************************************************************************************* //**Serialization********************************************************************************** /*!\page serialization Serialization // // Sometimes it is necessary to store vectors and/or matrices on disk, for instance for storing // results or for sharing specific setups with other people.
The \b Blaze math serialization // module provides the according functionality to create platform independent, portable, binary // representations of vectors and matrices that can be used to store the \b Blaze data structures // without loss of precision and to reliably transfer them from one machine to another. // // The following two pages explain how to serialize vectors and matrices: // // - \ref vector_serialization // - \ref matrix_serialization // // \n Previous: \ref serial_execution &nbsp; &nbsp; Next: \ref vector_serialization */ //************************************************************************************************* //**Vector Serialization*************************************************************************** /*!\page vector_serialization Vector Serialization // // The following example demonstrates the (de-)serialization of dense and sparse vectors: \code using blaze::columnVector; using blaze::rowVector; // Serialization of both vectors { blaze::StaticVector<double,5UL,rowVector> d; blaze::CompressedVector<int,columnVector> s; // ... Resizing and initialization // Creating an archive that writes into the file "vectors.blaze" blaze::Archive<std::ofstream> archive( "vectors.blaze" ); // Serialization of both vectors into the same archive. Note that d lies before s! archive << d << s; } // Reconstitution of both vectors { blaze::DynamicVector<double,rowVector> d1; blaze::DynamicVector<int,rowVector> d2; // Creating an archive that reads from the file "vectors.blaze" blaze::Archive<std::ifstream> archive( "vectors.blaze" ); // Reconstituting the former d vector into d1. Note that it is possible to reconstitute // the vector into a different kind of vector (StaticVector -> DynamicVector), but that // the type of elements has to be the same. archive >> d1; // Reconstituting the former s vector into d2. Note that it is even possible to reconstitute // a sparse vector as a dense vector (also the reverse is possible) and that a column vector // can be reconstituted as row vector (and vice versa). Note however that also in this case // the type of elements is the same! archive >> d2; } \endcode // The (de-)serialization of vectors is not restricted to vectors of built-in data types, but can // also be used for vectors with vector or matrix element type: \code // Serialization { blaze::CompressedVector< blaze::DynamicVector< blaze::complex<double> > > vec; // ... Resizing and initialization // Creating an archive that writes into the file "vector.blaze" blaze::Archive<std::ofstream> archive( "vector.blaze" ); // Serialization of the vector into the archive archive << vec; } // Deserialization { blaze::CompressedVector< blaze::DynamicVector< blaze::complex<double> > > vec; // Creating an archive that reads from the file "vector.blaze" blaze::Archive<std::ifstream> archive( "vector.blaze" ); // Reconstitution of the vector from the archive archive >> vec; } \endcode // As the examples demonstrate, the vector serialization offers enormous flexibility.
However, // several actions result in errors: // // - vectors cannot be reconstituted as matrices (and vice versa) // - the element type of the serialized and reconstituted vector must match, which means // that on the source and destination platform the general type (signed/unsigned integral // or floating point) and the size of the type must be exactly the same // - when reconstituting a \c StaticVector, its size must match the size of the serialized vector // // In case an error is encountered during (de-)serialization, a \c std::runtime_error exception is // thrown. // // \n Previous: \ref serialization &nbsp; &nbsp; Next: \ref matrix_serialization */ //************************************************************************************************* //**Matrix Serialization*************************************************************************** /*!\page matrix_serialization Matrix Serialization // // The serialization of matrices works in the same manner as the serialization of vectors. The // following example demonstrates the (de-)serialization of dense and sparse matrices: \code using blaze::rowMajor; using blaze::columnMajor; // Serialization of both matrices { blaze::StaticMatrix<double,3UL,5UL,rowMajor> D; blaze::CompressedMatrix<int,columnMajor> S; // ... Resizing and initialization // Creating an archive that writes into the file "matrices.blaze" blaze::Archive<std::ofstream> archive( "matrices.blaze" ); // Serialization of both matrices into the same archive. Note that D lies before S! archive << D << S; } // Reconstitution of both matrices { blaze::DynamicMatrix<double,rowMajor> D1; blaze::DynamicMatrix<int,rowMajor> D2; // Creating an archive that reads from the file "matrices.blaze" blaze::Archive<std::ifstream> archive( "matrices.blaze" ); // Reconstituting the former D matrix into D1. Note that it is possible to reconstitute // the matrix into a different kind of matrix (StaticMatrix -> DynamicMatrix), but that // the type of elements has to be the same. archive >> D1; // Reconstituting the former S matrix into D2. Note that it is even possible to reconstitute // a sparse matrix as a dense matrix (also the reverse is possible) and that a column-major // matrix can be reconstituted as row-major matrix (and vice versa). Note however that also // in this case the type of elements is the same! archive >> D2; } \endcode // Note that also in case of matrices it is possible to (de-)serialize matrices with vector or // matrix elements: \code // Serialization { blaze::CompressedMatrix< blaze::DynamicMatrix< blaze::complex<double> > > mat; // ...
Resizing and initialization // Creating an archive that writes into the file "matrix.blaze" blaze::Archive<std::ofstream> archive( "matrix.blaze" ); // Serialization of the matrix into the archive archive << mat; } // Deserialization { blaze::CompressedMatrix< blaze::DynamicMatrix< blaze::complex<double> > > mat; // Creating an archive that reads from the file "matrix.blaze" blaze::Archive<std::ifstream> archive( "matrix.blaze" ); // Reconstitution of the matrix from the archive archive >> mat; } \endcode // Note that just as the vector serialization, the matrix serialization is restricted by a // few important rules: // // - matrices cannot be reconstituted as vectors (and vice versa) // - the element type of the serialized and reconstituted matrix must match, which means // that on the source and destination platform the general type (signed/unsigned integral // or floating point) and the size of the type must be exactly the same // - when reconstituting a \c StaticMatrix, the number of rows and columns must match those // of the serialized matrix // // In case an error is encountered during (de-)serialization, a \c std::runtime_error exception is // thrown. // // \n Previous: \ref vector_serialization &nbsp; &nbsp; Next: \ref customization \n */ //************************************************************************************************* //**Customization********************************************************************************** /*!\page customization Customization // // Although \b Blaze tries to work out of the box for every possible setting, it may still be // necessary to adapt the library to specific requirements. The following three pages explain // how to customize the \b Blaze library to your own needs: // // - \ref configuration_files // - \ref vector_and_matrix_customization // - \ref error_reporting_customization // // \n Previous: \ref matrix_serialization &nbsp; &nbsp; Next: \ref configuration_files */ //************************************************************************************************* //**Configuration Files**************************************************************************** /*!\page configuration_files Configuration Files // // \tableofcontents // // // Sometimes it is necessary to adapt \b Blaze to specific requirements. For this purpose // \b Blaze provides several configuration files in the <tt>./blaze/config/</tt> subdirectory, // which provide ample opportunity to customize internal settings, behavior, and thresholds. // This chapter explains the most important of these configuration files. For a complete // overview of all customization opportunities, please go to the configuration files in the // <tt>./blaze/config/</tt> subdirectory or see the complete \b Blaze documentation. // // // \n \section transpose_flag Default Vector Storage // <hr> // // The \b Blaze default is that all vectors are created as column vectors (if not specified // explicitly): \code blaze::StaticVector<double,3UL> x; // Creates a 3-dimensional static column vector \endcode // The header file <tt>./blaze/config/TransposeFlag.h</tt> allows the configuration of the default // vector storage (i.e. the default transpose flag) of all vectors within the \b Blaze library.
// The default transpose flag is specified via the \c BLAZE_DEFAULT_TRANSPOSE_FLAG macro: \code #define BLAZE_DEFAULT_TRANSPOSE_FLAG blaze::columnVector \endcode // Alternatively the default transpose flag can be specified via command line or by defining this // symbol manually before including any \b Blaze header file: \code #define BLAZE_DEFAULT_TRANSPOSE_FLAG blaze::columnVector #include <blaze/Blaze.h> \endcode // Valid settings for \c BLAZE_DEFAULT_TRANSPOSE_FLAG are blaze::rowVector and blaze::columnVector. // // // \n \section storage_order Default Matrix Storage // <hr> // // Matrices are by default created as row-major matrices: \code blaze::StaticMatrix<double,3UL,3UL> A; // Creates a 3x3 row-major matrix \endcode // The header file <tt>./blaze/config/StorageOrder.h</tt> allows the configuration of the default // matrix storage order. Via the \c BLAZE_DEFAULT_STORAGE_ORDER macro the default storage order // for all matrices of the \b Blaze library can be specified. \code #define BLAZE_DEFAULT_STORAGE_ORDER blaze::rowMajor \endcode // Alternatively the default storage order can be specified via command line or by defining this // symbol manually before including any \b Blaze header file: \code #define BLAZE_DEFAULT_STORAGE_ORDER blaze::rowMajor #include <blaze/Blaze.h> \endcode // Valid settings for \c BLAZE_DEFAULT_STORAGE_ORDER are blaze::rowMajor and blaze::columnMajor. // // // \n \section blas_mode BLAS Mode // <hr> // // In order to achieve maximum performance for multiplications with dense matrices, \b Blaze can // be configured to use a BLAS library. Via the following compilation switch in the configuration // file <tt>./blaze/config/BLAS.h</tt> BLAS can be enabled: \code #define BLAZE_BLAS_MODE 1 \endcode // In case the selected BLAS library provides parallel execution, the \c BLAZE_BLAS_IS_PARALLEL // switch should be activated to prevent \b Blaze from parallelizing on its own: \code #define BLAZE_BLAS_IS_PARALLEL 1 \endcode // Alternatively, both settings can be specified via command line or by defining the symbols // manually before including any \b Blaze header file: \code #define BLAZE_BLAS_MODE 1 #define BLAZE_BLAS_IS_PARALLEL 1 #include <blaze/Blaze.h> \endcode // In case no BLAS library is available, \b Blaze will still work and will not be reduced in // functionality, but performance may be limited. // // // \n \section cache_size Cache Size // <hr> // // The optimization of several \b Blaze compute kernels depends on the cache size of the target // architecture. By default, \b Blaze assumes a cache size of 3 MiByte. However, for optimal // speed the exact cache size of the system should be provided via the \c BLAZE_CACHE_SIZE macro in the // <tt>./blaze/config/CacheSize.h</tt> configuration file: \code #define BLAZE_CACHE_SIZE 3145728UL \endcode // The cache size can also be specified via command line or by defining this symbol manually // before including any \b Blaze header file: \code #define BLAZE_CACHE_SIZE 3145728UL #include <blaze/Blaze.h> \endcode // \n \section vectorization Vectorization // <hr> // // In order to achieve maximum performance and to exploit the compute power of a target platform // the \b Blaze library attempts to vectorize all linear algebra operations by SSE, AVX, and/or // AVX-512 intrinsics, depending on which instruction set is available.
However, it is possible // to disable the vectorization entirely by the compile time switch in the configuration file // <tt>./blaze/config/Vectorization.h</tt>: \code #define BLAZE_USE_VECTORIZATION 1 \endcode // It is also possible to (de-)activate vectorization via command line or by defining this symbol // manually before including any \b Blaze header file: \code #define BLAZE_USE_VECTORIZATION 1 #include <blaze/Blaze.h> \endcode // In case the switch is set to 1, vectorization is enabled and the \b Blaze library is allowed // to use intrinsics to speed up computations. In case the switch is set to 0, vectorization is // disabled entirely and the \b Blaze library chooses default, non-vectorized functionality for // the operations. Note that deactivating the vectorization may pose a severe performance // limitation for a large number of operations! // // // \n \section thresholds Thresholds // <hr> // // For many computations \b Blaze distinguishes between small and large vectors and matrices. // This separation is especially important for the parallel execution of computations, since // the use of several threads only pays off for sufficiently large vectors and matrices. // Additionally, it also enables \b Blaze to select kernels that are optimized for a specific // size. // // In order to distinguish between small and large data structures \b Blaze provides several // thresholds that can be adapted to the characteristics of the target platform. For instance, // the \c DMATDVECMULT_THRESHOLD specifies the threshold between the application of the custom // \b Blaze kernels for small dense matrix/dense vector multiplications and the BLAS kernels // for large multiplications. All thresholds, including the thresholds for the OpenMP- and // thread-based parallelization, are contained within the configuration file // <tt><blaze/config/Thresholds.h></tt>. // // // \n \section padding Padding // <hr> // // By default the \b Blaze library uses padding for all dense vectors and matrices in order to // achieve maximum performance in all operations. Due to padding, the proper alignment of data // elements can be guaranteed and the need for remainder loops is minimized. However, on the // downside padding introduces an additional memory overhead, which can be large depending on // the used data type. // // The configuration file <tt>./blaze/config/Optimizations.h</tt> provides a compile time switch // that can be used to (de-)activate padding: \code #define BLAZE_USE_PADDING 1 \endcode // Alternatively it is possible to (de-)activate padding via command line or by defining this // symbol manually before including any \b Blaze header file: \code #define BLAZE_USE_PADDING 1 #include <blaze/Blaze.h> \endcode // If \c BLAZE_USE_PADDING is set to 1 padding is enabled for all dense vectors and matrices, if // it is set to 0 padding is disabled. Note however that disabling padding can considerably reduce // the performance of all dense vector and matrix operations! // // // \n \section streaming Streaming (Non-Temporal Stores) // <hr> // // For vectors and matrices that don't fit into the cache anymore non-temporal stores can provide // a significant performance advantage of about 20%. However, this advantage is only in effect in // case the memory bandwidth of the target architecture is maxed out. If the target architecture's // memory bandwidth cannot be exhausted the use of non-temporal stores can decrease performance // instead of increasing it. 
// // The configuration file <tt>./blaze/config/Optimizations.h</tt> provides a compile time switch // that can be used to (de-)activate streaming: \code #define BLAZE_USE_STREAMING 1 \endcode // Alternatively streaming can be (de-)activated via command line or by defining this symbol // manually before including any \b Blaze header file: \code #define BLAZE_USE_STREAMING 1 #include <blaze/Blaze.h> \endcode // If \c BLAZE_USE_STREAMING is set to 1 streaming is enabled, if it is set to 0 streaming is // disabled. It is recommended to consult the target architecture's white papers to decide whether // streaming is beneficial or hurtful for performance. // // // \n Previous: \ref customization &nbsp; &nbsp; Next: \ref vector_and_matrix_customization \n */ //************************************************************************************************* //**Customization of Vectors and Matrices********************************************************** /*!\page vector_and_matrix_customization Customization of Vectors and Matrices // // \tableofcontents // // // \n \section custom_data_members Custom Data Members // <hr> // // So far the \b Blaze library does not provide a lot of flexibility to customize the data // members of existing \ref vector_types and \ref matrix_types. However, to some extent it is // possible to customize vectors and matrices by inheritance. The following example gives an // impression of how to create a simple variation of \ref matrix_types_custom_matrix, which // automatically takes care of acquiring and releasing custom memory. \code template< typename Type // Data type of the matrix , bool SO = defaultStorageOrder > // Storage order class MyCustomMatrix : public CustomMatrix< Type, unaligned, unpadded, SO > { public: explicit inline MyCustomMatrix( size_t m, size_t n ) : CustomMatrix<Type,unaligned,unpadded,SO>() , array_( new Type[m*n] ) { this->reset( array_.get(), m, n ); } private: std::unique_ptr<Type[]> array_; }; \endcode // Please note that this is a simplified example with the intent to show the general approach. // The number of constructors, the memory acquisition, and the kind of memory management can of // course be adapted to specific requirements. Also, please note that since none of the \b Blaze // vectors and matrices have virtual destructors, polymorphic destruction cannot be used. // // // \n \section custom_operations Custom Operations // <hr> // // There are two approaches to extend \b Blaze with custom operations. First, the \c map() // functions provide the possibility to execute componentwise custom operations on vectors and // matrices. Second, it is possible to add customized free functions. // // \n \subsection custom_operations_map The map() Functions // // Via the unary and binary \c map() functions it is possible to execute componentwise custom // operations on vectors and matrices. The unary \c map() function can be used to apply a custom // operation on each single element of a dense vector or matrix or each non-zero element of a // sparse vector or matrix. For instance, the following example demonstrates a custom square // root computation on a dense matrix: \code blaze::DynamicMatrix<double> A, B; B = map( A, []( double d ) { return std::sqrt( d ); } ); \endcode // The binary \c map() function can be used to apply an operation pairwise to the elements of // two dense vectors or two dense matrices.
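// For instance, the componentwise maximum of two equally sized dense matrices can be computed by passing a binary lambda (a minimal sketch; \c A and \c B are assumed to be properly initialized):

\code
blaze::DynamicMatrix<double> A, B, C;
// ... Resizing and initialization

C = map( A, B, []( double x, double y ) { return std::max( x, y ); } );
\endcode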
The following example demonstrates the merging of // two matrices of double precision values into a matrix of double precision complex numbers: \code blaze::DynamicMatrix<double> real{ { 2.1, -4.2 }, { 1.0, 0.6 } }; blaze::DynamicMatrix<double> imag{ { 0.3, 1.4 }, { 2.9, -3.4 } }; blaze::DynamicMatrix< complex<double> > cplx; // Creating the matrix // ( ( 2.1, 0.3) (-4.2, 1.4) ) // ( ( 1.0, 2.9) ( 0.6, -3.4) ) cplx = map( real, imag, []( double r, double i ){ return complex<double>( r, i ); } ); \endcode // These examples demonstrate the most convenient way of defining a custom operation by // passing a lambda to the \c map() function. Alternatively, it is possible to pass a custom // functor: \code struct Sqrt { double operator()( double a ) const { return std::sqrt( a ); } }; B = map( A, Sqrt() ); \endcode // In order for the functor to work in a call to \c map() it must define a function call operator, // which accepts arguments of the type of the according vector or matrix elements. // // Although the operation is automatically parallelized depending on the size of the vector or // matrix, no automatic vectorization is possible. In order to enable vectorization, a \c load() // function can be added to the functor, which handles the vectorized computation. Depending on // the data type this function is passed one of the following \b Blaze SIMD data types: // // <ul> // <li>SIMD data types for fundamental data types // <ul> // <li>\c blaze::SIMDint8: Packed SIMD type for 8-bit signed integral data types</li> // <li>\c blaze::SIMDuint8: Packed SIMD type for 8-bit unsigned integral data types</li> // <li>\c blaze::SIMDint16: Packed SIMD type for 16-bit signed integral data types</li> // <li>\c blaze::SIMDuint16: Packed SIMD type for 16-bit unsigned integral data types</li> // <li>\c blaze::SIMDint32: Packed SIMD type for 32-bit signed integral data types</li> // <li>\c blaze::SIMDuint32: Packed SIMD type for 32-bit unsigned integral data types</li> // <li>\c blaze::SIMDint64: Packed SIMD type for 64-bit signed integral data types</li> // <li>\c blaze::SIMDuint64: Packed SIMD type for 64-bit unsigned integral data types</li> // <li>\c blaze::SIMDfloat: Packed SIMD type for single precision floating point data</li> // <li>\c blaze::SIMDdouble: Packed SIMD type for double precision floating point data</li> // </ul> // </li> // <li>SIMD data types for complex data types // <ul> // <li>\c blaze::SIMDcint8: Packed SIMD type for complex 8-bit signed integral data types</li> // <li>\c blaze::SIMDcuint8: Packed SIMD type for complex 8-bit unsigned integral data types</li> // <li>\c blaze::SIMDcint16: Packed SIMD type for complex 16-bit signed integral data types</li> // <li>\c blaze::SIMDcuint16: Packed SIMD type for complex 16-bit unsigned integral data types</li> // <li>\c blaze::SIMDcint32: Packed SIMD type for complex 32-bit signed integral data types</li> // <li>\c blaze::SIMDcuint32: Packed SIMD type for complex 32-bit unsigned integral data types</li> // <li>\c blaze::SIMDcint64: Packed SIMD type for complex 64-bit signed integral data types</li> // <li>\c blaze::SIMDcuint64: Packed SIMD type for complex 64-bit unsigned integral data types</li> // <li>\c blaze::SIMDcfloat: Packed SIMD type for complex single precision floating point data</li> // <li>\c blaze::SIMDcdouble: Packed SIMD type for complex double precision floating point data</li> // </ul> // </li> // </ul> // // All SIMD types provide the \c value data member for a direct access to the underlying intrinsic // data element.
In the following example, this intrinsic element is passed to the AVX function // \c _mm256_sqrt_pd(): \code struct Sqrt { double operator()( double a ) const { return std::sqrt( a ); } SIMDdouble load( const SIMDdouble& a ) const { return _mm256_sqrt_pd( a.value ); } }; \endcode // In this example, whenever vectorization is generally applicable, the \c load() function is // called instead of the function call operator for as long as the number of remaining elements // is larger-or-equal to the width of the packed SIMD type. In all other cases (which also // includes peel-off and remainder loops) the scalar operation is used. // // Please note that this example has two drawbacks: First, it will only compile in case the // intrinsic \c _mm256_sqrt_pd() function is available (i.e. when AVX is active). Second, the // availability of AVX is not taken into account. The first drawback can be alleviated by making // the \c load() function a function template. The second drawback can be dealt with by adding a // \c simdEnabled() function template to the functor: \code struct Sqrt { double operator()( double a ) const { return std::sqrt( a ); } template< typename T > T load( const T& a ) const { return _mm256_sqrt_pd( a.value ); } template< typename T > static constexpr bool simdEnabled() { #if defined(__AVX__) return true; #else return false; #endif } }; \endcode // The \c simdEnabled() function must be a \c static, \c constexpr function and must return whether // or not vectorization is available for the given data type \c T. In case the function returns // \c true, the \c load() function is used for a vectorized evaluation, in case the function // returns \c false, \c load() is not called. // // Note that this is a simplified example that is only working when used for dense vectors and // matrices with double precision floating point elements. The following code shows the complete // implementation of the according functor that is used within the \b Blaze library. The \b Blaze // \c Sqrt functor is working for all data types that are providing a square root operation: \code namespace blaze { struct Sqrt { template< typename T > BLAZE_ALWAYS_INLINE auto operator()( const T& a ) const { return sqrt( a ); } template< typename T > static constexpr bool simdEnabled() { return HasSIMDSqrt<T>::value; } template< typename T > BLAZE_ALWAYS_INLINE auto load( const T& a ) const { BLAZE_CONSTRAINT_MUST_BE_SIMD_PACK( T ); return sqrt( a ); } }; } // namespace blaze \endcode // The same approach can be taken for binary custom operations. The following code demonstrates // the \c Min functor of the \b Blaze library, which is working for all data types that provide // a \c min() operation: \code struct Min { explicit inline Min() {} template< typename T1, typename T2 > BLAZE_ALWAYS_INLINE decltype(auto) operator()( const T1& a, const T2& b ) const { return min( a, b ); } template< typename T1, typename T2 > static constexpr bool simdEnabled() { return HasSIMDMin<T1,T2>::value; } template< typename T1, typename T2 > BLAZE_ALWAYS_INLINE decltype(auto) load( const T1& a, const T2& b ) const { BLAZE_CONSTRAINT_MUST_BE_SIMD_PACK( T1 ); BLAZE_CONSTRAINT_MUST_BE_SIMD_PACK( T2 ); return min( a, b ); } }; \endcode // For more information on the available \b Blaze SIMD data types and functions, please see the // SIMD module in the complete \b Blaze documentation. 
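// Note that a custom functor may also carry state. The following minimal sketch (the \c Clamp functor is a hypothetical example and not part of the \b Blaze library) restricts all elements of a dense matrix to a given range. Since no \c simdEnabled()/\c load() pair is provided, the operation is evaluated element by element, but it is still automatically parallelized depending on the size of the matrix:

\code
struct Clamp
{
   double lo, hi;

   double operator()( double v ) const
   {
      return std::min( std::max( v, lo ), hi );
   }
};

blaze::DynamicMatrix<double> A, B;
// ... Resizing and initialization

B = map( A, Clamp{ -1.0, 1.0 } );  // Clamps all elements of A to the range [-1,1]
\endcode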
// // \n \subsection custom_operations_free_functions Free Functions // // In order to extend \b Blaze with new functionality it is possible to add free functions. Free // functions can be used either as wrappers around calls to the map() function or to implement // general, non-componentwise operations. The following two examples will demonstrate both ideas. // // The first example shows the \c setToZero() function, which resets a sparse matrix to zero // without affecting the sparsity pattern. It is implemented as a convenience wrapper around // the map() function: \code template< typename MT // Type of the sparse matrix , bool SO > // Storage order void setToZero( blaze::SparseMatrix<MT,SO>& mat ) { (~mat) = blaze::map( ~mat, []( int ){ return 0; } ); } \endcode // The blaze::SparseMatrix class template is the base class for all kinds of sparse matrices and // provides an abstraction from the actual type \c MT of the sparse matrix. However, due to the // <a href="https://en.wikipedia.org/wiki/Curiously_recurring_template_pattern">Curiously Recurring Template Pattern (CRTP)</a> // it also enables a conversion back to the actual type. This downcast is performed via the tilde // operator (i.e. \c operator~()). The template parameter \c SO represents the storage order // (blaze::rowMajor or blaze::columnMajor) of the matrix. // // The second example shows the \c countZeros() function, which counts the number of values, which // are exactly zero, in a dense, row-major matrix: \code template< typename MT > size_t countZeros( blaze::DenseMatrix<MT,rowMajor>& mat ) { const size_t M( (~mat).rows() ); const size_t N( (~mat).columns() ); size_t count( 0UL ); for( size_t i=0UL; i<M; ++i ) { for( size_t j=0UL; j<N; ++j ) { if( blaze::isDefault<strict>( (~mat)(i,j) ) ) ++count; } } return count; } \endcode // The blaze::DenseMatrix class template is the base class for all kinds of dense matrices. Again, // it is possible to perform the conversion to the actual type via the tilde operator. // // The following two listings show the declarations of all vector and matrix base classes, which // can be used for custom free functions: \code template< typename VT // Concrete type of the dense or sparse vector , bool TF > // Transpose flag (blaze::columnVector or blaze::rowVector) class Vector; template< typename VT // Concrete type of the dense vector , bool TF > // Transpose flag (blaze::columnVector or blaze::rowVector) class DenseVector; template< typename VT // Concrete type of the sparse vector , bool TF > // Transpose flag (blaze::columnVector or blaze::rowVector) class SparseVector; \endcode \code template< typename MT // Concrete type of the dense or sparse matrix , bool SO > // Storage order (blaze::rowMajor or blaze::columnMajor) class Matrix; template< typename MT // Concrete type of the dense matrix , bool SO > // Storage order (blaze::rowMajor or blaze::columnMajor) class DenseMatrix; template< typename MT // Concrete type of the sparse matrix , bool SO > // Storage order (blaze::rowMajor or blaze::columnMajor) class SparseMatrix; \endcode // \n \section custom_data_types Custom Data Types // <hr> // // The \b Blaze library tries hard to make the use of custom data types as convenient, easy and // intuitive as possible. However, unfortunately it is not possible to meet the requirements of // all possible data types. Thus it might be necessary to provide \b Blaze with some additional // information about the data type. 
The following sections give an overview of the necessary steps // to enable the use of the hypothetical custom data type \c custom::double_t for vector and // matrix operations. For example: \code blaze::DynamicVector<custom::double_t> a, b, c; // ... Resizing and initialization c = a + b; \endcode // The \b Blaze library assumes that the \c custom::double_t data type provides \c operator+() // for additions, \c operator-() for subtractions, \c operator*() for multiplications and // \c operator/() for divisions. If any of these functions is missing it is necessary to implement // the operator to perform the according operation. For this example we assume that the custom // data type provides the four following functions instead of operators: \code namespace custom { double_t add ( const double_t& a, const double_t b ); double_t sub ( const double_t& a, const double_t b ); double_t mult( const double_t& a, const double_t b ); double_t div ( const double_t& a, const double_t b ); } // namespace custom \endcode // The following implementations will satisfy the requirements of the \b Blaze library: \code inline custom::double_t operator+( const custom::double_t& a, const custom::double_t& b ) { return add( a, b ); } inline custom::double_t operator-( const custom::double_t& a, const custom::double_t& b ) { return sub( a, b ); } inline custom::double_t operator*( const custom::double_t& a, const custom::double_t& b ) { return mult( a, b ); } inline custom::double_t operator/( const custom::double_t& a, const custom::double_t& b ) { return div( a, b ); } \endcode // \b Blaze will use all the information provided with these functions (for instance the return // type) to properly handle the operations. In the rare case that the return type cannot be // automatically determined from the operator it might be additionally necessary to provide a // specialization of the following four \b Blaze class templates: \code namespace blaze { template<> struct AddTrait<custom::double_t,custom::double_t> { using Type = custom::double_t; }; template<> struct SubTrait<custom::double_t,custom::double_t> { using Type = custom::double_t; }; template<> struct MultTrait<custom::double_t,custom::double_t> { using Type = custom::double_t; }; template<> struct DivTrait<custom::double_t,custom::double_t> { using Type = custom::double_t; }; } // namespace blaze \endcode // The same steps are necessary if several custom data types need to be combined (as for instance // \c custom::double_t and \c custom::float_t). Note that in this case both permutations need to // be taken into account: \code custom::double_t operator+( const custom::double_t& a, const custom::float_t& b ); custom::double_t operator+( const custom::float_t& a, const custom::double_t& b ); // ... \endcode // Please note that vectorization is only available for built-in data types and thus custom data types // cannot achieve maximum performance! // // // \n Previous: \ref configuration_files &nbsp; &nbsp; Next: \ref error_reporting_customization \n */ //************************************************************************************************* //**Customization of the Error Reporting Mechanism************************************************* /*!\page error_reporting_customization Customization of the Error Reporting Mechanism // // \tableofcontents // // // \n \section error_reporting_background Background // <hr> // // The default way of \b Blaze to report errors of any kind is to throw a standard exception.
// However, although in general this approach works well, in certain environments and under // special circumstances exceptions may not be the mechanism of choice and a different error // reporting mechanism may be desirable. For this reason, \b Blaze provides several macros, // which enable the customization of the error reporting mechanism. Via these macros it is // possible to replace the standard exceptions by some other exception type or a completely // different approach to report errors. // // // \n \section error_reporting_general_customization Customization of the Reporting Mechanism // <hr> // // In some cases it might be necessary to adapt the entire error reporting mechanism and to // replace it by some other means to signal failure. The primary macro for this purpose is the // \c BLAZE_THROW macro: \code #define BLAZE_THROW( EXCEPTION ) \ throw EXCEPTION \endcode // This macro represents the default mechanism of the \b Blaze library to report errors of any // kind. In order to customize the error reporting mechanism all that needs to be done is to // define the macro prior to including any \b Blaze header file. This will cause the \b Blaze // specific mechanism to be overridden. The following example demonstrates this by replacing // exceptions by a call to a \c log() function and a direct call to abort: \code #define BLAZE_THROW( EXCEPTION ) \ log( "..." ); \ abort() #include <blaze/Blaze.h> \endcode // Doing this will trigger a call to \c log() and an abort instead of throwing an exception // whenever an error (such as an invalid argument) is detected. // // \note It is possible to execute several statements instead of executing a single statement to // throw an exception. Also note that it is recommended to define the macro such that a subsequent // semicolon is required! // // \warning This macro is provided with the intention to assist in adapting \b Blaze to special // conditions and environments. However, the customization of the error reporting mechanism via // this macro can have a significant effect on the library. Thus be advised to use the macro // with due care! // // // \n \section error_reporting_exception_customization Customization of the Type of Exceptions // <hr> // // In addition to the customization of the entire error reporting mechanism it is also possible // to customize the type of exceptions being thrown. This can be achieved by customizing any // number of the following macros: \code #define BLAZE_THROW_BAD_ALLOC \ BLAZE_THROW( std::bad_alloc() ) #define BLAZE_THROW_LOGIC_ERROR( MESSAGE ) \ BLAZE_THROW( std::logic_error( MESSAGE ) ) #define BLAZE_THROW_INVALID_ARGUMENT( MESSAGE ) \ BLAZE_THROW( std::invalid_argument( MESSAGE ) ) #define BLAZE_THROW_LENGTH_ERROR( MESSAGE ) \ BLAZE_THROW( std::length_error( MESSAGE ) ) #define BLAZE_THROW_OUT_OF_RANGE( MESSAGE ) \ BLAZE_THROW( std::out_of_range( MESSAGE ) ) #define BLAZE_THROW_RUNTIME_ERROR( MESSAGE ) \ BLAZE_THROW( std::runtime_error( MESSAGE ) ) \endcode // In order to customize the type of exception the according macro has to be defined prior to // including any \b Blaze header file. This will override the \b Blaze default behavior. The // following example demonstrates this by replacing \c std::invalid_argument by a custom // exception type: \code class InvalidArgument { public: InvalidArgument(); explicit InvalidArgument( const std::string& message ); // ...
}; #define BLAZE_THROW_INVALID_ARGUMENT( MESSAGE ) \ BLAZE_THROW( InvalidArgument( MESSAGE ) ) #include <blaze/Blaze.h> \endcode // By manually defining the macro, an \c InvalidArgument exception is thrown instead of a // \c std::invalid_argument exception. Note that it is recommended to define the macro such // that a subsequent semicolon is required! // // \warning These macros are provided with the intention to assist in adapting \b Blaze to // special conditions and environments. However, the customization of the type of an exception // via this macro may have an effect on the library. Thus be advised to use the macro with due // care! // // // \n \section error_reporting_special_errors Customization of Special Errors // <hr> // // Last but not least it is possible to customize the error reporting for special kinds of errors. // This can be achieved by customizing any number of the following macros: \code #define BLAZE_THROW_DIVISION_BY_ZERO( MESSAGE ) \ BLAZE_THROW_RUNTIME_ERROR( MESSAGE ) #define BLAZE_THROW_LAPACK_ERROR( MESSAGE ) \ BLAZE_THROW_RUNTIME_ERROR( MESSAGE ) \endcode // As explained in the previous sections, in order to customize the handling of special errors // the according macro has to be defined prior to including any \b Blaze header file. This will // override the \b Blaze default behavior. // // // \n Previous: \ref vector_and_matrix_customization &nbsp; &nbsp; Next: \ref blas_functions \n */ //************************************************************************************************* //**BLAS Functions********************************************************************************* /*!\page blas_functions BLAS Functions // // \tableofcontents // // // For vector/vector, matrix/vector and matrix/matrix multiplications with large dense matrices // \b Blaze relies on the efficiency of BLAS libraries. For this purpose, \b Blaze implements // several convenient C++ wrapper functions for several BLAS functions. The following sections // give a complete overview of all available BLAS level 1, 2 and 3 functions. 
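//
// As a brief illustration of how these wrappers are typically used, the following minimal sketch
// calls the high-level \c dotu() wrapper (introduced in the next section) on two dense vectors.
// The vector size and contents are mere assumptions for the purpose of this example, and a BLAS
// library has to be linked to the final executable:

   \code
   blaze::DynamicVector<double> x( 100UL ), y( 100UL );
   // ... Initializing the vectors

   const double d = dotu( x, y );  // Computes the dot product of x and y via the BLAS backend
   \endcode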
// // // \n \section blas_level_1 BLAS Level 1 // <hr> // // \subsection blas_level_1_dotu Dot Product (dotu) // // The following wrapper functions provide a generic interface for the BLAS functions for the // dot product of two dense vectors (\c sdot(), \c ddot(), \c cdotu_sub(), and \c zdotu_sub()): \code namespace blaze { float dotu( int n, const float* x, int incX, const float* y, int incY ); double dotu( int n, const double* x, int incX, const double* y, int incY ); complex<float> dotu( int n, const complex<float>* x, int incX, const complex<float>* y, int incY ); complex<double> dotu( int n, const complex<double>* x, int incX, const complex<double>* y, int incY ); template< typename VT1, bool TF1, typename VT2, bool TF2 > ElementType_<VT1> dotu( const DenseVector<VT1,TF1>& x, const DenseVector<VT2,TF2>& y ); } // namespace blaze \endcode // \subsection blas_level_1_dotc Complex Conjugate Dot Product (dotc) // // The following wrapper functions provide a generic interface for the BLAS functions for the // complex conjugate dot product of two dense vectors (\c sdot(), \c ddot(), \c cdotc_sub(), // and \c zdotc_sub()): \code namespace blaze { float dotc( int n, const float* x, int incX, const float* y, int incY ); double dotc( int n, const double* x, int incX, const double* y, int incY ); complex<float> dotc( int n, const complex<float>* x, int incX, const complex<float>* y, int incY ); complex<double> dotc( int n, const complex<double>* x, int incX, const complex<double>* y, int incY ); template< typename VT1, bool TF1, typename VT2, bool TF2 > ElementType_<VT1> dotc( const DenseVector<VT1,TF1>& x, const DenseVector<VT2,TF2>& y ); } // namespace blaze \endcode // \subsection blas_level_1_axpy Axpy Product (axpy) // // The following wrapper functions provide a generic interface for the BLAS functions for the // axpy product of two dense vectors (\c saxpy(), \c daxpy(), \c caxpy(), and \c zaxpy()): \code namespace blaze { void axpy( int n, float alpha, const float* x, int incX, float* y, int incY ); void axpy( int n, double alpha, const double* x, int incX, double* y, int incY ); void axpy( int n, complex<float> alpha, const complex<float>* x, int incX, complex<float>* y, int incY ); void axpy( int n, complex<double> alpha, const complex<double>* x, int incX, complex<double>* y, int incY ); template< typename VT1, bool TF1, typename VT2, bool TF2, typename ST > void axpy( const DenseVector<VT1,TF1>& x, const DenseVector<VT2,TF2>& y, ST alpha ); } // namespace blaze \endcode // \n \section blas_level_2 BLAS Level 2 // <hr> // // \subsection blas_level_2_gemv General Matrix/Vector Multiplication (gemv) // // The following wrapper functions provide a generic interface for the BLAS functions for the // general matrix/vector multiplication (\c sgemv(), \c dgemv(), \c cgemv(), and \c zgemv()): \code namespace blaze { void gemv( CBLAS_ORDER layout, CBLAS_TRANSPOSE transA, int m, int n, float alpha, const float* A, int lda, const float* x, int incX, float beta, float* y, int incY ); void gemv( CBLAS_ORDER layout, CBLAS_TRANSPOSE transA, int m, int n, double alpha, const double* A, int lda, const double* x, int incX, double beta, double* y, int incY ); void gemv( CBLAS_ORDER layout, CBLAS_TRANSPOSE transA, int m, int n, complex<float> alpha, const complex<float>* A, int lda, const complex<float>* x, int incX, complex<float> beta, complex<float>* y, int incY ); void gemv( CBLAS_ORDER layout, CBLAS_TRANSPOSE transA, int m, int n, complex<double> alpha, const complex<double>* A, int lda, const 
complex<double>* x, int incX, complex<double> beta, complex<double>* y, int incY ); template< typename VT1, typename MT1, bool SO, typename VT2, typename ST > void gemv( DenseVector<VT1,false>& y, const DenseMatrix<MT1,SO>& A, const DenseVector<VT2,false>& x, ST alpha, ST beta ); template< typename VT1, typename VT2, typename MT1, bool SO, typename ST > void gemv( DenseVector<VT1,true>& y, const DenseVector<VT2,true>& x, const DenseMatrix<MT1,SO>& A, ST alpha, ST beta ); } // namespace blaze \endcode // \n \subsection blas_level_2_trmv Triangular Matrix/Vector Multiplication (trmv) // // The following wrapper functions provide a generic interface for the BLAS functions for the // matrix/vector multiplication with a triangular matrix (\c strmv(), \c dtrmv(), \c ctrmv(), // and \c ztrmv()): \code namespace blaze { void trmv( CBLAS_ORDER order, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int n, const float* A, int lda, float* x, int incX ); void trmv( CBLAS_ORDER order, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int n, const double* A, int lda, double* x, int incX ); void trmv( CBLAS_ORDER order, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int n, const complex<float>* A, int lda, complex<float>* x, int incX ); void trmv( CBLAS_ORDER order, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int n, const complex<double>* A, int lda, complex<double>* x, int incX ); template< typename VT, typename MT, bool SO > void trmv( DenseVector<VT,false>& x, const DenseMatrix<MT,SO>& A, CBLAS_UPLO uplo ); template< typename VT, typename MT, bool SO > void trmv( DenseVector<VT,true>& x, const DenseMatrix<MT,SO>& A, CBLAS_UPLO uplo ); } // namespace blaze \endcode // \n \section blas_level_3 BLAS Level 3 // <hr> // // \subsection blas_level_3_gemm General Matrix/Matrix Multiplication (gemm) // // The following wrapper functions provide a generic interface for the BLAS functions for the // general matrix/matrix multiplication (\c sgemm(), \c dgemm(), \c cgemm(), and \c zgemm()): \code namespace blaze { void gemm( CBLAS_ORDER order, CBLAS_TRANSPOSE transA, CBLAS_TRANSPOSE transB, int m, int n, int k, float alpha, const float* A, int lda, const float* B, int ldb, float beta, float* C, int ldc ); void gemm( CBLAS_ORDER order, CBLAS_TRANSPOSE transA, CBLAS_TRANSPOSE transB, int m, int n, int k, double alpha, const double* A, int lda, const double* B, int ldb, double beta, double* C, int ldc ); void gemm( CBLAS_ORDER order, CBLAS_TRANSPOSE transA, CBLAS_TRANSPOSE transB, int m, int n, int k, complex<float> alpha, const complex<float>* A, int lda, const complex<float>* B, int ldb, complex<float> beta, complex<float>* C, int ldc ); void gemm( CBLAS_ORDER order, CBLAS_TRANSPOSE transA, CBLAS_TRANSPOSE transB, int m, int n, int k, complex<double> alpha, const complex<double>* A, int lda, const complex<double>* B, int ldb, complex<double> beta, complex<double>* C, int ldc ); template< typename MT1, bool SO1, typename MT2, bool SO2, typename MT3, bool SO3, typename ST > void gemm( DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A, const DenseMatrix<MT3,SO3>& B, ST alpha, ST beta ); } // namespace blaze \endcode // \n \subsection blas_level_3_trmm Triangular Matrix/Matrix Multiplication (trmm) // // The following wrapper functions provide a generic interface for the BLAS functions for the // matrix/matrix multiplication with a triangular matrix (\c strmm(), \c dtrmm(), \c ctrmm(), and // \c ztrmm()): \code namespace blaze { void trmm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo,
CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int m, int n, float alpha, const float* A, int lda, float* B, int ldb ); void trmm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int m, int n, double alpha, const double* A, int lda, double* B, int ldb ); void trmm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int m, int n, complex<float> alpha, const complex<float>* A, int lda, complex<float>* B, int ldb ); void trmm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int m, int n, complex<double> alpha, const complex<double>* A, int lda, complex<double>* B, int ldb ); template< typename MT1, bool SO1, typename MT2, bool SO2, typename ST > void trmm( DenseMatrix<MT1,SO1>& B, const DenseMatrix<MT2,SO2>& A, CBLAS_SIDE side, CBLAS_UPLO uplo, ST alpha ); } // namespace blaze \endcode // \n \subsection blas_level_3_trsm Triangular System Solver (trsm) // // The following wrapper functions provide a generic interface for the BLAS functions for solving // a triangular system of equations (\c strsm(), \c dtrsm(), \c ctrsm(), and \c ztrsm()): \code namespace blaze { void trsm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int m, int n, float alpha, const float* A, int lda, float* B, int ldb ); void trsm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int m, int n, double alpha, const double* A, int lda, double* B, int ldb ); void trsm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int m, int n, complex<float> alpha, const complex<float>* A, int lda, complex<float>* B, int ldb ); void trsm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int m, int n, complex<double> alpha, const complex<double>* A, int lda, complex<double>* B, int ldb ); template< typename MT, bool SO, typename VT, bool TF, typename ST > void trsm( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, CBLAS_SIDE side, CBLAS_UPLO uplo, ST alpha ); template< typename MT1, bool SO1, typename MT2, bool SO2, typename ST > void trsm( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, CBLAS_SIDE side, CBLAS_UPLO uplo, ST alpha ); } // namespace blaze \endcode // \n Previous: \ref error_reporting_customization &nbsp; &nbsp; Next: \ref lapack_functions \n */ //************************************************************************************************* //**LAPACK Functions******************************************************************************* /*!\page lapack_functions LAPACK Functions // // \tableofcontents // // // \n \section lapack_introction Introduction // <hr> // // The \b Blaze library makes extensive use of the LAPACK functionality for various compute tasks // (including the decomposition, inversion and the computation of the determinant of dense matrices). // For this purpose, \b Blaze implements several convenient C++ wrapper functions for all required // LAPACK functions. The following sections give a complete overview of all available LAPACK wrapper // functions. For more details on the individual LAPACK functions see the \b Blaze function // documentation or the LAPACK online documentation browser: // // http://www.netlib.org/lapack/explore-html/ // // Most of the wrapper functions are implemented as thin wrappers around LAPACK functions. 
They // provide the parameters of the original LAPACK functions and thus provide maximum flexibility: \code constexpr size_t N( 100UL ); blaze::DynamicMatrix<double,blaze::columnMajor> A( N, N ); // ... Initializing the matrix const int m ( numeric_cast<int>( A.rows() ) ); // == N const int n ( numeric_cast<int>( A.columns() ) ); // == N const int lda ( numeric_cast<int>( A.spacing() ) ); // >= N const int lwork( n*lda ); const std::unique_ptr<int[]> ipiv( new int[N] ); // No initialization required const std::unique_ptr<double[]> work( new double[lwork] ); // No initialization required int info( 0 ); getrf( m, n, A.data(), lda, ipiv.get(), &info ); // Reports failure via 'info' getri( n, A.data(), lda, ipiv.get(), work.get(), lwork, &info ); // Reports failure via 'info' \endcode // Additionally, \b Blaze provides wrappers that provide a higher level of abstraction. These // wrappers provide a maximum of convenience: \code constexpr size_t N( 100UL ); blaze::DynamicMatrix<double,blaze::columnMajor> A( N, N ); // ... Initializing the matrix const std::unique_ptr<int[]> ipiv( new int[N] ); // No initialization required getrf( A, ipiv.get() ); // Cannot fail getri( A, ipiv.get() ); // Reports failure via exception \endcode // \note All functions only work for general, non-adapted matrices with \c float, \c double, // \c complex<float>, or \c complex<double> element type. The attempt to call the function with // adaptors or matrices of any other element type results in a compile time error! // // \note All functions can only be used if a fitting LAPACK library is available and linked to // the final executable. Otherwise a call to this function will result in a linker error. // // \note For performance reasons all functions do only provide the basic exception safety guarantee, // i.e. in case an exception is thrown the given matrix may already have been modified. // // // \n \section lapack_decomposition Matrix Decomposition // <hr> // // The following functions decompose/factorize the given dense matrix. Based on this decomposition // the matrix can be inverted or used to solve a linear system of equations. // // // \n \subsection lapack_lu_decomposition LU Decomposition // // The following functions provide an interface for the LAPACK functions \c sgetrf(), \c dgetrf(), // \c cgetrf(), and \c zgetrf(), which compute the LU decomposition for the given general matrix: \code namespace blaze { void getrf( int m, int n, float* A, int lda, int* ipiv, int* info ); void getrf( int m, int n, double* A, int lda, int* ipiv, int* info ); void getrf( int m, int n, complex<float>* A, int lda, int* ipiv, int* info ); void getrf( int m, int n, complex<double>* A, int lda, int* ipiv, int* info ); template< typename MT, bool SO > void getrf( DenseMatrix<MT,SO>& A, int* ipiv ); } // namespace blaze \endcode // The decomposition has the form \f[ A = P \cdot L \cdot U, \f]\n // where \c P is a permutation matrix, \c L is a lower unitriangular matrix, and \c U is an upper // triangular matrix. The resulting decomposition is stored within \a A: In case of a column-major // matrix, \c L is stored in the lower part of \a A and \c U is stored in the upper part. The unit // diagonal elements of \c L are not stored. In case \a A is a row-major matrix the result is // transposed. // // \note The LU decomposition will never fail, even for singular matrices. However, in case of a // singular matrix the resulting decomposition cannot be used for a matrix inversion or solving // a linear system of equations.
// // // \n \subsection lapack_ldlt_decomposition LDLT Decomposition // // The following functions provide an interface for the LAPACK functions \c ssytrf(), \c dsytrf(), // \c csytrf(), and \c zsytrf(), which compute the LDLT (Bunch-Kaufman) decomposition for the given // symmetric indefinite matrix: \code namespace blaze { void sytrf( char uplo, int n, float* A, int lda, int* ipiv, float* work, int lwork, int* info ); void sytrf( char uplo, int n, double* A, int lda, int* ipiv, double* work, int lwork, int* info ); void sytrf( char uplo, int n, complex<float>* A, int lda, int* ipiv, complex<float>* work, int lwork, int* info ); void sytrf( char uplo, int n, complex<double>* A, int lda, int* ipiv, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void sytrf( DenseMatrix<MT,SO>& A, char uplo, int* ipiv ); } // namespace blaze \endcode // The decomposition has the form \f[ A = U D U^{T} \texttt{ (if uplo = 'U'), or } A = L D L^{T} \texttt{ (if uplo = 'L'), } \f] // where \c U (or \c L) is a product of permutation and unit upper (lower) triangular matrices, // and \c D is symmetric and block diagonal with 1-by-1 and 2-by-2 diagonal blocks. The resulting // decomposition is stored within \a A: In case \a uplo is set to \c 'L' the result is stored in // the lower part of the matrix and the upper part remains untouched, in case \a uplo is set to // \c 'U' the result is stored in the upper part and the lower part remains untouched. // // \note The Bunch-Kaufman decomposition will never fail, even for singular matrices. However, in // case of a singular matrix the resulting decomposition cannot be used for a matrix inversion or // solving a linear system of equations. // // // \n \subsection lapack_ldlh_decomposition LDLH Decomposition // // The following functions provide an interface for the LAPACK functions \c chetrf() and \c zhetrf(), // which compute the LDLH (Bunch-Kaufman) decomposition for the given Hermitian indefinite matrix: \code namespace blaze { void hetrf( char uplo, int n, complex<float>* A, int lda, int* ipiv, complex<float>* work, int lwork, int* info ); void hetrf( char uplo, int n, complex<double>* A, int lda, int* ipiv, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void hetrf( DenseMatrix<MT,SO>& A, char uplo, int* ipiv ); } // namespace blaze \endcode // The decomposition has the form \f[ A = U D U^{H} \texttt{ (if uplo = 'U'), or } A = L D L^{H} \texttt{ (if uplo = 'L'), } \f] // where \c U (or \c L) is a product of permutation and unit upper (lower) triangular matrices, // and \c D is Hermitian and block diagonal with 1-by-1 and 2-by-2 diagonal blocks. The resulting // decomposition is stored within \a A: In case \a uplo is set to \c 'L' the result is stored in // the lower part of the matrix and the upper part remains untouched, in case \a uplo is set to // \c 'U' the result is stored in the upper part and the lower part remains untouched. // // \note The Bunch-Kaufman decomposition will never fail, even for singular matrices. However, in // case of a singular matrix the resulting decomposition cannot be used for a matrix inversion or // solving a linear system of equations.
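//
// A minimal usage sketch of the convenient \c hetrf() wrapper is given below. The matrix size,
// its (Hermitian) contents, and the choice of \a uplo are assumptions made only for this example:

   \code
   blaze::DynamicMatrix<std::complex<double>,blaze::columnMajor> A( 4UL, 4UL );
   // ... Initializing the matrix with Hermitian data

   const std::unique_ptr<int[]> ipiv( new int[4UL] );  // Receives the pivot indices

   hetrf( A, 'L', ipiv.get() );  // In-place LDLH decomposition of the lower part of A
   \endcode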
// // // \n \subsection lapack_llh_decomposition Cholesky Decomposition // // The following functions provide an interface for the LAPACK functions \c spotrf(), \c dpotrf(), // \c cpotrf(), and \c zpotrf(), which compute the Cholesky (LLH) decomposition for the given // positive definite matrix: \code namespace blaze { void potrf( char uplo, int n, float* A, int lda, int* info ); void potrf( char uplo, int n, double* A, int lda, int* info ); void potrf( char uplo, int n, complex<float>* A, int lda, int* info ); void potrf( char uplo, int n, complex<double>* A, int lda, int* info ); template< typename MT, bool SO > void potrf( DenseMatrix<MT,SO>& A, char uplo ); } // namespace blaze \endcode // The decomposition has the form \f[ A = U^{T} U \texttt{ (if uplo = 'U'), or } A = L L^{T} \texttt{ (if uplo = 'L'), } \f] // where \c U is an upper triangular matrix and \c L is a lower triangular matrix. The Cholesky // decomposition fails if the given matrix \a A is not a positive definite matrix. In this case // a \a std::invalid_argument exception is thrown. // // // \n \subsection lapack_qr_decomposition QR Decomposition // // The following functions provide an interface for the LAPACK functions \c sgeqrf(), \c dgeqrf(), // \c cgeqrf(), and \c zgeqrf(), which compute the QR decomposition of the given general matrix: \code namespace blaze { void geqrf( int m, int n, float* A, int lda, float* tau, float* work, int lwork, int* info ); void geqrf( int m, int n, double* A, int lda, double* tau, double* work, int lwork, int* info ); void geqrf( int m, int n, complex<float>* A, int lda, complex<float>* tau, complex<float>* work, int lwork, int* info ); void geqrf( int m, int n, complex<double>* A, int lda, complex<double>* tau, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void geqrf( DenseMatrix<MT,SO>& A, typename MT::ElementType* tau ); } // namespace blaze \endcode // The decomposition has the form \f[ A = Q \cdot R, \f] // where the \c Q is represented as a product of elementary reflectors \f[ Q = H(1) H(2) . . . H(k) \texttt{, with k = min(m,n).} \f] // Each H(i) has the form \f[ H(i) = I - tau \cdot v \cdot v^T, \f] // where \c tau is a real scalar, and \c v is a real vector with <tt>v(0:i-1) = 0</tt> and // <tt>v(i) = 1</tt>. <tt>v(i+1:m)</tt> is stored on exit in <tt>A(i+1:m,i)</tt>, and \c tau // in \c tau(i). Thus on exit the elements on and above the diagonal of the matrix contain the // min(\a m,\a n)-by-\a n upper trapezoidal matrix \c R (\c R is upper triangular if \a m >= \a n); // the elements below the diagonal, with the array \c tau, represent the orthogonal matrix \c Q as // a product of min(\a m,\a n) elementary reflectors.
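//
// A minimal sketch of the convenient \c geqrf() wrapper is shown below. The matrix dimensions
// and contents are assumptions for this example only; the \c tau array must provide at least
// min(\a m,\a n) elements:

   \code
   blaze::DynamicMatrix<double,blaze::columnMajor> A( 5UL, 3UL );
   // ... Initializing the matrix

   const std::unique_ptr<double[]> tau( new double[3UL] );  // min( 5, 3 ) = 3 elements

   geqrf( A, tau.get() );  // In-place QR decomposition; R and the reflectors are stored in A
   \endcode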
// // The following functions provide an interface for the LAPACK functions \c sorgqr(), \c dorgqr(), // \c cungqr(), and \c zunqqr(), which reconstruct the \c Q matrix from a QR decomposition: \code namespace blaze { void orgqr( int m, int n, int k, float* A, int lda, const float* tau, float* work, int lwork, int* info ); void orgqr( int m, int n, int k, double* A, int lda, const double* tau, double* work, int lwork, int* info ); void ungqr( int m, int n, int k, complex<float>* A, int lda, const complex<float>* tau, complex<float>* work, int lwork, int* info ); void ungqr( int m, int n, int k, complex<double>* A, int lda, const complex<double>* tau, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void orgqr( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); template< typename MT, bool SO > void ungqr( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); } // namespace blaze \endcode // The following functions provide an interface for the LAPACK functions \c sormqr(), \c dormqr(), // \c cunmqr(), and \c zunmqr(), which can be used to multiply a matrix with the \c Q matrix from // a QR decomposition: \code namespace blaze { void ormqr( char side, char trans, int m, int n, int k, const float* A, int lda, const float* tau, float* C, int ldc, float* work, int lwork, int* info ); void ormqr( char side, char trans, int m, int n, int k, const double* A, int lda, const double* tau, double* C, int ldc, double* work, int lwork, int* info ); void unmqr( char side, char trans, int m, int n, int k, const complex<float>* A, int lda, const complex<float>* tau, complex<float>* C, int ldc, complex<float>* work, int lwork, int* info ); void unmqr( char side, char trans, int m, int n, int k, const complex<double>* A, int lda, const complex<double>* tau, complex<double>* C, int ldc, complex<double>* work, int lwork, int* info ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void ormqr( DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A, char side, char trans, const ElementType_<MT2>* tau ); template< typename MT1, bool SO, typename MT2 > void unmqr( DenseMatrix<MT1,SO>& C, DenseMatrix<MT2,SO>& A, char side, char trans, ElementType_<MT2>* tau ); } // namespace blaze \endcode // \n \subsection lapack_rq_decomposition RQ Decomposition // // The following functions provide an interface for the LAPACK functions \c sgerqf(), \c dgerqf(), // \c cgerqf(), and \c zgerqf(), which compute the RQ decomposition of the given general matrix: \code namespace blaze { void gerqf( int m, int n, float* A, int lda, float* tau, float* work, int lwork, int* info ); void gerqf( int m, int n, double* A, int lda, double* tau, double* work, int lwork, int* info ); void gerqf( int m, int n, complex<float>* A, int lda, complex<float>* tau, complex<float>* work, int lwork, int* info ); void gerqf( int m, int n, complex<double>* A, int lda, complex<double>* tau, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void gerqf( DenseMatrix<MT,SO>& A, typename MT::ElementType* tau ); } // namespace blaze \endcode // The decomposition has the form \f[ A = R \cdot Q, \f] // where the \c Q is represented as a product of elementary reflectors \f[ Q = H(1) H(2) . . . H(k) \texttt{, with k = min(m,n).} \f] // Each H(i) has the form \f[ H(i) = I - tau \cdot v \cdot v^T, \f] // where \c tau is a real scalar, and \c v is a real vector with <tt>v(n-k+i+1:n) = 0</tt> and // <tt>v(n-k+i) = 1</tt>. 
<tt>v(1:n-k+i-1)</tt> is stored on exit in <tt>A(m-k+i,1:n-k+i-1)</tt>, // and \c tau in \c tau(i). Thus in case \a m <= \a n, the upper triangle of the subarray // <tt>A(1:m,n-m+1:n)</tt> contains the \a m-by-\a m upper triangular matrix \c R and in case // \a m >= \a n, the elements on and above the (\a m-\a n)-th subdiagonal contain the \a m-by-\a n // upper trapezoidal matrix \c R; the remaining elements in combination with the array \c tau // represent the orthogonal matrix \c Q as a product of min(\a m,\a n) elementary reflectors. // // The following functions provide an interface for the LAPACK functions \c sorgrq(), \c dorgrq(), // \c cungrq(), and \c zunqrq(), which reconstruct the \c Q matrix from a RQ decomposition: \code namespace blaze { void orgrq( int m, int n, int k, float* A, int lda, const float* tau, float* work, int lwork, int* info ); void orgrq( int m, int n, int k, double* A, int lda, const double* tau, double* work, int lwork, int* info ); void ungrq( int m, int n, int k, complex<float>* A, int lda, const complex<float>* tau, complex<float>* work, int lwork, int* info ); void ungrq( int m, int n, int k, complex<double>* A, int lda, const complex<double>* tau, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void orgrq( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); template< typename MT, bool SO > void ungrq( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); } // namespace blaze \endcode // The following functions provide an interface for the LAPACK functions \c sormrq(), \c dormrq(), // \c cunmrq(), and \c zunmrq(), which can be used to multiply a matrix with the \c Q matrix from // a RQ decomposition: \code namespace blaze { void ormrq( char side, char trans, int m, int n, int k, const float* A, int lda, const float* tau, float* C, int ldc, float* work, int lwork, int* info ); void ormrq( char side, char trans, int m, int n, int k, const double* A, int lda, const double* tau, double* C, int ldc, double* work, int lwork, int* info ); void unmrq( char side, char trans, int m, int n, int k, const complex<float>* A, int lda, const complex<float>* tau, complex<float>* C, int ldc, complex<float>* work, int lwork, int* info ); void unmrq( char side, char trans, int m, int n, int k, const complex<double>* A, int lda, const complex<double>* tau, complex<double>* C, int ldc, complex<double>* work, int lwork, int* info ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void ormrq( DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A, char side, char trans, const ElementType_<MT2>* tau ); template< typename MT1, bool SO, typename MT2 > void unmrq( DenseMatrix<MT1,SO>& C, DenseMatrix<MT2,SO>& A, char side, char trans, ElementType_<MT2>* tau ); } // namespace blaze \endcode // \n \subsection lapack_ql_decomposition QL Decomposition // // The following functions provide an interface for the LAPACK functions \c sgeqlf(), \c dgeqlf(), // \c cgeqlf(), and \c zgeqlf(), which compute the QL decomposition of the given general matrix: \code namespace blaze { void geqlf( int m, int n, float* A, int lda, float* tau, float* work, int lwork, int* info ); void geqlf( int m, int n, double* A, int lda, double* tau, double* work, int lwork, int* info ); void geqlf( int m, int n, complex<float>* A, int lda, complex<float>* tau, complex<float>* work, int lwork, int* info ); void geqlf( int m, int n, complex<double>* A, int lda, complex<double>* tau, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO 
> void geqlf( DenseMatrix<MT,SO>& A, typename MT::ElementType* tau ); } // namespace blaze \endcode // The decomposition has the form \f[ A = Q \cdot L, \f] // where the \c Q is represented as a product of elementary reflectors \f[ Q = H(k) . . . H(2) H(1) \texttt{, with k = min(m,n).} \f] // Each H(i) has the form \f[ H(i) = I - tau \cdot v \cdot v^T, \f] // where \c tau is a real scalar, and \c v is a real vector with <tt>v(m-k+i+1:m) = 0</tt> and // <tt>v(m-k+i) = 1</tt>. <tt>v(1:m-k+i-1)</tt> is stored on exit in <tt>A(1:m-k+i-1,n-k+i)</tt>, // and \c tau in \c tau(i). Thus in case \a m >= \a n, the lower triangle of the subarray // A(m-n+1:m,1:n) contains the \a n-by-\a n lower triangular matrix \c L and in case \a m <= \a n, // the elements on and below the (\a n-\a m)-th subdiagonal contain the \a m-by-\a n lower // trapezoidal matrix \c L; the remaining elements in combination with the array \c tau represent // the orthogonal matrix \c Q as a product of min(\a m,\a n) elementary reflectors. // // The following functions provide an interface for the LAPACK functions \c sorgql(), \c dorgql(), // \c cungql(), and \c zunqql(), which reconstruct the \c Q matrix from an QL decomposition: \code namespace blaze { void orgql( int m, int n, int k, float* A, int lda, const float* tau, float* work, int lwork, int* info ); void orgql( int m, int n, int k, double* A, int lda, const double* tau, double* work, int lwork, int* info ); void ungql( int m, int n, int k, complex<float>* A, int lda, const complex<float>* tau, complex<float>* work, int lwork, int* info ); void ungql( int m, int n, int k, complex<double>* A, int lda, const complex<double>* tau, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void orgql( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); template< typename MT, bool SO > void ungql( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); } // namespace blaze \endcode // The following functions provide an interface for the LAPACK functions \c sormql(), \c dormql(), // \c cunmql(), and \c zunmql(), which can be used to multiply a matrix with the \c Q matrix from // a QL decomposition: \code namespace blaze { void ormql( char side, char trans, int m, int n, int k, const float* A, int lda, const float* tau, float* C, int ldc, float* work, int lwork, int* info ); void ormql( char side, char trans, int m, int n, int k, const double* A, int lda, const double* tau, double* C, int ldc, double* work, int lwork, int* info ); void unmql( char side, char trans, int m, int n, int k, const complex<float>* A, int lda, const complex<float>* tau, complex<float>* C, int ldc, complex<float>* work, int lwork, int* info ); void unmql( char side, char trans, int m, int n, int k, const complex<double>* A, int lda, const complex<double>* tau, complex<double>* C, int ldc, complex<double>* work, int lwork, int* info ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void ormql( DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A, char side, char trans, const ElementType_<MT2>* tau ); template< typename MT1, bool SO, typename MT2 > void unmql( DenseMatrix<MT1,SO>& C, DenseMatrix<MT2,SO>& A, char side, char trans, ElementType_<MT2>* tau ); } // namespace blaze \endcode // \n \subsection lapack_lq_decomposition LQ Decomposition // // The following functions provide an interface for the LAPACK functions \c sgelqf(), \c dgelqf(), // \c cgelqf(), and \c zgelqf(), which compute the LQ decomposition of the given general matrix: \code namespace 
blaze { void gelqf( int m, int n, float* A, int lda, float* tau, float* work, int lwork, int* info ); void gelqf( int m, int n, double* A, int lda, double* tau, double* work, int lwork, int* info ); void gelqf( int m, int n, complex<float>* A, int lda, complex<float>* tau, complex<float>* work, int lwork, int* info ); void gelqf( int m, int n, complex<double>* A, int lda, complex<double>* tau, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void gelqf( DenseMatrix<MT,SO>& A, typename MT::ElementType* tau ); } // namespace blaze \endcode // The decomposition has the form \f[ A = L \cdot Q, \f] // where the \c Q is represented as a product of elementary reflectors \f[ Q = H(k) . . . H(2) H(1) \texttt{, with k = min(m,n).} \f] // Each H(i) has the form \f[ H(i) = I - tau \cdot v \cdot v^T, \f] // where \c tau is a real scalar, and \c v is a real vector with <tt>v(0:i-1) = 0</tt> and // <tt>v(i) = 1</tt>. <tt>v(i+1:n)</tt> is stored on exit in <tt>A(i,i+1:n)</tt>, and \c tau // in \c tau(i). Thus on exit the elements on and below the diagonal of the matrix contain the // \a m-by-min(\a m,\a n) lower trapezoidal matrix \c L (\c L is lower triangular if \a m <= \a n); // the elements above the diagonal, with the array \c tau, represent the orthogonal matrix \c Q // as a product of min(\a m,\a n) elementary reflectors. // // The following functions provide an interface for the LAPACK functions \c sorglq(), \c dorglq(), // \c cunglq(), and \c zunqlq(), which reconstruct the \c Q matrix from an LQ decomposition: \code namespace blaze { void orglq( int m, int n, int k, float* A, int lda, const float* tau, float* work, int lwork, int* info ); void orglq( int m, int n, int k, double* A, int lda, const double* tau, double* work, int lwork, int* info ); void unglq( int m, int n, int k, complex<float>* A, int lda, const complex<float>* tau, complex<float>* work, int lwork, int* info ); void unglq( int m, int n, int k, complex<double>* A, int lda, const complex<double>* tau, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void orglq( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); template< typename MT, bool SO > void unglq( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); } // namespace blaze \endcode // The following functions provide an interface for the LAPACK functions \c sormlq(), \c dormlq(), // \c cunmlq(), and \c zunmlq(), which can be used to multiply a matrix with the \c Q matrix from // a LQ decomposition: \code namespace blaze { void ormlq( char side, char trans, int m, int n, int k, const float* A, int lda, const float* tau, float* C, int ldc, float* work, int lwork, int* info ); void ormlq( char side, char trans, int m, int n, int k, const double* A, int lda, const double* tau, double* C, int ldc, double* work, int lwork, int* info ); void unmlq( char side, char trans, int m, int n, int k, const complex<float>* A, int lda, const complex<float>* tau, complex<float>* C, int ldc, complex<float>* work, int lwork, int* info ); void unmlq( char side, char trans, int m, int n, int k, const complex<double>* A, int lda, const complex<double>* tau, complex<double>* C, int ldc, complex<double>* work, int lwork, int* info ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void ormlq( DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A, char side, char trans, const ElementType_<MT2>* tau ); template< typename MT1, bool SO, typename MT2 > void unmlq( DenseMatrix<MT1,SO>& C, DenseMatrix<MT2,SO>& A, 
char side, char trans, ElementType_<MT2>* tau ); } // namespace blaze \endcode // \n \section lapack_inversion Matrix Inversion // <hr> // // Given a matrix that has already been decomposed, the following functions can be used to invert // the matrix in-place. // // // \n \subsection lapack_lu_inversion LU-based Inversion // // The following functions provide an interface for the LAPACK functions \c sgetri(), \c dgetri(), // \c cgetri(), and \c zgetri(), which invert a general matrix that has already been decomposed by // an \ref lapack_lu_decomposition : \code namespace blaze { void getri( int n, float* A, int lda, const int* ipiv, float* work, int lwork, int* info ); void getri( int n, double* A, int lda, const int* ipiv, double* work, int lwork, int* info ); void getri( int n, complex<float>* A, int lda, const int* ipiv, complex<float>* work, int lwork, int* info ); void getri( int n, complex<double>* A, int lda, const int* ipiv, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void getri( DenseMatrix<MT,SO>& A, const int* ipiv ); } // namespace blaze \endcode // The functions fail if ... // // - ... the given matrix is not a square matrix; // - ... the given matrix is singular and not invertible. // // The first four functions report failure via the \c info argument, the fifth function throws a // \a std::invalid_argument exception in case of an error. // // // \n \subsection lapack_ldlt_inversion LDLT-based Inversion // // The following functions provide an interface for the LAPACK functions \c ssytri(), \c dsytri(), // \c csytri(), and \c zsytri(), which invert a symmetric indefinite matrix that has already been // decomposed by an \ref lapack_ldlt_decomposition : \code namespace blaze { void sytri( char uplo, int n, float* A, int lda, const int* ipiv, float* work, int* info ); void sytri( char uplo, int n, double* A, int lda, const int* ipiv, double* work, int* info ); void sytri( char uplo, int n, complex<float>* A, int lda, const int* ipiv, complex<float>* work, int* info ); void sytri( char uplo, int n, complex<double>* A, int lda, const int* ipiv, complex<double>* work, int* info ); template< typename MT, bool SO > void sytri( DenseMatrix<MT,SO>& A, char uplo, const int* ipiv ); } // namespace blaze \endcode // The functions fail if ... // // - ... the given matrix is not a square matrix; // - ... the given matrix is singular and not invertible. // // The first four functions report failure via the \c info argument, the fifth function throws a // \a std::invalid_argument exception in case of an error. // // // \n \subsection lapack_ldlh_inversion LDLH-based Inversion // // The following functions provide an interface for the LAPACK functions \c chetri() and // \c zhetri(), which invert an Hermitian indefinite matrix that has already been decomposed by // an \ref lapack_ldlh_decomposition : \code namespace blaze { void hetri( char uplo, int n, complex<float>* A, int lda, const int* ipiv, complex<float>* work, int* info ); void hetri( char uplo, int n, complex<double>* A, int lda, const int* ipiv, complex<double>* work, int* info ); template< typename MT, bool SO > void hetri( DenseMatrix<MT,SO>& A, char uplo, const int* ipiv ); } // namespace blaze \endcode // The functions fail if ... // // - ... the given matrix is not a square matrix; // - ... the given matrix is singular and not invertible. 
// // The first two functions report failure via the \c info argument, the third function throws a // \a std::invalid_argument exception in case of an error. // // // \n \subsection lapack_llh_inversion Cholesky-based Inversion // // The following functions provide an interface for the LAPACK functions \c spotri(), \c dpotri(), // \c cpotri(), and \c zpotri(), which invert a positive definite matrix that has already been // decomposed by an \ref lapack_llh_decomposition : \code namespace blaze { void potri( char uplo, int n, float* A, int lda, int* info ); void potri( char uplo, int n, double* A, int lda, int* info ); void potri( char uplo, int n, complex<float>* A, int lda, int* info ); void potri( char uplo, int n, complex<double>* A, int lda, int* info ); template< typename MT, bool SO > void potri( DenseMatrix<MT,SO>& A, char uplo ); } // namespace blaze \endcode // The functions fail if ... // // - ... the given matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the given matrix is singular and not invertible. // // The first four functions report failure via the \c info argument, the fifth function throws a // \a std::invalid_argument exception in case of an error. // // // \n \subsection lapack_triangular_inversion Inversion of Triangular Matrices // // The following functions provide an interface for the LAPACK functions \c strtri(), \c dtrtri(), // \c ctrtri(), and \c ztrtri(), which invert the given triangular matrix in-place: \code namespace blaze { void trtri( char uplo, char diag, int n, float* A, int lda, int* info ); void trtri( char uplo, char diag, int n, double* A, int lda, int* info ); void trtri( char uplo, char diag, int n, complex<float>* A, int lda, int* info ); void trtri( char uplo, char diag, int n, complex<double>* A, int lda, int* info ); template< typename MT, bool SO > void trtri( DenseMatrix<MT,SO>& A, char uplo, char diag ); } // namespace blaze \endcode // The functions fail if ... // // - ... the given matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the given \a diag argument is neither 'U' nor 'N'; // - ... the given matrix is singular and not invertible. // // The first four functions report failure via the \c info argument, the fifth function throws a // \a std::invalid_argument exception in case of an error. // // // \n \section lapack_substitution Substitution // <hr> // // Given a matrix that has already been decomposed the following functions can be used to perform // the forward/backward substitution step to compute the solution to a system of linear equations. // Note that depending on the storage order of the system matrix and the given right-hand side the // functions solve different equation systems: // // Single right-hand side: // - \f$ A *x=b \f$ if \a A is column-major // - \f$ A^T*x=b \f$ if \a A is row-major // // Multiple right-hand sides: // - \f$ A *X =B \f$ if both \a A and \a B are column-major // - \f$ A^T*X =B \f$ if \a A is row-major and \a B is column-major // - \f$ A *X^T=B^T \f$ if \a A is column-major and \a B is row-major // - \f$ A^T*X^T=B^T \f$ if both \a A and \a B are row-major // // In this context the general system matrix \a A is a n-by-n matrix that has already been // factorized by the according decomposition function, \a x and \a b are n-dimensional vectors // and \a X and \a B are either row-major m-by-n matrices or column-major n-by-m matrices.
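//
// As an illustration of the complete decomposition/substitution workflow, the following minimal
// sketch first factorizes a column-major system matrix via \c getrf() and then solves \f$ A*x=b \f$
// by means of the \c getrs() function described in the next subsection. Matrix and vector contents
// are assumptions made only for this example:

   \code
   blaze::DynamicMatrix<double,blaze::columnMajor> A( 3UL, 3UL );
   blaze::DynamicVector<double> b( 3UL );
   // ... Initializing the system matrix and the right-hand side

   const std::unique_ptr<int[]> ipiv( new int[3UL] );

   getrf( A, ipiv.get() );          // LU decomposition of A
   getrs( A, b, 'N', ipiv.get() );  // Forward/backward substitution; b now contains the solution x
   \endcode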
// // // \n \subsection lapack_lu_substitution LU-based Substitution // // The following functions provide an interface for the LAPACK functions \c sgetrs(), \c dgetrs(), // \c cgetrs(), and \c zgetrs(), which perform the substitution step for a general matrix that has // already been decomposed by an \ref lapack_lu_decomposition : \code namespace blaze { void getrs( char trans, int n, int nrhs, const float* A, int lda, const int* ipiv, float* B, int ldb, int* info ); void getrs( char trans, int n, int nrhs, const double* A, int lda, const int* ipiv, double* B, int ldb, int* info ); void getrs( char trans, int n, int nrhs, const complex<float>* A, int lda, const int* ipiv, complex<float>* B, int ldb, int* info ); void getrs( char trans, int n, int nrhs, const complex<double>* A, int lda, const int* ipiv, complex<double>* B, int ldb, int* info ); template< typename MT, bool SO, typename VT, bool TF > void getrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char trans, const int* ipiv ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void getrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char trans, const int* ipiv ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the solution(s) // of the linear system of equations. The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a trans argument is neither 'N' nor 'T' nor 'C'; // - ... the sizes of the two given matrices do not match. // // The first four functions report failure via the \c info argument, the last two functions throw // a \a std::invalid_argument exception in case of an error. // // // \n \subsection lapack_ldlt_substitution LDLT-based Substitution // // The following functions provide an interface for the LAPACK functions \c ssytrs(), \c dsytrs(), // \c csytrs(), and \c zsytrs(), which perform the substitution step for a symmetric indefinite // matrix that has already been decomposed by an \ref lapack_ldlt_decomposition : \code namespace blaze { void sytrs( char uplo, int n, int nrhs, const float* A, int lda, const int* ipiv, float* B, int ldb, int* info ); void sytrs( char uplo, int n, int nrhs, const double* A, int lda, const int* ipiv, double* B, int ldb, int* info ); void sytrs( char uplo, int n, int nrhs, const complex<float>* A, int lda, const int* ipiv, complex<float>* B, int ldb, int* info ); void sytrs( char uplo, int n, int nrhs, const complex<double>* A, int lda, const int* ipiv, complex<double>* B, int ldb, int* info ); template< typename MT, bool SO, typename VT, bool TF > void sytrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, const int* ipiv ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void sytrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, const int* ipiv ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the solution(s) // of the linear system of equations. The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the sizes of the two given matrices do not match. // // The first four functions report failure via the \c info argument, the last two functions throw // a \a std::invalid_argument exception in case of an error.
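//
// The following minimal sketch combines the \c sytrf() decomposition with the \c sytrs()
// substitution to solve a symmetric indefinite system. Sizes, contents, and the choice of
// \a uplo are assumptions for this example:

   \code
   blaze::DynamicMatrix<double,blaze::columnMajor> A( 4UL, 4UL );
   blaze::DynamicVector<double> b( 4UL );
   // ... Initializing the symmetric system matrix and the right-hand side

   const std::unique_ptr<int[]> ipiv( new int[4UL] );

   sytrf( A, 'L', ipiv.get() );     // LDLT (Bunch-Kaufman) decomposition of A
   sytrs( A, b, 'L', ipiv.get() );  // b now contains the solution of A*x=b
   \endcode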
// // // \n \subsection lapack_ldlh_substitution LDLH-based Substitution // // The following functions provide an interface for the LAPACK functions \c chetrs() and \c zhetrs(), // which perform the substitution step for an Hermitian indefinite matrix that has already been // decomposed by an \ref lapack_ldlh_decomposition : \code namespace blaze { void hetrs( char uplo, int n, int nrhs, const complex<float>* A, int lda, const int* ipiv, complex<float>* B, int ldb, int* info ); void hetrs( char uplo, int n, int nrhs, const complex<double>* A, int lda, const int* ipiv, complex<double>* B, int ldb, int* info ); template< typename MT, bool SO, typename VT, bool TF > void hetrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, const int* ipiv ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void hetrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, const int* ipiv ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the solution(s) // of the linear system of equations. The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the sizes of the two given matrices do not match. // // The first two functions report failure via the \c info argument, the last two functions throw // a \a std::invalid_argument exception in case of an error. // // // \n \subsection lapack_llh_substitution Cholesky-based Substitution // // The following functions provide an interface for the LAPACK functions \c spotrs(), \c dpotrs(), // \c cpotrs(), and \c zpotrs(), which perform the substitution step for a positive definite matrix // that has already been decomposed by an \ref lapack_llh_decomposition : \code namespace blaze { void potrs( char uplo, int n, int nrhs, const float* A, int lda, float* B, int ldb, int* info ); void potrs( char uplo, int n, int nrhs, const double* A, int lda, double* B, int ldb, int* info ); void potrs( char uplo, int n, int nrhs, const complex<float>* A, int lda, complex<float>* B, int ldb, int* info ); void potrs( char uplo, int n, int nrhs, const complex<double>* A, int lda, complex<double>* B, int ldb, int* info ); template< typename MT, bool SO, typename VT, bool TF > void potrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void potrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the solution(s) // of the linear system of equations. The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the sizes of the two given matrices do not match. // // The first four functions report failure via the \c info argument, the last two functions throw // a \a std::invalid_argument exception in case of an error.
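//
// Analogously, a positive definite system can be solved by combining \c potrf() and \c potrs().
// Again, sizes, contents, and the choice of \a uplo are mere assumptions for this example:

   \code
   blaze::DynamicMatrix<double,blaze::columnMajor> A( 4UL, 4UL );
   blaze::DynamicVector<double> b( 4UL );
   // ... Initializing the positive definite system matrix and the right-hand side

   potrf( A, 'L' );     // Cholesky (LLH) decomposition of A
   potrs( A, b, 'L' );  // b now contains the solution of A*x=b
   \endcode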
// // // \n \subsection lapack_triangular_substitution Substitution for Triangular Matrices // // The following functions provide an interface for the LAPACK functions \c strtrs(), \c dtrtrs(), // \c ctrtrs(), and \c ztrtrs(), which perform the substitution step for a triangular matrix: \code namespace blaze { void trtrs( char uplo, char trans, char diag, int n, int nrhs, const float* A, int lda, float* B, int ldb, int* info ); void trtrs( char uplo, char trans, char diag, int n, int nrhs, const double* A, int lda, double* B, int ldb, int* info ); void trtrs( char uplo, char trans, char diag, int n, int nrhs, const complex<float>* A, int lda, complex<float>* B, int ldb, int* info ); void trtrs( char uplo, char trans, char diag, int n, int nrhs, const complex<double>* A, int lda, complex<double>* B, int ldb, int* info ); template< typename MT, bool SO, typename VT, bool TF > void trtrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, char trans, char diag ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void trtrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, char trans, char diag ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the solution(s) // of the linear system of equations. The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the given \a trans argument is neither 'N' nor 'T' nor 'C'; // - ... the given \a diag argument is neither 'U' nor 'N'; // - ... the sizes of the two given matrices do not match. // // The first four functions report failure via the \c info argument, the last two functions throw // a \a std::invalid_argument exception in case of an error. // // // \n \section lapack_linear_system_solver Linear System Solver // <hr> // // The following functions represent compound functions that perform both the decomposition step // as well as the substitution step to compute the solution to a system of linear equations. Note // that depending on the storage order of the system matrix and the given right-hand side the // functions solve different equation systems: // // Single right-hand side: // - \f$ A *x=b \f$ if \a A is column-major // - \f$ A^T*x=b \f$ if \a A is row-major // // Multiple right-hand sides: // - \f$ A *X =B \f$ if both \a A and \a B are column-major // - \f$ A^T*X =B \f$ if \a A is row-major and \a B is column-major // - \f$ A *X^T=B^T \f$ if \a A is column-major and \a B is row-major // - \f$ A^T*X^T=B^T \f$ if both \a A and \a B are row-major // // In this context the general system matrix \a A is a n-by-n matrix that has already been // factorized by the according decomposition function, \a x and \a b are n-dimensional vectors // and \a X and \a B are either row-major m-by-n matrices or column-major n-by-m matrices. 
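//
// As a brief preview of the LU-based solver described in the following subsection, the sketch
// below solves a system with multiple right-hand sides in a single call to \c gesv(). All sizes
// and contents are assumptions for this example:

   \code
   blaze::DynamicMatrix<double,blaze::columnMajor> A( 3UL, 3UL );  // The system matrix
   blaze::DynamicMatrix<double,blaze::columnMajor> B( 3UL, 5UL );  // Five right-hand sides
   // ... Initializing the matrices

   const std::unique_ptr<int[]> ipiv( new int[3UL] );

   gesv( A, B, ipiv.get() );  // A is decomposed in-place; B now contains the five solutions
   \endcode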
// // // \subsection lapack_lu_linear_system_solver LU-based Linear System Solver // // The following functions provide an interface for the LAPACK functions \c sgesv(), \c dgesv(), // \c cgesv(), and \c zgesv(), which combine an \ref lapack_lu_decomposition and the according // \ref lapack_lu_substitution : \code namespace blaze { void gesv( int n, int nrhs, float* A, int lda, int* ipiv, float* B, int ldb, int* info ); void gesv( int n, int nrhs, double* A, int lda, int* ipiv, double* B, int ldb, int* info ); void gesv( int n, int nrhs, complex<float>* A, int lda, int* ipiv, complex<float>* B, int ldb, int* info ); void gesv( int n, int nrhs, complex<double>* A, int lda, int* ipiv, complex<double>* B, int ldb, int* info ); template< typename MT, bool SO, typename VT, bool TF > void gesv( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, int* ipiv ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void gesv( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, int* ipiv ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the // solution(s) of the linear system of equations and \a A has been decomposed by means of an // \ref lapack_lu_decomposition. // // The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given system matrix is singular and not invertible. // // The first four functions report failure via the \c info argument, the last two functions throw a // \a std::invalid_argument exception in case of an error. // // // \n \subsection lapack_ldlt_linear_system_solver LDLT-based Linear System Solver // // The following functions provide an interface for the LAPACK functions \c ssysv(), \c dsysv(), // \c csysv(), and \c zsysv(), which combine an \ref lapack_ldlt_decomposition and the according // \ref lapack_ldlt_substitution : \code namespace blaze { void sysv( char uplo, int n, int nrhs, float* A, int lda, int* ipiv, float* B, int ldb, float* work, int lwork, int* info ); void sysv( char uplo, int n, int nrhs, double* A, int lda, int* ipiv, double* B, int ldb, double* work, int lwork, int* info ); void sysv( char uplo, int n, int nrhs, complex<float>* A, int lda, int* ipiv, complex<float>* B, int ldb, complex<float>* work, int lwork, int* info ); void sysv( char uplo, int n, int nrhs, complex<double>* A, int lda, int* ipiv, complex<double>* B, int ldb, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO, typename VT, bool TF > void sysv( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, int* ipiv ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void sysv( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, int* ipiv ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the // solution(s) of the linear system of equations and \a A has been decomposed by means of an // \ref lapack_ldlt_decomposition. // // The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the sizes of the two given matrices do not match; // - ... the given system matrix is singular and not invertible. // // The first four functions report failure via the \c info argument, the last two functions throw a // \a std::invalid_argument exception in case of an error.
// // // \n \subsection lapack_ldlh_linear_system_solver LDLH-based Linear System Solver // // The following functions provide an interface for the LAPACK functions \c chesv() and \c zhesv(), // which combine an \ref lapack_ldlh_decomposition and the according // \ref lapack_ldlh_substitution : \code namespace blaze { void hesv( char uplo, int n, int nrhs, complex<float>* A, int lda, int* ipiv, complex<float>* B, int ldb, complex<float>* work, int lwork, int* info ); void hesv( char uplo, int n, int nrhs, complex<double>* A, int lda, int* ipiv, complex<double>* B, int ldb, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO, typename VT, bool TF > void hesv( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, int* ipiv ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void hesv( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, int* ipiv ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the // solution(s) of the linear system of equations and \a A has been decomposed by means of an // \ref lapack_ldlh_decomposition. // // The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the sizes of the two given matrices do not match; // - ... the given system matrix is singular and not invertible. // // The first two functions report failure via the \c info argument, the last two functions throw a // \a std::invalid_argument exception in case of an error. // // // \n \subsection lapack_llh_linear_system_solver Cholesky-based Linear System Solver // // The following functions provide an interface for the LAPACK functions \c sposv(), \c dposv(), // \c cposv(), and \c zposv(), which combine an \ref lapack_llh_decomposition and the according // \ref lapack_llh_substitution : \code namespace blaze { void posv( char uplo, int n, int nrhs, float* A, int lda, float* B, int ldb, int* info ); void posv( char uplo, int n, int nrhs, double* A, int lda, double* B, int ldb, int* info ); void posv( char uplo, int n, int nrhs, complex<float>* A, int lda, complex<float>* B, int ldb, int* info ); void posv( char uplo, int n, int nrhs, complex<double>* A, int lda, complex<double>* B, int ldb, int* info ); template< typename MT, bool SO, typename VT, bool TF > void posv( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void posv( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the // solution(s) of the linear system of equations and \a A has been decomposed by means of an // \ref lapack_llh_decomposition. // // The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the sizes of the two given matrices do not match; // - ... the given system matrix is singular and not invertible. // // The first four functions report failure via the \c info argument, the last two functions throw a // \a std::invalid_argument exception in case of an error.
// // // \n \subsection lapack_triangular_linear_system_solver Linear System Solver for Triangular Matrices // // The following functions provide an interface for the LAPACK functions \c strsv(), \c dtrsv(), // \c ctrsv(), and \c ztrsv(): \code namespace blaze { void trsv( char uplo, char trans, char diag, int n, const float* A, int lda, float* x, int incX ); void trsv( char uplo, char trans, char diag, int n, const double* A, int lda, double* x, int incX ); void trsv( char uplo, char trans, char diag, int n, const complex<float>* A, int lda, complex<float>* x, int incX ); void trsv( char uplo, char trans, char diag, int n, const complex<double>* A, int lda, complex<double>* x, int incX ); template< typename MT, bool SO, typename VT, bool TF > void trsv( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, char trans, char diag ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b contains the // solution of the linear system of equations. // // The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the given \a trans argument is neither 'N' nor 'T' nor 'C'; // - ... the given \a diag argument is neither 'U' nor 'N'. // // The last function throws a \a std::invalid_argument exception in case of an error. Note that // none of the functions performs any test for singularity or near-singularity. Such tests // must be performed prior to calling these functions! // // // \n \section lapack_eigenvalues Eigenvalues/Eigenvectors // // \subsection lapack_eigenvalues_general General Matrices // // The following functions provide an interface for the LAPACK functions \c sgeev(), \c dgeev(), // \c cgeev(), and \c zgeev(), which compute the eigenvalues and optionally the eigenvectors of // the given general matrix: \code namespace blaze { void geev( char jobvl, char jobvr, int n, float* A, int lda, float* wr, float* wi, float* VL, int ldvl, float* VR, int ldvr, float* work, int lwork, int* info ); void geev( char jobvl, char jobvr, int n, double* A, int lda, double* wr, double* wi, double* VL, int ldvl, double* VR, int ldvr, double* work, int lwork, int* info ); void geev( char jobvl, char jobvr, int n, complex<float>* A, int lda, complex<float>* w, complex<float>* VL, int ldvl, complex<float>* VR, int ldvr, complex<float>* work, int lwork, float* rwork, int* info ); void geev( char jobvl, char jobvr, int n, complex<double>* A, int lda, complex<double>* w, complex<double>* VL, int ldvl, complex<double>* VR, int ldvr, complex<double>* work, int lwork, double* rwork, int* info ); template< typename MT, bool SO, typename VT, bool TF > void geev( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w ); template< typename MT1, bool SO1, typename MT2, bool SO2, typename VT, bool TF > void geev( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& VL, DenseVector<VT,TF>& w ); template< typename MT1, bool SO1, typename VT, bool TF, typename MT2, bool SO2 > void geev( DenseMatrix<MT1,SO1>& A, DenseVector<VT,TF>& w, DenseMatrix<MT2,SO2>& VR ); template< typename MT1, bool SO1, typename MT2, bool SO2, typename VT, bool TF, typename MT3, bool SO3 > void geev( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& VL, DenseVector<VT,TF>& w, DenseMatrix<MT3,SO3>& VR ); } // namespace blaze \endcode // The complex eigenvalues of the given matrix \a A are returned in the given vector \a w.
// Please note that no order of eigenvalues can be assumed, except that complex conjugate pairs // of eigenvalues appear consecutively with the eigenvalue having the positive imaginary part // first. // // If \a VR is provided as an argument, the right eigenvectors are returned in the rows of \a VR // in case \a VR is a row-major matrix and in the columns of \a VR in case \a VR is a column-major // matrix. The right eigenvector \f$v[j]\f$ of \a A satisfies \f[ A * v[j] = lambda[j] * v[j], \f] // where \f$lambda[j]\f$ is its eigenvalue. // // If \a VL is provided as an argument, the left eigenvectors are returned in the rows of \a VL // in case \a VL is a row-major matrix and in the columns of \a VL in case \a VL is a column-major // matrix. The left eigenvector \f$u[j]\f$ of \a A satisfies \f[ u[j]^{H} * A = lambda[j] * u[j]^{H}, \f] // where \f$u[j]^{H}\f$ denotes the conjugate transpose of \f$u[j]\f$. // // \a w, \a VL, and \a VR are resized to the correct dimensions (if possible and necessary). The // functions fail if ... // // - ... the given matrix \a A is not a square matrix; // - ... the given matrix \a VL is a fixed size matrix and the dimensions don't match; // - ... the given vector \a w is a fixed size vector and the size doesn't match; // - ... the given matrix \a VR is a fixed size matrix and the dimensions don't match; // - ... the eigenvalue computation fails. // // The first four functions report failure via the \c info argument, the last four functions throw // an exception in case of an error. // // // \n \subsection lapack_eigenvalues_symmetric Symmetric Matrices // // The following functions provide an interface for the LAPACK functions \c ssyev() and \c dsyev(), // which compute the eigenvalues and eigenvectors of the given symmetric matrix: \code namespace blaze { void syev( char jobz, char uplo, int n, float* A, int lda, float* w, float* work, int lwork, int* info ); void syev( char jobz, char uplo, int n, double* A, int lda, double* w, double* work, int lwork, int* info ); template< typename MT, bool SO, typename VT, bool TF > void syev( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char jobz, char uplo ); } // namespace blaze \endcode // Alternatively, the following functions can be used, which provide an interface to the LAPACK // functions \c ssyevd() and \c dsyevd(). In contrast to the \c syev() functions they use a // divide-and-conquer strategy for the computation of the left and right eigenvectors: \code namespace blaze { void syevd( char jobz, char uplo, int n, float* A, int lda, float* w, float* work, int lwork, int* iwork, int liwork, int* info ); void syevd( char jobz, char uplo, int n, double* A, int lda, double* w, double* work, int lwork, int* iwork, int liwork, int* info ); template< typename MT, bool SO, typename VT, bool TF > void syevd( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char jobz, char uplo ); } // namespace blaze \endcode // The real eigenvalues are returned in ascending order in the given vector \a w. \a w is resized // to the correct size (if possible and necessary). In case \a A is a row-major matrix, the left // eigenvectors are returned in the rows of \a A, in case \a A is a column-major matrix, the right // eigenvectors are returned in the columns of \a A. // // The functions fail if ... // // - ... the given matrix \a A is not a square matrix; // - ... the given vector \a w is a fixed size vector and the size doesn't match; // - ... the given \a jobz argument is neither \c 'V' nor \c 'N'; // - ... 
the given \a uplo argument is neither \c 'L' nor \c 'U'; // - ... the eigenvalue computation fails. // // The first two functions report failure via the \c info argument, the last function throws an // exception in case of an error. // // Via the following functions, which wrap the LAPACK functions \c ssyevx() and \c dsyevx(), it // is possible to compute a subset of eigenvalues and/or eigenvectors of a symmetric matrix: \code namespace blaze { void syevx( char jobz, char range, char uplo, int n, float* A, int lda, float vl, float vu, int il, int iu, float abstol, int* m, float* w, float* Z, int ldz, float* work, int lwork, int* iwork, int* ifail, int* info ); void syevx( char jobz, char range, char uplo, int n, double* A, int lda, double vl, double vu, int il, int iu, double abstol, int* m, double* w, double* Z, int ldz, double* work, int lwork, int* iwork, int* ifail, int* info ); template< typename MT, bool SO, typename VT, bool TF > size_t syevx( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char uplo ); template< typename MT, bool SO, typename VT, bool TF, typename ST > size_t syevx( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char uplo, ST low, ST upp ); template< typename MT1, bool SO1, typename VT, bool TF, typename MT2, bool SO2 > size_t syevx( DenseMatrix<MT1,SO1>& A, DenseVector<VT,TF>& w, DenseMatrix<MT2,SO2>& Z, char uplo ); template< typename MT1, bool SO1, typename VT, bool TF, typename MT2, bool SO2, typename ST > size_t syevx( DenseMatrix<MT1,SO1>& A, DenseVector<VT,TF>& w, DenseMatrix<MT2,SO2>& Z, char uplo, ST low, ST upp ); } // namespace blaze \endcode // The number of eigenvalues to be computed is specified by the lower bound \c low and the upper // bound \c upp, which either form an integral or a floating point range. // // In case \a low and \a upp are of integral type, the function computes all eigenvalues in the // index range \f$[low..upp]\f$. The \a num resulting real eigenvalues are stored in ascending // order in the given vector \a w, which is either resized (if possible) or expected to be a // \a num-dimensional vector. The eigenvectors are returned in the rows of \a Z in case \a Z is // row-major matrix and in the columns of \a Z in case \a Z is a column-major matrix. \a Z is // resized (if possible) or expected to be a \a num-by-\a n row-major matrix or a \a n-by-\a num // column-major matrix. // // In case \a low and \a upp are of floating point type, the function computes all eigenvalues // in the half-open interval \f$(low..upp]\f$. The resulting real eigenvalues are stored in // ascending order in the given vector \a w, which is either resized (if possible) or expected // to be an \a n-dimensional vector. The eigenvectors are returned in the rows of \a Z in case // \a Z is a row-major matrix and in the columns of \a Z in case \a Z is a column-major matrix. // \a Z is resized (if possible) or expected to be a \a n-by-\a n matrix. // // The functions fail if ... // // - ... the given matrix \a A is not a square matrix; // - ... the given vector \a w is a fixed size vector and the size doesn't match; // - ... the given matrix \a Z is a fixed size matrix and the dimensions don't match; // - ... the given \a uplo argument is neither \c 'L' nor \c 'U'; // - ... the eigenvalue computation fails. // // The first two functions report failure via the \c info argument, the last four functions throw // an exception in case of an error. 
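//
// For illustration, a minimal sketch of computing a subset of eigenvalues of a symmetric matrix
// via the \c syevx() wrapper (assuming that a LAPACK library is linked; the chosen interval is
// only an example):

   \code
   blaze::DynamicMatrix<double,blaze::rowMajor> A( 5UL, 5UL );  // The symmetric system matrix
   blaze::DynamicVector<double> w;                              // The vector for the real eigenvalues
   // ... Resizing and initialization of the symmetric matrix A

   const size_t num = blaze::syevx( A, w, 'L', 0.0, 10.0 );  // All eigenvalues in the half-open interval (0..10]
   \endcode

// The returned value \c num reports how many eigenvalues have been found in the given range.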
// // // \n \subsection lapack_eigenvalues_hermitian Hermitian Matrices // // The following functions provide an interface for the LAPACK functions \c cheev() and \c zheev(), // which compute the eigenvalues and eigenvectors of the given Hermitian matrix: \code namespace blaze { void heev( char jobz, char uplo, int n, complex<float>* A, int lda, float* w, complex<float>* work, int lwork, float* rwork, int* info ); void heev( char jobz, char uplo, int n, complex<double>* A, int lda, double* w, complex<double>* work, int lwork, double* rwork, int* info ); template< typename MT, bool SO, typename VT, bool TF > void heev( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char jobz, char uplo ); } // namespace blaze \endcode // Alternatively, the following functions can be used, which provide an interface to the LAPACK // functions \c cheevd() and \c zheevd(). In contrast to the \c heev() functions they use a // divide-and-conquer strategy for the computation of the left and right eigenvectors: \code namespace blaze { void heevd( char jobz, char uplo, int n, complex<float>* A, int lda, float* w, complex<float>* work, int lwork, float* rwork, int lrwork, int* iwork, int liwork, int* info ); void heevd( char jobz, char uplo, int n, complex<double>* A, int lda, double* w, complex<double>* work, int lwork, double* rwork, int lrwork, int* iwork, int liwork, int* info ); template< typename MT, bool SO, typename VT, bool TF > void heevd( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char jobz, char uplo ); } // namespace blaze \endcode // The real eigenvalues are returned in ascending order in the given vector \a w. \a w is resized // to the correct size (if possible and necessary). In case \a A is a row-major matrix, the left // eigenvectors are returned in the rows of \a A, in case \a A is a column-major matrix, the right // eigenvectors are returned in the columns of \a A. // // The functions fail if ... // // - ... the given matrix \a A is not a square matrix; // - ... the given vector \a w is a fixed size vector and the size doesn't match; // - ... the given \a jobz argument is neither \c 'V' nor \c 'N'; // - ... the given \a uplo argument is neither \c 'L' nor \c 'U'; // - ... the eigenvalue computation fails. // // The first two functions report failure via the \c info argument, the last function throws an // exception in case of an error.
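//
// For illustration, a minimal sketch of computing all eigenvalues and eigenvectors of a
// Hermitian matrix via the \c heev() wrapper (assuming that a LAPACK library is linked):

   \code
   blaze::DynamicMatrix< std::complex<double>, blaze::rowMajor > A( 4UL, 4UL );  // The Hermitian system matrix
   blaze::DynamicVector<double> w;                                               // The vector for the real eigenvalues
   // ... Resizing and initialization

   blaze::heev( A, w, 'V', 'L' );  // 'V': compute the eigenvectors (returned in A), 'L': use the lower part of A
   \endcode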
// // Via the following functions, which wrap the LAPACK functions \c cheevx() and \c zheevx(), it // is possible to compute a subset of eigenvalues and/or eigenvectors of an Hermitian matrix: \code namespace blaze { void heevx( char jobz, char range, char uplo, int n, complex<float>* A, int lda, float vl, float vu, int il, int iu, float abstol, int* m, float* w, complex<float>* Z, int ldz, complex<float>* work, int lwork, float* rwork, int* iwork, int* ifail, int* info ); void heevx( char jobz, char range, char uplo, int n, complex<double>* A, int lda, double vl, double vu, int il, int iu, double abstol, int* m, double* w, complex<double>* Z, int ldz, complex<double>* work, int lwork, double* rwork, int* iwork, int* ifail, int* info ); template< typename MT, bool SO, typename VT, bool TF > size_t heevx( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char uplo ); template< typename MT, bool SO, typename VT, bool TF, typename ST > size_t heevx( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char uplo, ST low, ST upp ); template< typename MT1, bool SO1, typename VT, bool TF, typename MT2, bool SO2 > size_t heevx( DenseMatrix<MT1,SO1>& A, DenseVector<VT,TF>& w, DenseMatrix<MT2,SO2>& Z, char uplo ); template< typename MT1, bool SO1, typename VT, bool TF, typename MT2, bool SO2, typename ST > size_t heevx( DenseMatrix<MT1,SO1>& A, DenseVector<VT,TF>& w, DenseMatrix<MT2,SO2>& Z, char uplo, ST low, ST upp ); } // namespace blaze \endcode // The number of eigenvalues to be computed is specified by the lower bound \c low and the upper // bound \c upp, which either form an integral or a floating point range. // // In case \a low and \a upp are of integral type, the function computes all eigenvalues in the // index range \f$[low..upp]\f$. The \a num resulting real eigenvalues are stored in ascending // order in the given vector \a w, which is either resized (if possible) or expected to be a // \a num-dimensional vector. The eigenvectors are returned in the rows of \a Z in case \a Z is // row-major matrix and in the columns of \a Z in case \a Z is a column-major matrix. \a Z is // resized (if possible) or expected to be a \a num-by-\a n row-major matrix or a \a n-by-\a num // column-major matrix. // // In case \a low and \a upp are of floating point type, the function computes all eigenvalues // in the half-open interval \f$(low..upp]\f$. The resulting real eigenvalues are stored in // ascending order in the given vector \a w, which is either resized (if possible) or expected // to be an \a n-dimensional vector. The eigenvectors are returned in the rows of \a Z in case // \a Z is a row-major matrix and in the columns of \a Z in case \a Z is a column-major matrix. // \a Z is resized (if possible) or expected to be a \a n-by-\a n matrix. // // The functions fail if ... // // - ... the given matrix \a A is not a square matrix; // - ... the given vector \a w is a fixed size vector and the size doesn't match; // - ... the given matrix \a Z is a fixed size matrix and the dimensions don't match; // - ... the given \a uplo argument is neither \c 'L' nor \c 'U'; // - ... the eigenvalue computation fails. // // The first two functions report failure via the \c info argument, the last four functions throw // an exception in case of an error. 
// // // \n \section lapack_singular_values Singular Values/Singular Vectors // // The following functions provide an interface for the LAPACK functions \c sgesvd(), \c dgesvd(), // \c cgesvd(), and \c zgesvd(), which perform a singular value decomposition (SVD) on the given // general matrix: \code namespace blaze { void gesvd( char jobu, char jobv, int m, int n, float* A, int lda, float* s, float* U, int ldu, float* V, int ldv, float* work, int lwork, int* info ); void gesvd( char jobu, char jobv, int m, int n, double* A, int lda, double* s, double* U, int ldu, double* V, int ldv, double* work, int lwork, int* info ); void gesvd( char jobu, char jobv, int m, int n, complex<float>* A, int lda, float* s, complex<float>* U, int ldu, complex<float>* V, int ldv, complex<float>* work, int lwork, float* rwork, int* info ); void gesvd( char jobu, char jobv, int m, int n, complex<double>* A, int lda, double* s, complex<double>* U, int ldu, complex<double>* V, int ldv, complex<double>* work, int lwork, double* rwork, int* info ); template< typename MT, bool SO, typename VT, bool TF > void gesvd( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& s, char jobu, char jobv ); template< typename MT1, bool SO, typename MT2, typename VT, bool TF > void gesvd( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, char jobu, char jobv ); template< typename MT1, bool SO, typename VT, bool TF, typename MT2 > void gesvd( DenseMatrix<MT1,SO>& A, DenseVector<VT,TF>& s, DenseMatrix<MT2,SO>& V, char jobu, char jobv ); template< typename MT1, bool SO, typename MT2, typename VT, bool TF, typename MT3 > void gesvd( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, DenseMatrix<MT3,SO>& V, char jobu, char jobv ); } // namespace blaze \endcode // Alternatively, the following functions can be used, which provide an interface to the LAPACK // functions \c sgesdd(), \c dgesdd(), \c cgesdd(), and \c zgesdd(). 
In contrast to the \c gesvd() // functions they compute the singular value decomposition (SVD) of the given general matrix by // applying a divide-and-conquer strategy for the computation of the left and right singular // vectors: \code namespace blaze { void gesdd( char jobz, int m, int n, float* A, int lda, float* s, float* U, int ldu, float* V, int ldv, float* work, int lwork, int* iwork, int* info ); void gesdd( char jobz, int m, int n, double* A, int lda, double* s, double* U, int ldu, double* V, int ldv, double* work, int lwork, int* iwork, int* info ); void gesdd( char jobz, int m, int n, complex<float>* A, int lda, float* s, complex<float>* U, int ldu, complex<float>* V, int ldv, complex<float>* work, int lwork, float* rwork, int* iwork, int* info ); void gesdd( char jobz, int m, int n, complex<double>* A, int lda, double* s, complex<double>* U, int ldu, complex<double>* V, int ldv, complex<double>* work, int lwork, double* rwork, int* iwork, int* info ); template< typename MT, bool SO, typename VT, bool TF > void gesdd( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& s ); template< typename MT1, bool SO, typename MT2, typename VT, bool TF > void gesdd( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, char jobz ); template< typename MT1, bool SO, typename MT2, typename VT, bool TF > void gesdd( DenseMatrix<MT1,SO>& A, DenseVector<VT,TF>& s, DenseMatrix<MT2,SO>& V, char jobz ); template< typename MT1, bool SO, typename MT2, typename VT, bool TF, typename MT3 > void gesdd( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, DenseMatrix<MT3,SO>& V, char jobz ); } // namespace blaze \endcode // The resulting decomposition has the form \f[ A = U \cdot S \cdot V, \f] // where \a S is a \a m-by-\a n matrix, which is zero except for its min(\a m,\a n) diagonal // elements, \a U is an \a m-by-\a m orthogonal matrix, and \a V is a \a n-by-\a n orthogonal // matrix. The diagonal elements of \a S are the singular values of \a A, the first min(\a m,\a n) // columns of \a U and rows of \a V are the left and right singular vectors of \a A, respectively. // // The resulting min(\a m,\a n) real and non-negative singular values are returned in descending // order in the vector \a s, which is resized to the correct size (if possible and necessary). 
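//
// For illustration, a minimal sketch of a singular value decomposition via the \c gesdd()
// wrapper (assuming that a LAPACK library is linked; the job character \c 'S' is assumed to
// request the reduced set of min(\a m,\a n) singular vectors):

   \code
   blaze::DynamicMatrix<double,blaze::rowMajor> A( 5UL, 3UL );  // The general m-by-n matrix
   blaze::DynamicMatrix<double,blaze::rowMajor> U;              // The matrix for the left singular vectors
   blaze::DynamicVector<double>                 s;              // The vector for the singular values
   blaze::DynamicMatrix<double,blaze::rowMajor> V;              // The matrix for the right singular vectors
   // ... Resizing and initialization

   blaze::gesdd( A, U, s, V, 'S' );  // On success, s contains the singular values in descending order
   \endcode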
// // Via the following functions, which wrap the LAPACK functions \c sgesvdx(), \c dgesvdx(), // \c cgesvdx(), and \c zgesvdx(), it is possible to compute a subset of singular values and/or // vectors: \code namespace blaze { void gesvdx( char jobu, char jobv, char range, int m, int n, float* A, int lda, float vl, float vu, int il, int iu, int* ns, float* s, float* U, int ldu, float* V, int ldv, float* work, int lwork, int* iwork, int* info ); void gesvdx( char jobu, char jobv, char range, int m, int n, double* A, int lda, double vl, double vu, int il, int iu, int* ns, double* s, double* U, int ldu, double* V, int ldv, double* work, int lwork, int* iwork, int* info ); void gesvdx( char jobu, char jobv, char range, int m, int n, complex<float>* A, int lda, float vl, float vu, int il, int iu, int* ns, float* s, complex<float>* U, int ldu, complex<float>* V, int ldv, complex<float>* work, int lwork, float* rwork, int* iwork, int* info ); void gesvdx( char jobu, char jobv, char range, int m, int n, complex<double>* A, int lda, double vl, double vu, int il, int iu, int* ns, double* s, complex<double>* U, int ldu, complex<double>* V, int ldv, complex<double>* work, int lwork, double* rwork, int* iwork, int* info ); template< typename MT, bool SO, typename VT, bool TF > size_t gesvdx( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& s ); template< typename MT, bool SO, typename VT, bool TF, typename ST > size_t gesvdx( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& s, ST low, ST upp ); template< typename MT1, bool SO, typename MT2, typename VT, bool TF > size_t gesvdx( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s ); template< typename MT1, bool SO, typename MT2, typename VT, bool TF, typename ST > size_t gesvdx( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, ST low, ST upp ); template< typename MT1, bool SO, typename VT, bool TF, typename MT2 > size_t gesvdx( DenseMatrix<MT1,SO>& A, DenseVector<VT,TF>& s, DenseMatrix<MT2,SO>& V ); template< typename MT1, bool SO, typename VT, bool TF, typename MT2, typename ST > size_t gesvdx( DenseMatrix<MT1,SO>& A, DenseVector<VT,TF>& s, DenseMatrix<MT2,SO>& V, ST low, ST upp ); template< typename MT1, bool SO, typename MT2, typename VT, bool TF, typename MT3 > size_t gesvdx( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, DenseMatrix<MT3,SO>& V ); template< typename MT1, bool SO, typename MT2, typename VT, bool TF, typename MT3, typename ST > size_t gesvdx( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, DenseMatrix<MT3,SO>& V, ST low, ST upp ); } // namespace blaze \endcode // The number of singular values to be computed is specified by the lower bound \a low and the // upper bound \a upp, which either form an integral or a floating point range. // // In case \a low and \a upp are of integral type, the function computes all singular values // in the index range \f$[low..upp]\f$. The \a num resulting real and non-negative singular values // are stored in descending order in the given vector \a s, which is either resized (if possible) // or expected to be a \a num-dimensional vector. The resulting left singular vectors are stored // in the given matrix \a U, which is either resized (if possible) or expected to be an // \a m-by-\a num matrix. The resulting right singular vectors are stored in the given matrix \a V, // which is either resized (if possible) or expected to be a \a num-by-\a n matrix.
// // In case \a low and \a upp are of floating point type, the function computes all singular values // in the half-open interval \f$(low..upp]\f$. The resulting real and non-negative singular values // are stored in descending order in the given vector \a s, which is either resized (if possible) // or expected to be a min(\a m,\a n)-dimensional vector. The resulting left singular vectors are // stored in the given matrix \a U, which is either resized (if possible) or expected to be a // \a m-by-min(\a m,\a n) matrix. The resulting right singular vectors are stored in the given // matrix \a V, which is either resized (if possible) or expected to be a min(\a m,\a n)-by-\a n // matrix. // // The functions fail if ... // // - ... the given matrix \a U is a fixed size matrix and the dimensions don't match; // - ... the given vector \a s is a fixed size vector and the size doesn't match; // - ... the given matrix \a V is a fixed size matrix and the dimensions don't match; // - ... the given scalar values don't form a proper range; // - ... the singular value decomposition fails. // // The first four functions report failure via the \c info argument, the remaining functions throw // an exception in case of an error. // // // \n Previous: \ref blas_functions &nbsp; &nbsp; Next: \ref block_vectors_and_matrices \n */ //************************************************************************************************* //**Block Vectors and Matrices********************************************************************* /*!\page block_vectors_and_matrices Block Vectors and Matrices // // \tableofcontents // // // \n \section block_vectors_and_matrices_general General Concepts // <hr> // // In addition to fundamental element types, the \b Blaze library supports vectors and matrices // with non-fundamental element type. For instance, it is possible to define block matrices by // using a matrix type as the element type: \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::rowMajor; using blaze::columnVector; DynamicMatrix< DynamicMatrix<double,rowMajor>, rowMajor > A; DynamicVector< DynamicVector<double,columnVector >, columnVector > x, y; // ... Resizing and initialization y = A * x; \endcode // The matrix/vector multiplication in this example runs fully parallel and uses vectorization // for every inner matrix/vector multiplication and vector addition. // // // \n \section block_vectors_and_matrices_pitfalls Pitfalls // <hr> // // The only thing to keep in mind when using non-fundamental element types is that all operations // between the elements have to be well defined. More specifically, the size of vector and matrix // elements has to match. The attempt to combine two non-matching elements results in either a // compilation error (in case of statically sized elements) or an exception (for dynamically sized // elements): \code DynamicVector< StaticVector<int,2UL> > a; DynamicVector< StaticVector<int,3UL> > b; DynamicVector< DynamicVector<int> > c( a + b ); // Compilation error: element size doesn't match \endcode // Therefore please don't forget that dynamically sized elements (e.g. \c blaze::DynamicVector, // \c blaze::HybridVector, \c blaze::DynamicMatrix, \c blaze::HybridMatrix, ...) need to be sized // accordingly upfront. 
// // // \n \section block_vectors_and_matrices_examples Examples // <hr> // // The first example demonstrates the multiplication between a statically sized block matrix // and a block vector: \code using namespace blaze; // ( ( 1 1 ) ( 2 2 ) ) ( ( 1 ) ) ( ( 10 ) ) // ( ( 1 1 ) ( 2 2 ) ) ( ( 1 ) ) ( ( 10 ) ) // ( ) * ( ) = ( ) // ( ( 3 3 ) ( 4 4 ) ) ( ( 2 ) ) ( ( 22 ) ) // ( ( 3 3 ) ( 4 4 ) ) ( ( 2 ) ) ( ( 22 ) ) using M2x2 = StaticMatrix<int,2UL,2UL,rowMajor>; using V2 = StaticVector<int,2UL,columnVector>; DynamicMatrix<M2x2,rowMajor> A{ { M2x2(1), M2x2(2) }, { M2x2(3), M2x2(4) } }; DynamicVector<V2,columnVector> x{ V2(1), V2(2) }; DynamicVector<V2,columnVector> y( A * x ); \endcode // The second example shows the multiplication between a compressed block matrix with blocks of // varying size and a compressed block vector: \code using namespace blaze; // ( ( 1 -2 3 ) ( 5 -1 ) ) ( ( 1 ) ) ( ( -3 ) ) // ( ( 4 1 0 ) ( 1 2 ) ) ( ( 0 ) ) ( ( 7 ) ) // ( ( 0 2 4 ) ( 3 1 ) ) ( ( 1 ) ) ( ( 3 ) ) // ( ) ( ) ( ) // ( ( 1 ) ) * ( ( 2 ) ) = ( ( 2 ) ) // ( ) ( ) ( ) // ( ( 0 -1 1 ) ( 1 0 ) ) ( ( -1 ) ) ( ( 0 ) ) // ( ( 2 -1 2 ) ( 0 1 ) ) ( ( 2 ) ) ( ( 6 ) ) using M3x3 = HybridMatrix<int,3UL,3UL,rowMajor>; using V3 = HybridVector<int,3UL,columnVector>; CompressedMatrix<M3x3,rowMajor> A( 3UL, 3UL, 5UL ); A(0,0) = M3x3{ { 1, -2, 3 }, { 4, 1, 0 }, { 0, 2, 4 } }; A(0,2) = M3x3{ { 5, -1 }, { 1, 2 }, { 3, 1 } }; A(1,1) = M3x3{ { 1 } }; A(2,0) = M3x3{ { 0, -1, 1 }, { 2, -1, 2 } }; A(2,2) = M3x3{ { 1, 0 }, { 0, 1 } }; CompressedVector<V3,columnVector> x( 3UL, 3UL ); x[0] = V3{ 1, 0, 1 }; x[1] = V3{ 2 }; x[2] = V3{ -1, 2 }; CompressedVector<V3,columnVector> y( A * x ); \endcode // \n Previous: \ref lapack_functions &nbsp; &nbsp; Next: \ref intra_statement_optimization \n */ //************************************************************************************************* //**Intra-Statement Optimization******************************************************************* /*!\page intra_statement_optimization Intra-Statement Optimization // // One of the prime features of the \b Blaze library is the automatic intra-statement optimization. // In order to optimize the overall performance of every single statement \b Blaze attempts to // rearrange the operands based on their types. For instance, the following addition of dense and // sparse vectors \code blaze::DynamicVector<double> d1, d2, d3; blaze::CompressedVector<double> s1; // ... Resizing and initialization d3 = d1 + s1 + d2; \endcode // is automatically rearranged and evaluated as \code // ... d3 = d1 + d2 + s1; // <- Note that s1 and d2 have been rearranged \endcode // This order of operands is highly favorable for the overall performance since the addition of // the two dense vectors \c d1 and \c d2 can be handled much more efficiently in a vectorized // fashion. // // This intra-statement optimization can have a tremendous effect on the performance of a statement. // Consider for instance the following computation: \code blaze::DynamicMatrix<double> A, B; blaze::DynamicVector<double> x, y; // ... Resizing and initialization y = A * B * x; \endcode // Since multiplications are evaluated from left to right, this statement would result in a // matrix/matrix multiplication, followed by a matrix/vector multiplication. However, if the // right subexpression is evaluated first, the performance can be dramatically improved since the // matrix/matrix multiplication can be avoided in favor of a second matrix/vector multiplication. 
// The \b Blaze library exploits this by automatically restructuring the expression such that the // right multiplication is evaluated first: \code // ... y = A * ( B * x ); \endcode // Note however that although this intra-statement optimization may result in a measurable or // even significant performance improvement, this behavior may be undesirable for several reasons, // for instance because of numerical stability. Therefore, in case the order of evaluation matters, // the best solution is to be explicit and to separate a statement into several statements: \code blaze::DynamicVector<double> d1, d2, d3; blaze::CompressedVector<double> s1; // ... Resizing and initialization d3 = d1 + s1; // Compute the dense vector/sparse vector addition first ... d3 += d2; // ... and afterwards add the second dense vector \endcode \code // ... blaze::DynamicMatrix<double> A, B, C; blaze::DynamicVector<double> x, y; // ... Resizing and initialization C = A * B; // Compute the left-hand side matrix-matrix multiplication first ... y = C * x; // ... before the right-hand side matrix-vector multiplication \endcode // Alternatively, it is also possible to use the \c eval() function to fix the order of evaluation: \code blaze::DynamicVector<double> d1, d2, d3; blaze::CompressedVector<double> s1; // ... Resizing and initialization d3 = d1 + eval( s1 + d2 ); \endcode \code blaze::DynamicMatrix<double> A, B; blaze::DynamicVector<double> x, y; // ... Resizing and initialization y = eval( A * B ) * x; \endcode // \n Previous: \ref block_vectors_and_matrices &nbsp; &nbsp; Next: \ref faq \n */ //************************************************************************************************* //**FAQ******************************************************************************************** /*!\page faq Frequently Asked Questions (FAQ) // // \tableofcontents // // // <hr> // \section faq_padding A StaticVector/StaticMatrix is larger than expected. Is this a bug? // // The size of a \c StaticVector, \c StaticMatrix, \c HybridVector, or \c HybridMatrix can // indeed be larger than expected: \code StaticVector<int,3> a; StaticMatrix<int,3,3> A; sizeof( a ); // Evaluates to 16, 32, or even 64, but not 12 sizeof( A ); // Evaluates to 48, 96, or even 144, but not 36 \endcode // In order to achieve the maximum possible performance the \b Blaze library tries to enable // SIMD vectorization even for small vectors. For that reason \b Blaze by default uses padding // elements for all dense vectors and matrices to guarantee that at least a single SIMD vector // can be loaded. 
Depending on the used SIMD technology, this can significantly increase the size // of a \c StaticVector, \c StaticMatrix, \c HybridVector or \c HybridMatrix: \code StaticVector<int,3> a; StaticMatrix<int,3,3> A; sizeof( a ); // Evaluates to 16 in case of SSE, 32 in case of AVX, and 64 in case of AVX-512 // (under the assumption that an integer occupies 4 bytes) sizeof( A ); // Evaluates to 48 in case of SSE, 96 in case of AVX, and 144 in case of AVX-512 // (under the assumption that an integer occupies 4 bytes) \endcode // The configuration file <tt>./blaze/config/Optimizations.h</tt> provides a compile time switch // that can be used to (de-)activate padding: \code #define BLAZE_USE_PADDING 1 \endcode // Alternatively it is possible to (de-)activate padding via command line or by defining this // symbol manually before including any \b Blaze header file: \code #define BLAZE_USE_PADDING 1 #include <blaze/Blaze.h> \endcode // If \c BLAZE_USE_PADDING is set to 1 padding is enabled for all dense vectors and matrices, if // it is set to 0 padding is disabled. Note however that disabling padding can considerably reduce // the performance of all dense vector and matrix operations! // // // <hr> // \section faq_alignment Despite disabling padding, a StaticVector/StaticMatrix is still larger than expected. Is this a bug? // // Despite disabling padding via the \c BLAZE_USE_PADDING compile time switch (see \ref faq_padding), // the size of a \c StaticVector, \c StaticMatrix, \c HybridVector, or \c HybridMatrix can still // be larger than expected: \code #define BLAZE_USE_PADDING 0 #include <blaze/Blaze.h> StaticVector<int,3> a; StaticVector<int,5> b; sizeof( a ); // Always evaluates to 12 sizeof( b ); // Evaluates to 32 with SSE (larger than expected) and to 20 with AVX or AVX-512 (expected) \endcode // The reason for this behavior is the used SIMD technology. If SSE is used, which provides 128 // bit wide registers, a single SIMD pack can usually hold 4 integers (128 bit divided by 32 bit). // Since the second vector contains enough elements, it is possible to benefit from vectorization. // However, SSE requires an alignment of 16 bytes, which ultimately results in a total size of // 32 bytes for the \c StaticVector (2 times 16 bytes due to 5 integer elements). If AVX or AVX-512 // is used, which provide 256 bit or 512 bit wide registers, a single SIMD vector can hold 8 or 16 // integers, respectively. In that case even the second vector does not hold enough elements to benefit from // vectorization, which is why \b Blaze does not enforce a 32 byte (for AVX) or even 64 byte // alignment (for AVX-512). // // It is possible to disable the vectorization entirely via the compile time switch in the // <tt>./blaze/config/Vectorization.h</tt> configuration file: \code #define BLAZE_USE_VECTORIZATION 1 \endcode // It is also possible to (de-)activate vectorization via command line or by defining this symbol // manually before including any \b Blaze header file: \code #define BLAZE_USE_VECTORIZATION 1 #include <blaze/Blaze.h> \endcode // In case the switch is set to 1, vectorization is enabled and the \b Blaze library is allowed // to use intrinsics and the necessary alignment to speed up computations. In case the switch is // set to 0, vectorization is disabled entirely and the \b Blaze library chooses default, // non-vectorized functionality for the operations. Note that deactivating the vectorization may // pose a severe performance limitation for a large number of operations!
// // // <hr> // \section faq_blas To which extent does Blaze make use of BLAS functions under the hood? // // Currently the only BLAS functions that are utilized by \b Blaze are the \c gemm() functions // for the multiplication of two dense matrices (i.e. \c sgemm(), \c dgemm(), \c cgemm(), and // \c zgemm()). All other operations are always and unconditionally performed by native \b Blaze // kernels. // // The \c BLAZE_BLAS_MODE config switch (see <tt>./blaze/config/BLAS.h</tt>) determines whether // \b Blaze is allowed to use BLAS kernels. If \c BLAZE_BLAS_MODE is set to 0 then \b Blaze // does not utilize the BLAS kernels and unconditionally uses its own custom kernels. If // \c BLAZE_BLAS_MODE is set to 1 then \b Blaze is allowed to choose between using BLAS kernels // or its own custom kernels. In case of the dense matrix multiplication this decision is based // on the size of the dense matrices. For large matrices, \b Blaze uses the BLAS kernels, for // small matrices it uses its own custom kernels. The threshold for this decision can be // configured via the \c BLAZE_DMATDMATMULT_THRESHOLD, \c BLAZE_DMATTDMATMULT_THRESHOLD, // \c BLAZE_TDMATDMATMULT_THRESHOLD and \c BLAZE_TDMATTDMATMULT_THRESHOLD config switches // (see <tt>./blaze/config/Thresholds.h</tt>). // // Please note that the extent to which \b Blaze uses BLAS kernels can change in future releases // of \b Blaze! // // // <hr> // \section faq_lapack To which extent does Blaze make use of LAPACK functions under the hood? // // \b Blaze uses LAPACK functions for matrix decomposition, matrix inversion, the computation of // determinants and eigenvalues, and the SVD. In contrast to the BLAS functionality (see // \ref faq_blas), you cannot disable LAPACK or switch to custom kernels. In case you try to // use any of these functionalities, but do not provide (i.e. link) a LAPACK library, you will // get link time errors. // // Please note that the extent to which \b Blaze uses LAPACK kernels can change in future releases // of \b Blaze! // // // <hr> // \section faq_compile_times The compile time is too high if I include <blaze/Blaze.h>. Can I reduce it? // // The include file <tt><blaze/Blaze.h></tt> includes the entire functionality of the \b Blaze // library, which by now is several hundred thousand lines of source code. That means that a lot // of source code has to be parsed whenever <tt><blaze/Blaze.h></tt> is encountered. However, it // is rare that everything is required within a single compilation unit. Therefore it is easily // possible to reduce compile times by including only those \b Blaze features that are used within // the compilation unit. For instance, instead of including <tt><blaze/Blaze.h></tt> it could be // enough to include <tt><blaze/math/DynamicVector.h></tt>, which would reduce the compilation // times by about 20%. // // Additionally, we take care to implement new \b Blaze functionality such that compile times // do not explode, and we try to reduce the compile times of existing features. Thus newer releases of // \b Blaze can also improve compile times.
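//
// For illustration, a minimal sketch of such a selective include (the exact set of headers
// depends on the \b Blaze features that are actually used in the compilation unit):

   \code
   // #include <blaze/Blaze.h>             // Includes the entire library
   #include <blaze/math/DynamicVector.h>   // Includes only the DynamicVector functionality

   int main()
   {
      blaze::DynamicVector<double> a{ 1.0, 2.0, 3.0 };
      blaze::DynamicVector<double> b{ 4.0, 5.0, 6.0 };
      blaze::DynamicVector<double> c( a + b );  // Dense vector addition works with this single include
   }
   \endcode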
// // \n Previous: \ref intra_statement_optimization &nbsp; &nbsp; Next: \ref issue_creation_guidelines \n */ //************************************************************************************************* //**Issue Creation Guidelines********************************************************************** /*!\page issue_creation_guidelines Issue Creation Guidelines // // \tableofcontents // // // One of the most important aspects of the \b Blaze project is the // <a href="https://bitbucket.org/blaze-lib/blaze/issues">issue management</a> on the official // \b Blaze Bitbucket page. We cordially invite all \b Blaze users to submit feature requests // and bug reports, as we believe that this is a significant part of making \b Blaze a better // library. However, we ask you to follow a small set of guidelines when creating an issue // to facilitate the issue management on our side and also to make issues more useful for users // of \b Blaze. // // // <hr> // \section issues_title Title // // The title is the most important detail of an issue. A well chosen title makes it easy to grasp // the idea of an issue and improves its discoverability. Therefore, please choose a title that // is ... // // - ... as descriptive as possible; // - ... as concise as possible; // - ... as unambiguous as possible. // // Also, please create a separate issue for each idea/problem/etc. A very general title or an // \"and\" in the title could be an indication that the issue is not specific enough and should // be split into several issues. // // \subsection issues_title_good_examples Good Examples // // - \"Provide support for AVX-512 SIMD operations\" // - \"Add support for the Boost Multiprecision Library\" // - \"Introduce reduction operations into Blaze\" // - \"Compilation error on KNL with -march=knl\" // // \subsection issues_title_bad_examples Bad Examples // // - \"Several requests\" (instead create separate issues for each single request) // - \"Improve the performance\" (instead specify which operation should perform better) // - \"Blaze library compilation error\" (instead try to be more specific) // // // <hr> // \section issues_description Description // // The description should help us to understand your idea or problem in as much detail as possible. // Also, it helps to clearly spell out your expectations (how a feature is supposed to work, how // the behavior should be, etc.). Please spend a couple of minutes to try to make the description // as comprehensive as possible. // // // <hr> // \section issues_assignee Assignee // // There is no need to assign the issue to a particular person. It is perfectly ok if you just // ignore this setting. // // // <hr> // \section issues_kind Kind of Issue // // There are four kinds of issues available in the Bitbucket issue tracker: \ref issues_kind_bug, // \ref issues_kind_enhancement, \ref issues_kind_proposal, and \ref issues_kind_task. In the // following we try to give guidelines on which kind to choose for a particular issue: // // \subsection issues_kind_bug Bug // // Please choose the category \ref issues_kind_bug if ... // // - ... you experience a compilation error despite your best efforts to get it right; // - ... you experience a crash/failure despite your best efforts to get it right; // - ... you experience problems when combining features; // - ... a feature does not work as specified/documented (i.e. can be considered broken). // // Please \b don't choose the category \ref issues_kind_bug if ... // // - ...
you feel a feature should work differently than it currently does (instead create a // \ref issues_kind_proposal with a convincing title and description); // - ... you are not sure how to use a feature (instead create an \ref issues_kind_enhancement // issue to extend the documentation); // - ... you are missing a feature (instead create a \ref issues_kind_proposal or // \ref issues_kind_enhancement issue). // // If you select the category \ref issues_kind_bug, please also try to provide a minimum example // that fails. That helps us to minimize the time to resolve the bug. // // As we try to keep \b Blaze bug-free, we will always prioritize bug issues. However, we will // also quickly close bug issues as \"wontfix\" if the described issue is not a bug (i.e. one of // the problems mentioned above). We will \b not relabel a bug issue to \ref issues_kind_enhancement // or \ref issues_kind_proposal, even if they would be reasonable extensions to \b Blaze. // // \subsection issues_kind_enhancement Enhancement // // Please choose the category \ref issues_kind_enhancement if ... // // - ... you need an add-on to an existing feature; // - ... you need an extension of an existing feature; // - ... you need an extended documentation for an existing feature. // // \ref issues_kind_enhancement is very similar to \ref issues_kind_proposal, so we don't mind // if an \ref issues_kind_enhancement is labeled as a \ref issues_kind_proposal or vice versa. // Just make sure you don't request an extension or new feature as a \ref issues_kind_bug. // // \subsection issues_kind_proposal Proposal // // Please choose the category \ref issues_kind_proposal if ... // // - ... you want to request a new feature; // - ... you want to change an existing feature. // // \ref issues_kind_proposal is very similar to \ref issues_kind_enhancement, so we don't mind if // a \ref issues_kind_proposal is labeled as an \ref issues_kind_enhancement or vice versa. Just // make sure you don't request an extension or new feature as a \ref issues_kind_bug. // // \subsection issues_kind_task Task // // Please choose the category \ref issues_kind_task if ... // // - ... you want us to do something not feature related; // - ... you have something else in mind which does not fall in the other three categories. // // // <hr> // \section issues_priority Priority // // Via the priority of an issue you can tell us how important the issue is to you. Therefore the // priority can have an influence on when we will deal with the issue. However, unfortunately we // don't have an infinite amount of time and we can not deal with an arbitrary amount of issues // at the same time. We will therefore take the priority into account, but mainly schedule the // issues based on impact to all \b Blaze users and the estimated time to resolve it. // // You can choose between \ref issues_priority_blocker, \ref issues_priority_critical, // \ref issues_priority_major, \ref issues_priority_minor, and \ref issues_priority_trivial. // // \subsection issues_priority_blocker Blocker // // Please choose a \ref issues_priority_blocker priority if ... // // - ... you cannot work with \b Blaze due to the described \ref issues_kind_bug; // - ... the \ref issues_kind_bug likely has an influence on \b all \b Blaze users. // // Please note that the categories \ref issues_kind_enhancement or \ref issues_kind_proposal // should never be a \ref issues_priority_blocker! 
// // \subsection issues_priority_critical Critical // // Please choose a \ref issues_priority_critical priority if ... // // - ... you can work around a \ref issues_kind_bug, but the workaround is (much) slower or awful; // - ... you cannot use \b Blaze without the proposed feature; // - ... you consider it to be essential for \b all \b Blaze users. // // \subsection issues_priority_major Major // // Please choose a \ref issues_priority_major priority if ... // // - ... a \ref issues_kind_bug or feature request is not \ref issues_priority_critical, but // still very important to you; // - ... you consider it to have a \ref issues_priority_major impact on most \b Blaze users. // // The \ref issues_priority_major category is the default setting in Bitbucket and we therefore // consider it as the default priority for issues. // // \subsection issues_priority_minor Minor // // Please choose a \ref issues_priority_minor priority if ... // // - ... a \ref issues_kind_bug does not affect many \b Blaze users; // - ... a feature request would only be useful for a small number of \b Blaze users; // - ... a feature would be nice to have, but is not particularly important. // // \subsection issues_priority_trivial Trivial // // Please choose a \ref issues_priority_trivial priority if ... // // - ... a \ref issues_kind_bug hardly affects anyone; // - ... a feature request would only be useful for very few \b Blaze users; // - ... the expected time to resolve an issue is very small. // // // <hr> // \section issues_attachment Attachments // // You can always provide us with additional information in the form of attachments. Feel free // to attach something to the issue if ... // // - ... it can help us to analyze a \ref issues_kind_bug; // - ... you have some source code that demonstrates a problem; // - ... you already have a working prototype that sketches the idea; // - ... you have additional resources that could help us. // // We appreciate anything that simplifies our work and speeds up our progress. // // \n Previous: \ref faq &nbsp; &nbsp; Next: \ref blaze_references \n */ //************************************************************************************************* //**Blaze References******************************************************************************* /*!\page blaze_references Blaze References // // In case you need references to the \b Blaze library (for papers or other publications), please // feel free to use one of the following references: \code @misc{blazelib, author = "Klaus {Iglberger}", title = "Blaze C++ Linear Algebra Library", howpublished = "https://bitbucket.org/blaze-lib", year = 2012 } \endcode \code @article{iglberger2012_1, author = "Klaus {Iglberger} and Georg {Hager} and Jan {Treibig} and Ulrich {R{\"u}de}", title = "Expression Templates Revisited: A Performance Analysis of Current Methodologies", journal = "SIAM Journal on Scientific Computing", year = 2012, volume = 34(2), pages = C42--C69 } \endcode \code @inproceedings{iglberger2012_2, author = "Klaus {Iglberger} and Georg {Hager} and Jan {Treibig} and Ulrich {R{\"u}de}", title = "High Performance Smart Expression Template Math Libraries", booktitle = "Proceedings of the 2nd International Workshop on New Algorithms and Programming Models for the Manycore Era (APMM 2012) at HPCS 2012", year = 2012 } \endcode // \n Previous: \ref issue_creation_guidelines */ //************************************************************************************************* #endif
//================================================================================================= /* * ! // \file blaze/Tutorial.h // \brief Tutorial of the Blaze library // * // Copyright (C) 2012-2018 Klaus Iglberger - All Rights Reserved // // * This file is part of the Blaze library. You can redistribute it and/or * modify it under // the terms of the New (Revised) BSD License. * Redistribution and use in source and binary // forms, with or without * modification, are permitted provided that the following conditions // are * met: // // 1. Redistributions of source code must retain the above * copyright notice, this list of // conditions and the following * disclaimer. // 2. Redistributions in binary form must reproduce the above * copyright notice, this list // of conditions and the following * disclaimer in the documentation and/or other materials // provided * with the distribution. // 3. Neither the names of the Blaze development * group nor the names of its contributors // may be used to endorse or * promote products derived from this software without specific // prior * written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT * HOLDERS AND CONTRIBUTORS "AS IS" AND ANY // EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES // OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO * EVENT // SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY * DIRECT, INDIRECT, // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED // TO, PROCUREMENT OF SUBSTITUTE * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR // BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN // ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH // DAMAGE. */ //================================================================================================= #ifndef _BLAZE_TUTORIAL_H_ #define _BLAZE_TUTORIAL_H_ //================================================================================================= // //BLAZE TUTORIAL // //================================================================================================= //**Mainpage *************************************************************************************** /* * !\mainpage // // \image html blaze300x150.jpg // // This is the API for * the \b Blaze high performance C++ math library. It gives a complete // * overview of the individual features and sublibraries of \b Blaze. To get a * first impression // on \b Blaze, the short \ref getting_started tutorial * is a good place to start. Afterwards, // the following long tutorial * covers the most important aspects of the \b Blaze math library. 
// The * tabs at the top of the page allow a direct access to the individual * modules, namespaces, // classes, and files of the \b Blaze library.\n\n // * // \section table_of_content Table of Contents // // <ul> // <li> \ref * configuration_and_installation </li> // <li> \ref getting_started </li> * // <li> \ref vectors // <ul> // <li> \ref vector_types * </li> // <li> \ref vector_operations </li> // </ul> // * </li> // <li> \ref matrices // <ul> // <li> \ref * matrix_types </li> // <li> \ref matrix_operations </li> // * </ul> // </li> // <li> \ref adaptors // <ul> // <li> * \ref adaptors_symmetric_matrices </li> // <li> \ref * adaptors_hermitian_matrices </li> // <li> \ref * adaptors_triangular_matrices </li> // </ul> // </li> // <li> * \ref views // <ul> // <li> \ref views_subvectors </li> // * <li> \ref views_element_selections </li> // <li> \ref * views_submatrices </li> // <li> \ref views_rows </li> // * <li> \ref views_row_selections </li> // <li> \ref views_columns * </li> // <li> \ref views_column_selections </li> // <li> * \ref views_bands </li> // </ul> // </li> // <li> \ref * arithmetic_operations // <ul> // <li> \ref addition </li> * // <li> \ref subtraction </li> // <li> \ref * scalar_multiplication </li> // <li> \ref * vector_vector_multiplication // <ul> // <li> * \ref componentwise_multiplication </li> // <li> \ref * inner_product </li> // <li> \ref outer_product </li> // * <li> \ref cross_product </li> // </ul> // </li> // * <li> \ref vector_vector_division </li> // <li> \ref * matrix_vector_multiplication </li> // <li> \ref * matrix_matrix_multiplication // <ul> // <li> * \ref schur_product </li> // <li> \ref matrix_product </li> * // </ul> // </li> // </ul> // </li> // * <li> \ref shared_memory_parallelization // <ul> // <li> * \ref hpx_parallelization </li> // <li> \ref * cpp_threads_parallelization </li> // <li> \ref * boost_threads_parallelization </li> // <li> \ref * openmp_parallelization </li> // <li> \ref serial_execution </li> * // </ul> // </li> // <li> \ref serialization // <ul> // * <li> \ref vector_serialization </li> // <li> \ref * matrix_serialization </li> // </ul> // </li> // <li> \ref * customization // <ul> // <li> \ref configuration_files * </li> // <li> \ref vector_and_matrix_customization // * <ul> // <li> \ref custom_data_members </li> // * <li> \ref custom_operations </li> // <li> \ref * custom_data_types </li> // </ul> // </li> // * <li> \ref error_reporting_customization </li> // </ul> // </li> * // <li> \ref blas_functions </li> // <li> \ref lapack_functions * </li> // <li> \ref block_vectors_and_matrices </li> // <li> \ref * intra_statement_optimization </li> // <li> \ref faq </li> // <li> * \ref issue_creation_guidelines </li> // <li> \ref blaze_references * </li> // </ul> */ //************************************************************************************************* //**Configuration and Installation ***************************************************************** /* * !\page configuration_and_installation Configuration and Installation // // * \tableofcontents // // // Since \b Blaze is a header-only library, setting * up the \b Blaze library on a particular system // is a fairly easy two * step process. In the following, this two step process is explained in // * detail, preceded only by a short summary of the requirements. 
// // // \n * \section requirements Requirements // <hr> // // For maximum performance * the \b Blaze library expects you to have a BLAS library installed // (<a * href="http://software.intel.com/en-us/articles/intel-mkl/">Intel MKL</a>, * // <a href="http://developer.amd.com/libraries/acml/">ACML</a>, // <a * href="http://math-atlas.sourceforge.net">Atlas</a>, // <a * href="http://www.tacc.utexas.edu/tacc-projects/gotoblas2">Goto</a>, ...). * If you don't // have a BLAS library installed on your system, \b Blaze * will still work and will not be reduced // in functionality, but * performance may be limited. Thus it is strongly recommended to install a * // BLAS library. // // Additionally, for computing the determinant of a * dense matrix, for the decomposition of dense // matrices, for the dense * matrix inversion, and for the computation of eigenvalues and singular // * values \b Blaze requires <a * href="https://en.wikipedia.org/wiki/LAPACK">LAPACK</a>. When either // of * these features is used it is necessary to link the LAPACK library to the * final executable. // If no LAPACK library is available the use of these * features will result in a linker error. // // Furthermore, it is possible * to use Boost threads to run numeric operations in parallel. In this // * case the Boost library is required to be installed on your system. It is * recommended to use the // newest Boost library available, but \b Blaze * requires at minimum the Boost version 1.54.0. If // you don't have Boost * installed on your system, you can download it for free from // <a * href="http://www.boost.org">www.boost.org</a>. // // // \n \section * step_1_installation Step 1: Installation // <hr> // // \subsection * step_1_cmake Installation via CMake // // The first step is the * installation of the \b Blaze header files. The most convenient way // to * do this is via <a href="https://cmake.org">CMake</a>. Linux and macOS * users can use the // following two lines to copy the \b Blaze headers in * the <tt>./blaze</tt> subdirectory to // the directory \c * ${CMAKE_INSTALL_PREFIX}/include and the package configuration files to // * \c ${CMAKE_INSTALL_PREFIX}/share/blaze/cmake. * * \code cmake -DCMAKE_INSTALL_PREFIX=/usr/local/ sudo make install \endcode * * // Windows users can do the same via the cmake-gui. Alternatively, it is * possible to include // \b Blaze by adding the following lines in any \c * CMakeLists.txt file: * * \code find_package( blaze ) if( blaze_FOUND ) add_library( blaze_target * INTERFACE ) target_link_libraries( blaze_target INTERFACE blaze::blaze ) * endif() \endcode * * // \n \subsection step_1_vcpkg Installation via the VC++ Packaging Tool // // * An alternate way to install \b Blaze for Windows users is Microsoft's // * <a href="https://github.com/Microsoft/vcpkg">VC++ Packaging Tool * (vcpkg)</a>. \b Blaze can // be installed via the command line: * * \code C:\src\vcpkg> .\vcpkg install blaze \endcode * * // The tool automatically downloads the latest \b Blaze release and copies * the header files to // the common include directory. Please note that * since \b Blaze is a header-only library the // attempt to install any * static or dynamic library will fail! 
// // \n \subsection * step_1_installation_unix Manual Installation on Linux/macOS // // Since \b * Blaze only consists of header files, the <tt>./blaze</tt> subdirectory can * be simply // copied to a standard include directory (note that this * requires root privileges): * * \code cp -r ./blaze /usr/local/include \endcode * * // Alternatively, on Unix-based machines (which includes Linux and Mac OS X) * the // \c CPLUS_INCLUDE_PATH environment variable can be set. The * specified directory will be // searched after any directories specified on * the command line with the option \c -I and // before the standard default * directories (such as \c /usr/local/include and \c /usr/include). // * Assuming a user named 'Jon', the environment variable can be set as * follows: * * \code CPLUS_INCLUDE_PATH=/usr/home/jon/blaze export CPLUS_INCLUDE_PATH * \endcode * * // Last but not least, the <tt>./blaze</tt> subdirectory can be explicitly * specified on the // command line. The following example demonstrates this * by means of the GNU C++ compiler: * * \code g++ -I/usr/home/jon/blaze -o BlazeTest BlazeTest.cpp \endcode * * // \n \subsection step_1_installation_windows Manual Installation on Windows * // // Windows doesn't have a standard include directory. Therefore the \b * Blaze header files can be // copied to any other directory or simply left * in the default \b Blaze directory. However, the // chosen include * directory has to be explicitly specified as include path. In Visual * Studio, // this is done via the project property pages, configuration * properties, C/C++, General settings. // Here the additional include * directories can be specified. // // // \n \section step_2_configuration * Step 2: Configuration // <hr> // // The second step is the configuration * and customization of the \b Blaze library. Many aspects // of \b Blaze can * be adapted to specific requirements, environments and architectures. The * most // convenient way to configure \b Blaze is to modify the headers in * the <tt>./blaze/config/</tt> // subdirectory by means of <a * href="https://cmake.org">CMake</a>. Alternatively these header // files * can be customized manually. In both cases, however, the files are * modified. If this is // not an option it is possible to configure \b Blaze * via the command line (see the tutorial // section \ref configuration_files * or the documentation in the configuration files). // // Since the default * settings are reasonable for most systems this step can also be skipped. // * However, in order to achieve maximum performance a customization of at * least the following // configuration files is required: // // - * <b><tt><blaze/config/BLAS.h></tt></b>: Via this configuration file \b * Blaze can be enabled // to use a third-party BLAS library for several * basic linear algebra functions (such as for // instance dense matrix * multiplications). In case no BLAS library is used, all linear algebra // * functions use the default implementations of the \b Blaze library and * therefore BLAS is not a // requirement for the compilation process. * However, please note that performance may be limited. // - * <b><tt><blaze/config/CacheSize.h></tt></b>: This file contains the * hardware specific cache // settings. \b Blaze uses this information to * optimize its cache usage. For maximum performance // it is recommended * to adapt these setting to a specific target architecture. 
// - * <b><tt><blaze/config/Thresholds.h></tt></b>: This file contains all * thresholds for the // customization of the \b Blaze compute kernels. In * order to tune the kernels for a specific // architecture and to * maximize performance it can be necessary to adjust the thresholds, // * especially for a parallel execution (see \ref * shared_memory_parallelization). // // For an overview of other * customization options and more details, please see the section // \ref * configuration_files. // // // \n \section blaze_version Blaze Version // * <hr> // // The current major and minor version number of the \b Blaze * library can be found in the // <b><tt><blaze/system/Version.h></tt></b> * header file. It is automatically included via the // * <b><tt><blaze/Blaze.h></tt></b> header file. The file contains the two * following macros, // which can for instance be used for conditional * compilation: * * \code #define BLAZE_MAJOR_VERSION 3 #define BLAZE_MINOR_VERSION 2 \endcode * * // \n Next: \ref getting_started */ //************************************************************************************************* //**Getting Started ******************************************************************************** /* * !\page getting_started Getting Started // // This short tutorial serves * the purpose to give a quick overview of the way mathematical // * expressions have to be formulated in \b Blaze. Starting with \ref * vector_types, the following // long tutorial covers the most important * aspects of the \b Blaze math library. // // // \n \section * getting_started_vector_example A First Example // // \b Blaze is written * such that using mathematical expressions is as close to mathematical // * textbooks as possible and therefore as intuitive as possible. In nearly * all cases the seemingly // easiest solution is the right solution and most * users experience no problems when trying to // use \b Blaze in the most * natural way. The following example gives a first impression of the // * formulation of a vector addition in \b Blaze: * * \code #include <iostream> #include <blaze/Math.h> * * using blaze::StaticVector; using blaze::DynamicVector; * * // Instantiation of a static 3D column vector. The vector is directly * initialized as // ( 4 -2 5 ) StaticVector<int,3UL> a{ 4, -2, 5 }; * * // Instantiation of a dynamic 3D column vector. Via the subscript operator * the values are set to // ( 2 5 -3 ) DynamicVector<int> b( 3UL ); b[0] = * 2; b[1] = 5; b[2] = -3; * * // Adding the vectors a and b DynamicVector<int> c = a + b; * * // Printing the result of the vector addition std::cout << "c =\n" << c << * "\n"; \endcode * * // Note that the entire \b Blaze math library can be included via the \c * blaze/Math.h header // file. Alternatively, the entire \b Blaze library, * including both the math and the entire // utility module, can be included * via the \c blaze/Blaze.h header file. Also note that all // classes and * functions of \b Blaze are contained in the blaze namespace.\n\n // // * Assuming that this program resides in a source file called \c * FirstExample.cpp, it can be // compiled for instance via the GNU C++ * compiler: * * \code g++ -ansi -O3 -DNDEBUG -mavx -o FirstExample FirstExample.cpp \endcode * * // Note the definition of the \c NDEBUG preprocessor symbol. In order to * achieve maximum // performance, it is necessary to compile the program in * release mode, which deactivates // all debugging functionality inside \b * Blaze. 
It is also strongly recommended to specify // the available * architecture specific instruction set (as for instance the AVX instruction * // set, which if available can be activated via the \c -mavx flag). This * allows \b Blaze // to optimize computations via vectorization.\n\n // // * When running the resulting executable \c FirstExample, the output of the * last line of // this small program is * * \code c = 6 3 2 \endcode * * // \n \section getting_started_matrix_example An Example Involving Matrices * // // Similarly easy and intuitive are expressions involving matrices: * * \code #include <blaze/Math.h> * * using namespace blaze; * * // Instantiating a dynamic 3D column vector DynamicVector<int> x{ 4, -1, 3 }; * * // Instantiating a dynamic 2x3 row-major matrix, preinitialized with 0. Via * the function call // operator three values of the matrix are explicitly * set to get the matrix // ( 1 0 4 ) // ( 0 -2 0 ) DynamicMatrix<int> * A( 2UL, 3UL, 0 ); A(0,0) = 1; A(0,2) = 4; A(1,1) = -2; * * // Performing a matrix/vector multiplication DynamicVector<int> y = A * x; * * // Printing the resulting vector std::cout << "y =\n" << y << "\n"; * * // Instantiating a static column-major matrix. The matrix is directly * initialized as // ( 3 -1 ) // ( 0 2 ) // ( -1 0 ) * StaticMatrix<int,3UL,2UL,columnMajor> B{ { 3, -1 }, { 0, 2 }, { -1, 0 } }; * * // Performing a matrix/matrix multiplication DynamicMatrix<int> C = A * B; * * // Printing the resulting matrix std::cout << "C =\n" << C << "\n"; \endcode * * // The output of this program is * * \code y = 16 2 * * C = ( -1 -1 ) ( 0 -4 ) \endcode * * // \n \section getting_started_complex_example A Complex Example // // The * following example is much more sophisticated. It shows the implementation * of the Conjugate // Gradient (CG) algorithm * (http://en.wikipedia.org/wiki/Conjugate_gradient) by means of the // \b * Blaze library: // // \image html cg.jpg // // In this example it is not * important to understand the CG algorithm itself, but to see the // * advantage of the API of the \b Blaze library. In the \b Blaze * implementation we will use a // sparse matrix/dense vector multiplication * for a 2D Poisson equation using \f$ N \times N \f$ // unknowns. It becomes * apparent that the core of the algorithm is very close to the mathematical * // formulation and therefore has huge advantages in terms of readability * and maintainability, // while the performance of the code is close to the * expected theoretical peak performance: * * \code const size_t NN( N*N ); * * blaze::CompressedMatrix<double,rowMajor> A( NN, NN ); * blaze::DynamicVector<double,columnVector> x( NN, 1.0 ), b( NN, 0.0 ), r( * NN ), p( NN ), Ap( NN ); double alpha, beta, delta; * * // ... Initializing the sparse matrix A * * // Performing the CG algorithm r = b - A * x; p = r; delta = (r,r); * * for( size_t iteration=0UL; iteration<iterations; ++iteration ) { Ap = A * p; * alpha = delta / (p,Ap); x += alpha * p; r -= alpha * Ap; beta = (r,r); if( * std::sqrt( beta ) < 1E-8 ) break; p = r + ( beta / delta ) * p; delta = * beta; } \endcode * * // \n Hopefully this short tutorial gives a good first impression of how * mathematical expressions // are formulated with \b Blaze. The following * long tutorial, starting with \ref vector_types, // will cover all aspects * of the \b Blaze math library, i.e. it will introduce all vector and // * matrix types, all possible operations on vectors and matrices, and of * course all possible // mathematical expressions. 
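//
// A short remark on the notation used in the CG listing above: expressions such as \c (r,r) and
// \c (p,Ap) denote inner (dot) products, computed via the comma operator applied to two vectors.
// The available spellings of the inner product are covered later in the section
// \ref inner_product; the following minimal sketch (assuming the \c dot() function described
// there) shows two equivalent formulations:

   \code
   blaze::DynamicVector<double> r{ 3.0, 4.0 };

   const double delta1 = (r,r);        // Inner product via the comma operator: 25.0
   const double delta2 = dot( r, r );  // Equivalent formulation via the dot() function
   \endcode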
// // \n Previous: \ref * configuration_and_installation &nbsp; &nbsp; Next: \ref vectors */ //************************************************************************************************* //**Vectors **************************************************************************************** /* * !\page vectors Vectors // // \tableofcontents // // // \n \section * vectors_general General Concepts // <hr> // // The \b Blaze library * currently offers four dense vector types (\ref vector_types_static_vector, * // \ref vector_types_dynamic_vector, \ref vector_types_hybrid_vector, and * \ref vector_types_custom_vector) // and one sparse vector type (\ref * vector_types_compressed_vector). All vectors can be specified // as either * column vectors or row vectors: * * \code using blaze::DynamicVector; using blaze::columnVector; using * blaze::rowVector; * * // Setup of the 3-dimensional dense column vector // // ( 1 ) // ( 2 ) * // ( 3 ) // DynamicVector<int,columnVector> a{ 1, 2, 3 }; * * // Setup of the 3-dimensional dense row vector // // ( 4 5 6 ) // * DynamicVector<int,rowVector> b{ 4, 5, 6 }; \endcode * * // Per default, all vectors in \b Blaze are column vectors: * * \code // Instantiation of a 3-dimensional column vector * blaze::DynamicVector<int> c( 3UL ); \endcode * * // \n \section vectors_details Vector Details // <hr> // // - \ref * vector_types // - \ref vector_operations // // // \n \section * vectors_examples Examples // <hr> * * \code using blaze::StaticVector; using blaze::DynamicVector; using * blaze::CompressedVector; using blaze::rowVector; using * blaze::columnVector; * * StaticVector<int,6UL> a; // Instantiation of a 6-dimensional * static column vector CompressedVector<int,rowVector> b; // Instantiation * of a compressed row vector DynamicVector<int,columnVector> c; // * Instantiation of a dynamic column vector * * // ... Resizing and initialization * * c = a + trans( b ); \endcode * * // \n Previous: \ref getting_started &nbsp; &nbsp; Next: \ref vector_types */ //************************************************************************************************* //**Vector Types *********************************************************************************** /* * !\page vector_types Vector Types // // \tableofcontents // // // \n * \section vector_types_static_vector StaticVector // <hr> // // The * blaze::StaticVector class template is the representation of a fixed size * vector with // statically allocated elements of arbitrary type. It can be * included via the header file * * \code #include <blaze/math/StaticVector.h> \endcode * * // The type of the elements, the number of elements, and the transpose flag * of the vector can // be specified via the three template parameters: * * \code template< typename Type, size_t N, bool TF > class StaticVector; * \endcode * * // - \c Type: specifies the type of the vector elements. StaticVector can be * used with any // non-cv-qualified, non-reference, non-pointer * element type. // - \c N : specifies the total number of vector * elements. It is expected that StaticVector is // only used for * tiny and small vectors. // - \c TF : specifies whether the vector is a * row vector (\c blaze::rowVector) or a column // vector (\c * blaze::columnVector). The default value is \c blaze::columnVector. 
// // * The blaze::StaticVector is perfectly suited for small to medium vectors * whose size is known at // compile time: * * \code // Definition of a 3-dimensional integral column vector * blaze::StaticVector<int,3UL> a; * * // Definition of a 4-dimensional single precision column vector * blaze::StaticVector<float,4UL,blaze::columnVector> b; * * // Definition of a 6-dimensional double precision row vector * blaze::StaticVector<double,6UL,blaze::rowVector> c; \endcode * * // \n \section vector_types_dynamic_vector DynamicVector // <hr> // // The * blaze::DynamicVector class template is the representation of an arbitrary * sized vector // with dynamically allocated elements of arbitrary type. It * can be included via the header file * * \code #include <blaze/math/DynamicVector.h> \endcode * * // The type of the elements and the transpose flag of the vector can be * specified via the two // template parameters: * * \code template< typename Type, bool TF > class DynamicVector; \endcode * * // - \c Type: specifies the type of the vector elements. DynamicVector can * be used with any // non-cv-qualified, non-reference, * non-pointer element type. // - \c TF : specifies whether the vector is a * row vector (\c blaze::rowVector) or a column // vector (\c * blaze::columnVector). The default value is \c blaze::columnVector. // // * The blaze::DynamicVector is the default choice for all kinds of dense * vectors and the best // choice for medium to large vectors. Its size can * be modified at runtime: * * \code // Definition of a 3-dimensional integral column vector * blaze::DynamicVector<int> a( 3UL ); * * // Definition of a 4-dimensional single precision column vector * blaze::DynamicVector<float,blaze::columnVector> b( 4UL ); * * // Definition of a double precision row vector with size 0 * blaze::DynamicVector<double,blaze::rowVector> c; \endcode * * // \n \section vector_types_hybrid_vector HybridVector // <hr> // // The * blaze::HybridVector class template combines the advantages of the * blaze::StaticVector and // the blaze::DynamicVector class templates. It * represents a fixed size vector with statically // allocated elements, but * still can be dynamically resized (within the bounds of the available // * memory). It can be included via the header file * * \code #include <blaze/math/HybridVector.h> \endcode * * // The type of the elements, the number of elements, and the transpose flag * of the vector can // be specified via the three template parameters: * * \code template< typename Type, size_t N, bool TF > class HybridVector; * \endcode * * // - \c Type: specifies the type of the vector elements. HybridVector can be * used with any // non-cv-qualified, non-reference, non-pointer * element type. // - \c N : specifies the maximum number of vector * elements. It is expected that HybridVector // is only used for * tiny and small vectors. // - \c TF : specifies whether the vector is a * row vector (\c blaze::rowVector) or a column // vector (\c * blaze::columnVector). The default value is \c blaze::columnVector. 
// // * The blaze::HybridVector is a suitable choice for small to medium vectors, * whose size is not // known at compile time or not fixed at runtime, but * whose maximum size is known at compile // time: * * \code // Definition of a 3-dimensional integral column vector with a maximum * size of 6 blaze::HybridVector<int,6UL> a( 3UL ); * * // Definition of a 4-dimensional single precision column vector with a * maximum size of 16 blaze::HybridVector<float,16UL,blaze::columnVector> b( * 4UL ); * * // Definition of a double precision row vector with size 0 and a maximum size * of 6 blaze::HybridVector<double,6UL,blaze::rowVector> c; \endcode * * // \n \section vector_types_custom_vector CustomVector // <hr> // // The * blaze::CustomVector class template provides the functionality to represent * an external // array of elements of arbitrary type and a fixed size as a * native \b Blaze dense vector data // structure. Thus in contrast to all * other dense vector types a custom vector does not perform // any kind of * memory allocation by itself, but it is provided with an existing array of * element // during construction. A custom vector can therefore be * considered an alias to the existing // array. It can be included via the * header file * * \code #include <blaze/math/CustomVector.h> \endcode * * // The type of the elements, the properties of the given array of elements * and the transpose // flag of the vector can be specified via the following * four template parameters: * * \code template< typename Type, bool AF, bool PF, bool TF > class * CustomVector; \endcode * * // - Type: specifies the type of the vector elements. blaze::CustomVector * can be used with // any non-cv-qualified, non-reference, * non-pointer element type. // - AF : specifies whether the represented, * external arrays are properly aligned with // respect to the * available instruction set (SSE, AVX, ...) or not. // - PF : specified * whether the represented, external arrays are properly padded with // * respect to the available instruction set (SSE, AVX, ...) or not. // - TF * : specifies whether the vector is a row vector (\c blaze::rowVector) or a * column // vector (\c blaze::columnVector). The default value is * \c blaze::columnVector. 
// // The blaze::CustomVector is the right choice * if any external array needs to be represented as // a \b Blaze dense * vector data structure or if a custom memory allocation strategy needs to * be // realized: * * \code using blaze::CustomVector; using blaze::Deallocate; using * blaze::aligned; using blaze::unaligned; using blaze::padded; using * blaze::unpadded; * * // Definition of an unmanaged custom column vector for unaligned, unpadded * integer arrays using UnalignedUnpadded = * CustomVector<int,unaligned,unpadded,columnVector>; std::vector<int> vec( * 7UL ); UnalignedUnpadded a( &vec[0], 7UL ); * * // Definition of a managed custom column vector for unaligned but padded * 'float' arrays using UnalignedPadded = * CustomVector<float,unaligned,padded,columnVector>; * std::unique_ptr<float[]> memory1( new float[16] ); UnalignedPadded b( * memory1.get(), 9UL, 16UL ); * * // Definition of a managed custom row vector for aligned, unpadded 'double' * arrays using AlignedUnpadded = * CustomVector<double,aligned,unpadded,rowVector>; * std::unique_ptr<double[],Deallocate> memory2( blaze::allocate<double>( 7UL * ) ); AlignedUnpadded c( memory2.get(), 7UL ); * * // Definition of a managed custom row vector for aligned, padded * 'complex<double>' arrays using cplx = complex<double>; using AlignedPadded * = CustomVector<cplx,aligned,padded,columnVector>; * std::unique_ptr<cplx[],Deallocate> memory3( allocate<cplx>( 8UL ) ); * AlignedPadded d( memory3.get(), 5UL, 8UL ); \endcode * * // In comparison with the remaining \b Blaze dense vector types * blaze::CustomVector has several // special characteristics. All of these * result from the fact that a custom vector is not // performing any kind of * memory allocation, but instead is given an existing array of elements. // * The following sections discuss all of these characteristics: // // -# * <b>\ref vector_types_custom_vector_memory_management</b> // -# <b>\ref * vector_types_custom_vector_copy_operations</b> // -# <b>\ref * vector_types_custom_vector_alignment</b> // -# <b>\ref * vector_types_custom_vector_padding</b> // // \n \subsection * vector_types_custom_vector_memory_management Memory Management // // The * blaze::CustomVector class template acts as an adaptor for an existing * array of elements. As // such it provides everything that is required to * use the array just like a native \b Blaze dense // vector data structure. * However, this flexibility comes with the price that the user of a custom * // vector is responsible for the resource management. // // The following * examples give an impression of several possible types of custom vectors: * * \code using blaze::CustomVector; using blaze::Deallocate; using * blaze::allocate; using blaze::aligned; using blaze::unaligned; using * blaze::padded; using blaze::unpadded; * * // Definition of a 3-dimensional custom vector with unaligned, unpadded and * externally // managed integer array. Note that the std::vector must be * guaranteed to outlive the // custom vector! std::vector<int> vec( 3UL ); * CustomVector<int,unaligned,unpadded> a( &vec[0], 3UL ); * * // Definition of a custom vector with size 3 and capacity 16 with aligned, * padded and // externally managed integer array. Note that the * std::unique_ptr must be guaranteed // to outlive the custom vector! 
std::unique_ptr<int[],Deallocate> memory( allocate<int>( 16UL ) );
   CustomVector<int,aligned,padded> b( memory.get(), 3UL, 16UL );
   \endcode

// \n \subsection vector_types_custom_vector_copy_operations Copy Operations
//
// As with all dense vectors it is possible to copy construct a custom vector:

   \code
   using blaze::CustomVector;
   using blaze::unaligned;
   using blaze::unpadded;

   using CustomType = CustomVector<int,unaligned,unpadded>;

   std::vector<int> vec( 5UL, 10 );  // Vector of 5 integers of the value 10
   CustomType a( &vec[0], 5UL );     // Represent the std::vector as Blaze dense vector
   a[1] = 20;                        // Also modifies the std::vector

   CustomType b( a );  // Creating a copy of vector a
   b[2] = 20;          // Also affects vector a and the std::vector
   \endcode

// It is important to note that a custom vector acts as a reference to the specified array. Thus
// the result of the copy constructor is a new custom vector that is referencing and representing
// the same array as the original custom vector.
//
// In contrast to copy construction, just as with references, copy assignment does not change
// which array is referenced by the custom vector, but modifies the values of the array:

   \code
   std::vector<int> vec2( 5UL, 4 );  // Vector of 5 integers of the value 4
   CustomType c( &vec2[0], 5UL );    // Represent the std::vector as Blaze dense vector

   a = c;  // Copy assignment: Set all values of vector a and b to 4.
   \endcode

// \n \subsection vector_types_custom_vector_alignment Alignment
//
// In case the custom vector is specified as \c aligned the passed array must be guaranteed to
// be aligned according to the requirements of the used instruction set (SSE, AVX, ...). For
// instance, if AVX is active an array of integers must be 32-byte aligned:

   \code
   using blaze::CustomVector;
   using blaze::Deallocate;
   using blaze::allocate;
   using blaze::aligned;
   using blaze::unpadded;

   // Allocation of 32-byte aligned memory
   std::unique_ptr<int[],Deallocate> memory( allocate<int>( 5UL ) );

   CustomVector<int,aligned,unpadded> a( memory.get(), 5UL );
   \endcode

// In case the alignment requirements are violated, a \c std::invalid_argument exception is
// thrown.
//
// \n \subsection vector_types_custom_vector_padding Padding
//
// Adding padding elements to the end of an array can have a significant impact on performance.
// For instance, assuming that AVX is available, two aligned, padded, 3-dimensional vectors of
// double precision values can be added via a single SIMD addition operation:

   \code
   using blaze::CustomVector;
   using blaze::Deallocate;
   using blaze::allocate;
   using blaze::aligned;
   using blaze::padded;

   using CustomType = CustomVector<double,aligned,padded>;

   std::unique_ptr<double[],Deallocate> memory1( allocate<double>( 4UL ) );
   std::unique_ptr<double[],Deallocate> memory2( allocate<double>( 4UL ) );
   std::unique_ptr<double[],Deallocate> memory3( allocate<double>( 4UL ) );

   // Creating padded custom vectors of size 3 and a capacity of 4
   CustomType a( memory1.get(), 3UL, 4UL );
   CustomType b( memory2.get(), 3UL, 4UL );
   CustomType c( memory3.get(), 3UL, 4UL );

   // ... Initialization

   c = a + b;  // AVX-based vector addition
   \endcode

// In this example, maximum performance is possible.
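//
// The capacity of 4 in the example above is no accident: an AVX register holds four \c double
// values, so rounding the size 3 up to the next multiple of the SIMD width yields the smallest
// padded capacity for which the addition can be performed as a single SIMD operation. The
// following lines merely illustrate that rounding; they are not part of the \b Blaze API, and
// the width of four doubles per register is an assumption that holds for AVX only:

   \code
   #include <cstddef>

   constexpr std::size_t simdWidth = 4UL;  // Assumed SIMD width: 4 doubles per AVX register
   constexpr std::size_t size      = 3UL;  // Logical size of the vectors from the example above
   constexpr std::size_t capacity  = ( ( size + simdWidth - 1UL ) / simdWidth ) * simdWidth;  // 4
   \endcode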
// However, in case no padding elements are inserted, a scalar addition has to be used:

   \code
   using blaze::CustomVector;
   using blaze::Deallocate;
   using blaze::allocate;
   using blaze::aligned;
   using blaze::unpadded;

   using CustomType = CustomVector<double,aligned,unpadded>;

   std::unique_ptr<double[],Deallocate> memory1( allocate<double>( 3UL ) );
   std::unique_ptr<double[],Deallocate> memory2( allocate<double>( 3UL ) );
   std::unique_ptr<double[],Deallocate> memory3( allocate<double>( 3UL ) );

   // Creating unpadded custom vectors of size 3
   CustomType a( memory1.get(), 3UL );
   CustomType b( memory2.get(), 3UL );
   CustomType c( memory3.get(), 3UL );

   // ... Initialization

   c = a + b;  // Scalar vector addition
   \endcode

// Note the different number of constructor parameters for unpadded and padded custom vectors:
// In contrast to unpadded vectors, where during the construction only the size of the array
// has to be specified, during the construction of a padded custom vector it is additionally
// necessary to explicitly specify the capacity of the array.
//
// The number of padding elements is required to be sufficient with respect to the available
// instruction set: In case of an aligned padded custom vector the added padding elements must
// guarantee that the capacity is greater than or equal to the size and a multiple of the SIMD
// vector width. In case of unaligned padded vectors the number of padding elements can be
// greater than or equal to the number of padding elements of an aligned padded custom vector.
// In case the padding is insufficient with respect to the available instruction set, a
// \c std::invalid_argument exception is thrown.
//
// Please also note that \b Blaze will zero initialize the padding elements in order to achieve
// maximum performance!
//
//
// \n \section vector_types_compressed_vector CompressedVector
// <hr>
//
// The blaze::CompressedVector class is the representation of an arbitrarily sized sparse
// vector, which stores only non-zero elements of arbitrary type. It can be included via the
// header file

   \code
   #include <blaze/math/CompressedVector.h>
   \endcode

// The type of the elements and the transpose flag of the vector can be specified via the two
// template parameters:

   \code
   template< typename Type, bool TF >
   class CompressedVector;
   \endcode

// - \c Type: specifies the type of the vector elements. CompressedVector can be used with any
//            non-cv-qualified, non-reference, non-pointer element type.
// - \c TF  : specifies whether the vector is a row vector (\c blaze::rowVector) or a column
//            vector (\c blaze::columnVector). The default value is \c blaze::columnVector.
// // * The blaze::CompressedVector is the right choice for all kinds of sparse * vectors: * * \code // Definition of a 3-dimensional integral column vector * blaze::CompressedVector<int> a( 3UL ); * * // Definition of a 4-dimensional single precision column vector with capacity * for 3 non-zero elements blaze::CompressedVector<float,blaze::columnVector> * b( 4UL, 3UL ); * * // Definition of a double precision row vector with size 0 * blaze::CompressedVector<double,blaze::rowVector> c; \endcode * * // \n Previous: \ref vectors &nbsp; &nbsp; Next: \ref vector_operations */ //************************************************************************************************* //**Vector Operations ****************************************************************************** /* * !\page vector_operations Vector Operations // // \tableofcontents // // // * \n \section vector_operations_constructors Constructors // <hr> // // * Instantiating and setting up a vector is very easy and intuitive. However, * there are a few // rules to take care of: // - In case the last template * parameter (the transpose flag) is omitted, the vector is per // default * a column vector. // - The elements of a \c StaticVector or \c * HybridVector are default initialized (i.e. built-in // data types are * initialized to 0, class types are initialized via the default * constructor). // - Newly allocated elements of a \c DynamicVector or \c * CompressedVector remain uninitialized // if they are of built-in type * and are default constructed if they are of class type. // // \n * \subsection vector_operations_default_construction Default Construction * * \code using blaze::StaticVector; using blaze::DynamicVector; using * blaze::CompressedVector; * * // All vectors can be default constructed. Whereas the size // of * StaticVectors is fixed via the second template parameter, // the initial * size of a default constructed DynamicVector or // CompressedVector is 0. * StaticVector<int,2UL> v1; // Instantiation of a 2D integer * column vector. // All elements are initialized to 0. * StaticVector<long,3UL,columnVector> v2; // Instantiation of a 3D long * integer column vector. // Again, all elements are initialized to 0L. * DynamicVector<float> v3; // Instantiation of a dynamic * single precision column // vector of size 0. * DynamicVector<double,rowVector> v4; // Instantiation of a dynamic * double precision row // vector of size 0. CompressedVector<int> v5; * // Instantiation of a compressed integer column // vector of size 0. * CompressedVector<double,rowVector> v6; // Instantiation of a compressed * double precision row // vector of size 0. \endcode * * // \n \subsection vector_operations_size_construction Construction with * Specific Size // // The \c DynamicVector, \c HybridVector and \c * CompressedVector classes offer a constructor that // allows to immediately * give the vector the required size. Whereas both dense vectors (i.e. // \c * DynamicVector and \c HybridVector) use this information to allocate memory * for all vector // elements, \c CompressedVector merely acquires the size * but remains empty. * * \code DynamicVector<int,columnVector> v7( 9UL ); // Instantiation of an * integer dynamic column vector // of size 9. The elements are NOT * initialized! HybridVector< complex<float>, 5UL > v8( 2UL ); // * Instantiation of a column vector with two single // precision complex * values. The elements are // default constructed. 
* CompressedVector<int,rowVector> v9( 10UL ); // Instantiation of a * compressed row vector with // size 10. Initially, the vector provides no * // capacity for non-zero elements. \endcode * * // \n \subsection vector_operations_initialization_constructors * Initialization Constructors // // All dense vector classes offer a * constructor that allows for a direct, homogeneous initialization // of all * vector elements. In contrast, for sparse vectors the predicted number of * non-zero elements // can be specified * * \code StaticVector<int,3UL,rowVector> v10( 2 ); // Instantiation * of a 3D integer row vector. // All elements are initialized to 2. * DynamicVector<float> v11( 3UL, 7.0F ); // Instantiation of a * dynamic single precision // column vector of size 3. All elements are // * set to 7.0F. CompressedVector<float,rowVector> v12( 15UL, 3UL ); // * Instantiation of a single precision column // vector of size 15, which * provides enough // space for at least 3 non-zero elements. \endcode * * // \n \subsection vector_operations_array_construction Array Construction // * // Alternatively, all dense vector classes offer a constructor for an * initialization with a dynamic // or static array. If the vector is * initialized from a dynamic array, the constructor expects the // actual * size of the array as first argument, the array as second argument. In case * of a static // array, the fixed size of the array is used: * * \code const unique_ptr<double[]> array1( new double[2] ); // ... * Initialization of the dynamic array blaze::StaticVector<double,2UL> v13( * 2UL, array1.get() ); * * int array2[4] = { 4, -5, -6, 7 }; blaze::StaticVector<int,4UL> v14( array2 ); * \endcode * * // \n \subsection vector_operations_initializer_list_construction Initializer * List Construction // // In addition, all dense and sparse vector classes * can be directly initialized by means of an // initializer list: * * \code blaze::DynamicVector<float> v15{ 1.0F, 2.0F, 3.0F, 4.0F }; * blaze::CompressedVector<int> v16{ 0, 2, 0, 0, 5, 0, 7, 0 }; \endcode * * // In case of sparse vectors, only the non-zero elements are used to * initialize the vector. // // \n \subsection * vector_operations_copy_construction Copy Construction // // All dense and * sparse vectors can be created as the copy of any other dense or sparse * vector // with the same transpose flag (i.e. blaze::rowVector or * blaze::columnVector). * * \code StaticVector<int,9UL,columnVector> v17( v7 ); // Instantiation of the * dense column vector v17 // as copy of the dense column vector v7. * DynamicVector<int,rowVector> v18( v9 ); // Instantiation of the * dense row vector v18 as // copy of the sparse row vector v9. * CompressedVector<int,columnVector> v19( v1 ); // Instantiation of the * sparse column vector v19 // as copy of the dense column vector v1. * CompressedVector<float,rowVector> v20( v12 ); // Instantiation of the * sparse row vector v20 as // copy of the row vector v12. \endcode * * // Note that it is not possible to create a \c StaticVector as a copy of a * vector with a different // size: * * \code StaticVector<int,5UL,columnVector> v21( v7 ); // Runtime error: Size * does not match! StaticVector<int,4UL,rowVector> v22( v10 ); // Compile * time error: Size does not match! 
\endcode * * // \n \section vector_operations_assignment Assignment // <hr> // // There * are several types of assignment to dense and sparse vectors: // \ref * vector_operations_homogeneous_assignment, \ref * vector_operations_array_assignment, // \ref * vector_operations_copy_assignment, and \ref * vector_operations_compound_assignment. // // \n \subsection * vector_operations_homogeneous_assignment Homogeneous Assignment // // * Sometimes it may be necessary to assign the same value to all elements of * a dense vector. // For this purpose, the assignment operator can be used: * * \code blaze::StaticVector<int,3UL> v1; blaze::DynamicVector<double> v2; * * // Setting all integer elements of the StaticVector to 2 v1 = 2; * * // Setting all double precision elements of the DynamicVector to 5.0 v2 = * 5.0; \endcode * * // \n \subsection vector_operations_array_assignment Array Assignment // // * Dense vectors can also be assigned a static array: * * \code blaze::StaticVector<float,2UL> v1; * blaze::DynamicVector<double,rowVector> v2; * * float array1[2] = { 1.0F, 2.0F }; double array2[5] = { 2.1, 4.0, -1.7, 8.6, * -7.2 }; * * v1 = array1; v2 = array2; \endcode * * // \n \subsection vector_operations_initializer_list_assignment Initializer * List Assignment // // Alternatively, it is possible to directly assign an * initializer list to a dense or sparse // vector: * * \code blaze::DynamicVector<float> v1; * blaze::CompressedVector<double,rowVector> v2; * * v1 = { 1.0F, 2.0F }; v2 = { 2.1, 0.0, -1.7, 0.0, -7.2 }; \endcode * * // In case of sparse vectors, only the non-zero elements are considered. // * // \n \subsection vector_operations_copy_assignment Copy Assignment // // * For all vector types it is generally possible to assign another vector * with the same transpose // flag (i.e. blaze::columnVector or * blaze::rowVector). Note that in case of \c StaticVectors, the // assigned * vector is required to have the same size as the \c StaticVector since the * size of a // \c StaticVector cannot be adapted! * * \code blaze::StaticVector<int,3UL,columnVector> v1; * blaze::DynamicVector<int,columnVector> v2( 3UL ); * blaze::DynamicVector<float,columnVector> v3( 5UL ); * blaze::CompressedVector<int,columnVector> v4( 3UL ); * blaze::CompressedVector<float,rowVector> v5( 3UL ); * * // ... Initialization of the vectors * * v1 = v2; // OK: Assignment of a 3D dense column vector to another 3D dense * column vector v1 = v4; // OK: Assignment of a 3D sparse column vector to * a 3D dense column vector v1 = v3; // Runtime error: Cannot assign a 5D * vector to a 3D static vector v1 = v5; // Compilation error: Cannot assign * a row vector to a column vector \endcode * * // \n \subsection vector_operations_compound_assignment Compound Assignment * // // Next to plain assignment, it is also possible to use addition * assignment, subtraction // assignment, and multiplication assignment. Note * however, that in contrast to plain assignment // the size and the * transpose flag of the vectors has be to equal in order to able to perform * a // compound assignment. * * \code blaze::StaticVector<int,5UL,columnVector> v1; * blaze::DynamicVector<int,columnVector> v2( 5UL ); * blaze::CompressedVector<float,columnVector> v3( 7UL ); * blaze::DynamicVector<float,rowVector> v4( 7UL ); * blaze::CompressedVector<float,rowVector> v5( 7UL ); * * // ... 
Initialization of the vectors * * v1 += v2; // OK: Addition assignment between two column vectors of the same * size v1 += v3; // Runtime error: No compound assignment between vectors * of different size v1 -= v4; // Compilation error: No compound assignment * between vectors of different transpose flag v4 *= v5; // OK: * Multiplication assignment between two row vectors of the same size * \endcode * * // \n \section vector_operations_element_access Element Access // <hr> // // * \n \subsection vector_operations_subscript_operator_1 Subscript Operator * // // The easiest and most intuitive way to access a dense or sparse * vector is via the subscript // operator. The indices to access a vector * are zero-based: * * \code blaze::DynamicVector<int> v1( 5UL ); v1[0] = 1; v1[1] = 3; // ... * * blaze::CompressedVector<float> v2( 5UL ); v2[2] = 7.3F; v2[4] = -1.4F; * \endcode * * // Whereas using the subscript operator on a dense vector only accesses the * already existing // element, accessing an element of a sparse vector via * the subscript operator potentially // inserts the element into the vector * and may therefore be more expensive. Consider the // following example: * * \code blaze::CompressedVector<int> v1( 10UL ); * * for( size_t i=0UL; i<v1.size(); ++i ) { ... = v1[i]; } \endcode * * // Although the compressed vector is only used for read access within the for * loop, using the // subscript operator temporarily inserts 10 non-zero * elements into the vector. Therefore the // preferred way to traverse the * non-zero elements of a sparse vector is to use iterators. // // \n * \subsection vector_operations_iterators Iterators // // All vectors * (sparse as well as dense) offer an alternate way via the \c begin(), \c * cbegin(), // \c end(), and \c cend() functions to traverse the currently * contained elements by iterators. // In case of non-const vectors, \c * begin() and \c end() return an \c Iterator, which allows a // manipulation * of the non-zero value, in case of a constant vector or in case \c cbegin() * or // \c cend() are used a \c ConstIterator is returned: * * \code using blaze::CompressedVector; * * CompressedVector<int> v1( 10UL ); * * // ... Initialization of the vector * * // Traversing the vector by Iterator for( CompressedVector<int>::Iterator * it=v1.begin(); it!=v1.end(); ++it ) { it->value() = ...; // OK: Write * access to the value of the non-zero element. ... = it->value(); // OK: * Read access to the value of the non-zero element. it->index() = ...; // * Compilation error: The index of a non-zero element cannot be changed. ... * = it->index(); // OK: Read access to the index of the non-zero element. } * * // Traversing the vector by ConstIterator for( * CompressedVector<int>::ConstIterator it=v1.cbegin(); it!=v1.cend(); ++it ) * { it->value() = ...; // Compilation error: Assignment to the value via a * ConstIterator is invalid. ... = it->value(); // OK: Read access to the * value of the non-zero element. it->index() = ...; // Compilation error: * The index of a non-zero element cannot be changed. ... = it->index(); // * OK: Read access to the index of the non-zero element. } \endcode * * // Note that \c begin(), \c cbegin(), \c end(), and \c cend() are also * available as free functions: * * \code for( CompressedVector<int>::Iterator it=begin( v1 ); it!=end( v1 ); * ++it ) { // ... } * * for( CompressedVector<int>::ConstIterator it=cbegin( v1 ); it!=cend( v1 ); * ++it ) { // ... 
} \endcode * * // \n \section vector_operations_element_insertion Element Insertion // <hr> * // // In contrast to dense vectors, that store all elements independent of * their value and that // offer direct access to all elements, spares * vectors only store the non-zero elements contained // in the vector. * Therefore it is necessary to explicitly add elements to the vector. // // * \n \subsection vector_operations_subscript_operator_2 Subscript Operator * // // The first option to add elements to a sparse vector is the subscript * operator: * * \code using blaze::CompressedVector; * * CompressedVector<int> v1( 3UL ); v1[1] = 2; \endcode * * // In case the element at the given index is not yet contained in the vector, * it is automatically // inserted. Otherwise the old value is replaced by * the new value 2. The operator returns a // reference to the sparse vector * element. // // \n \subsection vector_operations_set .set() // // An * alternative to the subscript operator is the \c set() function: In case * the element is not // yet contained in the vector the element is inserted, * else the element's value is modified: * * \code // Insert or modify the value at index 3 v1.set( 3, 1 ); \endcode * * // \n \subsection vector_operations_insert .insert() // // The insertion of * elements can be better controlled via the \c insert() function. In * contrast to // the subscript operator and the \c set() function it emits * an exception in case the element is // already contained in the vector. In * order to check for this case, the \c find() function can be // used: * * \code // In case the element at index 4 is not yet contained in the matrix it * is inserted // with a value of 6. if( v1.find( 4 ) == v1.end() ) * v1.insert( 4, 6 ); \endcode * * // \n \subsection vector_operations_append .append() // // Although the \c * insert() function is very flexible, due to performance reasons it is not * suited // for the setup of large sparse vectors. A very efficient, yet * also very low-level way to fill // a sparse vector is the \c append() * function. It requires the sparse vector to provide enough // capacity to * insert a new element. Additionally, the index of the new element must be * larger // than the index of the previous element. Violating these * conditions results in undefined // behavior! * * \code v1.reserve( 10 ); // Reserving space for 10 non-zero elements * v1.append( 5, -2 ); // Appending the element -2 at index 5 v1.append( 6, * 4 ); // Appending the element 4 at index 6 // ... \endcode * * // \n \section vector_operations_element_removal Element Removal // <hr> // * // \subsection vector_operations_erase .erase() // // The \c erase() * member functions can be used to remove elements from a sparse vector. The * // following example gives an impression of the five different flavors of * \c erase(): * * \code using blaze::CompressedVector; * * CompressedVector<int> v( 42 ); // ... 
Initialization of the vector

   // Erasing the element at index 21
   v.erase( 21 );

   // Erasing a single element via iterator
   v.erase( v.find( 4 ) );

   // Erasing all non-zero elements in the range [7..24]
   v.erase( v.lowerBound( 7 ), v.upperBound( 24 ) );

   // Erasing all non-zero elements with a value larger than 9 by passing a unary predicate
   v.erase( []( int i ){ return i > 9; } );

   // Erasing all non-zero elements in the range [30..40] with a value larger than 5
   v.erase( v.lowerBound( 30 ), v.upperBound( 40 ), []( int i ){ return i > 5; } );
   \endcode

// \n \section vector_operations_element_lookup Element Lookup
// <hr>
//
// A sparse vector only stores the non-zero elements contained in the vector. Therefore, whenever
// accessing a vector element at a specific index a lookup operation is required. Whereas the
// subscript operator is performing this lookup automatically, it is also possible to use the
// \c find(), \c lowerBound(), and \c upperBound() member functions for a manual lookup.
//
// \n \subsection vector_operations_find .find()
//
// The \c find() function can be used to check whether a specific element is contained in a sparse
// vector. It specifically searches for the element at the given index. In case the element is
// found, the function returns an iterator to the element. Otherwise an iterator just past the
// last non-zero element of the compressed vector (the \c end() iterator) is returned. Note that
// the returned iterator is subject to invalidation due to inserting operations via the subscript
// operator, the \c set() function or the \c insert() function!

   \code
   using blaze::CompressedVector;

   CompressedVector<int> a( 42 );
   // ... Initialization of the vector

   // Searching the element at index 7. In case the element is not
   // contained in the vector, the end() iterator is returned.
   CompressedVector<int>::Iterator pos( a.find( 7 ) );

   if( pos != a.end() ) {
      // ...
   }
   \endcode

// \n \subsection vector_operations_lowerbound .lowerBound()
//
// The \c lowerBound() function returns an iterator to the first element with an index not less
// than the given index. In combination with the \c upperBound() function this function can be
// used to create a pair of iterators specifying a range of indices. Note that the returned
// iterator is subject to invalidation due to inserting operations via the subscript operator,
// the \c set() function or the \c insert() function!

   \code
   using blaze::CompressedVector;

   CompressedVector<int> a( 42 );
   // ... Initialization of the vector

   // Searching the lower bound of index 17
   CompressedVector<int>::Iterator pos1( a.lowerBound( 17 ) );

   // Searching the upper bound of index 28
   CompressedVector<int>::Iterator pos2( a.upperBound( 28 ) );

   // Erasing all elements in the specified range
   a.erase( pos1, pos2 );
   \endcode

// \n \subsection vector_operations_upperbound .upperBound()
//
// The \c upperBound() function returns an iterator to the first element with an index greater
// than the given index. In combination with the \c lowerBound() function this function can be
// used to create a pair of iterators specifying a range of indices. Note that the returned
// iterator is subject to invalidation due to inserting operations via the subscript operator,
// the \c set() function or the \c insert() function!

   \code
   using blaze::CompressedVector;

   CompressedVector<int> a( 42 );
   // ... Initialization of the vector
   // Searching the lower bound of index 17
   CompressedVector<int>::Iterator pos1( a.lowerBound( 17 ) );

   // Searching the upper bound of index 28
   CompressedVector<int>::Iterator pos2( a.upperBound( 28 ) );

   // Erasing all elements in the specified range
   a.erase( pos1, pos2 );
   \endcode

// \n \section vector_operations_non_modifying_operations Non-Modifying Operations
// <hr>
//
// \subsection vector_operations_size .size() / size()
//
// Via the \c size() member function, the current size of a dense or sparse vector can be queried:

   \code
   // Instantiating a dynamic vector with size 10
   blaze::DynamicVector<int> v1( 10UL );
   v1.size();  // Returns 10

   // Instantiating a compressed vector with size 12 and capacity for 3 non-zero elements
   blaze::CompressedVector<double> v2( 12UL, 3UL );
   v2.size();  // Returns 12
   \endcode

// Alternatively, the free function \c size() can be used to query the current size of a vector.
// In contrast to the member function, the free function can also be used to query the size of
// vector expressions:

   \code
   size( v1 );  // Returns 10, i.e. has the same effect as the member function
   size( v2 );  // Returns 12, i.e. has the same effect as the member function

   blaze::DynamicMatrix<int> A( 15UL, 12UL );
   size( A * v2 );  // Returns 15, i.e. the size of the resulting vector
   \endcode

// \n \subsection vector_operations_capacity .capacity() / capacity()
//
// Via the \c capacity() (member) function the internal capacity of a dense or sparse vector
// can be queried. Note that the capacity of a vector doesn't have to be equal to the size
// of a vector. In case of a dense vector the capacity will always be greater than or equal to
// the size of the vector, in case of a sparse vector the capacity may even be less than
// the size.

   \code
   v1.capacity();  // Returns at least 10
   \endcode

// For symmetry reasons, there is also a free function \c capacity() available that can be used
// to query the capacity:

   \code
   capacity( v1 );  // Returns at least 10, i.e. has the same effect as the member function
   \endcode

// Note, however, that it is not possible to query the capacity of a vector expression:

   \code
   capacity( A * v1 );  // Compilation error!
   \endcode

// \n \subsection vector_operations_nonzeros .nonZeros() / nonZeros()
//
// For both dense and sparse vectors the number of non-zero elements can be determined via the
// \c nonZeros() member function. Sparse vectors directly return their number of non-zero
// elements, dense vectors traverse their elements and count the number of non-zero elements.

   \code
   v1.nonZeros();  // Returns the number of non-zero elements in the dense vector
   v2.nonZeros();  // Returns the number of non-zero elements in the sparse vector
   \endcode

// There is also a free function \c nonZeros() available to query the current number of non-zero
// elements:

   \code
   nonZeros( v1 );  // Returns the number of non-zero elements in the dense vector
   nonZeros( v2 );  // Returns the number of non-zero elements in the sparse vector
   \endcode

// The free \c nonZeros() function can also be used to query the number of non-zero elements in
// a vector expression.
// However, the result is not the exact number of non-zero elements, but may be a rough
// estimation:

   \code
   nonZeros( A * v1 );  // Estimates the number of non-zero elements in the vector expression
   \endcode

// \n \subsection vector_operations_isempty isEmpty()
//
// The \c isEmpty() function returns whether the total number of elements of the vector is zero:

   \code
   blaze::DynamicVector<int> a;  // Create an empty vector
   isEmpty( a );                 // Returns true
   a.resize( 10 );               // Resize to 10 elements
   isEmpty( a );                 // Returns false
   \endcode

// \n \subsection vector_operations_isnan isnan()
//
// The \c isnan() function provides the means to check a dense or sparse vector for
// not-a-number elements:

   \code
   blaze::DynamicVector<double> a;
   // ... Resizing and initialization
   if( isnan( a ) ) { ... }
   \endcode

   \code
   blaze::CompressedVector<double> a;
   // ... Resizing and initialization
   if( isnan( a ) ) { ... }
   \endcode

// If at least one element of the vector is not-a-number, the function returns \c true, otherwise
// it returns \c false. Please note that this function only works for vectors with floating point
// elements. The attempt to use it for a vector with a non-floating point element type results in
// a compile time error.
//
//
// \n \subsection vector_operations_isdefault isDefault()
//
// The \c isDefault() function returns whether the given dense or sparse vector is in default state:

   \code
   blaze::HybridVector<int,20UL> a;
   // ... Resizing and initialization
   if( isDefault( a ) ) { ... }
   \endcode

// A vector is in default state if it appears to just have been default constructed. All resizable
// vectors (\c HybridVector, \c DynamicVector, or \c CompressedVector) and \c CustomVector are
// in default state if their size is equal to zero. A non-resizable vector (\c StaticVector, all
// subvectors, element selections, rows, and columns) is in default state if all its elements are
// in default state. For instance, in case the vector is instantiated for a built-in integral or
// floating point data type, the function returns \c true in case all vector elements are 0 and
// \c false in case any vector element is not 0.
//
//
// \n \subsection vector_operations_isUniform isUniform()
//
// In order to check if all vector elements are identical, the \c isUniform() function can be used:

   \code
   blaze::DynamicVector<int> a;
   // ... Resizing and initialization
   if( isUniform( a ) ) { ... }
   \endcode

// Note that in case of sparse vectors the zero elements are also taken into account!
//
//
// \n \subsection vector_operations_length length() / sqrLength()
//
// In order to calculate the length (magnitude) of a dense or sparse vector, both the \c length()
// and \c sqrLength() functions can be used:

   \code
   blaze::StaticVector<float,3UL,rowVector> v{ -1.2F, 2.7F, -2.3F };

   const float len    = length   ( v );  // Computes the current length of the vector
   const float sqrlen = sqrLength( v );  // Computes the square length of the vector
   \endcode

// Note that both functions can only be used for vectors with built-in or complex element type!
//
//
// \n \subsection vector_operations_vector_trans trans()
//
// As already mentioned, vectors can either be column vectors (blaze::columnVector) or row vectors
// (blaze::rowVector). A column vector cannot be assigned to a row vector and vice versa.
However, // vectors can be transposed via the \c trans() * function: * * \code blaze::DynamicVector<int,columnVector> v1( 4UL ); * blaze::CompressedVector<int,rowVector> v2( 4UL ); * * v1 = v2; // Compilation error: Cannot assign a row vector to a * column vector v1 = trans( v2 ); // OK: Transposing the row vector to a * column vector and assigning it // to the column vector v1 v2 = trans( * v1 ); // OK: Transposing the column vector v1 and assigning it to the * row vector v2 v1 += trans( v2 ); // OK: Addition assignment of two column * vectors \endcode * * // \n \subsection vector_operations_ctrans ctrans() // // It is also possible * to compute the conjugate transpose of a vector. This operation is * available // via the \c ctrans() function: * * \code blaze::CompressedVector< complex<float>, rowVector > v1( 4UL ); * blaze::DynamicVector< complex<float>, columnVector > v2( 4UL ); * * v1 = ctrans( v2 ); // Compute the conjugate transpose vector \endcode * * // Note that the \c ctrans() function has the same effect as manually * applying the \c conj() and // \c trans() function in any order: * * \code v1 = trans( conj( v2 ) ); // Computing the conjugate transpose vector * v1 = conj( trans( v2 ) ); // Computing the conjugate transpose vector * \endcode * * // \n \subsection vector_operations_evaluate eval() / evaluate() // // The \c * evaluate() function forces an evaluation of the given vector expression * and enables // an automatic deduction of the correct result type of an * operation. The following code example // demonstrates its intended use for * the multiplication of a dense and a sparse vector: * * \code using blaze::DynamicVector; using blaze::CompressedVector; * * blaze::DynamicVector<double> a; blaze::CompressedVector<double> b; // ... * Resizing and initialization * * auto c = evaluate( a * b ); \endcode * * // In this scenario, the \c evaluate() function assists in deducing the exact * result type of // the operation via the \c auto keyword. Please note that * if \c evaluate() is used in this // way, no temporary vector is created * and no copy operation is performed. Instead, the result // is directly * written to the target vector due to the return value optimization (RVO). * However, // if \c evaluate() is used in combination with an explicit * target type, a temporary will be // created and a copy operation will be * performed if the used type differs from the type // returned from the * function: * * \code CompressedVector<double> d( a * b ); // No temporary & no copy * operation DynamicVector<double> e( a * b ); // Temporary & copy * operation d = evaluate( a * b ); // Temporary & copy * operation \endcode * * // Sometimes it might be desirable to explicitly evaluate a sub-expression * within a larger // expression. However, please note that \c evaluate() is * not intended to be used for this // purpose. This task is more elegantly * and efficiently handled by the \c eval() function: * * \code blaze::DynamicVector<double> a, b, c, d; * * d = a + evaluate( b * c ); // Unnecessary creation of a temporary vector d = * a + eval( b * c ); // No creation of a temporary vector \endcode * * // In contrast to the \c evaluate() function, \c eval() can take the complete * expression // into account and therefore can guarantee the most efficient * way to evaluate it (see also // \ref intra_statement_optimization). 
// // * // \n \section vector_operations_modifying_operations Modifying Operations * // <hr> // // \subsection vector_operations_resize_reserve .resize() / * .reserve() // // The size of a \c StaticVector is fixed by the second * template parameter and a \c CustomVector // cannot be resized. In * contrast, the size of \c DynamicVectors, \c HybridVectors as well as // \c * CompressedVectors can be changed via the \c resize() function: * * \code using blaze::DynamicVector; using blaze::CompressedVector; * * DynamicVector<int,columnVector> v1; CompressedVector<int,rowVector> v2( 4 ); * v2[1] = -2; v2[3] = 11; * * // Adapting the size of the dynamic and compressed vectors. The (optional) * second parameter // specifies whether the existing elements should be * preserved. Per default, the existing // elements are preserved. v1.resize( * 5UL ); // Resizing vector v1 to 5 elements. Elements of built-in * type remain // uninitialized, elements of class type are default * constructed. v1.resize( 3UL, false ); // Resizing vector v1 to 3 * elements. The old elements are lost, the // new elements are NOT * initialized! v2.resize( 8UL, true ); // Resizing vector v2 to 8 * elements. The old elements are preserved. v2.resize( 5UL, false ); // * Resizing vector v2 to 5 elements. The old elements are lost. \endcode * * // Note that resizing a vector invalidates all existing views (see e.g. \ref * views_subvectors) // on the vector: * * \code blaze::DynamicVector<int,rowVector> v1( 10UL ); // Creating a dynamic * vector of size 10 auto sv = subvector( v1, 2UL, 5UL ); // * Creating a view on the range [2..6] v1.resize( 6UL ); * // Resizing the vector invalidates the view \endcode * * // When the internal capacity of a vector is no longer sufficient, the * allocation of a larger // junk of memory is triggered. In order to avoid * frequent reallocations, the \c reserve() // function can be used up front * to set the internal capacity: * * \code blaze::DynamicVector<int> v1; v1.reserve( 100 ); v1.size(); // * Returns 0 v1.capacity(); // Returns at least 100 \endcode * * // Note that the size of the vector remains unchanged, but only the internal * capacity is set // according to the specified value! // // \n \subsection * vector_operations_shrinkToFit .shrinkToFit() // // The internal capacity * of vectors with dynamic memory is preserved in order to minimize the // * number of reallocations. For that reason, the \c resize() and \c reserve() * functions can lead // to memory overhead. The \c shrinkToFit() member * function can be used to minimize the internal // capacity: * * \code blaze::DynamicVector<int> v1( 1000UL ); // Create a vector of 1000 * integers v1.resize( 10UL ); // Resize to 10, but the * capacity is preserved v1.shrinkToFit(); // Remove * the unused capacity \endcode * * // Please note that due to padding the capacity might not be reduced exactly * to \c size(). Please // also note that in case a reallocation occurs, all * iterators (including \c end() iterators), all // pointers and references * to elements of the vector are invalidated. // // \subsection * vector_operations_reset_clear reset() / clear() // // In order to reset * all elements of a vector, the \c reset() function can be used: * * \code // Setup of a single precision column vector, whose elements are * initialized with 2.0F. blaze::DynamicVector<float> v1( 3UL, 2.0F ); * * // Resetting all elements to 0.0F. Only the elements are reset, the size of * the vector is unchanged. 
reset( v1 ); // Resetting all elements * v1.size(); // Returns 3: size and capacity remain unchanged \endcode * * // In order to return a vector to its default state (i.e. the state of a * default constructed // vector), the \c clear() function can be used: * * \code // Setup of a single precision column vector, whose elements are * initialized with -1.0F. blaze::DynamicVector<float> v1( 5, -1.0F ); * * // Resetting the entire vector. clear( v1 ); // Resetting the entire vector * v1.size(); // Returns 0: size is reset, but capacity remains unchanged * \endcode * * // Note that resetting or clearing both dense and sparse vectors does not * change the capacity // of the vectors. // // // \n \subsection * vector_operations_swap swap() // // Via the \c swap() function it is * possible to completely swap the contents of two vectors of // the same * type: * * \code blaze::DynamicVector<int,columnVector> v1( 10UL ); * blaze::DynamicVector<int,columnVector> v2( 20UL ); * * swap( v1, v2 ); // Swapping the contents of v1 and v2 \endcode * * // \n \section vector_operations_arithmetic_operations Arithmetic Operations * // <hr> // // \subsection vector_operations_normalize normalize() // // * The \c normalize() function can be used to scale any non-zero vector to a * length of 1. In // case the vector does not contain a single non-zero * element (i.e. is a zero vector), the // \c normalize() function returns a * zero vector. * * \code blaze::DynamicVector<float,columnVector> v1( 10UL ); * blaze::CompressedVector<double,columnVector> v2( 12UL ); * * v1 = normalize( v1 ); // Normalizing the dense vector v1 length( v1 ); * // Returns 1 (or 0 in case of a zero vector) v1 = normalize( v2 ); // * Assigning v1 the normalized vector v2 length( v1 ); // Returns 1 * (or 0 in case of a zero vector) \endcode * * // Note that the \c normalize() function only works for floating point * vectors. The attempt to // use it for an integral vector results in a * compile time error. // // // \n \subsection vector_operations_min_max * min() / max() // // The \c min() and \c max() functions can be used for a * single vector or multiple vectors. If // passed a single vector, the * functions return the smallest and largest element of the given // dense * vector or the smallest and largest non-zero element of the given sparse * vector, // respectively: * * \code blaze::StaticVector<int,4UL,rowVector> a{ -5, 2, 7, -4 }; * * min( a ); // Returns -5 max( a ); // Returns 7 \endcode * * \code blaze::CompressedVector<int> b{ 1, 0, 3, 0 }; * * min( b ); // Returns 1 max( b ); // Returns 3 \endcode * * // For more information on the unary \c min() and \c max() reduction * operations see the // \ref vector_operations_reduction_operations section. * // // If passed two or more dense vectors, the \c min() and \c max() * functions compute the // componentwise minimum or maximum of the given * vectors, respectively: * * \code blaze::StaticVector<int,4UL,rowVector> c{ -5, 1, -7, 4 }; * blaze::StaticVector<int,4UL,rowVector> d{ -5, 3, 0, 2 }; * * min( a, c ); // Results in the vector ( -5, 1, -7, -4 ) max( a, c, d ); * // Results in the vector ( -5, 3, 7, 4 ) \endcode * * // Please note that sparse vectors can only be used in the unary \c min() and * \c max() functions. // Also note that all forms of the \c min() and \c * max() functions can be used to compute the // smallest and largest element * of a vector expression: * * \code min( a + b + c ); // Returns -9, i.e. 
the smallest value of the * resulting vector max( a - b - c ); // Returns 11, i.e. the largest value * of the resulting vector * * min( a + c, c - d ); // Results in ( -10 -2 -7 0 ) max( a - c, c + d ); // * Results in ( 0 4 14 6 ) \endcode * * // \n \subsection vector_operators_softmax softmax() // // The <a * href="https://en.wikipedia.org/wiki/Softmax_function">softmax * function</a>, also called // the normalized exponential function, of a * given dense vector can be computed via \c softmax(). // The resulting * dense vector consists of real values in the range (0..1], which add up to * 1. * * \code blaze::StaticVector<double,7UL,rowVector> x{ 1.0, 2.0, 3.0, 4.0, 1.0, * 2.0, 3.0 }; blaze::StaticVector<double,7UL,rowVector> y; * * // Evaluating the softmax function y = softmax( x ); // Results in ( * 0.024 0.064 0.175 0.475 0.024 0.064 0.175 ) double s = sum( y ); // * Results in 1 \endcode * * // \n \subsection vector_operators_abs abs() // // The \c abs() function can * be used to compute the absolute values of each element of a vector. // For * instance, the following computation * * \code blaze::StaticVector<int,3UL,rowVector> a{ -1, 2, -3 }; * blaze::StaticVector<int,3UL,rowVector> b( abs( a ) ); \endcode * * // results in the vector * * \f$ b = \left(\begin{array}{*{1}{c}} 1 \\ 2 \\ 3 \\ \end{array}\right)\f$ * * // \n \subsection vector_operators_sign sign() // // The \c sign() function * can be used to evaluate the sign of each element of a vector \a a. For // * each element \c i the corresponding result is 1 if \a a[i] is greater than * zero, 0 if \a a[i] // is zero, and -1 if \a a[i] is less than zero. For * instance, the following use of the \c sign() // function * * \code blaze::StaticVector<int,3UL,rowVector> a{ -1, 2, 0 }; * blaze::StaticVector<int,3UL,rowVector> b( sign( a ) ); \endcode * * // results in the vector * * \f$ b = \left(\begin{array}{*{1}{c}} -1 \\ 1 \\ 0 \\ \end{array}\right)\f$ * * // \n \subsection vector_operations_rounding_functions floor() / ceil() / * trunc() / round() // // The \c floor(), \c ceil(), \c trunc(), and \c * round() functions can be used to round down/up // each element of a * vector, respectively: * * \code blaze::StaticVector<double,3UL,rowVector> a, b; * * b = floor( a ); // Rounding down each element of the vector b = ceil ( a ); * // Rounding up each element of the vector b = trunc( a ); // Truncating * each element of the vector b = round( a ); // Rounding each element of * the vector \endcode * * // \n \subsection vector_operators_conj conj() // // The \c conj() function * can be applied on a dense or sparse vector to compute the complex // * conjugate of each element of the vector: * * \code using blaze::StaticVector; * * using cplx = std::complex<double>; * * // Creating the vector // ( (-2,-1) ) // ( ( 1, 1) ) * StaticVector<cplx,2UL> a{ cplx(-2.0,-1.0), cplx(1.0,1.0) }; * * // Computing the vector of complex conjugates // ( (-2, 1) ) // ( ( * 1,-1) ) StaticVector<cplx,2UL> b; b = conj( a ); \endcode * * // Additionally, vectors can be conjugated in-place via the \c conjugate() * function: * * \code blaze::DynamicVector<cplx> c( 5UL ); * * conjugate( c ); // In-place conjugate operation. 
c = conj( c );   // Same as above
\endcode

// \n \subsection vector_operators_real real()
//
// The \c real() function can be used on a dense or sparse vector to extract the real part of
// each element of the vector:

\code
using blaze::StaticVector;

using cplx = std::complex<double>;

// Creating the vector
//    ( (-2,-1) )
//    ( ( 1, 1) )
StaticVector<cplx,2UL> a{ cplx(-2.0,-1.0), cplx(1.0,1.0) };

// Extracting the real part of each vector element
//    ( -2 )
//    (  1 )
StaticVector<double,2UL> b;
b = real( a );
\endcode

// \n \subsection vector_operators_imag imag()
//
// The \c imag() function can be used on a dense or sparse vector to extract the imaginary part
// of each element of the vector:

\code
using blaze::StaticVector;

using cplx = std::complex<double>;

// Creating the vector
//    ( (-2,-1) )
//    ( ( 1, 1) )
StaticVector<cplx,2UL> a{ cplx(-2.0,-1.0), cplx(1.0,1.0) };

// Extracting the imaginary part of each vector element
//    ( -1 )
//    (  1 )
StaticVector<double,2UL> b;
b = imag( a );
\endcode

// \n \subsection vector_operations_sqrt sqrt() / invsqrt()
//
// Via the \c sqrt() and \c invsqrt() functions the (inverse) square root of each element of
// a vector can be computed:

\code
blaze::DynamicVector<double> a, b, c;

b = sqrt( a );     // Computes the square root of each element
c = invsqrt( a );  // Computes the inverse square root of each element
\endcode

// Note that in case of sparse vectors only the non-zero elements are taken into account!
//
//
// \n \subsection vector_operations_cbrt cbrt() / invcbrt()
//
// The \c cbrt() and \c invcbrt() functions can be used to compute the (inverse) cubic root
// of each element of a vector:

\code
blaze::HybridVector<double,3UL> a, b, c;

b = cbrt( a );     // Computes the cubic root of each element
c = invcbrt( a );  // Computes the inverse cubic root of each element
\endcode

// Note that in case of sparse vectors only the non-zero elements are taken into account!
//
//
// \n \subsection vector_operations_hypot hypot()
//
// The \c hypot() function can be used to compute the componentwise hypotenuse for a pair of
// dense vectors:

\code
blaze::StaticVector<double,3UL> a, b, c;

c = hypot( a, b );  // Computes the componentwise hypotenuse
\endcode

// \n \subsection vector_operations_clamp clamp()
//
// The \c clamp() function can be used to restrict all elements of a vector to a specific range:

\code
blaze::DynamicVector<double> a, b;

b = clamp( a, -1.0, 1.0 );  // Restrict all elements to the range [-1..1]
\endcode

// Note that in case of sparse vectors only the non-zero elements are taken into account!
//
//
// \n \subsection vector_operations_pow pow()
//
// The \c pow() function can be used to compute the exponential value of each element of a
// vector. If passed a vector and a numeric exponent, the function computes the exponential
// value of each element of the vector using the same exponent.
If passed a second vector, the function computes * // the componentwise exponential value: * * \code blaze::StaticVector<double,3UL> a, b, c; * * c = pow( a, 1.2 ); // Computes the exponential value of each element c = * pow( a, b ); // Computes the componentwise exponential value \endcode * * // \n \subsection vector_operations_exp exp() / exp2() / exp10() // // \c * exp(), \c exp2() and \c exp10() compute the base e/2/10 exponential of * each element of a // vector, respectively: * * \code blaze::DynamicVector<double> a, b; * * b = exp( a ); // Computes the base e exponential of each element b = exp2( * a ); // Computes the base 2 exponential of each element b = exp10( a ); * // Computes the base 10 exponential of each element \endcode * * // Note that in case of sparse vectors only the non-zero elements are taken * into account! // // // \n \subsection vector_operations_log log() / log2() * / log10() // // The \c log(), \c log2() and \c log10() functions can be * used to compute the natural, binary // and common logarithm of each * element of a vector: * * \code blaze::StaticVector<double,3UL> a, b; * * b = log( a ); // Computes the natural logarithm of each element b = log2( * a ); // Computes the binary logarithm of each element b = log10( a ); * // Computes the common logarithm of each element \endcode * * // \n \subsection vector_operations_trigonometric_functions sin() / cos() / * tan() / asin() / acos() / atan() // // The following trigonometric * functions are available for both dense and sparse vectors: * * \code blaze::DynamicVector<double> a, b; * * b = sin( a ); // Computes the sine of each element of the vector b = cos( a * ); // Computes the cosine of each element of the vector b = tan( a ); // * Computes the tangent of each element of the vector * * b = asin( a ); // Computes the inverse sine of each element of the vector b * = acos( a ); // Computes the inverse cosine of each element of the vector * b = atan( a ); // Computes the inverse tangent of each element of the * vector \endcode * * // Note that in case of sparse vectors only the non-zero elements are taken * into account! // // // \n \subsection * vector_operations_hyperbolic_functions sinh() / cosh() / tanh() / asinh() * / acosh() / atanh() // // The following hyperbolic functions are available * for both dense and sparse vectors: * * \code blaze::DynamicVector<double> a, b; * * b = sinh( a ); // Computes the hyperbolic sine of each element of the vector * b = cosh( a ); // Computes the hyperbolic cosine of each element of the * vector b = tanh( a ); // Computes the hyperbolic tangent of each element * of the vector * * b = asinh( a ); // Computes the inverse hyperbolic sine of each element of * the vector b = acosh( a ); // Computes the inverse hyperbolic cosine of * each element of the vector b = atanh( a ); // Computes the inverse * hyperbolic tangent of each element of the vector \endcode * * // Note that in case of sparse vectors only the non-zero elements are taken * into account! 
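// Since all of these functions operate element-wise, they can be freely combined within a single
// expression. As a brief sketch (not taken from the reference; the values are purely
// illustrative), a Gaussian-like transformation exp( -x*x / 2 ) can be written as:

\code
blaze::DynamicVector<double> x{ -1.0, 0.0, 1.0, 2.0 };
blaze::DynamicVector<double> y;

// Element-wise composition: y[i] = exp( -x[i]*x[i] / 2.0 )
y = exp( -( x * x ) / 2.0 );
\endcode
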
// // // \n \subsection vector_operations_atan2 atan2() // * // The multi-valued inverse tangent is available for a pair of dense * vectors: * * \code blaze::DynamicVector<double> a, b, c; * * c = atan2( a, b ); // Computes the componentwise multi-valued inverse * tangent \endcode * * // \n \subsection vector_operations_erf erf() / erfc() // // The \c erf() and * \c erfc() functions compute the (complementary) error function of each // * element of a vector: * * \code blaze::StaticVector<double,3UL,rowVector> a, b; * * b = erf( a ); // Computes the error function of each element b = erfc( a ); * // Computes the complementary error function of each element \endcode * * // Note that in case of sparse vectors only the non-zero elements are taken * into account! // // // \n \subsection vector_operations_map map() / * forEach() // // Via the unary and binary \c map() functions it is possible * to execute componentwise custom // operations on vectors. The unary \c * map() function can be used to apply a custom operation // on each element * of a dense or sparse vector. For instance, the following example * demonstrates // a custom square root computation via a lambda: * * \code blaze::DynamicVector<double> a, b; * * b = map( a, []( double d ) { return std::sqrt( d ); } ); \endcode * * // The binary \c map() function can be used to apply an operation pairwise to * the elements of // two dense vectors. The following example demonstrates * the merging of two vectors of double // precision values into a vector of * double precision complex numbers: * * \code blaze::DynamicVector<double> real{ 2.1, -4.2, 1.0, 0.6 }; * blaze::DynamicVector<double> imag{ 0.3, 1.4, 2.9, -3.4 }; * * blaze::DynamicVector< complex<double> > cplx; * * // Creating the vector // ( (-2.1, 0.3) ) // ( (-4.2, -1.4) ) // ( * ( 1.0, 2.9) ) // ( ( 0.6, -3.4) ) cplx = map( real, imag, []( double * r, double i ){ return complex( r, i ); } ); \endcode * * // Although the computation can be parallelized it is not vectorized and thus * cannot perform at // peak performance. However, it is also possible to * create vectorized custom operations. See // \ref custom_operations for a * detailed overview of the possibilities of custom operations. // // Please * note that unary custom operations on vectors have been introduced in \b * Blaze 3.0 in // form of the \c forEach() function. With the introduction * of binary custom functions, the // \c forEach() function has been renamed * to \c map(). The \c forEach() function can still be // used (even for * binary custom operations), but the function might be deprecated in future * // releases of \b Blaze. // // // \n \section * vector_operations_reduction_operations Reduction Operations // <hr> // // * \subsection vector_operations_reduction_operations_reduce reduce() // // * The \c reduce() function performs a total reduction of the elements of the * given dense vector // or the non-zero elements of the given sparse vector. * The following examples demonstrate the // total reduction of a dense and * sparse vector: * * \code blaze::DynamicVector<double> a; // ... Resizing and initialization * * const double totalsum1 = reduce( a, blaze::Add() ); const double totalsum2 = * reduce( a, []( double a, double b ){ return a + b; } ); \endcode * * \code blaze::CompressedVector<double> a; // ... 
Resizing and initialization * * const double totalmin1 = reduce( a, blaze::Min() ); const double totalmin2 = * reduce( a, []( double a, double b ){ return blaze::min( a, b ); } ); * \endcode * * // As demonstrated in the examples it is possible to pass any binary callable * as custom reduction // operation. However, for instance in the case of * lambdas the vectorization of the reduction // operation is compiler * dependent and might not perform at peak performance. However, it is also * // possible to create vectorized custom operations. See \ref * custom_operations for a detailed // overview of the possibilities of * custom operations. // // Please note that the evaluation order of the \c * reduce() function is unspecified. Thus the // behavior is * non-deterministic if the given reduction operation is not associative or * not // commutative. Also, the operation is undefined if the given * reduction operation modifies the // values. // // \n \subsection * vector_operations_reduction_operations_sum sum() // // The \c sum() * function reduces the elements of the given dense vector or the non-zero * elements // of the given sparse vector by means of addition: * * \code blaze::DynamicVector<int> a{ 1, 2, 3, 4 }; * * const int totalsum = sum( a ); // Results in 10 \endcode * * \code blaze::CompressedVector<int> a{ 1, 2, 3, 4 }; * * const int totalsum = sum( a ); // Results in 10 \endcode * * // Please note that the evaluation order of the \c sum() function is * unspecified. // // \n \subsection * vector_operations_reduction_operations_prod prod() // // The \c prod() * function reduces the elements of the given dense vector or the non-zero * elements // of the given sparse vector by means of multiplication: * * \code blaze::DynamicVector<int> a{ 1, 2, 3, 4 }; * * const int totalprod = prod( a ); // Results in 24 \endcode * * \code blaze::CompressedVector<int> a{ 1, 2, 3, 4 }; * * const int totalprod = prod( a ); // Results in 24 \endcode * * // \n \subsection vector_operations_reduction_operations_min min() // // The * unary \c min() function returns the smallest element of the given dense * vector or the // smallest non-zero element of the given sparse vector. It * can only be used for element types // that support the smaller-than * relationship. In case the given vector currently has a size // of 0, the * returned value is the default value (e.g. 0 in case of fundamental data * types). * * \code blaze::DynamicVector<int> a{ 1, -2, 3, 0 }; * * const int totalmin = min( a ); // Results in -2 \endcode * * \code blaze::CompressedVector<int> a{ 1, 0, 3, 0 }; * * const int totalmin = min( a ); // Results in 1 \endcode * * // \note In case the sparse vector is not completely filled, the implicit * zero elements are NOT // taken into account. In the previous example the * compressed vector has only 2 non-zero elements. // However, the minimum of * the vector is 1. // // \n \subsection * vector_operations_reduction_operations_max max() // // The unary \c max() * function returns the largest element of the given dense vector or the // * largest non-zero element of the given sparse vector. It can only be used * for element types // that support the smaller-than relationship. In case * the given vector currently has a size // of 0, the returned value is the * default value (e.g. 0 in case of fundamental data types). 
\code
blaze::DynamicVector<int> a{ 1, -2, 3, 0 };

const int totalmax = max( a );  // Results in 3
\endcode

\code
blaze::CompressedVector<int> a{ -1, 0, -3, 0 };

const int totalmax = max( a );  // Results in -1
\endcode

// \note In case the sparse vector is not completely filled, the implicit zero elements are NOT
// taken into account. In the previous example the compressed vector has only 2 non-zero
// elements. However, the maximum of the vector is -1.
//
//
// \n \section vector_operations_norms Norms
// <hr>
//
// \subsection vector_operations_norms_norm norm()
//
// The \c norm() function computes the L2 norm of the given dense or sparse vector:

\code
blaze::DynamicVector<double> a;
// ... Resizing and initialization
const double l2 = norm( a );
\endcode

// \n \subsection vector_operations_norms_sqrnorm sqrNorm()
//
// The \c sqrNorm() function computes the squared L2 norm of the given dense or sparse vector:

\code
blaze::DynamicVector<double> a;
// ... Resizing and initialization
const double l2 = sqrNorm( a );
\endcode

// \n \subsection vector_operations_norms_l1norm l1Norm()
//
// The \c l1Norm() function computes the L1 norm of the given dense or sparse vector:

\code
blaze::DynamicVector<double> a;
// ... Resizing and initialization
const double l1 = l1Norm( a );
\endcode

// \n \subsection vector_operations_norms_l2norm l2Norm()
//
// The \c l2Norm() function computes the L2 norm of the given dense or sparse vector:

\code
blaze::DynamicVector<double> a;
// ... Resizing and initialization
const double l2 = l2Norm( a );
\endcode

// \n \subsection vector_operations_norms_l3norm l3Norm()
//
// The \c l3Norm() function computes the L3 norm of the given dense or sparse vector:

\code
blaze::DynamicVector<double> a;
// ... Resizing and initialization
const double l3 = l3Norm( a );
\endcode

// \n \subsection vector_operations_norms_l4norm l4Norm()
//
// The \c l4Norm() function computes the L4 norm of the given dense or sparse vector:

\code
blaze::DynamicVector<double> a;
// ... Resizing and initialization
const double l4 = l4Norm( a );
\endcode

// \n \subsection vector_operations_norms_lpnorm lpNorm()
//
// The \c lpNorm() function computes the general Lp norm of the given dense or sparse vector,
// where the norm is specified by either a compile time or a runtime argument:

\code
blaze::DynamicVector<double> a;
// ... Resizing and initialization
const double lp1 = lpNorm<2>( a );    // Compile time argument
const double lp2 = lpNorm( a, 2.3 );  // Runtime argument
\endcode

// \n \subsection vector_operations_norms_maxnorm maxNorm()
//
// The \c maxNorm() function computes the maximum norm of the given dense or sparse vector:

\code
blaze::DynamicVector<double> a;
// ...
Resizing and initialization * const double max = maxNorm( a ); \endcode * * // \n Previous: \ref vector_types &nbsp; &nbsp; Next: \ref matrices */ //************************************************************************************************* //**Matrices *************************************************************************************** /* * !\page matrices Matrices // // \tableofcontents // // // \n \section * matrices_general General Concepts // <hr> // // The \b Blaze library * currently offers four dense matrix types (\ref matrix_types_static_matrix, * // \ref matrix_types_dynamic_matrix, \ref matrix_types_hybrid_matrix, and * \ref matrix_types_custom_matrix) // and one sparse matrix type (\ref * matrix_types_compressed_matrix). All matrices can either be // stored as * row-major matrices or column-major matrices: * * \code using blaze::DynamicMatrix; using blaze::rowMajor; using * blaze::columnMajor; * * // Setup of the 2x3 row-major dense matrix // // ( 1 2 3 ) // ( 4 5 * 6 ) // DynamicMatrix<int,rowMajor> A{ { 1, 2, 3 }, { 4, 5, 6 } }; * * // Setup of the 3x2 column-major dense matrix // // ( 1 4 ) // ( 2 5 * ) // ( 3 6 ) // DynamicMatrix<int,columnMajor> B{ { 1, 4 }, { 2, 5 }, * { 3, 6 } }; \endcode * * // Per default, all matrices in \b Blaze are row-major matrices: * * \code // Instantiation of a 3x3 row-major matrix blaze::DynamicMatrix<int> C( * 3UL, 3UL ); \endcode * * // \n \section matrices_details Matrix Details // <hr> // // - \ref * matrix_types // - \ref matrix_operations // // // \n \section * matrices_examples Examples // <hr> * * \code using blaze::StaticMatrix; using blaze::DynamicMatrix; using * blaze::CompressedMatrix; using blaze::rowMajor; using blaze::columnMajor; * * StaticMatrix<double,6UL,20UL> A; // Instantiation of a 6x20 row-major * static matrix CompressedMatrix<double,rowMajor> B; // Instantiation of a * row-major compressed matrix DynamicMatrix<double,columnMajor> C; // * Instantiation of a column-major dynamic matrix * * // ... Resizing and initialization * * C = A * B; \endcode * * // \n Previous: \ref vector_operations &nbsp; &nbsp; Next: \ref matrix_types */ //************************************************************************************************* //**Matrix Types *********************************************************************************** /* * !\page matrix_types Matrix Types // // \tableofcontents // // // \n * \section matrix_types_static_matrix StaticMatrix // <hr> // // The * blaze::StaticMatrix class template is the representation of a fixed size * matrix with // statically allocated elements of arbitrary type. It can be * included via the header file * * \code #include <blaze/math/StaticMatrix.h> \endcode * * // The type of the elements, the number of rows and columns, and the storage * order of the matrix // can be specified via the four template parameters: * * \code template< typename Type, size_t M, size_t N, bool SO > class * StaticMatrix; \endcode * * // - \c Type: specifies the type of the matrix elements. StaticMatrix can be * used with any // non-cv-qualified, non-reference element type. * // - \c M : specifies the total number of rows of the matrix. // - \c * N : specifies the total number of columns of the matrix. Note that it is * expected // that StaticMatrix is only used for tiny and small * matrices. // - \c SO : specifies the storage order (blaze::rowMajor, * blaze::columnMajor) of the matrix. // The default value is * blaze::rowMajor. 
// // The blaze::StaticMatrix is perfectly suited for * small to medium matrices whose dimensions are // known at compile time: * * \code // Definition of a 3x4 integral row-major matrix * blaze::StaticMatrix<int,3UL,4UL> A; * * // Definition of a 4x6 single precision row-major matrix * blaze::StaticMatrix<float,4UL,6UL,blaze::rowMajor> B; * * // Definition of a 6x4 double precision column-major matrix * blaze::StaticMatrix<double,6UL,4UL,blaze::columnMajor> C; \endcode * * // \n \section matrix_types_dynamic_matrix DynamicMatrix // <hr> // // The * blaze::DynamicMatrix class template is the representation of an arbitrary * sized matrix // with \f$ M \cdot N \f$ dynamically allocated elements of * arbitrary type. It can be included // via the header file * * \code #include <blaze/math/DynamicMatrix.h> \endcode * * // The type of the elements and the storage order of the matrix can be * specified via the two // template parameters: * * \code template< typename Type, bool SO > class DynamicMatrix; \endcode * * // - \c Type: specifies the type of the matrix elements. DynamicMatrix can * be used with any // non-cv-qualified, non-reference element * type. // - \c SO : specifies the storage order (blaze::rowMajor, * blaze::columnMajor) of the matrix. // The default value is * blaze::rowMajor. // // The blaze::DynamicMatrix is the default choice for * all kinds of dense matrices and the best // choice for medium to large * matrices. The number of rows and columns can be modified at runtime: * * \code // Definition of a 3x4 integral row-major matrix * blaze::DynamicMatrix<int> A( 3UL, 4UL ); * * // Definition of a 4x6 single precision row-major matrix * blaze::DynamicMatrix<float,blaze::rowMajor> B( 4UL, 6UL ); * * // Definition of a double precision column-major matrix with 0 rows and * columns blaze::DynamicMatrix<double,blaze::columnMajor> C; \endcode * * // \n \section matrix_types_hybrid_matrix HybridMatrix // <hr> // // The * HybridMatrix class template combines the flexibility of a dynamically * sized matrix with // the efficiency and performance of a fixed size * matrix. It is implemented as a crossing between // the blaze::StaticMatrix * and the blaze::DynamicMatrix class templates: Similar to the static // * matrix it uses static stack memory instead of dynamically allocated memory * and similar to the // dynamic matrix it can be resized (within the extend * of the static memory). It can be included // via the header file * * \code #include <blaze/math/HybridMatrix.h> \endcode * * // The type of the elements, the maximum number of rows and columns and the * storage order of the // matrix can be specified via the four template * parameters: * * \code template< typename Type, size_t M, size_t N, bool SO > class * HybridMatrix; \endcode * * // - Type: specifies the type of the matrix elements. HybridMatrix can be * used with any // non-cv-qualified, non-reference, non-pointer * element type. // - M : specifies the maximum number of rows of the * matrix. // - N : specifies the maximum number of columns of the matrix. * Note that it is expected // that HybridMatrix is only used for * tiny and small matrices. // - SO : specifies the storage order * (blaze::rowMajor, blaze::columnMajor) of the matrix. // The * default value is blaze::rowMajor. 
// // The blaze::HybridMatrix is a * suitable choice for small to medium matrices, whose dimensions // are not * known at compile time or not fixed at runtime, but whose maximum * dimensions are known // at compile time: * * \code // Definition of a 3x4 integral row-major matrix with maximum * dimensions of 6x8 blaze::HybridMatrix<int,6UL,8UL> A( 3UL, 4UL ); * * // Definition of a 4x6 single precision row-major matrix with maximum * dimensions of 12x16 blaze::HybridMatrix<float,12UL,16UL,blaze::rowMajor> * B( 4UL, 6UL ); * * // Definition of a 0x0 double precision column-major matrix and maximum * dimensions of 6x6 blaze::HybridMatrix<double,6UL,6UL,blaze::columnMajor> * C; \endcode * * // \n \section matrix_types_custom_matrix CustomMatrix // <hr> // // The * blaze::CustomMatrix class template provides the functionality to represent * an external // array of elements of arbitrary type and a fixed size as a * native \b Blaze dense matrix data // structure. Thus in contrast to all * other dense matrix types a custom matrix does not perform // any kind of * memory allocation by itself, but it is provided with an existing array of * element // during construction. A custom matrix can therefore be * considered an alias to the existing // array. It can be included via the * header file * * \code #include <blaze/math/CustomMatrix.h> \endcode * * // The type of the elements, the properties of the given array of elements * and the storage order // of the matrix can be specified via the following * four template parameters: * * \code template< typename Type, bool AF, bool PF, bool SO > class * CustomMatrix; \endcode * * // - Type: specifies the type of the matrix elements. blaze::CustomMatrix * can be used with // any non-cv-qualified, non-reference, * non-pointer element type. // - AF : specifies whether the represented, * external arrays are properly aligned with // respect to the * available instruction set (SSE, AVX, ...) or not. // - PF : specified * whether the represented, external arrays are properly padded with // * respect to the available instruction set (SSE, AVX, ...) or not. // - SO * : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the * matrix. // The default value is blaze::rowMajor. 
// // The * blaze::CustomMatrix is the right choice if any external array needs to be * represented as // a \b Blaze dense matrix data structure or if a custom * memory allocation strategy needs to be // realized: * * \code using blaze::CustomMatrix; using blaze::Deallocate; using * blaze::aligned; using blaze::unaligned; using blaze::padded; using * blaze::unpadded; * * // Definition of an unmanaged 3x4 custom matrix for unaligned, unpadded * integer arrays using UnalignedUnpadded = * CustomMatrix<int,unaligned,unpadded,rowMajor>; std::vector<int> vec( 12UL * ) UnalignedUnpadded A( &vec[0], 3UL, 4UL ); * * // Definition of a managed 5x6 custom matrix for unaligned but padded 'float' * arrays using UnalignedPadded = * CustomMatrix<float,unaligned,padded,columnMajor>; std::unique_ptr<float[]> * memory1( new float[40] ); UnalignedPadded B( memory1.get(), 5UL, 6UL, 8UL * ); * * // Definition of a managed 12x13 custom matrix for aligned, unpadded 'double' * arrays using AlignedUnpadded = * CustomMatrix<double,aligned,unpadded,rowMajor>; * std::unique_ptr<double[],Deallocate> memory2( blaze::allocate<double>( * 192UL ) ); AlignedUnpadded C( memory2.get(), 12UL, 13UL, 16UL ); * * // Definition of a 7x14 custom matrix for aligned, padded 'complex<double>' * arrays using cplx = complex<double>; using AlignedPadded = * CustomMatrix<cplx,aligned,padded,columnMajor>; * std::unique_ptr<cplx[],Deallocate> memory3( blaze::allocate<cplx>( 112UL ) * ); AlignedPadded D( memory3.get(), 7UL, 14UL, 16UL ); \endcode * * // In comparison with the remaining \b Blaze dense matrix types * blaze::CustomMatrix has several // special characteristics. All of these * result from the fact that a custom matrix is not // performing any kind of * memory allocation, but instead is given an existing array of elements. // * The following sections discuss all of these characteristics: // // -# * <b>\ref matrix_types_custom_matrix_memory_management</b> // -# <b>\ref * matrix_types_custom_matrix_copy_operations</b> // -# <b>\ref * matrix_types_custom_matrix_alignment</b> // -# <b>\ref * matrix_types_custom_matrix_padding</b> // // \n \subsection * matrix_types_custom_matrix_memory_management Memory Management // // The * blaze::CustomMatrix class template acts as an adaptor for an existing * array of elements. As // such it provides everything that is required to * use the array just like a native \b Blaze dense // matrix data structure. * However, this flexibility comes with the price that the user of a custom * // matrix is responsible for the resource management. // // The following * examples give an impression of several possible types of custom matrices: * * \code using blaze::CustomMatrix; using blaze::Deallocate; using * blaze::allocate; using blaze::aligned; using blaze::unaligned; using * blaze::padded; using blaze::unpadded; * * // Definition of a 3x4 custom row-major matrix with unaligned, unpadded and * externally // managed integer array. Note that the std::vector must be * guaranteed to outlive the // custom matrix! std::vector<int> vec( 12UL ); * CustomMatrix<int,unaligned,unpadded> A( &vec[0], 3UL, 4UL ); * * // Definition of a custom 8x12 matrix for an aligned and padded integer array * of // capacity 128 (including 8 padding elements per row). Note that the * std::unique_ptr // must be guaranteed to outlive the custom matrix! 
std::unique_ptr<int[],Deallocate> memory( allocate<int>( 128UL ) );
CustomMatrix<int,aligned,padded> B( memory.get(), 8UL, 12UL, 16UL );
\endcode

// \n \subsection matrix_types_custom_matrix_copy_operations Copy Operations
//
// As with all dense matrices it is possible to copy construct a custom matrix:

\code
using blaze::CustomMatrix;
using blaze::unaligned;
using blaze::unpadded;

using CustomType = CustomMatrix<int,unaligned,unpadded>;

std::vector<int> vec( 6UL, 10 );    // Vector of 6 integers of the value 10
CustomType A( &vec[0], 2UL, 3UL );  // Represent the std::vector as Blaze dense matrix
A(1,1) = 20;                        // Also modifies the std::vector

CustomType B( A );  // Creating a copy of matrix A
B(0,2) = 20;        // Also affects matrix A and the std::vector
\endcode

// It is important to note that a custom matrix acts as a reference to the specified array. Thus
// the result of the copy constructor is a new custom matrix that is referencing and representing
// the same array as the original custom matrix.
//
// In contrast to copy construction, just as with references, copy assignment does not change
// which array is referenced by the custom matrices, but modifies the values of the array:

\code
std::vector<int> vec2( 6UL, 4 );     // Vector of 6 integers of the value 4
CustomType C( &vec2[0], 2UL, 3UL );  // Represent the std::vector as Blaze dense matrix

A = C;  // Copy assignment: Set all values of matrix A and B to 4.
\endcode

// \n \subsection matrix_types_custom_matrix_alignment Alignment
//
// In case the custom matrix is specified as \c aligned the passed array must adhere to some
// alignment restrictions based on the alignment requirements of the used data type and the
// used instruction set (SSE, AVX, ...). The restriction applies to the first element of each
// row/column: In case of a row-major matrix the first element of each row must be properly
// aligned, in case of a column-major matrix the first element of each column must be properly
// aligned. For instance, if a row-major matrix is used and AVX is active the first element of
// each row must be 32-byte aligned:

\code
using blaze::CustomMatrix;
using blaze::Deallocate;
using blaze::allocate;
using blaze::aligned;
using blaze::padded;
using blaze::rowMajor;

// Allocation of 32-byte aligned memory
std::unique_ptr<int[],Deallocate> memory( allocate<int>( 40UL ) );

CustomMatrix<int,aligned,padded,rowMajor> A( memory.get(), 5UL, 6UL, 8UL );
\endcode

// In the example, the row-major matrix has six columns. However, since with AVX eight integer
// values are loaded together the matrix is padded with two additional elements. This guarantees
// that the first element of each row is 32-byte aligned. In case the alignment requirements are
// violated, a \c std::invalid_argument exception is thrown.
//
// \n \subsection matrix_types_custom_matrix_padding Padding
//
// Adding padding elements to the end of each row/column can have a significant impact on the
// performance. For instance, assuming that AVX is available, then two aligned, padded, 3x3
// double precision matrices can be added via three SIMD addition operations:

\code
using blaze::CustomMatrix;
using blaze::Deallocate;
using blaze::allocate;
using blaze::aligned;
using blaze::padded;

using CustomType = CustomMatrix<double,aligned,padded>;

std::unique_ptr<double[],Deallocate> memory1( allocate<double>( 12UL ) );
std::unique_ptr<double[],Deallocate> memory2( allocate<double>( 12UL ) );
std::unique_ptr<double[],Deallocate> memory3( allocate<double>( 12UL ) );

// Creating padded custom 3x3 matrix with an additional padding element in each row
CustomType A( memory1.get(), 3UL, 3UL, 4UL );
CustomType B( memory2.get(), 3UL, 3UL, 4UL );
CustomType C( memory3.get(), 3UL, 3UL, 4UL );

// ... Initialization

C = A + B;  // AVX-based matrix addition
\endcode

// In this example, maximum performance is possible. However, in case no padding elements are
// inserted a scalar addition has to be used:

\code
using blaze::CustomMatrix;
using blaze::Deallocate;
using blaze::allocate;
using blaze::aligned;
using blaze::unpadded;

using CustomType = CustomMatrix<double,aligned,unpadded>;

std::unique_ptr<double[],Deallocate> memory1( allocate<double>( 9UL ) );
std::unique_ptr<double[],Deallocate> memory2( allocate<double>( 9UL ) );
std::unique_ptr<double[],Deallocate> memory3( allocate<double>( 9UL ) );

// Creating unpadded custom 3x3 matrix
CustomType A( memory1.get(), 3UL, 3UL );
CustomType B( memory2.get(), 3UL, 3UL );
CustomType C( memory3.get(), 3UL, 3UL );

// ... Initialization

C = A + B;  // Scalar matrix addition
\endcode

// Note that the construction of padded and unpadded aligned matrices looks identical. However,
// in case of padded matrices, \b Blaze will zero initialize the padding elements and use them
// in all computations in order to achieve maximum performance. In case of an unpadded matrix
// \b Blaze will ignore the padding elements with the downside that it is not possible to load
// a complete row to an AVX register, which makes it necessary to fall back to a scalar addition.
//
// The number of padding elements is required to be sufficient with respect to the available
// instruction set: In case of an aligned padded custom matrix the added padding elements must
// guarantee that the total number of elements in each row/column is a multiple of the SIMD
// vector width. In case of an unaligned padded matrix the number of padding elements can be
// greater or equal the number of padding elements of an aligned padded custom matrix. In case
// the padding is insufficient with respect to the available instruction set, a
// \c std::invalid_argument exception is thrown.
//
//
// \n \section matrix_types_compressed_matrix CompressedMatrix
// <hr>
//
// The blaze::CompressedMatrix class template is the representation of an arbitrary sized sparse
// matrix with \f$ M \cdot N \f$ dynamically allocated elements of arbitrary type. It can be
// included via the header file

\code
#include <blaze/math/CompressedMatrix.h>
\endcode

// The type of the elements and the storage order of the matrix can be specified via the two
// template parameters:

\code
template< typename Type, bool SO >
class CompressedMatrix;
\endcode

// - \c Type: specifies the type of the matrix elements. CompressedMatrix can be used with
//            any non-cv-qualified, non-reference, non-pointer element type.
// - \c SO : specifies the storage order * (blaze::rowMajor, blaze::columnMajor) of the matrix. // The * default value is blaze::rowMajor. // // The blaze::CompressedMatrix is the * right choice for all kinds of sparse matrices: * * \code // Definition of a 3x4 integral row-major matrix * blaze::CompressedMatrix<int> A( 3UL, 4UL ); * * // Definition of a 4x6 single precision row-major matrix * blaze::CompressedMatrix<float,blaze::rowMajor> B( 4UL, 6UL ); * * // Definition of a double precision column-major matrix with 0 rows and * columns blaze::CompressedMatrix<double,blaze::columnMajor> C; \endcode * * // \n \section matrix_types_identity_matrix IdentityMatrix // <hr> // // The * blaze::IdentityMatrix class template is the representation of an * immutable, arbitrary // sized identity matrix with \f$ N \cdot N \f$ * elements of arbitrary type. It can be included // via the header file * * \code #include <blaze/math/IdentityMatrix.h> \endcode * * // The type of the elements and the storage order of the matrix can be * specified via the two // template parameters: * * \code template< typename Type, bool SO > class IdentityMatrix; \endcode * * // - Type: specifies the type of the matrix elements. IdentityMatrix can be * used with any // non-cv-qualified, non-reference, non-pointer * element type. // - SO : specifies the storage order (blaze::rowMajor, * blaze::columnMajor) of the matrix. // The default value is * blaze::rowMajor. // // The blaze::IdentityMatrix is the perfect choice to * represent an identity matrix: * * \code // Definition of a 3x3 integral row-major identity matrix * blaze::IdentityMatrix<int> A( 3UL ); * * // Definition of a 6x6 single precision row-major identity matrix * blaze::IdentityMatrix<float,blaze::rowMajor> B( 6UL ); * * // Definition of a double precision column-major identity matrix with 0 rows * and columns blaze::IdentityMatrix<double,blaze::columnMajor> C; \endcode * * // \n Previous: \ref matrices &nbsp; &nbsp; Next: \ref matrix_operations */ //************************************************************************************************* //**Matrix Operations ****************************************************************************** /* * !\page matrix_operations Matrix Operations // // \tableofcontents // // // * \n \section matrix_operations_constructors Constructors // <hr> // // * Matrices are just as easy and intuitive to create as vectors. Still, there * are a few rules // to be aware of: // - In case the last template * parameter (the storage order) is omitted, the matrix is per // default * stored in row-major order. // - The elements of a \c StaticMatrix or \c * HybridMatrix are default initialized (i.e. built-in // data types are * initialized to 0, class types are initialized via the default * constructor). // - Newly allocated elements of a \c DynamicMatrix or \c * CompressedMatrix remain uninitialized // if they are of built-in type * and are default constructed if they are of class type. // // \n * \subsection matrix_operations_default_construction Default Construction * * \code using blaze::StaticMatrix; using blaze::DynamicMatrix; using * blaze::CompressedMatrix; * * // All matrices can be default constructed. Whereas the size of // a * StaticMatrix is fixed via the second and third template // parameter, the * initial size of a constructed DynamicMatrix // or CompressedMatrix is 0. * StaticMatrix<int,2UL,2UL> M1; // Instantiation of a 2x2 * integer row-major // matrix. All elements are initialized to 0. 
* DynamicMatrix<float> M2; // Instantiation of a single * precision dynamic // row-major matrix with 0 rows and 0 columns. * DynamicMatrix<double,columnMajor> M3; // Instantiation of a double * precision dynamic // column-major matrix with 0 rows and 0 columns. * CompressedMatrix<int> M4; // Instantiation of a compressed * integer // row-major matrix of size 0x0. * CompressedMatrix<double,columnMajor> M5; // Instantiation of a compressed * double precision // column-major matrix of size 0x0. \endcode * * // \n \subsection matrix_operations_size_construction Construction with * Specific Size // // The \c DynamicMatrix, \c HybridMatrix, and \c * CompressedMatrix classes offer a constructor // that allows to immediately * give the matrices a specific number of rows and columns: * * \code DynamicMatrix<int> M6( 5UL, 4UL ); // Instantiation * of a 5x4 dynamic row-major // matrix. The elements are not initialized. * HybridMatrix<double,5UL,9UL> M7( 3UL, 7UL ); // Instantiation of a * 3x7 hybrid row-major // matrix. The elements are not initialized. * CompressedMatrix<float,columnMajor> M8( 8UL, 6UL ); // Instantiation of * an empty 8x6 compressed // column-major matrix. \endcode * * // Note that dense matrices (in this case \c DynamicMatrix and \c * HybridMatrix) immediately // allocate enough capacity for all matrix * elements. Sparse matrices on the other hand (in this // example \c * CompressedMatrix) merely acquire the size, but don't necessarily allocate * memory. // // // \n \subsection * matrix_operations_initialization_constructors Initialization Constructors * // // All dense matrix classes offer a constructor for a direct, * homogeneous initialization of all // matrix elements. In contrast, for * sparse matrices the predicted number of non-zero elements // can be * specified. * * \code StaticMatrix<int,4UL,3UL,columnMajor> M9( 7 ); // Instantiation of a * 4x3 integer column-major // matrix. All elements are initialized to 7. * DynamicMatrix<float> M10( 2UL, 5UL, 2.0F ); // Instantiation of a 2x5 * single precision row-major // matrix. All elements are initialized to * 2.0F. CompressedMatrix<int> M11( 3UL, 4UL, 4 ); // Instantiation of * a 3x4 integer row-major // matrix with capacity for 4 non-zero elements. * \endcode * * // \n \subsection matrix_operations_array_construction Array Construction // * // Alternatively, all dense matrix classes offer a constructor for an * initialization with a // dynamic or static array. If the matrix is * initialized from a dynamic array, the constructor // expects the * dimensions of values provided by the array as first and second argument, * the // array as third argument. In case of a static array, the fixed size * of the array is used: * * \code const std::unique_ptr<double[]> array1( new double[6] ); // ... * Initialization of the dynamic array blaze::StaticMatrix<double,2UL,3UL> * M12( 2UL, 3UL, array1.get() ); * * int array2[2][2] = { { 4, -5 }, { -6, 7 } }; * blaze::StaticMatrix<int,2UL,2UL,rowMajor> M13( array2 ); \endcode * * // \n \subsection matrix_operations_initializer_list_construction // // In * addition, all dense and sparse matrix classes can be directly initialized * by means of an // initializer list: * * \code blaze::DynamicMatrix<float,columnMajor> M14{ { 3.1F, 6.4F }, { -0.9F, * -1.2F }, { 4.8F, 0.6F } }; blaze::CompressedMatrix<int,rowMajor> M15{ { * 3 }, { 1 }, { 0, 2 } }; \endcode * * // In case of sparse matrices, only the non-zero elements are used to * initialize the matrix. 
// Missing values are considered to be default * values. // // \n \subsection matrix_operations_copy_construction Copy * Construction // // All dense and sparse matrices can be created as a copy * of another dense or sparse matrix. * * \code StaticMatrix<int,5UL,4UL,rowMajor> M16( M6 ); // Instantiation of * the dense row-major matrix M16 // as copy of the dense row-major matrix * M6. DynamicMatrix<float,columnMajor> M17( M8 ); // Instantiation of * the dense column-major matrix M17 // as copy of the sparse column-major * matrix M8. CompressedMatrix<double,columnMajor> M18( M7 ); // * Instantiation of the compressed column-major matrix // M18 as copy of the * dense row-major matrix M7. CompressedMatrix<float,rowMajor> M19( M8 ); * // Instantiation of the compressed row-major matrix // M19 as copy of the * compressed column-major matrix M8. \endcode * * // Note that it is not possible to create a \c StaticMatrix as a copy of a * matrix with a different // number of rows and/or columns: * * \code StaticMatrix<int,4UL,5UL,rowMajor> M20( M6 ); // Runtime error: * Number of rows and columns // does not match! * StaticMatrix<int,4UL,4UL,columnMajor> M21( M9 ); // Compile time error: * Number of columns does // not match! \endcode * * // \n \section matrix_operations_assignment Assignment // <hr> // // There * are several types of assignment to dense and sparse matrices: // \ref * matrix_operations_homogeneous_assignment, \ref * matrix_operations_array_assignment, // \ref * matrix_operations_copy_assignment, and \ref * matrix_operations_compound_assignment. // // // \n \subsection * matrix_operations_homogeneous_assignment Homogeneous Assignment // // It * is possible to assign the same value to all elements of a dense matrix. * All dense matrix // classes provide an according assignment operator: * * \code blaze::StaticMatrix<int,3UL,2UL> M1; blaze::DynamicMatrix<double> M2; * * // Setting all integer elements of the StaticMatrix to 4 M1 = 4; * * // Setting all double precision elements of the DynamicMatrix to 3.5 M2 = 3.5 * \endcode * * // \n \subsection matrix_operations_array_assignment Array Assignment // // * Dense matrices can also be assigned a static array: * * \code blaze::StaticMatrix<int,2UL,2UL,rowMajor> M1; * blaze::StaticMatrix<int,2UL,2UL,columnMajor> M2; * blaze::DynamicMatrix<double> M3; * * int array1[2][2] = { { 1, 2 }, { 3, 4 } }; double array2[3][2] = { { 3.1, 6.4 * }, { -0.9, -1.2 }, { 4.8, 0.6 } }; * * M1 = array1; M2 = array1; M3 = array2; \endcode * * // Note that the dimensions of the static array have to match the size of a * \c StaticMatrix, // whereas a \c DynamicMatrix is resized according to the * array dimensions: * * \f$ M3 = \left(\begin{array}{*{2}{c}} 3.1 & 6.4 \\ -0.9 & -1.2 \\ 4.8 & 0.6 * \\ \end{array}\right)\f$ * * // \n \subsection matrix_operations_initializer_list_assignment Initializer * List Assignment // // Alternatively, it is possible to directly assign an * initializer list to a dense or sparse // matrix: * * \code blaze::DynamicMatrix<double> M1; blaze::CompressedMatrix<int> M2; * * M1 = { { 3.1, 6.4 }, { -0.9, -1.2 }, { 4.8, 0.6 } }; M2 = { { 1, 0 }, {}, { * 0, 1 }, { 2 } }; \endcode * * // In case of sparse matrices, only the non-zero elements are considered. * Missing values are // considered to be default values. // // \n * \subsection matrix_operations_copy_assignment Copy Assignment // // All * kinds of matrices can be assigned to each other. 
The only restriction is * that since a // \c StaticMatrix cannot change its size, the assigned * matrix must match both in the number of // rows and in the number of * columns. * * \code blaze::StaticMatrix<int,3UL,2UL,rowMajor> M1; * blaze::DynamicMatrix<int,rowMajor> M2( 3UL, 2UL ); * blaze::DynamicMatrix<float,rowMajor> M3( 5UL, 2UL ); * blaze::CompressedMatrix<int,rowMajor> M4( 3UL, 2UL ); * blaze::CompressedMatrix<float,columnMajor> M5( 3UL, 2UL ); * * // ... Initialization of the matrices * * M1 = M2; // OK: Assignment of a 3x2 dense row-major matrix to another 3x2 * dense row-major matrix M1 = M4; // OK: Assignment of a 3x2 sparse * row-major matrix to a 3x2 dense row-major matrix M1 = M3; // Runtime * error: Cannot assign a 5x2 matrix to a 3x2 static matrix M1 = M5; // OK: * Assignment of a 3x2 sparse column-major matrix to a 3x2 dense row-major * matrix \endcode * * // \n \subsection matrix_operations_compound_assignment Compound Assignment * // // Compound assignment is also available for matrices: addition * assignment, subtraction assignment, // and multiplication assignment. In * contrast to plain assignment, however, the number of rows // and columns * of the two operands have to match according to the arithmetic operation. * * \code blaze::StaticMatrix<int,2UL,3UL,rowMajor> M1; * blaze::DynamicMatrix<int,rowMajor> M2( 2UL, 3UL ); * blaze::CompressedMatrix<float,columnMajor> M3( 2UL, 3UL ); * blaze::CompressedMatrix<float,rowMajor> M4( 2UL, 4UL ); * blaze::StaticMatrix<float,2UL,4UL,rowMajor> M5; * blaze::CompressedMatrix<float,rowMajor> M6( 3UL, 2UL ); * * // ... Initialization of the matrices * * M1 += M2; // OK: Addition assignment between two row-major matrices of the * same dimensions M1 -= M3; // OK: Subtraction assignment between between a * row-major and a column-major matrix M1 += M4; // Runtime error: No * compound assignment between matrices of different size M1 -= M5; // * Compilation error: No compound assignment between matrices of different * size M2 *= M6; // OK: Multiplication assignment between two row-major * matrices \endcode * * // Note that the multiplication assignment potentially changes the number of * columns of the // target matrix: * * \f$\left(\begin{array}{*{3}{c}} 2 & 0 & 1 \\ 0 & 3 & 2 \\ \end{array}\right) * \times \left(\begin{array}{*{2}{c}} 4 & 0 \\ 1 & 0 \\ 0 & 3 \\ * \end{array}\right) = \left(\begin{array}{*{2}{c}} 8 & 3 \\ 3 & 6 \\ * \end{array}\right)\f$ * * // Since a \c StaticMatrix cannot change its size, only a square StaticMatrix * can be used in a // multiplication assignment with other square matrices * of the same dimensions. // // // \n \section * matrix_operations_element_access Element Access // <hr> // // \n * \subsection matrix_operations_function_call_operator_1 Function Call * Operator // // The easiest way to access a specific dense or sparse matrix * element is via the function call // operator. The indices to access a * matrix are zero-based: * * \code blaze::DynamicMatrix<int> M1( 4UL, 6UL ); M1(0,0) = 1; M1(0,1) = 3; // * ... * * blaze::CompressedMatrix<double> M2( 5UL, 3UL ); M2(0,2) = 4.1; M2(1,1) = * -6.3; \endcode * * // Since dense matrices allocate enough memory for all contained elements, * using the function // call operator on a dense matrix directly returns a * reference to the accessed value. 
In case // of a sparse matrix, if the * accessed value is currently not contained in the matrix, the // value is * inserted into the matrix prior to returning a reference to the value, * which can // be much more expensive than the direct access to a dense * matrix. Consider the following // example: * * \code blaze::CompressedMatrix<int> M1( 4UL, 4UL ); * * for( size_t i=0UL; i<M1.rows(); ++i ) { for( size_t j=0UL; j<M1.columns(); * ++j ) { ... = M1(i,j); } } \endcode * * // Although the compressed matrix is only used for read access within the for * loop, using the // function call operator temporarily inserts 16 non-zero * elements into the matrix. Therefore // the preferred way to traverse the * non-zero elements of a sparse matrix is to use iterators. // // \n * \subsection matrix_operations_iterators Iterators // // All matrices * (sparse as well as dense) offer an alternate way via the \c begin(), \c * cbegin(), // \c end() and \c cend() functions to traverse all contained * elements by iterator. Note that // it is not possible to traverse all * elements of the matrix, but that it is only possible to // traverse * elements in a row/column-wise fashion. In case of a non-const matrix, \c * begin() and // \c end() return an \c Iterator, which allows a manipulation * of the non-zero value, in case of // a constant matrix or in case \c * cbegin() or \c cend() are used a \c ConstIterator is returned: * * \code using blaze::CompressedMatrix; * * CompressedMatrix<int,rowMajor> M1( 4UL, 6UL ); * * // Traversing the matrix by Iterator for( size_t i=0UL; i<A.rows(); ++i ) { * for( CompressedMatrix<int,rowMajor>::Iterator it=A.begin(i); it!=A.end(i); * ++it ) { it->value() = ...; // OK: Write access to the value of the * non-zero element. ... = it->value(); // OK: Read access to the value of * the non-zero element. it->index() = ...; // Compilation error: The index * of a non-zero element cannot be changed. ... = it->index(); // OK: Read * access to the index of the non-zero element. } } * * // Traversing the matrix by ConstIterator for( size_t i=0UL; i<A.rows(); ++i * ) { for( CompressedMatrix<int,rowMajor>::ConstIterator it=A.cbegin(i); * it!=A.cend(i); ++it ) { it->value() = ...; // Compilation error: * Assignment to the value via a ConstIterator is invalid. ... = it->value(); * // OK: Read access to the value of the non-zero element. it->index() = * ...; // Compilation error: The index of a non-zero element cannot be * changed. ... = it->index(); // OK: Read access to the index of the * non-zero element. } } \endcode * * // Note that \c begin(), \c cbegin(), \c end(), and \c cend() are also * available as free functions: * * \code for( size_t i=0UL; i<A.rows(); ++i ) { for( * CompressedMatrix<int,rowMajor>::Iterator it=begin( A, i ); it!=end( A, i * ); ++it ) { // ... } } * * for( size_t i=0UL; i<A.rows(); ++i ) { for( * CompressedMatrix<int,rowMajor>::ConstIterator it=cbegin( A, i ); it!=cend( * A, i ); ++it ) { // ... } } \endcode * * // \n \section matrix_operations_element_insertion Element Insertion // <hr> * // // Whereas a dense matrix always provides enough capacity to store all * matrix elements, a sparse // matrix only stores the non-zero elements. * Therefore it is necessary to explicitly add elements // to the matrix. 
// // \n \subsection matrix_operations_function_call_operator_2 Function Call Operator // // The first possibility to add elements to a sparse matrix is the function call operator: \code using blaze::CompressedMatrix; CompressedMatrix<int> M1( 3UL, 4UL ); M1(1,2) = 9; \endcode // In case the element at the given position is not yet contained in the sparse matrix, it is // automatically inserted. Otherwise the old value is replaced by the new value 9. The operator // returns a reference to the sparse matrix element. // // \n \subsection matrix_operations_set .set() // // An alternative to the function call operator is the \c set() function: In case the element is // not yet contained in the matrix the element is inserted, else the element's value is modified: \code // Insert or modify the value at position (2,0) M1.set( 2, 0, 1 ); \endcode // \n \subsection matrix_operations_insert .insert() // The insertion of elements can be better controlled via the \c insert() function. In contrast // to the function call operator and the \c set() function it emits an exception in case the // element is already contained in the matrix. In order to check for this case, the \c find() // function can be used: \code // In case the element at position (2,3) is not yet contained in the matrix it is inserted // with a value of 4. if( M1.find( 2, 3 ) == M1.end( 2 ) ) M1.insert( 2, 3, 4 ); \endcode // \n \subsection matrix_operations_append .append() // // Although the \c insert() function is very flexible, due to performance reasons it is not // suited for the setup of large sparse matrices. A very efficient, yet also very low-level // way to fill a sparse matrix is the \c append() function. It requires the sparse matrix to // provide enough capacity to insert a new element in the specified row/column. Additionally, // the index of the new element must be larger than the index of the previous element in the // same row/column. Violating these conditions results in undefined behavior! \code M1.reserve( 0, 3 ); // Reserving space for three non-zero elements in row 0 M1.append( 0, 1, 2 ); // Appending the element 2 in row 0 at column index 1 M1.append( 0, 2, -4 ); // Appending the element -4 in row 0 at column index 2 // ... \endcode // The most efficient way to fill a sparse matrix with elements, however, is a combination of // \c reserve(), \c append(), and the \c finalize() function: \code // Setup of the compressed row-major matrix // // ( 0 1 0 2 0 ) // A = ( 0 0 0 0 0 ) // ( 3 0 0 0 0 ) // blaze::CompressedMatrix<int> M1( 3UL, 5UL ); M1.reserve( 3 ); // Reserving enough space for 3 non-zero elements M1.append( 0, 1, 1 ); // Appending the value 1 in row 0 with column index 1 M1.append( 0, 3, 2 ); // Appending the value 2 in row 0 with column index 3 M1.finalize( 0 ); // Finalizing row 0 M1.finalize( 1 ); // Finalizing the empty row 1 to prepare row 2 M1.append( 2, 0, 3 ); // Appending the value 3 in row 2 with column index 0 M1.finalize( 2 ); // Finalizing row 2 \endcode // \note The \c finalize() function has to be explicitly called for each row or column, even // for empty ones! // \note Although \c append() does not allocate new memory, it still invalidates all iterators // returned by the \c end() functions!
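// The example above uses a row-major matrix. For a column-major matrix the same \c reserve() / \c append() / \c finalize() pattern should apply, except that elements are appended per column and \c finalize() is called with a column index. The following is a minimal sketch under that assumption; the matrix \c B and its values are made up purely for illustration:

\code
// Setup of the compressed column-major matrix
//
//       ( 0 0 )
//   B = ( 1 0 )
//       ( 0 2 )
//
blaze::CompressedMatrix<int,blaze::columnMajor> B( 3UL, 2UL );
B.reserve( 2 );       // Reserving enough space for 2 non-zero elements
B.append( 1, 0, 1 );  // Appending the value 1 in column 0 with row index 1
B.finalize( 0 );      // Finalizing column 0
B.append( 2, 1, 2 );  // Appending the value 2 in column 1 with row index 2
B.finalize( 1 );      // Finalizing column 1
\endcode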
// // // \n \section matrix_operations_element_removal Element Removal // <hr> // // \subsection matrix_operations_erase .erase() // // The \c erase() member functions can be used to remove elements from a sparse matrix. The // following example gives an impression of the five different flavors of \c erase(): \code using blaze::CompressedMatrix; CompressedMatrix<int,rowMajor> A( 42, 53 ); // ... Initialization of the matrix // Erasing the element at position (21,23) A.erase( 21, 23 ); // Erasing a single element in row 17 via iterator A.erase( 17, A.find( 17, 4 ) ); // Erasing all non-zero elements in the range [7..24] of row 33 A.erase( 33, A.lowerBound( 33, 7 ), A.upperBound( 33, 24 ) ); // Erasing all non-zero elements with a value larger than 9 by passing a unary predicate A.erase( []( int i ){ return i > 9; } ); // Erasing all non-zero elements in the range [30..40] of row 37 with a value larger than 5 CompressedMatrix<int,rowMajor>::Iterator pos1( A.lowerBound( 37, 30 ) ); CompressedMatrix<int,rowMajor>::Iterator pos2( A.upperBound( 37, 40 ) ); A.erase( 37, pos1, pos2, []( int i ){ return i > 5; } ); \endcode // \n \section matrix_operations_element_lookup Element Lookup // <hr> // // A sparse matrix only stores the non-zero elements contained in the matrix. Therefore, whenever // accessing a matrix element at a specific position a lookup operation is required. Whereas the // function call operator is performing this lookup automatically, it is also possible to use the // \c find(), \c lowerBound(), and \c upperBound() member functions for a manual lookup. // // \n \subsection matrix_operations_find .find() // // The \c find() function can be used to check whether a specific element is contained in the // sparse matrix. It specifically searches for the element at the specified position. In case // the element is found, the function returns an iterator to the element. Otherwise an iterator // just past the last non-zero element of the respective row or column (the \c end() iterator) // is returned. Note that the returned iterator is subject to invalidation due to inserting // operations via the function call operator, the \c set() function or the \c insert() function! \code using blaze::CompressedMatrix; CompressedMatrix<int,rowMajor> A( 42, 53 ); // ... Initialization of the matrix // Searching the element at position (7,17). In case the element is not // contained in the matrix, the end() iterator of row 7 is returned. CompressedMatrix<int,rowMajor>::Iterator pos( A.find( 7, 17 ) ); if( pos != A.end( 7 ) ) { // ... } \endcode // \n \subsection matrix_operations_lowerbound .lowerBound() // // In case of a row-major matrix, this function returns a row iterator to the first element with // an index not less than the given column index. In case of a column-major matrix, the function // returns a column iterator to the first element with an index not less than the given row // index. In combination with the \c upperBound() function this function can be used to create a // pair of iterators specifying a range of indices. Note that the returned iterator is subject // to invalidation due to inserting operations via the function call operator, the \c set() // function or the \c insert() function! \code using blaze::CompressedMatrix; CompressedMatrix<int,rowMajor> A( 42, 53 ); // ... Initialization of the matrix // Searching the lower bound of column index 17 in row 7. CompressedMatrix<int,rowMajor>::Iterator pos1( A.lowerBound( 7, 17 ) ); // Searching the upper bound of column index 28 in row 7 CompressedMatrix<int,rowMajor>::Iterator pos2( A.upperBound( 7, 28 ) ); // Erasing all elements in the specified range A.erase( 7, pos1, pos2 ); \endcode // \n \subsection matrix_operations_upperbound .upperBound() // // In case of a row-major matrix, this function returns a row iterator to the first element with // an index greater than the given column index. In case of a column-major matrix, the function // returns a column iterator to the first element with an index greater than the given row // index. In combination with the \c lowerBound() function this function can be used to create a // pair of iterators specifying a range of indices. Note that the returned iterator is subject // to invalidation due to inserting operations via the function call operator, the \c set() // function or the \c insert() function! \code using blaze::CompressedMatrix; CompressedMatrix<int,columnMajor> A( 42, 53 ); // ... Initialization of the matrix // Searching the lower bound of row index 17 in column 9. CompressedMatrix<int,columnMajor>::Iterator pos1( A.lowerBound( 17, 9 ) ); // Searching the upper bound of row index 28 in column 9 CompressedMatrix<int,columnMajor>::Iterator pos2( A.upperBound( 28, 9 ) ); // Erasing all elements in the specified range A.erase( 9, pos1, pos2 ); \endcode // \n \section matrix_operations_non_modifying_operations Non-Modifying Operations // <hr> // // \subsection matrix_operations_rows .rows() / rows() // // The current number of rows of a matrix can be acquired via the \c rows() member function: \code // Instantiating a dynamic matrix with 10 rows and 8 columns blaze::DynamicMatrix<int> M1( 10UL, 8UL ); M1.rows(); // Returns 10 // Instantiating a compressed matrix with 8 rows and 12 columns blaze::CompressedMatrix<double> M2( 8UL, 12UL ); M2.rows(); // Returns 8 \endcode // Alternatively, the free function \c rows() can be used to query the current number of rows of // a matrix. In contrast to the member function, the free function can also be used to query the // number of rows of a matrix expression: \code rows( M1 ); // Returns 10, i.e. has the same effect as the member function rows( M2 ); // Returns 8, i.e. has the same effect as the member function rows( M1 * M2 ); // Returns 10, i.e. the number of rows of the resulting matrix \endcode // \n \subsection matrix_operations_columns .columns() / columns() // // The current number of columns of a matrix can be acquired via the \c columns() member function: \code // Instantiating a dynamic matrix with 6 rows and 8 columns blaze::DynamicMatrix<int> M1( 6UL, 8UL ); M1.columns(); // Returns 8 // Instantiating a compressed matrix with 8 rows and 7 columns blaze::CompressedMatrix<double> M2( 8UL, 7UL ); M2.columns(); // Returns 7 \endcode // There is also a free function \c columns() available, which can also be used to query the number // of columns of a matrix expression: \code columns( M1 ); // Returns 8, i.e. has the same effect as the member function columns( M2 ); // Returns 7, i.e. has the same effect as the member function columns( M1 * M2 ); // Returns 7, i.e.
the number of columns of the * resulting matrix \endcode * * // \subsection matrix_operations_size size() // // The \c size() function * returns the total number of elements of a matrix: * * \code // Instantiating a dynamic matrix with 6 rows and 8 columns * blaze::DynamicMatrix<int> M1( 6UL, 8UL ); size( M1 ); // Returns 48 * * // Instantiating a compressed matrix with 8 rows and 7 columns * blaze::CompressedMatrix<double> M2( 8UL, 7UL ); size( M2 ); // Returns 56 * \endcode * * // \subsection matrix_operations_spacing .spacing() / spacing() // // The * total number of elements of a row or column of a dense matrix, including * potential padding // elements, can be acquired via the \c spacing member * function. In case of a row-major matrix // (i.e. in case the storage order * is set to blaze::rowMajor) the function returns the spacing // between two * rows, in case of a column-major matrix (i.e. in case the storage flag is * set to // blaze::columnMajor) the function returns the spacing between two * columns: * * \code // Instantiating a row-major dynamic matrix with 7 rows and 8 columns * blaze::DynamicMatrix<int,blaze::rowMajor> M1( 7UL, 8UL ); M1.spacing(); * // Returns the total number of elements in a row * * // Instantiating a column-major dynamic matrix with 8 rows and 12 columns * blaze::CompressedMatrix<double> M2( 8UL, 12UL ); M2.spacing(); // Returns * the total number of element in a column \endcode * * // Alternatively, the free functions \c spacing() can be used to query the * current number of // elements in a row/column. * * \code spacing( M1 ); // Returns the total number of elements in a row * spacing( M2 ); // Returns the total number of elements in a column * \endcode * * // \n \subsection matrix_operations_capacity .capacity() / capacity() // // * The \c capacity() member function returns the internal capacity of a dense * or sparse matrix. // Note that the capacity of a matrix doesn't have to be * equal to the size of a matrix. In case of // a dense matrix the capacity * will always be greater or equal than the total number of elements // of * the matrix. In case of a sparse matrix, the capacity will usually be much * less than the // total number of elements. * * \code blaze::DynamicMatrix<float> M1( 5UL, 7UL ); * blaze::StaticMatrix<float,7UL,4UL> M2; M1.capacity(); // Returns at least * 35 M2.capacity(); // Returns at least 28 \endcode * * // There is also a free function \c capacity() available to query the * capacity. However, please // note that this function cannot be used to * query the capacity of a matrix expression: * * \code capacity( M1 ); // Returns at least 35, i.e. has the same effect as * the member function capacity( M2 ); // Returns at least 28, i.e. has the * same effect as the member function * * capacity( M1 * M2 ); // Compilation error! \endcode * * // \n \subsection matrix_operations_nonzeros .nonZeros() / nonZeros() // // * For both dense and sparse matrices the current number of non-zero elements * can be queried // via the \c nonZeros() member function. In case of * matrices there are two flavors of the // \c nonZeros() function: One * returns the total number of non-zero elements in the matrix, // the second * returns the number of non-zero elements in a specific row (in case of a * row-major // matrix) or column (in case of a column-major matrix). Sparse * matrices directly return their // number of non-zero elements, dense * matrices traverse their elements and count the number of // non-zero * elements. 
\code blaze::DynamicMatrix<int,rowMajor> M1( 3UL, 5UL ); // ... Initializing the dense matrix M1.nonZeros(); // Returns the total number of non-zero elements in the dense matrix M1.nonZeros( 2 ); // Returns the number of non-zero elements in row 2 \endcode \code blaze::CompressedMatrix<double,columnMajor> M2( 5UL, 7UL ); // ... Initializing the sparse matrix M2.nonZeros(); // Returns the total number of non-zero elements in the sparse matrix M2.nonZeros( 3 ); // Returns the number of non-zero elements in column 3 \endcode // The free \c nonZeros() function can also be used to query the number of non-zero elements in a // matrix expression. However, the result is not the exact number of non-zero elements, but may be // a rough estimation: \code nonZeros( M1 ); // Has the same effect as the member function nonZeros( M1, 2 ); // Has the same effect as the member function nonZeros( M2 ); // Has the same effect as the member function nonZeros( M2, 3 ); // Has the same effect as the member function nonZeros( M1 * M2 ); // Estimates the number of non-zero elements in the matrix expression \endcode // \n \subsection matrix_operations_isempty isEmpty() // // The \c isEmpty() function returns whether the total number of elements of the matrix is zero: \code blaze::DynamicMatrix<int> A; // Create an empty matrix isEmpty( A ); // Returns true A.resize( 5, 0 ); // Resize to a 5x0 matrix isEmpty( A ); // Returns true A.resize( 5, 3 ); // Resize to a 5x3 matrix isEmpty( A ); // Returns false \endcode // \n \subsection matrix_operations_isnan isnan() // // The \c isnan() function provides the means to check a dense or sparse matrix for not-a-number // elements: \code blaze::DynamicMatrix<double> A( 3UL, 4UL ); // ... Initialization if( isnan( A ) ) { ... } \endcode \code blaze::CompressedMatrix<double> A( 3UL, 4UL ); // ... Initialization if( isnan( A ) ) { ... } \endcode // If at least one element of the matrix is not-a-number, the function returns \c true, otherwise // it returns \c false. Please note that this function only works for matrices with floating point // elements. The attempt to use it for a matrix with a non-floating point element type results in // a compile time error. // // // \n \subsection matrix_operations_isdefault isDefault() // // The \c isDefault() function returns whether the given dense or sparse matrix is in default state: \code blaze::HybridMatrix<int,5UL,4UL> A; // ... Resizing and initialization if( isDefault( A ) ) { ... } \endcode // A matrix is in default state if it appears to just have been default constructed. All resizable // matrices (\c HybridMatrix, \c DynamicMatrix, or \c CompressedMatrix) and \c CustomMatrix are in // default state if their size is equal to zero. A non-resizable matrix (\c StaticMatrix and all // submatrices) is in default state if all its elements are in default state. For instance, in case // the matrix is instantiated for a built-in integral or floating point data type, the function // returns \c true in case all matrix elements are 0 and \c false in case any matrix element is // not 0. // // // \n \subsection matrix_operations_isSquare isSquare() // // Whether a dense or sparse matrix is a square matrix (i.e. if the number of rows is equal to the // number of columns) can be checked via the \c isSquare() function: \code blaze::DynamicMatrix<double> A; // ...
Resizing and initialization if( * isSquare( A ) ) { ... } \endcode * * // \n \subsection matrix_operations_issymmetric isSymmetric() // // Via the * \c isSymmetric() function it is possible to check whether a dense or * sparse matrix // is symmetric: * * \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( * isSymmetric( A ) ) { ... } \endcode * * // Note that non-square matrices are never considered to be symmetric! // // * // \n \subsection matrix_operations_isUniform isUniform() // // In order * to check if all matrix elements are identical, the \c isUniform function * can be used: * * \code blaze::DynamicMatrix<int> A; // ... Resizing and initialization if( * isUniform( A ) ) { ... } \endcode * * // Note that in case of a sparse matrix also the zero elements are also taken * into account! // // // \n \subsection matrix_operations_islower isLower() * // // Via the \c isLower() function it is possible to check whether a * dense or sparse matrix is // lower triangular: * * \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( * isLower( A ) ) { ... } \endcode * * // Note that non-square matrices are never considered to be lower triangular! * // // // \n \subsection matrix_operations_isunilower isUniLower() // // * Via the \c isUniLower() function it is possible to check whether a dense * or sparse matrix is // lower unitriangular: * * \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( * isUniLower( A ) ) { ... } \endcode * * // Note that non-square matrices are never considered to be lower * unitriangular! // // // \n \subsection matrix_operations_isstrictlylower * isStrictlyLower() // // Via the \c isStrictlyLower() function it is * possible to check whether a dense or sparse matrix // is strictly lower * triangular: * * \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( * isStrictlyLower( A ) ) { ... } \endcode * * // Note that non-square matrices are never considered to be strictly lower * triangular! // // // \n \subsection matrix_operations_isUpper isUpper() // * // Via the \c isUpper() function it is possible to check whether a dense * or sparse matrix is // upper triangular: * * \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( * isUpper( A ) ) { ... } \endcode * * // Note that non-square matrices are never considered to be upper triangular! * // // // \n \subsection matrix_operations_isuniupper isUniUpper() // // * Via the \c isUniUpper() function it is possible to check whether a dense * or sparse matrix is // upper unitriangular: * * \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( * isUniUpper( A ) ) { ... } \endcode * * // Note that non-square matrices are never considered to be upper * unitriangular! // // // \n \subsection matrix_operations_isstrictlyupper * isStrictlyUpper() // // Via the \c isStrictlyUpper() function it is * possible to check whether a dense or sparse matrix // is strictly upper * triangular: * * \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( * isStrictlyUpper( A ) ) { ... } \endcode * * // Note that non-square matrices are never considered to be strictly upper * triangular! // // // \n \subsection matrix_operations_isdiagonal * isDiagonal() // // The \c isDiagonal() function checks if the given dense * or sparse matrix is a diagonal matrix, // i.e. 
if it has only elements on * its diagonal and if the non-diagonal elements are default // elements: * * \code blaze::CompressedMatrix<float> A; // ... Resizing and initialization * if( isDiagonal( A ) ) { ... } \endcode * * // Note that non-square matrices are never considered to be diagonal! // // * // \n \subsection matrix_operations_isidentity isIdentity() // // The \c * isIdentity() function checks if the given dense or sparse matrix is an * identity matrix, // i.e. if all diagonal elements are 1 and all * non-diagonal elements are 0: * * \code blaze::CompressedMatrix<float> A; // ... Resizing and initialization * if( isIdentity( A ) ) { ... } \endcode * * // Note that non-square matrices are never considered to be identity * matrices! // // // \n \subsection matrix_operations_matrix_determinant * det() // // The determinant of a square dense matrix can be computed by * means of the \c det() function: * * \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and * initialization double d = det( A ); // Compute the determinant of A * \endcode * * // In case the given dense matrix is not a square matrix, a \c * std::invalid_argument exception is // thrown. // // \note The \c det() * function can only be used for dense matrices with \c float, \c double, // * \c complex<float> or \c complex<double> element type. The attempt to call * the function with // matrices of any other element type or with a sparse * matrix results in a compile time error! // // \note The function is * depending on LAPACK kernels. Thus the function can only be used if the // * fitting LAPACK library is available and linked to the executable. * Otherwise a linker error // will be created. // // // \n \subsection * matrix_operations_matrix_trans trans() // // Matrices can be transposed * via the \c trans() function. Row-major matrices are transposed into // a * column-major matrix and vice versa: * * \code blaze::DynamicMatrix<int,rowMajor> M1( 5UL, 2UL ); * blaze::CompressedMatrix<int,columnMajor> M2( 3UL, 7UL ); * * M1 = M2; // Assigning a column-major matrix to a row-major matrix * M1 = trans( M2 ); // Assigning the transpose of M2 (i.e. a row-major * matrix) to M1 M1 += trans( M2 ); // Addition assignment of two row-major * matrices \endcode * * // \n \subsection matrix_operations_ctrans ctrans() // // The conjugate * transpose of a dense or sparse matrix (also called adjoint matrix, * Hermitian // conjugate, or transjugate) can be computed via the \c * ctrans() function: * * \code blaze::DynamicMatrix< complex<float>, rowMajor > M1( 5UL, 2UL ); * blaze::CompressedMatrix< complex<float>, columnMajor > M2( 2UL, 5UL ); * * M1 = ctrans( M2 ); // Compute the conjugate transpose matrix \endcode * * // Note that the \c ctrans() function has the same effect as manually * applying the \c conj() and // \c trans() function in any order: * * \code M1 = trans( conj( M2 ) ); // Computing the conjugate transpose matrix * M1 = conj( trans( M2 ) ); // Computing the conjugate transpose matrix * \endcode * * // \n \subsection matrix_operations_matrix_evaluate eval() / evaluate() // // * The \c evaluate() function forces an evaluation of the given matrix * expression and enables // an automatic deduction of the correct result * type of an operation. 
The following code example // demonstrates its * intended use for the multiplication of a lower and a strictly lower dense * // matrix: * * \code using blaze::DynamicMatrix; using blaze::LowerMatrix; using * blaze::StrictlyLowerMatrix; * * LowerMatrix< DynamicMatrix<double> > A; StrictlyLowerMatrix< * DynamicMatrix<double> > B; // ... Resizing and initialization * * auto C = evaluate( A * B ); \endcode * * // In this scenario, the \c evaluate() function assists in deducing the exact * result type of // the operation via the \c auto keyword. Please note that * if \c evaluate() is used in this // way, no temporary matrix is created * and no copy operation is performed. Instead, the result // is directly * written to the target matrix due to the return value optimization (RVO). * However, // if \c evaluate() is used in combination with an explicit * target type, a temporary will be // created and a copy operation will be * performed if the used type differs from the type // returned from the * function: * * \code StrictlyLowerMatrix< DynamicMatrix<double> > D( A * B ); // No * temporary & no copy operation LowerMatrix< DynamicMatrix<double> > E( A * * B ); // Temporary & copy operation DynamicMatrix<double> F( A * B * ); // Temporary & copy operation D = evaluate( A * * B ); // Temporary & copy operation * \endcode * * // Sometimes it might be desirable to explicitly evaluate a sub-expression * within a larger // expression. However, please note that \c evaluate() is * not intended to be used for this // purpose. This task is more elegantly * and efficiently handled by the \c eval() function: * * \code blaze::DynamicMatrix<double> A, B, C, D; * * D = A + evaluate( B * C ); // Unnecessary creation of a temporary matrix D = * A + eval( B * C ); // No creation of a temporary matrix \endcode * * // In contrast to the \c evaluate() function, \c eval() can take the complete * expression // into account and therefore can guarantee the most efficient * way to evaluate it (see also // \ref intra_statement_optimization). // // * // \n \section matrix_operations_modifying_operations Modifying Operations * // <hr> // // \subsection matrix_operations_resize_reserve .resize() / * .reserve() // // The dimensions of a \c StaticMatrix are fixed at compile * time by the second and third template // parameter and a \c CustomMatrix * cannot be resized. In contrast, the number or rows and columns // of \c * DynamicMatrix, \c HybridMatrix, and \c CompressedMatrix can be changed at * runtime: * * \code using blaze::DynamicMatrix; using blaze::CompressedMatrix; * * DynamicMatrix<int,rowMajor> M1; CompressedMatrix<int,columnMajor> M2( 3UL, * 2UL ); * * // Adapting the number of rows and columns via the resize() function. The * (optional) // third parameter specifies whether the existing elements * should be preserved. Per // default, the existing elements are preserved. * M1.resize( 2UL, 2UL ); // Resizing matrix M1 to 2x2 elements. * Elements of built-in type // remain uninitialized, elements of class type * are default // constructed. M1.resize( 3UL, 1UL, false ); // Resizing M1 * to 3x1 elements. The old elements are lost, the // new elements are NOT * initialized! M2.resize( 5UL, 7UL, true ); // Resizing M2 to 5x7 * elements. The old elements are preserved. M2.resize( 3UL, 2UL, false ); * // Resizing M2 to 3x2 elements. The old elements are lost. \endcode * * // Note that resizing a matrix invalidates all existing views (see e.g. 
\ref views_submatrices) // on the matrix: \code blaze::DynamicMatrix<int,rowMajor> M1( 10UL, 20UL ); // Creating a 10x20 matrix auto row8 = row( M1, 8UL ); // Creating a view on the 8th row of the matrix M1.resize( 6UL, 20UL ); // Resizing the matrix invalidates the view \endcode // When the internal capacity of a matrix is no longer sufficient, the allocation of a larger // chunk of memory is triggered. In order to avoid frequent reallocations, the \c reserve() // function can be used up front to set the internal capacity: \code blaze::DynamicMatrix<int> M1; M1.reserve( 100 ); M1.rows(); // Returns 0 M1.capacity(); // Returns at least 100 \endcode // Additionally it is possible to reserve memory in a specific row (for a row-major matrix) or // column (for a column-major matrix): \code blaze::CompressedMatrix<int> M1( 4UL, 6UL ); M1.reserve( 1, 4 ); // Reserving enough space for four non-zero elements in row 1 \endcode // \n \subsection matrix_operations_shrinkToFit .shrinkToFit() // // The internal capacity of matrices with dynamic memory is preserved in order to minimize the // number of reallocations. For that reason, the \c resize() and \c reserve() functions can lead // to memory overhead. The \c shrinkToFit() member function can be used to minimize the internal // capacity: \code blaze::DynamicMatrix<int> M1( 100UL, 100UL ); // Create a 100x100 integer matrix M1.resize( 10UL, 10UL ); // Resize to 10x10, but the capacity is preserved M1.shrinkToFit(); // Remove the unused capacity \endcode // Please note that due to padding the capacity might not be reduced exactly to \c rows() times // \c columns(). Please also note that in case a reallocation occurs, all iterators (including // \c end() iterators), all pointers and references to elements of this matrix are invalidated. // // // \subsection matrix_operations_reset_clear reset() / clear() // // In order to reset all elements of a dense or sparse matrix, the \c reset() function can be // used. The number of rows and columns of the matrix is preserved: \code // Setting up a single precision row-major matrix, whose elements are initialized with 2.0F. blaze::DynamicMatrix<float> M1( 4UL, 5UL, 2.0F ); // Resetting all elements to 0.0F. reset( M1 ); // Resetting all elements M1.rows(); // Returns 4: size and capacity remain unchanged \endcode // Alternatively, only a single row or column of the matrix can be reset: \code blaze::DynamicMatrix<int,blaze::rowMajor> M1( 7UL, 6UL, 5 ); // Setup of a row-major matrix blaze::DynamicMatrix<int,blaze::columnMajor> M2( 4UL, 5UL, 4 ); // Setup of a column-major matrix reset( M1, 2UL ); // Resetting the 2nd row of the row-major matrix reset( M2, 3UL ); // Resetting the 3rd column of the column-major matrix \endcode // In order to reset a row of a column-major matrix or a column of a row-major matrix, use a // row or column view (see \ref views_rows and \ref views_columns). // // In order to return a matrix to its default state (i.e. the state of a default constructed // matrix), the \c clear() function can be used: \code // Setting up a single precision row-major matrix, whose elements are initialized with 2.0F. blaze::DynamicMatrix<float> M1( 4UL, 5UL, 2.0F ); // Clearing the matrix.
clear( M1 ); // Clearing the entire matrix M1.rows(); // Returns 0: size is reset, but capacity remains unchanged \endcode // \n \subsection matrix_operations_matrix_transpose transpose() // // In addition to the non-modifying \c trans() function, matrices can be transposed in-place via // the \c transpose() function: \code blaze::DynamicMatrix<int,rowMajor> M( 5UL, 2UL ); transpose( M ); // In-place transpose operation. M = trans( M ); // Same as above \endcode // Note however that the transpose operation fails if ... // // - ... the given matrix has a fixed size and is non-square; // - ... the given matrix is a triangular matrix; // - ... the given submatrix affects the restricted parts of a triangular matrix; // - ... the given submatrix would cause non-deterministic results in a symmetric/Hermitian matrix. // // // \n \subsection matrix_operations_ctranspose ctranspose() // // The \c ctranspose() function can be used to perform an in-place conjugate transpose operation: \code blaze::DynamicMatrix<int,rowMajor> M( 5UL, 2UL ); ctranspose( M ); // In-place conjugate transpose operation. M = ctrans( M ); // Same as above \endcode // Note however that the conjugate transpose operation fails if ... // // - ... the given matrix has a fixed size and is non-square; // - ... the given matrix is a triangular matrix; // - ... the given submatrix affects the restricted parts of a triangular matrix; // - ... the given submatrix would cause non-deterministic results in a symmetric/Hermitian matrix. // // // \n \subsection matrix_operations_swap swap() // // Via the \c swap() function it is possible to completely swap the contents of two matrices // of the same type: \code blaze::DynamicMatrix<int,blaze::rowMajor> M1( 10UL, 15UL ); blaze::DynamicMatrix<int,blaze::rowMajor> M2( 20UL, 10UL ); swap( M1, M2 ); // Swapping the contents of M1 and M2 \endcode // \n \section matrix_operations_arithmetic_operations Arithmetic Operations // <hr> // // \subsection matrix_operations_min_max min() / max() // // The \c min() and \c max() functions can be used for a single matrix or multiple matrices. If // passed a single matrix, the functions return the smallest and largest element of the given // dense matrix or the smallest and largest non-zero element of the given sparse matrix, // respectively: \code blaze::StaticMatrix<int,2UL,3UL> A{ { -5, 2, 7 }, { -4, 0, 1 } }; min( A ); // Returns -5 max( A ); // Returns 7 \endcode \code blaze::CompressedMatrix<int> B{ { 1, 0, 3 }, { 0, 0, 0 } }; min( B ); // Returns 1 max( B ); // Returns 3 \endcode // For more information on the unary \c min() and \c max() reduction operations see the // \ref matrix_operations_reduction_operations section. // // If passed two or more dense matrices, the \c min() and \c max() functions compute the // componentwise minimum or maximum of the given matrices, respectively: \code blaze::StaticMatrix<int,2UL,3UL,rowMajor> C{ { -5, 1, -7 }, { 4, 1, 0 } }; blaze::StaticMatrix<int,2UL,3UL,rowMajor> D{ { -5, 3, 0 }, { 2, 2, -2 } }; min( A, C ); // Results in the matrix ( -5, 1, -7 ) ( -4, 0, 0 ) max( A, C, D ); // Results in the matrix ( -5, 3, 7 ) ( 4, 2, 1 ) \endcode // Please note that sparse matrices can only be used in the unary \c min() and \c max() functions.
// Also note that all forms of the \c min() and \c * max() functions can be used to compute the // smallest and largest element * of a matrix expression: * * \code min( A + B + C ); // Returns -9, i.e. the smallest value of the * resulting matrix max( A - B - C ); // Returns 11, i.e. the largest value * of the resulting matrix \endcode * * // \n \subsection matrix_operators_softmax softmax() // // The <a * href="https://en.wikipedia.org/wiki/Softmax_function">softmax * function</a>, also called // the normalized exponential function, of a * given dense matrix can be computed via \c softmax(). // The resulting * dense matrix consists of real values in the range (0..1], which add up to * 1. * * \code blaze::StaticMatrix<double,3UL,3UL> A{ { 1.0, 2.0, 3.0 } , { 4.0, 1.0, * 2.0 } , { 3.0, 4.0, 1.0 } }; blaze::StaticMatrix<double,3UL,3UL> B; * * // Evaluating the softmax function B = softmax( A ); // Results in ( * 0.0157764 0.0428847 0.116573 ) // ( 0.316878 0.0157764 * 0.0428847 ) // ( 0.116573 0.316878 0.0157764 ) double s = * sum( B ); // Results in 1 \endcode * * // \n \subsection matrix_operators_trace trace() // // The \c trace() * function sums the diagonal elements of a square dense or sparse matrix: * * \code blaze::StaticMatrix<int,3UL,3UL> A{ { -1, 2, -3 } , { -4, -5, 6 } , { * 7, -8, -9 } }; * * trace( A ); // Returns the sum of the diagonal elements, i.e. -15 \endcode * * // In case the given matrix is not a square matrix, a \c * std::invalid_argument exception is // thrown. // // // \n \subsection * matrix_operators_abs abs() // // The \c abs() function can be used to * compute the absolute values of each element of a matrix. // For instance, * the following computation * * \code blaze::StaticMatrix<int,2UL,3UL,rowMajor> A{ { -1, 2, -3 }, { 4, -5, * 6 } }; blaze::StaticMatrix<int,2UL,3UL,rowMajor> B( abs( A ) ); \endcode * * // results in the matrix * * \f$ B = \left(\begin{array}{*{3}{c}} 1 & 2 & 3 \\ 4 & 5 & 6 \\ * \end{array}\right)\f$ * * // \n \subsection matrix_operators_sign sign() // // The \c sign() function * can be used to evaluate the sign of each element of a matrix \a A. For // * each element \c (i,j) the corresponding result is 1 if \a A(i,j) is * greater than zero, 0 if // \a A(i,j) is zero, and -1 if \a A(i,j) is less * than zero. 
For instance, the following use of // the \c sign() function * * \code blaze::StaticMatrix<int,2UL,3UL,rowMajor> A{ { -1, 2, 0 }, { 4, 0, * -6 } }; blaze::StaticMatrix<int,2UL,3UL,rowMajor> B( sign( A ) ); \endcode * * // results in the matrix * * \f$ B = \left(\begin{array}{*{3}{c}} -1 & 1 & 0 \\ 1 & 0 & -1 \\ * \end{array}\right)\f$ * * // \n \subsection matrix_operators_rounding_functions floor() / ceil() / * trunc() / round() // // The \c floor(), \c ceil(), \c trunc(), and \c * round() functions can be used to round down/up // each element of a * matrix, respectively: * * \code blaze::StaticMatrix<double,3UL,3UL> A, B; * * B = floor( A ); // Rounding down each element of the matrix B = ceil ( A ); * // Rounding up each element of the matrix B = trunc( A ); // Truncating * each element of the matrix B = round( A ); // Rounding each element of * the matrix \endcode * * // \n \subsection matrix_operators_conj conj() // // The \c conj() function * can be applied on a dense or sparse matrix to compute the complex // * conjugate of each element of the matrix: * * \code using blaze::StaticMatrix; * * using cplx = std::complex<double>; * * // Creating the matrix // ( (1,0) (-2,-1) ) // ( (1,1) ( 0, 1) ) * StaticMatrix<cplx,2UL,2UL> A{ { cplx( 1.0, 0.0 ), cplx( -2.0, -1.0 ) }, { * cplx( 1.0, 1.0 ), cplx( 0.0, 1.0 ) } }; * * // Computing the matrix of conjugate values // ( (1, 0) (-2, 1) ) // ( * (1,-1) ( 0,-1) ) StaticMatrix<cplx,2UL,2UL> B; B = conj( A ); \endcode * * // Additionally, matrices can be conjugated in-place via the \c conjugate() * function: * * \code blaze::DynamicMatrix<cplx> C( 5UL, 2UL ); * * conjugate( C ); // In-place conjugate operation. C = conj( C ); // Same as * above \endcode * * // \n \subsection matrix_operators_real real() // // The \c real() function * can be used on a dense or sparse matrix to extract the real part of // * each element of the matrix: * * \code using blaze::StaticMatrix; * * using cplx = std::complex<double>; * * // Creating the matrix // ( (1,0) (-2,-1) ) // ( (1,1) ( 0, 1) ) * StaticMatrix<cplx,2UL,2UL> A{ { cplx( 1.0, 0.0 ), cplx( -2.0, -1.0 ) }, { * cplx( 1.0, 1.0 ), cplx( 0.0, 1.0 ) } }; * * // Extracting the real part of each matrix element // ( 1 -2 ) // ( 1 * 0 ) StaticMatrix<double,2UL,2UL> B; B = real( A ); \endcode * * // \n \subsection matrix_operators_imag imag() // // The \c imag() function * can be used on a dense or sparse matrix to extract the imaginary part // * of each element of the matrix: * * \code using blaze::StaticMatrix; * * using cplx = std::complex<double>; * * // Creating the matrix // ( (1,0) (-2,-1) ) // ( (1,1) ( 0, 1) ) * StaticMatrix<cplx,2UL,2UL> A{ { cplx( 1.0, 0.0 ), cplx( -2.0, -1.0 ) }, { * cplx( 1.0, 1.0 ), cplx( 0.0, 1.0 ) } }; * * // Extracting the imaginary part of each matrix element // ( 0 -1 ) // * ( 1 1 ) StaticMatrix<double,2UL,2UL> B; B = imag( A ); \endcode * * // \n \subsection matrix_operators_sqrt sqrt() / invsqrt() // // Via the \c * sqrt() and \c invsqrt() functions the (inverse) square root of each * element of a // matrix can be computed: * * \code blaze::StaticMatrix<double,3UL,3UL> A, B, C; * * B = sqrt( A ); // Computes the square root of each element C = invsqrt( A * ); // Computes the inverse square root of each element \endcode * * // Note that in case of sparse matrices only the non-zero elements are taken * into account! 
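// As a small concrete illustration of \c sqrt() and \c invsqrt(), the following sketch uses a 2x2 matrix whose values are chosen purely for demonstration; the expected results are shown in the comments:

\code
blaze::StaticMatrix<double,2UL,2UL> A{ { 1.0, 4.0 }, { 9.0, 16.0 } };
blaze::StaticMatrix<double,2UL,2UL> B;

B = sqrt( A );     // Results in ( 1.0  2.0  )
                   //            ( 3.0  4.0  )
B = invsqrt( A );  // Results in ( 1.0  0.5  )
                   //            ( 1/3  0.25 )
\endcode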
// // // \n \subsection matrix_operators_cbrt cbrt() / invcbrt() // // The \c cbrt() and \c invcbrt() functions can be used to compute the (inverse) cubic root // of each element of a matrix: \code blaze::DynamicMatrix<double> A, B, C; B = cbrt( A ); // Computes the cubic root of each element C = invcbrt( A ); // Computes the inverse cubic root of each element \endcode // Note that in case of sparse matrices only the non-zero elements are taken into account! // // // \n \subsection matrix_operations_hypot hypot() // // The \c hypot() function can be used to compute the componentwise hypotenuse for a pair of // dense matrices: \code blaze::StaticMatrix<double,3UL,3UL> A, B, C; C = hypot( A, B ); // Computes the componentwise hypotenuse \endcode // \n \subsection matrix_operators_clamp clamp() // // The \c clamp() function can be used to restrict all elements of a matrix to a specific range: \code blaze::DynamicMatrix<double> A, B; B = clamp( A, -1.0, 1.0 ); // Restrict all elements to the range [-1..1] \endcode // Note that in case of sparse matrices only the non-zero elements are taken into account! // // // \n \subsection matrix_operators_pow pow() // // The \c pow() function can be used to raise each element of a matrix to a given power. // If passed a matrix and a numeric exponent, the function raises each element of the matrix to // that exponent. If passed a second matrix, the function computes the componentwise power: \code blaze::StaticMatrix<double,3UL,3UL> A, B, C; C = pow( A, 1.2 ); // Raises each element to the power of 1.2 C = pow( A, B ); // Computes the componentwise power \endcode // \n \subsection matrix_operators_exp exp() // // \c exp(), \c exp2() and \c exp10() compute the base e/2/10 exponential of each element of a // matrix, respectively: \code blaze::HybridMatrix<double,3UL,3UL> A, B; B = exp( A ); // Computes the base e exponential of each element B = exp2( A ); // Computes the base 2 exponential of each element B = exp10( A ); // Computes the base 10 exponential of each element \endcode // Note that in case of sparse matrices only the non-zero elements are taken into account!
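// For a concrete illustration of the base-2 and base-10 variants, the following sketch uses a 2x2 matrix whose values are chosen purely for demonstration; the expected results are shown in the comments:

\code
blaze::StaticMatrix<double,2UL,2UL> A{ { 0.0, 1.0 }, { 2.0, 3.0 } };
blaze::StaticMatrix<double,2UL,2UL> B;

B = exp2( A );   // Results in (   1    2 )
                 //            (   4    8 )
B = exp10( A );  // Results in (   1   10 )
                 //            ( 100 1000 )
\endcode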
// // // \n \subsection matrix_operators_log log() / log2() * / log10() // // The \c log(), \c log2() and \c log10() functions can be * used to compute the natural, binary // and common logarithm of each * element of a matrix: * * \code blaze::StaticMatrix<double,3UL,3UL> A, B; * * B = log( A ); // Computes the natural logarithm of each element B = log2( * A ); // Computes the binary logarithm of each element B = log10( A ); * // Computes the common logarithm of each element \endcode * * // \n \subsection matrix_operators_trigonometric_functions sin() / cos() / * tan() / asin() / acos() / atan() // // The following trigonometric * functions are available for both dense and sparse matrices: * * \code blaze::DynamicMatrix<double> A, B; * * B = sin( A ); // Computes the sine of each element of the matrix B = cos( A * ); // Computes the cosine of each element of the matrix B = tan( A ); // * Computes the tangent of each element of the matrix * * B = asin( A ); // Computes the inverse sine of each element of the matrix B * = acos( A ); // Computes the inverse cosine of each element of the matrix * B = atan( A ); // Computes the inverse tangent of each element of the * matrix \endcode * * // Note that in case of sparse matrices only the non-zero elements are taken * into account! // // // \n \subsection * matrix_operators_hyperbolic_functions sinh() / cosh() / tanh() / asinh() / * acosh() / atanh() // // The following hyperbolic functions are available * for both dense and sparse matrices: * * \code blaze::DynamicMatrix<double> A, B; * * B = sinh( A ); // Computes the hyperbolic sine of each element of the matrix * B = cosh( A ); // Computes the hyperbolic cosine of each element of the * matrix B = tanh( A ); // Computes the hyperbolic tangent of each element * of the matrix * * B = asinh( A ); // Computes the inverse hyperbolic sine of each element of * the matrix B = acosh( A ); // Computes the inverse hyperbolic cosine of * each element of the matrix B = atanh( A ); // Computes the inverse * hyperbolic tangent of each element of the matrix \endcode * * // \n \subsection matrix_operations_atan2 atan2() // // The multi-valued * inverse tangent is available for a pair of dense matrices: * * \code blaze::DynamicMatrix<double> A, B, C; * * C = atan2( A, B ); // Computes the componentwise multi-valued inverse * tangent \endcode * * // \n \subsection matrix_operators_erf erf() / erfc() // // The \c erf() and * \c erfc() functions compute the (complementary) error function of each // * element of a matrix: * * \code blaze::StaticMatrix<double,3UL,3UL> A, B; * * B = erf( A ); // Computes the error function of each element B = erfc( A ); * // Computes the complementary error function of each element \endcode * * // Note that in case of sparse matrices only the non-zero elements are taken * into account! // // // \n \subsection matrix_operations_map map() / * forEach() // // Via the unary and binary \c map() functions it is possible * to execute componentwise custom // operations on matrices. The unary \c * map() function can be used to apply a custom operation // on each element * of a dense or sparse matrix. For instance, the following example * demonstrates // a custom square root computation via a lambda: * * \code blaze::DynamicMatrix<double> A, B; * * B = map( A, []( double d ) { return std::sqrt( d ); } ); \endcode * * // The binary \c map() function can be used to apply an operation pairwise to * the elements of // two dense matrices. 
The following example demonstrates * the merging of two matrices of double // precision values into a matrix of * double precision complex numbers: * * \code blaze::DynamicMatrix<double> real{ { 2.1, -4.2 }, { 1.0, 0.6 } }; * blaze::DynamicMatrix<double> imag{ { 0.3, 1.4 }, { 2.9, -3.4 } }; * * blaze::DynamicMatrix< complex<double> > cplx; * * // Creating the matrix // ( (-2.1, 0.3) (-4.2, -1.4) ) // ( ( 1.0, * 2.9) ( 0.6, -3.4) ) cplx = map( real, imag, []( double r, double i ){ * return complex( r, i ); } ); \endcode * * // Although the computation can be parallelized it is not vectorized and thus * cannot perform at // peak performance. However, it is also possible to * create vectorized custom operations. See // \ref custom_operations for a * detailed overview of the possibilities of custom operations. // // Please * note that unary custom operations on vectors have been introduced in \b * Blaze 3.0 in // form of the \c forEach() function. With the introduction * of binary custom functions, the // \c forEach() function has been renamed * to \c map(). The \c forEach() function can still be // used (even for * binary custom operations), but the function might be deprecated in future * // releases of \b Blaze. // // // \n \section * matrix_operations_reduction_operations Reduction Operations // <hr> // // * \subsection matrix_operations_reduction_operations_reduce reduce() // // * The \c reduce() function performs either a total reduction, a rowwise * reduction or a columnwise // reduction of the elements of the given dense * matrix or the non-zero elements of the given sparse // matrix. The * following examples demonstrate the total reduction of a dense and sparse * matrix: * * \code blaze::DynamicMatrix<double> A; // ... Resizing and initialization * * const double totalsum1 = reduce( A, blaze::Add() ); const double totalsum2 = * reduce( A, []( double a, double b ){ return a + b; } ); \endcode * * \code blaze::CompressedMatrix<double> A; // ... Resizing and initialization * * const double totalsum1 = reduce( A, blaze::Add() ); const double totalsum2 = * reduce( A, []( double a, double b ){ return a + b; } ); \endcode * * // By specifying \c blaze::columnwise or \c blaze::rowwise the \c reduce() * function performs a // column-wise or row-wise reduction, respectively. In * case \c blaze::columnwise is specified, the // (non-zero) elements of the * matrix are reduced column-wise and the result is a row vector. In // case * \c blaze::rowwise is specified, the (non-zero) elements of the matrix are * reduced row-wise // and the result is a column vector: * * \code blaze::DynamicMatrix<double> A; blaze::CompressedMatrix<double> B; * blaze::DynamicVector<double,rowVector> colsum1, colsum2; // ... Resizing * and initialization * * colsum1 = reduce<columnwise>( A, blaze::Add() ); colsum2 = * reduce<columnwise>( B, []( double a, double b ){ return a + b; } ); * \endcode * * \code blaze::DynamicMatrix<double> A; blaze::CompressedMatrix<double> B; * blaze::DynamicVector<double,columnVector> rowsum1, rowsum2; // ... * Resizing and initialization * * rowsum1 = reduce<rowwise>( A, blaze::Add() ); rowsum2 = reduce<rowwise>( B, * []( double a, double b ){ return a + b; } ); \endcode * * // As demonstrated in the examples it is possible to pass any binary callable * as custom reduction // operation. However, for instance in the case of * lambdas the vectorization of the reduction // operation is compiler * dependent and might not perform at peak performance. 
However, it is also // possible to create vectorized custom operations. See \ref custom_operations for a detailed // overview of the possibilities of custom operations. // // Please note that the evaluation order of the \c reduce() function is unspecified. Thus the // behavior is non-deterministic if the given reduction operation is not associative or not // commutative. Also, the operation is undefined if the given reduction operation modifies the // values. // // \n \subsection matrix_operations_reduction_operations_sum sum() // // The \c sum() function reduces the elements of the given dense matrix or the non-zero elements // of the given sparse matrix by means of addition: \code blaze::DynamicMatrix<int> A{ { 1, 2 }, { 3, 4 } }; const int totalsum = sum( A ); // Results in 10 \endcode \code blaze::CompressedMatrix<int> A{ { 1, 2 }, { 3, 4 } }; const int totalsum = sum( A ); // Results in 10 \endcode // By specifying \c blaze::columnwise or \c blaze::rowwise the \c sum() function performs a // column-wise or row-wise summation, respectively. In case \c blaze::columnwise is specified, // the (non-zero) elements of the matrix are summed up column-wise and the result is a row vector. // In case \c blaze::rowwise is specified, the (non-zero) elements of the matrix are summed up // row-wise and the result is a column vector: \code using blaze::columnwise; blaze::DynamicMatrix<int> A{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::CompressedMatrix<int> B{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::DynamicVector<int,rowVector> colsum1, colsum2; colsum1 = sum<columnwise>( A ); // Results in ( 2, 3, 6 ) colsum2 = sum<columnwise>( B ); // Same result \endcode \code using blaze::rowwise; blaze::DynamicMatrix<int> A{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::CompressedMatrix<int> B{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::DynamicVector<int,columnVector> rowsum1, rowsum2; rowsum1 = sum<rowwise>( A ); // Results in ( 3, 8 ) rowsum2 = sum<rowwise>( B ); // Same result \endcode // Please note that the evaluation order of the \c sum() function is unspecified. // // \n \subsection matrix_operations_reduction_operations_prod prod() // // The \c prod() function reduces the elements of the given dense matrix or the non-zero elements // of the given sparse matrix by means of multiplication: \code blaze::DynamicMatrix<int> A{ { 1, 2 }, { 3, 4 } }; const int totalprod = prod( A ); // Results in 24 \endcode \code blaze::CompressedMatrix<int> A{ { 1, 2 }, { 3, 4 } }; const int totalprod = prod( A ); // Results in 24 \endcode // By specifying \c blaze::columnwise or \c blaze::rowwise the \c prod() function performs a // column-wise or row-wise multiplication, respectively. In case \c blaze::columnwise is specified, // the (non-zero) elements of the matrix are multiplied column-wise and the result is a row vector. // In case \c blaze::rowwise is specified, the (non-zero) elements of the matrix are multiplied // row-wise and the result is a column vector: \code using blaze::columnwise; blaze::DynamicMatrix<int> A{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::CompressedMatrix<int> B{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::DynamicVector<int,rowVector> colprod1, colprod2; colprod1 = prod<columnwise>( A ); // Results in ( 1, 0, 8 ) colprod2 = prod<columnwise>( B ); // Results in ( 1, 3, 8 ) \endcode \code using blaze::rowwise; blaze::DynamicMatrix<int> A{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::CompressedMatrix<int> B{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::DynamicVector<int,columnVector> rowprod1, rowprod2; rowprod1 = prod<rowwise>( A ); // Results in ( 0, 12 ) rowprod2 = prod<rowwise>( B ); // Results in ( 2, 12 ) \endcode // Please note that the evaluation order of the \c prod() function is unspecified. // // \n \subsection matrix_operations_reduction_operations_min min() // // The unary \c min() function returns the smallest element of the given dense matrix or the // smallest non-zero element of the given sparse matrix. This function can only be used for // element types that support the smaller-than relationship. In case the given matrix currently // has either 0 rows or 0 columns, the returned value is the default value (e.g. 0 in case of // fundamental data types). \code blaze::DynamicMatrix<int> A{ { 1, 2 }, { 3, 4 } }; const int totalmin = min( A ); // Results in 1 \endcode \code blaze::CompressedMatrix<int> A{ { 1, 0 }, { 3, 0 } }; const int totalmin = min( A ); // Results in 1 \endcode // \note In case the sparse matrix is not completely filled, the implicit zero elements are NOT // taken into account. In the previous example the compressed matrix has only 2 non-zero elements. // However, the minimum of this matrix is 1. // // By specifying \c blaze::columnwise or \c blaze::rowwise the \c min() function determines the // smallest (non-zero) element in each column or row, respectively. In case \c blaze::columnwise // is specified, the smallest (non-zero) element of each column is determined and the result is // a row vector. In case \c blaze::rowwise is specified, the smallest (non-zero) element of each // row is determined and the result is a column vector. \code using blaze::columnwise; blaze::DynamicMatrix<int> A{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::CompressedMatrix<int> B{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::DynamicVector<int,rowVector> colmin1, colmin2; colmin1 = min<columnwise>( A ); // Results in ( 1, 0, 2 ) colmin2 = min<columnwise>( B ); // Results in ( 1, 3, 2 ) \endcode \code using blaze::rowwise; blaze::DynamicMatrix<int> A{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::CompressedMatrix<int> B{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::DynamicVector<int,columnVector> rowmin1, rowmin2; rowmin1 = min<rowwise>( A ); // Results in ( 0, 1 ) rowmin2 = min<rowwise>( B ); // Results in ( 1, 1 ) \endcode // \note In case the sparse matrix is not completely filled, the implicit zero elements are NOT // taken into account. // // \n \subsection matrix_operations_reduction_operations_max max() // // The unary \c max() function returns the largest element of the given dense matrix or the // largest non-zero element of the given sparse matrix. This function can only be used for // element types that support the smaller-than relationship.
In case the given matrix currently // has either 0 rows or 0 * columns, the returned value is the default value (e.g. 0 in case of // * fundamental data types). * * \code blaze::DynamicMatrix<int> A{ { 1, 2 }, { 3, 4 } }; * * const int totalmax = max( A ); // Results in 4 \endcode * * \code blaze::CompressedMatrix<int> A{ { -1, 0 }, { -3, 0 } }; * * const int totalmax = max( A ); // Results in -1 \endcode * * // \note In case the sparse matrix is not completely filled, the implicit * zero elements are NOT // taken into account. In the previous example the * compressed matrix has only 2 non-zero elements. // However, the maximum of * this matrix is -1. // // By specifying \c blaze::columnwise or \c * blaze::rowwise the \c max() function determines the // largest (non-zero) * element in each row or column, respectively. In case \c blaze::columnwise * // is specified, the largest (non-zero) element of each column is * determined and the result is // a row vector. In case \c blaze::rowwise is * specified, the largest (non-zero) element of each // row is determined and * the result is a column vector. * * \code using blaze::columnwise; * * blaze::DynamicMatrix<int> A{ { 1, 0, 2 }, { 1, 3, 4 } }; * blaze::CompressedMatrix<int> B{ { -1, 0, -2 }, { -1, -3, -4 } }; * blaze::DynamicVector<int,rowVector> colmax1, colmax2; * * colmax1 = max<columnwise>( A ); // Results in ( 1, 3, 4 ) colmax2 = * max<columnwise>( B ); // Results in ( -1, -3, -2 ) \endcode * * \code using blaze::rowwise; * * blaze::DynamicMatrix<int> A{ { 1, 0, 2 }, { 1, 3, 4 } }; * blaze::CompressedMatrix<int> B{ { -1, 0, -2 }, { -1, -3, -4 } }; * blaze::DynamicVector<int,columnVector> rowmax1, rowmax2; * * rowmax1 = max<rowwise>( A ); // Results in ( 2, 4 ) rowmax2 = max<rowwise>( * B ); // Results in ( -1, -1 ) \endcode * * // \note In case the sparse matrix is not completely filled, the implicit * zero elements are NOT // taken into account. // // // \n \section * matrix_operations_norms Norms // <hr> // // \subsection * matrix_operations_norms_norm norm() // // The \c norm() function computes * the L2 norm of the given dense or sparse matrix: * * \code blaze::DynamicMatrix<double> A; // ... Resizing and initialization * const double l2 = norm( A ); \endcode * * // \n \subsection matrix_operations_norms_sqrnorm sqrNorm() // // The \c * sqrNorm() function computes the squared L2 norm of the given dense or * sparse matrix: * * \code blaze::DynamicMatrix<double> A; // ... Resizing and initialization * const double l2 = sqrNorm( A ); \endcode * * // \n \subsection matrix_operations_norms_l1norm l1Norm() // // The \c * l1Norm() function computes the L1 norm of the given dense or * sparse matrix: * * \code blaze::DynamicMatrix<double> A; // ... Resizing and initialization * const double l1 = l1Norm( A ); \endcode * * // \n \subsection matrix_operations_norms_l2norm l2Norm() // // The \c * l2Norm() function computes the L2 norm of the given dense or * sparse matrix: * * \code blaze::DynamicMatrix<double> A; // ... Resizing and initialization * const double l2 = l2Norm( A ); \endcode * * // \n \subsection matrix_operations_norms_l3norm l3Norm() // // The \c * l3Norm() function computes the L3 norm of the given dense or * sparse matrix: * * \code blaze::DynamicMatrix<double> A; // ... 
Resizing and initialization * const double l3 = l3Norm( A ); \endcode * * // \n \subsection matrix_operations_norms_l4norm l4Norm() // // The \c * l4Norm() function computes the L4 norm of the given dense or * sparse matrix: * * \code blaze::DynamicMatrix<double> A; // ... Resizing and initialization * const double l4 = l4Norm( A ); \endcode * * // \n \subsection matrix_operations_norms_lpnorm lpNorm() // // The \c * lpNorm() function computes the general Lp norm of the given dense or * sparse matrix, // where the norm is specified by either a compile time or * a runtime argument: * * \code blaze::DynamicMatrix<double> A; // ... Resizing and initialization * const double lp1 = lpNorm<2>( A ); // Compile time argument const * double lp2 = lpNorm( A, 2.3 ); // Runtime argument \endcode * * // \n \subsection matrix_operations_norms_maxnorm maxNorm() // // The \c * maxNorm() function computes the maximum norm of the given dense or sparse * matrix: * * \code blaze::DynamicMatrix<double> A; // ... Resizing and initialization * const double max = maxNorm( A ); \endcode * * // \n \section matrix_operations_declaration_operations Declaration * Operations // <hr> // // \subsection matrix_operations_declsym declsym() * // // The \c declsym() operation can be used to explicitly declare any * matrix or matrix expression // as symmetric: * * \code blaze::DynamicMatrix<double> A, B; // ... Resizing and initialization * * B = declsym( A ); \endcode * * // Any matrix or matrix expression that has been declared as symmetric via \c * declsym() will // gain all the benefits of a symmetric matrix, which range * from reduced runtime checking to // a considerable speed-up in * computations: * * \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; * * DynamicMatrix<double> A, B, C; SymmetricMatrix< DynamicMatrix<double> > S; // * ... Resizing and initialization * * isSymmetric( declsym( A ) ); // Will always return true without runtime * effort * * S = declsym( A ); // Omit any runtime check for symmetry * * C = declsym( A * B ); // Declare the result of the matrix multiplication as * symmetric, // i.e. perform an optimized matrix multiplication \endcode * * // \warning The \c declsym() operation has the semantics of a cast: The * caller is completely // responsible and the system trusts the given * information. Declaring a non-symmetric matrix or // matrix expression as * symmetric via the \c declsym() operation leads to undefined behavior // * (which can be violated invariants or wrong computation results)! // // // * \n \subsection matrix_operations_declherm declherm() // // The \c * declherm() operation can be used to explicitly declare any matrix or * matrix expression // as Hermitian: * * \code blaze::DynamicMatrix<double> A, B; // ... Resizing and initialization * * B = declherm( A ); \endcode * * // Any matrix or matrix expression that has been declared as Hermitian via \c * declherm() will // gain all the benefits of an Hermitian matrix, which * range from reduced runtime checking to // a considerable speed-up in * computations: * * \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; * * DynamicMatrix<double> A, B, C; HermitianMatrix< DynamicMatrix<double> > S; // * ... Resizing and initialization * * isHermitian( declherm( A ) ); // Will always return true without runtime * effort * * S = declherm( A ); // Omit any runtime check for Hermitian symmetry * * C = declherm( A * B ); // Declare the result of the matrix multiplication as * Hermitian, // i.e. 
perform an optimized matrix multiplication \endcode * * // \warning The \c declherm() operation has the semantics of a cast: The * caller is completely // responsible and the system trusts the given * information. Declaring a non-Hermitian matrix or // matrix expression as * Hermitian via the \c declherm() operation leads to undefined behavior // * (which can be violated invariants or wrong computation results)! // // // * \n \subsection matrix_operations_decllow decllow() // // The \c decllow() * operation can be used to explicitly declare any matrix or matrix * expression // as lower triangular: * * \code blaze::DynamicMatrix<double> A, B; // ... Resizing and initialization * * B = decllow( A ); \endcode * * // Any matrix or matrix expression that has been declared as lower triangular * via \c decllow() // will gain all the benefits of a lower triangular * matrix, which range from reduced runtime // checking to a considerable * speed-up in computations: * * \code using blaze::DynamicMatrix; using blaze::LowerMatrix; * * DynamicMatrix<double> A, B, C; LowerMatrix< DynamicMatrix<double> > L; // ... * Resizing and initialization * * isLower( decllow( A ) ); // Will always return true without runtime effort * * L = decllow( A ); // Omit any runtime check for A being a lower matrix * * C = decllow( A * B ); // Declare the result of the matrix multiplication as * lower triangular, // i.e. perform an optimized matrix multiplication * \endcode * * // \warning The \c decllow() operation has the semantics of a cast: The * caller is completely // responsible and the system trusts the given * information. Declaring a non-lower matrix or // matrix expression as lower * triangular via the \c decllow() operation leads to undefined // behavior * (which can be violated invariants or wrong computation results)! // // // * \n \subsection matrix_operations_declupp declupp() // // The \c declupp() * operation can be used to explicitly declare any matrix or matrix * expression // as upper triangular: * * \code blaze::DynamicMatrix<double> A, B; // ... Resizing and initialization * * B = declupp( A ); \endcode * * // Any matrix or matrix expression that has been declared as upper triangular * via \c declupp() // will gain all the benefits of a upper triangular * matrix, which range from reduced runtime // checking to a considerable * speed-up in computations: * * \code using blaze::DynamicMatrix; using blaze::UpperMatrix; * * DynamicMatrix<double> A, B, C; UpperMatrix< DynamicMatrix<double> > U; // ... * Resizing and initialization * * isUpper( declupp( A ) ); // Will always return true without runtime effort * * U = declupp( A ); // Omit any runtime check for A being a upper matrix * * C = declupp( A * B ); // Declare the result of the matrix multiplication as * upper triangular, // i.e. perform an optimized matrix multiplication * \endcode * * // \warning The \c declupp() operation has the semantics of a cast: The * caller is completely // responsible and the system trusts the given * information. Declaring a non-upper matrix or // matrix expression as upper * triangular via the \c declupp() operation leads to undefined // behavior * (which can be violated invariants or wrong computation results)! // // // * \n \subsection matrix_operations_decldiag decldiag() // // The \c * decldiag() operation can be used to explicitly declare any matrix or * matrix expression // as diagonal: * * \code blaze::DynamicMatrix<double> A, B; // ... 
Resizing and initialization * * B = decldiag( A ); \endcode * * // Any matrix or matrix expression that has been declared as diagonal via \c * decldiag() will // gain all the benefits of a diagonal matrix, which range * from reduced runtime checking to // a considerable speed-up in * computations: * * \code using blaze::DynamicMatrix; using blaze::DiagonalMatrix; * * DynamicMatrix<double> A, B, C; DiagonalMatrix< DynamicMatrix<double> > D; // * ... Resizing and initialization * * isDiagonal( decldiag( A ) ); // Will always return true without runtime * effort * * D = decldiag( A ); // Omit any runtime check for A being a diagonal matrix * * C = decldiag( A * B ); // Declare the result of the matrix multiplication as * diagonal, // i.e. perform an optimized matrix multiplication \endcode * * // \warning The \c decldiag() operation has the semantics of a cast: The * caller is completely // responsible and the system trusts the given * information. Declaring a non-diagonal matrix // or matrix expression as * diagonal via the \c decldiag() operation leads to undefined // behavior * (which can be violated invariants or wrong computation results)! // // // * \n \subsection matrix_operations_declid declid() // // The \c declid() * operation can be used to explicitly declare any matrix or matrix * expression // as identity matrix: * * \code blaze::DynamicMatrix<double> A, B; // ... Resizing and initialization * * B = declid( A ); \endcode * * // Any matrix or matrix expression that has been declared as identity matrix * via \c declid() will // gain all the benefits of an identity matrix, which * range from reduced runtime checking to a // considerable speed-up in * computations: * * \code using blaze::DynamicMatrix; using blaze::DiagonalMatrix; * * DynamicMatrix<double> A, B, C; DiagonalMatrix< DynamicMatrix<double> > D; // * ... Resizing and initialization * * isIdentity( declid( A ) ); // Will always return true without runtime effort * * D = declid( A ); // Omit any runtime check for A being a diagonal matrix * * C = declid( A ) * B; // Declare the left operand of the matrix * multiplication as an // identity matrix, i.e. perform an optimized matrix * multiplication \endcode * * // \warning The \c declid() operation has the semantics of a cast: The caller * is completely // responsible and the system trusts the given information. * Declaring a non-identity matrix // or matrix expression as identity matrix * via the \c declid() operation leads to undefined // behavior (which can be * violated invariants or wrong computation results)! // // // \n \section * matrix_operations_matrix_inversion Matrix Inversion // <hr> // // The * inverse of a square dense matrix can be computed via the \c inv() * function: * * \code blaze::DynamicMatrix<float,blaze::rowMajor> A, B; // ... Resizing and * initialization B = inv( A ); // Compute the inverse of A \endcode * * // Alternatively, an in-place inversion of a dense matrix can be performed * via the \c invert() // function: * * \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and * initialization invert( A ); // In-place matrix inversion \endcode * * // Both the \c inv() and the \c invert() functions will automatically select * the most suited matrix // inversion algorithm depending on the size and * type of the given matrix. For small matrices of // up to 6x6, both * functions use manually optimized kernels for maximum performance. 
For * matrices // larger than 6x6 the inversion is performed by means of the * most suited matrix decomposition // method: In case of a general matrix * the LU decomposition is used, for symmetric matrices the // LDLT * decomposition is applied, for Hermitian matrices the LDLH decomposition is * performed, and // for triangular matrices the inverse is computed via a * forward or back substitution. // // In case the type of the matrix does * not provide additional compile time information about its // structure * (symmetric, lower, upper, diagonal, ...), the information can be provided * manually // when calling the \c invert() function: * * \code using blaze::asGeneral; using blaze::asSymmetric; using * blaze::asHermitian; using blaze::asLower; using blaze::asUniLower; using * blaze::asUpper; using blaze::asUniUpper; using blaze::asDiagonal; * * invert<asGeneral> ( A ); // In-place inversion of a general matrix * invert<asSymmetric>( A ); // In-place inversion of a symmetric matrix * invert<asHermitian>( A ); // In-place inversion of a Hermitian matrix * invert<asLower> ( A ); // In-place inversion of a lower triangular * matrix invert<asUniLower> ( A ); // In-place inversion of a lower * unitriangular matrix invert<asUpper> ( A ); // In-place inversion of a * upper triangular matrix invert<asUniUpper> ( A ); // In-place inversion * of a upper unitriangular matrix invert<asDiagonal> ( A ); // In-place * inversion of a diagonal matrix \endcode * * // Alternatively, via the \c invert() function it is possible to explicitly * specify the inversion // algorithm: * * \code using blaze::byLU; using blaze::byLDLT; using blaze::byLDLH; using * blaze::byLLH; * * // In-place inversion of a general matrix by means of an LU decomposition * invert<byLU>( A ); * * // In-place inversion of a symmetric indefinite matrix by means of a * Bunch-Kaufman decomposition invert<byLDLT>( A ); * * // In-place inversion of a Hermitian indefinite matrix by means of a * Bunch-Kaufman decomposition invert<byLDLH>( A ); * * // In-place inversion of a positive definite matrix by means of a Cholesky * decomposition invert<byLLH>( A ); \endcode * * // Whereas the inversion by means of an LU decomposition works for every * general square matrix, // the inversion by LDLT only works for symmetric * indefinite matrices, the inversion by LDLH is // restricted to Hermitian * indefinite matrices and the Cholesky decomposition (LLH) only works // for * Hermitian positive definite matrices. Please note that it is in the * responsibility of the // function caller to guarantee that the selected * algorithm is suited for the given matrix. In // case this precondition is * violated the result can be wrong and might not represent the inverse // of * the given matrix! // // For both the \c inv() and \c invert() function the * matrix inversion fails if ... // // - ... the given matrix is not a * square matrix; // - ... the given matrix is singular and not invertible. * // // In all failure cases either a compilation error is created if the * failure can be predicted at // compile time or a \c std::invalid_argument * exception is thrown. // // \note The matrix inversion can only be used for * dense matrices with \c float, \c double, // \c complex<float> or \c * complex<double> element type. The attempt to call the function with // * matrices of any other element type or with a sparse matrix results in a * compile time error! // // \note The functions invert the dense matrix by * means of LAPACK kernels. 
Thus the functions can // only be used if a * fitting LAPACK library is available and linked to the executable. * Otherwise // a linker error will be created. // // \note It is not * possible to use any kind of view on the expression object returned by the * // \c inv() function. Also, it is not possible to access individual * elements via the function call // operator on the expression object: * * \code row( inv( A ), 2UL ); // Compilation error: Views cannot be used on an * inv() expression! inv( A )(1,2); // Compilation error: It is not * possible to access individual elements! \endcode * * // \note The inversion functions do not provide any exception safety * guarantee, i.e. in case an // exception is thrown the matrix may already * have been modified. // // // \n \section matrix_operations_decomposition * Matrix Decomposition // <hr> // // \note All decomposition functions can * only be used for dense matrices with \c float, \c double, // \c * complex<float> or \c complex<double> element type. The attempt to call the * function with // matrices of any other element type or with a sparse * matrix results in a compile time error! // // \note The functions * decompose a dense matrix by means of LAPACK kernels. Thus the functions * can // only be used if a fitting LAPACK library is available and linked to * the executable. Otherwise // a linker error will be created. // // * \subsection matrix_operations_decomposition_lu LU Decomposition // // The * LU decomposition of a dense matrix can be computed via the \c lu() * function: * * \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and * initialization * * blaze::DynamicMatrix<double,blaze::rowMajor> L, U, P; * * lu( A, L, U, P ); // LU decomposition of a row-major matrix * * assert( A == L * U * P ); \endcode * * \code blaze::DynamicMatrix<double,blaze::columnMajor> A; // ... Resizing and * initialization * * blaze::DynamicMatrix<double,blaze::columnMajor> L, U, P; * * lu( A, L, U, P ); // LU decomposition of a column-major matrix * * assert( A == P * L * U ); \endcode * * // The function works for both \c rowMajor and \c columnMajor matrices. Note, * however, that the // three matrices \c A, \c L and \c U are required to * have the same storage order. Also, please // note that the way the * permutation matrix \c P needs to be applied differs between row-major and * // column-major matrices, since the algorithm uses column interchanges for * row-major matrices and // row interchanges for column-major matrices. // * // Furthermore, \c lu() can be used with adaptors. For instance, the * following example demonstrates // the LU decomposition of a symmetric * matrix into a lower and upper triangular matrix: * * \code blaze::SymmetricMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> * > A; // ... Resizing and initialization * * blaze::LowerMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > L; * blaze::UpperMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > U; * blaze::DynamicMatrix<double,blaze::columnMajor> P; * * lu( A, L, U, P ); // LU decomposition of A \endcode * * // \n \subsection matrix_operations_decomposition_llh Cholesky Decomposition * // // The Cholesky (LLH) decomposition of a dense matrix can be computed * via the \c llh() function: * * \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... 
Resizing and * initialization * * blaze::DynamicMatrix<double,blaze::rowMajor> L; * * llh( A, L ); // LLH decomposition of a row-major matrix * * assert( A == L * ctrans( L ) ); \endcode * * // The function works for both \c rowMajor and \c columnMajor matrices and * the two matrices \c A // and \c L can have any storage order. // // * Furthermore, \c llh() can be used with adaptors. For instance, the * following example demonstrates // the LLH decomposition of a symmetric * matrix into a lower triangular matrix: * * \code blaze::SymmetricMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> * > A; // ... Resizing and initialization * * blaze::LowerMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > L; * * llh( A, L ); // Cholesky decomposition of A \endcode * * // \n \subsection matrix_operations_decomposition_qr QR Decomposition // // * The QR decomposition of a dense matrix can be computed via the \c qr() * function: * * \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and * initialization * * blaze::DynamicMatrix<double,blaze::columnMajor> Q; * blaze::DynamicMatrix<double,blaze::rowMajor> R; * * qr( A, Q, R ); // QR decomposition of a row-major matrix * * assert( A == Q * R ); \endcode * * // The function works for both \c rowMajor and \c columnMajor matrices and * the three matrices // \c A, \c Q and \c R can have any storage order. // * // Furthermore, \c qr() can be used with adaptors. For instance, the * following example demonstrates // the QR decomposition of a symmetric * matrix into a general matrix and an upper triangular matrix: * * \code blaze::SymmetricMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> * > A; // ... Resizing and initialization * * blaze::DynamicMatrix<double,blaze::rowMajor> Q; blaze::UpperMatrix< * blaze::DynamicMatrix<double,blaze::columnMajor> > R; * * qr( A, Q, R ); // QR decomposition of A \endcode * * // \n \subsection matrix_operations_decomposition_rq RQ Decomposition // // * Similar to the QR decomposition, the RQ decomposition of a dense matrix * can be computed via // the \c rq() function: * * \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and * initialization * * blaze::DynamicMatrix<double,blaze::rowMajor> R; * blaze::DynamicMatrix<double,blaze::columnMajor> Q; * * rq( A, R, Q ); // RQ decomposition of a row-major matrix * * assert( A == R * Q ); \endcode * * // The function works for both \c rowMajor and \c columnMajor matrices and * the three matrices // \c A, \c R and \c Q can have any storage order. // * // Also the \c rq() function can be used in combination with matrix * adaptors. For instance, the // following example demonstrates the RQ * decomposition of an Hermitian matrix into a general // matrix and an upper * triangular matrix: * * \code blaze::HermitianMatrix< * blaze::DynamicMatrix<complex<double>,blaze::columnMajor> > A; // ... * Resizing and initialization * * blaze::UpperMatrix< blaze::DynamicMatrix<complex<double>,blaze::columnMajor> * > R; blaze::DynamicMatrix<complex<double>,blaze::rowMajor> Q; * * rq( A, R, Q ); // RQ decomposition of A \endcode * * // \n \subsection matrix_operations_decomposition_ql QL Decomposition // // * The QL decomposition of a dense matrix can be computed via the \c ql() * function: * * \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... 
Resizing and * initialization * * blaze::DynamicMatrix<double,blaze::rowMajor> Q; * blaze::DynamicMatrix<double,blaze::columnMajor> L; * * ql( A, Q, L ); // QL decomposition of a row-major matrix * * assert( A == Q * L ); \endcode * * // The function works for both \c rowMajor and \c columnMajor matrices and * the three matrices // \c A, \c Q and \c L can have any storage order. // * // Also the \c ql() function can be used in combination with matrix * adaptors. For instance, the // following example demonstrates the QL * decomposition of a symmetric matrix into a general // matrix and a lower * triangular matrix: * * \code blaze::SymmetricMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> * > A; // ... Resizing and initialization * * blaze::DynamicMatrix<double,blaze::rowMajor> Q; blaze::LowerMatrix< * blaze::DynamicMatrix<double,blaze::columnMajor> > L; * * ql( A, Q, L ); // QL decomposition of A \endcode * * // \n \subsection matrix_operations_decomposition_lq LQ Decomposition // // * The LQ decomposition of a dense matrix can be computed via the \c lq() * function: * * \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and * initialization * * blaze::DynamicMatrix<double,blaze::rowMajor> L; * blaze::DynamicMatrix<double,blaze::columnMajor> Q; * * lq( A, L, Q ); // LQ decomposition of a row-major matrix * * assert( A == L * Q ); \endcode * * // The function works for both \c rowMajor and \c columnMajor matrices and * the three matrices // \c A, \c L and \c Q can have any storage order. // * // Furthermore, \c lq() can be used with adaptors. For instance, the * following example demonstrates // the LQ decomposition of an Hermitian * matrix into a lower triangular matrix and a general matrix: * * \code blaze::HermitianMatrix< * blaze::DynamicMatrix<complex<double>,blaze::columnMajor> > A; // ... * Resizing and initialization * * blaze::LowerMatrix< blaze::DynamicMatrix<complex<double>,blaze::columnMajor> * > L; blaze::DynamicMatrix<complex<double>,blaze::rowMajor> Q; * * lq( A, L, Q ); // LQ decomposition of A \endcode * * // \n \section matrix_operations_eigenvalues Eigenvalues/Eigenvectors // <hr> * // // The eigenvalues and eigenvectors of a dense matrix can be computed * via the \c eigen() functions: * * \code namespace blaze { * * template< typename MT, bool SO, typename VT, bool TF > void eigen( const * DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w ); * * template< typename MT1, bool SO1, typename VT, bool TF, typename MT2, bool * SO2 > void eigen( const DenseMatrix<MT1,SO1>& A, DenseVector<VT,TF>& w, * DenseMatrix<MT2,SO2>& V ); * * } // namespace blaze \endcode * * // The first function computes only the eigenvalues of the given \a n-by-\a n * matrix, the second // function additionally computes the eigenvectors. The * eigenvalues are returned in the given vector // \a w and the eigenvectors * are returned in the given matrix \a V, which are both resized to the // * correct dimensions (if possible and necessary). // // Depending on the * given matrix type, the resulting eigenvalues are either of floating point * // or complex type: In case the given matrix is either a compile time * symmetric matrix with // floating point elements or an Hermitian matrix * with complex elements, the resulting eigenvalues // will be of floating * point type and therefore the elements of the given eigenvalue vector are * // expected to be of floating point type. In all other cases they are * expected to be of complex // type. 
Please note that for complex * eigenvalues no order of eigenvalues can be assumed, except // that complex * conjugate pairs of eigenvalues appear consecutively with the eigenvalue * having // the positive imaginary part first. // // In case \a A is a * row-major matrix, the left eigenvectors are returned in the rows of \a V, * // in case \a A is a column-major matrix, the right eigenvectors are * returned in the columns of // \a V. In case the given matrix is a compile * time symmetric matrix with floating point elements, // the resulting * eigenvectors will be of floating point type and therefore the elements of * the // given eigenvector matrix are expected to be of floating point type. * In all other cases they // are expected to be of complex type. // // The * following examples give an impression of the computation of eigenvalues * and eigenvectors // for a general, a symmetric, and an Hermitian matrix: * * \code using blaze::DynamicMatrix; using blaze::DynamicVector; using * blaze::rowMajor; using blaze::columnVector; * * DynamicMatrix<double,rowMajor> A( 5UL, 5UL ); // The general matrix A // ... * Initialization * * DynamicVector<complex<double>,columnVector> w( 5UL ); // The vector for the * complex eigenvalues DynamicMatrix<complex<double>,rowMajor> V( 5UL, 5UL ); * // The matrix for the left eigenvectors * * eigen( A, w, V ); \endcode * * \code using blaze::SymmetricMatrix; using blaze::DynamicMatrix; using * blaze::DynamicVector; using blaze::rowMajor; using blaze::columnVector; * * SymmetricMatrix< DynamicMatrix<double,rowMajor> > A( 5UL, 5UL ); // The * symmetric matrix A // ... Initialization * * DynamicVector<double,columnVector> w( 5UL ); // The vector for the real * eigenvalues DynamicMatrix<double,rowMajor> V( 5UL, 5UL ); // The * matrix for the left eigenvectors * * eigen( A, w, V ); \endcode * * \code using blaze::HermitianMatrix; using blaze::DynamicMatrix; using * blaze::DynamicVector; using blaze::rowMajor; using blaze::columnVector; * * HermitianMatrix< DynamicMatrix<complex<double>,rowMajor> > A( 5UL, 5UL ); // * The Hermitian matrix A // ... Initialization * * DynamicVector<double,columnVector> w( 5UL ); // The vector for the * real eigenvalues DynamicMatrix<complex<double>,rowMajor> V( 5UL, 5UL ); * // The matrix for the left eigenvectors * * eigen( A, w, V ); \endcode * * // The functions fail if ... // // - ... the given matrix \a A is not a * square matrix; // - ... the given vector \a w is a fixed size vector and * the size doesn't match; // - ... the given matrix \a V is a fixed size * matrix and the dimensions don't match; // - ... the eigenvalue * computation fails. // // In all failure cases an exception is thrown. // * // \note All \c eigen() functions can only be used for dense matrices with * \c float, \c double, // \c complex<float> or \c complex<double> element * type. The attempt to call the function with // matrices of any other * element type or with a sparse matrix results in a compile time error! // * // \note The functions compute the eigenvalues and/or eigenvectors of a * dense matrix by means of // LAPACK kernels. Thus the functions can only be * used if a fitting LAPACK library is available // and linked to the * executable. Otherwise a linker error will be created. 
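 * // As an additional illustration, and as a minimal sketch based on the first \c eigen()
 * // overload listed above (the one without an eigenvector matrix), only the eigenvalues of a
 * // symmetric matrix can be computed as follows:
 *
 * \code
   using blaze::SymmetricMatrix;
   using blaze::DynamicMatrix;
   using blaze::DynamicVector;
   using blaze::columnVector;

   SymmetricMatrix< DynamicMatrix<double> > A( 5UL );  // The symmetric matrix A
   // ... Initialization

   DynamicVector<double,columnVector> w( 5UL );  // The vector for the real eigenvalues

   eigen( A, w );  // Computes only the eigenvalues of A, no eigenvectors
   \endcode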
// // // \n \section * matrix_operations_singularvalues Singular Values/Singular Vectors // <hr> * // // The singular value decomposition (SVD) of a dense matrix can be * computed via the \c svd() // functions: * * \code namespace blaze { * * template< typename MT, bool SO, typename VT, bool TF > void svd( const * DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& s ); * * template< typename MT1, bool SO, typename VT, bool TF, typename MT2, typename * MT3 > void svd( const DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, * DenseVector<VT,TF>& s, DenseMatrix<MT3,SO>& V ); * * template< typename MT, bool SO, typename VT, bool TF, typename ST > size_t * svd( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& s, ST low, ST upp ); * * template< typename MT1, bool SO, typename VT, bool TF, typename MT2, typename * MT3, typename ST > size_t svd( const DenseMatrix<MT1,SO>& A, * DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, DenseMatrix<MT3,SO>& V, ST * low, ST upp ); * * } // namespace blaze \endcode * * // The first and third function compute only singular values of the given * general \a m-by-\a n // matrix, the second and fourth function * additionally compute singular vectors. The resulting // singular values * are returned in the given vector \a s, the left singular vectors are * returned // in the given matrix \a U, and the right singular vectors are * returned in the matrix \a V. \a s, // \a U, and \a V are resized to the * correct dimensions (if possible and necessary). // // The third and fourth * function allow for the specification of a subset of singular values and/or * // vectors. The number of singular values and vectors to be computed is * specified by the lower // bound \a low and the upper bound \a upp, which * either form an integral or a floating point // range. // // In case \a low * and \a upp form are of integral type, the function computes all singular * values // in the index range \f$[low..upp]\f$. The \a num resulting real * and non-negative singular values // are stored in descending order in the * given vector \a s, which is either resized (if possible) // or expected to * be a \a num-dimensional vector. The resulting left singular vectors are * stored // in the given matrix \a U, which is either resized (if possible) * or expected to be a // \a m-by-\a num matrix. The resulting right singular * vectors are stored in the given matrix \a V, // which is either resized * (if possible) or expected to be a \a num-by-\a n matrix. // // In case \a * low and \a upp are of floating point type, the function computes all * singular values // in the half-open interval \f$(low..upp]\f$. The * resulting real and non-negative singular values // are stored in * descending order in the given vector \a s, which is either resized (if * possible) // or expected to be a min(\a m,\a n)-dimensional vector. The * resulting left singular vectors are // stored in the given matrix \a U, * which is either resized (if possible) or expected to be a // \a * m-by-min(\a m,\a n) matrix. The resulting right singular vectors are * stored in the given // matrix \a V, which is either resized (if possible) * or expected to be a min(\a m,\a n)-by-\a n // matrix. // // The functions * fail if ... // // - ... the given matrix \a U is a fixed size matrix and * the dimensions don't match; // - ... the given vector \a s is a fixed * size vector and the size doesn't match; // - ... the given matrix \a V is * a fixed size matrix and the dimensions don't match; // - ... 
the given * scalar values don't form a proper range; // - ... the singular value * decomposition fails. // // In all failure cases an exception is thrown. // * // Examples: * * \code using blaze::DynamicMatrix; using blaze::DynamicVector; using * blaze::rowMajor; using blaze::columnVector; * * DynamicMatrix<double,rowMajor> A( 5UL, 8UL ); // The general matrix A // * ... Initialization * * DynamicMatrix<double,rowMajor> U; // The matrix for the left singular * vectors DynamicVector<double,columnVector> s; // The vector for the * singular values DynamicMatrix<double,rowMajor> V; // The matrix for * the right singular vectors * * svd( A, U, s, V ); \endcode * * \code using blaze::DynamicMatrix; using blaze::DynamicVector; using * blaze::rowMajor; using blaze::columnVector; * * DynamicMatrix<complex<double>,rowMajor> A( 5UL, 8UL ); // The general * matrix A // ... Initialization * * DynamicMatrix<complex<double>,rowMajor> U; // The matrix for the left * singular vectors DynamicVector<double,columnVector> s; // The vector * for the singular values DynamicMatrix<complex<double>,rowMajor> V; // The * matrix for the right singular vectors * * svd( A, U, s, V, 0, 2 ); \endcode * * // \note All \c svd() functions can only be used for dense matrices with \c * float, \c double, // \c complex<float> or \c complex<double> element type. * The attempt to call the function with // matrices of any other element * type or with a sparse matrix results in a compile time error! // // \note * The functions compute the singular values and/or singular vectors of a * dense matrix by // means of LAPACK kernels. Thus the functions can only be * used if a fitting LAPACK library is // available and linked to the * executable. Otherwise a linker error will be created. // // // \n * Previous: \ref matrix_types &nbsp; &nbsp; Next: \ref adaptors */ //************************************************************************************************* //**Adaptors *************************************************************************************** /* * !\page adaptors Adaptors // // \tableofcontents // // // \section * adaptors_general General Concepts // <hr> // // Adaptors act as wrappers * around the general \ref matrix_types. They adapt the interface of the // * matrices such that certain invariants are preserved. Due to this adaptors * can provide a compile // time guarantee of certain properties, which can * be exploited for optimized performance. 
// // The \b Blaze library * provides a total of 9 different adaptors: // // <ul> // <li> \ref * adaptors_symmetric_matrices </li> // <li> \ref * adaptors_hermitian_matrices </li> // <li> \ref * adaptors_triangular_matrices // <ul> // <li> \ref * adaptors_triangular_matrices "Lower Triangular Matrices" // * <ul> // <li> \ref adaptors_triangular_matrices_lowermatrix * </li> // <li> \ref * adaptors_triangular_matrices_unilowermatrix </li> // <li> * \ref adaptors_triangular_matrices_strictlylowermatrix </li> // * </ul> // </li> // <li> \ref adaptors_triangular_matrices * "Upper Triangular Matrices" // <ul> // <li> * \ref adaptors_triangular_matrices_uppermatrix </li> // <li> * \ref adaptors_triangular_matrices_uniuppermatrix </li> // * <li> \ref adaptors_triangular_matrices_strictlyuppermatrix </li> // * </ul> // </li> // <li> \ref adaptors_triangular_matrices * "Diagonal Matrices" // <ul> // <li> \ref * adaptors_triangular_matrices_diagonalmatrix </li> // </ul> // * </li> // </ul> // </li> // </ul> // // In combination with the * general matrix types, \b Blaze provides a total of 40 different matrix // * types that make it possible to exactly adapt the type of matrix to every * specific problem. // // // \n \section adaptors_examples Examples // <hr> * // // The following code examples give an impression on the use of * adaptors. The first example shows // the multiplication between two lower * matrices: * * \code using blaze::DynamicMatrix; using blaze::LowerMatrix; using * blaze::rowMajor; using blaze::columnMajor; * * LowerMatrix< DynamicMatrix<double,rowMajor> > A; LowerMatrix< * DynamicMatrix<double,columnMajor> > B; DynamicMatrix<double,columnMajor> * C; * * // ... Resizing and initialization * * C = A * B; \endcode * * // When multiplying two matrices, at least one of which is triangular, \b * Blaze can exploit the // fact that either the lower or upper part of the * matrix contains only default elements and // restrict the algorithm to the * non-zero elements. Thus the adaptor provides a significant // performance * advantage in comparison to a general matrix multiplication, especially for * large // matrices. // // The second example shows the \c SymmetricMatrix * adaptor in a row-major dense matrix/sparse // vector multiplication: * * \code using blaze::DynamicMatrix; using blaze::DynamicVector; using * blaze::CompressedVector; using blaze::rowMajor; using blaze::columnVector; * * SymmetricMatrix< DynamicMatrix<double,rowMajor> > A; * CompressedVector<double,columnVector> x; * DynamicVector<double,columnVector> y; * * // ... Resizing and initialization * * y = A * x; \endcode * * // In this example it is not intuitively apparent that using a row-major * matrix is not the best // possible choice in terms of performance since * the computation cannot be vectorized. Choosing // a column-major matrix * instead, however, would enable a vectorized computation. Therefore // \b * Blaze exploits the fact that \c A is symmetric, selects the best suited * storage order and // evaluates the multiplication as * * \code y = trans( A ) * x; \endcode * * // which significantly increases the performance. 
// // \n Previous: \ref * matrix_operations &nbsp; &nbsp; Next: \ref adaptors_symmetric_matrices */ //************************************************************************************************* //**Symmetric Matrices ***************************************************************************** /* * !\page adaptors_symmetric_matrices Symmetric Matrices // // * \tableofcontents // // // \n \section adaptors_symmetric_matrices_general * Symmetric Matrices // <hr> // // In contrast to general matrices, which * have no restriction in their number of rows and columns // and whose * elements can have any value, symmetric matrices provide the compile time * guarantee // to be square matrices with pair-wise identical values. * Mathematically, this means that a // symmetric matrix is always equal to * its transpose (\f$ A = A^T \f$) and that all non-diagonal // values have * an identical counterpart (\f$ a_{ij} == a_{ji} \f$). This symmetry * property can // be exploited to provide higher efficiency and/or lower * memory consumption. Within the \b Blaze // library, symmetric matrices are * realized by the \ref adaptors_symmetric_matrices_symmetricmatrix // class * template. // // // \n \section adaptors_symmetric_matrices_symmetricmatrix * SymmetricMatrix // <hr> // // The SymmetricMatrix class template is an * adapter for existing dense and sparse matrix types. // It inherits the * properties and the interface of the given matrix type \c MT and extends it * // by enforcing the additional invariant of symmetry (i.e. the matrix is * always equal to its // transpose \f$ A = A^T \f$). It can be included via * the header file * * \code #include <blaze/math/SymmetricMatrix.h> \endcode * * // The type of the adapted matrix can be specified via template parameter: * * \code template< typename MT > class SymmetricMatrix; \endcode * * // \c MT specifies the type of the matrix to be adapted. SymmetricMatrix can * be used with any // non-cv-qualified, non-reference, non-pointer, * non-expression dense or sparse matrix type. Note // that the given matrix * type must be either resizable (as for instance blaze::HybridMatrix or // * blaze::DynamicMatrix) or must be square at compile time (as for instance * blaze::StaticMatrix). // // The following examples give an impression of * several possible symmetric matrices: * * \code using blaze::unaligned; using blaze::unpadded; using blaze::rowMajor; * using blaze::columnMajor; * * // Definition of a 3x3 row-major dense symmetric matrix with static memory * blaze::SymmetricMatrix< blaze::StaticMatrix<int,3UL,3UL,rowMajor> > A; * * // Definition of a resizable column-major dense symmetric matrix based on * HybridMatrix blaze::SymmetricMatrix< * blaze::HybridMatrix<float,4UL,4UL,columnMajor> > B; * * // Definition of a resizable row-major dense symmetric matrix based on * DynamicMatrix blaze::SymmetricMatrix< * blaze::DynamicMatrix<double,rowMajor> > C; * * // Definition of a fixed size row-major dense symmetric matrix based on * CustomMatrix blaze::SymmetricMatrix< * blaze::CustomMatrix<double,unaligned,unpadded,rowMajor> > D; * * // Definition of a compressed row-major single precision symmetric matrix * blaze::SymmetricMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > * E; \endcode * * // The storage order of a symmetric matrix depends on the storage order * of the adapted matrix // type \c MT. In case the adapted matrix is stored * in a row-wise fashion (i.e. is specified as // blaze::rowMajor), the * symmetric matrix will also be a row-major matrix. 
Otherwise, if the // * adapted matrix is column-major (i.e. is specified as blaze::columnMajor), * the symmetric matrix // will also be a column-major matrix. // // // \n * \section adaptors_symmetric_matrices_special_properties Special Properties * of Symmetric Matrices // <hr> // // A symmetric matrix is used exactly * like a matrix of the underlying, adapted matrix type \c MT. // It also * provides (nearly) the same interface as the underlying matrix type. * However, there are // some important exceptions resulting from the * symmetry constraint: // // -# <b>\ref * adaptors_symmetric_matrices_square</b> // -# <b>\ref * adaptors_symmetric_matrices_symmetry</b> // -# <b>\ref * adaptors_symmetric_matrices_initialization</b> // // \n \subsection * adaptors_symmetric_matrices_square Symmetric Matrices Must Always be * Square! // // In case a resizable matrix is used (as for instance * blaze::HybridMatrix, blaze::DynamicMatrix, // or blaze::CompressedMatrix), * this means that the according constructors, the \c resize() and // the \c * extend() functions only expect a single parameter, which specifies both * the number of // rows and columns, instead of two (one for the number of * rows and one for the number of columns): * * \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; using * blaze::rowMajor; * * // Default constructed, default initialized, row-major 3x3 symmetric dynamic * matrix SymmetricMatrix< DynamicMatrix<double,rowMajor> > A( 3 ); * * // Resizing the matrix to 5x5 A.resize( 5 ); * * // Extending the number of rows and columns by 2, resulting in a 7x7 matrix * A.extend( 2 ); \endcode * * // In case a matrix with a fixed size is used (as for instance * blaze::StaticMatrix), the number // of rows and number of columns must be * specified equally: * * \code using blaze::StaticMatrix; using blaze::SymmetricMatrix; using * blaze::columnMajor; * * // Correct setup of a fixed size column-major 3x3 symmetric static matrix * SymmetricMatrix< StaticMatrix<int,3UL,3UL,columnMajor> > A; * * // Compilation error: the provided matrix type is not a square matrix type * SymmetricMatrix< StaticMatrix<int,3UL,4UL,columnMajor> > B; \endcode * * // \n \subsection adaptors_symmetric_matrices_symmetry The Symmetric Property * is Always Enforced! // // This means that modifying the element \f$ a_{ij} * \f$ of a symmetric matrix also modifies its // counterpart element \f$ * a_{ji} \f$. 
Also, it is only possible to assign matrices that are // * symmetric themselves: * * \code using blaze::CompressedMatrix; using blaze::DynamicMatrix; using * blaze::StaticMatrix; using blaze::SymmetricMatrix; using blaze::rowMajor; * * // Default constructed, row-major 3x3 symmetric compressed matrix * SymmetricMatrix< CompressedMatrix<double,rowMajor> > A( 3 ); * * // Initializing three elements via the function call operator A(0,0) = 1.0; * // Initialization of the diagonal element (0,0) A(0,2) = 2.0; // * Initialization of the elements (0,2) and (2,0) * * // Inserting three more elements via the insert() function A.insert( 1, 1, * 3.0 ); // Inserting the diagonal element (1,1) A.insert( 1, 2, 4.0 ); // * Inserting the elements (1,2) and (2,1) * * // Access via a non-const iterator A.begin(1UL) = 10.0; // Modifies both * elements (1,0) and (0,1) * * // Erasing elements via the erase() function A.erase( 0, 0 ); // Erasing the * diagonal element (0,0) A.erase( 0, 2 ); // Erasing the elements (0,2) and * (2,0) * * // Construction from a symmetric dense matrix StaticMatrix<double,3UL,3UL> B{ * { 3.0, 8.0, -2.0 }, { 8.0, 0.0, -1.0 }, { -2.0, -1.0, 4.0 } }; * * SymmetricMatrix< DynamicMatrix<double,rowMajor> > C( B ); // OK * * // Assignment of a non-symmetric dense matrix StaticMatrix<double,3UL,3UL> D{ * { 3.0, 7.0, -2.0 }, { 8.0, 0.0, -1.0 }, { -2.0, -1.0, 4.0 } }; * * C = D; // Throws an exception; symmetric invariant would be violated! * \endcode * * // The same restriction also applies to the \c append() function for sparse * matrices: Appending // the element \f$ a_{ij} \f$ additionally inserts the * element \f$ a_{ji} \f$ into the matrix. // Despite the additional * insertion, the \c append() function still provides the most efficient // * way to set up a symmetric sparse matrix. 
In order to achieve the maximum * efficiency, the // capacity of the individual rows/columns of the matrix * should be specifically prepared with // \c reserve() calls: * * \code using blaze::CompressedMatrix; using blaze::SymmetricMatrix; using * blaze::rowMajor; * * // Setup of the symmetric matrix // // ( 0 1 3 ) // A = ( 1 2 0 ) // * ( 3 0 0 ) // SymmetricMatrix< CompressedMatrix<double,rowMajor> > A( 3 ); * * A.reserve( 5 ); // Reserving enough space for 5 non-zero elements * A.reserve( 0, 2 ); // Reserving two non-zero elements in the first * row A.reserve( 1, 2 ); // Reserving two non-zero elements in the * second row A.reserve( 2, 1 ); // Reserving a single non-zero element * in the third row A.append( 0, 1, 1.0 ); // Appending the value 1 at * position (0,1) and (1,0) A.append( 1, 1, 2.0 ); // Appending the value 2 * at position (1,1) A.append( 2, 0, 3.0 ); // Appending the value 3 at * position (2,0) and (0,2) \endcode * * // The symmetry property is also enforced for symmetric custom matrices: In * case the given array // of elements does not represent a symmetric matrix, * a \c std::invalid_argument exception is // thrown: * * \code using blaze::CustomMatrix; using blaze::SymmetricMatrix; using * blaze::unaligned; using blaze::unpadded; using blaze::rowMajor; * * using CustomSymmetric = SymmetricMatrix< * CustomMatrix<double,unaligned,unpadded,rowMajor> >; * * // Creating a 3x3 symmetric custom matrix from a properly initialized array * double array[9] = { 1.0, 2.0, 4.0, 2.0, 3.0, 5.0, 4.0, 5.0, 6.0 }; * CustomSymmetric A( array, 3UL ); // OK * * // Attempt to create a second 3x3 symmetric custom matrix from an * uninitialized array std::unique_ptr<double[]> memory( new double[9UL] ); * CustomSymmetric B( memory.get(), 3UL ); // Throws an exception \endcode * * // Finally, the symmetry property is enforced for views (rows, columns, * submatrices, ...) on the // symmetric matrix. The following example * demonstrates that modifying the elements of an entire // row of the * symmetric matrix also affects the counterpart elements in the according * column of // the matrix: * * \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; * * // Setup of the symmetric matrix // // ( 0 1 0 2 ) // A = ( 1 3 4 0 ) * // ( 0 4 0 5 ) // ( 2 0 5 0 ) // SymmetricMatrix< * DynamicMatrix<int> > A( 4 ); A(0,1) = 1; A(0,3) = 2; A(1,1) = 3; A(1,2) = * 4; A(2,3) = 5; * * // Setting all elements in the 1st row to 0 results in the matrix // // * ( 0 0 0 2 ) // A = ( 0 0 0 0 ) // ( 0 0 0 5 ) // ( 2 0 5 0 ) * // row( A, 1 ) = 0; \endcode * * // The next example demonstrates the (compound) assignment to submatrices of * symmetric matrices. // Since the modification of element \f$ a_{ij} \f$ of * a symmetric matrix also modifies the // element \f$ a_{ji} \f$, the matrix * to be assigned must be structured such that the symmetry // of the * symmetric matrix is preserved. 
Otherwise a \c std::invalid_argument * exception is // thrown: * * \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; * * // Setup of two default 4x4 symmetric matrices SymmetricMatrix< * DynamicMatrix<int> > A1( 4 ), A2( 4 ); * * // Setup of the 3x2 dynamic matrix // // ( 1 2 ) // B = ( 3 4 ) // * ( 5 6 ) // DynamicMatrix<int> B{ { 1, 2 }, { 3, 4 }, { 5, 6 } }; * * // OK: Assigning B to a submatrix of A1 such that the symmetry can be * preserved // // ( 0 0 1 2 ) // A1 = ( 0 0 3 4 ) // ( 1 3 5 * 6 ) // ( 2 4 6 0 ) // submatrix( A1, 0UL, 2UL, 3UL, 2UL ) = B; // * OK * * // Error: Assigning B to a submatrix of A2 such that the symmetry cannot be * preserved! // The elements marked with X cannot be assigned * unambiguously! // // ( 0 1 2 0 ) // A2 = ( 1 3 X 0 ) // ( * 2 X 6 0 ) // ( 0 0 0 0 ) // submatrix( A2, 0UL, 1UL, 3UL, 2UL ) = * B; // Assignment throws an exception! \endcode * * // \n \subsection adaptors_symmetric_matrices_initialization The Elements of * a Dense Symmetric Matrix are Always Default Initialized! // // Although * this results in a small loss of efficiency (especially in case all default * values are // overridden afterwards), this property is important since * otherwise the symmetric property of // dense symmetric matrices could not * be guaranteed: * * \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; * * // Uninitialized, 5x5 row-major dynamic matrix DynamicMatrix<int,rowMajor> A( * 5, 5 ); * * // Default initialized, 5x5 row-major symmetric dynamic matrix * SymmetricMatrix< DynamicMatrix<int,rowMajor> > B( 5 ); \endcode * * // \n \section adaptors_symmetric_matrices_arithmetic_operations Arithmetic * Operations // <hr> // // A SymmetricMatrix matrix can participate in * numerical operations in any way any other dense // or sparse matrix can * participate. It can also be combined with any other dense or sparse vector * // or matrix. The following code example gives an impression of the use of * SymmetricMatrix within // arithmetic operations: * * \code using blaze::SymmetricMatrix; using blaze::DynamicMatrix; using * blaze::HybridMatrix; using blaze::StaticMatrix; using * blaze::CompressedMatrix; using blaze::rowMajor; using blaze::columnMajor; * * DynamicMatrix<double,rowMajor> A( 3, 3 ); CompressedMatrix<double,rowMajor> * B( 3, 3 ); * * SymmetricMatrix< DynamicMatrix<double,rowMajor> > C( 3 ); SymmetricMatrix< * CompressedMatrix<double,rowMajor> > D( 3 ); * * SymmetricMatrix< HybridMatrix<float,3UL,3UL,rowMajor> > E; SymmetricMatrix< * StaticMatrix<float,3UL,3UL,columnMajor> > F; * * E = A + B; // Matrix addition and assignment to a row-major symmetric * matrix (includes runtime check) F = C - D; // Matrix subtraction and * assignment to a column-major symmetric matrix (only compile time check) F * = A * D; // Matrix multiplication between a dense and a sparse matrix * (includes runtime check) * * C *= 2.0; // In-place scaling of matrix C E = 2.0 * B; // Scaling of * matrix B (includes runtime check) F = C * 2.0; // Scaling of matrix C * (only compile time check) * * E += A - B; // Addition assignment (includes runtime check) F -= C + D; * // Subtraction assignment (only compile time check) F *= A * D; // * Multiplication assignment (includes runtime check) \endcode * * // Note that it is possible to assign any kind of matrix to a symmetric * matrix. In case the matrix // to be assigned is not symmetric at compile * time, a runtime check is performed. 
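 * // As a minimal sketch (assuming, as in the custom matrix example above, that a violated
 * // symmetry invariant is reported via a \c std::invalid_argument exception), such an
 * // assignment of a general matrix can be guarded explicitly:
 *
 * \code
   using blaze::DynamicMatrix;
   using blaze::SymmetricMatrix;

   SymmetricMatrix< DynamicMatrix<double> > S( 3 );
   DynamicMatrix<double> G( 3, 3 );
   // ... Initialization of G, which is not guaranteed to be symmetric

   try {
      S = G;  // Performs the runtime symmetry check; throws if G is not symmetric
   }
   catch( const std::invalid_argument& ex ) {
      // ... Error handling
   }
   \endcode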
// // // \n \section * adaptors_symmetric_matrices_block_matrices Symmetric Block Matrices // * <hr> // // It is also possible to use symmetric block matrices: * * \code using blaze::CompressedMatrix; using blaze::StaticMatrix; using * blaze::SymmetricMatrix; * * // Definition of a 3x3 symmetric block matrix based on CompressedMatrix * SymmetricMatrix< CompressedMatrix< StaticMatrix<int,3UL,3UL> > > A( 3 ); * \endcode * * // Also in this case, the SymmetricMatrix class template enforces the * invariant of symmetry and // guarantees that a modification of element * \f$ a_{ij} \f$ of the adapted matrix is also // applied to element \f$ * a_{ji} \f$: * * \code // Inserting the elements (2,4) and (4,2) A.insert( 2, 4, * StaticMatrix<int,3UL,3UL>{ { 1, -4, 5 }, { 6, 8, -3 }, { 2, -1, 2 } } * ); * * // Manipulating the elements (2,4) and (4,2) A(2,4)(1,1) = -5; \endcode * * // For more information on block matrices, see the tutorial on \ref * block_vectors_and_matrices. // // // \n \section * adaptors_symmetric_matrices_performance Performance Considerations // <hr> * // // When the symmetric property of a matrix is known beforehand using * the SymmetricMatrix adaptor // instead of a general matrix can be a * considerable performance advantage. The \b Blaze library // tries to * exploit the properties of symmetric matrices whenever possible. However, * there are // also situations when using a symmetric matrix introduces some * overhead. The following examples // demonstrate several situations where * symmetric matrices can positively or negatively impact // performance. // * // \n \subsection adaptors_symmetric_matrices_matrix_matrix_multiplication * Positive Impact: Matrix/Matrix Multiplication // // When multiplying two * matrices, at least one of which is symmetric, \b Blaze can exploit the * fact // that \f$ A = A^T \f$ and choose the fastest and most suited * combination of storage orders for the // multiplication. The following * example demonstrates this by means of a dense matrix/sparse matrix // * multiplication: * * \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; using * blaze::rowMajor; using blaze::columnMajor; * * SymmetricMatrix< DynamicMatrix<double,rowMajor> > A; SymmetricMatrix< * CompressedMatrix<double,columnMajor> > B; * DynamicMatrix<double,columnMajor> C; * * // ... Resizing and initialization * * C = A * B; \endcode * * // Intuitively, the chosen combination of a row-major and a column-major * matrix is the most suited // for maximum performance. However, \b Blaze * evaluates the multiplication as * * \code C = A * trans( B ); \endcode * * // which significantly increases the performance since in contrast to the * original formulation the // optimized form can be vectorized. Therefore, * in the context of matrix multiplications, using the // SymmetricMatrix * adapter is obviously an advantage. // // \n \subsection * adaptors_symmetric_matrices_matrix_vector_multiplication Positive Impact: * Matrix/Vector Multiplication // // A similar optimization is possible in * case of matrix/vector multiplications: * * \code using blaze::DynamicMatrix; using blaze::DynamicVector; using * blaze::CompressedVector; using blaze::rowMajor; using blaze::columnVector; * * SymmetricMatrix< DynamicMatrix<double,rowMajor> > A; * CompressedVector<double,columnVector> x; * DynamicVector<double,columnVector> y; * * // ... 
Resizing and initialization * * y = A * x; \endcode * * // In this example it is not intuitively apparent that using a row-major * matrix is not the best // possible choice in terms of performance since * the computation cannot be vectorized. Choosing // a column-major matrix * instead, however, would enable a vectorized computation. Therefore // \b * Blaze exploits the fact that \c A is symmetric, selects the best suited * storage order and // evaluates the multiplication as * * \code y = trans( A ) * x; \endcode * * // which also significantly increases the performance. // // \n \subsection * adaptors_symmetric_matrices_views Positive Impact: Row/Column Views on * Column/Row-Major Matrices // // Another example is the optimization of a * row view on a column-major symmetric matrix: * * \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; using * blaze::columnMajor; * * SymmetricMatrix< DynamicMatrix<double,columnMajor> > A( 10UL ); auto row5 = * row( A, 5UL ); \endcode * * // Usually, a row view on a column-major matrix results in a considerable * performance decrease in // comparison to a row view on a row-major matrix * due to the non-contiguous storage of the matrix // elements. However, in * case of symmetric matrices, \b Blaze instead uses the according column of * // the matrix, which provides the same performance as if the matrix would * be row-major. Note that // this also works for column views on row-major * matrices, where \b Blaze can use the according // row instead of a column * in order to provide maximum performance. // // \n \subsection * adaptors_symmetric_matrices_assignment Negative Impact: Assignment of a * General Matrix // // In contrast to using a symmetric matrix on the * right-hand side of an assignment (i.e. for read // access), which * introduces absolutely no performance penalty, using a symmetric matrix on * the // left-hand side of an assignment (i.e. for write access) may * introduce additional overhead when // it is assigned a general matrix, * which is not symmetric at compile time: * * \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; * * SymmetricMatrix< DynamicMatrix<double> > A, C; DynamicMatrix<double> B; * * B = A; // Only read-access to the symmetric matrix; no performance penalty C * = A; // Assignment of a symmetric matrix to another symmetric matrix; no * runtime overhead C = B; // Assignment of a general matrix to a symmetric * matrix; some runtime overhead \endcode * * // When assigning a general, potentially not symmetric matrix to a symmetric * matrix it is necessary // to check whether the matrix is symmetric at * runtime in order to guarantee the symmetry property // of the symmetric * matrix. In case it turns out to be symmetric, it is assigned as * efficiently as // possible, if it is not, an exception is thrown. 
In order * to prevent this runtime overhead it is // therefore generally advisable to * assign symmetric matrices to other symmetric matrices.\n // In this * context it is especially noteworthy that in contrast to additions and * subtractions the // multiplication of two symmetric matrices does not * necessarily result in another symmetric matrix: * * \code SymmetricMatrix< DynamicMatrix<double> > A, B, C; * * C = A + B; // Results in a symmetric matrix; no runtime overhead C = A - B; * // Results in a symmetric matrix; no runtime overhead C = A * B; // Is * not guaranteed to result in a symmetric matrix; some runtime overhead * \endcode * * // \n Previous: \ref adaptors &nbsp; &nbsp; Next: \ref * adaptors_hermitian_matrices */ //************************************************************************************************* //**Hermitian Matrices ***************************************************************************** /* * !\page adaptors_hermitian_matrices Hermitian Matrices // // * \tableofcontents // // // \n \section adaptors_hermitian_matrices_general * Hermitian Matrices // <hr> // // In addition to symmetric matrices, \b * Blaze also provides an adaptor for Hermitian matrices. // Hermitian * matrices provide the compile time guarantee to be square matrices with * pair-wise // conjugate complex values. Mathematically, this means that an * Hermitian matrix is always equal // to its conjugate transpose (\f$ A = * \overline{A^T} \f$) and that all non-diagonal values have // a complex * conjugate counterpart (\f$ a_{ij} == \overline{a_{ji}} \f$). Within the \b * Blaze // library, Hermitian matrices are realized by the \ref * adaptors_hermitian_matrices_hermitianmatrix // class template. // // // \n * \section adaptors_hermitian_matrices_hermitianmatrix HermitianMatrix // * <hr> // // The HermitianMatrix class template is an adapter for existing * dense and sparse matrix types. // It inherits the properties and the * interface of the given matrix type \c MT and extends it by // enforcing * the additional invariant of Hermitian symmetry (i.e. the matrix is always * equal to // its conjugate transpose \f$ A = \overline{A^T} \f$). It can be * included via the header file * * \code #include <blaze/math/HermitianMatrix.h> \endcode * * // The type of the adapted matrix can be specified via template parameter: * * \code template< typename MT > class HermitianMatrix; \endcode * * // \c MT specifies the type of the matrix to be adapted. HermitianMatrix can * be used with any // non-cv-qualified, non-reference, non-pointer, * non-expression dense or sparse matrix type. Also, // the given matrix type * must have numeric element types (i.e. all integral types except \c bool, * // floating point and complex types). Note that the given matrix type must * be either resizable (as // for instance blaze::HybridMatrix or * blaze::DynamicMatrix) or must be square at compile time (as // for * instance blaze::StaticMatrix). 
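 * // A minimal sketch (added for illustration, not part of the original text) of the invariant just
 * // described: writing a single off-diagonal element of a HermitianMatrix also fixes its conjugate
 * // counterpart, and diagonal elements stay real. The names \c H, \c a and \c d are illustrative:
 *
 * \code
 * using blaze::DynamicMatrix;
 * using blaze::HermitianMatrix;
 *
 * using cplx = std::complex<double>;
 *
 * HermitianMatrix< DynamicMatrix<cplx> > H( 3 );
 *
 * H(0,2) = cplx( 2.0, 1.0 );   // Writing element (0,2) ...
 *
 * cplx a = H(2,0);             // ... implicitly sets (2,0) to the complex conjugate (2,-1)
 * cplx d = H(1,1);             // Diagonal elements are real; here still the default (0,0)
 * \endcode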
// // The following examples give an * impression of several possible Hermitian matrices: * * \code using blaze::unaligned; using blaze::unpadded; using blaze::rowMajor; * using blaze::columnMajor; * * // Definition of a 3x3 row-major dense Hermitian matrix with static memory * blaze::HermitianMatrix< blaze::StaticMatrix<int,3UL,3UL,rowMajor> > A; * * // Definition of a resizable column-major dense Hermitian matrix based on * HybridMatrix blaze::HermitianMatrix< * blaze::HybridMatrix<float,4UL,4UL,columnMajor> B; * * // Definition of a resizable row-major dense Hermitian matrix based on * DynamicMatrix blaze::HermitianMatrix< * blaze::DynamicMatrix<std::complex<double>,rowMajor> > C; * * // Definition of a fixed size row-major dense Hermitian matrix based on * CustomMatrix blaze::HermitianMatrix< * blaze::CustomMatrix<double,unaligned,unpadded,rowMajor> > D; * * // Definition of a compressed row-major single precision complex Hermitian * matrix blaze::HermitianMatrix< * blaze::CompressedMatrix<std::complex<float>,rowMajor> > E; \endcode * * // The storage order of a Hermitian matrix is depending on the storage order * of the adapted matrix // type \c MT. In case the adapted matrix is stored * in a row-wise fashion (i.e. is specified as // blaze::rowMajor), the * Hermitian matrix will also be a row-major matrix. Otherwise, if the // * adapted matrix is column-major (i.e. is specified as blaze::columnMajor), * the Hermitian matrix // will also be a column-major matrix. // // // \n * \section adaptors_hermitian_matrices_vs_symmetric_matrices Hermitian * Matrices vs. Symmetric Matrices // // The blaze::HermitianMatrix adaptor * and the blaze::SymmetricMatrix adaptor share several traits. // However, * there are a couple of differences, both from a mathematical point of view * as well as // from an implementation point of view. // // From a * mathematical point of view, a matrix is called symmetric when it is equal * to its // transpose (\f$ A = A^T \f$) and it is called Hermitian when it * is equal to its conjugate // transpose (\f$ A = \overline{A^T} \f$). For * matrices of real values, however, these two // conditions coincide, which * means that symmetric matrices of real values are also Hermitian // and * Hermitian matrices of real values are also symmetric. // // From an * implementation point of view, \b Blaze restricts Hermitian matrices to * numeric data // types (i.e. all integral types except \c bool, floating * point and complex types), whereas // symmetric matrices can also be block * matrices (i.e. can have vector or matrix elements). // For built-in * element types, the HermitianMatrix adaptor behaves exactly like the * according // SymmetricMatrix implementation. For complex element types, * however, the Hermitian property // is enforced (see also \ref * adaptors_hermitian_matrices_hermitian). 
* * \code using blaze::DynamicMatrix; using blaze::DynamicVector; using * blaze::HermitianMatrix; using blaze::SymmetricMatrix; * * // The following two matrices provide an identical experience (including * performance) HermitianMatrix< DynamicMatrix<double> > A; // Both * Hermitian and symmetric SymmetricMatrix< DynamicMatrix<double> > B; // * Both Hermitian and symmetric * * // The following two matrices will behave differently HermitianMatrix< * DynamicMatrix< complex<double> > > C; // Only Hermitian SymmetricMatrix< * DynamicMatrix< complex<double> > > D; // Only symmetric * * // Hermitian block matrices are not allowed HermitianMatrix< DynamicMatrix< * DynamicVector<double> > > E; // Compilation error! SymmetricMatrix< * DynamicMatrix< DynamicVector<double> > > F; // Symmetric block matrix * \endcode * * // \n \section adaptors_hermitian_matrices_special_properties Special * Properties of Hermitian Matrices // <hr> // // A Hermitian matrix is used * exactly like a matrix of the underlying, adapted matrix type \c MT. // It * also provides (nearly) the same interface as the underlying matrix type. * However, there are // some important exceptions resulting from the * Hermitian symmetry constraint: // // -# <b>\ref * adaptors_hermitian_matrices_square</b> // -# <b>\ref * adaptors_hermitian_matrices_hermitian</b> // -# <b>\ref * adaptors_hermitian_matrices_initialization</b> // // \n \subsection * adaptors_hermitian_matrices_square Hermitian Matrices Must Always be * Square! // // In case a resizable matrix is used (as for instance * blaze::HybridMatrix, blaze::DynamicMatrix, // or blaze::CompressedMatrix), * this means that the according constructors, the \c resize() and // the \c * extend() functions only expect a single parameter, which specifies both * the number of // rows and columns, instead of two (one for the number of * rows and one for the number of columns): * * \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; using * blaze::rowMajor; * * // Default constructed, default initialized, row-major 3x3 Hermitian dynamic * matrix HermitianMatrix< DynamicMatrix<std::complex<double>,rowMajor> > A( * 3 ); * * // Resizing the matrix to 5x5 A.resize( 5 ); * * // Extending the number of rows and columns by 2, resulting in a 7x7 matrix * A.extend( 2 ); \endcode * * // In case a matrix with a fixed size is used (as for instance * blaze::StaticMatrix), the number // of rows and number of columns must be * specified equally: * * \code using blaze::StaticMatrix; using blaze::HermitianMatrix; using * blaze::columnMajor; * * // Correct setup of a fixed size column-major 3x3 Hermitian static matrix * HermitianMatrix< StaticMatrix<std::complex<float>,3UL,3UL,columnMajor> > * A; * * // Compilation error: the provided matrix type is not a square matrix type * HermitianMatrix< StaticMatrix<std::complex<float>,3UL,4UL,columnMajor> > * B; \endcode * * // \n \subsection adaptors_hermitian_matrices_hermitian The Hermitian * Property is Always Enforced! // // This means that the following * properties of a Hermitian matrix are always guaranteed: // // - The * diagonal elements are real numbers, i.e. the imaginary part is zero // - * Element \f$ a_{ij} \f$ is always the complex conjugate of element \f$ * a_{ji} \f$ // // Thus modifying the element \f$ a_{ij} \f$ of a Hermitian * matrix also modifies its // counterpart element \f$ a_{ji} \f$. 
 * // Also, it is only possible to assign matrices that are Hermitian themselves:
 *
 * \code
 * using blaze::CompressedMatrix;
 * using blaze::DynamicMatrix;
 * using blaze::StaticMatrix;
 * using blaze::HermitianMatrix;
 * using blaze::rowMajor;
 *
 * using cplx = std::complex<double>;
 *
 * // Default constructed, row-major 3x3 Hermitian compressed matrix
 * HermitianMatrix< CompressedMatrix<cplx,rowMajor> > A( 3 );
 *
 * // Initializing the matrix via the function call operator
 * //
 * //   ( (1, 0) (0,0) (2,1) )
 * //   ( (0, 0) (0,0) (0,0) )
 * //   ( (2,-1) (0,0) (0,0) )
 * //
 * A(0,0) = cplx( 1.0, 0.0 );  // Initialization of the diagonal element (0,0)
 * A(0,2) = cplx( 2.0, 1.0 );  // Initialization of the elements (0,2) and (2,0)
 *
 * // Inserting three more elements via the insert() function
 * //
 * //   ( (1, 0) (0,0) (2, 1) )
 * //   ( (0, 0) (2,0) (4,-2) )
 * //   ( (2,-1) (4,2) (0, 0) )
 * //
 * A.insert( 1, 1, cplx( 2.0,  0.0 ) );  // Inserting the diagonal element (1,1)
 * A.insert( 1, 2, cplx( 4.0, -2.0 ) );  // Inserting the elements (1,2) and (2,1)
 *
 * // Access via a non-const iterator
 * //
 * //   ( (1, 0) (8,1) (2, 1) )
 * //   ( (8,-1) (2,0) (4,-2) )
 * //   ( (2,-1) (4,2) (0, 0) )
 * //
 * *A.begin(1UL) = cplx( 8.0, -1.0 );  // Modifies both elements (1,0) and (0,1)
 *
 * // Erasing elements via the erase() function
 * //
 * //   ( (0, 0) (8,1) (0, 0) )
 * //   ( (8,-1) (2,0) (4,-2) )
 * //   ( (0, 0) (4,2) (0, 0) )
 * //
 * A.erase( 0, 0 );  // Erasing the diagonal element (0,0)
 * A.erase( 0, 2 );  // Erasing the elements (0,2) and (2,0)
 *
 * // Construction from a Hermitian dense matrix
 * StaticMatrix<cplx,3UL,3UL> B{ { cplx(  3.0,  0.0 ), cplx(  8.0,  2.0 ), cplx( -2.0,  2.0 ) },
 *                               { cplx(  8.0, -2.0 ), cplx(  0.0,  0.0 ), cplx( -1.0, -1.0 ) },
 *                               { cplx( -2.0, -2.0 ), cplx( -1.0,  1.0 ), cplx(  4.0,  0.0 ) } };
 *
 * HermitianMatrix< DynamicMatrix<cplx,rowMajor> > C( B );  // OK
 *
 * // Assignment of a non-Hermitian dense matrix
 * StaticMatrix<cplx,3UL,3UL> D{ { cplx(  3.0,  0.0 ), cplx(  7.0,  2.0 ), cplx(  3.0,  2.0 ) },
 *                               { cplx(  8.0,  1.0 ), cplx(  0.0,  0.0 ), cplx(  6.0,  4.0 ) },
 *                               { cplx( -2.0,  2.0 ), cplx( -1.0,  1.0 ), cplx(  4.0,  0.0 ) } };
 *
 * C = D;  // Throws an exception; Hermitian invariant would be violated!
 * \endcode
 *
 * // The same restriction also applies to the \c append() function for sparse matrices: Appending
 * // the element \f$ a_{ij} \f$ additionally inserts the element \f$ a_{ji} \f$ into the matrix.
 * // Despite the additional insertion, the \c append() function still provides the most efficient
 * // way to set up a Hermitian sparse matrix.
 * // In order to achieve the maximum efficiency, the capacity of the individual rows/columns of the
 * // matrix should be specifically prepared with \c reserve() calls:
 *
 * \code
 * using blaze::CompressedMatrix;
 * using blaze::HermitianMatrix;
 * using blaze::rowMajor;
 *
 * using cplx = std::complex<double>;
 *
 * // Setup of the Hermitian matrix
 * //
 * //       ( (0, 0) (1,2) (3,-4) )
 * //   A = ( (1,-2) (2,0) (0, 0) )
 * //       ( (3, 4) (0,0) (0, 0) )
 * //
 * HermitianMatrix< CompressedMatrix<cplx,rowMajor> > A( 3 );
 *
 * A.reserve( 5 );         // Reserving enough space for 5 non-zero elements
 * A.reserve( 0, 2 );      // Reserving two non-zero elements in the first row
 * A.reserve( 1, 2 );      // Reserving two non-zero elements in the second row
 * A.reserve( 2, 1 );      // Reserving a single non-zero element in the third row
 *
 * A.append( 0, 1, cplx( 1.0, 2.0 ) );  // Appending an element at position (0,1) and (1,0)
 * A.append( 1, 1, cplx( 2.0, 0.0 ) );  // Appending an element at position (1,1)
 * A.append( 2, 0, cplx( 3.0, 4.0 ) );  // Appending an element at position (2,0) and (0,2)
 * \endcode
 *
 * // The Hermitian property is also enforced for Hermitian custom matrices: In case the given array
 * // of elements does not represent a Hermitian matrix, a \c std::invalid_argument exception is
 * // thrown:
 *
 * \code
 * using blaze::CustomMatrix;
 * using blaze::HermitianMatrix;
 * using blaze::unaligned;
 * using blaze::unpadded;
 * using blaze::rowMajor;
 *
 * using CustomHermitian = HermitianMatrix< CustomMatrix<double,unaligned,unpadded,rowMajor> >;
 *
 * // Creating a 3x3 Hermitian custom matrix from a properly initialized array
 * double array[9] = { 1.0, 2.0, 4.0, 2.0, 3.0, 5.0, 4.0, 5.0, 6.0 };
 * CustomHermitian A( array, 3UL );  // OK
 *
 * // Attempt to create a second 3x3 Hermitian custom matrix from an uninitialized array
 * std::unique_ptr<double[]> memory( new double[9UL] );
 * CustomHermitian B( memory.get(), 3UL );  // Throws an exception
 * \endcode
 *
 * // Finally, the Hermitian property is enforced for views (rows, columns, submatrices, ...) on the
 * // Hermitian matrix. The following example demonstrates that modifying the elements of an entire
 * // row of the Hermitian matrix also affects the counterpart elements in the according column of
 * // the matrix:
 *
 * \code
 * using blaze::DynamicMatrix;
 * using blaze::HermitianMatrix;
 *
 * using cplx = std::complex<double>;
 *
 * // Setup of the Hermitian matrix
 * //
 * //       ( (0, 0) (1,-1) (0,0) (2, 1) )
 * //   A = ( (1, 1) (3, 0) (4,2) (0, 0) )
 * //       ( (0, 0) (4,-2) (0,0) (5,-3) )
 * //       ( (2,-1) (0, 0) (5,3) (0, 0) )
 * //
 * HermitianMatrix< DynamicMatrix<cplx> > A( 4 );
 * A(0,1) = cplx( 1.0, -1.0 );
 * A(0,3) = cplx( 2.0,  1.0 );
 * A(1,1) = cplx( 3.0,  0.0 );
 * A(1,2) = cplx( 4.0,  2.0 );
 * A(2,3) = cplx( 5.0,  3.0 );
 *
 * // Setting all elements in the 1st row to 0 results in the matrix
 * //
 * //       ( (0, 0) (0,0) (0,0) (2, 1) )
 * //   A = ( (0, 0) (0,0) (0,0) (0, 0) )
 * //       ( (0, 0) (0,0) (0,0) (5,-3) )
 * //       ( (2,-1) (0,0) (5,3) (0, 0) )
 * //
 * row( A, 1 ) = cplx( 0.0, 0.0 );
 * \endcode
 *
 * // The next example demonstrates the (compound) assignment to submatrices of Hermitian matrices.
 * // Since the modification of element \f$ a_{ij} \f$ of a Hermitian matrix also modifies the
 * // element \f$ a_{ji} \f$, the matrix to be assigned must be structured such that the Hermitian
 * // symmetry of the matrix is preserved.
 * // Otherwise a \c std::invalid_argument exception is thrown:
 *
 * \code
 * using blaze::DynamicMatrix;
 * using blaze::HermitianMatrix;
 *
 * using cplx = std::complex<double>;
 *
 * // Setup of two default 4x4 Hermitian matrices
 * HermitianMatrix< DynamicMatrix<cplx> > A1( 4 ), A2( 4 );
 *
 * // Setup of the 3x2 dynamic matrix
 * //
 * //       ( (1,-1) (2, 5) )
 * //   B = ( (3, 0) (4,-6) )
 * //       ( (5, 0) (6, 0) )
 * //
 * DynamicMatrix<cplx> B( 3UL, 2UL );
 * B(0,0) = cplx( 1.0, -1.0 );
 * B(0,1) = cplx( 2.0,  5.0 );
 * B(1,0) = cplx( 3.0,  0.0 );
 * B(1,1) = cplx( 4.0, -6.0 );
 * B(2,0) = cplx( 5.0,  0.0 );
 * B(2,1) = cplx( 6.0,  0.0 );
 *
 * // OK: Assigning B to a submatrix of A1 such that the Hermitian property is preserved
 * //
 * //        ( (0, 0) (0, 0) (1,-1) (2, 5) )
 * //   A1 = ( (0, 0) (0, 0) (3, 0) (4,-6) )
 * //        ( (1, 1) (3, 0) (5, 0) (6, 0) )
 * //        ( (2,-5) (4, 6) (6, 0) (0, 0) )
 * //
 * submatrix( A1, 0UL, 2UL, 3UL, 2UL ) = B;  // OK
 *
 * // Error: Assigning B to a submatrix of A2 such that the Hermitian property isn't preserved!
 * //   The elements marked with X cannot be assigned unambiguously!
 * //
 * //        ( (0, 0) (1,-1) (2,5) (0,0) )
 * //   A2 = ( (1, 1) (3, 0) (X,X) (0,0) )
 * //        ( (2,-5) (X, X) (6,0) (0,0) )
 * //        ( (0, 0) (0, 0) (0,0) (0,0) )
 * //
 * submatrix( A2, 0UL, 1UL, 3UL, 2UL ) = B;  // Assignment throws an exception!
 * \endcode
 *
 * // \n \subsection adaptors_hermitian_matrices_initialization The Elements of a Dense Hermitian Matrix are Always Default Initialized!
 * //
 * // Although this results in a small loss of efficiency (especially in case all default values are
 * // overridden afterwards), this property is important since otherwise the Hermitian property of
 * // dense Hermitian matrices could not be guaranteed:
 *
 * \code
 * using blaze::DynamicMatrix;
 * using blaze::HermitianMatrix;
 *
 * // Uninitialized, 5x5 row-major dynamic matrix
 * DynamicMatrix<int,rowMajor> A( 5, 5 );
 *
 * // Default initialized, 5x5 row-major Hermitian dynamic matrix
 * HermitianMatrix< DynamicMatrix<int,rowMajor> > B( 5 );
 * \endcode
 *
 * // \n \section adaptors_hermitian_matrices_arithmetic_operations Arithmetic Operations
 * // <hr>
 * //
 * // A HermitianMatrix can be used within all numerical operations in any way any other dense or
 * // sparse matrix can be used. It can also be combined with any other dense or sparse vector or
 * // matrix.
The following code example gives an impression of the use of * HermitianMatrix within // arithmetic operations: * * \code using blaze::HermitianMatrix; using blaze::DynamicMatrix; using * blaze::HybridMatrix; using blaze::StaticMatrix; using * blaze::CompressedMatrix; using blaze::rowMajor; using blaze::columnMajor; * * using cplx = complex<float>; * * DynamicMatrix<cplx,rowMajor> A( 3, 3 ); CompressedMatrix<cplx,rowMajor> B( 3, * 3 ); * * HermitianMatrix< DynamicMatrix<cplx,rowMajor> > C( 3 ); HermitianMatrix< * CompressedMatrix<cplx,rowMajor> > D( 3 ); * * HermitianMatrix< HybridMatrix<cplx,3UL,3UL,rowMajor> > E; HermitianMatrix< * StaticMatrix<cplx,3UL,3UL,columnMajor> > F; * * E = A + B; // Matrix addition and assignment to a row-major Hermitian * matrix (includes runtime check) F = C - D; // Matrix subtraction and * assignment to a column-major Hermitian matrix (only compile time check) F * = A * D; // Matrix multiplication between a dense and a sparse matrix * (includes runtime check) * * C *= 2.0; // In-place scaling of matrix C E = 2.0 * B; // Scaling of * matrix B (includes runtime check) F = C * 2.0; // Scaling of matrix C * (only compile time check) * * E += A - B; // Addition assignment (includes runtime check) F -= C + D; * // Subtraction assignment (only compile time check) F *= A * D; // * Multiplication assignment (includes runtime check) \endcode * * // Note that it is possible to assign any kind of matrix to a Hermitian * matrix. In case the matrix // to be assigned is not Hermitian at compile * time, a runtime check is performed. // // // \n \section * adaptors_hermitian_matrices_performance Performance Considerations // <hr> * // // When the Hermitian property of a matrix is known beforehands using * the HermitianMatrix adaptor // instead of a general matrix can be a * considerable performance advantage. This is particularly // true in case * the Hermitian matrix is also symmetric (i.e. has built-in element types). * The // \b Blaze library tries to exploit the properties of Hermitian * (symmetric) matrices whenever // possible. However, there are also * situations when using a Hermitian matrix introduces some // overhead. The * following examples demonstrate several situations where Hermitian matrices * can // positively or negatively impact performance. // // \n \subsection * adaptors_hermitian_matrices_matrix_matrix_multiplication Positive Impact: * Matrix/Matrix Multiplication // // When multiplying two matrices, at least * one of which is symmetric, \b Blaze can exploit the fact // that \f$ A = * A^T \f$ and choose the fastest and most suited combination of storage * orders for the // multiplication. The following example demonstrates this * by means of a dense matrix/sparse matrix // multiplication: * * \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; using * blaze::rowMajor; using blaze::columnMajor; * * HermitianMatrix< DynamicMatrix<double,rowMajor> > A; // Both Hermitian * and symmetric HermitianMatrix< CompressedMatrix<double,columnMajor> > B; * // Both Hermitian and symmetric DynamicMatrix<double,columnMajor> C; * * // ... Resizing and initialization * * C = A * B; \endcode * * // Intuitively, the chosen combination of a row-major and a column-major * matrix is the most suited // for maximum performance. However, \b Blaze * evaluates the multiplication as * * \code C = A * trans( B ); \endcode * * // which significantly increases the performance since in contrast to the * original formulation the // optimized form can be vectorized. 
Therefore, * in the context of matrix multiplications, using a // symmetric matrix is * obviously an advantage. // // \n \subsection * adaptors_hermitian_matrices_matrix_vector_multiplication Positive Impact: * Matrix/Vector Multiplication // // A similar optimization is possible in * case of matrix/vector multiplications: * * \code using blaze::DynamicMatrix; using blaze::DynamicVector; using * blaze::CompressedVector; using blaze::HermitianMatrix; using * blaze::rowMajor; using blaze::columnVector; * * HermitianMatrix< DynamicMatrix<double,rowMajor> > A; // Hermitian and * symmetric CompressedVector<double,columnVector> x; * DynamicVector<double,columnVector> y; * * // ... Resizing and initialization * * y = A * x; \endcode * * // In this example it is not intuitively apparent that using a row-major * matrix is not the best // possible choice in terms of performance since * the computation cannot be vectorized. Choosing // a column-major matrix * instead, however, would enable a vectorized computation. Therefore // \b * Blaze exploits the fact that \c A is symmetric, selects the best suited * storage order and // evaluates the multiplication as * * \code y = trans( A ) * x; \endcode * * // which also significantly increases the performance. // // \n \subsection * adaptors_hermitian_matrices_views Positive Impact: Row/Column Views on * Column/Row-Major Matrices // // Another example is the optimization of a * row view on a column-major symmetric matrix: * * \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; using * blaze::columnMajor; * * HermitianMatrix< DynamicMatrix<double,columnMajor> > A( 10UL ); // Both * Hermitian and symmetric auto row5 = row( A, 5UL ); \endcode * * // Usually, a row view on a column-major matrix results in a considerable * performance decrease in // comparison to a row view on a row-major matrix * due to the non-contiguous storage of the matrix // elements. However, in * case of symmetric matrices, \b Blaze instead uses the according column of * // the matrix, which provides the same performance as if the matrix would * be row-major. Note that // this also works for column views on row-major * matrices, where \b Blaze can use the according // row instead of a column * in order to provide maximum performance. // // \n \subsection * adaptors_hermitian_matrices_assignment Negative Impact: Assignment of a * General Matrix // // In contrast to using a Hermitian matrix on the * right-hand side of an assignment (i.e. for read // access), which * introduces absolutely no performance penalty, using a Hermitian matrix on * the // left-hand side of an assignment (i.e. for write access) may * introduce additional overhead when // it is assigned a general matrix, * which is not Hermitian at compile time: * * \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; * * HermitianMatrix< DynamicMatrix< complex<double> > > A, C; * DynamicMatrix<double> B; * * B = A; // Only read-access to the Hermitian matrix; no performance penalty C * = A; // Assignment of a Hermitian matrix to another Hermitian matrix; no * runtime overhead C = B; // Assignment of a general matrix to a Hermitian * matrix; some runtime overhead \endcode * * // When assigning a general, potentially not Hermitian matrix to a Hermitian * matrix it is necessary // to check whether the matrix is Hermitian at * runtime in order to guarantee the Hermitian property // of the Hermitian * matrix. 
In case it turns out to be Hermitian, it is assigned as * efficiently as // possible, if it is not, an exception is thrown. In order * to prevent this runtime overhead it is // therefore generally advisable to * assign Hermitian matrices to other Hermitian matrices.\n // In this * context it is especially noteworthy that in contrast to additions and * subtractions the // multiplication of two Hermitian matrices does not * necessarily result in another Hermitian matrix: * * \code HermitianMatrix< DynamicMatrix<double> > A, B, C; * * C = A + B; // Results in a Hermitian matrix; no runtime overhead C = A - B; * // Results in a Hermitian matrix; no runtime overhead C = A * B; // Is * not guaranteed to result in a Hermitian matrix; some runtime overhead * \endcode * * // \n Previous: \ref adaptors_symmetric_matrices &nbsp; &nbsp; Next: \ref * adaptors_triangular_matrices */ //************************************************************************************************* //**Triangular Matrices **************************************************************************** /* * !\page adaptors_triangular_matrices Triangular Matrices // // * \tableofcontents // // // \n \section adaptors_triangular_matrices_general * Triangular Matrices // <hr> // // Triangular matrices come in three * flavors: Lower triangular matrices provide the compile time // guarantee * to be square matrices and that the upper part of the matrix contains only * default // elements that cannot be modified. Upper triangular matrices on * the other hand provide the // compile time guarantee to be square and that * the lower part of the matrix contains only fixed // default elements. * Finally, diagonal matrices provide the compile time guarantee to be square * // and that both the lower and upper part of the matrix contain only * immutable default elements. // These properties can be exploited to gain * higher performance and/or to save memory. Within the // \b Blaze library, * several kinds of lower and upper triangular and diagonal matrices are * realized // by the following class templates: // // Lower triangular * matrices: // - <b>\ref adaptors_triangular_matrices_lowermatrix</b> // - * <b>\ref adaptors_triangular_matrices_unilowermatrix</b> // - <b>\ref * adaptors_triangular_matrices_strictlylowermatrix</b> // // Upper * triangular matrices: // - <b>\ref * adaptors_triangular_matrices_uppermatrix</b> // - <b>\ref * adaptors_triangular_matrices_uniuppermatrix</b> // - <b>\ref * adaptors_triangular_matrices_strictlyuppermatrix</b> // // Diagonal * matrices // - <b>\ref adaptors_triangular_matrices_diagonalmatrix</b> // * // // \n \section adaptors_triangular_matrices_lowermatrix LowerMatrix // * <hr> // // The blaze::LowerMatrix class template is an adapter for * existing dense and sparse matrix types. 
// It inherits the properties and * the interface of the given matrix type \c MT and extends it by // * enforcing the additional invariant that all matrix elements above the * diagonal are 0 (lower // triangular matrix): * * \f[\left(\begin{array}{*{5}{c}} l_{0,0} & 0 & 0 & \cdots & 0 * \\ l_{1,0} & l_{1,1} & 0 & \cdots & 0 \\ l_{2,0} & l_{2,1} & * l_{2,2} & \cdots & 0 \\ \vdots & \vdots & \vdots & \ddots & * \vdots \\ l_{N,0} & l_{N,1} & l_{N,2} & \cdots & l_{N,N} \\ * \end{array}\right).\f] * * // It can be included via the header file * * \code #include <blaze/math/LowerMatrix.h> \endcode * * // The type of the adapted matrix can be specified via the first template * parameter: * * \code template< typename MT > class LowerMatrix; \endcode * * // \c MT specifies the type of the matrix to be adapted. blaze::LowerMatrix * can be used with any // non-cv-qualified, non-reference, non-pointer, * non-expression dense or sparse matrix type. Note // that the given matrix * type must be either resizable (as for instance blaze::HybridMatrix or // * blaze::DynamicMatrix) or must be square at compile time (as for instance * blaze::StaticMatrix). // // The following examples give an impression of * several possible lower matrices: * * \code using blaze::unaligned; using blaze::unpadded; using blaze::rowMajor; * using blaze::columnMajor; * * // Definition of a 3x3 row-major dense lower matrix with static memory * blaze::LowerMatrix< blaze::StaticMatrix<int,3UL,3UL,rowMajor> > A; * * // Definition of a resizable column-major dense lower matrix based on * HybridMatrix blaze::LowerMatrix< * blaze::HybridMatrix<float,4UL,4UL,columnMajor> B; * * // Definition of a resizable row-major dense lower matrix based on * DynamicMatrix blaze::LowerMatrix< blaze::DynamicMatrix<double,rowMajor> > * C; * * // Definition of a fixed size row-major dense lower matrix based on * CustomMatrix blaze::LowerMatrix< * blaze::CustomMatrix<double,unaligned,unpadded,rowMajor> > D; * * // Definition of a compressed row-major single precision lower matrix * blaze::LowerMatrix< blaze::CompressedMatrix<float,rowMajor> > E; \endcode * * // The storage order of a lower matrix is depending on the storage order of * the adapted matrix // type \c MT. In case the adapted matrix is stored in * a row-wise fashion (i.e. is specified // as blaze::rowMajor), the lower * matrix will also be a row-major matrix. Otherwise, if the // adapted * matrix is column-major (i.e. is specified as blaze::columnMajor), the * lower matrix // will also be a column-major matrix. // // // \n \section * adaptors_triangular_matrices_unilowermatrix UniLowerMatrix // <hr> // // * The blaze::UniLowerMatrix class template is an adapter for existing dense * and sparse matrix // types. 
It inherits the properties and the interface * of the given matrix type \c MT and extends // it by enforcing the * additional invariant that all diagonal matrix elements are 1 and all * matrix // elements above the diagonal are 0 (lower unitriangular matrix): * * \f[\left(\begin{array}{*{5}{c}} 1 & 0 & 0 & \cdots & 0 * \\ l_{1,0} & 1 & 0 & \cdots & 0 \\ l_{2,0} & l_{2,1} & 1 * & \cdots & 0 \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ * l_{N,0} & l_{N,1} & l_{N,2} & \cdots & 1 \\ \end{array}\right).\f] * * // It can be included via the header file * * \code #include <blaze/math/UniLowerMatrix.h> \endcode * * // The type of the adapted matrix can be specified via the first template * parameter: * * \code template< typename MT > class UniLowerMatrix; \endcode * * // \c MT specifies the type of the matrix to be adapted. * blaze::UniLowerMatrix can be used with any // non-cv-qualified, * non-reference, non-pointer, non-expression dense or sparse matrix type. * Also, // the given matrix type must have numeric element types (i.e. all * integral types except \c bool, // floating point and complex types). Note * that the given matrix type must be either resizable (as // for instance * blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile * time (as // for instance blaze::StaticMatrix). // // The following * examples give an impression of several possible lower unitriangular * matrices: * * \code // Definition of a 3x3 row-major dense unilower matrix with static * memory blaze::UniLowerMatrix< * blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A; * * // Definition of a resizable column-major dense unilower matrix based on * HybridMatrix blaze::UniLowerMatrix< * blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> B; * * // Definition of a resizable row-major dense unilower matrix based on * DynamicMatrix blaze::UniLowerMatrix< * blaze::DynamicMatrix<double,blaze::rowMajor> > C; * * // Definition of a compressed row-major single precision unilower matrix * blaze::UniLowerMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D; * \endcode * * // The storage order of a lower unitriangular matrix is depending on the * storage order of the // adapted matrix type \c MT. In case the adapted * matrix is stored in a row-wise fashion (i.e. // is specified as * blaze::rowMajor), the unilower matrix will also be a row-major matrix. // * Otherwise if the adapted matrix is column-major (i.e. is specified as * blaze::columnMajor), // the unilower matrix will also be a column-major * matrix. // // // \n \section * adaptors_triangular_matrices_strictlylowermatrix StrictlyLowerMatrix // * <hr> // // The blaze::StrictlyLowerMatrix class template is an adapter for * existing dense and sparse matrix // types. 
It inherits the properties and * the interface of the given matrix type \c MT and extends // it by * enforcing the additional invariant that all diagonal matrix elements and * all matrix // elements above the diagonal are 0 (strictly lower triangular * matrix): * * \f[\left(\begin{array}{*{5}{c}} 0 & 0 & 0 & \cdots & 0 * \\ l_{1,0} & 0 & 0 & \cdots & 0 \\ l_{2,0} & l_{2,1} & 0 * & \cdots & 0 \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ * l_{N,0} & l_{N,1} & l_{N,2} & \cdots & 0 \\ \end{array}\right).\f] * * // It can be included via the header file * * \code #include <blaze/math/StrictlyLowerMatrix.h> \endcode * * // The type of the adapted matrix can be specified via the first template * parameter: * * \code template< typename MT > class StrictlyLowerMatrix; \endcode * * // \c MT specifies the type of the matrix to be adapted. * blaze::StrictlyLowerMatrix can be used // with any non-cv-qualified, * non-reference, non-pointer, non-expression dense or sparse matrix // type. * Note that the given matrix type must be either resizable (as for instance * // blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at * compile time (as for instance // blaze::StaticMatrix). // // The following * examples give an impression of several possible strictly lower triangular * matrices: * * \code // Definition of a 3x3 row-major dense strictly lower matrix with * static memory blaze::StrictlyLowerMatrix< * blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A; * * // Definition of a resizable column-major dense strictly lower matrix based * on HybridMatrix blaze::StrictlyLowerMatrix< * blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> B; * * // Definition of a resizable row-major dense strictly lower matrix based on * DynamicMatrix blaze::StrictlyLowerMatrix< * blaze::DynamicMatrix<double,blaze::rowMajor> > C; * * // Definition of a compressed row-major single precision strictly lower * matrix blaze::StrictlyLowerMatrix< * blaze::CompressedMatrix<float,blaze::rowMajor> > D; \endcode * * // The storage order of a strictly lower triangular matrix is depending on * the storage order of // the adapted matrix type \c MT. In case the adapted * matrix is stored in a row-wise fashion (i.e. // is specified as * blaze::rowMajor), the strictly lower matrix will also be a row-major * matrix. // Otherwise if the adapted matrix is column-major (i.e. is * specified as blaze::columnMajor), // the strictly lower matrix will also * be a column-major matrix. // // // \n \section * adaptors_triangular_matrices_uppermatrix UpperMatrix // <hr> // // The * blaze::UpperMatrix class template is an adapter for existing dense and * sparse matrix types. // It inherits the properties and the interface of * the given matrix type \c MT and extends it by // enforcing the additional * invariant that all matrix elements below the diagonal are 0 (upper // * triangular matrix): * * \f[\left(\begin{array}{*{5}{c}} u_{0,0} & u_{0,1} & u_{0,2} & \cdots & * u_{0,N} \\ 0 & u_{1,1} & u_{1,2} & \cdots & u_{1,N} \\ 0 & 0 * & u_{2,2} & \cdots & u_{2,N} \\ \vdots & \vdots & \vdots & \ddots & * \vdots \\ 0 & 0 & 0 & \cdots & u_{N,N} \\ * \end{array}\right).\f] * * // It can be included via the header file * * \code #include <blaze/math/UpperMatrix.h> \endcode * * // The type of the adapted matrix can be specified via the first template * parameter: * * \code template< typename MT > class UpperMatrix; \endcode * * // \c MT specifies the type of the matrix to be adapted. 
blaze::UpperMatrix * can be used with any // non-cv-qualified, non-reference, non-pointer, * non-expression dense or sparse matrix type. Note // that the given matrix * type must be either resizable (as for instance blaze::HybridMatrix or // * blaze::DynamicMatrix) or must be square at compile time (as for instance * blaze::StaticMatrix). // // The following examples give an impression of * several possible upper matrices: * * \code // Definition of a 3x3 row-major dense upper matrix with static memory * blaze::UpperMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A; * * // Definition of a resizable column-major dense upper matrix based on * HybridMatrix blaze::UpperMatrix< * blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> B; * * // Definition of a resizable row-major dense upper matrix based on * DynamicMatrix blaze::UpperMatrix< * blaze::DynamicMatrix<double,blaze::rowMajor> > C; * * // Definition of a compressed row-major single precision upper matrix * blaze::UpperMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D; * \endcode * * // The storage order of an upper matrix is depending on the storage order of * the adapted matrix // type \c MT. In case the adapted matrix is stored in * a row-wise fashion (i.e. is specified // as blaze::rowMajor), the upper * matrix will also be a row-major matrix. Otherwise, if the // adapted * matrix is column-major (i.e. is specified as blaze::columnMajor), the * upper matrix // will also be a column-major matrix. // // // \n \section * adaptors_triangular_matrices_uniuppermatrix UniUpperMatrix // <hr> // // * The blaze::UniUpperMatrix class template is an adapter for existing dense * and sparse matrix // types. It inherits the properties and the interface * of the given matrix type \c MT and extends // it by enforcing the * additional invariant that all diagonal matrix elements are 1 and all * matrix // elements below the diagonal are 0 (upper unitriangular matrix): * * \f[\left(\begin{array}{*{5}{c}} 1 & u_{0,1} & u_{0,2} & \cdots & * u_{0,N} \\ 0 & 1 & u_{1,2} & \cdots & u_{1,N} \\ 0 & 0 * & 1 & \cdots & u_{2,N} \\ \vdots & \vdots & \vdots & \ddots & * \vdots \\ 0 & 0 & 0 & \cdots & 1 \\ * \end{array}\right).\f] * * // It can be included via the header file * * \code #include <blaze/math/UniUpperMatrix.h> \endcode * * // The type of the adapted matrix can be specified via the first template * parameter: * * \code template< typename MT > class UniUpperMatrix; \endcode * * // \c MT specifies the type of the matrix to be adapted. * blaze::UniUpperMatrix can be used with any // non-cv-qualified, * non-reference, non-pointer, non-expression dense or sparse matrix type. * Also, // the given matrix type must have numeric element types (i.e. all * integral types except \c bool, // floating point and complex types). Note * that the given matrix type must be either resizable (as // for instance * blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile * time (as // for instance blaze::StaticMatrix). 
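 * // Before the definition examples below, a short sketch (added for illustration, not part of the
 * // original text) of what the unitriangular invariant means in practice: only elements above the
 * // diagonal can be modified, the diagonal always reads as 1, and the lower part always reads as 0.
 * // The names \c U, \c d and \c l are illustrative:
 *
 * \code
 * using blaze::DynamicMatrix;
 * using blaze::UniUpperMatrix;
 *
 * UniUpperMatrix< DynamicMatrix<double> > U( 3 );  // 3x3 upper unitriangular matrix
 *
 * U(0,1) = 2.0;        // OK: element in the upper part
 * U(1,2) = 4.0;        // OK: element in the upper part
 *
 * double d = U(1,1);   // Diagonal elements are fixed to 1 by the adaptor
 * double l = U(2,0);   // Elements below the diagonal are fixed to 0
 * \endcode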
// // The following * examples give an impression of several possible upper unitriangular * matrices: * * \code // Definition of a 3x3 row-major dense uniupper matrix with static * memory blaze::UniUpperMatrix< * blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A; * * // Definition of a resizable column-major dense uniupper matrix based on * HybridMatrix blaze::UniUpperMatrix< * blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> B; * * // Definition of a resizable row-major dense uniupper matrix based on * DynamicMatrix blaze::UniUpperMatrix< * blaze::DynamicMatrix<double,blaze::rowMajor> > C; * * // Definition of a compressed row-major single precision uniupper matrix * blaze::UniUpperMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D; * \endcode * * // The storage order of an upper unitriangular matrix is depending on the * storage order of the // adapted matrix type \c MT. In case the adapted * matrix is stored in a row-wise fashion (i.e. // is specified as * blaze::rowMajor), the uniupper matrix will also be a row-major matrix. // * Otherwise, if the adapted matrix is column-major (i.e. is specified as * blaze::columnMajor), // the uniupper matrix will also be a column-major * matrix. // // // \n \section * adaptors_triangular_matrices_strictlyuppermatrix StrictlyUpperMatrix // * <hr> // // The blaze::StrictlyUpperMatrix class template is an adapter for * existing dense and sparse matrix // types. It inherits the properties and * the interface of the given matrix type \c MT and extends // it by * enforcing the additional invariant that all diagonal matrix elements and * all matrix // elements below the diagonal are 0 (strictly upper triangular * matrix): * * \f[\left(\begin{array}{*{5}{c}} 0 & u_{0,1} & u_{0,2} & \cdots & * u_{0,N} \\ 0 & 0 & u_{1,2} & \cdots & u_{1,N} \\ 0 & 0 * & 0 & \cdots & u_{2,N} \\ \vdots & \vdots & \vdots & \ddots & * \vdots \\ 0 & 0 & 0 & \cdots & 0 \\ * \end{array}\right).\f] * * // It can be included via the header file * * \code #include <blaze/math/StrictlyUpperMatrix.h> \endcode * * // The type of the adapted matrix can be specified via the first template * parameter: * * \code template< typename MT > class StrictlyUpperMatrix; \endcode * * // \c MT specifies the type of the matrix to be adapted. * blaze::StrictlyUpperMatrix can be used // with any non-cv-qualified, * non-reference, non-pointer, non-expression dense or sparse matrix // type. * Note that the given matrix type must be either resizable (as for instance * // blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at * compile time (as for instance // blaze::StaticMatrix). 
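 * // Analogously, a brief sketch (added for illustration, not part of the original text) of the
 * // strictly upper invariant: only elements strictly above the diagonal are modifiable, while the
 * // diagonal and the lower part are immutable zeros. The names \c S, \c d and \c l are illustrative:
 *
 * \code
 * using blaze::DynamicMatrix;
 * using blaze::StrictlyUpperMatrix;
 *
 * StrictlyUpperMatrix< DynamicMatrix<double> > S( 3 );  // 3x3 strictly upper triangular matrix
 *
 * S(0,1) = 2.0;        // OK: element strictly above the diagonal
 * S(1,2) = 4.0;        // OK: element strictly above the diagonal
 *
 * double d = S(1,1);   // Diagonal elements are fixed to 0
 * double l = S(2,0);   // Elements below the diagonal are fixed to 0
 * \endcode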
// // The following * examples give an impression of several possible strictly upper triangular * matrices: * * \code // Definition of a 3x3 row-major dense strictly upper matrix with * static memory blaze::StrictlyUpperMatrix< * blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A; * * // Definition of a resizable column-major dense strictly upper matrix based * on HybridMatrix blaze::StrictlyUpperMatrix< * blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> B; * * // Definition of a resizable row-major dense strictly upper matrix based on * DynamicMatrix blaze::StrictlyUpperMatrix< * blaze::DynamicMatrix<double,blaze::rowMajor> > C; * * // Definition of a compressed row-major single precision strictly upper * matrix blaze::StrictlyUpperMatrix< * blaze::CompressedMatrix<float,blaze::rowMajor> > D; \endcode * * // The storage order of a strictly upper triangular matrix is depending on * the storage order of // the adapted matrix type \c MT. In case the adapted * matrix is stored in a row-wise fashion (i.e. // is specified as * blaze::rowMajor), the strictly upper matrix will also be a row-major * matrix. // Otherwise, if the adapted matrix is column-major (i.e. is * specified as blaze::columnMajor), // the strictly upper matrix will also * be a column-major matrix. // // // \n \section * adaptors_triangular_matrices_diagonalmatrix DiagonalMatrix // <hr> // // * The blaze::DiagonalMatrix class template is an adapter for existing dense * and sparse matrix // types. It inherits the properties and the interface * of the given matrix type \c MT and extends // it by enforcing the * additional invariant that all matrix elements above and below the diagonal * // are 0 (diagonal matrix): * * \f[\left(\begin{array}{*{5}{c}} l_{0,0} & 0 & 0 & \cdots & 0 * \\ 0 & l_{1,1} & 0 & \cdots & 0 \\ 0 & 0 & * l_{2,2} & \cdots & 0 \\ \vdots & \vdots & \vdots & \ddots & * \vdots \\ 0 & 0 & 0 & \cdots & l_{N,N} \\ * \end{array}\right).\f] * * // It can be included via the header file * * \code #include <blaze/math/DiagonalMatrix.h> \endcode * * // The type of the adapted matrix can be specified via the first template * parameter: * * \code template< typename MT > class DiagonalMatrix; \endcode * * // \c MT specifies the type of the matrix to be adapted. * blaze::DiagonalMatrix can be used with any // non-cv-qualified, * non-reference, non-pointer, non-expression dense or sparse matrix type. * Note // that the given matrix type must be either resizable (as for * instance blaze::HybridMatrix or // blaze::DynamicMatrix) or must be square * at compile time (as for instance blaze::StaticMatrix). // // The following * examples give an impression of several possible diagonal matrices: * * \code // Definition of a 3x3 row-major dense diagonal matrix with static * memory blaze::DiagonalMatrix< * blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A; * * // Definition of a resizable column-major dense diagonal matrix based on * HybridMatrix blaze::DiagonalMatrix< * blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> B; * * // Definition of a resizable row-major dense diagonal matrix based on * DynamicMatrix blaze::DiagonalMatrix< * blaze::DynamicMatrix<double,blaze::rowMajor> > C; * * // Definition of a compressed row-major single precision diagonal matrix * blaze::DiagonalMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D; * \endcode * * // The storage order of a diagonal matrix is depending on the storage order * of the adapted matrix // type \c MT. 
In case the adapted matrix is stored * in a row-wise fashion (i.e. is specified // as blaze::rowMajor), the * diagonal matrix will also be a row-major matrix. Otherwise, if the // * adapted matrix is column-major (i.e. is specified as blaze::columnMajor), * the diagonal matrix // will also be a column-major matrix. // // // \n * \section adaptors_triangular_matrices_special_properties Special * Properties of Triangular Matrices // <hr> // // A triangular matrix is * used exactly like a matrix of the underlying, adapted matrix type \c MT. * // It also provides (nearly) the same interface as the underlying matrix * type. However, there are // some important exceptions resulting from the * triangular matrix constraint: // // -# <b>\ref * adaptors_triangular_matrices_square</b> // -# <b>\ref * adaptors_triangular_matrices_triangular</b> // -# <b>\ref * adaptors_triangular_matrices_initialization</b> // -# <b>\ref * adaptors_triangular_matrices_storage</b> // -# <b>\ref * adaptors_triangular_matrices_scaling</b> // // \n \subsection * adaptors_triangular_matrices_square Triangular Matrices Must Always be * Square! // // In case a resizable matrix is used (as for instance * blaze::HybridMatrix, blaze::DynamicMatrix, // or blaze::CompressedMatrix), * this means that the according constructors, the \c resize() and // the \c * extend() functions only expect a single parameter, which specifies both * the number of // rows and columns, instead of two (one for the number of * rows and one for the number of columns): * * \code using blaze::DynamicMatrix; using blaze::LowerMatrix; using * blaze::rowMajor; * * // Default constructed, default initialized, row-major 3x3 lower dynamic * matrix LowerMatrix< DynamicMatrix<double,rowMajor> > A( 3 ); * * // Resizing the matrix to 5x5 A.resize( 5 ); * * // Extending the number of rows and columns by 2, resulting in a 7x7 matrix * A.extend( 2 ); \endcode * * // In case a matrix with a fixed size is used (as for instance * blaze::StaticMatrix), the number // of rows and number of columns must be * specified equally: * * \code using blaze::StaticMatrix; using blaze::LowerMatrix; using * blaze::columnMajor; * * // Correct setup of a fixed size column-major 3x3 lower static matrix * LowerMatrix< StaticMatrix<int,3UL,3UL,columnMajor> > A; * * // Compilation error: the provided matrix type is not a square matrix type * LowerMatrix< StaticMatrix<int,3UL,4UL,columnMajor> > B; \endcode * * // \n \subsection adaptors_triangular_matrices_triangular The Triangular * Property is Always Enforced! // // This means that it is only allowed to * modify elements in the lower part or the diagonal of // a lower triangular * matrix and in the upper part or the diagonal of an upper triangular * matrix. // Unitriangular and strictly triangular matrices are even more * restrictive and don't allow the // modification of diagonal elements. * Also, triangular matrices can only be assigned matrices that // don't * violate their triangular property. The following example demonstrates this * restriction // by means of the blaze::LowerMatrix adaptor. For examples * with other triangular matrix types // see the according class * documentations. 
* * \code using blaze::CompressedMatrix; using blaze::DynamicMatrix; using * blaze::StaticMatrix; using blaze::LowerMatrix; using blaze::rowMajor; * * using CompressedLower = LowerMatrix< CompressedMatrix<double,rowMajor> >; * * // Default constructed, row-major 3x3 lower compressed matrix CompressedLower * A( 3 ); * * // Initializing elements via the function call operator A(0,0) = 1.0; // * Initialization of the diagonal element (0,0) A(2,0) = 2.0; // * Initialization of the lower element (2,0) A(1,2) = 9.0; // Throws an * exception; invalid modification of upper element * * // Inserting two more elements via the insert() function A.insert( 1, 0, 3.0 * ); // Inserting the lower element (1,0) A.insert( 2, 1, 4.0 ); // * Inserting the lower element (2,1) A.insert( 0, 2, 9.0 ); // Throws an * exception; invalid insertion of upper element * * // Appending an element via the append() function A.reserve( 1, 3 ); // * Reserving enough capacity in row 1 A.append( 1, 1, 5.0 ); // Appending * the diagonal element (1,1) A.append( 1, 2, 9.0 ); // Throws an exception; * appending an element in the upper part * * // Access via a non-const iterator CompressedLower::Iterator it = A.begin(1); * it = 6.0; // Modifies the lower element (1,0) ++it; it = 9.0; // * Modifies the diagonal element (1,1) * * // Erasing elements via the erase() function A.erase( 0, 0 ); // Erasing the * diagonal element (0,0) A.erase( 2, 0 ); // Erasing the lower element * (2,0) * * // Construction from a lower dense matrix StaticMatrix<double,3UL,3UL> B{ { * 3.0, 0.0, 0.0 }, { 8.0, 0.0, 0.0 }, { -2.0, -1.0, 4.0 } }; * * LowerMatrix< DynamicMatrix<double,rowMajor> > C( B ); // OK * * // Assignment of a non-lower dense matrix StaticMatrix<double,3UL,3UL> D{ { * 3.0, 0.0, -2.0 }, { 8.0, 0.0, 0.0 }, { -2.0, -1.0, 4.0 } }; * * C = D; // Throws an exception; lower matrix invariant would be violated! * \endcode * * // The triangular property is also enforced during the construction of * triangular custom matrices: // In case the given array of elements does * not represent the according triangular matrix type, a // \c * std::invalid_argument exception is thrown: * * \code using blaze::CustomMatrix; using blaze::LowerMatrix; using * blaze::unaligned; using blaze::unpadded; using blaze::rowMajor; * * using CustomLower = LowerMatrix< * CustomMatrix<double,unaligned,unpadded,rowMajor> >; * * // Creating a 3x3 lower custom matrix from a properly initialized array * double array[9] = { 1.0, 0.0, 0.0, 2.0, 3.0, 0.0, 4.0, 5.0, 6.0 }; * CustomLower A( array, 3UL ); // OK * * // Attempt to create a second 3x3 lower custom matrix from an uninitialized * array std::unique_ptr<double[]> memory( new double[9UL] ); CustomLower B( * memory.get(), 3UL ); // Throws an exception \endcode * * // Finally, the triangular matrix property is enforced for views (rows, * columns, submatrices, ...) // on the triangular matrix. The following * example demonstrates that modifying the elements of an // entire row and * submatrix of a lower matrix only affects the lower and diagonal matrix * elements. // Again, this example uses blaze::LowerMatrix, for examples * with other triangular matrix types // see the according class * documentations. 
* * \code using blaze::DynamicMatrix; using blaze::LowerMatrix; * * // Setup of the lower matrix // // ( 0 0 0 0 ) // A = ( 1 2 0 0 ) // * ( 0 3 0 0 ) // ( 4 0 5 0 ) // LowerMatrix< DynamicMatrix<int> > A( 4 * ); A(1,0) = 1; A(1,1) = 2; A(2,1) = 3; A(3,0) = 4; A(3,2) = 5; * * // Setting the lower and diagonal elements in the 2nd row to 9 results in the * matrix // // ( 0 0 0 0 ) // A = ( 1 2 0 0 ) // ( 9 9 9 0 ) * // ( 4 0 5 0 ) // row( A, 2 ) = 9; * * // Setting the lower and diagonal elements in the 1st and 2nd column to 7 * results in // // ( 0 0 0 0 ) // A = ( 1 7 0 0 ) // ( 9 7 7 0 * ) // ( 4 7 7 0 ) // submatrix( A, 0, 1, 4, 2 ) = 7; \endcode * * // The next example demonstrates the (compound) assignment to rows/columns * and submatrices of // triangular matrices. Since only lower/upper and * potentially diagonal elements may be modified // the matrix to be assigned * must be structured such that the triangular matrix invariant of the // * matrix is preserved. Otherwise a \c std::invalid_argument exception is * thrown: * * \code using blaze::DynamicMatrix; using blaze::DynamicVector; using * blaze::LowerMatrix; using blaze::rowVector; * * // Setup of two default 4x4 lower matrices LowerMatrix< DynamicMatrix<int> > * A1( 4 ), A2( 4 ); * * // Setup of a 4-dimensional vector // // v = ( 1 2 3 0 ) // * DynamicVector<int,rowVector> v{ 1, 2, 3, 0 }; * * // OK: Assigning v to the 2nd row of A1 preserves the lower matrix invariant * // // ( 0 0 0 0 ) // A1 = ( 0 0 0 0 ) // ( 1 2 3 0 ) // * ( 0 0 0 0 ) // row( A1, 2 ) = v; // OK * * // Error: Assigning v to the 1st row of A1 violates the lower matrix * invariant! The element // marked with X cannot be assigned and triggers * an exception. // // ( 0 0 0 0 ) // A1 = ( 1 2 X 0 ) // ( 1 * 2 3 0 ) // ( 0 0 0 0 ) // row( A1, 1 ) = v; // Assignment throws * an exception! * * // Setup of the 3x2 dynamic matrix // // ( 0 0 ) // B = ( 7 0 ) // * ( 8 9 ) // DynamicMatrix<int> B( 3UL, 2UL, 0 ); B(1,0) = 7; B(2,0) = 8; * B(2,1) = 9; * * // OK: Assigning B to a submatrix of A2 such that the lower matrix invariant * can be preserved // // ( 0 0 0 0 ) // A2 = ( 0 7 0 0 ) // * ( 0 8 9 0 ) // ( 0 0 0 0 ) // submatrix( A2, 0UL, 1UL, 3UL, 2UL ) = * B; // OK * * // Error: Assigning B to a submatrix of A2 such that the lower matrix * invariant cannot be // preserved! The elements marked with X cannot be * assigned without violating the invariant! // // ( 0 0 0 0 ) // A2 * = ( 0 7 X 0 ) // ( 0 8 8 X ) // ( 0 0 0 0 ) // submatrix( * A2, 0UL, 2UL, 3UL, 2UL ) = B; // Assignment throws an exception! \endcode * * // \n \subsection adaptors_triangular_matrices_initialization The Elements of * a Dense Triangular Matrix are Always Default Initialized! 
// // Although * this results in a small loss of efficiency during the creation of a dense * lower or // upper matrix this initialization is important since otherwise * the lower/upper matrix property // of dense lower matrices would not be * guaranteed: * * \code using blaze::DynamicMatrix; using blaze::LowerMatrix; using * blaze::UpperMatrix; * * // Uninitialized, 5x5 row-major dynamic matrix DynamicMatrix<int,rowMajor> A( * 5, 5 ); * * // 5x5 row-major lower dynamic matrix with default initialized upper matrix * LowerMatrix< DynamicMatrix<int,rowMajor> > B( 5 ); * * // 7x7 column-major upper dynamic matrix with default initialized lower * matrix UpperMatrix< DynamicMatrix<int,columnMajor> > C( 7 ); * * // 3x3 row-major diagonal dynamic matrix with default initialized lower and * upper matrix DiagonalMatrix< DynamicMatrix<int,rowMajor> > D( 3 ); * \endcode * * // \n \subsection adaptors_triangular_matrices_storage Dense Triangular * Matrices Store All Elements! // // All dense triangular matrices store all * \f$ N \times N \f$ elements, including the immutable // elements in the * lower or upper part, respectively. Therefore dense triangular matrices * don't // provide any kind of memory reduction! There are two main reasons * for this: First, storing also // the zero elements guarantees maximum * performance for many algorithms that perform vectorized // operations on * the triangular matrices, which is especially true for small dense * matrices. // Second, conceptually all triangular adaptors merely restrict * the interface to the matrix type // \c MT and do not change the data * layout or the underlying matrix type. // // This property matters most for * diagonal matrices. In order to achieve the perfect combination // of * performance and memory consumption for a diagonal matrix it is recommended * to use dense // matrices for small diagonal matrices and sparse matrices * for large diagonal matrices: * * \code // Recommendation 1: use dense matrices for small diagonal matrices * using SmallDiagonalMatrix = blaze::DiagonalMatrix< * blaze::StaticMatrix<float,3UL,3UL> >; * * // Recommendation 2: use sparse matrices for large diagonal matrices using * LargeDiagonalMatrix = blaze::DiagonalMatrix< * blaze::CompressedMatrix<float> >; \endcode * * // \n \subsection adaptors_triangular_matrices_scaling Unitriangular Matrices * Cannot Be Scaled! // // Since the diagonal elements of a unitriangular * matrix have a fixed value of 1 it is not possible // to self-scale such a * matrix: * * \code using blaze::DynamicMatrix; using blaze::UniLowerMatrix; * * UniLowerMatrix< DynamicMatrix<int> > A( 4 ); * * A *= 2; // Compilation error; Scale operation is not available on an * unilower matrix A /= 2; // Compilation error; Scale operation is * not available on an unilower matrix A.scale( 2 ); // Compilation error; * Scale function is not available on an unilower matrix * * A = A * 2; // Throws an exception; Invalid assignment of non-unilower matrix * A = A / 2; // Throws an exception; Invalid assignment of non-unilower * matrix \endcode * * // \n \section adaptors_triangular_matrices_arithmetic_operations Arithmetic * Operations // <hr> // // A lower and upper triangular matrix can * participate in numerical operations in any way any other // dense or * sparse matrix can participate. It can also be combined with any other * dense or sparse // vector or matrix. 
The following code example gives an * impression of the use of blaze::LowerMatrix // within arithmetic * operations: * * \code using blaze::LowerMatrix; using blaze::DynamicMatrix; using * blaze::HybridMatrix; using blaze::StaticMatrix; using * blaze::CompressedMatrix; using blaze::rowMajor; using blaze::columnMajor; * * DynamicMatrix<double,rowMajor> A( 3, 3 ); CompressedMatrix<double,rowMajor> * B( 3, 3 ); * * LowerMatrix< DynamicMatrix<double,rowMajor> > C( 3 ); LowerMatrix< * CompressedMatrix<double,rowMajor> > D( 3 ); * * LowerMatrix< HybridMatrix<float,3UL,3UL,rowMajor> > E; LowerMatrix< * StaticMatrix<float,3UL,3UL,columnMajor> > F; * * E = A + B; // Matrix addition and assignment to a row-major lower matrix * (includes runtime check) F = C - D; // Matrix subtraction and * assignment to a column-major lower matrix (only compile time check) F = A * * D; // Matrix multiplication between a dense and a sparse matrix * (includes runtime check) * * C *= 2.0; // In-place scaling of matrix C E = 2.0 * B; // Scaling of * matrix B (includes runtime check) F = C * 2.0; // Scaling of matrix C * (only compile time check) * * E += A - B; // Addition assignment (includes runtime check) F -= C + D; * // Subtraction assignment (only compile time check) F *= A * D; // * Multiplication assignment (includes runtime check) \endcode * * // Note that it is possible to assign any kind of matrix to a triangular * matrix. In case the // matrix to be assigned does not satisfy the * invariants of the triangular matrix at compile // time, a runtime check is * performed. Also note that upper triangular, diagonal, unitriangular // and * strictly triangular matrix types can be used in the same way, but may pose * some additional // restrictions (see the according class documentations). * // // // \n \section adaptors_triangular_matrices_block_matrices * Triangular Block Matrices // <hr> // // It is also possible to use * triangular block matrices: * * \code using blaze::CompressedMatrix; using blaze::DynamicMatrix; using * blaze::StaticMatrix; using blaze::LowerMatrix; using blaze::UpperMatrix; * * // Definition of a 5x5 lower block matrix based on DynamicMatrix LowerMatrix< * DynamicMatrix< StaticMatrix<int,3UL,3UL> > > A( 5 ); * * // Definition of a 7x7 upper block matrix based on CompressedMatrix * UpperMatrix< CompressedMatrix< StaticMatrix<int,3UL,3UL> > > B( 7 ); * \endcode * * // Also in this case the triangular matrix invariant is enforced, i.e. 
it is * not possible to // manipulate elements in the upper part (lower triangular * matrix) or the lower part (upper // triangular matrix) of the matrix: * * \code const StaticMatrix<int,3UL,3UL> C{ { 1, -4, 5 }, { 6, 8, -3 }, { 2, * -1, 2 } }; * * A(2,4)(1,1) = -5; // Invalid manipulation of upper matrix element; * Results in an exception B.insert( 4, 2, C ); // Invalid insertion of the * elements (4,2); Results in an exception \endcode * * // Note that unitriangular matrices are restricted to numeric element types * and therefore cannot // be used for block matrices: * * \code using blaze::CompressedMatrix; using blaze::DynamicMatrix; using * blaze::StaticMatrix; using blaze::UniLowerMatrix; using * blaze::UniUpperMatrix; * * // Compilation error: lower unitriangular matrices are restricted to numeric * element types UniLowerMatrix< DynamicMatrix< StaticMatrix<int,3UL,3UL> > > * A( 5 ); * * // Compilation error: upper unitriangular matrices are restricted to numeric * element types UniUpperMatrix< CompressedMatrix< StaticMatrix<int,3UL,3UL> * > > B( 7 ); \endcode * * // For more information on block matrices, see the tutorial on \ref * block_vectors_and_matrices. // // // \n \section * adaptors_triangular_matrices_performance Performance Considerations // * <hr> // // The \b Blaze library tries to exploit the properties of lower * and upper triangular matrices // whenever and wherever possible. Therefore * using triangular matrices instead of a general // matrices can result in a * considerable performance improvement. However, there are also // * situations when using a triangular matrix introduces some overhead. The * following examples // demonstrate several common situations where * triangular matrices can positively or negatively // impact performance. // * // \n \subsection * adaptors_triangular_matrices_matrix_matrix_multiplication Positive Impact: * Matrix/Matrix Multiplication // // When multiplying two matrices, at least * one of which is triangular, \b Blaze can exploit the // fact that either * the lower or upper part of the matrix contains only default elements and * // restrict the algorithm to the non-zero elements. The following example * demonstrates this by // means of a dense matrix/dense matrix * multiplication with lower triangular matrices: * * \code using blaze::DynamicMatrix; using blaze::LowerMatrix; using * blaze::rowMajor; using blaze::columnMajor; * * LowerMatrix< DynamicMatrix<double,rowMajor> > A; LowerMatrix< * DynamicMatrix<double,columnMajor> > B; DynamicMatrix<double,columnMajor> * C; * * // ... Resizing and initialization * * C = A * B; \endcode * * // In comparison to a general matrix multiplication, the performance * advantage is significant, // especially for large matrices. Therefore is * it highly recommended to use the blaze::LowerMatrix // and * blaze::UpperMatrix adaptors when a matrix is known to be lower or upper * triangular, // respectively. Note however that the performance advantage * is most pronounced for dense matrices // and much less so for sparse * matrices. // // \n \subsection * adaptors_triangular_matrices_matrix_vector_multiplication Positive Impact: * Matrix/Vector Multiplication // // A similar performance improvement can * be gained when using a triangular matrix in a matrix/vector // * multiplication: * * \code using blaze::DynamicMatrix; using blaze::DynamicVector; using * blaze::rowMajor; using blaze::columnVector; * * LowerMatrix< DynamicMatrix<double,rowMajor> > A; * DynamicVector<double,columnVector> x, y; * * // ... 
 *    Resizing and initialization
 *
 *    y = A * x;
 *    \endcode
 *
 * // In this example, \b Blaze also exploits the structure of the matrix and approx. halves the
 * // runtime of the multiplication. Also in case of matrix/vector multiplications the performance
 * // improvement is most pronounced for dense matrices and much less so for sparse matrices.
 * //
 * // \n \subsection adaptors_triangular_matrices_assignment Negative Impact: Assignment of a General Matrix
 * //
 * // In contrast to using a triangular matrix on the right-hand side of an assignment (i.e. for
 * // read access), which introduces absolutely no performance penalty, using a triangular matrix
 * // on the left-hand side of an assignment (i.e. for write access) may introduce additional
 * // overhead when it is assigned a general matrix, which is not triangular at compile time:
 *
 *    \code
 *    using blaze::DynamicMatrix;
 *    using blaze::LowerMatrix;
 *
 *    LowerMatrix< DynamicMatrix<double> > A, C;
 *    DynamicMatrix<double> B;
 *
 *    B = A;  // Only read-access to the lower matrix; no performance penalty
 *    C = A;  // Assignment of a lower matrix to another lower matrix; no runtime overhead
 *    C = B;  // Assignment of a general matrix to a lower matrix; some runtime overhead
 *    \endcode
 *
 * // When assigning a general (potentially not lower triangular) matrix to a lower matrix or a
 * // general (potentially not upper triangular) matrix to an upper matrix it is necessary to check
 * // whether the matrix is lower or upper at runtime in order to guarantee the triangular property
 * // of the matrix. In case it turns out to be lower or upper, respectively, it is assigned as
 * // efficiently as possible; if it is not, an exception is thrown. In order to prevent this runtime
 * // overhead it is therefore generally advisable to assign lower or upper triangular matrices to
 * // other lower or upper triangular matrices.\n
 * // In this context it is especially noteworthy that the addition, subtraction, and multiplication
 * // of two triangular matrices of the same structure always results in another triangular matrix:
 *
 *    \code
 *    LowerMatrix< DynamicMatrix<double> > A, B, C;
 *
 *    C = A + B;  // Results in a lower matrix; no runtime overhead
 *    C = A - B;  // Results in a lower matrix; no runtime overhead
 *    C = A * B;  // Results in a lower matrix; no runtime overhead
 *    \endcode
 *
 *    \code
 *    UpperMatrix< DynamicMatrix<double> > A, B, C;
 *
 *    C = A + B;  // Results in an upper matrix; no runtime overhead
 *    C = A - B;  // Results in an upper matrix; no runtime overhead
 *    C = A * B;  // Results in an upper matrix; no runtime overhead
 *    \endcode
 *
 * // \n Previous: \ref adaptors_hermitian_matrices &nbsp; &nbsp; Next: \ref views
 */
//*************************************************************************************************


//**Views ******************************************************************************************
/*!\page views Views
 * //
 * // \tableofcontents
 * //
 * //
 * // \section views_general General Concepts
 * // <hr>
 * //
 * // Views represent parts of a vector or matrix, such as a subvector, a submatrix, or a specific
 * // row, column, or band of a matrix. As such, views act as a reference to specific elements of
 * // a vector or matrix. This reference is valid and can be used in every way as any other vector
 * // or matrix can be used as long as the referenced vector or matrix is not resized or entirely
 * // destroyed. Views also act as an alias to the elements of the vector or matrix: Changes made
 * // to the elements (e.g.
modifying values, inserting or erasing elements) via the * view are immediately // visible in the vector or matrix and changes made * via the vector or matrix are immediately // visible in the view. // // It * is also possible to create nested views (compound views), such as for * instance bands of // submatrices or row selections on column selections. A * compound view also acts as reference // to specific elements of the * underlying vector or matrix and is valid as long as the underlying, // * referenced vector or matrix is not resized or entirely destroyed. // // * The \b Blaze library provides the following views on vectors and matrices: * // // Vector views: // - \ref views_subvectors // - \ref * views_element_selections // // Matrix views: // - \ref views_submatrices * // - \ref views_rows // - \ref views_row_selections // - \ref * views_columns // - \ref views_column_selections // - \ref views_bands // * // // \n \section views_examples Examples * * \code using blaze::DynamicMatrix; using blaze::StaticVector; * * // Setup of the 3x5 row-major matrix DynamicMatrix<int> A{ { 1, 0, -2, 3, * 0 }, { 0, 2, 5, -1, -1 }, { 1, 0, 0, 2, 1 } }; * * // Setup of the 2-dimensional row vector StaticVector<int,2UL,rowVector> * vec{ 18, 19 }; * * // Assigning to the elements (1,2) and (1,3) via a subvector of a row // // * ( 1 0 -2 3 0 ) // ( 0 2 18 19 -1 ) // ( 1 0 0 2 1 ) // * subvector( row( A, 1UL ), 2UL, 2UL ) = vec; * * // Switching rows 0 and 2 of A // // ( 1 0 0 2 1 ) // ( 0 2 18 19 -1 ) * // ( 1 0 -2 3 0 ) // rows<0,2>( A ) = rows<2,0>( A ); * * // Warning: It is the programmer's responsibility to ensure the view does not * outlive // the viewed vector or matrix (dangling reference)! auto * row1 = row<1UL>( DynamicMatrix<int>{ { 1, 2, 3 }, { 4, 5, 6 } } ); * \endcode * * // \n Previous: \ref adaptors_triangular_matrices &nbsp; &nbsp; Next: \ref * views_subvectors */ //************************************************************************************************* //**Subvectors ************************************************************************************* /* * !\page views_subvectors Subvectors // // \tableofcontents // // // * Subvectors provide views on a specific part of a dense or sparse vector. * As such, subvectors // act as a reference to a specific range within a * vector. This reference is valid and can be // used in every way any other * dense or sparse vector can be used as long as the vector containing // the * subvector is not resized or entirely destroyed. The subvector also acts as * an alias to the // vector elements in the specified range: Changes made to * the elements (e.g. modifying values, // inserting or erasing elements) are * immediately visible in the vector and changes made via the // vector are * immediately visible in the subvector. // // // \n \section * views_subvectors_setup Setup of Subvectors // <hr> // // A view on a dense * or sparse subvector can be created very conveniently via the \c * subvector() // function. It can be included via the header file * * \code #include <blaze/math/Subvector.h> \endcode * * // The first parameter specifies the offset of the subvector within the * underlying dense or sparse // vector, the second parameter specifies the * size of the subvector. The two parameters can be // specified either at * compile time or at runtime: * * \code blaze::DynamicVector<double,blaze::rowVector> x; // ... Resizing and * initialization * * // Create a subvector from index 4 with a size of 12 (i.e. 
in the range * [4..15]) (compile time arguments) auto sv1 = subvector<4UL,12UL>( x ); * * // Create a subvector from index 8 with a size of 16 (i.e. in the range * [8..23]) (runtime arguments) auto sv2 = subvector( x, 8UL, 16UL ); * \endcode * * // The \c subvector() function returns an expression representing the * subvector view. The type of // this expression depends on the given * subvector arguments, primarily the type of the vector and // the compile * time arguments. If the type is required, it can be determined via the \c * decltype // specifier: * * \code using VectorType = blaze::DynamicVector<int>; using SubvectorType = * decltype( blaze::subvector<4UL,12UL>( std::declval<VectorType>() ) ); * \endcode * * // The resulting view can be treated as any other dense or sparse vector, * i.e. it can be assigned // to, it can be copied from, and it can be used * in arithmetic operations. A subvector created // from a row vector can be * used as any other row vector, a subvector created from a column vector // * can be used as any other column vector. The view can also be used on both * sides of an assignment: // The subvector can either be used as an alias to * grant write access to a specific subvector of a // vector primitive on the * left-hand side of an assignment or to grant read-access to a specific // * subvector of a vector primitive or expression on the right-hand side of an * assignment. The // following example demonstrates this in detail: * * \code blaze::DynamicVector<double,blaze::rowVector> x; * blaze::CompressedVector<double,blaze::rowVector> y; * blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and * initialization * * // Create a subvector from index 0 with a size of 10 (i.e. in the range * [0..9]) auto sv = subvector( x, 0UL, 10UL ); * * // Setting the first ten elements of x to the 2nd row of matrix A sv = row( * A, 2UL ); * * // Setting the second ten elements of x to y subvector( x, 10UL, 10UL ) = y; * * // Setting the 3rd row of A to a subvector of x row( A, 3UL ) = subvector( x, * 3UL, 10UL ); * * // Setting x to a subvector of the result of the addition between y and the * 1st row of A x = subvector( y + row( A, 1UL ), 2UL, 5UL ); \endcode * * // \warning It is the programmer's responsibility to ensure the subvector * does not outlive the // viewed vector: * * \code // Creating a subvector on a temporary vector; results in a dangling * reference! auto sv = subvector<1UL,3UL>( DynamicVector<int>{ 1, 2, 3, 4, 5 * } ); \endcode * * // \n \section views_subvectors_element_access Element Access // <hr> // // * The elements of a subvector can be directly accessed via the subscript * operator: * * \code blaze::DynamicVector<double,blaze::rowVector> v; // ... Resizing and * initialization * * // Creating an 8-dimensional subvector, starting from index 4 auto sv = * subvector( v, 4UL, 8UL ); * * // Setting the 1st element of the subvector, which corresponds to // the * element at index 5 in vector v sv[1] = 2.0; \endcode * * // The numbering of the subvector elements is * * \f[\left(\begin{array}{*{5}{c}} 0 & 1 & 2 & \cdots & N-1 \\ * \end{array}\right),\f] * * // where N is the specified size of the subvector. Alternatively, the * elements of a subvector can // be traversed via iterators. 
Just as with * vectors, in case of non-const subvectors, \c begin() // and \c end() * return an iterator, which allows to manipulate the elements, in case of * constant // subvectors an iterator to immutable elements is returned: * * \code blaze::DynamicVector<int,blaze::rowVector> v( 256UL ); // ... Resizing * and initialization * * // Creating a reference to a specific subvector of vector v auto sv = * subvector( v, 16UL, 64UL ); * * // Traversing the elements via iterators to non-const elements for( auto * it=sv.begin(); it!=sv.end(); ++it ) { it = ...; // OK: Write access to * the dense subvector value. ... = *it; // OK: Read access to the dense * subvector value. } * * // Traversing the elements via iterators to const elements for( auto * it=sv.cbegin(); it!=sv.cend(); ++it ) { it = ...; // Compilation error: * Assignment to the value via iterator-to-const is invalid. ... = *it; // * OK: Read access to the dense subvector value. } \endcode * * \code blaze::CompressedVector<int,blaze::rowVector> v( 256UL ); // ... * Resizing and initialization * * // Creating a reference to a specific subvector of vector v auto sv = * subvector( v, 16UL, 64UL ); * * // Traversing the elements via iterators to non-const elements for( auto * it=sv.begin(); it!=sv.end(); ++it ) { it->value() = ...; // OK: Write * access to the value of the non-zero element. ... = it->value(); // OK: * Read access to the value of the non-zero element. it->index() = ...; // * Compilation error: The index of a non-zero element cannot be changed. ... * = it->index(); // OK: Read access to the index of the sparse element. } * * // Traversing the elements via iterators to const elements for( auto * it=sv.cbegin(); it!=sv.cend(); ++it ) { it->value() = ...; // Compilation * error: Assignment to the value via iterator-to-const is invalid. ... = * it->value(); // OK: Read access to the value of the non-zero element. * it->index() = ...; // Compilation error: The index of a non-zero element * cannot be changed. ... = it->index(); // OK: Read access to the index of * the sparse element. } \endcode * * // \n \section views_subvectors_element_insertion Element Insertion // <hr> * // // Inserting/accessing elements in a sparse subvector can be done by * several alternative functions. // The following example demonstrates all * options: * * \code blaze::CompressedVector<double,blaze::rowVector> v( 256UL ); // * Non-initialized vector of size 256 * * auto sv = subvector( v, 10UL, 60UL ); // View on the range [10..69] of v * * // The subscript operator provides access to all possible elements of the * sparse subvector, // including the zero elements. In case the subscript * operator is used to access an element // that is currently not stored in * the sparse subvector, the element is inserted into the // subvector. * sv[42] = 2.0; * * // The second operation for inserting elements is the set() function. In case * the element is // not contained in the subvector it is inserted into the * subvector, if it is already contained // in the subvector its value is * modified. sv.set( 45UL, -1.2 ); * * // An alternative for inserting elements into the subvector is the insert() * function. However, // it inserts the element only in case the element is * not already contained in the subvector. sv.insert( 50UL, 3.7 ); * * // Just as in case of vectors, elements can also be inserted via the append() * function. 
In // case of subvectors, append() also requires that the * appended element's index is strictly // larger than the currently largest * non-zero index of the subvector and that the subvector's // capacity is * large enough to hold the new element. Note however that due to the nature * of // a subvector, which may be an alias to the middle of a sparse vector, * the append() function // does not work as efficiently for a subvector as * it does for a vector. sv.reserve( 10UL ); sv.append( 51UL, -2.1 ); * \endcode * * // \n \section views_subvectors_common_operations Common Operations // <hr> * // // A subvector view can be used like any other dense or sparse vector. * This means that with // only a few exceptions all \ref vector_operations * and \ref arithmetic_operations can be used. // For instance, the current * number of elements can be obtained via the \c size() function, the // * current capacity via the \c capacity() function, and the number of * non-zero elements via the // \c nonZeros() function. However, since * subvectors are references to a specific range of a // vector, several * operations are not possible, such as resizing and swapping. The following * // example shows this by means of a dense subvector view: * * \code blaze::DynamicVector<int,blaze::rowVector> v( 42UL ); // ... Resizing * and initialization * * // Creating a view on the range [5..15] of vector v auto sv = subvector( v, * 5UL, 10UL ); * * sv.size(); // Returns the number of elements in the subvector * sv.capacity(); // Returns the capacity of the subvector * sv.nonZeros(); // Returns the number of non-zero elements contained * in the subvector * * sv.resize( 84UL ); // Compilation error: Cannot resize a subvector of a * vector * * auto sv2 = subvector( v, 15UL, 10UL ); swap( sv, sv2 ); // Compilation * error: Swap operation not allowed \endcode * * // \n \section views_subvectors_arithmetic_operations Arithmetic Operations * // <hr> // // Both dense and sparse subvectors can be used in all * arithmetic operations that any other dense // or sparse vector can be used * in. The following example gives an impression of the use of dense // * subvectors within arithmetic operations. All operations (addition, * subtraction, multiplication, // scaling, ...) can be performed on all * possible combinations of dense and sparse subvectors with // fitting * element types: * * \code blaze::DynamicVector<double,blaze::rowVector> d1, d2, d3; * blaze::CompressedVector<double,blaze::rowVector> s1, s2; * * // ... 
Resizing and initialization * * blaze::DynamicMatrix<double,blaze::rowMajor> A; * * auto sv( subvector( d1, 0UL, 10UL ) ); // View on the range [0..9] of vector * d1 * * sv = d2; // Dense vector initialization of the * range [0..9] subvector( d1, 10UL, 10UL ) = s1; // Sparse vector * initialization of the range [10..19] * * d3 = sv + d2; // Dense vector/dense vector addition * s2 = s1 + subvector( d1, 10UL, 10UL ); // Sparse vector/dense vector * addition d2 = sv * subvector( d1, 20UL, 10UL ); // Component-wise vector * multiplication * * subvector( d1, 3UL, 4UL ) *= 2.0; // In-place scaling of the range * [3..6] d2 = subvector( d1, 7UL, 3UL ) * 2.0; // Scaling of the range * [7..9] d2 = 2.0 * subvector( d1, 7UL, 3UL ); // Scaling of the range * [7..9] * * subvector( d1, 0UL , 10UL ) += d2; // Addition assignment subvector( d1, * 10UL, 10UL ) -= s2; // Subtraction assignment subvector( d1, 20UL, 10UL ) * *= sv; // Multiplication assignment * * double scalar = subvector( d1, 5UL, 10UL ) * trans( s1 ); // * Scalar/dot/inner product between two vectors * * A = trans( s1 ) * subvector( d1, 4UL, 16UL ); // Outer product between two * vectors \endcode * * // \n \section views_aligned_subvectors Aligned Subvectors // <hr> // // * Usually subvectors can be defined anywhere within a vector. They may start * at any position and // may have an arbitrary size (only restricted by the * size of the underlying vector). However, in // contrast to vectors * themselves, which are always properly aligned in memory and therefore can * // provide maximum performance, this means that subvectors in general have * to be considered to be // unaligned. This can be made explicit by the \c * blaze::unaligned flag: * * \code using blaze::unaligned; * * blaze::DynamicVector<double,blaze::rowVector> x; // ... Resizing and * initialization * * // Identical creations of an unaligned subvector in the range [8..23] auto * sv1 = subvector ( x, 8UL, 16UL ); auto sv2 = * subvector<unaligned>( x, 8UL, 16UL ); auto sv3 = subvector<8UL,16UL> * ( x ); auto sv4 = subvector<unaligned,8UL,16UL>( x ); \endcode * * // All of these calls to the \c subvector() function are identical. Whether * the alignment flag is // explicitly specified or not, it always returns an * unaligned subvector. Whereas this may provide // full flexibility in the * creation of subvectors, this might result in performance disadvantages // * in comparison to vector primitives (even in case the specified subvector * could be aligned). // Whereas vector primitives are guaranteed to be * properly aligned and therefore provide maximum // performance in all * operations, a general view on a vector might not be properly aligned. This * // may cause a performance penalty on some platforms and/or for some * operations. // // However, it is also possible to create aligned * subvectors. Aligned subvectors are identical to // unaligned subvectors in * all aspects, except that they may pose additional alignment restrictions * // and therefore have less flexibility during creation, but don't suffer * from performance penalties // and provide the same performance as the * underlying vector. 
Aligned subvectors are created by // explicitly * specifying the \c blaze::aligned flag: * * \code using blaze::aligned; * * // Creating an aligned subvector in the range [8..23] auto sv1 = * subvector<aligned>( x, 8UL, 16UL ); auto sv2 = * subvector<aligned,8UL,16UL>( x ); \endcode * * // The alignment restrictions refer to system dependent address restrictions * for the used element // type and the available vectorization mode (SSE, * AVX, ...). In order to be properly aligned the // first element of the * subvector must be aligned. The following source code gives some examples * // for a double precision dynamic vector, assuming that AVX is available, * which packs 4 \c double // values into a SIMD vector: * * \code using blaze::aligned; * * blaze::DynamicVector<double,blaze::columnVector> d( 17UL ); // ... Resizing * and initialization * * // OK: Starts at the beginning, i.e. the first element is aligned auto dsv1 = * subvector<aligned>( d, 0UL, 13UL ); * * // OK: Start index is a multiple of 4, i.e. the first element is aligned auto * dsv2 = subvector<aligned>( d, 4UL, 7UL ); * * // OK: The start index is a multiple of 4 and the subvector includes the last * element auto dsv3 = subvector<aligned>( d, 8UL, 9UL ); * * // Error: Start index is not a multiple of 4, i.e. the first element is not * aligned auto dsv4 = subvector<aligned>( d, 5UL, 8UL ); \endcode * * // Note that the discussed alignment restrictions are only valid for aligned * dense subvectors. // In contrast, aligned sparse subvectors at this time * don't pose any additional restrictions. // Therefore aligned and unaligned * sparse subvectors are truly fully identical. Still, in case // the \c * blaze::aligned flag is specified during setup, an aligned subvector is * created: * * \code using blaze::aligned; * * blaze::CompressedVector<double,blaze::rowVector> x; // ... Resizing and * initialization * * // Creating an aligned subvector in the range [8..23] auto sv1 = * subvector<aligned>( x, 8UL, 16UL ); auto sv2 = * subvector<aligned,8UL,16UL>( x ); \endcode * * // \n Previous: \ref views &nbsp; &nbsp; Next: \ref views_element_selections */ //************************************************************************************************* //**Element Selections ***************************************************************************** /* * !\page views_element_selections Element Selections // // \tableofcontents * // // // Element selections provide views on arbitrary compositions of * elements of dense and sparse // vectors. These views act as a reference to * the selected elements and represent them as another // dense or sparse * vector. This reference is valid and can be used in every way any other * dense // or sparse vector can be used as long as the vector containing the * elements is not resized or // entirely destroyed. The element selection * also acts as an alias to the vector elements in the // specified range: * Changes made to the elements (e.g. modifying values, inserting or erasing * // elements) are immediately visible in the vector and changes made via * the vector are immediately // visible in the elements. // // // \n * \section views_element_selections_setup Setup of Element Selections // // * An element selection can be created very conveniently via the \c * elements() function. 
It can // be included via the header file * * \code #include <blaze/math/Elements.h> \endcode * * // The indices of the elements to be selected can be specified either at * compile time or at runtime // (by means of an initializer list, array or * vector): * * \code blaze::DynamicVector<double,blaze::rowVector> x; // ... Resizing and * initialization * * // Selecting the elements 4, 6, 8, and 10 (compile time arguments) auto e1 = * elements<4UL,6UL,8UL,10UL>( x ); * * // Selecting the elements 3, 2, and 1 (runtime arguments via an initializer * list) const std::initializer_list<size_t> list{ 3UL, 2UL, 1UL }; auto e2 = * elements( x, { 3UL, 2UL, 1UL } ); auto e3 = elements( x, list ); * * // Selecting the elements 1, 2, 3, 3, 2, and 1 (runtime arguments via a * std::array) const std::array<size_t> array{ 1UL, 2UL, 3UL, 3UL, 2UL, 1UL * }; auto e4 = elements( x, array ); auto e5 = elements( x, array.data(), * array.size() ); * * // Selecting the element 4 fives times (runtime arguments via a std::vector) * const std::vector<size_t> vector{ 4UL, 4UL, 4UL, 4UL, 4UL }; auto e6 = * elements( x, vector ); auto e7 = elements( x, vector.data(), vector.size() * ); \endcode * * // Note that it is possible to alias the elements of the underlying vector in * any order. Also note // that it is possible to use the same index multiple * times. The \c elements() function returns an // expression representing * the view on the selected elements. The type of this expression depends // * on the given arguments, primarily the type of the vector and the compile * time arguments. If the // type is required, it can be determined via the * \c decltype specifier: * * \code using VectorType = blaze::DynamicVector<int>; using ElementsType = * decltype( blaze::elements<4UL,12UL>( std::declval<VectorType>() ) ); * \endcode * * // The resulting view can be treated as any other dense or sparse vector, * i.e. it can be assigned // to, it can be copied from, and it can be used * in arithmetic operations. An element selection // created from a row * vector can be used as any other row vector, an element selection created * // from a column vector can be used as any other column vector. The view * can also be used on both // sides of an assignment: It can either be used * as an alias to grant write access to specific // elements of a vector * primitive on the left-hand side of an assignment or to grant read-access * // to specific elements of a vector primitive or expression on the * right-hand side of an assignment. // The following example demonstrates * this in detail: * * \code blaze::DynamicVector<double,blaze::rowVector> x; * blaze::CompressedVector<double,blaze::rowVector> y; * blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... 
Resizing and * initialization * * // Selecting the elements 1, 3, 5, and 7 auto e = elements( x, { 1UL, 3UL, * 5UL, 7UL } ); * * // Setting the elements 1, 3, 5, and 7 of x to the 2nd row of matrix A e = * row( A, 2UL ); * * // Setting the elements 2, 4, 6, and 8 of x to y elements( x, { 2UL, 4UL, * 6UL, 8UL } ) = y; * * // Setting the 3rd row of A to the elements 5, 4, 3, and 2 of x row( A, 3UL ) * = elements( x, { 5UL, 4UL, 3UL, 2UL } ); * * // Rotating the result of the addition between y and the 1st row of A x = * elements( y + row( A, 1UL ), { 2UL, 3UL, 0UL, 1UL } ) \endcode * * // Please note that using an element selection, which refers to an index * multiple times, on the // left-hand side of an assignment leads to * undefined behavior: * * \code blaze::DynamicVector<int,blaze::rowVector> a{ 1, 2, 3 }; * blaze::DynamicVector<int,blaze::rowVector> b{ 1, 2, 3, 4 }; * * auto e = elements( a, { 1, 1, 1, 1 } ); // Selecting the element 1 four * times e = b; // Undefined behavior \endcode * * // In this example both vectors have the same size, which results in a * correct vector assignment, // but the final value of the element at index * 1 is unspecified. // // \warning It is the programmer's responsibility to * ensure the element selection does not outlive // the viewed vector: * * \code // Creating an element selection on a temporary vector; results in a * dangling reference! auto e = elements<1UL,3UL>( DynamicVector<int>{ 1, 2, * 3, 4, 5 } ); \endcode * * // \n \section views_element_selections_element_access Element Access // // * The elements of an element selection can be directly accessed via the * subscript operator: * * \code blaze::DynamicVector<double,blaze::rowVector> v; // ... Resizing and * initialization * * // Selecting the elements 2, 4, 6, and 8 auto e = elements( v, { 2UL, 4UL, * 6UL, 8UL } ); * * // Setting the 1st element of the element selection, which corresponds to // * the element at index 4 in vector v e[1] = 2.0; \endcode * * // The numbering of the selected elements is * * \f[\left(\begin{array}{*{5}{c}} 0 & 1 & 2 & \cdots & N-1 \\ * \end{array}\right),\f] * * // where N is the number of selected elements. Alternatively, the elements of * an element selection // can be traversed via iterators. Just as with * vectors, in case of non-const element selections, // \c begin() and \c * end() return an iterator, which allows to manipulate the elements, in case * of // constant element selections an iterator to immutable elements is * returned: * * \code blaze::DynamicVector<int,blaze::rowVector> v( 256UL ); // ... Resizing * and initialization * * // Creating an element selection including specific elements of dense vector * v auto e = elements( v, { 0UL, 3UL, 6UL, 9UL, 12UL } ); * * // Traversing the elements via iterators to non-const elements for( auto * it=e.begin(); it!=e.end(); ++it ) { it = ...; // OK: Write access to the * dense vector value. ... = *it; // OK: Read access to the dense vector * value. } * * // Traversing the elements via iterators to const elements for( auto * it=e.cbegin(); it!=e.cend(); ++it ) { it = ...; // Compilation error: * Assignment to the value via iterator-to-const is invalid. ... = *it; // * OK: Read access to the dense vector value. } \endcode * * \code blaze::CompressedVector<int,blaze::rowVector> v( 256UL ); // ... 
* Resizing and initialization * * // Creating an element selection including specific elements of sparse vector * v auto e = elements( v, { 0UL, 3UL, 6UL, 9UL, 12UL } ); * * // Traversing the elements via iterators to non-const elements for( auto * it=e.begin(); it!=e.end(); ++it ) { it->value() = ...; // OK: Write * access to the value of the non-zero element. ... = it->value(); // OK: * Read access to the value of the non-zero element. it->index() = ...; // * Compilation error: The index of a non-zero element cannot be changed. ... * = it->index(); // OK: Read access to the index of the sparse element. } * * // Traversing the elements via iterators to const elements for( auto * it=e.cbegin(); it!=e.cend(); ++it ) { it->value() = ...; // Compilation * error: Assignment to the value via iterator-to-const is invalid. ... = * it->value(); // OK: Read access to the value of the non-zero element. * it->index() = ...; // Compilation error: The index of a non-zero element * cannot be changed. ... = it->index(); // OK: Read access to the index of * the sparse element. } \endcode * * // \n \section views_element_selections_element_insertion Element Insertion * // // Inserting/accessing elements in a sparse element selection can be * done by several alternative // functions. The following example * demonstrates all options: * * \code blaze::CompressedVector<double,blaze::rowVector> v( 256UL ); // * Non-initialized vector of size 256 * * std::vector<size_t> indices; // ... Selecting indices of the sparse vector * * auto e = elements( v, indices ); * * // The subscript operator provides access to the selected elements of the * sparse vector, // including the zero elements. In case the subscript * operator is used to access an element // that is currently not stored in * the sparse vector, the element is inserted. e[42] = 2.0; * * // The second operation for inserting elements via the element selection is * the set() function. // In case the element is not contained in the vector * it is inserted into the vector, if it is // already contained in the * vector its value is modified. e.set( 45UL, -1.2 ); * * // An alternative for inserting elements into the vector is the insert() * function. However, it // inserts the element only in case the element is * not already contained in the vector. e.insert( 50UL, 3.7 ); * * // Just as in case of vectors, elements can also be inserted via the append() * function. In case // of element selections, append() also requires that * the appended element's index is strictly // larger than the currently * largest non-zero index of the selection and that the selections's // * capacity is large enough to hold the new element. Note however that due to * the nature of an // element selection, which is an alias to arbitrary * elements of a sparse vector, the append() // function does not work as * efficiently for an element selection as it does for a vector. e.reserve( * 10UL ); e.append( 51UL, -2.1 ); \endcode * * // \n \section views_element_selections_common_operations Common Operations * // // An element selection can be used like any other dense or sparse * vector. For instance, the // number of selected elements can be obtained * via the \c size() function, the current capacity // via the \c capacity() * function, and the number of non-zero elements via the \c nonZeros() // * function. However, since element selections are references to a specific * range of a vector, // several operations are not possible, such as * resizing and swapping. 
The following example // shows this by means of an * element selection on a dense vector: * * \code blaze::DynamicVector<int,blaze::rowVector> v( 42UL ); // ... Resizing * and initialization * * // Selecting the elements 5 and 10 auto e = elements( v, { 5UL, 10UL } ); * * e.size(); // Returns the number of elements in the element selection * e.capacity(); // Returns the capacity of the element selection * e.nonZeros(); // Returns the number of non-zero elements contained in * the element selection * * e.resize( 84UL ); // Compilation error: Cannot resize an element selection * * auto e2 = elements( v, { 15UL, 10UL } ); swap( e, e2 ); // Compilation * error: Swap operation not allowed \endcode * * // \n \section views_element_selections_arithmetic_operations Arithmetic * Operations // // Both dense and sparse element selections can be used in * all arithmetic operations that any other // dense or sparse vector can be * used in. The following example gives an impression of the use of // dense * element selections within arithmetic operations. All operations (addition, * subtraction, // multiplication, scaling, ...) can be performed on all * possible combinations of dense and sparse // element selections with * fitting element types: * * \code blaze::DynamicVector<double,blaze::rowVector> d1, d2, d3; * blaze::CompressedVector<double,blaze::rowVector> s1, s2; * * // ... Resizing and initialization * * blaze::DynamicMatrix<double,blaze::rowMajor> A; * * std::initializer_list<size_t> indices1{ 0UL, 3UL, 6UL, 9UL, 12UL, 15UL, * 18UL, 21UL }; std::initializer_list<size_t> indices2{ 1UL, 4UL, 7UL, 10UL, * 13UL, 16UL, 19UL, 22UL }; std::initializer_list<size_t> indices3{ 2UL, * 5UL, 8UL, 11UL, 14UL, 17UL, 20UL, 23UL }; * * auto e( elements( d1, indices1 ) ); // Selecting the every third element of * d1 in the range [0..21] * * e = d2; // Dense vector assignment to the selected * elements elements( d1, indices2 ) = s1; // Sparse vector assignment to * the selected elements * * d3 = e + d2; // Dense vector/dense vector addition s2 * = s1 + elements( d1, indices2 ); // Sparse vector/dense vector addition * d2 = e * elements( d1, indices3 ); // Component-wise vector * multiplication * * elements( d1, indices2 ) *= 2.0; // In-place scaling of the second * selection of elements d2 = elements( d1, indices3 ) * 2.0; // Scaling of * the elements in the third selection of elements d2 = 2.0 * elements( d1, * indices3 ); // Scaling of the elements in the third selection of elements * * elements( d1, indices1 ) += d2; // Addition assignment elements( d1, * indices2 ) -= s2; // Subtraction assignment elements( d1, indices3 ) *= * e; // Multiplication assignment * * double scalar = elements( d1, indices2 ) * trans( s1 ); // Scalar/dot/inner * product between two vectors * * A = trans( s1 ) * elements( d1, { 3UL, 6UL } ); // Outer product between two * vectors \endcode * * // \n Previous: \ref views_subvectors &nbsp; &nbsp; Next: \ref * views_submatrices */ //************************************************************************************************* //**Submatrices ************************************************************************************ /* * !\page views_submatrices Submatrices // // \tableofcontents // // // * Submatrices provide views on a specific part of a dense or sparse matrix * just as subvectors // provide views on specific parts of vectors. As such, * submatrices act as a reference to a // specific block within a matrix. 
 * This reference is valid and can be used in every way any other dense or sparse matrix can be
 * used as long as the matrix containing the submatrix is not resized or entirely destroyed. The
 * submatrix also acts as an alias to the matrix elements in the specified block: Changes made to
 * the elements (e.g. modifying values, inserting or erasing elements) are immediately visible in
 * the matrix and changes made via the matrix are immediately visible in the submatrix.
 * //
 * //
 * // \n \section views_submatrices_setup Setup of Submatrices
 * // <hr>
 * //
 * // A view on a dense or sparse submatrix can be created very conveniently via the \c submatrix()
 * // function. It can be included via the header file
 *
 *    \code
 *    #include <blaze/math/Submatrix.h>
 *    \endcode
 *
 * // The first and second parameter specify the row and column of the first element of the submatrix.
 * // The third and fourth parameter specify the number of rows and columns, respectively. The four
 * // parameters can be specified either at compile time or at runtime:
 *
 *    \code
 *    blaze::DynamicMatrix<double,blaze::rowMajor> A;
 *    // ... Resizing and initialization
 *
 *    // Creating a dense submatrix of size 4x8, starting in row 3 and column 0 (compile time arguments)
 *    auto sm1 = submatrix<3UL,0UL,4UL,8UL>( A );
 *
 *    // Creating a dense submatrix of size 8x16, starting in row 0 and column 4 (runtime arguments)
 *    auto sm2 = submatrix( A, 0UL, 4UL, 8UL, 16UL );
 *    \endcode
 *
 * // The \c submatrix() function returns an expression representing the submatrix view. The type of
 * // this expression depends on the given submatrix arguments, primarily the type of the matrix and
 * // the compile time arguments. If the type is required, it can be determined via the \c decltype
 * // specifier:
 *
 *    \code
 *    using MatrixType = blaze::DynamicMatrix<int>;
 *    using SubmatrixType = decltype( blaze::submatrix<3UL,0UL,4UL,8UL>( std::declval<MatrixType>() ) );
 *    \endcode
 *
 * // The resulting view can be treated as any other dense or sparse matrix, i.e. it can be assigned
 * // to, it can be copied from, and it can be used in arithmetic operations. A submatrix created from
 * // a row-major matrix will itself be a row-major matrix, a submatrix created from a column-major
 * // matrix will be a column-major matrix. The view can also be used on both sides of an assignment:
 * // The submatrix can either be used as an alias to grant write access to a specific submatrix
 * // of a matrix primitive on the left-hand side of an assignment or to grant read-access to
 * // a specific submatrix of a matrix primitive or expression on the right-hand side of an
 * // assignment. The following example demonstrates this in detail:
 *
 *    \code
 *    blaze::DynamicMatrix<double,blaze::columnMajor> A, B;
 *    blaze::CompressedMatrix<double,blaze::rowMajor> C;
 *    // ... Resizing and initialization
 *
 *    // Creating a dense submatrix of size 8x4, starting in row 0 and column 2
 *    auto sm = submatrix( A, 0UL, 2UL, 8UL, 4UL );
 *
 *    // Setting the submatrix of A to an 8x4 submatrix of B
 *    sm = submatrix( B, 0UL, 0UL, 8UL, 4UL );
 *
 *    // Copying the sparse matrix C into another 8x4 submatrix of A
 *    submatrix( A, 8UL, 2UL, 8UL, 4UL ) = C;
 *
 *    // Assigning part of the result of a matrix addition to the first submatrix
 *    sm = submatrix( B + C, 0UL, 0UL, 8UL, 4UL );
 *    \endcode
 *
 * // \warning It is the programmer's responsibility to ensure the submatrix does not outlive the
 * // viewed matrix:
 *
 *    \code
 *    // Creating a submatrix on a temporary matrix; results in a dangling reference!
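 *    // (The temporary DynamicMatrix is destroyed at the end of this statement, so the view
 *    //  dangles immediately. A safe sketch would bind the view to a named matrix that outlives
 *    //  it, e.g. a hypothetical 'blaze::DynamicMatrix<int> M{ ... }; auto sm = submatrix<1UL,0UL,2UL,3UL>( M );'.)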
 *    auto sm = submatrix<1UL,0UL,2UL,3UL>( DynamicMatrix<int>{ { 1, 2, 3 }, { 4, 5, 6 }, { 7, 8, 9 } } );
 *    \endcode
 *
 * // \n \section views_submatrices_element_access Element Access
 * // <hr>
 * //
 * // The elements of a submatrix can be directly accessed with the function call operator:
 *
 *    \code
 *    blaze::DynamicMatrix<double,blaze::rowMajor> A;
 *    // ... Resizing and initialization
 *
 *    // Creating an 8x8 submatrix, starting from position (4,4)
 *    auto sm = submatrix( A, 4UL, 4UL, 8UL, 8UL );
 *
 *    // Setting the element (0,0) of the submatrix, which corresponds to
 *    // the element at position (4,4) in matrix A
 *    sm(0,0) = 2.0;
 *    \endcode
 *
 * // Alternatively, the elements of a submatrix can be traversed via (const) iterators. Just as
 * // with matrices, in case of non-const submatrices, \c begin() and \c end() return an iterator,
 * // which allows manipulation of the elements; in case of constant submatrices an iterator to
 * // immutable elements is returned:
 *
 *    \code
 *    blaze::DynamicMatrix<int,blaze::rowMajor> A( 256UL, 512UL );
 *    // ... Resizing and initialization
 *
 *    // Creating a reference to a specific submatrix of matrix A
 *    auto sm = submatrix( A, 16UL, 16UL, 64UL, 128UL );
 *
 *    // Traversing the elements of the 0th row via iterators to non-const elements
 *    for( auto it=sm.begin(0); it!=sm.end(0); ++it ) {
 *       *it = ...;  // OK: Write access to the dense submatrix value.
 *       ... = *it;  // OK: Read access to the dense submatrix value.
 *    }
 *
 *    // Traversing the elements of the 1st row via iterators to const elements
 *    for( auto it=sm.cbegin(1); it!=sm.cend(1); ++it ) {
 *       *it = ...;  // Compilation error: Assignment to the value via iterator-to-const is invalid.
 *       ... = *it;  // OK: Read access to the dense submatrix value.
 *    }
 *    \endcode
 *
 *    \code
 *    blaze::CompressedMatrix<int,blaze::rowMajor> A( 256UL, 512UL );
 *    // ... Resizing and initialization
 *
 *    // Creating a reference to a specific submatrix of matrix A
 *    auto sm = submatrix( A, 16UL, 16UL, 64UL, 128UL );
 *
 *    // Traversing the elements of the 0th row via iterators to non-const elements
 *    for( auto it=sm.begin(0); it!=sm.end(0); ++it ) {
 *       it->value() = ...;  // OK: Write access to the value of the non-zero element.
 *       ... = it->value();  // OK: Read access to the value of the non-zero element.
 *       it->index() = ...;  // Compilation error: The index of a non-zero element cannot be changed.
 *       ... = it->index();  // OK: Read access to the index of the sparse element.
 *    }
 *
 *    // Traversing the elements of the 1st row via iterators to const elements
 *    for( auto it=sm.cbegin(1); it!=sm.cend(1); ++it ) {
 *       it->value() = ...;  // Compilation error: Assignment to the value via iterator-to-const is invalid.
 *       ... = it->value();  // OK: Read access to the value of the non-zero element.
 *       it->index() = ...;  // Compilation error: The index of a non-zero element cannot be changed.
 *       ... = it->index();  // OK: Read access to the index of the sparse element.
 *    }
 *    \endcode
 *
 * // \n \section views_submatrices_element_insertion Element Insertion
 * // <hr>
 * //
 * // Inserting/accessing elements in a sparse submatrix can be done by several alternative functions.
 * // The following example demonstrates all options:
 *
 *    \code
 *    blaze::CompressedMatrix<double,blaze::rowMajor> A( 256UL, 512UL );  // Non-initialized matrix of size 256x512
 *
 *    auto sm = submatrix( A, 10UL, 10UL, 16UL, 16UL );  // View on a 16x16 submatrix of A
 *
 *    // The function call operator provides access to all possible elements of the sparse submatrix,
 *    // including the zero elements.
In case the function * call operator is used to access an element // that is currently not stored * in the sparse submatrix, the element is inserted into the // submatrix. * sm(2,4) = 2.0; * * // The second operation for inserting elements is the set() function. In case * the element is // not contained in the submatrix it is inserted into the * submatrix, if it is already contained // in the submatrix its value is * modified. sm.set( 2UL, 5UL, -1.2 ); * * // An alternative for inserting elements into the submatrix is the insert() * function. However, // it inserts the element only in case the element is * not already contained in the submatrix. sm.insert( 2UL, 6UL, 3.7 ); * * // Just as in the case of sparse matrices, elements can also be inserted via * the append() // function. In case of submatrices, append() also requires * that the appended element's // index is strictly larger than the currently * largest non-zero index in the according row // or column of the submatrix * and that the according row's or column's capacity is large // enough to * hold the new element. Note however that due to the nature of a submatrix, * which // may be an alias to the middle of a sparse matrix, the append() * function does not work as // efficiently for a submatrix as it does for a * matrix. sm.reserve( 2UL, 10UL ); sm.append( 2UL, 10UL, -2.1 ); \endcode * * // \n \section views_submatrices_common_operations Common Operations // <hr> * // // A submatrix view can be used like any other dense or sparse matrix. * This means that with only // a few exceptions all \ref matrix_operations * and \ref arithmetic_operations can be used. For // instance, the current * size of the matrix, i.e. the number of rows or columns can be obtained // * via the \c rows() and \c columns() functions, the current total capacity * via the \c capacity() // function, and the number of non-zero elements via * the \c nonZeros() function. However, since // submatrices are views on a * specific submatrix of a matrix, several operations are not possible, // * such as resizing and swapping: * * \code blaze::DynamicMatrix<int,blaze::rowMajor> A( 42UL, 42UL ); // ... * Resizing and initialization * * // Creating a view on the a 8x12 submatrix of matrix A auto sm = submatrix( * A, 0UL, 0UL, 8UL, 12UL ); * * sm.rows(); // Returns the number of rows of the submatrix sm.columns(); * // Returns the number of columns of the submatrix sm.capacity(); // * Returns the capacity of the submatrix sm.nonZeros(); // Returns the * number of non-zero elements contained in the submatrix * * sm.resize( 10UL, 8UL ); // Compilation error: Cannot resize a submatrix of a * matrix * * auto sm2 = submatrix( A, 8UL, 0UL, 12UL, 8UL ); swap( sm, sm2 ); // * Compilation error: Swap operation not allowed \endcode * * // \n \section views_submatrices_arithmetic_operations Arithmetic Operations * // <hr> // // Both dense and sparse submatrices can be used in all * arithmetic operations that any other dense // or sparse matrix can be used * in. The following example gives an impression of the use of dense // * submatrices within arithmetic operations. All operations (addition, * subtraction, multiplication, // scaling, ...) can be performed on all * possible combinations of dense and sparse matrices with // fitting element * types: * * \code blaze::DynamicMatrix<double,blaze::rowMajor> D1, D2, D3; * blaze::CompressedMatrix<double,blaze::rowMajor> S1, S2; * * blaze::CompressedVector<double,blaze::columnVector> a, b; * * // ... 
Resizing and initialization * * auto sm = submatrix( D1, 0UL, 0UL, 8UL, 8UL ); // View on the 8x8 submatrix * of matrix D1 // starting from row 0 and column 0 * * submatrix( D1, 0UL, 8UL, 8UL, 8UL ) = D2; // Dense matrix initialization of * the 8x8 submatrix // starting in row 0 and column 8 sm = S1; * // Sparse matrix initialization of the second 8x8 submatrix * * D3 = sm + D2; // Dense matrix/dense matrix * addition S2 = S1 - submatrix( D1, 8UL, 0UL, 8UL, 8UL ); // Sparse * matrix/dense matrix subtraction D2 = sm * submatrix( D1, 8UL, 8UL, 8UL, * 8UL ); // Dense matrix/dense matrix multiplication * * submatrix( D1, 8UL, 0UL, 8UL, 8UL ) *= 2.0; // In-place scaling of a * submatrix of D1 D2 = submatrix( D1, 8UL, 8UL, 8UL, 8UL ) * 2.0; // * Scaling of the a submatrix of D1 D2 = 2.0 * sm; * // Scaling of the a submatrix of D1 * * submatrix( D1, 0UL, 8UL, 8UL, 8UL ) += D2; // Addition assignment submatrix( * D1, 8UL, 0UL, 8UL, 8UL ) -= S1; // Subtraction assignment submatrix( D1, * 8UL, 8UL, 8UL, 8UL ) *= sm; // Multiplication assignment * * a = submatrix( D1, 4UL, 4UL, 8UL, 8UL ) * b; // Dense matrix/sparse vector * multiplication \endcode * * // \n \section views_aligned_submatrices Aligned Submatrices // <hr> // // * Usually submatrices can be defined anywhere within a matrix. They may * start at any position and // may have an arbitrary extension (only * restricted by the extension of the underlying matrix). // However, in * contrast to matrices themselves, which are always properly aligned in * memory and // therefore can provide maximum performance, this means that * submatrices in general have to be // considered to be unaligned. This can * be made explicit by the \c blaze::unaligned flag: * * \code using blaze::unaligned; * * blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and * initialization * * // Identical creations of an unaligned submatrix of size 8x8, starting in row * 0 and column 0 auto sm1 = submatrix ( A, 0UL, 0UL, 8UL, 8UL ); * auto sm2 = submatrix<unaligned>( A, 0UL, 0UL, 8UL, 8UL ); auto sm3 = * submatrix<0UL,0UL,8UL,8UL> ( A ); auto sm4 = * submatrix<unaligned,0UL,0UL,8UL,8UL>( A ); \endcode * * // All of these calls to the \c submatrix() function are identical. Whether * the alignment flag is // explicitly specified or not, it always returns an * unaligned submatrix. Whereas this may provide // full flexibility in the * creation of submatrices, this might result in performance disadvantages // * in comparison to matrix primitives (even in case the specified submatrix * could be aligned). // Whereas matrix primitives are guaranteed to be * properly aligned and therefore provide maximum // performance in all * operations, a general view on a matrix might not be properly aligned. This * // may cause a performance penalty on some platforms and/or for some * operations. // // However, it is also possible to create aligned * submatrices. Aligned submatrices are identical to // unaligned submatrices * in all aspects, except that they may pose additional alignment * restrictions // and therefore have less flexibility during creation, but * don't suffer from performance penalties // and provide the same * performance as the underlying matrix. 
Aligned submatrices are created by * // explicitly specifying the \c blaze::aligned flag: * * \code using blaze::aligned; * * // Creating an aligned submatrix of size 8x8, starting in row 0 and column 0 * auto sv1 = submatrix<aligned>( A, 0UL, 0UL, 8UL, 8UL ); auto sv2 = * submatrix<aligned,0UL,0UL,8UL,8UL>( A ); \endcode * * // The alignment restrictions refer to system dependent address restrictions * for the used element // type and the available vectorization mode (SSE, * AVX, ...). In order to be properly aligned the // first element of each * row/column of the submatrix must be aligned. The following source code // * gives some examples for a double precision row-major dynamic matrix, * assuming that padding is // enabled and that AVX is available, which packs * 4 \c double values into a SIMD vector: * * \code using blaze::aligned; * * blaze::DynamicMatrix<double,blaze::rowMajor> D( 13UL, 17UL ); // ... Resizing * and initialization * * // OK: Starts at position (0,0), i.e. the first element of each row is * aligned (due to padding) auto dsm1 = submatrix<aligned>( D, 0UL, 0UL, 7UL, * 11UL ); * * // OK: First column is a multiple of 4, i.e. the first element of each row is * aligned (due to padding) auto dsm2 = submatrix<aligned>( D, 3UL, 12UL, * 8UL, 16UL ); * * // OK: First column is a multiple of 4 and the submatrix includes the last * row and column auto dsm3 = submatrix<aligned>( D, 4UL, 0UL, 9UL, 17UL ); * * // Error: First column is not a multiple of 4, i.e. the first element is not * aligned auto dsm4 = submatrix<aligned>( D, 2UL, 3UL, 12UL, 12UL ); * \endcode * * // Note that the discussed alignment restrictions are only valid for aligned * dense submatrices. // In contrast, aligned sparse submatrices at this time * don't pose any additional restrictions. // Therefore aligned and unaligned * sparse submatrices are truly fully identical. Still, in case // the \c * blaze::aligned flag is specified during setup, an aligned submatrix is * created: * * \code using blaze::aligned; * * blaze::CompressedMatrix<double,blaze::rowMajor> A; // ... Resizing and * initialization * * // Creating an aligned submatrix of size 8x8, starting in row 0 and column 0 * auto sv = submatrix<aligned>( A, 0UL, 0UL, 8UL, 8UL ); \endcode * * // \n \section views_submatrices_on_symmetric_matrices Submatrices on * Symmetric Matrices // // Submatrices can also be created on symmetric * matrices (see the \c SymmetricMatrix class template): * * \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; * * // Setup of a 16x16 symmetric matrix SymmetricMatrix< DynamicMatrix<int> > A( * 16UL ); * * // Creating a dense submatrix of size 8x12, starting in row 2 and column 4 * auto sm = submatrix( A, 2UL, 4UL, 8UL, 12UL ); \endcode * * // It is important to note, however, that (compound) assignments to such * submatrices have a // special restriction: The symmetry of the underlying * symmetric matrix must not be broken! // Since the modification of element * \f$ a_{ij} \f$ of a symmetric matrix also modifies the // element \f$ * a_{ji} \f$, the matrix to be assigned must be structured such that the * symmetry // of the symmetric matrix is preserved. 
Otherwise a \a * std::invalid_argument exception is // thrown: * * \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; * * // Setup of two default 4x4 symmetric matrices SymmetricMatrix< * DynamicMatrix<int> > A1( 4 ), A2( 4 ); * * // Setup of the 3x2 dynamic matrix // // ( 1 2 ) // B = ( 3 4 ) // * ( 5 6 ) // DynamicMatrix<int> B{ { 1, 2 }, { 3, 4 }, { 5, 6 } }; * * // OK: Assigning B to a submatrix of A1 such that the symmetry can be * preserved // // ( 0 0 1 2 ) // A1 = ( 0 0 3 4 ) // ( 1 3 5 * 6 ) // ( 2 4 6 0 ) // submatrix( A1, 0UL, 2UL, 3UL, 2UL ) = B; // * OK * * // Error: Assigning B to a submatrix of A2 such that the symmetry cannot be * preserved! // The elements marked with X cannot be assigned * unambiguously! // // ( 0 1 2 0 ) // A2 = ( 1 3 X 0 ) // ( * 2 X 6 0 ) // ( 0 0 0 0 ) // submatrix( A2, 0UL, 1UL, 3UL, 2UL ) = * B; // Assignment throws an exception! \endcode * * // \n Previous: \ref views_element_selections &nbsp; &nbsp; Next: \ref * views_rows */ //************************************************************************************************* //**Rows ******************************************************************************************* /* * !\page views_rows Rows // // \tableofcontents // // // Rows provide views * on a specific row of a dense or sparse matrix. As such, rows act as a // * reference to a specific row. This reference is valid and can be used in * every way any other // row vector can be used as long as the matrix * containing the row is not resized or entirely // destroyed. The row also * acts as an alias to the row elements: Changes made to the elements // * (e.g. modifying values, inserting or erasing elements) are immediately * visible in the matrix // and changes made via the matrix are immediately * visible in the row. // // // \n \section views_rows_setup Setup of Rows // * <hr> // // \image html row.png // \image latex row.eps "Row view" * width=250pt // // A reference to a dense or sparse row can be created very * conveniently via the \c row() function. // It can be included via the * header file * * \code #include <blaze/math/Row.h> \endcode * * // The row index must be in the range from \f$[0..M-1]\f$, where \c M is the * total number of rows // of the matrix, and can be specified both at * compile time or at runtime: * * \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and * initialization * * // Creating a reference to the 1st row of matrix A (compile time index) auto * row1 = row<1UL>( A ); * * // Creating a reference to the 2nd row of matrix A (runtime index) auto row2 * = row( A, 2UL ); \endcode * * // The \c row() function returns an expression representing the row view. The * type of this // expression depends on the given row arguments, primarily * the type of the matrix and the compile // time arguments. If the type is * required, it can be determined via the \c decltype specifier: * * \code using MatrixType = blaze::DynamicMatrix<int>; using RowType = decltype( * blaze::row<1UL>( std::declval<MatrixType>() ) ); \endcode * * // The resulting view can be treated as any other row vector, i.e. it can be * assigned to, it can // be copied from, and it can be used in arithmetic * operations. 
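// As a small additional illustration (a sketch of ours, not taken from the surrounding
// examples; the matrix sizes and variable names are purely illustrative), a row view can be
// copied into a standalone vector and combined with other vectors just like a regular row
// vector:

   \code
   blaze::DynamicMatrix<double,blaze::rowMajor> A( 4UL, 4UL, 1.0 );

   // Copying the 0th row of A into a standalone dense vector
   blaze::DynamicVector<double,blaze::rowVector> v = row( A, 0UL );

   // Using the view directly in a scalar/dot/inner product
   const double scalar = row( A, 1UL ) * trans( v );
   \endcode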
The reference can also be used on // both sides of an * assignment: The row can either be used as an alias to grant write access * to a // specific row of a matrix primitive on the left-hand side of an * assignment or to grant read-access // to a specific row of a matrix * primitive or expression on the right-hand side of an assignment. // The * following example demonstrates this in detail: * * \code blaze::DynamicVector<double,blaze::rowVector> x; * blaze::CompressedVector<double,blaze::rowVector> y; * blaze::DynamicMatrix<double,blaze::rowMajor> A, B; * blaze::CompressedMatrix<double,blaze::rowMajor> C, D; // ... Resizing and * initialization * * // Setting the 2nd row of matrix A to x auto row2 = row( A, 2UL ); row2 = x; * * // Setting the 3rd row of matrix B to y row( B, 3UL ) = y; * * // Setting x to the 4th row of the result of the matrix multiplication x = * row( A * B, 4UL ); * * // Setting y to the 2nd row of the result of the sparse matrix multiplication * y = row( C * D, 2UL ); \endcode * * // \warning It is the programmer's responsibility to ensure the row does not * outlive the viewed // matrix: * * \code // Creating a row on a temporary matrix; results in a dangling * reference! auto row1 = row<1UL>( DynamicMatrix<int>{ { 1, 2, 3 }, { 4, 5, * 6 }, { 7, 8, 9 } } ); \endcode * * // \n \section views_rows_element_access Element Access // <hr> // // The * elements of a row can be directly accessed with the subscript operator: * * \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and * initialization * * // Creating a view on the 4th row of matrix A auto row4 = row( A, 4UL ); * * // Setting the 1st element of the dense row, which corresponds // to the 1st * element in the 4th row of matrix A row4[1] = 2.0; \endcode * * // The numbering of the row elements is * * \f[\left(\begin{array}{*{5}{c}} 0 & 1 & 2 & \cdots & N-1 \\ * \end{array}\right),\f] * * // where N is the number of columns of the referenced matrix. Alternatively, * the elements of a // row can be traversed via iterators. Just as with * vectors, in case of non-const rows, \c begin() // and \c end() return an * iterator, which allows to manipulate the elements, in case of constant // * rows an iterator to immutable elements is returned: * * \code blaze::DynamicMatrix<int,blaze::rowMajor> A( 128UL, 256UL ); // ... * Resizing and initialization * * // Creating a reference to the 31st row of matrix A auto row31 = row( A, 31UL * ); * * // Traversing the elements via iterators to non-const elements for( auto * it=row31.begin(); it!=row31.end(); ++it ) { it = ...; // OK; Write access * to the dense row value ... = *it; // OK: Read access to the dense row * value. } * * // Traversing the elements via iterators to const elements for( auto * it=row31.cbegin(); it!=row31.cend(); ++it ) { it = ...; // Compilation * error: Assignment to the value via a ConstIterator is invalid. ... = *it; * // OK: Read access to the dense row value. } \endcode * * \code blaze::CompressedMatrix<int,blaze::rowMajor> A( 128UL, 256UL ); // ... * Resizing and initialization * * // Creating a reference to the 31st row of matrix A auto row31 = row( A, 31UL * ); * * // Traversing the elements via iterators to non-const elements for( auto * it=row31.begin(); it!=row31.end(); ++it ) { it->value() = ...; // OK: * Write access to the value of the non-zero element. ... = it->value(); // * OK: Read access to the value of the non-zero element. it->index() = ...; * // Compilation error: The index of a non-zero element cannot be changed. * ... 
= it->index(); // OK: Read access to the index of the sparse element. * } * * // Traversing the elements via iterators to const elements for( auto * it=row31.cbegin(); it!=row31.cend(); ++it ) { it->value() = ...; // * Compilation error: Assignment to the value via a ConstIterator is invalid. * ... = it->value(); // OK: Read access to the value of the non-zero * element. it->index() = ...; // Compilation error: The index of a non-zero * element cannot be changed. ... = it->index(); // OK: Read access to the * index of the sparse element. } \endcode * * // \n \section views_rows_element_insertion Element Insertion // <hr> // // * Inserting/accessing elements in a sparse row can be done by several * alternative functions. // The following example demonstrates all options: * * \code blaze::CompressedMatrix<double,blaze::rowMajor> A( 10UL, 100UL ); // * Non-initialized 10x100 matrix * * auto row0( row( A, 0UL ) ); // Reference to the 0th row of A * * // The subscript operator provides access to all possible elements of the * sparse row, // including the zero elements. In case the subscript operator * is used to access an element // that is currently not stored in the sparse * row, the element is inserted into the row. row0[42] = 2.0; * * // The second operation for inserting elements is the set() function. In case * the element // is not contained in the row it is inserted into the row, if * it is already contained in // the row its value is modified. row0.set( * 45UL, -1.2 ); * * // An alternative for inserting elements into the row is the insert() * function. However, // it inserts the element only in case the element is * not already contained in the row. row0.insert( 50UL, 3.7 ); * * // A very efficient way to add new elements to a sparse row is the append() * function. // Note that append() requires that the appended element's index * is strictly larger than // the currently largest non-zero index of the row * and that the row's capacity is large // enough to hold the new element. * row0.reserve( 10UL ); row0.append( 51UL, -2.1 ); \endcode * * // \n \section views_rows_common_operations Common Operations // <hr> // // A * row view can be used like any other row vector. This means that with only * a few exceptions // all \ref vector_operations and \ref * arithmetic_operations can be used. For instance, the // current number of * elements can be obtained via the \c size() function, the current capacity * // via the \c capacity() function, and the number of non-zero elements via * the \c nonZeros() // function. However, since rows are references to * specific rows of a matrix, several operations // are not possible on * views, such as resizing and swapping. The following example shows this by * // means of a dense row view: * * \code blaze::DynamicMatrix<int,blaze::rowMajor> A( 42UL, 42UL ); // ... 
* Resizing and initialization * * // Creating a reference to the 2nd row of matrix A auto row2 = row( A, 2UL ); * * row2.size(); // Returns the number of elements in the row * row2.capacity(); // Returns the capacity of the row row2.nonZeros(); * // Returns the number of non-zero elements contained in the row * * row2.resize( 84UL ); // Compilation error: Cannot resize a single row of a * matrix * * auto row3 = row( A, 3UL ); swap( row2, row3 ); // Compilation error: Swap * operation not allowed \endcode * * // \n \section views_rows_arithmetic_operations Arithmetic Operations // <hr> * // // Both dense and sparse rows can be used in all arithmetic operations * that any other dense or // sparse row vector can be used in. The following * example gives an impression of the use of // dense rows within arithmetic * operations. All operations (addition, subtraction, multiplication, // * scaling, ...) can be performed on all possible combinations of dense and * sparse rows with // fitting element types: * * \code blaze::DynamicVector<double,blaze::rowVector> a( 2UL, 2.0 ), b; * blaze::CompressedVector<double,blaze::rowVector> c( 2UL ); c[1] = 3.0; * * blaze::DynamicMatrix<double,blaze::rowMajor> A( 4UL, 2UL ); // * Non-initialized 4x2 matrix * * auto row0( row( A, 0UL ) ); // Reference to the 0th row of A * * row0[0] = 0.0; // Manual initialization of the 0th row of A row0[1] = * 0.0; row( A, 1UL ) = 1.0; // Homogeneous initialization of the 1st row of * A row( A, 2UL ) = a; // Dense vector initialization of the 2nd row of A * row( A, 3UL ) = c; // Sparse vector initialization of the 3rd row of A * * b = row0 + a; // Dense vector/dense vector addition b = c + row( * A, 1UL ); // Sparse vector/dense vector addition b = row0 * row( A, * 2UL ); // Component-wise vector multiplication * * row( A, 1UL ) *= 2.0; // In-place scaling of the 1st row b = row( A, 1UL * ) * 2.0; // Scaling of the 1st row b = 2.0 * row( A, 1UL ); // Scaling * of the 1st row * * row( A, 2UL ) += a; // Addition assignment row( A, 2UL ) -= c; * // Subtraction assignment row( A, 2UL ) *= row( A, 0UL ); // * Multiplication assignment * * double scalar = row( A, 1UL ) * trans( c ); // Scalar/dot/inner product * between two vectors * * A = trans( c ) * row( A, 1UL ); // Outer product between two vectors * \endcode * * // \n \section views_rows_non_fitting_storage_order Views on Matrices with * Non-Fitting Storage Order // <hr> // // Especially noteworthy is that row * views can be created for both row-major and column-major // matrices. * Whereas the interface of a row-major matrix only allows to traverse a row * directly // and the interface of a column-major matrix only allows to * traverse a column, via views it is // possible to traverse a row of a * column-major matrix or a column of a row-major matrix. For // instance: * * \code blaze::DynamicMatrix<int,blaze::columnMajor> A( 64UL, 32UL ); // ... * Resizing and initialization * * // Creating a reference to the 1st row of a column-major matrix A auto row1 = * row( A, 1UL ); * * for( auto it=row1.begin(); it!=row1.end(); ++it ) { // ... } \endcode * * // However, please note that creating a row view on a matrix stored in a * column-major fashion // can result in a considerable performance decrease * in comparison to a row view on a matrix // with row-major storage format. * This is due to the non-contiguous storage of the matrix // elements. 
* Therefore care has to be taken in the choice of the most suitable storage * order: * * \code // Setup of two column-major matrices * blaze::DynamicMatrix<double,blaze::columnMajor> A( 128UL, 128UL ); * blaze::DynamicMatrix<double,blaze::columnMajor> B( 128UL, 128UL ); // ... * Resizing and initialization * * // The computation of the 15th row of the multiplication between A and B ... * blaze::DynamicVector<double,blaze::rowVector> x = row( A * B, 15UL ); * * // ... is essentially the same as the following computation, which multiplies * // the 15th row of the column-major matrix A with B. * blaze::DynamicVector<double,blaze::rowVector> x = row( A, 15UL ) * B; * \endcode * * // Although \b Blaze performs the resulting vector/matrix multiplication as * efficiently as possible // using a row-major storage order for matrix \c A * would result in a more efficient evaluation. // // \n Previous: \ref * views_submatrices &nbsp; &nbsp; Next: \ref views_row_selections */ //************************************************************************************************* //**Row Selections ********************************************************************************* /* * !\page views_row_selections Row Selections // // \tableofcontents // // // * Row selections provide views on arbitrary compositions of rows of dense * and sparse matrices. // These views act as a reference to the selected * rows and represent them as another dense or // sparse matrix. This * reference is valid and can be used in every way any other dense or sparse * // matrix can be used as long as the matrix containing the rows is not * resized or entirely // destroyed. The row selection also acts as an alias * to the matrix elements in the specified // range: Changes made to the rows * (e.g. modifying values, inserting or erasing elements) are // immediately * visible in the matrix and changes made via the matrix are immediately * visible // in the rows. // // // \n \section views_row_selections_setup * Setup of Row Selections // // A row selection can be created very * conveniently via the \c rows() function. It can be included // via the * header file * * \code #include <blaze/math/Rows.h> \endcode * * // The indices of the rows to be selected can be specified either at compile * time or at runtime // (by means of an initializer list, array or vector): * * \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and * initialization * * // Selecting the rows 4, 6, 8, and 10 (compile time arguments) auto rs1 = * rows<4UL,6UL,8UL,10UL>( A ); * * // Selecting the rows 3, 2, and 1 (runtime arguments via an initializer list) * const std::initializer_list<size_t> list{ 3UL, 2UL, 1UL }; auto rs2 = * rows( A, { 3UL, 2UL, 1UL } ); auto rs3 = rows( A, list ); * * // Selecting the rows 1, 2, 3, 3, 2, and 1 (runtime arguments via a * std::array) const std::array<size_t> array{ 1UL, 2UL, 3UL, 3UL, 2UL, 1UL * }; auto rs4 = rows( A, array ); auto rs5 = rows( A, array.data(), * array.size() ); * * // Selecting the row 4 fives times (runtime arguments via a std::vector) * const std::vector<size_t> vector{ 4UL, 4UL, 4UL, 4UL, 4UL }; auto rs6 = * rows( A, vector ); auto rs7 = rows( A, vector.data(), vector.size() ); * \endcode * * // Note that it is possible to alias the rows of the underlying matrix in any * order. Also note // that it is possible to use the same index multiple * times. The \c rows() function returns an // expression representing the * view on the selected rows. 
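// To make the aliasing in arbitrary order concrete, the following minimal sketch (ours, not
// part of the original example set; the small matrix is illustrative) selects all rows of a
// matrix in reverse order; writes through the selection are immediately visible in the
// underlying matrix:

   \code
   blaze::DynamicMatrix<int,blaze::rowMajor> A{ { 1, 2 }, { 3, 4 }, { 5, 6 } };

   // Selecting all rows of A in reverse order; the selection acts as the row-reversed matrix
   auto reversed = rows( A, { 2UL, 1UL, 0UL } );

   // Element (0,0) of the selection aliases element (2,0) of A
   reversed(0,0) = 50;
   \endcode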
The type of this expression depends // on the * given arguments, primarily the type of the matrix and the compile time * arguments. If // the type is required, it can be determined via the \c * decltype specifier: * * \code using MatrixType = blaze::DynamicMatrix<int>; using RowsType = * decltype( blaze::rows<3UL,0UL,4UL,8UL>( std::declval<MatrixType>() ) ); * \endcode * * // The resulting view can be treated as any other dense or sparse matrix, * i.e. it can be assigned // to, it can be copied from, and it can be used * in arithmetic operations. Note, however, that a // row selection will * always be treated as a row-major matrix, regardless of the storage order * of // the matrix containing the rows. The view can also be used on both * sides of an assignment: It // can either be used as an alias to grant * write access to specific rows of a matrix primitive // on the left-hand * side of an assignment or to grant read-access to specific rows of a matrix * // primitive or expression on the right-hand side of an assignment. The * following example // demonstrates this in detail: * * \code blaze::DynamicMatrix<double,blaze::rowMajor> A; * blaze::DynamicMatrix<double,blaze::columnMajor> B; * blaze::CompressedMatrix<double,blaze::rowMajor> C; // ... Resizing and * initialization * * // Selecting the rows 1, 3, 5, and 7 of A auto rs = rows( A, { 1UL, 3UL, 5UL, * 7UL } ); * * // Setting rows 1, 3, 5, and 7 of A to row 4 of B rs = rows( B, { 4UL, 4UL, * 4UL, 4UL } ); * * // Setting the rows 2, 4, 6, and 8 of A to C rows( A, { 2UL, 4UL, 6UL, 8UL } * ) = C; * * // Setting the first 4 rows of A to the rows 5, 4, 3, and 2 of C submatrix( * A, 0UL, 0UL, 4UL, A.columns() ) = rows( C, { 5UL, 4UL, 3UL, 2UL } ); * * // Rotating the result of the addition between rows 1, 3, 5, and 7 of A and C * B = rows( rs + C, { 2UL, 3UL, 0UL, 1UL } ); \endcode * * // \warning It is the programmer's responsibility to ensure the row selection * does not outlive the // viewed matrix: * * \code // Creating a row selection on a temporary matrix; results in a * dangling reference! auto rs = rows<2UL,0UL>( DynamicMatrix<int>{ { 1, 2, 3 * }, { 4, 5, 6 }, { 7, 8, 9 } } ); \endcode * * // \n \section views_row_selections_element_access Element Access // // The * elements of a row selection can be directly accessed via the function call * operator: * * \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and * initialization * * // Creating a view on the first four rows of A in reverse order auto rs = * rows( A, { 3UL, 2UL, 1UL, 0UL } ); * * // Setting the element (0,0) of the row selection, which corresponds // to * the element at position (3,0) in matrix A rs(0,0) = 2.0; \endcode * * // Alternatively, the elements of a row selection can be traversed via * (const) iterators. Just as // with matrices, in case of non-const row * selection, \c begin() and \c end() return an iterator, // which allows to * manipuate the elements, in case of constant row selection an iterator to * // immutable elements is returned: * * \code blaze::DynamicMatrix<int,blaze::rowMajor> A( 256UL, 512UL ); // ... * Resizing and initialization * * // Creating a reference to a selection of rows of matrix A auto rs = rows( A, * { 16UL, 32UL, 64UL, 128UL } ); * * // Traversing the elements of the 0th row via iterators to non-const elements * for( auto it=rs.begin(0); it!=rs.end(0); ++it ) { it = ...; // OK: Write * access to the dense value. ... = *it; // OK: Read access to the dense * value. 
} * * // Traversing the elements of the 1st row via iterators to const elements * for( auto it=rs.cbegin(1); it!=rs.cend(1); ++it ) { it = ...; // * Compilation error: Assignment to the value via iterator-to-const is * invalid. ... = *it; // OK: Read access to the dense value. } \endcode * * \code blaze::CompressedMatrix<int,blaze::rowMajor> A( 256UL, 512UL ); // ... * Resizing and initialization * * // Creating a reference to a selection of rows of matrix A auto rs = rows( A, * { 16UL, 32UL, 64UL, 128UL } ); * * // Traversing the elements of the 0th row via iterators to non-const elements * for( auto it=rs.begin(0); it!=rs.end(0); ++it ) { it->value() = ...; // * OK: Write access to the value of the non-zero element. ... = it->value(); * // OK: Read access to the value of the non-zero element. it->index() = * ...; // Compilation error: The index of a non-zero element cannot be * changed. ... = it->index(); // OK: Read access to the index of the sparse * element. } * * // Traversing the elements of the 1st row via iterators to const elements * for( auto it=rs.cbegin(1); it!=rs.cend(1); ++it ) { it->value() = ...; // * Compilation error: Assignment to the value via iterator-to-const is * invalid. ... = it->value(); // OK: Read access to the value of the * non-zero element. it->index() = ...; // Compilation error: The index of a * non-zero element cannot be changed. ... = it->index(); // OK: Read access * to the index of the sparse element. } \endcode * * // \n \section views_row_selections_element_insertion Element Insertion // // * Inserting/accessing elements in a sparse row selection can be done by * several alternative // functions. The following example demonstrates all * options: * * \code blaze::CompressedMatrix<double,blaze::rowMajor> A( 256UL, 512UL ); // * Non-initialized matrix of size 256x512 * * auto rs = rows( A, { 10UL, 20UL, 30UL, 40UL } ); // View on the rows 10, 20, * 30, and 40 of A * * // The function call operator provides access to all possible elements of the * sparse row // selection, including the zero elements. In case the function * call operator is used to // access an element that is currently not stored * in the sparse row selection, the element // is inserted into the row * selection. rs(2,4) = 2.0; * * // The second operation for inserting elements is the set() function. In case * the element is // not contained in the row selection it is inserted into * the row selection, if it is already // contained in the row selection its * value is modified. rs.set( 2UL, 5UL, -1.2 ); * * // An alternative for inserting elements into the row selection is the * insert() function. // However, it inserts the element only in case the * element is not already contained in the // row selection. rs.insert( 2UL, * 6UL, 3.7 ); * * // Just as in the case of sparse matrices, elements can also be inserted via * the append() // function. In case of row selections, append() also * requires that the appended element's // index is strictly larger than the * currently largest non-zero index in the according row // of the row * selection and that the according row's capacity is large enough to hold * the new // element. Note however that due to the nature of a row * selection, which may be an alias to // an arbitrary collection of rows, * the append() function does not work as efficiently for // a row selection * as it does for a matrix. 
rs.reserve( 2UL, 10UL ); rs.append( 2UL, 10UL, * -2.1 ); \endcode * * // \n \section views_row_selections_common_operations Common Operations // // * A view on specific rows of a matrix can be used like any other dense or * sparse matrix. For // instance, the current size of the matrix, i.e. the * number of rows or columns can be obtained // via the \c rows() and \c * columns() functions, the current total capacity via the \c capacity() // * function, and the number of non-zero elements via the \c nonZeros() * function. However, since // row selections are views on specific rows of a * matrix, several operations are not possible, // such as resizing and * swapping: * * \code blaze::DynamicMatrix<int,blaze::rowMajor> A( 42UL, 42UL ); // ... * Resizing and initialization * * // Creating a view on the rows 8, 16, 24, and 32 of matrix A auto rs = rows( * A, { 8UL, 16UL, 24UL, 32UL } ); * * rs.rows(); // Returns the number of rows of the row selection * rs.columns(); // Returns the number of columns of the row selection * rs.capacity(); // Returns the capacity of the row selection * rs.nonZeros(); // Returns the number of non-zero elements contained in * the row selection * * rs.resize( 10UL, 8UL ); // Compilation error: Cannot resize a row selection * * auto rs2 = rows( A, 9UL, 17UL, 25UL, 33UL ); swap( rs, rs2 ); // Compilation * error: Swap operation not allowed \endcode * * // \n \section views_row_selections_arithmetic_operations Arithmetic * Operations // // Both dense and sparse row selections can be used in all * arithmetic operations that any other // dense or sparse matrix can be used * in. The following example gives an impression of the use // of dense row * selctions within arithmetic operations. All operations (addition, * subtraction, // multiplication, scaling, ...) can be performed on all * possible combinations of dense and // sparse matrices with fitting element * types: * * \code blaze::DynamicMatrix<double,blaze::rowMajor> D1, D2, D3; * blaze::CompressedMatrix<double,blaze::rowMajor> S1, S2; * * blaze::CompressedVector<double,blaze::columnVector> a, b; * * // ... 
Resizing and initialization

   std::initializer_list<size_t> indices1{ 0UL, 3UL, 6UL, 9UL, 12UL, 15UL, 18UL, 21UL };
   std::initializer_list<size_t> indices2{ 1UL, 4UL, 7UL, 10UL, 13UL, 16UL, 19UL, 22UL };
   std::initializer_list<size_t> indices3{ 2UL, 5UL, 8UL, 11UL, 14UL, 17UL, 20UL, 23UL };

   auto rs = rows( D1, indices1 );  // Selecting every third row of D1 in the range [0..21]

   rs = D2;                         // Dense matrix assignment to the selected rows
   rows( D1, indices2 ) = S1;       // Sparse matrix assignment to the selected rows

   D3 = rs + D2;                        // Dense matrix/dense matrix addition
   S2 = S1 - rows( D1, indices2 );      // Sparse matrix/dense matrix subtraction
   D2 = rs % rows( D1, indices3 );      // Dense matrix/dense matrix Schur product
   D2 = rows( D1, indices2 ) * D1;      // Dense matrix/dense matrix multiplication

   rows( D1, indices2 ) *= 2.0;         // In-place scaling of the second selection of rows
   D2 = rows( D1, indices3 ) * 2.0;     // Scaling of the elements in the third selection of rows
   D2 = 2.0 * rows( D1, indices3 );     // Scaling of the elements in the third selection of rows

   rows( D1, indices1 ) += D2;          // Addition assignment
   rows( D1, indices2 ) -= S1;          // Subtraction assignment
   rows( D1, indices3 ) %= rs;          // Schur product assignment

   a = rows( D1, indices1 ) * b;        // Dense matrix/sparse vector multiplication
   \endcode

// \n \section views_row_selections_on_column_major_matrix Row Selections on Column-Major Matrices
//
// Especially noteworthy is that row selections can be created for both row-major and
// column-major matrices. Whereas the interface of a row-major matrix only allows to traverse
// a row directly and the interface of a column-major matrix only allows to traverse a column,
// via views it is possible to traverse a row of a column-major matrix or a column of a
// row-major matrix. For instance:

   \code
   blaze::DynamicMatrix<int,blaze::columnMajor> A( 64UL, 32UL );
   // ... Resizing and initialization

   // Creating a reference to the 1st and 3rd row of a column-major matrix A
   auto rs = rows( A, { 1UL, 3UL } );

   // Traversing row 0 of the selection, which corresponds to the 1st row of matrix A
   for( auto it=rs.begin( 0UL ); it!=rs.end( 0UL ); ++it ) {
      // ...
   }
   \endcode

// However, please note that creating a row selection on a matrix stored in a column-major
// fashion can result in a considerable performance decrease in comparison to a row selection
// on a matrix with row-major storage format. This is due to the non-contiguous storage of
// the matrix elements. Therefore care has to be taken in the choice of the most suitable
// storage order:

   \code
   // Setup of two column-major matrices
   blaze::DynamicMatrix<double,blaze::columnMajor> A( 128UL, 128UL );
   blaze::DynamicMatrix<double,blaze::columnMajor> B( 128UL, 128UL );
   // ... Resizing and initialization

   // The computation of the 15th, 30th, and 45th row of the multiplication between A and B ...
   blaze::DynamicMatrix<double,blaze::rowMajor> x = rows( A * B, { 15UL, 30UL, 45UL } );

   // ... is essentially the same as the following computation, which multiplies
   // the 15th, 30th, and 45th row of the column-major matrix A with B.
   blaze::DynamicMatrix<double,blaze::rowMajor> x = rows( A, { 15UL, 30UL, 45UL } ) * B;
   \endcode

// Although \b Blaze performs the resulting matrix/matrix multiplication as efficiently as
// possible, using a row-major storage order for matrix \c A would result in a more efficient
// evaluation.
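// Following this advice, a minimal sketch of ours (not part of the original example set)
// stores the matrix carrying the row selection in row-major order, so that the selected rows
// refer to contiguous memory:

   \code
   blaze::DynamicMatrix<double,blaze::rowMajor>    A( 128UL, 128UL );
   blaze::DynamicMatrix<double,blaze::columnMajor> B( 128UL, 128UL );
   // ... Resizing and initialization

   // The row selection is formed on the row-major matrix A, whose rows are stored contiguously
   blaze::DynamicMatrix<double,blaze::rowMajor> x = rows( A, { 15UL, 30UL, 45UL } ) * B;
   \endcode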
// // \n Previous: \ref * views_rows &nbsp; &nbsp; Next: \ref views_columns */ //************************************************************************************************* //**Columns **************************************************************************************** /* * !\page views_columns Columns // // \tableofcontents // // // Just as rows * provide a view on a specific row of a matrix, columns provide views on a * specific // column of a dense or sparse matrix. As such, columns act as a * reference to a specific column. // This reference is valid an can be used * in every way any other column vector can be used as long // as the matrix * containing the column is not resized or entirely destroyed. Changes made * to the // elements (e.g. modifying values, inserting or erasing elements) * are immediately visible in the // matrix and changes made via the matrix * are immediately visible in the column. // // // \n \section * views_colums_setup Setup of Columns // <hr> // // \image html column.png * // \image latex column.eps "Column view" width=250pt // // A reference to * a dense or sparse column can be created very conveniently via the \c * column() // function. It can be included via the header file * * \code #include <blaze/math/Column.h> \endcode * * // The column index must be in the range from \f$[0..N-1]\f$, where \c N is * the total number of // columns of the matrix, and can be specified both at * compile time or at runtime: * * \code blaze::DynamicMatrix<double,blaze::columnMajor> A; // ... Resizing and * initialization * * // Creating a reference to the 1st column of matrix A (compile time index) * auto col1 = column<1UL>( A ); * * // Creating a reference to the 2nd column of matrix A (runtime index) auto * col2 = column( A, 2UL ); \endcode * * // The \c column() function returns an expression representing the column * view. The type of this // expression depends on the given column * arguments, primarily the type of the matrix and the // compile time * arguments. If the type is required, it can be determined via the \c * decltype // specifier: * * \code using MatrixType = blaze::DynamicMatrix<int>; using ColumnType = * decltype( blaze::column<1UL>( std::declval<MatrixType>() ) ); \endcode * * // The resulting view can be treated as any other column vector, i.e. it can * be assigned to, it // can be copied from, and it can be used in arithmetic * operations. The reference can also be used // on both sides of an * assignment: The column can either be used as an alias to grant write * access // to a specific column of a matrix primitive on the left-hand side * of an assignment or to grant // read-access to a specific column of a * matrix primitive or expression on the right-hand side // of an assignment. * The following example demonstrates this in detail: * * \code blaze::DynamicVector<double,blaze::columnVector> x; * blaze::CompressedVector<double,blaze::columnVector> y; * blaze::DynamicMatrix<double,blaze::columnMajor> A, B; * blaze::CompressedMatrix<double,blaze::columnMajor> C, D; // ... 
Resizing * and initialization * * // Setting the 1st column of matrix A to x auto col1 = column( A, 1UL ); col1 * = x; * * // Setting the 4th column of matrix B to y column( B, 4UL ) = y; * * // Setting x to the 2nd column of the result of the matrix multiplication x = * column( A * B, 2UL ); * * // Setting y to the 2nd column of the result of the sparse matrix * multiplication y = column( C * D, 2UL ); \endcode * * // \warning It is the programmer's responsibility to ensure the column does * not outlive the // viewed matrix: * * \code // Creating a column on a temporary matrix; results in a dangling * reference! auto col1 = column<1UL>( DynamicMatrix<int>{ { 1, 2, 3 }, { 4, * 5, 6 }, { 7, 8, 9 } } ); \endcode * * // \n \section views_columns_element_access Element Access // <hr> // // The * elements of a column can be directly accessed with the subscript operator. * * \code blaze::DynamicMatrix<double,blaze::columnMajor> A; // ... Resizing and * initialization * * // Creating a view on the 4th column of matrix A auto col4 = column( A, 4UL * ); * * // Setting the 1st element of the dense column, which corresponds // to the * 1st element in the 4th column of matrix A col4[1] = 2.0; \endcode * * // The numbering of the column elements is * * \f[\left(\begin{array}{*{5}{c}} 0 & 1 & 2 & \cdots & N-1 \\ * \end{array}\right),\f] * * // where N is the number of rows of the referenced matrix. Alternatively, the * elements of a column // can be traversed via iterators. Just as with * vectors, in case of non-const columns, \c begin() // and \c end() return * an iterator, which allows to manipulate the elements, in case of constant * // columns an iterator to immutable elements is returned: * * \code blaze::DynamicMatrix<int,blaze::columnMajor> A( 128UL, 256UL ); // ... * Resizing and initialization * * // Creating a reference to the 31st column of matrix A auto col31 = column( * A, 31UL ); * * // Traversing the elements via iterators to non-const elements for( auto * it=col31.begin(); it!=col31.end(); ++it ) { it = ...; // OK; Write access * to the dense column value ... = *it; // OK: Read access to the dense * column value. } * * // Traversing the elements via iterators to const elements for( auto * it=col31.cbegin(); it!=col31.cend(); ++it ) { it = ...; // Compilation * error: Assignment to the value via iterator-to-const is invalid. ... = * *it; // OK: Read access to the dense column value. } \endcode * * \code blaze::CompressedMatrix<int,blaze::columnMajor> A( 128UL, 256UL ); // * ... Resizing and initialization * * // Creating a reference to the 31st column of matrix A auto col31 = column( * A, 31UL ); * * // Traversing the elements via iterators to non-const elements for( auto * it=col31.begin(); it!=col31.end(); ++it ) { it->value() = ...; // OK: * Write access to the value of the non-zero element. ... = it->value(); // * OK: Read access to the value of the non-zero element. it->index() = ...; * // Compilation error: The index of a non-zero element cannot be changed. * ... = it->index(); // OK: Read access to the index of the sparse element. * } * * // Traversing the elements via iterators to const elements for( auto * it=col31.cbegin(); it!=col31.cend(); ++it ) { it->value() = ...; // * Compilation error: Assignment to the value via iterator-to-const is * invalid. ... = it->value(); // OK: Read access to the value of the * non-zero element. it->index() = ...; // Compilation error: The index of a * non-zero element cannot be changed. ... 
= it->index(); // OK: Read access * to the index of the sparse element. } \endcode * * // \n \section views_columns_element_insertion Element Insertion // <hr> // * // Inserting/accessing elements in a sparse column can be done by several * alternative functions. // The following example demonstrates all options: * * \code blaze::CompressedMatrix<double,blaze::columnMajor> A( 100UL, 10UL ); * // Non-initialized 100x10 matrix * * auto col0( column( A, 0UL ) ); // Reference to the 0th column of A * * // The subscript operator provides access to all possible elements of the * sparse column, // including the zero elements. In case the subscript * operator is used to access an element // that is currently not stored in * the sparse column, the element is inserted into the column. col0[42] = * 2.0; * * // The second operation for inserting elements is the set() function. In case * the element // is not contained in the column it is inserted into the * column, if it is already contained // in the column its value is modified. * col0.set( 45UL, -1.2 ); * * // An alternative for inserting elements into the column is the insert() * function. However, // it inserts the element only in case the element is * not already contained in the column. col0.insert( 50UL, 3.7 ); * * // A very efficient way to add new elements to a sparse column is the * append() function. // Note that append() requires that the appended * element's index is strictly larger than // the currently largest non-zero * index of the column and that the column's capacity is // large enough to * hold the new element. col0.reserve( 10UL ); col0.append( 51UL, -2.1 ); * \endcode * * // \n \section views_columns_common_operations Common Operations // <hr> // * // A column view can be used like any other column vector. This means that * with only a few // exceptions all \ref vector_operations and \ref * arithmetic_operations can be used. For instance, // the current number of * elements can be obtained via the \c size() function, the current capacity * // via the \c capacity() function, and the number of non-zero elements via * the \c nonZeros() // function. However, since columns are references to * specific columns of a matrix, several // operations are not possible on * views, such as resizing and swapping. The following example // shows this * by means of a dense column view: * * \code blaze::DynamicMatrix<int,blaze::columnMajor> A( 42UL, 42UL ); // ... * Resizing and initialization * * // Creating a reference to the 2nd column of matrix A auto col2 = column( A, * 2UL ); * * col2.size(); // Returns the number of elements in the column * col2.capacity(); // Returns the capacity of the column * col2.nonZeros(); // Returns the number of non-zero elements contained * in the column * * col2.resize( 84UL ); // Compilation error: Cannot resize a single column of * a matrix * * auto col3 = column( A, 3UL ); swap( col2, col3 ); // Compilation error: * Swap operation not allowed \endcode * * // \n \section views_columns_arithmetic_operations Arithmetic Operations // * <hr> // // Both dense and sparse columns can be used in all arithmetic * operations that any other dense or // sparse column vector can be used in. * The following example gives an impression of the use of // dense columns * within arithmetic operations. All operations (addition, subtraction, * multiplication, // scaling, ...) 
can be performed on all possible * combinations of dense and sparse columns with // fitting element types: * * \code blaze::DynamicVector<double,blaze::columnVector> a( 2UL, 2.0 ), b; * blaze::CompressedVector<double,blaze::columnVector> c( 2UL ); c[1] = 3.0; * * blaze::DynamicMatrix<double,blaze::columnMajor> A( 2UL, 4UL ); // * Non-initialized 2x4 matrix * * auto col0( column( A, 0UL ) ); // Reference to the 0th column of A * * col0[0] = 0.0; // Manual initialization of the 0th column of A * col0[1] = 0.0; column( A, 1UL ) = 1.0; // Homogeneous initialization of * the 1st column of A column( A, 2UL ) = a; // Dense vector * initialization of the 2nd column of A column( A, 3UL ) = c; // Sparse * vector initialization of the 3rd column of A * * b = col0 + a; // Dense vector/dense vector addition b = c + * column( A, 1UL ); // Sparse vector/dense vector addition b = col0 * * column( A, 2UL ); // Component-wise vector multiplication * * column( A, 1UL ) *= 2.0; // In-place scaling of the 1st column b = * column( A, 1UL ) * 2.0; // Scaling of the 1st column b = 2.0 * column( A, * 1UL ); // Scaling of the 1st column * * column( A, 2UL ) += a; // Addition assignment column( A, 2UL * ) -= c; // Subtraction assignment column( A, 2UL ) *= * column( A, 0UL ); // Multiplication assignment * * double scalar = trans( c ) * column( A, 1UL ); // Scalar/dot/inner product * between two vectors * * A = column( A, 1UL ) * trans( c ); // Outer product between two vectors * \endcode * * // \n \section views_columns_non_fitting_storage_order Views on Matrices with * Non-Fitting Storage Order // <hr> // // Especially noteworthy is that * column views can be created for both row-major and column-major // * matrices. Whereas the interface of a row-major matrix only allows to * traverse a row directly // and the interface of a column-major matrix only * allows to traverse a column, via views it is // possible to traverse a row * of a column-major matrix or a column of a row-major matrix. For // * instance: * * \code blaze::DynamicMatrix<int,blaze::rowMajor> A( 64UL, 32UL ); // ... * Resizing and initialization * * // Creating a reference to the 1st column of a column-major matrix A auto * col1 = column( A, 1UL ); * * for( auto it=col1.begin(); it!=col1.end(); ++it ) { // ... } \endcode * * // However, please note that creating a column view on a matrix stored in a * row-major fashion // can result in a considerable performance decrease in * comparison to a column view on a matrix // with column-major storage * format. This is due to the non-contiguous storage of the matrix // * elements. Therefore care has to be taken in the choice of the most * suitable storage order: * * \code // Setup of two row-major matrices * blaze::DynamicMatrix<double,blaze::rowMajor> A( 128UL, 128UL ); * blaze::DynamicMatrix<double,blaze::rowMajor> B( 128UL, 128UL ); // ... * Resizing and initialization * * // The computation of the 15th column of the multiplication between A and B * ... blaze::DynamicVector<double,blaze::columnVector> x = column( A * B, * 15UL ); * * // ... is essentially the same as the following computation, which multiplies * // A with the 15th column of the row-major matrix B. * blaze::DynamicVector<double,blaze::columnVector> x = A * column( B, 15UL * ); \endcode * * // Although \b Blaze performs the resulting matrix/vector multiplication as * efficiently as possible // using a column-major storage order for matrix * \c B would result in a more efficient evaluation. 
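// Following this advice, a minimal sketch of ours (not part of the documentation text above)
// stores B in column-major order so that the column view is formed on contiguous data:

   \code
   blaze::DynamicMatrix<double,blaze::rowMajor>    A( 128UL, 128UL );
   blaze::DynamicMatrix<double,blaze::columnMajor> B( 128UL, 128UL );
   // ... Resizing and initialization

   // The column view on the column-major matrix B refers to contiguous memory
   blaze::DynamicVector<double,blaze::columnVector> x = A * column( B, 15UL );
   \endcode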
// // \n Previous: \ref * views_row_selections &nbsp; &nbsp; Next: \ref views_column_selections */ //************************************************************************************************* //**Column Selections ****************************************************************************** /* * !\page views_column_selections Column Selections // // \tableofcontents // * // // Column selections provide views on arbitrary compositions of columns * of dense and sparse // matrices. These views act as a reference to the * selected columns and represent them as another // dense or sparse matrix. * This reference is valid and can be used in every way any other dense // or * sparse matrix can be used as long as the matrix containing the columns is * not resized or // entirely destroyed. The column selection also acts as an * alias to the matrix elements in the // specified range: Changes made to * the columns (e.g. modifying values, inserting or erasing // elements) are * immediately visible in the matrix and changes made via the matrix are * immediately // visible in the columns. // // // \n \section * views_column_selections_setup Setup of Column Selections // // A column * selection can be created very conveniently via the \c columns() function. * It can be // included via the header file * * \code #include <blaze/math/Columns.h> \endcode * * // The indices of the columns to be selected can be specified either at * compile time or at runtime // (by means of an initializer list, array or * vector): * * \code blaze::DynamicMatrix<double,blaze::columnMajor> A; // ... Resizing and * initialization * * // Selecting the columns 4, 6, 8, and 10 (compile time arguments) auto cs1 = * columns<4UL,6UL,8UL,10UL>( A ); * * // Selecting the columns 3, 2, and 1 (runtime arguments via an initializer * list) const std::initializer_list<size_t> list{ 3UL, 2UL, 1UL }; auto cs2 * = columns( A, { 3UL, 2UL, 1UL } ); auto cs3 = columns( A, list ); * * // Selecting the columns 1, 2, 3, 3, 2, and 1 (runtime arguments via a * std::array) const std::array<size_t> array{ 1UL, 2UL, 3UL, 3UL, 2UL, 1UL * }; auto cs4 = columns( A, array ); auto cs5 = columns( A, array.data(), * array.size() ); * * // Selecting the column 4 fives times (runtime arguments via a std::vector) * const std::vector<size_t> vector{ 4UL, 4UL, 4UL, 4UL, 4UL }; auto cs6 = * columns( A, vector ); auto cs7 = columns( A, vector.data(), vector.size() * ); \endcode * * // Note that it is possible to alias the columns of the underlying matrix in * any order. Also note // that it is possible to use the same index multiple * times. The \c columns() function returns an // expression representing the * view on the selected columns. The type of this expression depends // on * the given arguments, primarily the type of the matrix and the compile time * arguments. If // the type is required, it can be determined via the \c * decltype specifier: * * \code using MatrixType = blaze::DynamicMatrix<int>; using ColumnsType = * decltype( blaze::columns<3UL,0UL,4UL,8UL>( std::declval<MatrixType>() ) ); * \endcode * * // The resulting view can be treated as any other dense or sparse matrix, * i.e. it can be assigned // to, it can be copied from, and it can be used * in arithmetic operations. Note, however, that a // column selection will * always be treated as a column-major matrix, regardless of the storage // * order of the matrix containing the columns. 
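// As a brief illustration of the resulting dimensions (a sketch of ours; the concrete sizes
// are illustrative), the number of rows of a column selection equals the number of rows of
// the underlying matrix, while the number of columns equals the number of selected indices:

   \code
   blaze::DynamicMatrix<double,blaze::rowMajor> A( 6UL, 8UL );
   // ... Resizing and initialization

   // Selecting three columns of the row-major matrix A
   auto cs = columns( A, { 0UL, 2UL, 4UL } );

   cs.rows();     // Returns 6, the number of rows of A
   cs.columns();  // Returns 3, the number of selected columns
   \endcode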
The view can also be used on * both sides of an // assignment: It can either be used as an alias to grant * write access to specific columns of a // matrix primitive on the left-hand * side of an assignment or to grant read-access to specific // columns of a * matrix primitive or expression on the right-hand side of an assignment. * The // following example demonstrates this in detail: * * \code blaze::DynamicMatrix<double,blaze::columnMajor> A; * blaze::DynamicMatrix<double,blaze::rowMajor> B; * blaze::CompressedMatrix<double,blaze::columnMajor> C; // ... Resizing and * initialization * * // Selecting the columns 1, 3, 5, and 7 of A auto cs = columns( A, { 1UL, * 3UL, 5UL, 7UL } ); * * // Setting columns 1, 3, 5, and 7 of A to column 4 of B cs = columns( B, { * 4UL, 4UL, 4UL, 4UL } ); * * // Setting the columns 2, 4, 6, and 8 of A to C columns( A, { 2UL, 4UL, 6UL, * 8UL } ) = C; * * // Setting the first 4 columns of A to the columns 5, 4, 3, and 2 of C * submatrix( A, 0UL, 0UL, A.rows(), 4UL ) = columns( C, { 5UL, 4UL, 3UL, 2UL * } ); * * // Rotating the result of the addition between columns 1, 3, 5, and 7 of A * and C B = columns( cs + C, { 2UL, 3UL, 0UL, 1UL } ); \endcode * * // \warning It is the programmer's responsibility to ensure the column * selection does not outlive // the viewed matrix: * * \code // Creating a column selection on a temporary matrix; results in a * dangling reference! auto cs = columns<2UL,0UL>( DynamicMatrix<int>{ { 1, * 2, 3 }, { 4, 5, 6 }, { 7, 8, 9 } } ); \endcode * * // \n \section views_column_selections_element_access Element Access // // * The elements of a column selection can be directly accessed via the * function call operator: * * \code blaze::DynamicMatrix<double,blaze::columnMajor> A; // ... Resizing and * initialization * * // Creating a view on the first four columns of A in reverse order auto cs = * columns( A, { 3UL, 2UL, 1UL, 0UL } ); * * // Setting the element (0,0) of the column selection, which corresponds // to * the element at position (0,3) in matrix A cs(0,0) = 2.0; \endcode * * // Alternatively, the elements of a column selection can be traversed via * (const) iterators. // Just as with matrices, in case of non-const column * selection, \c begin() and \c end() return // an iterator, which allows to * manipuate the elements, in case of constant column selection an // * iterator to immutable elements is returned: * * \code blaze::DynamicMatrix<int,blaze::columnMajor> A( 512UL, 256UL ); // ... * Resizing and initialization * * // Creating a reference to a selection of columns of matrix A auto cs = * columns( A, { 16UL, 32UL, 64UL, 128UL } ); * * // Traversing the elements of the 0th column via iterators to non-const * elements for( auto it=cs.begin(0); it!=cs.end(0); ++it ) { it = ...; // * OK: Write access to the dense value. ... = *it; // OK: Read access to the * dense value. } * * // Traversing the elements of the 1st column via iterators to const elements * for( auto it=cs.cbegin(1); it!=cs.cend(1); ++it ) { it = ...; // * Compilation error: Assignment to the value via iterator-to-const is * invalid. ... = *it; // OK: Read access to the dense value. } \endcode * * \code blaze::CompressedMatrix<int,blaze::columnMajor> A( 512UL, 256UL ); // * ... 
Resizing and initialization * * // Creating a reference to a selection of columns of matrix A auto cs = * columns( A, { 16UL, 32UL, 64UL, 128UL } ); * * // Traversing the elements of the 0th column via iterators to non-const * elements for( auto it=cs.begin(0); it!=cs.end(0); ++it ) { it->value() = * ...; // OK: Write access to the value of the non-zero element. ... = * it->value(); // OK: Read access to the value of the non-zero element. * it->index() = ...; // Compilation error: The index of a non-zero element * cannot be changed. ... = it->index(); // OK: Read access to the index of * the sparse element. } * * // Traversing the elements of the 1st column via iterators to const elements * for( auto it=cs.cbegin(1); it!=cs.cend(1); ++it ) { it->value() = ...; // * Compilation error: Assignment to the value via iterator-to-const is * invalid. ... = it->value(); // OK: Read access to the value of the * non-zero element. it->index() = ...; // Compilation error: The index of a * non-zero element cannot be changed. ... = it->index(); // OK: Read access * to the index of the sparse element. } \endcode * * // \n \section views_column_selections_element_insertion Element Insertion // * // Inserting/accessing elements in a sparse column selection can be done * by several alternative // functions. The following example demonstrates * all options: * * \code blaze::CompressedMatrix<double,blaze::columnMajor> A( 512UL, 256UL ); * // Non-initialized matrix of size 512x256 * * auto cs = columns( A, { 10UL, 20UL, 30UL, 40UL } ); // View on the columns * 10, 20, 30, and 40 of A * * // The function call operator provides access to all possible elements of the * sparse column // selection, including the zero elements. In case the * function call operator is used to // access an element that is currently * not stored in the sparse column selection, the element // is inserted into * the column selection. cs(2,4) = 2.0; * * // The second operation for inserting elements is the set() function. In case * the element is // not contained in the column selection it is inserted * into the column selection, if it is // already contained in the column * selection its value is modified. cs.set( 2UL, 5UL, -1.2 ); * * // An alternative for inserting elements into the column selection is the * insert() function. // However, it inserts the element only in case the * element is not already contained in the // column selection. cs.insert( * 2UL, 6UL, 3.7 ); * * // Just as in the case of sparse matrices, elements can also be inserted via * the append() // function. In case of column selections, append() also * requires that the appended element's // index is strictly larger than the * currently largest non-zero index in the according column // of the column * selection and that the according column's capacity is large enough to hold * the // new element. Note however that due to the nature of a column * selection, which may be an alias // to an arbitrary collection of columns, * the append() function does not work as efficiently // for a column * selection as it does for a matrix. cs.reserve( 2UL, 10UL ); cs.append( * 2UL, 10UL, -2.1 ); \endcode * * // \n \section views_column_selections_common_operations Common Operations // * // A view on specific columns of a matrix can be used like any other dense * or sparse matrix. For // instance, the current size of the matrix, i.e. 
* the number of rows or columns can be obtained // via the \c rows() and \c * columns() functions, the current total capacity via the \c capacity() // * function, and the number of non-zero elements via the \c nonZeros() * function. However, since // column selections are views on specific * columns of a matrix, several operations are not possible, // such as * resizing and swapping: * * \code blaze::DynamicMatrix<int,blaze::columnMajor> A( 42UL, 42UL ); // ... * Resizing and initialization * * // Creating a view on the columns 8, 16, 24, and 32 of matrix A auto cs = * columns( A, { 8UL, 16UL, 24UL, 32UL } ); * * cs.rows(); // Returns the number of rows of the column selection * cs.columns(); // Returns the number of columns of the column selection * cs.capacity(); // Returns the capacity of the column selection * cs.nonZeros(); // Returns the number of non-zero elements contained in * the column selection * * cs.resize( 10UL, 8UL ); // Compilation error: Cannot resize a column * selection * * auto cs2 = columns( A, 9UL, 17UL, 25UL, 33UL ); swap( cs, cs2 ); // * Compilation error: Swap operation not allowed \endcode * * // \n \section views_column_selections_arithmetic_operations Arithmetic * Operations // // Both dense and sparse column selections can be used in * all arithmetic operations that any other // dense or sparse matrix can be * used in. The following example gives an impression of the use of // dense * column selctions within arithmetic operations. All operations (addition, * subtraction, // multiplication, scaling, ...) can be performed on all * possible combinations of dense and // sparse matrices with fitting element * types: * * \code blaze::DynamicMatrix<double,blaze::columnMajor> D1, D2, D3; * blaze::CompressedMatrix<double,blaze::columnMajor> S1, S2; * * blaze::CompressedVector<double,blaze::columnVector> a, b; * * // ... Resizing and initialization * * std::initializer_list<size_t> indices1{ 0UL, 3UL, 6UL, 9UL, 12UL, 15UL, * 18UL, 21UL }; std::initializer_list<size_t> indices2{ 1UL, 4UL, 7UL, 10UL, * 13UL, 16UL, 19UL, 22UL }; std::initializer_list<size_t> indices3{ 2UL, * 5UL, 8UL, 11UL, 14UL, 17UL, 20UL, 23UL }; * * auto cs = columns( D1, indices1 ); // Selecting the every third column of D1 * in the range [0..21] * * cs = D2; // Dense matrix assignment to the selected * columns columns( D1, indices2 ) = S1; // Sparse matrix assignment to the * selected columns * * D3 = cs + D2; // Dense matrix/dense matrix addition S2 * = S1 - columns( D1, indices2 ); // Sparse matrix/dense matrix subtraction * D2 = cs % columns( D1, indices3 ); // Dense matrix/dense matrix Schur * product D2 = columns( D1, indices2 ) * D1; // Dense matrix/dense matrix * multiplication * * columns( D1, indices2 ) *= 2.0; // In-place scaling of the second * selection of columns D2 = columns( D1, indices3 ) * 2.0; // Scaling of * the elements in the third selection of columns D2 = 2.0 * columns( D1, * indices3 ); // Scaling of the elements in the third selection of columns * * columns( D1, indices1 ) += D2; // Addition assignment columns( D1, indices2 * ) -= S1; // Subtraction assignment columns( D1, indices3 ) %= cs; // * Schur product assignment * * a = columns( D1, indices1 ) * b; // Dense matrix/sparse vector * multiplication \endcode * * // \n \section views_column_selections_on_row_major_matrix Column Selections * on a Row-Major Matrix // // Especially noteworthy is that column * selections can be created for both row-major and // column-major matrices. 
// Whereas the interface of a row-major matrix only allows to traverse a row directly and the
// interface of a column-major matrix only allows to traverse a column, via views it is possible
// to traverse a row of a column-major matrix or a column of a row-major matrix. For instance:

\code
blaze::DynamicMatrix<int,blaze::rowMajor> A( 64UL, 32UL );
// ... Resizing and initialization

// Creating a reference to the 1st and 3rd column of the row-major matrix A
auto cs = columns( A, { 1UL, 3UL } );

// Traversing column 0 of the selection, which corresponds to the 1st column of matrix A
for( auto it=cs.begin( 0UL ); it!=cs.end( 0UL ); ++it ) {
   // ...
}
\endcode

// However, please note that creating a column selection on a matrix stored in a row-major fashion
// can result in a considerable performance decrease in comparison to a column selection on a
// matrix with column-major storage format. This is due to the non-contiguous storage of the
// matrix elements. Therefore care has to be taken in the choice of the most suitable storage
// order:

\code
// Setup of two row-major matrices
blaze::DynamicMatrix<double,blaze::rowMajor> A( 128UL, 128UL );
blaze::DynamicMatrix<double,blaze::rowMajor> B( 128UL, 128UL );
// ... Resizing and initialization

// The computation of the 15th, 30th, and 45th column of the multiplication between A and B ...
blaze::DynamicMatrix<double,blaze::columnMajor> x = columns( A * B, { 15UL, 30UL, 45UL } );

// ... is essentially the same as the following computation, which multiplies
// A with the 15th, 30th, and 45th column of the row-major matrix B.
blaze::DynamicMatrix<double,blaze::columnMajor> x = A * columns( B, { 15UL, 30UL, 45UL } );
\endcode

// Although \b Blaze performs the resulting matrix/matrix multiplication as efficiently as
// possible, using a column-major storage order for matrix \c B would result in a more efficient
// evaluation.
//
// \n Previous: \ref views_columns &nbsp; &nbsp; Next: \ref views_bands
*/
//*************************************************************************************************

//**Bands******************************************************************************************
/*!\page views_bands Bands
//
// \tableofcontents
//
//
// Bands provide views on a specific band of a dense or sparse matrix (e.g. the diagonal, the
// subdiagonal, ...). As such, bands act as a reference to a specific band. This reference
// is valid and can be used in every way any other vector can be used as long as the matrix
// containing the band is not resized or entirely destroyed. The band also acts as an alias to
// the band elements: Changes made to the elements (e.g. modifying values, inserting or erasing
// elements) are immediately visible in the matrix and changes made via the matrix are immediately
// visible in the band.
//
//
// \n \section views_bands_setup Setup of Bands
// <hr>
//
// \image html band.png
// \image latex band.eps "Band view" width=250pt
//
// A reference to a dense or sparse band can be created very conveniently via the \c band()
// function. It can be included via the header file

\code
#include <blaze/math/Band.h>
\endcode

// The band index must be in the range from \f$[min(0,1-M)..max(0,N-1)]\f$, where \c M is the
// total number of rows and \c N is the total number of columns, and can be specified either at
// compile time or at runtime:

\code
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ...
Resizing and * initialization * * // Creating a reference to the 1st lower band of matrix A (compile time * index) auto band1 = band<-1L>( A ); * * // Creating a reference to the 2nd upper band of matrix A (runtime index) * auto band2 = band( A, 2L ); \endcode * * // In addition, the \c diagonal() function provides a convenient shortcut for * the setup of a view // on the diagonal of a dense or sparse matrix. It has * the same effect as calling the \c band() // function with a compile time * index of 0: * * \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and * initialization * * // Creating a reference to the diagonal of matrix A via the band() and * diagonal() functions auto diag1 = band<0L>( A ); auto diag2 = diagonal( A * ); * * static_assert( blaze::IsSame< decltype(diag1), decltype(diag2) >::value, * "Non-identical types detected" ); \endcode * * // Both the \c band() and the \c diagonal() function return an expression * representing the band // view. The type of this expression depends on the * given arguments, primarily the type of the // matrix and the compile time * arguments. If the type is required, it can be determined via // \c * decltype specifier: * * \code using MatrixType = blaze::DynamicMatrix<int>; using BandType = * decltype( blaze::band<1L>( std::declval<MatrixType>() ) ); using * DiagonalType = decltype( blaze::diagonal( std::declval<MatrixType>() ) ); * \endcode * * // This resulting view can be treated as any other vector, i.e. it can be * assigned to, it can // be copied from, and it can be used in arithmetic * operations. By default, bands are considered // column vectors, but this * setting can be changed via the \c defaultTransposeFlag switch. The // * reference can also be used on both sides of an assignment: The band can * either be used as an // alias to grant write access to a specific band of * a matrix primitive on the left-hand side of // an assignment or to grant * read-access to a specific band of a matrix primitive or expression // on * the right-hand side of an assignment. The following example demonstrates * this in detail: * * \code blaze::DynamicVector<double,blaze::rowVector> x; * blaze::CompressedVector<double,blaze::rowVector> y; * blaze::DynamicMatrix<double,blaze::rowMajor> A, B; * blaze::CompressedMatrix<double,blaze::rowMajor> C, D; // ... Resizing and * initialization * * // Setting the 2nd upper band of matrix A to x auto band2 = band( A, 2L ); * band2 = x; * * // Setting the 3rd upper band of matrix B to y band( B, 3L ) = y; * * // Setting x to the 2nd lower band of the result of the matrix multiplication * x = band( A * B, -2L ); * * // Setting y to the 2nd upper band of the result of the sparse matrix * multiplication y = band( C * D, 2L ); \endcode * * // \warning It is the programmer's responsibility to ensure the band does not * outlive the viewed // matrix: * * \code // Creating a band on a temporary matrix; results in a dangling * reference! auto band1 = band<1L>( DynamicMatrix<int>{ { 1, 2, 3 }, { 4, 5, * 6 }, { 7, 8, 9 } } ); \endcode * * // \n \section views_bands_element_access Element Access // <hr> // // The * elements of a band can be directly accessed with the subscript operator: * * \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... 
Resizing and * initialization * * // Creating a view on the 4th upper band of matrix A auto band4 = band( A, 4L * ); * * // Setting the 1st element of the dense band, which corresponds // to the 1st * element in the 4th upper band of matrix A band4[1] = 2.0; \endcode * * // The numbering of the band elements is * * \f[\left(\begin{array}{*{5}{c}} 0 & 1 & 2 & \cdots & N-1 \\ * \end{array}\right),\f] * * // where N is the number of elements of the referenced band. Alternatively, * the elements of a band // can be traversed via iterators. Just as with * vectors, in case of non-const band, \c begin() and // \c end() return an * iterator, which allows to manipulate the elements, in case of constant * bands // an iterator to immutable elements is returned: * * \code blaze::DynamicMatrix<int,blaze::rowMajor> A( 128UL, 256UL ); // ... * Resizing and initialization * * // Creating a reference to the 5th upper band of matrix A auto band5 = band( * A, 5L ); * * // Traversing the elements via iterators to non-const elements for( auto * it=band5.begin(); it!=band5.end(); ++it ) { it = ...; // OK; Write access * to the dense band value ... = *it; // OK: Read access to the dense band * value. } * * // Traversing the elements via iterators to const elements for( auto * it=band5.cbegin(); it!=band5.cend(); ++it ) { it = ...; // Compilation * error: Assignment to the value via iterator-to-const is invalid. ... = * *it; // OK: Read access to the dense band value. } \endcode * * \code blaze::CompressedMatrix<int,blaze::rowMajor> A( 128UL, 256UL ); // ... * Resizing and initialization * * // Creating a reference to the 5th band of matrix A auto band5 = band( A, 5L * ); * * // Traversing the elements via iterators to non-const elements for( auto * it=band5.begin(); it!=band5.end(); ++it ) { it->value() = ...; // OK: * Write access to the value of the non-zero element. ... = it->value(); // * OK: Read access to the value of the non-zero element. it->index() = ...; * // Compilation error: The index of a non-zero element cannot be changed. * ... = it->index(); // OK: Read access to the index of the sparse element. * } * * // Traversing the elements via iterators to const elements for( auto * it=band5.cbegin(); it!=band5.cend(); ++it ) { it->value() = ...; // * Compilation error: Assignment to the value via iterator-to-const is * invalid. ... = it->value(); // OK: Read access to the value of the * non-zero element. it->index() = ...; // Compilation error: The index of a * non-zero element cannot be changed. ... = it->index(); // OK: Read access * to the index of the sparse element. } \endcode * * // \n \section views_bands_element_insertion Element Insertion // <hr> // // * Inserting/accessing elements in a sparse band can be done by several * alternative functions. // The following example demonstrates all options: * * \code blaze::CompressedMatrix<double,blaze::rowMajor> A( 10UL, 100UL ); // * Non-initialized 10x100 matrix * * auto diag( band( A, 0L ) ); // Reference to the diagonal of A * * // The subscript operator provides access to all possible elements of the * sparse band, // including the zero elements. In case the subscript * operator is used to access an element // that is currently not stored in * the sparse band, the element is inserted into the band. diag[42] = 2.0; * * // The second operation for inserting elements is the set() function. In case * the element // is not contained in the band it is inserted into the band, * if it is already contained in // the band its value is modified. 
diag.set( * 45UL, -1.2 ); * * // An alternative for inserting elements into the band is the insert() * function. However, // it inserts the element only in case the element is * not already contained in the band. diag.insert( 50UL, 3.7 ); \endcode * * // \n \section views_bands_common_operations Common Operations // <hr> // // * A band view can be used like any other column vector. This means that with * only a few // exceptions all \ref vector_operations and \ref * arithmetic_operations can be used. For instance, // the current number of * band elements can be obtained via the \c size() function, the current // * capacity via the \c capacity() function, and the number of non-zero * elements via the // \c nonZeros() function. However, since bands are * references to specific bands of a matrix, // several operations are not * possible, such as resizing and swapping. The following example // shows * this by means of a dense band view: * * \code blaze::DynamicMatrix<int,blaze::rowMajor> A( 42UL, 42UL ); // ... * Resizing and initialization * * // Creating a reference to the 2nd upper band of matrix A auto band2 = band( * A, 2L ); * * band2.size(); // Returns the number of elements in the band * band2.capacity(); // Returns the capacity of the band * band2.nonZeros(); // Returns the number of non-zero elements * contained in the band * * band2.resize( 84UL ); // Compilation error: Cannot resize a single band of a * matrix * * auto band3 = band( A, 3L ); swap( band2, band3 ); // Compilation error: * Swap operation not allowed \endcode * * // \n \section views_bands_arithmetic_operations Arithmetic Operations // * <hr> // // Both dense and sparse bands can be used in all arithmetic * operations that any other dense or // sparse vector can be used in. The * following example gives an impression of the use of dense // bands within * arithmetic operations. All operations (addition, subtraction, * multiplication, // scaling, ...) 
can be performed on all possible * combinations of dense and sparse bands with // fitting element types: * * \code blaze::DynamicVector<double,blaze::columnVector> a( 2UL, 2.0 ), b; * blaze::CompressedVector<double,blaze::columnVector> c( 2UL ); c[1] = 3.0; * * blaze::DynamicMatrix<double,blaze::rowMajor> A( 4UL, 2UL ); // * Non-initialized 4x2 matrix * * auto band1( band( A, 1L ) ); // Reference to the 1st upper band of A auto * diag ( band( A, 0L ) ); // Reference to the diagonal of A * * band1[0] = 0.0; // Manual initialization of the 1st upper band of A diag * = 1.0; // Homogeneous initialization of the diagonal of A band( * A, -1L ) = a; // Dense vector initialization of the 1st lower band of A * band( A, -2L ) = c; // Sparse vector initialization of the 2nd lower band * of A * * b = diag + a; // Dense vector/dense vector addition b = c + * band( A, -1L ); // Sparse vector/dense vector addition b = diag * * band( A, -2L ); // Component-wise vector multiplication * * band( A, -1L ) *= 2.0; // In-place scaling of the 1st upper band b = * band( A, -1L ) * 2.0; // Scaling of the 1st upper band b = 2.0 * band( A, * -1L ); // Scaling of the 1st upper band * * band( A, -2L ) += a; // Addition assignment band( A, -2L ) -= c; * // Subtraction assignment band( A, -2L ) *= band( A, 0L ); // * Multiplication assignment * * double scalar = trans( c ) * band( A, -1L ); // Scalar/dot/inner product * between two vectors * * A = band( A, -1L ) * trans( c ); // Outer product between two vectors * \endcode * * // \n Previous: \ref views_column_selections &nbsp; &nbsp; Next: \ref * arithmetic_operations */ //************************************************************************************************* //**Arithmetic Operations ************************************************************************** /* * !\page arithmetic_operations Arithmetic Operations // // \tableofcontents * // // // \b Blaze provides the following arithmetic operations for vectors * and matrices: // // <ul> // <li> \ref addition </li> // <li> \ref * subtraction </li> // <li> \ref scalar_multiplication </li> // <li> * \ref vector_vector_multiplication // <ul> // <li> \ref * componentwise_multiplication </li> // <li> \ref inner_product * </li> // <li> \ref outer_product </li> // <li> \ref * cross_product </li> // </ul> // </li> // <li> \ref * vector_vector_division </li> // <li> \ref matrix_vector_multiplication * </li> // <li> \ref matrix_matrix_multiplication </li> // </ul> // // \n * Previous: \ref views_bands &nbsp; &nbsp; Next: \ref addition */ //************************************************************************************************* //**Addition *************************************************************************************** /* * !\page addition Addition // // The addition of vectors and matrices is as * intuitive as the addition of scalar values. For both // the vector * addition as well as the matrix addition the addition operator can be used. * It even // enables the addition of dense and sparse vectors as well as the * addition of dense and sparse // matrices: * * \code blaze::DynamicVector<int> v1( 5UL ), v3; * blaze::CompressedVector<float> v2( 5UL ); * * // ... Initializing the vectors * * v3 = v1 + v2; // Addition of a two column vectors of different data type * \endcode * * \code blaze::DynamicMatrix<float,rowMajor> M1( 7UL, 3UL ); * blaze::CompressedMatrix<size_t,columnMajor> M2( 7UL, 3UL ), M3; * * // ... 
Initializing the matrices * * M3 = M1 + M2; // Addition of a row-major and a column-major matrix of * different data type \endcode * * // Note that it is necessary that both operands have exactly the same * dimensions. Violating this // precondition results in an exception. Also * note that in case of vectors it is only possible to // add vectors with * the same transpose flag: * * \code blaze::DynamicVector<int,columnVector> v1( 5UL ); * blaze::CompressedVector<float,rowVector> v2( 5UL ); * * v1 + v2; // Compilation error: Cannot add a column vector and a row * vector v1 + trans( v2 ); // OK: Addition of two column vectors \endcode * * // In case of matrices, however, it is possible to add row-major and * column-major matrices. Note // however that in favor of performance the * addition of two matrices with the same storage order // is favorable. The * same argument holds for the element type: In case two vectors or matrices * // with the same element type are added, the performance can be much * higher due to vectorization // of the operation. * * \code blaze::DynamicVector<double>v1( 100UL ), v2( 100UL ), v3; * * // ... Initialization of the vectors * * v3 = v1 + v2; // Vectorized addition of two double precision vectors * \endcode * * \code blaze::DynamicMatrix<float> M1( 50UL, 70UL ), M2( 50UL, 70UL ), M3; * * // ... Initialization of the matrices * * M3 = M1 + M2; // Vectorized addition of two row-major, single precision * dense matrices \endcode * * // \n Previous: \ref arithmetic_operations &nbsp; &nbsp; Next: \ref * subtraction */ //************************************************************************************************* //**Subtraction ************************************************************************************ /* * !\page subtraction Subtraction // // The subtraction of vectors and * matrices works exactly as intuitive as the addition, but with // the * subtraction operator. For both the vector subtraction as well as the * matrix subtraction // the subtraction operator can be used. It also * enables the subtraction of dense and sparse // vectors as well as the * subtraction of dense and sparse matrices: * * \code blaze::DynamicVector<int> v1( 5UL ), v3; * blaze::CompressedVector<float> v2( 5UL ); * * // ... Initializing the vectors * * v3 = v1 - v2; // Subtraction of a two column vectors of different data type * * * blaze::DynamicMatrix<float,rowMajor> M1( 7UL, 3UL ); * blaze::CompressedMatrix<size_t,columnMajor> M2( 7UL, 3UL ), M3; * * // ... Initializing the matrices * * M3 = M1 - M2; // Subtraction of a row-major and a column-major matrix of * different data type \endcode * * // Note that it is necessary that both operands have exactly the same * dimensions. Violating this // precondition results in an exception. Also * note that in case of vectors it is only possible to // subtract vectors * with the same transpose flag: * * \code blaze::DynamicVector<int,columnVector> v1( 5UL ); * blaze::CompressedVector<float,rowVector> v2( 5UL ); * * v1 - v2; // Compilation error: Cannot subtract a row vector from a * column vector v1 - trans( v2 ); // OK: Subtraction of two column vectors * \endcode * * // In case of matrices, however, it is possible to subtract row-major and * column-major matrices. // Note however that in favor of performance the * subtraction of two matrices with the same storage // order is favorable. 
* The same argument holds for the element type: In case two vectors or * matrices // with the same element type are added, the performance can be * much higher due to vectorization // of the operation. * * \code blaze::DynamicVector<double>v1( 100UL ), v2( 100UL ), v3; * * // ... Initialization of the vectors * * v3 = v1 - v2; // Vectorized subtraction of two double precision vectors * * * blaze::DynamicMatrix<float> M1( 50UL, 70UL ), M2( 50UL, 70UL ), M3; * * // ... Initialization of the matrices * * M3 = M1 - M2; // Vectorized subtraction of two row-major, single precision * dense matrices \endcode * * // \n Previous: \ref addition &nbsp; &nbsp; Next: \ref scalar_multiplication */ //************************************************************************************************* //**Scalar Multiplication ************************************************************************** /* * !\page scalar_multiplication Scalar Multiplication // // The scalar * multiplication is the multiplication of a scalar value with a vector or a * matrix. // In \b Blaze it is possible to use all built-in/fundamental data * types except bool as scalar // values. Additionally, it is possible to use * std::complex values with the same built-in data // types as element type. * * \code blaze::StaticVector<int,3UL> v1{ 1, 2, 3 }; * * blaze::DynamicVector<double> v2 = v1 * 1.2; blaze::CompressedVector<float> * v3 = -0.3F * v1; \endcode * * \code blaze::StaticMatrix<int,3UL,2UL> M1{ { 1, 2 }, { 3, 4 }, { 5, 6 } }; * * blaze::DynamicMatrix<double> M2 = M1 * 1.2; blaze::CompressedMatrix<float> * M3 = -0.3F * M1; \endcode * * // Vectors and matrices cannot be used for as scalar value for scalar * multiplications (see the // following example). However, each vector and * matrix provides the \c scale() function, which // can be used to scale a * vector or matrix element-wise with arbitrary scalar data types: * * \code blaze::CompressedMatrix< blaze::StaticMatrix<int,3UL,3UL> > M1; * blaze::StaticMatrix<int,3UL,3UL> scalar; * * M1 * scalar; // No scalar multiplication, but matrix/matrix multiplication * * M1.scale( scalar ); // Scalar multiplication \endcode * * // \n Previous: \ref subtraction &nbsp; &nbsp; Next: \ref * componentwise_multiplication */ //************************************************************************************************* //**Vector / Vector Multiplication ******************************************************************* /* * !\page vector_vector_multiplication Vector/Vector Multiplication // // \n * \section componentwise_multiplication Componentwise Multiplication // <hr> * // // Multiplying two vectors with the same transpose flag (i.e. either * blaze::columnVector or // blaze::rowVector) via the multiplication * operator results in a componentwise multiplication // of the two vectors: * * \code using blaze::DynamicVector; using blaze::CompressedVector; * * CompressedVector<int,columnVector> v1( 17UL ); * DynamicVector<int,columnVector> v2( 17UL ); * * StaticVector<double,10UL,rowVector> v3; DynamicVector<double,rowVector> * v4( 10UL ); * * // ... Initialization of the vectors * * CompressedVector<int,columnVector> v5( v1 * v2 ); // Componentwise * multiplication of a sparse and // a dense column vector. The result is a * sparse // column vector. DynamicVector<double,rowVector> v6( v3 * v4 ); * // Componentwise multiplication of two dense row // vectors. The result is * a dense row vector. 
\endcode

// \n \section inner_product Inner Product / Scalar Product / Dot Product
// <hr>
//
// The multiplication between a row vector and a column vector results in an inner product between
// the two vectors:

\code
blaze::StaticVector<int,3UL,rowVector> v1{  2, 5, -1 };
blaze::DynamicVector<int,columnVector> v2{ -1, 3, -2 };

int result = v1 * v2;  // Results in the value 15
\endcode

// The \c trans() function can be used to transpose a vector as necessary:

\code
blaze::StaticVector<int,3UL,rowVector> v1{  2, 5, -1 };
blaze::StaticVector<int,3UL,rowVector> v2{ -1, 3, -2 };

int result = v1 * trans( v2 );  // Also results in the value 15
\endcode

// Alternatively, either the \c inner() function, the \c dot() function or the comma operator can
// be used for any combination of vectors (row or column vectors) to perform an inner product:

\code
blaze::StaticVector<int,3UL,columnVector> v1{  2, 5, -1 };
blaze::StaticVector<int,3UL,rowVector>    v2{ -1, 3, -2 };

// All alternatives for the inner product between a column vector and a row vector
int result1 = trans( v1 ) * trans( v2 );
int result2 = inner( v1, v2 );
int result3 = dot( v1, v2 );
int result4 = (v1,v2);
\endcode

// When using the comma operator, please note the brackets embracing the inner product expression.
// Due to the low precedence of the comma operator (lower even than the assignment operator) these
// brackets are strictly required for a correct evaluation of the inner product.
//
//
// \n \section outer_product Outer Product
// <hr>
//
// The multiplication between a column vector and a row vector results in the outer product of
// the two vectors:

\code
blaze::StaticVector<int,3UL,columnVector> v1{  2, 5, -1 };
blaze::DynamicVector<int,rowVector>       v2{ -1, 3, -2 };

StaticMatrix<int,3UL,3UL> M1 = v1 * v2;
\endcode

// The \c trans() function can be used to transpose a vector as necessary:

\code
blaze::StaticVector<int,3UL,rowVector> v1{  2, 5, -1 };
blaze::StaticVector<int,3UL,rowVector> v2{ -1, 3, -2 };

StaticMatrix<int,3UL,3UL> M1 = trans( v1 ) * v2;
\endcode

// Alternatively, the \c outer() function can be used for any combination of vectors (row or column
// vectors) to perform an outer product:

\code
blaze::StaticVector<int,3UL,rowVector> v1{  2, 5, -1 };
blaze::StaticVector<int,3UL,rowVector> v2{ -1, 3, -2 };

StaticMatrix<int,3UL,3UL> M1 = outer( v1, v2 );  // Outer product between two row vectors
\endcode

// \n \section cross_product Cross Product
// <hr>
//
// Two vectors with the same transpose flag can be multiplied via the cross product. The cross
// product between two vectors \f$ a \f$ and \f$ b \f$ is defined as

\f[
   \left(\begin{array}{*{1}{c}}
   c_0 \\
   c_1 \\
   c_2 \\
   \end{array}\right)
   =
   \left(\begin{array}{*{1}{c}}
   a_1 b_2 - a_2 b_1 \\
   a_2 b_0 - a_0 b_2 \\
   a_0 b_1 - a_1 b_0 \\
   \end{array}\right).
\f]

// Due to the absence of a \f$ \times \f$ operator in the C++ language, the cross product is
// realized via the \c cross() function. Alternatively, the modulo operator (i.e.
\c operator%) // can be used in case infix * notation is required: * * \code blaze::StaticVector<int,3UL,columnVector> v1{ 2, 5, -1 }; * blaze::DynamicVector<int,columnVector> v2{ -1, 3, -2 }; * * blaze::StaticVector<int,3UL,columnVector> v3( cross( v1, v2 ) ); * blaze::StaticVector<int,3UL,columnVector> v4( v1 % v2 ); \endcode * * // Please note that the cross product is restricted to three dimensional * (dense and sparse) // column vectors. // // \n Previous: \ref * scalar_multiplication &nbsp; &nbsp; Next: \ref vector_vector_division */ //************************************************************************************************* //**Vector / Vector Division ************************************************************************* /* * !\page vector_vector_division Vector/Vector Division // // \n \section * componentwise_division Componentwise Division // <hr> // // Dividing a * vector by a dense vector with the same transpose flag (i.e. either * blaze::columnVector // or blaze::rowVector) via the division operator * results in a componentwise division: * * \code using blaze::DynamicVector; using blaze::CompressedVector; * * CompressedVector<int,columnVector> v1( 17UL ); * DynamicVector<int,columnVector> v2( 17UL ); * * StaticVector<double,10UL,rowVector> v3; DynamicVector<double,rowVector> * v4( 10UL ); * * // ... Initialization of the vectors * * CompressedVector<int,columnVector> v5( v1 / v2 ); // Componentwise division * of a sparse and a // dense column vector. The result is a sparse // column * vector. DynamicVector<double,rowVector> v6( v3 / v4 ); // * Componentwise division of two dense row // vectors. The result is a dense * row vector. \endcode * * // Note that all values of the divisor must be non-zero and that no checks * are performed to assert // this precondition! // // \n Previous: \ref * vector_vector_multiplication &nbsp; &nbsp; Next: \ref * matrix_vector_multiplication */ //************************************************************************************************* //**Matrix / Vector Multiplication ******************************************************************* /* * !\page matrix_vector_multiplication Matrix/Vector Multiplication // // In * \b Blaze matrix/vector multiplications can be as intuitively formulated as * in mathematical // textbooks. Just as in textbooks there are two different * multiplications between a matrix and // a vector: a matrix/column vector * multiplication and a row vector/matrix multiplication: * * \code using blaze::StaticVector; using blaze::DynamicVector; using * blaze::DynamicMatrix; * * DynamicMatrix<int> M1( 39UL, 12UL ); * StaticVector<int,12UL,columnVector> v1; * * // ... Initialization of the matrix and the vector * * DynamicVector<int,columnVector> v2 = M1 * v1; // Matrix/column * vector multiplication DynamicVector<int,rowVector> v3 = trans( v1 ) * * M1; // Row vector/matrix multiplication \endcode * * // Note that the storage order of the matrix poses no restrictions on the * operation. Also note, // that the highest performance for a multiplication * between a dense matrix and a dense vector can // be achieved if both the * matrix and the vector have the same scalar element type. 
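// As an illustrative sketch (not part of the original example; the sizes and variable names are
// chosen freely here), giving both operands the element type \c double allows the multiplication
// kernel to be vectorized:

\code
blaze::DynamicMatrix<double> M( 300UL, 200UL );
blaze::DynamicVector<double> x( 200UL ), y;

// ... Initialization of the matrix and the vector

y = M * x;  // Both operands share the element type double, so the kernel can be vectorized
\endcode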
// // \n * Previous: \ref vector_vector_division &nbsp; &nbsp; Next: \ref * matrix_matrix_multiplication */ //************************************************************************************************* //**Matrix / Matrix Multiplication ******************************************************************* /* * !\page matrix_matrix_multiplication Matrix/Matrix Multiplication // // \n * \section schur_product Componentwise Multiplication / Schur Product // * <hr> // // Multiplying two matrices with the same dimensions (i.e. the * same number of rows and columns) // via the modulo operator results in a * componentwise multiplication (Schur product) of the two // matrices: * * \code using blaze::DynamicMatrix; using blaze::CompressedMatrix; * * DynamicMatrix<double> M1( 28UL, 35UL ); CompressedMatrix<float> M2( 28UL, * 35UL ); * * // ... Initialization of the matrices * * DynamicMatrix<double> M3 = M1 % M2; \endcode * * // \n \section matrix_product Matrix Product // <hr> // // The matrix/matrix * product can be formulated exactly as in mathematical textbooks: * * \code using blaze::DynamicMatrix; using blaze::CompressedMatrix; * * DynamicMatrix<double> M1( 45UL, 85UL ); CompressedMatrix<float> M2( 85UL, * 37UL ); * * // ... Initialization of the matrices * * DynamicMatrix<double> M3 = M1 * M2; \endcode * * // The storage order of the two matrices poses no restrictions on the * operation, all variations // are possible. It is also possible to multiply * two matrices with different element type, as // long as the element types * themselves can be multiplied and added. Note however that the // highest * performance for a multiplication between two matrices can be expected for * two // matrices with the same scalar element type. // // In case the * resulting matrix is known to be symmetric, Hermitian, lower triangular, * upper // triangular, or diagonal, the computation can be optimized by * explicitly declaring the // multiplication as symmetric, Hermitian, lower * triangular, upper triangular, or diagonal by // means of the \ref * matrix_operations_declaration_operations : * * \code using blaze::DynamicMatrix; * * DynamicMatrix<double> M1, M2, M3; * * // ... Initialization of the square matrices * * M3 = declsym ( M1 * M2 ); // Declare the result of the matrix multiplication * as symmetric M3 = declherm( M1 * M2 ); // Declare the result of the * matrix multiplication as Hermitian M3 = decllow ( M1 * M2 ); // Declare * the result of the matrix multiplication as lower triangular M3 = declupp ( * M1 * M2 ); // Declare the result of the matrix multiplication as upper * triangular M3 = decldiag( M1 * M2 ); // Declare the result of the matrix * multiplication as diagonal \endcode * * // Using a declaration operation on the a multiplication expression can speed * up the computation // by a factor of 2. Note however that the caller of * the according declaration operation takes // full responsibility for the * correctness of the declaration. Falsely declaring a multiplication // as * symmetric, Hermitian, lower triangular, upper triangular, or diagonal * leads to undefined // behavior! 
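// As a brief illustration (this snippet is not part of the original text), one case in which such
// a declaration is provably safe is the product of a matrix with its own transpose, which is
// symmetric by construction:

\code
using blaze::DynamicMatrix;

DynamicMatrix<double> A, S;

// ... Resizing and initialization

S = declsym( A * trans( A ) );  // A * A^T is always symmetric, so the declaration is valid
\endcode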
// // \n Previous: \ref * matrix_vector_multiplication &nbsp; &nbsp; Next: \ref * shared_memory_parallelization */ //************************************************************************************************* //**Shared Memory Parallelization ****************************************************************** /* * !\page shared_memory_parallelization Shared Memory Parallelization // // * For all possible operations \b Blaze tries to achieve maximum performance * on a single CPU // core. However, today's CPUs are not single core * anymore, but provide several (homogeneous // or heterogeneous) compute * cores. In order to fully exploit the performance potential of a // * multicore CPU, computations have to be parallelized across all available * cores of a CPU. // For this purpose, \b Blaze provides four different * shared memory parallelization techniques: // // - \ref * hpx_parallelization // - \ref cpp_threads_parallelization // - \ref * boost_threads_parallelization // - \ref openmp_parallelization // // When * any of the shared memory parallelization techniques is activated, all * arithmetic // operations on dense vectors and matrices (including * additions, subtractions, multiplications, // divisions, and all * componentwise arithmetic operations) and most operations on sparse vectors * // and matrices are automatically run in parallel. However, in addition, * \b Blaze provides means // to enforce the serial execution of specific * operations: // // - \ref serial_execution // // \n Previous: \ref * matrix_matrix_multiplication &nbsp; &nbsp; Next: \ref hpx_parallelization */ //************************************************************************************************* //**HPX Parallelization **************************************************************************** /* * !\page hpx_parallelization HPX Parallelization // // \tableofcontents // * // // The first shared memory parallelization provided with \b Blaze is * based on // <a href="http://stellar.cct.lsu.edu/projects/hpx/">HPX</a>. // * // // \n \section hpx_setup HPX Setup // <hr> // // In order to enable the * HPX-based parallelization, the following steps have to be taken: First, // * the \c BLAZE_USE_HPX_THREADS command line argument has to be explicitly * specified during // compilation: * * \code ... -DBLAZE_USE_HPX_THREADS ... \endcode * * // Second, the HPX library and depending libraries such as Boost, hwloc, etc. * have to be linked. // And third, the HPX threads have to be initialized by * a call to the \c hpx::init() function (see // the <a * href="http://stellar.cct.lsu.edu/files/hpx_0.9.0/docs/hpx/tutorial.html">HP * X tutorial</a> // for further details). These three actions will cause the * \b Blaze library to automatically try // to run all operations in parallel * with the specified number of HPX threads. // // Note that the HPX-based * parallelization has priority over the OpenMP-based, C++11 thread-based, // * and Boost thread-based parallelizations, i.e. is preferred in case * multiple parallelizations // are enabled in combination with the HPX * thread parallelization. // // The number of threads used by the HPX * backend has to be specified via the command line: * * \code ... --hpx:threads 4 ... \endcode * * // Please note that the \b Blaze library does not limit the available number * of threads. Therefore // it is in YOUR responsibility to choose an * appropriate number of threads. 
The best performance, // though, can be * expected if the specified number of threads matches the available number * of // cores. // // In order to query the number of threads used for the * parallelization of operations, the // \c getNumThreads() function can be * used: * * \code const size_t threads = blaze::getNumThreads(); \endcode * * // In the context of HPX threads, the function will return the actual number * of threads used by // the HPX subsystem. // // // \n \section * hpx_configuration HPX Configuration // <hr> // // As in case of the other * shared memory parallelizations \b Blaze is not unconditionally running // * an operation in parallel (see for instance \ref openmp_parallelization). * Only in case a given // operation is large enough and exceeds a certain * threshold the operation is executed in parallel. // All thresholds related * to the HPX-based parallelization are contained within the configuration // * file <tt><blaze/config/Thresholds.h></tt>. // // Please note that these * thresholds are highly sensitiv to the used system architecture and // the * shared memory parallelization technique. Therefore the default values * cannot guarantee // maximum performance for all possible situations and * configurations. They merely provide a // reasonable standard for the * current CPU generation. Also note that the provided defaults // have been * determined using the OpenMP parallelization and require individual * adaption for // the HPX-based parallelization. // // \n Previous: \ref * shared_memory_parallelization &nbsp; &nbsp; Next: \ref * cpp_threads_parallelization */ //************************************************************************************************* //**C++ 11 Thread Parallelization ******************************************************************* /* * !\page cpp_threads_parallelization C++11 Thread Parallelization // // * \tableofcontents // // // In addition to the HPX-based shared memory * parallelization, starting with \b Blaze 2.1, // \b Blaze also provides a * shared memory parallelization based on C++11 threads. // // // \n \section * cpp_threads_setup C++11 Thread Setup // <hr> // // In order to enable the * C++11 thread-based parallelization, first the according C++11-specific // * compiler flags have to be used and second the \c BLAZE_USE_CPP_THREADS * command line argument // has to be explicitly specified. For instance, in * case of the GNU C++ and Clang compilers the // compiler flags have to be * extended by * * \code ... -std=c++11 -DBLAZE_USE_CPP_THREADS ... \endcode * * // This simple action will cause the \b Blaze library to automatically try to * run all operations // in parallel with the specified number of C++11 * threads. Note that in case both HPX and C++11 // threads are enabled on * the command line, the HPX-based parallelization has priority and is // * preferred. // // The number of threads can be either specified via the * environment variable \c BLAZE_NUM_THREADS * * \code export BLAZE_NUM_THREADS=4 // Unix systems set BLAZE_NUM_THREADS=4 * // Windows systems \endcode * * // or alternatively via the \c setNumThreads() function provided by the \b * Blaze library: * * \code blaze::setNumThreads( 4 ); \endcode * * // Please note that the \b Blaze library does not limit the available number * of threads. Therefore // it is in YOUR responsibility to choose an * appropriate number of threads. The best performance, // though, can be * expected if the specified number of threads matches the available number * of // cores. 
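// A minimal sketch (not prescribed by \b Blaze; it merely combines the \c setNumThreads() function
// shown above with the C++ standard library) for matching the thread count to the number of
// available cores:

\code
#include <thread>

const unsigned int cores = std::thread::hardware_concurrency();  // may return 0 if unknown
blaze::setNumThreads( cores > 0U ? cores : 1U );
\endcode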
// // In order to query the number of threads used for the * parallelization of operations, the // \c getNumThreads() function can be * used: * * \code const size_t threads = blaze::getNumThreads(); \endcode * * // In the context of C++11 threads, the function will return the previously * specified number of // threads. // // // \n \section * cpp_threads_configuration C++11 Thread Configuration // <hr> // // As in * case of the OpenMP-based parallelization \b Blaze is not unconditionally * running an // operation in parallel. In case \b Blaze deems the parallel * execution as counterproductive for // the overall performance, the * operation is executed serially. One of the main reasons for not // * executing an operation in parallel is the size of the operands. For * instance, a vector addition // is only executed in parallel if the size of * both vector operands exceeds a certain threshold. // Otherwise, the * performance could seriously decrease due to the overhead caused by the * thread // setup. However, in order to be able to adjust the \b Blaze * library to a specific system, it // is possible to configure these * thresholds manually. All thresholds are contained within the // * configuration file <tt><blaze/config/Thresholds.h></tt>. // // Please note * that these thresholds are highly sensitiv to the used system architecture * and // the shared memory parallelization technique. Therefore the default * values cannot guarantee // maximum performance for all possible situations * and configurations. They merely provide a // reasonable standard for the * current CPU generation. Also note that the provided defaults // have been * determined using the OpenMP parallelization and require individual * adaption for // the C++11 thread parallelization. // // // \n \section * cpp_threads_known_issues Known Issues // <hr> // // There is a known issue * in Visual Studio 2012 and 2013 that may cause C++11 threads to hang // if * their destructor is executed after the \c main() function: // // * http://connect.microsoft.com/VisualStudio/feedback/details/747145 // // * Unfortunately, the C++11 parallelization of the \b Blaze library is * affected from this bug. // In order to circumvent this problem, \b Blaze * provides the \c shutDownThreads() function, // which can be used to * manually destroy all threads at the end of the \c main() function: * * \code int main() { // ... Using the C++11 thread parallelization of Blaze * * shutDownThreads(); } \endcode * * // Please note that this function may only be used at the end of the \c * main() function. After // this function no further computation may be * executed! Also note that this function has an // effect for Visual Studio * compilers only and doesn't need to be used with any other compiler. // // * \n Previous: \ref hpx_parallelization &nbsp; &nbsp; Next: \ref * boost_threads_parallelization */ //************************************************************************************************* //**Boost Thread Parallelization ******************************************************************* /* * !\page boost_threads_parallelization Boost Thread Parallelization // // * \tableofcontents // // // The third available shared memory * parallelization provided with \b Blaze is based // on <a * href="https://www.boost.org/doc/libs/1_68_0/doc/html/thread.html">Boost * threads</a>. 
// // // \n \section boost_threads_setup Boost Thread Setup * // <hr> // // In order to enable the Boost thread-based parallelization, * two steps have to be taken: First, // the \c BLAZE_USE_BOOST_THREADS * command line argument has to be explicitly specified during // * compilation: * * \code ... -DBLAZE_USE_BOOST_THREADS ... \endcode * * // Second, the according Boost libraries have to be linked. These two simple * actions will cause // the \b Blaze library to automatically try to run all * operations in parallel with the specified // number of Boost threads. Note * that the HPX-based and C++11 thread-based parallelizations have // * priority, i.e. are preferred in case either is enabled in combination with * the Boost thread // parallelization. // // The number of threads can be * either specified via the environment variable \c BLAZE_NUM_THREADS * * \code export BLAZE_NUM_THREADS=4 // Unix systems set BLAZE_NUM_THREADS=4 * // Windows systems \endcode * * // or alternatively via the \c setNumThreads() function provided by the \b * Blaze library: * * \code blaze::setNumThreads( 4 ); \endcode * * // Please note that the \b Blaze library does not limit the available number * of threads. Therefore // it is in YOUR responsibility to choose an * appropriate number of threads. The best performance, // though, can be * expected if the specified number of threads matches the available number * of // cores. // // In order to query the number of threads used for the * parallelization of operations, the // \c getNumThreads() function can be * used: * * \code const size_t threads = blaze::getNumThreads(); \endcode * * // In the context of Boost threads, the function will return the previously * specified number of // threads. // // // \n \section * boost_threads_configuration Boost Thread Configuration // <hr> // // As in * case of the other shared memory parallelizations \b Blaze is not * unconditionally running // an operation in parallel (see \ref * openmp_parallelization or \ref cpp_threads_parallelization). // All * thresholds related to the Boost thread parallelization are also contained * within the // configuration file <tt><blaze/config/Thresholds.h></tt>. // * // Please note that these thresholds are highly sensitiv to the used * system architecture and // the shared memory parallelization technique. * Therefore the default values cannot guarantee // maximum performance for * all possible situations and configurations. They merely provide a // * reasonable standard for the current CPU generation. Also note that the * provided defaults // have been determined using the OpenMP parallelization * and require individual adaption for // the Boost thread parallelization. * // // \n Previous: \ref cpp_threads_parallelization &nbsp; &nbsp; Next: * \ref openmp_parallelization */ //************************************************************************************************* //**OpenMP Parallelization ************************************************************************* /* * !\page openmp_parallelization OpenMP Parallelization // // * \tableofcontents // // // The fourth and final shared memory * parallelization provided with \b Blaze is based on // <a * href="https://www.openmp.org">OpenMP</a>. 
// // // \n \section * openmp_setup OpenMP Setup // <hr> // // To enable the OpenMP-based * parallelization, all that needs to be done is to explicitly specify // the * use of OpenMP on the command line: * * \code -fopenmp // GNU/Clang C++ compiler -openmp // Intel C++ compiler * /openmp // Visual Studio \endcode * * // This simple action will cause the \b Blaze library to automatically try to * run all operations // in parallel with the specified number of threads. * Note however that the HPX-based, the C++11 // thread-based, and the Boost * thread-based parallelizations have priority, i.e. are preferred in // case * either is enabled in combination with the OpenMP thread parallelization. * // // As common for OpenMP, the number of threads can be specified either * via an environment variable * * \code export OMP_NUM_THREADS=4 // Unix systems set OMP_NUM_THREADS=4 // * Windows systems \endcode * * // or via an explicit call to the \c omp_set_num_threads() function: * * \code omp_set_num_threads( 4 ); \endcode * * // Alternatively, the number of threads can also be specified via the \c * setNumThreads() function // provided by the \b Blaze library: * * \code blaze::setNumThreads( 4 ); \endcode * * // Please note that the \b Blaze library does not limit the available number * of threads. Therefore // it is in YOUR responsibility to choose an * appropriate number of threads. The best performance, // though, can be * expected if the specified number of threads matches the available number * of // cores. // // In order to query the number of threads used for the * parallelization of operations, the // \c getNumThreads() function can be * used: * * \code const size_t threads = blaze::getNumThreads(); \endcode * * // In the context of OpenMP, the function returns the maximum number of * threads OpenMP will use // within a parallel region and is therefore * equivalent to the \c omp_get_max_threads() function. // // // \n \section * openmp_configuration OpenMP Configuration // <hr> // // Note that \b Blaze * is not unconditionally running an operation in parallel. In case \b Blaze * // deems the parallel execution as counterproductive for the overall * performance, the operation // is executed serially. One of the main * reasons for not executing an operation in parallel is // the size of the * operands. For instance, a vector addition is only executed in parallel if * the // size of both vector operands exceeds a certain threshold. * Otherwise, the performance could // seriously decrease due to the overhead * caused by the thread setup. However, in order to be // able to adjust the * \b Blaze library to a specific system, it is possible to configure these * // thresholds manually. All shared memory thresholds are contained within * the configuration file // <tt><blaze/config/Thresholds.h></tt>. // // * Please note that these thresholds are highly sensitiv to the used system * architecture and // the shared memory parallelization technique (see also * \ref cpp_threads_parallelization and // \ref * boost_threads_parallelization). Therefore the default values cannot * guarantee maximum // performance for all possible situations and * configurations. They merely provide a reasonable // standard for the * current CPU generation. // // // \n \section openmp_first_touch First * Touch Policy // <hr> // // So far the \b Blaze library does not (yet) * automatically initialize dynamic memory according // to the first touch * principle. 
// Consider for instance the following vector triad example:

\code
using blaze::columnVector;

const size_t N( 1000000UL );

blaze::DynamicVector<double,columnVector> a( N ), b( N ), c( N ), d( N );

// Initialization of the vectors b, c, and d
for( size_t i=0UL; i<N; ++i ) {
   b[i] = rand<double>();
   c[i] = rand<double>();
   d[i] = rand<double>();
}

// Performing a vector triad
a = b + c * d;
\endcode

// If this code, which is prototypical for many OpenMP applications that have not been optimized
// for ccNUMA architectures, is run across several locality domains (LD), it will not scale
// beyond the maximum performance achievable on a single LD if the working set does not fit into
// the cache. This is because the initialization loop is executed by a single thread, writing to
// \c b, \c c, and \c d for the first time. Hence, all memory pages belonging to those arrays will
// be mapped into a single LD.
//
// As mentioned above, this problem can be solved by performing vector initialization in parallel:

\code
// ...

// Initialization of the vectors b, c, and d
#pragma omp parallel for
for( size_t i=0UL; i<N; ++i ) {
   b[i] = rand<double>();
   c[i] = rand<double>();
   d[i] = rand<double>();
}

// ...
\endcode

// This simple modification makes a huge difference on ccNUMA in memory-bound situations (as for
// instance in all BLAS level 1 operations and partially BLAS level 2 operations). Therefore, in
// order to achieve the maximum possible performance, it is imperative to initialize the memory
// according to the later use of the data structures.
//
//
// \n \section openmp_limitations Limitations of the OpenMP Parallelization
// <hr>
//
// There are a few important limitations to the current \b Blaze OpenMP parallelization. The first
// one involves the explicit use of an OpenMP parallel region (see \ref openmp_parallel), the
// other one the OpenMP \c sections directive (see \ref openmp_sections).
//
//
// \n \subsection openmp_parallel The Parallel Directive
//
// In OpenMP threads are explicitly spawned via an OpenMP parallel directive:

\code
// Serial region, executed by a single thread

#pragma omp parallel
{
   // Parallel region, executed by the specified number of threads
}

// Serial region, executed by a single thread
\endcode

// Conceptually, the specified number of threads (see \ref openmp_setup) is created every time a
// parallel directive is encountered. Therefore, from a performance point of view, it seems to be
// beneficial to use a single OpenMP parallel directive for several operations:

\code
blaze::DynamicVector<double> x, y1, y2;
blaze::DynamicMatrix<double> A, B;

#pragma omp parallel
{
   y1 = A * x;
   y2 = B * x;
}
\endcode

// Unfortunately, this optimization approach is not allowed within the \b Blaze library. More
// explicitly, it is not allowed to put an operation into a parallel region. The reason is that
// the entire code contained within a parallel region is executed by all threads. Although this
// appears to just comprise the contained computations, a computation (or more specifically the
// assignment of an expression to a vector or matrix) can contain additional logic that must not
// be handled by multiple threads (as for instance memory allocations, setup of temporaries, etc.).
// Therefore it is not possible to manually start a parallel region for several operations, but
// \b Blaze will spawn threads automatically, depending on the specifics of the operation at hand
// and the given operands.
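// The intended pattern is therefore simply to write the operations back to back and let \b Blaze
// parallelize each assignment internally (a short sketch reusing the operands from the example
// above; no user-managed parallel region is involved):

\code
blaze::DynamicVector<double> x, y1, y2;
blaze::DynamicMatrix<double> A, B;

// ... Resizing and initialization

y1 = A * x;  // Each assignment is parallelized internally by Blaze
y2 = B * x;
\endcode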
//
// \n \subsection openmp_sections The Sections Directive
//
// OpenMP provides several work-sharing constructs to distribute work among threads. One of these
// constructs is the \c sections directive:

\code
blaze::DynamicVector<double> x, y1, y2;
blaze::DynamicMatrix<double> A, B;

// ... Resizing and initialization

#pragma omp parallel sections
{
   #pragma omp section
   y1 = A * x;

   #pragma omp section
   y2 = B * x;
}
\endcode

// In this example, two threads are used to compute two distinct matrix/vector multiplications
// concurrently. Thereby each of the \c sections is executed by exactly one thread.
//
// Unfortunately \b Blaze does not support concurrent parallel computations and therefore this
// approach does not work with any of the \b Blaze parallelization techniques. All techniques
// (including the C++11 and Boost thread parallelizations; see \ref cpp_threads_parallelization
// and \ref boost_threads_parallelization) are optimized for the parallel computation of an
// operation within a single thread of execution. This means that \b Blaze tries to use all
// available threads to compute the result of a single operation as efficiently as possible.
// Therefore, for this special case, it is advisable to disable all \b Blaze parallelizations
// and to let \b Blaze compute all operations within a \c sections directive in serial. This can
// be done by either completely disabling the \b Blaze parallelization (see \ref serial_execution)
// or by selectively serializing all operations within a \c sections directive via the \c serial()
// function:

\code
blaze::DynamicVector<double> x, y1, y2;
blaze::DynamicMatrix<double> A, B;

// ... Resizing and initialization

#pragma omp parallel sections
{
   #pragma omp section
   y1 = serial( A * x );

   #pragma omp section
   y2 = serial( B * x );
}
\endcode

// Please note that the use of the \c BLAZE_SERIAL_SECTION (see also \ref serial_execution) does
// NOT work in this context!
//
// \n Previous: \ref boost_threads_parallelization &nbsp; &nbsp; Next: \ref serial_execution
*/
//*************************************************************************************************

//**Serial Execution*******************************************************************************
/*!\page serial_execution Serial Execution
//
// Sometimes it may be necessary to enforce the serial execution of specific operations. For this
// purpose, the \b Blaze library offers three possible options: the serialization of a single
// expression via the \c serial() function, the serialization of a block of expressions via the
// \c BLAZE_SERIAL_SECTION, and the general deactivation of the parallel execution.
//
//
// \n \section serial_execution_serial_expression Option 1: Serialization of a Single Expression
// <hr>
//
// The first option is the serialization of a specific operation via the \c serial() function:

\code
blaze::DynamicMatrix<double> A, B, C;
// ... Resizing and initialization
C = serial( A + B );
\endcode

// \c serial() enforces the serial evaluation of the enclosed expression. It can be used on any
// kind of dense or sparse vector or matrix expression.
//
//
// \n \section serial_execution_serial_section Option 2: Serialization of Multiple Expressions
// <hr>
//
// The second option is the temporary and local enforcement of a serial execution via the
// \c BLAZE_SERIAL_SECTION:

\code
using blaze::rowMajor;
using blaze::columnVector;

blaze::DynamicMatrix<double,rowMajor> A;
blaze::DynamicVector<double,columnVector> b, c, d, x, y, z;

// ...
Resizing and initialization * * // Parallel execution // If possible and beneficial for performance the * following operation is executed in parallel. x = A * b; * * // Serial execution // All operations executed within the serial section are * guaranteed to be executed in // serial (even if a parallel execution would * be possible and/or beneficial). BLAZE_SERIAL_SECTION { y = A * c; z = A * * d; } * * // Parallel execution continued // ... \endcode * * // Within the scope of the \c BLAZE_SERIAL_SECTION, all operations are * guaranteed to run in serial. // Outside the scope of the serial section, * all operations are run in parallel (if beneficial for // the performance). * // // Note that the \c BLAZE_SERIAL_SECTION must only be used within a * single thread of execution. // The use of the serial section within * several concurrent threads will result undefined behavior! // // // \n * \section serial_execution_deactivate_parallelism Option 3: Deactivation of * Parallel Execution // <hr> // // The third option is the general * deactivation of the parallel execution (even in case OpenMP is // enabled * on the command line). This can be achieved via the \c * BLAZE_USE_SHARED_MEMORY_PARALLELIZATION // switch in the * <tt>./blaze/config/SMP.h</tt> configuration file: * * \code #define BLAZE_USE_SHARED_MEMORY_PARALLELIZATION 1 \endcode * * // In case the \c BLAZE_USE_SHARED_MEMORY_PARALLELIZATION switch is set to 0, * the shared memory // parallelization is deactivated altogether. // // \n * Previous: \ref openmp_parallelization &nbsp; &nbsp; Next: \ref * serialization */ //************************************************************************************************* //**Serialization ********************************************************************************** /* * !\page serialization Serialization // // Sometimes it is necessary to * store vector and/or matrices on disk, for instance for storing // results * or for sharing specific setups with other people. The \b Blaze math * serialization // module provides the according functionality to create * platform independent, portable, binary // representations of vectors and * matrices that can be used to store the \b Blaze data structures // without * loss of precision and to reliably transfer them from one machine to * another. // // The following two pages explain how to serialize vectors * and matrices: // // - \ref vector_serialization // - \ref * matrix_serialization // // \n Previous: \ref serial_execution &nbsp; * &nbsp; Next: \ref vector_serialization */ //************************************************************************************************* //**Vector Serialization *************************************************************************** /* * !\page vector_serialization Vector Serialization // // The following * example demonstrates the (de-)serialization of dense and sparse vectors: * * \code using blaze::columnVector; using blaze::rowVector; * * // Serialization of both vectors { blaze::StaticVector<double,5UL,rowVector> * d; blaze::CompressedVector<int,columnVector> s; * * // ... Resizing and initialization * * // Creating an archive that writes into a the file "vectors.blaze" * blaze::Archive<std::ofstream> archive( "vectors.blaze" ); * * // Serialization of both vectors into the same archive. Note that d lies * before s! 
   archive << d << s;
}

// Reconstitution of both vectors
{
   blaze::DynamicVector<double,rowVector> d1;
   blaze::DynamicVector<int,rowVector> d2;

   // Creating an archive that reads from the file "vectors.blaze"
   blaze::Archive<std::ifstream> archive( "vectors.blaze" );

   // Reconstituting the former d vector into d1. Note that it is possible to reconstitute
   // the vector into a different kind of vector (StaticVector -> DynamicVector), but that
   // the type of elements has to be the same.
   archive >> d1;

   // Reconstituting the former s vector into d2. Note that it is even possible to reconstitute
   // a sparse vector as a dense vector (also the reverse is possible) and that a column vector
   // can be reconstituted as row vector (and vice versa). Note however that also in this case
   // the type of elements is the same!
   archive >> d2;
}
\endcode

// The (de-)serialization of vectors is not restricted to vectors of built-in data type, but can also be used for vectors with vector or matrix element type:

\code
// Serialization
{
   blaze::CompressedVector< blaze::DynamicVector< blaze::complex<double> > > vec;

   // ... Resizing and initialization

   // Creating an archive that writes into the file "vector.blaze"
   blaze::Archive<std::ofstream> archive( "vector.blaze" );

   // Serialization of the vector into the archive
   archive << vec;
}

// Deserialization
{
   blaze::CompressedVector< blaze::DynamicVector< blaze::complex<double> > > vec;

   // Creating an archive that reads from the file "vector.blaze"
   blaze::Archive<std::ifstream> archive( "vector.blaze" );

   // Reconstitution of the vector from the archive
   archive >> vec;
}
\endcode

// As the examples demonstrate, the vector serialization offers an enormous flexibility. However, several actions result in errors:
//
//  - vectors cannot be reconstituted as matrices (and vice versa)
//  - the element type of the serialized and reconstituted vector must match, which means that on the source and destination platform the general type (signed/unsigned integral or floating point) and the size of the type must be exactly the same
//  - when reconstituting a \c StaticVector, its size must match the size of the serialized vector
//
// In case an error is encountered during (de-)serialization, a \c std::runtime_error exception is thrown.
//
// \n Previous: \ref serialization &nbsp; &nbsp; Next: \ref matrix_serialization
*/
//*************************************************************************************************


//**Matrix Serialization***************************************************************************
/*!\page matrix_serialization Matrix Serialization
//
// The serialization of matrices works in the same manner as the serialization of vectors. The following example demonstrates the (de-)serialization of dense and sparse matrices:

\code
using blaze::rowMajor;
using blaze::columnMajor;

// Serialization of both matrices
{
   blaze::StaticMatrix<double,3UL,5UL,rowMajor> D;
   blaze::CompressedMatrix<int,columnMajor> S;

   // ... Resizing and initialization

   // Creating an archive that writes into the file "matrices.blaze"
   blaze::Archive<std::ofstream> archive( "matrices.blaze" );

   // Serialization of both matrices into the same archive. Note that D lies before S!
   archive << D << S;
}

// Reconstitution of both matrices
{
   blaze::DynamicMatrix<double,rowMajor> D1;
   blaze::DynamicMatrix<int,rowMajor> D2;

   // Creating an archive that reads from the file "matrices.blaze"
   blaze::Archive<std::ifstream> archive( "matrices.blaze" );

   // Reconstituting the former D matrix into D1. Note that it is possible to reconstitute
   // the matrix into a different kind of matrix (StaticMatrix -> DynamicMatrix), but that
   // the type of elements has to be the same.
   archive >> D1;

   // Reconstituting the former S matrix into D2. Note that it is even possible to reconstitute
   // a sparse matrix as a dense matrix (also the reverse is possible) and that a column-major
   // matrix can be reconstituted as row-major matrix (and vice versa). Note however that also
   // in this case the type of elements is the same!
   archive >> D2;
}
\endcode

// Note that also in case of matrices it is possible to (de-)serialize matrices with vector or matrix elements:

\code
// Serialization
{
   blaze::CompressedMatrix< blaze::DynamicMatrix< blaze::complex<double> > > mat;

   // ... Resizing and initialization

   // Creating an archive that writes into the file "matrix.blaze"
   blaze::Archive<std::ofstream> archive( "matrix.blaze" );

   // Serialization of the matrix into the archive
   archive << mat;
}

// Deserialization
{
   blaze::CompressedMatrix< blaze::DynamicMatrix< blaze::complex<double> > > mat;

   // Creating an archive that reads from the file "matrix.blaze"
   blaze::Archive<std::ifstream> archive( "matrix.blaze" );

   // Reconstitution of the matrix from the archive
   archive >> mat;
}
\endcode

// Note that just as the vector serialization, the matrix serialization is restricted by a few important rules:
//
//  - matrices cannot be reconstituted as vectors (and vice versa)
//  - the element type of the serialized and reconstituted matrix must match, which means that on the source and destination platform the general type (signed/unsigned integral or floating point) and the size of the type must be exactly the same
//  - when reconstituting a \c StaticMatrix, the number of rows and columns must match those of the serialized matrix
//
// In case an error is encountered during (de-)serialization, a \c std::runtime_error exception is thrown.
//
// \n Previous: \ref vector_serialization &nbsp; &nbsp; Next: \ref customization \n
*/
//*************************************************************************************************


//**Customization**********************************************************************************
/*!\page customization Customization
//
// Although \b Blaze tries to work out of the box for every possible setting, still it may be necessary to adapt the library to specific requirements. The following three pages explain how to customize the \b Blaze library to your own needs:
//
//  - \ref configuration_files
//  - \ref vector_and_matrix_customization
//  - \ref error_reporting_customization
//
// \n Previous: \ref matrix_serialization &nbsp; &nbsp; Next: \ref configuration_files
*/
//*************************************************************************************************


//**Configuration Files****************************************************************************
/*!\page configuration_files Configuration Files
//
// \tableofcontents
//
//
// Sometimes it is necessary to adapt \b Blaze to specific requirements.
For this purpose // \b Blaze provides several configuration * files in the <tt>./blaze/config/</tt> subdirectory, // which provide ample * opportunity to customize internal settings, behavior, and thresholds. // * This chapter explains the most important of these configuration files. For * a complete // overview of all customization opportunities, please go to * the configuration files in the // <tt>./blaze/config/</tt> subdirectory or * see the complete \b Blaze documentation. // // // \n \section * transpose_flag Default Vector Storage // <hr> // // The \b Blaze default * is that all vectors are created as column vectors (if not specified // * explicitly): * * \code blaze::StaticVector<double,3UL> x; // Creates a 3-dimensional static * column vector \endcode * * // The header file <tt>./blaze/config/TransposeFlag.h</tt> allows the * configuration of the default // vector storage (i.e. the default transpose * flag) of all vectors within the \b Blaze library. // The default transpose * flag is specified via the \c BLAZE_DEFAULT_TRANSPOSE_FLAG macro: * * \code #define BLAZE_DEFAULT_TRANSPOSE_FLAG blaze::columnVector \endcode * * // Alternatively the default transpose flag can be specified via command line * or by defining this // symbol manually before including any \b Blaze * header file: * * \code #define BLAZE_DEFAULT_TRANSPOSE_FLAG blaze::columnVector #include * <blaze/Blaze.h> \endcode * * // Valid settings for \c BLAZE_DEFAULT_TRANSPOSE_FLAG are blaze::rowVector * and blaze::columnVector. // // // \n \section storage_order Default Matrix * Storage // <hr> // // Matrices are by default created as row-major * matrices: * * \code blaze::StaticMatrix<double,3UL,3UL> A; // Creates a 3x3 row-major * matrix \endcode * * // The header file <tt>./blaze/config/StorageOrder.h</tt> allows the * configuration of the default // matrix storage order. Via the \c * BLAZE_DEFAULT_STORAGE_ORDER macro the default storage order // for all * matrices of the \b Blaze library can be specified. * * \code #define BLAZE_DEFAULT_STORAGE_ORDER blaze::rowMajor \endcode * * // Alternatively the default storage order can be specified via command line * or by defining this // symbol manually before including any \b Blaze * header file: * * \code #define BLAZE_DEFAULT_STORAGE_ORDER blaze::rowMajor #include * <blaze/Blaze.h> \endcode * * // Valid settings for \c BLAZE_DEFAULT_STORAGE_ORDER are blaze::rowMajor and * blaze::columnMajor. // // // \n \section blas_mode BLAS Mode // <hr> // // * In order to achieve maximum performance for multiplications with dense * matrices, \b Blaze can // be configured to use a BLAS library. Via the * following compilation switch in the configuration // file * <tt>./blaze/config/BLAS.h</tt> BLAS can be enabled: * * \code #define BLAZE_BLAS_MODE 1 \endcode * * // In case the selected BLAS library provides parallel execution, the \c * BLAZE_BLAS_IS_PARALLEL // switch should be activated to prevent \b Blaze * from parallelizing on its own: * * \code #define BLAZE_BLAS_IS_PARALLEL 1 \endcode * * // Alternatively, both settings can be specified via command line or by * defining the symbols // manually before including any \b Blaze header * file: * * \code #define BLAZE_BLAS_MODE 1 #define BLAZE_BLAS_IS_PARALLEL 1 #include * <blaze/Blaze.h> \endcode * * // In case no BLAS library is available, \b Blaze will still work and will * not be reduced in // functionality, but performance may be limited. 
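//
// As a brief illustrative sketch of these two switches in combination, the following (hypothetical) translation unit enables the BLAS mode before including \b Blaze and then performs a dense matrix multiplication that is large enough to be forwarded to the BLAS kernels; the chosen size of 1000 is an arbitrary example and the actual dispatch depends on the configured thresholds and on a BLAS library being linked:

\code
#define BLAZE_BLAS_MODE 1          // Enable the use of a BLAS library
#define BLAZE_BLAS_IS_PARALLEL 1   // Assuming the linked BLAS parallelizes internally
#include <blaze/Blaze.h>

int main()
{
   blaze::DynamicMatrix<double> A( 1000UL, 1000UL, 2.0 );
   blaze::DynamicMatrix<double> B( 1000UL, 1000UL, 3.0 );

   // For sufficiently large operands this multiplication is handled by the BLAS gemm kernels
   blaze::DynamicMatrix<double> C( A * B );
}
\endcode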
//
// \n \section cache_size Cache Size
// <hr>
//
// The optimization of several \b Blaze compute kernels depends on the cache size of the target architecture. By default, \b Blaze assumes a cache size of 3 MiByte. However, for optimal speed the exact cache size of the system should be provided via the \c BLAZE_CACHE_SIZE value in the <tt>./blaze/config/CacheSize.h</tt> configuration file:

\code
#define BLAZE_CACHE_SIZE 3145728UL
\endcode

// The cache size can also be specified via command line or by defining this symbol manually before including any \b Blaze header file:

\code
#define BLAZE_CACHE_SIZE 3145728UL
#include <blaze/Blaze.h>
\endcode

// \n \section vectorization Vectorization
// <hr>
//
// In order to achieve maximum performance and to exploit the compute power of a target platform the \b Blaze library attempts to vectorize all linear algebra operations by SSE, AVX, and/or AVX-512 intrinsics, depending on which instruction set is available. However, it is possible to disable the vectorization entirely by the compile time switch in the configuration file <tt>./blaze/config/Vectorization.h</tt>:

\code
#define BLAZE_USE_VECTORIZATION 1
\endcode

// It is also possible to (de-)activate vectorization via command line or by defining this symbol manually before including any \b Blaze header file:

\code
#define BLAZE_USE_VECTORIZATION 1
#include <blaze/Blaze.h>
\endcode

// In case the switch is set to 1, vectorization is enabled and the \b Blaze library is allowed to use intrinsics to speed up computations. In case the switch is set to 0, vectorization is disabled entirely and the \b Blaze library chooses default, non-vectorized functionality for the operations. Note that deactivating the vectorization may pose a severe performance limitation for a large number of operations!
//
//
// \n \section thresholds Thresholds
// <hr>
//
// For many computations \b Blaze distinguishes between small and large vectors and matrices. This separation is especially important for the parallel execution of computations, since the use of several threads only pays off for sufficiently large vectors and matrices. Additionally, it also enables \b Blaze to select kernels that are optimized for a specific size.
//
// In order to distinguish between small and large data structures \b Blaze provides several thresholds that can be adapted to the characteristics of the target platform. For instance, the \c DMATDVECMULT_THRESHOLD specifies the threshold between the application of the custom \b Blaze kernels for small dense matrix/dense vector multiplications and the BLAS kernels for large multiplications. All thresholds, including the thresholds for the OpenMP- and thread-based parallelization, are contained within the configuration file <tt><blaze/config/Thresholds.h></tt>.
//
//
// \n \section padding Padding
// <hr>
//
// By default the \b Blaze library uses padding for all dense vectors and matrices in order to achieve maximum performance in all operations. Due to padding, the proper alignment of data elements can be guaranteed and the need for remainder loops is minimized. However, on the downside padding introduces an additional memory overhead, which can be large depending on the used data type.
// // The configuration * file <tt>./blaze/config/Optimizations.h</tt> provides a compile time * switch // that can be used to (de-)activate padding: * * \code #define BLAZE_USE_PADDING 1 \endcode * * // Alternatively it is possible to (de-)activate padding via command line or * by defining this // symbol manually before including any \b Blaze header * file: * * \code #define BLAZE_USE_PADDING 1 #include <blaze/Blaze.h> \endcode * * // If \c BLAZE_USE_PADDING is set to 1 padding is enabled for all dense * vectors and matrices, if // it is set to 0 padding is disabled. Note * however that disabling padding can considerably reduce // the performance * of all dense vector and matrix operations! // // // \n \section streaming * Streaming (Non-Temporal Stores) // <hr> // // For vectors and matrices * that don't fit into the cache anymore non-temporal stores can provide // a * significant performance advantage of about 20%. However, this advantage is * only in effect in // case the memory bandwidth of the target architecture * is maxed out. If the target architecture's // memory bandwidth cannot be * exhausted the use of non-temporal stores can decrease performance // * instead of increasing it. // // The configuration file * <tt>./blaze/config/Optimizations.h</tt> provides a compile time switch // * that can be used to (de-)activate streaming: * * \code #define BLAZE_USE_STREAMING 1 \endcode * * // Alternatively streaming can be (de-)activated via command line or by * defining this symbol // manually before including any \b Blaze header * file: * * \code #define BLAZE_USE_STREAMING 1 #include <blaze/Blaze.h> \endcode * * // If \c BLAZE_USE_STREAMING is set to 1 streaming is enabled, if it is set * to 0 streaming is // disabled. It is recommended to consult the target * architecture's white papers to decide whether // streaming is beneficial * or hurtful for performance. // // // \n Previous: \ref customization * &nbsp; &nbsp; Next: \ref vector_and_matrix_customization \n */ //************************************************************************************************* //**Customization of Vectors and Matrices ********************************************************** /* * !\page vector_and_matrix_customization Customization of Vectors and * Matrices // // \tableofcontents // // // \n \section custom_data_members * Custom Data Members // <hr> // // So far the \b Blaze library does not * provide a lot of flexibility to customize the data // members of existing * \ref vector_types and \ref matrix_types. However, to some extend it is // * possible to customize vectors and matrices by inheritance. The following * example gives an // impression on how to create a simple variation of \ref * matrix_types_custom_matrix, which // automatically takes care of acquiring * and releasing custom memory. * * \code template< typename Type // Data type of the matrix , * bool SO = defaultStorageOrder > // Storage order class MyCustomMatrix : * public CustomMatrix< Type, unaligned, unpadded, SO > { public: explicit * inline MyCustomMatrix( size_t m, size_t n ) : * CustomMatrix<Type,unaligned,unpadded,SO>() , array_( new Type[m*n] ) { * this->reset( array_.get(), m, n ); } * * private: std::unique_ptr<Type[]> array_; }; \endcode * * // Please note that this is a simplified example with the intent to show the * general approach. // The number of constructors, the memory acquisition, * and the kind of memory management can of // course be adapted to specific * requirements. 
Also, please note that since none of the \b Blaze // vectors * and matrices have virtual destructors polymorphic destruction cannot be * used. // // // \n \section custom_operations Custom Operations // <hr> // * // There are two approaches to extend \b Blaze with custom operations. * First, the \c map() // functions provide the possibility to execute * componentwise custom operations on vectors and // matrices. Second, it is * possible to add customized free functions. // // \n \subsection * custom_operations_map The map() Functions // // Via the unary and binary * \c map() functions it is possible to execute componentwise custom // * operations on vectors and matrices. The unary \c map() function can be * used to apply a custom // operation on each single element of a dense * vector or matrix or each non-zero element of a // sparse vector or matrix. * For instance, the following example demonstrates a custom square // root * computation on a dense matrix: * * \code blaze::DynamicMatrix<double> A, B; * * B = map( A, []( double d ) { return std::sqrt( d ); } ); \endcode * * // The binary \c map() function can be used to apply an operation pairwise to * the elements of // two dense vectors or two dense matrices. The following * example demonstrates the merging of // two matrices of double precision * values into a matrix of double precision complex numbers: * * \code blaze::DynamicMatrix<double> real{ { 2.1, -4.2 }, { 1.0, 0.6 } }; * blaze::DynamicMatrix<double> imag{ { 0.3, 1.4 }, { 2.9, -3.4 } }; * * blaze::DynamicMatrix< complex<double> > cplx; * * // Creating the matrix // ( (-2.1, 0.3) (-4.2, -1.4) ) // ( ( 1.0, * 2.9) ( 0.6, -3.4) ) cplx = map( real, imag, []( double r, double i ){ * return complex( r, i ); } ); \endcode * * // These examples demonstrate the most convenient way of defining a unary * custom operation by // passing a lambda to the \c map() function. * Alternatively, it is possible to pass a custom // functor: * * \code struct Sqrt { double operator()( double a ) const { return std::sqrt( a * ); } }; * * B = map( A, Sqrt() ); \endcode * * // In order for the functor to work in a call to \c map() it must define a * function call operator, // which accepts arguments of the type of the * according vector or matrix elements. // // Although the operation is * automatically parallelized depending on the size of the vector or // * matrix, no automatic vectorization is possible. In order to enable * vectorization, a \c load() // function can be added to the functor, which * handles the vectorized computation. 
Depending on // the data type this * function is passed one of the following \b Blaze SIMD data types: // // * <ul> // <li>SIMD data types for fundamental data types // <ul> // * <li>\c blaze::SIMDint8: Packed SIMD type for 8-bit signed integral data * types</li> // <li>\c blaze::SIMDuint8: Packed SIMD type for 8-bit * unsigned integral data types</li> // <li>\c blaze::SIMDint16: * Packed SIMD type for 16-bit signed integral data types</li> // * <li>\c blaze::SIMDuint16: Packed SIMD type for 16-bit unsigned integral * data types</li> // <li>\c blaze::SIMDint32: Packed SIMD type for * 32-bit signed integral data types</li> // <li>\c * blaze::SIMDuint32: Packed SIMD type for 32-bit unsigned integral data * types</li> // <li>\c blaze::SIMDint64: Packed SIMD type for * 64-bit signed integral data types</li> // <li>\c * blaze::SIMDuint64: Packed SIMD type for 64-bit unsigned integral data * types</li> // <li>\c blaze::SIMDfloat: Packed SIMD type for * single precision floating point data</li> // <li>\c * blaze::SIMDdouble: Packed SIMD type for double precision floating point * data</li> // </ul> // </li> // <li>SIMD data types for complex * data types // <ul> // <li>\c blaze::SIMDcint8: Packed SIMD * type for complex 8-bit signed integral data types</li> // <li>\c * blaze::SIMDcuint8: Packed SIMD type for complex 8-bit unsigned integral * data types</li> // <li>\c blaze::SIMDcint16: Packed SIMD type for * complex 16-bit signed integral data types</li> // <li>\c * blaze::SIMDcuint16: Packed SIMD type for complex 16-bit unsigned integral * data types</li> // <li>\c blaze::SIMDcint32: Packed SIMD type for * complex 32-bit signed integral data types</li> // <li>\c * blaze::SIMDcuint32: Packed SIMD type for complex 32-bit unsigned integral * data types</li> // <li>\c blaze::SIMDcint64: Packed SIMD type for * complex 64-bit signed integral data types</li> // <li>\c * blaze::SIMDcuint64: Packed SIMD type for complex 64-bit unsigned integral * data types</li> // <li>\c blaze::SIMDcfloat: Packed SIMD type for * complex single precision floating point data</li> // <li>\c * blaze::SIMDcdouble: Packed SIMD type for complex double precision floating * point data</li> // </ul> // </li> // </ul> // // All SIMD types * provide the \c value data member for a direct access to the underlying * intrinsic // data element. In the following example, this intrinsic * element is passed to the AVX function // \c _mm256_sqrt_pd(): * * \code struct Sqrt { double operator()( double a ) const { return std::sqrt( a * ); } * * SIMDdouble load( const SIMDdouble& a ) const { return _mm256_sqrt_pd( a.value * ); } }; \endcode * * // In this example, whenever vectorization is generally applicable, the \c * load() function is // called instead of the function call operator for as * long as the number of remaining elements // is larger-or-equal to the * width of the packed SIMD type. In all other cases (which also // includes * peel-off and remainder loops) the scalar operation is used. // // Please * note that this example has two drawbacks: First, it will only compile in * case the // intrinsic \c _mm256_sqrt_pd() function is available (i.e. when * AVX is active). Second, the // availability of AVX is not taken into * account. The first drawback can be alleviated by making // the \c load() * function a function template. 
The second drawback can be dealt with by * adding a // \c simdEnabled() function template to the functor: * * \code struct Sqrt { double operator()( double a ) const { return std::sqrt( a * ); } * * template< typename T > T load( const T& a ) const { return _mm256_sqrt_pd( * a.value ); } * * template< typename T > static constexpr bool simdEnabled() { #if * defined(__AVX__) return true; #else return false; #endif } }; \endcode * * // The \c simdEnabled() function must be a \c static, \c constexpr function * and must return whether // or not vectorization is available for the given * data type \c T. In case the function returns // \c true, the \c load() * function is used for a vectorized evaluation, in case the function // * returns \c false, \c load() is not called. // // Note that this is a * simplified example that is only working when used for dense vectors and // * matrices with double precision floating point elements. The following code * shows the complete // implementation of the according functor that is used * within the \b Blaze library. The \b Blaze // \c Sqrt functor is working * for all data types that are providing a square root operation: * * \code namespace blaze { * * struct Sqrt { template< typename T > BLAZE_ALWAYS_INLINE auto operator()( * const T& a ) const { return sqrt( a ); } * * template< typename T > static constexpr bool simdEnabled() { return * HasSIMDSqrt<T>::value; } * * template< typename T > BLAZE_ALWAYS_INLINE auto load( const T& a ) const { * BLAZE_CONSTRAINT_MUST_BE_SIMD_PACK( T ); return sqrt( a ); } }; * * } // namespace blaze \endcode * * // The same approach can be taken for binary custom operations. The following * code demonstrates // the \c Min functor of the \b Blaze library, which is * working for all data types that provide // a \c min() operation: * * \code struct Min { explicit inline Min() {} * * template< typename T1, typename T2 > BLAZE_ALWAYS_INLINE decltype(auto) * operator()( const T1& a, const T2& b ) const { return min( a, b ); } * * template< typename T1, typename T2 > static constexpr bool simdEnabled() { * return HasSIMDMin<T1,T2>::value; } * * template< typename T1, typename T2 > BLAZE_ALWAYS_INLINE decltype(auto) load( * const T1& a, const T2& b ) const { BLAZE_CONSTRAINT_MUST_BE_SIMD_PACK( T1 * ); BLAZE_CONSTRAINT_MUST_BE_SIMD_PACK( T2 ); return min( a, b ); } }; * \endcode * * // For more information on the available \b Blaze SIMD data types and * functions, please see the // SIMD module in the complete \b Blaze * documentation. // // \n \subsection custom_operations_free_functions Free * Functions // // In order to extend \b Blaze with new functionality it is * possible to add free functions. Free // functions can be used either as * wrappers around calls to the map() function or to implement // general, * non-componentwise operations. The following two examples will demonstrate * both ideas. // // The first example shows the \c setToZero() function, * which resets a sparse matrix to zero // without affecting the sparsity * pattern. It is implemented as a convenience wrapper around // the map() * function: * * \code template< typename MT // Type of the sparse matrix , bool SO > // * Storage order void setToZero( blaze::SparseMatrix<MT,SO>& mat ) { (~mat) = * blaze::map( ~mat, []( int ){ return 0; } ); } \endcode * * // The blaze::SparseMatrix class template is the base class for all kinds of * sparse matrices and // provides an abstraction from the actual type \c MT * of the sparse matrix. 
However, due to the // <a * href="https://en.wikipedia.org/wiki/Curiously_recurring_template_pattern">C * uriously Recurring Template Pattern (CRTP)</a> // it also enables a * conversion back to the actual type. This downcast is performed via the * tilde // operator (i.e. \c operator~()). The template parameter \c SO * represents the storage order // (blaze::rowMajor or blaze::columnMajor) of * the matrix. // // The second example shows the \c countZeros() function, * which counts the number of values, which // are exactly zero, in a dense, * row-major matrix: * * \code template< typename MT > size_t countZeros( * blaze::DenseMatrix<MT,rowMajor>& mat ) { const size_t M( (~mat).rows() ); * const size_t N( (~mat).columns() ); size_t count( 0UL ); * * for( size_t i=0UL; i<M; ++i ) { for( size_t j=0UL; j<N; ++j ) { if( * blaze::isDefault<strict>( (~mat)(i,j) ) ) ++count; } } * * return count; } \endcode * * // The blaze::DenseMatrix class template is the base class for all kinds of * dense matrices. Again, // it is possible to perform the conversion to the * actual type via the tilde operator. // // The following two listings show * the declarations of all vector and matrix base classes, which // can be * used for custom free functions: * * \code template< typename VT // Concrete type of the dense or sparse vector , * bool TF > // Transpose flag (blaze::columnVector or blaze::rowVector) * class Vector; * * template< typename VT // Concrete type of the dense vector , bool TF > // * Transpose flag (blaze::columnVector or blaze::rowVector) class * DenseVector; * * template< typename VT // Concrete type of the sparse vector , bool TF > * // Transpose flag (blaze::columnVector or blaze::rowVector) class * SparseVector; \endcode * * \code template< typename MT // Concrete type of the dense or sparse matrix , * bool SO > // Storage order (blaze::rowMajor or blaze::columnMajor) * class Matrix; * * template< typename MT // Concrete type of the dense matrix , bool SO > // * Storage order (blaze::rowMajor or blaze::columnMajor) class DenseMatrix; * * template< typename MT // Concrete type of the sparse matrix , bool SO > * // Storage order (blaze::rowMajor or blaze::columnMajor) class * SparseMatrix; \endcode * * // \n \section custom_data_types Custom Data Types // <hr> // // The \b Blaze * library tries hard to make the use of custom data types as convenient, * easy and // intuitive as possible. However, unfortunately it is not * possible to meet the requirements of // all possible data types. Thus it * might be necessary to provide \b Blaze with some additional // information * about the data type. The following sections give an overview of the * necessary steps // to enable the use of the hypothetical custom data type * \c custom::double_t for vector and // matrix operations. For example: * * \code blaze::DynamicVector<custom::double_t> a, b, c; // ... Resizing and * initialization c = a + b; \endcode * * // The \b Blaze library assumes that the \c custom::double_t data type * provides \c operator+() // for additions, \c operator-() for subtractions, * \c operator*() for multiplications and // \c operator/() for divisions. If * any of these functions is missing it is necessary to implement // the * operator to perform the according operation. 
For this example we assume * that the custom // data type provides the four following functions instead * of operators: * * \code namespace custom { * * double_t add ( const double_t& a, const double_t b ); double_t sub ( const * double_t& a, const double_t b ); double_t mult( const double_t& a, const * double_t b ); double_t div ( const double_t& a, const double_t b ); * * } // namespace custom \endcode * * // The following implementations will satisfy the requirements of the \b * Blaze library: * * \code inline custom::double_t operator+( const custom::double_t& a, const * custom::double_t& b ) { return add( a, b ); } * * inline custom::double_t operator-( const custom::double_t& a, const * custom::double_t& b ) { return sub( a, b ); } * * inline custom::double_t operator*( const custom::double_t& a, const * custom::double_t& b ) { return mult( a, b ); } * * inline custom::double_t operator/( const custom::double_t& a, const * custom::double_t& b ) { return div( a, b ); } \endcode * * // \b Blaze will use all the information provided with these functions (for * instance the return // type) to properly handle the operations. In the * rare case that the return type cannot be // automatically determined from * the operator it might be additionally necessary to provide a // * specialization of the following four \b Blaze class templates: * * \code namespace blaze { * * template<> struct AddTrait<custom::double_t,custom::double_t> { using Type = * custom::double_t; }; * * template<> struct SubTrait<custom::double_t,custom::double_t> { using Type = * custom::double_t; }; * * template<> struct MultTrait<custom::double_t,custom::double_t> { using Type = * custom::double_t; }; * * template<> struct DivTrait<custom::double_t,custom::double_t> { using Type = * custom::double_t; }; * * } // namespace blaze \endcode * * // The same steps are necessary if several custom data types need to be * combined (as for instance // \c custom::double_t and \c custom::float_t). * Note that in this case both permutations need to // be taken into account: * * \code custom::double_t operator+( const custom::double_t& a, const * custom::float_t& b ); custom::double_t operator+( const custom::float_t& * a, const custom::double_t& b ); // ... \endcode * * // Please note that only built-in data types apply for vectorization and thus * custom data types // cannot achieve maximum performance! // // // \n * Previous: \ref configuration_files &nbsp; &nbsp; Next: \ref * custom_operations \n */ //************************************************************************************************* //**Customization of the Error Reporting Mechanism ************************************************* /* * !\page error_reporting_customization Customization of the Error Reporting * Mechanism // // \tableofcontents // // // \n \section * error_reporting_background Background // <hr> // // The default way of \b * Blaze to report errors of any kind is to throw a standard exception. // * However, although in general this approach works well, in certain * environments and under // special circumstances exceptions may not be the * mechanism of choice and a different error // reporting mechanism may be * desirable. For this reason, \b Blaze provides several macros, // which * enable the customization of the error reporting mechanism. Via these * macros it is // possible to replace the standard exceptions by some other * exception type or a completely // different approach to report errors. 
//
// \n \section error_reporting_general_customization Customization of the Reporting Mechanism
// <hr>
//
// In some cases it might be necessary to adapt the entire error reporting mechanism and to replace it by some other means to signal failure. The primary macro for this purpose is the \c BLAZE_THROW macro:

\code
#define BLAZE_THROW( EXCEPTION ) \
   throw EXCEPTION
\endcode

// This macro represents the default mechanism of the \b Blaze library to report errors of any kind. In order to customize the error reporting mechanism all that needs to be done is to define the macro prior to including any \b Blaze header file. This will cause the \b Blaze specific mechanism to be overridden. The following example demonstrates this by replacing exceptions by a call to a \c log() function and a direct call to abort:

\code
#define BLAZE_THROW( EXCEPTION ) \
   log( "..." ); \
   abort()

#include <blaze/Blaze.h>
\endcode

// Doing this will trigger a call to \c log() and an abort instead of throwing an exception whenever an error (such as an invalid argument) is detected.
//
// \note It is possible to execute several statements instead of executing a single statement to throw an exception. Also note that it is recommended to define the macro such that a subsequent semicolon is required!
//
// \warning This macro is provided with the intention to assist in adapting \b Blaze to special conditions and environments. However, the customization of the error reporting mechanism via this macro can have a significant effect on the library. Thus be advised to use the macro with due care!
//
//
// \n \section error_reporting_exception_customization Customization of the Type of Exceptions
// <hr>
//
// In addition to the customization of the entire error reporting mechanism it is also possible to customize the type of exceptions being thrown. This can be achieved by customizing any number of the following macros:

\code
#define BLAZE_THROW_BAD_ALLOC \
   BLAZE_THROW( std::bad_alloc() )

#define BLAZE_THROW_LOGIC_ERROR( MESSAGE ) \
   BLAZE_THROW( std::logic_error( MESSAGE ) )

#define BLAZE_THROW_INVALID_ARGUMENT( MESSAGE ) \
   BLAZE_THROW( std::invalid_argument( MESSAGE ) )

#define BLAZE_THROW_LENGTH_ERROR( MESSAGE ) \
   BLAZE_THROW( std::length_error( MESSAGE ) )

#define BLAZE_THROW_OUT_OF_RANGE( MESSAGE ) \
   BLAZE_THROW( std::out_of_range( MESSAGE ) )

#define BLAZE_THROW_RUNTIME_ERROR( MESSAGE ) \
   BLAZE_THROW( std::runtime_error( MESSAGE ) )
\endcode

// In order to customize the type of exception the according macro has to be defined prior to including any \b Blaze header file. This will override the \b Blaze default behavior. The following example demonstrates this by replacing \c std::invalid_argument by a custom exception type:

\code
class InvalidArgument
{
 public:
   InvalidArgument();
   explicit InvalidArgument( const std::string& message );
   // ...
};

#define BLAZE_THROW_INVALID_ARGUMENT( MESSAGE ) \
   BLAZE_THROW( InvalidArgument( MESSAGE ) )

#include <blaze/Blaze.h>
\endcode

// By manually defining the macro, an \c InvalidArgument exception is thrown instead of a \c std::invalid_argument exception. Note that it is recommended to define the macro such that a subsequent semicolon is required!
//
// \warning These macros are provided with the intention to assist in adapting \b Blaze to special conditions and environments.
* However, the customization of the type of an exception // via this macro * may have an effect on the library. Thus be advised to use the macro with * due // care! // // // \n \section error_reporting_special_errors * Customization of Special Errors // <hr> // // Last but not least it is * possible to customize the error reporting for special kinds of errors. // * This can be achieved by customizing any number of the following macros: * * \code #define BLAZE_THROW_DIVISION_BY_ZERO( MESSAGE ) \ * BLAZE_THROW_RUNTIME_ERROR( MESSAGE ) * * #define BLAZE_THROW_LAPACK_ERROR( MESSAGE ) \ BLAZE_THROW_RUNTIME_ERROR( * MESSAGE ) \endcode * * // As explained in the previous sections, in order to customize the handling * of special errors // the according macro has to be defined prior to * including any \b Blaze header file. This will // override the \b Blaze * default behavior. // // // \n Previous: \ref * vector_and_matrix_customization &nbsp; &nbsp; Next: \ref blas_functions \n */ //************************************************************************************************* //**BLAS Functions ********************************************************************************* /* * !\page blas_functions BLAS Functions // // \tableofcontents // // // For * vector/vector, matrix/vector and matrix/matrix multiplications with large * dense matrices // \b Blaze relies on the efficiency of BLAS libraries. For * this purpose, \b Blaze implements // several convenient C++ wrapper * functions for several BLAS functions. The following sections // give a * complete overview of all available BLAS level 1, 2 and 3 functions. // // * // \n \section blas_level_1 BLAS Level 1 // <hr> // // \subsection * blas_level_1_dotu Dot Product (dotu) // // The following wrapper functions * provide a generic interface for the BLAS functions for the // dot product * of two dense vectors (\c sdot(), \c ddot(), \c cdotu_sub(), and \c * zdotu_sub()): * * \code namespace blaze { * * float dotu( int n, const float* x, int incX, const float* y, int incY ); * * double dotu( int n, const double* x, int incX, const double* y, int incY ); * * complex<float> dotu( int n, const complex<float>* x, int incX, const * complex<float>* y, int incY ); * * complex<double> dotu( int n, const complex<double>* x, int incX, const * complex<double>* y, int incY ); * * template< typename VT1, bool TF1, typename VT2, bool TF2 > ElementType_<VT1> * dotu( const DenseVector<VT1,TF1>& x, const DenseVector<VT2,TF2>& y ); * * } // namespace blaze \endcode * * // \subsection blas_level_1_dotc Complex Conjugate Dot Product (dotc) // // * The following wrapper functions provide a generic interface for the BLAS * functions for the // complex conjugate dot product of two dense vectors * (\c sdot(), \c ddot(), \c cdotc_sub(), // and \c zdotc_sub()): * * \code namespace blaze { * * float dotc( int n, const float* x, int incX, const float* y, int incY ); * * double dotc( int n, const double* x, int incX, const double* y, int incY ); * * complex<float> dotc( int n, const complex<float>* x, int incX, const * complex<float>* y, int incY ); * * complex<double> dotc( int n, const complex<double>* x, int incX, const * complex<double>* y, int incY ); * * template< typename VT1, bool TF1, typename VT2, bool TF2 > ElementType_<VT1> * dotc( const DenseVector<VT1,TF1>& x, const DenseVector<VT2,TF2>& y ); * * } // namespace blaze \endcode * * // \subsection blas_level_1_axpy Axpy Product (axpy) // // The following * wrapper functions provide a generic interface for the BLAS 
functions for * the // axpy product of two dense vectors (\c saxpy(), \c daxpy(), \c * caxpy(), and \c zaxpy()): * * \code namespace blaze { * * void axpy( int n, float alpha, const float* x, int incX, float* y, int incY * ); * * void axpy( int n, double alpha, const double* x, int incX, double* y, int * incY ); * * void axpy( int n, complex<float> alpha, const complex<float>* x, int incX, * complex<float>* y, int incY ); * * void axpy( int n, complex<double> alpha, const complex<double>* x, int incX, * complex<double>* y, int incY ); * * template< typename VT1, bool TF1, typename VT2, bool TF2, typename ST > void * axpy( const DenseVector<VT1,TF1>& x, const DenseVector<VT2,TF2>& y, ST * alpha ); * * } // namespace blaze \endcode * * // \n \section blas_level_2 BLAS Level 2 // <hr> // // \subsection * blas_level_2_gemv General Matrix/Vector Multiplication (gemv) // // The * following wrapper functions provide a generic interface for the BLAS * functions for the // general matrix/vector multiplication (\c sgemv(), \c * dgemv(), \c cgemv(), and \c zgemv()): * * \code namespace blaze { * * void gemv( CBLAS_ORDER layout, CBLAS_TRANSPOSE transA, int m, int n, float * alpha, const float* A, int lda, const float* x, int incX, float beta, * float* y, int incY ); * * void gemv( CBLAS_ORDER layout, CBLAS_TRANSPOSE transA, int m, int n, double * alpha, const double* A, int lda, const double* x, int incX, double beta, * double* y, int incY ); * * void gemv( CBLAS_ORDER layout, CBLAS_TRANSPOSE transA, int m, int n, * complex<float> alpha, const complex<float>* A, int lda, const * complex<float>* x, int incX, complex<float> beta, complex<float>* y, int * incY ); * * void gemv( CBLAS_ORDER layout, CBLAS_TRANSPOSE transA, int m, int n, * complex<double> alpha, const complex<double>* A, int lda, const * complex<double>* x, int incX, complex<double> beta, complex<double>* y, * int incY ); * * template< typename VT1, typename MT1, bool SO, typename VT2, typename ST > * void gemv( DenseVector<VT1,false>& y, const DenseMatrix<MT1,SO>& A, const * DenseVector<VT2,false>& x, ST alpha, ST beta ); * * template< typename VT1, typename VT2, typename MT1, bool SO, typename ST > * void gemv( DenseVector<VT1,true>& y, const DenseVector<VT2,true>& x, const * DenseMatrix<MT1,SO>& A, ST alpha, ST beta ); * * } // namespace blaze \endcode * * // \n \subsection blas_level_2_trmv Triangular Matrix/Vector Multiplication * (trmv) // // The following wrapper functions provide a generic interface * for the BLAS functions for the // matrix/vector multiplication with a * triangular matrix (\c strmv(), \c dtrmv(), \c ctrmv(), // and \c ztrmv()): * * \code namespace blaze { * * void trmv( CBLAS_ORDER order, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, * CBLAS_DIAG diag, int n, const float* A, int lda, float* x, int incX ); * * void trmv( CBLAS_ORDER order, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, * CBLAS_DIAG diag, int n, const double* A, int lda, double* x, int incX ); * * void trmv( CBLAS_ORDER order, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, * CBLAS_DIAG diag, int n, const complex<float>* A, int lda, complex<float>* * x, int incX ); * * void trmv( CBLAS_ORDER order, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, * CBLAS_DIAG diag, int n, const complex<double>* A, int lda, complex<double>* * x, int incX ); * * template< typename VT, typename MT, bool SO > void trmv( * DenseVector<VT,false>& x, const DenseMatrix<MT,SO>& A, CBLAS_UPLO uplo ); * * template< typename VT, typename MT, bool SO > void trmv( * DenseVector<VT,true>& x, const 
           DenseMatrix<MT,SO>& A, CBLAS_UPLO uplo );

} // namespace blaze
\endcode

// \n \section blas_level_3 BLAS Level 3
// <hr>
//
// \subsection blas_level_3_gemm General Matrix/Matrix Multiplication (gemm)
//
// The following wrapper functions provide a generic interface for the BLAS functions for the general matrix/matrix multiplication (\c sgemm(), \c dgemm(), \c cgemm(), and \c zgemm()):

\code
namespace blaze {

void gemm( CBLAS_ORDER order, CBLAS_TRANSPOSE transA, CBLAS_TRANSPOSE transB,
           int m, int n, int k, float alpha, const float* A, int lda,
           const float* B, int ldb, float beta, float* C, int ldc );

void gemm( CBLAS_ORDER order, CBLAS_TRANSPOSE transA, CBLAS_TRANSPOSE transB,
           int m, int n, int k, double alpha, const double* A, int lda,
           const double* B, int ldb, double beta, double* C, int ldc );

void gemm( CBLAS_ORDER order, CBLAS_TRANSPOSE transA, CBLAS_TRANSPOSE transB,
           int m, int n, int k, complex<float> alpha, const complex<float>* A, int lda,
           const complex<float>* B, int ldb, complex<float> beta, complex<float>* C, int ldc );

void gemm( CBLAS_ORDER order, CBLAS_TRANSPOSE transA, CBLAS_TRANSPOSE transB,
           int m, int n, int k, complex<double> alpha, const complex<double>* A, int lda,
           const complex<double>* B, int ldb, complex<double> beta, complex<double>* C, int ldc );

template< typename MT1, bool SO1, typename MT2, bool SO2, typename MT3, bool SO3, typename ST >
void gemm( DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A,
           const DenseMatrix<MT3,SO3>& B, ST alpha, ST beta );

} // namespace blaze
\endcode

// \n \subsection blas_level_3_trmm Triangular Matrix/Matrix Multiplication (trmm)
//
// The following wrapper functions provide a generic interface for the BLAS functions for the matrix/matrix multiplication with a triangular matrix (\c strmm(), \c dtrmm(), \c ctrmm(), and \c ztrmm()):

\code
namespace blaze {

void trmm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA,
           CBLAS_DIAG diag, int m, int n, float alpha, const float* A, int lda,
           float* B, int ldb );

void trmm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA,
           CBLAS_DIAG diag, int m, int n, double alpha, const double* A, int lda,
           double* B, int ldb );

void trmm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA,
           CBLAS_DIAG diag, int m, int n, complex<float> alpha, const complex<float>* A, int lda,
           complex<float>* B, int ldb );

void trmm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA,
           CBLAS_DIAG diag, int m, int n, complex<double> alpha, const complex<double>* A, int lda,
           complex<double>* B, int ldb );

template< typename MT1, bool SO1, typename MT2, bool SO2, typename ST >
void trmm( DenseMatrix<MT1,SO1>& B, const DenseMatrix<MT2,SO2>& A,
           CBLAS_SIDE side, CBLAS_UPLO uplo, ST alpha );

} // namespace blaze
\endcode

// \n \subsection blas_level_3_trsm Triangular System Solver (trsm)
//
// The following wrapper functions provide a generic interface for the BLAS functions for solving a triangular system of equations (\c strsm(), \c dtrsm(), \c ctrsm(), and \c ztrsm()):

\code
namespace blaze {

void trsm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA,
           CBLAS_DIAG diag, int m, int n, float alpha, const float* A, int lda,
           float* B, int ldb );

void trsm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int m,
int n, double alpha, const * double* A, int lda, double* B, int ldb ); * * void trsm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, * CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int m, int n, complex<float> * alpha, const complex<float>* A, int lda, complex<float>* B, int ldb ); * * void trsm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, * CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int m, int n, complex<double> * alpha, const complex<double>* A, int lda, complex<double>* B, int ldb ); * * template< typename MT, bool SO, typename VT, bool TF, typename ST > void * trsm( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, CBLAS_SIDE side, * CBLAS_UPLO uplo, ST alpha ); * * template< typename MT1, bool SO1, typename MT2, bool SO2, typename ST > void * trsm( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, CBLAS_SIDE * side, CBLAS_UPLO uplo, ST alpha ); * * } // namespace blaze \endcode * * // \n Previous: \ref error_reporting_customization &nbsp; &nbsp; Next: \ref * lapack_functions \n */ //************************************************************************************************* //**LAPACK Functions ******************************************************************************* /* * !\page lapack_functions LAPACK Functions // // \tableofcontents // // // * \n \section lapack_introction Introduction // <hr> // // The \b Blaze * library makes extensive use of the LAPACK functionality for various * compute tasks // (including the decomposition, inversion and the * computation of the determinant of dense matrices). // For this purpose, \b * Blaze implements several convenient C++ wrapper functions for all required * // LAPACK functions. The following sections give a complete overview of * all available LAPACK wrapper // functions. For more details on the * individual LAPACK functions see the \b Blaze function // documentation or * the LAPACK online documentation browser: // // * http://www.netlib.org/lapack/explore-html/ // // Most of the wrapper * functions are implemented as thin wrappers around LAPACK functions. They * // provide the parameters of the original LAPACK functions and thus * provide maximum flexibility: * * \code constexpr size_t N( 100UL ); * * blaze::DynamicMatrix<double,blaze::columnMajor> A( N, N ); // ... * Initializing the matrix * * const int m ( numeric_cast<int>( A.rows() ) ); // == N const int n * ( numeric_cast<int>( A.columns() ) ); // == N const int lda ( * numeric_cast<int>( A.spacing() ) ); // >= N const int lwork( n*lda ); * * const std::unique_ptr<int[]> ipiv( new int[N] ); // No initialization * required const std::unique_ptr<double[]> work( new double[N] ); // No * initialization required * * int info( 0 ); * * getrf( m, n, A.data(), lda, ipiv.get(), &info ); // Reports * failure via 'info' getri( n, A.data(), lda, ipiv.get(), work.get(), lwork, * &info ); // Reports failure via 'info' \endcode * * // Additionally, \b Blaze provides wrappers that provide a higher level of * abstraction. These // wrappers provide a maximum of convenience: * * \code constexpr size_t N( 100UL ); * * blaze::DynamicMatrix<double,blaze::columnMajor> A( N, N ); // ... * Initializing the matrix * * const std::unique_ptr<int[]> ipiv( new int[N] ); // No initialization * required * * getrf( A, ipiv.get() ); // Cannot fail getri( A, ipiv.get() ); // Reports * failure via exception \endcode * * // \note All functions only work for general, non-adapted matrices with \c * float, \c double, // \c complex<float>, or \c complex<double> element * type. 
The attempt to call the function with // adaptors or matrices of any * other element type results in a compile time error! // // \note All * functions can only be used if a fitting LAPACK library is available and * linked to // the final executable. Otherwise a call to this function will * result in a linker error. // // \note For performance reasons all * functions do only provide the basic exception safety guarantee, // i.e. in * case an exception is thrown the given matrix may already have been * modified. // // // \n \section lapack_decomposition Matrix Decomposition * // <hr> // // The following functions decompose/factorize the given dense * matrix. Based on this decomposition // the matrix can be inverted or used * to solve a linear system of equations. // // // \n \subsection * lapack_lu_decomposition LU Decomposition // // The following functions * provide an interface for the LAPACK functions \c sgetrf(), \c dgetrf(), // * \c cgetrf(), and \c zgetrf(), which compute the LU decomposition for the * given general matrix: * * \code namespace blaze { * * void getrf( int m, int n, float* A, int lda, int* ipiv, int* info ); * * void getrf( int m, int n, double* A, int lda, int* ipiv, int* info ); * * void getrf( int m, int n, complex<float>* A, int lda, int* ipiv, int* info ); * * void getrf( int m, int n, complex<double>* A, int lda, int* ipiv, int* info * ); * * template< typename MT, bool SO > void getrf( DenseMatrix<MT,SO>& A, int* ipiv * ); * * } // namespace blaze \endcode * * // The decomposition has the form * * \f[ A = P \cdot L \cdot U, \f]\n * * // where \c P is a permutation matrix, \c L is a lower unitriangular matrix, * and \c U is an upper // triangular matrix. The resulting decomposition is * stored within \a A: In case of a column-major // matrix, \c L is stored in * the lower part of \a A and \c U is stored in the upper part. The unit // * diagonal elements of \c L are not stored. In case \a A is a row-major * matrix the result is // transposed. // // \note The LU decomposition will * never fail, even for singular matrices. However, in case of a // singular * matrix the resulting decomposition cannot be used for a matrix inversion * or solving // a linear system of equations. // // // \n \subsection * lapack_ldlt_decomposition LDLT Decomposition // // The following functions * provide an interface for the LAPACK functions \c ssytrf(), \c dsytrf(), // * \c csytrf(), and \c zsytrf(), which compute the LDLT (Bunch-Kaufman) * decomposition for the given // symmetric indefinite matrix: * * \code namespace blaze { * * void sytrf( char uplo, int n, float* A, int lda, int* ipiv, float* work, int * lwork, int* info ); * * void sytrf( char uplo, int n, double* A, int lda, int* ipiv, double* work, * int lwork, int* info ); * * void sytrf( char uplo, int n, complex<float>* A, int lda, int* ipiv, * complex<float>* work, int lwork, int* info ); * * void sytrf( char uplo, int n, complex<double>* A, int lda, int* ipiv, * complex<double>* work, int lwork, int* info ); * * template< typename MT, bool SO > void sytrf( DenseMatrix<MT,SO>& A, char * uplo, int* ipiv ); * * } // namespace blaze \endcode * * // The decomposition has the form * * \f[ A = U D U^{T} \texttt{ (if uplo = 'U'), or } A = L D L^{T} \texttt{ (if * uplo = 'L'), } \f] * * // where \c U (or \c L) is a product of permutation and unit upper (lower) * triangular matrices, // and \c D is symmetric and block diagonal with * 1-by-1 and 2-by-2 diagonal blocks. 
The resulting decomposition is stored within \a A: In case \a uplo is set to \c 'L' the result is stored in the lower part of the matrix and the upper part remains untouched, in case \a uplo is set to \c 'U' the result is stored in the upper part and the lower part remains untouched.
//
// \note The Bunch-Kaufman decomposition will never fail, even for singular matrices. However, in case of a singular matrix the resulting decomposition cannot be used for a matrix inversion or solving a linear system of equations.
//
//
// \n \subsection lapack_ldlh_decomposition LDLH Decomposition
//
// The following functions provide an interface for the LAPACK functions \c chetrf() and \c zhetrf(), which compute the LDLH (Bunch-Kaufman) decomposition for the given Hermitian indefinite matrix:

\code
namespace blaze {

void hetrf( char uplo, int n, complex<float>* A, int lda, int* ipiv,
            complex<float>* work, int lwork, int* info );

void hetrf( char uplo, int n, complex<double>* A, int lda, int* ipiv,
            complex<double>* work, int lwork, int* info );

template< typename MT, bool SO >
void hetrf( DenseMatrix<MT,SO>& A, char uplo, int* ipiv );

} // namespace blaze
\endcode

// The decomposition has the form

   \f[ A = U D U^{H} \texttt{ (if uplo = 'U'), or } A = L D L^{H} \texttt{ (if uplo = 'L'), } \f]

// where \c U (or \c L) is a product of permutation and unit upper (lower) triangular matrices, and \c D is Hermitian and block diagonal with 1-by-1 and 2-by-2 diagonal blocks. The resulting decomposition is stored within \a A: In case \a uplo is set to \c 'L' the result is stored in the lower part of the matrix and the upper part remains untouched, in case \a uplo is set to \c 'U' the result is stored in the upper part and the lower part remains untouched.
//
// \note The Bunch-Kaufman decomposition will never fail, even for singular matrices. However, in case of a singular matrix the resulting decomposition cannot be used for a matrix inversion or solving a linear system of equations.
//
//
// \n \subsection lapack_llh_decomposition Cholesky Decomposition
//
// The following functions provide an interface for the LAPACK functions \c spotrf(), \c dpotrf(), \c cpotrf(), and \c zpotrf(), which compute the Cholesky (LLH) decomposition for the given positive definite matrix:

\code
namespace blaze {

void potrf( char uplo, int n, float* A, int lda, int* info );

void potrf( char uplo, int n, double* A, int lda, int* info );

void potrf( char uplo, int n, complex<float>* A, int lda, int* info );

void potrf( char uplo, int n, complex<double>* A, int lda, int* info );

template< typename MT, bool SO >
void potrf( DenseMatrix<MT,SO>& A, char uplo );

} // namespace blaze
\endcode

// The decomposition has the form

   \f[ A = U^{T} U \texttt{ (if uplo = 'U'), or } A = L L^{T} \texttt{ (if uplo = 'L'), } \f]

// where \c U is an upper triangular matrix and \c L is a lower triangular matrix. The Cholesky decomposition fails if the given matrix \a A is not a positive definite matrix. In this case a \c std::invalid_argument exception is thrown.
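//
// As a brief usage sketch of the high-level \c potrf() wrapper declared above (the matrix content below is assumed to have been set up as symmetric positive definite):

\code
blaze::DynamicMatrix<double,blaze::columnMajor> A( 100UL, 100UL );
// ... Initializing A as a symmetric positive definite matrix

// In-place Cholesky (LLH) decomposition; with uplo = 'L' the result is stored in the
// lower part of A. Throws a std::invalid_argument exception in case A is not positive definite.
potrf( A, 'L' );
\endcode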
// // // \n \subsection * lapack_qr_decomposition QR Decomposition // // The following functions * provide an interface for the LAPACK functions \c sgeqrf(), \c dgeqrf(), // * \c cgeqrf(), and \c zgeqrf(), which compute the QR decomposition of the * given general matrix: * * \code namespace blaze { * * void geqrf( int m, int n, float* A, int lda, float* tau, float* work, int * lwork, int* info ); * * void geqrf( int m, int n, double* A, int lda, double* tau, double* work, int * lwork, int* info ); * * void geqrf( int m, int n, complex<float>* A, int lda, complex<float>* tau, * complex<float>* work, int lwork, int* info ); * * void geqrf( int m, int n, complex<double>* A, int lda, complex<double>* tau, * complex<double>* work, int lwork, int* info ); * * template< typename MT, bool SO > void geqrf( DenseMatrix<MT,SO>& A, typename * MT::ElementType* tau ); * * } // namespace blaze \endcode * * // The decomposition has the form * * \f[ A = Q \cdot R, \f] * * // where the \c Q is represented as a product of elementary reflectors * * \f[ Q = H(1) H(2) . . . H(k) \texttt{, with k = min(m,n).} \f] * * // Each H(i) has the form * * \f[ H(i) = I - tau \cdot v \cdot v^T, \f] * * // where \c tau is a real scalar, and \c v is a real vector with <tt>v(0:i-1) * = 0</tt> and // <tt>v(i) = 1</tt>. <tt>v(i+1:m)</tt> is stored on exit in * <tt>A(i+1:m,i)</tt>, and \c tau // in \c tau(i). Thus on exit the elements * on and above the diagonal of the matrix contain the // min(\a m,\a * n)-by-\a n upper trapezoidal matrix \c R (\c R is upper triangular if \a m * >= \a n); // the elements below the diagonal, with the array \c tau, * represent the orthogonal matrix \c Q as // a product of min(\a m,\a n) * elementary reflectors. // // The following functions provide an interface * for the LAPACK functions \c sorgqr(), \c dorgqr(), // \c cungqr(), and \c * zunqqr(), which reconstruct the \c Q matrix from a QR decomposition: * * \code namespace blaze { * * void orgqr( int m, int n, int k, float* A, int lda, const float* tau, float* * work, int lwork, int* info ); * * void orgqr( int m, int n, int k, double* A, int lda, const double* tau, * double* work, int lwork, int* info ); * * void ungqr( int m, int n, int k, complex<float>* A, int lda, const * complex<float>* tau, complex<float>* work, int lwork, int* info ); * * void ungqr( int m, int n, int k, complex<double>* A, int lda, const * complex<double>* tau, complex<double>* work, int lwork, int* info ); * * template< typename MT, bool SO > void orgqr( DenseMatrix<MT,SO>& A, const * typename MT::ElementType* tau ); * * template< typename MT, bool SO > void ungqr( DenseMatrix<MT,SO>& A, const * typename MT::ElementType* tau ); * * } // namespace blaze \endcode * * // The following functions provide an interface for the LAPACK functions \c * sormqr(), \c dormqr(), // \c cunmqr(), and \c zunmqr(), which can be used * to multiply a matrix with the \c Q matrix from // a QR decomposition: * * \code namespace blaze { * * void ormqr( char side, char trans, int m, int n, int k, const float* A, int * lda, const float* tau, float* C, int ldc, float* work, int lwork, int* * info ); * * void ormqr( char side, char trans, int m, int n, int k, const double* A, int * lda, const double* tau, double* C, int ldc, double* work, int lwork, int* * info ); * * void unmqr( char side, char trans, int m, int n, int k, const complex<float>* * A, int lda, const complex<float>* tau, complex<float>* C, int ldc, * complex<float>* work, int lwork, int* info ); * * void unmqr( char side, char 
trans, int m, int n, int k, const complex<double>* * A, int lda, const complex<double>* tau, complex<double>* C, int ldc, * complex<double>* work, int lwork, int* info ); * * template< typename MT1, bool SO1, typename MT2, bool SO2 > void ormqr( * DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A, char side, char * trans, const ElementType_<MT2>* tau ); * * template< typename MT1, bool SO, typename MT2 > void unmqr( * DenseMatrix<MT1,SO>& C, DenseMatrix<MT2,SO>& A, char side, char trans, * ElementType_<MT2>* tau ); * * } // namespace blaze \endcode * * // \n \subsection lapack_rq_decomposition RQ Decomposition // // The * following functions provide an interface for the LAPACK functions \c * sgerqf(), \c dgerqf(), // \c cgerqf(), and \c zgerqf(), which compute the * RQ decomposition of the given general matrix: * * \code namespace blaze { * * void gerqf( int m, int n, float* A, int lda, float* tau, float* work, int * lwork, int* info ); * * void gerqf( int m, int n, double* A, int lda, double* tau, double* work, int * lwork, int* info ); * * void gerqf( int m, int n, complex<float>* A, int lda, complex<float>* tau, * complex<float>* work, int lwork, int* info ); * * void gerqf( int m, int n, complex<double>* A, int lda, complex<double>* tau, * complex<double>* work, int lwork, int* info ); * * template< typename MT, bool SO > void gerqf( DenseMatrix<MT,SO>& A, typename * MT::ElementType* tau ); * * } // namespace blaze \endcode * * // The decomposition has the form * * \f[ A = R \cdot Q, \f] * * // where the \c Q is represented as a product of elementary reflectors * * \f[ Q = H(1) H(2) . . . H(k) \texttt{, with k = min(m,n).} \f] * * // Each H(i) has the form * * \f[ H(i) = I - tau \cdot v \cdot v^T, \f] * * // where \c tau is a real scalar, and \c v is a real vector with * <tt>v(n-k+i+1:n) = 0</tt> and // <tt>v(n-k+i) = 1</tt>. * <tt>v(1:n-k+i-1)</tt> is stored on exit in <tt>A(m-k+i,1:n-k+i-1)</tt>, // * and \c tau in \c tau(i). Thus in case \a m <= \a n, the upper triangle of * the subarray // <tt>A(1:m,n-m+1:n)</tt> contains the \a m-by-\a m upper * triangular matrix \c R and in case // \a m >= \a n, the elements on and * above the (\a m-\a n)-th subdiagonal contain the \a m-by-\a n // upper * trapezoidal matrix \c R; the remaining elements in combination with the * array \c tau // represent the orthogonal matrix \c Q as a product of * min(\a m,\a n) elementary reflectors. 
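//
// A minimal sketch of calling the dense-matrix overload of \c gerqf() (the matrix values and the
// buffer used for \c tau are illustrative assumptions):
//
\code
#include <blaze/Math.h>
#include <vector>

int main()
{
   blaze::DynamicMatrix<double,blaze::columnMajor> A{ { 1.0, 2.0, 3.0 },
                                                      { 4.0, 5.0, 6.0 } };

   std::vector<double> tau( 2UL );   // min(m,n) scalar factors of the elementary reflectors

   blaze::gerqf( A, tau.data() );    // RQ decomposition; R and the representation of Q are stored in A and tau
}
\endcode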
// // The following functions * provide an interface for the LAPACK functions \c sorgrq(), \c dorgrq(), // * \c cungrq(), and \c zunqrq(), which reconstruct the \c Q matrix from a RQ * decomposition: * * \code namespace blaze { * * void orgrq( int m, int n, int k, float* A, int lda, const float* tau, float* * work, int lwork, int* info ); * * void orgrq( int m, int n, int k, double* A, int lda, const double* tau, * double* work, int lwork, int* info ); * * void ungrq( int m, int n, int k, complex<float>* A, int lda, const * complex<float>* tau, complex<float>* work, int lwork, int* info ); * * void ungrq( int m, int n, int k, complex<double>* A, int lda, const * complex<double>* tau, complex<double>* work, int lwork, int* info ); * * template< typename MT, bool SO > void orgrq( DenseMatrix<MT,SO>& A, const * typename MT::ElementType* tau ); * * template< typename MT, bool SO > void ungrq( DenseMatrix<MT,SO>& A, const * typename MT::ElementType* tau ); * * } // namespace blaze \endcode * * // The following functions provide an interface for the LAPACK functions \c * sormrq(), \c dormrq(), // \c cunmrq(), and \c zunmrq(), which can be used * to multiply a matrix with the \c Q matrix from // a RQ decomposition: * * \code namespace blaze { * * void ormrq( char side, char trans, int m, int n, int k, const float* A, int * lda, const float* tau, float* C, int ldc, float* work, int lwork, int* * info ); * * void ormrq( char side, char trans, int m, int n, int k, const double* A, int * lda, const double* tau, double* C, int ldc, double* work, int lwork, int* * info ); * * void unmrq( char side, char trans, int m, int n, int k, const complex<float>* * A, int lda, const complex<float>* tau, complex<float>* C, int ldc, * complex<float>* work, int lwork, int* info ); * * void unmrq( char side, char trans, int m, int n, int k, const complex<double>* * A, int lda, const complex<double>* tau, complex<double>* C, int ldc, * complex<double>* work, int lwork, int* info ); * * template< typename MT1, bool SO1, typename MT2, bool SO2 > void ormrq( * DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A, char side, char * trans, const ElementType_<MT2>* tau ); * * template< typename MT1, bool SO, typename MT2 > void unmrq( * DenseMatrix<MT1,SO>& C, DenseMatrix<MT2,SO>& A, char side, char trans, * ElementType_<MT2>* tau ); * * } // namespace blaze \endcode * * // \n \subsection lapack_ql_decomposition QL Decomposition // // The * following functions provide an interface for the LAPACK functions \c * sgeqlf(), \c dgeqlf(), // \c cgeqlf(), and \c zgeqlf(), which compute the * QL decomposition of the given general matrix: * * \code namespace blaze { * * void geqlf( int m, int n, float* A, int lda, float* tau, float* work, int * lwork, int* info ); * * void geqlf( int m, int n, double* A, int lda, double* tau, double* work, int * lwork, int* info ); * * void geqlf( int m, int n, complex<float>* A, int lda, complex<float>* tau, * complex<float>* work, int lwork, int* info ); * * void geqlf( int m, int n, complex<double>* A, int lda, complex<double>* tau, * complex<double>* work, int lwork, int* info ); * * template< typename MT, bool SO > void geqlf( DenseMatrix<MT,SO>& A, typename * MT::ElementType* tau ); * * } // namespace blaze \endcode * * // The decomposition has the form * * \f[ A = Q \cdot L, \f] * * // where the \c Q is represented as a product of elementary reflectors * * \f[ Q = H(k) . . . 
H(2) H(1) \texttt{, with k = min(m,n).} \f] * * // Each H(i) has the form * * \f[ H(i) = I - tau \cdot v \cdot v^T, \f] * * // where \c tau is a real scalar, and \c v is a real vector with * <tt>v(m-k+i+1:m) = 0</tt> and // <tt>v(m-k+i) = 1</tt>. * <tt>v(1:m-k+i-1)</tt> is stored on exit in <tt>A(1:m-k+i-1,n-k+i)</tt>, // * and \c tau in \c tau(i). Thus in case \a m >= \a n, the lower triangle of * the subarray // A(m-n+1:m,1:n) contains the \a n-by-\a n lower triangular * matrix \c L and in case \a m <= \a n, // the elements on and below the (\a * n-\a m)-th subdiagonal contain the \a m-by-\a n lower // trapezoidal * matrix \c L; the remaining elements in combination with the array \c tau * represent // the orthogonal matrix \c Q as a product of min(\a m,\a n) * elementary reflectors. // // The following functions provide an interface * for the LAPACK functions \c sorgql(), \c dorgql(), // \c cungql(), and \c * zunqql(), which reconstruct the \c Q matrix from an QL decomposition: * * \code namespace blaze { * * void orgql( int m, int n, int k, float* A, int lda, const float* tau, float* * work, int lwork, int* info ); * * void orgql( int m, int n, int k, double* A, int lda, const double* tau, * double* work, int lwork, int* info ); * * void ungql( int m, int n, int k, complex<float>* A, int lda, const * complex<float>* tau, complex<float>* work, int lwork, int* info ); * * void ungql( int m, int n, int k, complex<double>* A, int lda, const * complex<double>* tau, complex<double>* work, int lwork, int* info ); * * template< typename MT, bool SO > void orgql( DenseMatrix<MT,SO>& A, const * typename MT::ElementType* tau ); * * template< typename MT, bool SO > void ungql( DenseMatrix<MT,SO>& A, const * typename MT::ElementType* tau ); * * } // namespace blaze \endcode * * // The following functions provide an interface for the LAPACK functions \c * sormql(), \c dormql(), // \c cunmql(), and \c zunmql(), which can be used * to multiply a matrix with the \c Q matrix from // a QL decomposition: * * \code namespace blaze { * * void ormql( char side, char trans, int m, int n, int k, const float* A, int * lda, const float* tau, float* C, int ldc, float* work, int lwork, int* * info ); * * void ormql( char side, char trans, int m, int n, int k, const double* A, int * lda, const double* tau, double* C, int ldc, double* work, int lwork, int* * info ); * * void unmql( char side, char trans, int m, int n, int k, const complex<float>* * A, int lda, const complex<float>* tau, complex<float>* C, int ldc, * complex<float>* work, int lwork, int* info ); * * void unmql( char side, char trans, int m, int n, int k, const complex<double>* * A, int lda, const complex<double>* tau, complex<double>* C, int ldc, * complex<double>* work, int lwork, int* info ); * * template< typename MT1, bool SO1, typename MT2, bool SO2 > void ormql( * DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A, char side, char * trans, const ElementType_<MT2>* tau ); * * template< typename MT1, bool SO, typename MT2 > void unmql( * DenseMatrix<MT1,SO>& C, DenseMatrix<MT2,SO>& A, char side, char trans, * ElementType_<MT2>* tau ); * * } // namespace blaze \endcode * * // \n \subsection lapack_lq_decomposition LQ Decomposition // // The * following functions provide an interface for the LAPACK functions \c * sgelqf(), \c dgelqf(), // \c cgelqf(), and \c zgelqf(), which compute the * LQ decomposition of the given general matrix: * * \code namespace blaze { * * void gelqf( int m, int n, float* A, int lda, float* tau, float* work, int * 
lwork, int* info ); * * void gelqf( int m, int n, double* A, int lda, double* tau, double* work, int * lwork, int* info ); * * void gelqf( int m, int n, complex<float>* A, int lda, complex<float>* tau, * complex<float>* work, int lwork, int* info ); * * void gelqf( int m, int n, complex<double>* A, int lda, complex<double>* tau, * complex<double>* work, int lwork, int* info ); * * template< typename MT, bool SO > void gelqf( DenseMatrix<MT,SO>& A, typename * MT::ElementType* tau ); * * } // namespace blaze \endcode * * // The decomposition has the form * * \f[ A = L \cdot Q, \f] * * // where the \c Q is represented as a product of elementary reflectors * * \f[ Q = H(k) . . . H(2) H(1) \texttt{, with k = min(m,n).} \f] * * // Each H(i) has the form * * \f[ H(i) = I - tau \cdot v \cdot v^T, \f] * * // where \c tau is a real scalar, and \c v is a real vector with <tt>v(0:i-1) * = 0</tt> and // <tt>v(i) = 1</tt>. <tt>v(i+1:n)</tt> is stored on exit in * <tt>A(i,i+1:n)</tt>, and \c tau // in \c tau(i). Thus on exit the elements * on and below the diagonal of the matrix contain the // \a m-by-min(\a m,\a * n) lower trapezoidal matrix \c L (\c L is lower triangular if \a m <= \a * n); // the elements above the diagonal, with the array \c tau, represent * the orthogonal matrix \c Q // as a product of min(\a m,\a n) elementary * reflectors. // // The following functions provide an interface for the * LAPACK functions \c sorglq(), \c dorglq(), // \c cunglq(), and \c * zunqlq(), which reconstruct the \c Q matrix from an LQ decomposition: * * \code namespace blaze { * * void orglq( int m, int n, int k, float* A, int lda, const float* tau, float* * work, int lwork, int* info ); * * void orglq( int m, int n, int k, double* A, int lda, const double* tau, * double* work, int lwork, int* info ); * * void unglq( int m, int n, int k, complex<float>* A, int lda, const * complex<float>* tau, complex<float>* work, int lwork, int* info ); * * void unglq( int m, int n, int k, complex<double>* A, int lda, const * complex<double>* tau, complex<double>* work, int lwork, int* info ); * * template< typename MT, bool SO > void orglq( DenseMatrix<MT,SO>& A, const * typename MT::ElementType* tau ); * * template< typename MT, bool SO > void unglq( DenseMatrix<MT,SO>& A, const * typename MT::ElementType* tau ); * * } // namespace blaze \endcode * * // The following functions provide an interface for the LAPACK functions \c * sormlq(), \c dormlq(), // \c cunmlq(), and \c zunmlq(), which can be used * to multiply a matrix with the \c Q matrix from // a LQ decomposition: * * \code namespace blaze { * * void ormlq( char side, char trans, int m, int n, int k, const float* A, int * lda, const float* tau, float* C, int ldc, float* work, int lwork, int* * info ); * * void ormlq( char side, char trans, int m, int n, int k, const double* A, int * lda, const double* tau, double* C, int ldc, double* work, int lwork, int* * info ); * * void unmlq( char side, char trans, int m, int n, int k, const complex<float>* * A, int lda, const complex<float>* tau, complex<float>* C, int ldc, * complex<float>* work, int lwork, int* info ); * * void unmlq( char side, char trans, int m, int n, int k, const complex<double>* * A, int lda, const complex<double>* tau, complex<double>* C, int ldc, * complex<double>* work, int lwork, int* info ); * * template< typename MT1, bool SO1, typename MT2, bool SO2 > void ormlq( * DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A, char side, char * trans, const ElementType_<MT2>* tau ); * * template< typename 
MT1, bool SO, typename MT2 > void unmlq( * DenseMatrix<MT1,SO>& C, DenseMatrix<MT2,SO>& A, char side, char trans, * ElementType_<MT2>* tau ); * * } // namespace blaze \endcode * * // \n \section lapack_inversion Matrix Inversion // <hr> // // Given a matrix * that has already been decomposed, the following functions can be used to * invert // the matrix in-place. // // // \n \subsection lapack_lu_inversion * LU-based Inversion // // The following functions provide an interface for * the LAPACK functions \c sgetri(), \c dgetri(), // \c cgetri(), and \c * zgetri(), which invert a general matrix that has already been decomposed * by // an \ref lapack_lu_decomposition : * * \code namespace blaze { * * void getri( int n, float* A, int lda, const int* ipiv, float* work, int * lwork, int* info ); * * void getri( int n, double* A, int lda, const int* ipiv, double* work, int * lwork, int* info ); * * void getri( int n, complex<float>* A, int lda, const int* ipiv, * complex<float>* work, int lwork, int* info ); * * void getri( int n, complex<double>* A, int lda, const int* ipiv, * complex<double>* work, int lwork, int* info ); * * template< typename MT, bool SO > void getri( DenseMatrix<MT,SO>& A, const int* * ipiv ); * * } // namespace blaze \endcode * * // The functions fail if ... // // - ... the given matrix is not a square * matrix; // - ... the given matrix is singular and not invertible. // // * The first four functions report failure via the \c info argument, the * fifth function throws a // \a std::invalid_argument exception in case of * an error. // // // \n \subsection lapack_ldlt_inversion LDLT-based * Inversion // // The following functions provide an interface for the * LAPACK functions \c ssytri(), \c dsytri(), // \c csytri(), and \c * zsytri(), which invert a symmetric indefinite matrix that has already been * // decomposed by an \ref lapack_ldlt_decomposition : * * \code namespace blaze { * * void sytri( char uplo, int n, float* A, int lda, const int* ipiv, float* * work, int* info ); * * void sytri( char uplo, int n, double* A, int lda, const int* ipiv, double* * work, int* info ); * * void sytri( char uplo, int n, complex<float>* A, int lda, const int* ipiv, * complex<float>* work, int* info ); * * void sytri( char uplo, int n, complex<double>* A, int lda, const int* ipiv, * complex<double>* work, int* info ); * * template< typename MT, bool SO > void sytri( DenseMatrix<MT,SO>& A, char * uplo, const int* ipiv ); * * } // namespace blaze \endcode * * // The functions fail if ... // // - ... the given matrix is not a square * matrix; // - ... the given matrix is singular and not invertible. // // * The first four functions report failure via the \c info argument, the * fifth function throws a // \a std::invalid_argument exception in case of * an error. // // // \n \subsection lapack_ldlh_inversion LDLH-based * Inversion // // The following functions provide an interface for the * LAPACK functions \c chetri() and // \c zhetri(), which invert an Hermitian * indefinite matrix that has already been decomposed by // an \ref * lapack_ldlh_decomposition : * * \code namespace blaze { * * void hetri( char uplo, int n, complex<float>* A, int lda, const int* ipiv, * complex<float>* work, int* info ); * * void hetri( char uplo, int n, complex<double>* A, int lda, const int* ipiv, * complex<double>* work, int* info ); * * template< typename MT, bool SO > void hetri( DenseMatrix<MT,SO>& A, char * uplo, const int* ipiv ); * * } // namespace blaze \endcode * * // The functions fail if ... 
// // - ... the given matrix is not a square * matrix; // - ... the given matrix is singular and not invertible. // // * The first four functions report failure via the \c info argument, the * fifth function throws a // \a std::invalid_argument exception in case of * an error. // // // \n \subsection lapack_llh_inversion Cholesky-based * Inversion // // The following functions provide an interface for the * LAPACK functions \c spotri(), \c dpotri(), // \c cpotri(), and \c * zpotri(), which invert a positive definite matrix that has already been // * decomposed by an \ref lapack_llh_decomposition : * * \code namespace blaze { * * void potri( char uplo, int n, float* A, int lda, int* info ); * * void potri( char uplo, int n, double* A, int lda, int* info ); * * void potri( char uplo, int n, complex<float>* A, int lda, int* info ); * * void potri( char uplo, int n, complex<double>* A, int lda, int* info ); * * template< typename MT, bool SO > void potri( DenseMatrix<MT,SO>& A, char uplo * ); * * } // namespace blaze \endcode * * // The functions fail if ... // // - ... the given matrix is not a square * matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - * ... the given matrix is singular and not invertible. // // The first four * functions report failure via the \c info argument, the fifth function * throws a // \a std::invalid_argument exception in case of an error. // // * // \n \subsection lapack_triangular_inversion Inversion of Triangular * Matrices // // The following functions provide an interface for the LAPACK * functions \c strtri(), \c dtrtri(), // \c ctrtri(), and \c ztrtri(), which * invert the given triangular matrix in-place: * * \code namespace blaze { * * void trtri( char uplo, char diag, int n, float* A, int lda, int* info ); * * void trtri( char uplo, char diag, int n, double* A, int lda, int* info ); * * void trtri( char uplo, char diag, int n, complex<float>* A, int lda, int* * info ); * * void trtri( char uplo, char diag, int n, complex<double>* A, int lda, int* * info ); * * template< typename MT, bool SO > void trtri( DenseMatrix<MT,SO>& A, char * uplo, char diag ); * * } // namespace blaze \endcode * * // The functions fail if ... // // - ... the given matrix is not a square * matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - * ... the given \a diag argument is neither 'U' nor 'N'; // - ... the given * matrix is singular and not invertible. // // The first four functions * report failure via the \c info argument, the fifth function throws a // \a * std::invalid_argument exception in case of an error. // // // \n \section * lapack_substitution Substitution // <hr> // // Given a matrix that has * already been decomposed the following functions can be used to perform // * the forward/backward substitution step to compute the solution to a system * of linear equations. 
// Note that depending on the storage order of the * system matrix and the given right-hand side the // functions solve * different equation systems: // // Single right-hand side: // - \f$ A * *x=b \f$ if \a A is column-major // - \f$ A^T*x=b \f$ if \a A is * row-major // // Multiple right-hand sides: // - \f$ A *X =B \f$ if * both \a A and \a B are column-major // - \f$ A^T*X =B \f$ if \a A is * row-major and \a B is column-major // - \f$ A *X^T=B^T \f$ if \a A is * column-major and \a B is row-major // - \f$ A^T*X^T=B^T \f$ if both \a A * and \a B are row-major // // In this context the general system matrix \a * A is a n-by-n matrix that has already been // factorized by the according * decomposition function, \a x and \a b are n-dimensional vectors // and \a * X and \a B are either row-major m-by-n matrices or column-major n-by-m * matrices. // // // \n \subsection lapack_lu_substitution LU-based * Substitution // // The following functions provide an interface for the * LAPACK functions \c sgetrs(), \c dgetrs(), // \c cgetrs(), and \c * zgetrs(), which perform the substitution step for a general matrix that * has // already been decomposed by an \ref lapack_lu_decomposition : * * \code namespace blaze { * * void getrs( char trans, int n, int nrhs, const float* A, int lda, const int* * ipiv, float* B, int ldb, int* info ); * * void getrs( char trans, int n, int nrhs, const double* A, int lda, const int* * ipiv, double* B, int ldb, int* info ); * * void getrs( char trans, int n, const complex<float>* A, int lda, const int* * ipiv, complex<float>* B, int ldb, int* info ); * * void getrs( char trans, int n, const complex<double>* A, int lda, const int* * ipiv, complex<double>* B, int ldb, int* info ); * * template< typename MT, bool SO, typename VT, bool TF > void getrs( const * DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char trans, const int* ipiv * ); * * template< typename MT1, bool SO1, typename MT2, bool SO2 > void getrs( const * DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char trans, const int* * ipiv ); * * } // namespace blaze \endcode * * // If the function exits successfully, the vector \a b or the matrix \a B * contain the solution(s) // of the linear system of equations. The * functions fail if ... // // - ... the given system matrix is not a square * matrix; // - ... the given \a trans argument is neither 'N' nor 'T' nor * 'C'; // - ... the sizes of the two given matrices do not match. // // The * first four functions report failure via the \c info argument, the last two * functions throw // a \a std::invalid_argument exception in case of an * error. 
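//
// A minimal sketch of an LU decomposition followed by the substitution step for a single
// right-hand side (the concrete values are illustrative assumptions):
//
\code
#include <blaze/Math.h>
#include <vector>

int main()
{
   blaze::DynamicMatrix<double,blaze::columnMajor> A{ { 3.0, 1.0 },
                                                      { 1.0, 2.0 } };
   blaze::DynamicVector<double> b{ 5.0, 5.0 };

   std::vector<int> ipiv( 2UL );             // pivot indices, size min(m,n)

   blaze::getrf( A, ipiv.data() );           // LU decomposition of A (in-place)
   blaze::getrs( A, b, 'N', ipiv.data() );   // b now contains the solution of A*x=b
}
\endcode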
// // // \n \subsection lapack_ldlt_substitution LDLT-based * Substitution // // The following functions provide an interface for the * LAPACK functions \c ssytrs(), \c dsytrs(), // \c csytrs(), and \c * zsytrs(), which perform the substitution step for a symmetric indefinite * // matrix that has already been decomposed by an \ref * lapack_ldlt_decomposition : * * \code namespace blaze { * * void sytrs( char uplo, int n, int nrhs, const float* A, int lda, const int* * ipiv, float* B, int ldb, int* info ); * * void sytrs( char uplo, int n, int nrhs, const double* A, int lda, const int* * ipiv, double* B, int ldb, int* info ); * * void sytrs( char uplo, int n, int nrhs, const complex<float>* A, int lda, * const int* ipiv, complex<float>* B, int ldb, int* info ); * * void sytrs( char uplo, int n, int nrhs, const complex<double>* A, int lda, * const int* ipiv, complex<double>* B, int ldb, int* info ); * * template< typename MT, bool SO, typename VT, bool TF > void sytrs( const * DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, const int* ipiv * ); * * template< typename MT1, bool SO1, typename MT2, bool SO2 > void sytrs( const * DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, const int* * ipiv ); * * } // namespace blaze \endcode * * // If the function exits successfully, the vector \a b or the matrix \a B * contain the solution(s) // of the linear system of equations. The * functions fail if ... // // - ... the given system matrix is not a square * matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - * ... the sizes of the two given matrices do not match. // // The first four * functions report failure via the \c info argument, the last two functions * throw // a \a std::invalid_argument exception in case of an error. // // * // \n \subsection lapack_ldlh_substitution LDLH-based Substitution // // * The following functions provide an interface for the LAPACK functions \c * chetrs(), and \c zhetrs(), // which perform the substitution step for an * Hermitian indefinite matrix that has already been // decomposed by an \ref * lapack_ldlh_decomposition : * * \code namespace blaze { * * void hetrs( char uplo, int n, int nrhs, const complex<float>* A, int lda, * const int* ipiv, complex<float>* B, int ldb, int* info ); * * void hetrs( char uplo, int n, int nrhs, const complex<double>* A, int lda, * const int* ipiv, complex<double>* B, int ldb, int* info ); * * template< typename MT, bool SO, typename VT, bool TF > void hetrs( const * DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, const int* ipiv * ); * * template< typename MT1, bool SO1, typename MT2, bool SO2 > void hetrs( const * DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, const int* * ipiv ); * * } // namespace blaze \endcode * * // If the function exits successfully, the vector \a b or the matrix \a B * contain the solution(s) // of the linear system of equations. The * functions fail if ... // // - ... the given system matrix is not a square * matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - * ... the sizes of the two given matrices do not match. // // The first two * functions report failure via the \c info argument, the last two functions * throw // a \a std::invalid_argument exception in case of an error. 
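//
// A minimal sketch for the LDLT-based substitution (the Hermitian \c hetrf()/\c hetrs() functions
// are used analogously; the concrete values are illustrative assumptions):
//
\code
#include <blaze/Math.h>
#include <vector>

int main()
{
   // Symmetric indefinite system; only the lower part is referenced (uplo = 'L')
   blaze::DynamicMatrix<double,blaze::columnMajor> A{ {  1.0,  2.0 },
                                                      {  2.0, -3.0 } };
   blaze::DynamicVector<double> b{ 3.0, -1.0 };

   std::vector<int> ipiv( 2UL );

   blaze::sytrf( A, 'L', ipiv.data() );      // Bunch-Kaufman (LDLT) decomposition of A
   blaze::sytrs( A, b, 'L', ipiv.data() );   // b now contains the solution of A*x=b
}
\endcode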
// // * // \n \subsection lapack_llh_substitution Cholesky-based Substitution // * // The following functions provide an interface for the LAPACK functions * \c spotrs(), \c dpotrs(), // \c cpotrs(), and \c zpotrs(), which perform * the substitution step for a positive definite matrix // that has already * been decomposed by an \ref lapack_llh_decomposition : * * \code namespace blaze { * * void potrs( char uplo, int n, int nrhs, const float* A, int lda, float* B, * int ldb, int* info ); * * void potrs( char uplo, int n, int nrhs, const double* A, int lda, double* B, * int ldb, int* info ); * * void potrs( char uplo, int n, int nrhs, const complex<float>* A, int lda, * complex<float>* B, int ldb, int* info ); * * void potrs( char uplo, int n, int nrhs, const complex<double>* A, int lda, * complex<double>* B, int ldb, int* info ); * * template< typename MT, bool SO, typename VT, bool TF > void potrs( const * DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo ); * * template< typename MT1, bool SO1, typename MT2, bool SO2 > void potrs( const * DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo ); * * } // namespace blaze \endcode * * // If the function exits successfully, the vector \a b or the matrix \a B * contain the solution(s) // of the linear system of equations. The * functions fail if ... // // - ... the given system matrix is not a square * matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - * ... the sizes of the two given matrices do not match. // // The first two * functions report failure via the \c info argument, the last two functions * throw // a \a std::invalid_argument exception in case of an error. // // * // \n \subsection lapack_triangular_substitution Substitution for * Triangular Matrices // // The following functions provide an interface for * the LAPACK functions \c strtrs(), \c dtrtrs(), // \c ctrtrs(), and \c * ztrtrs(), which perform the substitution step for a triangular matrix: * * \code namespace blaze { * * void trtrs( char uplo, char trans, char diag, int n, int nrhs, const float* * A, int lda, float* B, int ldb, int* info ); * * void trtrs( char uplo, char trans, char diag, int n, int nrhs, const double* * A, int lda, double* B, int ldb, int* info ); * * void trtrs( char uplo, char trans, char diag, int n, int nrhs, const * complex<float>* A, int lda, complex<float>* B, int ldb, int* info ); * * void trtrs( char uplo, char trans, char diag, int n, int nrhs, const * complex<double>* A, int lda, complex<double>* B, int ldb, int* info ); * * template< typename MT, bool SO, typename VT, bool TF > void trtrs( const * DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, char trans, char * diag ); * * template< typename MT1, bool SO1, typename MT2, bool SO2 > void trtrs( const * DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, char trans, * char diag ); * * } // namespace blaze \endcode * * // If the function exits successfully, the vector \a b or the matrix \a B * contain the solution(s) // of the linear system of equations. The * functions fail if ... // // - ... the given system matrix is not a square * matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - * ... the given \a trans argument is neither 'N' nor 'T' nor 'C'; // - ... * the given \a diag argument is neither 'U' nor 'N'; // - ... the sizes of * the two given matrices do not match. 
// // The first four functions report * failure via the \c info argument, the last two functions throw // a \a * std::invalid_argument exception in case of an error. // // // \n \section * lapack_linear_system_solver Linear System Solver // <hr> // // The * following functions represent compound functions that perform both the * decomposition step // as well as the substitution step to compute the * solution to a system of linear equations. Note // that depending on the * storage order of the system matrix and the given right-hand side the // * functions solve different equation systems: // // Single right-hand side: * // - \f$ A *x=b \f$ if \a A is column-major // - \f$ A^T*x=b \f$ if \a * A is row-major // // Multiple right-hand sides: // - \f$ A *X =B \f$ * if both \a A and \a B are column-major // - \f$ A^T*X =B \f$ if \a A * is row-major and \a B is column-major // - \f$ A *X^T=B^T \f$ if \a A is * column-major and \a B is row-major // - \f$ A^T*X^T=B^T \f$ if both \a A * and \a B are row-major // // In this context the general system matrix \a * A is a n-by-n matrix that has already been // factorized by the according * decomposition function, \a x and \a b are n-dimensional vectors // and \a * X and \a B are either row-major m-by-n matrices or column-major n-by-m * matrices. // // // \subsection lapack_lu_linear_system_solver LU-based * Linear System Solver // // The following functions provide an interface * for the LAPACK functions \c sgesv(), \c dgesv(), // \c cgesv(), and \c * zgesv(), which combine an \ref lapack_lu_decomposition and the according * // \ref lapack_lu_substitution : * * \code namespace blaze { * * void gesv( int n, int nrhs, float* A, int lda, int* ipiv, float* B, int ldb, * int* info ); * * void gesv( int n, int nrhs, double* A, int lda, int* ipiv, double* B, int * ldb, int* info ); * * void gesv( int n, int nrhs, complex<float>* A, int lda, int* ipiv, * complex<float>* B, int ldb, int* info ); * * void gesv( int n, int nrhs, complex<double>* A, int lda, int* ipiv, * complex<double>* B, int ldb, int* info ); * * template< typename MT, bool SO, typename VT, bool TF > void gesv( * DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, int* ipiv ); * * template< typename MT1, bool SO1, typename MT2, bool SO2 > void gesv( * DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, int* ipiv ); * * } // namespace blaze \endcode * * // If the function exits successfully, the vector \a b or the matrix \a B * contain the // solution(s) of the linear system of equations and \a A has * been decomposed by means of an // \ref lapack_lu_decomposition. // // The * functions fail if ... // // - ... the given system matrix is not a square * matrix; // - ... the given system matrix is singular and not invertible. * // // The first four functions report failure via the \c info argument, * the fifth function throws a // \a std::invalid_argument exception in case * of an error. 
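//
// A minimal sketch of the compound LU-based solver (the concrete values are illustrative
// assumptions):
//
\code
#include <blaze/Math.h>
#include <vector>

int main()
{
   blaze::DynamicMatrix<double,blaze::columnMajor> A{ { 2.0, 1.0 },
                                                      { 1.0, 3.0 } };
   blaze::DynamicVector<double> b{ 3.0, 4.0 };

   std::vector<int> ipiv( 2UL );

   // Performs the LU decomposition of A and the substitution step in one call;
   // afterwards b contains the solution and A the LU factors
   blaze::gesv( A, b, ipiv.data() );
}
\endcode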
// // // \n \subsection lapack_ldlt_linear_system_solver * LDLT-based Linear System Solver // // The following functions provide an * interface for the LAPACK functions \c ssysv(), \c dsysv(), // \c csysv(), * and \c zsysv(), which combine an \ref lapack_ldlt_decomposition and the * according // \ref lapack_ldlt_substitution : * * \code namespace blaze { * * void sysv( char uplo, int n, int nrhs, float* A, int lda, int* ipiv, float* * B, int ldb, float* work, int lwork, int* info ); * * void sysv( char uplo, int n, int nrhs, double* A, int lda, int* ipiv, double* * B, int ldb, double* work, int lwork, int* info ); * * void sysv( char uplo, int n, int nrhs, complex<float>* A, int lda, int* ipiv, * complex<float>* B, int ldb, complex<float>* work, int lwork, int* info ); * * void sysv( char uplo, int n, int nrhs, complex<double>* A, int lda, int* * ipiv, complex<double>* B, int ldb, complex<double>* work, int lwork, int* * info ); * * template< typename MT, bool SO, typename VT, bool TF > void sysv( * DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, int* ipiv ); * * template< typename MT1, bool SO1, typename MT2, bool SO2 > void sysv( * DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, int* ipiv ); * * } // namespace blaze \endcode * * // If the function exits successfully, the vector \a b or the matrix \a B * contain the // solution(s) of the linear system of equations and \a A has * been decomposed by means of an // \ref lapack_ldlt_decomposition. // // * The functions fail if ... // // - ... the given system matrix is not a * square matrix; // - ... the given \a uplo argument is neither 'L' nor * 'U'; // - ... the sizes of the two given matrices do not match; // - ... * the given system matrix is singular and not invertible. // // The first * four functions report failure via the \c info argument, the last two functions * throw a // \a std::invalid_argument exception in case of an error. // // * // \n \subsection lapack_ldlh_linear_system_solver LDLH-based Linear * System Solver // // The following functions provide an interface for the * LAPACK functions \c chesv() and \c zhesv(), // which combine an \ref lapack_ldlh_decomposition and the according // \ref * lapack_ldlh_substitution : * * \code namespace blaze { * * void hesv( char uplo, int n, int nrhs, complex<float>* A, int lda, int* ipiv, * complex<float>* B, int ldb, complex<float>* work, int lwork, int* info ); * * void hesv( char uplo, int n, int nrhs, complex<double>* A, int lda, int* * ipiv, complex<double>* B, int ldb, complex<double>* work, int lwork, int* * info ); * * template< typename MT, bool SO, typename VT, bool TF > void hesv( * DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, int* ipiv ); * * template< typename MT1, bool SO1, typename MT2, bool SO2 > void hesv( * DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, int* ipiv ); * * } // namespace blaze \endcode * * // If the function exits successfully, the vector \a b or the matrix \a B * contain the // solution(s) of the linear system of equations and \a A has * been decomposed by means of an // \ref lapack_ldlh_decomposition. // // * The functions fail if ... // // - ... the given system matrix is not a * square matrix; // - ... the given \a uplo argument is neither 'L' nor * 'U'; // - ... the sizes of the two given matrices do not match; // - ... * the given system matrix is singular and not invertible.
// // The first * two functions report failure via the \c info argument, the last two functions * throw a // \a std::invalid_argument exception in case of an error. // // * // \n \subsection lapack_llh_linear_system_solver Cholesky-based Linear * System Solver // // The following functions provide an interface for the * LAPACK functions \c sposv(), \c dposv(), // \c cposv(), and \c zposv(), * which combine an \ref lapack_llh_decomposition and the according // \ref * lapack_llh_substitution : * * \code namespace blaze { * * void posv( char uplo, int n, int nrhs, float* A, int lda, float* B, int ldb, * int* info ); * * void posv( char uplo, int n, int nrhs, double* A, int lda, double* B, int * ldb, int* info ); * * void posv( char uplo, int n, int nrhs, complex<float>* A, int lda, * complex<float>* B, int ldb, int* info ); * * void posv( char uplo, int n, int nrhs, complex<double>* A, int lda, * complex<double>* B, int ldb, int* info ); * * template< typename MT, bool SO, typename VT, bool TF > void posv( * DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo ); * * template< typename MT1, bool SO1, typename MT2, bool SO2 > void posv( * DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo ); * * } // namespace blaze \endcode * * // If the function exits successfully, the vector \a b or the matrix \a B * contain the // solution(s) of the linear system of equations and \a A has * been decomposed by means of an // \ref lapack_llh_decomposition. // // The * functions fail if ... // // - ... the given system matrix is not a square * matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - * ... the sizes of the two given matrices do not match; // - ... the given * system matrix is singular and not invertible. // // The first four * functions report failure via the \c info argument, the last two functions * throw a // \a std::invalid_argument exception in case of an error. // // * // \n \subsection lapack_triangular_linear_system_solver Linear System * Solver for Triangular Matrices // // The following functions provide an * interface for the LAPACK functions \c strsv(), \c dtrsv(), // \c ctrsv(), * and \c ztrsv(): * * \code namespace blaze { * * void trsv( char uplo, char trans, char diag, int n, const float* A, int lda, * float* x, int incX ); * * void trsv( char uplo, char trans, char diag, int n, const double* A, int lda, * double* x, int incX ); * * void trsv( char uplo, char trans, char diag, int n, const complex<float>* A, * int lda, complex<float>* x, int incX ); * * void trsv( char uplo, char trans, char diag, int n, const complex<double>* A, * int lda, complex<double>* x, int incX ); * * template< typename MT, bool SO, typename VT, bool TF > void trsv( const * DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, char trans, char * diag ); * * } // namespace blaze \endcode * * // If the function exits successfully, the vector \a b * contains the // solution of the linear system of equations. // // The * functions fail if ... // // - ... the given system matrix is not a square * matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - * ... the given \a trans argument is neither 'N' nor 'T' nor 'C'; // - ... * the given \a diag argument is neither 'U' nor 'N'. // // The last function * throws a \a std::invalid_argument exception in case of an error. Note that * // none of the functions performs any test for singularity or * near-singularity. Such tests // must be performed prior to calling this * function!
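//
// A minimal sketch of the triangular solver (the concrete values are illustrative assumptions;
// as stated above, no singularity check is performed):
//
\code
#include <blaze/Math.h>

int main()
{
   // Lower triangular system matrix; the strictly upper part is not referenced (uplo = 'L')
   blaze::DynamicMatrix<double,blaze::columnMajor> A{ { 2.0, 0.0 },
                                                      { 1.0, 4.0 } };
   blaze::DynamicVector<double> b{ 2.0, 5.0 };

   // Solves A*x=b for the column-major A and trans = 'N'; b is overwritten with the solution
   blaze::trsv( A, b, 'L', 'N', 'N' );
}
\endcode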
// // // \n \section lapack_eigenvalues Eigenvalues/Eigenvectors * // // \subsection lapack_eigenvalues_general General Matrices // // The * following functions provide an interface for the LAPACK functions \c * sgeev(), \c dgeev(), // \c cgeev(), and \c zgeev(), which compute the * eigenvalues and optionally the eigenvectors of // the given general * matrix: * * \code namespace blaze { * * void geev( char jobvl, char jobvr, int n, float* A, int lda, float* wr, float* * wi, float* VL, int ldvl, float* VR, int ldvr, float* work, int lwork, int* * info ); * * void geev( char jobvl, char jobvr, int n, double* A, int lda, double* wr, * double* wi, double* VL, int ldvl, double* VR, int ldvr, double* work, int * lwork, int* info ); * * void geev( char jobvl, char jobvr, int n, complex<float>* A, int lda, * complex<float>* w, complex<float>* VL, int ldvl, complex<float>* VR, int * ldvr, complex<float>* work, int lwork, float* rwork, int* info ); * * void geev( char jobvl, char jobvr, int n, complex<double>* A, int lda, * complex<double>* w, complex<double>* VL, int ldvl, complex<double>* VR, * int ldvr, complex<double>* work, int lwork, double* rwork, int* info ); * * template< typename MT, bool SO, typename VT, bool TF > void geev( * DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w ); * * template< typename MT1, bool SO1, typename MT2, bool SO2, typename VT, bool * TF > void geev( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& VL, * DenseVector<VT,TF>& w ); * * template< typename MT1, bool SO1, typename VT, bool TF, typename MT2, bool * SO2 > void geev( DenseMatrix<MT1,SO1>& A, DenseVector<VT,TF>& w, * DenseMatrix<MT2,SO2>& VR ); * * template< typename MT1, bool SO1, typename MT2, bool SO2, typename VT, bool * TF, typename MT3, bool SO3 > void geev( DenseMatrix<MT1,SO1>& A, * DenseMatrix<MT2,SO2>& VL, DenseVector<VT,TF>& w, DenseMatrix<MT3,SO3>& VR * ); * * } // namespace blaze \endcode * * // The complex eigenvalues of the given matrix \a A are returned in the given * vector \a w. // Please note that no order of eigenvalues can be assumed, * except that complex conjugate pairs // of eigenvalues appear consecutively * with the eigenvalue having the positive imaginary part // first. // // If * \a VR is provided as an argument, the right eigenvectors are returned in * the rows of \a VR // in case \a VR is a row-major matrix and in the * columns of \a VR in case \a VR is a column-major // matrix. The right * eigenvector \f$v[j]\f$ of \a A satisfies * * \f[ A * v[j] = lambda[j] * v[j], \f] * * // where \f$lambda[j]\f$ is its eigenvalue. // // If \a VL is provided as an * argument, the left eigenvectors are returned in the rows of \a VL // in * case \a VL is a row-major matrix and in the columns of \a VL in case \a VL * is a column-major // matrix. The left eigenvector \f$u[j]\f$ of \a A * satisfies * * \f[ u[j]^{H} * A = lambda[j] * u[j]^{H}, \f] * * // where \f$u[j]^{H}\f$ denotes the conjugate transpose of \f$u[j]\f$. // // * \a w, \a VL, and \a VR are resized to the correct dimensions (if possible * and necessary). The // functions fail if ... // // - ... the given matrix * \a A is not a square matrix; // - ... the given matrix \a VL is a fixed * size matrix and the dimensions don't match; // - ... the given vector \a * w is a fixed size vector and the size doesn't match; // - ... the given * matrix \a VR is a fixed size matrix and the dimensions don't match; // - * ... the eigenvalue computation fails. 
// // The first four functions * report failure via the \c info argument, the last four functions throw // * an exception in case of an error. // // // \n \subsection * lapack_eigenvalues_symmetric Symmetric Matrices // // The following * functions provide an interface for the LAPACK functions \c ssyev() and \c * dsyev(), // which compute the eigenvalues and eigenvectors of the given * symmetric matrix: * * \code namespace blaze { * * void syev( char jobz, char uplo, int n, float* A, int lda, float* w, float* * work, int lwork, int* info ); * * void syev( char jobz, char uplo, int n, double* A, int lda, double* w, double* * work, int lwork, int* info ); * * template< typename MT, bool SO, typename VT, bool TF > void syev( * DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char jobz, char uplo ); * * } // namespace blaze \endcode * * // Alternatively, the following functions can be used, which provide an * interface to the LAPACK // functions \c ssyevd() and \c dsyevd(). In * contrast to the \c syev() functions they use a // divide-and-conquer * strategy for the computation of the left and right eigenvectors: * * \code namespace blaze { * * void syevd( char jobz, char uplo, int n, float* A, int lda, float* w, float* * work, int lwork, int* iwork, int liwork, int* info ); * * void syevd( char jobz, char uplo, int n, double* A, int lda, double* w, * double* work, int lwork, int* iwork, int liwork, int* info ); * * template< typename MT, bool SO, typename VT, bool TF > void syevd( * DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char jobz, char uplo ); * * } // namespace blaze \endcode * * // The real eigenvalues are returned in ascending order in the given vector * \a w. \a w is resized // to the correct size (if possible and necessary). * In case \a A is a row-major matrix, the left // eigenvectors are returned * in the rows of \a A, in case \a A is a column-major matrix, the right // * eigenvectors are returned in the columns of \a A. // // The functions fail * if ... // // - ... the given matrix \a A is not a square matrix; // - * ... the given vector \a w is a fixed size vector and the size doesn't * match; // - ... the given \a jobz argument is neither \c 'V' nor \c 'N'; * // - ... the given \a uplo argument is neither \c 'L' nor \c 'U'; // - * ... the eigenvalue computation fails. // // The first two functions report * failure via the \c info argument, the last function throws an // exception * in case of an error. 
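//
// A minimal sketch for the symmetric eigenvalue functions (the concrete values are illustrative
// assumptions; \c syevd() is called in the same way):
//
\code
#include <blaze/Math.h>

int main()
{
   blaze::DynamicMatrix<double,blaze::rowMajor> A{ { 2.0, 1.0 },
                                                   { 1.0, 2.0 } };
   blaze::DynamicVector<double> w;   // resized to the number of eigenvalues

   // jobz = 'V': eigenvalues in ascending order in w; since A is row-major,
   // the eigenvectors are returned in the rows of A
   blaze::syev( A, w, 'V', 'L' );
}
\endcode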
// // Via the following functions, which wrap the * LAPACK functions \c ssyevx() and \c dsyevx(), it // is possible to compute * a subset of eigenvalues and/or eigenvectors of a symmetric matrix: * * \code namespace blaze { * * void syevx( char jobz, char range, char uplo, int n, float* A, int lda, float * vl, float vu, int il, int iu, float abstol, int* m, float* w, float* Z, * int ldz, float* work, int lwork, int* iwork, int* ifail, int* info ); * * void syevx( char jobz, char range, char uplo, int n, double* A, int lda, * double vl, double vu, int il, int iu, double abstol, int* m, double* w, * double* Z, int ldz, double* work, int lwork, int* iwork, int* ifail, int* * info ); * * template< typename MT, bool SO, typename VT, bool TF > size_t syevx( * DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char uplo ); * * template< typename MT, bool SO, typename VT, bool TF, typename ST > size_t * syevx( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char uplo, ST low, ST * upp ); * * template< typename MT1, bool SO1, typename VT, bool TF, typename MT2, bool * SO2 > size_t syevx( DenseMatrix<MT1,SO1>& A, DenseVector<VT,TF>& w, * DenseMatrix<MT2,SO2>& Z, char uplo ); * * template< typename MT1, bool SO1, typename VT, bool TF, typename MT2, bool * SO2, typename ST > size_t syevx( DenseMatrix<MT1,SO1>& A, * DenseVector<VT,TF>& w, DenseMatrix<MT2,SO2>& Z, char uplo, ST low, ST upp * ); * * } // namespace blaze \endcode * * // The number of eigenvalues to be computed is specified by the lower bound * \c low and the upper // bound \c upp, which either form an integral or a * floating point range. // // In case \a low and \a upp are of integral * type, the function computes all eigenvalues in the // index range * \f$[low..upp]\f$. The \a num resulting real eigenvalues are stored in * ascending // order in the given vector \a w, which is either resized (if * possible) or expected to be a // \a num-dimensional vector. The * eigenvectors are returned in the rows of \a Z in case \a Z is // row-major * matrix and in the columns of \a Z in case \a Z is a column-major matrix. * \a Z is // resized (if possible) or expected to be a \a num-by-\a n * row-major matrix or a \a n-by-\a num // column-major matrix. // // In case * \a low and \a upp are of floating point type, the function computes all * eigenvalues // in the half-open interval \f$(low..upp]\f$. The resulting * real eigenvalues are stored in // ascending order in the given vector \a * w, which is either resized (if possible) or expected // to be an \a * n-dimensional vector. The eigenvectors are returned in the rows of \a Z in * case // \a Z is a row-major matrix and in the columns of \a Z in case \a Z * is a column-major matrix. // \a Z is resized (if possible) or expected to * be a \a n-by-\a n matrix. // // The functions fail if ... // // - ... the * given matrix \a A is not a square matrix; // - ... the given vector \a w * is a fixed size vector and the size doesn't match; // - ... the given * matrix \a Z is a fixed size matrix and the dimensions don't match; // - * ... the given \a uplo argument is neither \c 'L' nor \c 'U'; // - ... the * eigenvalue computation fails. // // The first two functions report failure * via the \c info argument, the last four functions throw // an exception in * case of an error. 
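//
// A minimal sketch of computing the subset of eigenvalues inside a floating point interval
// (the concrete values are illustrative assumptions; the return value is the number of computed
// eigenvalues):
//
\code
#include <blaze/Math.h>

int main()
{
   blaze::DynamicMatrix<double,blaze::rowMajor> A{ { 2.0, 1.0, 0.0 },
                                                   { 1.0, 2.0, 1.0 },
                                                   { 0.0, 1.0, 2.0 } };
   blaze::DynamicVector<double> w;

   // Computes all eigenvalues in the half-open interval (0.5..3.0] in ascending order
   const size_t num = blaze::syevx( A, w, 'L', 0.5, 3.0 );
}
\endcode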
// // // \n \subsection lapack_eigenvalues_hermitian * Hermitian Matrices // // The following functions provide an interface for * the LAPACK functions \c cheev() and \c zheev(), // which compute the * eigenvalues and eigenvectors of the given Hermitian matrix: * * \code namespace blaze { * * void heev( char jobz, char uplo, int n, complex<float>* A, int lda, float* w, * complex<float>* work, int lwork, float* rwork, int* info ); * * void heev( char jobz, char uplo, int n, complex<double>* A, int lda, double* * w, complex<double>* work, int lwork, double* rwork, int* info ); * * template< typename MT, bool SO, typename VT, bool TF > void heev( * DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char jobz, char uplo ); * * } // namespace blaze \endcode * * // Alternatively, the following functions can be used, which provide an * interface to the LAPACK // functions \c cheevd() and \c zheevd(). In * contrast to the \c heev() functions they use a // divide-and-conquer * strategy for the computation of the left and right eigenvectors: * * \code namespace blaze { * * void heevd( char jobz, char uplo, int n, complex<float>* A, int lda, float* * w, complex<float>* work, int lwork, float* rwork, int lrwork, int* iwork, * int liwork, int* info ); * * void heevd( char jobz, char uplo, int n, complex<double>* A, int lda, double* * w, complex<double>* work, int lwork, double* rwork, int lrwork, int* * iwork, int liwork, int* info ); * * template< typename MT, bool SO, typename VT, bool TF > void heevd( * DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char jobz, char uplo ); * * } // namespace blaze \endcode * * // The real eigenvalues are returned in ascending order in the given vector * \a w. \a w is resized // to the correct size (if possible and necessary). * In case \a A is a row-major matrix, the left // eigenvectors are returned * in the rows of \a A, in case \a A is a column-major matrix, the right // * eigenvectors are returned in the columns of \a A. // // The functions fail * if ... // // - ... the given matrix \a A is not a square matrix; // - * ... the given vector \a w is a fixed size vector and the size doesn't * match; // - ... the given \a jobz argument is neither \c 'V' nor \c 'N'; * // - ... the given \a uplo argument is neither \c 'L' nor \c 'U'; // - * ... the eigenvalue computation fails. // // The first two functions report * failure via the \c info argument, the last function throws an // exception * in case of an error.
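//
// A minimal sketch for the Hermitian eigenvalue functions (the concrete values are illustrative
// assumptions; \c heevd() is called in the same way):
//
\code
#include <blaze/Math.h>
#include <complex>

int main()
{
   using cplx = std::complex<double>;

   // Small Hermitian example matrix
   blaze::DynamicMatrix<cplx,blaze::rowMajor> A{ { cplx( 2.0, 0.0 ), cplx( 0.0, 1.0 ) },
                                                 { cplx( 0.0,-1.0 ), cplx( 2.0, 0.0 ) } };
   blaze::DynamicVector<double> w;   // real eigenvalues in ascending order

   blaze::heev( A, w, 'V', 'L' );    // row-major A: eigenvectors are returned in the rows of A
}
\endcode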
// // Via the following functions, which wrap the * LAPACK functions \c cheevx() and \c zheevx(), it // is possible to compute * a subset of eigenvalues and/or eigenvectors of an Hermitian matrix: * * \code namespace blaze { * * void heevx( char jobz, char range, char uplo, int n, complex<float>* A, int * lda, float vl, float vu, int il, int iu, float abstol, int* m, float* w, * complex<float>* Z, int ldz, complex<float>* work, int lwork, float* rwork, * int* iwork, int* ifail, int* info ); * * void heevx( char jobz, char range, char uplo, int n, complex<double>* A, int * lda, double vl, double vu, int il, int iu, double abstol, int* m, double* * w, complex<double>* Z, int ldz, complex<double>* work, int lwork, double* * rwork, int* iwork, int* ifail, int* info ); * * template< typename MT, bool SO, typename VT, bool TF > size_t heevx( * DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char uplo ); * * template< typename MT, bool SO, typename VT, bool TF, typename ST > size_t * heevx( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char uplo, ST low, ST * upp ); * * template< typename MT1, bool SO1, typename VT, bool TF, typename MT2, bool * SO2 > size_t heevx( DenseMatrix<MT1,SO1>& A, DenseVector<VT,TF>& w, * DenseMatrix<MT2,SO2>& Z, char uplo ); * * template< typename MT1, bool SO1, typename VT, bool TF, typename MT2, bool * SO2, typename ST > size_t heevx( DenseMatrix<MT1,SO1>& A, * DenseVector<VT,TF>& w, DenseMatrix<MT2,SO2>& Z, char uplo, ST low, ST upp * ); * * } // namespace blaze \endcode * * // The number of eigenvalues to be computed is specified by the lower bound * \c low and the upper // bound \c upp, which either form an integral or a * floating point range. // // In case \a low and \a upp are of integral * type, the function computes all eigenvalues in the // index range * \f$[low..upp]\f$. The \a num resulting real eigenvalues are stored in * ascending // order in the given vector \a w, which is either resized (if * possible) or expected to be a // \a num-dimensional vector. The * eigenvectors are returned in the rows of \a Z in case \a Z is // row-major * matrix and in the columns of \a Z in case \a Z is a column-major matrix. * \a Z is // resized (if possible) or expected to be a \a num-by-\a n * row-major matrix or a \a n-by-\a num // column-major matrix. // // In case * \a low and \a upp are of floating point type, the function computes all * eigenvalues // in the half-open interval \f$(low..upp]\f$. The resulting * real eigenvalues are stored in // ascending order in the given vector \a * w, which is either resized (if possible) or expected // to be an \a * n-dimensional vector. The eigenvectors are returned in the rows of \a Z in * case // \a Z is a row-major matrix and in the columns of \a Z in case \a Z * is a column-major matrix. // \a Z is resized (if possible) or expected to * be a \a n-by-\a n matrix. // // The functions fail if ... // // - ... the * given matrix \a A is not a square matrix; // - ... the given vector \a w * is a fixed size vector and the size doesn't match; // - ... the given * matrix \a Z is a fixed size matrix and the dimensions don't match; // - * ... the given \a uplo argument is neither \c 'L' nor \c 'U'; // - ... the * eigenvalue computation fails. // // The first two functions report failure * via the \c info argument, the last four functions throw // an exception in * case of an error. 
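//
// A minimal sketch of computing eigenvalues and eigenvectors of an Hermitian matrix via the
// \c heevx() overload without a range argument (the concrete values are illustrative
// assumptions):
//
\code
#include <blaze/Math.h>
#include <complex>

int main()
{
   using cplx = std::complex<double>;

   blaze::DynamicMatrix<cplx,blaze::rowMajor> A{ { cplx( 2.0, 0.0 ), cplx( 0.0, 1.0 ) },
                                                 { cplx( 0.0,-1.0 ), cplx( 2.0, 0.0 ) } };
   blaze::DynamicVector<double> w;
   blaze::DynamicMatrix<cplx,blaze::rowMajor> Z;   // receives the eigenvectors (row-wise)

   // The return value is the number of computed eigenvalues
   const size_t num = blaze::heevx( A, w, Z, 'L' );
}
\endcode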
// // // \n \section lapack_singular_values Singular * Values/Singular Vectors // // The following functions provide an interface * for the LAPACK functions \c sgesvd(), \c dgesvd(), // \c cgesvd(), and \c * zgesvd(), which perform a singular value decomposition (SVD) on the given * // general matrix: * * \code namespace blaze { * * void gesvd( char jobu, char jobv, int m, int n, float* A, int lda, float* s, * float* U, int ldu, float* V, int ldv, float* work, int lwork, int* info ); * * void gesvd( char jobu, char jobv, int m, int n, double* A, int lda, double* * s, double* U, int ldu, double* V, int ldv, double* work, int lwork, int* * info ); * * void gesvd( char jobu, char jobv, int m, int n, complex<float>* A, int lda, * float* s, complex<float>* U, int ldu, complex<float>* V, int ldv, * complex<float>* work, int lwork, float* rwork, int* info ); * * void gesvd( char jobu, char jobv, int m, int n, complex<double>* A, int lda, * double* s, complex<double>* U, int ldu, complex<double>* V, int ldv, * complex<double>* work, int lwork, double* rwork, int* info ); * * template< typename MT, bool SO, typename VT, bool TF > void gesvd( * DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& s, char jobu, char jobv ); * * template< typename MT1, bool SO, typename MT2, typename VT, bool TF > void * gesvd( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& * s, char jobu, char jobv ); * * template< typename MT1, bool SO, typename VT, bool TF, typename MT2 > void * gesvd( DenseMatrix<MT1,SO>& A, DenseVector<VT,TF>& s, DenseMatrix<MT2,SO>& * V, char jobu, char jobv ); * * template< typename MT1, bool SO, typename MT2, typename VT, bool TF, typename * MT3 > void gesvd( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, * DenseVector<VT,TF>& s, DenseMatrix<MT3,SO>& V, char jobu, char jobv ); * * } // namespace blaze \endcode * * // Alternatively, the following functions can be used, which provide an * interface to the LAPACK // functions \c sgesdd(), \c dgesdd(), \c * cgesdd(), and \c zgesdd(). 
In contrast to the \c gesvd() // functions they * compute the singular value decomposition (SVD) of the given general matrix * by // applying a divide-and-conquer strategy for the computation of the * left and right singular // vectors: * * \code namespace blaze { * * void gesdd( char jobz, int m, int n, float* A, int lda, float* s, float* U, * int ldu, float* V, int ldv, float* work, int lwork, int* iwork, int* info * ); * * void gesdd( char jobz, int m, int n, double* A, int lda, double* s, double* * U, int ldu, double* V, int ldv, double* work, int lwork, int* iwork, int* * info ); * * void gesdd( char jobz, int m, int n, complex<float>* A, int lda, float* s, * complex<float>* U, int ldu, complex<float>* V, int ldv, complex<float>* * work, int lwork, float* rwork, int* iwork, int* info ); * * void gesdd( char jobz, int m, int n, complex<double>* A, int lda, double* s, * complex<double>* U, int ldu, complex<double>* V, int ldv, complex<double>* * work, int lwork, double* rwork, int* iwork, int* info ); * * template< typename MT, bool SO, typename VT, bool TF > void gesdd( * DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& s ); * * template< typename MT1, bool SO, typename MT2, typename VT, bool TF > void * gesdd( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& * s, char jobz ); * * template< typename MT1, bool SO, typename MT2, typename VT, bool TF > void * gesdd( DenseMatrix<MT1,SO>& A, DenseVector<VT,TF>& s, DenseMatrix<MT2,SO>& * V, char jobz ); * * template< typename MT1, bool SO, typename MT2, typename VT, bool TF, typename * MT3 > void gesdd( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, * DenseVector<VT,TF>& s, DenseMatrix<MT3,SO>& V, char jobz ); * * } // namespace blaze \endcode * * // The resulting decomposition has the form * * \f[ A = U \cdot S \cdot V, \f] * * // where \a S is a \a m-by-\a n matrix, which is zero except for its min(\a * m,\a n) diagonal // elements, \a U is an \a m-by-\a m orthogonal matrix, * and \a V is a \a n-by-\a n orthogonal // matrix. The diagonal elements of * \a S are the singular values of \a A, the first min(\a m,\a n) // columns * of \a U and rows of \a V are the left and right singular vectors of \a A, * respectively. // // The resulting min(\a m,\a n) real and non-negative * singular values are returned in descending // order in the vector \a s, * which is resized to the correct size (if possible and necessary). 
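// To make the interfaces above more concrete, the following sketch decomposes a general dense
// matrix. It assumes the usual LAPACK job conventions ('A' = all singular vectors, 'S' = the
// first min(m,n) singular vectors, 'N' = no singular vectors); the matrix contents are omitted:

   \code
   #include <blaze/Math.h>

   using blaze::DynamicMatrix;
   using blaze::DynamicVector;
   using blaze::rowMajor;

   DynamicMatrix<double,rowMajor> A( 5UL, 8UL );  // The general matrix to be decomposed
   // ... Initialization of A

   DynamicMatrix<double,rowMajor> U;  // Matrix for the left singular vectors
   DynamicVector<double>          s;  // Vector for the min(m,n) singular values
   DynamicMatrix<double,rowMajor> V;  // Matrix for the right singular vectors

   blaze::gesvd( A, U, s, V, 'S', 'S' );  // Economy-size SVD via sgesvd()/dgesvd()
   \endcode

// Since the decomposition overwrites \a A, the matrix has to be re-initialized before a second
// decomposition. The divide-and-conquer based \c gesdd() can be used analogously; for instance,
// \c gesdd( A, s ) computes only the singular values.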
// // * Via the following functions, which wrap the LAPACK functions \c sgesvdx(), * \c dgesvdx(), // \c cgesvdx(), and \c zgesvdx(), it is possible to compute * a subset of singular values and/or // vectors: * * \code namespace blaze { * * void gesvdx( char jobu, char jobv, char range, int m, int n, float* A, int * lda, float vl, float vu, int il, int iu, int* ns, float* s, float* U, int * ldu, float* V, int ldv, float* work, int lwork, int* iwork, int* info ); * * void gesvdx( char jobu, char jobv, char range, int m, int n, double* A, int * lda, double vl, double vu, int il, int iu, int* ns, double* s, double* U, * int ldu, double* V, int ldv, double* work, int lwork, int* iwork, int* * info ); * * void gesvdx( char jobu, char jobv, char range, int m, int n, complex<float>* * A, int lda, float vl, float vu, int il, int iu, int* ns, float* s, * complex<float>* U, int ldu, complex<float>* V, int ldv, complex<float>* * work, int lwork, float* rwork, int* iwork, int* info ); * * void gesvdx( char jobu, char jobv, char range, int m, int n, complex<double>* * A, int lda, double vl, double vu, int il, int iu, int* ns, double* s, * complex<double>* U, int ldu, complex<double>* V, int ldv, complex<double>* * work, int lwork, double* rwork, int* iwork, int* info ); * * template< typename MT, bool SO, typename VT, bool TF > size_t gesvdx( * DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& s ); * * template< typename MT, bool SO, typename VT, bool TF, typename ST > size_t * gesvdx( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& s, ST low, ST upp ); * * template< typename MT1, bool SO, typename MT2, typename VT, bool TF > size_t * gesvdx( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, * DenseVector<VT,TF>& s ); * * template< typename MT1, bool SO, typename MT2, typename VT, bool TF, typename * ST > size_t gesvdx( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, * DenseVector<VT,TF>& s, ST low, ST upp ); * * template< typename MT1, bool SO, typename VT, bool TF, typename MT2 > size_t * gesvdx( DenseMatrix<MT1,SO>& A, DenseVector<VT,TF>& s, * DenseMatrix<MT2,SO>& V ); * * template< typename MT1, bool SO, typename VT, bool TF, typename MT2, typename * ST > size_t gesvdx( DenseMatrix<MT1,SO>& A, DenseVector<VT,TF>& s, * DenseMatrix<MT2,SO>& V, ST low, ST upp ); * * template< typename MT1, bool SO, typename MT2, typename VT, bool TF, typename * MT3 > size_t gesvdx( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, * DenseVector<VT,TF>& s, DenseMatrix<MT3,SO>& V ); * * template< typename MT1, bool SO, typename MT2, typename VT, bool TF, typename * MT3, typename ST > size_t gesvdx( DenseMatrix<MT1,SO>& A, * DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, DenseMatrix<MT3,SO>& V, ST * low, ST upp ); * * } // namespace blaze \endcode * * // The number of singular values to be computed is specified by the lower * bound \a low and the // upper bound \a upp, which either form an integral * or a floating point range. // // In case \a low and \a upp form are of * integral type, the function computes all singular values // in the index * range \f$[low..upp]\f$. The \a num resulting real and non-negative * singular values // are stored in descending order in the given vector \a * s, which is either resized (if possible) // or expected to be a \a * num-dimensional vector. The resulting left singular vectors are stored // * in the given matrix \a U, which is either resized (if possible) or * expected to be a // \a m-by-\a num matrix. 
The resulting right singular * vectors are stored in the given matrix \a V, // which is either resized * (if possible) or expected to be a \a num-by-\a n matrix. // // In case \a * low and \a upp are of floating point type, the function computes all * singular values // in the half-open interval \f$(low..upp]\f$. The * resulting real and non-negative singular values // are stored in * descending order in the given vector \a s, which is either resized (if * possible) // or expected to be a min(\a m,\a n)-dimensional vector. The * resulting left singular vectors are // stored in the given matrix \a U, * which is either resized (if possible) or expected to be a // \a * m-by-min(\a m,\a n) matrix. The resulting right singular vectors are * stored in the given // matrix \a V, which is either resized (if possible) * or expected to be a min(\a m,\a n)-by-\a n // matrix. // // The functions * fail if ... // // - ... the given matrix \a U is a fixed size matrix and * the dimensions don't match; // - ... the given vector \a s is a fixed * size vector and the size doesn't match; // - ... the given matrix \a V is * a fixed size matrix and the dimensions don't match; // - ... the given * scalar values don't form a proper range; // - ... the singular value * decomposition fails. // // The first four functions report failure via the * \c info argument, the remaining functions throw // an exception in case of * an error. // // // \n Previous: \ref blas_functions &nbsp; &nbsp; Next: * \ref block_vectors_and_matrices \n */ //************************************************************************************************* //**Block Vectors and Matrices ********************************************************************* /* * !\page block_vectors_and_matrices Block Vectors and Matrices // // * \tableofcontents // // // \n \section block_vectors_and_matrices_general * General Concepts // <hr> // // In addition to fundamental element types, * the \b Blaze library supports vectors and matrices // with non-fundamental * element type. For instance, it is possible to define block matrices by // * using a matrix type as the element type: * * \code using blaze::DynamicMatrix; using blaze::DynamicVector; using * blaze::rowMajor; using blaze::columnVector; * * DynamicMatrix< DynamicMatrix<double,rowMajor>, rowMajor > A; DynamicVector< * DynamicVector<double,columnVector >, columnVector > x, y; * * // ... Resizing and initialization * * y = A * x; \endcode * * // The matrix/vector multiplication in this example runs fully parallel and * uses vectorization // for every inner matrix/vector multiplication and * vector addition. // // // \n \section block_vectors_and_matrices_pitfalls * Pitfalls // <hr> // // The only thing to keep in mind when using * non-fundamental element types is that all operations // between the * elements have to be well defined. More specifically, the size of vector * and matrix // elements has to match. The attempt to combine two * non-matching elements results in either a // compilation error (in case of * statically sized elements) or an exception (for dynamically sized // * elements): * * \code DynamicVector< StaticVector<int,2UL> > a; DynamicVector< * StaticVector<int,3UL> > b; * * DynamicVector< DynamicVector<int> > c( a + b ); // Compilation error: * element size doesn't match \endcode * * // Therefore please don't forget that dynamically sized elements (e.g. \c * blaze::DynamicVector, // \c blaze::HybridVector, \c blaze::DynamicMatrix, * \c blaze::HybridMatrix, ...) 
need to be sized // accordingly upfront. // * // // \n \section block_vectors_and_matrices_examples Examples // <hr> // * // The first example demonstrates the multiplication between a statically * sized block matrix // and a block vector: * * \code using namespace blaze; * * // ( ( 1 1 ) ( 2 2 ) ) ( ( 1 ) ) ( ( 10 ) ) // ( ( 1 1 ) ( 2 2 ) ) ( * ( 1 ) ) ( ( 10 ) ) // ( ) * ( ) = ( ) // ( * ( 3 3 ) ( 4 4 ) ) ( ( 2 ) ) ( ( 22 ) ) // ( ( 3 3 ) ( 4 4 ) ) ( ( * 2 ) ) ( ( 22 ) ) * * using M2x2 = StaticMatrix<int,2UL,2UL,rowMajor>; using V2 = * StaticVector<int,2UL,columnVector>; * * DynamicMatrix<M2x2,rowMajor> A{ { M2x2(1), M2x2(2) }, { M2x2(3), M2x2(4) } }; * * DynamicVector<V2,columnVector> x{ V2(1), V2(2) }; * * DynamicVector<V2,columnVector> y( A * x ); \endcode * * // The second example shows the multiplication between a compressed block * matrix with blocks of // varying size and a compressed block vector: * * \code using namespace blaze; * * // ( ( 1 -2 3 ) ( 5 -1 ) ) ( ( 1 ) ) ( ( -3 ) ) // ( ( 4 1 0 * ) ( 1 2 ) ) ( ( 0 ) ) ( ( 7 ) ) // ( ( 0 2 4 ) ( * 3 1 ) ) ( ( 1 ) ) ( ( 3 ) ) // ( ) ( * ) ( ) // ( ( 1 ) ) * ( ( 2 ) ) = ( ( 2 ) * ) // ( ) ( ) ( ) // ( ( 0 * -1 1 ) ( 1 0 ) ) ( ( -1 ) ) ( ( 0 ) ) // ( ( 2 -1 2 ) * ( 0 1 ) ) ( ( 2 ) ) ( ( 6 ) ) * * using M3x3 = HybridMatrix<int,3UL,3UL,rowMajor>; using V3 = * HybridVector<int,3UL,columnVector>; * * CompressedMatrix<M3x3,rowMajor> A( 3UL, 3UL, 5UL ); A(0,0) = M3x3{ { 1, -2, 3 * }, { 4, 1, 0 }, { 0, 2, 4 } }; A(0,2) = M3x3{ { 5, -1 }, { 1, 2 }, { 3, 1 * } }; A(1,1) = M3x3{ { 1 } }; A(2,0) = M3x3{ { 0, -1, 1 }, { 2, -1, 2 } }; * A(2,2) = M3x3{ { 1, 0 }, { 0, 1 } }; * * CompressedVector<V3,columnVector> x( 3UL, 3UL ); x[0] = V3{ 1, 0, 1 }; x[1] = * V3{ 2 }; x[2] = V3{ -1, 2 }; * * CompressedVector<V3,columnVector> y( A * x ); \endcode * * // \n Previous: \ref lapack_functions &nbsp; &nbsp; Next: \ref * intra_statement_optimization \n */ //************************************************************************************************* //**Intra - Statement Optimization ******************************************************************* /* * !\page intra_statement_optimization Intra-Statement Optimization // // One * of the prime features of the \b Blaze library is the automatic * intra-statement optimization. // In order to optimize the overall * performance of every single statement \b Blaze attempts to // rearrange * the operands based on their types. For instance, the following addition of * dense and // sparse vectors * * \code blaze::DynamicVector<double> d1, d2, d3; * blaze::CompressedVector<double> s1; * * // ... Resizing and initialization * * d3 = d1 + s1 + d2; \endcode * * // is automatically rearranged and evaluated as * * \code // ... d3 = d1 + d2 + s1; // <- Note that s1 and d2 have been * rearranged \endcode * * // This order of operands is highly favorable for the overall performance * since the addition of // the two dense vectors \c d1 and \c d2 can be * handled much more efficiently in a vectorized // fashion. // // This * intra-statement optimization can have a tremendous effect on the * performance of a statement. // Consider for instance the following * computation: * * \code blaze::DynamicMatrix<double> A, B; blaze::DynamicVector<double> x, y; * * // ... Resizing and initialization * * y = A * B * x; \endcode * * // Since multiplications are evaluated from left to right, this statement * would result in a // matrix/matrix multiplication, followed by a * matrix/vector multiplication. 
However, if the // right subexpression is * evaluated first, the performance can be dramatically improved since the // * matrix/matrix multiplication can be avoided in favor of a second * matrix/vector multiplication. // The \b Blaze library exploits this by * automatically restructuring the expression such that the // right * multiplication is evaluated first: * * \code // ... y = A * ( B * x ); \endcode * * // Note however that although this intra-statement optimization may result in * a measurable or // even significant performance improvement, this behavior * may be undesirable for several reasons, // for instance because of * numerical stability. Therefore, in case the order of evaluation matters, * // the best solution is to be explicit and to separate a statement into * several statements: * * \code blaze::DynamicVector<double> d1, d2, d3; * blaze::CompressedVector<double> s1; * * // ... Resizing and initialization * * d3 = d1 + s1; // Compute the dense vector/sparse vector addition first ... * d3 += d2; // ... and afterwards add the second dense vector \endcode * * \code // ... blaze::DynamicMatrix<double> A, B, C; * blaze::DynamicVector<double> x, y; * * // ... Resizing and initialization * * C = A * B; // Compute the left-hand side matrix-matrix multiplication first * ... y = C * x; // ... before the right-hand side matrix-vector * multiplication \endcode * * // Alternatively, it is also possible to use the \c eval() function to fix * the order of evaluation: * * \code blaze::DynamicVector<double> d1, d2, d3; * blaze::CompressedVector<double> s1; * * // ... Resizing and initialization * * d3 = d1 + eval( s1 + d2 ); \endcode * * \code blaze::DynamicMatrix<double> A, B; blaze::DynamicVector<double> x, y; * * // ... Resizing and initialization * * y = eval( A * B ) * x; \endcode * * // \n Previous: \ref block_vectors_and_matrices &nbsp; &nbsp; Next: \ref faq * \n */ //************************************************************************************************* //**FAQ ******************************************************************************************** /* * !\page faq Frequently Asked Questions (FAQ) // // \tableofcontents // // * // <hr> // \section faq_padding A StaticVector/StaticMatrix is larger than * expected. Is this a bug? // // The size of a \c StaticVector, \c * StaticMatrix, \c HybridVector, or \c HybridMatrix can // indeed be larger * than expected: * * \code StaticVector<int,3> a; StaticMatrix<int,3,3> A; * * sizeof( a ); // Evaluates to 16, 32, or even 64, but not 12 sizeof( A ); // * Evaluates to 48, 96, or even 144, but not 36 \endcode * * // In order to achieve the maximum possible performance the \b Blaze library * tries to enable // SIMD vectorization even for small vectors. For that * reason \b Blaze by default uses padding // elements for all dense vectors * and matrices to guarantee that at least a single SIMD vector // can be * loaded. 
// Depending on the used SIMD technology that can significantly increase the size of a
// \c StaticVector, \c StaticMatrix, \c HybridVector or \c HybridMatrix:

   \code
   StaticVector<int,3> a;
   StaticMatrix<int,3,3> A;

   sizeof( a );  // Evaluates to 16 in case of SSE, 32 in case of AVX, and 64 in case of AVX-512
                 // (under the assumption that an integer occupies 4 bytes)
   sizeof( A );  // Evaluates to 48 in case of SSE, 96 in case of AVX, and 144 in case of AVX-512
                 // (under the assumption that an integer occupies 4 bytes)
   \endcode

// The configuration file <tt>./blaze/config/Optimizations.h</tt> provides a compile time switch
// that can be used to (de-)activate padding:

   \code
   #define BLAZE_USE_PADDING 1
   \endcode

// Alternatively it is possible to (de-)activate padding via command line or by defining this
// symbol manually before including any \b Blaze header file:

   \code
   #define BLAZE_USE_PADDING 1
   #include <blaze/Blaze.h>
   \endcode

// If \c BLAZE_USE_PADDING is set to 1 padding is enabled for all dense vectors and matrices, if
// it is set to 0 padding is disabled. Note however that disabling padding can considerably
// reduce the performance of all dense vector and matrix operations!
//
//
// <hr>
// \section faq_alignment Despite disabling padding, a StaticVector/StaticMatrix is still larger than expected. Is this a bug?
//
// Despite disabling padding via the \c BLAZE_USE_PADDING compile time switch (see \ref
// faq_padding), the size of a \c StaticVector, \c StaticMatrix, \c HybridVector, or
// \c HybridMatrix can still be larger than expected:

   \code
   #define BLAZE_USE_PADDING 0
   #include <blaze/Blaze.h>

   StaticVector<int,3> a;
   StaticVector<int,5> b;

   sizeof( a );  // Always evaluates to 12
   sizeof( b );  // Evaluates to 32 with SSE (larger than expected) and to 20 with AVX or AVX-512 (expected)
   \endcode

// The reason for this behavior is the used SIMD technology. If SSE is used, which provides 128
// bit wide registers, a single SIMD pack can usually hold 4 integers (128 bit divided by 32 bit).
// Since the second vector contains enough elements it is possible to benefit from vectorization.
// However, SSE requires an alignment of 16 bytes, which ultimately results in a total size of
// 32 bytes for the \c StaticVector (2 times 16 bytes due to 5 integer elements). If AVX or
// AVX-512 is used, which provide 256 bit or 512 bit wide registers, a single SIMD vector can
// hold 8 or 16 integers, respectively. In that case even the second vector does not hold enough
// elements to benefit from vectorization, which is why \b Blaze does not enforce a 32 byte (for
// AVX) or even 64 byte alignment (for AVX-512).
//
// It is possible to disable the vectorization entirely via the compile time switch in the
// <tt>./blaze/config/Vectorization.h</tt> configuration file:

   \code
   #define BLAZE_USE_VECTORIZATION 1
   \endcode

// It is also possible to (de-)activate vectorization via command line or by defining this symbol
// manually before including any \b Blaze header file:

   \code
   #define BLAZE_USE_VECTORIZATION 1
   #include <blaze/Blaze.h>
   \endcode

// In case the switch is set to 1, vectorization is enabled and the \b Blaze library is allowed
// to use intrinsics and the necessary alignment to speed up computations. In case the switch is
// set to 0, vectorization is disabled entirely and the \b Blaze library chooses default,
// non-vectorized functionality for the operations.
// Note that deactivating the vectorization may pose a severe performance limitation for a large
// number of operations!
//
//
// <hr>
// \section faq_blas To which extent does Blaze make use of BLAS functions under the hood?
//
// Currently the only BLAS functions that are utilized by \b Blaze are the \c gemm() functions
// for the multiplication of two dense matrices (i.e. \c sgemm(), \c dgemm(), \c cgemm(), and
// \c zgemm()). All other operations are always and unconditionally performed by native \b Blaze
// kernels.
//
// The \c BLAZE_BLAS_MODE config switch (see <tt>./blaze/config/BLAS.h</tt>) determines whether
// \b Blaze is allowed to use BLAS kernels. If \c BLAZE_BLAS_MODE is set to 0 then \b Blaze does
// not utilize the BLAS kernels and unconditionally uses its own custom kernels. If
// \c BLAZE_BLAS_MODE is set to 1 then \b Blaze is allowed to choose between using BLAS kernels
// or its own custom kernels. In case of the dense matrix multiplication this decision is based
// on the size of the dense matrices. For large matrices, \b Blaze uses the BLAS kernels, for
// small matrices it uses its own custom kernels. The threshold for this decision can be
// configured via the \c BLAZE_DMATDMATMULT_THRESHOLD, \c BLAZE_DMATTDMATMULT_THRESHOLD,
// \c BLAZE_TDMATDMATMULT_THRESHOLD and \c BLAZE_TDMATTDMATMULT_THRESHOLD config switches
// (see <tt>./blaze/config/Thresholds.h</tt>).
//
// Please note that the extent to which \b Blaze uses BLAS kernels can change in future releases
// of \b Blaze!
//
//
// <hr>
// \section faq_lapack To which extent does Blaze make use of LAPACK functions under the hood?
//
// \b Blaze uses LAPACK functions for matrix decomposition, matrix inversion, computing the
// determinants and eigenvalues, and the SVD. In contrast to the BLAS functionality (see
// \ref faq_blas), you cannot disable LAPACK or switch to custom kernels. In case you try to use
// any of these functionalities, but do not provide (i.e. link) a LAPACK library you will get
// link time errors.
//
// Please note that the extent to which \b Blaze uses LAPACK kernels can change in future
// releases of \b Blaze!
//
//
// <hr>
// \section faq_compile_times The compile time is too high if I include <blaze/Blaze.h>. Can I reduce it?
//
// The include file <tt><blaze/Blaze.h></tt> includes the entire functionality of the \b Blaze
// library, which by now is several hundred thousand lines of source code. That means that a lot
// of source code has to be parsed whenever <tt><blaze/Blaze.h></tt> is encountered. However, it
// is rare that everything is required within a single compilation unit. Therefore it is easily
// possible to reduce compile times by including only those \b Blaze features that are used
// within the compilation unit. For instance, instead of including <tt><blaze/Blaze.h></tt> it
// could be enough to include <tt><blaze/math/DynamicVector.h></tt>, which would reduce the
// compilation times by about 20%.
//
// Additionally we are taking care to implement new \b Blaze functionality such that compile
// times do not explode and try to reduce the compile times of existing features. Thus newer
// releases of \b Blaze can also improve compile times.
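// For instance, a translation unit that only needs dense vectors and matrices might be limited
// to the corresponding headers. This is merely a sketch of the idea; the exact set of headers
// depends on the features that are actually used in the compilation unit:

   \code
   // Instead of pulling in the complete library via <blaze/Blaze.h> ...
   // #include <blaze/Blaze.h>

   // ... include only the features used in this compilation unit:
   #include <blaze/math/DynamicMatrix.h>
   #include <blaze/math/DynamicVector.h>

   int main()
   {
      blaze::DynamicMatrix<double> A( 3UL, 3UL, 1.0 );
      blaze::DynamicVector<double> x( 3UL, 2.0 );
      blaze::DynamicVector<double> y( A * x );
   }
   \endcode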
// // \n * Previous: \ref intra_statement_optimization &nbsp; &nbsp; Next: \ref * issue_creation_guidelines \n */ //************************************************************************************************* //**FAQ ******************************************************************************************** /* * !\page issue_creation_guidelines Issue Creation Guidelines // // * \tableofcontents // // // One of the most important aspects of the \b * Blaze project is the // <a * href="https://bitbucket.org/blaze-lib/blaze/issues">issue management</a> * on the official // \b Blaze Bitbucket page. We cordially invite all \b * Blaze users to submit feature requests // and bug reports, as we believe * that this is a significant part of making \b Blaze a better // library. * However, we are asking to follow a small set of guidelines when creating * an issue // to facilitate the issue management on our side and also to * make issues more useful for users // of \b Blaze. // // // <hr> // * \section issues_title Title // // The title is the most important detail * of an issue. A well chosen title makes it easy to grasp // the idea of an * issue and improves the discoverability. Therefore, please choose a title * that // is ... // // - ... as descriptive as possible; // - ... as * concise as possible; // - ... as unambiguous as possible. // // Also, * please create a separate issue for each idea/problem/etc. A very general * title or an // \"and\" in the title could be an indication that the issue * is not specific enough and should // be split into several issues. // // * \subsection issues_title_good_examples Good Examples // // - \"Provide * support for AVX-512 SIMD operations\" // - \"Add support for the Boost * Multiprecision Library\" // - \"Introduce reduction operations into * Blaze\" // - \"Compilation error on KNL with -march=knl\" // // * \subsection issues_title_bad_examples Bad Examples // // - \"Several * requests\" (instead create separate issues for each single request) // - * \"Improve the performance\" (instead specify which operation should * perform better) // - \"Blaze library compilation error\" (instead try to * be more specific) // // // <hr> // \section issues_description Description * // // The description should help us to understand your idea or problem in * as much detail as possible. // Also, it helps to clearly spell out your * expectations (how a feature is supposed to work, how // the behavior * should be, etc.). Please spend a couple of minutes to try to make the * description // as comprehensive as possible. // // // <hr> // \section * issues_assignee Assignee // // There is no need to assign the issue to a * particular person. It is perfectly ok if you just // ignore this setting. * // // // <hr> // \section issues_kind Kind of Issue // // There are four * kinds of issues available in the Bitbucket issue tracker: \ref * issues_kind_bug, // \ref issues_kind_enhancement, \ref * issues_kind_proposal, and \ref issues_kind_task. In the // following we * try to give guidelines on which kind to choose for a particular issue: // * // \subsection issues_kind_bug Bug // // Please choose the category \ref * issues_kind_bug if ... // // - ... you experience a compilation error * despite your best efforts to get it right; // - ... you experience a * crash/failure despite your best efforts to get it right; // - ... you * experience problems when combining features; // - ... a feature does not * work as specified/documented (i.e. can be considered broken). 
// // Please * \b don't choose the category \ref issues_kind_bug if ... // // - ... you * feel a feature should work differently than it currently does (instead * create a // \ref issues_kind_proposal with a convincing title and * description); // - ... you are not sure how to use a feature (instead * create an \ref issues_kind_enhancement // issue to extend the * documentation); // - ... you are missing a feature (instead create a \ref * issues_kind_proposal or // \ref issues_kind_enhancement issue). // * // If you select the category \ref issues_kind_bug, please also try to * provide a minimum example // that fails. That helps us to minimize the * time to resolve the bug. // // As we try to keep \b Blaze bug-free, we * will always prioritize bug issues. However, we will // also quickly close * bug issues as \"wontfix\" if the described issue is not a bug (i.e. one of * // the problems mentioned above). We will \b not relabel a bug issue to * \ref issues_kind_enhancement // or \ref issues_kind_proposal, even if they * would be reasonable extensions to \b Blaze. // // \subsection * issues_kind_enhancement Enhancement // // Please choose the category \ref * issues_kind_enhancement if ... // // - ... you need an add-on to an * existing feature; // - ... you need an extension of an existing feature; * // - ... you need an extended documentation for an existing feature. // * // \ref issues_kind_enhancement is very similar to \ref * issues_kind_proposal, so we don't mind // if an \ref * issues_kind_enhancement is labeled as a \ref issues_kind_proposal or vice * versa. // Just make sure you don't request an extension or new feature as * a \ref issues_kind_bug. // // \subsection issues_kind_proposal Proposal // * // Please choose the category \ref issues_kind_proposal if ... // // - * ... you want to request a new feature; // - ... you want to change an * existing feature. // // \ref issues_kind_proposal is very similar to \ref * issues_kind_enhancement, so we don't mind if // a \ref * issues_kind_proposal is labeled as an \ref issues_kind_enhancement or vice * versa. Just // make sure you don't request an extension or new feature as * a \ref issues_kind_bug. // // \subsection issues_kind_task Task // // * Please choose the category \ref issues_kind_task if ... // // - ... you * want us to do something not feature related; // - ... you have something * else in mind which does not fall in the other three categories. // // // * <hr> // \section issues_priority Priority // // Via the priority of an * issue you can tell us how important the issue is to you. Therefore the // * priority can have an influence on when we will deal with the issue. * However, unfortunately we // don't have an infinite amount of time and we * can not deal with an arbitrary amount of issues // at the same time. We * will therefore take the priority into account, but mainly schedule the // * issues based on impact to all \b Blaze users and the estimated time to * resolve it. // // You can choose between \ref issues_priority_blocker, * \ref issues_priority_critical, // \ref issues_priority_major, \ref * issues_priority_minor, and \ref issues_priority_trivial. // // \subsection * issues_priority_blocker Blocker // // Please choose a \ref * issues_priority_blocker priority if ... // // - ... you cannot work with * \b Blaze due to the described \ref issues_kind_bug; // - ... the \ref * issues_kind_bug likely has an influence on \b all \b Blaze users. 
// // * Please note that the categories \ref issues_kind_enhancement or \ref * issues_kind_proposal // should never be a \ref issues_priority_blocker! // * // \subsection issues_priority_critical Critical // // Please choose a * \ref issues_priority_critical priority if ... // // - ... you can work * around a \ref issues_kind_bug, but the workaround is (much) slower or * awful; // - ... you cannot use \b Blaze without the proposed feature; // * - ... you consider it to be essential for \b all \b Blaze users. // // * \subsection issues_priority_major Major // // Please choose a \ref * issues_priority_major priority if ... // // - ... a \ref issues_kind_bug * or feature request is not \ref issues_priority_critical, but // * still very important to you; // - ... you consider it to have a \ref * issues_priority_major impact on most \b Blaze users. // // The \ref * issues_priority_major category is the default setting in Bitbucket and we * therefore // consider it as the default priority for issues. // // * \subsection issues_priority_minor Minor // // Please choose a \ref * issues_priority_minor priority if ... // // - ... a \ref issues_kind_bug * does not affect many \b Blaze users; // - ... a feature request would * only be useful for a small number of \b Blaze users; // - ... a feature * would be nice to have, but is not particularly important. // // * \subsection issues_priority_trivial Trivial // // Please choose a \ref * issues_priority_trivial priority if ... // // - ... a \ref * issues_kind_bug hardly affects anyone; // - ... a feature request would * only be useful for very few \b Blaze users; // - ... the expected time to * resolve an issue is very small. // // // <hr> // \section * issues_attachment Attachments // // You can always provide us with * additional information in the form of attachments. Feel free // to attach * something to the issue if ... // // - ... it can help us to analyze a * \ref issues_kind_bug; // - ... you have some source code that * demonstrates a problem; // - ... you already have a working prototype * that sketches the idea; // - ... you have additional resources that could * help us. // // We appreciate anything that simplifies our work and speeds * up our progress. 
// \n Previous: \ref faq &nbsp; &nbsp; Next: \ref blaze_references \n
*/
//*************************************************************************************************


//**Blaze References*******************************************************************************
/*!\page blaze_references Blaze References
//
// In case you need references to the \b Blaze library (for papers or other publications), please
// feel free to use one of the following references:

   \code
   @misc{blazelib,
      author       = "Klaus {Iglberger}",
      title        = "Blaze C++ Linear Algebra Library",
      howpublished = "https://bitbucket.org/blaze-lib",
      year         = 2012
   }
   \endcode

   \code
   @article{iglberger2012_1,
      author  = "Klaus {Iglberger} and Georg {Hager} and Jan {Treibig} and Ulrich {R{\"u}de}",
      title   = "Expression Templates Revisited: A Performance Analysis of Current Methodologies",
      journal = "SIAM Journal on Scientific Computing",
      year    = 2012,
      volume  = 34(2),
      pages   = C42--C69
   }
   \endcode

   \code
   @inproceedings{iglberger2012_2,
      author    = "Klaus {Iglberger} and Georg {Hager} and Jan {Treibig} and Ulrich {R{\"u}de}",
      title     = "High Performance Smart Expression Template Math Libraries",
      booktitle = "Proceedings of the 2nd International Workshop on New Algorithms and Programming Models for the Manycore Era (APMM 2012) at HPCS 2012",
      year      = 2012
   }
   \endcode

// \n Previous: \ref issue_creation_guidelines
*/
//*************************************************************************************************

#endif
//================================================================================================= /* * ! // \file blaze/Tutorial.h // \brief Tutorial of the Blaze library // * // Copyright (C) 2012-2018 Klaus Iglberger - All Rights Reserved // // * This file is part of the Blaze library. You can redistribute it and/or * modify it under // the terms of the New (Revised) BSD License. * Redistribution and use in source and binary // forms, with or without * modification, are permitted provided that the following conditions // are * met: // // 1. Redistributions of source code must retain the above * copyright notice, this list of // conditions and the following * disclaimer. // 2. Redistributions in binary form must reproduce the above * copyright notice, this list // of conditions and the following * disclaimer in the documentation and/or other materials // provided * with the distribution. // 3. Neither the names of the Blaze development * group nor the names of its contributors // may be used to endorse or * promote products derived from this software without specific // prior * written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT * HOLDERS AND CONTRIBUTORS "AS IS" AND ANY // EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES // OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO * EVENT // SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY * DIRECT, INDIRECT, // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED // TO, PROCUREMENT OF SUBSTITUTE * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR // BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN // ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH // DAMAGE. */ //================================================================================================= #ifndef _BLAZE_TUTORIAL_H_ #define _BLAZE_TUTORIAL_H_ //================================================================================================= // //BLAZE TUTORIAL // //================================================================================================= //**Mainpage *************************************************************************************** /* * !\mainpage // // \image html blaze300x150.jpg // // This is the API for * the \b Blaze high performance C++ math library. It gives a complete // * overview of the individual features and sublibraries of \b Blaze. To get a * first impression // on \b Blaze, the short \ref getting_started tutorial * is a good place to start. Afterwards, // the following long tutorial * covers the most important aspects of the \b Blaze math library. 
// The * tabs at the top of the page allow a direct access to the individual * modules, namespaces, // classes, and files of the \b Blaze library.\n\n // * // \section table_of_content Table of Contents // // <ul> // <li> \ref * configuration_and_installation </li> // <li> \ref getting_started </li> * // <li> \ref vectors // <ul> // <li> \ref vector_types * </li> // <li> \ref vector_operations </li> // </ul> // * </li> // <li> \ref matrices // <ul> // <li> \ref * matrix_types </li> // <li> \ref matrix_operations </li> // * </ul> // </li> // <li> \ref adaptors // <ul> // <li> * \ref adaptors_symmetric_matrices </li> // <li> \ref * adaptors_hermitian_matrices </li> // <li> \ref * adaptors_triangular_matrices </li> // </ul> // </li> // <li> * \ref views // <ul> // <li> \ref views_subvectors </li> // * <li> \ref views_element_selections </li> // <li> \ref * views_submatrices </li> // <li> \ref views_rows </li> // * <li> \ref views_row_selections </li> // <li> \ref views_columns * </li> // <li> \ref views_column_selections </li> // <li> * \ref views_bands </li> // </ul> // </li> // <li> \ref * arithmetic_operations // <ul> // <li> \ref addition </li> * // <li> \ref subtraction </li> // <li> \ref * scalar_multiplication </li> // <li> \ref * vector_vector_multiplication // <ul> // <li> * \ref componentwise_multiplication </li> // <li> \ref * inner_product </li> // <li> \ref outer_product </li> // * <li> \ref cross_product </li> // </ul> // </li> // * <li> \ref vector_vector_division </li> // <li> \ref * matrix_vector_multiplication </li> // <li> \ref * matrix_matrix_multiplication // <ul> // <li> * \ref schur_product </li> // <li> \ref matrix_product </li> * // </ul> // </li> // </ul> // </li> // * <li> \ref shared_memory_parallelization // <ul> // <li> * \ref hpx_parallelization </li> // <li> \ref * cpp_threads_parallelization </li> // <li> \ref * boost_threads_parallelization </li> // <li> \ref * openmp_parallelization </li> // <li> \ref serial_execution </li> * // </ul> // </li> // <li> \ref serialization // <ul> // * <li> \ref vector_serialization </li> // <li> \ref * matrix_serialization </li> // </ul> // </li> // <li> \ref * customization // <ul> // <li> \ref configuration_files * </li> // <li> \ref vector_and_matrix_customization // * <ul> // <li> \ref custom_data_members </li> // * <li> \ref custom_operations </li> // <li> \ref * custom_data_types </li> // </ul> // </li> // * <li> \ref error_reporting_customization </li> // </ul> // </li> * // <li> \ref blas_functions </li> // <li> \ref lapack_functions * </li> // <li> \ref block_vectors_and_matrices </li> // <li> \ref * intra_statement_optimization </li> // <li> \ref faq </li> // <li> * \ref issue_creation_guidelines </li> // <li> \ref blaze_references * </li> // </ul> */ //************************************************************************************************* //**Configuration and Installation ***************************************************************** /* * !\page configuration_and_installation Configuration and Installation // // * \tableofcontents // // // Since \b Blaze is a header-only library, setting * up the \b Blaze library on a particular system // is a fairly easy two * step process. In the following, this two step process is explained in // * detail, preceded only by a short summary of the requirements. 
// // // \n * \section requirements Requirements // <hr> // // For maximum performance * the \b Blaze library expects you to have a BLAS library installed // (<a * href="http://software.intel.com/en-us/articles/intel-mkl/">Intel MKL</a>, * // <a href="http://developer.amd.com/libraries/acml/">ACML</a>, // <a * href="http://math-atlas.sourceforge.net">Atlas</a>, // <a * href="http://www.tacc.utexas.edu/tacc-projects/gotoblas2">Goto</a>, ...). * If you don't // have a BLAS library installed on your system, \b Blaze * will still work and will not be reduced // in functionality, but * performance may be limited. Thus it is strongly recommended to install a * // BLAS library. // // Additionally, for computing the determinant of a * dense matrix, for the decomposition of dense // matrices, for the dense * matrix inversion, and for the computation of eigenvalues and singular // * values \b Blaze requires <a * href="https://en.wikipedia.org/wiki/LAPACK">LAPACK</a>. When either // of * these features is used it is necessary to link the LAPACK library to the * final executable. // If no LAPACK library is available the use of these * features will result in a linker error. // // Furthermore, it is possible * to use Boost threads to run numeric operations in parallel. In this // * case the Boost library is required to be installed on your system. It is * recommended to use the // newest Boost library available, but \b Blaze * requires at minimum the Boost version 1.54.0. If // you don't have Boost * installed on your system, you can download it for free from // <a * href="http://www.boost.org">www.boost.org</a>. // // // \n \section * step_1_installation Step 1: Installation // <hr> // // \subsection * step_1_cmake Installation via CMake // // The first step is the * installation of the \b Blaze header files. The most convenient way // to * do this is via <a href="https://cmake.org">CMake</a>. Linux and macOS * users can use the // following two lines to copy the \b Blaze headers in * the <tt>./blaze</tt> subdirectory to // the directory \c * ${CMAKE_INSTALL_PREFIX}/include and the package configuration files to // * \c ${CMAKE_INSTALL_PREFIX}/share/blaze/cmake. * * \code cmake -DCMAKE_INSTALL_PREFIX=/usr/local/ sudo make install \endcode * * // Windows users can do the same via the cmake-gui. Alternatively, it is * possible to include // \b Blaze by adding the following lines in any \c * CMakeLists.txt file: * * \code find_package( blaze ) if( blaze_FOUND ) add_library( blaze_target * INTERFACE ) target_link_libraries( blaze_target INTERFACE blaze::blaze ) * endif() \endcode * * // \n \subsection step_1_vcpkg Installation via the VC++ Packaging Tool // // * An alternate way to install \b Blaze for Windows users is Microsoft's // * <a href="https://github.com/Microsoft/vcpkg">VC++ Packaging Tool * (vcpkg)</a>. \b Blaze can // be installed via the command line: * * \code C:\src\vcpkg> .\vcpkg install blaze \endcode * * // The tool automatically downloads the latest \b Blaze release and copies * the header files to // the common include directory. Please note that * since \b Blaze is a header-only library the // attempt to install any * static or dynamic library will fail! 
// // \n \subsection * step_1_installation_unix Manual Installation on Linux/macOS // // Since \b * Blaze only consists of header files, the <tt>./blaze</tt> subdirectory can * be simply // copied to a standard include directory (note that this * requires root privileges): * * \code cp -r ./blaze /usr/local/include \endcode * * // Alternatively, on Unix-based machines (which includes Linux and Mac OS X) * the // \c CPLUS_INCLUDE_PATH environment variable can be set. The * specified directory will be // searched after any directories specified on * the command line with the option \c -I and // before the standard default * directories (such as \c /usr/local/include and \c /usr/include). // * Assuming a user named 'Jon', the environment variable can be set as * follows: * * \code CPLUS_INCLUDE_PATH=/usr/home/jon/blaze export CPLUS_INCLUDE_PATH * \endcode * * // Last but not least, the <tt>./blaze</tt> subdirectory can be explicitly * specified on the // command line. The following example demonstrates this * by means of the GNU C++ compiler: * * \code g++ -I/usr/home/jon/blaze -o BlazeTest BlazeTest.cpp \endcode * * // \n \subsection step_1_installation_windows Manual Installation on Windows * // // Windows doesn't have a standard include directory. Therefore the \b * Blaze header files can be // copied to any other directory or simply left * in the default \b Blaze directory. However, the // chosen include * directory has to be explicitly specified as include path. In Visual * Studio, // this is done via the project property pages, configuration * properties, C/C++, General settings. // Here the additional include * directories can be specified. // // // \n \section step_2_configuration * Step 2: Configuration // <hr> // // The second step is the configuration * and customization of the \b Blaze library. Many aspects // of \b Blaze can * be adapted to specific requirements, environments and architectures. The * most // convenient way to configure \b Blaze is to modify the headers in * the <tt>./blaze/config/</tt> // subdirectory by means of <a * href="https://cmake.org">CMake</a>. Alternatively these header // files * can be customized manually. In both cases, however, the files are * modified. If this is // not an option it is possible to configure \b Blaze * via the command line (see the tutorial // section \ref configuration_files * or the documentation in the configuration files). // // Since the default * settings are reasonable for most systems this step can also be skipped. // * However, in order to achieve maximum performance a customization of at * least the following // configuration files is required: // // - * <b><tt><blaze/config/BLAS.h></tt></b>: Via this configuration file \b * Blaze can be enabled // to use a third-party BLAS library for several * basic linear algebra functions (such as for // instance dense matrix * multiplications). In case no BLAS library is used, all linear algebra // * functions use the default implementations of the \b Blaze library and * therefore BLAS is not a // requirement for the compilation process. * However, please note that performance may be limited. // - * <b><tt><blaze/config/CacheSize.h></tt></b>: This file contains the * hardware specific cache // settings. \b Blaze uses this information to * optimize its cache usage. For maximum performance // it is recommended * to adapt these setting to a specific target architecture. 
// - * <b><tt><blaze/config/Thresholds.h></tt></b>: This file contains all * thresholds for the // customization of the \b Blaze compute kernels. In * order to tune the kernels for a specific // architecture and to * maximize performance it can be necessary to adjust the thresholds, // * especially for a parallel execution (see \ref * shared_memory_parallelization). // // For an overview of other * customization options and more details, please see the section // \ref * configuration_files. // // // \n \section blaze_version Blaze Version // * <hr> // // The current major and minor version number of the \b Blaze * library can be found in the // <b><tt><blaze/system/Version.h></tt></b> * header file. It is automatically included via the // * <b><tt><blaze/Blaze.h></tt></b> header file. The file contains the two * following macros, // which can for instance be used for conditional * compilation: * * \code #define BLAZE_MAJOR_VERSION 3 #define BLAZE_MINOR_VERSION 2 \endcode * * // \n Next: \ref getting_started */ //************************************************************************************************* //**Getting Started ******************************************************************************** /* * !\page getting_started Getting Started // // This short tutorial serves * the purpose to give a quick overview of the way mathematical // * expressions have to be formulated in \b Blaze. Starting with \ref * vector_types, the following // long tutorial covers the most important * aspects of the \b Blaze math library. // // // \n \section * getting_started_vector_example A First Example // // \b Blaze is written * such that using mathematical expressions is as close to mathematical // * textbooks as possible and therefore as intuitive as possible. In nearly * all cases the seemingly // easiest solution is the right solution and most * users experience no problems when trying to // use \b Blaze in the most * natural way. The following example gives a first impression of the // * formulation of a vector addition in \b Blaze: * * \code #include <iostream> #include <blaze/Math.h> * * using blaze::StaticVector; using blaze::DynamicVector; * * // Instantiation of a static 3D column vector. The vector is directly * initialized as // ( 4 -2 5 ) StaticVector<int,3UL> a{ 4, -2, 5 }; * * // Instantiation of a dynamic 3D column vector. Via the subscript operator * the values are set to // ( 2 5 -3 ) DynamicVector<int> b( 3UL ); b[0] = * 2; b[1] = 5; b[2] = -3; * * // Adding the vectors a and b DynamicVector<int> c = a + b; * * // Printing the result of the vector addition std::cout << "c =\n" << c << * "\n"; \endcode * * // Note that the entire \b Blaze math library can be included via the \c * blaze/Math.h header // file. Alternatively, the entire \b Blaze library, * including both the math and the entire // utility module, can be included * via the \c blaze/Blaze.h header file. Also note that all // classes and * functions of \b Blaze are contained in the blaze namespace.\n\n // // * Assuming that this program resides in a source file called \c * FirstExample.cpp, it can be // compiled for instance via the GNU C++ * compiler: * * \code g++ -ansi -O3 -DNDEBUG -mavx -o FirstExample FirstExample.cpp \endcode * * // Note the definition of the \c NDEBUG preprocessor symbol. In order to * achieve maximum // performance, it is necessary to compile the program in * release mode, which deactivates // all debugging functionality inside \b * Blaze. 
It is also strongly recommended to specify // the available * architecture specific instruction set (as for instance the AVX instruction * // set, which if available can be activated via the \c -mavx flag). This * allows \b Blaze // to optimize computations via vectorization.\n\n // // * When running the resulting executable \c FirstExample, the output of the * last line of // this small program is * * \code c = 6 3 2 \endcode * * // \n \section getting_started_matrix_example An Example Involving Matrices * // // Similarly easy and intuitive are expressions involving matrices: * * \code #include <blaze/Math.h> * * using namespace blaze; * * // Instantiating a dynamic 3D column vector DynamicVector<int> x{ 4, -1, 3 }; * * // Instantiating a dynamic 2x3 row-major matrix, preinitialized with 0. Via * the function call // operator three values of the matrix are explicitly * set to get the matrix // ( 1 0 4 ) // ( 0 -2 0 ) DynamicMatrix<int> * A( 2UL, 3UL, 0 ); A(0,0) = 1; A(0,2) = 4; A(1,1) = -2; * * // Performing a matrix/vector multiplication DynamicVector<int> y = A * x; * * // Printing the resulting vector std::cout << "y =\n" << y << "\n"; * * // Instantiating a static column-major matrix. The matrix is directly * initialized as // ( 3 -1 ) // ( 0 2 ) // ( -1 0 ) * StaticMatrix<int,3UL,2UL,columnMajor> B{ { 3, -1 }, { 0, 2 }, { -1, 0 } }; * * // Performing a matrix/matrix multiplication DynamicMatrix<int> C = A * B; * * // Printing the resulting matrix std::cout << "C =\n" << C << "\n"; \endcode * * // The output of this program is * * \code y = 16 2 * * C = ( -1 -1 ) ( 0 -4 ) \endcode * * // \n \section getting_started_complex_example A Complex Example // // The * following example is much more sophisticated. It shows the implementation * of the Conjugate // Gradient (CG) algorithm * (http://en.wikipedia.org/wiki/Conjugate_gradient) by means of the // \b * Blaze library: // // \image html cg.jpg // // In this example it is not * important to understand the CG algorithm itself, but to see the // * advantage of the API of the \b Blaze library. In the \b Blaze * implementation we will use a // sparse matrix/dense vector multiplication * for a 2D Poisson equation using \f$ N \times N \f$ // unknowns. It becomes * apparent that the core of the algorithm is very close to the mathematical * // formulation and therefore has huge advantages in terms of readability * and maintainability, // while the performance of the code is close to the * expected theoretical peak performance: * * \code const size_t NN( N*N ); * * blaze::CompressedMatrix<double,rowMajor> A( NN, NN ); * blaze::DynamicVector<double,columnVector> x( NN, 1.0 ), b( NN, 0.0 ), r( * NN ), p( NN ), Ap( NN ); double alpha, beta, delta; * * // ... Initializing the sparse matrix A * * // Performing the CG algorithm r = b - A * x; p = r; delta = (r,r); * * for( size_t iteration=0UL; iteration<iterations; ++iteration ) { Ap = A * p; * alpha = delta / (p,Ap); x += alpha * p; r -= alpha * Ap; beta = (r,r); if( * std::sqrt( beta ) < 1E-8 ) break; p = r + ( beta / delta ) * p; delta = * beta; } \endcode * * // \n Hopefully this short tutorial gives a good first impression of how * mathematical expressions // are formulated with \b Blaze. The following * long tutorial, starting with \ref vector_types, // will cover all aspects * of the \b Blaze math library, i.e. it will introduce all vector and // * matrix types, all possible operations on vectors and matrices, and of * course all possible // mathematical expressions. 
// // \n Previous: \ref * configuration_and_installation &nbsp; &nbsp; Next: \ref vectors */ //************************************************************************************************* //**Vectors **************************************************************************************** /* * !\page vectors Vectors // // \tableofcontents // // // \n \section * vectors_general General Concepts // <hr> // // The \b Blaze library * currently offers four dense vector types (\ref vector_types_static_vector, * // \ref vector_types_dynamic_vector, \ref vector_types_hybrid_vector, and * \ref vector_types_custom_vector) // and one sparse vector type (\ref * vector_types_compressed_vector). All vectors can be specified // as either * column vectors or row vectors: * * \code using blaze::DynamicVector; using blaze::columnVector; using * blaze::rowVector; * * // Setup of the 3-dimensional dense column vector // // ( 1 ) // ( 2 ) * // ( 3 ) // DynamicVector<int,columnVector> a{ 1, 2, 3 }; * * // Setup of the 3-dimensional dense row vector // // ( 4 5 6 ) // * DynamicVector<int,rowVector> b{ 4, 5, 6 }; \endcode * * // Per default, all vectors in \b Blaze are column vectors: * * \code // Instantiation of a 3-dimensional column vector * blaze::DynamicVector<int> c( 3UL ); \endcode * * // \n \section vectors_details Vector Details // <hr> // // - \ref * vector_types // - \ref vector_operations // // // \n \section * vectors_examples Examples // <hr> * * \code using blaze::StaticVector; using blaze::DynamicVector; using * blaze::CompressedVector; using blaze::rowVector; using * blaze::columnVector; * * StaticVector<int,6UL> a; // Instantiation of a 6-dimensional * static column vector CompressedVector<int,rowVector> b; // Instantiation * of a compressed row vector DynamicVector<int,columnVector> c; // * Instantiation of a dynamic column vector * * // ... Resizing and initialization * * c = a + trans( b ); \endcode * * // \n Previous: \ref getting_started &nbsp; &nbsp; Next: \ref vector_types */ //************************************************************************************************* //**Vector Types *********************************************************************************** /* * !\page vector_types Vector Types // // \tableofcontents // // // \n * \section vector_types_static_vector StaticVector // <hr> // // The * blaze::StaticVector class template is the representation of a fixed size * vector with // statically allocated elements of arbitrary type. It can be * included via the header file * * \code #include <blaze/math/StaticVector.h> \endcode * * // The type of the elements, the number of elements, and the transpose flag * of the vector can // be specified via the three template parameters: * * \code template< typename Type, size_t N, bool TF > class StaticVector; * \endcode * * // - \c Type: specifies the type of the vector elements. StaticVector can be * used with any // non-cv-qualified, non-reference, non-pointer * element type. // - \c N : specifies the total number of vector * elements. It is expected that StaticVector is // only used for * tiny and small vectors. // - \c TF : specifies whether the vector is a * row vector (\c blaze::rowVector) or a column // vector (\c * blaze::columnVector). The default value is \c blaze::columnVector. 
// // * The blaze::StaticVector is perfectly suited for small to medium vectors * whose size is known at // compile time: * * \code // Definition of a 3-dimensional integral column vector * blaze::StaticVector<int,3UL> a; * * // Definition of a 4-dimensional single precision column vector * blaze::StaticVector<float,4UL,blaze::columnVector> b; * * // Definition of a 6-dimensional double precision row vector * blaze::StaticVector<double,6UL,blaze::rowVector> c; \endcode * * // \n \section vector_types_dynamic_vector DynamicVector // <hr> // // The * blaze::DynamicVector class template is the representation of an arbitrary * sized vector // with dynamically allocated elements of arbitrary type. It * can be included via the header file * * \code #include <blaze/math/DynamicVector.h> \endcode * * // The type of the elements and the transpose flag of the vector can be * specified via the two // template parameters: * * \code template< typename Type, bool TF > class DynamicVector; \endcode * * // - \c Type: specifies the type of the vector elements. DynamicVector can * be used with any // non-cv-qualified, non-reference, * non-pointer element type. // - \c TF : specifies whether the vector is a * row vector (\c blaze::rowVector) or a column // vector (\c * blaze::columnVector). The default value is \c blaze::columnVector. // // * The blaze::DynamicVector is the default choice for all kinds of dense * vectors and the best // choice for medium to large vectors. Its size can * be modified at runtime: * * \code // Definition of a 3-dimensional integral column vector * blaze::DynamicVector<int> a( 3UL ); * * // Definition of a 4-dimensional single precision column vector * blaze::DynamicVector<float,blaze::columnVector> b( 4UL ); * * // Definition of a double precision row vector with size 0 * blaze::DynamicVector<double,blaze::rowVector> c; \endcode * * // \n \section vector_types_hybrid_vector HybridVector // <hr> // // The * blaze::HybridVector class template combines the advantages of the * blaze::StaticVector and // the blaze::DynamicVector class templates. It * represents a fixed size vector with statically // allocated elements, but * still can be dynamically resized (within the bounds of the available // * memory). It can be included via the header file * * \code #include <blaze/math/HybridVector.h> \endcode * * // The type of the elements, the number of elements, and the transpose flag * of the vector can // be specified via the three template parameters: * * \code template< typename Type, size_t N, bool TF > class HybridVector; * \endcode * * // - \c Type: specifies the type of the vector elements. HybridVector can be * used with any // non-cv-qualified, non-reference, non-pointer * element type. // - \c N : specifies the maximum number of vector * elements. It is expected that HybridVector // is only used for * tiny and small vectors. // - \c TF : specifies whether the vector is a * row vector (\c blaze::rowVector) or a column // vector (\c * blaze::columnVector). The default value is \c blaze::columnVector. 
// // * The blaze::HybridVector is a suitable choice for small to medium vectors, * whose size is not // known at compile time or not fixed at runtime, but * whose maximum size is known at compile // time: * * \code // Definition of a 3-dimensional integral column vector with a maximum * size of 6 blaze::HybridVector<int,6UL> a( 3UL ); * * // Definition of a 4-dimensional single precision column vector with a * maximum size of 16 blaze::HybridVector<float,16UL,blaze::columnVector> b( * 4UL ); * * // Definition of a double precision row vector with size 0 and a maximum size * of 6 blaze::HybridVector<double,6UL,blaze::rowVector> c; \endcode * * // \n \section vector_types_custom_vector CustomVector // <hr> // // The * blaze::CustomVector class template provides the functionality to represent * an external // array of elements of arbitrary type and a fixed size as a * native \b Blaze dense vector data // structure. Thus in contrast to all * other dense vector types a custom vector does not perform // any kind of * memory allocation by itself, but it is provided with an existing array of * element // during construction. A custom vector can therefore be * considered an alias to the existing // array. It can be included via the * header file * * \code #include <blaze/math/CustomVector.h> \endcode * * // The type of the elements, the properties of the given array of elements * and the transpose // flag of the vector can be specified via the following * four template parameters: * * \code template< typename Type, bool AF, bool PF, bool TF > class * CustomVector; \endcode * * // - Type: specifies the type of the vector elements. blaze::CustomVector * can be used with // any non-cv-qualified, non-reference, * non-pointer element type. // - AF : specifies whether the represented, * external arrays are properly aligned with // respect to the * available instruction set (SSE, AVX, ...) or not. // - PF : specified * whether the represented, external arrays are properly padded with // * respect to the available instruction set (SSE, AVX, ...) or not. // - TF * : specifies whether the vector is a row vector (\c blaze::rowVector) or a * column // vector (\c blaze::columnVector). The default value is * \c blaze::columnVector. 
// // The blaze::CustomVector is the right choice * if any external array needs to be represented as // a \b Blaze dense * vector data structure or if a custom memory allocation strategy needs to * be // realized: * * \code using blaze::CustomVector; using blaze::Deallocate; using * blaze::aligned; using blaze::unaligned; using blaze::padded; using * blaze::unpadded; * * // Definition of an unmanaged custom column vector for unaligned, unpadded * integer arrays using UnalignedUnpadded = * CustomVector<int,unaligned,unpadded,columnVector>; std::vector<int> vec( * 7UL ); UnalignedUnpadded a( &vec[0], 7UL ); * * // Definition of a managed custom column vector for unaligned but padded * 'float' arrays using UnalignedPadded = * CustomVector<float,unaligned,padded,columnVector>; * std::unique_ptr<float[]> memory1( new float[16] ); UnalignedPadded b( * memory1.get(), 9UL, 16UL ); * * // Definition of a managed custom row vector for aligned, unpadded 'double' * arrays using AlignedUnpadded = * CustomVector<double,aligned,unpadded,rowVector>; * std::unique_ptr<double[],Deallocate> memory2( blaze::allocate<double>( 7UL * ) ); AlignedUnpadded c( memory2.get(), 7UL ); * * // Definition of a managed custom row vector for aligned, padded * 'complex<double>' arrays using cplx = complex<double>; using AlignedPadded * = CustomVector<cplx,aligned,padded,columnVector>; * std::unique_ptr<cplx[],Deallocate> memory3( allocate<cplx>( 8UL ) ); * AlignedPadded d( memory3.get(), 5UL, 8UL ); \endcode * * // In comparison with the remaining \b Blaze dense vector types * blaze::CustomVector has several // special characteristics. All of these * result from the fact that a custom vector is not // performing any kind of * memory allocation, but instead is given an existing array of elements. // * The following sections discuss all of these characteristics: // // -# * <b>\ref vector_types_custom_vector_memory_management</b> // -# <b>\ref * vector_types_custom_vector_copy_operations</b> // -# <b>\ref * vector_types_custom_vector_alignment</b> // -# <b>\ref * vector_types_custom_vector_padding</b> // // \n \subsection * vector_types_custom_vector_memory_management Memory Management // // The * blaze::CustomVector class template acts as an adaptor for an existing * array of elements. As // such it provides everything that is required to * use the array just like a native \b Blaze dense // vector data structure. * However, this flexibility comes with the price that the user of a custom * // vector is responsible for the resource management. // // The following * examples give an impression of several possible types of custom vectors: * * \code using blaze::CustomVector; using blaze::Deallocate; using * blaze::allocate; using blaze::aligned; using blaze::unaligned; using * blaze::padded; using blaze::unpadded; * * // Definition of a 3-dimensional custom vector with unaligned, unpadded and * externally // managed integer array. Note that the std::vector must be * guaranteed to outlive the // custom vector! std::vector<int> vec( 3UL ); * CustomVector<int,unaligned,unpadded> a( &vec[0], 3UL ); * * // Definition of a custom vector with size 3 and capacity 16 with aligned, * padded and // externally managed integer array. Note that the * std::unique_ptr must be guaranteed // to outlive the custom vector! 
   std::unique_ptr<int[],Deallocate> memory( allocate<int>( 16UL ) );
   CustomVector<int,aligned,padded> b( memory.get(), 3UL, 16UL );
   \endcode

// \n \subsection vector_types_custom_vector_copy_operations Copy Operations
//
// As with all dense vectors it is possible to copy construct a custom vector:

   \code
   using blaze::CustomVector;
   using blaze::unaligned;
   using blaze::unpadded;

   using CustomType = CustomVector<int,unaligned,unpadded>;

   std::vector<int> vec( 5UL, 10 );  // Vector of 5 integers of the value 10
   CustomType a( &vec[0], 5UL );     // Represent the std::vector as Blaze dense vector
   a[1] = 20;                        // Also modifies the std::vector

   CustomType b( a );  // Creating a copy of vector a
   b[2] = 20;          // Also affects vector a and the std::vector
   \endcode

// It is important to note that a custom vector acts as a reference to the specified array. Thus
// the result of the copy constructor is a new custom vector that is referencing and representing
// the same array as the original custom vector.
//
// In contrast to copy construction, just as with references, copy assignment does not change
// which array is referenced by the custom vector, but modifies the values of the array:

   \code
   std::vector<int> vec2( 5UL, 4 );  // Vector of 5 integers of the value 4
   CustomType c( &vec2[0], 5UL );    // Represent the std::vector as Blaze dense vector

   a = c;  // Copy assignment: Set all values of vector a and b to 4.
   \endcode

// \n \subsection vector_types_custom_vector_alignment Alignment
//
// In case the custom vector is specified as \c aligned the passed array must be guaranteed to
// be aligned according to the requirements of the used instruction set (SSE, AVX, ...). For
// instance, if AVX is active an array of integers must be 32-byte aligned:

   \code
   using blaze::CustomVector;
   using blaze::Deallocate;
   using blaze::allocate;
   using blaze::aligned;
   using blaze::unpadded;

   // Allocation of 32-byte aligned memory
   std::unique_ptr<int[],Deallocate> memory( allocate<int>( 5UL ) );

   CustomVector<int,aligned,unpadded> a( memory.get(), 5UL );
   \endcode

// In case the alignment requirements are violated, a \c std::invalid_argument exception is
// thrown.
//
// \n \subsection vector_types_custom_vector_padding Padding
//
// Adding padding elements to the end of an array can have a significant impact on the performance.
// For instance, assuming that AVX is available, then two aligned, padded, 3-dimensional vectors
// of double precision values can be added via a single SIMD addition operation:

   \code
   using blaze::CustomVector;
   using blaze::Deallocate;
   using blaze::allocate;
   using blaze::aligned;
   using blaze::padded;

   using CustomType = CustomVector<double,aligned,padded>;

   std::unique_ptr<double[],Deallocate> memory1( allocate<double>( 4UL ) );
   std::unique_ptr<double[],Deallocate> memory2( allocate<double>( 4UL ) );
   std::unique_ptr<double[],Deallocate> memory3( allocate<double>( 4UL ) );

   // Creating padded custom vectors of size 3 and a capacity of 4
   CustomType a( memory1.get(), 3UL, 4UL );
   CustomType b( memory2.get(), 3UL, 4UL );
   CustomType c( memory3.get(), 3UL, 4UL );

   // ... Initialization

   c = a + b;  // AVX-based vector addition
   \endcode

// In this example, maximum performance is possible. However, in case no padding elements are
// inserted, a scalar addition has to be used:

   \code
   using blaze::CustomVector;
   using blaze::Deallocate;
   using blaze::allocate;
   using blaze::aligned;
   using blaze::unpadded;

   using CustomType = CustomVector<double,aligned,unpadded>;

   std::unique_ptr<double[],Deallocate> memory1( allocate<double>( 3UL ) );
   std::unique_ptr<double[],Deallocate> memory2( allocate<double>( 3UL ) );
   std::unique_ptr<double[],Deallocate> memory3( allocate<double>( 3UL ) );

   // Creating unpadded custom vectors of size 3
   CustomType a( memory1.get(), 3UL );
   CustomType b( memory2.get(), 3UL );
   CustomType c( memory3.get(), 3UL );

   // ... Initialization

   c = a + b;  // Scalar vector addition
   \endcode

// Note the different number of constructor parameters for unpadded and padded custom vectors:
// In contrast to unpadded vectors, where during the construction only the size of the array
// has to be specified, during the construction of a padded custom vector it is additionally
// necessary to explicitly specify the capacity of the array.
//
// The number of padding elements is required to be sufficient with respect to the available
// instruction set: In case of an aligned padded custom vector the added padding elements must
// guarantee that the capacity is greater than or equal to the size and a multiple of the SIMD
// vector width. In case of unaligned padded vectors the number of padding elements can be greater
// than or equal to the number of padding elements of an aligned padded custom vector. In case the
// padding is insufficient with respect to the available instruction set, a \c std::invalid_argument
// exception is thrown.
//
// Please also note that \b Blaze will zero initialize the padding elements in order to achieve
// maximum performance!
//
//
// \n \section vector_types_compressed_vector CompressedVector
// <hr>
//
// The blaze::CompressedVector class is the representation of an arbitrarily sized sparse
// vector, which stores only non-zero elements of arbitrary type. It can be included via the
// header file

   \code
   #include <blaze/math/CompressedVector.h>
   \endcode

// The type of the elements and the transpose flag of the vector can be specified via the two
// template parameters:

   \code
   template< typename Type, bool TF >
   class CompressedVector;
   \endcode

// - \c Type: specifies the type of the vector elements. CompressedVector can be used with any
//            non-cv-qualified, non-reference, non-pointer element type.
// - \c TF  : specifies whether the vector is a row vector (\c blaze::rowVector) or a column
//            vector (\c blaze::columnVector). The default value is \c blaze::columnVector.
// // * The blaze::CompressedVector is the right choice for all kinds of sparse * vectors: * * \code // Definition of a 3-dimensional integral column vector * blaze::CompressedVector<int> a( 3UL ); * * // Definition of a 4-dimensional single precision column vector with capacity * for 3 non-zero elements blaze::CompressedVector<float,blaze::columnVector> * b( 4UL, 3UL ); * * // Definition of a double precision row vector with size 0 * blaze::CompressedVector<double,blaze::rowVector> c; \endcode * * // \n Previous: \ref vectors &nbsp; &nbsp; Next: \ref vector_operations */ //************************************************************************************************* //**Vector Operations ****************************************************************************** /* * !\page vector_operations Vector Operations // // \tableofcontents // // // * \n \section vector_operations_constructors Constructors // <hr> // // * Instantiating and setting up a vector is very easy and intuitive. However, * there are a few // rules to take care of: // - In case the last template * parameter (the transpose flag) is omitted, the vector is per // default * a column vector. // - The elements of a \c StaticVector or \c * HybridVector are default initialized (i.e. built-in // data types are * initialized to 0, class types are initialized via the default * constructor). // - Newly allocated elements of a \c DynamicVector or \c * CompressedVector remain uninitialized // if they are of built-in type * and are default constructed if they are of class type. // // \n * \subsection vector_operations_default_construction Default Construction * * \code using blaze::StaticVector; using blaze::DynamicVector; using * blaze::CompressedVector; * * // All vectors can be default constructed. Whereas the size // of * StaticVectors is fixed via the second template parameter, // the initial * size of a default constructed DynamicVector or // CompressedVector is 0. * StaticVector<int,2UL> v1; // Instantiation of a 2D integer * column vector. // All elements are initialized to 0. * StaticVector<long,3UL,columnVector> v2; // Instantiation of a 3D long * integer column vector. // Again, all elements are initialized to 0L. * DynamicVector<float> v3; // Instantiation of a dynamic * single precision column // vector of size 0. * DynamicVector<double,rowVector> v4; // Instantiation of a dynamic * double precision row // vector of size 0. CompressedVector<int> v5; * // Instantiation of a compressed integer column // vector of size 0. * CompressedVector<double,rowVector> v6; // Instantiation of a compressed * double precision row // vector of size 0. \endcode * * // \n \subsection vector_operations_size_construction Construction with * Specific Size // // The \c DynamicVector, \c HybridVector and \c * CompressedVector classes offer a constructor that // allows to immediately * give the vector the required size. Whereas both dense vectors (i.e. // \c * DynamicVector and \c HybridVector) use this information to allocate memory * for all vector // elements, \c CompressedVector merely acquires the size * but remains empty. * * \code DynamicVector<int,columnVector> v7( 9UL ); // Instantiation of an * integer dynamic column vector // of size 9. The elements are NOT * initialized! HybridVector< complex<float>, 5UL > v8( 2UL ); // * Instantiation of a column vector with two single // precision complex * values. The elements are // default constructed. 
* CompressedVector<int,rowVector> v9( 10UL ); // Instantiation of a * compressed row vector with // size 10. Initially, the vector provides no * // capacity for non-zero elements. \endcode * * // \n \subsection vector_operations_initialization_constructors * Initialization Constructors // // All dense vector classes offer a * constructor that allows for a direct, homogeneous initialization // of all * vector elements. In contrast, for sparse vectors the predicted number of * non-zero elements // can be specified * * \code StaticVector<int,3UL,rowVector> v10( 2 ); // Instantiation * of a 3D integer row vector. // All elements are initialized to 2. * DynamicVector<float> v11( 3UL, 7.0F ); // Instantiation of a * dynamic single precision // column vector of size 3. All elements are // * set to 7.0F. CompressedVector<float,rowVector> v12( 15UL, 3UL ); // * Instantiation of a single precision column // vector of size 15, which * provides enough // space for at least 3 non-zero elements. \endcode * * // \n \subsection vector_operations_array_construction Array Construction // * // Alternatively, all dense vector classes offer a constructor for an * initialization with a dynamic // or static array. If the vector is * initialized from a dynamic array, the constructor expects the // actual * size of the array as first argument, the array as second argument. In case * of a static // array, the fixed size of the array is used: * * \code const unique_ptr<double[]> array1( new double[2] ); // ... * Initialization of the dynamic array blaze::StaticVector<double,2UL> v13( * 2UL, array1.get() ); * * int array2[4] = { 4, -5, -6, 7 }; blaze::StaticVector<int,4UL> v14( array2 ); * \endcode * * // \n \subsection vector_operations_initializer_list_construction Initializer * List Construction // // In addition, all dense and sparse vector classes * can be directly initialized by means of an // initializer list: * * \code blaze::DynamicVector<float> v15{ 1.0F, 2.0F, 3.0F, 4.0F }; * blaze::CompressedVector<int> v16{ 0, 2, 0, 0, 5, 0, 7, 0 }; \endcode * * // In case of sparse vectors, only the non-zero elements are used to * initialize the vector. // // \n \subsection * vector_operations_copy_construction Copy Construction // // All dense and * sparse vectors can be created as the copy of any other dense or sparse * vector // with the same transpose flag (i.e. blaze::rowVector or * blaze::columnVector). * * \code StaticVector<int,9UL,columnVector> v17( v7 ); // Instantiation of the * dense column vector v17 // as copy of the dense column vector v7. * DynamicVector<int,rowVector> v18( v9 ); // Instantiation of the * dense row vector v18 as // copy of the sparse row vector v9. * CompressedVector<int,columnVector> v19( v1 ); // Instantiation of the * sparse column vector v19 // as copy of the dense column vector v1. * CompressedVector<float,rowVector> v20( v12 ); // Instantiation of the * sparse row vector v20 as // copy of the row vector v12. \endcode * * // Note that it is not possible to create a \c StaticVector as a copy of a * vector with a different // size: * * \code StaticVector<int,5UL,columnVector> v21( v7 ); // Runtime error: Size * does not match! StaticVector<int,4UL,rowVector> v22( v10 ); // Compile * time error: Size does not match! 
\endcode * * // \n \section vector_operations_assignment Assignment // <hr> // // There * are several types of assignment to dense and sparse vectors: // \ref * vector_operations_homogeneous_assignment, \ref * vector_operations_array_assignment, // \ref * vector_operations_copy_assignment, and \ref * vector_operations_compound_assignment. // // \n \subsection * vector_operations_homogeneous_assignment Homogeneous Assignment // // * Sometimes it may be necessary to assign the same value to all elements of * a dense vector. // For this purpose, the assignment operator can be used: * * \code blaze::StaticVector<int,3UL> v1; blaze::DynamicVector<double> v2; * * // Setting all integer elements of the StaticVector to 2 v1 = 2; * * // Setting all double precision elements of the DynamicVector to 5.0 v2 = * 5.0; \endcode * * // \n \subsection vector_operations_array_assignment Array Assignment // // * Dense vectors can also be assigned a static array: * * \code blaze::StaticVector<float,2UL> v1; * blaze::DynamicVector<double,rowVector> v2; * * float array1[2] = { 1.0F, 2.0F }; double array2[5] = { 2.1, 4.0, -1.7, 8.6, * -7.2 }; * * v1 = array1; v2 = array2; \endcode * * // \n \subsection vector_operations_initializer_list_assignment Initializer * List Assignment // // Alternatively, it is possible to directly assign an * initializer list to a dense or sparse // vector: * * \code blaze::DynamicVector<float> v1; * blaze::CompressedVector<double,rowVector> v2; * * v1 = { 1.0F, 2.0F }; v2 = { 2.1, 0.0, -1.7, 0.0, -7.2 }; \endcode * * // In case of sparse vectors, only the non-zero elements are considered. // * // \n \subsection vector_operations_copy_assignment Copy Assignment // // * For all vector types it is generally possible to assign another vector * with the same transpose // flag (i.e. blaze::columnVector or * blaze::rowVector). Note that in case of \c StaticVectors, the // assigned * vector is required to have the same size as the \c StaticVector since the * size of a // \c StaticVector cannot be adapted! * * \code blaze::StaticVector<int,3UL,columnVector> v1; * blaze::DynamicVector<int,columnVector> v2( 3UL ); * blaze::DynamicVector<float,columnVector> v3( 5UL ); * blaze::CompressedVector<int,columnVector> v4( 3UL ); * blaze::CompressedVector<float,rowVector> v5( 3UL ); * * // ... Initialization of the vectors * * v1 = v2; // OK: Assignment of a 3D dense column vector to another 3D dense * column vector v1 = v4; // OK: Assignment of a 3D sparse column vector to * a 3D dense column vector v1 = v3; // Runtime error: Cannot assign a 5D * vector to a 3D static vector v1 = v5; // Compilation error: Cannot assign * a row vector to a column vector \endcode * * // \n \subsection vector_operations_compound_assignment Compound Assignment * // // Next to plain assignment, it is also possible to use addition * assignment, subtraction // assignment, and multiplication assignment. Note * however, that in contrast to plain assignment // the size and the * transpose flag of the vectors has be to equal in order to able to perform * a // compound assignment. * * \code blaze::StaticVector<int,5UL,columnVector> v1; * blaze::DynamicVector<int,columnVector> v2( 5UL ); * blaze::CompressedVector<float,columnVector> v3( 7UL ); * blaze::DynamicVector<float,rowVector> v4( 7UL ); * blaze::CompressedVector<float,rowVector> v5( 7UL ); * * // ... 
Initialization of the vectors * * v1 += v2; // OK: Addition assignment between two column vectors of the same * size v1 += v3; // Runtime error: No compound assignment between vectors * of different size v1 -= v4; // Compilation error: No compound assignment * between vectors of different transpose flag v4 *= v5; // OK: * Multiplication assignment between two row vectors of the same size * \endcode * * // \n \section vector_operations_element_access Element Access // <hr> // // * \n \subsection vector_operations_subscript_operator_1 Subscript Operator * // // The easiest and most intuitive way to access a dense or sparse * vector is via the subscript // operator. The indices to access a vector * are zero-based: * * \code blaze::DynamicVector<int> v1( 5UL ); v1[0] = 1; v1[1] = 3; // ... * * blaze::CompressedVector<float> v2( 5UL ); v2[2] = 7.3F; v2[4] = -1.4F; * \endcode * * // Whereas using the subscript operator on a dense vector only accesses the * already existing // element, accessing an element of a sparse vector via * the subscript operator potentially // inserts the element into the vector * and may therefore be more expensive. Consider the // following example: * * \code blaze::CompressedVector<int> v1( 10UL ); * * for( size_t i=0UL; i<v1.size(); ++i ) { ... = v1[i]; } \endcode * * // Although the compressed vector is only used for read access within the for * loop, using the // subscript operator temporarily inserts 10 non-zero * elements into the vector. Therefore the // preferred way to traverse the * non-zero elements of a sparse vector is to use iterators. // // \n * \subsection vector_operations_iterators Iterators // // All vectors * (sparse as well as dense) offer an alternate way via the \c begin(), \c * cbegin(), // \c end(), and \c cend() functions to traverse the currently * contained elements by iterators. // In case of non-const vectors, \c * begin() and \c end() return an \c Iterator, which allows a // manipulation * of the non-zero value, in case of a constant vector or in case \c cbegin() * or // \c cend() are used a \c ConstIterator is returned: * * \code using blaze::CompressedVector; * * CompressedVector<int> v1( 10UL ); * * // ... Initialization of the vector * * // Traversing the vector by Iterator for( CompressedVector<int>::Iterator * it=v1.begin(); it!=v1.end(); ++it ) { it->value() = ...; // OK: Write * access to the value of the non-zero element. ... = it->value(); // OK: * Read access to the value of the non-zero element. it->index() = ...; // * Compilation error: The index of a non-zero element cannot be changed. ... * = it->index(); // OK: Read access to the index of the non-zero element. } * * // Traversing the vector by ConstIterator for( * CompressedVector<int>::ConstIterator it=v1.cbegin(); it!=v1.cend(); ++it ) * { it->value() = ...; // Compilation error: Assignment to the value via a * ConstIterator is invalid. ... = it->value(); // OK: Read access to the * value of the non-zero element. it->index() = ...; // Compilation error: * The index of a non-zero element cannot be changed. ... = it->index(); // * OK: Read access to the index of the non-zero element. } \endcode * * // Note that \c begin(), \c cbegin(), \c end(), and \c cend() are also * available as free functions: * * \code for( CompressedVector<int>::Iterator it=begin( v1 ); it!=end( v1 ); * ++it ) { // ... } * * for( CompressedVector<int>::ConstIterator it=cbegin( v1 ); it!=cend( v1 ); * ++it ) { // ... 
} \endcode * * // \n \section vector_operations_element_insertion Element Insertion // <hr> * // // In contrast to dense vectors, that store all elements independent of * their value and that // offer direct access to all elements, spares * vectors only store the non-zero elements contained // in the vector. * Therefore it is necessary to explicitly add elements to the vector. // // * \n \subsection vector_operations_subscript_operator_2 Subscript Operator * // // The first option to add elements to a sparse vector is the subscript * operator: * * \code using blaze::CompressedVector; * * CompressedVector<int> v1( 3UL ); v1[1] = 2; \endcode * * // In case the element at the given index is not yet contained in the vector, * it is automatically // inserted. Otherwise the old value is replaced by * the new value 2. The operator returns a // reference to the sparse vector * element. // // \n \subsection vector_operations_set .set() // // An * alternative to the subscript operator is the \c set() function: In case * the element is not // yet contained in the vector the element is inserted, * else the element's value is modified: * * \code // Insert or modify the value at index 3 v1.set( 3, 1 ); \endcode * * // \n \subsection vector_operations_insert .insert() // // The insertion of * elements can be better controlled via the \c insert() function. In * contrast to // the subscript operator and the \c set() function it emits * an exception in case the element is // already contained in the vector. In * order to check for this case, the \c find() function can be // used: * * \code // In case the element at index 4 is not yet contained in the matrix it * is inserted // with a value of 6. if( v1.find( 4 ) == v1.end() ) * v1.insert( 4, 6 ); \endcode * * // \n \subsection vector_operations_append .append() // // Although the \c * insert() function is very flexible, due to performance reasons it is not * suited // for the setup of large sparse vectors. A very efficient, yet * also very low-level way to fill // a sparse vector is the \c append() * function. It requires the sparse vector to provide enough // capacity to * insert a new element. Additionally, the index of the new element must be * larger // than the index of the previous element. Violating these * conditions results in undefined // behavior! * * \code v1.reserve( 10 ); // Reserving space for 10 non-zero elements * v1.append( 5, -2 ); // Appending the element -2 at index 5 v1.append( 6, * 4 ); // Appending the element 4 at index 6 // ... \endcode * * // \n \section vector_operations_element_removal Element Removal // <hr> // * // \subsection vector_operations_erase .erase() // // The \c erase() * member functions can be used to remove elements from a sparse vector. The * // following example gives an impression of the five different flavors of * \c erase(): * * \code using blaze::CompressedVector; * * CompressedVector<int> v( 42 ); // ... 
Initialization of the vector

   // Erasing the element at index 21
   v.erase( 21 );

   // Erasing a single element via iterator
   v.erase( v.find( 4 ) );

   // Erasing all non-zero elements in the range [7..24]
   v.erase( v.lowerBound( 7 ), v.upperBound( 24 ) );

   // Erasing all non-zero elements with a value larger than 9 by passing a unary predicate
   v.erase( []( int i ){ return i > 9; } );

   // Erasing all non-zero elements in the range [30..40] with a value larger than 5
   v.erase( v.lowerBound( 30 ), v.upperBound( 40 ), []( int i ){ return i > 5; } );
   \endcode

// \n \section vector_operations_element_lookup Element Lookup
// <hr>
//
// A sparse vector only stores the non-zero elements contained in the vector. Therefore, whenever
// accessing a vector element at a specific index a lookup operation is required. Whereas the
// subscript operator is performing this lookup automatically, it is also possible to use the
// \c find(), \c lowerBound(), and \c upperBound() member functions for a manual lookup.
//
// \n \subsection vector_operations_find .find()
//
// The \c find() function can be used to check whether a specific element is contained in a sparse
// vector. It specifically searches for the element at the given index. In case the element is
// found, the function returns an iterator to the element. Otherwise an iterator just past the
// last non-zero element of the compressed vector (the \c end() iterator) is returned. Note that
// the returned iterator is subject to invalidation due to inserting operations via the subscript
// operator, the \c set() function or the \c insert() function!

   \code
   using blaze::CompressedVector;

   CompressedVector<int> a( 42 );
   // ... Initialization of the vector

   // Searching the element at index 7. In case the element is not
   // contained in the vector, the end() iterator is returned.
   CompressedVector<int>::Iterator pos( a.find( 7 ) );

   if( pos != a.end() ) {
      // ...
   }
   \endcode

// \n \subsection vector_operations_lowerbound .lowerBound()
//
// The \c lowerBound() function returns an iterator to the first element with an index not less
// than the given index. In combination with the \c upperBound() function this function can be
// used to create a pair of iterators specifying a range of indices. Note that the returned
// iterator is subject to invalidation due to inserting operations via the subscript operator,
// the \c set() function or the \c insert() function!

   \code
   using blaze::CompressedVector;

   CompressedVector<int> a( 42 );
   // ... Initialization of the vector

   // Searching the lower bound of index 17
   CompressedVector<int>::Iterator pos1( a.lowerBound( 17 ) );

   // Searching the upper bound of index 28
   CompressedVector<int>::Iterator pos2( a.upperBound( 28 ) );

   // Erasing all elements in the specified range
   a.erase( pos1, pos2 );
   \endcode

// \n \subsection vector_operations_upperbound .upperBound()
//
// The \c upperBound() function returns an iterator to the first element with an index greater
// than the given index. In combination with the \c lowerBound() function this function can be
// used to create a pair of iterators specifying a range of indices. Note that the returned
// iterator is subject to invalidation due to inserting operations via the subscript operator,
// the \c set() function or the \c insert() function!

   \code
   using blaze::CompressedVector;

   CompressedVector<int> a( 42 );
   // ... Initialization of the vector

   // Searching the lower bound of index 17
   CompressedVector<int>::Iterator pos1( a.lowerBound( 17 ) );

   // Searching the upper bound of index 28
   CompressedVector<int>::Iterator pos2( a.upperBound( 28 ) );

   // Erasing all elements in the specified range
   a.erase( pos1, pos2 );
   \endcode

// \n \section vector_operations_non_modifying_operations Non-Modifying Operations
// <hr>
//
// \subsection vector_operations_size .size() / size()
//
// Via the \c size() member function, the current size of a dense or sparse vector can be queried:

   \code
   // Instantiating a dynamic vector with size 10
   blaze::DynamicVector<int> v1( 10UL );
   v1.size();  // Returns 10

   // Instantiating a compressed vector with size 12 and capacity for 3 non-zero elements
   blaze::CompressedVector<double> v2( 12UL, 3UL );
   v2.size();  // Returns 12
   \endcode

// Alternatively, the free function \c size() can be used to query the current size of a vector.
// In contrast to the member function, the free function can also be used to query the size of
// vector expressions:

   \code
   size( v1 );  // Returns 10, i.e. has the same effect as the member function
   size( v2 );  // Returns 12, i.e. has the same effect as the member function

   blaze::DynamicMatrix<int> A( 15UL, 12UL );
   size( A * v2 );  // Returns 15, i.e. the size of the resulting vector
   \endcode

// \n \subsection vector_operations_capacity .capacity() / capacity()
//
// Via the \c capacity() (member) function the internal capacity of a dense or sparse vector
// can be queried. Note that the capacity of a vector doesn't have to be equal to the size
// of a vector. In case of a dense vector the capacity will always be greater than or equal to
// the size of the vector, in case of a sparse vector the capacity may even be less than
// the size.

   \code
   v1.capacity();  // Returns at least 10
   \endcode

// For symmetry reasons, there is also a free function \c capacity() available that can be used
// to query the capacity:

   \code
   capacity( v1 );  // Returns at least 10, i.e. has the same effect as the member function
   \endcode

// Note, however, that it is not possible to query the capacity of a vector expression:

   \code
   capacity( A * v1 );  // Compilation error!
   \endcode

// \n \subsection vector_operations_nonzeros .nonZeros() / nonZeros()
//
// For both dense and sparse vectors the number of non-zero elements can be determined via the
// \c nonZeros() member function. Sparse vectors directly return their number of non-zero
// elements, dense vectors traverse their elements and count the number of non-zero elements.

   \code
   v1.nonZeros();  // Returns the number of non-zero elements in the dense vector
   v2.nonZeros();  // Returns the number of non-zero elements in the sparse vector
   \endcode

// There is also a free function \c nonZeros() available to query the current number of non-zero
// elements:

   \code
   nonZeros( v1 );  // Returns the number of non-zero elements in the dense vector
   nonZeros( v2 );  // Returns the number of non-zero elements in the sparse vector
   \endcode

// The free \c nonZeros() function can also be used to query the number of non-zero elements in
// a vector expression.
However, the result is not * the exact number of non-zero elements, but // may be a rough estimation: * * \code nonZeros( A * v1 ); // Estimates the number of non-zero elements in * the vector expression \endcode * * // \n \subsection vector_operations_isempty isEmpty() // // The \c isEmpty() * function returns whether the total number of elements of the vector is * zero: * * \code blaze::DynamicVector<int> a; // Create an empty vector isEmpty( a ); * // Returns true a.resize( 10 ); // Resize to 10 elements * isEmpty( a ); // Returns false \endcode * * // \n \subsection vector_operations_isnan isnan() // // The \c isnan() * function provides the means to check a dense or sparse vector for * non-a-number // elements: * * \code blaze::DynamicVector<double> a; // ... Resizing and initialization if( * isnan( a ) ) { ... } \endcode * * \code blaze::CompressedVector<double> a; // ... Resizing and initialization * if( isnan( a ) ) { ... } \endcode * * // If at least one element of the vector is not-a-number, the function * returns \c true, otherwise // it returns \c false. Please note that this * function only works for vectors with floating point // elements. The * attempt to use it for a vector with a non-floating point element type * results in // a compile time error. // // // \n \subsection * vector_operations_isdefault isDefault() // // The \c isDefault() function * returns whether the given dense or sparse vector is in default state: * * \code blaze::HybridVector<int,20UL> a; // ... Resizing and initialization if( * isDefault( a ) ) { ... } \endcode * * // A vector is in default state if it appears to just have been default * constructed. All resizable // vectors (\c HybridVector, \c DynamicVector, * or \c CompressedVector) and \c CustomVector are // in default state if its * size is equal to zero. A non-resizable vector (\c StaticVector, all // * subvectors, element selections, rows, and columns) is in default state if * all its elements are // in default state. For instance, in case the vector * is instantiated for a built-in integral or // floating point data type, * the function returns \c true in case all vector elements are 0 and // \c * false in case any vector element is not 0. // // // \n \subsection * vector_operations_isUniform isUniform() // // In order to check if all * vector elements are identical, the \c isUniform function can be used: * * \code blaze::DynamicVector<int> a; // ... Resizing and initialization if( * isUniform( a ) ) { ... } \endcode * * // Note that in case of sparse vectors also the zero elements are also taken * into account! // // // \n \subsection vector_operations_length length() / * sqrLength() // // In order to calculate the length (magnitude) of a dense * or sparse vector, both the \c length() // and \c sqrLength() function can * be used: * * \code blaze::StaticVector<float,3UL,rowVector> v{ -1.2F, 2.7F, -2.3F }; * * const float len = length ( v ); // Computes the current length of the * vector const float sqrlen = sqrLength( v ); // Computes the square length * of the vector \endcode * * // Note that both functions can only be used for vectors with built-in or * complex element type! // // // \n \subsection * vector_operations_vector_trans trans() // // As already mentioned, vectors * can either be column vectors (blaze::columnVector) or row vectors // * (blaze::rowVector). A column vector cannot be assigned to a row vector and * vice versa. 
However, // vectors can be transposed via the \c trans() * function: * * \code blaze::DynamicVector<int,columnVector> v1( 4UL ); * blaze::CompressedVector<int,rowVector> v2( 4UL ); * * v1 = v2; // Compilation error: Cannot assign a row vector to a * column vector v1 = trans( v2 ); // OK: Transposing the row vector to a * column vector and assigning it // to the column vector v1 v2 = trans( * v1 ); // OK: Transposing the column vector v1 and assigning it to the * row vector v2 v1 += trans( v2 ); // OK: Addition assignment of two column * vectors \endcode * * // \n \subsection vector_operations_ctrans ctrans() // // It is also possible * to compute the conjugate transpose of a vector. This operation is * available // via the \c ctrans() function: * * \code blaze::CompressedVector< complex<float>, rowVector > v1( 4UL ); * blaze::DynamicVector< complex<float>, columnVector > v2( 4UL ); * * v1 = ctrans( v2 ); // Compute the conjugate transpose vector \endcode * * // Note that the \c ctrans() function has the same effect as manually * applying the \c conj() and // \c trans() function in any order: * * \code v1 = trans( conj( v2 ) ); // Computing the conjugate transpose vector * v1 = conj( trans( v2 ) ); // Computing the conjugate transpose vector * \endcode * * // \n \subsection vector_operations_evaluate eval() / evaluate() // // The \c * evaluate() function forces an evaluation of the given vector expression * and enables // an automatic deduction of the correct result type of an * operation. The following code example // demonstrates its intended use for * the multiplication of a dense and a sparse vector: * * \code using blaze::DynamicVector; using blaze::CompressedVector; * * blaze::DynamicVector<double> a; blaze::CompressedVector<double> b; // ... * Resizing and initialization * * auto c = evaluate( a * b ); \endcode * * // In this scenario, the \c evaluate() function assists in deducing the exact * result type of // the operation via the \c auto keyword. Please note that * if \c evaluate() is used in this // way, no temporary vector is created * and no copy operation is performed. Instead, the result // is directly * written to the target vector due to the return value optimization (RVO). * However, // if \c evaluate() is used in combination with an explicit * target type, a temporary will be // created and a copy operation will be * performed if the used type differs from the type // returned from the * function: * * \code CompressedVector<double> d( a * b ); // No temporary & no copy * operation DynamicVector<double> e( a * b ); // Temporary & copy * operation d = evaluate( a * b ); // Temporary & copy * operation \endcode * * // Sometimes it might be desirable to explicitly evaluate a sub-expression * within a larger // expression. However, please note that \c evaluate() is * not intended to be used for this // purpose. This task is more elegantly * and efficiently handled by the \c eval() function: * * \code blaze::DynamicVector<double> a, b, c, d; * * d = a + evaluate( b * c ); // Unnecessary creation of a temporary vector d = * a + eval( b * c ); // No creation of a temporary vector \endcode * * // In contrast to the \c evaluate() function, \c eval() can take the complete * expression // into account and therefore can guarantee the most efficient * way to evaluate it (see also // \ref intra_statement_optimization). 
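//
// \n For readers who prefer a complete, compilable example, the following minimal sketch (the
// vector sizes and contents are made up for illustration and are not part of the tutorial)
// combines the \c evaluate() and \c eval() functions described above:

   \code
   #include <blaze/Math.h>
   #include <iostream>

   int main()
   {
      blaze::DynamicVector<double>    a{ 1.0, 2.0, 3.0 };
      blaze::CompressedVector<double> b( 3UL );
      b[1] = 4.0;

      // evaluate() deduces the result type of the dense/sparse multiplication
      auto c = evaluate( a * b );

      // eval() explicitly evaluates the sub-expression within the larger expression
      blaze::DynamicVector<double> d = a + eval( a * b );

      std::cout << "c =\n" << c << "\nd =\n" << d << "\n";
   }
   \endcode
//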
// // * // \n \section vector_operations_modifying_operations Modifying Operations * // <hr> // // \subsection vector_operations_resize_reserve .resize() / * .reserve() // // The size of a \c StaticVector is fixed by the second * template parameter and a \c CustomVector // cannot be resized. In * contrast, the size of \c DynamicVectors, \c HybridVectors as well as // \c * CompressedVectors can be changed via the \c resize() function: * * \code using blaze::DynamicVector; using blaze::CompressedVector; * * DynamicVector<int,columnVector> v1; CompressedVector<int,rowVector> v2( 4 ); * v2[1] = -2; v2[3] = 11; * * // Adapting the size of the dynamic and compressed vectors. The (optional) * second parameter // specifies whether the existing elements should be * preserved. Per default, the existing // elements are preserved. v1.resize( * 5UL ); // Resizing vector v1 to 5 elements. Elements of built-in * type remain // uninitialized, elements of class type are default * constructed. v1.resize( 3UL, false ); // Resizing vector v1 to 3 * elements. The old elements are lost, the // new elements are NOT * initialized! v2.resize( 8UL, true ); // Resizing vector v2 to 8 * elements. The old elements are preserved. v2.resize( 5UL, false ); // * Resizing vector v2 to 5 elements. The old elements are lost. \endcode * * // Note that resizing a vector invalidates all existing views (see e.g. \ref * views_subvectors) // on the vector: * * \code blaze::DynamicVector<int,rowVector> v1( 10UL ); // Creating a dynamic * vector of size 10 auto sv = subvector( v1, 2UL, 5UL ); // * Creating a view on the range [2..6] v1.resize( 6UL ); * // Resizing the vector invalidates the view \endcode * * // When the internal capacity of a vector is no longer sufficient, the * allocation of a larger // junk of memory is triggered. In order to avoid * frequent reallocations, the \c reserve() // function can be used up front * to set the internal capacity: * * \code blaze::DynamicVector<int> v1; v1.reserve( 100 ); v1.size(); // * Returns 0 v1.capacity(); // Returns at least 100 \endcode * * // Note that the size of the vector remains unchanged, but only the internal * capacity is set // according to the specified value! // // \n \subsection * vector_operations_shrinkToFit .shrinkToFit() // // The internal capacity * of vectors with dynamic memory is preserved in order to minimize the // * number of reallocations. For that reason, the \c resize() and \c reserve() * functions can lead // to memory overhead. The \c shrinkToFit() member * function can be used to minimize the internal // capacity: * * \code blaze::DynamicVector<int> v1( 1000UL ); // Create a vector of 1000 * integers v1.resize( 10UL ); // Resize to 10, but the * capacity is preserved v1.shrinkToFit(); // Remove * the unused capacity \endcode * * // Please note that due to padding the capacity might not be reduced exactly * to \c size(). Please // also note that in case a reallocation occurs, all * iterators (including \c end() iterators), all // pointers and references * to elements of the vector are invalidated. // // \subsection * vector_operations_reset_clear reset() / clear() // // In order to reset * all elements of a vector, the \c reset() function can be used: * * \code // Setup of a single precision column vector, whose elements are * initialized with 2.0F. blaze::DynamicVector<float> v1( 3UL, 2.0F ); * * // Resetting all elements to 0.0F. Only the elements are reset, the size of * the vector is unchanged. 
reset( v1 ); // Resetting all elements * v1.size(); // Returns 3: size and capacity remain unchanged \endcode * * // In order to return a vector to its default state (i.e. the state of a * default constructed // vector), the \c clear() function can be used: * * \code // Setup of a single precision column vector, whose elements are * initialized with -1.0F. blaze::DynamicVector<float> v1( 5, -1.0F ); * * // Resetting the entire vector. clear( v1 ); // Resetting the entire vector * v1.size(); // Returns 0: size is reset, but capacity remains unchanged * \endcode * * // Note that resetting or clearing both dense and sparse vectors does not * change the capacity // of the vectors. // // // \n \subsection * vector_operations_swap swap() // // Via the \c swap() function it is * possible to completely swap the contents of two vectors of // the same * type: * * \code blaze::DynamicVector<int,columnVector> v1( 10UL ); * blaze::DynamicVector<int,columnVector> v2( 20UL ); * * swap( v1, v2 ); // Swapping the contents of v1 and v2 \endcode * * // \n \section vector_operations_arithmetic_operations Arithmetic Operations * // <hr> // // \subsection vector_operations_normalize normalize() // // * The \c normalize() function can be used to scale any non-zero vector to a * length of 1. In // case the vector does not contain a single non-zero * element (i.e. is a zero vector), the // \c normalize() function returns a * zero vector. * * \code blaze::DynamicVector<float,columnVector> v1( 10UL ); * blaze::CompressedVector<double,columnVector> v2( 12UL ); * * v1 = normalize( v1 ); // Normalizing the dense vector v1 length( v1 ); * // Returns 1 (or 0 in case of a zero vector) v1 = normalize( v2 ); // * Assigning v1 the normalized vector v2 length( v1 ); // Returns 1 * (or 0 in case of a zero vector) \endcode * * // Note that the \c normalize() function only works for floating point * vectors. The attempt to // use it for an integral vector results in a * compile time error. // // // \n \subsection vector_operations_min_max * min() / max() // // The \c min() and \c max() functions can be used for a * single vector or multiple vectors. If // passed a single vector, the * functions return the smallest and largest element of the given // dense * vector or the smallest and largest non-zero element of the given sparse * vector, // respectively: * * \code blaze::StaticVector<int,4UL,rowVector> a{ -5, 2, 7, -4 }; * * min( a ); // Returns -5 max( a ); // Returns 7 \endcode * * \code blaze::CompressedVector<int> b{ 1, 0, 3, 0 }; * * min( b ); // Returns 1 max( b ); // Returns 3 \endcode * * // For more information on the unary \c min() and \c max() reduction * operations see the // \ref vector_operations_reduction_operations section. * // // If passed two or more dense vectors, the \c min() and \c max() * functions compute the // componentwise minimum or maximum of the given * vectors, respectively: * * \code blaze::StaticVector<int,4UL,rowVector> c{ -5, 1, -7, 4 }; * blaze::StaticVector<int,4UL,rowVector> d{ -5, 3, 0, 2 }; * * min( a, c ); // Results in the vector ( -5, 1, -7, -4 ) max( a, c, d ); * // Results in the vector ( -5, 3, 7, 4 ) \endcode * * // Please note that sparse vectors can only be used in the unary \c min() and * \c max() functions. // Also note that all forms of the \c min() and \c * max() functions can be used to compute the // smallest and largest element * of a vector expression: * * \code min( a + b + c ); // Returns -9, i.e. 
the smallest value of the * resulting vector max( a - b - c ); // Returns 11, i.e. the largest value * of the resulting vector * * min( a + c, c - d ); // Results in ( -10 -2 -7 0 ) max( a - c, c + d ); // * Results in ( 0 4 14 6 ) \endcode * * // \n \subsection vector_operators_softmax softmax() // // The <a * href="https://en.wikipedia.org/wiki/Softmax_function">softmax * function</a>, also called // the normalized exponential function, of a * given dense vector can be computed via \c softmax(). // The resulting * dense vector consists of real values in the range (0..1], which add up to * 1. * * \code blaze::StaticVector<double,7UL,rowVector> x{ 1.0, 2.0, 3.0, 4.0, 1.0, * 2.0, 3.0 }; blaze::StaticVector<double,7UL,rowVector> y; * * // Evaluating the softmax function y = softmax( x ); // Results in ( * 0.024 0.064 0.175 0.475 0.024 0.064 0.175 ) double s = sum( y ); // * Results in 1 \endcode * * // \n \subsection vector_operators_abs abs() // // The \c abs() function can * be used to compute the absolute values of each element of a vector. // For * instance, the following computation * * \code blaze::StaticVector<int,3UL,rowVector> a{ -1, 2, -3 }; * blaze::StaticVector<int,3UL,rowVector> b( abs( a ) ); \endcode * * // results in the vector * * \f$ b = \left(\begin{array}{*{1}{c}} 1 \\ 2 \\ 3 \\ \end{array}\right)\f$ * * // \n \subsection vector_operators_sign sign() // // The \c sign() function * can be used to evaluate the sign of each element of a vector \a a. For // * each element \c i the corresponding result is 1 if \a a[i] is greater than * zero, 0 if \a a[i] // is zero, and -1 if \a a[i] is less than zero. For * instance, the following use of the \c sign() // function * * \code blaze::StaticVector<int,3UL,rowVector> a{ -1, 2, 0 }; * blaze::StaticVector<int,3UL,rowVector> b( sign( a ) ); \endcode * * // results in the vector * * \f$ b = \left(\begin{array}{*{1}{c}} -1 \\ 1 \\ 0 \\ \end{array}\right)\f$ * * // \n \subsection vector_operations_rounding_functions floor() / ceil() / * trunc() / round() // // The \c floor(), \c ceil(), \c trunc(), and \c * round() functions can be used to round down/up // each element of a * vector, respectively: * * \code blaze::StaticVector<double,3UL,rowVector> a, b; * * b = floor( a ); // Rounding down each element of the vector b = ceil ( a ); * // Rounding up each element of the vector b = trunc( a ); // Truncating * each element of the vector b = round( a ); // Rounding each element of * the vector \endcode * * // \n \subsection vector_operators_conj conj() // // The \c conj() function * can be applied on a dense or sparse vector to compute the complex // * conjugate of each element of the vector: * * \code using blaze::StaticVector; * * using cplx = std::complex<double>; * * // Creating the vector // ( (-2,-1) ) // ( ( 1, 1) ) * StaticVector<cplx,2UL> a{ cplx(-2.0,-1.0), cplx(1.0,1.0) }; * * // Computing the vector of complex conjugates // ( (-2, 1) ) // ( ( * 1,-1) ) StaticVector<cplx,2UL> b; b = conj( a ); \endcode * * // Additionally, vectors can be conjugated in-place via the \c conjugate() * function: * * \code blaze::DynamicVector<cplx> c( 5UL ); * * conjugate( c ); // In-place conjugate operation. 
   c = conj( c );  // Same as above
   \endcode

// \n \subsection vector_operators_real real()
//
// The \c real() function can be used on a dense or sparse vector to extract the real part of
// each element of the vector:

   \code
   using blaze::StaticVector;

   using cplx = std::complex<double>;

   // Creating the vector
   //    ( (-2,-1) )
   //    ( ( 1, 1) )
   StaticVector<cplx,2UL> a{ cplx(-2.0,-1.0), cplx(1.0,1.0) };

   // Extracting the real part of each vector element
   //    ( -2 )
   //    (  1 )
   StaticVector<double,2UL> b;
   b = real( a );
   \endcode

// \n \subsection vector_operators_imag imag()
//
// The \c imag() function can be used on a dense or sparse vector to extract the imaginary part
// of each element of the vector:

   \code
   using blaze::StaticVector;

   using cplx = std::complex<double>;

   // Creating the vector
   //    ( (-2,-1) )
   //    ( ( 1, 1) )
   StaticVector<cplx,2UL> a{ cplx(-2.0,-1.0), cplx(1.0,1.0) };

   // Extracting the imaginary part of each vector element
   //    ( -1 )
   //    (  1 )
   StaticVector<double,2UL> b;
   b = imag( a );
   \endcode

// \n \subsection vector_operations_sqrt sqrt() / invsqrt()
//
// Via the \c sqrt() and \c invsqrt() functions the (inverse) square root of each element of a
// vector can be computed:

   \code
   blaze::DynamicVector<double> a, b, c;

   b = sqrt( a );     // Computes the square root of each element
   c = invsqrt( a );  // Computes the inverse square root of each element
   \endcode

// Note that in case of sparse vectors only the non-zero elements are taken into account!
//
//
// \n \subsection vector_operations_cbrt cbrt() / invcbrt()
//
// The \c cbrt() and \c invcbrt() functions can be used to compute the (inverse) cubic root
// of each element of a vector:

   \code
   blaze::HybridVector<double,3UL> a, b, c;

   b = cbrt( a );     // Computes the cubic root of each element
   c = invcbrt( a );  // Computes the inverse cubic root of each element
   \endcode

// Note that in case of sparse vectors only the non-zero elements are taken into account!
//
//
// \n \subsection vector_operations_hypot hypot()
//
// The \c hypot() function can be used to compute the componentwise hypotenuse for a pair of
// dense vectors:

   \code
   blaze::StaticVector<double,3UL> a, b, c;

   c = hypot( a, b );  // Computes the componentwise hypotenuse
   \endcode

// \n \subsection vector_operations_clamp clamp()
//
// The \c clamp() function can be used to restrict all elements of a vector to a specific range:

   \code
   blaze::DynamicVector<double> a, b;

   b = clamp( a, -1.0, 1.0 );  // Restrict all elements to the range [-1..1]
   \endcode

// Note that in case of sparse vectors only the non-zero elements are taken into account!
//
//
// \n \subsection vector_operations_pow pow()
//
// The \c pow() function can be used to compute the exponential value of each element of a vector.
// If passed a vector and a numeric exponent, the function computes the exponential value of each
// element of the vector using the same exponent.
If passed a second vector, the function computes * // the componentwise exponential value: * * \code blaze::StaticVector<double,3UL> a, b, c; * * c = pow( a, 1.2 ); // Computes the exponential value of each element c = * pow( a, b ); // Computes the componentwise exponential value \endcode * * // \n \subsection vector_operations_exp exp() / exp2() / exp10() // // \c * exp(), \c exp2() and \c exp10() compute the base e/2/10 exponential of * each element of a // vector, respectively: * * \code blaze::DynamicVector<double> a, b; * * b = exp( a ); // Computes the base e exponential of each element b = exp2( * a ); // Computes the base 2 exponential of each element b = exp10( a ); * // Computes the base 10 exponential of each element \endcode * * // Note that in case of sparse vectors only the non-zero elements are taken * into account! // // // \n \subsection vector_operations_log log() / log2() * / log10() // // The \c log(), \c log2() and \c log10() functions can be * used to compute the natural, binary // and common logarithm of each * element of a vector: * * \code blaze::StaticVector<double,3UL> a, b; * * b = log( a ); // Computes the natural logarithm of each element b = log2( * a ); // Computes the binary logarithm of each element b = log10( a ); * // Computes the common logarithm of each element \endcode * * // \n \subsection vector_operations_trigonometric_functions sin() / cos() / * tan() / asin() / acos() / atan() // // The following trigonometric * functions are available for both dense and sparse vectors: * * \code blaze::DynamicVector<double> a, b; * * b = sin( a ); // Computes the sine of each element of the vector b = cos( a * ); // Computes the cosine of each element of the vector b = tan( a ); // * Computes the tangent of each element of the vector * * b = asin( a ); // Computes the inverse sine of each element of the vector b * = acos( a ); // Computes the inverse cosine of each element of the vector * b = atan( a ); // Computes the inverse tangent of each element of the * vector \endcode * * // Note that in case of sparse vectors only the non-zero elements are taken * into account! // // // \n \subsection * vector_operations_hyperbolic_functions sinh() / cosh() / tanh() / asinh() * / acosh() / atanh() // // The following hyperbolic functions are available * for both dense and sparse vectors: * * \code blaze::DynamicVector<double> a, b; * * b = sinh( a ); // Computes the hyperbolic sine of each element of the vector * b = cosh( a ); // Computes the hyperbolic cosine of each element of the * vector b = tanh( a ); // Computes the hyperbolic tangent of each element * of the vector * * b = asinh( a ); // Computes the inverse hyperbolic sine of each element of * the vector b = acosh( a ); // Computes the inverse hyperbolic cosine of * each element of the vector b = atanh( a ); // Computes the inverse * hyperbolic tangent of each element of the vector \endcode * * // Note that in case of sparse vectors only the non-zero elements are taken * into account! 
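 *
 * // A brief hypothetical sketch of what the note above means in practice (an illustration
 * // added for clarity, assuming a \c CompressedVector<double>): applying one of these
 * // element-wise functions to a sparse vector only evaluates the stored non-zero elements,
 * // so the implicit zeros remain zero instead of being mapped to, for instance, cos(0) = 1.
 *
 * \code
 * blaze::CompressedVector<double> a( 5UL );
 * a[2] = 1.0;        // Only a single stored element
 *
 * blaze::CompressedVector<double> b;
 * b = cos( a );      // b stores cos(1.0) at index 2; all other elements remain implicit zeros
 * \endcode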
// // // \n \subsection vector_operations_atan2 atan2() // * // The multi-valued inverse tangent is available for a pair of dense * vectors: * * \code blaze::DynamicVector<double> a, b, c; * * c = atan2( a, b ); // Computes the componentwise multi-valued inverse * tangent \endcode * * // \n \subsection vector_operations_erf erf() / erfc() // // The \c erf() and * \c erfc() functions compute the (complementary) error function of each // * element of a vector: * * \code blaze::StaticVector<double,3UL,rowVector> a, b; * * b = erf( a ); // Computes the error function of each element b = erfc( a ); * // Computes the complementary error function of each element \endcode * * // Note that in case of sparse vectors only the non-zero elements are taken * into account! // // // \n \subsection vector_operations_map map() / * forEach() // // Via the unary and binary \c map() functions it is possible * to execute componentwise custom // operations on vectors. The unary \c * map() function can be used to apply a custom operation // on each element * of a dense or sparse vector. For instance, the following example * demonstrates // a custom square root computation via a lambda: * * \code blaze::DynamicVector<double> a, b; * * b = map( a, []( double d ) { return std::sqrt( d ); } ); \endcode * * // The binary \c map() function can be used to apply an operation pairwise to * the elements of // two dense vectors. The following example demonstrates * the merging of two vectors of double // precision values into a vector of * double precision complex numbers: * * \code blaze::DynamicVector<double> real{ 2.1, -4.2, 1.0, 0.6 }; * blaze::DynamicVector<double> imag{ 0.3, 1.4, 2.9, -3.4 }; * * blaze::DynamicVector< complex<double> > cplx; * * // Creating the vector // ( (-2.1, 0.3) ) // ( (-4.2, -1.4) ) // ( * ( 1.0, 2.9) ) // ( ( 0.6, -3.4) ) cplx = map( real, imag, []( double * r, double i ){ return complex( r, i ); } ); \endcode * * // Although the computation can be parallelized it is not vectorized and thus * cannot perform at // peak performance. However, it is also possible to * create vectorized custom operations. See // \ref custom_operations for a * detailed overview of the possibilities of custom operations. // // Please * note that unary custom operations on vectors have been introduced in \b * Blaze 3.0 in // form of the \c forEach() function. With the introduction * of binary custom functions, the // \c forEach() function has been renamed * to \c map(). The \c forEach() function can still be // used (even for * binary custom operations), but the function might be deprecated in future * // releases of \b Blaze. // // // \n \section * vector_operations_reduction_operations Reduction Operations // <hr> // // * \subsection vector_operations_reduction_operations_reduce reduce() // // * The \c reduce() function performs a total reduction of the elements of the * given dense vector // or the non-zero elements of the given sparse vector. * The following examples demonstrate the // total reduction of a dense and * sparse vector: * * \code blaze::DynamicVector<double> a; // ... Resizing and initialization * * const double totalsum1 = reduce( a, blaze::Add() ); const double totalsum2 = * reduce( a, []( double a, double b ){ return a + b; } ); \endcode * * \code blaze::CompressedVector<double> a; // ... 
Resizing and initialization * * const double totalmin1 = reduce( a, blaze::Min() ); const double totalmin2 = * reduce( a, []( double a, double b ){ return blaze::min( a, b ); } ); * \endcode * * // As demonstrated in the examples it is possible to pass any binary callable * as custom reduction // operation. However, for instance in the case of * lambdas the vectorization of the reduction // operation is compiler * dependent and might not perform at peak performance. However, it is also * // possible to create vectorized custom operations. See \ref * custom_operations for a detailed // overview of the possibilities of * custom operations. // // Please note that the evaluation order of the \c * reduce() function is unspecified. Thus the // behavior is * non-deterministic if the given reduction operation is not associative or * not // commutative. Also, the operation is undefined if the given * reduction operation modifies the // values. // // \n \subsection * vector_operations_reduction_operations_sum sum() // // The \c sum() * function reduces the elements of the given dense vector or the non-zero * elements // of the given sparse vector by means of addition: * * \code blaze::DynamicVector<int> a{ 1, 2, 3, 4 }; * * const int totalsum = sum( a ); // Results in 10 \endcode * * \code blaze::CompressedVector<int> a{ 1, 2, 3, 4 }; * * const int totalsum = sum( a ); // Results in 10 \endcode * * // Please note that the evaluation order of the \c sum() function is * unspecified. // // \n \subsection * vector_operations_reduction_operations_prod prod() // // The \c prod() * function reduces the elements of the given dense vector or the non-zero * elements // of the given sparse vector by means of multiplication: * * \code blaze::DynamicVector<int> a{ 1, 2, 3, 4 }; * * const int totalprod = prod( a ); // Results in 24 \endcode * * \code blaze::CompressedVector<int> a{ 1, 2, 3, 4 }; * * const int totalprod = prod( a ); // Results in 24 \endcode * * // \n \subsection vector_operations_reduction_operations_min min() // // The * unary \c min() function returns the smallest element of the given dense * vector or the // smallest non-zero element of the given sparse vector. It * can only be used for element types // that support the smaller-than * relationship. In case the given vector currently has a size // of 0, the * returned value is the default value (e.g. 0 in case of fundamental data * types). * * \code blaze::DynamicVector<int> a{ 1, -2, 3, 0 }; * * const int totalmin = min( a ); // Results in -2 \endcode * * \code blaze::CompressedVector<int> a{ 1, 0, 3, 0 }; * * const int totalmin = min( a ); // Results in 1 \endcode * * // \note In case the sparse vector is not completely filled, the implicit * zero elements are NOT // taken into account. In the previous example the * compressed vector has only 2 non-zero elements. // However, the minimum of * the vector is 1. // // \n \subsection * vector_operations_reduction_operations_max max() // // The unary \c max() * function returns the largest element of the given dense vector or the // * largest non-zero element of the given sparse vector. It can only be used * for element types // that support the smaller-than relationship. In case * the given vector currently has a size // of 0, the returned value is the * default value (e.g. 0 in case of fundamental data types). 
 * * \code blaze::DynamicVector<int> a{ 1, -2, 3, 0 }; * * const int totalmax = max( a ); // Results in 3 \endcode * * \code blaze::CompressedVector<int> a{ -1, 0, -3, 0 }; * * const int totalmax = max( a ); // Results in -1 \endcode * * // \note In case the sparse vector is not completely filled, the implicit * zero elements are NOT // taken into account. In the previous example the * compressed vector has only 2 non-zero elements. // However, the maximum of * the vector is -1. // // // \n \section vector_operations_norms Norms // * <hr> // // \subsection vector_operations_norms_norm norm() // // The \c * norm() function computes the L2 norm of the given dense or sparse vector: * * \code blaze::DynamicVector<double> a; // ... Resizing and initialization * const double l2 = norm( a ); \endcode * * // \n \subsection vector_operations_norms_sqrnorm sqrNorm() // // The \c * sqrNorm() function computes the squared L2 norm of the given dense or * sparse vector: * * \code blaze::DynamicVector<double> a; // ... Resizing and initialization * const double l2 = sqrNorm( a ); \endcode * * // \n \subsection vector_operations_norms_l1norm l1Norm() // // The \c * l1Norm() function computes the L1 norm of the given dense or * sparse vector: * * \code blaze::DynamicVector<double> a; // ... Resizing and initialization * const double l1 = l1Norm( a ); \endcode * * // \n \subsection vector_operations_norms_l2norm l2Norm() // // The \c * l2Norm() function computes the L2 norm of the given dense or * sparse vector: * * \code blaze::DynamicVector<double> a; // ... Resizing and initialization * const double l2 = l2Norm( a ); \endcode * * // \n \subsection vector_operations_norms_l3norm l3Norm() // // The \c * l3Norm() function computes the L3 norm of the given dense or * sparse vector: * * \code blaze::DynamicVector<double> a; // ... Resizing and initialization * const double l3 = l3Norm( a ); \endcode * * // \n \subsection vector_operations_norms_l4norm l4Norm() // // The \c * l4Norm() function computes the L4 norm of the given dense or * sparse vector: * * \code blaze::DynamicVector<double> a; // ... Resizing and initialization * const double l4 = l4Norm( a ); \endcode * * // \n \subsection vector_operations_norms_lpnorm lpNorm() // // The \c * lpNorm() function computes the general Lp norm of the given dense or * sparse vector, // where the norm is specified by either a compile time or * a runtime argument: * * \code blaze::DynamicVector<double> a; // ... Resizing and initialization * const double lp1 = lpNorm<2>( a ); // Compile time argument const * double lp2 = lpNorm( a, 2.3 ); // Runtime argument \endcode * * // \n \subsection vector_operations_norms_maxnorm maxNorm() // // The \c * maxNorm() function computes the maximum norm of the given dense or sparse * vector: * * \code blaze::DynamicVector<double> a; // ... 
Resizing and initialization * const double max = maxNorm( a ); \endcode * * // \n Previous: \ref vector_types &nbsp; &nbsp; Next: \ref matrices */ //************************************************************************************************* //**Matrices *************************************************************************************** /* * !\page matrices Matrices // // \tableofcontents // // // \n \section * matrices_general General Concepts // <hr> // // The \b Blaze library * currently offers four dense matrix types (\ref matrix_types_static_matrix, * // \ref matrix_types_dynamic_matrix, \ref matrix_types_hybrid_matrix, and * \ref matrix_types_custom_matrix) // and one sparse matrix type (\ref * matrix_types_compressed_matrix). All matrices can either be // stored as * row-major matrices or column-major matrices: * * \code using blaze::DynamicMatrix; using blaze::rowMajor; using * blaze::columnMajor; * * // Setup of the 2x3 row-major dense matrix // // ( 1 2 3 ) // ( 4 5 * 6 ) // DynamicMatrix<int,rowMajor> A{ { 1, 2, 3 }, { 4, 5, 6 } }; * * // Setup of the 3x2 column-major dense matrix // // ( 1 4 ) // ( 2 5 * ) // ( 3 6 ) // DynamicMatrix<int,columnMajor> B{ { 1, 4 }, { 2, 5 }, * { 3, 6 } }; \endcode * * // Per default, all matrices in \b Blaze are row-major matrices: * * \code // Instantiation of a 3x3 row-major matrix blaze::DynamicMatrix<int> C( * 3UL, 3UL ); \endcode * * // \n \section matrices_details Matrix Details // <hr> // // - \ref * matrix_types // - \ref matrix_operations // // // \n \section * matrices_examples Examples // <hr> * * \code using blaze::StaticMatrix; using blaze::DynamicMatrix; using * blaze::CompressedMatrix; using blaze::rowMajor; using blaze::columnMajor; * * StaticMatrix<double,6UL,20UL> A; // Instantiation of a 6x20 row-major * static matrix CompressedMatrix<double,rowMajor> B; // Instantiation of a * row-major compressed matrix DynamicMatrix<double,columnMajor> C; // * Instantiation of a column-major dynamic matrix * * // ... Resizing and initialization * * C = A * B; \endcode * * // \n Previous: \ref vector_operations &nbsp; &nbsp; Next: \ref matrix_types */ //************************************************************************************************* //**Matrix Types *********************************************************************************** /* * !\page matrix_types Matrix Types // // \tableofcontents // // // \n * \section matrix_types_static_matrix StaticMatrix // <hr> // // The * blaze::StaticMatrix class template is the representation of a fixed size * matrix with // statically allocated elements of arbitrary type. It can be * included via the header file * * \code #include <blaze/math/StaticMatrix.h> \endcode * * // The type of the elements, the number of rows and columns, and the storage * order of the matrix // can be specified via the four template parameters: * * \code template< typename Type, size_t M, size_t N, bool SO > class * StaticMatrix; \endcode * * // - \c Type: specifies the type of the matrix elements. StaticMatrix can be * used with any // non-cv-qualified, non-reference element type. * // - \c M : specifies the total number of rows of the matrix. // - \c * N : specifies the total number of columns of the matrix. Note that it is * expected // that StaticMatrix is only used for tiny and small * matrices. // - \c SO : specifies the storage order (blaze::rowMajor, * blaze::columnMajor) of the matrix. // The default value is * blaze::rowMajor. 
// // The blaze::StaticMatrix is perfectly suited for * small to medium matrices whose dimensions are // known at compile time: * * \code // Definition of a 3x4 integral row-major matrix * blaze::StaticMatrix<int,3UL,4UL> A; * * // Definition of a 4x6 single precision row-major matrix * blaze::StaticMatrix<float,4UL,6UL,blaze::rowMajor> B; * * // Definition of a 6x4 double precision column-major matrix * blaze::StaticMatrix<double,6UL,4UL,blaze::columnMajor> C; \endcode * * // \n \section matrix_types_dynamic_matrix DynamicMatrix // <hr> // // The * blaze::DynamicMatrix class template is the representation of an arbitrary * sized matrix // with \f$ M \cdot N \f$ dynamically allocated elements of * arbitrary type. It can be included // via the header file * * \code #include <blaze/math/DynamicMatrix.h> \endcode * * // The type of the elements and the storage order of the matrix can be * specified via the two // template parameters: * * \code template< typename Type, bool SO > class DynamicMatrix; \endcode * * // - \c Type: specifies the type of the matrix elements. DynamicMatrix can * be used with any // non-cv-qualified, non-reference element * type. // - \c SO : specifies the storage order (blaze::rowMajor, * blaze::columnMajor) of the matrix. // The default value is * blaze::rowMajor. // // The blaze::DynamicMatrix is the default choice for * all kinds of dense matrices and the best // choice for medium to large * matrices. The number of rows and columns can be modified at runtime: * * \code // Definition of a 3x4 integral row-major matrix * blaze::DynamicMatrix<int> A( 3UL, 4UL ); * * // Definition of a 4x6 single precision row-major matrix * blaze::DynamicMatrix<float,blaze::rowMajor> B( 4UL, 6UL ); * * // Definition of a double precision column-major matrix with 0 rows and * columns blaze::DynamicMatrix<double,blaze::columnMajor> C; \endcode * * // \n \section matrix_types_hybrid_matrix HybridMatrix // <hr> // // The * HybridMatrix class template combines the flexibility of a dynamically * sized matrix with // the efficiency and performance of a fixed size * matrix. It is implemented as a crossing between // the blaze::StaticMatrix * and the blaze::DynamicMatrix class templates: Similar to the static // * matrix it uses static stack memory instead of dynamically allocated memory * and similar to the // dynamic matrix it can be resized (within the extend * of the static memory). It can be included // via the header file * * \code #include <blaze/math/HybridMatrix.h> \endcode * * // The type of the elements, the maximum number of rows and columns and the * storage order of the // matrix can be specified via the four template * parameters: * * \code template< typename Type, size_t M, size_t N, bool SO > class * HybridMatrix; \endcode * * // - Type: specifies the type of the matrix elements. HybridMatrix can be * used with any // non-cv-qualified, non-reference, non-pointer * element type. // - M : specifies the maximum number of rows of the * matrix. // - N : specifies the maximum number of columns of the matrix. * Note that it is expected // that HybridMatrix is only used for * tiny and small matrices. // - SO : specifies the storage order * (blaze::rowMajor, blaze::columnMajor) of the matrix. // The * default value is blaze::rowMajor. 
// // The blaze::HybridMatrix is a * suitable choice for small to medium matrices, whose dimensions // are not * known at compile time or not fixed at runtime, but whose maximum * dimensions are known // at compile time: * * \code // Definition of a 3x4 integral row-major matrix with maximum * dimensions of 6x8 blaze::HybridMatrix<int,6UL,8UL> A( 3UL, 4UL ); * * // Definition of a 4x6 single precision row-major matrix with maximum * dimensions of 12x16 blaze::HybridMatrix<float,12UL,16UL,blaze::rowMajor> * B( 4UL, 6UL ); * * // Definition of a 0x0 double precision column-major matrix with maximum * dimensions of 6x6 blaze::HybridMatrix<double,6UL,6UL,blaze::columnMajor> * C; \endcode * * // \n \section matrix_types_custom_matrix CustomMatrix // <hr> // // The * blaze::CustomMatrix class template provides the functionality to represent * an external // array of elements of arbitrary type and a fixed size as a * native \b Blaze dense matrix data // structure. Thus in contrast to all * other dense matrix types a custom matrix does not perform // any kind of * memory allocation by itself, but it is provided with an existing array of * elements // during construction. A custom matrix can therefore be * considered an alias to the existing // array. It can be included via the * header file * * \code #include <blaze/math/CustomMatrix.h> \endcode * * // The type of the elements, the properties of the given array of elements * and the storage order // of the matrix can be specified via the following * four template parameters: * * \code template< typename Type, bool AF, bool PF, bool SO > class * CustomMatrix; \endcode * * // - Type: specifies the type of the matrix elements. blaze::CustomMatrix * can be used with // any non-cv-qualified, non-reference, * non-pointer element type. // - AF : specifies whether the represented, * external arrays are properly aligned with // respect to the * available instruction set (SSE, AVX, ...) or not. // - PF : specifies * whether the represented, external arrays are properly padded with // * respect to the available instruction set (SSE, AVX, ...) or not. // - SO * : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the * matrix. // The default value is blaze::rowMajor. 
// // The * blaze::CustomMatrix is the right choice if any external array needs to be * represented as // a \b Blaze dense matrix data structure or if a custom * memory allocation strategy needs to be // realized: * * \code using blaze::CustomMatrix; using blaze::Deallocate; using * blaze::aligned; using blaze::unaligned; using blaze::padded; using * blaze::unpadded; * * // Definition of an unmanaged 3x4 custom matrix for unaligned, unpadded * integer arrays using UnalignedUnpadded = * CustomMatrix<int,unaligned,unpadded,rowMajor>; std::vector<int> vec( 12UL * ) UnalignedUnpadded A( &vec[0], 3UL, 4UL ); * * // Definition of a managed 5x6 custom matrix for unaligned but padded 'float' * arrays using UnalignedPadded = * CustomMatrix<float,unaligned,padded,columnMajor>; std::unique_ptr<float[]> * memory1( new float[40] ); UnalignedPadded B( memory1.get(), 5UL, 6UL, 8UL * ); * * // Definition of a managed 12x13 custom matrix for aligned, unpadded 'double' * arrays using AlignedUnpadded = * CustomMatrix<double,aligned,unpadded,rowMajor>; * std::unique_ptr<double[],Deallocate> memory2( blaze::allocate<double>( * 192UL ) ); AlignedUnpadded C( memory2.get(), 12UL, 13UL, 16UL ); * * // Definition of a 7x14 custom matrix for aligned, padded 'complex<double>' * arrays using cplx = complex<double>; using AlignedPadded = * CustomMatrix<cplx,aligned,padded,columnMajor>; * std::unique_ptr<cplx[],Deallocate> memory3( blaze::allocate<cplx>( 112UL ) * ); AlignedPadded D( memory3.get(), 7UL, 14UL, 16UL ); \endcode * * // In comparison with the remaining \b Blaze dense matrix types * blaze::CustomMatrix has several // special characteristics. All of these * result from the fact that a custom matrix is not // performing any kind of * memory allocation, but instead is given an existing array of elements. // * The following sections discuss all of these characteristics: // // -# * <b>\ref matrix_types_custom_matrix_memory_management</b> // -# <b>\ref * matrix_types_custom_matrix_copy_operations</b> // -# <b>\ref * matrix_types_custom_matrix_alignment</b> // -# <b>\ref * matrix_types_custom_matrix_padding</b> // // \n \subsection * matrix_types_custom_matrix_memory_management Memory Management // // The * blaze::CustomMatrix class template acts as an adaptor for an existing * array of elements. As // such it provides everything that is required to * use the array just like a native \b Blaze dense // matrix data structure. * However, this flexibility comes with the price that the user of a custom * // matrix is responsible for the resource management. // // The following * examples give an impression of several possible types of custom matrices: * * \code using blaze::CustomMatrix; using blaze::Deallocate; using * blaze::allocate; using blaze::aligned; using blaze::unaligned; using * blaze::padded; using blaze::unpadded; * * // Definition of a 3x4 custom row-major matrix with unaligned, unpadded and * externally // managed integer array. Note that the std::vector must be * guaranteed to outlive the // custom matrix! std::vector<int> vec( 12UL ); * CustomMatrix<int,unaligned,unpadded> A( &vec[0], 3UL, 4UL ); * * // Definition of a custom 8x12 matrix for an aligned and padded integer array * of // capacity 128 (including 8 padding elements per row). Note that the * std::unique_ptr // must be guaranteed to outlive the custom matrix! 
 * std::unique_ptr<int[],Deallocate> memory( allocate<int>( 128UL ) ); * CustomMatrix<int,aligned,padded> B( memory.get(), 8UL, 12UL, 16UL ); * \endcode * * // \n \subsection matrix_types_custom_matrix_copy_operations Copy Operations * // // As with all dense matrices it is possible to copy construct a custom * matrix: * * \code using blaze::CustomMatrix; using blaze::unaligned; using * blaze::unpadded; * * using CustomType = CustomMatrix<int,unaligned,unpadded>; * * std::vector<int> vec( 6UL, 10 ); // Vector of 6 integers of the value 10 * CustomType A( &vec[0], 2UL, 3UL ); // Represent the std::vector as Blaze * dense matrix A(0,1) = 20; // Also modifies the * std::vector * * CustomType B( A ); // Creating a copy of matrix A B(0,2) = 20; // * Also affects matrix A and the std::vector \endcode * * // It is important to note that a custom matrix acts as a reference to the * specified array. Thus // the result of the copy constructor is a new * custom matrix that is referencing and representing // the same array as * the original custom matrix. // // In contrast to copy construction, just * as with references, copy assignment does not change // which array is * referenced by the custom matrices, but modifies the values of the array: * * \code std::vector<int> vec2( 6UL, 4 ); // Vector of 6 integers of the * value 4 CustomType C( &vec2[0], 2UL, 3UL ); // Represent the std::vector * as Blaze dense matrix * * A = C; // Copy assignment: Set all values of matrix A and B to 4. \endcode * * // \n \subsection matrix_types_custom_matrix_alignment Alignment // // In * case the custom matrix is specified as \c aligned the passed array must * adhere to some // alignment restrictions based on the alignment * requirements of the used data type and the // used instruction set (SSE, * AVX, ...). The restriction applies to the first element of each // * row/column: In case of a row-major matrix the first element of each row * must be properly // aligned, in case of a column-major matrix the first * element of each column must be properly // aligned. For instance, if a * row-major matrix is used and AVX is active the first element of // each * row must be 32-byte aligned: * * \code using blaze::CustomMatrix; using blaze::Deallocate; using * blaze::allocate; using blaze::aligned; using blaze::padded; using * blaze::rowMajor; * * // Allocation of 32-byte aligned memory std::unique_ptr<int[],Deallocate> * memory( allocate<int>( 40UL ) ); * * CustomMatrix<int,aligned,padded,rowMajor> A( memory.get(), 5UL, 6UL, 8UL ); * \endcode * * // In the example, the row-major matrix has six columns. However, since with * AVX eight integer // values are loaded together the matrix is padded with * two additional elements. This guarantees // that the first element of each * row is 32-byte aligned. In case the alignment requirements are // violated, * a \c std::invalid_argument exception is thrown. // // \n \subsection * matrix_types_custom_matrix_padding Padding // // Adding padding elements * to the end of each row/column can have a significant impact on the // * performance. 
For instance, assuming that AVX is available, then two * aligned, padded, 3x3 double // precision matrices can be added via three * SIMD addition operations: * * \code using blaze::CustomMatrix; using blaze::Deallocate; using * blaze::allocate; using blaze::aligned; using blaze::padded; * * using CustomType = CustomMatrix<double,aligned,padded>; * * std::unique_ptr<double[],Deallocate> memory1( allocate<double>( 12UL ) ); * std::unique_ptr<double[],Deallocate> memory2( allocate<double>( 12UL ) ); * std::unique_ptr<double[],Deallocate> memory3( allocate<double>( 12UL ) ); * * // Creating padded custom 3x3 matrix with an additional padding element in * each row CustomType A( memory1.get(), 3UL, 3UL, 4UL ); CustomType B( * memory2.get(), 3UL, 3UL, 4UL ); CustomType C( memory3.get(), 3UL, 3UL, 4UL * ); * * // ... Initialization * * C = A + B; // AVX-based matrix addition \endcode * * // In this example, maximum performance is possible. However, in case no * padding elements are // inserted a scalar addition has to be used: * * \code using blaze::CustomMatrix; using blaze::Deallocate; using * blaze::allocate; using blaze::aligned; using blaze::unpadded; * * using CustomType = CustomMatrix<double,aligned,unpadded>; * * std::unique_ptr<double[],Deallocate> memory1( allocate<double>( 9UL ) ); * std::unique_ptr<double[],Deallocate> memory2( allocate<double>( 9UL ) ); * std::unique_ptr<double[],Deallocate> memory3( allocate<double>( 9UL ) ); * * // Creating unpadded custom 3x3 matrix CustomType A( memory1.get(), 3UL, 3UL * ); CustomType B( memory2.get(), 3UL, 3UL ); CustomType C( memory3.get(), * 3UL, 3UL ); * * // ... Initialization * * C = A + B; // Scalar matrix addition \endcode * * // Note that the construction of padded and unpadded aligned matrices looks * identical. However, // in case of padded matrices, \b Blaze will zero * initialize the padding elements and use them // in all computations in * order to achieve maximum performance. In case of an unpadded matrix // \b * Blaze will ignore the elements with the downside that it is not possible * to load a complete // row to an AVX register, which makes it necessary to * fall back to a scalar addition. // // The number of padding elements is * required to be sufficient with respect to the available // instruction * set: In case of an aligned padded custom matrix the added padding elements * must // guarantee that the total number of elements in each row/column is * a multiple of the SIMD // vector width. In case of an unaligned padded * matrix the number of padding elements can be // greater than or equal to the * number of padding elements of an aligned padded custom matrix. In case // * the padding is insufficient with respect to the available instruction set, * a // \c std::invalid_argument exception is thrown. // // // \n \section * matrix_types_compressed_matrix CompressedMatrix // <hr> // // The * blaze::CompressedMatrix class template is the representation of an * arbitrary sized sparse // matrix with \f$ M \cdot N \f$ dynamically * allocated elements of arbitrary type. It can be // included via the header * file * * \code #include <blaze/math/CompressedMatrix.h> \endcode * * // The type of the elements and the storage order of the matrix can be * specified via the two // template parameters: * * \code template< typename Type, bool SO > class CompressedMatrix; \endcode * * // - \c Type: specifies the type of the matrix elements. CompressedMatrix * can be used with // any non-cv-qualified, non-reference, * non-pointer element type. 
// - \c SO : specifies the storage order * (blaze::rowMajor, blaze::columnMajor) of the matrix. // The * default value is blaze::rowMajor. // // The blaze::CompressedMatrix is the * right choice for all kinds of sparse matrices: * * \code // Definition of a 3x4 integral row-major matrix * blaze::CompressedMatrix<int> A( 3UL, 4UL ); * * // Definition of a 4x6 single precision row-major matrix * blaze::CompressedMatrix<float,blaze::rowMajor> B( 4UL, 6UL ); * * // Definition of a double precision column-major matrix with 0 rows and * columns blaze::CompressedMatrix<double,blaze::columnMajor> C; \endcode * * // \n \section matrix_types_identity_matrix IdentityMatrix // <hr> // // The * blaze::IdentityMatrix class template is the representation of an * immutable, arbitrary // sized identity matrix with \f$ N \cdot N \f$ * elements of arbitrary type. It can be included // via the header file * * \code #include <blaze/math/IdentityMatrix.h> \endcode * * // The type of the elements and the storage order of the matrix can be * specified via the two // template parameters: * * \code template< typename Type, bool SO > class IdentityMatrix; \endcode * * // - Type: specifies the type of the matrix elements. IdentityMatrix can be * used with any // non-cv-qualified, non-reference, non-pointer * element type. // - SO : specifies the storage order (blaze::rowMajor, * blaze::columnMajor) of the matrix. // The default value is * blaze::rowMajor. // // The blaze::IdentityMatrix is the perfect choice to * represent an identity matrix: * * \code // Definition of a 3x3 integral row-major identity matrix * blaze::IdentityMatrix<int> A( 3UL ); * * // Definition of a 6x6 single precision row-major identity matrix * blaze::IdentityMatrix<float,blaze::rowMajor> B( 6UL ); * * // Definition of a double precision column-major identity matrix with 0 rows * and columns blaze::IdentityMatrix<double,blaze::columnMajor> C; \endcode * * // \n Previous: \ref matrices &nbsp; &nbsp; Next: \ref matrix_operations */ //************************************************************************************************* //**Matrix Operations ****************************************************************************** /* * !\page matrix_operations Matrix Operations // // \tableofcontents // // // * \n \section matrix_operations_constructors Constructors // <hr> // // * Matrices are just as easy and intuitive to create as vectors. Still, there * are a few rules // to be aware of: // - In case the last template * parameter (the storage order) is omitted, the matrix is per // default * stored in row-major order. // - The elements of a \c StaticMatrix or \c * HybridMatrix are default initialized (i.e. built-in // data types are * initialized to 0, class types are initialized via the default * constructor). // - Newly allocated elements of a \c DynamicMatrix or \c * CompressedMatrix remain uninitialized // if they are of built-in type * and are default constructed if they are of class type. // // \n * \subsection matrix_operations_default_construction Default Construction * * \code using blaze::StaticMatrix; using blaze::DynamicMatrix; using * blaze::CompressedMatrix; * * // All matrices can be default constructed. Whereas the size of // a * StaticMatrix is fixed via the second and third template // parameter, the * initial size of a constructed DynamicMatrix // or CompressedMatrix is 0. * StaticMatrix<int,2UL,2UL> M1; // Instantiation of a 2x2 * integer row-major // matrix. All elements are initialized to 0. 
* DynamicMatrix<float> M2; // Instantiation of a single * precision dynamic // row-major matrix with 0 rows and 0 columns. * DynamicMatrix<double,columnMajor> M3; // Instantiation of a double * precision dynamic // column-major matrix with 0 rows and 0 columns. * CompressedMatrix<int> M4; // Instantiation of a compressed * integer // row-major matrix of size 0x0. * CompressedMatrix<double,columnMajor> M5; // Instantiation of a compressed * double precision // column-major matrix of size 0x0. \endcode * * // \n \subsection matrix_operations_size_construction Construction with * Specific Size // // The \c DynamicMatrix, \c HybridMatrix, and \c * CompressedMatrix classes offer a constructor // that allows to immediately * give the matrices a specific number of rows and columns: * * \code DynamicMatrix<int> M6( 5UL, 4UL ); // Instantiation * of a 5x4 dynamic row-major // matrix. The elements are not initialized. * HybridMatrix<double,5UL,9UL> M7( 3UL, 7UL ); // Instantiation of a * 3x7 hybrid row-major // matrix. The elements are not initialized. * CompressedMatrix<float,columnMajor> M8( 8UL, 6UL ); // Instantiation of * an empty 8x6 compressed // column-major matrix. \endcode * * // Note that dense matrices (in this case \c DynamicMatrix and \c * HybridMatrix) immediately // allocate enough capacity for all matrix * elements. Sparse matrices on the other hand (in this // example \c * CompressedMatrix) merely acquire the size, but don't necessarily allocate * memory. // // // \n \subsection * matrix_operations_initialization_constructors Initialization Constructors * // // All dense matrix classes offer a constructor for a direct, * homogeneous initialization of all // matrix elements. In contrast, for * sparse matrices the predicted number of non-zero elements // can be * specified. * * \code StaticMatrix<int,4UL,3UL,columnMajor> M9( 7 ); // Instantiation of a * 4x3 integer column-major // matrix. All elements are initialized to 7. * DynamicMatrix<float> M10( 2UL, 5UL, 2.0F ); // Instantiation of a 2x5 * single precision row-major // matrix. All elements are initialized to * 2.0F. CompressedMatrix<int> M11( 3UL, 4UL, 4 ); // Instantiation of * a 3x4 integer row-major // matrix with capacity for 4 non-zero elements. * \endcode * * // \n \subsection matrix_operations_array_construction Array Construction // * // Alternatively, all dense matrix classes offer a constructor for an * initialization with a // dynamic or static array. If the matrix is * initialized from a dynamic array, the constructor // expects the * dimensions of values provided by the array as first and second argument, * the // array as third argument. In case of a static array, the fixed size * of the array is used: * * \code const std::unique_ptr<double[]> array1( new double[6] ); // ... * Initialization of the dynamic array blaze::StaticMatrix<double,2UL,3UL> * M12( 2UL, 3UL, array1.get() ); * * int array2[2][2] = { { 4, -5 }, { -6, 7 } }; * blaze::StaticMatrix<int,2UL,2UL,rowMajor> M13( array2 ); \endcode * * // \n \subsection matrix_operations_initializer_list_construction // // In * addition, all dense and sparse matrix classes can be directly initialized * by means of an // initializer list: * * \code blaze::DynamicMatrix<float,columnMajor> M14{ { 3.1F, 6.4F }, { -0.9F, * -1.2F }, { 4.8F, 0.6F } }; blaze::CompressedMatrix<int,rowMajor> M15{ { * 3 }, { 1 }, { 0, 2 } }; \endcode * * // In case of sparse matrices, only the non-zero elements are used to * initialize the matrix. 
// Missing values are considered to be default * values. // // \n \subsection matrix_operations_copy_construction Copy * Construction // // All dense and sparse matrices can be created as a copy * of another dense or sparse matrix. * * \code StaticMatrix<int,5UL,4UL,rowMajor> M16( M6 ); // Instantiation of * the dense row-major matrix M16 // as copy of the dense row-major matrix * M6. DynamicMatrix<float,columnMajor> M17( M8 ); // Instantiation of * the dense column-major matrix M17 // as copy of the sparse column-major * matrix M8. CompressedMatrix<double,columnMajor> M18( M7 ); // * Instantiation of the compressed column-major matrix // M18 as copy of the * dense row-major matrix M7. CompressedMatrix<float,rowMajor> M19( M8 ); * // Instantiation of the compressed row-major matrix // M19 as copy of the * compressed column-major matrix M8. \endcode * * // Note that it is not possible to create a \c StaticMatrix as a copy of a * matrix with a different // number of rows and/or columns: * * \code StaticMatrix<int,4UL,5UL,rowMajor> M20( M6 ); // Runtime error: * Number of rows and columns // does not match! * StaticMatrix<int,4UL,4UL,columnMajor> M21( M9 ); // Compile time error: * Number of columns does // not match! \endcode * * // \n \section matrix_operations_assignment Assignment // <hr> // // There * are several types of assignment to dense and sparse matrices: // \ref * matrix_operations_homogeneous_assignment, \ref * matrix_operations_array_assignment, // \ref * matrix_operations_copy_assignment, and \ref * matrix_operations_compound_assignment. // // // \n \subsection * matrix_operations_homogeneous_assignment Homogeneous Assignment // // It * is possible to assign the same value to all elements of a dense matrix. * All dense matrix // classes provide an according assignment operator: * * \code blaze::StaticMatrix<int,3UL,2UL> M1; blaze::DynamicMatrix<double> M2; * * // Setting all integer elements of the StaticMatrix to 4 M1 = 4; * * // Setting all double precision elements of the DynamicMatrix to 3.5 M2 = 3.5 * \endcode * * // \n \subsection matrix_operations_array_assignment Array Assignment // // * Dense matrices can also be assigned a static array: * * \code blaze::StaticMatrix<int,2UL,2UL,rowMajor> M1; * blaze::StaticMatrix<int,2UL,2UL,columnMajor> M2; * blaze::DynamicMatrix<double> M3; * * int array1[2][2] = { { 1, 2 }, { 3, 4 } }; double array2[3][2] = { { 3.1, 6.4 * }, { -0.9, -1.2 }, { 4.8, 0.6 } }; * * M1 = array1; M2 = array1; M3 = array2; \endcode * * // Note that the dimensions of the static array have to match the size of a * \c StaticMatrix, // whereas a \c DynamicMatrix is resized according to the * array dimensions: * * \f$ M3 = \left(\begin{array}{*{2}{c}} 3.1 & 6.4 \\ -0.9 & -1.2 \\ 4.8 & 0.6 * \\ \end{array}\right)\f$ * * // \n \subsection matrix_operations_initializer_list_assignment Initializer * List Assignment // // Alternatively, it is possible to directly assign an * initializer list to a dense or sparse // matrix: * * \code blaze::DynamicMatrix<double> M1; blaze::CompressedMatrix<int> M2; * * M1 = { { 3.1, 6.4 }, { -0.9, -1.2 }, { 4.8, 0.6 } }; M2 = { { 1, 0 }, {}, { * 0, 1 }, { 2 } }; \endcode * * // In case of sparse matrices, only the non-zero elements are considered. * Missing values are // considered to be default values. // // \n * \subsection matrix_operations_copy_assignment Copy Assignment // // All * kinds of matrices can be assigned to each other. 
The only restriction is * that since a // \c StaticMatrix cannot change its size, the assigned * matrix must match both in the number of // rows and in the number of * columns. * * \code blaze::StaticMatrix<int,3UL,2UL,rowMajor> M1; * blaze::DynamicMatrix<int,rowMajor> M2( 3UL, 2UL ); * blaze::DynamicMatrix<float,rowMajor> M3( 5UL, 2UL ); * blaze::CompressedMatrix<int,rowMajor> M4( 3UL, 2UL ); * blaze::CompressedMatrix<float,columnMajor> M5( 3UL, 2UL ); * * // ... Initialization of the matrices * * M1 = M2; // OK: Assignment of a 3x2 dense row-major matrix to another 3x2 * dense row-major matrix M1 = M4; // OK: Assignment of a 3x2 sparse * row-major matrix to a 3x2 dense row-major matrix M1 = M3; // Runtime * error: Cannot assign a 5x2 matrix to a 3x2 static matrix M1 = M5; // OK: * Assignment of a 3x2 sparse column-major matrix to a 3x2 dense row-major * matrix \endcode * * // \n \subsection matrix_operations_compound_assignment Compound Assignment * // // Compound assignment is also available for matrices: addition * assignment, subtraction assignment, // and multiplication assignment. In * contrast to plain assignment, however, the number of rows // and columns * of the two operands have to match according to the arithmetic operation. * * \code blaze::StaticMatrix<int,2UL,3UL,rowMajor> M1; * blaze::DynamicMatrix<int,rowMajor> M2( 2UL, 3UL ); * blaze::CompressedMatrix<float,columnMajor> M3( 2UL, 3UL ); * blaze::CompressedMatrix<float,rowMajor> M4( 2UL, 4UL ); * blaze::StaticMatrix<float,2UL,4UL,rowMajor> M5; * blaze::CompressedMatrix<float,rowMajor> M6( 3UL, 2UL ); * * // ... Initialization of the matrices * * M1 += M2; // OK: Addition assignment between two row-major matrices of the * same dimensions M1 -= M3; // OK: Subtraction assignment between between a * row-major and a column-major matrix M1 += M4; // Runtime error: No * compound assignment between matrices of different size M1 -= M5; // * Compilation error: No compound assignment between matrices of different * size M2 *= M6; // OK: Multiplication assignment between two row-major * matrices \endcode * * // Note that the multiplication assignment potentially changes the number of * columns of the // target matrix: * * \f$\left(\begin{array}{*{3}{c}} 2 & 0 & 1 \\ 0 & 3 & 2 \\ \end{array}\right) * \times \left(\begin{array}{*{2}{c}} 4 & 0 \\ 1 & 0 \\ 0 & 3 \\ * \end{array}\right) = \left(\begin{array}{*{2}{c}} 8 & 3 \\ 3 & 6 \\ * \end{array}\right)\f$ * * // Since a \c StaticMatrix cannot change its size, only a square StaticMatrix * can be used in a // multiplication assignment with other square matrices * of the same dimensions. // // // \n \section * matrix_operations_element_access Element Access // <hr> // // \n * \subsection matrix_operations_function_call_operator_1 Function Call * Operator // // The easiest way to access a specific dense or sparse matrix * element is via the function call // operator. The indices to access a * matrix are zero-based: * * \code blaze::DynamicMatrix<int> M1( 4UL, 6UL ); M1(0,0) = 1; M1(0,1) = 3; // * ... * * blaze::CompressedMatrix<double> M2( 5UL, 3UL ); M2(0,2) = 4.1; M2(1,1) = * -6.3; \endcode * * // Since dense matrices allocate enough memory for all contained elements, * using the function // call operator on a dense matrix directly returns a * reference to the accessed value. 
In case // of a sparse matrix, if the * accessed value is currently not contained in the matrix, the // value is * inserted into the matrix prior to returning a reference to the value, * which can // be much more expensive than the direct access to a dense * matrix. Consider the following // example: * * \code blaze::CompressedMatrix<int> M1( 4UL, 4UL ); * * for( size_t i=0UL; i<M1.rows(); ++i ) { for( size_t j=0UL; j<M1.columns(); * ++j ) { ... = M1(i,j); } } \endcode * * // Although the compressed matrix is only used for read access within the for * loop, using the // function call operator temporarily inserts 16 non-zero * elements into the matrix. Therefore // the preferred way to traverse the * non-zero elements of a sparse matrix is to use iterators. // // \n * \subsection matrix_operations_iterators Iterators // // All matrices * (sparse as well as dense) offer an alternate way via the \c begin(), \c * cbegin(), // \c end() and \c cend() functions to traverse all contained * elements by iterator. Note that // it is not possible to traverse all * elements of the matrix, but that it is only possible to // traverse * elements in a row/column-wise fashion. In case of a non-const matrix, \c * begin() and // \c end() return an \c Iterator, which allows a manipulation * of the non-zero value, in case of // a constant matrix or in case \c * cbegin() or \c cend() are used a \c ConstIterator is returned: * * \code using blaze::CompressedMatrix; * * CompressedMatrix<int,rowMajor> M1( 4UL, 6UL ); * * // Traversing the matrix by Iterator for( size_t i=0UL; i<A.rows(); ++i ) { * for( CompressedMatrix<int,rowMajor>::Iterator it=A.begin(i); it!=A.end(i); * ++it ) { it->value() = ...; // OK: Write access to the value of the * non-zero element. ... = it->value(); // OK: Read access to the value of * the non-zero element. it->index() = ...; // Compilation error: The index * of a non-zero element cannot be changed. ... = it->index(); // OK: Read * access to the index of the non-zero element. } } * * // Traversing the matrix by ConstIterator for( size_t i=0UL; i<A.rows(); ++i * ) { for( CompressedMatrix<int,rowMajor>::ConstIterator it=A.cbegin(i); * it!=A.cend(i); ++it ) { it->value() = ...; // Compilation error: * Assignment to the value via a ConstIterator is invalid. ... = it->value(); * // OK: Read access to the value of the non-zero element. it->index() = * ...; // Compilation error: The index of a non-zero element cannot be * changed. ... = it->index(); // OK: Read access to the index of the * non-zero element. } } \endcode * * // Note that \c begin(), \c cbegin(), \c end(), and \c cend() are also * available as free functions: * * \code for( size_t i=0UL; i<A.rows(); ++i ) { for( * CompressedMatrix<int,rowMajor>::Iterator it=begin( A, i ); it!=end( A, i * ); ++it ) { // ... } } * * for( size_t i=0UL; i<A.rows(); ++i ) { for( * CompressedMatrix<int,rowMajor>::ConstIterator it=cbegin( A, i ); it!=cend( * A, i ); ++it ) { // ... } } \endcode * * // \n \section matrix_operations_element_insertion Element Insertion // <hr> * // // Whereas a dense matrix always provides enough capacity to store all * matrix elements, a sparse // matrix only stores the non-zero elements. * Therefore it is necessary to explicitly add elements // to the matrix. 
// * // \n \subsection matrix_operations_function_call_operator_2 Function Call * Operator // // The first possibility to add elements to a sparse matrix is * the function call operator: * * \code using blaze::CompressedMatrix; * * CompressedMatrix<int> M1( 3UL, 4UL ); M1(1,2) = 9; \endcode * * // In case the element at the given position is not yet contained in the * sparse matrix, it is // automatically inserted. Otherwise the old value is * replaced by the new value 9. The operator // returns a reference to the * sparse matrix element. // // \n \subsection matrix_operations_set .set() * // // An alternative to the function call operator is the \c set() * function: In case the element is // not yet contained in the matrix the * element is inserted, else the element's value is modified: * * \code // Insert or modify the value at position (2,0) M1.set( 2, 0, 1 ); * \endcode * * // \n \subsection matrix_operations_insert .insert() * * // The insertion of elements can be better controlled via the \c insert() * function. In contrast // to the function call operator and the \c set() * function it emits an exception in case the // element is already contained * in the matrix. In order to check for this case, the \c find() // function * can be used: * * \code // In case the element at position (2,3) is not yet contained in the * matrix it is inserted // with a value of 4. if( M1.find( 2, 3 ) == M1.end( * 2 ) ) M1.insert( 2, 3, 4 ); \endcode * * // \n \subsection matrix_operations_append .append() // // Although the \c * insert() function is very flexible, due to performance reasons it is not * // suited for the setup of large sparse matrices. A very efficient, yet * also very low-level // way to fill a sparse matrix is the \c append() * function. It requires the sparse matrix to // provide enough capacity to * insert a new element in the specified row/column. Additionally, // the * index of the new element must be larger than the index of the previous * element in the // same row/column. Violating these conditions results in * undefined behavior! * * \code M1.reserve( 0, 3 ); // Reserving space for three non-zero elements * in row 0 M1.append( 0, 1, 2 ); // Appending the element 2 in row 0 at * column index 1 M1.append( 0, 2, -4 ); // Appending the element -4 in row * 0 at column index 2 // ... \endcode * * // The most efficient way to fill a sparse matrix with elements, however, is * a combination of // \c reserve(), \c append(), and the \c finalize() * function: * * \code // Setup of the compressed row-major matrix // // ( 0 1 0 2 0 ) * // A = ( 0 0 0 0 0 ) // ( 3 0 0 0 0 ) // * blaze::CompressedMatrix<int> M1( 3UL, 5UL ); M1.reserve( 3 ); // * Reserving enough space for 3 non-zero elements M1.append( 0, 1, 1 ); // * Appending the value 1 in row 0 with column index 1 M1.append( 0, 3, 2 ); * // Appending the value 2 in row 0 with column index 3 M1.finalize( 0 ); * // Finalizing row 0 M1.finalize( 1 ); // Finalizing the empty row 1 * to prepare row 2 M1.append( 2, 0, 3 ); // Appending the value 3 in row 2 * with column index 0 M1.finalize( 2 ); // Finalizing row 2 \endcode * * // \note The \c finalize() function has to be explicitly called for each row * or column, even // for empty ones! // \note Although \c append() does not * allocate new memory, it still invalidates all iterators // returned by the * \c end() functions! 
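 *
 * // A small hypothetical sketch of the iterator note above (an illustration added for
 * // clarity): iterators obtained before a call to \c append() must not be reused
 * // afterwards, but have to be re-acquired:
 *
 * \code
 * blaze::CompressedMatrix<int> M( 2UL, 5UL );
 * M.reserve( 0, 2 );       // Reserving space for two non-zero elements in row 0
 *
 * auto it = M.end( 0 );    // Iterator into row 0
 * M.append( 0, 1, 7 );     // Appending invalidates 'it'
 * it = M.end( 0 );         // Re-acquire the iterator before using it again
 * M.finalize( 0 );         // Finalizing row 0
 * \endcode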
// // // \n \section matrix_operations_element_removal * Element Removal // <hr> // // \subsection matrix_operations_erase .erase() * // // The \c erase() member functions can be used to remove elements from * a sparse matrix. The // following example gives an impression of the five * different flavors of \c erase(): * * \code using blaze::CompressedMatrix; * * CompressedMatrix<int,rowMajor> A( 42, 53 ); // ... Initialization of the * matrix * * // Erasing the element at position (21,23) A.erase( 21, 23 ); * * // Erasing a single element in row 17 via iterator A.erase( 17, A.find( 4 ) * ); * * // Erasing all non-zero elements in the range [7..24] of row 33 A.erase( 33, * A.lowerBound( 33, 7 ), A.upperBound( 33, 24 ) ); * * // Erasing all non-zero elements with a value larger than 9 by passing a * unary predicate A.erase( []( int i ){ return i > 9; } ); * * // Erasing all non-zero elements in the range [30..40] of row 37 with a value * larger than 5 CompressedMatrix<int,rowMajor>::Iterator pos1( A.lowerBound( * 37, 30 ) ); CompressedMatrix<int,rowMajor>::Iterator pos2( A.upperBound( * 37, 40 ) ); A.erase( 37, pos1, pos2, []( int i ){ return i > 5; } ); * \endcode * * // \n \section matrix_operations_element_lookup Element Lookup // <hr> // // * A sparse matrix only stores the non-zero elements contained in the matrix. * Therefore, whenever // accessing a matrix element at a specific position a * lookup operation is required. Whereas the // function call operator is * performing this lookup automatically, it is also possible to use the // \c * find(), \c lowerBound(), and \c upperBound() member functions for a manual * lookup. // // \n \subsection matrix_operations_find .find() // // The \c * find() function can be used to check whether a specific element is * contained in the // sparse matrix. It specifically searches for the * element at the specified position. In case // the element is found, the * function returns an iterator to the element. Otherwise an iterator // just * past the last non-zero element of the according row or column (the \c * end() iterator) // is returned. Note that the returned iterator is subject * to invalidation due to inserting // operations via the function call * operator, the \c set() function or the \c insert() function! * * \code using blaze::CompressedMatrix; * * CompressedMatrix<int,rowMajor> A( 42, 53 ); // ... Initialization of the * matrix * * // Searching the element at position (7,17). In case the element is not // * contained in the vector, the end() iterator of row 7 is returned. * CompressedMatrix<int,rowMajor>::Iterator pos( A.find( 7, 17 ) ); * * if( pos != A.end( 7 ) ) { // ... } \endcode * * // \n \subsection matrix_operations_lowerbound .lowerBound() // // In case of * a row-major matrix, this function returns a row iterator to the first * element with // an index not less then the given column index. In case of * a column-major matrix, the function // returns a column iterator to the * first element with an index not less then the given row // index. In * combination with the \c upperBound() function this function can be used to * create a // pair of iterators specifying a range of indices. Note that the * returned iterator is subject // to invalidation due to inserting * operations via the function call operator, the \c set() // function or the * \c insert() function! * * \code using blaze::CompressedMatrix; * * CompressedMatrix<int,rowMajor> A( 42, 53 ); // ... 
Initialization of the * matrix * * // Searching the lower bound of column index 17 in row 7. * CompressedMatrix<int,rowMajor>::Iterator pos1( A.lowerBound( 7, 17 ) ); * * // Searching the upper bound of column index 28 in row 7 * CompressedMatrix<int,rowMajor>::Iterator pos2( A.upperBound( 7, 28 ) ); * * // Erasing all elements in the specified range A.erase( 7, pos1, pos2 ); * \endcode * * // \n \subsection matrix_operations_upperbound .upperBound() // // In case of * a row-major matrix, this function returns a row iterator to the first * element with // an index greater then the given column index. In case of a * column-major matrix, the function // returns a column iterator to the * first element with an index greater then the given row // index. In * combination with the \c lowerBound() function this function can be used to * create a // pair of iterators specifying a range of indices. Note that the * returned iterator is subject // to invalidation due to inserting * operations via the function call operator, the \c set() // function or the * \c insert() function! * * \code using blaze::CompressedMatrix; * * CompressedMatrix<int,columnMajor> A( 42, 53 ); // ... Initialization of the * matrix * * // Searching the lower bound of row index 17 in column 9. * CompressedMatrix<int,columnMajor>::Iterator pos1( A.lowerBound( 17, 9 ) ); * * // Searching the upper bound of row index 28 in column 9 * CompressedMatrix<int,columnMajor>::Iterator pos2( A.upperBound( 28, 9 ) ); * * // Erasing all elements in the specified range A.erase( 9, pos1, pos2 ); * \endcode * * // \n \section matrix_operations_non_modifying_operations Non-Modifying * Operations // <hr> // // \subsection matrix_operations_rows .rows() / * rows() // // The current number of rows of a matrix can be acquired via * the \c rows() member function: * * \code // Instantiating a dynamic matrix with 10 rows and 8 columns * blaze::DynamicMatrix<int> M1( 10UL, 8UL ); M1.rows(); // Returns 10 * * // Instantiating a compressed matrix with 8 rows and 12 columns * blaze::CompressedMatrix<double> M2( 8UL, 12UL ); M2.rows(); // Returns 8 * \endcode * * // Alternatively, the free functions \c rows() can be used to query the * current number of rows of // a matrix. In contrast to the member function, * the free function can also be used to query the // number of rows of a * matrix expression: * * \code rows( M1 ); // Returns 10, i.e. has the same effect as the member * function rows( M2 ); // Returns 8, i.e. has the same effect as the member * function * * rows( M1 * M2 ); // Returns 10, i.e. the number of rows of the resulting * matrix \endcode * * // \n \subsection matrix_operations_columns .columns() / columns() // // The * current number of columns of a matrix can be acquired via the \c columns() * member function: * * \code // Instantiating a dynamic matrix with 6 rows and 8 columns * blaze::DynamicMatrix<int> M1( 6UL, 8UL ); M1.columns(); // Returns 8 * * // Instantiating a compressed matrix with 8 rows and 7 columns * blaze::CompressedMatrix<double> M2( 8UL, 7UL ); M2.columns(); // Returns * 7 \endcode * * // There is also a free function \c columns() available, which can also be * used to query the number // of columns of a matrix expression: * * \code columns( M1 ); // Returns 8, i.e. has the same effect as the member * function columns( M2 ); // Returns 7, i.e. has the same effect as the * member function * * columns( M1 * M2 ); // Returns 7, i.e. 
the number of columns of the * resulting matrix \endcode * * // \subsection matrix_operations_size size() // // The \c size() function * returns the total number of elements of a matrix: * * \code // Instantiating a dynamic matrix with 6 rows and 8 columns * blaze::DynamicMatrix<int> M1( 6UL, 8UL ); size( M1 ); // Returns 48 * * // Instantiating a compressed matrix with 8 rows and 7 columns * blaze::CompressedMatrix<double> M2( 8UL, 7UL ); size( M2 ); // Returns 56 * \endcode * * // \subsection matrix_operations_spacing .spacing() / spacing() // // The * total number of elements of a row or column of a dense matrix, including * potential padding // elements, can be acquired via the \c spacing() member * function. In case of a row-major matrix // (i.e. in case the storage order * is set to blaze::rowMajor) the function returns the spacing // between two * rows, in case of a column-major matrix (i.e. in case the storage flag is * set to // blaze::columnMajor) the function returns the spacing between two * columns: * * \code // Instantiating a row-major dynamic matrix with 7 rows and 8 columns * blaze::DynamicMatrix<int,blaze::rowMajor> M1( 7UL, 8UL ); M1.spacing(); * // Returns the total number of elements in a row * * // Instantiating a column-major dynamic matrix with 8 rows and 12 columns * blaze::DynamicMatrix<double,blaze::columnMajor> M2( 8UL, 12UL ); M2.spacing(); // Returns * the total number of elements in a column \endcode * * // Alternatively, the free function \c spacing() can be used to query the * current number of // elements in a row/column. * * \code spacing( M1 ); // Returns the total number of elements in a row * spacing( M2 ); // Returns the total number of elements in a column * \endcode * * // \n \subsection matrix_operations_capacity .capacity() / capacity() // // * The \c capacity() member function returns the internal capacity of a dense * or sparse matrix. // Note that the capacity of a matrix doesn't have to be * equal to the size of a matrix. In case of // a dense matrix the capacity * will always be greater than or equal to the total number of elements // of * the matrix. In case of a sparse matrix, the capacity will usually be much * less than the // total number of elements. * * \code blaze::DynamicMatrix<float> M1( 5UL, 7UL ); * blaze::StaticMatrix<float,7UL,4UL> M2; M1.capacity(); // Returns at least * 35 M2.capacity(); // Returns at least 28 \endcode * * // There is also a free function \c capacity() available to query the * capacity. However, please // note that this function cannot be used to * query the capacity of a matrix expression: * * \code capacity( M1 ); // Returns at least 35, i.e. has the same effect as * the member function capacity( M2 ); // Returns at least 28, i.e. has the * same effect as the member function * * capacity( M1 * M2 ); // Compilation error! \endcode * * // \n \subsection matrix_operations_nonzeros .nonZeros() / nonZeros() // // * For both dense and sparse matrices the current number of non-zero elements * can be queried // via the \c nonZeros() member function. In case of * matrices there are two flavors of the // \c nonZeros() function: One * returns the total number of non-zero elements in the matrix, // the second * returns the number of non-zero elements in a specific row (in case of a * row-major // matrix) or column (in case of a column-major matrix). Sparse * matrices directly return their // number of non-zero elements, dense * matrices traverse their elements and count the number of // non-zero * elements. 
* * \code blaze::DynamicMatrix<int,rowMajor> M1( 3UL, 5UL ); * * // ... Initializing the dense matrix * * M1.nonZeros(); // Returns the total number of non-zero elements in the * dense matrix M1.nonZeros( 2 ); // Returns the number of non-zero elements * in row 2 \endcode * * \code blaze::CompressedMatrix<double,columnMajor> M2( 4UL, 7UL ); * * // ... Initializing the sparse matrix * * M2.nonZeros(); // Returns the total number of non-zero elements in the * sparse matrix M2.nonZeros( 3 ); // Returns the number of non-zero * elements in column 3 \endcode * * // The free \c nonZeros() function can also be used to query the number of * non-zero elements in a // matrix expression. However, the result is not * the exact number of non-zero elements, but may be // a rough estimation: * * \code nonZeros( M1 ); // Has the same effect as the member function * nonZeros( M1, 2 ); // Has the same effect as the member function * * nonZeros( M2 ); // Has the same effect as the member function nonZeros( * M2, 3 ); // Has the same effect as the member function * * nonZeros( M1 * M2 ); // Estimates the number of non-zero elements in the * matrix expression \endcode * * // \n \subsection matrix_operations_isempty isEmpty() // // The \c isEmpty() * function returns whether the total number of elements of the matrix is * zero: * * \code blaze::DynamicMatrix<int> A; // Create an empty matrix isEmpty( A ); * // Returns true A.resize( 5, 0 ); // Resize to a 5x0 matrix * isEmpty( A ); // Returns true A.resize( 5, 3 ); * // Resize to a 5x3 matrix isEmpty( A ); // Returns false * \endcode * * // \n \subsection matrix_operations_isnan isnan() // // The \c isnan() * function provides the means to check a dense or sparse matrix for * not-a-number // elements: * * \code blaze::DynamicMatrix<double> A( 3UL, 4UL ); // ... Initialization if( * isnan( A ) ) { ... } \endcode * * \code blaze::CompressedMatrix<double> A( 3UL, 4UL ); // ... Initialization * if( isnan( A ) ) { ... } \endcode * * // If at least one element of the matrix is not-a-number, the function * returns \c true, otherwise // it returns \c false. Please note that this * function only works for matrices with floating point // elements. The * attempt to use it for a matrix with a non-floating point element type * results in // a compile time error. // // // \n \subsection * matrix_operations_isdefault isDefault() // // The \c isDefault() function * returns whether the given dense or sparse matrix is in default state: * * \code blaze::HybridMatrix<int,5UL,4UL> A; // ... Resizing and initialization * if( isDefault( A ) ) { ... } \endcode * * // A matrix is in default state if it appears to just have been default * constructed. All resizable // matrices (\c HybridMatrix, \c DynamicMatrix, * or \c CompressedMatrix) and \c CustomMatrix are in // default state if their * size is equal to zero. A non-resizable matrix (\c StaticMatrix and all // * submatrices) is in default state if all its elements are in default state. * For instance, in case // the matrix is instantiated for a built-in * integral or floating point data type, the function // returns \c true in * case all matrix elements are 0 and \c false in case any matrix element is * // not 0. // // // \n \subsection matrix_operations_isSquare isSquare() // * // Whether a dense or sparse matrix is a square matrix (i.e. if the number * of rows is equal to the // number of columns) can be checked via the \c * isSquare() function: * * \code blaze::DynamicMatrix<double> A; // ... 
Resizing and initialization if( * isSquare( A ) ) { ... } \endcode * * // \n \subsection matrix_operations_issymmetric isSymmetric() // // Via the * \c isSymmetric() function it is possible to check whether a dense or * sparse matrix // is symmetric: * * \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( * isSymmetric( A ) ) { ... } \endcode * * // Note that non-square matrices are never considered to be symmetric! // // * // \n \subsection matrix_operations_isUniform isUniform() // // In order * to check if all matrix elements are identical, the \c isUniform function * can be used: * * \code blaze::DynamicMatrix<int> A; // ... Resizing and initialization if( * isUniform( A ) ) { ... } \endcode * * // Note that in case of a sparse matrix also the zero elements are also taken * into account! // // // \n \subsection matrix_operations_islower isLower() * // // Via the \c isLower() function it is possible to check whether a * dense or sparse matrix is // lower triangular: * * \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( * isLower( A ) ) { ... } \endcode * * // Note that non-square matrices are never considered to be lower triangular! * // // // \n \subsection matrix_operations_isunilower isUniLower() // // * Via the \c isUniLower() function it is possible to check whether a dense * or sparse matrix is // lower unitriangular: * * \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( * isUniLower( A ) ) { ... } \endcode * * // Note that non-square matrices are never considered to be lower * unitriangular! // // // \n \subsection matrix_operations_isstrictlylower * isStrictlyLower() // // Via the \c isStrictlyLower() function it is * possible to check whether a dense or sparse matrix // is strictly lower * triangular: * * \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( * isStrictlyLower( A ) ) { ... } \endcode * * // Note that non-square matrices are never considered to be strictly lower * triangular! // // // \n \subsection matrix_operations_isUpper isUpper() // * // Via the \c isUpper() function it is possible to check whether a dense * or sparse matrix is // upper triangular: * * \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( * isUpper( A ) ) { ... } \endcode * * // Note that non-square matrices are never considered to be upper triangular! * // // // \n \subsection matrix_operations_isuniupper isUniUpper() // // * Via the \c isUniUpper() function it is possible to check whether a dense * or sparse matrix is // upper unitriangular: * * \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( * isUniUpper( A ) ) { ... } \endcode * * // Note that non-square matrices are never considered to be upper * unitriangular! // // // \n \subsection matrix_operations_isstrictlyupper * isStrictlyUpper() // // Via the \c isStrictlyUpper() function it is * possible to check whether a dense or sparse matrix // is strictly upper * triangular: * * \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( * isStrictlyUpper( A ) ) { ... } \endcode * * // Note that non-square matrices are never considered to be strictly upper * triangular! // // // \n \subsection matrix_operations_isdiagonal * isDiagonal() // // The \c isDiagonal() function checks if the given dense * or sparse matrix is a diagonal matrix, // i.e. 
if it has only elements on * its diagonal and if the non-diagonal elements are default // elements: * * \code blaze::CompressedMatrix<float> A; // ... Resizing and initialization * if( isDiagonal( A ) ) { ... } \endcode * * // Note that non-square matrices are never considered to be diagonal! // // * // \n \subsection matrix_operations_isidentity isIdentity() // // The \c * isIdentity() function checks if the given dense or sparse matrix is an * identity matrix, // i.e. if all diagonal elements are 1 and all * non-diagonal elements are 0: * * \code blaze::CompressedMatrix<float> A; // ... Resizing and initialization * if( isIdentity( A ) ) { ... } \endcode * * // Note that non-square matrices are never considered to be identity * matrices! // // // \n \subsection matrix_operations_matrix_determinant * det() // // The determinant of a square dense matrix can be computed by * means of the \c det() function: * * \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and * initialization double d = det( A ); // Compute the determinant of A * \endcode * * // In case the given dense matrix is not a square matrix, a \c * std::invalid_argument exception is // thrown. // // \note The \c det() * function can only be used for dense matrices with \c float, \c double, // * \c complex<float> or \c complex<double> element type. The attempt to call * the function with // matrices of any other element type or with a sparse * matrix results in a compile time error! // // \note The function is * depending on LAPACK kernels. Thus the function can only be used if the // * fitting LAPACK library is available and linked to the executable. * Otherwise a linker error // will be created. // // // \n \subsection * matrix_operations_matrix_trans trans() // // Matrices can be transposed * via the \c trans() function. Row-major matrices are transposed into // a * column-major matrix and vice versa: * * \code blaze::DynamicMatrix<int,rowMajor> M1( 5UL, 2UL ); * blaze::CompressedMatrix<int,columnMajor> M2( 3UL, 7UL ); * * M1 = M2; // Assigning a column-major matrix to a row-major matrix * M1 = trans( M2 ); // Assigning the transpose of M2 (i.e. a row-major * matrix) to M1 M1 += trans( M2 ); // Addition assignment of two row-major * matrices \endcode * * // \n \subsection matrix_operations_ctrans ctrans() // // The conjugate * transpose of a dense or sparse matrix (also called adjoint matrix, * Hermitian // conjugate, or transjugate) can be computed via the \c * ctrans() function: * * \code blaze::DynamicMatrix< complex<float>, rowMajor > M1( 5UL, 2UL ); * blaze::CompressedMatrix< complex<float>, columnMajor > M2( 2UL, 5UL ); * * M1 = ctrans( M2 ); // Compute the conjugate transpose matrix \endcode * * // Note that the \c ctrans() function has the same effect as manually * applying the \c conj() and // \c trans() function in any order: * * \code M1 = trans( conj( M2 ) ); // Computing the conjugate transpose matrix * M1 = conj( trans( M2 ) ); // Computing the conjugate transpose matrix * \endcode * * // \n \subsection matrix_operations_matrix_evaluate eval() / evaluate() // // * The \c evaluate() function forces an evaluation of the given matrix * expression and enables // an automatic deduction of the correct result * type of an operation. 
The following code example // demonstrates its * intended use for the multiplication of a lower and a strictly lower dense * // matrix: * * \code using blaze::DynamicMatrix; using blaze::LowerMatrix; using * blaze::StrictlyLowerMatrix; * * LowerMatrix< DynamicMatrix<double> > A; StrictlyLowerMatrix< * DynamicMatrix<double> > B; // ... Resizing and initialization * * auto C = evaluate( A * B ); \endcode * * // In this scenario, the \c evaluate() function assists in deducing the exact * result type of // the operation via the \c auto keyword. Please note that * if \c evaluate() is used in this // way, no temporary matrix is created * and no copy operation is performed. Instead, the result // is directly * written to the target matrix due to the return value optimization (RVO). * However, // if \c evaluate() is used in combination with an explicit * target type, a temporary will be // created and a copy operation will be * performed if the used type differs from the type // returned from the * function: * * \code StrictlyLowerMatrix< DynamicMatrix<double> > D( A * B ); // No * temporary & no copy operation LowerMatrix< DynamicMatrix<double> > E( A * * B ); // Temporary & copy operation DynamicMatrix<double> F( A * B * ); // Temporary & copy operation D = evaluate( A * * B ); // Temporary & copy operation * \endcode * * // Sometimes it might be desirable to explicitly evaluate a sub-expression * within a larger // expression. However, please note that \c evaluate() is * not intended to be used for this // purpose. This task is more elegantly * and efficiently handled by the \c eval() function: * * \code blaze::DynamicMatrix<double> A, B, C, D; * * D = A + evaluate( B * C ); // Unnecessary creation of a temporary matrix D = * A + eval( B * C ); // No creation of a temporary matrix \endcode * * // In contrast to the \c evaluate() function, \c eval() can take the complete * expression // into account and therefore can guarantee the most efficient * way to evaluate it (see also // \ref intra_statement_optimization). // // * // \n \section matrix_operations_modifying_operations Modifying Operations * // <hr> // // \subsection matrix_operations_resize_reserve .resize() / * .reserve() // // The dimensions of a \c StaticMatrix are fixed at compile * time by the second and third template // parameter and a \c CustomMatrix * cannot be resized. In contrast, the number or rows and columns // of \c * DynamicMatrix, \c HybridMatrix, and \c CompressedMatrix can be changed at * runtime: * * \code using blaze::DynamicMatrix; using blaze::CompressedMatrix; * * DynamicMatrix<int,rowMajor> M1; CompressedMatrix<int,columnMajor> M2( 3UL, * 2UL ); * * // Adapting the number of rows and columns via the resize() function. The * (optional) // third parameter specifies whether the existing elements * should be preserved. Per // default, the existing elements are preserved. * M1.resize( 2UL, 2UL ); // Resizing matrix M1 to 2x2 elements. * Elements of built-in type // remain uninitialized, elements of class type * are default // constructed. M1.resize( 3UL, 1UL, false ); // Resizing M1 * to 3x1 elements. The old elements are lost, the // new elements are NOT * initialized! M2.resize( 5UL, 7UL, true ); // Resizing M2 to 5x7 * elements. The old elements are preserved. M2.resize( 3UL, 2UL, false ); * // Resizing M2 to 3x2 elements. The old elements are lost. \endcode * * // Note that resizing a matrix invalidates all existing views (see e.g. 
\ref * views_submatrices) // on the matrix: * * \code blaze::DynamicMatrix<int,rowMajor> M1( 10UL, 20UL ); // Creating a * 10x20 matrix auto row8 = row( M1, 8UL ); // Creating a view on the 8th * row of the matrix M1.resize( 6UL, 20UL ); // Resizing the matrix * invalidates the view \endcode * * // When the internal capacity of a matrix is no longer sufficient, the * allocation of a larger // chunk of memory is triggered. In order to avoid * frequent reallocations, the \c reserve() // function can be used up front * to set the internal capacity: * * \code blaze::DynamicMatrix<int> M1; M1.reserve( 100 ); M1.rows(); // * Returns 0 M1.capacity(); // Returns at least 100 \endcode * * // Additionally it is possible to reserve memory in a specific row (for a * row-major matrix) or // column (for a column-major matrix): * * \code blaze::CompressedMatrix<int> M1( 4UL, 6UL ); M1.reserve( 1, 4 ); // * Reserving enough space for four non-zero elements in row 1 \endcode * * // \n \subsection matrix_operations_shrinkToFit .shrinkToFit() // // The * internal capacity of matrices with dynamic memory is preserved in order to * minimize the // number of reallocations. For that reason, the \c resize() * and \c reserve() functions can lead // to memory overhead. The \c * shrinkToFit() member function can be used to minimize the internal // * capacity: * * \code blaze::DynamicMatrix<int> M1( 100UL, 100UL ); // Create a 100x100 * integer matrix M1.resize( 10UL, 10UL ); // Resize to * 10x10, but the capacity is preserved M1.shrinkToFit(); * // Remove the unused capacity \endcode * * // Please note that due to padding the capacity might not be reduced exactly * to \c rows() times // \c columns(). Please also note that in case a * reallocation occurs, all iterators (including // \c end() iterators), all * pointers and references to elements of this matrix are invalidated. // // * // \subsection matrix_operations_reset_clear reset() / clear() // // In * order to reset all elements of a dense or sparse matrix, the \c reset() * function can be // used. The number of rows and columns of the matrix are * preserved: * * \code // Setting up a single precision row-major matrix, whose elements are * initialized with 2.0F. blaze::DynamicMatrix<float> M1( 4UL, 5UL, 2.0F ); * * // Resetting all elements to 0.0F. reset( M1 ); // Resetting all elements * M1.rows(); // Returns 4: size and capacity remain unchanged \endcode * * // Alternatively, only a single row or column of the matrix can be reset: * * \code blaze::DynamicMatrix<int,blaze::rowMajor> M1( 7UL, 6UL, 5 ); // * Setup of a row-major matrix blaze::DynamicMatrix<int,blaze::columnMajor> * M2( 4UL, 5UL, 4 ); // Setup of a column-major matrix * * reset( M1, 2UL ); // Resetting the 2nd row of the row-major matrix reset( * M2, 3UL ); // Resetting the 3rd column of the column-major matrix * \endcode * * // In order to reset a row of a column-major matrix or a column of a * row-major matrix, use a // row or column view (see \ref views_rows and * views_columns). // // In order to return a matrix to its default state * (i.e. the state of a default constructed // matrix), the \c clear() * function can be used: * * \code // Setting up a single precision row-major matrix, whose elements are * initialized with 2.0F. blaze::DynamicMatrix<float> M1( 4UL, 5UL, 2.0F ); * * // Resetting all elements to 0.0F. 
clear( M1 ); // Resetting the entire * matrix M1.rows(); // Returns 0: size is reset, but capacity remains * unchanged \endcode * * // \n \subsection matrix_operations_matrix_transpose transpose() // // In * addition to the non-modifying \c trans() function, matrices can be * transposed in-place via // the \c transpose() function: * * \code blaze::DynamicMatrix<int,rowMajor> M( 5UL, 2UL ); * * transpose( M ); // In-place transpose operation. M = trans( M ); // Same as * above \endcode * * // Note however that the transpose operation fails if ... // // - ... the * given matrix has a fixed size and is non-square; // - ... the given * matrix is a triangular matrix; // - ... the given submatrix affects the * restricted parts of a triangular matrix; // - ... the given submatrix * would cause non-deterministic results in a symmetric/Hermitian matrix. // * // // \n \subsection matrix_operations_ctranspose ctranspose() // // The * \c ctranspose() function can be used to perform an in-place conjugate * transpose operation: * * \code blaze::DynamicMatrix<int,rowMajor> M( 5UL, 2UL ); * * ctranspose( M ); // In-place conjugate transpose operation. M = ctrans( M ); * // Same as above \endcode * * // Note however that the conjugate transpose operation fails if ... // // - * ... the given matrix has a fixed size and is non-square; // - ... the * given matrix is a triangular matrix; // - ... the given submatrix affects * the restricted parts of a triangular matrix; // - ... the given submatrix * would cause non-deterministic results in a symmetric/Hermitian matrix. // * // // \n \subsection matrix_operations_swap swap() // // Via the \c * swap() function it is possible to completely swap the contents of two * matrices // of the same type: * * \code blaze::DynamicMatrix<int,blaze::rowMajor> M1( 10UL, 15UL ); * blaze::DynamicMatrix<int,blaze::rowMajor> M2( 20UL, 10UL ); * * swap( M1, M2 ); // Swapping the contents of M1 and M2 \endcode * * // \n \section matrix_operations_arithmetic_operations Arithmetic Operations * // <hr> // // \subsection matrix_operations_min_max min() / max() // // * The \c min() and \c max() functions can be used for a single matrix or * multiple matrices. If // passed a single matrix, the functions return the * smallest and largest element of the given // dense matrix or the smallest * and largest non-zero element of the given sparse matrix, // respectively: * * \code blaze::StaticMatrix<int,2UL,3UL> A{ { -5, 2, 7 }, { -4, 0, 1 } }; * * min( A ); // Returns -5 max( A ); // Returns 7 \endcode * * \code blaze::CompressedMatrix<int> B{ { 1, 0, 3 }, { 0, 0, 0 } }; * * min( B ); // Returns 1 max( B ); // Returns 3 \endcode * * // For more information on the unary \c min() and \c max() reduction * operations see the // \ref matrix_operations_reduction_operations section. * // // If passed two or more dense matrices, the \c min() and \c max() * functions compute the // componentwise minimum or maximum of the given * matrices, respectively: * * \code blaze::StaticMatrix<int,2UL,3UL,rowMajor> C{ { -5, 1, -7 }, { 4, 1, 0 } * }; blaze::StaticMatrix<int,2UL,3UL,rowMajor> D{ { -5, 3, 0 }, { 2, 2, -2 * } }; * * min( A, C ); // Results in the matrix ( -5, 1, -7 ) ( -4, 0, 0 ) max( A, * C, D ); // Results in the matrix ( -5, 3, 7 ) ( 4, 2, 1 ) \endcode * * // Please note that sparse matrices can only be used in the unary \c min() * and \c max() functions. 
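// As a small, hedged sketch that is not part of the official documentation (the matrix names A, L, U, and B are assumptions), the componentwise \c min() and \c max() variants for dense matrices can also be nested, for instance to clamp every element of a matrix between per-element lower and upper bounds:

   \code
   blaze::DynamicMatrix<double> A, L, U, B;
   // ... Resizing and initialization (A, L, and U are assumed to have matching dimensions)

   B = min( max( A, L ), U );  // Componentwise clamp: B(i,j) is limited to the range [L(i,j)..U(i,j)]
   \endcode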
// Also note that all forms of the \c min() and \c * max() functions can be used to compute the // smallest and largest element * of a matrix expression: * * \code min( A + B + C ); // Returns -9, i.e. the smallest value of the * resulting matrix max( A - B - C ); // Returns 11, i.e. the largest value * of the resulting matrix \endcode * * // \n \subsection matrix_operators_softmax softmax() // // The <a * href="https://en.wikipedia.org/wiki/Softmax_function">softmax * function</a>, also called // the normalized exponential function, of a * given dense matrix can be computed via \c softmax(). // The resulting * dense matrix consists of real values in the range (0..1], which add up to * 1. * * \code blaze::StaticMatrix<double,3UL,3UL> A{ { 1.0, 2.0, 3.0 } , { 4.0, 1.0, * 2.0 } , { 3.0, 4.0, 1.0 } }; blaze::StaticMatrix<double,3UL,3UL> B; * * // Evaluating the softmax function B = softmax( A ); // Results in ( * 0.0157764 0.0428847 0.116573 ) // ( 0.316878 0.0157764 * 0.0428847 ) // ( 0.116573 0.316878 0.0157764 ) double s = * sum( B ); // Results in 1 \endcode * * // \n \subsection matrix_operators_trace trace() // // The \c trace() * function sums the diagonal elements of a square dense or sparse matrix: * * \code blaze::StaticMatrix<int,3UL,3UL> A{ { -1, 2, -3 } , { -4, -5, 6 } , { * 7, -8, -9 } }; * * trace( A ); // Returns the sum of the diagonal elements, i.e. -15 \endcode * * // In case the given matrix is not a square matrix, a \c * std::invalid_argument exception is // thrown. // // // \n \subsection * matrix_operators_abs abs() // // The \c abs() function can be used to * compute the absolute values of each element of a matrix. // For instance, * the following computation * * \code blaze::StaticMatrix<int,2UL,3UL,rowMajor> A{ { -1, 2, -3 }, { 4, -5, * 6 } }; blaze::StaticMatrix<int,2UL,3UL,rowMajor> B( abs( A ) ); \endcode * * // results in the matrix * * \f$ B = \left(\begin{array}{*{3}{c}} 1 & 2 & 3 \\ 4 & 5 & 6 \\ * \end{array}\right)\f$ * * // \n \subsection matrix_operators_sign sign() // // The \c sign() function * can be used to evaluate the sign of each element of a matrix \a A. For // * each element \c (i,j) the corresponding result is 1 if \a A(i,j) is * greater than zero, 0 if // \a A(i,j) is zero, and -1 if \a A(i,j) is less * than zero. 
For instance, the following use of // the \c sign() function * * \code blaze::StaticMatrix<int,2UL,3UL,rowMajor> A{ { -1, 2, 0 }, { 4, 0, * -6 } }; blaze::StaticMatrix<int,2UL,3UL,rowMajor> B( sign( A ) ); \endcode * * // results in the matrix * * \f$ B = \left(\begin{array}{*{3}{c}} -1 & 1 & 0 \\ 1 & 0 & -1 \\ * \end{array}\right)\f$ * * // \n \subsection matrix_operators_rounding_functions floor() / ceil() / * trunc() / round() // // The \c floor(), \c ceil(), \c trunc(), and \c * round() functions can be used to round down/up // each element of a * matrix, respectively: * * \code blaze::StaticMatrix<double,3UL,3UL> A, B; * * B = floor( A ); // Rounding down each element of the matrix B = ceil ( A ); * // Rounding up each element of the matrix B = trunc( A ); // Truncating * each element of the matrix B = round( A ); // Rounding each element of * the matrix \endcode * * // \n \subsection matrix_operators_conj conj() // // The \c conj() function * can be applied on a dense or sparse matrix to compute the complex // * conjugate of each element of the matrix: * * \code using blaze::StaticMatrix; * * using cplx = std::complex<double>; * * // Creating the matrix // ( (1,0) (-2,-1) ) // ( (1,1) ( 0, 1) ) * StaticMatrix<cplx,2UL,2UL> A{ { cplx( 1.0, 0.0 ), cplx( -2.0, -1.0 ) }, { * cplx( 1.0, 1.0 ), cplx( 0.0, 1.0 ) } }; * * // Computing the matrix of conjugate values // ( (1, 0) (-2, 1) ) // ( * (1,-1) ( 0,-1) ) StaticMatrix<cplx,2UL,2UL> B; B = conj( A ); \endcode * * // Additionally, matrices can be conjugated in-place via the \c conjugate() * function: * * \code blaze::DynamicMatrix<cplx> C( 5UL, 2UL ); * * conjugate( C ); // In-place conjugate operation. C = conj( C ); // Same as * above \endcode * * // \n \subsection matrix_operators_real real() // // The \c real() function * can be used on a dense or sparse matrix to extract the real part of // * each element of the matrix: * * \code using blaze::StaticMatrix; * * using cplx = std::complex<double>; * * // Creating the matrix // ( (1,0) (-2,-1) ) // ( (1,1) ( 0, 1) ) * StaticMatrix<cplx,2UL,2UL> A{ { cplx( 1.0, 0.0 ), cplx( -2.0, -1.0 ) }, { * cplx( 1.0, 1.0 ), cplx( 0.0, 1.0 ) } }; * * // Extracting the real part of each matrix element // ( 1 -2 ) // ( 1 * 0 ) StaticMatrix<double,2UL,2UL> B; B = real( A ); \endcode * * // \n \subsection matrix_operators_imag imag() // // The \c imag() function * can be used on a dense or sparse matrix to extract the imaginary part // * of each element of the matrix: * * \code using blaze::StaticMatrix; * * using cplx = std::complex<double>; * * // Creating the matrix // ( (1,0) (-2,-1) ) // ( (1,1) ( 0, 1) ) * StaticMatrix<cplx,2UL,2UL> A{ { cplx( 1.0, 0.0 ), cplx( -2.0, -1.0 ) }, { * cplx( 1.0, 1.0 ), cplx( 0.0, 1.0 ) } }; * * // Extracting the imaginary part of each matrix element // ( 0 -1 ) // * ( 1 1 ) StaticMatrix<double,2UL,2UL> B; B = imag( A ); \endcode * * // \n \subsection matrix_operators_sqrt sqrt() / invsqrt() // // Via the \c * sqrt() and \c invsqrt() functions the (inverse) square root of each * element of a // matrix can be computed: * * \code blaze::StaticMatrix<double,3UL,3UL> A, B, C; * * B = sqrt( A ); // Computes the square root of each element C = invsqrt( A * ); // Computes the inverse square root of each element \endcode * * // Note that in case of sparse matrices only the non-zero elements are taken * into account! 
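// To make the preceding note concrete, the following hedged sketch (the names S, T, D, and E are illustrative assumptions, not taken from the official documentation) contrasts \c invsqrt() on a sparse and on a dense matrix: for the compressed matrix only the stored non-zero values are transformed, so the implicit zeros remain zero instead of turning into infinities:

   \code
   blaze::CompressedMatrix<double> S( 2UL, 2UL );
   S(0,0) = 4.0;  // Single stored non-zero element

   blaze::CompressedMatrix<double> T( invsqrt( S ) );  // Stored element becomes 0.5; implicit zeros stay 0

   blaze::DynamicMatrix<double> D( S );             // Dense copy: the zeros are now explicit elements
   blaze::DynamicMatrix<double> E( invsqrt( D ) );  // Every element is transformed; 1/sqrt(0) yields inf
   \endcode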
// // // \n \subsection matrix_operators_cbrt cbrt() / * invcbrt() // // The \c cbrt() and \c invcbrt() functions can be used to * compute the (inverse) cubic root // of each element of a matrix: * * \code blaze::DynamicMatrix<double> A, B, C; * * B = cbrt( A ); // Computes the cubic root of each element C = invcbrt( A * ); // Computes the inverse cubic root of each element \endcode * * // Note that in case of sparse matrices only the non-zero elements are taken * into account! // // // \n \subsection matrix_operations_hypot hypot() // * // The \c hypot() function can be used to compute the componentwise * hypotenuse for a pair of // dense matrices: * * \code blaze::StaticMatrix<double,3UL,3UL> A, B, C; * * C = hypot( A, B ); // Computes the componentwise hypotenuse \endcode * * // \n \subsection matrix_operators_clamp clamp() // // The \c clamp() * function can be used to restrict all elements of a matrix to a specific * range: * * \code blaze::DynamicMatrix<double> A, B; * * B = clamp( A, -1.0, 1.0 ); // Restrict all elements to the range [-1..1] * \endcode * * // Note that in case of sparse matrices only the non-zero elements are taken * into account! // // // \n \subsection matrix_operators_pow pow() // // The * \c pow() function can be used to compute the exponential value of each * element of a matrix. // If passed a matrix and a numeric exponent, the * function computes the exponential value of each // element of the matrix * using the same exponent. If passed a second matrix, the function computes * // the componentwise exponential value: * * \code blaze::StaticMatrix<double,3UL,3UL> A, B, C; * * C = pow( A, 1.2 ); // Computes the exponential value of each element C = * pow( A, B ); // Computes the componentwise exponential value \endcode * * // \n \subsection matrix_operators_exp exp() // // \c exp(), \c exp2() and \c * exp10() compute the base e/2/10 exponential of each element of a // * matrix, respectively: * * \code blaze::HybridMatrix<double,3UL,3UL> A, B; * * B = exp( A ); // Computes the base e exponential of each element B = exp2( * A ); // Computes the base 2 exponential of each element B = exp10( A ); * // Computes the base 10 exponential of each element \endcode * * // Note that in case of sparse matrices only the non-zero elements are taken * into account! 
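// The same caveat is particularly noticeable for the exponential functions, since exp(0) equals 1: for a sparse matrix the implicit zeros are not transformed and therefore remain 0. A brief, hedged sketch (the names S, T, D, and E are illustrative assumptions, not taken from the official documentation):

   \code
   blaze::CompressedMatrix<double> S( 2UL, 2UL );
   S(0,0) = 1.0;  // Single stored non-zero element

   blaze::CompressedMatrix<double> T( exp( S ) );  // Stored element becomes e; implicit zeros remain 0

   blaze::DynamicMatrix<double> D( 2UL, 2UL, 0.0 );
   D(0,0) = 1.0;
   blaze::DynamicMatrix<double> E( exp( D ) );     // Every element is transformed; the zero elements become 1
   \endcode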
// // // \n \subsection matrix_operators_log log() / log2() * / log10() // // The \c log(), \c log2() and \c log10() functions can be * used to compute the natural, binary // and common logarithm of each * element of a matrix: * * \code blaze::StaticMatrix<double,3UL,3UL> A, B; * * B = log( A ); // Computes the natural logarithm of each element B = log2( * A ); // Computes the binary logarithm of each element B = log10( A ); * // Computes the common logarithm of each element \endcode * * // \n \subsection matrix_operators_trigonometric_functions sin() / cos() / * tan() / asin() / acos() / atan() // // The following trigonometric * functions are available for both dense and sparse matrices: * * \code blaze::DynamicMatrix<double> A, B; * * B = sin( A ); // Computes the sine of each element of the matrix B = cos( A * ); // Computes the cosine of each element of the matrix B = tan( A ); // * Computes the tangent of each element of the matrix * * B = asin( A ); // Computes the inverse sine of each element of the matrix B * = acos( A ); // Computes the inverse cosine of each element of the matrix * B = atan( A ); // Computes the inverse tangent of each element of the * matrix \endcode * * // Note that in case of sparse matrices only the non-zero elements are taken * into account! // // // \n \subsection * matrix_operators_hyperbolic_functions sinh() / cosh() / tanh() / asinh() / * acosh() / atanh() // // The following hyperbolic functions are available * for both dense and sparse matrices: * * \code blaze::DynamicMatrix<double> A, B; * * B = sinh( A ); // Computes the hyperbolic sine of each element of the matrix * B = cosh( A ); // Computes the hyperbolic cosine of each element of the * matrix B = tanh( A ); // Computes the hyperbolic tangent of each element * of the matrix * * B = asinh( A ); // Computes the inverse hyperbolic sine of each element of * the matrix B = acosh( A ); // Computes the inverse hyperbolic cosine of * each element of the matrix B = atanh( A ); // Computes the inverse * hyperbolic tangent of each element of the matrix \endcode * * // \n \subsection matrix_operations_atan2 atan2() // // The multi-valued * inverse tangent is available for a pair of dense matrices: * * \code blaze::DynamicMatrix<double> A, B, C; * * C = atan2( A, B ); // Computes the componentwise multi-valued inverse * tangent \endcode * * // \n \subsection matrix_operators_erf erf() / erfc() // // The \c erf() and * \c erfc() functions compute the (complementary) error function of each // * element of a matrix: * * \code blaze::StaticMatrix<double,3UL,3UL> A, B; * * B = erf( A ); // Computes the error function of each element B = erfc( A ); * // Computes the complementary error function of each element \endcode * * // Note that in case of sparse matrices only the non-zero elements are taken * into account! // // // \n \subsection matrix_operations_map map() / * forEach() // // Via the unary and binary \c map() functions it is possible * to execute componentwise custom // operations on matrices. The unary \c * map() function can be used to apply a custom operation // on each element * of a dense or sparse matrix. For instance, the following example * demonstrates // a custom square root computation via a lambda: * * \code blaze::DynamicMatrix<double> A, B; * * B = map( A, []( double d ) { return std::sqrt( d ); } ); \endcode * * // The binary \c map() function can be used to apply an operation pairwise to * the elements of // two dense matrices. 
The following example demonstrates * the merging of two matrices of double // precision values into a matrix of * double precision complex numbers: * * \code blaze::DynamicMatrix<double> real{ { 2.1, -4.2 }, { 1.0, 0.6 } }; * blaze::DynamicMatrix<double> imag{ { 0.3, 1.4 }, { 2.9, -3.4 } }; * * blaze::DynamicMatrix< complex<double> > cplx; * * // Creating the matrix // ( ( 2.1, 0.3) (-4.2, 1.4) ) // ( ( 1.0, * 2.9) ( 0.6, -3.4) ) cplx = map( real, imag, []( double r, double i ){ * return complex( r, i ); } ); \endcode * * // Although the computation can be parallelized it is not vectorized and thus * cannot perform at // peak performance. However, it is also possible to * create vectorized custom operations. See // \ref custom_operations for a * detailed overview of the possibilities of custom operations. // // Please * note that unary custom operations on vectors have been introduced in \b * Blaze 3.0 in // form of the \c forEach() function. With the introduction * of binary custom functions, the // \c forEach() function has been renamed * to \c map(). The \c forEach() function can still be // used (even for * binary custom operations), but the function might be deprecated in future * // releases of \b Blaze. // // // \n \section * matrix_operations_reduction_operations Reduction Operations // <hr> // // * \subsection matrix_operations_reduction_operations_reduce reduce() // // * The \c reduce() function performs either a total reduction, a rowwise * reduction or a columnwise // reduction of the elements of the given dense * matrix or the non-zero elements of the given sparse // matrix. The * following examples demonstrate the total reduction of a dense and sparse * matrix: * * \code blaze::DynamicMatrix<double> A; // ... Resizing and initialization * * const double totalsum1 = reduce( A, blaze::Add() ); const double totalsum2 = * reduce( A, []( double a, double b ){ return a + b; } ); \endcode * * \code blaze::CompressedMatrix<double> A; // ... Resizing and initialization * * const double totalsum1 = reduce( A, blaze::Add() ); const double totalsum2 = * reduce( A, []( double a, double b ){ return a + b; } ); \endcode * * // By specifying \c blaze::columnwise or \c blaze::rowwise the \c reduce() * function performs a // column-wise or row-wise reduction, respectively. In * case \c blaze::columnwise is specified, the // (non-zero) elements of the * matrix are reduced column-wise and the result is a row vector. In // case * \c blaze::rowwise is specified, the (non-zero) elements of the matrix are * reduced row-wise // and the result is a column vector: * * \code blaze::DynamicMatrix<double> A; blaze::CompressedMatrix<double> B; * blaze::DynamicVector<double,rowVector> colsum1, colsum2; // ... Resizing * and initialization * * colsum1 = reduce<columnwise>( A, blaze::Add() ); colsum2 = * reduce<columnwise>( B, []( double a, double b ){ return a + b; } ); * \endcode * * \code blaze::DynamicMatrix<double> A; blaze::CompressedMatrix<double> B; * blaze::DynamicVector<double,columnVector> rowsum1, rowsum2; // ... * Resizing and initialization * * rowsum1 = reduce<rowwise>( A, blaze::Add() ); rowsum2 = reduce<rowwise>( B, * []( double a, double b ){ return a + b; } ); \endcode * * // As demonstrated in the examples it is possible to pass any binary callable * as custom reduction // operation. However, for instance in the case of * lambdas the vectorization of the reduction // operation is compiler * dependent and might not perform at peak performance. 
However, it is also * // possible to create vectorized custom operations. See \ref * custom_operations for a detailed // overview of the possibilities of * custom operations. // // Please note that the evaluation order of the \c * reduce() function is unspecified. Thus the // behavior is * non-deterministic if the given reduction operation is not associative or * not // commutative. Also, the operation is undefined if the given * reduction operation modifies the // values. // // \n \subsection * matrix_operations_reduction_operations_sum sum() // // The \c sum() * function reduces the elements of the given dense matrix or the non-zero * elements // of the given sparse matrix by means of addition: * * \code blaze::DynamicMatrix<int> A{ { 1, 2 }, { 3, 4 } }; * * const int totalsum = sum( A ); // Results in 10 \endcode * * \code blaze::CompressedMatrix<int> A{ { 1, 2 }, { 3, 4 } }; * * const int totalsum = sum( A ); // Results in 10 \endcode * * // By specifying \c blaze::columnwise or \c blaze::rowwise the \c sum() * function performs a // column-wise or row-wise summation, respectively. In * case \c blaze::columnwise is specified, // the (non-zero) elements of the * matrix are summed up column-wise and the result is a row vector. // In * case \c blaze::rowwise is specified, the (non-zero) elements of the matrix * are summed up // row-wise and the result is a column vector: * * \code using blaze::columnwise; * * blaze::DynamicMatrix<int> A{ { 1, 0, 2 }, { 1, 3, 4 } }; * blaze::CompressedMatrix<int> B{ { 1, 0, 2 }, { 1, 3, 4 } }; * blaze::DynamicVector<int,rowVector> colsum1, colsum2; * * colsum1 = sum<columnwise>( A ); // Results in ( 2, 3, 6 ) colsum2 = * sum<columnwise>( B ); // Same result \endcode * * \code using blaze::rowwise; * * blaze::DynamicMatrix<int> A{ { 1, 0, 2 }, { 1, 3, 4 } }; * blaze::CompressedMatrix<int> B{ { 1, 0, 2 }, { 1, 3, 4 } }; * blaze::DynamicVector<int,columnVector> rowsum1, rowsum2; * * rowsum1 = sum<rowwise>( A ); // Results in ( 3, 8 ) rowsum2 = sum<rowwise>( * B ); // Same result \endcode * * // Please note that the evaluation order of the \c sum() function is * unspecified. // // \n \subsection * matrix_operations_reduction_operations_prod prod() // // The \c prod() * function reduces the elements of the given dense matrix or the non-zero * elements // of the given sparse matrix by means of multiplication: * * \code blaze::DynamicMatrix<int> A{ { 1, 2 }, { 3, 4 } }; * * const int totalprod = prod( A ); // Results in 24 \endcode * * \code blaze::CompressedMatrix<int> A{ { 1, 2 }, { 3, 4 } }; * * const int totalprod = prod( A ); // Results in 24 \endcode * * // By specifying \c blaze::columnwise or \c blaze::rowwise the \c prod() * function performs a // column-wise or row-wise multiplication, * respectively. In case \c blaze::columnwise is specified, // the (non-zero) * elements of the matrix are multiplied column-wise and the result is a row * vector. 
// In case \c blaze::rowwise is specified, the (non-zero) elements * of the matrix are multiplied // row-wise and the result is a column * vector: * * \code using blaze::columnwise; * * blaze::DynamicMatrix<int> A{ { 1, 0, 2 }, { 1, 3, 4 } }; * blaze::CompressedMatrix<int> B{ { 1, 0, 2 }, { 1, 3, 4 } }; * blaze::DynamicVector<int,rowVector> colprod1, colprod2; * * colprod1 = prod<columnwise>( A ); // Results in ( 1, 0, 8 ) colprod2 = * prod<columnwise>( B ); // Results in ( 1, 3, 8 ) \endcode * * \code using blaze::rowwise; * * blaze::DynamicMatrix<int> A{ { 1, 0, 2 }, { 1, 3, 4 } }; * blaze::CompressedMatrix<int> B{ { 1, 0, 2 }, { 1, 3, 4 } }; * blaze::DynamicVector<int,columnVector> rowprod1, rowprod2; * * rowprod1 = prod<rowwise>( A ); // Results in ( 0, 12 ) rowprod2 = * prod<rowwise>( B ); // Results in ( 2, 12 ) \endcode * * // Please note that the evaluation order of the \c prod() function is * unspecified. // // \n \subsection * matrix_operations_reduction_operations_min min() // // The unary \c min() * function returns the smallest element of the given dense matrix or the // * smallest non-zero element of the given sparse matrix. This function can * only be used for // element types that support the smaller-than * relationship. In case the given matrix currently // has either 0 rows or 0 * columns, the returned value is the default value (e.g. 0 in case of // * fundamental data types). * * \code blaze::DynamicMatrix<int> A{ { 1, 2 }, { 3, 4 } }; * * const int totalmin = min( A ); // Results in 1 \endcode * * \code blaze::CompressedMatrix<int> A{ { 1, 0 }, { 3, 0 } }; * * const int totalmin = min( A ); // Results in 1 \endcode * * // \note In case the sparse matrix is not completely filled, the implicit * zero elements are NOT // taken into account. In the previous example the * compressed matrix has only 2 non-zero elements. // However, the minimum of * this matrix is 1. // // By specifying \c blaze::columnwise or \c * blaze::rowwise the \c min() function determines the // smallest (non-zero) * element in each row or column, respectively. In case \c blaze::columnwise * // is specified, the smallest (non-zero) element of each column is * determined and the result is // a row vector. In case \c blaze::rowwise is * specified, the smallest (non-zero) element of each // row is determined * and the result is a column vector. * * \code using blaze::columnwise; * * blaze::DynamicMatrix<int> A{ { 1, 0, 2 }, { 1, 3, 4 } }; * blaze::CompressedMatrix<int> B{ { 1, 0, 2 }, { 1, 3, 4 } }; * blaze::DynamicVector<int,rowVector> colmin1, colmin2; * * colmin1 = min<columnwise>( A ); // Results in ( 1, 0, 2 ) colmin2 = * min<columnwise>( B ); // Results in ( 1, 3, 2 ) \endcode * * \code using blaze::rowwise; * * blaze::DynamicMatrix<int> A{ { 1, 0, 2 }, { 1, 3, 4 } }; * blaze::CompressedMatrix<int> B{ { 1, 0, 2 }, { 1, 3, 4 } }; * blaze::DynamicVector<int,columnVector> rowmin1, rowmin2; * * rowmin1 = min<rowwise>( A ); // Results in ( 0, 1 ) rowmin2 = min<rowwise>( * B ); // Results in ( 1, 1 ) \endcode * * // \note In case the sparse matrix is not completely filled, the implicit * zero elements are NOT // taken into account. // // \n \subsection * matrix_operations_reduction_operations_max max() // // The unary \c max() * function returns the largest element of the given dense matrix or the // * largest non-zero element of the given sparse matrix. This function can * only be used for // element types that support the smaller-than * relationship. 
In case the given matrix currently // has either 0 rows or 0 * columns, the returned value is the default value (e.g. 0 in case of // * fundamental data types). * * \code blaze::DynamicMatrix<int> A{ { 1, 2 }, { 3, 4 } }; * * const int totalmax = max( A ); // Results in 4 \endcode * * \code blaze::CompressedMatrix<int> A{ { -1, 0 }, { -3, 0 } }; * * const int totalmax = max( A ); // Results in -1 \endcode * * // \note In case the sparse matrix is not completely filled, the implicit * zero elements are NOT // taken into account. In the previous example the * compressed matrix has only 2 non-zero elements. // However, the maximum of * this matrix is -1. // // By specifying \c blaze::columnwise or \c * blaze::rowwise the \c max() function determines the // largest (non-zero) * element in each row or column, respectively. In case \c blaze::columnwise * // is specified, the largest (non-zero) element of each column is * determined and the result is // a row vector. In case \c blaze::rowwise is * specified, the largest (non-zero) element of each // row is determined and * the result is a column vector. * * \code using blaze::columnwise; * * blaze::DynamicMatrix<int> A{ { 1, 0, 2 }, { 1, 3, 4 } }; * blaze::CompressedMatrix<int> B{ { -1, 0, -2 }, { -1, -3, -4 } }; * blaze::DynamicVector<int,rowVector> colmax1, colmax2; * * colmax1 = max<columnwise>( A ); // Results in ( 1, 3, 4 ) colmax2 = * max<columnwise>( B ); // Results in ( -1, -3, -2 ) \endcode * * \code using blaze::rowwise; * * blaze::DynamicMatrix<int> A{ { 1, 0, 2 }, { 1, 3, 4 } }; * blaze::CompressedMatrix<int> B{ { -1, 0, -2 }, { -1, -3, -4 } }; * blaze::DynamicVector<int,columnVector> rowmax1, rowmax2; * * rowmax1 = max<rowwise>( A ); // Results in ( 2, 4 ) rowmax2 = max<rowwise>( * B ); // Results in ( -1, -1 ) \endcode * * // \note In case the sparse matrix is not completely filled, the implicit * zero elements are NOT // taken into account. // // // \n \section * matrix_operations_norms Norms // <hr> // // \subsection * matrix_operations_norms_norm norm() // // The \c norm() function computes * the L2 norm of the given dense or sparse matrix: * * \code blaze::DynamicMatrix<double> A; // ... Resizing and initialization * const double l2 = norm( A ); \endcode * * // \n \subsection matrix_operations_norms_sqrnorm sqrNorm() // // The \c * sqrNorm() function computes the squared L2 norm of the given dense or * sparse matrix: * * \code blaze::DynamicMatrix<double> A; // ... Resizing and initialization * const double l2 = sqrNorm( A ); \endcode * * // \n \subsection matrix_operations_norms_l1norm l1Norm() // // The \c * l1Norm() function computes the L1 norm of the given dense or * sparse matrix: * * \code blaze::DynamicMatrix<double> A; // ... Resizing and initialization * const double l1 = l1Norm( A ); \endcode * * // \n \subsection matrix_operations_norms_l2norm l2Norm() // // The \c * l2Norm() function computes the L2 norm of the given dense or * sparse matrix: * * \code blaze::DynamicMatrix<double> A; // ... Resizing and initialization * const double l2 = l2Norm( A ); \endcode * * // \n \subsection matrix_operations_norms_l3norm l3Norm() // // The \c * l3Norm() function computes the L3 norm of the given dense or * sparse matrix: * * \code blaze::DynamicMatrix<double> A; // ... 
Resizing and initialization * const double l3 = l3Norm( A ); \endcode * * // \n \subsection matrix_operations_norms_l4norm l4Norm() // // The \c * l4Norm() function computes the L4 norm of the given dense or * sparse matrix: * * \code blaze::DynamicMatrix<double> A; // ... Resizing and initialization * const double l4 = l4Norm( A ); \endcode * * // \n \subsection matrix_operations_norms_lpnorm lpNorm() // // The \c * lpNorm() function computes the general Lp norm of the given dense or * sparse matrix, // where the norm is specified by either a compile time or * a runtime argument: * * \code blaze::DynamicMatrix<double> A; // ... Resizing and initialization * const double lp1 = lpNorm<2>( A ); // Compile time argument const * double lp2 = lpNorm( A, 2.3 ); // Runtime argument \endcode * * // \n \subsection matrix_operations_norms_maxnorm maxNorm() // // The \c * maxNorm() function computes the maximum norm of the given dense or sparse * matrix: * * \code blaze::DynamicMatrix<double> A; // ... Resizing and initialization * const double max = maxNorm( A ); \endcode * * // \n \section matrix_operations_declaration_operations Declaration * Operations // <hr> // // \subsection matrix_operations_declsym declsym() * // // The \c declsym() operation can be used to explicitly declare any * matrix or matrix expression // as symmetric: * * \code blaze::DynamicMatrix<double> A, B; // ... Resizing and initialization * * B = declsym( A ); \endcode * * // Any matrix or matrix expression that has been declared as symmetric via \c * declsym() will // gain all the benefits of a symmetric matrix, which range * from reduced runtime checking to // a considerable speed-up in * computations: * * \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; * * DynamicMatrix<double> A, B, C; SymmetricMatrix< DynamicMatrix<double> > S; // * ... Resizing and initialization * * isSymmetric( declsym( A ) ); // Will always return true without runtime * effort * * S = declsym( A ); // Omit any runtime check for symmetry * * C = declsym( A * B ); // Declare the result of the matrix multiplication as * symmetric, // i.e. perform an optimized matrix multiplication \endcode * * // \warning The \c declsym() operation has the semantics of a cast: The * caller is completely // responsible and the system trusts the given * information. Declaring a non-symmetric matrix or // matrix expression as * symmetric via the \c declsym() operation leads to undefined behavior // * (which can be violated invariants or wrong computation results)! // // // * \n \subsection matrix_operations_declherm declherm() // // The \c * declherm() operation can be used to explicitly declare any matrix or * matrix expression // as Hermitian: * * \code blaze::DynamicMatrix<double> A, B; // ... Resizing and initialization * * B = declherm( A ); \endcode * * // Any matrix or matrix expression that has been declared as Hermitian via \c * declherm() will // gain all the benefits of an Hermitian matrix, which * range from reduced runtime checking to // a considerable speed-up in * computations: * * \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; * * DynamicMatrix<double> A, B, C; HermitianMatrix< DynamicMatrix<double> > S; // * ... Resizing and initialization * * isHermitian( declherm( A ) ); // Will always return true without runtime * effort * * S = declherm( A ); // Omit any runtime check for Hermitian symmetry * * C = declherm( A * B ); // Declare the result of the matrix multiplication as * Hermitian, // i.e. 
perform an optimized matrix multiplication \endcode * * // \warning The \c declherm() operation has the semantics of a cast: The * caller is completely // responsible and the system trusts the given * information. Declaring a non-Hermitian matrix or // matrix expression as * Hermitian via the \c declherm() operation leads to undefined behavior // * (which can be violated invariants or wrong computation results)! // // // * \n \subsection matrix_operations_decllow decllow() // // The \c decllow() * operation can be used to explicitly declare any matrix or matrix * expression // as lower triangular: * * \code blaze::DynamicMatrix<double> A, B; // ... Resizing and initialization * * B = decllow( A ); \endcode * * // Any matrix or matrix expression that has been declared as lower triangular * via \c decllow() // will gain all the benefits of a lower triangular * matrix, which range from reduced runtime // checking to a considerable * speed-up in computations: * * \code using blaze::DynamicMatrix; using blaze::LowerMatrix; * * DynamicMatrix<double> A, B, C; LowerMatrix< DynamicMatrix<double> > L; // ... * Resizing and initialization * * isLower( decllow( A ) ); // Will always return true without runtime effort * * L = decllow( A ); // Omit any runtime check for A being a lower matrix * * C = decllow( A * B ); // Declare the result of the matrix multiplication as * lower triangular, // i.e. perform an optimized matrix multiplication * \endcode * * // \warning The \c decllow() operation has the semantics of a cast: The * caller is completely // responsible and the system trusts the given * information. Declaring a non-lower matrix or // matrix expression as lower * triangular via the \c decllow() operation leads to undefined // behavior * (which can be violated invariants or wrong computation results)! // // // * \n \subsection matrix_operations_declupp declupp() // // The \c declupp() * operation can be used to explicitly declare any matrix or matrix * expression // as upper triangular: * * \code blaze::DynamicMatrix<double> A, B; // ... Resizing and initialization * * B = declupp( A ); \endcode * * // Any matrix or matrix expression that has been declared as upper triangular * via \c declupp() // will gain all the benefits of a upper triangular * matrix, which range from reduced runtime // checking to a considerable * speed-up in computations: * * \code using blaze::DynamicMatrix; using blaze::UpperMatrix; * * DynamicMatrix<double> A, B, C; UpperMatrix< DynamicMatrix<double> > U; // ... * Resizing and initialization * * isUpper( declupp( A ) ); // Will always return true without runtime effort * * U = declupp( A ); // Omit any runtime check for A being a upper matrix * * C = declupp( A * B ); // Declare the result of the matrix multiplication as * upper triangular, // i.e. perform an optimized matrix multiplication * \endcode * * // \warning The \c declupp() operation has the semantics of a cast: The * caller is completely // responsible and the system trusts the given * information. Declaring a non-upper matrix or // matrix expression as upper * triangular via the \c declupp() operation leads to undefined // behavior * (which can be violated invariants or wrong computation results)! // // // * \n \subsection matrix_operations_decldiag decldiag() // // The \c * decldiag() operation can be used to explicitly declare any matrix or * matrix expression // as diagonal: * * \code blaze::DynamicMatrix<double> A, B; // ... 
Resizing and initialization * * B = decldiag( A ); \endcode * * // Any matrix or matrix expression that has been declared as diagonal via \c * decldiag() will // gain all the benefits of a diagonal matrix, which range * from reduced runtime checking to // a considerable speed-up in * computations: * * \code using blaze::DynamicMatrix; using blaze::DiagonalMatrix; * * DynamicMatrix<double> A, B, C; DiagonalMatrix< DynamicMatrix<double> > D; // * ... Resizing and initialization * * isDiagonal( decldiag( A ) ); // Will always return true without runtime * effort * * D = decldiag( A ); // Omit any runtime check for A being a diagonal matrix * * C = decldiag( A * B ); // Declare the result of the matrix multiplication as * diagonal, // i.e. perform an optimized matrix multiplication \endcode * * // \warning The \c decldiag() operation has the semantics of a cast: The * caller is completely // responsible and the system trusts the given * information. Declaring a non-diagonal matrix // or matrix expression as * diagonal via the \c decldiag() operation leads to undefined // behavior * (which can be violated invariants or wrong computation results)! // // // * \n \subsection matrix_operations_declid declid() // // The \c declid() * operation can be used to explicitly declare any matrix or matrix * expression // as identity matrix: * * \code blaze::DynamicMatrix<double> A, B; // ... Resizing and initialization * * B = declid( A ); \endcode * * // Any matrix or matrix expression that has been declared as identity matrix * via \c declid() will // gain all the benefits of an identity matrix, which * range from reduced runtime checking to a // considerable speed-up in * computations: * * \code using blaze::DynamicMatrix; using blaze::DiagonalMatrix; * * DynamicMatrix<double> A, B, C; DiagonalMatrix< DynamicMatrix<double> > D; // * ... Resizing and initialization * * isIdentity( declid( A ) ); // Will always return true without runtime effort * * D = declid( A ); // Omit any runtime check for A being a diagonal matrix * * C = declid( A ) * B; // Declare the left operand of the matrix * multiplication as an // identity matrix, i.e. perform an optimized matrix * multiplication \endcode * * // \warning The \c declid() operation has the semantics of a cast: The caller * is completely // responsible and the system trusts the given information. * Declaring a non-identity matrix // or matrix expression as identity matrix * via the \c declid() operation leads to undefined // behavior (which can be * violated invariants or wrong computation results)! // // // \n \section * matrix_operations_matrix_inversion Matrix Inversion // <hr> // // The * inverse of a square dense matrix can be computed via the \c inv() * function: * * \code blaze::DynamicMatrix<float,blaze::rowMajor> A, B; // ... Resizing and * initialization B = inv( A ); // Compute the inverse of A \endcode * * // Alternatively, an in-place inversion of a dense matrix can be performed * via the \c invert() // function: * * \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and * initialization invert( A ); // In-place matrix inversion \endcode * * // Both the \c inv() and the \c invert() functions will automatically select * the most suited matrix // inversion algorithm depending on the size and * type of the given matrix. For small matrices of // up to 6x6, both * functions use manually optimized kernels for maximum performance. 
For * matrices // larger than 6x6 the inversion is performed by means of the * most suited matrix decomposition // method: In case of a general matrix * the LU decomposition is used, for symmetric matrices the // LDLT * decomposition is applied, for Hermitian matrices the LDLH decomposition is * performed, and // for triangular matrices the inverse is computed via a * forward or back substitution. // // In case the type of the matrix does * not provide additional compile time information about its // structure * (symmetric, lower, upper, diagonal, ...), the information can be provided * manually // when calling the \c invert() function: * * \code using blaze::asGeneral; using blaze::asSymmetric; using * blaze::asHermitian; using blaze::asLower; using blaze::asUniLower; using * blaze::asUpper; using blaze::asUniUpper; using blaze::asDiagonal; * * invert<asGeneral> ( A ); // In-place inversion of a general matrix * invert<asSymmetric>( A ); // In-place inversion of a symmetric matrix * invert<asHermitian>( A ); // In-place inversion of a Hermitian matrix * invert<asLower> ( A ); // In-place inversion of a lower triangular * matrix invert<asUniLower> ( A ); // In-place inversion of a lower * unitriangular matrix invert<asUpper> ( A ); // In-place inversion of a * upper triangular matrix invert<asUniUpper> ( A ); // In-place inversion * of a upper unitriangular matrix invert<asDiagonal> ( A ); // In-place * inversion of a diagonal matrix \endcode * * // Alternatively, via the \c invert() function it is possible to explicitly * specify the inversion // algorithm: * * \code using blaze::byLU; using blaze::byLDLT; using blaze::byLDLH; using * blaze::byLLH; * * // In-place inversion of a general matrix by means of an LU decomposition * invert<byLU>( A ); * * // In-place inversion of a symmetric indefinite matrix by means of a * Bunch-Kaufman decomposition invert<byLDLT>( A ); * * // In-place inversion of a Hermitian indefinite matrix by means of a * Bunch-Kaufman decomposition invert<byLDLH>( A ); * * // In-place inversion of a positive definite matrix by means of a Cholesky * decomposition invert<byLLH>( A ); \endcode * * // Whereas the inversion by means of an LU decomposition works for every * general square matrix, // the inversion by LDLT only works for symmetric * indefinite matrices, the inversion by LDLH is // restricted to Hermitian * indefinite matrices and the Cholesky decomposition (LLH) only works // for * Hermitian positive definite matrices. Please note that it is in the * responsibility of the // function caller to guarantee that the selected * algorithm is suited for the given matrix. In // case this precondition is * violated the result can be wrong and might not represent the inverse // of * the given matrix! // // For both the \c inv() and \c invert() function the * matrix inversion fails if ... // // - ... the given matrix is not a * square matrix; // - ... the given matrix is singular and not invertible. * // // In all failure cases either a compilation error is created if the * failure can be predicted at // compile time or a \c std::invalid_argument * exception is thrown. // // \note The matrix inversion can only be used for * dense matrices with \c float, \c double, // \c complex<float> or \c * complex<double> element type. The attempt to call the function with // * matrices of any other element type or with a sparse matrix results in a * compile time error! // // \note The functions invert the dense matrix by * means of LAPACK kernels. 
Thus the functions can // only be used if a * fitting LAPACK library is available and linked to the executable. * Otherwise // a linker error will be created. // // \note It is not * possible to use any kind of view on the expression object returned by the * // \c inv() function. Also, it is not possible to access individual * elements via the function call // operator on the expression object: * * \code row( inv( A ), 2UL ); // Compilation error: Views cannot be used on an * inv() expression! inv( A )(1,2); // Compilation error: It is not * possible to access individual elements! \endcode * * // \note The inversion functions do not provide any exception safety * guarantee, i.e. in case an // exception is thrown the matrix may already * have been modified. // // // \n \section matrix_operations_decomposition * Matrix Decomposition // <hr> // // \note All decomposition functions can * only be used for dense matrices with \c float, \c double, // \c * complex<float> or \c complex<double> element type. The attempt to call the * function with // matrices of any other element type or with a sparse * matrix results in a compile time error! // // \note The functions * decompose a dense matrix by means of LAPACK kernels. Thus the functions * can // only be used if a fitting LAPACK library is available and linked to * the executable. Otherwise // a linker error will be created. // // * \subsection matrix_operations_decomposition_lu LU Decomposition // // The * LU decomposition of a dense matrix can be computed via the \c lu() * function: * * \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and * initialization * * blaze::DynamicMatrix<double,blaze::rowMajor> L, U, P; * * lu( A, L, U, P ); // LU decomposition of a row-major matrix * * assert( A == L * U * P ); \endcode * * \code blaze::DynamicMatrix<double,blaze::columnMajor> A; // ... Resizing and * initialization * * blaze::DynamicMatrix<double,blaze::columnMajor> L, U, P; * * lu( A, L, U, P ); // LU decomposition of a column-major matrix * * assert( A == P * L * U ); \endcode * * // The function works for both \c rowMajor and \c columnMajor matrices. Note, * however, that the // three matrices \c A, \c L and \c U are required to * have the same storage order. Also, please // note that the way the * permutation matrix \c P needs to be applied differs between row-major and * // column-major matrices, since the algorithm uses column interchanges for * row-major matrices and // row interchanges for column-major matrices. // * // Furthermore, \c lu() can be used with adaptors. For instance, the * following example demonstrates // the LU decomposition of a symmetric * matrix into a lower and upper triangular matrix: * * \code blaze::SymmetricMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> * > A; // ... Resizing and initialization * * blaze::LowerMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > L; * blaze::UpperMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > U; * blaze::DynamicMatrix<double,blaze::columnMajor> P; * * lu( A, L, U, P ); // LU decomposition of A \endcode * * // \n \subsection matrix_operations_decomposition_llh Cholesky Decomposition * // // The Cholesky (LLH) decomposition of a dense matrix can be computed * via the \c llh() function: * * \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... 
Resizing and * initialization * * blaze::DynamicMatrix<double,blaze::rowMajor> L; * * llh( A, L ); // LLH decomposition of a row-major matrix * * assert( A == L * ctrans( L ) ); \endcode * * // The function works for both \c rowMajor and \c columnMajor matrices and * the two matrices \c A // and \c L can have any storage order. // // * Furthermore, \c llh() can be used with adaptors. For instance, the * following example demonstrates // the LLH decomposition of a symmetric * matrix into a lower triangular matrix: * * \code blaze::SymmetricMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> * > A; // ... Resizing and initialization * * blaze::LowerMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > L; * * llh( A, L ); // Cholesky decomposition of A \endcode * * // \n \subsection matrix_operations_decomposition_qr QR Decomposition // // * The QR decomposition of a dense matrix can be computed via the \c qr() * function: * * \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and * initialization * * blaze::DynamicMatrix<double,blaze::columnMajor> Q; * blaze::DynamicMatrix<double,blaze::rowMajor> R; * * qr( A, Q, R ); // QR decomposition of a row-major matrix * * assert( A == Q * R ); \endcode * * // The function works for both \c rowMajor and \c columnMajor matrices and * the three matrices // \c A, \c Q and \c R can have any storage order. // * // Furthermore, \c qr() can be used with adaptors. For instance, the * following example demonstrates // the QR decomposition of a symmetric * matrix into a general matrix and an upper triangular matrix: * * \code blaze::SymmetricMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> * > A; // ... Resizing and initialization * * blaze::DynamicMatrix<double,blaze::rowMajor> Q; blaze::UpperMatrix< * blaze::DynamicMatrix<double,blaze::columnMajor> > R; * * qr( A, Q, R ); // QR decomposition of A \endcode * * // \n \subsection matrix_operations_decomposition_rq RQ Decomposition // // * Similar to the QR decomposition, the RQ decomposition of a dense matrix * can be computed via // the \c rq() function: * * \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and * initialization * * blaze::DynamicMatrix<double,blaze::rowMajor> R; * blaze::DynamicMatrix<double,blaze::columnMajor> Q; * * rq( A, R, Q ); // RQ decomposition of a row-major matrix * * assert( A == R * Q ); \endcode * * // The function works for both \c rowMajor and \c columnMajor matrices and * the three matrices // \c A, \c R and \c Q can have any storage order. // * // Also the \c rq() function can be used in combination with matrix * adaptors. For instance, the // following example demonstrates the RQ * decomposition of an Hermitian matrix into a general // matrix and an upper * triangular matrix: * * \code blaze::HermitianMatrix< * blaze::DynamicMatrix<complex<double>,blaze::columnMajor> > A; // ... * Resizing and initialization * * blaze::UpperMatrix< blaze::DynamicMatrix<complex<double>,blaze::columnMajor> * > R; blaze::DynamicMatrix<complex<double>,blaze::rowMajor> Q; * * rq( A, R, Q ); // RQ decomposition of A \endcode * * // \n \subsection matrix_operations_decomposition_ql QL Decomposition // // * The QL decomposition of a dense matrix can be computed via the \c ql() * function: * * \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... 
Resizing and * initialization * * blaze::DynamicMatrix<double,blaze::rowMajor> Q; * blaze::DynamicMatrix<double,blaze::columnMajor> L; * * ql( A, Q, L ); // QL decomposition of a row-major matrix * * assert( A == Q * L ); \endcode * * // The function works for both \c rowMajor and \c columnMajor matrices and * the three matrices // \c A, \c Q and \c L can have any storage order. // * // Also the \c ql() function can be used in combination with matrix * adaptors. For instance, the // following example demonstrates the QL * decomposition of a symmetric matrix into a general // matrix and a lower * triangular matrix: * * \code blaze::SymmetricMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> * > A; // ... Resizing and initialization * * blaze::DynamicMatrix<double,blaze::rowMajor> Q; blaze::LowerMatrix< * blaze::DynamicMatrix<double,blaze::columnMajor> > L; * * ql( A, Q, L ); // QL decomposition of A \endcode * * // \n \subsection matrix_operations_decomposition_lq LQ Decomposition // // * The LQ decomposition of a dense matrix can be computed via the \c lq() * function: * * \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and * initialization * * blaze::DynamicMatrix<double,blaze::rowMajor> L; * blaze::DynamicMatrix<double,blaze::columnMajor> Q; * * lq( A, L, Q ); // LQ decomposition of a row-major matrix * * assert( A == L * Q ); \endcode * * // The function works for both \c rowMajor and \c columnMajor matrices and * the three matrices // \c A, \c L and \c Q can have any storage order. // * // Furthermore, \c lq() can be used with adaptors. For instance, the * following example demonstrates // the LQ decomposition of an Hermitian * matrix into a lower triangular matrix and a general matrix: * * \code blaze::HermitianMatrix< * blaze::DynamicMatrix<complex<double>,blaze::columnMajor> > A; // ... * Resizing and initialization * * blaze::LowerMatrix< blaze::DynamicMatrix<complex<double>,blaze::columnMajor> * > L; blaze::DynamicMatrix<complex<double>,blaze::rowMajor> Q; * * lq( A, L, Q ); // LQ decomposition of A \endcode * * // \n \section matrix_operations_eigenvalues Eigenvalues/Eigenvectors // <hr> * // // The eigenvalues and eigenvectors of a dense matrix can be computed * via the \c eigen() functions: * * \code namespace blaze { * * template< typename MT, bool SO, typename VT, bool TF > void eigen( const * DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w ); * * template< typename MT1, bool SO1, typename VT, bool TF, typename MT2, bool * SO2 > void eigen( const DenseMatrix<MT1,SO1>& A, DenseVector<VT,TF>& w, * DenseMatrix<MT2,SO2>& V ); * * } // namespace blaze \endcode * * // The first function computes only the eigenvalues of the given \a n-by-\a n * matrix, the second // function additionally computes the eigenvectors. The * eigenvalues are returned in the given vector // \a w and the eigenvectors * are returned in the given matrix \a V, which are both resized to the // * correct dimensions (if possible and necessary). // // Depending on the * given matrix type, the resulting eigenvalues are either of floating point * // or complex type: In case the given matrix is either a compile time * symmetric matrix with // floating point elements or an Hermitian matrix * with complex elements, the resulting eigenvalues // will be of floating * point type and therefore the elements of the given eigenvalue vector are * // expected to be of floating point type. In all other cases they are * expected to be of complex // type. 
Please note that for complex * eigenvalues no order of eigenvalues can be assumed, except // that complex * conjugate pairs of eigenvalues appear consecutively with the eigenvalue * having // the positive imaginary part first. // // In case \a A is a * row-major matrix, the left eigenvectors are returned in the rows of \a V, * // in case \a A is a column-major matrix, the right eigenvectors are * returned in the columns of // \a V. In case the given matrix is a compile * time symmetric matrix with floating point elements, // the resulting * eigenvectors will be of floating point type and therefore the elements of * the // given eigenvector matrix are expected to be of floating point type. * In all other cases they // are expected to be of complex type. // // The * following examples give an impression of the computation of eigenvalues * and eigenvectors // for a general, a symmetric, and an Hermitian matrix: * * \code using blaze::DynamicMatrix; using blaze::DynamicVector; using * blaze::rowMajor; using blaze::columnVector; * * DynamicMatrix<double,rowMajor> A( 5UL, 5UL ); // The general matrix A // ... * Initialization * * DynamicVector<complex<double>,columnVector> w( 5UL ); // The vector for the * complex eigenvalues DynamicMatrix<complex<double>,rowMajor> V( 5UL, 5UL ); * // The matrix for the left eigenvectors * * eigen( A, w, V ); \endcode * * \code using blaze::SymmetricMatrix; using blaze::DynamicMatrix; using * blaze::DynamicVector; using blaze::rowMajor; using blaze::columnVector; * * SymmetricMatrix< DynamicMatrix<double,rowMajor> > A( 5UL, 5UL ); // The * symmetric matrix A // ... Initialization * * DynamicVector<double,columnVector> w( 5UL ); // The vector for the real * eigenvalues DynamicMatrix<double,rowMajor> V( 5UL, 5UL ); // The * matrix for the left eigenvectors * * eigen( A, w, V ); \endcode * * \code using blaze::HermitianMatrix; using blaze::DynamicMatrix; using * blaze::DynamicVector; using blaze::rowMajor; using blaze::columnVector; * * HermitianMatrix< DynamicMatrix<complex<double>,rowMajor> > A( 5UL, 5UL ); // * The Hermitian matrix A // ... Initialization * * DynamicVector<double,columnVector> w( 5UL ); // The vector for the * real eigenvalues DynamicMatrix<complex<double>,rowMajor> V( 5UL, 5UL ); * // The matrix for the left eigenvectors * * eigen( A, w, V ); \endcode * * // The functions fail if ... // // - ... the given matrix \a A is not a * square matrix; // - ... the given vector \a w is a fixed size vector and * the size doesn't match; // - ... the given matrix \a V is a fixed size * matrix and the dimensions don't match; // - ... the eigenvalue * computation fails. // // In all failure cases an exception is thrown. // * // \note All \c eigen() functions can only be used for dense matrices with * \c float, \c double, // \c complex<float> or \c complex<double> element * type. The attempt to call the function with // matrices of any other * element type or with a sparse matrix results in a compile time error! // * // \note The functions compute the eigenvalues and/or eigenvectors of a * dense matrix by means of // LAPACK kernels. Thus the functions can only be * used if a fitting LAPACK library is available // and linked to the * executable. Otherwise a linker error will be created. 
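For readers who want to try this end-to-end, the symmetric case above can be condensed into the following minimal, self-contained sketch; it is merely illustrative (the matrix values are arbitrary) and assumes that Blaze and a fitting LAPACK library are installed and linked:

\code
#include <blaze/Math.h>
#include <iostream>

int main()
{
   using blaze::SymmetricMatrix;
   using blaze::DynamicMatrix;
   using blaze::DynamicVector;
   using blaze::rowMajor;
   using blaze::columnVector;

   // Small symmetric matrix with arbitrary example values
   SymmetricMatrix< DynamicMatrix<double,rowMajor> > A( 3UL );
   A(0,0) = 2.0; A(0,1) = 1.0; A(1,1) = 2.0; A(2,2) = 5.0;

   DynamicVector<double,columnVector> w( 3UL );      // The real eigenvalues
   DynamicMatrix<double,rowMajor>     V( 3UL, 3UL ); // The eigenvectors (in the rows, since A is row-major)

   try {
      eigen( A, w, V );                              // LAPACK-backed eigenvalue computation
      std::cout << "Eigenvalues:\n" << w << "\n";
   }
   catch( const std::exception& ex ) {
      std::cerr << "Eigenvalue computation failed: " << ex.what() << "\n";
   }

   return 0;
}
\endcode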
// // // \n \section * matrix_operations_singularvalues Singular Values/Singular Vectors // <hr> * // // The singular value decomposition (SVD) of a dense matrix can be * computed via the \c svd() // functions: * * \code namespace blaze { * * template< typename MT, bool SO, typename VT, bool TF > void svd( const * DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& s ); * * template< typename MT1, bool SO, typename VT, bool TF, typename MT2, typename * MT3 > void svd( const DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, * DenseVector<VT,TF>& s, DenseMatrix<MT3,SO>& V ); * * template< typename MT, bool SO, typename VT, bool TF, typename ST > size_t * svd( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& s, ST low, ST upp ); * * template< typename MT1, bool SO, typename VT, bool TF, typename MT2, typename * MT3, typename ST > size_t svd( const DenseMatrix<MT1,SO>& A, * DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, DenseMatrix<MT3,SO>& V, ST * low, ST upp ); * * } // namespace blaze \endcode * * // The first and third function compute only singular values of the given * general \a m-by-\a n // matrix, the second and fourth function * additionally compute singular vectors. The resulting // singular values * are returned in the given vector \a s, the left singular vectors are * returned // in the given matrix \a U, and the right singular vectors are * returned in the matrix \a V. \a s, // \a U, and \a V are resized to the * correct dimensions (if possible and necessary). // // The third and fourth * function allow for the specification of a subset of singular values and/or * // vectors. The number of singular values and vectors to be computed is * specified by the lower // bound \a low and the upper bound \a upp, which * either form an integral or a floating point // range. // // In case \a low * and \a upp form are of integral type, the function computes all singular * values // in the index range \f$[low..upp]\f$. The \a num resulting real * and non-negative singular values // are stored in descending order in the * given vector \a s, which is either resized (if possible) // or expected to * be a \a num-dimensional vector. The resulting left singular vectors are * stored // in the given matrix \a U, which is either resized (if possible) * or expected to be a // \a m-by-\a num matrix. The resulting right singular * vectors are stored in the given matrix \a V, // which is either resized * (if possible) or expected to be a \a num-by-\a n matrix. // // In case \a * low and \a upp are of floating point type, the function computes all * singular values // in the half-open interval \f$(low..upp]\f$. The * resulting real and non-negative singular values // are stored in * descending order in the given vector \a s, which is either resized (if * possible) // or expected to be a min(\a m,\a n)-dimensional vector. The * resulting left singular vectors are // stored in the given matrix \a U, * which is either resized (if possible) or expected to be a // \a * m-by-min(\a m,\a n) matrix. The resulting right singular vectors are * stored in the given // matrix \a V, which is either resized (if possible) * or expected to be a min(\a m,\a n)-by-\a n // matrix. // // The functions * fail if ... // // - ... the given matrix \a U is a fixed size matrix and * the dimensions don't match; // - ... the given vector \a s is a fixed * size vector and the size doesn't match; // - ... the given matrix \a V is * a fixed size matrix and the dimensions don't match; // - ... 
the given * scalar values don't form a proper range; // - ... the singular value * decomposition fails. // // In all failure cases an exception is thrown. // * // Examples: * * \code using blaze::DynamicMatrix; using blaze::DynamicVector; using * blaze::rowMajor; using blaze::columnVector; * * DynamicMatrix<double,rowMajor> A( 5UL, 8UL ); // The general matrix A // * ... Initialization * * DynamicMatrix<double,rowMajor> U; // The matrix for the left singular * vectors DynamicVector<double,columnVector> s; // The vector for the * singular values DynamicMatrix<double,rowMajor> V; // The matrix for * the right singular vectors * * svd( A, U, s, V ); \endcode * * \code using blaze::DynamicMatrix; using blaze::DynamicVector; using * blaze::rowMajor; using blaze::columnVector; * * DynamicMatrix<complex<double>,rowMajor> A( 5UL, 8UL ); // The general * matrix A // ... Initialization * * DynamicMatrix<complex<double>,rowMajor> U; // The matrix for the left * singular vectors DynamicVector<double,columnVector> s; // The vector * for the singular values DynamicMatrix<complex<double>,rowMajor> V; // The * matrix for the right singular vectors * * svd( A, U, s, V, 0, 2 ); \endcode * * // \note All \c svd() functions can only be used for dense matrices with \c * float, \c double, // \c complex<float> or \c complex<double> element type. * The attempt to call the function with // matrices of any other element * type or with a sparse matrix results in a compile time error! // // \note * The functions compute the singular values and/or singular vectors of a * dense matrix by // means of LAPACK kernels. Thus the functions can only be * used if a fitting LAPACK library is // available and linked to the * executable. Otherwise a linker error will be created. // // // \n * Previous: \ref matrix_types &nbsp; &nbsp; Next: \ref adaptors */ //************************************************************************************************* //**Adaptors *************************************************************************************** /* * !\page adaptors Adaptors // // \tableofcontents // // // \section * adaptors_general General Concepts // <hr> // // Adaptors act as wrappers * around the general \ref matrix_types. They adapt the interface of the // * matrices such that certain invariants are preserved. Due to this adaptors * can provide a compile // time guarantee of certain properties, which can * be exploited for optimized performance. 
// // The \b Blaze library * provides a total of 9 different adaptors: // // <ul> // <li> \ref * adaptors_symmetric_matrices </li> // <li> \ref * adaptors_hermitian_matrices </li> // <li> \ref * adaptors_triangular_matrices // <ul> // <li> \ref * adaptors_triangular_matrices "Lower Triangular Matrices" // * <ul> // <li> \ref adaptors_triangular_matrices_lowermatrix * </li> // <li> \ref * adaptors_triangular_matrices_unilowermatrix </li> // <li> * \ref adaptors_triangular_matrices_strictlylowermatrix </li> // * </ul> // </li> // <li> \ref adaptors_triangular_matrices * "Upper Triangular Matrices" // <ul> // <li> * \ref adaptors_triangular_matrices_uppermatrix </li> // <li> * \ref adaptors_triangular_matrices_uniuppermatrix </li> // * <li> \ref adaptors_triangular_matrices_strictlyuppermatrix </li> // * </ul> // </li> // <li> \ref adaptors_triangular_matrices * "Diagonal Matrices" // <ul> // <li> \ref * adaptors_triangular_matrices_diagonalmatrix </li> // </ul> // * </li> // </ul> // </li> // </ul> // // In combination with the * general matrix types, \b Blaze provides a total of 40 different matrix // * types that make it possible to exactly adapt the type of matrix to every * specific problem. // // // \n \section adaptors_examples Examples // <hr> * // // The following code examples give an impression on the use of * adaptors. The first example shows // the multiplication between two lower * matrices: * * \code using blaze::DynamicMatrix; using blaze::LowerMatrix; using * blaze::rowMajor; using blaze::columnMajor; * * LowerMatrix< DynamicMatrix<double,rowMajor> > A; LowerMatrix< * DynamicMatrix<double,columnMajor> > B; DynamicMatrix<double,columnMajor> * C; * * // ... Resizing and initialization * * C = A * B; \endcode * * // When multiplying two matrices, at least one of which is triangular, \b * Blaze can exploit the // fact that either the lower or upper part of the * matrix contains only default elements and // restrict the algorithm to the * non-zero elements. Thus the adaptor provides a significant // performance * advantage in comparison to a general matrix multiplication, especially for * large // matrices. // // The second example shows the \c SymmetricMatrix * adaptor in a row-major dense matrix/sparse // vector multiplication: * * \code using blaze::DynamicMatrix; using blaze::DynamicVector; using * blaze::CompressedVector; using blaze::rowMajor; using blaze::columnVector; * * SymmetricMatrix< DynamicMatrix<double,rowMajor> > A; * CompressedVector<double,columnVector> x; * DynamicVector<double,columnVector> y; * * // ... Resizing and initialization * * y = A * x; \endcode * * // In this example it is not intuitively apparent that using a row-major * matrix is not the best // possible choice in terms of performance since * the computation cannot be vectorized. Choosing // a column-major matrix * instead, however, would enable a vectorized computation. Therefore // \b * Blaze exploits the fact that \c A is symmetric, selects the best suited * storage order and // evaluates the multiplication as * * \code y = trans( A ) * x; \endcode * * // which significantly increases the performance. 
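The two examples of this section can be combined into a single compilable sketch; the sizes and element values are illustrative assumptions only:

\code
#include <blaze/Math.h>

int main()
{
   using blaze::CompressedVector;
   using blaze::DynamicMatrix;
   using blaze::DynamicVector;
   using blaze::LowerMatrix;
   using blaze::SymmetricMatrix;
   using blaze::rowMajor;
   using blaze::columnMajor;
   using blaze::columnVector;

   // Lower triangular matrix/matrix multiplication: the upper parts are known to be zero
   LowerMatrix< DynamicMatrix<double,rowMajor> >    A( 100UL );
   LowerMatrix< DynamicMatrix<double,columnMajor> > B( 100UL );
   DynamicMatrix<double,columnMajor>                C;
   C = A * B;

   // Symmetric matrix/sparse vector multiplication: evaluated with the best suited storage order
   SymmetricMatrix< DynamicMatrix<double,rowMajor> > S( 100UL );
   CompressedVector<double,columnVector>             x( 100UL );
   DynamicVector<double,columnVector>                y;
   y = S * x;

   return 0;
}
\endcode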
// // \n Previous: \ref * matrix_operations &nbsp; &nbsp; Next: \ref adaptors_symmetric_matrices */ //************************************************************************************************* //**Symmetric Matrices ***************************************************************************** /* * !\page adaptors_symmetric_matrices Symmetric Matrices // // * \tableofcontents // // // \n \section adaptors_symmetric_matrices_general * Symmetric Matrices // <hr> // // In contrast to general matrices, which * have no restriction in their number of rows and columns // and whose * elements can have any value, symmetric matrices provide the compile time * guarantee // to be square matrices with pair-wise identical values. * Mathematically, this means that a // symmetric matrix is always equal to * its transpose (\f$ A = A^T \f$) and that all non-diagonal // values have * an identical counterpart (\f$ a_{ij} == a_{ji} \f$). This symmetry * property can // be exploited to provide higher efficiency and/or lower * memory consumption. Within the \b Blaze // library, symmetric matrices are * realized by the \ref adaptors_symmetric_matrices_symmetricmatrix // class * template. // // // \n \section adaptors_symmetric_matrices_symmetricmatrix * SymmetricMatrix // <hr> // // The SymmetricMatrix class template is an * adapter for existing dense and sparse matrix types. // It inherits the * properties and the interface of the given matrix type \c MT and extends it * // by enforcing the additional invariant of symmetry (i.e. the matrix is * always equal to its // transpose \f$ A = A^T \f$). It can be included via * the header file * * \code #include <blaze/math/SymmetricMatrix.h> \endcode * * // The type of the adapted matrix can be specified via template parameter: * * \code template< typename MT > class SymmetricMatrix; \endcode * * // \c MT specifies the type of the matrix to be adapted. SymmetricMatrix can * be used with any // non-cv-qualified, non-reference, non-pointer, * non-expression dense or sparse matrix type. Note // that the given matrix * type must be either resizable (as for instance blaze::HybridMatrix or // * blaze::DynamicMatrix) or must be square at compile time (as for instance * blaze::StaticMatrix). // // The following examples give an impression of * several possible symmetric matrices: * * \code using blaze::unaligned; using blaze::unpadded; using blaze::rowMajor; * using blaze::columnMajor; * * // Definition of a 3x3 row-major dense symmetric matrix with static memory * blaze::SymmetricMatrix< blaze::StaticMatrix<int,3UL,3UL,rowMajor> > A; * * // Definition of a resizable column-major dense symmetric matrix based on * HybridMatrix blaze::SymmetricMatrix< * blaze::HybridMatrix<float,4UL,4UL,columnMajor> B; * * // Definition of a resizable row-major dense symmetric matrix based on * DynamicMatrix blaze::SymmetricMatrix< * blaze::DynamicMatrix<double,rowMajor> > C; * * // Definition of a fixed size row-major dense symmetric matrix based on * CustomMatrix blaze::SymmetricMatrix< * blaze::CustomMatrix<double,unaligned,unpadded,rowMajor> > D; * * // Definition of a compressed row-major single precision symmetric matrix * blaze::SymmetricMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > * E; \endcode * * // The storage order of a symmetric matrix is depending on the storage order * of the adapted matrix // type \c MT. In case the adapted matrix is stored * in a row-wise fashion (i.e. is specified as // blaze::rowMajor), the * symmetric matrix will also be a row-major matrix. 
Otherwise, if the // * adapted matrix is column-major (i.e. is specified as blaze::columnMajor), * the symmetric matrix // will also be a column-major matrix. // // // \n * \section adaptors_symmetric_matrices_special_properties Special Properties * of Symmetric Matrices // <hr> // // A symmetric matrix is used exactly * like a matrix of the underlying, adapted matrix type \c MT. // It also * provides (nearly) the same interface as the underlying matrix type. * However, there are // some important exceptions resulting from the * symmetry constraint: // // -# <b>\ref * adaptors_symmetric_matrices_square</b> // -# <b>\ref * adaptors_symmetric_matrices_symmetry</b> // -# <b>\ref * adaptors_symmetric_matrices_initialization</b> // // \n \subsection * adaptors_symmetric_matrices_square Symmetric Matrices Must Always be * Square! // // In case a resizable matrix is used (as for instance * blaze::HybridMatrix, blaze::DynamicMatrix, // or blaze::CompressedMatrix), * this means that the according constructors, the \c resize() and // the \c * extend() functions only expect a single parameter, which specifies both * the number of // rows and columns, instead of two (one for the number of * rows and one for the number of columns): * * \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; using * blaze::rowMajor; * * // Default constructed, default initialized, row-major 3x3 symmetric dynamic * matrix SymmetricMatrix< DynamicMatrix<double,rowMajor> > A( 3 ); * * // Resizing the matrix to 5x5 A.resize( 5 ); * * // Extending the number of rows and columns by 2, resulting in a 7x7 matrix * A.extend( 2 ); \endcode * * // In case a matrix with a fixed size is used (as for instance * blaze::StaticMatrix), the number // of rows and number of columns must be * specified equally: * * \code using blaze::StaticMatrix; using blaze::SymmetricMatrix; using * blaze::columnMajor; * * // Correct setup of a fixed size column-major 3x3 symmetric static matrix * SymmetricMatrix< StaticMatrix<int,3UL,3UL,columnMajor> > A; * * // Compilation error: the provided matrix type is not a square matrix type * SymmetricMatrix< StaticMatrix<int,3UL,4UL,columnMajor> > B; \endcode * * // \n \subsection adaptors_symmetric_matrices_symmetry The Symmetric Property * is Always Enforced! // // This means that modifying the element \f$ a_{ij} * \f$ of a symmetric matrix also modifies its // counterpart element \f$ * a_{ji} \f$. 
Also, it is only possible to assign matrices that are // * symmetric themselves: * * \code using blaze::CompressedMatrix; using blaze::DynamicMatrix; using * blaze::StaticMatrix; using blaze::SymmetricMatrix; using blaze::rowMajor; * * // Default constructed, row-major 3x3 symmetric compressed matrix * SymmetricMatrix< CompressedMatrix<double,rowMajor> > A( 3 ); * * // Initializing three elements via the function call operator A(0,0) = 1.0; * // Initialization of the diagonal element (0,0) A(0,2) = 2.0; // * Initialization of the elements (0,2) and (2,0) * * // Inserting three more elements via the insert() function A.insert( 1, 1, * 3.0 ); // Inserting the diagonal element (1,1) A.insert( 1, 2, 4.0 ); // * Inserting the elements (1,2) and (2,1) * * // Access via a non-const iterator A.begin(1UL) = 10.0; // Modifies both * elements (1,0) and (0,1) * * // Erasing elements via the erase() function A.erase( 0, 0 ); // Erasing the * diagonal element (0,0) A.erase( 0, 2 ); // Erasing the elements (0,2) and * (2,0) * * // Construction from a symmetric dense matrix StaticMatrix<double,3UL,3UL> B{ * { 3.0, 8.0, -2.0 }, { 8.0, 0.0, -1.0 }, { -2.0, -1.0, 4.0 } }; * * SymmetricMatrix< DynamicMatrix<double,rowMajor> > C( B ); // OK * * // Assignment of a non-symmetric dense matrix StaticMatrix<double,3UL,3UL> D{ * { 3.0, 7.0, -2.0 }, { 8.0, 0.0, -1.0 }, { -2.0, -1.0, 4.0 } }; * * C = D; // Throws an exception; symmetric invariant would be violated! * \endcode * * // The same restriction also applies to the \c append() function for sparse * matrices: Appending // the element \f$ a_{ij} \f$ additionally inserts the * element \f$ a_{ji} \f$ into the matrix. // Despite the additional * insertion, the \c append() function still provides the most efficient // * way to set up a symmetric sparse matrix. 
In order to achieve the maximum * efficiency, the // capacity of the individual rows/columns of the matrix * should to be specifically prepared with // \c reserve() calls: * * \code using blaze::CompressedMatrix; using blaze::SymmetricMatrix; using * blaze::rowMajor; * * // Setup of the symmetric matrix // // ( 0 1 3 ) // A = ( 1 2 0 ) // * ( 3 0 0 ) // SymmetricMatrix< CompressedMatrix<double,rowMajor> > A( 3 ); * * A.reserve( 5 ); // Reserving enough space for 5 non-zero elements * A.reserve( 0, 2 ); // Reserving two non-zero elements in the first * row A.reserve( 1, 2 ); // Reserving two non-zero elements in the * second row A.reserve( 2, 1 ); // Reserving a single non-zero element * in the third row A.append( 0, 1, 1.0 ); // Appending the value 1 at * position (0,1) and (1,0) A.append( 1, 1, 2.0 ); // Appending the value 2 * at position (1,1) A.append( 2, 0, 3.0 ); // Appending the value 3 at * position (2,0) and (0,2) \endcode * * // The symmetry property is also enforced for symmetric custom matrices: In * case the given array // of elements does not represent a symmetric matrix, * a \c std::invalid_argument exception is // thrown: * * \code using blaze::CustomMatrix; using blaze::SymmetricMatrix; using * blaze::unaligned; using blaze::unpadded; using blaze::rowMajor; * * using CustomSymmetric = SymmetricMatrix< * CustomMatrix<double,unaligned,unpadded,rowMajor> >; * * // Creating a 3x3 symmetric custom matrix from a properly initialized array * double array[9] = { 1.0, 2.0, 4.0, 2.0, 3.0, 5.0, 4.0, 5.0, 6.0 }; * CustomSymmetric A( array, 3UL ); // OK * * // Attempt to create a second 3x3 symmetric custom matrix from an * uninitialized array std::unique_ptr<double[]> memory( new double[9UL] ); * CustomSymmetric B( memory.get(), 3UL ); // Throws an exception \endcode * * // Finally, the symmetry property is enforced for views (rows, columns, * submatrices, ...) on the // symmetric matrix. The following example * demonstrates that modifying the elements of an entire // row of the * symmetric matrix also affects the counterpart elements in the according * column of // the matrix: * * \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; * * // Setup of the symmetric matrix // // ( 0 1 0 2 ) // A = ( 1 3 4 0 ) * // ( 0 4 0 5 ) // ( 2 0 5 0 ) // SymmetricMatrix< * DynamicMatrix<int> > A( 4 ); A(0,1) = 1; A(0,3) = 2; A(1,1) = 3; A(1,2) = * 4; A(2,3) = 5; * * // Setting all elements in the 1st row to 0 results in the matrix // // * ( 0 0 0 2 ) // A = ( 0 0 0 0 ) // ( 0 0 0 5 ) // ( 2 0 5 0 ) * // row( A, 1 ) = 0; \endcode * * // The next example demonstrates the (compound) assignment to submatrices of * symmetric matrices. // Since the modification of element \f$ a_{ij} \f$ of * a symmetric matrix also modifies the // element \f$ a_{ji} \f$, the matrix * to be assigned must be structured such that the symmetry // of the * symmetric matrix is preserved. 
Otherwise a \c std::invalid_argument * exception is // thrown: * * \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; * * // Setup of two default 4x4 symmetric matrices SymmetricMatrix< * DynamicMatrix<int> > A1( 4 ), A2( 4 ); * * // Setup of the 3x2 dynamic matrix // // ( 1 2 ) // B = ( 3 4 ) // * ( 5 6 ) // DynamicMatrix<int> B{ { 1, 2 }, { 3, 4 }, { 5, 6 } }; * * // OK: Assigning B to a submatrix of A1 such that the symmetry can be * preserved // // ( 0 0 1 2 ) // A1 = ( 0 0 3 4 ) // ( 1 3 5 * 6 ) // ( 2 4 6 0 ) // submatrix( A1, 0UL, 2UL, 3UL, 2UL ) = B; // * OK * * // Error: Assigning B to a submatrix of A2 such that the symmetry cannot be * preserved! // The elements marked with X cannot be assigned * unambiguously! // // ( 0 1 2 0 ) // A2 = ( 1 3 X 0 ) // ( * 2 X 6 0 ) // ( 0 0 0 0 ) // submatrix( A2, 0UL, 1UL, 3UL, 2UL ) = * B; // Assignment throws an exception! \endcode * * // \n \subsection adaptors_symmetric_matrices_initialization The Elements of * a Dense Symmetric Matrix are Always Default Initialized! // // Although * this results in a small loss of efficiency (especially in case all default * values are // overridden afterwards), this property is important since * otherwise the symmetric property of // dense symmetric matrices could not * be guaranteed: * * \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; * * // Uninitialized, 5x5 row-major dynamic matrix DynamicMatrix<int,rowMajor> A( * 5, 5 ); * * // Default initialized, 5x5 row-major symmetric dynamic matrix * SymmetricMatrix< DynamicMatrix<int,rowMajor> > B( 5 ); \endcode * * // \n \section adaptors_symmetric_matrices_arithmetic_operations Arithmetic * Operations // <hr> // // A SymmetricMatrix matrix can participate in * numerical operations in any way any other dense // or sparse matrix can * participate. It can also be combined with any other dense or sparse vector * // or matrix. The following code example gives an impression of the use of * SymmetricMatrix within // arithmetic operations: * * \code using blaze::SymmetricMatrix; using blaze::DynamicMatrix; using * blaze::HybridMatrix; using blaze::StaticMatrix; using * blaze::CompressedMatrix; using blaze::rowMajor; using blaze::columnMajor; * * DynamicMatrix<double,rowMajor> A( 3, 3 ); CompressedMatrix<double,rowMajor> * B( 3, 3 ); * * SymmetricMatrix< DynamicMatrix<double,rowMajor> > C( 3 ); SymmetricMatrix< * CompressedMatrix<double,rowMajor> > D( 3 ); * * SymmetricMatrix< HybridMatrix<float,3UL,3UL,rowMajor> > E; SymmetricMatrix< * StaticMatrix<float,3UL,3UL,columnMajor> > F; * * E = A + B; // Matrix addition and assignment to a row-major symmetric * matrix (includes runtime check) F = C - D; // Matrix subtraction and * assignment to a column-major symmetric matrix (only compile time check) F * = A * D; // Matrix multiplication between a dense and a sparse matrix * (includes runtime check) * * C *= 2.0; // In-place scaling of matrix C E = 2.0 * B; // Scaling of * matrix B (includes runtime check) F = C * 2.0; // Scaling of matrix C * (only compile time check) * * E += A - B; // Addition assignment (includes runtime check) F -= C + D; * // Subtraction assignment (only compile time check) F *= A * D; // * Multiplication assignment (includes runtime check) \endcode * * // Note that it is possible to assign any kind of matrix to a symmetric * matrix. In case the matrix // to be assigned is not symmetric at compile * time, a runtime check is performed. 
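The runtime check mentioned above can be observed directly. The following minimal sketch (with illustrative values) assigns both a symmetric and a non-symmetric general matrix to a SymmetricMatrix and catches the resulting \c std::invalid_argument:

\code
#include <blaze/Math.h>
#include <iostream>
#include <stdexcept>

int main()
{
   using blaze::DynamicMatrix;
   using blaze::SymmetricMatrix;

   SymmetricMatrix< DynamicMatrix<double> > S( 2UL );

   DynamicMatrix<double> good{ { 1.0, 2.0 }, { 2.0, 3.0 } };  // Symmetric values
   DynamicMatrix<double> bad { { 1.0, 2.0 }, { 9.0, 3.0 } };  // Not symmetric

   S = good;    // OK: the runtime symmetry check succeeds

   try {
      S = bad;  // The runtime symmetry check fails ...
   }
   catch( const std::invalid_argument& ex ) {
      std::cerr << "Assignment rejected: " << ex.what() << "\n";  // ... and an exception is thrown
   }

   return 0;
}
\endcode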
// // // \n \section * adaptors_symmetric_matrices_block_matrices Symmetric Block Matrices // * <hr> // // It is also possible to use symmetric block matrices: * * \code using blaze::CompressedMatrix; using blaze::StaticMatrix; using * blaze::SymmetricMatrix; * * // Definition of a 3x3 symmetric block matrix based on CompressedMatrix * SymmetricMatrix< CompressedMatrix< StaticMatrix<int,3UL,3UL> > > A( 3 ); * \endcode * * // Also in this case, the SymmetricMatrix class template enforces the * invariant of symmetry and // guarantees that a modifications of element * \f$ a_{ij} \f$ of the adapted matrix is also // applied to element \f$ * a_{ji} \f$: * * \code // Inserting the elements (2,4) and (4,2) A.insert( 2, 4, * StaticMatrix<int,3UL,3UL>{ { 1, -4, 5 }, { 6, 8, -3 }, { 2, -1, 2 } } * ); * * // Manipulating the elements (2,4) and (4,2) A(2,4)(1,1) = -5; \endcode * * // For more information on block matrices, see the tutorial on \ref * block_vectors_and_matrices. // // // \n \section * adaptors_symmetric_matrices_performance Performance Considerations // <hr> * // // When the symmetric property of a matrix is known beforehands using * the SymmetricMatrix adaptor // instead of a general matrix can be a * considerable performance advantage. The \b Blaze library // tries to * exploit the properties of symmetric matrices whenever possible. However, * there are // also situations when using a symmetric matrix introduces some * overhead. The following examples // demonstrate several situations where * symmetric matrices can positively or negatively impact // performance. // * // \n \subsection adaptors_symmetric_matrices_matrix_matrix_multiplication * Positive Impact: Matrix/Matrix Multiplication // // When multiplying two * matrices, at least one of which is symmetric, \b Blaze can exploit the * fact // that \f$ A = A^T \f$ and choose the fastest and most suited * combination of storage orders for the // multiplication. The following * example demonstrates this by means of a dense matrix/sparse matrix // * multiplication: * * \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; using * blaze::rowMajor; using blaze::columnMajor; * * SymmetricMatrix< DynamicMatrix<double,rowMajor> > A; SymmetricMatrix< * CompressedMatrix<double,columnMajor> > B; * DynamicMatrix<double,columnMajor> C; * * // ... Resizing and initialization * * C = A * B; \endcode * * // Intuitively, the chosen combination of a row-major and a column-major * matrix is the most suited // for maximum performance. However, \b Blaze * evaluates the multiplication as * * \code C = A * trans( B ); \endcode * * // which significantly increases the performance since in contrast to the * original formulation the // optimized form can be vectorized. Therefore, * in the context of matrix multiplications, using the // SymmetricMatrix * adapter is obviously an advantage. // // \n \subsection * adaptors_symmetric_matrices_matrix_vector_multiplication Positive Impact: * Matrix/Vector Multiplication // // A similar optimization is possible in * case of matrix/vector multiplications: * * \code using blaze::DynamicMatrix; using blaze::DynamicVector; using * blaze::CompressedVector; using blaze::rowMajor; using blaze::columnVector; * * SymmetricMatrix< DynamicMatrix<double,rowMajor> > A; * CompressedVector<double,columnVector> x; * DynamicVector<double,columnVector> y; * * // ... 
Resizing and initialization * * y = A * x; \endcode * * // In this example it is not intuitively apparent that using a row-major * matrix is not the best // possible choice in terms of performance since * the computation cannot be vectorized. Choosing // a column-major matrix * instead, however, would enable a vectorized computation. Therefore // \b * Blaze exploits the fact that \c A is symmetric, selects the best suited * storage order and // evaluates the multiplication as * * \code y = trans( A ) * x; \endcode * * // which also significantly increases the performance. // // \n \subsection * adaptors_symmetric_matrices_views Positive Impact: Row/Column Views on * Column/Row-Major Matrices // // Another example is the optimization of a * row view on a column-major symmetric matrix: * * \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; using * blaze::columnMajor; * * SymmetricMatrix< DynamicMatrix<double,columnMajor> > A( 10UL ); auto row5 = * row( A, 5UL ); \endcode * * // Usually, a row view on a column-major matrix results in a considerable * performance decrease in // comparison to a row view on a row-major matrix * due to the non-contiguous storage of the matrix // elements. However, in * case of symmetric matrices, \b Blaze instead uses the according column of * // the matrix, which provides the same performance as if the matrix would * be row-major. Note that // this also works for column views on row-major * matrices, where \b Blaze can use the according // row instead of a column * in order to provide maximum performance. // // \n \subsection * adaptors_symmetric_matrices_assignment Negative Impact: Assignment of a * General Matrix // // In contrast to using a symmetric matrix on the * right-hand side of an assignment (i.e. for read // access), which * introduces absolutely no performance penalty, using a symmetric matrix on * the // left-hand side of an assignment (i.e. for write access) may * introduce additional overhead when // it is assigned a general matrix, * which is not symmetric at compile time: * * \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; * * SymmetricMatrix< DynamicMatrix<double> > A, C; DynamicMatrix<double> B; * * B = A; // Only read-access to the symmetric matrix; no performance penalty C * = A; // Assignment of a symmetric matrix to another symmetric matrix; no * runtime overhead C = B; // Assignment of a general matrix to a symmetric * matrix; some runtime overhead \endcode * * // When assigning a general, potentially not symmetric matrix to a symmetric * matrix it is necessary // to check whether the matrix is symmetric at * runtime in order to guarantee the symmetry property // of the symmetric * matrix. In case it turns out to be symmetric, it is assigned as * efficiently as // possible, if it is not, an exception is thrown. 
In order * to prevent this runtime overhead it is // therefore generally advisable to * assign symmetric matrices to other symmetric matrices.\n // In this * context it is especially noteworthy that in contrast to additions and * subtractions the // multiplication of two symmetric matrices does not * necessarily result in another symmetric matrix: * * \code SymmetricMatrix< DynamicMatrix<double> > A, B, C; * * C = A + B; // Results in a symmetric matrix; no runtime overhead C = A - B; * // Results in a symmetric matrix; no runtime overhead C = A * B; // Is * not guaranteed to result in a symmetric matrix; some runtime overhead * \endcode * * // \n Previous: \ref adaptors &nbsp; &nbsp; Next: \ref * adaptors_hermitian_matrices */ //************************************************************************************************* //**Hermitian Matrices ***************************************************************************** /* * !\page adaptors_hermitian_matrices Hermitian Matrices // // * \tableofcontents // // // \n \section adaptors_hermitian_matrices_general * Hermitian Matrices // <hr> // // In addition to symmetric matrices, \b * Blaze also provides an adaptor for Hermitian matrices. // Hermitian * matrices provide the compile time guarantee to be square matrices with * pair-wise // conjugate complex values. Mathematically, this means that an * Hermitian matrix is always equal // to its conjugate transpose (\f$ A = * \overline{A^T} \f$) and that all non-diagonal values have // a complex * conjugate counterpart (\f$ a_{ij} == \overline{a_{ji}} \f$). Within the \b * Blaze // library, Hermitian matrices are realized by the \ref * adaptors_hermitian_matrices_hermitianmatrix // class template. // // // \n * \section adaptors_hermitian_matrices_hermitianmatrix HermitianMatrix // * <hr> // // The HermitianMatrix class template is an adapter for existing * dense and sparse matrix types. // It inherits the properties and the * interface of the given matrix type \c MT and extends it by // enforcing * the additional invariant of Hermitian symmetry (i.e. the matrix is always * equal to // its conjugate transpose \f$ A = \overline{A^T} \f$). It can be * included via the header file * * \code #include <blaze/math/HermitianMatrix.h> \endcode * * // The type of the adapted matrix can be specified via template parameter: * * \code template< typename MT > class HermitianMatrix; \endcode * * // \c MT specifies the type of the matrix to be adapted. HermitianMatrix can * be used with any // non-cv-qualified, non-reference, non-pointer, * non-expression dense or sparse matrix type. Also, // the given matrix type * must have numeric element types (i.e. all integral types except \c bool, * // floating point and complex types). Note that the given matrix type must * be either resizable (as // for instance blaze::HybridMatrix or * blaze::DynamicMatrix) or must be square at compile time (as // for * instance blaze::StaticMatrix). 
// // The following examples give an * impression of several possible Hermitian matrices: * * \code using blaze::unaligned; using blaze::unpadded; using blaze::rowMajor; * using blaze::columnMajor; * * // Definition of a 3x3 row-major dense Hermitian matrix with static memory * blaze::HermitianMatrix< blaze::StaticMatrix<int,3UL,3UL,rowMajor> > A; * * // Definition of a resizable column-major dense Hermitian matrix based on * HybridMatrix blaze::HermitianMatrix< * blaze::HybridMatrix<float,4UL,4UL,columnMajor> B; * * // Definition of a resizable row-major dense Hermitian matrix based on * DynamicMatrix blaze::HermitianMatrix< * blaze::DynamicMatrix<std::complex<double>,rowMajor> > C; * * // Definition of a fixed size row-major dense Hermitian matrix based on * CustomMatrix blaze::HermitianMatrix< * blaze::CustomMatrix<double,unaligned,unpadded,rowMajor> > D; * * // Definition of a compressed row-major single precision complex Hermitian * matrix blaze::HermitianMatrix< * blaze::CompressedMatrix<std::complex<float>,rowMajor> > E; \endcode * * // The storage order of a Hermitian matrix is depending on the storage order * of the adapted matrix // type \c MT. In case the adapted matrix is stored * in a row-wise fashion (i.e. is specified as // blaze::rowMajor), the * Hermitian matrix will also be a row-major matrix. Otherwise, if the // * adapted matrix is column-major (i.e. is specified as blaze::columnMajor), * the Hermitian matrix // will also be a column-major matrix. // // // \n * \section adaptors_hermitian_matrices_vs_symmetric_matrices Hermitian * Matrices vs. Symmetric Matrices // // The blaze::HermitianMatrix adaptor * and the blaze::SymmetricMatrix adaptor share several traits. // However, * there are a couple of differences, both from a mathematical point of view * as well as // from an implementation point of view. // // From a * mathematical point of view, a matrix is called symmetric when it is equal * to its // transpose (\f$ A = A^T \f$) and it is called Hermitian when it * is equal to its conjugate // transpose (\f$ A = \overline{A^T} \f$). For * matrices of real values, however, these two // conditions coincide, which * means that symmetric matrices of real values are also Hermitian // and * Hermitian matrices of real values are also symmetric. // // From an * implementation point of view, \b Blaze restricts Hermitian matrices to * numeric data // types (i.e. all integral types except \c bool, floating * point and complex types), whereas // symmetric matrices can also be block * matrices (i.e. can have vector or matrix elements). // For built-in * element types, the HermitianMatrix adaptor behaves exactly like the * according // SymmetricMatrix implementation. For complex element types, * however, the Hermitian property // is enforced (see also \ref * adaptors_hermitian_matrices_hermitian). 
* * \code using blaze::DynamicMatrix; using blaze::DynamicVector; using * blaze::HermitianMatrix; using blaze::SymmetricMatrix; * * // The following two matrices provide an identical experience (including * performance) HermitianMatrix< DynamicMatrix<double> > A; // Both * Hermitian and symmetric SymmetricMatrix< DynamicMatrix<double> > B; // * Both Hermitian and symmetric * * // The following two matrices will behave differently HermitianMatrix< * DynamicMatrix< complex<double> > > C; // Only Hermitian SymmetricMatrix< * DynamicMatrix< complex<double> > > D; // Only symmetric * * // Hermitian block matrices are not allowed HermitianMatrix< DynamicMatrix< * DynamicVector<double> > > E; // Compilation error! SymmetricMatrix< * DynamicMatrix< DynamicVector<double> > > F; // Symmetric block matrix * \endcode * * // \n \section adaptors_hermitian_matrices_special_properties Special * Properties of Hermitian Matrices // <hr> // // A Hermitian matrix is used * exactly like a matrix of the underlying, adapted matrix type \c MT. // It * also provides (nearly) the same interface as the underlying matrix type. * However, there are // some important exceptions resulting from the * Hermitian symmetry constraint: // // -# <b>\ref * adaptors_hermitian_matrices_square</b> // -# <b>\ref * adaptors_hermitian_matrices_hermitian</b> // -# <b>\ref * adaptors_hermitian_matrices_initialization</b> // // \n \subsection * adaptors_hermitian_matrices_square Hermitian Matrices Must Always be * Square! // // In case a resizable matrix is used (as for instance * blaze::HybridMatrix, blaze::DynamicMatrix, // or blaze::CompressedMatrix), * this means that the according constructors, the \c resize() and // the \c * extend() functions only expect a single parameter, which specifies both * the number of // rows and columns, instead of two (one for the number of * rows and one for the number of columns): * * \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; using * blaze::rowMajor; * * // Default constructed, default initialized, row-major 3x3 Hermitian dynamic * matrix HermitianMatrix< DynamicMatrix<std::complex<double>,rowMajor> > A( * 3 ); * * // Resizing the matrix to 5x5 A.resize( 5 ); * * // Extending the number of rows and columns by 2, resulting in a 7x7 matrix * A.extend( 2 ); \endcode * * // In case a matrix with a fixed size is used (as for instance * blaze::StaticMatrix), the number // of rows and number of columns must be * specified equally: * * \code using blaze::StaticMatrix; using blaze::HermitianMatrix; using * blaze::columnMajor; * * // Correct setup of a fixed size column-major 3x3 Hermitian static matrix * HermitianMatrix< StaticMatrix<std::complex<float>,3UL,3UL,columnMajor> > * A; * * // Compilation error: the provided matrix type is not a square matrix type * HermitianMatrix< StaticMatrix<std::complex<float>,3UL,4UL,columnMajor> > * B; \endcode * * // \n \subsection adaptors_hermitian_matrices_hermitian The Hermitian * Property is Always Enforced! // // This means that the following * properties of a Hermitian matrix are always guaranteed: // // - The * diagonal elements are real numbers, i.e. the imaginary part is zero // - * Element \f$ a_{ij} \f$ is always the complex conjugate of element \f$ * a_{ji} \f$ // // Thus modifying the element \f$ a_{ij} \f$ of a Hermitian * matrix also modifies its // counterpart element \f$ a_{ji} \f$. 
Also, it * is only possible to assign matrices that // are Hermitian themselves: * * \code using blaze::CompressedMatrix; using blaze::DynamicMatrix; using * blaze::StaticMatrix; using blaze::HermitianMatrix; using blaze::rowMajor; * * using cplx = std::complex<double>; * * // Default constructed, row-major 3x3 Hermitian compressed matrix * HermitianMatrix< CompressedMatrix<cplx,rowMajor> > A( 3 ); * * // Initializing the matrix via the function call operator // // ( (1, 0) * (0,0) (2,1) ) // ( (0, 0) (0,0) (0,0) ) // ( (2,-1) (0,0) (0,0) ) // * A(0,0) = cplx( 1.0, 0.0 ); // Initialization of the diagonal element * (0,0) A(0,2) = cplx( 2.0, 1.0 ); // Initialization of the elements (0,2) * and (2,0) * * // Inserting three more elements via the insert() function // // ( (1,-3) * (0,0) (2, 1) ) // ( (0, 0) (2,0) (4,-2) ) // ( (2,-1) (4,2) (0, 0) ) // * A.insert( 1, 1, cplx( 2.0, 0.0 ) ); // Inserting the diagonal element * (1,1) A.insert( 1, 2, cplx( 4.0, -2.0 ) ); // Inserting the elements * (1,2) and (2,1) * * // Access via a non-const iterator // // ( (1,-3) (8,1) (2, 1) ) // ( * (8,-1) (2,0) (4,-2) ) // ( (2,-1) (4,2) (0, 0) ) // A.begin(1UL) = cplx( * 8.0, -1.0 ); // Modifies both elements (1,0) and (0,1) * * // Erasing elements via the erase() function // // ( (0, 0) (8,1) (0, 0) ) * // ( (8,-1) (2,0) (4,-2) ) // ( (0, 0) (4,2) (0, 0) ) // A.erase( 0, 0 * ); // Erasing the diagonal element (0,0) A.erase( 0, 2 ); // Erasing the * elements (0,2) and (2,0) * * // Construction from a Hermitian dense matrix StaticMatrix<cplx,3UL,3UL> B{ { * cplx( 3.0, 0.0 ), cplx( 8.0, 2.0 ), cplx( -2.0, 2.0 ) }, { cplx( 8.0, * 1.0 ), cplx( 0.0, 0.0 ), cplx( -1.0, -1.0 ) }, { cplx( -2.0, -2.0 ), * cplx( -1.0, 1.0 ), cplx( 4.0, 0.0 ) } }; * * HermitianMatrix< DynamicMatrix<double,rowMajor> > C( B ); // OK * * // Assignment of a non-Hermitian dense matrix StaticMatrix<cplx,3UL,3UL> D{ * { cplx( 3.0, 0.0 ), cplx( 7.0, 2.0 ), cplx( 3.0, 2.0 ) }, { cplx( * 8.0, 1.0 ), cplx( 0.0, 0.0 ), cplx( 6.0, 4.0 ) }, { cplx( -2.0, 2.0 ), * cplx( -1.0, 1.0 ), cplx( 4.0, 0.0 ) } }; * * C = D; // Throws an exception; Hermitian invariant would be violated! * \endcode * * // The same restriction also applies to the \c append() function for sparse * matrices: Appending // the element \f$ a_{ij} \f$ additionally inserts the * element \f$ a_{ji} \f$ into the matrix. // Despite the additional * insertion, the \c append() function still provides the most efficient // * way to set up a Hermitian sparse matrix. 
 * // In order to achieve the maximum efficiency, the capacity of the individual rows/columns of
 * // the matrix should be specifically prepared with \c reserve() calls:
 *
 * \code
 * using blaze::CompressedMatrix;
 * using blaze::HermitianMatrix;
 * using blaze::rowMajor;
 *
 * using cplx = std::complex<double>;
 *
 * // Setup of the Hermitian matrix
 * //
 * //       ( (0, 0) (1,2) (3,-4) )
 * //   A = ( (1,-2) (2,0) (0, 0) )
 * //       ( (3, 4) (0,0) (0, 0) )
 * //
 * HermitianMatrix< CompressedMatrix<cplx,rowMajor> > A( 3 );
 *
 * A.reserve( 5 );     // Reserving enough space for 5 non-zero elements
 * A.reserve( 0, 2 );  // Reserving two non-zero elements in the first row
 * A.reserve( 1, 2 );  // Reserving two non-zero elements in the second row
 * A.reserve( 2, 1 );  // Reserving a single non-zero element in the third row
 *
 * A.append( 0, 1, cplx( 1.0, 2.0 ) );  // Appending an element at position (0,1) and (1,0)
 * A.append( 1, 1, cplx( 2.0, 0.0 ) );  // Appending an element at position (1,1)
 * A.append( 2, 0, cplx( 3.0, 4.0 ) );  // Appending an element at position (2,0) and (0,2)
 * \endcode
 *
 * // The Hermitian property is also enforced for Hermitian custom matrices: In case the given
 * // array of elements does not represent a Hermitian matrix, a \c std::invalid_argument
 * // exception is thrown:
 *
 * \code
 * using blaze::CustomMatrix;
 * using blaze::HermitianMatrix;
 * using blaze::unaligned;
 * using blaze::unpadded;
 * using blaze::rowMajor;
 *
 * using CustomHermitian = HermitianMatrix< CustomMatrix<double,unaligned,unpadded,rowMajor> >;
 *
 * // Creating a 3x3 Hermitian custom matrix from a properly initialized array
 * double array[9] = { 1.0, 2.0, 4.0,
 *                     2.0, 3.0, 5.0,
 *                     4.0, 5.0, 6.0 };
 * CustomHermitian A( array, 3UL );  // OK
 *
 * // Attempt to create a second 3x3 Hermitian custom matrix from an uninitialized array
 * std::unique_ptr<double[]> memory( new double[9UL] );
 * CustomHermitian B( memory.get(), 3UL );  // Throws an exception
 * \endcode
 *
 * // Finally, the Hermitian property is enforced for views (rows, columns, submatrices, ...) on
 * // the Hermitian matrix. The following example demonstrates that modifying the elements of an
 * // entire row of the Hermitian matrix also affects the counterpart elements in the according
 * // column of the matrix:
 *
 * \code
 * using blaze::DynamicMatrix;
 * using blaze::HermitianMatrix;
 *
 * using cplx = std::complex<double>;
 *
 * // Setup of the Hermitian matrix
 * //
 * //       ( (0, 0) (1,-1) (0,0) (2, 1) )
 * //   A = ( (1, 1) (3, 0) (4,2) (0, 0) )
 * //       ( (0, 0) (4,-2) (0,0) (5,-3) )
 * //       ( (2,-1) (0, 0) (5,3) (0, 0) )
 * //
 * HermitianMatrix< DynamicMatrix<cplx> > A( 4 );
 * A(0,1) = cplx( 1.0, -1.0 );
 * A(0,3) = cplx( 2.0,  1.0 );
 * A(1,1) = cplx( 3.0,  0.0 );
 * A(1,2) = cplx( 4.0,  2.0 );
 * A(2,3) = cplx( 5.0, -3.0 );
 *
 * // Setting all elements in the 1st row to 0 results in the matrix
 * //
 * //       ( (0, 0) (0,0) (0,0) (2, 1) )
 * //   A = ( (0, 0) (0,0) (0,0) (0, 0) )
 * //       ( (0, 0) (0,0) (0,0) (5,-3) )
 * //       ( (2,-1) (0,0) (5,3) (0, 0) )
 * //
 * row( A, 1 ) = cplx( 0.0, 0.0 );
 * \endcode
 *
 * // The next example demonstrates the (compound) assignment to submatrices of Hermitian matrices.
 * // Since the modification of element \f$ a_{ij} \f$ of a Hermitian matrix also modifies the
 * // element \f$ a_{ji} \f$, the matrix to be assigned must be structured such that the Hermitian
 * // symmetry of the matrix is preserved.
 * // Otherwise a \c std::invalid_argument exception is thrown:
 *
 * \code
 * using blaze::DynamicMatrix;
 * using blaze::HermitianMatrix;
 *
 * using cplx = std::complex<double>;
 *
 * // Setup of two default 4x4 Hermitian matrices
 * HermitianMatrix< DynamicMatrix<cplx> > A1( 4 ), A2( 4 );
 *
 * // Setup of the 3x2 dynamic matrix
 * //
 * //       ( (1,-1) (2, 5) )
 * //   B = ( (3, 0) (4,-6) )
 * //       ( (5, 0) (6, 0) )
 * //
 * DynamicMatrix<cplx> B( 3UL, 2UL );
 * B(0,0) = cplx( 1.0, -1.0 );
 * B(0,1) = cplx( 2.0,  5.0 );
 * B(1,0) = cplx( 3.0,  0.0 );
 * B(1,1) = cplx( 4.0, -6.0 );
 * B(2,0) = cplx( 5.0,  0.0 );
 * B(2,1) = cplx( 6.0,  0.0 );
 *
 * // OK: Assigning B to a submatrix of A1 such that the Hermitian property is preserved
 * //
 * //        ( (0, 0) (0, 0) (1,-1) (2, 5) )
 * //   A1 = ( (0, 0) (0, 0) (3, 0) (4,-6) )
 * //        ( (1, 1) (3, 0) (5, 0) (6, 0) )
 * //        ( (2,-5) (4, 6) (6, 0) (0, 0) )
 * //
 * submatrix( A1, 0UL, 2UL, 3UL, 2UL ) = B;  // OK
 *
 * // Error: Assigning B to a submatrix of A2 such that the Hermitian property isn't preserved!
 * //   The elements marked with X cannot be assigned unambiguously!
 * //
 * //        ( (0, 0) (1,-1) (2,5) (0,0) )
 * //   A2 = ( (1, 1) (3, 0) (X,X) (0,0) )
 * //        ( (2,-5) (X, X) (6,0) (0,0) )
 * //        ( (0, 0) (0, 0) (0,0) (0,0) )
 * //
 * submatrix( A2, 0UL, 1UL, 3UL, 2UL ) = B;  // Assignment throws an exception!
 * \endcode
 *
 * // \n \subsection adaptors_hermitian_matrices_initialization The Elements of a Dense Hermitian Matrix are Always Default Initialized!
 * //
 * // Although this results in a small loss of efficiency (especially in case all default values
 * // are overridden afterwards), this property is important since otherwise the Hermitian property
 * // of dense Hermitian matrices could not be guaranteed:
 *
 * \code
 * using blaze::DynamicMatrix;
 * using blaze::HermitianMatrix;
 * using blaze::rowMajor;
 *
 * // Uninitialized, 5x5 row-major dynamic matrix
 * DynamicMatrix<int,rowMajor> A( 5, 5 );
 *
 * // Default initialized, 5x5 row-major Hermitian dynamic matrix
 * HermitianMatrix< DynamicMatrix<int,rowMajor> > B( 5 );
 * \endcode
 *
 * // \n \section adaptors_hermitian_matrices_arithmetic_operations Arithmetic Operations
 * // <hr>
 * //
 * // A HermitianMatrix can be used within all numerical operations in any way any other dense or
 * // sparse matrix can be used. It can also be combined with any other dense or sparse vector or
 * // matrix.
 * // The following code example gives an impression of the use of HermitianMatrix within
 * // arithmetic operations:
 *
 * \code
 * using blaze::HermitianMatrix;
 * using blaze::DynamicMatrix;
 * using blaze::HybridMatrix;
 * using blaze::StaticMatrix;
 * using blaze::CompressedMatrix;
 * using blaze::rowMajor;
 * using blaze::columnMajor;
 *
 * using cplx = std::complex<float>;
 *
 * DynamicMatrix<cplx,rowMajor> A( 3, 3 );
 * CompressedMatrix<cplx,rowMajor> B( 3, 3 );
 *
 * HermitianMatrix< DynamicMatrix<cplx,rowMajor> > C( 3 );
 * HermitianMatrix< CompressedMatrix<cplx,rowMajor> > D( 3 );
 *
 * HermitianMatrix< HybridMatrix<cplx,3UL,3UL,rowMajor> > E;
 * HermitianMatrix< StaticMatrix<cplx,3UL,3UL,columnMajor> > F;
 *
 * E = A + B;   // Matrix addition and assignment to a row-major Hermitian matrix (includes runtime check)
 * F = C - D;   // Matrix subtraction and assignment to a column-major Hermitian matrix (only compile time check)
 * F = A * D;   // Matrix multiplication between a dense and a sparse matrix (includes runtime check)
 *
 * C *= 2.0;     // In-place scaling of matrix C
 * E = 2.0 * B;  // Scaling of matrix B (includes runtime check)
 * F = C * 2.0;  // Scaling of matrix C (only compile time check)
 *
 * E += A - B;   // Addition assignment (includes runtime check)
 * F -= C + D;   // Subtraction assignment (only compile time check)
 * F *= A * D;   // Multiplication assignment (includes runtime check)
 * \endcode
 *
 * // Note that it is possible to assign any kind of matrix to a Hermitian matrix. In case the
 * // matrix to be assigned is not Hermitian at compile time, a runtime check is performed.
 * //
 * //
 * // \n \section adaptors_hermitian_matrices_performance Performance Considerations
 * // <hr>
 * //
 * // When the Hermitian property of a matrix is known beforehand, using the HermitianMatrix
 * // adaptor instead of a general matrix can be a considerable performance advantage. This is
 * // particularly true in case the Hermitian matrix is also symmetric (i.e. has built-in element
 * // types). The \b Blaze library tries to exploit the properties of Hermitian (symmetric)
 * // matrices whenever possible. However, there are also situations when using a Hermitian matrix
 * // introduces some overhead. The following examples demonstrate several situations where
 * // Hermitian matrices can positively or negatively impact performance.
 * //
 * // \n \subsection adaptors_hermitian_matrices_matrix_matrix_multiplication Positive Impact: Matrix/Matrix Multiplication
 * //
 * // When multiplying two matrices, at least one of which is symmetric, \b Blaze can exploit the
 * // fact that \f$ A = A^T \f$ and choose the fastest and most suited combination of storage
 * // orders for the multiplication. The following example demonstrates this by means of a dense
 * // matrix/sparse matrix multiplication:
 *
 * \code
 * using blaze::CompressedMatrix;
 * using blaze::DynamicMatrix;
 * using blaze::HermitianMatrix;
 * using blaze::rowMajor;
 * using blaze::columnMajor;
 *
 * HermitianMatrix< DynamicMatrix<double,rowMajor> > A;        // Both Hermitian and symmetric
 * HermitianMatrix< CompressedMatrix<double,columnMajor> > B;  // Both Hermitian and symmetric
 * DynamicMatrix<double,columnMajor> C;
 *
 * // ... Resizing and initialization
 *
 * C = A * B;
 * \endcode
 *
 * // Intuitively, the chosen combination of a row-major and a column-major matrix is the most
 * // suited for maximum performance. However, \b Blaze evaluates the multiplication as
 *
 * \code
 * C = A * trans( B );
 * \endcode
 *
 * // which significantly increases the performance since in contrast to the original formulation
 * // the optimized form can be vectorized.
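 *
 * // The substitution is legitimate because a Hermitian matrix with real (built-in) element type
 * // equals its own transpose. The following minimal sketch (hypothetical sizes and values, not
 * // part of the original example) makes this explicit and shows that both formulations yield the
 * // same result:
 *
 * \code
 * #include <blaze/Math.h>
 *
 * int main()
 * {
 *    using blaze::CompressedMatrix;
 *    using blaze::DynamicMatrix;
 *    using blaze::HermitianMatrix;
 *    using blaze::columnMajor;
 *    using blaze::rowMajor;
 *
 *    HermitianMatrix< CompressedMatrix<double,columnMajor> > B( 100UL );
 *    B(0,1) = 1.5;    // Each write also sets the mirrored element
 *    B(2,7) = -0.5;
 *
 *    const bool symmetric = isSymmetric( B );  // true for real element types
 *
 *    DynamicMatrix<double,rowMajor> A( 100UL, 100UL, 1.0 );
 *    DynamicMatrix<double,columnMajor> C1( A * B );
 *    DynamicMatrix<double,columnMajor> C2( A * trans( B ) );
 *
 *    return ( symmetric && C1 == C2 ) ? 0 : 1;  // The two products agree
 * }
 * \endcode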
Therefore, * in the context of matrix multiplications, using a // symmetric matrix is * obviously an advantage. // // \n \subsection * adaptors_hermitian_matrices_matrix_vector_multiplication Positive Impact: * Matrix/Vector Multiplication // // A similar optimization is possible in * case of matrix/vector multiplications: * * \code using blaze::DynamicMatrix; using blaze::DynamicVector; using * blaze::CompressedVector; using blaze::HermitianMatrix; using * blaze::rowMajor; using blaze::columnVector; * * HermitianMatrix< DynamicMatrix<double,rowMajor> > A; // Hermitian and * symmetric CompressedVector<double,columnVector> x; * DynamicVector<double,columnVector> y; * * // ... Resizing and initialization * * y = A * x; \endcode * * // In this example it is not intuitively apparent that using a row-major * matrix is not the best // possible choice in terms of performance since * the computation cannot be vectorized. Choosing // a column-major matrix * instead, however, would enable a vectorized computation. Therefore // \b * Blaze exploits the fact that \c A is symmetric, selects the best suited * storage order and // evaluates the multiplication as * * \code y = trans( A ) * x; \endcode * * // which also significantly increases the performance. // // \n \subsection * adaptors_hermitian_matrices_views Positive Impact: Row/Column Views on * Column/Row-Major Matrices // // Another example is the optimization of a * row view on a column-major symmetric matrix: * * \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; using * blaze::columnMajor; * * HermitianMatrix< DynamicMatrix<double,columnMajor> > A( 10UL ); // Both * Hermitian and symmetric auto row5 = row( A, 5UL ); \endcode * * // Usually, a row view on a column-major matrix results in a considerable * performance decrease in // comparison to a row view on a row-major matrix * due to the non-contiguous storage of the matrix // elements. However, in * case of symmetric matrices, \b Blaze instead uses the according column of * // the matrix, which provides the same performance as if the matrix would * be row-major. Note that // this also works for column views on row-major * matrices, where \b Blaze can use the according // row instead of a column * in order to provide maximum performance. // // \n \subsection * adaptors_hermitian_matrices_assignment Negative Impact: Assignment of a * General Matrix // // In contrast to using a Hermitian matrix on the * right-hand side of an assignment (i.e. for read // access), which * introduces absolutely no performance penalty, using a Hermitian matrix on * the // left-hand side of an assignment (i.e. for write access) may * introduce additional overhead when // it is assigned a general matrix, * which is not Hermitian at compile time: * * \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; * * HermitianMatrix< DynamicMatrix< complex<double> > > A, C; * DynamicMatrix<double> B; * * B = A; // Only read-access to the Hermitian matrix; no performance penalty C * = A; // Assignment of a Hermitian matrix to another Hermitian matrix; no * runtime overhead C = B; // Assignment of a general matrix to a Hermitian * matrix; some runtime overhead \endcode * * // When assigning a general, potentially not Hermitian matrix to a Hermitian * matrix it is necessary // to check whether the matrix is Hermitian at * runtime in order to guarantee the Hermitian property // of the Hermitian * matrix. 
In case it turns out to be Hermitian, it is assigned as * efficiently as // possible, if it is not, an exception is thrown. In order * to prevent this runtime overhead it is // therefore generally advisable to * assign Hermitian matrices to other Hermitian matrices.\n // In this * context it is especially noteworthy that in contrast to additions and * subtractions the // multiplication of two Hermitian matrices does not * necessarily result in another Hermitian matrix: * * \code HermitianMatrix< DynamicMatrix<double> > A, B, C; * * C = A + B; // Results in a Hermitian matrix; no runtime overhead C = A - B; * // Results in a Hermitian matrix; no runtime overhead C = A * B; // Is * not guaranteed to result in a Hermitian matrix; some runtime overhead * \endcode * * // \n Previous: \ref adaptors_symmetric_matrices &nbsp; &nbsp; Next: \ref * adaptors_triangular_matrices */ //************************************************************************************************* //**Triangular Matrices **************************************************************************** /* * !\page adaptors_triangular_matrices Triangular Matrices // // * \tableofcontents // // // \n \section adaptors_triangular_matrices_general * Triangular Matrices // <hr> // // Triangular matrices come in three * flavors: Lower triangular matrices provide the compile time // guarantee * to be square matrices and that the upper part of the matrix contains only * default // elements that cannot be modified. Upper triangular matrices on * the other hand provide the // compile time guarantee to be square and that * the lower part of the matrix contains only fixed // default elements. * Finally, diagonal matrices provide the compile time guarantee to be square * // and that both the lower and upper part of the matrix contain only * immutable default elements. // These properties can be exploited to gain * higher performance and/or to save memory. Within the // \b Blaze library, * several kinds of lower and upper triangular and diagonal matrices are * realized // by the following class templates: // // Lower triangular * matrices: // - <b>\ref adaptors_triangular_matrices_lowermatrix</b> // - * <b>\ref adaptors_triangular_matrices_unilowermatrix</b> // - <b>\ref * adaptors_triangular_matrices_strictlylowermatrix</b> // // Upper * triangular matrices: // - <b>\ref * adaptors_triangular_matrices_uppermatrix</b> // - <b>\ref * adaptors_triangular_matrices_uniuppermatrix</b> // - <b>\ref * adaptors_triangular_matrices_strictlyuppermatrix</b> // // Diagonal * matrices // - <b>\ref adaptors_triangular_matrices_diagonalmatrix</b> // * // // \n \section adaptors_triangular_matrices_lowermatrix LowerMatrix // * <hr> // // The blaze::LowerMatrix class template is an adapter for * existing dense and sparse matrix types. 
// It inherits the properties and * the interface of the given matrix type \c MT and extends it by // * enforcing the additional invariant that all matrix elements above the * diagonal are 0 (lower // triangular matrix): * * \f[\left(\begin{array}{*{5}{c}} l_{0,0} & 0 & 0 & \cdots & 0 * \\ l_{1,0} & l_{1,1} & 0 & \cdots & 0 \\ l_{2,0} & l_{2,1} & * l_{2,2} & \cdots & 0 \\ \vdots & \vdots & \vdots & \ddots & * \vdots \\ l_{N,0} & l_{N,1} & l_{N,2} & \cdots & l_{N,N} \\ * \end{array}\right).\f] * * // It can be included via the header file * * \code #include <blaze/math/LowerMatrix.h> \endcode * * // The type of the adapted matrix can be specified via the first template * parameter: * * \code template< typename MT > class LowerMatrix; \endcode * * // \c MT specifies the type of the matrix to be adapted. blaze::LowerMatrix * can be used with any // non-cv-qualified, non-reference, non-pointer, * non-expression dense or sparse matrix type. Note // that the given matrix * type must be either resizable (as for instance blaze::HybridMatrix or // * blaze::DynamicMatrix) or must be square at compile time (as for instance * blaze::StaticMatrix). // // The following examples give an impression of * several possible lower matrices: * * \code using blaze::unaligned; using blaze::unpadded; using blaze::rowMajor; * using blaze::columnMajor; * * // Definition of a 3x3 row-major dense lower matrix with static memory * blaze::LowerMatrix< blaze::StaticMatrix<int,3UL,3UL,rowMajor> > A; * * // Definition of a resizable column-major dense lower matrix based on * HybridMatrix blaze::LowerMatrix< * blaze::HybridMatrix<float,4UL,4UL,columnMajor> B; * * // Definition of a resizable row-major dense lower matrix based on * DynamicMatrix blaze::LowerMatrix< blaze::DynamicMatrix<double,rowMajor> > * C; * * // Definition of a fixed size row-major dense lower matrix based on * CustomMatrix blaze::LowerMatrix< * blaze::CustomMatrix<double,unaligned,unpadded,rowMajor> > D; * * // Definition of a compressed row-major single precision lower matrix * blaze::LowerMatrix< blaze::CompressedMatrix<float,rowMajor> > E; \endcode * * // The storage order of a lower matrix is depending on the storage order of * the adapted matrix // type \c MT. In case the adapted matrix is stored in * a row-wise fashion (i.e. is specified // as blaze::rowMajor), the lower * matrix will also be a row-major matrix. Otherwise, if the // adapted * matrix is column-major (i.e. is specified as blaze::columnMajor), the * lower matrix // will also be a column-major matrix. // // // \n \section * adaptors_triangular_matrices_unilowermatrix UniLowerMatrix // <hr> // // * The blaze::UniLowerMatrix class template is an adapter for existing dense * and sparse matrix // types. 
It inherits the properties and the interface * of the given matrix type \c MT and extends // it by enforcing the * additional invariant that all diagonal matrix elements are 1 and all * matrix // elements above the diagonal are 0 (lower unitriangular matrix): * * \f[\left(\begin{array}{*{5}{c}} 1 & 0 & 0 & \cdots & 0 * \\ l_{1,0} & 1 & 0 & \cdots & 0 \\ l_{2,0} & l_{2,1} & 1 * & \cdots & 0 \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ * l_{N,0} & l_{N,1} & l_{N,2} & \cdots & 1 \\ \end{array}\right).\f] * * // It can be included via the header file * * \code #include <blaze/math/UniLowerMatrix.h> \endcode * * // The type of the adapted matrix can be specified via the first template * parameter: * * \code template< typename MT > class UniLowerMatrix; \endcode * * // \c MT specifies the type of the matrix to be adapted. * blaze::UniLowerMatrix can be used with any // non-cv-qualified, * non-reference, non-pointer, non-expression dense or sparse matrix type. * Also, // the given matrix type must have numeric element types (i.e. all * integral types except \c bool, // floating point and complex types). Note * that the given matrix type must be either resizable (as // for instance * blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile * time (as // for instance blaze::StaticMatrix). // // The following * examples give an impression of several possible lower unitriangular * matrices: * * \code // Definition of a 3x3 row-major dense unilower matrix with static * memory blaze::UniLowerMatrix< * blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A; * * // Definition of a resizable column-major dense unilower matrix based on * HybridMatrix blaze::UniLowerMatrix< * blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> B; * * // Definition of a resizable row-major dense unilower matrix based on * DynamicMatrix blaze::UniLowerMatrix< * blaze::DynamicMatrix<double,blaze::rowMajor> > C; * * // Definition of a compressed row-major single precision unilower matrix * blaze::UniLowerMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D; * \endcode * * // The storage order of a lower unitriangular matrix is depending on the * storage order of the // adapted matrix type \c MT. In case the adapted * matrix is stored in a row-wise fashion (i.e. // is specified as * blaze::rowMajor), the unilower matrix will also be a row-major matrix. // * Otherwise if the adapted matrix is column-major (i.e. is specified as * blaze::columnMajor), // the unilower matrix will also be a column-major * matrix. // // // \n \section * adaptors_triangular_matrices_strictlylowermatrix StrictlyLowerMatrix // * <hr> // // The blaze::StrictlyLowerMatrix class template is an adapter for * existing dense and sparse matrix // types. 
It inherits the properties and * the interface of the given matrix type \c MT and extends // it by * enforcing the additional invariant that all diagonal matrix elements and * all matrix // elements above the diagonal are 0 (strictly lower triangular * matrix): * * \f[\left(\begin{array}{*{5}{c}} 0 & 0 & 0 & \cdots & 0 * \\ l_{1,0} & 0 & 0 & \cdots & 0 \\ l_{2,0} & l_{2,1} & 0 * & \cdots & 0 \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ * l_{N,0} & l_{N,1} & l_{N,2} & \cdots & 0 \\ \end{array}\right).\f] * * // It can be included via the header file * * \code #include <blaze/math/StrictlyLowerMatrix.h> \endcode * * // The type of the adapted matrix can be specified via the first template * parameter: * * \code template< typename MT > class StrictlyLowerMatrix; \endcode * * // \c MT specifies the type of the matrix to be adapted. * blaze::StrictlyLowerMatrix can be used // with any non-cv-qualified, * non-reference, non-pointer, non-expression dense or sparse matrix // type. * Note that the given matrix type must be either resizable (as for instance * // blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at * compile time (as for instance // blaze::StaticMatrix). // // The following * examples give an impression of several possible strictly lower triangular * matrices: * * \code // Definition of a 3x3 row-major dense strictly lower matrix with * static memory blaze::StrictlyLowerMatrix< * blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A; * * // Definition of a resizable column-major dense strictly lower matrix based * on HybridMatrix blaze::StrictlyLowerMatrix< * blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> B; * * // Definition of a resizable row-major dense strictly lower matrix based on * DynamicMatrix blaze::StrictlyLowerMatrix< * blaze::DynamicMatrix<double,blaze::rowMajor> > C; * * // Definition of a compressed row-major single precision strictly lower * matrix blaze::StrictlyLowerMatrix< * blaze::CompressedMatrix<float,blaze::rowMajor> > D; \endcode * * // The storage order of a strictly lower triangular matrix is depending on * the storage order of // the adapted matrix type \c MT. In case the adapted * matrix is stored in a row-wise fashion (i.e. // is specified as * blaze::rowMajor), the strictly lower matrix will also be a row-major * matrix. // Otherwise if the adapted matrix is column-major (i.e. is * specified as blaze::columnMajor), // the strictly lower matrix will also * be a column-major matrix. // // // \n \section * adaptors_triangular_matrices_uppermatrix UpperMatrix // <hr> // // The * blaze::UpperMatrix class template is an adapter for existing dense and * sparse matrix types. // It inherits the properties and the interface of * the given matrix type \c MT and extends it by // enforcing the additional * invariant that all matrix elements below the diagonal are 0 (upper // * triangular matrix): * * \f[\left(\begin{array}{*{5}{c}} u_{0,0} & u_{0,1} & u_{0,2} & \cdots & * u_{0,N} \\ 0 & u_{1,1} & u_{1,2} & \cdots & u_{1,N} \\ 0 & 0 * & u_{2,2} & \cdots & u_{2,N} \\ \vdots & \vdots & \vdots & \ddots & * \vdots \\ 0 & 0 & 0 & \cdots & u_{N,N} \\ * \end{array}\right).\f] * * // It can be included via the header file * * \code #include <blaze/math/UpperMatrix.h> \endcode * * // The type of the adapted matrix can be specified via the first template * parameter: * * \code template< typename MT > class UpperMatrix; \endcode * * // \c MT specifies the type of the matrix to be adapted. 
blaze::UpperMatrix * can be used with any // non-cv-qualified, non-reference, non-pointer, * non-expression dense or sparse matrix type. Note // that the given matrix * type must be either resizable (as for instance blaze::HybridMatrix or // * blaze::DynamicMatrix) or must be square at compile time (as for instance * blaze::StaticMatrix). // // The following examples give an impression of * several possible upper matrices: * * \code // Definition of a 3x3 row-major dense upper matrix with static memory * blaze::UpperMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A; * * // Definition of a resizable column-major dense upper matrix based on * HybridMatrix blaze::UpperMatrix< * blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> B; * * // Definition of a resizable row-major dense upper matrix based on * DynamicMatrix blaze::UpperMatrix< * blaze::DynamicMatrix<double,blaze::rowMajor> > C; * * // Definition of a compressed row-major single precision upper matrix * blaze::UpperMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D; * \endcode * * // The storage order of an upper matrix is depending on the storage order of * the adapted matrix // type \c MT. In case the adapted matrix is stored in * a row-wise fashion (i.e. is specified // as blaze::rowMajor), the upper * matrix will also be a row-major matrix. Otherwise, if the // adapted * matrix is column-major (i.e. is specified as blaze::columnMajor), the * upper matrix // will also be a column-major matrix. // // // \n \section * adaptors_triangular_matrices_uniuppermatrix UniUpperMatrix // <hr> // // * The blaze::UniUpperMatrix class template is an adapter for existing dense * and sparse matrix // types. It inherits the properties and the interface * of the given matrix type \c MT and extends // it by enforcing the * additional invariant that all diagonal matrix elements are 1 and all * matrix // elements below the diagonal are 0 (upper unitriangular matrix): * * \f[\left(\begin{array}{*{5}{c}} 1 & u_{0,1} & u_{0,2} & \cdots & * u_{0,N} \\ 0 & 1 & u_{1,2} & \cdots & u_{1,N} \\ 0 & 0 * & 1 & \cdots & u_{2,N} \\ \vdots & \vdots & \vdots & \ddots & * \vdots \\ 0 & 0 & 0 & \cdots & 1 \\ * \end{array}\right).\f] * * // It can be included via the header file * * \code #include <blaze/math/UniUpperMatrix.h> \endcode * * // The type of the adapted matrix can be specified via the first template * parameter: * * \code template< typename MT > class UniUpperMatrix; \endcode * * // \c MT specifies the type of the matrix to be adapted. * blaze::UniUpperMatrix can be used with any // non-cv-qualified, * non-reference, non-pointer, non-expression dense or sparse matrix type. * Also, // the given matrix type must have numeric element types (i.e. all * integral types except \c bool, // floating point and complex types). Note * that the given matrix type must be either resizable (as // for instance * blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile * time (as // for instance blaze::StaticMatrix). 
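 *
 * // As a brief sketch of the upper unitriangular invariant in practice (hypothetical values,
 * // assuming the <blaze/Math.h> convenience header): the diagonal of a default constructed
 * // uniupper matrix is already 1, only the strictly upper part is writable, and the commented-out
 * // assignments indicate modifications the adaptor rejects:
 *
 * \code
 * #include <blaze/Math.h>
 *
 * int main()
 * {
 *    using blaze::DynamicMatrix;
 *    using blaze::UniUpperMatrix;
 *
 *    // Default constructed 3x3 uniupper matrix:
 *    //
 *    //       ( 1 0 0 )
 *    //   A = ( 0 1 0 )
 *    //       ( 0 0 1 )
 *    //
 *    UniUpperMatrix< DynamicMatrix<int> > A( 3UL );
 *
 *    A(0,2) = 5;       // OK: element in the strictly upper part
 *
 *    // A(1,1) = 2;    // Not allowed: diagonal elements are fixed to 1
 *    // A(2,0) = 3;    // Not allowed: elements below the diagonal are fixed to 0
 *
 *    const auto& cA = A;
 *    return ( cA(1,1) == 1 && cA(2,0) == 0 ) ? 0 : 1;
 * }
 * \endcode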
// // The following * examples give an impression of several possible upper unitriangular * matrices: * * \code // Definition of a 3x3 row-major dense uniupper matrix with static * memory blaze::UniUpperMatrix< * blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A; * * // Definition of a resizable column-major dense uniupper matrix based on * HybridMatrix blaze::UniUpperMatrix< * blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> B; * * // Definition of a resizable row-major dense uniupper matrix based on * DynamicMatrix blaze::UniUpperMatrix< * blaze::DynamicMatrix<double,blaze::rowMajor> > C; * * // Definition of a compressed row-major single precision uniupper matrix * blaze::UniUpperMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D; * \endcode * * // The storage order of an upper unitriangular matrix is depending on the * storage order of the // adapted matrix type \c MT. In case the adapted * matrix is stored in a row-wise fashion (i.e. // is specified as * blaze::rowMajor), the uniupper matrix will also be a row-major matrix. // * Otherwise, if the adapted matrix is column-major (i.e. is specified as * blaze::columnMajor), // the uniupper matrix will also be a column-major * matrix. // // // \n \section * adaptors_triangular_matrices_strictlyuppermatrix StrictlyUpperMatrix // * <hr> // // The blaze::StrictlyUpperMatrix class template is an adapter for * existing dense and sparse matrix // types. It inherits the properties and * the interface of the given matrix type \c MT and extends // it by * enforcing the additional invariant that all diagonal matrix elements and * all matrix // elements below the diagonal are 0 (strictly upper triangular * matrix): * * \f[\left(\begin{array}{*{5}{c}} 0 & u_{0,1} & u_{0,2} & \cdots & * u_{0,N} \\ 0 & 0 & u_{1,2} & \cdots & u_{1,N} \\ 0 & 0 * & 0 & \cdots & u_{2,N} \\ \vdots & \vdots & \vdots & \ddots & * \vdots \\ 0 & 0 & 0 & \cdots & 0 \\ * \end{array}\right).\f] * * // It can be included via the header file * * \code #include <blaze/math/StrictlyUpperMatrix.h> \endcode * * // The type of the adapted matrix can be specified via the first template * parameter: * * \code template< typename MT > class StrictlyUpperMatrix; \endcode * * // \c MT specifies the type of the matrix to be adapted. * blaze::StrictlyUpperMatrix can be used // with any non-cv-qualified, * non-reference, non-pointer, non-expression dense or sparse matrix // type. * Note that the given matrix type must be either resizable (as for instance * // blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at * compile time (as for instance // blaze::StaticMatrix). 
// // The following * examples give an impression of several possible strictly upper triangular * matrices: * * \code // Definition of a 3x3 row-major dense strictly upper matrix with * static memory blaze::StrictlyUpperMatrix< * blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A; * * // Definition of a resizable column-major dense strictly upper matrix based * on HybridMatrix blaze::StrictlyUpperMatrix< * blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> B; * * // Definition of a resizable row-major dense strictly upper matrix based on * DynamicMatrix blaze::StrictlyUpperMatrix< * blaze::DynamicMatrix<double,blaze::rowMajor> > C; * * // Definition of a compressed row-major single precision strictly upper * matrix blaze::StrictlyUpperMatrix< * blaze::CompressedMatrix<float,blaze::rowMajor> > D; \endcode * * // The storage order of a strictly upper triangular matrix is depending on * the storage order of // the adapted matrix type \c MT. In case the adapted * matrix is stored in a row-wise fashion (i.e. // is specified as * blaze::rowMajor), the strictly upper matrix will also be a row-major * matrix. // Otherwise, if the adapted matrix is column-major (i.e. is * specified as blaze::columnMajor), // the strictly upper matrix will also * be a column-major matrix. // // // \n \section * adaptors_triangular_matrices_diagonalmatrix DiagonalMatrix // <hr> // // * The blaze::DiagonalMatrix class template is an adapter for existing dense * and sparse matrix // types. It inherits the properties and the interface * of the given matrix type \c MT and extends // it by enforcing the * additional invariant that all matrix elements above and below the diagonal * // are 0 (diagonal matrix): * * \f[\left(\begin{array}{*{5}{c}} l_{0,0} & 0 & 0 & \cdots & 0 * \\ 0 & l_{1,1} & 0 & \cdots & 0 \\ 0 & 0 & * l_{2,2} & \cdots & 0 \\ \vdots & \vdots & \vdots & \ddots & * \vdots \\ 0 & 0 & 0 & \cdots & l_{N,N} \\ * \end{array}\right).\f] * * // It can be included via the header file * * \code #include <blaze/math/DiagonalMatrix.h> \endcode * * // The type of the adapted matrix can be specified via the first template * parameter: * * \code template< typename MT > class DiagonalMatrix; \endcode * * // \c MT specifies the type of the matrix to be adapted. * blaze::DiagonalMatrix can be used with any // non-cv-qualified, * non-reference, non-pointer, non-expression dense or sparse matrix type. * Note // that the given matrix type must be either resizable (as for * instance blaze::HybridMatrix or // blaze::DynamicMatrix) or must be square * at compile time (as for instance blaze::StaticMatrix). // // The following * examples give an impression of several possible diagonal matrices: * * \code // Definition of a 3x3 row-major dense diagonal matrix with static * memory blaze::DiagonalMatrix< * blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A; * * // Definition of a resizable column-major dense diagonal matrix based on * HybridMatrix blaze::DiagonalMatrix< * blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> B; * * // Definition of a resizable row-major dense diagonal matrix based on * DynamicMatrix blaze::DiagonalMatrix< * blaze::DynamicMatrix<double,blaze::rowMajor> > C; * * // Definition of a compressed row-major single precision diagonal matrix * blaze::DiagonalMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D; * \endcode * * // The storage order of a diagonal matrix is depending on the storage order * of the adapted matrix // type \c MT. 
In case the adapted matrix is stored * in a row-wise fashion (i.e. is specified // as blaze::rowMajor), the * diagonal matrix will also be a row-major matrix. Otherwise, if the // * adapted matrix is column-major (i.e. is specified as blaze::columnMajor), * the diagonal matrix // will also be a column-major matrix. // // // \n * \section adaptors_triangular_matrices_special_properties Special * Properties of Triangular Matrices // <hr> // // A triangular matrix is * used exactly like a matrix of the underlying, adapted matrix type \c MT. * // It also provides (nearly) the same interface as the underlying matrix * type. However, there are // some important exceptions resulting from the * triangular matrix constraint: // // -# <b>\ref * adaptors_triangular_matrices_square</b> // -# <b>\ref * adaptors_triangular_matrices_triangular</b> // -# <b>\ref * adaptors_triangular_matrices_initialization</b> // -# <b>\ref * adaptors_triangular_matrices_storage</b> // -# <b>\ref * adaptors_triangular_matrices_scaling</b> // // \n \subsection * adaptors_triangular_matrices_square Triangular Matrices Must Always be * Square! // // In case a resizable matrix is used (as for instance * blaze::HybridMatrix, blaze::DynamicMatrix, // or blaze::CompressedMatrix), * this means that the according constructors, the \c resize() and // the \c * extend() functions only expect a single parameter, which specifies both * the number of // rows and columns, instead of two (one for the number of * rows and one for the number of columns): * * \code using blaze::DynamicMatrix; using blaze::LowerMatrix; using * blaze::rowMajor; * * // Default constructed, default initialized, row-major 3x3 lower dynamic * matrix LowerMatrix< DynamicMatrix<double,rowMajor> > A( 3 ); * * // Resizing the matrix to 5x5 A.resize( 5 ); * * // Extending the number of rows and columns by 2, resulting in a 7x7 matrix * A.extend( 2 ); \endcode * * // In case a matrix with a fixed size is used (as for instance * blaze::StaticMatrix), the number // of rows and number of columns must be * specified equally: * * \code using blaze::StaticMatrix; using blaze::LowerMatrix; using * blaze::columnMajor; * * // Correct setup of a fixed size column-major 3x3 lower static matrix * LowerMatrix< StaticMatrix<int,3UL,3UL,columnMajor> > A; * * // Compilation error: the provided matrix type is not a square matrix type * LowerMatrix< StaticMatrix<int,3UL,4UL,columnMajor> > B; \endcode * * // \n \subsection adaptors_triangular_matrices_triangular The Triangular * Property is Always Enforced! // // This means that it is only allowed to * modify elements in the lower part or the diagonal of // a lower triangular * matrix and in the upper part or the diagonal of an upper triangular * matrix. // Unitriangular and strictly triangular matrices are even more * restrictive and don't allow the // modification of diagonal elements. * Also, triangular matrices can only be assigned matrices that // don't * violate their triangular property. The following example demonstrates this * restriction // by means of the blaze::LowerMatrix adaptor. For examples * with other triangular matrix types // see the according class * documentations. 
 *
 * \code
 * using blaze::CompressedMatrix;
 * using blaze::DynamicMatrix;
 * using blaze::StaticMatrix;
 * using blaze::LowerMatrix;
 * using blaze::rowMajor;
 *
 * using CompressedLower = LowerMatrix< CompressedMatrix<double,rowMajor> >;
 *
 * // Default constructed, row-major 3x3 lower compressed matrix
 * CompressedLower A( 3 );
 *
 * // Initializing elements via the function call operator
 * A(0,0) = 1.0;  // Initialization of the diagonal element (0,0)
 * A(2,0) = 2.0;  // Initialization of the lower element (2,0)
 * A(1,2) = 9.0;  // Throws an exception; invalid modification of upper element
 *
 * // Inserting two more elements via the insert() function
 * A.insert( 1, 0, 3.0 );  // Inserting the lower element (1,0)
 * A.insert( 2, 1, 4.0 );  // Inserting the lower element (2,1)
 * A.insert( 0, 2, 9.0 );  // Throws an exception; invalid insertion of upper element
 *
 * // Appending an element via the append() function
 * A.reserve( 1, 3 );      // Reserving enough capacity in row 1
 * A.append( 1, 1, 5.0 );  // Appending the diagonal element (1,1)
 * A.append( 1, 2, 9.0 );  // Throws an exception; appending an element in the upper part
 *
 * // Access via a non-const iterator
 * CompressedLower::Iterator it = A.begin(1);
 * *it = 6.0;  // Modifies the lower element (1,0)
 * ++it;
 * *it = 9.0;  // Modifies the diagonal element (1,1)
 *
 * // Erasing elements via the erase() function
 * A.erase( 0, 0 );  // Erasing the diagonal element (0,0)
 * A.erase( 2, 0 );  // Erasing the lower element (2,0)
 *
 * // Construction from a lower dense matrix
 * StaticMatrix<double,3UL,3UL> B{ {  3.0,  0.0,  0.0 },
 *                                 {  8.0,  0.0,  0.0 },
 *                                 { -2.0, -1.0,  4.0 } };
 *
 * LowerMatrix< DynamicMatrix<double,rowMajor> > C( B );  // OK
 *
 * // Assignment of a non-lower dense matrix
 * StaticMatrix<double,3UL,3UL> D{ {  3.0,  0.0, -2.0 },
 *                                 {  8.0,  0.0,  0.0 },
 *                                 { -2.0, -1.0,  4.0 } };
 *
 * C = D;  // Throws an exception; lower matrix invariant would be violated!
 * \endcode
 *
 * // The triangular property is also enforced during the construction of triangular custom
 * // matrices: In case the given array of elements does not represent the according triangular
 * // matrix type, a \c std::invalid_argument exception is thrown:
 *
 * \code
 * using blaze::CustomMatrix;
 * using blaze::LowerMatrix;
 * using blaze::unaligned;
 * using blaze::unpadded;
 * using blaze::rowMajor;
 *
 * using CustomLower = LowerMatrix< CustomMatrix<double,unaligned,unpadded,rowMajor> >;
 *
 * // Creating a 3x3 lower custom matrix from a properly initialized array
 * double array[9] = { 1.0, 0.0, 0.0,
 *                     2.0, 3.0, 0.0,
 *                     4.0, 5.0, 6.0 };
 * CustomLower A( array, 3UL );  // OK
 *
 * // Attempt to create a second 3x3 lower custom matrix from an uninitialized array
 * std::unique_ptr<double[]> memory( new double[9UL] );
 * CustomLower B( memory.get(), 3UL );  // Throws an exception
 * \endcode
 *
 * // Finally, the triangular matrix property is enforced for views (rows, columns, submatrices,
 * // ...) on the triangular matrix. The following example demonstrates that modifying the elements
 * // of an entire row and submatrix of a lower matrix only affects the lower and diagonal matrix
 * // elements. Again, this example uses blaze::LowerMatrix; for examples with other triangular
 * // matrix types see the according class documentations.
* * \code using blaze::DynamicMatrix; using blaze::LowerMatrix; * * // Setup of the lower matrix // // ( 0 0 0 0 ) // A = ( 1 2 0 0 ) // * ( 0 3 0 0 ) // ( 4 0 5 0 ) // LowerMatrix< DynamicMatrix<int> > A( 4 * ); A(1,0) = 1; A(1,1) = 2; A(2,1) = 3; A(3,0) = 4; A(3,2) = 5; * * // Setting the lower and diagonal elements in the 2nd row to 9 results in the * matrix // // ( 0 0 0 0 ) // A = ( 1 2 0 0 ) // ( 9 9 9 0 ) * // ( 4 0 5 0 ) // row( A, 2 ) = 9; * * // Setting the lower and diagonal elements in the 1st and 2nd column to 7 * results in // // ( 0 0 0 0 ) // A = ( 1 7 0 0 ) // ( 9 7 7 0 * ) // ( 4 7 7 0 ) // submatrix( A, 0, 1, 4, 2 ) = 7; \endcode * * // The next example demonstrates the (compound) assignment to rows/columns * and submatrices of // triangular matrices. Since only lower/upper and * potentially diagonal elements may be modified // the matrix to be assigned * must be structured such that the triangular matrix invariant of the // * matrix is preserved. Otherwise a \c std::invalid_argument exception is * thrown: * * \code using blaze::DynamicMatrix; using blaze::DynamicVector; using * blaze::LowerMatrix; using blaze::rowVector; * * // Setup of two default 4x4 lower matrices LowerMatrix< DynamicMatrix<int> > * A1( 4 ), A2( 4 ); * * // Setup of a 4-dimensional vector // // v = ( 1 2 3 0 ) // * DynamicVector<int,rowVector> v{ 1, 2, 3, 0 }; * * // OK: Assigning v to the 2nd row of A1 preserves the lower matrix invariant * // // ( 0 0 0 0 ) // A1 = ( 0 0 0 0 ) // ( 1 2 3 0 ) // * ( 0 0 0 0 ) // row( A1, 2 ) = v; // OK * * // Error: Assigning v to the 1st row of A1 violates the lower matrix * invariant! The element // marked with X cannot be assigned and triggers * an exception. // // ( 0 0 0 0 ) // A1 = ( 1 2 X 0 ) // ( 1 * 2 3 0 ) // ( 0 0 0 0 ) // row( A1, 1 ) = v; // Assignment throws * an exception! * * // Setup of the 3x2 dynamic matrix // // ( 0 0 ) // B = ( 7 0 ) // * ( 8 9 ) // DynamicMatrix<int> B( 3UL, 2UL, 0 ); B(1,0) = 7; B(2,0) = 8; * B(2,1) = 9; * * // OK: Assigning B to a submatrix of A2 such that the lower matrix invariant * can be preserved // // ( 0 0 0 0 ) // A2 = ( 0 7 0 0 ) // * ( 0 8 9 0 ) // ( 0 0 0 0 ) // submatrix( A2, 0UL, 1UL, 3UL, 2UL ) = * B; // OK * * // Error: Assigning B to a submatrix of A2 such that the lower matrix * invariant cannot be // preserved! The elements marked with X cannot be * assigned without violating the invariant! // // ( 0 0 0 0 ) // A2 * = ( 0 7 X 0 ) // ( 0 8 8 X ) // ( 0 0 0 0 ) // submatrix( * A2, 0UL, 2UL, 3UL, 2UL ) = B; // Assignment throws an exception! \endcode * * // \n \subsection adaptors_triangular_matrices_initialization The Elements of * a Dense Triangular Matrix are Always Default Initialized! 
// // Although * this results in a small loss of efficiency during the creation of a dense * lower or // upper matrix this initialization is important since otherwise * the lower/upper matrix property // of dense lower matrices would not be * guaranteed: * * \code using blaze::DynamicMatrix; using blaze::LowerMatrix; using * blaze::UpperMatrix; * * // Uninitialized, 5x5 row-major dynamic matrix DynamicMatrix<int,rowMajor> A( * 5, 5 ); * * // 5x5 row-major lower dynamic matrix with default initialized upper matrix * LowerMatrix< DynamicMatrix<int,rowMajor> > B( 5 ); * * // 7x7 column-major upper dynamic matrix with default initialized lower * matrix UpperMatrix< DynamicMatrix<int,columnMajor> > C( 7 ); * * // 3x3 row-major diagonal dynamic matrix with default initialized lower and * upper matrix DiagonalMatrix< DynamicMatrix<int,rowMajor> > D( 3 ); * \endcode * * // \n \subsection adaptors_triangular_matrices_storage Dense Triangular * Matrices Store All Elements! // // All dense triangular matrices store all * \f$ N \times N \f$ elements, including the immutable // elements in the * lower or upper part, respectively. Therefore dense triangular matrices * don't // provide any kind of memory reduction! There are two main reasons * for this: First, storing also // the zero elements guarantees maximum * performance for many algorithms that perform vectorized // operations on * the triangular matrices, which is especially true for small dense * matrices. // Second, conceptually all triangular adaptors merely restrict * the interface to the matrix type // \c MT and do not change the data * layout or the underlying matrix type. // // This property matters most for * diagonal matrices. In order to achieve the perfect combination // of * performance and memory consumption for a diagonal matrix it is recommended * to use dense // matrices for small diagonal matrices and sparse matrices * for large diagonal matrices: * * \code // Recommendation 1: use dense matrices for small diagonal matrices * using SmallDiagonalMatrix = blaze::DiagonalMatrix< * blaze::StaticMatrix<float,3UL,3UL> >; * * // Recommendation 2: use sparse matrices for large diagonal matrices using * LargeDiagonalMatrix = blaze::DiagonalMatrix< * blaze::CompressedMatrix<float> >; \endcode * * // \n \subsection adaptors_triangular_matrices_scaling Unitriangular Matrices * Cannot Be Scaled! // // Since the diagonal elements of a unitriangular * matrix have a fixed value of 1 it is not possible // to self-scale such a * matrix: * * \code using blaze::DynamicMatrix; using blaze::UniLowerMatrix; * * UniLowerMatrix< DynamicMatrix<int> > A( 4 ); * * A *= 2; // Compilation error; Scale operation is not available on an * unilower matrix A /= 2; // Compilation error; Scale operation is * not available on an unilower matrix A.scale( 2 ); // Compilation error; * Scale function is not available on an unilower matrix * * A = A * 2; // Throws an exception; Invalid assignment of non-unilower matrix * A = A / 2; // Throws an exception; Invalid assignment of non-unilower * matrix \endcode * * // \n \section adaptors_triangular_matrices_arithmetic_operations Arithmetic * Operations // <hr> // // A lower and upper triangular matrix can * participate in numerical operations in any way any other // dense or * sparse matrix can participate. It can also be combined with any other * dense or sparse // vector or matrix. 
The following code example gives an * impression of the use of blaze::LowerMatrix // within arithmetic * operations: * * \code using blaze::LowerMatrix; using blaze::DynamicMatrix; using * blaze::HybridMatrix; using blaze::StaticMatrix; using * blaze::CompressedMatrix; using blaze::rowMajor; using blaze::columnMajor; * * DynamicMatrix<double,rowMajor> A( 3, 3 ); CompressedMatrix<double,rowMajor> * B( 3, 3 ); * * LowerMatrix< DynamicMatrix<double,rowMajor> > C( 3 ); LowerMatrix< * CompressedMatrix<double,rowMajor> > D( 3 ); * * LowerMatrix< HybridMatrix<float,3UL,3UL,rowMajor> > E; LowerMatrix< * StaticMatrix<float,3UL,3UL,columnMajor> > F; * * E = A + B; // Matrix addition and assignment to a row-major lower matrix * (includes runtime check) F = C - D; // Matrix subtraction and * assignment to a column-major lower matrix (only compile time check) F = A * * D; // Matrix multiplication between a dense and a sparse matrix * (includes runtime check) * * C *= 2.0; // In-place scaling of matrix C E = 2.0 * B; // Scaling of * matrix B (includes runtime check) F = C * 2.0; // Scaling of matrix C * (only compile time check) * * E += A - B; // Addition assignment (includes runtime check) F -= C + D; * // Subtraction assignment (only compile time check) F *= A * D; // * Multiplication assignment (includes runtime check) \endcode * * // Note that it is possible to assign any kind of matrix to a triangular * matrix. In case the // matrix to be assigned does not satisfy the * invariants of the triangular matrix at compile // time, a runtime check is * performed. Also note that upper triangular, diagonal, unitriangular // and * strictly triangular matrix types can be used in the same way, but may pose * some additional // restrictions (see the according class documentations). * // // // \n \section adaptors_triangular_matrices_block_matrices * Triangular Block Matrices // <hr> // // It is also possible to use * triangular block matrices: * * \code using blaze::CompressedMatrix; using blaze::DynamicMatrix; using * blaze::StaticMatrix; using blaze::LowerMatrix; using blaze::UpperMatrix; * * // Definition of a 5x5 lower block matrix based on DynamicMatrix LowerMatrix< * DynamicMatrix< StaticMatrix<int,3UL,3UL> > > A( 5 ); * * // Definition of a 7x7 upper block matrix based on CompressedMatrix * UpperMatrix< CompressedMatrix< StaticMatrix<int,3UL,3UL> > > B( 7 ); * \endcode * * // Also in this case the triangular matrix invariant is enforced, i.e. 
it is * not possible to // manipulate elements in the upper part (lower triangular * matrix) or the lower part (upper // triangular matrix) of the matrix: * * \code const StaticMatrix<int,3UL,3UL> C{ { 1, -4, 5 }, { 6, 8, -3 }, { 2, * -1, 2 } }; * * A(2,4)(1,1) = -5; // Invalid manipulation of upper matrix element; * Results in an exception B.insert( 4, 2, C ); // Invalid insertion of the * elements (4,2); Results in an exception \endcode * * // Note that unitriangular matrices are restricted to numeric element types * and therefore cannot // be used for block matrices: * * \code using blaze::CompressedMatrix; using blaze::DynamicMatrix; using * blaze::StaticMatrix; using blaze::UniLowerMatrix; using * blaze::UniUpperMatrix; * * // Compilation error: lower unitriangular matrices are restricted to numeric * element types UniLowerMatrix< DynamicMatrix< StaticMatrix<int,3UL,3UL> > > * A( 5 ); * * // Compilation error: upper unitriangular matrices are restricted to numeric * element types UniUpperMatrix< CompressedMatrix< StaticMatrix<int,3UL,3UL> * > > B( 7 ); \endcode * * // For more information on block matrices, see the tutorial on \ref * block_vectors_and_matrices. // // // \n \section * adaptors_triangular_matrices_performance Performance Considerations // * <hr> // // The \b Blaze library tries to exploit the properties of lower * and upper triangular matrices // whenever and wherever possible. Therefore * using triangular matrices instead of a general // matrices can result in a * considerable performance improvement. However, there are also // * situations when using a triangular matrix introduces some overhead. The * following examples // demonstrate several common situations where * triangular matrices can positively or negatively // impact performance. // * // \n \subsection * adaptors_triangular_matrices_matrix_matrix_multiplication Positive Impact: * Matrix/Matrix Multiplication // // When multiplying two matrices, at least * one of which is triangular, \b Blaze can exploit the // fact that either * the lower or upper part of the matrix contains only default elements and * // restrict the algorithm to the non-zero elements. The following example * demonstrates this by // means of a dense matrix/dense matrix * multiplication with lower triangular matrices: * * \code using blaze::DynamicMatrix; using blaze::LowerMatrix; using * blaze::rowMajor; using blaze::columnMajor; * * LowerMatrix< DynamicMatrix<double,rowMajor> > A; LowerMatrix< * DynamicMatrix<double,columnMajor> > B; DynamicMatrix<double,columnMajor> * C; * * // ... Resizing and initialization * * C = A * B; \endcode * * // In comparison to a general matrix multiplication, the performance * advantage is significant, // especially for large matrices. Therefore is * it highly recommended to use the blaze::LowerMatrix // and * blaze::UpperMatrix adaptors when a matrix is known to be lower or upper * triangular, // respectively. Note however that the performance advantage * is most pronounced for dense matrices // and much less so for sparse * matrices. // // \n \subsection * adaptors_triangular_matrices_matrix_vector_multiplication Positive Impact: * Matrix/Vector Multiplication // // A similar performance improvement can * be gained when using a triangular matrix in a matrix/vector // * multiplication: * * \code using blaze::DynamicMatrix; using blaze::DynamicVector; using * blaze::rowMajor; using blaze::columnVector; * * LowerMatrix< DynamicMatrix<double,rowMajor> > A; * DynamicVector<double,columnVector> x, y; * * // ... 
Resizing and initialization * * y = A * x; \endcode * * // In this example, \b Blaze also exploits the structure of the matrix and * approx. halves the // runtime of the multiplication. Also in case of * matrix/vector multiplications the performance // improvement is most * pronounced for dense matrices and much less so for sparse matrices. // // * \n \subsection adaptors_triangular_matrices_assignment Negative Impact: * Assignment of a General Matrix // // In contrast to using a triangular * matrix on the right-hand side of an assignment (i.e. for // read access), * which introduces absolutely no performance penalty, using a triangular * matrix // on the left-hand side of an assignment (i.e. for write access) * may introduce additional // overhead when it is assigned a general matrix, * which is not triangular at compile time: * * \code using blaze::DynamicMatrix; using blaze::LowerMatrix; * * LowerMatrix< DynamicMatrix<double> > A, C; DynamicMatrix<double> B; * * B = A; // Only read-access to the lower matrix; no performance penalty C = * A; // Assignment of a lower matrix to another lower matrix; no runtime * overhead C = B; // Assignment of a general matrix to a lower matrix; some * runtime overhead \endcode * * // When assigning a general (potentially not lower triangular) matrix to a * lower matrix or a // general (potentially not upper triangular) matrix to * an upper matrix it is necessary to check // whether the matrix is lower or * upper at runtime in order to guarantee the triangular property // of the * matrix. In case it turns out to be lower or upper, respectively, it is * assigned as // efficiently as possible, if it is not, an exception is * thrown. In order to prevent this runtime // overhead it is therefore * generally advisable to assign lower or upper triangular matrices to // * other lower or upper triangular matrices.\n // In this context it is * especially noteworthy that the addition, subtraction, and multiplication * // of two triangular matrices of the same structure always results in * another triangular matrix: * * \code LowerMatrix< DynamicMatrix<double> > A, B, C; * * C = A + B; // Results in a lower matrix; no runtime overhead C = A - B; // * Results in a lower matrix; no runtime overhead C = A * B; // Results in a * lower matrix; no runtime overhead \endcode * * \code UpperMatrix< DynamicMatrix<double> > A, B, C; * * C = A + B; // Results in a upper matrix; no runtime overhead C = A - B; // * Results in a upper matrix; no runtime overhead C = A * B; // Results in a * upper matrix; no runtime overhead \endcode * * // \n Previous: \ref adaptors_hermitian_matrices &nbsp; &nbsp; Next: \ref * views */ //************************************************************************************************* //**Views ****************************************************************************************** /* * !\page views Views // // \tableofcontents // // // \section views_general * General Concepts // <hr> // // Views represents parts of a vector or * matrix, such as a subvector, a submatrix, or a specific // row, column, or * band of a matrix. As such, views act as a reference to specific elements * of // a vector or matrix. This reference is valid and can be used in every * way as any other vector // or matrix can be used as long as the referenced * vector or matrix is not resized or entirely // destroyed. Views also act * as alias to the elements of the vector or matrix: Changes made to the // * elements (e.g. 
modifying values, inserting or erasing elements) via the * view are immediately // visible in the vector or matrix and changes made * via the vector or matrix are immediately // visible in the view. // // It * is also possible to create nested views (compound views), such as for * instance bands of // submatrices or row selections on column selections. A * compound view also acts as reference // to specific elements of the * underlying vector or matrix and is valid as long as the underlying, // * referenced vector or matrix is not resized or entirely destroyed. // // * The \b Blaze library provides the following views on vectors and matrices: * // // Vector views: // - \ref views_subvectors // - \ref * views_element_selections // // Matrix views: // - \ref views_submatrices * // - \ref views_rows // - \ref views_row_selections // - \ref * views_columns // - \ref views_column_selections // - \ref views_bands // * // // \n \section views_examples Examples * * \code using blaze::DynamicMatrix; using blaze::StaticVector; * * // Setup of the 3x5 row-major matrix DynamicMatrix<int> A{ { 1, 0, -2, 3, * 0 }, { 0, 2, 5, -1, -1 }, { 1, 0, 0, 2, 1 } }; * * // Setup of the 2-dimensional row vector StaticVector<int,2UL,rowVector> * vec{ 18, 19 }; * * // Assigning to the elements (1,2) and (1,3) via a subvector of a row // // * ( 1 0 -2 3 0 ) // ( 0 2 18 19 -1 ) // ( 1 0 0 2 1 ) // * subvector( row( A, 1UL ), 2UL, 2UL ) = vec; * * // Switching rows 0 and 2 of A // // ( 1 0 0 2 1 ) // ( 0 2 18 19 -1 ) * // ( 1 0 -2 3 0 ) // rows<0,2>( A ) = rows<2,0>( A ); * * // Warning: It is the programmer's responsibility to ensure the view does not * outlive // the viewed vector or matrix (dangling reference)! auto * row1 = row<1UL>( DynamicMatrix<int>{ { 1, 2, 3 }, { 4, 5, 6 } } ); * \endcode * * // \n Previous: \ref adaptors_triangular_matrices &nbsp; &nbsp; Next: \ref * views_subvectors */ //************************************************************************************************* //**Subvectors ************************************************************************************* /* * !\page views_subvectors Subvectors // // \tableofcontents // // // * Subvectors provide views on a specific part of a dense or sparse vector. * As such, subvectors // act as a reference to a specific range within a * vector. This reference is valid and can be // used in every way any other * dense or sparse vector can be used as long as the vector containing // the * subvector is not resized or entirely destroyed. The subvector also acts as * an alias to the // vector elements in the specified range: Changes made to * the elements (e.g. modifying values, // inserting or erasing elements) are * immediately visible in the vector and changes made via the // vector are * immediately visible in the subvector. // // // \n \section * views_subvectors_setup Setup of Subvectors // <hr> // // A view on a dense * or sparse subvector can be created very conveniently via the \c * subvector() // function. It can be included via the header file * * \code #include <blaze/math/Subvector.h> \endcode * * // The first parameter specifies the offset of the subvector within the * underlying dense or sparse // vector, the second parameter specifies the * size of the subvector. The two parameters can be // specified either at * compile time or at runtime: * * \code blaze::DynamicVector<double,blaze::rowVector> x; // ... Resizing and * initialization * * // Create a subvector from index 4 with a size of 12 (i.e. 
in the range * [4..15]) (compile time arguments) auto sv1 = subvector<4UL,12UL>( x ); * * // Create a subvector from index 8 with a size of 16 (i.e. in the range * [8..23]) (runtime arguments) auto sv2 = subvector( x, 8UL, 16UL ); * \endcode * * // The \c subvector() function returns an expression representing the * subvector view. The type of // this expression depends on the given * subvector arguments, primarily the type of the vector and // the compile * time arguments. If the type is required, it can be determined via the \c * decltype // specifier: * * \code using VectorType = blaze::DynamicVector<int>; using SubvectorType = * decltype( blaze::subvector<4UL,12UL>( std::declval<VectorType>() ) ); * \endcode * * // The resulting view can be treated as any other dense or sparse vector, * i.e. it can be assigned // to, it can be copied from, and it can be used * in arithmetic operations. A subvector created // from a row vector can be * used as any other row vector, a subvector created from a column vector // * can be used as any other column vector. The view can also be used on both * sides of an assignment: // The subvector can either be used as an alias to * grant write access to a specific subvector of a // vector primitive on the * left-hand side of an assignment or to grant read-access to a specific // * subvector of a vector primitive or expression on the right-hand side of an * assignment. The // following example demonstrates this in detail: * * \code blaze::DynamicVector<double,blaze::rowVector> x; * blaze::CompressedVector<double,blaze::rowVector> y; * blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and * initialization * * // Create a subvector from index 0 with a size of 10 (i.e. in the range * [0..9]) auto sv = subvector( x, 0UL, 10UL ); * * // Setting the first ten elements of x to the 2nd row of matrix A sv = row( * A, 2UL ); * * // Setting the second ten elements of x to y subvector( x, 10UL, 10UL ) = y; * * // Setting the 3rd row of A to a subvector of x row( A, 3UL ) = subvector( x, * 3UL, 10UL ); * * // Setting x to a subvector of the result of the addition between y and the * 1st row of A x = subvector( y + row( A, 1UL ), 2UL, 5UL ); \endcode * * // \warning It is the programmer's responsibility to ensure the subvector * does not outlive the // viewed vector: * * \code // Creating a subvector on a temporary vector; results in a dangling * reference! auto sv = subvector<1UL,3UL>( DynamicVector<int>{ 1, 2, 3, 4, 5 * } ); \endcode * * // \n \section views_subvectors_element_access Element Access // <hr> // // * The elements of a subvector can be directly accessed via the subscript * operator: * * \code blaze::DynamicVector<double,blaze::rowVector> v; // ... Resizing and * initialization * * // Creating an 8-dimensional subvector, starting from index 4 auto sv = * subvector( v, 4UL, 8UL ); * * // Setting the 1st element of the subvector, which corresponds to // the * element at index 5 in vector v sv[1] = 2.0; \endcode * * // The numbering of the subvector elements is * * \f[\left(\begin{array}{*{5}{c}} 0 & 1 & 2 & \cdots & N-1 \\ * \end{array}\right),\f] * * // where N is the specified size of the subvector. Alternatively, the * elements of a subvector can // be traversed via iterators. 
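// As a minimal sketch of this aliasing (assuming <blaze/math/DynamicVector.h> and
// <blaze/math/Subvector.h> are included), the following snippet scales a subvector through its
// iterators and observes the effect in the underlying vector:

\code
blaze::DynamicVector<double,blaze::rowVector> v{ 1.0, 2.0, 3.0, 4.0, 5.0, 6.0 };

auto sv = subvector( v, 2UL, 3UL );  // View on the range [2..4] of v

// Scaling the viewed range via iterators; every write goes through to v
for( auto it=sv.begin(); it!=sv.end(); ++it ) {
   *it *= 10.0;
}

// v now contains ( 1 2 30 40 50 6 )
\endcode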
Just as with * vectors, in case of non-const subvectors, \c begin() // and \c end() * return an iterator, which allows to manipulate the elements, in case of * constant // subvectors an iterator to immutable elements is returned: * * \code blaze::DynamicVector<int,blaze::rowVector> v( 256UL ); // ... Resizing * and initialization * * // Creating a reference to a specific subvector of vector v auto sv = * subvector( v, 16UL, 64UL ); * * // Traversing the elements via iterators to non-const elements for( auto * it=sv.begin(); it!=sv.end(); ++it ) { it = ...; // OK: Write access to * the dense subvector value. ... = *it; // OK: Read access to the dense * subvector value. } * * // Traversing the elements via iterators to const elements for( auto * it=sv.cbegin(); it!=sv.cend(); ++it ) { it = ...; // Compilation error: * Assignment to the value via iterator-to-const is invalid. ... = *it; // * OK: Read access to the dense subvector value. } \endcode * * \code blaze::CompressedVector<int,blaze::rowVector> v( 256UL ); // ... * Resizing and initialization * * // Creating a reference to a specific subvector of vector v auto sv = * subvector( v, 16UL, 64UL ); * * // Traversing the elements via iterators to non-const elements for( auto * it=sv.begin(); it!=sv.end(); ++it ) { it->value() = ...; // OK: Write * access to the value of the non-zero element. ... = it->value(); // OK: * Read access to the value of the non-zero element. it->index() = ...; // * Compilation error: The index of a non-zero element cannot be changed. ... * = it->index(); // OK: Read access to the index of the sparse element. } * * // Traversing the elements via iterators to const elements for( auto * it=sv.cbegin(); it!=sv.cend(); ++it ) { it->value() = ...; // Compilation * error: Assignment to the value via iterator-to-const is invalid. ... = * it->value(); // OK: Read access to the value of the non-zero element. * it->index() = ...; // Compilation error: The index of a non-zero element * cannot be changed. ... = it->index(); // OK: Read access to the index of * the sparse element. } \endcode * * // \n \section views_subvectors_element_insertion Element Insertion // <hr> * // // Inserting/accessing elements in a sparse subvector can be done by * several alternative functions. // The following example demonstrates all * options: * * \code blaze::CompressedVector<double,blaze::rowVector> v( 256UL ); // * Non-initialized vector of size 256 * * auto sv = subvector( v, 10UL, 60UL ); // View on the range [10..69] of v * * // The subscript operator provides access to all possible elements of the * sparse subvector, // including the zero elements. In case the subscript * operator is used to access an element // that is currently not stored in * the sparse subvector, the element is inserted into the // subvector. * sv[42] = 2.0; * * // The second operation for inserting elements is the set() function. In case * the element is // not contained in the subvector it is inserted into the * subvector, if it is already contained // in the subvector its value is * modified. sv.set( 45UL, -1.2 ); * * // An alternative for inserting elements into the subvector is the insert() * function. However, // it inserts the element only in case the element is * not already contained in the subvector. sv.insert( 50UL, 3.7 ); * * // Just as in case of vectors, elements can also be inserted via the append() * function. 
In // case of subvectors, append() also requires that the * appended element's index is strictly // larger than the currently largest * non-zero index of the subvector and that the subvector's // capacity is * large enough to hold the new element. Note however that due to the nature * of // a subvector, which may be an alias to the middle of a sparse vector, * the append() function // does not work as efficiently for a subvector as * it does for a vector. sv.reserve( 10UL ); sv.append( 51UL, -2.1 ); * \endcode * * // \n \section views_subvectors_common_operations Common Operations // <hr> * // // A subvector view can be used like any other dense or sparse vector. * This means that with // only a few exceptions all \ref vector_operations * and \ref arithmetic_operations can be used. // For instance, the current * number of elements can be obtained via the \c size() function, the // * current capacity via the \c capacity() function, and the number of * non-zero elements via the // \c nonZeros() function. However, since * subvectors are references to a specific range of a // vector, several * operations are not possible, such as resizing and swapping. The following * // example shows this by means of a dense subvector view: * * \code blaze::DynamicVector<int,blaze::rowVector> v( 42UL ); // ... Resizing * and initialization * * // Creating a view on the range [5..15] of vector v auto sv = subvector( v, * 5UL, 10UL ); * * sv.size(); // Returns the number of elements in the subvector * sv.capacity(); // Returns the capacity of the subvector * sv.nonZeros(); // Returns the number of non-zero elements contained * in the subvector * * sv.resize( 84UL ); // Compilation error: Cannot resize a subvector of a * vector * * auto sv2 = subvector( v, 15UL, 10UL ); swap( sv, sv2 ); // Compilation * error: Swap operation not allowed \endcode * * // \n \section views_subvectors_arithmetic_operations Arithmetic Operations * // <hr> // // Both dense and sparse subvectors can be used in all * arithmetic operations that any other dense // or sparse vector can be used * in. The following example gives an impression of the use of dense // * subvectors within arithmetic operations. All operations (addition, * subtraction, multiplication, // scaling, ...) can be performed on all * possible combinations of dense and sparse subvectors with // fitting * element types: * * \code blaze::DynamicVector<double,blaze::rowVector> d1, d2, d3; * blaze::CompressedVector<double,blaze::rowVector> s1, s2; * * // ... 
Resizing and initialization * * blaze::DynamicMatrix<double,blaze::rowMajor> A; * * auto sv( subvector( d1, 0UL, 10UL ) ); // View on the range [0..9] of vector * d1 * * sv = d2; // Dense vector initialization of the * range [0..9] subvector( d1, 10UL, 10UL ) = s1; // Sparse vector * initialization of the range [10..19] * * d3 = sv + d2; // Dense vector/dense vector addition * s2 = s1 + subvector( d1, 10UL, 10UL ); // Sparse vector/dense vector * addition d2 = sv * subvector( d1, 20UL, 10UL ); // Component-wise vector * multiplication * * subvector( d1, 3UL, 4UL ) *= 2.0; // In-place scaling of the range * [3..6] d2 = subvector( d1, 7UL, 3UL ) * 2.0; // Scaling of the range * [7..9] d2 = 2.0 * subvector( d1, 7UL, 3UL ); // Scaling of the range * [7..9] * * subvector( d1, 0UL , 10UL ) += d2; // Addition assignment subvector( d1, * 10UL, 10UL ) -= s2; // Subtraction assignment subvector( d1, 20UL, 10UL ) * *= sv; // Multiplication assignment * * double scalar = subvector( d1, 5UL, 10UL ) * trans( s1 ); // * Scalar/dot/inner product between two vectors * * A = trans( s1 ) * subvector( d1, 4UL, 16UL ); // Outer product between two * vectors \endcode * * // \n \section views_aligned_subvectors Aligned Subvectors // <hr> // // * Usually subvectors can be defined anywhere within a vector. They may start * at any position and // may have an arbitrary size (only restricted by the * size of the underlying vector). However, in // contrast to vectors * themselves, which are always properly aligned in memory and therefore can * // provide maximum performance, this means that subvectors in general have * to be considered to be // unaligned. This can be made explicit by the \c * blaze::unaligned flag: * * \code using blaze::unaligned; * * blaze::DynamicVector<double,blaze::rowVector> x; // ... Resizing and * initialization * * // Identical creations of an unaligned subvector in the range [8..23] auto * sv1 = subvector ( x, 8UL, 16UL ); auto sv2 = * subvector<unaligned>( x, 8UL, 16UL ); auto sv3 = subvector<8UL,16UL> * ( x ); auto sv4 = subvector<unaligned,8UL,16UL>( x ); \endcode * * // All of these calls to the \c subvector() function are identical. Whether * the alignment flag is // explicitly specified or not, it always returns an * unaligned subvector. Whereas this may provide // full flexibility in the * creation of subvectors, this might result in performance disadvantages // * in comparison to vector primitives (even in case the specified subvector * could be aligned). // Whereas vector primitives are guaranteed to be * properly aligned and therefore provide maximum // performance in all * operations, a general view on a vector might not be properly aligned. This * // may cause a performance penalty on some platforms and/or for some * operations. // // However, it is also possible to create aligned * subvectors. Aligned subvectors are identical to // unaligned subvectors in * all aspects, except that they may pose additional alignment restrictions * // and therefore have less flexibility during creation, but don't suffer * from performance penalties // and provide the same performance as the * underlying vector. 
Aligned subvectors are created by // explicitly * specifying the \c blaze::aligned flag: * * \code using blaze::aligned; * * // Creating an aligned subvector in the range [8..23] auto sv1 = * subvector<aligned>( x, 8UL, 16UL ); auto sv2 = * subvector<aligned,8UL,16UL>( x ); \endcode * * // The alignment restrictions refer to system dependent address restrictions * for the used element // type and the available vectorization mode (SSE, * AVX, ...). In order to be properly aligned the // first element of the * subvector must be aligned. The following source code gives some examples * // for a double precision dynamic vector, assuming that AVX is available, * which packs 4 \c double // values into a SIMD vector: * * \code using blaze::aligned; * * blaze::DynamicVector<double,blaze::columnVector> d( 17UL ); // ... Resizing * and initialization * * // OK: Starts at the beginning, i.e. the first element is aligned auto dsv1 = * subvector<aligned>( d, 0UL, 13UL ); * * // OK: Start index is a multiple of 4, i.e. the first element is aligned auto * dsv2 = subvector<aligned>( d, 4UL, 7UL ); * * // OK: The start index is a multiple of 4 and the subvector includes the last * element auto dsv3 = subvector<aligned>( d, 8UL, 9UL ); * * // Error: Start index is not a multiple of 4, i.e. the first element is not * aligned auto dsv4 = subvector<aligned>( d, 5UL, 8UL ); \endcode * * // Note that the discussed alignment restrictions are only valid for aligned * dense subvectors. // In contrast, aligned sparse subvectors at this time * don't pose any additional restrictions. // Therefore aligned and unaligned * sparse subvectors are truly fully identical. Still, in case // the \c * blaze::aligned flag is specified during setup, an aligned subvector is * created: * * \code using blaze::aligned; * * blaze::CompressedVector<double,blaze::rowVector> x; // ... Resizing and * initialization * * // Creating an aligned subvector in the range [8..23] auto sv1 = * subvector<aligned>( x, 8UL, 16UL ); auto sv2 = * subvector<aligned,8UL,16UL>( x ); \endcode * * // \n Previous: \ref views &nbsp; &nbsp; Next: \ref views_element_selections */ //************************************************************************************************* //**Element Selections ***************************************************************************** /* * !\page views_element_selections Element Selections // // \tableofcontents * // // // Element selections provide views on arbitrary compositions of * elements of dense and sparse // vectors. These views act as a reference to * the selected elements and represent them as another // dense or sparse * vector. This reference is valid and can be used in every way any other * dense // or sparse vector can be used as long as the vector containing the * elements is not resized or // entirely destroyed. The element selection * also acts as an alias to the vector elements in the // specified range: * Changes made to the elements (e.g. modifying values, inserting or erasing * // elements) are immediately visible in the vector and changes made via * the vector are immediately // visible in the elements. // // // \n * \section views_element_selections_setup Setup of Element Selections // // * An element selection can be created very conveniently via the \c * elements() function. 
It can be included via the header file

\code
#include <blaze/math/Elements.h>
\endcode

// The indices of the elements to be selected can be specified either at compile time or at runtime
// (by means of an initializer list, array or vector):

\code
blaze::DynamicVector<double,blaze::rowVector> x;
// ... Resizing and initialization

// Selecting the elements 4, 6, 8, and 10 (compile time arguments)
auto e1 = elements<4UL,6UL,8UL,10UL>( x );

// Selecting the elements 3, 2, and 1 (runtime arguments via an initializer list)
const std::initializer_list<size_t> list{ 3UL, 2UL, 1UL };
auto e2 = elements( x, { 3UL, 2UL, 1UL } );
auto e3 = elements( x, list );

// Selecting the elements 1, 2, 3, 3, 2, and 1 (runtime arguments via a std::array)
const std::array<size_t,6UL> array{ 1UL, 2UL, 3UL, 3UL, 2UL, 1UL };
auto e4 = elements( x, array );
auto e5 = elements( x, array.data(), array.size() );

// Selecting the element 4 five times (runtime arguments via a std::vector)
const std::vector<size_t> vector{ 4UL, 4UL, 4UL, 4UL, 4UL };
auto e6 = elements( x, vector );
auto e7 = elements( x, vector.data(), vector.size() );
\endcode

// Note that it is possible to alias the elements of the underlying vector in any order. Also note
// that it is possible to use the same index multiple times. The \c elements() function returns an
// expression representing the view on the selected elements. The type of this expression depends
// on the given arguments, primarily the type of the vector and the compile time arguments. If the
// type is required, it can be determined via the \c decltype specifier:

\code
using VectorType = blaze::DynamicVector<int>;
using ElementsType = decltype( blaze::elements<4UL,12UL>( std::declval<VectorType>() ) );
\endcode

// The resulting view can be treated as any other dense or sparse vector, i.e. it can be assigned
// to, it can be copied from, and it can be used in arithmetic operations. An element selection
// created from a row vector can be used as any other row vector, an element selection created
// from a column vector can be used as any other column vector. The view can also be used on both
// sides of an assignment: It can either be used as an alias to grant write access to specific
// elements of a vector primitive on the left-hand side of an assignment or to grant read-access
// to specific elements of a vector primitive or expression on the right-hand side of an
// assignment. The following example demonstrates this in detail:

\code
blaze::DynamicVector<double,blaze::rowVector> x;
blaze::CompressedVector<double,blaze::rowVector> y;
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ...
Resizing and * initialization * * // Selecting the elements 1, 3, 5, and 7 auto e = elements( x, { 1UL, 3UL, * 5UL, 7UL } ); * * // Setting the elements 1, 3, 5, and 7 of x to the 2nd row of matrix A e = * row( A, 2UL ); * * // Setting the elements 2, 4, 6, and 8 of x to y elements( x, { 2UL, 4UL, * 6UL, 8UL } ) = y; * * // Setting the 3rd row of A to the elements 5, 4, 3, and 2 of x row( A, 3UL ) * = elements( x, { 5UL, 4UL, 3UL, 2UL } ); * * // Rotating the result of the addition between y and the 1st row of A x = * elements( y + row( A, 1UL ), { 2UL, 3UL, 0UL, 1UL } ) \endcode * * // Please note that using an element selection, which refers to an index * multiple times, on the // left-hand side of an assignment leads to * undefined behavior: * * \code blaze::DynamicVector<int,blaze::rowVector> a{ 1, 2, 3 }; * blaze::DynamicVector<int,blaze::rowVector> b{ 1, 2, 3, 4 }; * * auto e = elements( a, { 1, 1, 1, 1 } ); // Selecting the element 1 four * times e = b; // Undefined behavior \endcode * * // In this example both vectors have the same size, which results in a * correct vector assignment, // but the final value of the element at index * 1 is unspecified. // // \warning It is the programmer's responsibility to * ensure the element selection does not outlive // the viewed vector: * * \code // Creating an element selection on a temporary vector; results in a * dangling reference! auto e = elements<1UL,3UL>( DynamicVector<int>{ 1, 2, * 3, 4, 5 } ); \endcode * * // \n \section views_element_selections_element_access Element Access // // * The elements of an element selection can be directly accessed via the * subscript operator: * * \code blaze::DynamicVector<double,blaze::rowVector> v; // ... Resizing and * initialization * * // Selecting the elements 2, 4, 6, and 8 auto e = elements( v, { 2UL, 4UL, * 6UL, 8UL } ); * * // Setting the 1st element of the element selection, which corresponds to // * the element at index 4 in vector v e[1] = 2.0; \endcode * * // The numbering of the selected elements is * * \f[\left(\begin{array}{*{5}{c}} 0 & 1 & 2 & \cdots & N-1 \\ * \end{array}\right),\f] * * // where N is the number of selected elements. Alternatively, the elements of * an element selection // can be traversed via iterators. Just as with * vectors, in case of non-const element selections, // \c begin() and \c * end() return an iterator, which allows to manipulate the elements, in case * of // constant element selections an iterator to immutable elements is * returned: * * \code blaze::DynamicVector<int,blaze::rowVector> v( 256UL ); // ... Resizing * and initialization * * // Creating an element selection including specific elements of dense vector * v auto e = elements( v, { 0UL, 3UL, 6UL, 9UL, 12UL } ); * * // Traversing the elements via iterators to non-const elements for( auto * it=e.begin(); it!=e.end(); ++it ) { it = ...; // OK: Write access to the * dense vector value. ... = *it; // OK: Read access to the dense vector * value. } * * // Traversing the elements via iterators to const elements for( auto * it=e.cbegin(); it!=e.cend(); ++it ) { it = ...; // Compilation error: * Assignment to the value via iterator-to-const is invalid. ... = *it; // * OK: Read access to the dense vector value. } \endcode * * \code blaze::CompressedVector<int,blaze::rowVector> v( 256UL ); // ... 
* Resizing and initialization * * // Creating an element selection including specific elements of sparse vector * v auto e = elements( v, { 0UL, 3UL, 6UL, 9UL, 12UL } ); * * // Traversing the elements via iterators to non-const elements for( auto * it=e.begin(); it!=e.end(); ++it ) { it->value() = ...; // OK: Write * access to the value of the non-zero element. ... = it->value(); // OK: * Read access to the value of the non-zero element. it->index() = ...; // * Compilation error: The index of a non-zero element cannot be changed. ... * = it->index(); // OK: Read access to the index of the sparse element. } * * // Traversing the elements via iterators to const elements for( auto * it=e.cbegin(); it!=e.cend(); ++it ) { it->value() = ...; // Compilation * error: Assignment to the value via iterator-to-const is invalid. ... = * it->value(); // OK: Read access to the value of the non-zero element. * it->index() = ...; // Compilation error: The index of a non-zero element * cannot be changed. ... = it->index(); // OK: Read access to the index of * the sparse element. } \endcode * * // \n \section views_element_selections_element_insertion Element Insertion * // // Inserting/accessing elements in a sparse element selection can be * done by several alternative // functions. The following example * demonstrates all options: * * \code blaze::CompressedVector<double,blaze::rowVector> v( 256UL ); // * Non-initialized vector of size 256 * * std::vector<size_t> indices; // ... Selecting indices of the sparse vector * * auto e = elements( v, indices ); * * // The subscript operator provides access to the selected elements of the * sparse vector, // including the zero elements. In case the subscript * operator is used to access an element // that is currently not stored in * the sparse vector, the element is inserted. e[42] = 2.0; * * // The second operation for inserting elements via the element selection is * the set() function. // In case the element is not contained in the vector * it is inserted into the vector, if it is // already contained in the * vector its value is modified. e.set( 45UL, -1.2 ); * * // An alternative for inserting elements into the vector is the insert() * function. However, it // inserts the element only in case the element is * not already contained in the vector. e.insert( 50UL, 3.7 ); * * // Just as in case of vectors, elements can also be inserted via the append() * function. In case // of element selections, append() also requires that * the appended element's index is strictly // larger than the currently * largest non-zero index of the selection and that the selections's // * capacity is large enough to hold the new element. Note however that due to * the nature of an // element selection, which is an alias to arbitrary * elements of a sparse vector, the append() // function does not work as * efficiently for an element selection as it does for a vector. e.reserve( * 10UL ); e.append( 51UL, -2.1 ); \endcode * * // \n \section views_element_selections_common_operations Common Operations * // // An element selection can be used like any other dense or sparse * vector. For instance, the // number of selected elements can be obtained * via the \c size() function, the current capacity // via the \c capacity() * function, and the number of non-zero elements via the \c nonZeros() // * function. However, since element selections are references to a specific * range of a vector, // several operations are not possible, such as * resizing and swapping. 
The following example // shows this by means of an * element selection on a dense vector: * * \code blaze::DynamicVector<int,blaze::rowVector> v( 42UL ); // ... Resizing * and initialization * * // Selecting the elements 5 and 10 auto e = elements( v, { 5UL, 10UL } ); * * e.size(); // Returns the number of elements in the element selection * e.capacity(); // Returns the capacity of the element selection * e.nonZeros(); // Returns the number of non-zero elements contained in * the element selection * * e.resize( 84UL ); // Compilation error: Cannot resize an element selection * * auto e2 = elements( v, { 15UL, 10UL } ); swap( e, e2 ); // Compilation * error: Swap operation not allowed \endcode * * // \n \section views_element_selections_arithmetic_operations Arithmetic * Operations // // Both dense and sparse element selections can be used in * all arithmetic operations that any other // dense or sparse vector can be * used in. The following example gives an impression of the use of // dense * element selections within arithmetic operations. All operations (addition, * subtraction, // multiplication, scaling, ...) can be performed on all * possible combinations of dense and sparse // element selections with * fitting element types: * * \code blaze::DynamicVector<double,blaze::rowVector> d1, d2, d3; * blaze::CompressedVector<double,blaze::rowVector> s1, s2; * * // ... Resizing and initialization * * blaze::DynamicMatrix<double,blaze::rowMajor> A; * * std::initializer_list<size_t> indices1{ 0UL, 3UL, 6UL, 9UL, 12UL, 15UL, * 18UL, 21UL }; std::initializer_list<size_t> indices2{ 1UL, 4UL, 7UL, 10UL, * 13UL, 16UL, 19UL, 22UL }; std::initializer_list<size_t> indices3{ 2UL, * 5UL, 8UL, 11UL, 14UL, 17UL, 20UL, 23UL }; * * auto e( elements( d1, indices1 ) ); // Selecting the every third element of * d1 in the range [0..21] * * e = d2; // Dense vector assignment to the selected * elements elements( d1, indices2 ) = s1; // Sparse vector assignment to * the selected elements * * d3 = e + d2; // Dense vector/dense vector addition s2 * = s1 + elements( d1, indices2 ); // Sparse vector/dense vector addition * d2 = e * elements( d1, indices3 ); // Component-wise vector * multiplication * * elements( d1, indices2 ) *= 2.0; // In-place scaling of the second * selection of elements d2 = elements( d1, indices3 ) * 2.0; // Scaling of * the elements in the third selection of elements d2 = 2.0 * elements( d1, * indices3 ); // Scaling of the elements in the third selection of elements * * elements( d1, indices1 ) += d2; // Addition assignment elements( d1, * indices2 ) -= s2; // Subtraction assignment elements( d1, indices3 ) *= * e; // Multiplication assignment * * double scalar = elements( d1, indices2 ) * trans( s1 ); // Scalar/dot/inner * product between two vectors * * A = trans( s1 ) * elements( d1, { 3UL, 6UL } ); // Outer product between two * vectors \endcode * * // \n Previous: \ref views_subvectors &nbsp; &nbsp; Next: \ref * views_submatrices */ //************************************************************************************************* //**Submatrices ************************************************************************************ /* * !\page views_submatrices Submatrices // // \tableofcontents // // // * Submatrices provide views on a specific part of a dense or sparse matrix * just as subvectors // provide views on specific parts of vectors. As such, * submatrices act as a reference to a // specific block within a matrix. 
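// A minimal sketch of this referencing behavior (assuming <blaze/math/Submatrix.h> is included):

\code
blaze::DynamicMatrix<int,blaze::rowMajor> M( 4UL, 4UL, 0 );  // 4x4 matrix, all elements set to 0

auto sm = submatrix( M, 1UL, 1UL, 2UL, 2UL );  // View on the 2x2 block starting at (1,1)

sm(0,0) = 7;  // Write through the view; M(1,1) is now 7
M(2,2)  = 9;  // Write to the matrix; visible through the view as sm(1,1) == 9
\endcode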
* This reference is valid and can be used in evary way any // other dense or * sparse matrix can be used as long as the matrix containing the submatrix * is // not resized or entirely destroyed. The submatrix also acts as an * alias to the matrix elements // in the specified block: Changes made to * the elements (e.g. modifying values, inserting or // erasing elements) are * immediately visible in the matrix and changes made via the matrix are // * immediately visible in the submatrix. // // // \n \section * views_submatrices_setup Setup of Submatrices // <hr> // // A view on a * dense or sparse submatrix can be created very conveniently via the \c * submatrix() // function. It can be included via the header file * * \code #include <blaze/math/Submatrix.h> \endcode * * // The first and second parameter specify the row and column of the first * element of the submatrix. // The third and fourth parameter specify the * number of rows and columns, respectively. The four // parameters can be * specified either at compile time or at runtime: * * \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and * initialization * * // Creating a dense submatrix of size 4x8, starting in row 3 and column 0 * (compile time arguments) auto sm1 = submatrix<3UL,0UL,4UL,8UL>( A ); * * // Creating a dense submatrix of size 8x16, starting in row 0 and column 4 * (runtime arguments) auto sm2 = submatrix( A, 0UL, 4UL, 8UL, 16UL ); * \endcode * * // The \c submatrix() function returns an expression representing the * submatrix view. The type of // this expression depends on the given * submatrix arguments, primarily the type of the matrix and // the compile * time arguments. If the type is required, it can be determined via the \c * decltype // specifier: * * \code using MatrixType = blaze::DynamicMatrix<int>; using SubmatrixType = * decltype( blaze::submatrix<3UL,0UL,4UL,8UL>( std::declval<MatrixType>() ) * ); \endcode * * // The resulting view can be treated as any other dense or sparse matrix, * i.e. it can be assigned // to, it can be copied from, and it can be used * in arithmetic operations. A submatrix created from // a row-major matrix * will itself be a row-major matrix, a submatrix created from a column-major * // matrix will be a column-major matrix. The view can also be used on both * sides of an assignment: // The submatrix can either be used as an alias to * grant write access to a specific submatrix // of a matrix primitive on the * left-hand side of an assignment or to grant read-access to // a specific * submatrix of a matrix primitive or expression on the right-hand side of an * // assignment. The following example demonstrates this in detail: * * \code blaze::DynamicMatrix<double,blaze::columnMajor> A, B; * blaze::CompressedMatrix<double,blaze::rowMajor> C; // ... Resizing and * initialization * * // Creating a dense submatrix of size 8x4, starting in row 0 and column 2 * auto sm = submatrix( A, 0UL, 2UL, 8UL, 4UL ); * * // Setting the submatrix of A to a 8x4 submatrix of B sm = submatrix( B, 0UL, * 0UL, 8UL, 4UL ); * * // Copying the sparse matrix C into another 8x4 submatrix of A submatrix( A, * 8UL, 2UL, 8UL, 4UL ) = C; * * // Assigning part of the result of a matrix addition to the first submatrix * sm = submatrix( B + C, 0UL, 0UL, 8UL, 4UL ); \endcode * * // \warning It is the programmer's responsibility to ensure the submatrix * does not outlive the // viewed matrix: * * \code // Creating a submatrix on a temporary matrix; results in a dangling * reference! 
auto sm = submatrix<1UL,0UL,2UL,3UL>( DynamicMatrix<int>{ { 1, * 2, 3 }, { 4, 5, 6 }, { 7, 8, 9 } } ); \endcode * * // \n \section views_submatrices_element_access Element Access // <hr> // // * The elements of a submatrix can be directly accessed with the function * call operator: * * \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and * initialization * * // Creating a 8x8 submatrix, starting from position (4,4) auto sm = * submatrix( A, 4UL, 4UL, 8UL, 8UL ); * * // Setting the element (0,0) of the submatrix, which corresponds to // the * element at position (4,4) in matrix A sm(0,0) = 2.0; \endcode * * // Alternatively, the elements of a submatrix can be traversed via (const) * iterators. Just as // with matrices, in case of non-const submatrices, \c * begin() and \c end() return an iterator, // which allows to manipuate the * elements, in case of constant submatrices an iterator to // immutable * elements is returned: * * \code blaze::DynamicMatrix<int,blaze::rowMajor> A( 256UL, 512UL ); // ... * Resizing and initialization * * // Creating a reference to a specific submatrix of matrix A auto sm = * submatrix( A, 16UL, 16UL, 64UL, 128UL ); * * // Traversing the elements of the 0th row via iterators to non-const elements * for( auto it=sm.begin(0); it!=sm.end(0); ++it ) { it = ...; // OK: Write * access to the dense submatrix value. ... = *it; // OK: Read access to the * dense submatrix value. } * * // Traversing the elements of the 1st row via iterators to const elements * for( auto it=sm.cbegin(1); it!=sm.cend(1); ++it ) { it = ...; // * Compilation error: Assignment to the value via iterator-to-const is * invalid. ... = *it; // OK: Read access to the dense submatrix value. } * \endcode * * \code blaze::CompressedMatrix<int,blaze::rowMajor> A( 256UL, 512UL ); // ... * Resizing and initialization * * // Creating a reference to a specific submatrix of matrix A auto sm = * submatrix( A, 16UL, 16UL, 64UL, 128UL ); * * // Traversing the elements of the 0th row via iterators to non-const elements * for( auto it=sm.begin(0); it!=sm.end(0); ++it ) { it->value() = ...; // * OK: Write access to the value of the non-zero element. ... = it->value(); * // OK: Read access to the value of the non-zero element. it->index() = * ...; // Compilation error: The index of a non-zero element cannot be * changed. ... = it->index(); // OK: Read access to the index of the sparse * element. } * * // Traversing the elements of the 1st row via iterators to const elements * for( auto it=sm.cbegin(1); it!=sm.cend(1); ++it ) { it->value() = ...; // * Compilation error: Assignment to the value via iterator-to-const is * invalid. ... = it->value(); // OK: Read access to the value of the * non-zero element. it->index() = ...; // Compilation error: The index of a * non-zero element cannot be changed. ... = it->index(); // OK: Read access * to the index of the sparse element. } \endcode * * // \n \section views_submatrices_element_insertion Element Insertion // <hr> * // // Inserting/accessing elements in a sparse submatrix can be done by * several alternative functions. // The following example demonstrates all * options: * * \code blaze::CompressedMatrix<double,blaze::rowMajor> A( 256UL, 512UL ); // * Non-initialized matrix of size 256x512 * * auto sm = submatrix( A, 10UL, 10UL, 16UL, 16UL ); // View on a 16x16 * submatrix of A * * // The function call operator provides access to all possible elements of the * sparse submatrix, // including the zero elements. 
In case the function * call operator is used to access an element // that is currently not stored * in the sparse submatrix, the element is inserted into the // submatrix. * sm(2,4) = 2.0; * * // The second operation for inserting elements is the set() function. In case * the element is // not contained in the submatrix it is inserted into the * submatrix, if it is already contained // in the submatrix its value is * modified. sm.set( 2UL, 5UL, -1.2 ); * * // An alternative for inserting elements into the submatrix is the insert() * function. However, // it inserts the element only in case the element is * not already contained in the submatrix. sm.insert( 2UL, 6UL, 3.7 ); * * // Just as in the case of sparse matrices, elements can also be inserted via * the append() // function. In case of submatrices, append() also requires * that the appended element's // index is strictly larger than the currently * largest non-zero index in the according row // or column of the submatrix * and that the according row's or column's capacity is large // enough to * hold the new element. Note however that due to the nature of a submatrix, * which // may be an alias to the middle of a sparse matrix, the append() * function does not work as // efficiently for a submatrix as it does for a * matrix. sm.reserve( 2UL, 10UL ); sm.append( 2UL, 10UL, -2.1 ); \endcode * * // \n \section views_submatrices_common_operations Common Operations // <hr> * // // A submatrix view can be used like any other dense or sparse matrix. * This means that with only // a few exceptions all \ref matrix_operations * and \ref arithmetic_operations can be used. For // instance, the current * size of the matrix, i.e. the number of rows or columns can be obtained // * via the \c rows() and \c columns() functions, the current total capacity * via the \c capacity() // function, and the number of non-zero elements via * the \c nonZeros() function. However, since // submatrices are views on a * specific submatrix of a matrix, several operations are not possible, // * such as resizing and swapping: * * \code blaze::DynamicMatrix<int,blaze::rowMajor> A( 42UL, 42UL ); // ... * Resizing and initialization * * // Creating a view on the a 8x12 submatrix of matrix A auto sm = submatrix( * A, 0UL, 0UL, 8UL, 12UL ); * * sm.rows(); // Returns the number of rows of the submatrix sm.columns(); * // Returns the number of columns of the submatrix sm.capacity(); // * Returns the capacity of the submatrix sm.nonZeros(); // Returns the * number of non-zero elements contained in the submatrix * * sm.resize( 10UL, 8UL ); // Compilation error: Cannot resize a submatrix of a * matrix * * auto sm2 = submatrix( A, 8UL, 0UL, 12UL, 8UL ); swap( sm, sm2 ); // * Compilation error: Swap operation not allowed \endcode * * // \n \section views_submatrices_arithmetic_operations Arithmetic Operations * // <hr> // // Both dense and sparse submatrices can be used in all * arithmetic operations that any other dense // or sparse matrix can be used * in. The following example gives an impression of the use of dense // * submatrices within arithmetic operations. All operations (addition, * subtraction, multiplication, // scaling, ...) can be performed on all * possible combinations of dense and sparse matrices with // fitting element * types: * * \code blaze::DynamicMatrix<double,blaze::rowMajor> D1, D2, D3; * blaze::CompressedMatrix<double,blaze::rowMajor> S1, S2; * * blaze::CompressedVector<double,blaze::columnVector> a, b; * * // ... 
Resizing and initialization * * auto sm = submatrix( D1, 0UL, 0UL, 8UL, 8UL ); // View on the 8x8 submatrix * of matrix D1 // starting from row 0 and column 0 * * submatrix( D1, 0UL, 8UL, 8UL, 8UL ) = D2; // Dense matrix initialization of * the 8x8 submatrix // starting in row 0 and column 8 sm = S1; * // Sparse matrix initialization of the second 8x8 submatrix * * D3 = sm + D2; // Dense matrix/dense matrix * addition S2 = S1 - submatrix( D1, 8UL, 0UL, 8UL, 8UL ); // Sparse * matrix/dense matrix subtraction D2 = sm * submatrix( D1, 8UL, 8UL, 8UL, * 8UL ); // Dense matrix/dense matrix multiplication * * submatrix( D1, 8UL, 0UL, 8UL, 8UL ) *= 2.0; // In-place scaling of a * submatrix of D1 D2 = submatrix( D1, 8UL, 8UL, 8UL, 8UL ) * 2.0; // * Scaling of the a submatrix of D1 D2 = 2.0 * sm; * // Scaling of the a submatrix of D1 * * submatrix( D1, 0UL, 8UL, 8UL, 8UL ) += D2; // Addition assignment submatrix( * D1, 8UL, 0UL, 8UL, 8UL ) -= S1; // Subtraction assignment submatrix( D1, * 8UL, 8UL, 8UL, 8UL ) *= sm; // Multiplication assignment * * a = submatrix( D1, 4UL, 4UL, 8UL, 8UL ) * b; // Dense matrix/sparse vector * multiplication \endcode * * // \n \section views_aligned_submatrices Aligned Submatrices // <hr> // // * Usually submatrices can be defined anywhere within a matrix. They may * start at any position and // may have an arbitrary extension (only * restricted by the extension of the underlying matrix). // However, in * contrast to matrices themselves, which are always properly aligned in * memory and // therefore can provide maximum performance, this means that * submatrices in general have to be // considered to be unaligned. This can * be made explicit by the \c blaze::unaligned flag: * * \code using blaze::unaligned; * * blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and * initialization * * // Identical creations of an unaligned submatrix of size 8x8, starting in row * 0 and column 0 auto sm1 = submatrix ( A, 0UL, 0UL, 8UL, 8UL ); * auto sm2 = submatrix<unaligned>( A, 0UL, 0UL, 8UL, 8UL ); auto sm3 = * submatrix<0UL,0UL,8UL,8UL> ( A ); auto sm4 = * submatrix<unaligned,0UL,0UL,8UL,8UL>( A ); \endcode * * // All of these calls to the \c submatrix() function are identical. Whether * the alignment flag is // explicitly specified or not, it always returns an * unaligned submatrix. Whereas this may provide // full flexibility in the * creation of submatrices, this might result in performance disadvantages // * in comparison to matrix primitives (even in case the specified submatrix * could be aligned). // Whereas matrix primitives are guaranteed to be * properly aligned and therefore provide maximum // performance in all * operations, a general view on a matrix might not be properly aligned. This * // may cause a performance penalty on some platforms and/or for some * operations. // // However, it is also possible to create aligned * submatrices. Aligned submatrices are identical to // unaligned submatrices * in all aspects, except that they may pose additional alignment * restrictions // and therefore have less flexibility during creation, but * don't suffer from performance penalties // and provide the same * performance as the underlying matrix. 
Aligned submatrices are created by * // explicitly specifying the \c blaze::aligned flag: * * \code using blaze::aligned; * * // Creating an aligned submatrix of size 8x8, starting in row 0 and column 0 * auto sv1 = submatrix<aligned>( A, 0UL, 0UL, 8UL, 8UL ); auto sv2 = * submatrix<aligned,0UL,0UL,8UL,8UL>( A ); \endcode * * // The alignment restrictions refer to system dependent address restrictions * for the used element // type and the available vectorization mode (SSE, * AVX, ...). In order to be properly aligned the // first element of each * row/column of the submatrix must be aligned. The following source code // * gives some examples for a double precision row-major dynamic matrix, * assuming that padding is // enabled and that AVX is available, which packs * 4 \c double values into a SIMD vector: * * \code using blaze::aligned; * * blaze::DynamicMatrix<double,blaze::rowMajor> D( 13UL, 17UL ); // ... Resizing * and initialization * * // OK: Starts at position (0,0), i.e. the first element of each row is * aligned (due to padding) auto dsm1 = submatrix<aligned>( D, 0UL, 0UL, 7UL, * 11UL ); * * // OK: First column is a multiple of 4, i.e. the first element of each row is * aligned (due to padding) auto dsm2 = submatrix<aligned>( D, 3UL, 12UL, * 8UL, 16UL ); * * // OK: First column is a multiple of 4 and the submatrix includes the last * row and column auto dsm3 = submatrix<aligned>( D, 4UL, 0UL, 9UL, 17UL ); * * // Error: First column is not a multiple of 4, i.e. the first element is not * aligned auto dsm4 = submatrix<aligned>( D, 2UL, 3UL, 12UL, 12UL ); * \endcode * * // Note that the discussed alignment restrictions are only valid for aligned * dense submatrices. // In contrast, aligned sparse submatrices at this time * don't pose any additional restrictions. // Therefore aligned and unaligned * sparse submatrices are truly fully identical. Still, in case // the \c * blaze::aligned flag is specified during setup, an aligned submatrix is * created: * * \code using blaze::aligned; * * blaze::CompressedMatrix<double,blaze::rowMajor> A; // ... Resizing and * initialization * * // Creating an aligned submatrix of size 8x8, starting in row 0 and column 0 * auto sv = submatrix<aligned>( A, 0UL, 0UL, 8UL, 8UL ); \endcode * * // \n \section views_submatrices_on_symmetric_matrices Submatrices on * Symmetric Matrices // // Submatrices can also be created on symmetric * matrices (see the \c SymmetricMatrix class template): * * \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; * * // Setup of a 16x16 symmetric matrix SymmetricMatrix< DynamicMatrix<int> > A( * 16UL ); * * // Creating a dense submatrix of size 8x12, starting in row 2 and column 4 * auto sm = submatrix( A, 2UL, 4UL, 8UL, 12UL ); \endcode * * // It is important to note, however, that (compound) assignments to such * submatrices have a // special restriction: The symmetry of the underlying * symmetric matrix must not be broken! // Since the modification of element * \f$ a_{ij} \f$ of a symmetric matrix also modifies the // element \f$ * a_{ji} \f$, the matrix to be assigned must be structured such that the * symmetry // of the symmetric matrix is preserved. 
Otherwise a \a * std::invalid_argument exception is // thrown: * * \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; * * // Setup of two default 4x4 symmetric matrices SymmetricMatrix< * DynamicMatrix<int> > A1( 4 ), A2( 4 ); * * // Setup of the 3x2 dynamic matrix // // ( 1 2 ) // B = ( 3 4 ) // * ( 5 6 ) // DynamicMatrix<int> B{ { 1, 2 }, { 3, 4 }, { 5, 6 } }; * * // OK: Assigning B to a submatrix of A1 such that the symmetry can be * preserved // // ( 0 0 1 2 ) // A1 = ( 0 0 3 4 ) // ( 1 3 5 * 6 ) // ( 2 4 6 0 ) // submatrix( A1, 0UL, 2UL, 3UL, 2UL ) = B; // * OK * * // Error: Assigning B to a submatrix of A2 such that the symmetry cannot be * preserved! // The elements marked with X cannot be assigned * unambiguously! // // ( 0 1 2 0 ) // A2 = ( 1 3 X 0 ) // ( * 2 X 6 0 ) // ( 0 0 0 0 ) // submatrix( A2, 0UL, 1UL, 3UL, 2UL ) = * B; // Assignment throws an exception! \endcode * * // \n Previous: \ref views_element_selections &nbsp; &nbsp; Next: \ref * views_rows */ //************************************************************************************************* //**Rows ******************************************************************************************* /* * !\page views_rows Rows // // \tableofcontents // // // Rows provide views * on a specific row of a dense or sparse matrix. As such, rows act as a // * reference to a specific row. This reference is valid and can be used in * every way any other // row vector can be used as long as the matrix * containing the row is not resized or entirely // destroyed. The row also * acts as an alias to the row elements: Changes made to the elements // * (e.g. modifying values, inserting or erasing elements) are immediately * visible in the matrix // and changes made via the matrix are immediately * visible in the row. // // // \n \section views_rows_setup Setup of Rows // * <hr> // // \image html row.png // \image latex row.eps "Row view" * width=250pt // // A reference to a dense or sparse row can be created very * conveniently via the \c row() function. // It can be included via the * header file * * \code #include <blaze/math/Row.h> \endcode * * // The row index must be in the range from \f$[0..M-1]\f$, where \c M is the * total number of rows // of the matrix, and can be specified both at * compile time or at runtime: * * \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and * initialization * * // Creating a reference to the 1st row of matrix A (compile time index) auto * row1 = row<1UL>( A ); * * // Creating a reference to the 2nd row of matrix A (runtime index) auto row2 * = row( A, 2UL ); \endcode * * // The \c row() function returns an expression representing the row view. The * type of this // expression depends on the given row arguments, primarily * the type of the matrix and the compile // time arguments. If the type is * required, it can be determined via the \c decltype specifier: * * \code using MatrixType = blaze::DynamicMatrix<int>; using RowType = decltype( * blaze::row<1UL>( std::declval<MatrixType>() ) ); \endcode * * // The resulting view can be treated as any other row vector, i.e. it can be * assigned to, it can // be copied from, and it can be used in arithmetic * operations. 
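// For instance, a row view can be passed to any operation that accepts a vector expression. The
// following small sketch sums the entries of a single row without copying it (assuming a Blaze
// version that provides the \c sum() reduction for vector expressions):

\code
blaze::DynamicMatrix<double,blaze::rowMajor> M{ { 1.0, 2.0, 3.0 },
                                                { 4.0, 5.0, 6.0 } };

const double s = sum( row( M, 1UL ) );  // 4.0 + 5.0 + 6.0 == 15.0
\endcode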
The reference can also be used on // both sides of an * assignment: The row can either be used as an alias to grant write access * to a // specific row of a matrix primitive on the left-hand side of an * assignment or to grant read-access // to a specific row of a matrix * primitive or expression on the right-hand side of an assignment. // The * following example demonstrates this in detail: * * \code blaze::DynamicVector<double,blaze::rowVector> x; * blaze::CompressedVector<double,blaze::rowVector> y; * blaze::DynamicMatrix<double,blaze::rowMajor> A, B; * blaze::CompressedMatrix<double,blaze::rowMajor> C, D; // ... Resizing and * initialization * * // Setting the 2nd row of matrix A to x auto row2 = row( A, 2UL ); row2 = x; * * // Setting the 3rd row of matrix B to y row( B, 3UL ) = y; * * // Setting x to the 4th row of the result of the matrix multiplication x = * row( A * B, 4UL ); * * // Setting y to the 2nd row of the result of the sparse matrix multiplication * y = row( C * D, 2UL ); \endcode * * // \warning It is the programmer's responsibility to ensure the row does not * outlive the viewed // matrix: * * \code // Creating a row on a temporary matrix; results in a dangling * reference! auto row1 = row<1UL>( DynamicMatrix<int>{ { 1, 2, 3 }, { 4, 5, * 6 }, { 7, 8, 9 } } ); \endcode * * // \n \section views_rows_element_access Element Access // <hr> // // The * elements of a row can be directly accessed with the subscript operator: * * \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and * initialization * * // Creating a view on the 4th row of matrix A auto row4 = row( A, 4UL ); * * // Setting the 1st element of the dense row, which corresponds // to the 1st * element in the 4th row of matrix A row4[1] = 2.0; \endcode * * // The numbering of the row elements is * * \f[\left(\begin{array}{*{5}{c}} 0 & 1 & 2 & \cdots & N-1 \\ * \end{array}\right),\f] * * // where N is the number of columns of the referenced matrix. Alternatively, * the elements of a // row can be traversed via iterators. Just as with * vectors, in case of non-const rows, \c begin() // and \c end() return an * iterator, which allows to manipulate the elements, in case of constant // * rows an iterator to immutable elements is returned: * * \code blaze::DynamicMatrix<int,blaze::rowMajor> A( 128UL, 256UL ); // ... * Resizing and initialization * * // Creating a reference to the 31st row of matrix A auto row31 = row( A, 31UL * ); * * // Traversing the elements via iterators to non-const elements for( auto * it=row31.begin(); it!=row31.end(); ++it ) { it = ...; // OK; Write access * to the dense row value ... = *it; // OK: Read access to the dense row * value. } * * // Traversing the elements via iterators to const elements for( auto * it=row31.cbegin(); it!=row31.cend(); ++it ) { it = ...; // Compilation * error: Assignment to the value via a ConstIterator is invalid. ... = *it; * // OK: Read access to the dense row value. } \endcode * * \code blaze::CompressedMatrix<int,blaze::rowMajor> A( 128UL, 256UL ); // ... * Resizing and initialization * * // Creating a reference to the 31st row of matrix A auto row31 = row( A, 31UL * ); * * // Traversing the elements via iterators to non-const elements for( auto * it=row31.begin(); it!=row31.end(); ++it ) { it->value() = ...; // OK: * Write access to the value of the non-zero element. ... = it->value(); // * OK: Read access to the value of the non-zero element. it->index() = ...; * // Compilation error: The index of a non-zero element cannot be changed. * ... 
= it->index(); // OK: Read access to the index of the sparse element. * } * * // Traversing the elements via iterators to const elements for( auto * it=row31.cbegin(); it!=row31.cend(); ++it ) { it->value() = ...; // * Compilation error: Assignment to the value via a ConstIterator is invalid. * ... = it->value(); // OK: Read access to the value of the non-zero * element. it->index() = ...; // Compilation error: The index of a non-zero * element cannot be changed. ... = it->index(); // OK: Read access to the * index of the sparse element. } \endcode * * // \n \section views_rows_element_insertion Element Insertion // <hr> // // * Inserting/accessing elements in a sparse row can be done by several * alternative functions. // The following example demonstrates all options: * * \code blaze::CompressedMatrix<double,blaze::rowMajor> A( 10UL, 100UL ); // * Non-initialized 10x100 matrix * * auto row0( row( A, 0UL ) ); // Reference to the 0th row of A * * // The subscript operator provides access to all possible elements of the * sparse row, // including the zero elements. In case the subscript operator * is used to access an element // that is currently not stored in the sparse * row, the element is inserted into the row. row0[42] = 2.0; * * // The second operation for inserting elements is the set() function. In case * the element // is not contained in the row it is inserted into the row, if * it is already contained in // the row its value is modified. row0.set( * 45UL, -1.2 ); * * // An alternative for inserting elements into the row is the insert() * function. However, // it inserts the element only in case the element is * not already contained in the row. row0.insert( 50UL, 3.7 ); * * // A very efficient way to add new elements to a sparse row is the append() * function. // Note that append() requires that the appended element's index * is strictly larger than // the currently largest non-zero index of the row * and that the row's capacity is large // enough to hold the new element. * row0.reserve( 10UL ); row0.append( 51UL, -2.1 ); \endcode * * // \n \section views_rows_common_operations Common Operations // <hr> // // A * row view can be used like any other row vector. This means that with only * a few exceptions // all \ref vector_operations and \ref * arithmetic_operations can be used. For instance, the // current number of * elements can be obtained via the \c size() function, the current capacity * // via the \c capacity() function, and the number of non-zero elements via * the \c nonZeros() // function. However, since rows are references to * specific rows of a matrix, several operations // are not possible on * views, such as resizing and swapping. The following example shows this by * // means of a dense row view: * * \code blaze::DynamicMatrix<int,blaze::rowMajor> A( 42UL, 42UL ); // ... 
* Resizing and initialization * * // Creating a reference to the 2nd row of matrix A auto row2 = row( A, 2UL ); * * row2.size(); // Returns the number of elements in the row * row2.capacity(); // Returns the capacity of the row row2.nonZeros(); * // Returns the number of non-zero elements contained in the row * * row2.resize( 84UL ); // Compilation error: Cannot resize a single row of a * matrix * * auto row3 = row( A, 3UL ); swap( row2, row3 ); // Compilation error: Swap * operation not allowed \endcode * * // \n \section views_rows_arithmetic_operations Arithmetic Operations // <hr> * // // Both dense and sparse rows can be used in all arithmetic operations * that any other dense or // sparse row vector can be used in. The following * example gives an impression of the use of // dense rows within arithmetic * operations. All operations (addition, subtraction, multiplication, // * scaling, ...) can be performed on all possible combinations of dense and * sparse rows with // fitting element types: * * \code blaze::DynamicVector<double,blaze::rowVector> a( 2UL, 2.0 ), b; * blaze::CompressedVector<double,blaze::rowVector> c( 2UL ); c[1] = 3.0; * * blaze::DynamicMatrix<double,blaze::rowMajor> A( 4UL, 2UL ); // * Non-initialized 4x2 matrix * * auto row0( row( A, 0UL ) ); // Reference to the 0th row of A * * row0[0] = 0.0; // Manual initialization of the 0th row of A row0[1] = * 0.0; row( A, 1UL ) = 1.0; // Homogeneous initialization of the 1st row of * A row( A, 2UL ) = a; // Dense vector initialization of the 2nd row of A * row( A, 3UL ) = c; // Sparse vector initialization of the 3rd row of A * * b = row0 + a; // Dense vector/dense vector addition b = c + row( * A, 1UL ); // Sparse vector/dense vector addition b = row0 * row( A, * 2UL ); // Component-wise vector multiplication * * row( A, 1UL ) *= 2.0; // In-place scaling of the 1st row b = row( A, 1UL * ) * 2.0; // Scaling of the 1st row b = 2.0 * row( A, 1UL ); // Scaling * of the 1st row * * row( A, 2UL ) += a; // Addition assignment row( A, 2UL ) -= c; * // Subtraction assignment row( A, 2UL ) *= row( A, 0UL ); // * Multiplication assignment * * double scalar = row( A, 1UL ) * trans( c ); // Scalar/dot/inner product * between two vectors * * A = trans( c ) * row( A, 1UL ); // Outer product between two vectors * \endcode * * // \n \section views_rows_non_fitting_storage_order Views on Matrices with * Non-Fitting Storage Order // <hr> // // Especially noteworthy is that row * views can be created for both row-major and column-major // matrices. * Whereas the interface of a row-major matrix only allows to traverse a row * directly // and the interface of a column-major matrix only allows to * traverse a column, via views it is // possible to traverse a row of a * column-major matrix or a column of a row-major matrix. For // instance: * * \code blaze::DynamicMatrix<int,blaze::columnMajor> A( 64UL, 32UL ); // ... * Resizing and initialization * * // Creating a reference to the 1st row of a column-major matrix A auto row1 = * row( A, 1UL ); * * for( auto it=row1.begin(); it!=row1.end(); ++it ) { // ... } \endcode * * // However, please note that creating a row view on a matrix stored in a * column-major fashion // can result in a considerable performance decrease * in comparison to a row view on a matrix // with row-major storage format. * This is due to the non-contiguous storage of the matrix // elements. 
// Therefore care has to be taken in the choice of the most suitable storage order:

   \code
   // Setup of two column-major matrices
   blaze::DynamicMatrix<double,blaze::columnMajor> A( 128UL, 128UL );
   blaze::DynamicMatrix<double,blaze::columnMajor> B( 128UL, 128UL );
   // ... Resizing and initialization

   // The computation of the 15th row of the multiplication between A and B ...
   blaze::DynamicVector<double,blaze::rowVector> x = row( A * B, 15UL );

   // ... is essentially the same as the following computation, which multiplies
   // the 15th row of the column-major matrix A with B.
   blaze::DynamicVector<double,blaze::rowVector> x = row( A, 15UL ) * B;
   \endcode

// Although \b Blaze performs the resulting vector/matrix multiplication as efficiently as
// possible, using a row-major storage order for matrix \c A would result in a more efficient
// evaluation.
//
// \n Previous: \ref views_submatrices &nbsp; &nbsp; Next: \ref views_row_selections
*/
//*************************************************************************************************


//**Row Selections *********************************************************************************
/*!\page views_row_selections Row Selections
//
// \tableofcontents
//
//
// Row selections provide views on arbitrary compositions of rows of dense and sparse matrices.
// These views act as a reference to the selected rows and represent them as another dense or
// sparse matrix. This reference is valid and can be used in every way any other dense or sparse
// matrix can be used as long as the matrix containing the rows is not resized or entirely
// destroyed. The row selection also acts as an alias to the matrix elements in the specified
// range: Changes made to the rows (e.g. modifying values, inserting or erasing elements) are
// immediately visible in the matrix and changes made via the matrix are immediately visible
// in the rows.
//
//
// \n \section views_row_selections_setup Setup of Row Selections
//
// A row selection can be created very conveniently via the \c rows() function. It can be included
// via the header file

   \code
   #include <blaze/math/Rows.h>
   \endcode

// The indices of the rows to be selected can be specified either at compile time or at runtime
// (by means of an initializer list, array or vector):

   \code
   blaze::DynamicMatrix<double,blaze::rowMajor> A;
   // ... Resizing and initialization

   // Selecting the rows 4, 6, 8, and 10 (compile time arguments)
   auto rs1 = rows<4UL,6UL,8UL,10UL>( A );

   // Selecting the rows 3, 2, and 1 (runtime arguments via an initializer list)
   const std::initializer_list<size_t> list{ 3UL, 2UL, 1UL };
   auto rs2 = rows( A, { 3UL, 2UL, 1UL } );
   auto rs3 = rows( A, list );

   // Selecting the rows 1, 2, 3, 3, 2, and 1 (runtime arguments via a std::array)
   const std::array<size_t,6UL> array{ 1UL, 2UL, 3UL, 3UL, 2UL, 1UL };
   auto rs4 = rows( A, array );
   auto rs5 = rows( A, array.data(), array.size() );

   // Selecting the row 4 five times (runtime arguments via a std::vector)
   const std::vector<size_t> vector{ 4UL, 4UL, 4UL, 4UL, 4UL };
   auto rs6 = rows( A, vector );
   auto rs7 = rows( A, vector.data(), vector.size() );
   \endcode

// Note that it is possible to alias the rows of the underlying matrix in any order. Also note
// that it is possible to use the same index multiple times. The \c rows() function returns an
// expression representing the view on the selected rows.
The type of this expression depends // on the * given arguments, primarily the type of the matrix and the compile time * arguments. If // the type is required, it can be determined via the \c * decltype specifier: * * \code using MatrixType = blaze::DynamicMatrix<int>; using RowsType = * decltype( blaze::rows<3UL,0UL,4UL,8UL>( std::declval<MatrixType>() ) ); * \endcode * * // The resulting view can be treated as any other dense or sparse matrix, * i.e. it can be assigned // to, it can be copied from, and it can be used * in arithmetic operations. Note, however, that a // row selection will * always be treated as a row-major matrix, regardless of the storage order * of // the matrix containing the rows. The view can also be used on both * sides of an assignment: It // can either be used as an alias to grant * write access to specific rows of a matrix primitive // on the left-hand * side of an assignment or to grant read-access to specific rows of a matrix * // primitive or expression on the right-hand side of an assignment. The * following example // demonstrates this in detail: * * \code blaze::DynamicMatrix<double,blaze::rowMajor> A; * blaze::DynamicMatrix<double,blaze::columnMajor> B; * blaze::CompressedMatrix<double,blaze::rowMajor> C; // ... Resizing and * initialization * * // Selecting the rows 1, 3, 5, and 7 of A auto rs = rows( A, { 1UL, 3UL, 5UL, * 7UL } ); * * // Setting rows 1, 3, 5, and 7 of A to row 4 of B rs = rows( B, { 4UL, 4UL, * 4UL, 4UL } ); * * // Setting the rows 2, 4, 6, and 8 of A to C rows( A, { 2UL, 4UL, 6UL, 8UL } * ) = C; * * // Setting the first 4 rows of A to the rows 5, 4, 3, and 2 of C submatrix( * A, 0UL, 0UL, 4UL, A.columns() ) = rows( C, { 5UL, 4UL, 3UL, 2UL } ); * * // Rotating the result of the addition between rows 1, 3, 5, and 7 of A and C * B = rows( rs + C, { 2UL, 3UL, 0UL, 1UL } ); \endcode * * // \warning It is the programmer's responsibility to ensure the row selection * does not outlive the // viewed matrix: * * \code // Creating a row selection on a temporary matrix; results in a * dangling reference! auto rs = rows<2UL,0UL>( DynamicMatrix<int>{ { 1, 2, 3 * }, { 4, 5, 6 }, { 7, 8, 9 } } ); \endcode * * // \n \section views_row_selections_element_access Element Access // // The * elements of a row selection can be directly accessed via the function call * operator: * * \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and * initialization * * // Creating a view on the first four rows of A in reverse order auto rs = * rows( A, { 3UL, 2UL, 1UL, 0UL } ); * * // Setting the element (0,0) of the row selection, which corresponds // to * the element at position (3,0) in matrix A rs(0,0) = 2.0; \endcode * * // Alternatively, the elements of a row selection can be traversed via * (const) iterators. Just as // with matrices, in case of non-const row * selection, \c begin() and \c end() return an iterator, // which allows to * manipuate the elements, in case of constant row selection an iterator to * // immutable elements is returned: * * \code blaze::DynamicMatrix<int,blaze::rowMajor> A( 256UL, 512UL ); // ... * Resizing and initialization * * // Creating a reference to a selection of rows of matrix A auto rs = rows( A, * { 16UL, 32UL, 64UL, 128UL } ); * * // Traversing the elements of the 0th row via iterators to non-const elements * for( auto it=rs.begin(0); it!=rs.end(0); ++it ) { it = ...; // OK: Write * access to the dense value. ... = *it; // OK: Read access to the dense * value. 
} * * // Traversing the elements of the 1st row via iterators to const elements * for( auto it=rs.cbegin(1); it!=rs.cend(1); ++it ) { it = ...; // * Compilation error: Assignment to the value via iterator-to-const is * invalid. ... = *it; // OK: Read access to the dense value. } \endcode * * \code blaze::CompressedMatrix<int,blaze::rowMajor> A( 256UL, 512UL ); // ... * Resizing and initialization * * // Creating a reference to a selection of rows of matrix A auto rs = rows( A, * { 16UL, 32UL, 64UL, 128UL } ); * * // Traversing the elements of the 0th row via iterators to non-const elements * for( auto it=rs.begin(0); it!=rs.end(0); ++it ) { it->value() = ...; // * OK: Write access to the value of the non-zero element. ... = it->value(); * // OK: Read access to the value of the non-zero element. it->index() = * ...; // Compilation error: The index of a non-zero element cannot be * changed. ... = it->index(); // OK: Read access to the index of the sparse * element. } * * // Traversing the elements of the 1st row via iterators to const elements * for( auto it=rs.cbegin(1); it!=rs.cend(1); ++it ) { it->value() = ...; // * Compilation error: Assignment to the value via iterator-to-const is * invalid. ... = it->value(); // OK: Read access to the value of the * non-zero element. it->index() = ...; // Compilation error: The index of a * non-zero element cannot be changed. ... = it->index(); // OK: Read access * to the index of the sparse element. } \endcode * * // \n \section views_row_selections_element_insertion Element Insertion // // * Inserting/accessing elements in a sparse row selection can be done by * several alternative // functions. The following example demonstrates all * options: * * \code blaze::CompressedMatrix<double,blaze::rowMajor> A( 256UL, 512UL ); // * Non-initialized matrix of size 256x512 * * auto rs = rows( A, { 10UL, 20UL, 30UL, 40UL } ); // View on the rows 10, 20, * 30, and 40 of A * * // The function call operator provides access to all possible elements of the * sparse row // selection, including the zero elements. In case the function * call operator is used to // access an element that is currently not stored * in the sparse row selection, the element // is inserted into the row * selection. rs(2,4) = 2.0; * * // The second operation for inserting elements is the set() function. In case * the element is // not contained in the row selection it is inserted into * the row selection, if it is already // contained in the row selection its * value is modified. rs.set( 2UL, 5UL, -1.2 ); * * // An alternative for inserting elements into the row selection is the * insert() function. // However, it inserts the element only in case the * element is not already contained in the // row selection. rs.insert( 2UL, * 6UL, 3.7 ); * * // Just as in the case of sparse matrices, elements can also be inserted via * the append() // function. In case of row selections, append() also * requires that the appended element's // index is strictly larger than the * currently largest non-zero index in the according row // of the row * selection and that the according row's capacity is large enough to hold * the new // element. Note however that due to the nature of a row * selection, which may be an alias to // an arbitrary collection of rows, * the append() function does not work as efficiently for // a row selection * as it does for a matrix. 
rs.reserve( 2UL, 10UL ); rs.append( 2UL, 10UL, * -2.1 ); \endcode * * // \n \section views_row_selections_common_operations Common Operations // // * A view on specific rows of a matrix can be used like any other dense or * sparse matrix. For // instance, the current size of the matrix, i.e. the * number of rows or columns can be obtained // via the \c rows() and \c * columns() functions, the current total capacity via the \c capacity() // * function, and the number of non-zero elements via the \c nonZeros() * function. However, since // row selections are views on specific rows of a * matrix, several operations are not possible, // such as resizing and * swapping: * * \code blaze::DynamicMatrix<int,blaze::rowMajor> A( 42UL, 42UL ); // ... * Resizing and initialization * * // Creating a view on the rows 8, 16, 24, and 32 of matrix A auto rs = rows( * A, { 8UL, 16UL, 24UL, 32UL } ); * * rs.rows(); // Returns the number of rows of the row selection * rs.columns(); // Returns the number of columns of the row selection * rs.capacity(); // Returns the capacity of the row selection * rs.nonZeros(); // Returns the number of non-zero elements contained in * the row selection * * rs.resize( 10UL, 8UL ); // Compilation error: Cannot resize a row selection * * auto rs2 = rows( A, 9UL, 17UL, 25UL, 33UL ); swap( rs, rs2 ); // Compilation * error: Swap operation not allowed \endcode * * // \n \section views_row_selections_arithmetic_operations Arithmetic * Operations // // Both dense and sparse row selections can be used in all * arithmetic operations that any other // dense or sparse matrix can be used * in. The following example gives an impression of the use // of dense row * selctions within arithmetic operations. All operations (addition, * subtraction, // multiplication, scaling, ...) can be performed on all * possible combinations of dense and // sparse matrices with fitting element * types: * * \code blaze::DynamicMatrix<double,blaze::rowMajor> D1, D2, D3; * blaze::CompressedMatrix<double,blaze::rowMajor> S1, S2; * * blaze::CompressedVector<double,blaze::columnVector> a, b; * * // ... 
Resizing and initialization * * std::initializer_list<size_t> indices1{ 0UL, 3UL, 6UL, 9UL, 12UL, 15UL, * 18UL, 21UL }; std::initializer_list<size_t> indices2{ 1UL, 4UL, 7UL, 10UL, * 13UL, 16UL, 19UL, 22UL }; std::initializer_list<size_t> indices3{ 2UL, * 5UL, 8UL, 11UL, 14UL, 17UL, 20UL, 23UL }; * * auto rs = rows( D1, indices1 ); // Selecting the every third row of D1 in * the range [0..21] * * rs = D2; // Dense matrix assignment to the selected rows * rows( D1, indices2 ) = S1; // Sparse matrix assignment to the selected * rows * * D3 = rs + D2; // Dense matrix/dense matrix addition S2 = * S1 - rows( D1, indices2 ); // Sparse matrix/dense matrix subtraction D2 = * rs % rows( D1, indices3 ); // Dense matrix/dense matrix Schur product D2 * = rows( D1, indices2 ) * D1; // Dense matrix/dense matrix multiplication * * rows( D1, indices2 ) *= 2.0; // In-place scaling of the second selection * of rows D2 = rows( D1, indices3 ) * 2.0; // Scaling of the elements in * the third selection of rows D2 = 2.0 * rows( D1, indices3 ); // Scaling * of the elements in the third selection of rows * * rows( D1, indices1 ) += D2; // Addition assignment rows( D1, indices2 ) -= * S1; // Subtraction assignment rows( D1, indices3 ) %= rs; // Schur * product assignment * * a = rows( D1, indices1 ) * b; // Dense matrix/sparse vector multiplication * \endcode * * // \n \section views_row_selections_on_column_major_matrix Row Selections on * Column-Major Matrices // // Especially noteworthy is that row selections * can be created for both row-major and column-major // matrices. Whereas * the interface of a row-major matrix only allows to traverse a row directly * // and the interface of a column-major matrix only allows to traverse a * column, via views it is // possible to traverse a row of a column-major * matrix or a column of a row-major matrix. For // instance: * * \code blaze::DynamicMatrix<int,blaze::columnMajor> A( 64UL, 32UL ); // ... * Resizing and initialization * * // Creating a reference to the 1st and 3rd row of a column-major matrix A * auto rs = rows( A, { 1UL, 3UL } ); * * // Traversing row 0 of the selection, which corresponds to the 1st row of * matrix A for( auto it=rs.begin( 0UL ); it!=rs.end( 0UL ); ++it ) { // ... * } \endcode * * // However, please note that creating a row selection on a matrix stored in a * column-major fashion // can result in a considerable performance decrease * in comparison to a row selection on a matrix // with row-major storage * format. This is due to the non-contiguous storage of the matrix elements. * // Therefore care has to be taken in the choice of the most suitable * storage order: * * \code // Setup of two column-major matrices * blaze::DynamicMatrix<double,blaze::columnMajor> A( 128UL, 128UL ); * blaze::DynamicMatrix<double,blaze::columnMajor> B( 128UL, 128UL ); // ... * Resizing and initialization * * // The computation of the 15th, 30th, and 45th row of the multiplication * between A and B ... blaze::DynamicMatrix<double,blaze::rowMajor> x = rows( * A * B, { 15UL, 30UL, 45UL } ); * * // ... is essentially the same as the following computation, which multiplies * // the 15th, 30th, and 45th row of the column-major matrix A with B. * blaze::DynamicMatrix<double,blaze::rowMajor> x = rows( A, { 15UL, 30UL, * 45UL } ) * B; \endcode * * // Although \b Blaze performs the resulting matrix/matrix multiplication as * efficiently as possible // using a row-major storage order for matrix \c A * would result in a more efficient evaluation. 
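//
// As a complementary sketch (the matrix size and the selected indices below are assumptions for
// illustration), the index container for a row selection does not have to be written out by hand;
// it can be assembled at runtime before the view is created. The following example collects the
// indices of all even-numbered rows in a \c std::vector and scales exactly those rows in place,
// using only the \c rows() overloads and the scaling assignment shown above:

   \code
   blaze::DynamicMatrix<double,blaze::rowMajor> A( 8UL, 8UL, 1.0 );  // 8x8 matrix, initialized to 1.0

   std::vector<size_t> evenRows;
   for( size_t i=0UL; i<A.rows(); i+=2UL ) {
      evenRows.push_back( i );  // Collecting the indices 0, 2, 4, and 6
   }

   auto rs = rows( A, evenRows.data(), evenRows.size() );  // View on the even-numbered rows of A

   rs *= 2.0;  // In-place scaling of the rows 0, 2, 4, and 6 of A
   \endcode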
// // \n Previous: \ref * views_rows &nbsp; &nbsp; Next: \ref views_columns */ //************************************************************************************************* //**Columns **************************************************************************************** /* * !\page views_columns Columns // // \tableofcontents // // // Just as rows * provide a view on a specific row of a matrix, columns provide views on a * specific // column of a dense or sparse matrix. As such, columns act as a * reference to a specific column. // This reference is valid an can be used * in every way any other column vector can be used as long // as the matrix * containing the column is not resized or entirely destroyed. Changes made * to the // elements (e.g. modifying values, inserting or erasing elements) * are immediately visible in the // matrix and changes made via the matrix * are immediately visible in the column. // // // \n \section * views_colums_setup Setup of Columns // <hr> // // \image html column.png * // \image latex column.eps "Column view" width=250pt // // A reference to * a dense or sparse column can be created very conveniently via the \c * column() // function. It can be included via the header file * * \code #include <blaze/math/Column.h> \endcode * * // The column index must be in the range from \f$[0..N-1]\f$, where \c N is * the total number of // columns of the matrix, and can be specified both at * compile time or at runtime: * * \code blaze::DynamicMatrix<double,blaze::columnMajor> A; // ... Resizing and * initialization * * // Creating a reference to the 1st column of matrix A (compile time index) * auto col1 = column<1UL>( A ); * * // Creating a reference to the 2nd column of matrix A (runtime index) auto * col2 = column( A, 2UL ); \endcode * * // The \c column() function returns an expression representing the column * view. The type of this // expression depends on the given column * arguments, primarily the type of the matrix and the // compile time * arguments. If the type is required, it can be determined via the \c * decltype // specifier: * * \code using MatrixType = blaze::DynamicMatrix<int>; using ColumnType = * decltype( blaze::column<1UL>( std::declval<MatrixType>() ) ); \endcode * * // The resulting view can be treated as any other column vector, i.e. it can * be assigned to, it // can be copied from, and it can be used in arithmetic * operations. The reference can also be used // on both sides of an * assignment: The column can either be used as an alias to grant write * access // to a specific column of a matrix primitive on the left-hand side * of an assignment or to grant // read-access to a specific column of a * matrix primitive or expression on the right-hand side // of an assignment. * The following example demonstrates this in detail: * * \code blaze::DynamicVector<double,blaze::columnVector> x; * blaze::CompressedVector<double,blaze::columnVector> y; * blaze::DynamicMatrix<double,blaze::columnMajor> A, B; * blaze::CompressedMatrix<double,blaze::columnMajor> C, D; // ... 
Resizing * and initialization * * // Setting the 1st column of matrix A to x auto col1 = column( A, 1UL ); col1 * = x; * * // Setting the 4th column of matrix B to y column( B, 4UL ) = y; * * // Setting x to the 2nd column of the result of the matrix multiplication x = * column( A * B, 2UL ); * * // Setting y to the 2nd column of the result of the sparse matrix * multiplication y = column( C * D, 2UL ); \endcode * * // \warning It is the programmer's responsibility to ensure the column does * not outlive the // viewed matrix: * * \code // Creating a column on a temporary matrix; results in a dangling * reference! auto col1 = column<1UL>( DynamicMatrix<int>{ { 1, 2, 3 }, { 4, * 5, 6 }, { 7, 8, 9 } } ); \endcode * * // \n \section views_columns_element_access Element Access // <hr> // // The * elements of a column can be directly accessed with the subscript operator. * * \code blaze::DynamicMatrix<double,blaze::columnMajor> A; // ... Resizing and * initialization * * // Creating a view on the 4th column of matrix A auto col4 = column( A, 4UL * ); * * // Setting the 1st element of the dense column, which corresponds // to the * 1st element in the 4th column of matrix A col4[1] = 2.0; \endcode * * // The numbering of the column elements is * * \f[\left(\begin{array}{*{5}{c}} 0 & 1 & 2 & \cdots & N-1 \\ * \end{array}\right),\f] * * // where N is the number of rows of the referenced matrix. Alternatively, the * elements of a column // can be traversed via iterators. Just as with * vectors, in case of non-const columns, \c begin() // and \c end() return * an iterator, which allows to manipulate the elements, in case of constant * // columns an iterator to immutable elements is returned: * * \code blaze::DynamicMatrix<int,blaze::columnMajor> A( 128UL, 256UL ); // ... * Resizing and initialization * * // Creating a reference to the 31st column of matrix A auto col31 = column( * A, 31UL ); * * // Traversing the elements via iterators to non-const elements for( auto * it=col31.begin(); it!=col31.end(); ++it ) { it = ...; // OK; Write access * to the dense column value ... = *it; // OK: Read access to the dense * column value. } * * // Traversing the elements via iterators to const elements for( auto * it=col31.cbegin(); it!=col31.cend(); ++it ) { it = ...; // Compilation * error: Assignment to the value via iterator-to-const is invalid. ... = * *it; // OK: Read access to the dense column value. } \endcode * * \code blaze::CompressedMatrix<int,blaze::columnMajor> A( 128UL, 256UL ); // * ... Resizing and initialization * * // Creating a reference to the 31st column of matrix A auto col31 = column( * A, 31UL ); * * // Traversing the elements via iterators to non-const elements for( auto * it=col31.begin(); it!=col31.end(); ++it ) { it->value() = ...; // OK: * Write access to the value of the non-zero element. ... = it->value(); // * OK: Read access to the value of the non-zero element. it->index() = ...; * // Compilation error: The index of a non-zero element cannot be changed. * ... = it->index(); // OK: Read access to the index of the sparse element. * } * * // Traversing the elements via iterators to const elements for( auto * it=col31.cbegin(); it!=col31.cend(); ++it ) { it->value() = ...; // * Compilation error: Assignment to the value via iterator-to-const is * invalid. ... = it->value(); // OK: Read access to the value of the * non-zero element. it->index() = ...; // Compilation error: The index of a * non-zero element cannot be changed. ... 
= it->index(); // OK: Read access * to the index of the sparse element. } \endcode * * // \n \section views_columns_element_insertion Element Insertion // <hr> // * // Inserting/accessing elements in a sparse column can be done by several * alternative functions. // The following example demonstrates all options: * * \code blaze::CompressedMatrix<double,blaze::columnMajor> A( 100UL, 10UL ); * // Non-initialized 100x10 matrix * * auto col0( column( A, 0UL ) ); // Reference to the 0th column of A * * // The subscript operator provides access to all possible elements of the * sparse column, // including the zero elements. In case the subscript * operator is used to access an element // that is currently not stored in * the sparse column, the element is inserted into the column. col0[42] = * 2.0; * * // The second operation for inserting elements is the set() function. In case * the element // is not contained in the column it is inserted into the * column, if it is already contained // in the column its value is modified. * col0.set( 45UL, -1.2 ); * * // An alternative for inserting elements into the column is the insert() * function. However, // it inserts the element only in case the element is * not already contained in the column. col0.insert( 50UL, 3.7 ); * * // A very efficient way to add new elements to a sparse column is the * append() function. // Note that append() requires that the appended * element's index is strictly larger than // the currently largest non-zero * index of the column and that the column's capacity is // large enough to * hold the new element. col0.reserve( 10UL ); col0.append( 51UL, -2.1 ); * \endcode * * // \n \section views_columns_common_operations Common Operations // <hr> // * // A column view can be used like any other column vector. This means that * with only a few // exceptions all \ref vector_operations and \ref * arithmetic_operations can be used. For instance, // the current number of * elements can be obtained via the \c size() function, the current capacity * // via the \c capacity() function, and the number of non-zero elements via * the \c nonZeros() // function. However, since columns are references to * specific columns of a matrix, several // operations are not possible on * views, such as resizing and swapping. The following example // shows this * by means of a dense column view: * * \code blaze::DynamicMatrix<int,blaze::columnMajor> A( 42UL, 42UL ); // ... * Resizing and initialization * * // Creating a reference to the 2nd column of matrix A auto col2 = column( A, * 2UL ); * * col2.size(); // Returns the number of elements in the column * col2.capacity(); // Returns the capacity of the column * col2.nonZeros(); // Returns the number of non-zero elements contained * in the column * * col2.resize( 84UL ); // Compilation error: Cannot resize a single column of * a matrix * * auto col3 = column( A, 3UL ); swap( col2, col3 ); // Compilation error: * Swap operation not allowed \endcode * * // \n \section views_columns_arithmetic_operations Arithmetic Operations // * <hr> // // Both dense and sparse columns can be used in all arithmetic * operations that any other dense or // sparse column vector can be used in. * The following example gives an impression of the use of // dense columns * within arithmetic operations. All operations (addition, subtraction, * multiplication, // scaling, ...) 
can be performed on all possible * combinations of dense and sparse columns with // fitting element types: * * \code blaze::DynamicVector<double,blaze::columnVector> a( 2UL, 2.0 ), b; * blaze::CompressedVector<double,blaze::columnVector> c( 2UL ); c[1] = 3.0; * * blaze::DynamicMatrix<double,blaze::columnMajor> A( 2UL, 4UL ); // * Non-initialized 2x4 matrix * * auto col0( column( A, 0UL ) ); // Reference to the 0th column of A * * col0[0] = 0.0; // Manual initialization of the 0th column of A * col0[1] = 0.0; column( A, 1UL ) = 1.0; // Homogeneous initialization of * the 1st column of A column( A, 2UL ) = a; // Dense vector * initialization of the 2nd column of A column( A, 3UL ) = c; // Sparse * vector initialization of the 3rd column of A * * b = col0 + a; // Dense vector/dense vector addition b = c + * column( A, 1UL ); // Sparse vector/dense vector addition b = col0 * * column( A, 2UL ); // Component-wise vector multiplication * * column( A, 1UL ) *= 2.0; // In-place scaling of the 1st column b = * column( A, 1UL ) * 2.0; // Scaling of the 1st column b = 2.0 * column( A, * 1UL ); // Scaling of the 1st column * * column( A, 2UL ) += a; // Addition assignment column( A, 2UL * ) -= c; // Subtraction assignment column( A, 2UL ) *= * column( A, 0UL ); // Multiplication assignment * * double scalar = trans( c ) * column( A, 1UL ); // Scalar/dot/inner product * between two vectors * * A = column( A, 1UL ) * trans( c ); // Outer product between two vectors * \endcode * * // \n \section views_columns_non_fitting_storage_order Views on Matrices with * Non-Fitting Storage Order // <hr> // // Especially noteworthy is that * column views can be created for both row-major and column-major // * matrices. Whereas the interface of a row-major matrix only allows to * traverse a row directly // and the interface of a column-major matrix only * allows to traverse a column, via views it is // possible to traverse a row * of a column-major matrix or a column of a row-major matrix. For // * instance: * * \code blaze::DynamicMatrix<int,blaze::rowMajor> A( 64UL, 32UL ); // ... * Resizing and initialization * * // Creating a reference to the 1st column of a column-major matrix A auto * col1 = column( A, 1UL ); * * for( auto it=col1.begin(); it!=col1.end(); ++it ) { // ... } \endcode * * // However, please note that creating a column view on a matrix stored in a * row-major fashion // can result in a considerable performance decrease in * comparison to a column view on a matrix // with column-major storage * format. This is due to the non-contiguous storage of the matrix // * elements. Therefore care has to be taken in the choice of the most * suitable storage order: * * \code // Setup of two row-major matrices * blaze::DynamicMatrix<double,blaze::rowMajor> A( 128UL, 128UL ); * blaze::DynamicMatrix<double,blaze::rowMajor> B( 128UL, 128UL ); // ... * Resizing and initialization * * // The computation of the 15th column of the multiplication between A and B * ... blaze::DynamicVector<double,blaze::columnVector> x = column( A * B, * 15UL ); * * // ... is essentially the same as the following computation, which multiplies * // A with the 15th column of the row-major matrix B. * blaze::DynamicVector<double,blaze::columnVector> x = A * column( B, 15UL * ); \endcode * * // Although \b Blaze performs the resulting matrix/vector multiplication as * efficiently as possible // using a column-major storage order for matrix * \c B would result in a more efficient evaluation. 
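//
// As noted in the section on common operations, two column views cannot be swapped directly.
// A minimal sketch of a workaround (the matrix size and element type are assumptions for
// illustration) is to exchange the contents via a temporary vector, which only relies on the
// copy construction and assignment capabilities of column views shown above:

   \code
   blaze::DynamicMatrix<int,blaze::columnMajor> A( 42UL, 42UL );
   // ... Resizing and initialization

   blaze::DynamicVector<int,blaze::columnVector> tmp( column( A, 2UL ) );  // Copy of the 2nd column

   column( A, 2UL ) = column( A, 3UL );  // Overwriting the 2nd column with the 3rd column
   column( A, 3UL ) = tmp;               // Writing the stored copy into the 3rd column
   \endcode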
// // \n Previous: \ref * views_row_selections &nbsp; &nbsp; Next: \ref views_column_selections */ //************************************************************************************************* //**Column Selections ****************************************************************************** /* * !\page views_column_selections Column Selections // // \tableofcontents // * // // Column selections provide views on arbitrary compositions of columns * of dense and sparse // matrices. These views act as a reference to the * selected columns and represent them as another // dense or sparse matrix. * This reference is valid and can be used in every way any other dense // or * sparse matrix can be used as long as the matrix containing the columns is * not resized or // entirely destroyed. The column selection also acts as an * alias to the matrix elements in the // specified range: Changes made to * the columns (e.g. modifying values, inserting or erasing // elements) are * immediately visible in the matrix and changes made via the matrix are * immediately // visible in the columns. // // // \n \section * views_column_selections_setup Setup of Column Selections // // A column * selection can be created very conveniently via the \c columns() function. * It can be // included via the header file * * \code #include <blaze/math/Columns.h> \endcode * * // The indices of the columns to be selected can be specified either at * compile time or at runtime // (by means of an initializer list, array or * vector): * * \code blaze::DynamicMatrix<double,blaze::columnMajor> A; // ... Resizing and * initialization * * // Selecting the columns 4, 6, 8, and 10 (compile time arguments) auto cs1 = * columns<4UL,6UL,8UL,10UL>( A ); * * // Selecting the columns 3, 2, and 1 (runtime arguments via an initializer * list) const std::initializer_list<size_t> list{ 3UL, 2UL, 1UL }; auto cs2 * = columns( A, { 3UL, 2UL, 1UL } ); auto cs3 = columns( A, list ); * * // Selecting the columns 1, 2, 3, 3, 2, and 1 (runtime arguments via a * std::array) const std::array<size_t> array{ 1UL, 2UL, 3UL, 3UL, 2UL, 1UL * }; auto cs4 = columns( A, array ); auto cs5 = columns( A, array.data(), * array.size() ); * * // Selecting the column 4 fives times (runtime arguments via a std::vector) * const std::vector<size_t> vector{ 4UL, 4UL, 4UL, 4UL, 4UL }; auto cs6 = * columns( A, vector ); auto cs7 = columns( A, vector.data(), vector.size() * ); \endcode * * // Note that it is possible to alias the columns of the underlying matrix in * any order. Also note // that it is possible to use the same index multiple * times. The \c columns() function returns an // expression representing the * view on the selected columns. The type of this expression depends // on * the given arguments, primarily the type of the matrix and the compile time * arguments. If // the type is required, it can be determined via the \c * decltype specifier: * * \code using MatrixType = blaze::DynamicMatrix<int>; using ColumnsType = * decltype( blaze::columns<3UL,0UL,4UL,8UL>( std::declval<MatrixType>() ) ); * \endcode * * // The resulting view can be treated as any other dense or sparse matrix, * i.e. it can be assigned // to, it can be copied from, and it can be used * in arithmetic operations. Note, however, that a // column selection will * always be treated as a column-major matrix, regardless of the storage // * order of the matrix containing the columns. 
The view can also be used on * both sides of an // assignment: It can either be used as an alias to grant * write access to specific columns of a // matrix primitive on the left-hand * side of an assignment or to grant read-access to specific // columns of a * matrix primitive or expression on the right-hand side of an assignment. * The // following example demonstrates this in detail: * * \code blaze::DynamicMatrix<double,blaze::columnMajor> A; * blaze::DynamicMatrix<double,blaze::rowMajor> B; * blaze::CompressedMatrix<double,blaze::columnMajor> C; // ... Resizing and * initialization * * // Selecting the columns 1, 3, 5, and 7 of A auto cs = columns( A, { 1UL, * 3UL, 5UL, 7UL } ); * * // Setting columns 1, 3, 5, and 7 of A to column 4 of B cs = columns( B, { * 4UL, 4UL, 4UL, 4UL } ); * * // Setting the columns 2, 4, 6, and 8 of A to C columns( A, { 2UL, 4UL, 6UL, * 8UL } ) = C; * * // Setting the first 4 columns of A to the columns 5, 4, 3, and 2 of C * submatrix( A, 0UL, 0UL, A.rows(), 4UL ) = columns( C, { 5UL, 4UL, 3UL, 2UL * } ); * * // Rotating the result of the addition between columns 1, 3, 5, and 7 of A * and C B = columns( cs + C, { 2UL, 3UL, 0UL, 1UL } ); \endcode * * // \warning It is the programmer's responsibility to ensure the column * selection does not outlive // the viewed matrix: * * \code // Creating a column selection on a temporary matrix; results in a * dangling reference! auto cs = columns<2UL,0UL>( DynamicMatrix<int>{ { 1, * 2, 3 }, { 4, 5, 6 }, { 7, 8, 9 } } ); \endcode * * // \n \section views_column_selections_element_access Element Access // // * The elements of a column selection can be directly accessed via the * function call operator: * * \code blaze::DynamicMatrix<double,blaze::columnMajor> A; // ... Resizing and * initialization * * // Creating a view on the first four columns of A in reverse order auto cs = * columns( A, { 3UL, 2UL, 1UL, 0UL } ); * * // Setting the element (0,0) of the column selection, which corresponds // to * the element at position (0,3) in matrix A cs(0,0) = 2.0; \endcode * * // Alternatively, the elements of a column selection can be traversed via * (const) iterators. // Just as with matrices, in case of non-const column * selection, \c begin() and \c end() return // an iterator, which allows to * manipuate the elements, in case of constant column selection an // * iterator to immutable elements is returned: * * \code blaze::DynamicMatrix<int,blaze::columnMajor> A( 512UL, 256UL ); // ... * Resizing and initialization * * // Creating a reference to a selection of columns of matrix A auto cs = * columns( A, { 16UL, 32UL, 64UL, 128UL } ); * * // Traversing the elements of the 0th column via iterators to non-const * elements for( auto it=cs.begin(0); it!=cs.end(0); ++it ) { it = ...; // * OK: Write access to the dense value. ... = *it; // OK: Read access to the * dense value. } * * // Traversing the elements of the 1st column via iterators to const elements * for( auto it=cs.cbegin(1); it!=cs.cend(1); ++it ) { it = ...; // * Compilation error: Assignment to the value via iterator-to-const is * invalid. ... = *it; // OK: Read access to the dense value. } \endcode * * \code blaze::CompressedMatrix<int,blaze::columnMajor> A( 512UL, 256UL ); // * ... 
Resizing and initialization * * // Creating a reference to a selection of columns of matrix A auto cs = * columns( A, { 16UL, 32UL, 64UL, 128UL } ); * * // Traversing the elements of the 0th column via iterators to non-const * elements for( auto it=cs.begin(0); it!=cs.end(0); ++it ) { it->value() = * ...; // OK: Write access to the value of the non-zero element. ... = * it->value(); // OK: Read access to the value of the non-zero element. * it->index() = ...; // Compilation error: The index of a non-zero element * cannot be changed. ... = it->index(); // OK: Read access to the index of * the sparse element. } * * // Traversing the elements of the 1st column via iterators to const elements * for( auto it=cs.cbegin(1); it!=cs.cend(1); ++it ) { it->value() = ...; // * Compilation error: Assignment to the value via iterator-to-const is * invalid. ... = it->value(); // OK: Read access to the value of the * non-zero element. it->index() = ...; // Compilation error: The index of a * non-zero element cannot be changed. ... = it->index(); // OK: Read access * to the index of the sparse element. } \endcode * * // \n \section views_column_selections_element_insertion Element Insertion // * // Inserting/accessing elements in a sparse column selection can be done * by several alternative // functions. The following example demonstrates * all options: * * \code blaze::CompressedMatrix<double,blaze::columnMajor> A( 512UL, 256UL ); * // Non-initialized matrix of size 512x256 * * auto cs = columns( A, { 10UL, 20UL, 30UL, 40UL } ); // View on the columns * 10, 20, 30, and 40 of A * * // The function call operator provides access to all possible elements of the * sparse column // selection, including the zero elements. In case the * function call operator is used to // access an element that is currently * not stored in the sparse column selection, the element // is inserted into * the column selection. cs(2,4) = 2.0; * * // The second operation for inserting elements is the set() function. In case * the element is // not contained in the column selection it is inserted * into the column selection, if it is // already contained in the column * selection its value is modified. cs.set( 2UL, 5UL, -1.2 ); * * // An alternative for inserting elements into the column selection is the * insert() function. // However, it inserts the element only in case the * element is not already contained in the // column selection. cs.insert( * 2UL, 6UL, 3.7 ); * * // Just as in the case of sparse matrices, elements can also be inserted via * the append() // function. In case of column selections, append() also * requires that the appended element's // index is strictly larger than the * currently largest non-zero index in the according column // of the column * selection and that the according column's capacity is large enough to hold * the // new element. Note however that due to the nature of a column * selection, which may be an alias // to an arbitrary collection of columns, * the append() function does not work as efficiently // for a column * selection as it does for a matrix. cs.reserve( 2UL, 10UL ); cs.append( * 2UL, 10UL, -2.1 ); \endcode * * // \n \section views_column_selections_common_operations Common Operations // * // A view on specific columns of a matrix can be used like any other dense * or sparse matrix. For // instance, the current size of the matrix, i.e. 
* the number of rows or columns can be obtained // via the \c rows() and \c * columns() functions, the current total capacity via the \c capacity() // * function, and the number of non-zero elements via the \c nonZeros() * function. However, since // column selections are views on specific * columns of a matrix, several operations are not possible, // such as * resizing and swapping: * * \code blaze::DynamicMatrix<int,blaze::columnMajor> A( 42UL, 42UL ); // ... * Resizing and initialization * * // Creating a view on the columns 8, 16, 24, and 32 of matrix A auto cs = * columns( A, { 8UL, 16UL, 24UL, 32UL } ); * * cs.rows(); // Returns the number of rows of the column selection * cs.columns(); // Returns the number of columns of the column selection * cs.capacity(); // Returns the capacity of the column selection * cs.nonZeros(); // Returns the number of non-zero elements contained in * the column selection * * cs.resize( 10UL, 8UL ); // Compilation error: Cannot resize a column * selection * * auto cs2 = columns( A, 9UL, 17UL, 25UL, 33UL ); swap( cs, cs2 ); // * Compilation error: Swap operation not allowed \endcode * * // \n \section views_column_selections_arithmetic_operations Arithmetic * Operations // // Both dense and sparse column selections can be used in * all arithmetic operations that any other // dense or sparse matrix can be * used in. The following example gives an impression of the use of // dense * column selctions within arithmetic operations. All operations (addition, * subtraction, // multiplication, scaling, ...) can be performed on all * possible combinations of dense and // sparse matrices with fitting element * types: * * \code blaze::DynamicMatrix<double,blaze::columnMajor> D1, D2, D3; * blaze::CompressedMatrix<double,blaze::columnMajor> S1, S2; * * blaze::CompressedVector<double,blaze::columnVector> a, b; * * // ... Resizing and initialization * * std::initializer_list<size_t> indices1{ 0UL, 3UL, 6UL, 9UL, 12UL, 15UL, * 18UL, 21UL }; std::initializer_list<size_t> indices2{ 1UL, 4UL, 7UL, 10UL, * 13UL, 16UL, 19UL, 22UL }; std::initializer_list<size_t> indices3{ 2UL, * 5UL, 8UL, 11UL, 14UL, 17UL, 20UL, 23UL }; * * auto cs = columns( D1, indices1 ); // Selecting the every third column of D1 * in the range [0..21] * * cs = D2; // Dense matrix assignment to the selected * columns columns( D1, indices2 ) = S1; // Sparse matrix assignment to the * selected columns * * D3 = cs + D2; // Dense matrix/dense matrix addition S2 * = S1 - columns( D1, indices2 ); // Sparse matrix/dense matrix subtraction * D2 = cs % columns( D1, indices3 ); // Dense matrix/dense matrix Schur * product D2 = columns( D1, indices2 ) * D1; // Dense matrix/dense matrix * multiplication * * columns( D1, indices2 ) *= 2.0; // In-place scaling of the second * selection of columns D2 = columns( D1, indices3 ) * 2.0; // Scaling of * the elements in the third selection of columns D2 = 2.0 * columns( D1, * indices3 ); // Scaling of the elements in the third selection of columns * * columns( D1, indices1 ) += D2; // Addition assignment columns( D1, indices2 * ) -= S1; // Subtraction assignment columns( D1, indices3 ) %= cs; // * Schur product assignment * * a = columns( D1, indices1 ) * b; // Dense matrix/sparse vector * multiplication \endcode * * // \n \section views_column_selections_on_row_major_matrix Column Selections * on a Row-Major Matrix // // Especially noteworthy is that column * selections can be created for both row-major and // column-major matrices. 
// Whereas the interface of a row-major matrix only allows to traverse a row directly and the
// interface of a column-major matrix only allows to traverse a column, via views it is possible
// to traverse a row of a column-major matrix or a column of a row-major matrix. For instance:

   \code
   blaze::DynamicMatrix<int,blaze::rowMajor> A( 64UL, 32UL );
   // ... Resizing and initialization

   // Creating a reference to the 1st and 3rd column of the row-major matrix A
   auto cs = columns( A, { 1UL, 3UL } );

   // Traversing column 0 of the selection, which corresponds to the 1st column of matrix A
   for( auto it=cs.begin( 0UL ); it!=cs.end( 0UL ); ++it ) {
      // ...
   }
   \endcode

// However, please note that creating a column selection on a matrix stored in a row-major fashion
// can result in a considerable performance decrease in comparison to a column selection on a
// matrix with column-major storage format. This is due to the non-contiguous storage of the
// matrix elements. Therefore care has to be taken in the choice of the most suitable storage
// order:

   \code
   // Setup of two row-major matrices
   blaze::DynamicMatrix<double,blaze::rowMajor> A( 128UL, 128UL );
   blaze::DynamicMatrix<double,blaze::rowMajor> B( 128UL, 128UL );
   // ... Resizing and initialization

   // The computation of the 15th, 30th, and 45th column of the multiplication between A and B ...
   blaze::DynamicMatrix<double,blaze::columnMajor> x = columns( A * B, { 15UL, 30UL, 45UL } );

   // ... is essentially the same as the following computation, which multiplies
   // A with the 15th, 30th, and 45th column of the row-major matrix B.
   blaze::DynamicMatrix<double,blaze::columnMajor> x = A * columns( B, { 15UL, 30UL, 45UL } );
   \endcode

// Although \b Blaze performs the resulting matrix/matrix multiplication as efficiently as
// possible, using a column-major storage order for matrix \c B would result in a more efficient
// evaluation.
//
// \n Previous: \ref views_columns &nbsp; &nbsp; Next: \ref views_bands
*/
//*************************************************************************************************


//**Bands ******************************************************************************************
/*!\page views_bands Bands
//
// \tableofcontents
//
//
// Bands provide views on a specific band of a dense or sparse matrix (e.g. the diagonal, the
// subdiagonal, ...). As such, bands act as a reference to a specific band. This reference
// is valid and can be used in every way any other vector can be used as long as the matrix
// containing the band is not resized or entirely destroyed. The band also acts as an alias to
// the band elements: Changes made to the elements (e.g. modifying values, inserting or erasing
// elements) are immediately visible in the matrix and changes made via the matrix are immediately
// visible in the band.
//
//
// \n \section views_bands_setup Setup of Bands
// <hr>
//
// \image html band.png
// \image latex band.eps "Band view" width=250pt
//
// A reference to a dense or sparse band can be created very conveniently via the \c band()
// function. It can be included via the header file

   \code
   #include <blaze/math/Band.h>
   \endcode

// The band index must be in the range from \f$[min(0,1-M)..max(0,N-1)]\f$, where \c M is the
// total number of rows and \c N is the total number of columns, and can be specified both at
// compile time and at runtime:

   \code
   blaze::DynamicMatrix<double,blaze::rowMajor> A;
   // ...
Resizing and * initialization * * // Creating a reference to the 1st lower band of matrix A (compile time * index) auto band1 = band<-1L>( A ); * * // Creating a reference to the 2nd upper band of matrix A (runtime index) * auto band2 = band( A, 2L ); \endcode * * // In addition, the \c diagonal() function provides a convenient shortcut for * the setup of a view // on the diagonal of a dense or sparse matrix. It has * the same effect as calling the \c band() // function with a compile time * index of 0: * * \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and * initialization * * // Creating a reference to the diagonal of matrix A via the band() and * diagonal() functions auto diag1 = band<0L>( A ); auto diag2 = diagonal( A * ); * * static_assert( blaze::IsSame< decltype(diag1), decltype(diag2) >::value, * "Non-identical types detected" ); \endcode * * // Both the \c band() and the \c diagonal() function return an expression * representing the band // view. The type of this expression depends on the * given arguments, primarily the type of the // matrix and the compile time * arguments. If the type is required, it can be determined via // \c * decltype specifier: * * \code using MatrixType = blaze::DynamicMatrix<int>; using BandType = * decltype( blaze::band<1L>( std::declval<MatrixType>() ) ); using * DiagonalType = decltype( blaze::diagonal( std::declval<MatrixType>() ) ); * \endcode * * // This resulting view can be treated as any other vector, i.e. it can be * assigned to, it can // be copied from, and it can be used in arithmetic * operations. By default, bands are considered // column vectors, but this * setting can be changed via the \c defaultTransposeFlag switch. The // * reference can also be used on both sides of an assignment: The band can * either be used as an // alias to grant write access to a specific band of * a matrix primitive on the left-hand side of // an assignment or to grant * read-access to a specific band of a matrix primitive or expression // on * the right-hand side of an assignment. The following example demonstrates * this in detail: * * \code blaze::DynamicVector<double,blaze::rowVector> x; * blaze::CompressedVector<double,blaze::rowVector> y; * blaze::DynamicMatrix<double,blaze::rowMajor> A, B; * blaze::CompressedMatrix<double,blaze::rowMajor> C, D; // ... Resizing and * initialization * * // Setting the 2nd upper band of matrix A to x auto band2 = band( A, 2L ); * band2 = x; * * // Setting the 3rd upper band of matrix B to y band( B, 3L ) = y; * * // Setting x to the 2nd lower band of the result of the matrix multiplication * x = band( A * B, -2L ); * * // Setting y to the 2nd upper band of the result of the sparse matrix * multiplication y = band( C * D, 2L ); \endcode * * // \warning It is the programmer's responsibility to ensure the band does not * outlive the viewed // matrix: * * \code // Creating a band on a temporary matrix; results in a dangling * reference! auto band1 = band<1L>( DynamicMatrix<int>{ { 1, 2, 3 }, { 4, 5, * 6 }, { 7, 8, 9 } } ); \endcode * * // \n \section views_bands_element_access Element Access // <hr> // // The * elements of a band can be directly accessed with the subscript operator: * * \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... 
Resizing and * initialization * * // Creating a view on the 4th upper band of matrix A auto band4 = band( A, 4L * ); * * // Setting the 1st element of the dense band, which corresponds // to the 1st * element in the 4th upper band of matrix A band4[1] = 2.0; \endcode * * // The numbering of the band elements is * * \f[\left(\begin{array}{*{5}{c}} 0 & 1 & 2 & \cdots & N-1 \\ * \end{array}\right),\f] * * // where N is the number of elements of the referenced band. Alternatively, * the elements of a band // can be traversed via iterators. Just as with * vectors, in case of non-const band, \c begin() and // \c end() return an * iterator, which allows to manipulate the elements, in case of constant * bands // an iterator to immutable elements is returned: * * \code blaze::DynamicMatrix<int,blaze::rowMajor> A( 128UL, 256UL ); // ... * Resizing and initialization * * // Creating a reference to the 5th upper band of matrix A auto band5 = band( * A, 5L ); * * // Traversing the elements via iterators to non-const elements for( auto * it=band5.begin(); it!=band5.end(); ++it ) { it = ...; // OK; Write access * to the dense band value ... = *it; // OK: Read access to the dense band * value. } * * // Traversing the elements via iterators to const elements for( auto * it=band5.cbegin(); it!=band5.cend(); ++it ) { it = ...; // Compilation * error: Assignment to the value via iterator-to-const is invalid. ... = * *it; // OK: Read access to the dense band value. } \endcode * * \code blaze::CompressedMatrix<int,blaze::rowMajor> A( 128UL, 256UL ); // ... * Resizing and initialization * * // Creating a reference to the 5th band of matrix A auto band5 = band( A, 5L * ); * * // Traversing the elements via iterators to non-const elements for( auto * it=band5.begin(); it!=band5.end(); ++it ) { it->value() = ...; // OK: * Write access to the value of the non-zero element. ... = it->value(); // * OK: Read access to the value of the non-zero element. it->index() = ...; * // Compilation error: The index of a non-zero element cannot be changed. * ... = it->index(); // OK: Read access to the index of the sparse element. * } * * // Traversing the elements via iterators to const elements for( auto * it=band5.cbegin(); it!=band5.cend(); ++it ) { it->value() = ...; // * Compilation error: Assignment to the value via iterator-to-const is * invalid. ... = it->value(); // OK: Read access to the value of the * non-zero element. it->index() = ...; // Compilation error: The index of a * non-zero element cannot be changed. ... = it->index(); // OK: Read access * to the index of the sparse element. } \endcode * * // \n \section views_bands_element_insertion Element Insertion // <hr> // // * Inserting/accessing elements in a sparse band can be done by several * alternative functions. // The following example demonstrates all options: * * \code blaze::CompressedMatrix<double,blaze::rowMajor> A( 10UL, 100UL ); // * Non-initialized 10x100 matrix * * auto diag( band( A, 0L ) ); // Reference to the diagonal of A * * // The subscript operator provides access to all possible elements of the * sparse band, // including the zero elements. In case the subscript * operator is used to access an element // that is currently not stored in * the sparse band, the element is inserted into the band. diag[42] = 2.0; * * // The second operation for inserting elements is the set() function. In case * the element // is not contained in the band it is inserted into the band, * if it is already contained in // the band its value is modified. 
diag.set( * 45UL, -1.2 ); * * // An alternative for inserting elements into the band is the insert() * function. However, // it inserts the element only in case the element is * not already contained in the band. diag.insert( 50UL, 3.7 ); \endcode * * // \n \section views_bands_common_operations Common Operations // <hr> // // * A band view can be used like any other column vector. This means that with * only a few // exceptions all \ref vector_operations and \ref * arithmetic_operations can be used. For instance, // the current number of * band elements can be obtained via the \c size() function, the current // * capacity via the \c capacity() function, and the number of non-zero * elements via the // \c nonZeros() function. However, since bands are * references to specific bands of a matrix, // several operations are not * possible, such as resizing and swapping. The following example // shows * this by means of a dense band view: * * \code blaze::DynamicMatrix<int,blaze::rowMajor> A( 42UL, 42UL ); // ... * Resizing and initialization * * // Creating a reference to the 2nd upper band of matrix A auto band2 = band( * A, 2L ); * * band2.size(); // Returns the number of elements in the band * band2.capacity(); // Returns the capacity of the band * band2.nonZeros(); // Returns the number of non-zero elements * contained in the band * * band2.resize( 84UL ); // Compilation error: Cannot resize a single band of a * matrix * * auto band3 = band( A, 3L ); swap( band2, band3 ); // Compilation error: * Swap operation not allowed \endcode * * // \n \section views_bands_arithmetic_operations Arithmetic Operations // * <hr> // // Both dense and sparse bands can be used in all arithmetic * operations that any other dense or // sparse vector can be used in. The * following example gives an impression of the use of dense // bands within * arithmetic operations. All operations (addition, subtraction, * multiplication, // scaling, ...) 
can be performed on all possible combinations of dense and sparse bands with fitting element types:

   \code
   blaze::DynamicVector<double,blaze::columnVector> a( 2UL, 2.0 ), b;
   blaze::CompressedVector<double,blaze::columnVector> c( 2UL );
   c[1] = 3.0;

   blaze::DynamicMatrix<double,blaze::rowMajor> A( 4UL, 2UL );  // Non-initialized 4x2 matrix

   auto band1( band( A, 1L ) );  // Reference to the 1st upper band of A
   auto diag ( band( A, 0L ) );  // Reference to the diagonal of A

   band1[0] = 0.0;      // Manual initialization of the 1st upper band of A
   diag = 1.0;          // Homogeneous initialization of the diagonal of A
   band( A, -1L ) = a;  // Dense vector initialization of the 1st lower band of A
   band( A, -2L ) = c;  // Sparse vector initialization of the 2nd lower band of A

   b = diag + a;               // Dense vector/dense vector addition
   b = c + band( A, -1L );     // Sparse vector/dense vector addition
   b = diag * band( A, -2L );  // Component-wise vector multiplication

   band( A, -1L ) *= 2.0;     // In-place scaling of the 1st lower band
   b = band( A, -1L ) * 2.0;  // Scaling of the 1st lower band
   b = 2.0 * band( A, -1L );  // Scaling of the 1st lower band

   band( A, -2L ) += a;              // Addition assignment
   band( A, -2L ) -= c;              // Subtraction assignment
   band( A, -2L ) *= band( A, 0L );  // Multiplication assignment

   double scalar = trans( c ) * band( A, -1L );  // Scalar/dot/inner product between two vectors

   A = band( A, -1L ) * trans( c );  // Outer product between two vectors
   \endcode

// \n Previous: \ref views_column_selections &nbsp; &nbsp; Next: \ref arithmetic_operations
*/
//*************************************************************************************************


//**Arithmetic Operations **************************************************************************
/*!\page arithmetic_operations Arithmetic Operations
//
// \tableofcontents
//
//
// \b Blaze provides the following arithmetic operations for vectors and matrices:
//
// <ul>
//    <li> \ref addition </li>
//    <li> \ref subtraction </li>
//    <li> \ref scalar_multiplication </li>
//    <li> \ref vector_vector_multiplication
//       <ul>
//          <li> \ref componentwise_multiplication </li>
//          <li> \ref inner_product </li>
//          <li> \ref outer_product </li>
//          <li> \ref cross_product </li>
//       </ul>
//    </li>
//    <li> \ref vector_vector_division </li>
//    <li> \ref matrix_vector_multiplication </li>
//    <li> \ref matrix_matrix_multiplication </li>
// </ul>
//
// \n Previous: \ref views_bands &nbsp; &nbsp; Next: \ref addition
*/
//*************************************************************************************************


//**Addition ***************************************************************************************
/*!\page addition Addition
//
// The addition of vectors and matrices is as intuitive as the addition of scalar values. For both
// the vector addition as well as the matrix addition the addition operator can be used. It even
// enables the addition of dense and sparse vectors as well as the addition of dense and sparse
// matrices:

   \code
   blaze::DynamicVector<int> v1( 5UL ), v3;
   blaze::CompressedVector<float> v2( 5UL );

   // ... Initializing the vectors

   v3 = v1 + v2;  // Addition of two column vectors of different data type
   \endcode

   \code
   blaze::DynamicMatrix<float,rowMajor> M1( 7UL, 3UL );
   blaze::CompressedMatrix<size_t,columnMajor> M2( 7UL, 3UL ), M3;

   // ...
Initializing the matrices

   M3 = M1 + M2;  // Addition of a row-major and a column-major matrix of different data types
   \endcode

// Note that it is necessary that both operands have exactly the same dimensions. Violating this
// precondition results in an exception. Also note that in case of vectors it is only possible to
// add vectors with the same transpose flag:

   \code
   blaze::DynamicVector<int,columnVector>   v1( 5UL );
   blaze::CompressedVector<float,rowVector> v2( 5UL );

   v1 + v2;           // Compilation error: Cannot add a column vector and a row vector
   v1 + trans( v2 );  // OK: Addition of two column vectors
   \endcode

// In case of matrices, however, it is possible to add row-major and column-major matrices. Note,
// however, that for performance reasons the addition of two matrices with the same storage order
// is preferable. The same argument holds for the element type: In case two vectors or matrices
// with the same element type are added, the performance can be much higher due to vectorization
// of the operation.

   \code
   blaze::DynamicVector<double> v1( 100UL ), v2( 100UL ), v3;

   // ... Initialization of the vectors

   v3 = v1 + v2;  // Vectorized addition of two double precision vectors
   \endcode

   \code
   blaze::DynamicMatrix<float> M1( 50UL, 70UL ), M2( 50UL, 70UL ), M3;

   // ... Initialization of the matrices

   M3 = M1 + M2;  // Vectorized addition of two row-major, single precision dense matrices
   \endcode

// \n Previous: \ref arithmetic_operations &nbsp; &nbsp; Next: \ref subtraction
*/
//*************************************************************************************************


//**Subtraction ************************************************************************************
/*!\page subtraction Subtraction
//
// The subtraction of vectors and matrices works just as intuitively as the addition, but with
// the subtraction operator. For both the vector subtraction as well as the matrix subtraction
// the subtraction operator can be used. It also enables the subtraction of dense and sparse
// vectors as well as the subtraction of dense and sparse matrices:

   \code
   blaze::DynamicVector<int>      v1( 5UL ), v3;
   blaze::CompressedVector<float> v2( 5UL );

   // ... Initializing the vectors

   v3 = v1 - v2;  // Subtraction of two column vectors of different data types


   blaze::DynamicMatrix<float,rowMajor>        M1( 7UL, 3UL );
   blaze::CompressedMatrix<size_t,columnMajor> M2( 7UL, 3UL ), M3;

   // ... Initializing the matrices

   M3 = M1 - M2;  // Subtraction of a row-major and a column-major matrix of different data types
   \endcode

// Note that it is necessary that both operands have exactly the same dimensions. Violating this
// precondition results in an exception. Also note that in case of vectors it is only possible to
// subtract vectors with the same transpose flag:

   \code
   blaze::DynamicVector<int,columnVector>   v1( 5UL );
   blaze::CompressedVector<float,rowVector> v2( 5UL );

   v1 - v2;           // Compilation error: Cannot subtract a row vector from a column vector
   v1 - trans( v2 );  // OK: Subtraction of two column vectors
   \endcode

// In case of matrices, however, it is possible to subtract row-major and column-major matrices.
// Note, however, that for performance reasons the subtraction of two matrices with the same
// storage order is preferable. The same argument holds for the element type: In case two vectors
// or matrices with the same element type are subtracted, the performance can be much higher due
// to vectorization of the operation.

   \code
   blaze::DynamicVector<double> v1( 100UL ), v2( 100UL ), v3;

   // ... Initialization of the vectors

   v3 = v1 - v2;  // Vectorized subtraction of two double precision vectors


   blaze::DynamicMatrix<float> M1( 50UL, 70UL ), M2( 50UL, 70UL ), M3;

   // ... Initialization of the matrices

   M3 = M1 - M2;  // Vectorized subtraction of two row-major, single precision dense matrices
   \endcode

// \n Previous: \ref addition &nbsp; &nbsp; Next: \ref scalar_multiplication
*/
//*************************************************************************************************


//**Scalar Multiplication **************************************************************************
/*!\page scalar_multiplication Scalar Multiplication
//
// The scalar multiplication is the multiplication of a scalar value with a vector or a matrix.
// In \b Blaze it is possible to use all built-in/fundamental data types except bool as scalar
// values. Additionally, it is possible to use std::complex values with the same built-in data
// types as element type.

   \code
   blaze::StaticVector<int,3UL> v1{ 1, 2, 3 };

   blaze::DynamicVector<double>   v2 = v1 * 1.2;
   blaze::CompressedVector<float> v3 = -0.3F * v1;
   \endcode

   \code
   blaze::StaticMatrix<int,3UL,2UL> M1{ { 1, 2 }, { 3, 4 }, { 5, 6 } };

   blaze::DynamicMatrix<double>   M2 = M1 * 1.2;
   blaze::CompressedMatrix<float> M3 = -0.3F * M1;
   \endcode

// Vectors and matrices cannot be used as scalar values for scalar multiplications (see the
// following example). However, each vector and matrix provides the \c scale() function, which
// can be used to scale a vector or matrix element-wise with arbitrary scalar data types:

   \code
   blaze::CompressedMatrix< blaze::StaticMatrix<int,3UL,3UL> > M1;
   blaze::StaticMatrix<int,3UL,3UL> scalar;

   M1 * scalar;         // No scalar multiplication, but matrix/matrix multiplication

   M1.scale( scalar );  // Scalar multiplication
   \endcode

// \n Previous: \ref subtraction &nbsp; &nbsp; Next: \ref componentwise_multiplication
*/
//*************************************************************************************************


//**Vector / Vector Multiplication *****************************************************************
/*!\page vector_vector_multiplication Vector/Vector Multiplication
//
// \n \section componentwise_multiplication Componentwise Multiplication
// <hr>
//
// Multiplying two vectors with the same transpose flag (i.e. either blaze::columnVector or
// blaze::rowVector) via the multiplication operator results in a componentwise multiplication
// of the two vectors:

   \code
   using blaze::DynamicVector;
   using blaze::CompressedVector;
   using blaze::StaticVector;

   CompressedVector<int,columnVector> v1( 17UL );
   DynamicVector<int,columnVector>    v2( 17UL );

   StaticVector<double,10UL,rowVector> v3;
   DynamicVector<double,rowVector>     v4( 10UL );

   // ... Initialization of the vectors

   CompressedVector<int,columnVector> v5( v1 * v2 );  // Componentwise multiplication of a sparse and
                                                      // a dense column vector. The result is a sparse
                                                      // column vector.
   DynamicVector<double,rowVector>    v6( v3 * v4 );  // Componentwise multiplication of two dense row
                                                      // vectors. The result is a dense row vector.
   \endcode

// \n \section inner_product Inner Product / Scalar Product / Dot Product
// <hr>
//
// The multiplication between a row vector and a column vector results in an inner product between
// the two vectors:

   \code
   blaze::StaticVector<int,3UL,rowVector> v1{ 2, 5, -1 };
   blaze::DynamicVector<int,columnVector> v2{ -1, 3, -2 };

   int result = v1 * v2;  // Results in the value 15
   \endcode

// The \c trans() function can be used to transpose a vector as necessary:

   \code
   blaze::StaticVector<int,3UL,rowVector> v1{ 2, 5, -1 };
   blaze::StaticVector<int,3UL,rowVector> v2{ -1, 3, -2 };

   int result = v1 * trans( v2 );  // Also results in the value 15
   \endcode

// Alternatively, either the \c inner() function, the \c dot() function or the comma operator can
// be used for any combination of vectors (row or column vectors) to perform an inner product:

   \code
   blaze::StaticVector<int,3UL,columnVector> v1{ 2, 5, -1 };
   blaze::StaticVector<int,3UL,rowVector>    v2{ -1, 3, -2 };

   // All alternatives for the inner product between a column vector and a row vector
   int result1 = trans( v1 ) * trans( v2 );
   int result2 = inner( v1, v2 );
   int result3 = dot( v1, v2 );
   int result4 = (v1,v2);
   \endcode

// When using the comma operator, please note the brackets embracing the inner product expression.
// Due to the low precedence of the comma operator (lower even than the assignment operator) these
// brackets are strictly required for a correct evaluation of the inner product.
//
//
// \n \section outer_product Outer Product
// <hr>
//
// The multiplication between a column vector and a row vector results in the outer product of
// the two vectors:

   \code
   blaze::StaticVector<int,3UL,columnVector> v1{ 2, 5, -1 };
   blaze::DynamicVector<int,rowVector>       v2{ -1, 3, -2 };

   StaticMatrix<int,3UL,3UL> M1 = v1 * v2;
   \endcode

// The \c trans() function can be used to transpose a vector as necessary. Note that the result of
// an outer product is always a matrix, not a scalar:

   \code
   blaze::StaticVector<int,3UL,rowVector> v1{ 2, 5, -1 };
   blaze::StaticVector<int,3UL,rowVector> v2{ -1, 3, -2 };

   StaticMatrix<int,3UL,3UL> M1 = trans( v1 ) * v2;
   \endcode

// Alternatively, the \c outer() function can be used for any combination of vectors (row or
// column vectors) to perform an outer product:

   \code
   blaze::StaticVector<int,3UL,rowVector> v1{ 2, 5, -1 };
   blaze::StaticVector<int,3UL,rowVector> v2{ -1, 3, -2 };

   StaticMatrix<int,3UL,3UL> M1 = outer( v1, v2 );  // Outer product between two row vectors
   \endcode

// \n \section cross_product Cross Product
// <hr>
//
// Two vectors with the same transpose flag can be multiplied via the cross product. The cross
// product between two vectors \f$ a \f$ and \f$ b \f$ is defined as

   \f[
   \left(\begin{array}{*{1}{c}}
   c_0 \\
   c_1 \\
   c_2 \\
   \end{array}\right)
   =
   \left(\begin{array}{*{1}{c}}
   a_1 b_2 - a_2 b_1 \\
   a_2 b_0 - a_0 b_2 \\
   a_0 b_1 - a_1 b_0 \\
   \end{array}\right).
   \f]

// Due to the absence of a \f$ \times \f$ operator in the C++ language, the cross product is
// realized via the \c cross() function. Alternatively, the modulo operator (i.e.
\c operator%) // can be used in case infix * notation is required: * * \code blaze::StaticVector<int,3UL,columnVector> v1{ 2, 5, -1 }; * blaze::DynamicVector<int,columnVector> v2{ -1, 3, -2 }; * * blaze::StaticVector<int,3UL,columnVector> v3( cross( v1, v2 ) ); * blaze::StaticVector<int,3UL,columnVector> v4( v1 % v2 ); \endcode * * // Please note that the cross product is restricted to three dimensional * (dense and sparse) // column vectors. // // \n Previous: \ref * scalar_multiplication &nbsp; &nbsp; Next: \ref vector_vector_division */ //************************************************************************************************* //**Vector / Vector Division ************************************************************************* /* * !\page vector_vector_division Vector/Vector Division // // \n \section * componentwise_division Componentwise Division // <hr> // // Dividing a * vector by a dense vector with the same transpose flag (i.e. either * blaze::columnVector // or blaze::rowVector) via the division operator * results in a componentwise division: * * \code using blaze::DynamicVector; using blaze::CompressedVector; * * CompressedVector<int,columnVector> v1( 17UL ); * DynamicVector<int,columnVector> v2( 17UL ); * * StaticVector<double,10UL,rowVector> v3; DynamicVector<double,rowVector> * v4( 10UL ); * * // ... Initialization of the vectors * * CompressedVector<int,columnVector> v5( v1 / v2 ); // Componentwise division * of a sparse and a // dense column vector. The result is a sparse // column * vector. DynamicVector<double,rowVector> v6( v3 / v4 ); // * Componentwise division of two dense row // vectors. The result is a dense * row vector. \endcode * * // Note that all values of the divisor must be non-zero and that no checks * are performed to assert // this precondition! // // \n Previous: \ref * vector_vector_multiplication &nbsp; &nbsp; Next: \ref * matrix_vector_multiplication */ //************************************************************************************************* //**Matrix / Vector Multiplication ******************************************************************* /* * !\page matrix_vector_multiplication Matrix/Vector Multiplication // // In * \b Blaze matrix/vector multiplications can be as intuitively formulated as * in mathematical // textbooks. Just as in textbooks there are two different * multiplications between a matrix and // a vector: a matrix/column vector * multiplication and a row vector/matrix multiplication: * * \code using blaze::StaticVector; using blaze::DynamicVector; using * blaze::DynamicMatrix; * * DynamicMatrix<int> M1( 39UL, 12UL ); * StaticVector<int,12UL,columnVector> v1; * * // ... Initialization of the matrix and the vector * * DynamicVector<int,columnVector> v2 = M1 * v1; // Matrix/column * vector multiplication DynamicVector<int,rowVector> v3 = trans( v1 ) * * M1; // Row vector/matrix multiplication \endcode * * // Note that the storage order of the matrix poses no restrictions on the * operation. Also note, // that the highest performance for a multiplication * between a dense matrix and a dense vector can // be achieved if both the * matrix and the vector have the same scalar element type. 
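//
// As a rough illustration of the element type aspect (a minimal sketch; mixed element types are
// assumed to be combinable, i.e. they can be multiplied and added, and the actual speedup depends
// on the target architecture):

   \code
   blaze::DynamicMatrix<double> A( 300UL, 300UL );
   blaze::DynamicVector<double> x( 300UL ), y;
   blaze::DynamicVector<float>  xf( 300UL );

   // ... Initialization of the matrix and the vectors

   y = A * x;   // Matrix and vector share the element type double: fully vectorizable kernel
   y = A * xf;  // Mixed element types: still a valid multiplication, but typically slower
   \endcode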
// // \n * Previous: \ref vector_vector_division &nbsp; &nbsp; Next: \ref * matrix_matrix_multiplication */ //************************************************************************************************* //**Matrix / Matrix Multiplication ******************************************************************* /* * !\page matrix_matrix_multiplication Matrix/Matrix Multiplication // // \n * \section schur_product Componentwise Multiplication / Schur Product // * <hr> // // Multiplying two matrices with the same dimensions (i.e. the * same number of rows and columns) // via the modulo operator results in a * componentwise multiplication (Schur product) of the two // matrices: * * \code using blaze::DynamicMatrix; using blaze::CompressedMatrix; * * DynamicMatrix<double> M1( 28UL, 35UL ); CompressedMatrix<float> M2( 28UL, * 35UL ); * * // ... Initialization of the matrices * * DynamicMatrix<double> M3 = M1 % M2; \endcode * * // \n \section matrix_product Matrix Product // <hr> // // The matrix/matrix * product can be formulated exactly as in mathematical textbooks: * * \code using blaze::DynamicMatrix; using blaze::CompressedMatrix; * * DynamicMatrix<double> M1( 45UL, 85UL ); CompressedMatrix<float> M2( 85UL, * 37UL ); * * // ... Initialization of the matrices * * DynamicMatrix<double> M3 = M1 * M2; \endcode * * // The storage order of the two matrices poses no restrictions on the * operation, all variations // are possible. It is also possible to multiply * two matrices with different element type, as // long as the element types * themselves can be multiplied and added. Note however that the // highest * performance for a multiplication between two matrices can be expected for * two // matrices with the same scalar element type. // // In case the * resulting matrix is known to be symmetric, Hermitian, lower triangular, * upper // triangular, or diagonal, the computation can be optimized by * explicitly declaring the // multiplication as symmetric, Hermitian, lower * triangular, upper triangular, or diagonal by // means of the \ref * matrix_operations_declaration_operations : * * \code using blaze::DynamicMatrix; * * DynamicMatrix<double> M1, M2, M3; * * // ... Initialization of the square matrices * * M3 = declsym ( M1 * M2 ); // Declare the result of the matrix multiplication * as symmetric M3 = declherm( M1 * M2 ); // Declare the result of the * matrix multiplication as Hermitian M3 = decllow ( M1 * M2 ); // Declare * the result of the matrix multiplication as lower triangular M3 = declupp ( * M1 * M2 ); // Declare the result of the matrix multiplication as upper * triangular M3 = decldiag( M1 * M2 ); // Declare the result of the matrix * multiplication as diagonal \endcode * * // Using a declaration operation on the a multiplication expression can speed * up the computation // by a factor of 2. Note however that the caller of * the according declaration operation takes // full responsibility for the * correctness of the declaration. Falsely declaring a multiplication // as * symmetric, Hermitian, lower triangular, upper triangular, or diagonal * leads to undefined // behavior! 
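//
// As an example of a safe declaration (a minimal sketch; M1 is assumed to be a real-valued dense
// matrix that has been properly sized and initialized), the product of a matrix with its own
// transpose is symmetric by construction, so declaring the result as symmetric is valid:

   \code
   using blaze::DynamicMatrix;

   DynamicMatrix<double> M1( 100UL, 80UL ), S;

   // ... Initialization of M1

   S = declsym( trans( M1 ) * M1 );  // The 80x80 result of trans( M1 ) * M1 is known to be symmetric
   \endcode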
// // \n Previous: \ref * matrix_vector_multiplication &nbsp; &nbsp; Next: \ref * shared_memory_parallelization */ //************************************************************************************************* //**Shared Memory Parallelization ****************************************************************** /* * !\page shared_memory_parallelization Shared Memory Parallelization // // * For all possible operations \b Blaze tries to achieve maximum performance * on a single CPU // core. However, today's CPUs are not single core * anymore, but provide several (homogeneous // or heterogeneous) compute * cores. In order to fully exploit the performance potential of a // * multicore CPU, computations have to be parallelized across all available * cores of a CPU. // For this purpose, \b Blaze provides four different * shared memory parallelization techniques: // // - \ref * hpx_parallelization // - \ref cpp_threads_parallelization // - \ref * boost_threads_parallelization // - \ref openmp_parallelization // // When * any of the shared memory parallelization techniques is activated, all * arithmetic // operations on dense vectors and matrices (including * additions, subtractions, multiplications, // divisions, and all * componentwise arithmetic operations) and most operations on sparse vectors * // and matrices are automatically run in parallel. However, in addition, * \b Blaze provides means // to enforce the serial execution of specific * operations: // // - \ref serial_execution // // \n Previous: \ref * matrix_matrix_multiplication &nbsp; &nbsp; Next: \ref hpx_parallelization */ //************************************************************************************************* //**HPX Parallelization **************************************************************************** /* * !\page hpx_parallelization HPX Parallelization // // \tableofcontents // * // // The first shared memory parallelization provided with \b Blaze is * based on // <a href="http://stellar.cct.lsu.edu/projects/hpx/">HPX</a>. // * // // \n \section hpx_setup HPX Setup // <hr> // // In order to enable the * HPX-based parallelization, the following steps have to be taken: First, // * the \c BLAZE_USE_HPX_THREADS command line argument has to be explicitly * specified during // compilation: * * \code ... -DBLAZE_USE_HPX_THREADS ... \endcode * * // Second, the HPX library and depending libraries such as Boost, hwloc, etc. * have to be linked. // And third, the HPX threads have to be initialized by * a call to the \c hpx::init() function (see // the <a * href="http://stellar.cct.lsu.edu/files/hpx_0.9.0/docs/hpx/tutorial.html">HP * X tutorial</a> // for further details). These three actions will cause the * \b Blaze library to automatically try // to run all operations in parallel * with the specified number of HPX threads. // // Note that the HPX-based * parallelization has priority over the OpenMP-based, C++11 thread-based, // * and Boost thread-based parallelizations, i.e. is preferred in case * multiple parallelizations // are enabled in combination with the HPX * thread parallelization. // // The number of threads used by the HPX * backend has to be specified via the command line: * * \code ... --hpx:threads 4 ... \endcode * * // Please note that the \b Blaze library does not limit the available number * of threads. Therefore // it is in YOUR responsibility to choose an * appropriate number of threads. 
The best performance, // though, can be * expected if the specified number of threads matches the available number * of // cores. // // In order to query the number of threads used for the * parallelization of operations, the // \c getNumThreads() function can be * used: * * \code const size_t threads = blaze::getNumThreads(); \endcode * * // In the context of HPX threads, the function will return the actual number * of threads used by // the HPX subsystem. // // // \n \section * hpx_configuration HPX Configuration // <hr> // // As in case of the other * shared memory parallelizations \b Blaze is not unconditionally running // * an operation in parallel (see for instance \ref openmp_parallelization). * Only in case a given // operation is large enough and exceeds a certain * threshold the operation is executed in parallel. // All thresholds related * to the HPX-based parallelization are contained within the configuration // * file <tt><blaze/config/Thresholds.h></tt>. // // Please note that these * thresholds are highly sensitiv to the used system architecture and // the * shared memory parallelization technique. Therefore the default values * cannot guarantee // maximum performance for all possible situations and * configurations. They merely provide a // reasonable standard for the * current CPU generation. Also note that the provided defaults // have been * determined using the OpenMP parallelization and require individual * adaption for // the HPX-based parallelization. // // \n Previous: \ref * shared_memory_parallelization &nbsp; &nbsp; Next: \ref * cpp_threads_parallelization */ //************************************************************************************************* //**C++ 11 Thread Parallelization ******************************************************************* /* * !\page cpp_threads_parallelization C++11 Thread Parallelization // // * \tableofcontents // // // In addition to the HPX-based shared memory * parallelization, starting with \b Blaze 2.1, // \b Blaze also provides a * shared memory parallelization based on C++11 threads. // // // \n \section * cpp_threads_setup C++11 Thread Setup // <hr> // // In order to enable the * C++11 thread-based parallelization, first the according C++11-specific // * compiler flags have to be used and second the \c BLAZE_USE_CPP_THREADS * command line argument // has to be explicitly specified. For instance, in * case of the GNU C++ and Clang compilers the // compiler flags have to be * extended by * * \code ... -std=c++11 -DBLAZE_USE_CPP_THREADS ... \endcode * * // This simple action will cause the \b Blaze library to automatically try to * run all operations // in parallel with the specified number of C++11 * threads. Note that in case both HPX and C++11 // threads are enabled on * the command line, the HPX-based parallelization has priority and is // * preferred. // // The number of threads can be either specified via the * environment variable \c BLAZE_NUM_THREADS * * \code export BLAZE_NUM_THREADS=4 // Unix systems set BLAZE_NUM_THREADS=4 * // Windows systems \endcode * * // or alternatively via the \c setNumThreads() function provided by the \b * Blaze library: * * \code blaze::setNumThreads( 4 ); \endcode * * // Please note that the \b Blaze library does not limit the available number * of threads. Therefore // it is in YOUR responsibility to choose an * appropriate number of threads. The best performance, // though, can be * expected if the specified number of threads matches the available number * of // cores. 
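//
// One possible way to match the number of threads to the available cores is to query the hardware
// concurrency (a sketch; note that std::thread::hardware_concurrency() may return 0 if the value
// cannot be determined):

   \code
   #include <thread>

   const unsigned int cores = std::thread::hardware_concurrency();
   blaze::setNumThreads( cores > 0U ? cores : 1U );
   \endcode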
// // In order to query the number of threads used for the * parallelization of operations, the // \c getNumThreads() function can be * used: * * \code const size_t threads = blaze::getNumThreads(); \endcode * * // In the context of C++11 threads, the function will return the previously * specified number of // threads. // // // \n \section * cpp_threads_configuration C++11 Thread Configuration // <hr> // // As in * case of the OpenMP-based parallelization \b Blaze is not unconditionally * running an // operation in parallel. In case \b Blaze deems the parallel * execution as counterproductive for // the overall performance, the * operation is executed serially. One of the main reasons for not // * executing an operation in parallel is the size of the operands. For * instance, a vector addition // is only executed in parallel if the size of * both vector operands exceeds a certain threshold. // Otherwise, the * performance could seriously decrease due to the overhead caused by the * thread // setup. However, in order to be able to adjust the \b Blaze * library to a specific system, it // is possible to configure these * thresholds manually. All thresholds are contained within the // * configuration file <tt><blaze/config/Thresholds.h></tt>. // // Please note * that these thresholds are highly sensitiv to the used system architecture * and // the shared memory parallelization technique. Therefore the default * values cannot guarantee // maximum performance for all possible situations * and configurations. They merely provide a // reasonable standard for the * current CPU generation. Also note that the provided defaults // have been * determined using the OpenMP parallelization and require individual * adaption for // the C++11 thread parallelization. // // // \n \section * cpp_threads_known_issues Known Issues // <hr> // // There is a known issue * in Visual Studio 2012 and 2013 that may cause C++11 threads to hang // if * their destructor is executed after the \c main() function: // // * http://connect.microsoft.com/VisualStudio/feedback/details/747145 // // * Unfortunately, the C++11 parallelization of the \b Blaze library is * affected from this bug. // In order to circumvent this problem, \b Blaze * provides the \c shutDownThreads() function, // which can be used to * manually destroy all threads at the end of the \c main() function: * * \code int main() { // ... Using the C++11 thread parallelization of Blaze * * shutDownThreads(); } \endcode * * // Please note that this function may only be used at the end of the \c * main() function. After // this function no further computation may be * executed! Also note that this function has an // effect for Visual Studio * compilers only and doesn't need to be used with any other compiler. // // * \n Previous: \ref hpx_parallelization &nbsp; &nbsp; Next: \ref * boost_threads_parallelization */ //************************************************************************************************* //**Boost Thread Parallelization ******************************************************************* /* * !\page boost_threads_parallelization Boost Thread Parallelization // // * \tableofcontents // // // The third available shared memory * parallelization provided with \b Blaze is based // on <a * href="https://www.boost.org/doc/libs/1_68_0/doc/html/thread.html">Boost * threads</a>. 
// // // \n \section boost_threads_setup Boost Thread Setup * // <hr> // // In order to enable the Boost thread-based parallelization, * two steps have to be taken: First, // the \c BLAZE_USE_BOOST_THREADS * command line argument has to be explicitly specified during // * compilation: * * \code ... -DBLAZE_USE_BOOST_THREADS ... \endcode * * // Second, the according Boost libraries have to be linked. These two simple * actions will cause // the \b Blaze library to automatically try to run all * operations in parallel with the specified // number of Boost threads. Note * that the HPX-based and C++11 thread-based parallelizations have // * priority, i.e. are preferred in case either is enabled in combination with * the Boost thread // parallelization. // // The number of threads can be * either specified via the environment variable \c BLAZE_NUM_THREADS * * \code export BLAZE_NUM_THREADS=4 // Unix systems set BLAZE_NUM_THREADS=4 * // Windows systems \endcode * * // or alternatively via the \c setNumThreads() function provided by the \b * Blaze library: * * \code blaze::setNumThreads( 4 ); \endcode * * // Please note that the \b Blaze library does not limit the available number * of threads. Therefore // it is in YOUR responsibility to choose an * appropriate number of threads. The best performance, // though, can be * expected if the specified number of threads matches the available number * of // cores. // // In order to query the number of threads used for the * parallelization of operations, the // \c getNumThreads() function can be * used: * * \code const size_t threads = blaze::getNumThreads(); \endcode * * // In the context of Boost threads, the function will return the previously * specified number of // threads. // // // \n \section * boost_threads_configuration Boost Thread Configuration // <hr> // // As in * case of the other shared memory parallelizations \b Blaze is not * unconditionally running // an operation in parallel (see \ref * openmp_parallelization or \ref cpp_threads_parallelization). // All * thresholds related to the Boost thread parallelization are also contained * within the // configuration file <tt><blaze/config/Thresholds.h></tt>. // * // Please note that these thresholds are highly sensitiv to the used * system architecture and // the shared memory parallelization technique. * Therefore the default values cannot guarantee // maximum performance for * all possible situations and configurations. They merely provide a // * reasonable standard for the current CPU generation. Also note that the * provided defaults // have been determined using the OpenMP parallelization * and require individual adaption for // the Boost thread parallelization. * // // \n Previous: \ref cpp_threads_parallelization &nbsp; &nbsp; Next: * \ref openmp_parallelization */ //************************************************************************************************* //**OpenMP Parallelization ************************************************************************* /* * !\page openmp_parallelization OpenMP Parallelization // // * \tableofcontents // // // The fourth and final shared memory * parallelization provided with \b Blaze is based on // <a * href="https://www.openmp.org">OpenMP</a>. 
// // // \n \section * openmp_setup OpenMP Setup // <hr> // // To enable the OpenMP-based * parallelization, all that needs to be done is to explicitly specify // the * use of OpenMP on the command line: * * \code -fopenmp // GNU/Clang C++ compiler -openmp // Intel C++ compiler * /openmp // Visual Studio \endcode * * // This simple action will cause the \b Blaze library to automatically try to * run all operations // in parallel with the specified number of threads. * Note however that the HPX-based, the C++11 // thread-based, and the Boost * thread-based parallelizations have priority, i.e. are preferred in // case * either is enabled in combination with the OpenMP thread parallelization. * // // As common for OpenMP, the number of threads can be specified either * via an environment variable * * \code export OMP_NUM_THREADS=4 // Unix systems set OMP_NUM_THREADS=4 // * Windows systems \endcode * * // or via an explicit call to the \c omp_set_num_threads() function: * * \code omp_set_num_threads( 4 ); \endcode * * // Alternatively, the number of threads can also be specified via the \c * setNumThreads() function // provided by the \b Blaze library: * * \code blaze::setNumThreads( 4 ); \endcode * * // Please note that the \b Blaze library does not limit the available number * of threads. Therefore // it is in YOUR responsibility to choose an * appropriate number of threads. The best performance, // though, can be * expected if the specified number of threads matches the available number * of // cores. // // In order to query the number of threads used for the * parallelization of operations, the // \c getNumThreads() function can be * used: * * \code const size_t threads = blaze::getNumThreads(); \endcode * * // In the context of OpenMP, the function returns the maximum number of * threads OpenMP will use // within a parallel region and is therefore * equivalent to the \c omp_get_max_threads() function. // // // \n \section * openmp_configuration OpenMP Configuration // <hr> // // Note that \b Blaze * is not unconditionally running an operation in parallel. In case \b Blaze * // deems the parallel execution as counterproductive for the overall * performance, the operation // is executed serially. One of the main * reasons for not executing an operation in parallel is // the size of the * operands. For instance, a vector addition is only executed in parallel if * the // size of both vector operands exceeds a certain threshold. * Otherwise, the performance could // seriously decrease due to the overhead * caused by the thread setup. However, in order to be // able to adjust the * \b Blaze library to a specific system, it is possible to configure these * // thresholds manually. All shared memory thresholds are contained within * the configuration file // <tt><blaze/config/Thresholds.h></tt>. // // * Please note that these thresholds are highly sensitiv to the used system * architecture and // the shared memory parallelization technique (see also * \ref cpp_threads_parallelization and // \ref * boost_threads_parallelization). Therefore the default values cannot * guarantee maximum // performance for all possible situations and * configurations. They merely provide a reasonable // standard for the * current CPU generation. // // // \n \section openmp_first_touch First * Touch Policy // <hr> // // So far the \b Blaze library does not (yet) * automatically initialize dynamic memory according // to the first touch * principle. 
Consider for instance the following vector triad example: * * \code using blaze::columnVector; * * const size_t N( 1000000UL ); * * blaze::DynamicVector<double,columnVector> a( N ), b( N ), c( N ), d( N ); * * // Initialization of the vectors b, c, and d for( size_t i=0UL; i<N; ++i ) { * b[i] = rand<double>(); c[i] = rand<double>(); d[i] = rand<double>(); } * * // Performing a vector triad a = b + c * d; \endcode * * // If this code, which is prototypical for many OpenMP applications that have * not been optimized // for ccNUMA architectures, is run across several * locality domains (LD), it will not scale // beyond the maximum performance * achievable on a single LD if the working set does not fit into // the * cache. This is because the initialization loop is executed by a single * thread, writing to // \c b, \c c, and \c d for the first time. Hence, all * memory pages belonging to those arrays will // be mapped into a single LD. * // // As mentioned above, this problem can be solved by performing vector * initialization in parallel: * * \code // ... * * // Initialization of the vectors b, c, and d #pragma omp parallel for for( * size_t i=0UL; i<N; ++i ) { b[i] = rand<double>(); c[i] = rand<double>(); * d[i] = rand<double>(); } * * // ... \endcode * * // This simple modification makes a huge difference on ccNUMA in memory-bound * situations (as for // instance in all BLAS level 1 operations and * partially BLAS level 2 operations). Therefore, in // order to achieve the * maximum possible performance, it is imperative to initialize the memory // * according to the later use of the data structures. // // // \n \section * openmp_limitations Limitations of the OpenMP Parallelization // <hr> // // * There are a few important limitations to the current \b Blaze OpenMP * parallelization. The first // one involves the explicit use of an OpenMP * parallel region (see \ref openmp_parallel), the // other one the OpenMP \c * sections directive (see \ref openmp_sections). // // // \n \subsection * openmp_parallel The Parallel Directive // // In OpenMP threads are * explicitly spawned via the an OpenMP parallel directive: * * \code // Serial region, executed by a single thread * * #pragma omp parallel { // Parallel region, executed by the specified number * of threads } * * // Serial region, executed by a single thread \endcode * * // Conceptually, the specified number of threads (see \ref openmp_setup) is * created every time a // parallel directive is encountered. Therefore, from * a performance point of view, it seems to be // beneficial to use a single * OpenMP parallel directive for several operations: * * \code blaze::DynamicVector<double> x, y1, y2; blaze::DynamicMatrix<double> A, * B; * * #pragma omp parallel { y1 = A * x; y2 = B * x; } \endcode * * // Unfortunately, this optimization approach is not allowed within the \b * Blaze library. More // explicitly, it is not allowed to put an operation * into a parallel region. The reason is that // the entire code contained * within a parallel region is executed by all threads. Although this // * appears to just comprise the contained computations, a computation (or * more specifically the // assignment of an expression to a vector or * matrix) can contain additional logic that must not // be handled by * multiple threads (as for instance memory allocations, setup of * temporaries, etc.). 
// Therefore it is not possible to manually start a * parallel region for several operations, but // \b Blaze will spawn threads * automatically, depending on the specifics of the operation at hand // and * the given operands. // // \n \subsection openmp_sections The Sections * Directive // // OpenMP provides several work-sharing construct to * distribute work among threads. One of these // constructs is the \c * sections directive: * * \code blaze::DynamicVector<double> x, y1, y2; blaze::DynamicMatrix<double> A, * B; * * // ... Resizing and initialization * * #pragma omp sections { #pragma omp section * * y1 = A * x; * * #pragma omp section * * y2 = B * x; * * } \endcode * * // In this example, two threads are used to compute two distinct * matrix/vector multiplications // concurrently. Thereby each of the \c * sections is executed by exactly one thread. // // Unfortunately \b Blaze * does not support concurrent parallel computations and therefore this // * approach does not work with any of the \b Blaze parallelization * techniques. All techniques // (including the C++11 and Boost thread * parallelizations; see \ref cpp_threads_parallelization // and \ref * boost_threads_parallelization) are optimized for the parallel computation * of an // operation within a single thread of execution. This means that \b * Blaze tries to use all // available threads to compute the result of a * single operation as efficiently as possible. // Therefore, for this * special case, it is advisable to disable all \b Blaze parallelizations // * and to let \b Blaze compute all operations within a \c sections directive * in serial. This can // be done by either completely disabling the \b Blaze * parallelization (see \ref serial_execution) // or by selectively * serializing all operations within a \c sections directive via the \c * serial() // function: * * \code blaze::DynamicVector<double> x, y1, y2; blaze::DynamicMatrix<double> A, * B; * * // ... Resizing and initialization * * #pragma omp sections { #pragma omp section * * y1 = serial( A * x ); * * #pragma omp section * * y2 = serial( B * x ); * * } \endcode * * // Please note that the use of the \c BLAZE_SERIAL_SECTION (see also \ref * serial_execution) does // NOT work in this context! // // \n Previous: * \ref boost_threads_parallelization &nbsp; &nbsp; Next: \ref * serial_execution */ //************************************************************************************************* //**Serial Execution ******************************************************************************* /* * !\page serial_execution Serial Execution // // Sometimes it may be * necessary to enforce the serial execution of specific operations. For this * // purpose, the \b Blaze library offers three possible options: the * serialization of a single // expression via the \c serial() function, the * serialization of a block of expressions via the // \c * BLAZE_SERIAL_SECTION, and the general deactivation of the parallel * execution. // // // \n \section serial_execution_serial_expression Option * 1: Serialization of a Single Expression // <hr> // // The first option is * the serialization of a specific operation via the \c serial() function: * * \code blaze::DynamicMatrix<double> A, B, C; // ... Resizing and * initialization C = serial( A + B ); \endcode * * // \c serial() enforces the serial evaluation of the enclosed expression. It * can be used on any // kind of dense or sparse vector or matrix expression. 
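//
// For instance (a minimal sketch, assuming the operands have been resized and initialized),
// \c serial() can be wrapped around any right-hand side expression, such as a matrix/vector
// multiplication:

   \code
   blaze::DynamicMatrix<double> A;
   blaze::DynamicVector<double> x, y;

   // ... Resizing and initialization

   y = serial( A * x );  // Enforces a single-threaded evaluation of the multiplication
   \endcode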
* // // // \n \section serial_execution_serial_section Option 2: * Serialization of Multiple Expressions // <hr> // // The second option is * the temporary and local enforcement of a serial execution via the // \c * BLAZE_SERIAL_SECTION: * * \code using blaze::rowMajor; using blaze::columnVector; * * blaze::DynamicMatrix<double,rowMajor> A; * blaze::DynamicVector<double,columnVector> b, c, d, x, y, z; * * // ... Resizing and initialization * * // Parallel execution // If possible and beneficial for performance the * following operation is executed in parallel. x = A * b; * * // Serial execution // All operations executed within the serial section are * guaranteed to be executed in // serial (even if a parallel execution would * be possible and/or beneficial). BLAZE_SERIAL_SECTION { y = A * c; z = A * * d; } * * // Parallel execution continued // ... \endcode * * // Within the scope of the \c BLAZE_SERIAL_SECTION, all operations are * guaranteed to run in serial. // Outside the scope of the serial section, * all operations are run in parallel (if beneficial for // the performance). * // // Note that the \c BLAZE_SERIAL_SECTION must only be used within a * single thread of execution. // The use of the serial section within * several concurrent threads will result undefined behavior! // // // \n * \section serial_execution_deactivate_parallelism Option 3: Deactivation of * Parallel Execution // <hr> // // The third option is the general * deactivation of the parallel execution (even in case OpenMP is // enabled * on the command line). This can be achieved via the \c * BLAZE_USE_SHARED_MEMORY_PARALLELIZATION // switch in the * <tt>./blaze/config/SMP.h</tt> configuration file: * * \code #define BLAZE_USE_SHARED_MEMORY_PARALLELIZATION 1 \endcode * * // In case the \c BLAZE_USE_SHARED_MEMORY_PARALLELIZATION switch is set to 0, * the shared memory // parallelization is deactivated altogether. // // \n * Previous: \ref openmp_parallelization &nbsp; &nbsp; Next: \ref * serialization */ //************************************************************************************************* //**Serialization ********************************************************************************** /* * !\page serialization Serialization // // Sometimes it is necessary to * store vector and/or matrices on disk, for instance for storing // results * or for sharing specific setups with other people. The \b Blaze math * serialization // module provides the according functionality to create * platform independent, portable, binary // representations of vectors and * matrices that can be used to store the \b Blaze data structures // without * loss of precision and to reliably transfer them from one machine to * another. // // The following two pages explain how to serialize vectors * and matrices: // // - \ref vector_serialization // - \ref * matrix_serialization // // \n Previous: \ref serial_execution &nbsp; * &nbsp; Next: \ref vector_serialization */ //************************************************************************************************* //**Vector Serialization *************************************************************************** /* * !\page vector_serialization Vector Serialization // // The following * example demonstrates the (de-)serialization of dense and sparse vectors: * * \code using blaze::columnVector; using blaze::rowVector; * * // Serialization of both vectors { blaze::StaticVector<double,5UL,rowVector> * d; blaze::CompressedVector<int,columnVector> s; * * // ... 
Resizing and initialization

      // Creating an archive that writes into the file "vectors.blaze"
      blaze::Archive<std::ofstream> archive( "vectors.blaze" );

      // Serialization of both vectors into the same archive. Note that d lies before s!
      archive << d << s;
   }

   // Reconstitution of both vectors
   {
      blaze::DynamicVector<double,rowVector> d1;
      blaze::DynamicVector<int,rowVector> d2;

      // Creating an archive that reads from the file "vectors.blaze"
      blaze::Archive<std::ifstream> archive( "vectors.blaze" );

      // Reconstituting the former d vector into d1. Note that it is possible to reconstitute
      // the vector into a different kind of vector (StaticVector -> DynamicVector), but that
      // the type of elements has to be the same.
      archive >> d1;

      // Reconstituting the former s vector into d2. Note that it is even possible to reconstitute
      // a sparse vector as a dense vector (also the reverse is possible) and that a column vector
      // can be reconstituted as a row vector (and vice versa). Note however that also in this case
      // the type of elements is the same!
      archive >> d2;
   }
   \endcode

// The (de-)serialization of vectors is not restricted to vectors of built-in data type, but can
// also be used for vectors with vector or matrix element type:

   \code
   // Serialization
   {
      blaze::CompressedVector< blaze::DynamicVector< blaze::complex<double> > > vec;

      // ... Resizing and initialization

      // Creating an archive that writes into the file "vector.blaze"
      blaze::Archive<std::ofstream> archive( "vector.blaze" );

      // Serialization of the vector into the archive
      archive << vec;
   }

   // Deserialization
   {
      blaze::CompressedVector< blaze::DynamicVector< blaze::complex<double> > > vec;

      // Creating an archive that reads from the file "vector.blaze"
      blaze::Archive<std::ifstream> archive( "vector.blaze" );

      // Reconstitution of the vector from the archive
      archive >> vec;
   }
   \endcode

// As the examples demonstrate, the vector serialization offers enormous flexibility. However,
// several actions result in errors:
//
//  - vectors cannot be reconstituted as matrices (and vice versa)
//  - the element type of the serialized and reconstituted vector must match, which means
//    that on the source and destination platform the general type (signed/unsigned integral
//    or floating point) and the size of the type must be exactly the same
//  - when reconstituting a \c StaticVector, its size must match the size of the serialized vector
//
// In case an error is encountered during (de-)serialization, a \c std::runtime_error is thrown.
//
// \n Previous: \ref serialization &nbsp; &nbsp; Next: \ref matrix_serialization
*/
//*************************************************************************************************


//**Matrix Serialization ***************************************************************************
/*!\page matrix_serialization Matrix Serialization
//
// The serialization of matrices works in the same manner as the serialization of vectors. The
// following example demonstrates the (de-)serialization of dense and sparse matrices:

   \code
   using blaze::rowMajor;
   using blaze::columnMajor;

   // Serialization of both matrices
   {
      blaze::StaticMatrix<double,3UL,5UL,rowMajor> D;
      blaze::CompressedMatrix<int,columnMajor> S;

      // ... Resizing and initialization

      // Creating an archive that writes into the file "matrices.blaze"
      blaze::Archive<std::ofstream> archive( "matrices.blaze" );

      // Serialization of both matrices into the same archive. Note that D lies before S!
      archive << D << S;
   }

   // Reconstitution of both matrices
   {
      blaze::DynamicMatrix<double,rowMajor> D1;
      blaze::DynamicMatrix<int,rowMajor> D2;

      // Creating an archive that reads from the file "matrices.blaze"
      blaze::Archive<std::ifstream> archive( "matrices.blaze" );

      // Reconstituting the former D matrix into D1. Note that it is possible to reconstitute
      // the matrix into a different kind of matrix (StaticMatrix -> DynamicMatrix), but that
      // the type of elements has to be the same.
      archive >> D1;

      // Reconstituting the former S matrix into D2. Note that it is even possible to reconstitute
      // a sparse matrix as a dense matrix (also the reverse is possible) and that a column-major
      // matrix can be reconstituted as a row-major matrix (and vice versa). Note however that also
      // in this case the type of elements is the same!
      archive >> D2;
   }
   \endcode

// Note that also in case of matrices it is possible to (de-)serialize matrices with vector or
// matrix elements:

   \code
   // Serialization
   {
      blaze::CompressedMatrix< blaze::DynamicMatrix< blaze::complex<double> > > mat;

      // ... Resizing and initialization

      // Creating an archive that writes into the file "matrix.blaze"
      blaze::Archive<std::ofstream> archive( "matrix.blaze" );

      // Serialization of the matrix into the archive
      archive << mat;
   }

   // Deserialization
   {
      blaze::CompressedMatrix< blaze::DynamicMatrix< blaze::complex<double> > > mat;

      // Creating an archive that reads from the file "matrix.blaze"
      blaze::Archive<std::ifstream> archive( "matrix.blaze" );

      // Reconstitution of the matrix from the archive
      archive >> mat;
   }
   \endcode

// Note that just as the vector serialization, the matrix serialization is restricted by a
// few important rules:
//
//  - matrices cannot be reconstituted as vectors (and vice versa)
//  - the element type of the serialized and reconstituted matrix must match, which means
//    that on the source and destination platform the general type (signed/unsigned integral
//    or floating point) and the size of the type must be exactly the same
//  - when reconstituting a \c StaticMatrix, the number of rows and columns must match those
//    of the serialized matrix
//
// In case an error is encountered during (de-)serialization, a \c std::runtime_error is thrown.
//
// \n Previous: \ref vector_serialization &nbsp; &nbsp; Next: \ref customization \n
*/
//*************************************************************************************************


//**Customization **********************************************************************************
/*!\page customization Customization
//
// Although \b Blaze tries to work out of the box for every possible setting, still it may be
// necessary to adapt the library to specific requirements.
The following three pages * explain // how to customize the \b Blaze library to your own needs: // // * - \ref configuration_files // - \ref vector_and_matrix_customization // * - \ref error_reporting_customization // // \n Previous: \ref * matrix_serialization &nbsp; &nbsp; Next: \ref configuration_files */ //************************************************************************************************* //**Configuration Files **************************************************************************** /* * !\page configuration_files Configuration Files // // \tableofcontents // * // // Sometimes it is necessary to adapt \b Blaze to specific * requirements. For this purpose // \b Blaze provides several configuration * files in the <tt>./blaze/config/</tt> subdirectory, // which provide ample * opportunity to customize internal settings, behavior, and thresholds. // * This chapter explains the most important of these configuration files. For * a complete // overview of all customization opportunities, please go to * the configuration files in the // <tt>./blaze/config/</tt> subdirectory or * see the complete \b Blaze documentation. // // // \n \section * transpose_flag Default Vector Storage // <hr> // // The \b Blaze default * is that all vectors are created as column vectors (if not specified // * explicitly): * * \code blaze::StaticVector<double,3UL> x; // Creates a 3-dimensional static * column vector \endcode * * // The header file <tt>./blaze/config/TransposeFlag.h</tt> allows the * configuration of the default // vector storage (i.e. the default transpose * flag) of all vectors within the \b Blaze library. // The default transpose * flag is specified via the \c BLAZE_DEFAULT_TRANSPOSE_FLAG macro: * * \code #define BLAZE_DEFAULT_TRANSPOSE_FLAG blaze::columnVector \endcode * * // Alternatively the default transpose flag can be specified via command line * or by defining this // symbol manually before including any \b Blaze * header file: * * \code #define BLAZE_DEFAULT_TRANSPOSE_FLAG blaze::columnVector #include * <blaze/Blaze.h> \endcode * * // Valid settings for \c BLAZE_DEFAULT_TRANSPOSE_FLAG are blaze::rowVector * and blaze::columnVector. // // // \n \section storage_order Default Matrix * Storage // <hr> // // Matrices are by default created as row-major * matrices: * * \code blaze::StaticMatrix<double,3UL,3UL> A; // Creates a 3x3 row-major * matrix \endcode * * // The header file <tt>./blaze/config/StorageOrder.h</tt> allows the * configuration of the default // matrix storage order. Via the \c * BLAZE_DEFAULT_STORAGE_ORDER macro the default storage order // for all * matrices of the \b Blaze library can be specified. * * \code #define BLAZE_DEFAULT_STORAGE_ORDER blaze::rowMajor \endcode * * // Alternatively the default storage order can be specified via command line * or by defining this // symbol manually before including any \b Blaze * header file: * * \code #define BLAZE_DEFAULT_STORAGE_ORDER blaze::rowMajor #include * <blaze/Blaze.h> \endcode * * // Valid settings for \c BLAZE_DEFAULT_STORAGE_ORDER are blaze::rowMajor and * blaze::columnMajor. // // // \n \section blas_mode BLAS Mode // <hr> // // * In order to achieve maximum performance for multiplications with dense * matrices, \b Blaze can // be configured to use a BLAS library. 
Via the * following compilation switch in the configuration // file * <tt>./blaze/config/BLAS.h</tt> BLAS can be enabled: * * \code #define BLAZE_BLAS_MODE 1 \endcode * * // In case the selected BLAS library provides parallel execution, the \c * BLAZE_BLAS_IS_PARALLEL // switch should be activated to prevent \b Blaze * from parallelizing on its own: * * \code #define BLAZE_BLAS_IS_PARALLEL 1 \endcode * * // Alternatively, both settings can be specified via command line or by * defining the symbols // manually before including any \b Blaze header * file: * * \code #define BLAZE_BLAS_MODE 1 #define BLAZE_BLAS_IS_PARALLEL 1 #include * <blaze/Blaze.h> \endcode * * // In case no BLAS library is available, \b Blaze will still work and will * not be reduced in // functionality, but performance may be limited. // // * // \n \section cache_size Cache Size // <hr> // // The optimization of * several \b Blaze compute kernels depends on the cache size of the target * // architecture. By default, \b Blaze assumes a cache size of 3 MiByte. * However, for optimal // speed the exact cache size of the system should be * provided via the \c cacheSize value in the // * <tt>./blaze/config/CacheSize.h</tt> configuration file: * * \code #define BLAZE_CACHE_SIZE 3145728UL; \endcode * * // The cache size can also be specified via command line or by defining this * symbol manually // before including any \b Blaze header file: * * \code #define BLAZE_CACHE_SIZE 3145728UL #include <blaze/Blaze.h> \endcode * * // \n \section vectorization Vectorization // <hr> // // In order to achieve * maximum performance and to exploit the compute power of a target platform * // the \b Blaze library attempts to vectorize all linear algebra * operations by SSE, AVX, and/or // AVX-512 intrinsics, depending on which * instruction set is available. However, it is possible // to disable the * vectorization entirely by the compile time switch in the configuration * file // <tt>./blaze/config/Vectorization.h</tt>: * * \code #define BLAZE_USE_VECTORIZATION 1 \endcode * * // It is also possible to (de-)activate vectorization via command line or by * defining this symbol // manually before including any \b Blaze header * file: * * \code #define BLAZE_USE_VECTORIZATION 1 #include <blaze/Blaze.h> \endcode * * // In case the switch is set to 1, vectorization is enabled and the \b Blaze * library is allowed // to use intrinsics to speed up computations. In case * the switch is set to 0, vectorization is // disabled entirely and the \b * Blaze library chooses default, non-vectorized functionality for // the * operations. Note that deactivating the vectorization may pose a severe * performance // limitation for a large number of operations! // // // \n * \section thresholds Thresholds // <hr> // // For many computations \b * Blaze distinguishes between small and large vectors and matrices. // This * separation is especially important for the parallel execution of * computations, since // the use of several threads only pays off for * sufficiently large vectors and matrices. // Additionally, it also enables * \b Blaze to select kernels that are optimized for a specific // size. // * // In order to distinguish between small and large data structures \b * Blaze provides several // thresholds that can be adapted to the * characteristics of the target platform. 
For instance, // the \c * DMATDVECMULT_THRESHOLD specifies the threshold between the application of * the custom // \b Blaze kernels for small dense matrix/dense vector * multiplications and the BLAS kernels // for large multiplications. All * thresholds, including the thresholds for the OpenMP- and // thread-based * parallelization, are contained within the configuration file // * <tt><blaze/config/Thresholds.h></tt>. // // // \n \section padding Padding * // <hr> // // By default the \b Blaze library uses padding for all dense * vectors and matrices in order to // achieve maximum performance in all * operations. Due to padding, the proper alignment of data // elements can * be guaranteed and the need for remainder loops is minimized. However, on * the // downside padding introduces an additional memory overhead, which * can be large depending on // the used data type. // // The configuration * file <tt>./blaze/config/Optimizations.h</tt> provides a compile time * switch // that can be used to (de-)activate padding: * * \code #define BLAZE_USE_PADDING 1 \endcode * * // Alternatively it is possible to (de-)activate padding via command line or * by defining this // symbol manually before including any \b Blaze header * file: * * \code #define BLAZE_USE_PADDING 1 #include <blaze/Blaze.h> \endcode * * // If \c BLAZE_USE_PADDING is set to 1 padding is enabled for all dense * vectors and matrices, if // it is set to 0 padding is disabled. Note * however that disabling padding can considerably reduce // the performance * of all dense vector and matrix operations! // // // \n \section streaming * Streaming (Non-Temporal Stores) // <hr> // // For vectors and matrices * that don't fit into the cache anymore non-temporal stores can provide // a * significant performance advantage of about 20%. However, this advantage is * only in effect in // case the memory bandwidth of the target architecture * is maxed out. If the target architecture's // memory bandwidth cannot be * exhausted the use of non-temporal stores can decrease performance // * instead of increasing it. // // The configuration file * <tt>./blaze/config/Optimizations.h</tt> provides a compile time switch // * that can be used to (de-)activate streaming: * * \code #define BLAZE_USE_STREAMING 1 \endcode * * // Alternatively streaming can be (de-)activated via command line or by * defining this symbol // manually before including any \b Blaze header * file: * * \code #define BLAZE_USE_STREAMING 1 #include <blaze/Blaze.h> \endcode * * // If \c BLAZE_USE_STREAMING is set to 1 streaming is enabled, if it is set * to 0 streaming is // disabled. It is recommended to consult the target * architecture's white papers to decide whether // streaming is beneficial * or hurtful for performance. // // // \n Previous: \ref customization * &nbsp; &nbsp; Next: \ref vector_and_matrix_customization \n */ //************************************************************************************************* //**Customization of Vectors and Matrices ********************************************************** /* * !\page vector_and_matrix_customization Customization of Vectors and * Matrices // // \tableofcontents // // // \n \section custom_data_members * Custom Data Members // <hr> // // So far the \b Blaze library does not * provide a lot of flexibility to customize the data // members of existing * \ref vector_types and \ref matrix_types. However, to some extend it is // * possible to customize vectors and matrices by inheritance. 
The following * example gives an // impression on how to create a simple variation of \ref * matrix_types_custom_matrix, which // automatically takes care of acquiring * and releasing custom memory. * * \code template< typename Type // Data type of the matrix , * bool SO = defaultStorageOrder > // Storage order class MyCustomMatrix : * public CustomMatrix< Type, unaligned, unpadded, SO > { public: explicit * inline MyCustomMatrix( size_t m, size_t n ) : * CustomMatrix<Type,unaligned,unpadded,SO>() , array_( new Type[m*n] ) { * this->reset( array_.get(), m, n ); } * * private: std::unique_ptr<Type[]> array_; }; \endcode * * // Please note that this is a simplified example with the intent to show the * general approach. // The number of constructors, the memory acquisition, * and the kind of memory management can of // course be adapted to specific * requirements. Also, please note that since none of the \b Blaze // vectors * and matrices have virtual destructors polymorphic destruction cannot be * used. // // // \n \section custom_operations Custom Operations // <hr> // * // There are two approaches to extend \b Blaze with custom operations. * First, the \c map() // functions provide the possibility to execute * componentwise custom operations on vectors and // matrices. Second, it is * possible to add customized free functions. // // \n \subsection * custom_operations_map The map() Functions // // Via the unary and binary * \c map() functions it is possible to execute componentwise custom // * operations on vectors and matrices. The unary \c map() function can be * used to apply a custom // operation on each single element of a dense * vector or matrix or each non-zero element of a // sparse vector or matrix. * For instance, the following example demonstrates a custom square // root * computation on a dense matrix: * * \code blaze::DynamicMatrix<double> A, B; * * B = map( A, []( double d ) { return std::sqrt( d ); } ); \endcode * * // The binary \c map() function can be used to apply an operation pairwise to * the elements of // two dense vectors or two dense matrices. The following * example demonstrates the merging of // two matrices of double precision * values into a matrix of double precision complex numbers: * * \code blaze::DynamicMatrix<double> real{ { 2.1, -4.2 }, { 1.0, 0.6 } }; * blaze::DynamicMatrix<double> imag{ { 0.3, 1.4 }, { 2.9, -3.4 } }; * * blaze::DynamicMatrix< complex<double> > cplx; * * // Creating the matrix // ( (-2.1, 0.3) (-4.2, -1.4) ) // ( ( 1.0, * 2.9) ( 0.6, -3.4) ) cplx = map( real, imag, []( double r, double i ){ * return complex( r, i ); } ); \endcode * * // These examples demonstrate the most convenient way of defining a unary * custom operation by // passing a lambda to the \c map() function. * Alternatively, it is possible to pass a custom // functor: * * \code struct Sqrt { double operator()( double a ) const { return std::sqrt( a * ); } }; * * B = map( A, Sqrt() ); \endcode * * // In order for the functor to work in a call to \c map() it must define a * function call operator, // which accepts arguments of the type of the * according vector or matrix elements. // // Although the operation is * automatically parallelized depending on the size of the vector or // * matrix, no automatic vectorization is possible. In order to enable * vectorization, a \c load() // function can be added to the functor, which * handles the vectorized computation. 
Depending on // the data type this * function is passed one of the following \b Blaze SIMD data types: // // * <ul> // <li>SIMD data types for fundamental data types // <ul> // * <li>\c blaze::SIMDint8: Packed SIMD type for 8-bit signed integral data * types</li> // <li>\c blaze::SIMDuint8: Packed SIMD type for 8-bit * unsigned integral data types</li> // <li>\c blaze::SIMDint16: * Packed SIMD type for 16-bit signed integral data types</li> // * <li>\c blaze::SIMDuint16: Packed SIMD type for 16-bit unsigned integral * data types</li> // <li>\c blaze::SIMDint32: Packed SIMD type for * 32-bit signed integral data types</li> // <li>\c * blaze::SIMDuint32: Packed SIMD type for 32-bit unsigned integral data * types</li> // <li>\c blaze::SIMDint64: Packed SIMD type for * 64-bit signed integral data types</li> // <li>\c * blaze::SIMDuint64: Packed SIMD type for 64-bit unsigned integral data * types</li> // <li>\c blaze::SIMDfloat: Packed SIMD type for * single precision floating point data</li> // <li>\c * blaze::SIMDdouble: Packed SIMD type for double precision floating point * data</li> // </ul> // </li> // <li>SIMD data types for complex * data types // <ul> // <li>\c blaze::SIMDcint8: Packed SIMD * type for complex 8-bit signed integral data types</li> // <li>\c * blaze::SIMDcuint8: Packed SIMD type for complex 8-bit unsigned integral * data types</li> // <li>\c blaze::SIMDcint16: Packed SIMD type for * complex 16-bit signed integral data types</li> // <li>\c * blaze::SIMDcuint16: Packed SIMD type for complex 16-bit unsigned integral * data types</li> // <li>\c blaze::SIMDcint32: Packed SIMD type for * complex 32-bit signed integral data types</li> // <li>\c * blaze::SIMDcuint32: Packed SIMD type for complex 32-bit unsigned integral * data types</li> // <li>\c blaze::SIMDcint64: Packed SIMD type for * complex 64-bit signed integral data types</li> // <li>\c * blaze::SIMDcuint64: Packed SIMD type for complex 64-bit unsigned integral * data types</li> // <li>\c blaze::SIMDcfloat: Packed SIMD type for * complex single precision floating point data</li> // <li>\c * blaze::SIMDcdouble: Packed SIMD type for complex double precision floating * point data</li> // </ul> // </li> // </ul> // // All SIMD types * provide the \c value data member for a direct access to the underlying * intrinsic // data element. In the following example, this intrinsic * element is passed to the AVX function // \c _mm256_sqrt_pd(): * * \code struct Sqrt { double operator()( double a ) const { return std::sqrt( a * ); } * * SIMDdouble load( const SIMDdouble& a ) const { return _mm256_sqrt_pd( a.value * ); } }; \endcode * * // In this example, whenever vectorization is generally applicable, the \c * load() function is // called instead of the function call operator for as * long as the number of remaining elements // is larger-or-equal to the * width of the packed SIMD type. In all other cases (which also // includes * peel-off and remainder loops) the scalar operation is used. // // Please * note that this example has two drawbacks: First, it will only compile in * case the // intrinsic \c _mm256_sqrt_pd() function is available (i.e. when * AVX is active). Second, the // availability of AVX is not taken into * account. The first drawback can be alleviated by making // the \c load() * function a function template. 
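//
// The reason is that a member function template is only instantiated when it is actually called,
// so the functor remains compilable on targets without AVX as long as the vectorized code path
// is not used. A sketch of this intermediate step:
//
   \code
   struct Sqrt
   {
      double operator()( double a ) const
      {
         return std::sqrt( a );
      }

      template< typename T >
      T load( const T& a ) const
      {
         return _mm256_sqrt_pd( a.value );  // Only instantiated if the vectorized path is used
      }
   };
   \endcode
//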
The second drawback can be dealt with by * adding a // \c simdEnabled() function template to the functor: * * \code struct Sqrt { double operator()( double a ) const { return std::sqrt( a * ); } * * template< typename T > T load( const T& a ) const { return _mm256_sqrt_pd( * a.value ); } * * template< typename T > static constexpr bool simdEnabled() { #if * defined(__AVX__) return true; #else return false; #endif } }; \endcode * * // The \c simdEnabled() function must be a \c static, \c constexpr function * and must return whether // or not vectorization is available for the given * data type \c T. In case the function returns // \c true, the \c load() * function is used for a vectorized evaluation, in case the function // * returns \c false, \c load() is not called. // // Note that this is a * simplified example that is only working when used for dense vectors and // * matrices with double precision floating point elements. The following code * shows the complete // implementation of the according functor that is used * within the \b Blaze library. The \b Blaze // \c Sqrt functor is working * for all data types that are providing a square root operation: * * \code namespace blaze { * * struct Sqrt { template< typename T > BLAZE_ALWAYS_INLINE auto operator()( * const T& a ) const { return sqrt( a ); } * * template< typename T > static constexpr bool simdEnabled() { return * HasSIMDSqrt<T>::value; } * * template< typename T > BLAZE_ALWAYS_INLINE auto load( const T& a ) const { * BLAZE_CONSTRAINT_MUST_BE_SIMD_PACK( T ); return sqrt( a ); } }; * * } // namespace blaze \endcode * * // The same approach can be taken for binary custom operations. The following * code demonstrates // the \c Min functor of the \b Blaze library, which is * working for all data types that provide // a \c min() operation: * * \code struct Min { explicit inline Min() {} * * template< typename T1, typename T2 > BLAZE_ALWAYS_INLINE decltype(auto) * operator()( const T1& a, const T2& b ) const { return min( a, b ); } * * template< typename T1, typename T2 > static constexpr bool simdEnabled() { * return HasSIMDMin<T1,T2>::value; } * * template< typename T1, typename T2 > BLAZE_ALWAYS_INLINE decltype(auto) load( * const T1& a, const T2& b ) const { BLAZE_CONSTRAINT_MUST_BE_SIMD_PACK( T1 * ); BLAZE_CONSTRAINT_MUST_BE_SIMD_PACK( T2 ); return min( a, b ); } }; * \endcode * * // For more information on the available \b Blaze SIMD data types and * functions, please see the // SIMD module in the complete \b Blaze * documentation. // // \n \subsection custom_operations_free_functions Free * Functions // // In order to extend \b Blaze with new functionality it is * possible to add free functions. Free // functions can be used either as * wrappers around calls to the map() function or to implement // general, * non-componentwise operations. The following two examples will demonstrate * both ideas. // // The first example shows the \c setToZero() function, * which resets a sparse matrix to zero // without affecting the sparsity * pattern. It is implemented as a convenience wrapper around // the map() * function: * * \code template< typename MT // Type of the sparse matrix , bool SO > // * Storage order void setToZero( blaze::SparseMatrix<MT,SO>& mat ) { (~mat) = * blaze::map( ~mat, []( int ){ return 0; } ); } \endcode * * // The blaze::SparseMatrix class template is the base class for all kinds of * sparse matrices and // provides an abstraction from the actual type \c MT * of the sparse matrix. 
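//
// A minimal usage sketch of the wrapper defined above (assuming a compressed matrix whose
// element type matches the lambda, i.e. \c int):
//
   \code
   blaze::CompressedMatrix<int> S( 100UL, 100UL );
   // ... Inserting elements into S

   setToZero( S );  // All stored elements are set to 0, the sparsity pattern is preserved
   \endcode
//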
However, due to the // <a * href="https://en.wikipedia.org/wiki/Curiously_recurring_template_pattern">C * uriously Recurring Template Pattern (CRTP)</a> // it also enables a * conversion back to the actual type. This downcast is performed via the * tilde // operator (i.e. \c operator~()). The template parameter \c SO * represents the storage order // (blaze::rowMajor or blaze::columnMajor) of * the matrix. // // The second example shows the \c countZeros() function, * which counts the number of values, which // are exactly zero, in a dense, * row-major matrix: * * \code template< typename MT > size_t countZeros( * blaze::DenseMatrix<MT,rowMajor>& mat ) { const size_t M( (~mat).rows() ); * const size_t N( (~mat).columns() ); size_t count( 0UL ); * * for( size_t i=0UL; i<M; ++i ) { for( size_t j=0UL; j<N; ++j ) { if( * blaze::isDefault<strict>( (~mat)(i,j) ) ) ++count; } } * * return count; } \endcode * * // The blaze::DenseMatrix class template is the base class for all kinds of * dense matrices. Again, // it is possible to perform the conversion to the * actual type via the tilde operator. // // The following two listings show * the declarations of all vector and matrix base classes, which // can be * used for custom free functions: * * \code template< typename VT // Concrete type of the dense or sparse vector , * bool TF > // Transpose flag (blaze::columnVector or blaze::rowVector) * class Vector; * * template< typename VT // Concrete type of the dense vector , bool TF > // * Transpose flag (blaze::columnVector or blaze::rowVector) class * DenseVector; * * template< typename VT // Concrete type of the sparse vector , bool TF > * // Transpose flag (blaze::columnVector or blaze::rowVector) class * SparseVector; \endcode * * \code template< typename MT // Concrete type of the dense or sparse matrix , * bool SO > // Storage order (blaze::rowMajor or blaze::columnMajor) * class Matrix; * * template< typename MT // Concrete type of the dense matrix , bool SO > // * Storage order (blaze::rowMajor or blaze::columnMajor) class DenseMatrix; * * template< typename MT // Concrete type of the sparse matrix , bool SO > * // Storage order (blaze::rowMajor or blaze::columnMajor) class * SparseMatrix; \endcode * * // \n \section custom_data_types Custom Data Types // <hr> // // The \b Blaze * library tries hard to make the use of custom data types as convenient, * easy and // intuitive as possible. However, unfortunately it is not * possible to meet the requirements of // all possible data types. Thus it * might be necessary to provide \b Blaze with some additional // information * about the data type. The following sections give an overview of the * necessary steps // to enable the use of the hypothetical custom data type * \c custom::double_t for vector and // matrix operations. For example: * * \code blaze::DynamicVector<custom::double_t> a, b, c; // ... Resizing and * initialization c = a + b; \endcode * * // The \b Blaze library assumes that the \c custom::double_t data type * provides \c operator+() // for additions, \c operator-() for subtractions, * \c operator*() for multiplications and // \c operator/() for divisions. If * any of these functions is missing it is necessary to implement // the * operator to perform the according operation. 
For this example we assume * that the custom // data type provides the four following functions instead * of operators: * * \code namespace custom { * * double_t add ( const double_t& a, const double_t b ); double_t sub ( const * double_t& a, const double_t b ); double_t mult( const double_t& a, const * double_t b ); double_t div ( const double_t& a, const double_t b ); * * } // namespace custom \endcode * * // The following implementations will satisfy the requirements of the \b * Blaze library: * * \code inline custom::double_t operator+( const custom::double_t& a, const * custom::double_t& b ) { return add( a, b ); } * * inline custom::double_t operator-( const custom::double_t& a, const * custom::double_t& b ) { return sub( a, b ); } * * inline custom::double_t operator*( const custom::double_t& a, const * custom::double_t& b ) { return mult( a, b ); } * * inline custom::double_t operator/( const custom::double_t& a, const * custom::double_t& b ) { return div( a, b ); } \endcode * * // \b Blaze will use all the information provided with these functions (for * instance the return // type) to properly handle the operations. In the * rare case that the return type cannot be // automatically determined from * the operator it might be additionally necessary to provide a // * specialization of the following four \b Blaze class templates: * * \code namespace blaze { * * template<> struct AddTrait<custom::double_t,custom::double_t> { using Type = * custom::double_t; }; * * template<> struct SubTrait<custom::double_t,custom::double_t> { using Type = * custom::double_t; }; * * template<> struct MultTrait<custom::double_t,custom::double_t> { using Type = * custom::double_t; }; * * template<> struct DivTrait<custom::double_t,custom::double_t> { using Type = * custom::double_t; }; * * } // namespace blaze \endcode * * // The same steps are necessary if several custom data types need to be * combined (as for instance // \c custom::double_t and \c custom::float_t). * Note that in this case both permutations need to // be taken into account: * * \code custom::double_t operator+( const custom::double_t& a, const * custom::float_t& b ); custom::double_t operator+( const custom::float_t& * a, const custom::double_t& b ); // ... \endcode * * // Please note that only built-in data types apply for vectorization and thus * custom data types // cannot achieve maximum performance! // // // \n * Previous: \ref configuration_files &nbsp; &nbsp; Next: \ref * custom_operations \n */ //************************************************************************************************* //**Customization of the Error Reporting Mechanism ************************************************* /* * !\page error_reporting_customization Customization of the Error Reporting * Mechanism // // \tableofcontents // // // \n \section * error_reporting_background Background // <hr> // // The default way of \b * Blaze to report errors of any kind is to throw a standard exception. // * However, although in general this approach works well, in certain * environments and under // special circumstances exceptions may not be the * mechanism of choice and a different error // reporting mechanism may be * desirable. For this reason, \b Blaze provides several macros, // which * enable the customization of the error reporting mechanism. Via these * macros it is // possible to replace the standard exceptions by some other * exception type or a completely // different approach to report errors. 
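//
// As a point of reference, the default behavior looks as follows (a minimal sketch): an invalid
// operation such as adding two vectors of different size is reported by throwing a standard
// exception, in this case \c std::invalid_argument:
//
   \code
   blaze::DynamicVector<double> a( 3UL ), b( 4UL ), c;

   try {
      c = a + b;  // Fails, the vector sizes do not match
   }
   catch( const std::invalid_argument& ex ) {
      // ... Error handling
   }
   \endcode
//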
// * // // \n \section error_reporting_general_customization Customization of * the Reporting Mechanism // <hr> // // In some cases it might be necessary * to adapt the entire error reporting mechanism and to // replace it by some * other means to signal failure. The primary macro for this purpose is the * // \c BLAZE_THROW macro: * * \code #define BLAZE_THROW( EXCEPTION ) \ throw EXCEPTION \endcode * * // This macro represents the default mechanism of the \b Blaze library to * report errors of any // kind. In order to customize the error reporing * mechanism all that needs to be done is to // define the macro prior to * including any \b Blaze header file. This will cause the \b Blaze // * specific mechanism to be overridden. The following example demonstrates * this by replacing // exceptions by a call to a \c log() function and a * direct call to abort: * * \code #define BLAZE_THROW( EXCEPTION ) \ log( "..." ); \ abort() * * #include <blaze/Blaze.h> \endcode * * // Doing this will trigger a call to \c log() and an abort instead of * throwing an exception // whenever an error (such as an invalid argument) * is detected. // // \note It is possible to execute several statements * instead of executing a single statement to // throw an exception. Also * note that it is recommended to define the macro such that a subsequent // * semicolon is required! // // \warning This macro is provided with the * intention to assist in adapting \b Blaze to special // conditions and * environments. However, the customization of the error reporting mechanism * via // this macro can have a significant effect on the library. Thus be * advised to use the macro // with due care! // // // \n \section * error_reporting_exception_customization Customization of the Type of * Exceptions // <hr> // // In addition to the customization of the entire * error reporting mechanism it is also possible // to customize the type of * exceptions being thrown. This can be achieved by customizing any // number * of the following macros: * * \code #define BLAZE_THROW_BAD_ALLOC \ BLAZE_THROW( std::bad_alloc() ) * * #define BLAZE_THROW_LOGIC_ERROR( MESSAGE ) \ BLAZE_THROW( std::logic_error( * MESSAGE ) ) * * #define BLAZE_THROW_INVALID_ARGUMENT( MESSAGE ) \ BLAZE_THROW( * std::invalid_argument( MESSAGE ) ) * * #define BLAZE_THROW_LENGTH_ERROR( MESSAGE ) \ BLAZE_THROW( std::length_error( * MESSAGE ) ) * * #define BLAZE_THROW_OUT_OF_RANGE( MESSAGE ) \ BLAZE_THROW( std::out_of_range( * MESSAGE ) ) * * #define BLAZE_THROW_RUNTIME_ERROR( MESSAGE ) \ BLAZE_THROW( * std::runtime_error( MESSAGE ) ) \endcode * * // In order to customize the type of exception the according macro has to be * defined prior to // including any \b Blaze header file. This will override * the \b Blaze default behavior. The // following example demonstrates this * by replacing \c std::invalid_argument by a custom // exception type: * * \code class InvalidArgument { public: InvalidArgument(); explicit * InvalidArgument( const std::string& message ); // ... }; * * #define BLAZE_THROW_INVALID_ARGUMENT( MESSAGE ) \ BLAZE_THROW( * InvalidArgument( MESSAGE ) ) * * #include <blaze/Blaze.h> \endcode * * // By manually defining the macro, an \c InvalidArgument exception is thrown * instead of a // \c std::invalid_argument exception. Note that it is * recommended to define the macro such // that a subsequent semicolon is * required! // // \warning These macros are provided with the intention to * assist in adapting \b Blaze to // special conditions and environments. 
* However, the customization of the type of an exception // via this macro * may have an effect on the library. Thus be advised to use the macro with * due // care! // // // \n \section error_reporting_special_errors * Customization of Special Errors // <hr> // // Last but not least it is * possible to customize the error reporting for special kinds of errors. // * This can be achieved by customizing any number of the following macros: * * \code #define BLAZE_THROW_DIVISION_BY_ZERO( MESSAGE ) \ * BLAZE_THROW_RUNTIME_ERROR( MESSAGE ) * * #define BLAZE_THROW_LAPACK_ERROR( MESSAGE ) \ BLAZE_THROW_RUNTIME_ERROR( * MESSAGE ) \endcode * * // As explained in the previous sections, in order to customize the handling * of special errors // the according macro has to be defined prior to * including any \b Blaze header file. This will // override the \b Blaze * default behavior. // // // \n Previous: \ref * vector_and_matrix_customization &nbsp; &nbsp; Next: \ref blas_functions \n */ //************************************************************************************************* //**BLAS Functions ********************************************************************************* /* * !\page blas_functions BLAS Functions // // \tableofcontents // // // For * vector/vector, matrix/vector and matrix/matrix multiplications with large * dense matrices // \b Blaze relies on the efficiency of BLAS libraries. For * this purpose, \b Blaze implements // several convenient C++ wrapper * functions for several BLAS functions. The following sections // give a * complete overview of all available BLAS level 1, 2 and 3 functions. // // * // \n \section blas_level_1 BLAS Level 1 // <hr> // // \subsection * blas_level_1_dotu Dot Product (dotu) // // The following wrapper functions * provide a generic interface for the BLAS functions for the // dot product * of two dense vectors (\c sdot(), \c ddot(), \c cdotu_sub(), and \c * zdotu_sub()): * * \code namespace blaze { * * float dotu( int n, const float* x, int incX, const float* y, int incY ); * * double dotu( int n, const double* x, int incX, const double* y, int incY ); * * complex<float> dotu( int n, const complex<float>* x, int incX, const * complex<float>* y, int incY ); * * complex<double> dotu( int n, const complex<double>* x, int incX, const * complex<double>* y, int incY ); * * template< typename VT1, bool TF1, typename VT2, bool TF2 > ElementType_<VT1> * dotu( const DenseVector<VT1,TF1>& x, const DenseVector<VT2,TF2>& y ); * * } // namespace blaze \endcode * * // \subsection blas_level_1_dotc Complex Conjugate Dot Product (dotc) // // * The following wrapper functions provide a generic interface for the BLAS * functions for the // complex conjugate dot product of two dense vectors * (\c sdot(), \c ddot(), \c cdotc_sub(), // and \c zdotc_sub()): * * \code namespace blaze { * * float dotc( int n, const float* x, int incX, const float* y, int incY ); * * double dotc( int n, const double* x, int incX, const double* y, int incY ); * * complex<float> dotc( int n, const complex<float>* x, int incX, const * complex<float>* y, int incY ); * * complex<double> dotc( int n, const complex<double>* x, int incX, const * complex<double>* y, int incY ); * * template< typename VT1, bool TF1, typename VT2, bool TF2 > ElementType_<VT1> * dotc( const DenseVector<VT1,TF1>& x, const DenseVector<VT2,TF2>& y ); * * } // namespace blaze \endcode * * // \subsection blas_level_1_axpy Axpy Product (axpy) // // The following * wrapper functions provide a generic interface for the BLAS 
functions for * the // axpy product of two dense vectors (\c saxpy(), \c daxpy(), \c * caxpy(), and \c zaxpy()): * * \code namespace blaze { * * void axpy( int n, float alpha, const float* x, int incX, float* y, int incY * ); * * void axpy( int n, double alpha, const double* x, int incX, double* y, int * incY ); * * void axpy( int n, complex<float> alpha, const complex<float>* x, int incX, * complex<float>* y, int incY ); * * void axpy( int n, complex<double> alpha, const complex<double>* x, int incX, * complex<double>* y, int incY ); * * template< typename VT1, bool TF1, typename VT2, bool TF2, typename ST > void * axpy( const DenseVector<VT1,TF1>& x, const DenseVector<VT2,TF2>& y, ST * alpha ); * * } // namespace blaze \endcode * * // \n \section blas_level_2 BLAS Level 2 // <hr> // // \subsection * blas_level_2_gemv General Matrix/Vector Multiplication (gemv) // // The * following wrapper functions provide a generic interface for the BLAS * functions for the // general matrix/vector multiplication (\c sgemv(), \c * dgemv(), \c cgemv(), and \c zgemv()): * * \code namespace blaze { * * void gemv( CBLAS_ORDER layout, CBLAS_TRANSPOSE transA, int m, int n, float * alpha, const float* A, int lda, const float* x, int incX, float beta, * float* y, int incY ); * * void gemv( CBLAS_ORDER layout, CBLAS_TRANSPOSE transA, int m, int n, double * alpha, const double* A, int lda, const double* x, int incX, double beta, * double* y, int incY ); * * void gemv( CBLAS_ORDER layout, CBLAS_TRANSPOSE transA, int m, int n, * complex<float> alpha, const complex<float>* A, int lda, const * complex<float>* x, int incX, complex<float> beta, complex<float>* y, int * incY ); * * void gemv( CBLAS_ORDER layout, CBLAS_TRANSPOSE transA, int m, int n, * complex<double> alpha, const complex<double>* A, int lda, const * complex<double>* x, int incX, complex<double> beta, complex<double>* y, * int incY ); * * template< typename VT1, typename MT1, bool SO, typename VT2, typename ST > * void gemv( DenseVector<VT1,false>& y, const DenseMatrix<MT1,SO>& A, const * DenseVector<VT2,false>& x, ST alpha, ST beta ); * * template< typename VT1, typename VT2, typename MT1, bool SO, typename ST > * void gemv( DenseVector<VT1,true>& y, const DenseVector<VT2,true>& x, const * DenseMatrix<MT1,SO>& A, ST alpha, ST beta ); * * } // namespace blaze \endcode * * // \n \subsection blas_level_2_trmv Triangular Matrix/Vector Multiplication * (trmv) // // The following wrapper functions provide a generic interface * for the BLAS functions for the // matrix/vector multiplication with a * triangular matrix (\c strmv(), \c dtrmv(), \c ctrmv(), // and \c ztrmv()): * * \code namespace blaze { * * void trmv( CBLAS_ORDER order, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, * CBLAS_DIAG diag, int n, const float* A, int lda, float* x, int incX ); * * void trmv( CBLAS_ORDER order, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, * CBLAS_DIAG diag, int n, const double* A, int lda, double* x, int incX ); * * void trmv( CBLAS_ORDER order, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, * CBLAS_DIAG diag, int n, const complex<float>* A, int lda, complex<float>* * x, int incX ); * * void trmv( CBLAS_ORDER order, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, * CBLAS_DIAG diag, int n, const complex<double>* A, int lda, complex<double>* * x, int incX ); * * template< typename VT, typename MT, bool SO > void trmv( * DenseVector<VT,false>& x, const DenseMatrix<MT,SO>& A, CBLAS_UPLO uplo ); * * template< typename VT, typename MT, bool SO > void trmv( * DenseVector<VT,true>& x, const 
DenseMatrix<MT,SO>& A, CBLAS_UPLO uplo );

   } // namespace blaze
   \endcode

// \n \section blas_level_3 BLAS Level 3
// <hr>
//
// \subsection blas_level_3_gemm General Matrix/Matrix Multiplication (gemm)
//
// The following wrapper functions provide a generic interface for the BLAS functions for the
// general matrix/matrix multiplication (\c sgemm(), \c dgemm(), \c cgemm(), and \c zgemm()):

   \code
   namespace blaze {

   void gemm( CBLAS_ORDER order, CBLAS_TRANSPOSE transA, CBLAS_TRANSPOSE transB,
              int m, int n, int k, float alpha, const float* A, int lda,
              const float* B, int ldb, float beta, float* C, int ldc );

   void gemm( CBLAS_ORDER order, CBLAS_TRANSPOSE transA, CBLAS_TRANSPOSE transB,
              int m, int n, int k, double alpha, const double* A, int lda,
              const double* B, int ldb, double beta, double* C, int ldc );

   void gemm( CBLAS_ORDER order, CBLAS_TRANSPOSE transA, CBLAS_TRANSPOSE transB,
              int m, int n, int k, complex<float> alpha, const complex<float>* A, int lda,
              const complex<float>* B, int ldb, complex<float> beta, complex<float>* C, int ldc );

   void gemm( CBLAS_ORDER order, CBLAS_TRANSPOSE transA, CBLAS_TRANSPOSE transB,
              int m, int n, int k, complex<double> alpha, const complex<double>* A, int lda,
              const complex<double>* B, int ldb, complex<double> beta, complex<double>* C, int ldc );

   template< typename MT1, bool SO1, typename MT2, bool SO2, typename MT3, bool SO3, typename ST >
   void gemm( DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A,
              const DenseMatrix<MT3,SO3>& B, ST alpha, ST beta );

   } // namespace blaze
   \endcode

// \n \subsection blas_level_3_trmm Triangular Matrix/Matrix Multiplication (trmm)
//
// The following wrapper functions provide a generic interface for the BLAS functions for the
// matrix/matrix multiplication with a triangular matrix (\c strmm(), \c dtrmm(), \c ctrmm(), and
// \c ztrmm()):

   \code
   namespace blaze {

   void trmm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA,
              CBLAS_DIAG diag, int m, int n, float alpha, const float* A, int lda,
              float* B, int ldb );

   void trmm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA,
              CBLAS_DIAG diag, int m, int n, double alpha, const double* A, int lda,
              double* B, int ldb );

   void trmm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA,
              CBLAS_DIAG diag, int m, int n, complex<float> alpha, const complex<float>* A, int lda,
              complex<float>* B, int ldb );

   void trmm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA,
              CBLAS_DIAG diag, int m, int n, complex<double> alpha, const complex<double>* A, int lda,
              complex<double>* B, int ldb );

   template< typename MT1, bool SO1, typename MT2, bool SO2, typename ST >
   void trmm( DenseMatrix<MT1,SO1>& B, const DenseMatrix<MT2,SO2>& A,
              CBLAS_SIDE side, CBLAS_UPLO uplo, ST alpha );

   } // namespace blaze
   \endcode

// \n \subsection blas_level_3_trsm Triangular System Solver (trsm)
//
// The following wrapper functions provide a generic interface for the BLAS functions for solving
// a triangular system of equations (\c strsm(), \c dtrsm(), \c ctrsm(), and \c ztrsm()):

   \code
   namespace blaze {

   void trsm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA,
              CBLAS_DIAG diag, int m, int n, float alpha, const float* A, int lda,
              float* B, int ldb );

   void trsm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int m,
int n, double alpha, const * double* A, int lda, double* B, int ldb ); * * void trsm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, * CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int m, int n, complex<float> * alpha, const complex<float>* A, int lda, complex<float>* B, int ldb ); * * void trsm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, * CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int m, int n, complex<double> * alpha, const complex<double>* A, int lda, complex<double>* B, int ldb ); * * template< typename MT, bool SO, typename VT, bool TF, typename ST > void * trsm( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, CBLAS_SIDE side, * CBLAS_UPLO uplo, ST alpha ); * * template< typename MT1, bool SO1, typename MT2, bool SO2, typename ST > void * trsm( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, CBLAS_SIDE * side, CBLAS_UPLO uplo, ST alpha ); * * } // namespace blaze \endcode * * // \n Previous: \ref error_reporting_customization &nbsp; &nbsp; Next: \ref * lapack_functions \n */ //************************************************************************************************* //**LAPACK Functions ******************************************************************************* /* * !\page lapack_functions LAPACK Functions // // \tableofcontents // // // * \n \section lapack_introction Introduction // <hr> // // The \b Blaze * library makes extensive use of the LAPACK functionality for various * compute tasks // (including the decomposition, inversion and the * computation of the determinant of dense matrices). // For this purpose, \b * Blaze implements several convenient C++ wrapper functions for all required * // LAPACK functions. The following sections give a complete overview of * all available LAPACK wrapper // functions. For more details on the * individual LAPACK functions see the \b Blaze function // documentation or * the LAPACK online documentation browser: // // * http://www.netlib.org/lapack/explore-html/ // // Most of the wrapper * functions are implemented as thin wrappers around LAPACK functions. They * // provide the parameters of the original LAPACK functions and thus * provide maximum flexibility: * * \code constexpr size_t N( 100UL ); * * blaze::DynamicMatrix<double,blaze::columnMajor> A( N, N ); // ... * Initializing the matrix * * const int m ( numeric_cast<int>( A.rows() ) ); // == N const int n * ( numeric_cast<int>( A.columns() ) ); // == N const int lda ( * numeric_cast<int>( A.spacing() ) ); // >= N const int lwork( n*lda ); * * const std::unique_ptr<int[]> ipiv( new int[N] ); // No initialization * required const std::unique_ptr<double[]> work( new double[N] ); // No * initialization required * * int info( 0 ); * * getrf( m, n, A.data(), lda, ipiv.get(), &info ); // Reports * failure via 'info' getri( n, A.data(), lda, ipiv.get(), work.get(), lwork, * &info ); // Reports failure via 'info' \endcode * * // Additionally, \b Blaze provides wrappers that provide a higher level of * abstraction. These // wrappers provide a maximum of convenience: * * \code constexpr size_t N( 100UL ); * * blaze::DynamicMatrix<double,blaze::columnMajor> A( N, N ); // ... * Initializing the matrix * * const std::unique_ptr<int[]> ipiv( new int[N] ); // No initialization * required * * getrf( A, ipiv.get() ); // Cannot fail getri( A, ipiv.get() ); // Reports * failure via exception \endcode * * // \note All functions only work for general, non-adapted matrices with \c * float, \c double, // \c complex<float>, or \c complex<double> element * type. 
The attempt to call the function with // adaptors or matrices of any * other element type results in a compile time error! // // \note All * functions can only be used if a fitting LAPACK library is available and * linked to // the final executable. Otherwise a call to this function will * result in a linker error. // // \note For performance reasons all * functions do only provide the basic exception safety guarantee, // i.e. in * case an exception is thrown the given matrix may already have been * modified. // // // \n \section lapack_decomposition Matrix Decomposition * // <hr> // // The following functions decompose/factorize the given dense * matrix. Based on this decomposition // the matrix can be inverted or used * to solve a linear system of equations. // // // \n \subsection * lapack_lu_decomposition LU Decomposition // // The following functions * provide an interface for the LAPACK functions \c sgetrf(), \c dgetrf(), // * \c cgetrf(), and \c zgetrf(), which compute the LU decomposition for the * given general matrix: * * \code namespace blaze { * * void getrf( int m, int n, float* A, int lda, int* ipiv, int* info ); * * void getrf( int m, int n, double* A, int lda, int* ipiv, int* info ); * * void getrf( int m, int n, complex<float>* A, int lda, int* ipiv, int* info ); * * void getrf( int m, int n, complex<double>* A, int lda, int* ipiv, int* info * ); * * template< typename MT, bool SO > void getrf( DenseMatrix<MT,SO>& A, int* ipiv * ); * * } // namespace blaze \endcode * * // The decomposition has the form * * \f[ A = P \cdot L \cdot U, \f]\n * * // where \c P is a permutation matrix, \c L is a lower unitriangular matrix, * and \c U is an upper // triangular matrix. The resulting decomposition is * stored within \a A: In case of a column-major // matrix, \c L is stored in * the lower part of \a A and \c U is stored in the upper part. The unit // * diagonal elements of \c L are not stored. In case \a A is a row-major * matrix the result is // transposed. // // \note The LU decomposition will * never fail, even for singular matrices. However, in case of a // singular * matrix the resulting decomposition cannot be used for a matrix inversion * or solving // a linear system of equations. // // // \n \subsection * lapack_ldlt_decomposition LDLT Decomposition // // The following functions * provide an interface for the LAPACK functions \c ssytrf(), \c dsytrf(), // * \c csytrf(), and \c zsytrf(), which compute the LDLT (Bunch-Kaufman) * decomposition for the given // symmetric indefinite matrix: * * \code namespace blaze { * * void sytrf( char uplo, int n, float* A, int lda, int* ipiv, float* work, int * lwork, int* info ); * * void sytrf( char uplo, int n, double* A, int lda, int* ipiv, double* work, * int lwork, int* info ); * * void sytrf( char uplo, int n, complex<float>* A, int lda, int* ipiv, * complex<float>* work, int lwork, int* info ); * * void sytrf( char uplo, int n, complex<double>* A, int lda, int* ipiv, * complex<double>* work, int lwork, int* info ); * * template< typename MT, bool SO > void sytrf( DenseMatrix<MT,SO>& A, char * uplo, int* ipiv ); * * } // namespace blaze \endcode * * // The decomposition has the form * * \f[ A = U D U^{T} \texttt{ (if uplo = 'U'), or } A = L D L^{T} \texttt{ (if * uplo = 'L'), } \f] * * // where \c U (or \c L) is a product of permutation and unit upper (lower) * triangular matrices, // and \c D is symmetric and block diagonal with * 1-by-1 and 2-by-2 diagonal blocks. 
// The resulting decomposition is stored within \a A: In case \a uplo is set to \c 'L' the result
// is stored in the lower part of the matrix and the upper part remains untouched, in case \a uplo
// is set to \c 'U' the result is stored in the upper part and the lower part remains untouched.
//
// \note The Bunch-Kaufman decomposition will never fail, even for singular matrices. However, in
// case of a singular matrix the resulting decomposition cannot be used for a matrix inversion or
// solving a linear system of equations.
//
//
// \n \subsection lapack_ldlh_decomposition LDLH Decomposition
//
// The following functions provide an interface for the LAPACK functions \c chetrf() and
// \c zhetrf(), which compute the LDLH (Bunch-Kaufman) decomposition for the given Hermitian
// indefinite matrix:

   \code
   namespace blaze {

   void hetrf( char uplo, int n, complex<float>* A, int lda, int* ipiv,
               complex<float>* work, int lwork, int* info );

   void hetrf( char uplo, int n, complex<double>* A, int lda, int* ipiv,
               complex<double>* work, int lwork, int* info );

   template< typename MT, bool SO >
   void hetrf( DenseMatrix<MT,SO>& A, char uplo, int* ipiv );

   } // namespace blaze
   \endcode

// The decomposition has the form

   \f[ A = U D U^{H} \texttt{ (if uplo = 'U'), or } A = L D L^{H} \texttt{ (if uplo = 'L'), } \f]

// where \c U (or \c L) is a product of permutation and unit upper (lower) triangular matrices,
// and \c D is Hermitian and block diagonal with 1-by-1 and 2-by-2 diagonal blocks. The resulting
// decomposition is stored within \a A: In case \a uplo is set to \c 'L' the result is stored in
// the lower part of the matrix and the upper part remains untouched, in case \a uplo is set to
// \c 'U' the result is stored in the upper part and the lower part remains untouched.
//
// \note The Bunch-Kaufman decomposition will never fail, even for singular matrices. However, in
// case of a singular matrix the resulting decomposition cannot be used for a matrix inversion or
// solving a linear system of equations.
//
//
// \n \subsection lapack_llh_decomposition Cholesky Decomposition
//
// The following functions provide an interface for the LAPACK functions \c spotrf(), \c dpotrf(),
// \c cpotrf(), and \c zpotrf(), which compute the Cholesky (LLH) decomposition for the given
// positive definite matrix:

   \code
   namespace blaze {

   void potrf( char uplo, int n, float* A, int lda, int* info );

   void potrf( char uplo, int n, double* A, int lda, int* info );

   void potrf( char uplo, int n, complex<float>* A, int lda, int* info );

   void potrf( char uplo, int n, complex<double>* A, int lda, int* info );

   template< typename MT, bool SO >
   void potrf( DenseMatrix<MT,SO>& A, char uplo );

   } // namespace blaze
   \endcode

// The decomposition has the form

   \f[ A = U^{T} U \texttt{ (if uplo = 'U'), or } A = L L^{T} \texttt{ (if uplo = 'L'), } \f]

// where \c U is an upper triangular matrix and \c L is a lower triangular matrix. The Cholesky
// decomposition fails if the given matrix \a A is not a positive definite matrix. In this case
// a \a std::invalid_argument exception is thrown.
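//
// A minimal usage sketch of the high-level wrapper (assuming a symmetric positive definite
// matrix):
//
   \code
   blaze::DynamicMatrix<double,blaze::columnMajor> A{ { 4.0, 1.0 },
                                                      { 1.0, 3.0 } };

   blaze::potrf( A, 'L' );  // On exit the lower part of A contains the Cholesky factor L
   \endcode
//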
// // // \n \subsection * lapack_qr_decomposition QR Decomposition // // The following functions * provide an interface for the LAPACK functions \c sgeqrf(), \c dgeqrf(), // * \c cgeqrf(), and \c zgeqrf(), which compute the QR decomposition of the * given general matrix: * * \code namespace blaze { * * void geqrf( int m, int n, float* A, int lda, float* tau, float* work, int * lwork, int* info ); * * void geqrf( int m, int n, double* A, int lda, double* tau, double* work, int * lwork, int* info ); * * void geqrf( int m, int n, complex<float>* A, int lda, complex<float>* tau, * complex<float>* work, int lwork, int* info ); * * void geqrf( int m, int n, complex<double>* A, int lda, complex<double>* tau, * complex<double>* work, int lwork, int* info ); * * template< typename MT, bool SO > void geqrf( DenseMatrix<MT,SO>& A, typename * MT::ElementType* tau ); * * } // namespace blaze \endcode * * // The decomposition has the form * * \f[ A = Q \cdot R, \f] * * // where the \c Q is represented as a product of elementary reflectors * * \f[ Q = H(1) H(2) . . . H(k) \texttt{, with k = min(m,n).} \f] * * // Each H(i) has the form * * \f[ H(i) = I - tau \cdot v \cdot v^T, \f] * * // where \c tau is a real scalar, and \c v is a real vector with <tt>v(0:i-1) * = 0</tt> and // <tt>v(i) = 1</tt>. <tt>v(i+1:m)</tt> is stored on exit in * <tt>A(i+1:m,i)</tt>, and \c tau // in \c tau(i). Thus on exit the elements * on and above the diagonal of the matrix contain the // min(\a m,\a * n)-by-\a n upper trapezoidal matrix \c R (\c R is upper triangular if \a m * >= \a n); // the elements below the diagonal, with the array \c tau, * represent the orthogonal matrix \c Q as // a product of min(\a m,\a n) * elementary reflectors. // // The following functions provide an interface * for the LAPACK functions \c sorgqr(), \c dorgqr(), // \c cungqr(), and \c * zunqqr(), which reconstruct the \c Q matrix from a QR decomposition: * * \code namespace blaze { * * void orgqr( int m, int n, int k, float* A, int lda, const float* tau, float* * work, int lwork, int* info ); * * void orgqr( int m, int n, int k, double* A, int lda, const double* tau, * double* work, int lwork, int* info ); * * void ungqr( int m, int n, int k, complex<float>* A, int lda, const * complex<float>* tau, complex<float>* work, int lwork, int* info ); * * void ungqr( int m, int n, int k, complex<double>* A, int lda, const * complex<double>* tau, complex<double>* work, int lwork, int* info ); * * template< typename MT, bool SO > void orgqr( DenseMatrix<MT,SO>& A, const * typename MT::ElementType* tau ); * * template< typename MT, bool SO > void ungqr( DenseMatrix<MT,SO>& A, const * typename MT::ElementType* tau ); * * } // namespace blaze \endcode * * // The following functions provide an interface for the LAPACK functions \c * sormqr(), \c dormqr(), // \c cunmqr(), and \c zunmqr(), which can be used * to multiply a matrix with the \c Q matrix from // a QR decomposition: * * \code namespace blaze { * * void ormqr( char side, char trans, int m, int n, int k, const float* A, int * lda, const float* tau, float* C, int ldc, float* work, int lwork, int* * info ); * * void ormqr( char side, char trans, int m, int n, int k, const double* A, int * lda, const double* tau, double* C, int ldc, double* work, int lwork, int* * info ); * * void unmqr( char side, char trans, int m, int n, int k, const complex<float>* * A, int lda, const complex<float>* tau, complex<float>* C, int ldc, * complex<float>* work, int lwork, int* info ); * * void unmqr( char side, char 
trans, int m, int n, int k, const complex<double>* * A, int lda, const complex<double>* tau, complex<double>* C, int ldc, * complex<double>* work, int lwork, int* info ); * * template< typename MT1, bool SO1, typename MT2, bool SO2 > void ormqr( * DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A, char side, char * trans, const ElementType_<MT2>* tau ); * * template< typename MT1, bool SO, typename MT2 > void unmqr( * DenseMatrix<MT1,SO>& C, DenseMatrix<MT2,SO>& A, char side, char trans, * ElementType_<MT2>* tau ); * * } // namespace blaze \endcode * * // \n \subsection lapack_rq_decomposition RQ Decomposition // // The * following functions provide an interface for the LAPACK functions \c * sgerqf(), \c dgerqf(), // \c cgerqf(), and \c zgerqf(), which compute the * RQ decomposition of the given general matrix: * * \code namespace blaze { * * void gerqf( int m, int n, float* A, int lda, float* tau, float* work, int * lwork, int* info ); * * void gerqf( int m, int n, double* A, int lda, double* tau, double* work, int * lwork, int* info ); * * void gerqf( int m, int n, complex<float>* A, int lda, complex<float>* tau, * complex<float>* work, int lwork, int* info ); * * void gerqf( int m, int n, complex<double>* A, int lda, complex<double>* tau, * complex<double>* work, int lwork, int* info ); * * template< typename MT, bool SO > void gerqf( DenseMatrix<MT,SO>& A, typename * MT::ElementType* tau ); * * } // namespace blaze \endcode * * // The decomposition has the form * * \f[ A = R \cdot Q, \f] * * // where the \c Q is represented as a product of elementary reflectors * * \f[ Q = H(1) H(2) . . . H(k) \texttt{, with k = min(m,n).} \f] * * // Each H(i) has the form * * \f[ H(i) = I - tau \cdot v \cdot v^T, \f] * * // where \c tau is a real scalar, and \c v is a real vector with * <tt>v(n-k+i+1:n) = 0</tt> and // <tt>v(n-k+i) = 1</tt>. * <tt>v(1:n-k+i-1)</tt> is stored on exit in <tt>A(m-k+i,1:n-k+i-1)</tt>, // * and \c tau in \c tau(i). Thus in case \a m <= \a n, the upper triangle of * the subarray // <tt>A(1:m,n-m+1:n)</tt> contains the \a m-by-\a m upper * triangular matrix \c R and in case // \a m >= \a n, the elements on and * above the (\a m-\a n)-th subdiagonal contain the \a m-by-\a n // upper * trapezoidal matrix \c R; the remaining elements in combination with the * array \c tau // represent the orthogonal matrix \c Q as a product of * min(\a m,\a n) elementary reflectors. 
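//
// A usage sketch of the high-level wrapper (a hedged example; the \c tau array must provide
// min(\a m,\a n) elements):
//
   \code
   blaze::DynamicMatrix<double,blaze::columnMajor> A( 5UL, 8UL );
   // ... Initializing the matrix

   blaze::DynamicVector<double> tau( 5UL );  // min(m,n) = 5 elements

   blaze::gerqf( A, tau.data() );  // On exit A contains R and the Householder reflectors
   \endcode
//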
// // The following functions * provide an interface for the LAPACK functions \c sorgrq(), \c dorgrq(), // * \c cungrq(), and \c zunqrq(), which reconstruct the \c Q matrix from a RQ * decomposition: * * \code namespace blaze { * * void orgrq( int m, int n, int k, float* A, int lda, const float* tau, float* * work, int lwork, int* info ); * * void orgrq( int m, int n, int k, double* A, int lda, const double* tau, * double* work, int lwork, int* info ); * * void ungrq( int m, int n, int k, complex<float>* A, int lda, const * complex<float>* tau, complex<float>* work, int lwork, int* info ); * * void ungrq( int m, int n, int k, complex<double>* A, int lda, const * complex<double>* tau, complex<double>* work, int lwork, int* info ); * * template< typename MT, bool SO > void orgrq( DenseMatrix<MT,SO>& A, const * typename MT::ElementType* tau ); * * template< typename MT, bool SO > void ungrq( DenseMatrix<MT,SO>& A, const * typename MT::ElementType* tau ); * * } // namespace blaze \endcode * * // The following functions provide an interface for the LAPACK functions \c * sormrq(), \c dormrq(), // \c cunmrq(), and \c zunmrq(), which can be used * to multiply a matrix with the \c Q matrix from // a RQ decomposition: * * \code namespace blaze { * * void ormrq( char side, char trans, int m, int n, int k, const float* A, int * lda, const float* tau, float* C, int ldc, float* work, int lwork, int* * info ); * * void ormrq( char side, char trans, int m, int n, int k, const double* A, int * lda, const double* tau, double* C, int ldc, double* work, int lwork, int* * info ); * * void unmrq( char side, char trans, int m, int n, int k, const complex<float>* * A, int lda, const complex<float>* tau, complex<float>* C, int ldc, * complex<float>* work, int lwork, int* info ); * * void unmrq( char side, char trans, int m, int n, int k, const complex<double>* * A, int lda, const complex<double>* tau, complex<double>* C, int ldc, * complex<double>* work, int lwork, int* info ); * * template< typename MT1, bool SO1, typename MT2, bool SO2 > void ormrq( * DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A, char side, char * trans, const ElementType_<MT2>* tau ); * * template< typename MT1, bool SO, typename MT2 > void unmrq( * DenseMatrix<MT1,SO>& C, DenseMatrix<MT2,SO>& A, char side, char trans, * ElementType_<MT2>* tau ); * * } // namespace blaze \endcode * * // \n \subsection lapack_ql_decomposition QL Decomposition // // The * following functions provide an interface for the LAPACK functions \c * sgeqlf(), \c dgeqlf(), // \c cgeqlf(), and \c zgeqlf(), which compute the * QL decomposition of the given general matrix: * * \code namespace blaze { * * void geqlf( int m, int n, float* A, int lda, float* tau, float* work, int * lwork, int* info ); * * void geqlf( int m, int n, double* A, int lda, double* tau, double* work, int * lwork, int* info ); * * void geqlf( int m, int n, complex<float>* A, int lda, complex<float>* tau, * complex<float>* work, int lwork, int* info ); * * void geqlf( int m, int n, complex<double>* A, int lda, complex<double>* tau, * complex<double>* work, int lwork, int* info ); * * template< typename MT, bool SO > void geqlf( DenseMatrix<MT,SO>& A, typename * MT::ElementType* tau ); * * } // namespace blaze \endcode * * // The decomposition has the form * * \f[ A = Q \cdot L, \f] * * // where the \c Q is represented as a product of elementary reflectors * * \f[ Q = H(k) . . . 
H(2) H(1) \texttt{, with k = min(m,n).} \f] * * // Each H(i) has the form * * \f[ H(i) = I - tau \cdot v \cdot v^T, \f] * * // where \c tau is a real scalar, and \c v is a real vector with * <tt>v(m-k+i+1:m) = 0</tt> and // <tt>v(m-k+i) = 1</tt>. * <tt>v(1:m-k+i-1)</tt> is stored on exit in <tt>A(1:m-k+i-1,n-k+i)</tt>, // * and \c tau in \c tau(i). Thus in case \a m >= \a n, the lower triangle of * the subarray // A(m-n+1:m,1:n) contains the \a n-by-\a n lower triangular * matrix \c L and in case \a m <= \a n, // the elements on and below the (\a * n-\a m)-th subdiagonal contain the \a m-by-\a n lower // trapezoidal * matrix \c L; the remaining elements in combination with the array \c tau * represent // the orthogonal matrix \c Q as a product of min(\a m,\a n) * elementary reflectors. // // The following functions provide an interface * for the LAPACK functions \c sorgql(), \c dorgql(), // \c cungql(), and \c * zunqql(), which reconstruct the \c Q matrix from an QL decomposition: * * \code namespace blaze { * * void orgql( int m, int n, int k, float* A, int lda, const float* tau, float* * work, int lwork, int* info ); * * void orgql( int m, int n, int k, double* A, int lda, const double* tau, * double* work, int lwork, int* info ); * * void ungql( int m, int n, int k, complex<float>* A, int lda, const * complex<float>* tau, complex<float>* work, int lwork, int* info ); * * void ungql( int m, int n, int k, complex<double>* A, int lda, const * complex<double>* tau, complex<double>* work, int lwork, int* info ); * * template< typename MT, bool SO > void orgql( DenseMatrix<MT,SO>& A, const * typename MT::ElementType* tau ); * * template< typename MT, bool SO > void ungql( DenseMatrix<MT,SO>& A, const * typename MT::ElementType* tau ); * * } // namespace blaze \endcode * * // The following functions provide an interface for the LAPACK functions \c * sormql(), \c dormql(), // \c cunmql(), and \c zunmql(), which can be used * to multiply a matrix with the \c Q matrix from // a QL decomposition: * * \code namespace blaze { * * void ormql( char side, char trans, int m, int n, int k, const float* A, int * lda, const float* tau, float* C, int ldc, float* work, int lwork, int* * info ); * * void ormql( char side, char trans, int m, int n, int k, const double* A, int * lda, const double* tau, double* C, int ldc, double* work, int lwork, int* * info ); * * void unmql( char side, char trans, int m, int n, int k, const complex<float>* * A, int lda, const complex<float>* tau, complex<float>* C, int ldc, * complex<float>* work, int lwork, int* info ); * * void unmql( char side, char trans, int m, int n, int k, const complex<double>* * A, int lda, const complex<double>* tau, complex<double>* C, int ldc, * complex<double>* work, int lwork, int* info ); * * template< typename MT1, bool SO1, typename MT2, bool SO2 > void ormql( * DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A, char side, char * trans, const ElementType_<MT2>* tau ); * * template< typename MT1, bool SO, typename MT2 > void unmql( * DenseMatrix<MT1,SO>& C, DenseMatrix<MT2,SO>& A, char side, char trans, * ElementType_<MT2>* tau ); * * } // namespace blaze \endcode * * // \n \subsection lapack_lq_decomposition LQ Decomposition // // The * following functions provide an interface for the LAPACK functions \c * sgelqf(), \c dgelqf(), // \c cgelqf(), and \c zgelqf(), which compute the * LQ decomposition of the given general matrix: * * \code namespace blaze { * * void gelqf( int m, int n, float* A, int lda, float* tau, float* work, int * 
lwork, int* info ); * * void gelqf( int m, int n, double* A, int lda, double* tau, double* work, int * lwork, int* info ); * * void gelqf( int m, int n, complex<float>* A, int lda, complex<float>* tau, * complex<float>* work, int lwork, int* info ); * * void gelqf( int m, int n, complex<double>* A, int lda, complex<double>* tau, * complex<double>* work, int lwork, int* info ); * * template< typename MT, bool SO > void gelqf( DenseMatrix<MT,SO>& A, typename * MT::ElementType* tau ); * * } // namespace blaze \endcode * * // The decomposition has the form * * \f[ A = L \cdot Q, \f] * * // where the \c Q is represented as a product of elementary reflectors * * \f[ Q = H(k) . . . H(2) H(1) \texttt{, with k = min(m,n).} \f] * * // Each H(i) has the form * * \f[ H(i) = I - tau \cdot v \cdot v^T, \f] * * // where \c tau is a real scalar, and \c v is a real vector with <tt>v(0:i-1) * = 0</tt> and // <tt>v(i) = 1</tt>. <tt>v(i+1:n)</tt> is stored on exit in * <tt>A(i,i+1:n)</tt>, and \c tau // in \c tau(i). Thus on exit the elements * on and below the diagonal of the matrix contain the // \a m-by-min(\a m,\a * n) lower trapezoidal matrix \c L (\c L is lower triangular if \a m <= \a * n); // the elements above the diagonal, with the array \c tau, represent * the orthogonal matrix \c Q // as a product of min(\a m,\a n) elementary * reflectors. // // The following functions provide an interface for the * LAPACK functions \c sorglq(), \c dorglq(), // \c cunglq(), and \c * zunqlq(), which reconstruct the \c Q matrix from an LQ decomposition: * * \code namespace blaze { * * void orglq( int m, int n, int k, float* A, int lda, const float* tau, float* * work, int lwork, int* info ); * * void orglq( int m, int n, int k, double* A, int lda, const double* tau, * double* work, int lwork, int* info ); * * void unglq( int m, int n, int k, complex<float>* A, int lda, const * complex<float>* tau, complex<float>* work, int lwork, int* info ); * * void unglq( int m, int n, int k, complex<double>* A, int lda, const * complex<double>* tau, complex<double>* work, int lwork, int* info ); * * template< typename MT, bool SO > void orglq( DenseMatrix<MT,SO>& A, const * typename MT::ElementType* tau ); * * template< typename MT, bool SO > void unglq( DenseMatrix<MT,SO>& A, const * typename MT::ElementType* tau ); * * } // namespace blaze \endcode * * // The following functions provide an interface for the LAPACK functions \c * sormlq(), \c dormlq(), // \c cunmlq(), and \c zunmlq(), which can be used * to multiply a matrix with the \c Q matrix from // a LQ decomposition: * * \code namespace blaze { * * void ormlq( char side, char trans, int m, int n, int k, const float* A, int * lda, const float* tau, float* C, int ldc, float* work, int lwork, int* * info ); * * void ormlq( char side, char trans, int m, int n, int k, const double* A, int * lda, const double* tau, double* C, int ldc, double* work, int lwork, int* * info ); * * void unmlq( char side, char trans, int m, int n, int k, const complex<float>* * A, int lda, const complex<float>* tau, complex<float>* C, int ldc, * complex<float>* work, int lwork, int* info ); * * void unmlq( char side, char trans, int m, int n, int k, const complex<double>* * A, int lda, const complex<double>* tau, complex<double>* C, int ldc, * complex<double>* work, int lwork, int* info ); * * template< typename MT1, bool SO1, typename MT2, bool SO2 > void ormlq( * DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A, char side, char * trans, const ElementType_<MT2>* tau ); * * template< typename 
MT1, bool SO, typename MT2 > void unmlq( * DenseMatrix<MT1,SO>& C, DenseMatrix<MT2,SO>& A, char side, char trans, * ElementType_<MT2>* tau ); * * } // namespace blaze \endcode * * // \n \section lapack_inversion Matrix Inversion // <hr> // // Given a matrix * that has already been decomposed, the following functions can be used to * invert // the matrix in-place. // // // \n \subsection lapack_lu_inversion * LU-based Inversion // // The following functions provide an interface for * the LAPACK functions \c sgetri(), \c dgetri(), // \c cgetri(), and \c * zgetri(), which invert a general matrix that has already been decomposed * by // an \ref lapack_lu_decomposition : * * \code namespace blaze { * * void getri( int n, float* A, int lda, const int* ipiv, float* work, int * lwork, int* info ); * * void getri( int n, double* A, int lda, const int* ipiv, double* work, int * lwork, int* info ); * * void getri( int n, complex<float>* A, int lda, const int* ipiv, * complex<float>* work, int lwork, int* info ); * * void getri( int n, complex<double>* A, int lda, const int* ipiv, * complex<double>* work, int lwork, int* info ); * * template< typename MT, bool SO > void getri( DenseMatrix<MT,SO>& A, const int* * ipiv ); * * } // namespace blaze \endcode * * // The functions fail if ... // // - ... the given matrix is not a square * matrix; // - ... the given matrix is singular and not invertible. // // * The first four functions report failure via the \c info argument, the * fifth function throws a // \a std::invalid_argument exception in case of * an error. // // // \n \subsection lapack_ldlt_inversion LDLT-based * Inversion // // The following functions provide an interface for the * LAPACK functions \c ssytri(), \c dsytri(), // \c csytri(), and \c * zsytri(), which invert a symmetric indefinite matrix that has already been * // decomposed by an \ref lapack_ldlt_decomposition : * * \code namespace blaze { * * void sytri( char uplo, int n, float* A, int lda, const int* ipiv, float* * work, int* info ); * * void sytri( char uplo, int n, double* A, int lda, const int* ipiv, double* * work, int* info ); * * void sytri( char uplo, int n, complex<float>* A, int lda, const int* ipiv, * complex<float>* work, int* info ); * * void sytri( char uplo, int n, complex<double>* A, int lda, const int* ipiv, * complex<double>* work, int* info ); * * template< typename MT, bool SO > void sytri( DenseMatrix<MT,SO>& A, char * uplo, const int* ipiv ); * * } // namespace blaze \endcode * * // The functions fail if ... // // - ... the given matrix is not a square * matrix; // - ... the given matrix is singular and not invertible. // // * The first four functions report failure via the \c info argument, the * fifth function throws a // \a std::invalid_argument exception in case of * an error. // // // \n \subsection lapack_ldlh_inversion LDLH-based * Inversion // // The following functions provide an interface for the * LAPACK functions \c chetri() and // \c zhetri(), which invert an Hermitian * indefinite matrix that has already been decomposed by // an \ref * lapack_ldlh_decomposition : * * \code namespace blaze { * * void hetri( char uplo, int n, complex<float>* A, int lda, const int* ipiv, * complex<float>* work, int* info ); * * void hetri( char uplo, int n, complex<double>* A, int lda, const int* ipiv, * complex<double>* work, int* info ); * * template< typename MT, bool SO > void hetri( DenseMatrix<MT,SO>& A, char * uplo, const int* ipiv ); * * } // namespace blaze \endcode * * // The functions fail if ... 
//
// - ... the given matrix is not a square matrix;
// - ... the given matrix is singular and not invertible.
//
// The first two functions report failure via the \c info argument, the third function throws a
// \a std::invalid_argument exception in case of an error.
//
//
// \n \subsection lapack_llh_inversion Cholesky-based Inversion
//
// The following functions provide an interface for the LAPACK functions \c spotri(), \c dpotri(),
// \c cpotri(), and \c zpotri(), which invert a positive definite matrix that has already been
// decomposed by an \ref lapack_llh_decomposition :
//
\code
namespace blaze {

void potri( char uplo, int n, float* A, int lda, int* info );

void potri( char uplo, int n, double* A, int lda, int* info );

void potri( char uplo, int n, complex<float>* A, int lda, int* info );

void potri( char uplo, int n, complex<double>* A, int lda, int* info );

template< typename MT, bool SO > void potri( DenseMatrix<MT,SO>& A, char uplo );

} // namespace blaze
\endcode
//
// The functions fail if ...
//
// - ... the given matrix is not a square matrix;
// - ... the given \a uplo argument is neither 'L' nor 'U';
// - ... the given matrix is singular and not invertible.
//
// The first four functions report failure via the \c info argument, the fifth function throws a
// \a std::invalid_argument exception in case of an error.
//
//
// \n \subsection lapack_triangular_inversion Inversion of Triangular Matrices
//
// The following functions provide an interface for the LAPACK functions \c strtri(), \c dtrtri(),
// \c ctrtri(), and \c ztrtri(), which invert the given triangular matrix in-place:
//
\code
namespace blaze {

void trtri( char uplo, char diag, int n, float* A, int lda, int* info );

void trtri( char uplo, char diag, int n, double* A, int lda, int* info );

void trtri( char uplo, char diag, int n, complex<float>* A, int lda, int* info );

void trtri( char uplo, char diag, int n, complex<double>* A, int lda, int* info );

template< typename MT, bool SO > void trtri( DenseMatrix<MT,SO>& A, char uplo, char diag );

} // namespace blaze
\endcode
//
// The functions fail if ...
//
// - ... the given matrix is not a square matrix;
// - ... the given \a uplo argument is neither 'L' nor 'U';
// - ... the given \a diag argument is neither 'U' nor 'N';
// - ... the given matrix is singular and not invertible.
//
// The first four functions report failure via the \c info argument, the fifth function throws a
// \a std::invalid_argument exception in case of an error.
//
//
// \n \section lapack_substitution Substitution
// <hr>
//
// Given a matrix that has already been decomposed, the following functions can be used to perform
// the forward/backward substitution step to compute the solution to a system of linear equations.
// Note that depending on the storage order of the * system matrix and the given right-hand side the // functions solve * different equation systems: // // Single right-hand side: // - \f$ A * *x=b \f$ if \a A is column-major // - \f$ A^T*x=b \f$ if \a A is * row-major // // Multiple right-hand sides: // - \f$ A *X =B \f$ if * both \a A and \a B are column-major // - \f$ A^T*X =B \f$ if \a A is * row-major and \a B is column-major // - \f$ A *X^T=B^T \f$ if \a A is * column-major and \a B is row-major // - \f$ A^T*X^T=B^T \f$ if both \a A * and \a B are row-major // // In this context the general system matrix \a * A is a n-by-n matrix that has already been // factorized by the according * decomposition function, \a x and \a b are n-dimensional vectors // and \a * X and \a B are either row-major m-by-n matrices or column-major n-by-m * matrices. // // // \n \subsection lapack_lu_substitution LU-based * Substitution // // The following functions provide an interface for the * LAPACK functions \c sgetrs(), \c dgetrs(), // \c cgetrs(), and \c * zgetrs(), which perform the substitution step for a general matrix that * has // already been decomposed by an \ref lapack_lu_decomposition : * * \code namespace blaze { * * void getrs( char trans, int n, int nrhs, const float* A, int lda, const int* * ipiv, float* B, int ldb, int* info ); * * void getrs( char trans, int n, int nrhs, const double* A, int lda, const int* * ipiv, double* B, int ldb, int* info ); * * void getrs( char trans, int n, const complex<float>* A, int lda, const int* * ipiv, complex<float>* B, int ldb, int* info ); * * void getrs( char trans, int n, const complex<double>* A, int lda, const int* * ipiv, complex<double>* B, int ldb, int* info ); * * template< typename MT, bool SO, typename VT, bool TF > void getrs( const * DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char trans, const int* ipiv * ); * * template< typename MT1, bool SO1, typename MT2, bool SO2 > void getrs( const * DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char trans, const int* * ipiv ); * * } // namespace blaze \endcode * * // If the function exits successfully, the vector \a b or the matrix \a B * contain the solution(s) // of the linear system of equations. The * functions fail if ... // // - ... the given system matrix is not a square * matrix; // - ... the given \a trans argument is neither 'N' nor 'T' nor * 'C'; // - ... the sizes of the two given matrices do not match. // // The * first four functions report failure via the \c info argument, the last two * functions throw // a \a std::invalid_argument exception in case of an * error. 
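//
// As a brief illustration (this example is not part of the original documentation; the values
// are made up and it assumes the \c getrf() wrapper described in the \ref lapack_lu_decomposition
// section), a typical decomposition/substitution sequence for a single right-hand side could
// look as follows:
//
\code
blaze::DynamicMatrix<double,blaze::columnMajor> A{ { 4.0, 3.0 },
                                                   { 6.0, 3.0 } };
blaze::DynamicVector<double,blaze::columnVector> b{ 10.0, 12.0 };
int ipiv[2];  // Pivot indices, filled by the decomposition

blaze::getrf( A, ipiv );          // LU decomposition of the column-major matrix A
blaze::getrs( A, b, 'N', ipiv );  // Substitution step; b now contains the solution of A*x=b
\endcode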
// // // \n \subsection lapack_ldlt_substitution LDLT-based * Substitution // // The following functions provide an interface for the * LAPACK functions \c ssytrs(), \c dsytrs(), // \c csytrs(), and \c * zsytrs(), which perform the substitution step for a symmetric indefinite * // matrix that has already been decomposed by an \ref * lapack_ldlt_decomposition : * * \code namespace blaze { * * void sytrs( char uplo, int n, int nrhs, const float* A, int lda, const int* * ipiv, float* B, int ldb, int* info ); * * void sytrs( char uplo, int n, int nrhs, const double* A, int lda, const int* * ipiv, double* B, int ldb, int* info ); * * void sytrs( char uplo, int n, int nrhs, const complex<float>* A, int lda, * const int* ipiv, complex<float>* B, int ldb, int* info ); * * void sytrs( char uplo, int n, int nrhs, const complex<double>* A, int lda, * const int* ipiv, complex<double>* B, int ldb, int* info ); * * template< typename MT, bool SO, typename VT, bool TF > void sytrs( const * DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, const int* ipiv * ); * * template< typename MT1, bool SO1, typename MT2, bool SO2 > void sytrs( const * DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, const int* * ipiv ); * * } // namespace blaze \endcode * * // If the function exits successfully, the vector \a b or the matrix \a B * contain the solution(s) // of the linear system of equations. The * functions fail if ... // // - ... the given system matrix is not a square * matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - * ... the sizes of the two given matrices do not match. // // The first four * functions report failure via the \c info argument, the last two functions * throw // a \a std::invalid_argument exception in case of an error. // // * // \n \subsection lapack_ldlh_substitution LDLH-based Substitution // // * The following functions provide an interface for the LAPACK functions \c * chetrs(), and \c zhetrs(), // which perform the substitution step for an * Hermitian indefinite matrix that has already been // decomposed by an \ref * lapack_ldlh_decomposition : * * \code namespace blaze { * * void hetrs( char uplo, int n, int nrhs, const complex<float>* A, int lda, * const int* ipiv, complex<float>* B, int ldb, int* info ); * * void hetrs( char uplo, int n, int nrhs, const complex<double>* A, int lda, * const int* ipiv, complex<double>* B, int ldb, int* info ); * * template< typename MT, bool SO, typename VT, bool TF > void hetrs( const * DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, const int* ipiv * ); * * template< typename MT1, bool SO1, typename MT2, bool SO2 > void hetrs( const * DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, const int* * ipiv ); * * } // namespace blaze \endcode * * // If the function exits successfully, the vector \a b or the matrix \a B * contain the solution(s) // of the linear system of equations. The * functions fail if ... // // - ... the given system matrix is not a square * matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - * ... the sizes of the two given matrices do not match. // // The first two * functions report failure via the \c info argument, the last two functions * throw // a \a std::invalid_argument exception in case of an error. 
//
//
// \n \subsection lapack_llh_substitution Cholesky-based Substitution
//
// The following functions provide an interface for the LAPACK functions \c spotrs(), \c dpotrs(),
// \c cpotrs(), and \c zpotrs(), which perform the substitution step for a positive definite matrix
// that has already been decomposed by an \ref lapack_llh_decomposition :
//
\code
namespace blaze {

void potrs( char uplo, int n, int nrhs, const float* A, int lda, float* B, int ldb, int* info );

void potrs( char uplo, int n, int nrhs, const double* A, int lda, double* B, int ldb, int* info );

void potrs( char uplo, int n, int nrhs, const complex<float>* A, int lda, complex<float>* B, int ldb, int* info );

void potrs( char uplo, int n, int nrhs, const complex<double>* A, int lda, complex<double>* B, int ldb, int* info );

template< typename MT, bool SO, typename VT, bool TF >
void potrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo );

template< typename MT1, bool SO1, typename MT2, bool SO2 >
void potrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo );

} // namespace blaze
\endcode
//
// If the function exits successfully, the vector \a b or the matrix \a B contain the solution(s)
// of the linear system of equations. The functions fail if ...
//
// - ... the given system matrix is not a square matrix;
// - ... the given \a uplo argument is neither 'L' nor 'U';
// - ... the sizes of the two given matrices do not match.
//
// The first four functions report failure via the \c info argument, the last two functions throw
// a \a std::invalid_argument exception in case of an error.
//
//
// \n \subsection lapack_triangular_substitution Substitution for Triangular Matrices
//
// The following functions provide an interface for the LAPACK functions \c strtrs(), \c dtrtrs(),
// \c ctrtrs(), and \c ztrtrs(), which perform the substitution step for a triangular matrix:
//
\code
namespace blaze {

void trtrs( char uplo, char trans, char diag, int n, int nrhs, const float* A, int lda, float* B, int ldb, int* info );

void trtrs( char uplo, char trans, char diag, int n, int nrhs, const double* A, int lda, double* B, int ldb, int* info );

void trtrs( char uplo, char trans, char diag, int n, int nrhs, const complex<float>* A, int lda, complex<float>* B, int ldb, int* info );

void trtrs( char uplo, char trans, char diag, int n, int nrhs, const complex<double>* A, int lda, complex<double>* B, int ldb, int* info );

template< typename MT, bool SO, typename VT, bool TF >
void trtrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, char trans, char diag );

template< typename MT1, bool SO1, typename MT2, bool SO2 >
void trtrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, char trans, char diag );

} // namespace blaze
\endcode
//
// If the function exits successfully, the vector \a b or the matrix \a B contain the solution(s)
// of the linear system of equations. The functions fail if ...
//
// - ... the given system matrix is not a square matrix;
// - ... the given \a uplo argument is neither 'L' nor 'U';
// - ... the given \a trans argument is neither 'N' nor 'T' nor 'C';
// - ... the given \a diag argument is neither 'U' nor 'N';
// - ... the sizes of the two given matrices do not match.
//
// The first four functions report failure via the \c info argument, the last two functions throw
// a \a std::invalid_argument exception in case of an error.
//
//
// \n \section lapack_linear_system_solver Linear System Solver
// <hr>
//
// The following functions represent compound functions that perform both the decomposition step
// as well as the substitution step to compute the solution to a system of linear equations. Note
// that depending on the storage order of the system matrix and the given right-hand side the
// functions solve different equation systems:
//
// Single right-hand side:
//  - \f$ A  *x=b \f$ if \a A is column-major
//  - \f$ A^T*x=b \f$ if \a A is row-major
//
// Multiple right-hand sides:
//  - \f$ A  *X  =B   \f$ if both \a A and \a B are column-major
//  - \f$ A^T*X  =B   \f$ if \a A is row-major and \a B is column-major
//  - \f$ A  *X^T=B^T \f$ if \a A is column-major and \a B is row-major
//  - \f$ A^T*X^T=B^T \f$ if both \a A and \a B are row-major
//
// In this context the general system matrix \a A is an n-by-n matrix, \a x and \a b are
// n-dimensional vectors, and \a X and \a B are either row-major m-by-n matrices or column-major
// n-by-m matrices.
//
//
// \subsection lapack_lu_linear_system_solver LU-based Linear System Solver
//
// The following functions provide an interface for the LAPACK functions \c sgesv(), \c dgesv(),
// \c cgesv(), and \c zgesv(), which combine an \ref lapack_lu_decomposition and the according
// \ref lapack_lu_substitution :
//
\code
namespace blaze {

void gesv( int n, int nrhs, float* A, int lda, int* ipiv, float* B, int ldb, int* info );

void gesv( int n, int nrhs, double* A, int lda, int* ipiv, double* B, int ldb, int* info );

void gesv( int n, int nrhs, complex<float>* A, int lda, int* ipiv, complex<float>* B, int ldb, int* info );

void gesv( int n, int nrhs, complex<double>* A, int lda, int* ipiv, complex<double>* B, int ldb, int* info );

template< typename MT, bool SO, typename VT, bool TF >
void gesv( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, int* ipiv );

template< typename MT1, bool SO1, typename MT2, bool SO2 >
void gesv( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, int* ipiv );

} // namespace blaze
\endcode
//
// If the function exits successfully, the vector \a b or the matrix \a B contain the
// solution(s) of the linear system of equations and \a A has been decomposed by means of an
// \ref lapack_lu_decomposition.
//
// The functions fail if ...
//
// - ... the given system matrix is not a square matrix;
// - ... the given system matrix is singular and not invertible.
//
// The first four functions report failure via the \c info argument, the last two functions throw
// a \a std::invalid_argument exception in case of an error.
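//
// For illustration (this example is not part of the original documentation; the values are made
// up), a single right-hand side system can be solved in one call:
//
\code
blaze::DynamicMatrix<double,blaze::columnMajor> A{ { 4.0, 1.0 },
                                                   { 2.0, 3.0 } };
blaze::DynamicVector<double,blaze::columnVector> b{ 5.0, 5.0 };
int ipiv[2];  // Pivot indices of the LU decomposition

blaze::gesv( A, b, ipiv );  // On exit, b contains the solution x of A*x=b and
                            // A contains its LU decomposition
\endcode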
//
//
// \n \subsection lapack_ldlt_linear_system_solver LDLT-based Linear System Solver
//
// The following functions provide an interface for the LAPACK functions \c ssysv(), \c dsysv(),
// \c csysv(), and \c zsysv(), which combine an \ref lapack_ldlt_decomposition and the according
// \ref lapack_ldlt_substitution :
//
\code
namespace blaze {

void sysv( char uplo, int n, int nrhs, float* A, int lda, int* ipiv, float* B, int ldb, float* work, int lwork, int* info );

void sysv( char uplo, int n, int nrhs, double* A, int lda, int* ipiv, double* B, int ldb, double* work, int lwork, int* info );

void sysv( char uplo, int n, int nrhs, complex<float>* A, int lda, int* ipiv, complex<float>* B, int ldb, complex<float>* work, int lwork, int* info );

void sysv( char uplo, int n, int nrhs, complex<double>* A, int lda, int* ipiv, complex<double>* B, int ldb, complex<double>* work, int lwork, int* info );

template< typename MT, bool SO, typename VT, bool TF >
void sysv( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, int* ipiv );

template< typename MT1, bool SO1, typename MT2, bool SO2 >
void sysv( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, int* ipiv );

} // namespace blaze
\endcode
//
// If the function exits successfully, the vector \a b or the matrix \a B contain the
// solution(s) of the linear system of equations and \a A has been decomposed by means of an
// \ref lapack_ldlt_decomposition.
//
// The functions fail if ...
//
// - ... the given system matrix is not a square matrix;
// - ... the given \a uplo argument is neither 'L' nor 'U';
// - ... the sizes of the two given matrices do not match;
// - ... the given system matrix is singular and not invertible.
//
// The first four functions report failure via the \c info argument, the last two functions throw
// a \a std::invalid_argument exception in case of an error.
//
//
// \n \subsection lapack_ldlh_linear_system_solver LDLH-based Linear System Solver
//
// The following functions provide an interface for the LAPACK functions \c chesv() and
// \c zhesv(), which combine an \ref lapack_ldlh_decomposition and the according
// \ref lapack_ldlh_substitution :
//
\code
namespace blaze {

void hesv( char uplo, int n, int nrhs, complex<float>* A, int lda, int* ipiv, complex<float>* B, int ldb, complex<float>* work, int lwork, int* info );

void hesv( char uplo, int n, int nrhs, complex<double>* A, int lda, int* ipiv, complex<double>* B, int ldb, complex<double>* work, int lwork, int* info );

template< typename MT, bool SO, typename VT, bool TF >
void hesv( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, int* ipiv );

template< typename MT1, bool SO1, typename MT2, bool SO2 >
void hesv( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, int* ipiv );

} // namespace blaze
\endcode
//
// If the function exits successfully, the vector \a b or the matrix \a B contain the
// solution(s) of the linear system of equations and \a A has been decomposed by means of an
// \ref lapack_ldlh_decomposition.
//
// The functions fail if ...
//
// - ... the given system matrix is not a square matrix;
// - ... the given \a uplo argument is neither 'L' nor 'U';
// - ... the sizes of the two given matrices do not match;
// - ... the given system matrix is singular and not invertible.
//
// The first two functions report failure via the \c info argument, the last two functions throw
// a \a std::invalid_argument exception in case of an error.
//
//
// \n \subsection lapack_llh_linear_system_solver Cholesky-based Linear System Solver
//
// The following functions provide an interface for the LAPACK functions \c sposv(), \c dposv(),
// \c cposv(), and \c zposv(), which combine an \ref lapack_llh_decomposition and the according
// \ref lapack_llh_substitution :
//
\code
namespace blaze {

void posv( char uplo, int n, int nrhs, float* A, int lda, float* B, int ldb, int* info );

void posv( char uplo, int n, int nrhs, double* A, int lda, double* B, int ldb, int* info );

void posv( char uplo, int n, int nrhs, complex<float>* A, int lda, complex<float>* B, int ldb, int* info );

void posv( char uplo, int n, int nrhs, complex<double>* A, int lda, complex<double>* B, int ldb, int* info );

template< typename MT, bool SO, typename VT, bool TF >
void posv( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo );

template< typename MT1, bool SO1, typename MT2, bool SO2 >
void posv( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo );

} // namespace blaze
\endcode
//
// If the function exits successfully, the vector \a b or the matrix \a B contain the
// solution(s) of the linear system of equations and \a A has been decomposed by means of an
// \ref lapack_llh_decomposition.
//
// The functions fail if ...
//
// - ... the given system matrix is not a square matrix;
// - ... the given \a uplo argument is neither 'L' nor 'U';
// - ... the sizes of the two given matrices do not match;
// - ... the given system matrix is singular and not invertible.
//
// The first four functions report failure via the \c info argument, the last two functions throw
// a \a std::invalid_argument exception in case of an error.
//
//
// \n \subsection lapack_triangular_linear_system_solver Linear System Solver for Triangular Matrices
//
// The following functions provide an interface for the LAPACK functions \c strsv(), \c dtrsv(),
// \c ctrsv(), and \c ztrsv():
//
\code
namespace blaze {

void trsv( char uplo, char trans, char diag, int n, const float* A, int lda, float* x, int incX );

void trsv( char uplo, char trans, char diag, int n, const double* A, int lda, double* x, int incX );

void trsv( char uplo, char trans, char diag, int n, const complex<float>* A, int lda, complex<float>* x, int incX );

void trsv( char uplo, char trans, char diag, int n, const complex<double>* A, int lda, complex<double>* x, int incX );

template< typename MT, bool SO, typename VT, bool TF >
void trsv( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, char trans, char diag );

} // namespace blaze
\endcode
//
// If the function exits successfully, the vector \a b contains the solution of the linear
// system of equations.
//
// The functions fail if ...
//
// - ... the given system matrix is not a square matrix;
// - ... the given \a uplo argument is neither 'L' nor 'U';
// - ... the given \a trans argument is neither 'N' nor 'T' nor 'C';
// - ... the given \a diag argument is neither 'U' nor 'N'.
//
// The last function throws a \a std::invalid_argument exception in case of an error. Note that
// none of the functions performs any test for singularity or near-singularity. Such tests
// must be performed prior to calling this function!
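//
// A minimal sketch of the dense matrix/vector overload (added for illustration; the values are
// made up):
//
\code
blaze::DynamicMatrix<double,blaze::columnMajor> L{ { 2.0, 0.0 },
                                                   { 1.0, 3.0 } };
blaze::DynamicVector<double,blaze::columnVector> b{ 4.0, 7.0 };

// Solves L*x=b for the lower ('L'), non-transposed ('N'), non-unit diagonal ('N') matrix L;
// b is overwritten with the solution x. Remember that no singularity check is performed!
blaze::trsv( L, b, 'L', 'N', 'N' );
\endcode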
// // // \n \section lapack_eigenvalues Eigenvalues/Eigenvectors * // // \subsection lapack_eigenvalues_general General Matrices // // The * following functions provide an interface for the LAPACK functions \c * sgeev(), \c dgeev(), // \c cgeev(), and \c zgeev(), which compute the * eigenvalues and optionally the eigenvectors of // the given general * matrix: * * \code namespace blaze { * * void geev( char jobvl, char jobvr, int n, float* A, int lda, float* wr, float* * wi, float* VL, int ldvl, float* VR, int ldvr, float* work, int lwork, int* * info ); * * void geev( char jobvl, char jobvr, int n, double* A, int lda, double* wr, * double* wi, double* VL, int ldvl, double* VR, int ldvr, double* work, int * lwork, int* info ); * * void geev( char jobvl, char jobvr, int n, complex<float>* A, int lda, * complex<float>* w, complex<float>* VL, int ldvl, complex<float>* VR, int * ldvr, complex<float>* work, int lwork, float* rwork, int* info ); * * void geev( char jobvl, char jobvr, int n, complex<double>* A, int lda, * complex<double>* w, complex<double>* VL, int ldvl, complex<double>* VR, * int ldvr, complex<double>* work, int lwork, double* rwork, int* info ); * * template< typename MT, bool SO, typename VT, bool TF > void geev( * DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w ); * * template< typename MT1, bool SO1, typename MT2, bool SO2, typename VT, bool * TF > void geev( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& VL, * DenseVector<VT,TF>& w ); * * template< typename MT1, bool SO1, typename VT, bool TF, typename MT2, bool * SO2 > void geev( DenseMatrix<MT1,SO1>& A, DenseVector<VT,TF>& w, * DenseMatrix<MT2,SO2>& VR ); * * template< typename MT1, bool SO1, typename MT2, bool SO2, typename VT, bool * TF, typename MT3, bool SO3 > void geev( DenseMatrix<MT1,SO1>& A, * DenseMatrix<MT2,SO2>& VL, DenseVector<VT,TF>& w, DenseMatrix<MT3,SO3>& VR * ); * * } // namespace blaze \endcode * * // The complex eigenvalues of the given matrix \a A are returned in the given * vector \a w. // Please note that no order of eigenvalues can be assumed, * except that complex conjugate pairs // of eigenvalues appear consecutively * with the eigenvalue having the positive imaginary part // first. // // If * \a VR is provided as an argument, the right eigenvectors are returned in * the rows of \a VR // in case \a VR is a row-major matrix and in the * columns of \a VR in case \a VR is a column-major // matrix. The right * eigenvector \f$v[j]\f$ of \a A satisfies * * \f[ A * v[j] = lambda[j] * v[j], \f] * * // where \f$lambda[j]\f$ is its eigenvalue. // // If \a VL is provided as an * argument, the left eigenvectors are returned in the rows of \a VL // in * case \a VL is a row-major matrix and in the columns of \a VL in case \a VL * is a column-major // matrix. The left eigenvector \f$u[j]\f$ of \a A * satisfies * * \f[ u[j]^{H} * A = lambda[j] * u[j]^{H}, \f] * * // where \f$u[j]^{H}\f$ denotes the conjugate transpose of \f$u[j]\f$. // // * \a w, \a VL, and \a VR are resized to the correct dimensions (if possible * and necessary). The // functions fail if ... // // - ... the given matrix * \a A is not a square matrix; // - ... the given matrix \a VL is a fixed * size matrix and the dimensions don't match; // - ... the given vector \a * w is a fixed size vector and the size doesn't match; // - ... the given * matrix \a VR is a fixed size matrix and the dimensions don't match; // - * ... the eigenvalue computation fails. 
// // The first four functions * report failure via the \c info argument, the last four functions throw // * an exception in case of an error. // // // \n \subsection * lapack_eigenvalues_symmetric Symmetric Matrices // // The following * functions provide an interface for the LAPACK functions \c ssyev() and \c * dsyev(), // which compute the eigenvalues and eigenvectors of the given * symmetric matrix: * * \code namespace blaze { * * void syev( char jobz, char uplo, int n, float* A, int lda, float* w, float* * work, int lwork, int* info ); * * void syev( char jobz, char uplo, int n, double* A, int lda, double* w, double* * work, int lwork, int* info ); * * template< typename MT, bool SO, typename VT, bool TF > void syev( * DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char jobz, char uplo ); * * } // namespace blaze \endcode * * // Alternatively, the following functions can be used, which provide an * interface to the LAPACK // functions \c ssyevd() and \c dsyevd(). In * contrast to the \c syev() functions they use a // divide-and-conquer * strategy for the computation of the left and right eigenvectors: * * \code namespace blaze { * * void syevd( char jobz, char uplo, int n, float* A, int lda, float* w, float* * work, int lwork, int* iwork, int liwork, int* info ); * * void syevd( char jobz, char uplo, int n, double* A, int lda, double* w, * double* work, int lwork, int* iwork, int liwork, int* info ); * * template< typename MT, bool SO, typename VT, bool TF > void syevd( * DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char jobz, char uplo ); * * } // namespace blaze \endcode * * // The real eigenvalues are returned in ascending order in the given vector * \a w. \a w is resized // to the correct size (if possible and necessary). * In case \a A is a row-major matrix, the left // eigenvectors are returned * in the rows of \a A, in case \a A is a column-major matrix, the right // * eigenvectors are returned in the columns of \a A. // // The functions fail * if ... // // - ... the given matrix \a A is not a square matrix; // - * ... the given vector \a w is a fixed size vector and the size doesn't * match; // - ... the given \a jobz argument is neither \c 'V' nor \c 'N'; * // - ... the given \a uplo argument is neither \c 'L' nor \c 'U'; // - * ... the eigenvalue computation fails. // // The first two functions report * failure via the \c info argument, the last function throws an // exception * in case of an error. 
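//
// For illustration (this example is not part of the original documentation; the matrix values
// are made up), the eigenvalues and eigenvectors of a small symmetric matrix can be computed
// as follows:
//
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A{ { 2.0, 1.0, 0.0 },
                                                { 1.0, 2.0, 1.0 },
                                                { 0.0, 1.0, 2.0 } };
blaze::DynamicVector<double,blaze::columnVector> w;  // Resized to size 3 by syev() (if possible)

blaze::syev( A, w, 'V', 'L' );  // Eigenvalues in ascending order in w; the eigenvectors
                                // are returned in the rows of the row-major matrix A
\endcode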
// // Via the following functions, which wrap the * LAPACK functions \c ssyevx() and \c dsyevx(), it // is possible to compute * a subset of eigenvalues and/or eigenvectors of a symmetric matrix: * * \code namespace blaze { * * void syevx( char jobz, char range, char uplo, int n, float* A, int lda, float * vl, float vu, int il, int iu, float abstol, int* m, float* w, float* Z, * int ldz, float* work, int lwork, int* iwork, int* ifail, int* info ); * * void syevx( char jobz, char range, char uplo, int n, double* A, int lda, * double vl, double vu, int il, int iu, double abstol, int* m, double* w, * double* Z, int ldz, double* work, int lwork, int* iwork, int* ifail, int* * info ); * * template< typename MT, bool SO, typename VT, bool TF > size_t syevx( * DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char uplo ); * * template< typename MT, bool SO, typename VT, bool TF, typename ST > size_t * syevx( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char uplo, ST low, ST * upp ); * * template< typename MT1, bool SO1, typename VT, bool TF, typename MT2, bool * SO2 > size_t syevx( DenseMatrix<MT1,SO1>& A, DenseVector<VT,TF>& w, * DenseMatrix<MT2,SO2>& Z, char uplo ); * * template< typename MT1, bool SO1, typename VT, bool TF, typename MT2, bool * SO2, typename ST > size_t syevx( DenseMatrix<MT1,SO1>& A, * DenseVector<VT,TF>& w, DenseMatrix<MT2,SO2>& Z, char uplo, ST low, ST upp * ); * * } // namespace blaze \endcode * * // The number of eigenvalues to be computed is specified by the lower bound * \c low and the upper // bound \c upp, which either form an integral or a * floating point range. // // In case \a low and \a upp are of integral * type, the function computes all eigenvalues in the // index range * \f$[low..upp]\f$. The \a num resulting real eigenvalues are stored in * ascending // order in the given vector \a w, which is either resized (if * possible) or expected to be a // \a num-dimensional vector. The * eigenvectors are returned in the rows of \a Z in case \a Z is // row-major * matrix and in the columns of \a Z in case \a Z is a column-major matrix. * \a Z is // resized (if possible) or expected to be a \a num-by-\a n * row-major matrix or a \a n-by-\a num // column-major matrix. // // In case * \a low and \a upp are of floating point type, the function computes all * eigenvalues // in the half-open interval \f$(low..upp]\f$. The resulting * real eigenvalues are stored in // ascending order in the given vector \a * w, which is either resized (if possible) or expected // to be an \a * n-dimensional vector. The eigenvectors are returned in the rows of \a Z in * case // \a Z is a row-major matrix and in the columns of \a Z in case \a Z * is a column-major matrix. // \a Z is resized (if possible) or expected to * be a \a n-by-\a n matrix. // // The functions fail if ... // // - ... the * given matrix \a A is not a square matrix; // - ... the given vector \a w * is a fixed size vector and the size doesn't match; // - ... the given * matrix \a Z is a fixed size matrix and the dimensions don't match; // - * ... the given \a uplo argument is neither \c 'L' nor \c 'U'; // - ... the * eigenvalue computation fails. // // The first two functions report failure * via the \c info argument, the last four functions throw // an exception in * case of an error. 
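//
// For instance (an illustrative sketch, not from the original documentation), all eigenvalues
// of a symmetric matrix within a half-open interval can be selected as follows:
//
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A{ { 2.0, 1.0, 0.0 },
                                                { 1.0, 2.0, 1.0 },
                                                { 0.0, 1.0, 2.0 } };
blaze::DynamicVector<double,blaze::columnVector> w;

// Computes all eigenvalues in the half-open interval (0.0..2.5]; the return value is
// the number of eigenvalues that were actually found
const size_t num = blaze::syevx( A, w, 'L', 0.0, 2.5 );
\endcode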
//
//
// \n \subsection lapack_eigenvalues_hermitian Hermitian Matrices
//
// The following functions provide an interface for the LAPACK functions \c cheev() and \c zheev(),
// which compute the eigenvalues and eigenvectors of the given Hermitian matrix:
//
\code
namespace blaze {

void heev( char jobz, char uplo, int n, complex<float>* A, int lda, float* w, complex<float>* work, int lwork, float* rwork, int* info );

void heev( char jobz, char uplo, int n, complex<double>* A, int lda, double* w, complex<double>* work, int lwork, double* rwork, int* info );

template< typename MT, bool SO, typename VT, bool TF >
void heev( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char jobz, char uplo );

} // namespace blaze
\endcode
//
// Alternatively, the following functions can be used, which provide an interface to the LAPACK
// functions \c cheevd() and \c zheevd(). In contrast to the \c heev() functions they use a
// divide-and-conquer strategy for the computation of the left and right eigenvectors:
//
\code
namespace blaze {

void heevd( char jobz, char uplo, int n, complex<float>* A, int lda, float* w, complex<float>* work, int lwork, float* rwork, int lrwork, int* iwork, int liwork, int* info );

void heevd( char jobz, char uplo, int n, complex<double>* A, int lda, double* w, complex<double>* work, int lwork, double* rwork, int lrwork, int* iwork, int liwork, int* info );

template< typename MT, bool SO, typename VT, bool TF >
void heevd( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char jobz, char uplo );

} // namespace blaze
\endcode
//
// The real eigenvalues are returned in ascending order in the given vector \a w. \a w is resized
// to the correct size (if possible and necessary). In case \a A is a row-major matrix, the left
// eigenvectors are returned in the rows of \a A, in case \a A is a column-major matrix, the right
// eigenvectors are returned in the columns of \a A.
//
// The functions fail if ...
//
// - ... the given matrix \a A is not a square matrix;
// - ... the given vector \a w is a fixed size vector and the size doesn't match;
// - ... the given \a jobz argument is neither \c 'V' nor \c 'N';
// - ... the given \a uplo argument is neither \c 'L' nor \c 'U';
// - ... the eigenvalue computation fails.
//
// The first two functions report failure via the \c info argument, the last function throws an
// exception in case of an error.
// // Via the following functions, which wrap the * LAPACK functions \c cheevx() and \c zheevx(), it // is possible to compute * a subset of eigenvalues and/or eigenvectors of an Hermitian matrix: * * \code namespace blaze { * * void heevx( char jobz, char range, char uplo, int n, complex<float>* A, int * lda, float vl, float vu, int il, int iu, float abstol, int* m, float* w, * complex<float>* Z, int ldz, complex<float>* work, int lwork, float* rwork, * int* iwork, int* ifail, int* info ); * * void heevx( char jobz, char range, char uplo, int n, complex<double>* A, int * lda, double vl, double vu, int il, int iu, double abstol, int* m, double* * w, complex<double>* Z, int ldz, complex<double>* work, int lwork, double* * rwork, int* iwork, int* ifail, int* info ); * * template< typename MT, bool SO, typename VT, bool TF > size_t heevx( * DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char uplo ); * * template< typename MT, bool SO, typename VT, bool TF, typename ST > size_t * heevx( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char uplo, ST low, ST * upp ); * * template< typename MT1, bool SO1, typename VT, bool TF, typename MT2, bool * SO2 > size_t heevx( DenseMatrix<MT1,SO1>& A, DenseVector<VT,TF>& w, * DenseMatrix<MT2,SO2>& Z, char uplo ); * * template< typename MT1, bool SO1, typename VT, bool TF, typename MT2, bool * SO2, typename ST > size_t heevx( DenseMatrix<MT1,SO1>& A, * DenseVector<VT,TF>& w, DenseMatrix<MT2,SO2>& Z, char uplo, ST low, ST upp * ); * * } // namespace blaze \endcode * * // The number of eigenvalues to be computed is specified by the lower bound * \c low and the upper // bound \c upp, which either form an integral or a * floating point range. // // In case \a low and \a upp are of integral * type, the function computes all eigenvalues in the // index range * \f$[low..upp]\f$. The \a num resulting real eigenvalues are stored in * ascending // order in the given vector \a w, which is either resized (if * possible) or expected to be a // \a num-dimensional vector. The * eigenvectors are returned in the rows of \a Z in case \a Z is // row-major * matrix and in the columns of \a Z in case \a Z is a column-major matrix. * \a Z is // resized (if possible) or expected to be a \a num-by-\a n * row-major matrix or a \a n-by-\a num // column-major matrix. // // In case * \a low and \a upp are of floating point type, the function computes all * eigenvalues // in the half-open interval \f$(low..upp]\f$. The resulting * real eigenvalues are stored in // ascending order in the given vector \a * w, which is either resized (if possible) or expected // to be an \a * n-dimensional vector. The eigenvectors are returned in the rows of \a Z in * case // \a Z is a row-major matrix and in the columns of \a Z in case \a Z * is a column-major matrix. // \a Z is resized (if possible) or expected to * be a \a n-by-\a n matrix. // // The functions fail if ... // // - ... the * given matrix \a A is not a square matrix; // - ... the given vector \a w * is a fixed size vector and the size doesn't match; // - ... the given * matrix \a Z is a fixed size matrix and the dimensions don't match; // - * ... the given \a uplo argument is neither \c 'L' nor \c 'U'; // - ... the * eigenvalue computation fails. // // The first two functions report failure * via the \c info argument, the last four functions throw // an exception in * case of an error. 
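//
// As an illustrative sketch (not part of the original documentation; the matrix values are made
// up), a subset of eigenvalues and eigenvectors of an Hermitian matrix can be computed like this:
//
\code
using cplx = std::complex<double>;  // requires the <complex> header

blaze::DynamicMatrix<cplx,blaze::rowMajor> A{ { cplx( 2.0, 0.0 ), cplx( 0.0, 1.0 ) },
                                              { cplx( 0.0,-1.0 ), cplx( 2.0, 0.0 ) } };
blaze::DynamicVector<double,blaze::columnVector> w;
blaze::DynamicMatrix<cplx,blaze::rowMajor> Z;

// Computes all eigenvalues in the half-open interval (0.0..10.0]; the associated
// eigenvectors are returned in the rows of the row-major matrix Z
const size_t num = blaze::heevx( A, w, Z, 'L', 0.0, 10.0 );
\endcode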
// // // \n \section lapack_singular_values Singular * Values/Singular Vectors // // The following functions provide an interface * for the LAPACK functions \c sgesvd(), \c dgesvd(), // \c cgesvd(), and \c * zgesvd(), which perform a singular value decomposition (SVD) on the given * // general matrix: * * \code namespace blaze { * * void gesvd( char jobu, char jobv, int m, int n, float* A, int lda, float* s, * float* U, int ldu, float* V, int ldv, float* work, int lwork, int* info ); * * void gesvd( char jobu, char jobv, int m, int n, double* A, int lda, double* * s, double* U, int ldu, double* V, int ldv, double* work, int lwork, int* * info ); * * void gesvd( char jobu, char jobv, int m, int n, complex<float>* A, int lda, * float* s, complex<float>* U, int ldu, complex<float>* V, int ldv, * complex<float>* work, int lwork, float* rwork, int* info ); * * void gesvd( char jobu, char jobv, int m, int n, complex<double>* A, int lda, * double* s, complex<double>* U, int ldu, complex<double>* V, int ldv, * complex<double>* work, int lwork, double* rwork, int* info ); * * template< typename MT, bool SO, typename VT, bool TF > void gesvd( * DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& s, char jobu, char jobv ); * * template< typename MT1, bool SO, typename MT2, typename VT, bool TF > void * gesvd( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& * s, char jobu, char jobv ); * * template< typename MT1, bool SO, typename VT, bool TF, typename MT2 > void * gesvd( DenseMatrix<MT1,SO>& A, DenseVector<VT,TF>& s, DenseMatrix<MT2,SO>& * V, char jobu, char jobv ); * * template< typename MT1, bool SO, typename MT2, typename VT, bool TF, typename * MT3 > void gesvd( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, * DenseVector<VT,TF>& s, DenseMatrix<MT3,SO>& V, char jobu, char jobv ); * * } // namespace blaze \endcode * * // Alternatively, the following functions can be used, which provide an * interface to the LAPACK // functions \c sgesdd(), \c dgesdd(), \c * cgesdd(), and \c zgesdd(). 
In contrast to the \c gesvd() // functions they * compute the singular value decomposition (SVD) of the given general matrix * by // applying a divide-and-conquer strategy for the computation of the * left and right singular // vectors: * * \code namespace blaze { * * void gesdd( char jobz, int m, int n, float* A, int lda, float* s, float* U, * int ldu, float* V, int ldv, float* work, int lwork, int* iwork, int* info * ); * * void gesdd( char jobz, int m, int n, double* A, int lda, double* s, double* * U, int ldu, double* V, int ldv, double* work, int lwork, int* iwork, int* * info ); * * void gesdd( char jobz, int m, int n, complex<float>* A, int lda, float* s, * complex<float>* U, int ldu, complex<float>* V, int ldv, complex<float>* * work, int lwork, float* rwork, int* iwork, int* info ); * * void gesdd( char jobz, int m, int n, complex<double>* A, int lda, double* s, * complex<double>* U, int ldu, complex<double>* V, int ldv, complex<double>* * work, int lwork, double* rwork, int* iwork, int* info ); * * template< typename MT, bool SO, typename VT, bool TF > void gesdd( * DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& s ); * * template< typename MT1, bool SO, typename MT2, typename VT, bool TF > void * gesdd( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& * s, char jobz ); * * template< typename MT1, bool SO, typename MT2, typename VT, bool TF > void * gesdd( DenseMatrix<MT1,SO>& A, DenseVector<VT,TF>& s, DenseMatrix<MT2,SO>& * V, char jobz ); * * template< typename MT1, bool SO, typename MT2, typename VT, bool TF, typename * MT3 > void gesdd( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, * DenseVector<VT,TF>& s, DenseMatrix<MT3,SO>& V, char jobz ); * * } // namespace blaze \endcode * * // The resulting decomposition has the form * * \f[ A = U \cdot S \cdot V, \f] * * // where \a S is a \a m-by-\a n matrix, which is zero except for its min(\a * m,\a n) diagonal // elements, \a U is an \a m-by-\a m orthogonal matrix, * and \a V is a \a n-by-\a n orthogonal // matrix. The diagonal elements of * \a S are the singular values of \a A, the first min(\a m,\a n) // columns * of \a U and rows of \a V are the left and right singular vectors of \a A, * respectively. // // The resulting min(\a m,\a n) real and non-negative * singular values are returned in descending // order in the vector \a s, * which is resized to the correct size (if possible and necessary). 
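//
// For illustration (this example is not part of the original documentation; the matrix sizes are
// made up), the singular values of a general matrix can be computed without singular vectors via
// the divide-and-conquer variant:
//
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A( 5UL, 8UL );
// ... Initialization of A

blaze::DynamicVector<double,blaze::columnVector> s;  // Resized to min(m,n) = 5 (if possible)

blaze::gesdd( A, s );  // Computes only the singular values of A
\endcode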
// // * Via the following functions, which wrap the LAPACK functions \c sgesvdx(), * \c dgesvdx(), // \c cgesvdx(), and \c zgesvdx(), it is possible to compute * a subset of singular values and/or // vectors: * * \code namespace blaze { * * void gesvdx( char jobu, char jobv, char range, int m, int n, float* A, int * lda, float vl, float vu, int il, int iu, int* ns, float* s, float* U, int * ldu, float* V, int ldv, float* work, int lwork, int* iwork, int* info ); * * void gesvdx( char jobu, char jobv, char range, int m, int n, double* A, int * lda, double vl, double vu, int il, int iu, int* ns, double* s, double* U, * int ldu, double* V, int ldv, double* work, int lwork, int* iwork, int* * info ); * * void gesvdx( char jobu, char jobv, char range, int m, int n, complex<float>* * A, int lda, float vl, float vu, int il, int iu, int* ns, float* s, * complex<float>* U, int ldu, complex<float>* V, int ldv, complex<float>* * work, int lwork, float* rwork, int* iwork, int* info ); * * void gesvdx( char jobu, char jobv, char range, int m, int n, complex<double>* * A, int lda, double vl, double vu, int il, int iu, int* ns, double* s, * complex<double>* U, int ldu, complex<double>* V, int ldv, complex<double>* * work, int lwork, double* rwork, int* iwork, int* info ); * * template< typename MT, bool SO, typename VT, bool TF > size_t gesvdx( * DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& s ); * * template< typename MT, bool SO, typename VT, bool TF, typename ST > size_t * gesvdx( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& s, ST low, ST upp ); * * template< typename MT1, bool SO, typename MT2, typename VT, bool TF > size_t * gesvdx( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, * DenseVector<VT,TF>& s ); * * template< typename MT1, bool SO, typename MT2, typename VT, bool TF, typename * ST > size_t gesvdx( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, * DenseVector<VT,TF>& s, ST low, ST upp ); * * template< typename MT1, bool SO, typename VT, bool TF, typename MT2 > size_t * gesvdx( DenseMatrix<MT1,SO>& A, DenseVector<VT,TF>& s, * DenseMatrix<MT2,SO>& V ); * * template< typename MT1, bool SO, typename VT, bool TF, typename MT2, typename * ST > size_t gesvdx( DenseMatrix<MT1,SO>& A, DenseVector<VT,TF>& s, * DenseMatrix<MT2,SO>& V, ST low, ST upp ); * * template< typename MT1, bool SO, typename MT2, typename VT, bool TF, typename * MT3 > size_t gesvdx( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, * DenseVector<VT,TF>& s, DenseMatrix<MT3,SO>& V ); * * template< typename MT1, bool SO, typename MT2, typename VT, bool TF, typename * MT3, typename ST > size_t gesvdx( DenseMatrix<MT1,SO>& A, * DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, DenseMatrix<MT3,SO>& V, ST * low, ST upp ); * * } // namespace blaze \endcode * * // The number of singular values to be computed is specified by the lower * bound \a low and the // upper bound \a upp, which either form an integral * or a floating point range. // // In case \a low and \a upp form are of * integral type, the function computes all singular values // in the index * range \f$[low..upp]\f$. The \a num resulting real and non-negative * singular values // are stored in descending order in the given vector \a * s, which is either resized (if possible) // or expected to be a \a * num-dimensional vector. The resulting left singular vectors are stored // * in the given matrix \a U, which is either resized (if possible) or * expected to be a // \a m-by-\a num matrix. 
The resulting right singular * vectors are stored in the given matrix \a V, // which is either resized * (if possible) or expected to be a \a num-by-\a n matrix. // // In case \a * low and \a upp are of floating point type, the function computes all * singular values // in the half-open interval \f$(low..upp]\f$. The * resulting real and non-negative singular values // are stored in * descending order in the given vector \a s, which is either resized (if * possible) // or expected to be a min(\a m,\a n)-dimensional vector. The * resulting left singular vectors are // stored in the given matrix \a U, * which is either resized (if possible) or expected to be a // \a * m-by-min(\a m,\a n) matrix. The resulting right singular vectors are * stored in the given // matrix \a V, which is either resized (if possible) * or expected to be a min(\a m,\a n)-by-\a n // matrix. // // The functions * fail if ... // // - ... the given matrix \a U is a fixed size matrix and * the dimensions don't match; // - ... the given vector \a s is a fixed * size vector and the size doesn't match; // - ... the given matrix \a V is * a fixed size matrix and the dimensions don't match; // - ... the given * scalar values don't form a proper range; // - ... the singular value * decomposition fails. // // The first four functions report failure via the * \c info argument, the remaining functions throw // an exception in case of * an error. // // // \n Previous: \ref blas_functions &nbsp; &nbsp; Next: * \ref block_vectors_and_matrices \n */ //************************************************************************************************* //**Block Vectors and Matrices ********************************************************************* /* * !\page block_vectors_and_matrices Block Vectors and Matrices // // * \tableofcontents // // // \n \section block_vectors_and_matrices_general * General Concepts // <hr> // // In addition to fundamental element types, * the \b Blaze library supports vectors and matrices // with non-fundamental * element type. For instance, it is possible to define block matrices by // * using a matrix type as the element type: * * \code using blaze::DynamicMatrix; using blaze::DynamicVector; using * blaze::rowMajor; using blaze::columnVector; * * DynamicMatrix< DynamicMatrix<double,rowMajor>, rowMajor > A; DynamicVector< * DynamicVector<double,columnVector >, columnVector > x, y; * * // ... Resizing and initialization * * y = A * x; \endcode * * // The matrix/vector multiplication in this example runs fully parallel and * uses vectorization // for every inner matrix/vector multiplication and * vector addition. // // // \n \section block_vectors_and_matrices_pitfalls * Pitfalls // <hr> // // The only thing to keep in mind when using * non-fundamental element types is that all operations // between the * elements have to be well defined. More specifically, the size of vector * and matrix // elements has to match. The attempt to combine two * non-matching elements results in either a // compilation error (in case of * statically sized elements) or an exception (for dynamically sized // * elements): * * \code DynamicVector< StaticVector<int,2UL> > a; DynamicVector< * StaticVector<int,3UL> > b; * * DynamicVector< DynamicVector<int> > c( a + b ); // Compilation error: * element size doesn't match \endcode * * // Therefore please don't forget that dynamically sized elements (e.g. \c * blaze::DynamicVector, // \c blaze::HybridVector, \c blaze::DynamicMatrix, * \c blaze::HybridMatrix, ...) 
need to be sized // accordingly upfront. // * // // \n \section block_vectors_and_matrices_examples Examples // <hr> // * // The first example demonstrates the multiplication between a statically * sized block matrix // and a block vector: * * \code using namespace blaze; * * // ( ( 1 1 ) ( 2 2 ) ) ( ( 1 ) ) ( ( 10 ) ) // ( ( 1 1 ) ( 2 2 ) ) ( * ( 1 ) ) ( ( 10 ) ) // ( ) * ( ) = ( ) // ( * ( 3 3 ) ( 4 4 ) ) ( ( 2 ) ) ( ( 22 ) ) // ( ( 3 3 ) ( 4 4 ) ) ( ( * 2 ) ) ( ( 22 ) ) * * using M2x2 = StaticMatrix<int,2UL,2UL,rowMajor>; using V2 = * StaticVector<int,2UL,columnVector>; * * DynamicMatrix<M2x2,rowMajor> A{ { M2x2(1), M2x2(2) }, { M2x2(3), M2x2(4) } }; * * DynamicVector<V2,columnVector> x{ V2(1), V2(2) }; * * DynamicVector<V2,columnVector> y( A * x ); \endcode * * // The second example shows the multiplication between a compressed block * matrix with blocks of // varying size and a compressed block vector: * * \code using namespace blaze; * * // ( ( 1 -2 3 ) ( 5 -1 ) ) ( ( 1 ) ) ( ( -3 ) ) // ( ( 4 1 0 * ) ( 1 2 ) ) ( ( 0 ) ) ( ( 7 ) ) // ( ( 0 2 4 ) ( * 3 1 ) ) ( ( 1 ) ) ( ( 3 ) ) // ( ) ( * ) ( ) // ( ( 1 ) ) * ( ( 2 ) ) = ( ( 2 ) * ) // ( ) ( ) ( ) // ( ( 0 * -1 1 ) ( 1 0 ) ) ( ( -1 ) ) ( ( 0 ) ) // ( ( 2 -1 2 ) * ( 0 1 ) ) ( ( 2 ) ) ( ( 6 ) ) * * using M3x3 = HybridMatrix<int,3UL,3UL,rowMajor>; using V3 = * HybridVector<int,3UL,columnVector>; * * CompressedMatrix<M3x3,rowMajor> A( 3UL, 3UL, 5UL ); A(0,0) = M3x3{ { 1, -2, 3 * }, { 4, 1, 0 }, { 0, 2, 4 } }; A(0,2) = M3x3{ { 5, -1 }, { 1, 2 }, { 3, 1 * } }; A(1,1) = M3x3{ { 1 } }; A(2,0) = M3x3{ { 0, -1, 1 }, { 2, -1, 2 } }; * A(2,2) = M3x3{ { 1, 0 }, { 0, 1 } }; * * CompressedVector<V3,columnVector> x( 3UL, 3UL ); x[0] = V3{ 1, 0, 1 }; x[1] = * V3{ 2 }; x[2] = V3{ -1, 2 }; * * CompressedVector<V3,columnVector> y( A * x ); \endcode * * // \n Previous: \ref lapack_functions &nbsp; &nbsp; Next: \ref * intra_statement_optimization \n */ //************************************************************************************************* //**Intra - Statement Optimization ******************************************************************* /* * !\page intra_statement_optimization Intra-Statement Optimization // // One * of the prime features of the \b Blaze library is the automatic * intra-statement optimization. // In order to optimize the overall * performance of every single statement \b Blaze attempts to // rearrange * the operands based on their types. For instance, the following addition of * dense and // sparse vectors * * \code blaze::DynamicVector<double> d1, d2, d3; * blaze::CompressedVector<double> s1; * * // ... Resizing and initialization * * d3 = d1 + s1 + d2; \endcode * * // is automatically rearranged and evaluated as * * \code // ... d3 = d1 + d2 + s1; // <- Note that s1 and d2 have been * rearranged \endcode * * // This order of operands is highly favorable for the overall performance * since the addition of // the two dense vectors \c d1 and \c d2 can be * handled much more efficiently in a vectorized // fashion. // // This * intra-statement optimization can have a tremendous effect on the * performance of a statement. // Consider for instance the following * computation: * * \code blaze::DynamicMatrix<double> A, B; blaze::DynamicVector<double> x, y; * * // ... Resizing and initialization * * y = A * B * x; \endcode * * // Since multiplications are evaluated from left to right, this statement * would result in a // matrix/matrix multiplication, followed by a * matrix/vector multiplication. 
However, if the // right subexpression is * evaluated first, the performance can be dramatically improved since the // * matrix/matrix multiplication can be avoided in favor of a second * matrix/vector multiplication. // The \b Blaze library exploits this by * automatically restructuring the expression such that the // right * multiplication is evaluated first: * * \code // ... y = A * ( B * x ); \endcode * * // Note however that although this intra-statement optimization may result in * a measurable or // even significant performance improvement, this behavior * may be undesirable for several reasons, // for instance because of * numerical stability. Therefore, in case the order of evaluation matters, * // the best solution is to be explicit and to separate a statement into * several statements: * * \code blaze::DynamicVector<double> d1, d2, d3; * blaze::CompressedVector<double> s1; * * // ... Resizing and initialization * * d3 = d1 + s1; // Compute the dense vector/sparse vector addition first ... * d3 += d2; // ... and afterwards add the second dense vector \endcode * * \code // ... blaze::DynamicMatrix<double> A, B, C; * blaze::DynamicVector<double> x, y; * * // ... Resizing and initialization * * C = A * B; // Compute the left-hand side matrix-matrix multiplication first * ... y = C * x; // ... before the right-hand side matrix-vector * multiplication \endcode * * // Alternatively, it is also possible to use the \c eval() function to fix * the order of evaluation: * * \code blaze::DynamicVector<double> d1, d2, d3; * blaze::CompressedVector<double> s1; * * // ... Resizing and initialization * * d3 = d1 + eval( s1 + d2 ); \endcode * * \code blaze::DynamicMatrix<double> A, B; blaze::DynamicVector<double> x, y; * * // ... Resizing and initialization * * y = eval( A * B ) * x; \endcode * * // \n Previous: \ref block_vectors_and_matrices &nbsp; &nbsp; Next: \ref faq * \n */ //************************************************************************************************* //**FAQ ******************************************************************************************** /* * !\page faq Frequently Asked Questions (FAQ) // // \tableofcontents // // * // <hr> // \section faq_padding A StaticVector/StaticMatrix is larger than * expected. Is this a bug? // // The size of a \c StaticVector, \c * StaticMatrix, \c HybridVector, or \c HybridMatrix can // indeed be larger * than expected: * * \code StaticVector<int,3> a; StaticMatrix<int,3,3> A; * * sizeof( a ); // Evaluates to 16, 32, or even 64, but not 12 sizeof( A ); // * Evaluates to 48, 96, or even 144, but not 36 \endcode * * // In order to achieve the maximum possible performance the \b Blaze library * tries to enable // SIMD vectorization even for small vectors. For that * reason \b Blaze by default uses padding // elements for all dense vectors * and matrices to guarantee that at least a single SIMD vector // can be * loaded. 
// Depending on the used SIMD technology this can significantly increase the size of a
// \c StaticVector, \c StaticMatrix, \c HybridVector or \c HybridMatrix:

   \code
   StaticVector<int,3> a;
   StaticMatrix<int,3,3> A;

   sizeof( a );  // Evaluates to 16 in case of SSE, 32 in case of AVX, and 64 in case of AVX-512
                 // (under the assumption that an integer occupies 4 bytes)
   sizeof( A );  // Evaluates to 48 in case of SSE, 96 in case of AVX, and 144 in case of AVX-512
                 // (under the assumption that an integer occupies 4 bytes)
   \endcode

// The configuration file <tt>./blaze/config/Optimizations.h</tt> provides a compile time switch
// that can be used to (de-)activate padding:

   \code
   #define BLAZE_USE_PADDING 1
   \endcode

// Alternatively it is possible to (de-)activate padding via command line or by defining this
// symbol manually before including any \b Blaze header file:

   \code
   #define BLAZE_USE_PADDING 1
   #include <blaze/Blaze.h>
   \endcode

// If \c BLAZE_USE_PADDING is set to 1, padding is enabled for all dense vectors and matrices; if
// it is set to 0, padding is disabled. Note however that disabling padding can considerably reduce
// the performance of all dense vector and matrix operations!
//
//
// <hr>
// \section faq_alignment Despite disabling padding, a StaticVector/StaticMatrix is still larger than expected. Is this a bug?
//
// Despite disabling padding via the \c BLAZE_USE_PADDING compile time switch (see \ref faq_padding),
// the size of a \c StaticVector, \c StaticMatrix, \c HybridVector, or \c HybridMatrix can still
// be larger than expected:

   \code
   #define BLAZE_USE_PADDING 0
   #include <blaze/Blaze.h>

   StaticVector<int,3> a;
   StaticVector<int,5> b;

   sizeof( a );  // Always evaluates to 12
   sizeof( b );  // Evaluates to 32 with SSE (larger than expected) and to 20 with AVX or AVX-512 (expected)
   \endcode

// The reason for this behavior is the used SIMD technology. If SSE is used, which provides 128
// bit wide registers, a single SIMD pack can usually hold 4 integers (128 bit divided by 32 bit).
// Since the second vector contains enough elements, it is possible to benefit from vectorization.
// However, SSE requires an alignment of 16 bytes, which ultimately results in a total size of
// 32 bytes for the \c StaticVector (2 times 16 bytes due to 5 integer elements). If AVX or AVX-512
// is used, which provide 256 bit or 512 bit wide registers, a single SIMD vector can hold 8 or 16
// integers, respectively. In that case not even the second vector holds enough elements to benefit
// from vectorization, which is why \b Blaze does not enforce a 32 byte (for AVX) or even 64 byte
// alignment (for AVX-512).
//
// It is possible to disable vectorization entirely via the compile time switch in the
// <tt>./blaze/config/Vectorization.h</tt> configuration file:

   \code
   #define BLAZE_USE_VECTORIZATION 1
   \endcode

// It is also possible to (de-)activate vectorization via command line or by defining this symbol
// manually before including any \b Blaze header file:

   \code
   #define BLAZE_USE_VECTORIZATION 1
   #include <blaze/Blaze.h>
   \endcode

// In case the switch is set to 1, vectorization is enabled and the \b Blaze library is allowed
// to use intrinsics and the necessary alignment to speed up computations. In case the switch is
// set to 0, vectorization is disabled entirely and the \b Blaze library chooses default,
// non-vectorized functionality for the operations.
// Note that deactivating the vectorization may pose a severe performance limitation for a large
// number of operations!
//
//
// <hr>
// \section faq_blas To what extent does Blaze make use of BLAS functions under the hood?
//
// Currently the only BLAS functions that are utilized by \b Blaze are the \c gemm() functions
// for the multiplication of two dense matrices (i.e. \c sgemm(), \c dgemm(), \c cgemm(), and
// \c zgemm()). All other operations are always and unconditionally performed by native \b Blaze
// kernels.
//
// The \c BLAZE_BLAS_MODE config switch (see <tt>./blaze/config/BLAS.h</tt>) determines whether
// \b Blaze is allowed to use BLAS kernels. If \c BLAZE_BLAS_MODE is set to 0, \b Blaze does not
// utilize the BLAS kernels and unconditionally uses its own custom kernels. If \c BLAZE_BLAS_MODE
// is set to 1, \b Blaze is allowed to choose between using BLAS kernels or its own custom kernels.
// In case of the dense matrix multiplication this decision is based on the size of the dense
// matrices: for large matrices \b Blaze uses the BLAS kernels, for small matrices it uses its own
// custom kernels. The threshold for this decision can be configured via the
// \c BLAZE_DMATDMATMULT_THRESHOLD, \c BLAZE_DMATTDMATMULT_THRESHOLD,
// \c BLAZE_TDMATDMATMULT_THRESHOLD and \c BLAZE_TDMATTDMATMULT_THRESHOLD config switches
// (see <tt>./blaze/config/Thresholds.h</tt>).
//
// Please note that the extent to which \b Blaze uses BLAS kernels can change in future releases
// of \b Blaze!
//
//
// <hr>
// \section faq_lapack To what extent does Blaze make use of LAPACK functions under the hood?
//
// \b Blaze uses LAPACK functions for matrix decompositions, matrix inversion, computing
// determinants and eigenvalues, and the SVD. In contrast to the BLAS functionality (see
// \ref faq_blas), you cannot disable LAPACK or switch to custom kernels. In case you try to use
// any of these functionalities but do not provide (i.e. link) a LAPACK library, you will get
// link time errors.
//
// Please note that the extent to which \b Blaze uses LAPACK kernels can change in future releases
// of \b Blaze!
//
//
// <hr>
// \section faq_compile_times The compile time is too high if I include <blaze/Blaze.h>. Can I reduce it?
//
// The include file <tt><blaze/Blaze.h></tt> includes the entire functionality of the \b Blaze
// library, which by now is several hundred thousand lines of source code. That means that a lot
// of source code has to be parsed whenever <tt><blaze/Blaze.h></tt> is encountered. However, it
// is rare that everything is required within a single compilation unit. Therefore it is easily
// possible to reduce compile times by including only those \b Blaze features that are used within
// the compilation unit. For instance, instead of including <tt><blaze/Blaze.h></tt> it could be
// enough to include <tt><blaze/math/DynamicVector.h></tt>, which would reduce the compilation
// times by about 20%; a minimal sketch of this selective-include approach is given below.
//
// Additionally we are taking care to implement new \b Blaze functionality such that compile times
// do not explode, and we try to reduce the compile times of existing features. Thus newer releases
// of \b Blaze can also improve compile times.
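// The following is only an illustrative sketch of the selective-include approach described above.
// It assumes that the compilation unit needs nothing beyond dynamic vectors and matrices; the
// specific pair of headers shown here is an assumption chosen for the example, not a prescription:

   \code
   // Selective includes instead of the all-in-one <blaze/Blaze.h>
   #include <blaze/math/DynamicMatrix.h>
   #include <blaze/math/DynamicVector.h>

   int main()
   {
      blaze::DynamicMatrix<double> A( 100UL, 100UL, 1.0 );  // 100x100 matrix, all elements 1.0
      blaze::DynamicVector<double> x( 100UL, 2.0 );         // vector of size 100, all elements 2.0

      blaze::DynamicVector<double> y( A * x );  // Uses only the features pulled in above

      return 0;
   }
   \endcode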
// // \n * Previous: \ref intra_statement_optimization &nbsp; &nbsp; Next: \ref * issue_creation_guidelines \n */ //************************************************************************************************* //**FAQ ******************************************************************************************** /* * !\page issue_creation_guidelines Issue Creation Guidelines // // * \tableofcontents // // // One of the most important aspects of the \b * Blaze project is the // <a * href="https://bitbucket.org/blaze-lib/blaze/issues">issue management</a> * on the official // \b Blaze Bitbucket page. We cordially invite all \b * Blaze users to submit feature requests // and bug reports, as we believe * that this is a significant part of making \b Blaze a better // library. * However, we are asking to follow a small set of guidelines when creating * an issue // to facilitate the issue management on our side and also to * make issues more useful for users // of \b Blaze. // // // <hr> // * \section issues_title Title // // The title is the most important detail * of an issue. A well chosen title makes it easy to grasp // the idea of an * issue and improves the discoverability. Therefore, please choose a title * that // is ... // // - ... as descriptive as possible; // - ... as * concise as possible; // - ... as unambiguous as possible. // // Also, * please create a separate issue for each idea/problem/etc. A very general * title or an // \"and\" in the title could be an indication that the issue * is not specific enough and should // be split into several issues. // // * \subsection issues_title_good_examples Good Examples // // - \"Provide * support for AVX-512 SIMD operations\" // - \"Add support for the Boost * Multiprecision Library\" // - \"Introduce reduction operations into * Blaze\" // - \"Compilation error on KNL with -march=knl\" // // * \subsection issues_title_bad_examples Bad Examples // // - \"Several * requests\" (instead create separate issues for each single request) // - * \"Improve the performance\" (instead specify which operation should * perform better) // - \"Blaze library compilation error\" (instead try to * be more specific) // // // <hr> // \section issues_description Description * // // The description should help us to understand your idea or problem in * as much detail as possible. // Also, it helps to clearly spell out your * expectations (how a feature is supposed to work, how // the behavior * should be, etc.). Please spend a couple of minutes to try to make the * description // as comprehensive as possible. // // // <hr> // \section * issues_assignee Assignee // // There is no need to assign the issue to a * particular person. It is perfectly ok if you just // ignore this setting. * // // // <hr> // \section issues_kind Kind of Issue // // There are four * kinds of issues available in the Bitbucket issue tracker: \ref * issues_kind_bug, // \ref issues_kind_enhancement, \ref * issues_kind_proposal, and \ref issues_kind_task. In the // following we * try to give guidelines on which kind to choose for a particular issue: // * // \subsection issues_kind_bug Bug // // Please choose the category \ref * issues_kind_bug if ... // // - ... you experience a compilation error * despite your best efforts to get it right; // - ... you experience a * crash/failure despite your best efforts to get it right; // - ... you * experience problems when combining features; // - ... a feature does not * work as specified/documented (i.e. can be considered broken). 
// // Please * \b don't choose the category \ref issues_kind_bug if ... // // - ... you * feel a feature should work differently than it currently does (instead * create a // \ref issues_kind_proposal with a convincing title and * description); // - ... you are not sure how to use a feature (instead * create an \ref issues_kind_enhancement // issue to extend the * documentation); // - ... you are missing a feature (instead create a \ref * issues_kind_proposal or // \ref issues_kind_enhancement issue). // * // If you select the category \ref issues_kind_bug, please also try to * provide a minimum example // that fails. That helps us to minimize the * time to resolve the bug. // // As we try to keep \b Blaze bug-free, we * will always prioritize bug issues. However, we will // also quickly close * bug issues as \"wontfix\" if the described issue is not a bug (i.e. one of * // the problems mentioned above). We will \b not relabel a bug issue to * \ref issues_kind_enhancement // or \ref issues_kind_proposal, even if they * would be reasonable extensions to \b Blaze. // // \subsection * issues_kind_enhancement Enhancement // // Please choose the category \ref * issues_kind_enhancement if ... // // - ... you need an add-on to an * existing feature; // - ... you need an extension of an existing feature; * // - ... you need an extended documentation for an existing feature. // * // \ref issues_kind_enhancement is very similar to \ref * issues_kind_proposal, so we don't mind // if an \ref * issues_kind_enhancement is labeled as a \ref issues_kind_proposal or vice * versa. // Just make sure you don't request an extension or new feature as * a \ref issues_kind_bug. // // \subsection issues_kind_proposal Proposal // * // Please choose the category \ref issues_kind_proposal if ... // // - * ... you want to request a new feature; // - ... you want to change an * existing feature. // // \ref issues_kind_proposal is very similar to \ref * issues_kind_enhancement, so we don't mind if // a \ref * issues_kind_proposal is labeled as an \ref issues_kind_enhancement or vice * versa. Just // make sure you don't request an extension or new feature as * a \ref issues_kind_bug. // // \subsection issues_kind_task Task // // * Please choose the category \ref issues_kind_task if ... // // - ... you * want us to do something not feature related; // - ... you have something * else in mind which does not fall in the other three categories. // // // * <hr> // \section issues_priority Priority // // Via the priority of an * issue you can tell us how important the issue is to you. Therefore the // * priority can have an influence on when we will deal with the issue. * However, unfortunately we // don't have an infinite amount of time and we * can not deal with an arbitrary amount of issues // at the same time. We * will therefore take the priority into account, but mainly schedule the // * issues based on impact to all \b Blaze users and the estimated time to * resolve it. // // You can choose between \ref issues_priority_blocker, * \ref issues_priority_critical, // \ref issues_priority_major, \ref * issues_priority_minor, and \ref issues_priority_trivial. // // \subsection * issues_priority_blocker Blocker // // Please choose a \ref * issues_priority_blocker priority if ... // // - ... you cannot work with * \b Blaze due to the described \ref issues_kind_bug; // - ... the \ref * issues_kind_bug likely has an influence on \b all \b Blaze users. 
// // * Please note that the categories \ref issues_kind_enhancement or \ref * issues_kind_proposal // should never be a \ref issues_priority_blocker! // * // \subsection issues_priority_critical Critical // // Please choose a * \ref issues_priority_critical priority if ... // // - ... you can work * around a \ref issues_kind_bug, but the workaround is (much) slower or * awful; // - ... you cannot use \b Blaze without the proposed feature; // * - ... you consider it to be essential for \b all \b Blaze users. // // * \subsection issues_priority_major Major // // Please choose a \ref * issues_priority_major priority if ... // // - ... a \ref issues_kind_bug * or feature request is not \ref issues_priority_critical, but // * still very important to you; // - ... you consider it to have a \ref * issues_priority_major impact on most \b Blaze users. // // The \ref * issues_priority_major category is the default setting in Bitbucket and we * therefore // consider it as the default priority for issues. // // * \subsection issues_priority_minor Minor // // Please choose a \ref * issues_priority_minor priority if ... // // - ... a \ref issues_kind_bug * does not affect many \b Blaze users; // - ... a feature request would * only be useful for a small number of \b Blaze users; // - ... a feature * would be nice to have, but is not particularly important. // // * \subsection issues_priority_trivial Trivial // // Please choose a \ref * issues_priority_trivial priority if ... // // - ... a \ref * issues_kind_bug hardly affects anyone; // - ... a feature request would * only be useful for very few \b Blaze users; // - ... the expected time to * resolve an issue is very small. // // // <hr> // \section * issues_attachment Attachments // // You can always provide us with * additional information in the form of attachments. Feel free // to attach * something to the issue if ... // // - ... it can help us to analyze a * \ref issues_kind_bug; // - ... you have some source code that * demonstrates a problem; // - ... you already have a working prototype * that sketches the idea; // - ... you have additional resources that could * help us. // // We appreciate anything that simplifies our work and speeds * up our progress. 
// // \n Previous: \ref faq &nbsp; &nbsp; Next: \ref * blaze_references \n */ //************************************************************************************************* //**Blaze References ******************************************************************************* /* * !\page blaze_references Blaze References // // In case you need references * to the \b Blaze library (for papers or other publications), please // feel * free to use one of the following references: * * \code @misc{blazelib, author = "Klaus {Iglberger}", title = * "Blaze C++ Linear Algebra Library", howpublished = * "https://bitbucket.org/blaze-lib", year = 2012 } \endcode * * \code @article{iglberger2012_1, author = "Klaus {Iglberger} and Georg * {Hager} and Jan {Treibig} and Ulrich {R{\"u}de}", title = "Expression * Templates Revisited: A Performance Analysis of Current Methodologies", * journal = "SIAM Journal on Scientific Computing", year = 2012, volume * = 34(2), pages = C42--C69 } \endcode * * \code @inproceedings{iglberger2012_2, author = "Klaus {Iglberger} and * Georg {Hager} and Jan {Treibig} and Ulrich {R{\"u}de}", title = "High * Performance Smart Expression Template Math Libraries", booktitle = * "Proceedings of the 2nd International Workshop on New Algorithms and * Programming Models for the Manycore Era (APMM 2012) at HPCS 2012", year * = 2012 } \endcode * * // \n Previous: \ref issue_creation_guidelines */ //************************************************************************************************* #endif
par_csr_matvec.c
/*BHEADER********************************************************************** * Copyright (c) 2008, Lawrence Livermore National Security, LLC. * Produced at the Lawrence Livermore National Laboratory. * This file is part of HYPRE. See file COPYRIGHT for details. * * HYPRE is free software; you can redistribute it and/or modify it under the * terms of the GNU Lesser General Public License (as published by the Free * Software Foundation) version 2.1 dated February 1999. * * $Revision$ ***********************************************************************EHEADER*/ /****************************************************************************** * * Matvec functions for hypre_CSRMatrix class. * *****************************************************************************/ #include "_hypre_parcsr_mv.h" #include <assert.h> /*#ifdef HYPRE_USING_GPU extern "C" { void PackOnDevice(HYPRE_Complex *send_data,HYPRE_Complex *x_local_data, HYPRE_Int *send_map, HYPRE_Int begin,HYPRE_Int end,cudaStream_t s); } #endif */ /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixMatvec *--------------------------------------------------------------------------*/ // y = alpha*A*x + beta*b HYPRE_Int hypre_ParCSRMatrixMatvecOutOfPlace( HYPRE_Complex alpha, hypre_ParCSRMatrix *A, hypre_ParVector *x, HYPRE_Complex beta, hypre_ParVector *b, hypre_ParVector *y ) { hypre_ParCSRCommHandle **comm_handle; hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(A); hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(A); hypre_Vector *x_local = hypre_ParVectorLocalVector(x); hypre_Vector *b_local = hypre_ParVectorLocalVector(b); hypre_Vector *y_local = hypre_ParVectorLocalVector(y); HYPRE_BigInt num_rows = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_BigInt num_cols = hypre_ParCSRMatrixGlobalNumCols(A); hypre_Vector *x_tmp; HYPRE_BigInt x_size = hypre_ParVectorGlobalSize(x); HYPRE_BigInt b_size = hypre_ParVectorGlobalSize(b); HYPRE_BigInt y_size = hypre_ParVectorGlobalSize(y); HYPRE_Int num_vectors = hypre_VectorNumVectors(x_local); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(offd); HYPRE_Int ierr = 0; HYPRE_Int num_sends, i, j, jv, index, start; HYPRE_Int vecstride = hypre_VectorVectorStride( x_local ); HYPRE_Int idxstride = hypre_VectorIndexStride( x_local ); HYPRE_Complex *x_tmp_data, **x_buf_data; HYPRE_Complex *x_local_data = hypre_VectorData(x_local); /*--------------------------------------------------------------------- * Check for size compatibility. ParMatvec returns ierr = 11 if * length of X doesn't equal the number of columns of A, * ierr = 12 if the length of Y doesn't equal the number of rows * of A, and ierr = 13 if both are true. * * Because temporary vectors are often used in ParMatvec, none of * these conditions terminates processing, and the ierr flag * is informational only. 
*--------------------------------------------------------------------*/ PUSH_RANGE_PAYLOAD("PAR_CSR_MATVEC",5,x_size); hypre_assert( idxstride>0 ); if (num_cols != x_size) ierr = 11; if (num_rows != y_size || num_rows != b_size) ierr = 12; if (num_cols != x_size && (num_rows != y_size || num_rows != b_size)) ierr = 13; hypre_assert( hypre_VectorNumVectors(b_local)==num_vectors ); hypre_assert( hypre_VectorNumVectors(y_local)==num_vectors ); if ( num_vectors==1 ) x_tmp = hypre_SeqVectorCreate( num_cols_offd ); else { hypre_assert( num_vectors>1 ); x_tmp = hypre_SeqMultiVectorCreate( num_cols_offd, num_vectors ); } /*--------------------------------------------------------------------- * If there exists no CommPkg for A, a CommPkg is generated using * equally load balanced partitionings *--------------------------------------------------------------------*/ if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime(); #endif PUSH_RANGE("MPI_PACK",3); HYPRE_Int use_persistent_comm = 0; #ifdef HYPRE_USING_PERSISTENT_COMM use_persistent_comm = num_vectors == 1; // JSP TODO: we can use persistent communication for multi-vectors, // but then we need different communication handles for different // num_vectors. hypre_ParCSRPersistentCommHandle *persistent_comm_handle; #endif if ( use_persistent_comm ) { #ifdef HYPRE_USING_PERSISTENT_COMM PUSH_RANGE("PERCOMM1",0); persistent_comm_handle = hypre_ParCSRCommPkgGetPersistentCommHandle(1, comm_pkg); HYPRE_Int num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg); hypre_assert(num_cols_offd == hypre_ParCSRCommPkgRecvVecStart(comm_pkg, num_recvs)); hypre_VectorData(x_tmp) = (HYPRE_Complex *)persistent_comm_handle->recv_data; hypre_SeqVectorSetDataOwner(x_tmp, 0); POP_RANGE; #endif } else { comm_handle = hypre_CTAlloc(hypre_ParCSRCommHandle*, num_vectors, HYPRE_MEMORY_HOST); } hypre_SeqVectorInitialize(x_tmp); x_tmp_data = hypre_VectorData(x_tmp); num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); if (!use_persistent_comm) { x_buf_data = hypre_CTAlloc( HYPRE_Complex*, num_vectors , HYPRE_MEMORY_HOST); for ( jv=0; jv<num_vectors; ++jv ) x_buf_data[jv] = hypre_CTAlloc(HYPRE_Complex, hypre_ParCSRCommPkgSendMapStart (comm_pkg, num_sends), HYPRE_MEMORY_SHARED); } if ( num_vectors==1 ) { HYPRE_Int begin = hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0); HYPRE_Int end = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends); #if defined(HYPRE_USING_GPU) && defined(HYPRE_USING_UNIFIED_MEMORY) PUSH_RANGE("PERCOMM2DEVICE",4); #ifdef HYPRE_USING_PERSISTENT_COMM PackOnDevice((HYPRE_Complex*)persistent_comm_handle->send_data,x_local_data,hypre_ParCSRCommPkgSendMapElmts(comm_pkg),begin,end,HYPRE_STREAM(4)); //PrintPointerAttributes(persistent_comm_handle->send_data); #else #if defined(DEBUG_PACK_ON_DEVICE) hypre_CheckErrorDevice(cudaPeekAtLastError()); hypre_CheckErrorDevice(cudaDeviceSynchronize()); ASSERT_MANAGED(x_buf_data[0]); ASSERT_MANAGED(x_local_data); ASSERT_MANAGED(hypre_ParCSRCommPkgSendMapElmts(comm_pkg)); #endif /* printf("%d %d %d\n", PointerAttributes(x_buf_data[0]), PointerAttributes(x_local_data), PointerAttributes(hypre_ParCSRCommPkgSendMapElmts(comm_pkg))); */ PackOnDevice((HYPRE_Complex*)x_buf_data[0],x_local_data,hypre_ParCSRCommPkgSendMapElmts(comm_pkg),begin,end,HYPRE_STREAM(4)); #if defined(DEBUG_PACK_ON_DEVICE) hypre_CheckErrorDevice(cudaPeekAtLastError()); hypre_CheckErrorDevice(cudaDeviceSynchronize()); #endif #endif POP_RANGE; 
SetAsyncMode(1); hypre_CheckErrorDevice(cudaPeekAtLastError()); hypre_CheckErrorDevice(cudaDeviceSynchronize()); hypre_CSRMatrixMatvecOutOfPlace( alpha, diag, x_local, beta, b_local, y_local, 0); //hypre_SeqVectorUpdateHost(y_local); //hypre_SeqVectorUpdateHost(x_local); //hypre_SeqVectorUpdateHost(b_local); SetAsyncMode(0); #else #ifdef HYPRE_USING_MAPPED_OPENMP_OFFLOAD PUSH_RANGE("MPI_PACK_OMP",4); SyncVectorToHost(x_local); #endif #if defined(HYPRE_USING_OPENMP_OFFLOAD_NOT_USED) HYPRE_Int num_threads=64; HYPRE_Int num_teams = (end-begin+(end-begin)%num_threads)/num_threads; HYPRE_Int *local_send_map_elmts = comm_pkg->send_map_elmts; printf("USING OFFLOADED PACKING OF BUFER\n"); #pragma omp target teams distribute parallel for private(i) num_teams(num_teams) thread_limit(num_threads) is_device_ptr(x_local_data,x_buf_data,comm_pkg,local_send_map_elmts) #elif defined(HYPRE_USING_OPENMP) #pragma omp parallel for HYPRE_SMP_SCHEDULE #endif for (i = begin; i < end; i++) { #ifdef HYPRE_USING_PERSISTENT_COMM ((HYPRE_Complex *)persistent_comm_handle->send_data)[i - begin] #else x_buf_data[0][i - begin] #endif = x_local_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,i)]; } POP_RANGE; // "MPI_PACK_OMP" #endif } else for ( jv=0; jv<num_vectors; ++jv ) { index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) x_buf_data[jv][index++] = x_local_data[ jv*vecstride + idxstride*hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j) ]; } } hypre_assert( idxstride==1 ); /* ... The assert is because the following loop only works for 'column' storage of a multivector. This needs to be fixed to work more generally, at least for 'row' storage. This in turn, means either change CommPkg so num_sends is no.zones*no.vectors (not no.zones) or, less dangerously, put a stride in the logic of CommHandleCreate (stride either from a new arg or a new variable inside CommPkg). Or put the num_vector iteration inside CommHandleCreate (perhaps a new multivector variant of it). 
*/ #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime(); hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime(); #endif POP_RANGE; PUSH_RANGE("MPI_HALO_EXC_SEND",4); if (use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM hypre_ParCSRPersistentCommHandleStart(persistent_comm_handle); #endif } else { for ( jv=0; jv<num_vectors; ++jv ) { comm_handle[jv] = hypre_ParCSRCommHandleCreate ( 1, comm_pkg, x_buf_data[jv], &(x_tmp_data[jv*num_cols_offd]) ); } } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime(); #endif POP_RANGE; #if !defined(HYPRE_USING_GPU) || !defined(HYPRE_USING_UNIFIED_MEMORY) hypre_CSRMatrixMatvecOutOfPlace( alpha, diag, x_local, beta, b_local, y_local, 0); #endif #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime(); #endif PUSH_RANGE("MPI_HALO_EXC_RECV",6); if (use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM hypre_ParCSRPersistentCommHandleWait(persistent_comm_handle); #endif } else { for ( jv=0; jv<num_vectors; ++jv ) { hypre_ParCSRCommHandleDestroy(comm_handle[jv]); comm_handle[jv] = NULL; } hypre_TFree(comm_handle, HYPRE_MEMORY_HOST); } POP_RANGE; #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime(); #endif //hypre_SeqVectorUpdateDevice(x_tmp); #ifdef HYPRE_USING_MAPPED_OPENMP_OFFLOAD UpdateHRC(x_tmp); #endif if (num_cols_offd) hypre_CSRMatrixMatvec( alpha, offd, x_tmp, 1.0, y_local); //if (num_cols_offd) hypre_SeqVectorUpdateHost(y_local); //hypre_SeqVectorUpdateHost(x_tmp); #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime(); #endif PUSH_RANGE("MPI_UNPACK",5); hypre_SeqVectorDestroy(x_tmp); x_tmp = NULL; if (!use_persistent_comm) { for ( jv=0; jv<num_vectors; ++jv ) hypre_TFree(x_buf_data[jv], HYPRE_MEMORY_SHARED); hypre_TFree(x_buf_data, HYPRE_MEMORY_HOST); } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime(); #endif POP_RANGE; #if defined(HYPRE_USING_GPU) && defined(HYPRE_USING_UNIFIED_MEMORY) hypre_CheckErrorDevice(cudaStreamSynchronize(HYPRE_STREAM(4))); #endif POP_RANGE; // PAR_CSR return ierr; } HYPRE_Int hypre_ParCSRMatrixMatvec( HYPRE_Complex alpha, hypre_ParCSRMatrix *A, hypre_ParVector *x, HYPRE_Complex beta, hypre_ParVector *y ) { return hypre_ParCSRMatrixMatvecOutOfPlace(alpha, A, x, beta, y, y); } #ifdef HYPRE_USING_MAPPED_OPENMP_OFFLOAD HYPRE_Int hypre_ParCSRMatrixMatvec3( HYPRE_Complex alpha, hypre_ParCSRMatrix *A, hypre_ParVector *x, HYPRE_Complex beta, hypre_ParVector *y ) { HYPRE_Int rval=hypre_ParCSRMatrixMatvecOutOfPlace(alpha, A, x, beta, y, y); hypre_SeqVectorUpdateHost(y->local_vector); } HYPRE_Int hypre_ParCSRMatrixMatvecOutOfPlace3( HYPRE_Complex alpha, hypre_ParCSRMatrix *A, hypre_ParVector *x, HYPRE_Complex beta, hypre_ParVector *b, hypre_ParVector *y ) { hypre_ParCSRMatrixMatvecOutOfPlace(alpha,A,x,beta,b,y); hypre_SeqVectorUpdateHost(y->local_vector); } #endif /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixMatvecT * * Performs y <- alpha * A^T * x + beta * y * *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixMatvecT( HYPRE_Complex alpha, hypre_ParCSRMatrix *A, hypre_ParVector *x, HYPRE_Complex beta, hypre_ParVector *y ) { hypre_ParCSRCommHandle **comm_handle; hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(A); 
hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(A); hypre_Vector *x_local = hypre_ParVectorLocalVector(x); hypre_Vector *y_local = hypre_ParVectorLocalVector(y); hypre_Vector *y_tmp; HYPRE_Int vecstride = hypre_VectorVectorStride( y_local ); HYPRE_Int idxstride = hypre_VectorIndexStride( y_local ); HYPRE_Complex *y_tmp_data, **y_buf_data; HYPRE_Complex *y_local_data = hypre_VectorData(y_local); HYPRE_BigInt num_rows = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_BigInt num_cols = hypre_ParCSRMatrixGlobalNumCols(A); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(offd); HYPRE_BigInt x_size = hypre_ParVectorGlobalSize(x); HYPRE_BigInt y_size = hypre_ParVectorGlobalSize(y); HYPRE_Int num_vectors = hypre_VectorNumVectors(y_local); HYPRE_Int i, j, jv, index, start, num_sends; HYPRE_Int ierr = 0; if (y==NULL) { printf("NULLY %p\b", (void*) y); return 1; } /*--------------------------------------------------------------------- * Check for size compatibility. MatvecT returns ierr = 1 if * length of X doesn't equal the number of rows of A, * ierr = 2 if the length of Y doesn't equal the number of * columns of A, and ierr = 3 if both are true. * * Because temporary vectors are often used in MatvecT, none of * these conditions terminates processing, and the ierr flag * is informational only. *--------------------------------------------------------------------*/ if (num_rows != x_size) ierr = 1; if (num_cols != y_size) ierr = 2; if (num_rows != x_size && num_cols != y_size) ierr = 3; /*----------------------------------------------------------------------- *-----------------------------------------------------------------------*/ if ( num_vectors==1 ) { y_tmp = hypre_SeqVectorCreate(num_cols_offd); } else { y_tmp = hypre_SeqMultiVectorCreate(num_cols_offd,num_vectors); } /*--------------------------------------------------------------------- * If there exists no CommPkg for A, a CommPkg is generated using * equally load balanced partitionings *--------------------------------------------------------------------*/ if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime(); #endif HYPRE_Int use_persistent_comm = 0; #ifdef HYPRE_USING_PERSISTENT_COMM use_persistent_comm = num_vectors == 1; // JSP TODO: we can use persistent communication for multi-vectors, // but then we need different communication handles for different // num_vectors. 
hypre_ParCSRPersistentCommHandle *persistent_comm_handle; #endif if (use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM // JSP TODO: we should be also able to use persistent communication for multiple vectors persistent_comm_handle = hypre_ParCSRCommPkgGetPersistentCommHandle(2, comm_pkg); HYPRE_Int num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg); hypre_assert(num_cols_offd == hypre_ParCSRCommPkgRecvVecStart(comm_pkg, num_recvs)); hypre_VectorData(y_tmp) = (HYPRE_Complex *)persistent_comm_handle->send_data; hypre_SeqVectorSetDataOwner(y_tmp, 0); #endif } else { comm_handle = hypre_CTAlloc(hypre_ParCSRCommHandle*, num_vectors, HYPRE_MEMORY_HOST); } hypre_SeqVectorInitialize(y_tmp); num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); if (!use_persistent_comm) { y_buf_data = hypre_CTAlloc( HYPRE_Complex*, num_vectors , HYPRE_MEMORY_HOST); for ( jv=0; jv<num_vectors; ++jv ) y_buf_data[jv] = hypre_CTAlloc(HYPRE_Complex, hypre_ParCSRCommPkgSendMapStart (comm_pkg, num_sends), HYPRE_MEMORY_HOST); } y_tmp_data = hypre_VectorData(y_tmp); y_local_data = hypre_VectorData(y_local); hypre_assert( idxstride==1 ); /* only 'column' storage of multivectors * implemented so far */ #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime(); #endif if (num_cols_offd) { if (A->offdT) { // offdT is optional. Used only if it's present. hypre_CSRMatrixMatvec(alpha, A->offdT, x_local, 0.0, y_tmp); #ifdef HYPRE_USING_MAPPED_OPENMP_OFFLOAD SyncVectorToHost(y_tmp); #endif } else { hypre_CSRMatrixMatvecT(alpha, offd, x_local, 0.0, y_tmp); } } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime(); #endif if (use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM hypre_ParCSRPersistentCommHandleStart(persistent_comm_handle); #endif } else { for ( jv=0; jv<num_vectors; ++jv ) { /* this is where we assume multivectors are 'column' storage */ comm_handle[jv] = hypre_ParCSRCommHandleCreate ( 2, comm_pkg, &(y_tmp_data[jv*num_cols_offd]), y_buf_data[jv] ); } } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime(); #endif if (A->diagT) { // diagT is optional. Used only if it's present. 
hypre_CSRMatrixMatvec(alpha, A->diagT, x_local, beta, y_local); #ifdef HYPRE_USING_MAPPED_OPENMP_OFFLOAD SyncVectorToHost(y_local); #endif } else { hypre_CSRMatrixMatvecT(alpha, diag, x_local, beta, y_local); } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime(); #endif if (use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM hypre_ParCSRPersistentCommHandleWait(persistent_comm_handle); #endif } else { for ( jv=0; jv<num_vectors; ++jv ) { hypre_ParCSRCommHandleDestroy(comm_handle[jv]); comm_handle[jv] = NULL; } hypre_TFree(comm_handle, HYPRE_MEMORY_HOST); } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime(); hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime(); #endif if ( num_vectors==1 ) { index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) y_local_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)] #ifdef HYPRE_USING_PERSISTENT_COMM += ((HYPRE_Complex *)persistent_comm_handle->recv_data)[index++]; #else += y_buf_data[0][index++]; #endif } } else for ( jv=0; jv<num_vectors; ++jv ) { index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) y_local_data[ jv*vecstride + idxstride*hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j) ] += y_buf_data[jv][index++]; } } #ifdef HYPRE_USING_MAPPED_OPENMP_OFFLOAD UpdateHRC(y_local); #endif hypre_SeqVectorDestroy(y_tmp); y_tmp = NULL; if (!use_persistent_comm) { for ( jv=0; jv<num_vectors; ++jv ) hypre_TFree(y_buf_data[jv], HYPRE_MEMORY_HOST); hypre_TFree(y_buf_data, HYPRE_MEMORY_HOST); } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime(); #endif return ierr; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixMatvec_FF *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixMatvec_FF( HYPRE_Complex alpha, hypre_ParCSRMatrix *A, hypre_ParVector *x, HYPRE_Complex beta, hypre_ParVector *y, HYPRE_Int *CF_marker, HYPRE_Int fpt ) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommHandle *comm_handle; hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(A); hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(A); hypre_Vector *x_local = hypre_ParVectorLocalVector(x); hypre_Vector *y_local = hypre_ParVectorLocalVector(y); HYPRE_BigInt num_rows = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_BigInt num_cols = hypre_ParCSRMatrixGlobalNumCols(A); hypre_Vector *x_tmp; HYPRE_BigInt x_size = hypre_ParVectorGlobalSize(x); HYPRE_BigInt y_size = hypre_ParVectorGlobalSize(y); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(offd); HYPRE_Int ierr = 0; HYPRE_Int num_sends, i, j, index, start, num_procs; HYPRE_Int *int_buf_data = NULL; HYPRE_Int *CF_marker_offd = NULL; HYPRE_Complex *x_tmp_data = NULL; HYPRE_Complex *x_buf_data = NULL; HYPRE_Complex *x_local_data = hypre_VectorData(x_local); /*--------------------------------------------------------------------- * Check for size compatibility. ParMatvec returns ierr = 11 if * length of X doesn't equal the number of columns of A, * ierr = 12 if the length of Y doesn't equal the number of rows * of A, and ierr = 13 if both are true. 
* * Because temporary vectors are often used in ParMatvec, none of * these conditions terminates processing, and the ierr flag * is informational only. *--------------------------------------------------------------------*/ hypre_MPI_Comm_size(comm,&num_procs); if (num_cols != x_size) ierr = 11; if (num_rows != y_size) ierr = 12; if (num_cols != x_size && num_rows != y_size) ierr = 13; if (num_procs > 1) { if (num_cols_offd) { x_tmp = hypre_SeqVectorCreate( num_cols_offd ); hypre_SeqVectorInitialize(x_tmp); x_tmp_data = hypre_VectorData(x_tmp); } /*--------------------------------------------------------------------- * If there exists no CommPkg for A, a CommPkg is generated using * equally load balanced partitionings *--------------------------------------------------------------------*/ if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); if (num_sends) x_buf_data = hypre_CTAlloc(HYPRE_Complex, hypre_ParCSRCommPkgSendMapStart (comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) x_buf_data[index++] = x_local_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate ( 1, comm_pkg, x_buf_data, x_tmp_data ); } hypre_CSRMatrixMatvec_FF( alpha, diag, x_local, beta, y_local, CF_marker, CF_marker, fpt); if (num_procs > 1) { hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; if (num_sends) int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart (comm_pkg, num_sends), HYPRE_MEMORY_HOST); if (num_cols_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate(11,comm_pkg,int_buf_data,CF_marker_offd ); hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; if (num_cols_offd) hypre_CSRMatrixMatvec_FF( alpha, offd, x_tmp, 1.0, y_local, CF_marker, CF_marker_offd, fpt); hypre_SeqVectorDestroy(x_tmp); x_tmp = NULL; hypre_TFree(x_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); } return ierr; }
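/* Minimal usage sketch for the matvec entry points defined above. This is illustrative only:
   the wrapper name example_matvec_calls is hypothetical, and it assumes that A, x, b and y have
   already been created, partitioned consistently and initialized elsewhere (construction calls
   are not shown). Only signatures that appear in this file are used. */

#include "_hypre_parcsr_mv.h"

void example_matvec_calls( hypre_ParCSRMatrix *A,
                           hypre_ParVector    *x,
                           hypre_ParVector    *b,
                           hypre_ParVector    *y )
{
   HYPRE_Complex alpha = 1.0;
   HYPRE_Complex beta  = 0.5;

   /* y = alpha*A*x + beta*b (out-of-place variant) */
   hypre_ParCSRMatrixMatvecOutOfPlace( alpha, A, x, beta, b, y );

   /* y = alpha*A*x + beta*y (in-place convenience wrapper) */
   hypre_ParCSRMatrixMatvec( alpha, A, x, beta, y );

   /* y = alpha*A^T*x + beta*y (transpose matvec) */
   hypre_ParCSRMatrixMatvecT( alpha, A, x, beta, y );
}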
/****************************************************************************** * * Matvec functions for hypre_CSRMatrix class. * *****************************************************************************/ #include "_hypre_parcsr_mv.h" #include <assert.h> /*#ifdef HYPRE_USING_GPU extern "C" { void PackOnDevice(HYPRE_Complex *send_data,HYPRE_Complex *x_local_data, HYPRE_Int *send_map, HYPRE_Int begin,HYPRE_Int end,cudaStream_t s); } #endif */ /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixMatvec *--------------------------------------------------------------------------*/ // y = alpha*A*x + beta*b HYPRE_Int hypre_ParCSRMatrixMatvecOutOfPlace( HYPRE_Complex alpha, hypre_ParCSRMatrix *A, hypre_ParVector *x, HYPRE_Complex beta, hypre_ParVector *b, hypre_ParVector *y ) { hypre_ParCSRCommHandle **comm_handle; hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(A); hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(A); hypre_Vector *x_local = hypre_ParVectorLocalVector(x); hypre_Vector *b_local = hypre_ParVectorLocalVector(b); hypre_Vector *y_local = hypre_ParVectorLocalVector(y); HYPRE_BigInt num_rows = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_BigInt num_cols = hypre_ParCSRMatrixGlobalNumCols(A); hypre_Vector *x_tmp; HYPRE_BigInt x_size = hypre_ParVectorGlobalSize(x); HYPRE_BigInt b_size = hypre_ParVectorGlobalSize(b); HYPRE_BigInt y_size = hypre_ParVectorGlobalSize(y); HYPRE_Int num_vectors = hypre_VectorNumVectors(x_local); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(offd); HYPRE_Int ierr = 0; HYPRE_Int num_sends, i, j, jv, index, start; HYPRE_Int vecstride = hypre_VectorVectorStride( x_local ); HYPRE_Int idxstride = hypre_VectorIndexStride( x_local ); HYPRE_Complex *x_tmp_data, **x_buf_data; HYPRE_Complex *x_local_data = hypre_VectorData(x_local); /*--------------------------------------------------------------------- * Check for size compatibility. ParMatvec returns ierr = 11 if * length of X doesn't equal the number of columns of A, * ierr = 12 if the length of Y doesn't equal the number of rows * of A, and ierr = 13 if both are true. * * Because temporary vectors are often used in ParMatvec, none of * these conditions terminates processing, and the ierr flag * is informational only. 
*--------------------------------------------------------------------*/ PUSH_RANGE_PAYLOAD("PAR_CSR_MATVEC",5,x_size); hypre_assert( idxstride>0 ); if (num_cols != x_size) ierr = 11; if (num_rows != y_size || num_rows != b_size) ierr = 12; if (num_cols != x_size && (num_rows != y_size || num_rows != b_size)) ierr = 13; hypre_assert( hypre_VectorNumVectors(b_local)==num_vectors ); hypre_assert( hypre_VectorNumVectors(y_local)==num_vectors ); if ( num_vectors==1 ) x_tmp = hypre_SeqVectorCreate( num_cols_offd ); else { hypre_assert( num_vectors>1 ); x_tmp = hypre_SeqMultiVectorCreate( num_cols_offd, num_vectors ); } /*--------------------------------------------------------------------- * If there exists no CommPkg for A, a CommPkg is generated using * equally load balanced partitionings *--------------------------------------------------------------------*/ if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime(); #endif PUSH_RANGE("MPI_PACK",3); HYPRE_Int use_persistent_comm = 0; #ifdef HYPRE_USING_PERSISTENT_COMM use_persistent_comm = num_vectors == 1; // JSP TODO: we can use persistent communication for multi-vectors, // but then we need different communication handles for different // num_vectors. hypre_ParCSRPersistentCommHandle *persistent_comm_handle; #endif if ( use_persistent_comm ) { #ifdef HYPRE_USING_PERSISTENT_COMM PUSH_RANGE("PERCOMM1",0); persistent_comm_handle = hypre_ParCSRCommPkgGetPersistentCommHandle(1, comm_pkg); HYPRE_Int num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg); hypre_assert(num_cols_offd == hypre_ParCSRCommPkgRecvVecStart(comm_pkg, num_recvs)); hypre_VectorData(x_tmp) = (HYPRE_Complex *)persistent_comm_handle->recv_data; hypre_SeqVectorSetDataOwner(x_tmp, 0); POP_RANGE; #endif } else { comm_handle = hypre_CTAlloc(hypre_ParCSRCommHandle*, num_vectors, HYPRE_MEMORY_HOST); } hypre_SeqVectorInitialize(x_tmp); x_tmp_data = hypre_VectorData(x_tmp); num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); if (!use_persistent_comm) { x_buf_data = hypre_CTAlloc( HYPRE_Complex*, num_vectors , HYPRE_MEMORY_HOST); for ( jv=0; jv<num_vectors; ++jv ) x_buf_data[jv] = hypre_CTAlloc(HYPRE_Complex, hypre_ParCSRCommPkgSendMapStart (comm_pkg, num_sends), HYPRE_MEMORY_SHARED); } if ( num_vectors==1 ) { HYPRE_Int begin = hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0); HYPRE_Int end = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends); #if defined(HYPRE_USING_GPU) && defined(HYPRE_USING_UNIFIED_MEMORY) PUSH_RANGE("PERCOMM2DEVICE",4); #ifdef HYPRE_USING_PERSISTENT_COMM PackOnDevice((HYPRE_Complex*)persistent_comm_handle->send_data,x_local_data,hypre_ParCSRCommPkgSendMapElmts(comm_pkg),begin,end,HYPRE_STREAM(4)); //PrintPointerAttributes(persistent_comm_handle->send_data); #else #if defined(DEBUG_PACK_ON_DEVICE) hypre_CheckErrorDevice(cudaPeekAtLastError()); hypre_CheckErrorDevice(cudaDeviceSynchronize()); ASSERT_MANAGED(x_buf_data[0]); ASSERT_MANAGED(x_local_data); ASSERT_MANAGED(hypre_ParCSRCommPkgSendMapElmts(comm_pkg)); #endif /* printf("%d %d %d\n", PointerAttributes(x_buf_data[0]), PointerAttributes(x_local_data), PointerAttributes(hypre_ParCSRCommPkgSendMapElmts(comm_pkg))); */ PackOnDevice((HYPRE_Complex*)x_buf_data[0],x_local_data,hypre_ParCSRCommPkgSendMapElmts(comm_pkg),begin,end,HYPRE_STREAM(4)); #if defined(DEBUG_PACK_ON_DEVICE) hypre_CheckErrorDevice(cudaPeekAtLastError()); hypre_CheckErrorDevice(cudaDeviceSynchronize()); #endif #endif POP_RANGE; 
SetAsyncMode(1); hypre_CheckErrorDevice(cudaPeekAtLastError()); hypre_CheckErrorDevice(cudaDeviceSynchronize()); hypre_CSRMatrixMatvecOutOfPlace( alpha, diag, x_local, beta, b_local, y_local, 0); //hypre_SeqVectorUpdateHost(y_local); //hypre_SeqVectorUpdateHost(x_local); //hypre_SeqVectorUpdateHost(b_local); SetAsyncMode(0); #else for (i = begin; i < end; i++) { #ifdef HYPRE_USING_PERSISTENT_COMM ((HYPRE_Complex *)persistent_comm_handle->send_data)[i - begin] #else x_buf_data[0][i - begin] #endif = x_local_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,i)]; } POP_RANGE; // "MPI_PACK_OMP" #endif } else for ( jv=0; jv<num_vectors; ++jv ) { index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) x_buf_data[jv][index++] = x_local_data[ jv*vecstride + idxstride*hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j) ]; } } hypre_assert( idxstride==1 ); /* ... The assert is because the following loop only works for 'column' storage of a multivector. This needs to be fixed to work more generally, at least for 'row' storage. This in turn, means either change CommPkg so num_sends is no.zones*no.vectors (not no.zones) or, less dangerously, put a stride in the logic of CommHandleCreate (stride either from a new arg or a new variable inside CommPkg). Or put the num_vector iteration inside CommHandleCreate (perhaps a new multivector variant of it). */ #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime(); hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime(); #endif POP_RANGE; PUSH_RANGE("MPI_HALO_EXC_SEND",4); if (use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM hypre_ParCSRPersistentCommHandleStart(persistent_comm_handle); #endif } else { for ( jv=0; jv<num_vectors; ++jv ) { comm_handle[jv] = hypre_ParCSRCommHandleCreate ( 1, comm_pkg, x_buf_data[jv], &(x_tmp_data[jv*num_cols_offd]) ); } } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime(); #endif POP_RANGE; #if !defined(HYPRE_USING_GPU) || !defined(HYPRE_USING_UNIFIED_MEMORY) hypre_CSRMatrixMatvecOutOfPlace( alpha, diag, x_local, beta, b_local, y_local, 0); #endif #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime(); #endif PUSH_RANGE("MPI_HALO_EXC_RECV",6); if (use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM hypre_ParCSRPersistentCommHandleWait(persistent_comm_handle); #endif } else { for ( jv=0; jv<num_vectors; ++jv ) { hypre_ParCSRCommHandleDestroy(comm_handle[jv]); comm_handle[jv] = NULL; } hypre_TFree(comm_handle, HYPRE_MEMORY_HOST); } POP_RANGE; #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime(); #endif //hypre_SeqVectorUpdateDevice(x_tmp); if (num_cols_offd) hypre_CSRMatrixMatvec( alpha, offd, x_tmp, 1.0, y_local); //if (num_cols_offd) hypre_SeqVectorUpdateHost(y_local); //hypre_SeqVectorUpdateHost(x_tmp); #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime(); #endif PUSH_RANGE("MPI_UNPACK",5); hypre_SeqVectorDestroy(x_tmp); x_tmp = NULL; if (!use_persistent_comm) { for ( jv=0; jv<num_vectors; ++jv ) hypre_TFree(x_buf_data[jv], HYPRE_MEMORY_SHARED); hypre_TFree(x_buf_data, HYPRE_MEMORY_HOST); } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime(); #endif POP_RANGE; #if defined(HYPRE_USING_GPU) && defined(HYPRE_USING_UNIFIED_MEMORY) 
hypre_CheckErrorDevice(cudaStreamSynchronize(HYPRE_STREAM(4))); #endif POP_RANGE; // PAR_CSR return ierr; } HYPRE_Int hypre_ParCSRMatrixMatvec( HYPRE_Complex alpha, hypre_ParCSRMatrix *A, hypre_ParVector *x, HYPRE_Complex beta, hypre_ParVector *y ) { return hypre_ParCSRMatrixMatvecOutOfPlace(alpha, A, x, beta, y, y); } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixMatvecT * * Performs y <- alpha * A^T * x + beta * y * *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixMatvecT( HYPRE_Complex alpha, hypre_ParCSRMatrix *A, hypre_ParVector *x, HYPRE_Complex beta, hypre_ParVector *y ) { hypre_ParCSRCommHandle **comm_handle; hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(A); hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(A); hypre_Vector *x_local = hypre_ParVectorLocalVector(x); hypre_Vector *y_local = hypre_ParVectorLocalVector(y); hypre_Vector *y_tmp; HYPRE_Int vecstride = hypre_VectorVectorStride( y_local ); HYPRE_Int idxstride = hypre_VectorIndexStride( y_local ); HYPRE_Complex *y_tmp_data, **y_buf_data; HYPRE_Complex *y_local_data = hypre_VectorData(y_local); HYPRE_BigInt num_rows = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_BigInt num_cols = hypre_ParCSRMatrixGlobalNumCols(A); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(offd); HYPRE_BigInt x_size = hypre_ParVectorGlobalSize(x); HYPRE_BigInt y_size = hypre_ParVectorGlobalSize(y); HYPRE_Int num_vectors = hypre_VectorNumVectors(y_local); HYPRE_Int i, j, jv, index, start, num_sends; HYPRE_Int ierr = 0; if (y==NULL) { printf("NULLY %p\b", (void*) y); return 1; } /*--------------------------------------------------------------------- * Check for size compatibility. MatvecT returns ierr = 1 if * length of X doesn't equal the number of rows of A, * ierr = 2 if the length of Y doesn't equal the number of * columns of A, and ierr = 3 if both are true. * * Because temporary vectors are often used in MatvecT, none of * these conditions terminates processing, and the ierr flag * is informational only. *--------------------------------------------------------------------*/ if (num_rows != x_size) ierr = 1; if (num_cols != y_size) ierr = 2; if (num_rows != x_size && num_cols != y_size) ierr = 3; /*----------------------------------------------------------------------- *-----------------------------------------------------------------------*/ if ( num_vectors==1 ) { y_tmp = hypre_SeqVectorCreate(num_cols_offd); } else { y_tmp = hypre_SeqMultiVectorCreate(num_cols_offd,num_vectors); } /*--------------------------------------------------------------------- * If there exists no CommPkg for A, a CommPkg is generated using * equally load balanced partitionings *--------------------------------------------------------------------*/ if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime(); #endif HYPRE_Int use_persistent_comm = 0; #ifdef HYPRE_USING_PERSISTENT_COMM use_persistent_comm = num_vectors == 1; // JSP TODO: we can use persistent communication for multi-vectors, // but then we need different communication handles for different // num_vectors. 
hypre_ParCSRPersistentCommHandle *persistent_comm_handle; #endif if (use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM // JSP TODO: we should be also able to use persistent communication for multiple vectors persistent_comm_handle = hypre_ParCSRCommPkgGetPersistentCommHandle(2, comm_pkg); HYPRE_Int num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg); hypre_assert(num_cols_offd == hypre_ParCSRCommPkgRecvVecStart(comm_pkg, num_recvs)); hypre_VectorData(y_tmp) = (HYPRE_Complex *)persistent_comm_handle->send_data; hypre_SeqVectorSetDataOwner(y_tmp, 0); #endif } else { comm_handle = hypre_CTAlloc(hypre_ParCSRCommHandle*, num_vectors, HYPRE_MEMORY_HOST); } hypre_SeqVectorInitialize(y_tmp); num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); if (!use_persistent_comm) { y_buf_data = hypre_CTAlloc( HYPRE_Complex*, num_vectors , HYPRE_MEMORY_HOST); for ( jv=0; jv<num_vectors; ++jv ) y_buf_data[jv] = hypre_CTAlloc(HYPRE_Complex, hypre_ParCSRCommPkgSendMapStart (comm_pkg, num_sends), HYPRE_MEMORY_HOST); } y_tmp_data = hypre_VectorData(y_tmp); y_local_data = hypre_VectorData(y_local); hypre_assert( idxstride==1 ); /* only 'column' storage of multivectors * implemented so far */ #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime(); #endif if (num_cols_offd) { if (A->offdT) { // offdT is optional. Used only if it's present. hypre_CSRMatrixMatvec(alpha, A->offdT, x_local, 0.0, y_tmp); } else { hypre_CSRMatrixMatvecT(alpha, offd, x_local, 0.0, y_tmp); } } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime(); #endif if (use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM hypre_ParCSRPersistentCommHandleStart(persistent_comm_handle); #endif } else { for ( jv=0; jv<num_vectors; ++jv ) { /* this is where we assume multivectors are 'column' storage */ comm_handle[jv] = hypre_ParCSRCommHandleCreate ( 2, comm_pkg, &(y_tmp_data[jv*num_cols_offd]), y_buf_data[jv] ); } } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime(); #endif if (A->diagT) { // diagT is optional. Used only if it's present. 
hypre_CSRMatrixMatvec(alpha, A->diagT, x_local, beta, y_local); } else { hypre_CSRMatrixMatvecT(alpha, diag, x_local, beta, y_local); } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime(); #endif if (use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM hypre_ParCSRPersistentCommHandleWait(persistent_comm_handle); #endif } else { for ( jv=0; jv<num_vectors; ++jv ) { hypre_ParCSRCommHandleDestroy(comm_handle[jv]); comm_handle[jv] = NULL; } hypre_TFree(comm_handle, HYPRE_MEMORY_HOST); } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime(); hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime(); #endif if ( num_vectors==1 ) { index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) y_local_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)] #ifdef HYPRE_USING_PERSISTENT_COMM += ((HYPRE_Complex *)persistent_comm_handle->recv_data)[index++]; #else += y_buf_data[0][index++]; #endif } } else for ( jv=0; jv<num_vectors; ++jv ) { index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) y_local_data[ jv*vecstride + idxstride*hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j) ] += y_buf_data[jv][index++]; } } hypre_SeqVectorDestroy(y_tmp); y_tmp = NULL; if (!use_persistent_comm) { for ( jv=0; jv<num_vectors; ++jv ) hypre_TFree(y_buf_data[jv], HYPRE_MEMORY_HOST); hypre_TFree(y_buf_data, HYPRE_MEMORY_HOST); } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime(); #endif return ierr; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixMatvec_FF *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixMatvec_FF( HYPRE_Complex alpha, hypre_ParCSRMatrix *A, hypre_ParVector *x, HYPRE_Complex beta, hypre_ParVector *y, HYPRE_Int *CF_marker, HYPRE_Int fpt ) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommHandle *comm_handle; hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(A); hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(A); hypre_Vector *x_local = hypre_ParVectorLocalVector(x); hypre_Vector *y_local = hypre_ParVectorLocalVector(y); HYPRE_BigInt num_rows = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_BigInt num_cols = hypre_ParCSRMatrixGlobalNumCols(A); hypre_Vector *x_tmp; HYPRE_BigInt x_size = hypre_ParVectorGlobalSize(x); HYPRE_BigInt y_size = hypre_ParVectorGlobalSize(y); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(offd); HYPRE_Int ierr = 0; HYPRE_Int num_sends, i, j, index, start, num_procs; HYPRE_Int *int_buf_data = NULL; HYPRE_Int *CF_marker_offd = NULL; HYPRE_Complex *x_tmp_data = NULL; HYPRE_Complex *x_buf_data = NULL; HYPRE_Complex *x_local_data = hypre_VectorData(x_local); /*--------------------------------------------------------------------- * Check for size compatibility. ParMatvec returns ierr = 11 if * length of X doesn't equal the number of columns of A, * ierr = 12 if the length of Y doesn't equal the number of rows * of A, and ierr = 13 if both are true. * * Because temporary vectors are often used in ParMatvec, none of * these conditions terminates processing, and the ierr flag * is informational only. 
*--------------------------------------------------------------------*/ hypre_MPI_Comm_size(comm,&num_procs); if (num_cols != x_size) ierr = 11; if (num_rows != y_size) ierr = 12; if (num_cols != x_size && num_rows != y_size) ierr = 13; if (num_procs > 1) { if (num_cols_offd) { x_tmp = hypre_SeqVectorCreate( num_cols_offd ); hypre_SeqVectorInitialize(x_tmp); x_tmp_data = hypre_VectorData(x_tmp); } /*--------------------------------------------------------------------- * If there exists no CommPkg for A, a CommPkg is generated using * equally load balanced partitionings *--------------------------------------------------------------------*/ if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); if (num_sends) x_buf_data = hypre_CTAlloc(HYPRE_Complex, hypre_ParCSRCommPkgSendMapStart (comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) x_buf_data[index++] = x_local_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate ( 1, comm_pkg, x_buf_data, x_tmp_data ); } hypre_CSRMatrixMatvec_FF( alpha, diag, x_local, beta, y_local, CF_marker, CF_marker, fpt); if (num_procs > 1) { hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; if (num_sends) int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart (comm_pkg, num_sends), HYPRE_MEMORY_HOST); if (num_cols_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate(11,comm_pkg,int_buf_data,CF_marker_offd ); hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; if (num_cols_offd) hypre_CSRMatrixMatvec_FF( alpha, offd, x_tmp, 1.0, y_local, CF_marker, CF_marker_offd, fpt); hypre_SeqVectorDestroy(x_tmp); x_tmp = NULL; hypre_TFree(x_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); } return ierr; }
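/*
 * A minimal sketch of the communication/computation overlap pattern the
 * ParCSR matvec above implements: pack the boundary entries of x, start a
 * non-blocking halo exchange, apply the local (diag) block while messages
 * are in flight, wait, then apply the off-process (offd) block.  This is a
 * plain-MPI illustration with a single neighbor; the argument names and the
 * diag_matvec/offd_matvec callbacks are invented for the example and are not
 * hypre's CommPkg API.
 */
#include <mpi.h>
#include <stdlib.h>

void matvec_overlap_sketch(int n_local, const double *x_local,
                           int n_send, const int *send_map, int peer,
                           int n_recv, double *x_halo,
                           void (*diag_matvec)(int, const double *, double *),
                           void (*offd_matvec)(int, const double *, double *),
                           double *y_local)
{
   double *send_buf = (double *) malloc(n_send * sizeof(double));
   MPI_Request reqs[2];
   int i;

   /* pack boundary values of x; this is the loop the code above runs under
    * "#pragma omp parallel for" or packs on the device */
   for (i = 0; i < n_send; i++)
      send_buf[i] = x_local[send_map[i]];

   /* start the halo exchange */
   MPI_Irecv(x_halo, n_recv, MPI_DOUBLE, peer, 0, MPI_COMM_WORLD, &reqs[0]);
   MPI_Isend(send_buf, n_send, MPI_DOUBLE, peer, 0, MPI_COMM_WORLD, &reqs[1]);

   /* overlap: the diag block only touches locally owned columns of x */
   diag_matvec(n_local, x_local, y_local);

   /* finish the exchange, then add the off-process contribution */
   MPI_Waitall(2, reqs, MPI_STATUSES_IGNORE);
   offd_matvec(n_recv, x_halo, y_local);

   free(send_buf);
}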
/****************************************************************************** * * Matvec functions for hypre_CSRMatrix class. * *****************************************************************************/ #include "_hypre_parcsr_mv.h" #include <assert.h> /*#ifdef HYPRE_USING_GPU extern "C" { void PackOnDevice(HYPRE_Complex *send_data,HYPRE_Complex *x_local_data, HYPRE_Int *send_map, HYPRE_Int begin,HYPRE_Int end,cudaStream_t s); } #endif */ /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixMatvec *--------------------------------------------------------------------------*/ // y = alpha*A*x + beta*b HYPRE_Int hypre_ParCSRMatrixMatvecOutOfPlace( HYPRE_Complex alpha, hypre_ParCSRMatrix *A, hypre_ParVector *x, HYPRE_Complex beta, hypre_ParVector *b, hypre_ParVector *y ) { hypre_ParCSRCommHandle **comm_handle; hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(A); hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(A); hypre_Vector *x_local = hypre_ParVectorLocalVector(x); hypre_Vector *b_local = hypre_ParVectorLocalVector(b); hypre_Vector *y_local = hypre_ParVectorLocalVector(y); HYPRE_BigInt num_rows = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_BigInt num_cols = hypre_ParCSRMatrixGlobalNumCols(A); hypre_Vector *x_tmp; HYPRE_BigInt x_size = hypre_ParVectorGlobalSize(x); HYPRE_BigInt b_size = hypre_ParVectorGlobalSize(b); HYPRE_BigInt y_size = hypre_ParVectorGlobalSize(y); HYPRE_Int num_vectors = hypre_VectorNumVectors(x_local); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(offd); HYPRE_Int ierr = 0; HYPRE_Int num_sends, i, j, jv, index, start; HYPRE_Int vecstride = hypre_VectorVectorStride( x_local ); HYPRE_Int idxstride = hypre_VectorIndexStride( x_local ); HYPRE_Complex *x_tmp_data, **x_buf_data; HYPRE_Complex *x_local_data = hypre_VectorData(x_local); /*--------------------------------------------------------------------- * Check for size compatibility. ParMatvec returns ierr = 11 if * length of X doesn't equal the number of columns of A, * ierr = 12 if the length of Y doesn't equal the number of rows * of A, and ierr = 13 if both are true. * * Because temporary vectors are often used in ParMatvec, none of * these conditions terminates processing, and the ierr flag * is informational only. 
*--------------------------------------------------------------------*/ PUSH_RANGE_PAYLOAD("PAR_CSR_MATVEC",5,x_size); hypre_assert( idxstride>0 ); if (num_cols != x_size) ierr = 11; if (num_rows != y_size || num_rows != b_size) ierr = 12; if (num_cols != x_size && (num_rows != y_size || num_rows != b_size)) ierr = 13; hypre_assert( hypre_VectorNumVectors(b_local)==num_vectors ); hypre_assert( hypre_VectorNumVectors(y_local)==num_vectors ); if ( num_vectors==1 ) x_tmp = hypre_SeqVectorCreate( num_cols_offd ); else { hypre_assert( num_vectors>1 ); x_tmp = hypre_SeqMultiVectorCreate( num_cols_offd, num_vectors ); } /*--------------------------------------------------------------------- * If there exists no CommPkg for A, a CommPkg is generated using * equally load balanced partitionings *--------------------------------------------------------------------*/ if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime(); #endif PUSH_RANGE("MPI_PACK",3); HYPRE_Int use_persistent_comm = 0; #ifdef HYPRE_USING_PERSISTENT_COMM use_persistent_comm = num_vectors == 1; // JSP TODO: we can use persistent communication for multi-vectors, // but then we need different communication handles for different // num_vectors. hypre_ParCSRPersistentCommHandle *persistent_comm_handle; #endif if ( use_persistent_comm ) { #ifdef HYPRE_USING_PERSISTENT_COMM PUSH_RANGE("PERCOMM1",0); persistent_comm_handle = hypre_ParCSRCommPkgGetPersistentCommHandle(1, comm_pkg); HYPRE_Int num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg); hypre_assert(num_cols_offd == hypre_ParCSRCommPkgRecvVecStart(comm_pkg, num_recvs)); hypre_VectorData(x_tmp) = (HYPRE_Complex *)persistent_comm_handle->recv_data; hypre_SeqVectorSetDataOwner(x_tmp, 0); POP_RANGE; #endif } else { comm_handle = hypre_CTAlloc(hypre_ParCSRCommHandle*, num_vectors, HYPRE_MEMORY_HOST); } hypre_SeqVectorInitialize(x_tmp); x_tmp_data = hypre_VectorData(x_tmp); num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); if (!use_persistent_comm) { x_buf_data = hypre_CTAlloc( HYPRE_Complex*, num_vectors , HYPRE_MEMORY_HOST); for ( jv=0; jv<num_vectors; ++jv ) x_buf_data[jv] = hypre_CTAlloc(HYPRE_Complex, hypre_ParCSRCommPkgSendMapStart (comm_pkg, num_sends), HYPRE_MEMORY_SHARED); } if ( num_vectors==1 ) { HYPRE_Int begin = hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0); HYPRE_Int end = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends); #if defined(HYPRE_USING_GPU) && defined(HYPRE_USING_UNIFIED_MEMORY) PUSH_RANGE("PERCOMM2DEVICE",4); #ifdef HYPRE_USING_PERSISTENT_COMM PackOnDevice((HYPRE_Complex*)persistent_comm_handle->send_data,x_local_data,hypre_ParCSRCommPkgSendMapElmts(comm_pkg),begin,end,HYPRE_STREAM(4)); //PrintPointerAttributes(persistent_comm_handle->send_data); #else #if defined(DEBUG_PACK_ON_DEVICE) hypre_CheckErrorDevice(cudaPeekAtLastError()); hypre_CheckErrorDevice(cudaDeviceSynchronize()); ASSERT_MANAGED(x_buf_data[0]); ASSERT_MANAGED(x_local_data); ASSERT_MANAGED(hypre_ParCSRCommPkgSendMapElmts(comm_pkg)); #endif /* printf("%d %d %d\n", PointerAttributes(x_buf_data[0]), PointerAttributes(x_local_data), PointerAttributes(hypre_ParCSRCommPkgSendMapElmts(comm_pkg))); */ PackOnDevice((HYPRE_Complex*)x_buf_data[0],x_local_data,hypre_ParCSRCommPkgSendMapElmts(comm_pkg),begin,end,HYPRE_STREAM(4)); #if defined(DEBUG_PACK_ON_DEVICE) hypre_CheckErrorDevice(cudaPeekAtLastError()); hypre_CheckErrorDevice(cudaDeviceSynchronize()); #endif #endif POP_RANGE; 
SetAsyncMode(1); hypre_CheckErrorDevice(cudaPeekAtLastError()); hypre_CheckErrorDevice(cudaDeviceSynchronize()); hypre_CSRMatrixMatvecOutOfPlace( alpha, diag, x_local, beta, b_local, y_local, 0); //hypre_SeqVectorUpdateHost(y_local); //hypre_SeqVectorUpdateHost(x_local); //hypre_SeqVectorUpdateHost(b_local); SetAsyncMode(0); #else #ifdef HYPRE_USING_MAPPED_OPENMP_OFFLOAD PUSH_RANGE("MPI_PACK_OMP",4); SyncVectorToHost(x_local); #endif #if defined(HYPRE_USING_OPENMP_OFFLOAD_NOT_USED) HYPRE_Int num_threads=64; HYPRE_Int num_teams = (end-begin+(end-begin)%num_threads)/num_threads; HYPRE_Int *local_send_map_elmts = comm_pkg->send_map_elmts; printf("USING OFFLOADED PACKING OF BUFER\n"); #pragma omp target teams distribute parallel for private(i) num_teams(num_teams) thread_limit(num_threads) is_device_ptr(x_local_data,x_buf_data,comm_pkg,local_send_map_elmts) #elif defined(HYPRE_USING_OPENMP) #pragma omp parallel for HYPRE_SMP_SCHEDULE #endif for (i = begin; i < end; i++) { #ifdef HYPRE_USING_PERSISTENT_COMM ((HYPRE_Complex *)persistent_comm_handle->send_data)[i - begin] #else x_buf_data[0][i - begin] #endif = x_local_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,i)]; } POP_RANGE; // "MPI_PACK_OMP" #endif } else for ( jv=0; jv<num_vectors; ++jv ) { index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) x_buf_data[jv][index++] = x_local_data[ jv*vecstride + idxstride*hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j) ]; } } hypre_assert( idxstride==1 ); /* ... The assert is because the following loop only works for 'column' storage of a multivector. This needs to be fixed to work more generally, at least for 'row' storage. This in turn, means either change CommPkg so num_sends is no.zones*no.vectors (not no.zones) or, less dangerously, put a stride in the logic of CommHandleCreate (stride either from a new arg or a new variable inside CommPkg). Or put the num_vector iteration inside CommHandleCreate (perhaps a new multivector variant of it). 
*/ #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime(); hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime(); #endif POP_RANGE; PUSH_RANGE("MPI_HALO_EXC_SEND",4); if (use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM hypre_ParCSRPersistentCommHandleStart(persistent_comm_handle); #endif } else { for ( jv=0; jv<num_vectors; ++jv ) { comm_handle[jv] = hypre_ParCSRCommHandleCreate ( 1, comm_pkg, x_buf_data[jv], &(x_tmp_data[jv*num_cols_offd]) ); } } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime(); #endif POP_RANGE; #if !defined(HYPRE_USING_GPU) || !defined(HYPRE_USING_UNIFIED_MEMORY) hypre_CSRMatrixMatvecOutOfPlace( alpha, diag, x_local, beta, b_local, y_local, 0); #endif #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime(); #endif PUSH_RANGE("MPI_HALO_EXC_RECV",6); if (use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM hypre_ParCSRPersistentCommHandleWait(persistent_comm_handle); #endif } else { for ( jv=0; jv<num_vectors; ++jv ) { hypre_ParCSRCommHandleDestroy(comm_handle[jv]); comm_handle[jv] = NULL; } hypre_TFree(comm_handle, HYPRE_MEMORY_HOST); } POP_RANGE; #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime(); #endif //hypre_SeqVectorUpdateDevice(x_tmp); #ifdef HYPRE_USING_MAPPED_OPENMP_OFFLOAD UpdateHRC(x_tmp); #endif if (num_cols_offd) hypre_CSRMatrixMatvec( alpha, offd, x_tmp, 1.0, y_local); //if (num_cols_offd) hypre_SeqVectorUpdateHost(y_local); //hypre_SeqVectorUpdateHost(x_tmp); #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime(); #endif PUSH_RANGE("MPI_UNPACK",5); hypre_SeqVectorDestroy(x_tmp); x_tmp = NULL; if (!use_persistent_comm) { for ( jv=0; jv<num_vectors; ++jv ) hypre_TFree(x_buf_data[jv], HYPRE_MEMORY_SHARED); hypre_TFree(x_buf_data, HYPRE_MEMORY_HOST); } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime(); #endif POP_RANGE; #if defined(HYPRE_USING_GPU) && defined(HYPRE_USING_UNIFIED_MEMORY) hypre_CheckErrorDevice(cudaStreamSynchronize(HYPRE_STREAM(4))); #endif POP_RANGE; // PAR_CSR return ierr; } HYPRE_Int hypre_ParCSRMatrixMatvec( HYPRE_Complex alpha, hypre_ParCSRMatrix *A, hypre_ParVector *x, HYPRE_Complex beta, hypre_ParVector *y ) { return hypre_ParCSRMatrixMatvecOutOfPlace(alpha, A, x, beta, y, y); } #ifdef HYPRE_USING_MAPPED_OPENMP_OFFLOAD HYPRE_Int hypre_ParCSRMatrixMatvec3( HYPRE_Complex alpha, hypre_ParCSRMatrix *A, hypre_ParVector *x, HYPRE_Complex beta, hypre_ParVector *y ) { HYPRE_Int rval=hypre_ParCSRMatrixMatvecOutOfPlace(alpha, A, x, beta, y, y); hypre_SeqVectorUpdateHost(y->local_vector); } HYPRE_Int hypre_ParCSRMatrixMatvecOutOfPlace3( HYPRE_Complex alpha, hypre_ParCSRMatrix *A, hypre_ParVector *x, HYPRE_Complex beta, hypre_ParVector *b, hypre_ParVector *y ) { hypre_ParCSRMatrixMatvecOutOfPlace(alpha,A,x,beta,b,y); hypre_SeqVectorUpdateHost(y->local_vector); } #endif /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixMatvecT * * Performs y <- alpha * A^T * x + beta * y * *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixMatvecT( HYPRE_Complex alpha, hypre_ParCSRMatrix *A, hypre_ParVector *x, HYPRE_Complex beta, hypre_ParVector *y ) { hypre_ParCSRCommHandle **comm_handle; hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(A); 
hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(A); hypre_Vector *x_local = hypre_ParVectorLocalVector(x); hypre_Vector *y_local = hypre_ParVectorLocalVector(y); hypre_Vector *y_tmp; HYPRE_Int vecstride = hypre_VectorVectorStride( y_local ); HYPRE_Int idxstride = hypre_VectorIndexStride( y_local ); HYPRE_Complex *y_tmp_data, **y_buf_data; HYPRE_Complex *y_local_data = hypre_VectorData(y_local); HYPRE_BigInt num_rows = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_BigInt num_cols = hypre_ParCSRMatrixGlobalNumCols(A); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(offd); HYPRE_BigInt x_size = hypre_ParVectorGlobalSize(x); HYPRE_BigInt y_size = hypre_ParVectorGlobalSize(y); HYPRE_Int num_vectors = hypre_VectorNumVectors(y_local); HYPRE_Int i, j, jv, index, start, num_sends; HYPRE_Int ierr = 0; if (y==NULL) { printf("NULLY %p\b", (void*) y); return 1; } /*--------------------------------------------------------------------- * Check for size compatibility. MatvecT returns ierr = 1 if * length of X doesn't equal the number of rows of A, * ierr = 2 if the length of Y doesn't equal the number of * columns of A, and ierr = 3 if both are true. * * Because temporary vectors are often used in MatvecT, none of * these conditions terminates processing, and the ierr flag * is informational only. *--------------------------------------------------------------------*/ if (num_rows != x_size) ierr = 1; if (num_cols != y_size) ierr = 2; if (num_rows != x_size && num_cols != y_size) ierr = 3; /*----------------------------------------------------------------------- *-----------------------------------------------------------------------*/ if ( num_vectors==1 ) { y_tmp = hypre_SeqVectorCreate(num_cols_offd); } else { y_tmp = hypre_SeqMultiVectorCreate(num_cols_offd,num_vectors); } /*--------------------------------------------------------------------- * If there exists no CommPkg for A, a CommPkg is generated using * equally load balanced partitionings *--------------------------------------------------------------------*/ if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime(); #endif HYPRE_Int use_persistent_comm = 0; #ifdef HYPRE_USING_PERSISTENT_COMM use_persistent_comm = num_vectors == 1; // JSP TODO: we can use persistent communication for multi-vectors, // but then we need different communication handles for different // num_vectors. 
hypre_ParCSRPersistentCommHandle *persistent_comm_handle; #endif if (use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM // JSP TODO: we should be also able to use persistent communication for multiple vectors persistent_comm_handle = hypre_ParCSRCommPkgGetPersistentCommHandle(2, comm_pkg); HYPRE_Int num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg); hypre_assert(num_cols_offd == hypre_ParCSRCommPkgRecvVecStart(comm_pkg, num_recvs)); hypre_VectorData(y_tmp) = (HYPRE_Complex *)persistent_comm_handle->send_data; hypre_SeqVectorSetDataOwner(y_tmp, 0); #endif } else { comm_handle = hypre_CTAlloc(hypre_ParCSRCommHandle*, num_vectors, HYPRE_MEMORY_HOST); } hypre_SeqVectorInitialize(y_tmp); num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); if (!use_persistent_comm) { y_buf_data = hypre_CTAlloc( HYPRE_Complex*, num_vectors , HYPRE_MEMORY_HOST); for ( jv=0; jv<num_vectors; ++jv ) y_buf_data[jv] = hypre_CTAlloc(HYPRE_Complex, hypre_ParCSRCommPkgSendMapStart (comm_pkg, num_sends), HYPRE_MEMORY_HOST); } y_tmp_data = hypre_VectorData(y_tmp); y_local_data = hypre_VectorData(y_local); hypre_assert( idxstride==1 ); /* only 'column' storage of multivectors * implemented so far */ #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime(); #endif if (num_cols_offd) { if (A->offdT) { // offdT is optional. Used only if it's present. hypre_CSRMatrixMatvec(alpha, A->offdT, x_local, 0.0, y_tmp); #ifdef HYPRE_USING_MAPPED_OPENMP_OFFLOAD SyncVectorToHost(y_tmp); #endif } else { hypre_CSRMatrixMatvecT(alpha, offd, x_local, 0.0, y_tmp); } } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime(); #endif if (use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM hypre_ParCSRPersistentCommHandleStart(persistent_comm_handle); #endif } else { for ( jv=0; jv<num_vectors; ++jv ) { /* this is where we assume multivectors are 'column' storage */ comm_handle[jv] = hypre_ParCSRCommHandleCreate ( 2, comm_pkg, &(y_tmp_data[jv*num_cols_offd]), y_buf_data[jv] ); } } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime(); #endif if (A->diagT) { // diagT is optional. Used only if it's present. 
hypre_CSRMatrixMatvec(alpha, A->diagT, x_local, beta, y_local); #ifdef HYPRE_USING_MAPPED_OPENMP_OFFLOAD SyncVectorToHost(y_local); #endif } else { hypre_CSRMatrixMatvecT(alpha, diag, x_local, beta, y_local); } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime(); #endif if (use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM hypre_ParCSRPersistentCommHandleWait(persistent_comm_handle); #endif } else { for ( jv=0; jv<num_vectors; ++jv ) { hypre_ParCSRCommHandleDestroy(comm_handle[jv]); comm_handle[jv] = NULL; } hypre_TFree(comm_handle, HYPRE_MEMORY_HOST); } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime(); hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime(); #endif if ( num_vectors==1 ) { index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) y_local_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)] #ifdef HYPRE_USING_PERSISTENT_COMM += ((HYPRE_Complex *)persistent_comm_handle->recv_data)[index++]; #else += y_buf_data[0][index++]; #endif } } else for ( jv=0; jv<num_vectors; ++jv ) { index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) y_local_data[ jv*vecstride + idxstride*hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j) ] += y_buf_data[jv][index++]; } } #ifdef HYPRE_USING_MAPPED_OPENMP_OFFLOAD UpdateHRC(y_local); #endif hypre_SeqVectorDestroy(y_tmp); y_tmp = NULL; if (!use_persistent_comm) { for ( jv=0; jv<num_vectors; ++jv ) hypre_TFree(y_buf_data[jv], HYPRE_MEMORY_HOST); hypre_TFree(y_buf_data, HYPRE_MEMORY_HOST); } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime(); #endif return ierr; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixMatvec_FF *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixMatvec_FF( HYPRE_Complex alpha, hypre_ParCSRMatrix *A, hypre_ParVector *x, HYPRE_Complex beta, hypre_ParVector *y, HYPRE_Int *CF_marker, HYPRE_Int fpt ) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommHandle *comm_handle; hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(A); hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(A); hypre_Vector *x_local = hypre_ParVectorLocalVector(x); hypre_Vector *y_local = hypre_ParVectorLocalVector(y); HYPRE_BigInt num_rows = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_BigInt num_cols = hypre_ParCSRMatrixGlobalNumCols(A); hypre_Vector *x_tmp; HYPRE_BigInt x_size = hypre_ParVectorGlobalSize(x); HYPRE_BigInt y_size = hypre_ParVectorGlobalSize(y); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(offd); HYPRE_Int ierr = 0; HYPRE_Int num_sends, i, j, index, start, num_procs; HYPRE_Int *int_buf_data = NULL; HYPRE_Int *CF_marker_offd = NULL; HYPRE_Complex *x_tmp_data = NULL; HYPRE_Complex *x_buf_data = NULL; HYPRE_Complex *x_local_data = hypre_VectorData(x_local); /*--------------------------------------------------------------------- * Check for size compatibility. ParMatvec returns ierr = 11 if * length of X doesn't equal the number of columns of A, * ierr = 12 if the length of Y doesn't equal the number of rows * of A, and ierr = 13 if both are true. 
* * Because temporary vectors are often used in ParMatvec, none of * these conditions terminates processing, and the ierr flag * is informational only. *--------------------------------------------------------------------*/ hypre_MPI_Comm_size(comm,&num_procs); if (num_cols != x_size) ierr = 11; if (num_rows != y_size) ierr = 12; if (num_cols != x_size && num_rows != y_size) ierr = 13; if (num_procs > 1) { if (num_cols_offd) { x_tmp = hypre_SeqVectorCreate( num_cols_offd ); hypre_SeqVectorInitialize(x_tmp); x_tmp_data = hypre_VectorData(x_tmp); } /*--------------------------------------------------------------------- * If there exists no CommPkg for A, a CommPkg is generated using * equally load balanced partitionings *--------------------------------------------------------------------*/ if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); if (num_sends) x_buf_data = hypre_CTAlloc(HYPRE_Complex, hypre_ParCSRCommPkgSendMapStart (comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) x_buf_data[index++] = x_local_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate ( 1, comm_pkg, x_buf_data, x_tmp_data ); } hypre_CSRMatrixMatvec_FF( alpha, diag, x_local, beta, y_local, CF_marker, CF_marker, fpt); if (num_procs > 1) { hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; if (num_sends) int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart (comm_pkg, num_sends), HYPRE_MEMORY_HOST); if (num_cols_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate(11,comm_pkg,int_buf_data,CF_marker_offd ); hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; if (num_cols_offd) hypre_CSRMatrixMatvec_FF( alpha, offd, x_tmp, 1.0, y_local, CF_marker, CF_marker_offd, fpt); hypre_SeqVectorDestroy(x_tmp); x_tmp = NULL; hypre_TFree(x_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); } return ierr; }
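/*
 * A minimal sketch of the assembly step in the transposed matvec above
 * (y <- alpha * A^T * x + beta * y): each rank computes partial results for
 * columns it does not own (y_tmp), the exchange runs in the reverse
 * direction of the forward matvec, and the owning rank accumulates the
 * received partial sums into y_local through the same send map.  Plain MPI
 * with one neighbor; the argument names are invented for the example and are
 * not hypre's CommPkg API.
 */
#include <mpi.h>
#include <stdlib.h>

void matvecT_scatter_add_sketch(int n_send, const int *send_map, int peer,
                                int n_offd, double *y_tmp, double *y_local)
{
   double *recv_buf = (double *) malloc(n_send * sizeof(double));
   MPI_Request reqs[2];
   int i;

   /* reverse exchange: what the forward matvec received is sent here */
   MPI_Irecv(recv_buf, n_send, MPI_DOUBLE, peer, 1, MPI_COMM_WORLD, &reqs[0]);
   MPI_Isend(y_tmp, n_offd, MPI_DOUBLE, peer, 1, MPI_COMM_WORLD, &reqs[1]);
   MPI_Waitall(2, reqs, MPI_STATUSES_IGNORE);

   /* note the "+=": remote partial sums are accumulated, not overwritten */
   for (i = 0; i < n_send; i++)
      y_local[send_map[i]] += recv_buf[i];

   free(recv_buf);
}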
irbuilder_unroll_partial_factor_for.c
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs // RUN: %clang_cc1 -fopenmp-enable-irbuilder -verify -fopenmp -fopenmp-version=51 -x c -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s // expected-no-diagnostics #ifndef HEADER #define HEADER // CHECK-LABEL: define {{.*}}@unroll_partial_heuristic_for( // CHECK-NEXT: [[ENTRY:.*]]: // CHECK-NEXT: %[[N_ADDR:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[A_ADDR:.+]] = alloca float*, align 8 // CHECK-NEXT: %[[B_ADDR:.+]] = alloca float*, align 8 // CHECK-NEXT: %[[C_ADDR:.+]] = alloca float*, align 8 // CHECK-NEXT: %[[D_ADDR:.+]] = alloca float*, align 8 // CHECK-NEXT: %[[I:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[AGG_CAPTURED:.+]] = alloca %struct.anon, align 8 // CHECK-NEXT: %[[AGG_CAPTURED1:.+]] = alloca %struct.anon.0, align 4 // CHECK-NEXT: %[[DOTCOUNT_ADDR:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[P_LASTITER:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[P_LOWERBOUND:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[P_UPPERBOUND:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[P_STRIDE:.+]] = alloca i32, align 4 // CHECK-NEXT: store i32 %[[N:.+]], i32* %[[N_ADDR]], align 4 // CHECK-NEXT: store float* %[[A:.+]], float** %[[A_ADDR]], align 8 // CHECK-NEXT: store float* %[[B:.+]], float** %[[B_ADDR]], align 8 // CHECK-NEXT: store float* %[[C:.+]], float** %[[C_ADDR]], align 8 // CHECK-NEXT: store float* %[[D:.+]], float** %[[D_ADDR]], align 8 // CHECK-NEXT: store i32 0, i32* %[[I]], align 4 // CHECK-NEXT: %[[TMP0:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[AGG_CAPTURED]], i32 0, i32 0 // CHECK-NEXT: store i32* %[[I]], i32** %[[TMP0]], align 8 // CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[AGG_CAPTURED]], i32 0, i32 1 // CHECK-NEXT: store i32* %[[N_ADDR]], i32** %[[TMP1]], align 8 // CHECK-NEXT: %[[TMP2:.+]] = getelementptr inbounds %struct.anon.0, %struct.anon.0* %[[AGG_CAPTURED1]], i32 0, i32 0 // CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: store i32 %[[TMP3]], i32* %[[TMP2]], align 4 // CHECK-NEXT: call void @__captured_stmt(i32* %[[DOTCOUNT_ADDR]], %struct.anon* %[[AGG_CAPTURED]]) // CHECK-NEXT: %[[DOTCOUNT:.+]] = load i32, i32* %[[DOTCOUNT_ADDR]], align 4 // CHECK-NEXT: br label %[[OMP_LOOP_PREHEADER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_LOOP_PREHEADER]]: // CHECK-NEXT: %[[TMP4:.+]] = udiv i32 %[[DOTCOUNT]], 13 // CHECK-NEXT: %[[TMP5:.+]] = urem i32 %[[DOTCOUNT]], 13 // CHECK-NEXT: %[[TMP6:.+]] = icmp ne i32 %[[TMP5]], 0 // CHECK-NEXT: %[[TMP7:.+]] = zext i1 %[[TMP6]] to i32 // CHECK-NEXT: %[[OMP_FLOOR0_TRIPCOUNT:.+]] = add nuw i32 %[[TMP4]], %[[TMP7]] // CHECK-NEXT: br label %[[OMP_FLOOR0_PREHEADER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_FLOOR0_PREHEADER]]: // CHECK-NEXT: store i32 0, i32* %[[P_LOWERBOUND]], align 4 // CHECK-NEXT: %[[TMP8:.+]] = sub i32 %[[OMP_FLOOR0_TRIPCOUNT]], 1 // CHECK-NEXT: store i32 %[[TMP8]], i32* %[[P_UPPERBOUND]], align 4 // CHECK-NEXT: store i32 1, i32* %[[P_STRIDE]], align 4 // CHECK-NEXT: %[[OMP_GLOBAL_THREAD_NUM:.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @1) // CHECK-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* @1, i32 %[[OMP_GLOBAL_THREAD_NUM]], i32 34, i32* %[[P_LASTITER]], i32* %[[P_LOWERBOUND]], i32* %[[P_UPPERBOUND]], i32* %[[P_STRIDE]], i32 1, i32 0) // CHECK-NEXT: %[[TMP9:.+]] = load i32, i32* %[[P_LOWERBOUND]], align 4 // CHECK-NEXT: %[[TMP10:.+]] = load i32, i32* %[[P_UPPERBOUND]], align 
4 // CHECK-NEXT: %[[TMP11:.+]] = sub i32 %[[TMP10]], %[[TMP9]] // CHECK-NEXT: %[[TMP12:.+]] = add i32 %[[TMP11]], 1 // CHECK-NEXT: br label %[[OMP_FLOOR0_HEADER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_FLOOR0_HEADER]]: // CHECK-NEXT: %[[OMP_FLOOR0_IV:.+]] = phi i32 [ 0, %[[OMP_FLOOR0_PREHEADER]] ], [ %[[OMP_FLOOR0_NEXT:.+]], %[[OMP_FLOOR0_INC:.+]] ] // CHECK-NEXT: br label %[[OMP_FLOOR0_COND:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_FLOOR0_COND]]: // CHECK-NEXT: %[[OMP_FLOOR0_CMP:.+]] = icmp ult i32 %[[OMP_FLOOR0_IV]], %[[TMP12]] // CHECK-NEXT: br i1 %[[OMP_FLOOR0_CMP]], label %[[OMP_FLOOR0_BODY:.+]], label %[[OMP_FLOOR0_EXIT:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_FLOOR0_BODY]]: // CHECK-NEXT: %[[TMP13:.+]] = add i32 %[[OMP_FLOOR0_IV]], %[[TMP9]] // CHECK-NEXT: %[[TMP14:.+]] = icmp eq i32 %[[TMP13]], %[[OMP_FLOOR0_TRIPCOUNT]] // CHECK-NEXT: %[[TMP15:.+]] = select i1 %[[TMP14]], i32 %[[TMP5]], i32 13 // CHECK-NEXT: br label %[[OMP_TILE0_PREHEADER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_TILE0_PREHEADER]]: // CHECK-NEXT: br label %[[OMP_TILE0_HEADER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_TILE0_HEADER]]: // CHECK-NEXT: %[[OMP_TILE0_IV:.+]] = phi i32 [ 0, %[[OMP_TILE0_PREHEADER]] ], [ %[[OMP_TILE0_NEXT:.+]], %[[OMP_TILE0_INC:.+]] ] // CHECK-NEXT: br label %[[OMP_TILE0_COND:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_TILE0_COND]]: // CHECK-NEXT: %[[OMP_TILE0_CMP:.+]] = icmp ult i32 %[[OMP_TILE0_IV]], %[[TMP15]] // CHECK-NEXT: br i1 %[[OMP_TILE0_CMP]], label %[[OMP_TILE0_BODY:.+]], label %[[OMP_TILE0_EXIT:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_TILE0_BODY]]: // CHECK-NEXT: %[[TMP16:.+]] = mul nuw i32 13, %[[TMP13]] // CHECK-NEXT: %[[TMP17:.+]] = add nuw i32 %[[TMP16]], %[[OMP_TILE0_IV]] // CHECK-NEXT: br label %[[OMP_LOOP_BODY:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_LOOP_BODY]]: // CHECK-NEXT: call void @__captured_stmt.1(i32* %[[I]], i32 %[[TMP17]], %struct.anon.0* %[[AGG_CAPTURED1]]) // CHECK-NEXT: %[[TMP18:.+]] = load float*, float** %[[B_ADDR]], align 8 // CHECK-NEXT: %[[TMP19:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: %[[IDXPROM:.+]] = sext i32 %[[TMP19]] to i64 // CHECK-NEXT: %[[ARRAYIDX:.+]] = getelementptr inbounds float, float* %[[TMP18]], i64 %[[IDXPROM]] // CHECK-NEXT: %[[TMP20:.+]] = load float, float* %[[ARRAYIDX]], align 4 // CHECK-NEXT: %[[TMP21:.+]] = load float*, float** %[[C_ADDR]], align 8 // CHECK-NEXT: %[[TMP22:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: %[[IDXPROM2:.+]] = sext i32 %[[TMP22]] to i64 // CHECK-NEXT: %[[ARRAYIDX3:.+]] = getelementptr inbounds float, float* %[[TMP21]], i64 %[[IDXPROM2]] // CHECK-NEXT: %[[TMP23:.+]] = load float, float* %[[ARRAYIDX3]], align 4 // CHECK-NEXT: %[[MUL:.+]] = fmul float %[[TMP20]], %[[TMP23]] // CHECK-NEXT: %[[TMP24:.+]] = load float*, float** %[[D_ADDR]], align 8 // CHECK-NEXT: %[[TMP25:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: %[[IDXPROM4:.+]] = sext i32 %[[TMP25]] to i64 // CHECK-NEXT: %[[ARRAYIDX5:.+]] = getelementptr inbounds float, float* %[[TMP24]], i64 %[[IDXPROM4]] // CHECK-NEXT: %[[TMP26:.+]] = load float, float* %[[ARRAYIDX5]], align 4 // CHECK-NEXT: %[[MUL6:.+]] = fmul float %[[MUL]], %[[TMP26]] // CHECK-NEXT: %[[TMP27:.+]] = load float*, float** %[[A_ADDR]], align 8 // CHECK-NEXT: %[[TMP28:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: %[[IDXPROM7:.+]] = sext i32 %[[TMP28]] to i64 // CHECK-NEXT: %[[ARRAYIDX8:.+]] = getelementptr inbounds float, float* %[[TMP27]], i64 %[[IDXPROM7]] // CHECK-NEXT: store float %[[MUL6]], float* %[[ARRAYIDX8]], align 4 // CHECK-NEXT: br label 
%[[OMP_TILE0_INC]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_TILE0_INC]]: // CHECK-NEXT: %[[OMP_TILE0_NEXT]] = add nuw i32 %[[OMP_TILE0_IV]], 1 // CHECK-NEXT: br label %[[OMP_TILE0_HEADER]], !llvm.loop ![[LOOP3:[0-9]+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_TILE0_EXIT]]: // CHECK-NEXT: br label %[[OMP_TILE0_AFTER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_TILE0_AFTER]]: // CHECK-NEXT: br label %[[OMP_FLOOR0_INC]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_FLOOR0_INC]]: // CHECK-NEXT: %[[OMP_FLOOR0_NEXT]] = add nuw i32 %[[OMP_FLOOR0_IV]], 1 // CHECK-NEXT: br label %[[OMP_FLOOR0_HEADER]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_FLOOR0_EXIT]]: // CHECK-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @1, i32 %[[OMP_GLOBAL_THREAD_NUM]]) // CHECK-NEXT: %[[OMP_GLOBAL_THREAD_NUM9:.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @1) // CHECK-NEXT: call void @__kmpc_barrier(%struct.ident_t* @2, i32 %[[OMP_GLOBAL_THREAD_NUM9]]) // CHECK-NEXT: br label %[[OMP_FLOOR0_AFTER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_FLOOR0_AFTER]]: // CHECK-NEXT: br label %[[OMP_LOOP_AFTER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_LOOP_AFTER]]: // CHECK-NEXT: ret void // CHECK-NEXT: } void unroll_partial_heuristic_for(int n, float *a, float *b, float *c, float *d) { #pragma omp for #pragma omp unroll partial(13) for (int i = 0; i < n; i++) { a[i] = b[i] * c[i] * d[i]; } } #endif // HEADER // CHECK-LABEL: define {{.*}}@__captured_stmt( // CHECK-NEXT: [[ENTRY:.*]]: // CHECK-NEXT: %[[DISTANCE_ADDR:.+]] = alloca i32*, align 8 // CHECK-NEXT: %[[__CONTEXT_ADDR:.+]] = alloca %struct.anon*, align 8 // CHECK-NEXT: %[[DOTSTART:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[DOTSTOP:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[DOTSTEP:.+]] = alloca i32, align 4 // CHECK-NEXT: store i32* %[[DISTANCE:.+]], i32** %[[DISTANCE_ADDR]], align 8 // CHECK-NEXT: store %struct.anon* %[[__CONTEXT:.+]], %struct.anon** %[[__CONTEXT_ADDR]], align 8 // CHECK-NEXT: %[[TMP0:.+]] = load %struct.anon*, %struct.anon** %[[__CONTEXT_ADDR]], align 8 // CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[TMP0]], i32 0, i32 0 // CHECK-NEXT: %[[TMP2:.+]] = load i32*, i32** %[[TMP1]], align 8 // CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[TMP2]], align 4 // CHECK-NEXT: store i32 %[[TMP3]], i32* %[[DOTSTART]], align 4 // CHECK-NEXT: %[[TMP4:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[TMP0]], i32 0, i32 1 // CHECK-NEXT: %[[TMP5:.+]] = load i32*, i32** %[[TMP4]], align 8 // CHECK-NEXT: %[[TMP6:.+]] = load i32, i32* %[[TMP5]], align 4 // CHECK-NEXT: store i32 %[[TMP6]], i32* %[[DOTSTOP]], align 4 // CHECK-NEXT: store i32 1, i32* %[[DOTSTEP]], align 4 // CHECK-NEXT: %[[TMP7:.+]] = load i32, i32* %[[DOTSTART]], align 4 // CHECK-NEXT: %[[TMP8:.+]] = load i32, i32* %[[DOTSTOP]], align 4 // CHECK-NEXT: %[[CMP:.+]] = icmp slt i32 %[[TMP7]], %[[TMP8]] // CHECK-NEXT: br i1 %[[CMP]], label %[[COND_TRUE:.+]], label %[[COND_FALSE:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[COND_TRUE]]: // CHECK-NEXT: %[[TMP9:.+]] = load i32, i32* %[[DOTSTOP]], align 4 // CHECK-NEXT: %[[TMP10:.+]] = load i32, i32* %[[DOTSTART]], align 4 // CHECK-NEXT: %[[SUB:.+]] = sub nsw i32 %[[TMP9]], %[[TMP10]] // CHECK-NEXT: %[[TMP11:.+]] = load i32, i32* %[[DOTSTEP]], align 4 // CHECK-NEXT: %[[SUB1:.+]] = sub i32 %[[TMP11]], 1 // CHECK-NEXT: %[[ADD:.+]] = add i32 %[[SUB]], %[[SUB1]] // CHECK-NEXT: %[[TMP12:.+]] = load i32, i32* %[[DOTSTEP]], align 4 // CHECK-NEXT: %[[DIV:.+]] = udiv i32 %[[ADD]], %[[TMP12]] // CHECK-NEXT: br label %[[COND_END:.+]] // CHECK-EMPTY: 
// CHECK-NEXT: [[COND_FALSE]]: // CHECK-NEXT: br label %[[COND_END]] // CHECK-EMPTY: // CHECK-NEXT: [[COND_END]]: // CHECK-NEXT: %[[COND:.+]] = phi i32 [ %[[DIV]], %[[COND_TRUE]] ], [ 0, %[[COND_FALSE]] ] // CHECK-NEXT: %[[TMP13:.+]] = load i32*, i32** %[[DISTANCE_ADDR]], align 8 // CHECK-NEXT: store i32 %[[COND]], i32* %[[TMP13]], align 4 // CHECK-NEXT: ret void // CHECK-NEXT: } // CHECK-LABEL: define {{.*}}@__captured_stmt.1( // CHECK-NEXT: [[ENTRY:.*]]: // CHECK-NEXT: %[[LOOPVAR_ADDR:.+]] = alloca i32*, align 8 // CHECK-NEXT: %[[LOGICAL_ADDR:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[__CONTEXT_ADDR:.+]] = alloca %struct.anon.0*, align 8 // CHECK-NEXT: store i32* %[[LOOPVAR:.+]], i32** %[[LOOPVAR_ADDR]], align 8 // CHECK-NEXT: store i32 %[[LOGICAL:.+]], i32* %[[LOGICAL_ADDR]], align 4 // CHECK-NEXT: store %struct.anon.0* %[[__CONTEXT:.+]], %struct.anon.0** %[[__CONTEXT_ADDR]], align 8 // CHECK-NEXT: %[[TMP0:.+]] = load %struct.anon.0*, %struct.anon.0** %[[__CONTEXT_ADDR]], align 8 // CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon.0, %struct.anon.0* %[[TMP0]], i32 0, i32 0 // CHECK-NEXT: %[[TMP2:.+]] = load i32, i32* %[[TMP1]], align 4 // CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[LOGICAL_ADDR]], align 4 // CHECK-NEXT: %[[MUL:.+]] = mul i32 1, %[[TMP3]] // CHECK-NEXT: %[[ADD:.+]] = add i32 %[[TMP2]], %[[MUL]] // CHECK-NEXT: %[[TMP4:.+]] = load i32*, i32** %[[LOOPVAR_ADDR]], align 8 // CHECK-NEXT: store i32 %[[ADD]], i32* %[[TMP4]], align 4 // CHECK-NEXT: ret void // CHECK-NEXT: } // CHECK: ![[META0:[0-9]+]] = !{i32 1, !"wchar_size", i32 4} // CHECK: ![[META1:[0-9]+]] = !{i32 7, !"openmp", i32 51} // CHECK: ![[META2:[0-9]+]] = // CHECK: ![[LOOP3]] = distinct !{![[LOOP3]], ![[LOOPPROP4:[0-9]+]], ![[LOOPPROP5:[0-9]+]]} // CHECK: ![[LOOPPROP4]] = !{!"llvm.loop.unroll.enable"} // CHECK: ![[LOOPPROP5]] = !{!"llvm.loop.unroll.count", i32 13}
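/*
 * A plain-C sketch of the loop nest the IR above builds for
 * "#pragma omp unroll partial(13)": the logical iteration space of size
 * count is split into a "floor" loop over ceil(count/13) chunks and a
 * "tile" loop of at most 13 iterations, and the tile loop carries the
 * llvm.loop.unroll.enable / llvm.loop.unroll.count(13) metadata so the
 * backend unrolls it.  The worksharing of the floor loop
 * (__kmpc_for_static_init_4u) is omitted here; this illustrates the
 * decomposition and is not a literal transcription of the generated IR.
 */
#include <stdio.h>

static void body(int i) { printf("i = %d\n", i); }

void unroll_partial_sketch(unsigned count)
{
   unsigned floor_trips = count / 13 + (count % 13 != 0 ? 1 : 0);
   unsigned remainder   = count % 13;

   for (unsigned floor_iv = 0; floor_iv < floor_trips; ++floor_iv) {
      /* the last chunk may be shorter than the unroll factor */
      unsigned tile_len =
          (floor_iv + 1 == floor_trips && remainder != 0) ? remainder : 13;
      for (unsigned tile_iv = 0; tile_iv < tile_len; ++tile_iv)
         body((int) (13 * floor_iv + tile_iv));   /* logical iteration number */
   }
}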
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs // RUN: %clang_cc1 -fopenmp-enable-irbuilder -verify -fopenmp -fopenmp-version=51 -x c -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s // expected-no-diagnostics #ifndef HEADER #define HEADER // CHECK-LABEL: define {{.*}}@unroll_partial_heuristic_for( // CHECK-NEXT: [[ENTRY:.*]]: // CHECK-NEXT: %[[N_ADDR:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[A_ADDR:.+]] = alloca float*, align 8 // CHECK-NEXT: %[[B_ADDR:.+]] = alloca float*, align 8 // CHECK-NEXT: %[[C_ADDR:.+]] = alloca float*, align 8 // CHECK-NEXT: %[[D_ADDR:.+]] = alloca float*, align 8 // CHECK-NEXT: %[[I:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[AGG_CAPTURED:.+]] = alloca %struct.anon, align 8 // CHECK-NEXT: %[[AGG_CAPTURED1:.+]] = alloca %struct.anon.0, align 4 // CHECK-NEXT: %[[DOTCOUNT_ADDR:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[P_LASTITER:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[P_LOWERBOUND:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[P_UPPERBOUND:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[P_STRIDE:.+]] = alloca i32, align 4 // CHECK-NEXT: store i32 %[[N:.+]], i32* %[[N_ADDR]], align 4 // CHECK-NEXT: store float* %[[A:.+]], float** %[[A_ADDR]], align 8 // CHECK-NEXT: store float* %[[B:.+]], float** %[[B_ADDR]], align 8 // CHECK-NEXT: store float* %[[C:.+]], float** %[[C_ADDR]], align 8 // CHECK-NEXT: store float* %[[D:.+]], float** %[[D_ADDR]], align 8 // CHECK-NEXT: store i32 0, i32* %[[I]], align 4 // CHECK-NEXT: %[[TMP0:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[AGG_CAPTURED]], i32 0, i32 0 // CHECK-NEXT: store i32* %[[I]], i32** %[[TMP0]], align 8 // CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[AGG_CAPTURED]], i32 0, i32 1 // CHECK-NEXT: store i32* %[[N_ADDR]], i32** %[[TMP1]], align 8 // CHECK-NEXT: %[[TMP2:.+]] = getelementptr inbounds %struct.anon.0, %struct.anon.0* %[[AGG_CAPTURED1]], i32 0, i32 0 // CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: store i32 %[[TMP3]], i32* %[[TMP2]], align 4 // CHECK-NEXT: call void @__captured_stmt(i32* %[[DOTCOUNT_ADDR]], %struct.anon* %[[AGG_CAPTURED]]) // CHECK-NEXT: %[[DOTCOUNT:.+]] = load i32, i32* %[[DOTCOUNT_ADDR]], align 4 // CHECK-NEXT: br label %[[OMP_LOOP_PREHEADER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_LOOP_PREHEADER]]: // CHECK-NEXT: %[[TMP4:.+]] = udiv i32 %[[DOTCOUNT]], 13 // CHECK-NEXT: %[[TMP5:.+]] = urem i32 %[[DOTCOUNT]], 13 // CHECK-NEXT: %[[TMP6:.+]] = icmp ne i32 %[[TMP5]], 0 // CHECK-NEXT: %[[TMP7:.+]] = zext i1 %[[TMP6]] to i32 // CHECK-NEXT: %[[OMP_FLOOR0_TRIPCOUNT:.+]] = add nuw i32 %[[TMP4]], %[[TMP7]] // CHECK-NEXT: br label %[[OMP_FLOOR0_PREHEADER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_FLOOR0_PREHEADER]]: // CHECK-NEXT: store i32 0, i32* %[[P_LOWERBOUND]], align 4 // CHECK-NEXT: %[[TMP8:.+]] = sub i32 %[[OMP_FLOOR0_TRIPCOUNT]], 1 // CHECK-NEXT: store i32 %[[TMP8]], i32* %[[P_UPPERBOUND]], align 4 // CHECK-NEXT: store i32 1, i32* %[[P_STRIDE]], align 4 // CHECK-NEXT: %[[OMP_GLOBAL_THREAD_NUM:.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @1) // CHECK-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* @1, i32 %[[OMP_GLOBAL_THREAD_NUM]], i32 34, i32* %[[P_LASTITER]], i32* %[[P_LOWERBOUND]], i32* %[[P_UPPERBOUND]], i32* %[[P_STRIDE]], i32 1, i32 0) // CHECK-NEXT: %[[TMP9:.+]] = load i32, i32* %[[P_LOWERBOUND]], align 4 // CHECK-NEXT: %[[TMP10:.+]] = load i32, i32* %[[P_UPPERBOUND]], align 
4 // CHECK-NEXT: %[[TMP11:.+]] = sub i32 %[[TMP10]], %[[TMP9]] // CHECK-NEXT: %[[TMP12:.+]] = add i32 %[[TMP11]], 1 // CHECK-NEXT: br label %[[OMP_FLOOR0_HEADER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_FLOOR0_HEADER]]: // CHECK-NEXT: %[[OMP_FLOOR0_IV:.+]] = phi i32 [ 0, %[[OMP_FLOOR0_PREHEADER]] ], [ %[[OMP_FLOOR0_NEXT:.+]], %[[OMP_FLOOR0_INC:.+]] ] // CHECK-NEXT: br label %[[OMP_FLOOR0_COND:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_FLOOR0_COND]]: // CHECK-NEXT: %[[OMP_FLOOR0_CMP:.+]] = icmp ult i32 %[[OMP_FLOOR0_IV]], %[[TMP12]] // CHECK-NEXT: br i1 %[[OMP_FLOOR0_CMP]], label %[[OMP_FLOOR0_BODY:.+]], label %[[OMP_FLOOR0_EXIT:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_FLOOR0_BODY]]: // CHECK-NEXT: %[[TMP13:.+]] = add i32 %[[OMP_FLOOR0_IV]], %[[TMP9]] // CHECK-NEXT: %[[TMP14:.+]] = icmp eq i32 %[[TMP13]], %[[OMP_FLOOR0_TRIPCOUNT]] // CHECK-NEXT: %[[TMP15:.+]] = select i1 %[[TMP14]], i32 %[[TMP5]], i32 13 // CHECK-NEXT: br label %[[OMP_TILE0_PREHEADER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_TILE0_PREHEADER]]: // CHECK-NEXT: br label %[[OMP_TILE0_HEADER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_TILE0_HEADER]]: // CHECK-NEXT: %[[OMP_TILE0_IV:.+]] = phi i32 [ 0, %[[OMP_TILE0_PREHEADER]] ], [ %[[OMP_TILE0_NEXT:.+]], %[[OMP_TILE0_INC:.+]] ] // CHECK-NEXT: br label %[[OMP_TILE0_COND:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_TILE0_COND]]: // CHECK-NEXT: %[[OMP_TILE0_CMP:.+]] = icmp ult i32 %[[OMP_TILE0_IV]], %[[TMP15]] // CHECK-NEXT: br i1 %[[OMP_TILE0_CMP]], label %[[OMP_TILE0_BODY:.+]], label %[[OMP_TILE0_EXIT:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_TILE0_BODY]]: // CHECK-NEXT: %[[TMP16:.+]] = mul nuw i32 13, %[[TMP13]] // CHECK-NEXT: %[[TMP17:.+]] = add nuw i32 %[[TMP16]], %[[OMP_TILE0_IV]] // CHECK-NEXT: br label %[[OMP_LOOP_BODY:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_LOOP_BODY]]: // CHECK-NEXT: call void @__captured_stmt.1(i32* %[[I]], i32 %[[TMP17]], %struct.anon.0* %[[AGG_CAPTURED1]]) // CHECK-NEXT: %[[TMP18:.+]] = load float*, float** %[[B_ADDR]], align 8 // CHECK-NEXT: %[[TMP19:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: %[[IDXPROM:.+]] = sext i32 %[[TMP19]] to i64 // CHECK-NEXT: %[[ARRAYIDX:.+]] = getelementptr inbounds float, float* %[[TMP18]], i64 %[[IDXPROM]] // CHECK-NEXT: %[[TMP20:.+]] = load float, float* %[[ARRAYIDX]], align 4 // CHECK-NEXT: %[[TMP21:.+]] = load float*, float** %[[C_ADDR]], align 8 // CHECK-NEXT: %[[TMP22:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: %[[IDXPROM2:.+]] = sext i32 %[[TMP22]] to i64 // CHECK-NEXT: %[[ARRAYIDX3:.+]] = getelementptr inbounds float, float* %[[TMP21]], i64 %[[IDXPROM2]] // CHECK-NEXT: %[[TMP23:.+]] = load float, float* %[[ARRAYIDX3]], align 4 // CHECK-NEXT: %[[MUL:.+]] = fmul float %[[TMP20]], %[[TMP23]] // CHECK-NEXT: %[[TMP24:.+]] = load float*, float** %[[D_ADDR]], align 8 // CHECK-NEXT: %[[TMP25:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: %[[IDXPROM4:.+]] = sext i32 %[[TMP25]] to i64 // CHECK-NEXT: %[[ARRAYIDX5:.+]] = getelementptr inbounds float, float* %[[TMP24]], i64 %[[IDXPROM4]] // CHECK-NEXT: %[[TMP26:.+]] = load float, float* %[[ARRAYIDX5]], align 4 // CHECK-NEXT: %[[MUL6:.+]] = fmul float %[[MUL]], %[[TMP26]] // CHECK-NEXT: %[[TMP27:.+]] = load float*, float** %[[A_ADDR]], align 8 // CHECK-NEXT: %[[TMP28:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: %[[IDXPROM7:.+]] = sext i32 %[[TMP28]] to i64 // CHECK-NEXT: %[[ARRAYIDX8:.+]] = getelementptr inbounds float, float* %[[TMP27]], i64 %[[IDXPROM7]] // CHECK-NEXT: store float %[[MUL6]], float* %[[ARRAYIDX8]], align 4 // CHECK-NEXT: br label 
%[[OMP_TILE0_INC]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_TILE0_INC]]: // CHECK-NEXT: %[[OMP_TILE0_NEXT]] = add nuw i32 %[[OMP_TILE0_IV]], 1 // CHECK-NEXT: br label %[[OMP_TILE0_HEADER]], !llvm.loop ![[LOOP3:[0-9]+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_TILE0_EXIT]]: // CHECK-NEXT: br label %[[OMP_TILE0_AFTER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_TILE0_AFTER]]: // CHECK-NEXT: br label %[[OMP_FLOOR0_INC]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_FLOOR0_INC]]: // CHECK-NEXT: %[[OMP_FLOOR0_NEXT]] = add nuw i32 %[[OMP_FLOOR0_IV]], 1 // CHECK-NEXT: br label %[[OMP_FLOOR0_HEADER]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_FLOOR0_EXIT]]: // CHECK-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @1, i32 %[[OMP_GLOBAL_THREAD_NUM]]) // CHECK-NEXT: %[[OMP_GLOBAL_THREAD_NUM9:.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @1) // CHECK-NEXT: call void @__kmpc_barrier(%struct.ident_t* @2, i32 %[[OMP_GLOBAL_THREAD_NUM9]]) // CHECK-NEXT: br label %[[OMP_FLOOR0_AFTER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_FLOOR0_AFTER]]: // CHECK-NEXT: br label %[[OMP_LOOP_AFTER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_LOOP_AFTER]]: // CHECK-NEXT: ret void // CHECK-NEXT: } void unroll_partial_heuristic_for(int n, float *a, float *b, float *c, float *d) { for (int i = 0; i < n; i++) { a[i] = b[i] * c[i] * d[i]; } } #endif // HEADER // CHECK-LABEL: define {{.*}}@__captured_stmt( // CHECK-NEXT: [[ENTRY:.*]]: // CHECK-NEXT: %[[DISTANCE_ADDR:.+]] = alloca i32*, align 8 // CHECK-NEXT: %[[__CONTEXT_ADDR:.+]] = alloca %struct.anon*, align 8 // CHECK-NEXT: %[[DOTSTART:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[DOTSTOP:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[DOTSTEP:.+]] = alloca i32, align 4 // CHECK-NEXT: store i32* %[[DISTANCE:.+]], i32** %[[DISTANCE_ADDR]], align 8 // CHECK-NEXT: store %struct.anon* %[[__CONTEXT:.+]], %struct.anon** %[[__CONTEXT_ADDR]], align 8 // CHECK-NEXT: %[[TMP0:.+]] = load %struct.anon*, %struct.anon** %[[__CONTEXT_ADDR]], align 8 // CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[TMP0]], i32 0, i32 0 // CHECK-NEXT: %[[TMP2:.+]] = load i32*, i32** %[[TMP1]], align 8 // CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[TMP2]], align 4 // CHECK-NEXT: store i32 %[[TMP3]], i32* %[[DOTSTART]], align 4 // CHECK-NEXT: %[[TMP4:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[TMP0]], i32 0, i32 1 // CHECK-NEXT: %[[TMP5:.+]] = load i32*, i32** %[[TMP4]], align 8 // CHECK-NEXT: %[[TMP6:.+]] = load i32, i32* %[[TMP5]], align 4 // CHECK-NEXT: store i32 %[[TMP6]], i32* %[[DOTSTOP]], align 4 // CHECK-NEXT: store i32 1, i32* %[[DOTSTEP]], align 4 // CHECK-NEXT: %[[TMP7:.+]] = load i32, i32* %[[DOTSTART]], align 4 // CHECK-NEXT: %[[TMP8:.+]] = load i32, i32* %[[DOTSTOP]], align 4 // CHECK-NEXT: %[[CMP:.+]] = icmp slt i32 %[[TMP7]], %[[TMP8]] // CHECK-NEXT: br i1 %[[CMP]], label %[[COND_TRUE:.+]], label %[[COND_FALSE:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[COND_TRUE]]: // CHECK-NEXT: %[[TMP9:.+]] = load i32, i32* %[[DOTSTOP]], align 4 // CHECK-NEXT: %[[TMP10:.+]] = load i32, i32* %[[DOTSTART]], align 4 // CHECK-NEXT: %[[SUB:.+]] = sub nsw i32 %[[TMP9]], %[[TMP10]] // CHECK-NEXT: %[[TMP11:.+]] = load i32, i32* %[[DOTSTEP]], align 4 // CHECK-NEXT: %[[SUB1:.+]] = sub i32 %[[TMP11]], 1 // CHECK-NEXT: %[[ADD:.+]] = add i32 %[[SUB]], %[[SUB1]] // CHECK-NEXT: %[[TMP12:.+]] = load i32, i32* %[[DOTSTEP]], align 4 // CHECK-NEXT: %[[DIV:.+]] = udiv i32 %[[ADD]], %[[TMP12]] // CHECK-NEXT: br label %[[COND_END:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[COND_FALSE]]: // CHECK-NEXT: 
br label %[[COND_END]] // CHECK-EMPTY: // CHECK-NEXT: [[COND_END]]: // CHECK-NEXT: %[[COND:.+]] = phi i32 [ %[[DIV]], %[[COND_TRUE]] ], [ 0, %[[COND_FALSE]] ] // CHECK-NEXT: %[[TMP13:.+]] = load i32*, i32** %[[DISTANCE_ADDR]], align 8 // CHECK-NEXT: store i32 %[[COND]], i32* %[[TMP13]], align 4 // CHECK-NEXT: ret void // CHECK-NEXT: } // CHECK-LABEL: define {{.*}}@__captured_stmt.1( // CHECK-NEXT: [[ENTRY:.*]]: // CHECK-NEXT: %[[LOOPVAR_ADDR:.+]] = alloca i32*, align 8 // CHECK-NEXT: %[[LOGICAL_ADDR:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[__CONTEXT_ADDR:.+]] = alloca %struct.anon.0*, align 8 // CHECK-NEXT: store i32* %[[LOOPVAR:.+]], i32** %[[LOOPVAR_ADDR]], align 8 // CHECK-NEXT: store i32 %[[LOGICAL:.+]], i32* %[[LOGICAL_ADDR]], align 4 // CHECK-NEXT: store %struct.anon.0* %[[__CONTEXT:.+]], %struct.anon.0** %[[__CONTEXT_ADDR]], align 8 // CHECK-NEXT: %[[TMP0:.+]] = load %struct.anon.0*, %struct.anon.0** %[[__CONTEXT_ADDR]], align 8 // CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon.0, %struct.anon.0* %[[TMP0]], i32 0, i32 0 // CHECK-NEXT: %[[TMP2:.+]] = load i32, i32* %[[TMP1]], align 4 // CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[LOGICAL_ADDR]], align 4 // CHECK-NEXT: %[[MUL:.+]] = mul i32 1, %[[TMP3]] // CHECK-NEXT: %[[ADD:.+]] = add i32 %[[TMP2]], %[[MUL]] // CHECK-NEXT: %[[TMP4:.+]] = load i32*, i32** %[[LOOPVAR_ADDR]], align 8 // CHECK-NEXT: store i32 %[[ADD]], i32* %[[TMP4]], align 4 // CHECK-NEXT: ret void // CHECK-NEXT: } // CHECK: ![[META0:[0-9]+]] = !{i32 1, !"wchar_size", i32 4} // CHECK: ![[META1:[0-9]+]] = !{i32 7, !"openmp", i32 51} // CHECK: ![[META2:[0-9]+]] = // CHECK: ![[LOOP3]] = distinct !{![[LOOP3]], ![[LOOPPROP4:[0-9]+]], ![[LOOPPROP5:[0-9]+]]} // CHECK: ![[LOOPPROP4]] = !{!"llvm.loop.unroll.enable"} // CHECK: ![[LOOPPROP5]] = !{!"llvm.loop.unroll.count", i32 13}
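/*
 * A hypothetical harness for the two variants of unroll_partial_heuristic_for
 * shown in this file: the partially unrolled loop and the plain loop must
 * fill a[] identically, so linking this driver against either definition and
 * comparing with the reference product is a quick sanity check.  The driver
 * is invented for illustration; N is deliberately not a multiple of 13 so
 * the remainder tile is exercised.
 */
#include <stdio.h>
#include <stdlib.h>

void unroll_partial_heuristic_for(int n, float *a, float *b, float *c, float *d);

int main(void)
{
   enum { N = 1000 };  /* 1000 % 13 != 0, so the partial tile is used */
   float *a = malloc(N * sizeof(float)), *ref = malloc(N * sizeof(float));
   float *b = malloc(N * sizeof(float)), *c = malloc(N * sizeof(float));
   float *d = malloc(N * sizeof(float));
   int i, status = 0;

   for (i = 0; i < N; i++) {
      b[i] = (float) i;
      c[i] = 0.5f;
      d[i] = 2.0f;
      ref[i] = b[i] * c[i] * d[i];  /* same product both variants compute */
   }

   unroll_partial_heuristic_for(N, a, b, c, d);

   for (i = 0; i < N; i++) {
      if (a[i] != ref[i]) {         /* identical expression, so results match exactly */
         printf("mismatch at %d\n", i);
         status = 1;
      }
   }
   if (!status)
      printf("ok\n");

   free(a); free(ref); free(b); free(c); free(d);
   return status;
}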
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs // RUN: %clang_cc1 -fopenmp-enable-irbuilder -verify -fopenmp -fopenmp-version=51 -x c -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s // expected-no-diagnostics #ifndef HEADER #define HEADER // CHECK-LABEL: define {{.*}}@unroll_partial_heuristic_for( // CHECK-NEXT: [[ENTRY:.*]]: // CHECK-NEXT: %[[N_ADDR:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[A_ADDR:.+]] = alloca float*, align 8 // CHECK-NEXT: %[[B_ADDR:.+]] = alloca float*, align 8 // CHECK-NEXT: %[[C_ADDR:.+]] = alloca float*, align 8 // CHECK-NEXT: %[[D_ADDR:.+]] = alloca float*, align 8 // CHECK-NEXT: %[[I:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[AGG_CAPTURED:.+]] = alloca %struct.anon, align 8 // CHECK-NEXT: %[[AGG_CAPTURED1:.+]] = alloca %struct.anon.0, align 4 // CHECK-NEXT: %[[DOTCOUNT_ADDR:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[P_LASTITER:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[P_LOWERBOUND:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[P_UPPERBOUND:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[P_STRIDE:.+]] = alloca i32, align 4 // CHECK-NEXT: store i32 %[[N:.+]], i32* %[[N_ADDR]], align 4 // CHECK-NEXT: store float* %[[A:.+]], float** %[[A_ADDR]], align 8 // CHECK-NEXT: store float* %[[B:.+]], float** %[[B_ADDR]], align 8 // CHECK-NEXT: store float* %[[C:.+]], float** %[[C_ADDR]], align 8 // CHECK-NEXT: store float* %[[D:.+]], float** %[[D_ADDR]], align 8 // CHECK-NEXT: store i32 0, i32* %[[I]], align 4 // CHECK-NEXT: %[[TMP0:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[AGG_CAPTURED]], i32 0, i32 0 // CHECK-NEXT: store i32* %[[I]], i32** %[[TMP0]], align 8 // CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[AGG_CAPTURED]], i32 0, i32 1 // CHECK-NEXT: store i32* %[[N_ADDR]], i32** %[[TMP1]], align 8 // CHECK-NEXT: %[[TMP2:.+]] = getelementptr inbounds %struct.anon.0, %struct.anon.0* %[[AGG_CAPTURED1]], i32 0, i32 0 // CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: store i32 %[[TMP3]], i32* %[[TMP2]], align 4 // CHECK-NEXT: call void @__captured_stmt(i32* %[[DOTCOUNT_ADDR]], %struct.anon* %[[AGG_CAPTURED]]) // CHECK-NEXT: %[[DOTCOUNT:.+]] = load i32, i32* %[[DOTCOUNT_ADDR]], align 4 // CHECK-NEXT: br label %[[OMP_LOOP_PREHEADER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_LOOP_PREHEADER]]: // CHECK-NEXT: %[[TMP4:.+]] = udiv i32 %[[DOTCOUNT]], 13 // CHECK-NEXT: %[[TMP5:.+]] = urem i32 %[[DOTCOUNT]], 13 // CHECK-NEXT: %[[TMP6:.+]] = icmp ne i32 %[[TMP5]], 0 // CHECK-NEXT: %[[TMP7:.+]] = zext i1 %[[TMP6]] to i32 // CHECK-NEXT: %[[OMP_FLOOR0_TRIPCOUNT:.+]] = add nuw i32 %[[TMP4]], %[[TMP7]] // CHECK-NEXT: br label %[[OMP_FLOOR0_PREHEADER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_FLOOR0_PREHEADER]]: // CHECK-NEXT: store i32 0, i32* %[[P_LOWERBOUND]], align 4 // CHECK-NEXT: %[[TMP8:.+]] = sub i32 %[[OMP_FLOOR0_TRIPCOUNT]], 1 // CHECK-NEXT: store i32 %[[TMP8]], i32* %[[P_UPPERBOUND]], align 4 // CHECK-NEXT: store i32 1, i32* %[[P_STRIDE]], align 4 // CHECK-NEXT: %[[OMP_GLOBAL_THREAD_NUM:.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @1) // CHECK-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* @1, i32 %[[OMP_GLOBAL_THREAD_NUM]], i32 34, i32* %[[P_LASTITER]], i32* %[[P_LOWERBOUND]], i32* %[[P_UPPERBOUND]], i32* %[[P_STRIDE]], i32 1, i32 0) // CHECK-NEXT: %[[TMP9:.+]] = load i32, i32* %[[P_LOWERBOUND]], align 4 // CHECK-NEXT: %[[TMP10:.+]] = load i32, i32* %[[P_UPPERBOUND]], align 
4 // CHECK-NEXT: %[[TMP11:.+]] = sub i32 %[[TMP10]], %[[TMP9]] // CHECK-NEXT: %[[TMP12:.+]] = add i32 %[[TMP11]], 1 // CHECK-NEXT: br label %[[OMP_FLOOR0_HEADER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_FLOOR0_HEADER]]: // CHECK-NEXT: %[[OMP_FLOOR0_IV:.+]] = phi i32 [ 0, %[[OMP_FLOOR0_PREHEADER]] ], [ %[[OMP_FLOOR0_NEXT:.+]], %[[OMP_FLOOR0_INC:.+]] ] // CHECK-NEXT: br label %[[OMP_FLOOR0_COND:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_FLOOR0_COND]]: // CHECK-NEXT: %[[OMP_FLOOR0_CMP:.+]] = icmp ult i32 %[[OMP_FLOOR0_IV]], %[[TMP12]] // CHECK-NEXT: br i1 %[[OMP_FLOOR0_CMP]], label %[[OMP_FLOOR0_BODY:.+]], label %[[OMP_FLOOR0_EXIT:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_FLOOR0_BODY]]: // CHECK-NEXT: %[[TMP13:.+]] = add i32 %[[OMP_FLOOR0_IV]], %[[TMP9]] // CHECK-NEXT: %[[TMP14:.+]] = icmp eq i32 %[[TMP13]], %[[OMP_FLOOR0_TRIPCOUNT]] // CHECK-NEXT: %[[TMP15:.+]] = select i1 %[[TMP14]], i32 %[[TMP5]], i32 13 // CHECK-NEXT: br label %[[OMP_TILE0_PREHEADER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_TILE0_PREHEADER]]: // CHECK-NEXT: br label %[[OMP_TILE0_HEADER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_TILE0_HEADER]]: // CHECK-NEXT: %[[OMP_TILE0_IV:.+]] = phi i32 [ 0, %[[OMP_TILE0_PREHEADER]] ], [ %[[OMP_TILE0_NEXT:.+]], %[[OMP_TILE0_INC:.+]] ] // CHECK-NEXT: br label %[[OMP_TILE0_COND:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_TILE0_COND]]: // CHECK-NEXT: %[[OMP_TILE0_CMP:.+]] = icmp ult i32 %[[OMP_TILE0_IV]], %[[TMP15]] // CHECK-NEXT: br i1 %[[OMP_TILE0_CMP]], label %[[OMP_TILE0_BODY:.+]], label %[[OMP_TILE0_EXIT:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_TILE0_BODY]]: // CHECK-NEXT: %[[TMP16:.+]] = mul nuw i32 13, %[[TMP13]] // CHECK-NEXT: %[[TMP17:.+]] = add nuw i32 %[[TMP16]], %[[OMP_TILE0_IV]] // CHECK-NEXT: br label %[[OMP_LOOP_BODY:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_LOOP_BODY]]: // CHECK-NEXT: call void @__captured_stmt.1(i32* %[[I]], i32 %[[TMP17]], %struct.anon.0* %[[AGG_CAPTURED1]]) // CHECK-NEXT: %[[TMP18:.+]] = load float*, float** %[[B_ADDR]], align 8 // CHECK-NEXT: %[[TMP19:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: %[[IDXPROM:.+]] = sext i32 %[[TMP19]] to i64 // CHECK-NEXT: %[[ARRAYIDX:.+]] = getelementptr inbounds float, float* %[[TMP18]], i64 %[[IDXPROM]] // CHECK-NEXT: %[[TMP20:.+]] = load float, float* %[[ARRAYIDX]], align 4 // CHECK-NEXT: %[[TMP21:.+]] = load float*, float** %[[C_ADDR]], align 8 // CHECK-NEXT: %[[TMP22:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: %[[IDXPROM2:.+]] = sext i32 %[[TMP22]] to i64 // CHECK-NEXT: %[[ARRAYIDX3:.+]] = getelementptr inbounds float, float* %[[TMP21]], i64 %[[IDXPROM2]] // CHECK-NEXT: %[[TMP23:.+]] = load float, float* %[[ARRAYIDX3]], align 4 // CHECK-NEXT: %[[MUL:.+]] = fmul float %[[TMP20]], %[[TMP23]] // CHECK-NEXT: %[[TMP24:.+]] = load float*, float** %[[D_ADDR]], align 8 // CHECK-NEXT: %[[TMP25:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: %[[IDXPROM4:.+]] = sext i32 %[[TMP25]] to i64 // CHECK-NEXT: %[[ARRAYIDX5:.+]] = getelementptr inbounds float, float* %[[TMP24]], i64 %[[IDXPROM4]] // CHECK-NEXT: %[[TMP26:.+]] = load float, float* %[[ARRAYIDX5]], align 4 // CHECK-NEXT: %[[MUL6:.+]] = fmul float %[[MUL]], %[[TMP26]] // CHECK-NEXT: %[[TMP27:.+]] = load float*, float** %[[A_ADDR]], align 8 // CHECK-NEXT: %[[TMP28:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: %[[IDXPROM7:.+]] = sext i32 %[[TMP28]] to i64 // CHECK-NEXT: %[[ARRAYIDX8:.+]] = getelementptr inbounds float, float* %[[TMP27]], i64 %[[IDXPROM7]] // CHECK-NEXT: store float %[[MUL6]], float* %[[ARRAYIDX8]], align 4 // CHECK-NEXT: br label 
%[[OMP_TILE0_INC]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_TILE0_INC]]: // CHECK-NEXT: %[[OMP_TILE0_NEXT]] = add nuw i32 %[[OMP_TILE0_IV]], 1 // CHECK-NEXT: br label %[[OMP_TILE0_HEADER]], !llvm.loop ![[LOOP3:[0-9]+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_TILE0_EXIT]]: // CHECK-NEXT: br label %[[OMP_TILE0_AFTER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_TILE0_AFTER]]: // CHECK-NEXT: br label %[[OMP_FLOOR0_INC]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_FLOOR0_INC]]: // CHECK-NEXT: %[[OMP_FLOOR0_NEXT]] = add nuw i32 %[[OMP_FLOOR0_IV]], 1 // CHECK-NEXT: br label %[[OMP_FLOOR0_HEADER]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_FLOOR0_EXIT]]: // CHECK-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @1, i32 %[[OMP_GLOBAL_THREAD_NUM]]) // CHECK-NEXT: %[[OMP_GLOBAL_THREAD_NUM9:.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @1) // CHECK-NEXT: call void @__kmpc_barrier(%struct.ident_t* @2, i32 %[[OMP_GLOBAL_THREAD_NUM9]]) // CHECK-NEXT: br label %[[OMP_FLOOR0_AFTER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_FLOOR0_AFTER]]: // CHECK-NEXT: br label %[[OMP_LOOP_AFTER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_LOOP_AFTER]]: // CHECK-NEXT: ret void // CHECK-NEXT: } void unroll_partial_heuristic_for(int n, float *a, float *b, float *c, float *d) { #pragma omp for #pragma omp unroll partial(13) for (int i = 0; i < n; i++) { a[i] = b[i] * c[i] * d[i]; } } #endif // HEADER // CHECK-LABEL: define {{.*}}@__captured_stmt( // CHECK-NEXT: [[ENTRY:.*]]: // CHECK-NEXT: %[[DISTANCE_ADDR:.+]] = alloca i32*, align 8 // CHECK-NEXT: %[[__CONTEXT_ADDR:.+]] = alloca %struct.anon*, align 8 // CHECK-NEXT: %[[DOTSTART:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[DOTSTOP:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[DOTSTEP:.+]] = alloca i32, align 4 // CHECK-NEXT: store i32* %[[DISTANCE:.+]], i32** %[[DISTANCE_ADDR]], align 8 // CHECK-NEXT: store %struct.anon* %[[__CONTEXT:.+]], %struct.anon** %[[__CONTEXT_ADDR]], align 8 // CHECK-NEXT: %[[TMP0:.+]] = load %struct.anon*, %struct.anon** %[[__CONTEXT_ADDR]], align 8 // CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[TMP0]], i32 0, i32 0 // CHECK-NEXT: %[[TMP2:.+]] = load i32*, i32** %[[TMP1]], align 8 // CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[TMP2]], align 4 // CHECK-NEXT: store i32 %[[TMP3]], i32* %[[DOTSTART]], align 4 // CHECK-NEXT: %[[TMP4:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[TMP0]], i32 0, i32 1 // CHECK-NEXT: %[[TMP5:.+]] = load i32*, i32** %[[TMP4]], align 8 // CHECK-NEXT: %[[TMP6:.+]] = load i32, i32* %[[TMP5]], align 4 // CHECK-NEXT: store i32 %[[TMP6]], i32* %[[DOTSTOP]], align 4 // CHECK-NEXT: store i32 1, i32* %[[DOTSTEP]], align 4 // CHECK-NEXT: %[[TMP7:.+]] = load i32, i32* %[[DOTSTART]], align 4 // CHECK-NEXT: %[[TMP8:.+]] = load i32, i32* %[[DOTSTOP]], align 4 // CHECK-NEXT: %[[CMP:.+]] = icmp slt i32 %[[TMP7]], %[[TMP8]] // CHECK-NEXT: br i1 %[[CMP]], label %[[COND_TRUE:.+]], label %[[COND_FALSE:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[COND_TRUE]]: // CHECK-NEXT: %[[TMP9:.+]] = load i32, i32* %[[DOTSTOP]], align 4 // CHECK-NEXT: %[[TMP10:.+]] = load i32, i32* %[[DOTSTART]], align 4 // CHECK-NEXT: %[[SUB:.+]] = sub nsw i32 %[[TMP9]], %[[TMP10]] // CHECK-NEXT: %[[TMP11:.+]] = load i32, i32* %[[DOTSTEP]], align 4 // CHECK-NEXT: %[[SUB1:.+]] = sub i32 %[[TMP11]], 1 // CHECK-NEXT: %[[ADD:.+]] = add i32 %[[SUB]], %[[SUB1]] // CHECK-NEXT: %[[TMP12:.+]] = load i32, i32* %[[DOTSTEP]], align 4 // CHECK-NEXT: %[[DIV:.+]] = udiv i32 %[[ADD]], %[[TMP12]] // CHECK-NEXT: br label %[[COND_END:.+]] // CHECK-EMPTY: 
// CHECK-NEXT: [[COND_FALSE]]: // CHECK-NEXT: br label %[[COND_END]] // CHECK-EMPTY: // CHECK-NEXT: [[COND_END]]: // CHECK-NEXT: %[[COND:.+]] = phi i32 [ %[[DIV]], %[[COND_TRUE]] ], [ 0, %[[COND_FALSE]] ] // CHECK-NEXT: %[[TMP13:.+]] = load i32*, i32** %[[DISTANCE_ADDR]], align 8 // CHECK-NEXT: store i32 %[[COND]], i32* %[[TMP13]], align 4 // CHECK-NEXT: ret void // CHECK-NEXT: } // CHECK-LABEL: define {{.*}}@__captured_stmt.1( // CHECK-NEXT: [[ENTRY:.*]]: // CHECK-NEXT: %[[LOOPVAR_ADDR:.+]] = alloca i32*, align 8 // CHECK-NEXT: %[[LOGICAL_ADDR:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[__CONTEXT_ADDR:.+]] = alloca %struct.anon.0*, align 8 // CHECK-NEXT: store i32* %[[LOOPVAR:.+]], i32** %[[LOOPVAR_ADDR]], align 8 // CHECK-NEXT: store i32 %[[LOGICAL:.+]], i32* %[[LOGICAL_ADDR]], align 4 // CHECK-NEXT: store %struct.anon.0* %[[__CONTEXT:.+]], %struct.anon.0** %[[__CONTEXT_ADDR]], align 8 // CHECK-NEXT: %[[TMP0:.+]] = load %struct.anon.0*, %struct.anon.0** %[[__CONTEXT_ADDR]], align 8 // CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon.0, %struct.anon.0* %[[TMP0]], i32 0, i32 0 // CHECK-NEXT: %[[TMP2:.+]] = load i32, i32* %[[TMP1]], align 4 // CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[LOGICAL_ADDR]], align 4 // CHECK-NEXT: %[[MUL:.+]] = mul i32 1, %[[TMP3]] // CHECK-NEXT: %[[ADD:.+]] = add i32 %[[TMP2]], %[[MUL]] // CHECK-NEXT: %[[TMP4:.+]] = load i32*, i32** %[[LOOPVAR_ADDR]], align 8 // CHECK-NEXT: store i32 %[[ADD]], i32* %[[TMP4]], align 4 // CHECK-NEXT: ret void // CHECK-NEXT: } // CHECK: ![[META0:[0-9]+]] = !{i32 1, !"wchar_size", i32 4} // CHECK: ![[META1:[0-9]+]] = !{i32 7, !"openmp", i32 51} // CHECK: ![[META2:[0-9]+]] = // CHECK: ![[LOOP3]] = distinct !{![[LOOP3]], ![[LOOPPROP4:[0-9]+]], ![[LOOPPROP5:[0-9]+]]} // CHECK: ![[LOOPPROP4]] = !{!"llvm.loop.unroll.enable"} // CHECK: ![[LOOPPROP5]] = !{!"llvm.loop.unroll.count", i32 13}
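The checked IR above shows how the `#pragma omp for` + `#pragma omp unroll partial(13)` combination is lowered: the logical iteration space is split into a "floor" loop over ceil(count/13) chunks, which is what `__kmpc_for_static_init_4u` partitions across threads, plus an inner "tile" loop of at most 13 iterations. The following is a minimal C sketch of that decomposition under those assumptions; it is illustrative only, uses hypothetical names, and is not the test source.

/* Illustrative sketch of the floor/tile split seen in the checked IR. */
void unroll_partial_sketch(int n, float *a, float *b, float *c, float *d) {
  unsigned count = (n > 0) ? (unsigned) n : 0u;          /* loop trip count   */
  unsigned rem = count % 13u;
  unsigned floor_trips = count / 13u + (rem != 0u);      /* number of tiles   */
  #pragma omp for
  for (unsigned chunk = 0u; chunk < floor_trips; chunk++) {
    /* the last tile may be shorter when count is not a multiple of 13 */
    unsigned tile = (chunk == floor_trips - 1u && rem != 0u) ? rem : 13u;
    for (unsigned t = 0u; t < tile; t++) {
      unsigned i = 13u * chunk + t;                      /* logical iteration */
      a[i] = b[i] * c[i] * d[i];
    }
  }
}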
GB_unaryop__identity_fp32_uint16.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__identity_fp32_uint16 // op(A') function: GB_tran__identity_fp32_uint16 // C type: float // A type: uint16_t // cast: float cij = (float) aij // unaryop: cij = aij #define GB_ATYPE \ uint16_t #define GB_CTYPE \ float // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint16_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, aij) \ float z = (float) aij ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_FP32 || GxB_NO_UINT16) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__identity_fp32_uint16 ( float *Cx, // Cx and Ax may be aliased uint16_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__identity_fp32_uint16 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__identity_fp32_uint16 // op(A') function: GB_tran__identity_fp32_uint16 // C type: float // A type: uint16_t // cast: float cij = (float) aij // unaryop: cij = aij #define GB_ATYPE \ uint16_t #define GB_CTYPE \ float // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint16_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, aij) \ float z = (float) aij ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_FP32 || GxB_NO_UINT16) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__identity_fp32_uint16 ( float *Cx, // Cx and Ax may be aliased uint16_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__identity_fp32_uint16 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__identity_fp32_uint16 // op(A') function: GB_tran__identity_fp32_uint16 // C type: float // A type: uint16_t // cast: float cij = (float) aij // unaryop: cij = aij #define GB_ATYPE \ uint16_t #define GB_CTYPE \ float // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint16_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, aij) \ float z = (float) aij ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_FP32 || GxB_NO_UINT16) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__identity_fp32_uint16 ( float *Cx, // Cx and Ax may be aliased uint16_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__identity_fp32_uint16 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
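For reference, the generated kernel above is a flat elementwise cast loop; the OpenMP variant only adds `parallel for num_threads(nthreads) schedule(static)`. A self-contained sketch of the same pattern follows; the function name is illustrative and not part of the GraphBLAS API.

#include <stdint.h>

/* Sketch of the pattern in GB_unop__identity_fp32_uint16: Cx [p] is the
 * uint16_t value Ax [p] cast to float, statically scheduled over a
 * caller-chosen thread count. */
static void identity_fp32_uint16_sketch(float *Cx, const uint16_t *Ax,
                                        int64_t anz, int nthreads) {
  int64_t p;
  #pragma omp parallel for num_threads(nthreads) schedule(static)
  for (p = 0; p < anz; p++) {
    Cx[p] = (float) Ax[p];   /* cast, then the identity unary op */
  }
}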
GB_bitmap_assign_C_template.c
//------------------------------------------------------------------------------ // GB_bitmap_assign_C_template: iterate over a bitmap matrix C //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // The #include'ing file defines a GB_CIJ_WORK macro for the body of the loop, // which operates on the entry C(iC,jC) at position Cx [pC] and Cb [pC]. The C // matrix held in bitmap form. If the mask matrix is also a bitmap matrix or // full matrix, the GB_GET_MIJ macro can compute the effective value of the // mask for the C(iC,jC) entry. // C must be bitmap or full. If M is accessed, it must also be bitmap or full. #ifndef GB_GET_MIJ #define GB_GET_MIJ(mij,pM) ; #endif { switch (assign_kind) { //---------------------------------------------------------------------- // row assignment: C<M'>(iC,:), M is a column vector //---------------------------------------------------------------------- case GB_ROW_ASSIGN : { // iterate over all of C(iC,:) const int64_t iC = I [0] ; const int nthreads = GB_nthreads (cvdim, chunk, nthreads_max) ; int tid ; #pragma omp parallel for num_threads(nthreads) schedule(static) \ reduction(+:cnvals) for (tid = 0 ; tid < nthreads ; tid++) { int64_t jC_start, jC_end, task_cnvals = 0 ; GB_PARTITION (jC_start, jC_end, cvdim, tid, nthreads) ; for (int64_t jC = jC_start ; jC < jC_end ; jC++) { int64_t pC = iC + jC * cvlen ; GB_GET_MIJ (mij, jC) ; // mij = Mask (jC) GB_CIJ_WORK (pC) ; // operate on C(iC,jC) } cnvals += task_cnvals ; } } break ; //---------------------------------------------------------------------- // column assignment: C<M>(:,jC), M is a column vector //---------------------------------------------------------------------- case GB_COL_ASSIGN : { // iterate over all of C(:,jC) const int64_t jC = J [0] ; const int64_t pC0 = jC * cvlen ; const int nthreads = GB_nthreads (cvlen, chunk, nthreads_max) ; int tid ; #pragma omp parallel for num_threads(nthreads) schedule(static) \ reduction(+:cnvals) for (tid = 0 ; tid < nthreads ; tid++) { int64_t iC_start, iC_end, task_cnvals = 0 ; GB_PARTITION (iC_start, iC_end, cvlen, tid, nthreads) ; for (int64_t iC = iC_start ; iC < iC_end ; iC++) { int64_t pC = iC + pC0 ; GB_GET_MIJ (mij, iC) ; // mij = Mask (iC) GB_CIJ_WORK (pC) ; // operate on C(iC,jC) } cnvals += task_cnvals ; } } break ; //---------------------------------------------------------------------- // GrB_assign: C<M>(I,J), M is a matrix the same size as C //---------------------------------------------------------------------- #ifndef GB_NO_ASSIGN_CASE case GB_ASSIGN : { // iterate over all of C(:,:). #include "GB_bitmap_assign_C_whole_template.c" } break ; #endif //---------------------------------------------------------------------- // GxB_subassign: C(I,J)<M>, M is a matrix the same size as C(I,J) //---------------------------------------------------------------------- #ifndef GB_NO_SUBASSIGN_CASE case GB_SUBASSIGN : { // iterate over all of C(I,J) #undef GB_IXJ_WORK #define GB_IXJ_WORK(pC,pA) \ { \ GB_GET_MIJ (mij, pA) ; /* mij = Mask (pA) */ \ GB_CIJ_WORK (pC) ; /* operate on C(iC,jC) */ \ } #include "GB_bitmap_assign_IxJ_template.c" } break ; #endif default: ; } } #undef GB_NO_ASSIGN_CASE #undef GB_NO_SUBASSIGN_CASE
//------------------------------------------------------------------------------ // GB_bitmap_assign_C_template: iterate over a bitmap matrix C //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // The #include'ing file defines a GB_CIJ_WORK macro for the body of the loop, // which operates on the entry C(iC,jC) at position Cx [pC] and Cb [pC]. The C // matrix held in bitmap form. If the mask matrix is also a bitmap matrix or // full matrix, the GB_GET_MIJ macro can compute the effective value of the // mask for the C(iC,jC) entry. // C must be bitmap or full. If M is accessed, it must also be bitmap or full. #ifndef GB_GET_MIJ #define GB_GET_MIJ(mij,pM) ; #endif { switch (assign_kind) { //---------------------------------------------------------------------- // row assignment: C<M'>(iC,:), M is a column vector //---------------------------------------------------------------------- case GB_ROW_ASSIGN : { // iterate over all of C(iC,:) const int64_t iC = I [0] ; const int nthreads = GB_nthreads (cvdim, chunk, nthreads_max) ; int tid ; reduction(+:cnvals) for (tid = 0 ; tid < nthreads ; tid++) { int64_t jC_start, jC_end, task_cnvals = 0 ; GB_PARTITION (jC_start, jC_end, cvdim, tid, nthreads) ; for (int64_t jC = jC_start ; jC < jC_end ; jC++) { int64_t pC = iC + jC * cvlen ; GB_GET_MIJ (mij, jC) ; // mij = Mask (jC) GB_CIJ_WORK (pC) ; // operate on C(iC,jC) } cnvals += task_cnvals ; } } break ; //---------------------------------------------------------------------- // column assignment: C<M>(:,jC), M is a column vector //---------------------------------------------------------------------- case GB_COL_ASSIGN : { // iterate over all of C(:,jC) const int64_t jC = J [0] ; const int64_t pC0 = jC * cvlen ; const int nthreads = GB_nthreads (cvlen, chunk, nthreads_max) ; int tid ; reduction(+:cnvals) for (tid = 0 ; tid < nthreads ; tid++) { int64_t iC_start, iC_end, task_cnvals = 0 ; GB_PARTITION (iC_start, iC_end, cvlen, tid, nthreads) ; for (int64_t iC = iC_start ; iC < iC_end ; iC++) { int64_t pC = iC + pC0 ; GB_GET_MIJ (mij, iC) ; // mij = Mask (iC) GB_CIJ_WORK (pC) ; // operate on C(iC,jC) } cnvals += task_cnvals ; } } break ; //---------------------------------------------------------------------- // GrB_assign: C<M>(I,J), M is a matrix the same size as C //---------------------------------------------------------------------- #ifndef GB_NO_ASSIGN_CASE case GB_ASSIGN : { // iterate over all of C(:,:). #include "GB_bitmap_assign_C_whole_template.c" } break ; #endif //---------------------------------------------------------------------- // GxB_subassign: C(I,J)<M>, M is a matrix the same size as C(I,J) //---------------------------------------------------------------------- #ifndef GB_NO_SUBASSIGN_CASE case GB_SUBASSIGN : { // iterate over all of C(I,J) #undef GB_IXJ_WORK #define GB_IXJ_WORK(pC,pA) \ { \ GB_GET_MIJ (mij, pA) ; /* mij = Mask (pA) */ \ GB_CIJ_WORK (pC) ; /* operate on C(iC,jC) */ \ } #include "GB_bitmap_assign_IxJ_template.c" } break ; #endif default: ; } } #undef GB_NO_ASSIGN_CASE #undef GB_NO_SUBASSIGN_CASE
//------------------------------------------------------------------------------ // GB_bitmap_assign_C_template: iterate over a bitmap matrix C //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // The #include'ing file defines a GB_CIJ_WORK macro for the body of the loop, // which operates on the entry C(iC,jC) at position Cx [pC] and Cb [pC]. The C // matrix held in bitmap form. If the mask matrix is also a bitmap matrix or // full matrix, the GB_GET_MIJ macro can compute the effective value of the // mask for the C(iC,jC) entry. // C must be bitmap or full. If M is accessed, it must also be bitmap or full. #ifndef GB_GET_MIJ #define GB_GET_MIJ(mij,pM) ; #endif { switch (assign_kind) { //---------------------------------------------------------------------- // row assignment: C<M'>(iC,:), M is a column vector //---------------------------------------------------------------------- case GB_ROW_ASSIGN : { // iterate over all of C(iC,:) const int64_t iC = I [0] ; const int nthreads = GB_nthreads (cvdim, chunk, nthreads_max) ; int tid ; #pragma omp parallel for num_threads(nthreads) schedule(static) \ reduction(+:cnvals) for (tid = 0 ; tid < nthreads ; tid++) { int64_t jC_start, jC_end, task_cnvals = 0 ; GB_PARTITION (jC_start, jC_end, cvdim, tid, nthreads) ; for (int64_t jC = jC_start ; jC < jC_end ; jC++) { int64_t pC = iC + jC * cvlen ; GB_GET_MIJ (mij, jC) ; // mij = Mask (jC) GB_CIJ_WORK (pC) ; // operate on C(iC,jC) } cnvals += task_cnvals ; } } break ; //---------------------------------------------------------------------- // column assignment: C<M>(:,jC), M is a column vector //---------------------------------------------------------------------- case GB_COL_ASSIGN : { // iterate over all of C(:,jC) const int64_t jC = J [0] ; const int64_t pC0 = jC * cvlen ; const int nthreads = GB_nthreads (cvlen, chunk, nthreads_max) ; int tid ; #pragma omp parallel for num_threads(nthreads) schedule(static) \ reduction(+:cnvals) for (tid = 0 ; tid < nthreads ; tid++) { int64_t iC_start, iC_end, task_cnvals = 0 ; GB_PARTITION (iC_start, iC_end, cvlen, tid, nthreads) ; for (int64_t iC = iC_start ; iC < iC_end ; iC++) { int64_t pC = iC + pC0 ; GB_GET_MIJ (mij, iC) ; // mij = Mask (iC) GB_CIJ_WORK (pC) ; // operate on C(iC,jC) } cnvals += task_cnvals ; } } break ; //---------------------------------------------------------------------- // GrB_assign: C<M>(I,J), M is a matrix the same size as C //---------------------------------------------------------------------- #ifndef GB_NO_ASSIGN_CASE case GB_ASSIGN : { // iterate over all of C(:,:). #include "GB_bitmap_assign_C_whole_template.c" } break ; #endif //---------------------------------------------------------------------- // GxB_subassign: C(I,J)<M>, M is a matrix the same size as C(I,J) //---------------------------------------------------------------------- #ifndef GB_NO_SUBASSIGN_CASE case GB_SUBASSIGN : { // iterate over all of C(I,J) #undef GB_IXJ_WORK #define GB_IXJ_WORK(pC,pA) \ { \ GB_GET_MIJ (mij, pA) ; /* mij = Mask (pA) */ \ GB_CIJ_WORK (pC) ; /* operate on C(iC,jC) */ \ } #include "GB_bitmap_assign_IxJ_template.c" } break ; #endif default: ; } } #undef GB_NO_ASSIGN_CASE #undef GB_NO_SUBASSIGN_CASE
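The row and column assignment cases above use a manual partition idiom: the parallel loop iterates over thread ids, each thread derives its own index range with GB_PARTITION, accumulates a private task_cnvals, and the totals are combined through reduction(+:cnvals). A stand-alone sketch of that idiom is given below; GB_PARTITION is approximated inline, GB_CIJ_WORK is replaced by a simple bitmap count, and the names are illustrative.

#include <stdint.h>

/* Sketch of the thread-id partition + reduction idiom from the template. */
static int64_t bitmap_count_sketch(const int8_t *Cb, int64_t cvlen, int nthreads) {
  int64_t cnvals = 0;
  int tid;
  #pragma omp parallel for num_threads(nthreads) schedule(static) \
      reduction(+:cnvals)
  for (tid = 0; tid < nthreads; tid++) {
    int64_t istart = (cvlen * (int64_t) tid) / nthreads;        /* ~ GB_PARTITION */
    int64_t iend   = (cvlen * (int64_t) (tid + 1)) / nthreads;
    int64_t task_cnvals = 0;
    for (int64_t i = istart; i < iend; i++) {
      task_cnvals += (Cb[i] != 0);        /* stand-in for GB_CIJ_WORK */
    }
    cnvals += task_cnvals;
  }
  return cnvals;
}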
m2.c
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <omp.h> typedef struct { long long int re; long long int im; } com; typedef struct { com x; com y; } PO; typedef struct { unsigned int p; unsigned int e2; unsigned int e3; unsigned int xQ20; unsigned int xQ21; unsigned int yQ20; unsigned int yQ21; unsigned int xP20; unsigned int xP21; unsigned int yP20; unsigned int yP21; unsigned int xR20; unsigned int xR21; unsigned int xQ30; unsigned int xQ31; unsigned int yQ30; unsigned int yQ31; unsigned int xP30; unsigned int xP31; unsigned int yP30; unsigned int yP31; unsigned int xR30; unsigned int xR31; unsigned int n; } SIDH; typedef struct { int n; int p; int q; char s[]; } tor; typedef struct { unsigned int p; unsigned int e2; unsigned int e3; PO P2; PO P3; PO Q2; PO Q3; PO R2; PO R3; unsigned int n; } CM; unsigned int p=431; unsigned int pp=185761; // SIDH sp434; // invert of integer long long int inv(long long int a,long long int n){ long long int d,x,s,q,r,t; d = n; x = 0; s = 1; while (a != 0){ q = d / a; r = d % a; d = a; a = r; t = x - q * s; x = s; s = t; } // gcd = d; // $\gcd(a, n)$ return ((x + n) % (n / d)); } //SIDH com cadd(com a,com b){ com c; c.re=(a.re+b.re); if(c.re>p) c.re=c.re%p; if(c.re<0) c.re+=p; c.im=(a.im+b.im); if(c.im>p) c.im=c.im%p; if(c.im<0) c.im=c.im+p; return c; } com inv_add(com a){// -a com c; c.re= -1; c.im= -1; c.re=c.re*a.re%p; if(c.re>p) c.re%=p; c.im=c.im*a.im%p; if(c.im>p) c.im%=p; return c; } com csub(com a,com b){ com c,m; c.re=(a.re-b.re); if(c.re<0) c.re+=p; c.im=(a.im-b.im); if(c.im<0) c.im+=p; return c; } com cmul(com a,com b){ com c; long long int d,e; c.re=a.re*b.re-(a.im*b.im); d=(a.re*b.im);//%p; e=(b.re*a.im);//%p; // c.re=c.re+c.im;//%p; c.im=d+e;//%p; return c; } com cinv(com a){ com c,a1,a2,b1,b2,h,w; unsigned int i,j,d,e,f,g,A,pp,l,n; for(l=0;l<p;l++){ //#pragma omp parallel for for(n=0;n<p;n++){ //a=162+172i //a2.re=162; //a2.im=172; a2.re=l; //259 a2.im=n; //340 b1=cmul(a2,a); if(b1.re%p==1 && b1.im%p==0){ printf("%d %d %d %d\n",a1.re,a1.im,b1.re%p,b1.im%p); printf("%d %d\n",l,n); // exit(1); return a2; } } } return a2; } com cdiv(com a,com b){ com c,d,v,f,h; long long g; d.re=(b.re*b.re+b.im*b.im)%p; if(d.re>p) d.re=d.re%p; if(d.re<0) d.re=d.re+p; d.im=0; v.re=((a.re%p)*(b.re%p)+((a.im%p)*(b.im%p))%p)%p; v.im=((a.im%p)*(b.re%p))-(a.re%p)*(b.im%p); if(a.re>p) a.re=a.re%p; if(a.re<0) a.re=b.re+p; if(a.im>p) a.im=b.im%p; if(a.im<0) a.re=a.im+p; if(b.re>p) b.re=a.re%p; if(b.re<0) b.re=b.re+p; if(b.im>p) b.im=b.im%p; if(b.im<0) b.re=a.im+p; printf("re=%lld %lld\n",a.re,b.re); printf("imm=%lldi %lldi\n",a.im,b.im); //exit(1); printf("d=%lld\n",d.re); d.re=inv(d.re,p); v.re=((p+v.re)*d.re)%p; v.im=((v.im%p)*d.re)%p; if(v.re>p) v.re=v.re%p; if(v.im<0) v.im+=p; printf("v=%lld %lldi\n",v.re,v.im); // exit(1); //c.re=d.re; //c.im=v.im*inv(d.re,p); return v; } com cnst(unsigned int A,com a){ unsigned int t,s; com r; t=A*a.re; s=A*a.im; r.re=t; r.im=s; return r; } PO eadd(PO P,PO Q){ PO R={0}; unsigned int r,s,t,u,v,w; com c,d,e,f,g,l,A; A.re=6; A.im=0; c=csub(P.y,Q.y); d=csub(P.x,Q.x); e=cinv(d); l=cmul(c,e); d=cmul(l,l); e=cadd(P.x,Q.x); R.x=csub(csub(d,e),A); R.y=csub(cmul(l,csub(P.x,R.x)),P.y); return R; } PO eadd2(PO P){ com a,b,c; PO R; return R; } //E = EllipticCurve(GF(131), [0, 0, 0, 1, 23]) //E.j_invariant() com j_inv(com a){ com r,f,h,b1,b2,h1,o,g,q; // unsigned int w; o.re= 3; o.im= 0; q.re= 256; q.im= 0; f.re=4; f.im=0; r=cmul(a,a); //printf("%d %d\n",r.re,r.im); //a^2-4 h=csub(r,f); printf("a^2-4: %lld 
%lld\n",h.re,h.im); b1=cadd(r,f); printf("%lld %lld\n",b1.re,b1.im); b2=cmul(r,r); h1=cmul(f,f); h1=cadd(h1,b2); printf("%lld %lld\n",h1.re,h1.im); //p=131 のとき y^2 = x^3 + x + 23 の j-不変量は 78 となります。 //g=a^2-3 g=csub(r,o); printf("a^2-3: %d %d\n",g.re,g.im); printf("a^2-4: %lld %lld\n",h.re,h.im); //g=256*(a^2-3)^3 //(a^2 - 3)^2 = -4184900860 - 2323531392 I //(a^2 - 3)^3 = 228212128828152 - 239983944473728 I g=cmul(cmul(cmul(g,g),g),q); g.re=g.re%p; g.im=g.im%p; printf("g=256*(a^2-3)^3: %lld %lld\n",g.re,g.im); g=cdiv(g,h); if(g.re>p) g.re%=p; if(g.re<0) g.re+=p; if(g.im>p) g.im=g.im%p; if(g.im<0) g.im+=p; printf("ans=%lld,%lld\n",g.re,g.im); return g; } /* //jj=aa^bb mod oo BigInt exp(BigInt aa,BigInt bb,BigInt oo){ BigInt ii,jj,kk[8192]; int j,c[8192],count=0,i; ii=oo; j=0; jj=0; // kk[4096]; //prime is 4096 bit table // c[8192] //mod is 8192 bit table count=0; for(i=0;i<8192;i++){ kk[i]=0; } while(ii>0){ ii = (ii>>1); j=j+1; } kk[0]=aa; // std::cout << j << "\n"; //ex.1000=2**3+2**5+2**6+2**7+2**8+2**9 makes a array c=[3,5,6,7,8,9] for(i=0;i<j+1;i++){ if((bb >> i)%2 != 0){ // testbit(bb,i) c[count]=i; count=count+1; } } // std::cout << bb << endl; // std::cout << count << "\n"; //exit(1); for(i=1;i<c[count-1]+1;i++){ kk[i] = kk[i-1]*kk[i-1]%oo; } jj=1; for(i=0;i<count;i++){ jj=kk[c[i]]*jj%oo; if (jj==0){ // print i,"\n" } } return jj; } */ com cc(com a,com b){ com c; c.re= a.re*b.re+a.im*b.im; c.im=0; return c; } int main () { char buf[65536]; CM sp434; com a1,a2,b1,b2,j,r,o,q,g,f,v,w,h,r2,g2,h2,h1,c; int s=31,t=304,l,k,n,i,count=0,a,b,jj,aa,bb,jj2,test[431][431][2]={0},tmp[431]={0}; s=inv(s,p); //a1 v.re=s; v.im=0; t=inv(t,p); //a2 w.re=s; w.im=0; printf("s=%d,t=%d\n",s,t); o.re= 3; o.im= 0; q.re= 256; q.im= 0; f.re=4; f.im=0; //h.re=p; //h.im=0; //q=cdiv(r,o); //printf("%d %d\n",q.re,q.im); //exit(1); //a=161+208i a1.re=161; a1.im=208; j_inv(a1); printf("a1======================================\n"); //exit(1); a2.re=161;//161; //162; a2.im=208;//208;//172; a2=j_inv(a2); c.re=132; c.im=0; j_inv(c); //exit(1); printf("j=%d %d\n",a2.re,a2.im); /* c=a2; while(1){ a2=j_inv(a2); count++; if(247 == a2.re){ printf("%d %d %d\n",a2.re,a2.im,count); scanf("%d",&n); // exit(1); } if(a2.re < 0 && a2.im < 0){ printf("baka\n"); exit(1); } count++; } */ o.im=0; //同じj不変量を持つ楕円曲線を総探索する 20200804 for(i=0;i<p;i++){ o.re=i; for(k=0;k<p;k++){ o.im=k; r=j_inv(o); // printf("%d %d %d %d\n",r.re,r.im,i,k); //scanf("%d",&n); // if(test[r.re][0]==512 && r.re>=0 && r.im==0){ test[i][k][0]=r.re; test[i][k][1]=r.im; //count++; } // if(test[r.re].im!=r.im){ //count++; //test[r.re].im=r.im; } for(i=0;i<p;i++){ for(k=0;k<p;k++){ //if(test[i][k]>=0){ // tmp[test[i][0]]=-1; printf("j_inv=%d,%d %d %d\n",i,k,test[i][k][0],test[i][k][1]); //count++; } //} } /* for(i=0;i<p;i++){ if(tmp[i]== -1) count++; } printf("%d\n",count); */ //exit(1); /* //j-invariant if(r.re==304 && r.im==364){ printf("(i,k)=%d %d\n",i,k); //scanf("%d",&n); //count++; } } */ c.re=109; c.im=0; j_inv(c); printf("p=%d count=%d\n",p,count); return 0; }
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <omp.h> typedef struct { long long int re; long long int im; } com; typedef struct { com x; com y; } PO; typedef struct { unsigned int p; unsigned int e2; unsigned int e3; unsigned int xQ20; unsigned int xQ21; unsigned int yQ20; unsigned int yQ21; unsigned int xP20; unsigned int xP21; unsigned int yP20; unsigned int yP21; unsigned int xR20; unsigned int xR21; unsigned int xQ30; unsigned int xQ31; unsigned int yQ30; unsigned int yQ31; unsigned int xP30; unsigned int xP31; unsigned int yP30; unsigned int yP31; unsigned int xR30; unsigned int xR31; unsigned int n; } SIDH; typedef struct { int n; int p; int q; char s[]; } tor; typedef struct { unsigned int p; unsigned int e2; unsigned int e3; PO P2; PO P3; PO Q2; PO Q3; PO R2; PO R3; unsigned int n; } CM; unsigned int p=431; unsigned int pp=185761; // SIDH sp434; // invert of integer long long int inv(long long int a,long long int n){ long long int d,x,s,q,r,t; d = n; x = 0; s = 1; while (a != 0){ q = d / a; r = d % a; d = a; a = r; t = x - q * s; x = s; s = t; } // gcd = d; // $\gcd(a, n)$ return ((x + n) % (n / d)); } //SIDH com cadd(com a,com b){ com c; c.re=(a.re+b.re); if(c.re>p) c.re=c.re%p; if(c.re<0) c.re+=p; c.im=(a.im+b.im); if(c.im>p) c.im=c.im%p; if(c.im<0) c.im=c.im+p; return c; } com inv_add(com a){// -a com c; c.re= -1; c.im= -1; c.re=c.re*a.re%p; if(c.re>p) c.re%=p; c.im=c.im*a.im%p; if(c.im>p) c.im%=p; return c; } com csub(com a,com b){ com c,m; c.re=(a.re-b.re); if(c.re<0) c.re+=p; c.im=(a.im-b.im); if(c.im<0) c.im+=p; return c; } com cmul(com a,com b){ com c; long long int d,e; c.re=a.re*b.re-(a.im*b.im); d=(a.re*b.im);//%p; e=(b.re*a.im);//%p; // c.re=c.re+c.im;//%p; c.im=d+e;//%p; return c; } com cinv(com a){ com c,a1,a2,b1,b2,h,w; unsigned int i,j,d,e,f,g,A,pp,l,n; for(l=0;l<p;l++){ // for(n=0;n<p;n++){ //a=162+172i //a2.re=162; //a2.im=172; a2.re=l; //259 a2.im=n; //340 b1=cmul(a2,a); if(b1.re%p==1 && b1.im%p==0){ printf("%d %d %d %d\n",a1.re,a1.im,b1.re%p,b1.im%p); printf("%d %d\n",l,n); // exit(1); return a2; } } } return a2; } com cdiv(com a,com b){ com c,d,v,f,h; long long g; d.re=(b.re*b.re+b.im*b.im)%p; if(d.re>p) d.re=d.re%p; if(d.re<0) d.re=d.re+p; d.im=0; v.re=((a.re%p)*(b.re%p)+((a.im%p)*(b.im%p))%p)%p; v.im=((a.im%p)*(b.re%p))-(a.re%p)*(b.im%p); if(a.re>p) a.re=a.re%p; if(a.re<0) a.re=b.re+p; if(a.im>p) a.im=b.im%p; if(a.im<0) a.re=a.im+p; if(b.re>p) b.re=a.re%p; if(b.re<0) b.re=b.re+p; if(b.im>p) b.im=b.im%p; if(b.im<0) b.re=a.im+p; printf("re=%lld %lld\n",a.re,b.re); printf("imm=%lldi %lldi\n",a.im,b.im); //exit(1); printf("d=%lld\n",d.re); d.re=inv(d.re,p); v.re=((p+v.re)*d.re)%p; v.im=((v.im%p)*d.re)%p; if(v.re>p) v.re=v.re%p; if(v.im<0) v.im+=p; printf("v=%lld %lldi\n",v.re,v.im); // exit(1); //c.re=d.re; //c.im=v.im*inv(d.re,p); return v; } com cnst(unsigned int A,com a){ unsigned int t,s; com r; t=A*a.re; s=A*a.im; r.re=t; r.im=s; return r; } PO eadd(PO P,PO Q){ PO R={0}; unsigned int r,s,t,u,v,w; com c,d,e,f,g,l,A; A.re=6; A.im=0; c=csub(P.y,Q.y); d=csub(P.x,Q.x); e=cinv(d); l=cmul(c,e); d=cmul(l,l); e=cadd(P.x,Q.x); R.x=csub(csub(d,e),A); R.y=csub(cmul(l,csub(P.x,R.x)),P.y); return R; } PO eadd2(PO P){ com a,b,c; PO R; return R; } //E = EllipticCurve(GF(131), [0, 0, 0, 1, 23]) //E.j_invariant() com j_inv(com a){ com r,f,h,b1,b2,h1,o,g,q; // unsigned int w; o.re= 3; o.im= 0; q.re= 256; q.im= 0; f.re=4; f.im=0; r=cmul(a,a); //printf("%d %d\n",r.re,r.im); //a^2-4 h=csub(r,f); printf("a^2-4: %lld %lld\n",h.re,h.im); 
b1=cadd(r,f); printf("%lld %lld\n",b1.re,b1.im); b2=cmul(r,r); h1=cmul(f,f); h1=cadd(h1,b2); printf("%lld %lld\n",h1.re,h1.im); //p=131 のとき y^2 = x^3 + x + 23 の j-不変量は 78 となります。 //g=a^2-3 g=csub(r,o); printf("a^2-3: %d %d\n",g.re,g.im); printf("a^2-4: %lld %lld\n",h.re,h.im); //g=256*(a^2-3)^3 //(a^2 - 3)^2 = -4184900860 - 2323531392 I //(a^2 - 3)^3 = 228212128828152 - 239983944473728 I g=cmul(cmul(cmul(g,g),g),q); g.re=g.re%p; g.im=g.im%p; printf("g=256*(a^2-3)^3: %lld %lld\n",g.re,g.im); g=cdiv(g,h); if(g.re>p) g.re%=p; if(g.re<0) g.re+=p; if(g.im>p) g.im=g.im%p; if(g.im<0) g.im+=p; printf("ans=%lld,%lld\n",g.re,g.im); return g; } /* //jj=aa^bb mod oo BigInt exp(BigInt aa,BigInt bb,BigInt oo){ BigInt ii,jj,kk[8192]; int j,c[8192],count=0,i; ii=oo; j=0; jj=0; // kk[4096]; //prime is 4096 bit table // c[8192] //mod is 8192 bit table count=0; for(i=0;i<8192;i++){ kk[i]=0; } while(ii>0){ ii = (ii>>1); j=j+1; } kk[0]=aa; // std::cout << j << "\n"; //ex.1000=2**3+2**5+2**6+2**7+2**8+2**9 makes a array c=[3,5,6,7,8,9] for(i=0;i<j+1;i++){ if((bb >> i)%2 != 0){ // testbit(bb,i) c[count]=i; count=count+1; } } // std::cout << bb << endl; // std::cout << count << "\n"; //exit(1); for(i=1;i<c[count-1]+1;i++){ kk[i] = kk[i-1]*kk[i-1]%oo; } jj=1; for(i=0;i<count;i++){ jj=kk[c[i]]*jj%oo; if (jj==0){ // print i,"\n" } } return jj; } */ com cc(com a,com b){ com c; c.re= a.re*b.re+a.im*b.im; c.im=0; return c; } int main () { char buf[65536]; CM sp434; com a1,a2,b1,b2,j,r,o,q,g,f,v,w,h,r2,g2,h2,h1,c; int s=31,t=304,l,k,n,i,count=0,a,b,jj,aa,bb,jj2,test[431][431][2]={0},tmp[431]={0}; s=inv(s,p); //a1 v.re=s; v.im=0; t=inv(t,p); //a2 w.re=s; w.im=0; printf("s=%d,t=%d\n",s,t); o.re= 3; o.im= 0; q.re= 256; q.im= 0; f.re=4; f.im=0; //h.re=p; //h.im=0; //q=cdiv(r,o); //printf("%d %d\n",q.re,q.im); //exit(1); //a=161+208i a1.re=161; a1.im=208; j_inv(a1); printf("a1======================================\n"); //exit(1); a2.re=161;//161; //162; a2.im=208;//208;//172; a2=j_inv(a2); c.re=132; c.im=0; j_inv(c); //exit(1); printf("j=%d %d\n",a2.re,a2.im); /* c=a2; while(1){ a2=j_inv(a2); count++; if(247 == a2.re){ printf("%d %d %d\n",a2.re,a2.im,count); scanf("%d",&n); // exit(1); } if(a2.re < 0 && a2.im < 0){ printf("baka\n"); exit(1); } count++; } */ o.im=0; //同じj不変量を持つ楕円曲線を総探索する 20200804 for(i=0;i<p;i++){ o.re=i; for(k=0;k<p;k++){ o.im=k; r=j_inv(o); // printf("%d %d %d %d\n",r.re,r.im,i,k); //scanf("%d",&n); // if(test[r.re][0]==512 && r.re>=0 && r.im==0){ test[i][k][0]=r.re; test[i][k][1]=r.im; //count++; } // if(test[r.re].im!=r.im){ //count++; //test[r.re].im=r.im; } for(i=0;i<p;i++){ for(k=0;k<p;k++){ //if(test[i][k]>=0){ // tmp[test[i][0]]=-1; printf("j_inv=%d,%d %d %d\n",i,k,test[i][k][0],test[i][k][1]); //count++; } //} } /* for(i=0;i<p;i++){ if(tmp[i]== -1) count++; } printf("%d\n",count); */ //exit(1); /* //j-invariant if(r.re==304 && r.im==364){ printf("(i,k)=%d %d\n",i,k); //scanf("%d",&n); //count++; } } */ c.re=109; c.im=0; j_inv(c); printf("p=%d count=%d\n",p,count); return 0; }
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <omp.h> typedef struct { long long int re; long long int im; } com; typedef struct { com x; com y; } PO; typedef struct { unsigned int p; unsigned int e2; unsigned int e3; unsigned int xQ20; unsigned int xQ21; unsigned int yQ20; unsigned int yQ21; unsigned int xP20; unsigned int xP21; unsigned int yP20; unsigned int yP21; unsigned int xR20; unsigned int xR21; unsigned int xQ30; unsigned int xQ31; unsigned int yQ30; unsigned int yQ31; unsigned int xP30; unsigned int xP31; unsigned int yP30; unsigned int yP31; unsigned int xR30; unsigned int xR31; unsigned int n; } SIDH; typedef struct { int n; int p; int q; char s[]; } tor; typedef struct { unsigned int p; unsigned int e2; unsigned int e3; PO P2; PO P3; PO Q2; PO Q3; PO R2; PO R3; unsigned int n; } CM; unsigned int p=431; unsigned int pp=185761; // SIDH sp434; // invert of integer long long int inv(long long int a,long long int n){ long long int d,x,s,q,r,t; d = n; x = 0; s = 1; while (a != 0){ q = d / a; r = d % a; d = a; a = r; t = x - q * s; x = s; s = t; } // gcd = d; // $\gcd(a, n)$ return ((x + n) % (n / d)); } //SIDH com cadd(com a,com b){ com c; c.re=(a.re+b.re); if(c.re>p) c.re=c.re%p; if(c.re<0) c.re+=p; c.im=(a.im+b.im); if(c.im>p) c.im=c.im%p; if(c.im<0) c.im=c.im+p; return c; } com inv_add(com a){// -a com c; c.re= -1; c.im= -1; c.re=c.re*a.re%p; if(c.re>p) c.re%=p; c.im=c.im*a.im%p; if(c.im>p) c.im%=p; return c; } com csub(com a,com b){ com c,m; c.re=(a.re-b.re); if(c.re<0) c.re+=p; c.im=(a.im-b.im); if(c.im<0) c.im+=p; return c; } com cmul(com a,com b){ com c; long long int d,e; c.re=a.re*b.re-(a.im*b.im); d=(a.re*b.im);//%p; e=(b.re*a.im);//%p; // c.re=c.re+c.im;//%p; c.im=d+e;//%p; return c; } com cinv(com a){ com c,a1,a2,b1,b2,h,w; unsigned int i,j,d,e,f,g,A,pp,l,n; for(l=0;l<p;l++){ //#pragma omp parallel for for(n=0;n<p;n++){ //a=162+172i //a2.re=162; //a2.im=172; a2.re=l; //259 a2.im=n; //340 b1=cmul(a2,a); if(b1.re%p==1 && b1.im%p==0){ printf("%d %d %d %d\n",a1.re,a1.im,b1.re%p,b1.im%p); printf("%d %d\n",l,n); // exit(1); return a2; } } } return a2; } com cdiv(com a,com b){ com c,d,v,f,h; long long g; d.re=(b.re*b.re+b.im*b.im)%p; if(d.re>p) d.re=d.re%p; if(d.re<0) d.re=d.re+p; d.im=0; v.re=((a.re%p)*(b.re%p)+((a.im%p)*(b.im%p))%p)%p; v.im=((a.im%p)*(b.re%p))-(a.re%p)*(b.im%p); if(a.re>p) a.re=a.re%p; if(a.re<0) a.re=b.re+p; if(a.im>p) a.im=b.im%p; if(a.im<0) a.re=a.im+p; if(b.re>p) b.re=a.re%p; if(b.re<0) b.re=b.re+p; if(b.im>p) b.im=b.im%p; if(b.im<0) b.re=a.im+p; printf("re=%lld %lld\n",a.re,b.re); printf("imm=%lldi %lldi\n",a.im,b.im); //exit(1); printf("d=%lld\n",d.re); d.re=inv(d.re,p); v.re=((p+v.re)*d.re)%p; v.im=((v.im%p)*d.re)%p; if(v.re>p) v.re=v.re%p; if(v.im<0) v.im+=p; printf("v=%lld %lldi\n",v.re,v.im); // exit(1); //c.re=d.re; //c.im=v.im*inv(d.re,p); return v; } com cnst(unsigned int A,com a){ unsigned int t,s; com r; t=A*a.re; s=A*a.im; r.re=t; r.im=s; return r; } PO eadd(PO P,PO Q){ PO R={0}; unsigned int r,s,t,u,v,w; com c,d,e,f,g,l,A; A.re=6; A.im=0; c=csub(P.y,Q.y); d=csub(P.x,Q.x); e=cinv(d); l=cmul(c,e); d=cmul(l,l); e=cadd(P.x,Q.x); R.x=csub(csub(d,e),A); R.y=csub(cmul(l,csub(P.x,R.x)),P.y); return R; } PO eadd2(PO P){ com a,b,c; PO R; return R; } //E = EllipticCurve(GF(131), [0, 0, 0, 1, 23]) //E.j_invariant() com j_inv(com a){ com r,f,h,b1,b2,h1,o,g,q; // unsigned int w; o.re= 3; o.im= 0; q.re= 256; q.im= 0; f.re=4; f.im=0; r=cmul(a,a); //printf("%d %d\n",r.re,r.im); //a^2-4 h=csub(r,f); printf("a^2-4: %lld 
%lld\n",h.re,h.im); b1=cadd(r,f); printf("%lld %lld\n",b1.re,b1.im); b2=cmul(r,r); h1=cmul(f,f); h1=cadd(h1,b2); printf("%lld %lld\n",h1.re,h1.im); //p=131 のとき y^2 = x^3 + x + 23 の j-不変量は 78 となります。 //g=a^2-3 g=csub(r,o); printf("a^2-3: %d %d\n",g.re,g.im); printf("a^2-4: %lld %lld\n",h.re,h.im); //g=256*(a^2-3)^3 //(a^2 - 3)^2 = -4184900860 - 2323531392 I //(a^2 - 3)^3 = 228212128828152 - 239983944473728 I g=cmul(cmul(cmul(g,g),g),q); g.re=g.re%p; g.im=g.im%p; printf("g=256*(a^2-3)^3: %lld %lld\n",g.re,g.im); g=cdiv(g,h); if(g.re>p) g.re%=p; if(g.re<0) g.re+=p; if(g.im>p) g.im=g.im%p; if(g.im<0) g.im+=p; printf("ans=%lld,%lld\n",g.re,g.im); return g; } /* //jj=aa^bb mod oo BigInt exp(BigInt aa,BigInt bb,BigInt oo){ BigInt ii,jj,kk[8192]; int j,c[8192],count=0,i; ii=oo; j=0; jj=0; // kk[4096]; //prime is 4096 bit table // c[8192] //mod is 8192 bit table count=0; for(i=0;i<8192;i++){ kk[i]=0; } while(ii>0){ ii = (ii>>1); j=j+1; } kk[0]=aa; // std::cout << j << "\n"; //ex.1000=2**3+2**5+2**6+2**7+2**8+2**9 makes a array c=[3,5,6,7,8,9] for(i=0;i<j+1;i++){ if((bb >> i)%2 != 0){ // testbit(bb,i) c[count]=i; count=count+1; } } // std::cout << bb << endl; // std::cout << count << "\n"; //exit(1); for(i=1;i<c[count-1]+1;i++){ kk[i] = kk[i-1]*kk[i-1]%oo; } jj=1; for(i=0;i<count;i++){ jj=kk[c[i]]*jj%oo; if (jj==0){ // print i,"\n" } } return jj; } */ com cc(com a,com b){ com c; c.re= a.re*b.re+a.im*b.im; c.im=0; return c; } int main () { char buf[65536]; CM sp434; com a1,a2,b1,b2,j,r,o,q,g,f,v,w,h,r2,g2,h2,h1,c; int s=31,t=304,l,k,n,i,count=0,a,b,jj,aa,bb,jj2,test[431][431][2]={0},tmp[431]={0}; s=inv(s,p); //a1 v.re=s; v.im=0; t=inv(t,p); //a2 w.re=s; w.im=0; printf("s=%d,t=%d\n",s,t); o.re= 3; o.im= 0; q.re= 256; q.im= 0; f.re=4; f.im=0; //h.re=p; //h.im=0; //q=cdiv(r,o); //printf("%d %d\n",q.re,q.im); //exit(1); //a=161+208i a1.re=161; a1.im=208; j_inv(a1); printf("a1======================================\n"); //exit(1); a2.re=161;//161; //162; a2.im=208;//208;//172; a2=j_inv(a2); c.re=132; c.im=0; j_inv(c); //exit(1); printf("j=%d %d\n",a2.re,a2.im); /* c=a2; while(1){ a2=j_inv(a2); count++; if(247 == a2.re){ printf("%d %d %d\n",a2.re,a2.im,count); scanf("%d",&n); // exit(1); } if(a2.re < 0 && a2.im < 0){ printf("baka\n"); exit(1); } count++; } */ o.im=0; //同じj不変量を持つ楕円曲線を総探索する 20200804 for(i=0;i<p;i++){ o.re=i; for(k=0;k<p;k++){ o.im=k; r=j_inv(o); // printf("%d %d %d %d\n",r.re,r.im,i,k); //scanf("%d",&n); // if(test[r.re][0]==512 && r.re>=0 && r.im==0){ test[i][k][0]=r.re; test[i][k][1]=r.im; //count++; } // if(test[r.re].im!=r.im){ //count++; //test[r.re].im=r.im; } for(i=0;i<p;i++){ for(k=0;k<p;k++){ //if(test[i][k]>=0){ // tmp[test[i][0]]=-1; printf("j_inv=%d,%d %d %d\n",i,k,test[i][k][0],test[i][k][1]); //count++; } //} } /* for(i=0;i<p;i++){ if(tmp[i]== -1) count++; } printf("%d\n",count); */ //exit(1); /* //j-invariant if(r.re==304 && r.im==364){ printf("(i,k)=%d %d\n",i,k); //scanf("%d",&n); //count++; } } */ c.re=109; c.im=0; j_inv(c); printf("p=%d count=%d\n",p,count); return 0; }
test.c
#include <stdio.h> #include <float.h> #include <stdlib.h> #include <math.h> #include <omp.h> #pragma omp requires unified_shared_memory #include "../utilities/check.h" #include "../utilities/utilities.h" #define TRIALS (1) #define N (957*3) #define ZERO(X) ZERO_ARRAY(N, X) #define INIT() { \ INIT_LOOP(N, { \ Ad[i] = 1 << 16; \ Bd[i] = i << 16; \ Cd[i] = -(i << 16); \ Dd[i] = (2*i+1) << 16; \ Ed[i] = ((i % 2 == 0 ? 0x1 : 0x0) << 16) | \ ((i % 3 == 0 ? 0x2 : 0x0) << 16); \ }) \ } #define INIT1 (1) #define INIT2 (3) #define INIT3 (5) #define INIT4 (7) #define INITc5 (9) #define INITs5 (9 << 4) #define INITi5 (9 << 16) #define INITll5 (9ll << 32) #define INITf5 (9 << 8) #define INITd5 (9 << 16) #define INITc6 (0xf) #define INITs6 (0xff << 4) #define INITi6 (0xff << 16) #define INITll6 (0xffll << 32) #define INITf6 (0xff << 8) #define INITd6 (0xff << 16) #define INIT7 (0) #define INIT8 (0) #define INIT9 (1) #define INIT10 (0) #define EXPECTED_RESULT ( \ INIT1 + INIT2 + \ (N << 16) + (N << 16) + \ /* + (2*(N-1)+1) - (N-1) */ + \ (INITd5*2*2*2) + \ 1 + 1 \ ) #define REDUCTION_CLAUSES reduction(+:Rd1) reduction(-:Rd2) reduction(*:Rd5) \ reduction(&&:Rd9) reduction(||:Rd10) //reduction(max:Ri3) reduction(min:Ri4) #define REDUCTION_MAP map(tofrom: Rd1, Rd2, Rd5, Rd9, Rd10) #define REDUCTION_INIT() { \ Rd1 = INIT1; Rd2 = INIT2; \ Rd3 = INIT3; Rd4 = INIT4; \ Rd5 = INITd5; Rd6 = INITd6; \ Rd7 = INIT7; Rd8 = INIT8; \ Rd9 = INIT9; Rd10 = INIT10; \ } #define REDUCTION_BODY() \ Rd1 += Ad[i] + (Bd[i] + Cd[i]); \ Rd2 += Ad[i] + (Bd[i] + Cd[i]); \ /*Rd3 = Dd[i] > Rd3 ? Dd[i] : Rd3; \ Rd4 = Cd[i] < Rd4 ? Cd[i] : Rd4; \*/ \ Rd5 *= i % 1000 == 0 ? 2 : 1; \ Rd9 = Rd9 && Ad[i] > 0; \ Rd10 = Rd10 || Ad[i] > 0; #define REDUCTION_LOOP() \ for (int i = 0; i < N; i++) { \ REDUCTION_BODY(); \ } #define REDUCTION_FINAL() { \ OUT[0] += (long long) (Rd1 + Rd2 /*+ Rd3 + Rd4 */ + Rd5 + Rd9 + Rd10); \ } int main(void) { check_offloading(); double Ad[N], Bd[N], Cd[N], Dd[N], Ed[N]; double Rd1, Rd2, Rd3, Rd4, Rd5, Rd6, Rd7, Rd8, Rd9, Rd10; long long OUT[1]; long long EXPECTED[1]; EXPECTED[0] = EXPECTED_RESULT; int cpuExec = 0; #pragma omp target map(tofrom: cpuExec) { cpuExec = omp_is_initial_device(); } int gpu_threads = 512; int cpu_threads = 32; int max_threads = cpuExec ? cpu_threads : gpu_threads; INIT(); if (cpuExec) { // Certain tests in this testcase fails on the host. A bug report has // been filed: https://puna0.watson.ibm.com/T143 // Disabling this test on the host for now. DUMP_SUCCESS(3153); return 0; } // // Test: reduction on teams. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams num_teams(tms) REDUCTION_CLAUSES") { int tid = omp_get_team_num(); int th = omp_get_num_teams(); for (int i = tid; i < N; i+= th) { REDUCTION_BODY(); } } }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } // // Test: reduction on teams with nested parallel. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams num_teams(tms) REDUCTION_CLAUSES") { int tid = omp_get_team_num(); int th = omp_get_num_teams(); _Pragma("omp parallel for REDUCTION_CLAUSES") for (int i = tid; i < N; i+= th) { REDUCTION_BODY(); } } }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } // // Test: reduction on target teams. 
// for (int tms = 1 ; tms <= 512 ; tms *= 7) { OUT[0] = 0; TESTD2("omp target teams num_teams(tms) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, { { int tid = omp_get_team_num(); int th = omp_get_num_teams(); for (int i = tid; i < N; i+= th) { REDUCTION_BODY(); } } }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } // // Test: reduction on teams with nested parallel. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { OUT[0] = 0; TESTD2("omp target teams num_teams(tms) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, { { int tid = omp_get_team_num(); int th = omp_get_num_teams(); _Pragma("omp parallel for REDUCTION_CLAUSES") for (int i = tid; i < N; i+= th) { REDUCTION_BODY(); } } }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } // // Test: reduction on target parallel with nested parallel. // OUT[0] = 0; TESTD2("omp target parallel num_threads(30) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, { { int tid = omp_get_thread_num(); int th = omp_get_num_threads(); _Pragma("omp simd REDUCTION_CLAUSES") for (int i = tid; i < N; i+= th) { REDUCTION_BODY(); } } }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); // // Test: reduction on target teams distribute parallel for. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for num_teams(tms) thread_limit(ths) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } // // Test: reduction on target teams distribute parallel for with schedule static nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for num_teams(tms) thread_limit(ths) schedule(static) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } // // Test: reduction on target teams distribute parallel for with schedule static chunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for num_teams(tms) thread_limit(ths) schedule(static,sch) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on target teams distribute parallel for with schedule dynamic nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for num_teams(tms) thread_limit(ths) schedule(dynamic) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } // // Test: reduction on target teams distribute parallel for with schedule dynamic chunk. 
// for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for num_teams(tms) thread_limit(ths) schedule(dynamic,sch) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on target teams distribute parallel for with dist_schedule static nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for num_teams(tms) thread_limit(ths) dist_schedule(static) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } // // Test: reduction on target teams distribute parallel for with dist_schedule static nochunk, schedule static nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for num_teams(tms) thread_limit(ths) dist_schedule(static) schedule(static) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } // // Test: reduction on target teams distribute parallel for with dist_schedule static nochunk, schedule static chunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for num_teams(tms) thread_limit(ths) dist_schedule(static) schedule(static,sch) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on target teams distribute parallel for with dist_schedule static chunk, schedule static nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(static) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on target teams distribute parallel for with dist_schedule static chunk, schedule static chunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(static,sch) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on target teams distribute parallel for with dist_schedule static chunk, schedule dynamic nochunk. 
// for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(dynamic) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on target teams distribute parallel for with dist_schedule static chunk,schedule dynamic chunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(dynamic,sch) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on target teams distribute parallel for with dist_schedule static chunk, schedule guided nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(guided) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on target teams distribute parallel for with dist_schedule static chunk, schedule guided chunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(guided,sch) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on target teams distribute parallel for with dist_schedule static chunk, schedule auto. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(dynamic) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on target teams distribute parallel for with dist_schedule static chunk, schedule runtime. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(runtime) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on teams distribute parallel for. 
// for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for num_teams(tms) thread_limit(ths) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } // // Test: reduction on teams distribute parallel for with schedule static nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for num_teams(tms) thread_limit(ths) schedule(static) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } // // Test: reduction on teams distribute parallel for with schedule static chunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for num_teams(tms) thread_limit(ths) schedule(static,sch) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on teams distribute parallel for with schedule dynamic nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for num_teams(tms) thread_limit(ths) schedule(dynamic) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } // // Test: reduction on teams distribute parallel for with schedule dynamic chunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for num_teams(tms) thread_limit(ths) schedule(dynamic,sch) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on teams distribute parallel for with dist_schedule static nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for num_teams(tms) thread_limit(ths) dist_schedule(static) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } // // Test: reduction on teams distribute parallel for with dist_schedule static nochunk, schedule static nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for num_teams(tms) thread_limit(ths) dist_schedule(static) schedule(static) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } // // Test: reduction on teams distribute parallel for with dist_schedule static nochunk, schedule static chunk. 
// for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for num_teams(tms) thread_limit(ths) dist_schedule(static) schedule(static,sch) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on teams distribute parallel for with dist_schedule static chunk, schedule static nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(static) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on teams distribute parallel for with dist_schedule static chunk, schedule static chunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(static,sch) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on teams distribute parallel for with dist_schedule static chunk, schedule dynamic nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(dynamic) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on teams distribute parallel for with dist_schedule static chunk,schedule dynamic chunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(dynamic,sch) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on teams distribute parallel for with dist_schedule static chunk, schedule guided nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(guided) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on teams distribute parallel for with dist_schedule static chunk, schedule guided chunk. 
// for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(guided,sch) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on teams distribute parallel for with dist_schedule static chunk, schedule auto. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(dynamic) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on teams distribute parallel for with dist_schedule static chunk, schedule runtime. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(runtime) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on target teams distribute parallel for simd. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for simd num_teams(tms) thread_limit(ths) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } // // Test: reduction on target teams distribute parallel for simd with schedule static nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for simd num_teams(tms) thread_limit(ths) schedule(static) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } // // Test: reduction on target teams distribute parallel for simd with schedule static chunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for simd num_teams(tms) thread_limit(ths) schedule(static,sch) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on target teams distribute parallel for simd with schedule dynamic nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for simd num_teams(tms) thread_limit(ths) schedule(dynamic) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } // // Test: reduction on target teams distribute parallel for simd with schedule dynamic chunk. 
// for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for simd num_teams(tms) thread_limit(ths) schedule(dynamic,sch) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on target teams distribute parallel for simd with dist_schedule static nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for simd num_teams(tms) thread_limit(ths) dist_schedule(static) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } // // Test: reduction on target teams distribute parallel for simd with dist_schedule static nochunk, schedule static nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for simd num_teams(tms) thread_limit(ths) dist_schedule(static) schedule(static) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } // // Test: reduction on target teams distribute parallel for simd with dist_schedule static nochunk, schedule static chunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for simd num_teams(tms) thread_limit(ths) dist_schedule(static) schedule(static,sch) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on target teams distribute parallel for simd with dist_schedule static chunk, schedule static nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for simd num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(static) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on target teams distribute parallel for simd with dist_schedule static chunk, schedule static chunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for simd num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(static,sch) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on target teams distribute parallel for simd with dist_schedule static chunk, schedule dynamic nochunk. 
// for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for simd num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(dynamic) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on target teams distribute parallel for simd with dist_schedule static chunk,schedule dynamic chunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for simd num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(dynamic,sch) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on target teams distribute parallel for simd with dist_schedule static chunk, schedule guided nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for simd num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(guided) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on target teams distribute parallel for simd with dist_schedule static chunk, schedule guided chunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for simd num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(guided,sch) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on target teams distribute parallel for simd with dist_schedule static chunk, schedule auto. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for simd num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(dynamic) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on target teams distribute parallel for simd with dist_schedule static chunk, schedule runtime. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for simd num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(runtime) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on teams distribute parallel for simd. 
// for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for simd num_teams(tms) thread_limit(ths) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } // // Test: reduction on teams distribute parallel for simd with schedule static nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for simd num_teams(tms) thread_limit(ths) schedule(static) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } // // Test: reduction on teams distribute parallel for simd with schedule static chunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for simd num_teams(tms) thread_limit(ths) schedule(static,sch) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on teams distribute parallel for simd with schedule dynamic nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for simd num_teams(tms) thread_limit(ths) schedule(dynamic) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } // // Test: reduction on teams distribute parallel for simd with schedule dynamic chunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for simd num_teams(tms) thread_limit(ths) schedule(dynamic,sch) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on teams distribute parallel for simd with dist_schedule static nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for simd num_teams(tms) thread_limit(ths) dist_schedule(static) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } // // Test: reduction on teams distribute parallel for simd with dist_schedule static nochunk, schedule static nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for simd num_teams(tms) thread_limit(ths) dist_schedule(static) schedule(static) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } // // Test: reduction on teams distribute parallel for simd with dist_schedule static nochunk, schedule static chunk. 
// for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for simd num_teams(tms) thread_limit(ths) dist_schedule(static) schedule(static,sch) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on teams distribute parallel for simd with dist_schedule static chunk, schedule static nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for simd num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(static) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on teams distribute parallel for simd with dist_schedule static chunk, schedule static chunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for simd num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(static,sch) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on teams distribute parallel for simd with dist_schedule static chunk, schedule dynamic nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for simd num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(dynamic) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on teams distribute parallel for simd with dist_schedule static chunk,schedule dynamic chunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for simd num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(dynamic,sch) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on teams distribute parallel for simd with dist_schedule static chunk, schedule guided nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for simd num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(guided) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on teams distribute parallel for simd with dist_schedule static chunk, schedule guided chunk. 
// for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for simd num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(guided,sch) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on teams distribute parallel for simd with dist_schedule static chunk, schedule auto. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for simd num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(dynamic) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on teams distribute parallel for simd with dist_schedule static chunk, schedule runtime. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for simd num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(runtime) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } return 0; }
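The flattened test file above repeats one reduction pattern across many num_teams/thread_limit, dist_schedule, and schedule combinations. As a readable reference, here is a minimal, self-contained sketch of that pattern, assuming only a compiler with OpenMP target offloading; the variable names, team/thread counts, and chunk sizes are illustrative, and the harness macros (TESTD2, REDUCTION_MAP, REDUCTION_CLAUSES) are intentionally not used.

#include <stdio.h>
#include <omp.h>

#define N (957*3)

/* Illustrative distillation: one sum reduction and one logical-AND reduction,
 * mirroring the Rd1 / Rd9 pattern in the harness above. */
int main(void) {
  static double A[N];
  double sum = 0.0;
  int all_positive = 1;

  for (int i = 0; i < N; i++)
    A[i] = (double)(1 << 16);

  /* Combined construct with explicit map of the reduction scalars, as the
   * harness does via REDUCTION_MAP; chunk sizes here are arbitrary examples. */
  #pragma omp target teams distribute parallel for \
      num_teams(8) thread_limit(64) \
      dist_schedule(static, 128) schedule(dynamic, 16) \
      map(to: A[0:N]) map(tofrom: sum, all_positive) \
      reduction(+: sum) reduction(&&: all_positive)
  for (int i = 0; i < N; i++) {
    sum += A[i];
    all_positive = all_positive && (A[i] > 0);
  }

  double expected = (double)N * (double)(1 << 16);
  printf("sum ok: %d, all_positive: %d\n", sum == expected, all_positive);
  return (sum == expected && all_positive) ? 0 : 1;
}

The serial check plays the role of the EXPECTED[] comparison in the harness: every directive/schedule variant must produce the same reduced values.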
#include <stdio.h> #include <float.h> #include <stdlib.h> #include <math.h> #include <omp.h> #include "../utilities/check.h" #include "../utilities/utilities.h" #define TRIALS (1) #define N (957*3) #define ZERO(X) ZERO_ARRAY(N, X) #define INIT() { \ INIT_LOOP(N, { \ Ad[i] = 1 << 16; \ Bd[i] = i << 16; \ Cd[i] = -(i << 16); \ Dd[i] = (2*i+1) << 16; \ Ed[i] = ((i % 2 == 0 ? 0x1 : 0x0) << 16) | \ ((i % 3 == 0 ? 0x2 : 0x0) << 16); \ }) \ } #define INIT1 (1) #define INIT2 (3) #define INIT3 (5) #define INIT4 (7) #define INITc5 (9) #define INITs5 (9 << 4) #define INITi5 (9 << 16) #define INITll5 (9ll << 32) #define INITf5 (9 << 8) #define INITd5 (9 << 16) #define INITc6 (0xf) #define INITs6 (0xff << 4) #define INITi6 (0xff << 16) #define INITll6 (0xffll << 32) #define INITf6 (0xff << 8) #define INITd6 (0xff << 16) #define INIT7 (0) #define INIT8 (0) #define INIT9 (1) #define INIT10 (0) #define EXPECTED_RESULT ( \ INIT1 + INIT2 + \ (N << 16) + (N << 16) + \ /* + (2*(N-1)+1) - (N-1) */ + \ (INITd5*2*2*2) + \ 1 + 1 \ ) #define REDUCTION_CLAUSES reduction(+:Rd1) reduction(-:Rd2) reduction(*:Rd5) \ reduction(&&:Rd9) reduction(||:Rd10) //reduction(max:Ri3) reduction(min:Ri4) #define REDUCTION_MAP map(tofrom: Rd1, Rd2, Rd5, Rd9, Rd10) #define REDUCTION_INIT() { \ Rd1 = INIT1; Rd2 = INIT2; \ Rd3 = INIT3; Rd4 = INIT4; \ Rd5 = INITd5; Rd6 = INITd6; \ Rd7 = INIT7; Rd8 = INIT8; \ Rd9 = INIT9; Rd10 = INIT10; \ } #define REDUCTION_BODY() \ Rd1 += Ad[i] + (Bd[i] + Cd[i]); \ Rd2 += Ad[i] + (Bd[i] + Cd[i]); \ /*Rd3 = Dd[i] > Rd3 ? Dd[i] : Rd3; \ Rd4 = Cd[i] < Rd4 ? Cd[i] : Rd4; \*/ \ Rd5 *= i % 1000 == 0 ? 2 : 1; \ Rd9 = Rd9 && Ad[i] > 0; \ Rd10 = Rd10 || Ad[i] > 0; #define REDUCTION_LOOP() \ for (int i = 0; i < N; i++) { \ REDUCTION_BODY(); \ } #define REDUCTION_FINAL() { \ OUT[0] += (long long) (Rd1 + Rd2 /*+ Rd3 + Rd4 */ + Rd5 + Rd9 + Rd10); \ } int main(void) { check_offloading(); double Ad[N], Bd[N], Cd[N], Dd[N], Ed[N]; double Rd1, Rd2, Rd3, Rd4, Rd5, Rd6, Rd7, Rd8, Rd9, Rd10; long long OUT[1]; long long EXPECTED[1]; EXPECTED[0] = EXPECTED_RESULT; int cpuExec = 0; cpuExec = omp_is_initial_device(); int gpu_threads = 512; int cpu_threads = 32; int max_threads = cpuExec ? cpu_threads : gpu_threads; INIT(); if (cpuExec) { // Certain tests in this testcase fails on the host. A bug report has // been filed: https://puna0.watson.ibm.com/T143 // Disabling this test on the host for now. DUMP_SUCCESS(3153); return 0; } // // Test: reduction on teams. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams num_teams(tms) REDUCTION_CLAUSES") { int tid = omp_get_team_num(); int th = omp_get_num_teams(); for (int i = tid; i < N; i+= th) { REDUCTION_BODY(); } } }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } // // Test: reduction on teams with nested parallel. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams num_teams(tms) REDUCTION_CLAUSES") { int tid = omp_get_team_num(); int th = omp_get_num_teams(); _Pragma("omp parallel for REDUCTION_CLAUSES") for (int i = tid; i < N; i+= th) { REDUCTION_BODY(); } } }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } // // Test: reduction on target teams. 
// for (int tms = 1 ; tms <= 512 ; tms *= 7) { OUT[0] = 0; TESTD2("omp target teams num_teams(tms) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, { { int tid = omp_get_team_num(); int th = omp_get_num_teams(); for (int i = tid; i < N; i+= th) { REDUCTION_BODY(); } } }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } // // Test: reduction on teams with nested parallel. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { OUT[0] = 0; TESTD2("omp target teams num_teams(tms) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, { { int tid = omp_get_team_num(); int th = omp_get_num_teams(); _Pragma("omp parallel for REDUCTION_CLAUSES") for (int i = tid; i < N; i+= th) { REDUCTION_BODY(); } } }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } // // Test: reduction on target parallel with nested parallel. // OUT[0] = 0; TESTD2("omp target parallel num_threads(30) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, { { int tid = omp_get_thread_num(); int th = omp_get_num_threads(); _Pragma("omp simd REDUCTION_CLAUSES") for (int i = tid; i < N; i+= th) { REDUCTION_BODY(); } } }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); // // Test: reduction on target teams distribute parallel for. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for num_teams(tms) thread_limit(ths) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } // // Test: reduction on target teams distribute parallel for with schedule static nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for num_teams(tms) thread_limit(ths) schedule(static) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } // // Test: reduction on target teams distribute parallel for with schedule static chunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for num_teams(tms) thread_limit(ths) schedule(static,sch) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on target teams distribute parallel for with schedule dynamic nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for num_teams(tms) thread_limit(ths) schedule(dynamic) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } // // Test: reduction on target teams distribute parallel for with schedule dynamic chunk. 
// for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for num_teams(tms) thread_limit(ths) schedule(dynamic,sch) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on target teams distribute parallel for with dist_schedule static nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for num_teams(tms) thread_limit(ths) dist_schedule(static) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } // // Test: reduction on target teams distribute parallel for with dist_schedule static nochunk, schedule static nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for num_teams(tms) thread_limit(ths) dist_schedule(static) schedule(static) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } // // Test: reduction on target teams distribute parallel for with dist_schedule static nochunk, schedule static chunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for num_teams(tms) thread_limit(ths) dist_schedule(static) schedule(static,sch) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on target teams distribute parallel for with dist_schedule static chunk, schedule static nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(static) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on target teams distribute parallel for with dist_schedule static chunk, schedule static chunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(static,sch) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on target teams distribute parallel for with dist_schedule static chunk, schedule dynamic nochunk. 
// for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(dynamic) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on target teams distribute parallel for with dist_schedule static chunk,schedule dynamic chunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(dynamic,sch) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on target teams distribute parallel for with dist_schedule static chunk, schedule guided nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(guided) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on target teams distribute parallel for with dist_schedule static chunk, schedule guided chunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(guided,sch) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on target teams distribute parallel for with dist_schedule static chunk, schedule auto. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(dynamic) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on target teams distribute parallel for with dist_schedule static chunk, schedule runtime. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(runtime) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on teams distribute parallel for. 
// for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for num_teams(tms) thread_limit(ths) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } // // Test: reduction on teams distribute parallel for with schedule static nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for num_teams(tms) thread_limit(ths) schedule(static) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } // // Test: reduction on teams distribute parallel for with schedule static chunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for num_teams(tms) thread_limit(ths) schedule(static,sch) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on teams distribute parallel for with schedule dynamic nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for num_teams(tms) thread_limit(ths) schedule(dynamic) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } // // Test: reduction on teams distribute parallel for with schedule dynamic chunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for num_teams(tms) thread_limit(ths) schedule(dynamic,sch) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on teams distribute parallel for with dist_schedule static nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for num_teams(tms) thread_limit(ths) dist_schedule(static) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } // // Test: reduction on teams distribute parallel for with dist_schedule static nochunk, schedule static nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for num_teams(tms) thread_limit(ths) dist_schedule(static) schedule(static) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } // // Test: reduction on teams distribute parallel for with dist_schedule static nochunk, schedule static chunk. 
// for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for num_teams(tms) thread_limit(ths) dist_schedule(static) schedule(static,sch) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on teams distribute parallel for with dist_schedule static chunk, schedule static nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(static) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on teams distribute parallel for with dist_schedule static chunk, schedule static chunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(static,sch) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on teams distribute parallel for with dist_schedule static chunk, schedule dynamic nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(dynamic) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on teams distribute parallel for with dist_schedule static chunk,schedule dynamic chunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(dynamic,sch) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on teams distribute parallel for with dist_schedule static chunk, schedule guided nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(guided) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on teams distribute parallel for with dist_schedule static chunk, schedule guided chunk. 
// for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(guided,sch) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on teams distribute parallel for with dist_schedule static chunk, schedule auto. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(dynamic) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on teams distribute parallel for with dist_schedule static chunk, schedule runtime. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(runtime) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on target teams distribute parallel for simd. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for simd num_teams(tms) thread_limit(ths) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } // // Test: reduction on target teams distribute parallel for simd with schedule static nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for simd num_teams(tms) thread_limit(ths) schedule(static) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } // // Test: reduction on target teams distribute parallel for simd with schedule static chunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for simd num_teams(tms) thread_limit(ths) schedule(static,sch) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on target teams distribute parallel for simd with schedule dynamic nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for simd num_teams(tms) thread_limit(ths) schedule(dynamic) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } // // Test: reduction on target teams distribute parallel for simd with schedule dynamic chunk. 
// for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for simd num_teams(tms) thread_limit(ths) schedule(dynamic,sch) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on target teams distribute parallel for simd with dist_schedule static nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for simd num_teams(tms) thread_limit(ths) dist_schedule(static) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } // // Test: reduction on target teams distribute parallel for simd with dist_schedule static nochunk, schedule static nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for simd num_teams(tms) thread_limit(ths) dist_schedule(static) schedule(static) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } // // Test: reduction on target teams distribute parallel for simd with dist_schedule static nochunk, schedule static chunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for simd num_teams(tms) thread_limit(ths) dist_schedule(static) schedule(static,sch) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on target teams distribute parallel for simd with dist_schedule static chunk, schedule static nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for simd num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(static) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on target teams distribute parallel for simd with dist_schedule static chunk, schedule static chunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for simd num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(static,sch) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on target teams distribute parallel for simd with dist_schedule static chunk, schedule dynamic nochunk. 
// for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for simd num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(dynamic) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on target teams distribute parallel for simd with dist_schedule static chunk,schedule dynamic chunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for simd num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(dynamic,sch) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on target teams distribute parallel for simd with dist_schedule static chunk, schedule guided nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for simd num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(guided) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on target teams distribute parallel for simd with dist_schedule static chunk, schedule guided chunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for simd num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(guided,sch) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on target teams distribute parallel for simd with dist_schedule static chunk, schedule auto. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for simd num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(dynamic) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on target teams distribute parallel for simd with dist_schedule static chunk, schedule runtime. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for simd num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(runtime) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on teams distribute parallel for simd. 
// for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for simd num_teams(tms) thread_limit(ths) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } // // Test: reduction on teams distribute parallel for simd with schedule static nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for simd num_teams(tms) thread_limit(ths) schedule(static) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } // // Test: reduction on teams distribute parallel for simd with schedule static chunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for simd num_teams(tms) thread_limit(ths) schedule(static,sch) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on teams distribute parallel for simd with schedule dynamic nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for simd num_teams(tms) thread_limit(ths) schedule(dynamic) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } // // Test: reduction on teams distribute parallel for simd with schedule dynamic chunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for simd num_teams(tms) thread_limit(ths) schedule(dynamic,sch) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on teams distribute parallel for simd with dist_schedule static nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for simd num_teams(tms) thread_limit(ths) dist_schedule(static) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } // // Test: reduction on teams distribute parallel for simd with dist_schedule static nochunk, schedule static nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for simd num_teams(tms) thread_limit(ths) dist_schedule(static) schedule(static) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } // // Test: reduction on teams distribute parallel for simd with dist_schedule static nochunk, schedule static chunk. 
// for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for simd num_teams(tms) thread_limit(ths) dist_schedule(static) schedule(static,sch) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on teams distribute parallel for simd with dist_schedule static chunk, schedule static nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for simd num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(static) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on teams distribute parallel for simd with dist_schedule static chunk, schedule static chunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for simd num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(static,sch) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on teams distribute parallel for simd with dist_schedule static chunk, schedule dynamic nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for simd num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(dynamic) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on teams distribute parallel for simd with dist_schedule static chunk,schedule dynamic chunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for simd num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(dynamic,sch) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on teams distribute parallel for simd with dist_schedule static chunk, schedule guided nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for simd num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(guided) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on teams distribute parallel for simd with dist_schedule static chunk, schedule guided chunk. 
// for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for simd num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(guided,sch) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on teams distribute parallel for simd with dist_schedule static chunk, schedule auto. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for simd num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(dynamic) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on teams distribute parallel for simd with dist_schedule static chunk, schedule runtime. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for simd num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(runtime) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } return 0; }
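Note: the test variants above emit their directives through the C/C++ `_Pragma` operator inside the TESTD2 wrapper. As a reminder of how `_Pragma` behaves on its own, here is a minimal standalone sketch; the macro name `OMP_SUM_LOOP`, the loop bounds, and the variable names are made up for illustration and are not taken from the suite. Because the directive text is a fixed string literal, the reduction variable really must be named `sum` here.

#include <cstdio>

// Illustrative only: a macro that expands to an OpenMP directive via _Pragma.
#define OMP_SUM_LOOP _Pragma("omp parallel for reduction(+:sum)")

int main() {
  long long sum = 0;
  OMP_SUM_LOOP
  for (int i = 0; i < 1000; ++i) {
    sum += i;
  }
  std::printf("sum = %lld\n", sum);  // 499500
  return 0;
}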
#include <stdio.h> #include <float.h> #include <stdlib.h> #include <math.h> #include <omp.h> #pragma omp requires unified_shared_memory #include "../utilities/check.h" #include "../utilities/utilities.h" #define TRIALS (1) #define N (957*3) #define ZERO(X) ZERO_ARRAY(N, X) #define INIT() { \ INIT_LOOP(N, { \ Ad[i] = 1 << 16; \ Bd[i] = i << 16; \ Cd[i] = -(i << 16); \ Dd[i] = (2*i+1) << 16; \ Ed[i] = ((i % 2 == 0 ? 0x1 : 0x0) << 16) | \ ((i % 3 == 0 ? 0x2 : 0x0) << 16); \ }) \ } #define INIT1 (1) #define INIT2 (3) #define INIT3 (5) #define INIT4 (7) #define INITc5 (9) #define INITs5 (9 << 4) #define INITi5 (9 << 16) #define INITll5 (9ll << 32) #define INITf5 (9 << 8) #define INITd5 (9 << 16) #define INITc6 (0xf) #define INITs6 (0xff << 4) #define INITi6 (0xff << 16) #define INITll6 (0xffll << 32) #define INITf6 (0xff << 8) #define INITd6 (0xff << 16) #define INIT7 (0) #define INIT8 (0) #define INIT9 (1) #define INIT10 (0) #define EXPECTED_RESULT ( \ INIT1 + INIT2 + \ (N << 16) + (N << 16) + \ /* + (2*(N-1)+1) - (N-1) */ + \ (INITd5*2*2*2) + \ 1 + 1 \ ) #define REDUCTION_CLAUSES reduction(+:Rd1) reduction(-:Rd2) reduction(*:Rd5) \ reduction(&&:Rd9) reduction(||:Rd10) //reduction(max:Ri3) reduction(min:Ri4) #define REDUCTION_MAP map(tofrom: Rd1, Rd2, Rd5, Rd9, Rd10) #define REDUCTION_INIT() { \ Rd1 = INIT1; Rd2 = INIT2; \ Rd3 = INIT3; Rd4 = INIT4; \ Rd5 = INITd5; Rd6 = INITd6; \ Rd7 = INIT7; Rd8 = INIT8; \ Rd9 = INIT9; Rd10 = INIT10; \ } #define REDUCTION_BODY() \ Rd1 += Ad[i] + (Bd[i] + Cd[i]); \ Rd2 += Ad[i] + (Bd[i] + Cd[i]); \ /*Rd3 = Dd[i] > Rd3 ? Dd[i] : Rd3; \ Rd4 = Cd[i] < Rd4 ? Cd[i] : Rd4; \*/ \ Rd5 *= i % 1000 == 0 ? 2 : 1; \ Rd9 = Rd9 && Ad[i] > 0; \ Rd10 = Rd10 || Ad[i] > 0; #define REDUCTION_LOOP() \ for (int i = 0; i < N; i++) { \ REDUCTION_BODY(); \ } #define REDUCTION_FINAL() { \ OUT[0] += (long long) (Rd1 + Rd2 /*+ Rd3 + Rd4 */ + Rd5 + Rd9 + Rd10); \ } int main(void) { check_offloading(); double Ad[N], Bd[N], Cd[N], Dd[N], Ed[N]; double Rd1, Rd2, Rd3, Rd4, Rd5, Rd6, Rd7, Rd8, Rd9, Rd10; long long OUT[1]; long long EXPECTED[1]; EXPECTED[0] = EXPECTED_RESULT; int cpuExec = 0; #pragma omp target map(tofrom: cpuExec) { cpuExec = omp_is_initial_device(); } int gpu_threads = 512; int cpu_threads = 32; int max_threads = cpuExec ? cpu_threads : gpu_threads; INIT(); if (cpuExec) { // Certain tests in this testcase fails on the host. A bug report has // been filed: https://puna0.watson.ibm.com/T143 // Disabling this test on the host for now. DUMP_SUCCESS(3153); return 0; } // // Test: reduction on teams. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams num_teams(tms) REDUCTION_CLAUSES") { int tid = omp_get_team_num(); int th = omp_get_num_teams(); for (int i = tid; i < N; i+= th) { REDUCTION_BODY(); } } }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } // // Test: reduction on teams with nested parallel. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams num_teams(tms) REDUCTION_CLAUSES") { int tid = omp_get_team_num(); int th = omp_get_num_teams(); _Pragma("omp parallel for REDUCTION_CLAUSES") for (int i = tid; i < N; i+= th) { REDUCTION_BODY(); } } }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } // // Test: reduction on target teams. 
// for (int tms = 1 ; tms <= 512 ; tms *= 7) { OUT[0] = 0; TESTD2("omp target teams num_teams(tms) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, { { int tid = omp_get_team_num(); int th = omp_get_num_teams(); for (int i = tid; i < N; i+= th) { REDUCTION_BODY(); } } }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } // // Test: reduction on teams with nested parallel. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { OUT[0] = 0; TESTD2("omp target teams num_teams(tms) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, { { int tid = omp_get_team_num(); int th = omp_get_num_teams(); _Pragma("omp parallel for REDUCTION_CLAUSES") for (int i = tid; i < N; i+= th) { REDUCTION_BODY(); } } }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } // // Test: reduction on target parallel with nested parallel. // OUT[0] = 0; TESTD2("omp target parallel num_threads(30) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, { { int tid = omp_get_thread_num(); int th = omp_get_num_threads(); _Pragma("omp simd REDUCTION_CLAUSES") for (int i = tid; i < N; i+= th) { REDUCTION_BODY(); } } }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); // // Test: reduction on target teams distribute parallel for. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for num_teams(tms) thread_limit(ths) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } // // Test: reduction on target teams distribute parallel for with schedule static nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for num_teams(tms) thread_limit(ths) schedule(static) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } // // Test: reduction on target teams distribute parallel for with schedule static chunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for num_teams(tms) thread_limit(ths) schedule(static,sch) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on target teams distribute parallel for with schedule dynamic nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for num_teams(tms) thread_limit(ths) schedule(dynamic) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } // // Test: reduction on target teams distribute parallel for with schedule dynamic chunk. 
// for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for num_teams(tms) thread_limit(ths) schedule(dynamic,sch) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on target teams distribute parallel for with dist_schedule static nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for num_teams(tms) thread_limit(ths) dist_schedule(static) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } // // Test: reduction on target teams distribute parallel for with dist_schedule static nochunk, schedule static nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for num_teams(tms) thread_limit(ths) dist_schedule(static) schedule(static) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } // // Test: reduction on target teams distribute parallel for with dist_schedule static nochunk, schedule static chunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for num_teams(tms) thread_limit(ths) dist_schedule(static) schedule(static,sch) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on target teams distribute parallel for with dist_schedule static chunk, schedule static nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(static) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on target teams distribute parallel for with dist_schedule static chunk, schedule static chunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(static,sch) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on target teams distribute parallel for with dist_schedule static chunk, schedule dynamic nochunk. 
// for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(dynamic) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on target teams distribute parallel for with dist_schedule static chunk,schedule dynamic chunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(dynamic,sch) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on target teams distribute parallel for with dist_schedule static chunk, schedule guided nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(guided) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on target teams distribute parallel for with dist_schedule static chunk, schedule guided chunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(guided,sch) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on target teams distribute parallel for with dist_schedule static chunk, schedule auto. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(dynamic) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on target teams distribute parallel for with dist_schedule static chunk, schedule runtime. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(runtime) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on teams distribute parallel for. 
// for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for num_teams(tms) thread_limit(ths) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } // // Test: reduction on teams distribute parallel for with schedule static nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for num_teams(tms) thread_limit(ths) schedule(static) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } // // Test: reduction on teams distribute parallel for with schedule static chunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for num_teams(tms) thread_limit(ths) schedule(static,sch) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on teams distribute parallel for with schedule dynamic nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for num_teams(tms) thread_limit(ths) schedule(dynamic) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } // // Test: reduction on teams distribute parallel for with schedule dynamic chunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for num_teams(tms) thread_limit(ths) schedule(dynamic,sch) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on teams distribute parallel for with dist_schedule static nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for num_teams(tms) thread_limit(ths) dist_schedule(static) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } // // Test: reduction on teams distribute parallel for with dist_schedule static nochunk, schedule static nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for num_teams(tms) thread_limit(ths) dist_schedule(static) schedule(static) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } // // Test: reduction on teams distribute parallel for with dist_schedule static nochunk, schedule static chunk. 
// for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for num_teams(tms) thread_limit(ths) dist_schedule(static) schedule(static,sch) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on teams distribute parallel for with dist_schedule static chunk, schedule static nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(static) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on teams distribute parallel for with dist_schedule static chunk, schedule static chunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(static,sch) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on teams distribute parallel for with dist_schedule static chunk, schedule dynamic nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(dynamic) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on teams distribute parallel for with dist_schedule static chunk,schedule dynamic chunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(dynamic,sch) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on teams distribute parallel for with dist_schedule static chunk, schedule guided nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(guided) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on teams distribute parallel for with dist_schedule static chunk, schedule guided chunk. 
// for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(guided,sch) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on teams distribute parallel for with dist_schedule static chunk, schedule auto. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(dynamic) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on teams distribute parallel for with dist_schedule static chunk, schedule runtime. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(runtime) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on target teams distribute parallel for simd. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for simd num_teams(tms) thread_limit(ths) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } // // Test: reduction on target teams distribute parallel for simd with schedule static nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for simd num_teams(tms) thread_limit(ths) schedule(static) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } // // Test: reduction on target teams distribute parallel for simd with schedule static chunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for simd num_teams(tms) thread_limit(ths) schedule(static,sch) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on target teams distribute parallel for simd with schedule dynamic nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for simd num_teams(tms) thread_limit(ths) schedule(dynamic) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } // // Test: reduction on target teams distribute parallel for simd with schedule dynamic chunk. 
// for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for simd num_teams(tms) thread_limit(ths) schedule(dynamic,sch) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on target teams distribute parallel for simd with dist_schedule static nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for simd num_teams(tms) thread_limit(ths) dist_schedule(static) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } // // Test: reduction on target teams distribute parallel for simd with dist_schedule static nochunk, schedule static nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for simd num_teams(tms) thread_limit(ths) dist_schedule(static) schedule(static) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } // // Test: reduction on target teams distribute parallel for simd with dist_schedule static nochunk, schedule static chunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for simd num_teams(tms) thread_limit(ths) dist_schedule(static) schedule(static,sch) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on target teams distribute parallel for simd with dist_schedule static chunk, schedule static nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for simd num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(static) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on target teams distribute parallel for simd with dist_schedule static chunk, schedule static chunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for simd num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(static,sch) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on target teams distribute parallel for simd with dist_schedule static chunk, schedule dynamic nochunk. 
// for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for simd num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(dynamic) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on target teams distribute parallel for simd with dist_schedule static chunk,schedule dynamic chunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for simd num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(dynamic,sch) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on target teams distribute parallel for simd with dist_schedule static chunk, schedule guided nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for simd num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(guided) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on target teams distribute parallel for simd with dist_schedule static chunk, schedule guided chunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for simd num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(guided,sch) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on target teams distribute parallel for simd with dist_schedule static chunk, schedule auto. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for simd num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(dynamic) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on target teams distribute parallel for simd with dist_schedule static chunk, schedule runtime. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target teams distribute parallel for simd num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(runtime) REDUCTION_MAP REDUCTION_CLAUSES", { REDUCTION_INIT(); }, REDUCTION_LOOP(), { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on teams distribute parallel for simd. 
// for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for simd num_teams(tms) thread_limit(ths) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } // // Test: reduction on teams distribute parallel for simd with schedule static nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for simd num_teams(tms) thread_limit(ths) schedule(static) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } // // Test: reduction on teams distribute parallel for simd with schedule static chunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for simd num_teams(tms) thread_limit(ths) schedule(static,sch) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on teams distribute parallel for simd with schedule dynamic nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for simd num_teams(tms) thread_limit(ths) schedule(dynamic) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } // // Test: reduction on teams distribute parallel for simd with schedule dynamic chunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for simd num_teams(tms) thread_limit(ths) schedule(dynamic,sch) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on teams distribute parallel for simd with dist_schedule static nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for simd num_teams(tms) thread_limit(ths) dist_schedule(static) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } // // Test: reduction on teams distribute parallel for simd with dist_schedule static nochunk, schedule static nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for simd num_teams(tms) thread_limit(ths) dist_schedule(static) schedule(static) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } // // Test: reduction on teams distribute parallel for simd with dist_schedule static nochunk, schedule static chunk. 
// for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for simd num_teams(tms) thread_limit(ths) dist_schedule(static) schedule(static,sch) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on teams distribute parallel for simd with dist_schedule static chunk, schedule static nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for simd num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(static) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on teams distribute parallel for simd with dist_schedule static chunk, schedule static chunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for simd num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(static,sch) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on teams distribute parallel for simd with dist_schedule static chunk, schedule dynamic nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for simd num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(dynamic) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on teams distribute parallel for simd with dist_schedule static chunk,schedule dynamic chunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for simd num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(dynamic,sch) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on teams distribute parallel for simd with dist_schedule static chunk, schedule guided nochunk. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for simd num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(guided) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on teams distribute parallel for simd with dist_schedule static chunk, schedule guided chunk. 
// for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for simd num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(guided,sch) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on teams distribute parallel for simd with dist_schedule static chunk, schedule auto. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for simd num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(dynamic) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } // // Test: reduction on teams distribute parallel for simd with dist_schedule static chunk, schedule runtime. // for (int tms = 1 ; tms <= 512 ; tms *= 7) { for (int ths = 1 ; ths <= 1024 ; ths *= 9) { for(int sch = 1 ; sch <= N ; sch *= 9) { OUT[0] = 0; TESTD2("omp target REDUCTION_MAP", { REDUCTION_INIT(); }, { _Pragma("omp teams distribute parallel for simd num_teams(tms) thread_limit(ths) dist_schedule(static,sch) schedule(runtime) REDUCTION_CLAUSES") REDUCTION_LOOP() }, { REDUCTION_FINAL(); }, VERIFY(0, 1, OUT[i], (trial+1) * EXPECTED[i])); } } } return 0; }
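All of the variants in this file exercise the same underlying construct: a reduction carried across a target teams distribute parallel for (optionally simd) region under different num_teams, thread_limit, schedule, and dist_schedule combinations. For reference, a minimal self-contained example of that construct is sketched below; the problem size and clause values are made up and are not taken from the suite.

#include <cstdio>

int main() {
  const int n = 1000;
  long long sum = 0;
  // Combined construct: offload, create a league of teams, distribute the
  // iteration space across teams and threads, and combine the partial sums.
  #pragma omp target teams distribute parallel for \
      num_teams(4) thread_limit(64) schedule(static) \
      map(tofrom: sum) reduction(+: sum)
  for (int i = 0; i < n; ++i) {
    sum += i;
  }
  std::printf("sum = %lld (expected %lld)\n", sum, (long long)n * (n - 1) / 2);
  return 0;
}

The point of sweeping tms, ths, and sch in the tests is that the reduced value must come out identical no matter how the iteration space is partitioned, which is exactly what each VERIFY call checks.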
extract_image_patches.h
/* Copyright 2018 The Blueoil Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #ifndef DLK_FUNC_EXTRACT_IMAGE_PATCHES #define DLK_FUNC_EXTRACT_IMAGE_PATCHES #include <algorithm> #include "global.h" #include "tensor_view.h" #include "time_measurement.h" #include "pack_input_to_qwords.h" #include <limits.h> #ifdef USE_NEON #include <arm_neon.h> #endif template <typename T> void func_ExtractImagePatches( const TensorView<T, MemoryLayout::NHWC>& input, const TensorView<T, MemoryLayout::NHWC>& output, T_UINT kernel_size, T_UINT stride) { Measurement::Start("ExtractImagePatches"); const auto in_shape = input.get_shape(); const T_UINT input_width = in_shape[2]; const T_UINT input_depth = in_shape[3]; const auto out_shape = output.get_shape(); const T_UINT out_height = out_shape[1]; const T_UINT out_width = out_shape[2]; const T_UINT out_depth = out_shape[3]; for(T_UINT kz = 0; kz < input_depth; ++kz) for(T_UINT wi = 0; wi < out_height; wi++) for(T_UINT wj = 0; wj < out_width; wj++) for(T_UINT ki = 0; ki < kernel_size; ki++) for(T_UINT kj = 0; kj < kernel_size; kj++) { T_INT row = (wi * stride) + ki; T_INT col = (wj * stride) + kj; const auto ch = kz + (ki * kernel_size + kj) * input_depth; const auto out_idx = wi * out_width * out_depth + wj * out_depth + ch; const auto in_idx = row * input_width * input_depth + col * input_depth + kz; output.data()[out_idx] = input.data()[in_idx]; } Measurement::Stop(); } inline void func_ExtractImagePatches( const TensorView<QUANTIZED_PACKED, MemoryLayout::HWChBCl>& input, const TensorView<QUANTIZED_PACKED, MemoryLayout::HWChBCl>& output, T_UINT kernel_size, T_UINT stride) { Measurement::Start("ExtractImagePatches"); const auto in_shape = input.get_shape(); const T_UINT input_width = in_shape[1]; const T_UINT input_depth = in_shape[2]; const T_UINT bits_per_input = in_shape[3]; const auto out_shape = output.get_shape(); const T_UINT out_height = out_shape[0]; const T_UINT out_width = out_shape[1]; const T_UINT out_depth = out_shape[2]; T_UINT output_index = 0; if (out_depth < kernel_size * kernel_size) { int bit_shift = out_depth * QUANTIZED_PACKED::BitCount / (kernel_size * kernel_size); const QUANTIZED_PACKED::base_t mask((QUANTIZED_PACKED::base_t(1) << bit_shift) - 1); std::fill(output.data(), output.data() + output.size(), QUANTIZED_PACKED(0)); for(T_UINT wi = 0; wi < out_height; wi++) for(T_UINT wj = 0; wj < out_width; wj++) for(T_UINT ki = 0; ki < kernel_size; ki++) for(T_UINT kj = 0; kj < kernel_size; kj++) { T_INT row = (wi * stride) + ki; T_INT col = (wj * stride) + kj; T_UINT ch = (ki * kernel_size + kj) * bit_shift; T_UINT ch_high = ch / QUANTIZED_PACKED::BitCount; T_UINT ch_low = ch % QUANTIZED_PACKED::BitCount; #ifdef USE_NEON const auto out_idx = wi * out_width * out_depth * bits_per_input + wj * out_depth * bits_per_input + ch_high * bits_per_input; const auto in_idx = row * input_width * input_depth * bits_per_input + col * input_depth * bits_per_input; const auto in = 
vld1_u32(reinterpret_cast<uint32_t*>(input.data() + in_idx)); const auto masked = vand_u32(vdup_n_u32(mask), in); #ifdef AARCH32 const auto shifted = vshl_u32(masked, vdup_n_s32(ch_low)); #else const auto shifted = vshl_n_u32(masked, ch_low); #endif const auto out_old = vld1_u32(reinterpret_cast<uint32_t*>(output.data() + out_idx)); const auto out_new = vorr_u32(out_old, shifted); vst1_u32(reinterpret_cast<uint32_t*>(output.data() + out_idx), out_new); #else for(T_UINT digit = 0; digit < bits_per_input; ++digit) { const auto out_idx = wi * out_width * out_depth * bits_per_input + wj * out_depth * bits_per_input + ch_high * bits_per_input + digit; const auto in_idx = row * input_width * input_depth * bits_per_input + col * input_depth * bits_per_input + digit; output.data()[out_idx] |= QUANTIZED_PACKED((mask & input.data()[in_idx].Raw()) << ch_low); } #endif } } else { for(T_UINT ih = 0; ih < input_depth; ++ih) for(T_UINT wi = 0; wi < out_height; wi++) for(T_UINT wj = 0; wj < out_width; wj++) for(T_UINT ki = 0; ki < kernel_size; ki++) for(T_UINT kj = 0; kj < kernel_size; kj++) { T_INT row = (wi * stride) + ki; T_INT col = (wj * stride) + kj; #ifdef USE_NEON const auto ch_high = ih + (ki * kernel_size + kj) * input_depth; const auto out_idx = wi * out_width * out_depth * bits_per_input + wj * out_depth * bits_per_input + ch_high * bits_per_input; const auto in_idx = row * input_width * input_depth * bits_per_input + col * input_depth * bits_per_input + ih * bits_per_input; const auto in = vld1_u32(reinterpret_cast<uint32_t*>(input.data() + in_idx)); vst1_u32(reinterpret_cast<uint32_t*>(output.data() + out_idx), in); #else for(T_UINT digit = 0; digit < bits_per_input; ++digit) { const auto ch_high = ih + (ki * kernel_size + kj) * input_depth; const auto out_idx = wi * out_width * out_depth * bits_per_input + wj * out_depth * bits_per_input + ch_high * bits_per_input + digit; const auto in_idx = row * input_width * input_depth * bits_per_input + col * input_depth * bits_per_input + ih * bits_per_input + digit; output.data()[out_idx] = input.data()[in_idx]; } #endif } } Measurement::Stop(); } inline void func_ExtractImagePatches( const TensorView<QUANTIZED_PACKED, MemoryLayout::ChHWBCl>& input, const TensorView<QUANTIZED_PACKED, MemoryLayout::ChHWBCl>& output, T_UINT kernel_size, T_UINT stride) { Measurement::Start("ExtractImagePatches"); const auto in_shape = input.get_shape(); const T_UINT input_height = in_shape[1]; const T_UINT input_width = in_shape[2]; const T_UINT input_depth = in_shape[0]; const T_UINT bits_per_input = in_shape[3]; const auto out_shape = output.get_shape(); const T_UINT out_height = out_shape[1]; const T_UINT out_width = out_shape[2]; const T_UINT out_depth = out_shape[0]; T_UINT output_index = 0; if (out_depth < kernel_size * kernel_size) { const T_UINT kernel_area = kernel_size * kernel_size; const T_UINT bit_shift = out_depth * QUANTIZED_PACKED::BitCount / kernel_area; const QUANTIZED_PACKED::base_t mask((QUANTIZED_PACKED::base_t(1) << bit_shift) - 1); const T_UINT lb_kernel_size = __builtin_ctz(kernel_size); const T_UINT kernel_mask = (1 << lb_kernel_size) - 1; #ifdef USE_NEON const auto shift_ref = vcombine_s32(vdup_n_s32(0), vdup_n_s32(bit_shift)); const auto add = vdupq_n_s32(bit_shift * 2); const auto mask_v = vdupq_n_u32(mask); #else const uint64_t mask64 = mask * 0x1'0000'0001ull; #endif const T_UINT blocks = kernel_area / out_depth; #pragma omp parallel for for(T_UINT wi = 0; wi < out_height; wi++) for(T_UINT wj = 0; wj < out_width; wj++) #ifdef USE_NEON 
for(T_UINT k = 0; k < out_depth; ++k) { auto tmp = vdupq_n_u32(0); auto shift = shift_ref; for(T_UINT i = 0; i < blocks; i += 2) { T_UINT ki = (k * blocks + i) >> lb_kernel_size; T_UINT kj = (k * blocks + i) & kernel_mask; T_INT row = (wi * stride) + ki; T_INT col = (wj * stride) + kj; const auto in_idx = row * input_width * bits_per_input + col * bits_per_input; const auto in = vld1q_u32(reinterpret_cast<uint32_t*>(input.data() + in_idx)); const auto masked = vandq_u32(mask_v, in); const auto shifted = vshlq_u32(masked, shift); shift += add; tmp |= shifted; } const auto out = vorr_u32(vget_low_u32(tmp), vget_high_u32(tmp)); const auto out_idx = k * out_height * out_width * bits_per_input + wi * out_width * bits_per_input + wj * bits_per_input; vst1_u32(reinterpret_cast<uint32_t*>(output.data() + out_idx), out); } #else for(T_UINT k = 0; k < out_depth; ++k) { uint64_t out = 0; for(T_UINT i = 0; i < blocks; ++i) { T_UINT ki = (k * blocks + i) >> lb_kernel_size; T_UINT kj = (k * blocks + i) & kernel_mask; T_INT row = (wi * stride) + ki; T_INT col = (wj * stride) + kj; const auto in_idx = row * input_width * bits_per_input + col * bits_per_input; const auto in = *reinterpret_cast<uint64_t*>(input.data() + in_idx); out |= (mask64 & in) << (i * bit_shift); } const auto out_idx = k * out_height * out_width * bits_per_input + wi * out_width * bits_per_input + wj * bits_per_input; *reinterpret_cast<uint64_t*>(output.data() + out_idx) = out; } #endif } else { for(T_UINT ih = 0; ih < input_depth; ++ih) for(T_UINT wi = 0; wi < out_height; wi++) for(T_UINT wj = 0; wj < out_width; wj++) for(T_UINT ki = 0; ki < kernel_size; ki++) for(T_UINT kj = 0; kj < kernel_size; kj++) { T_INT row = (wi * stride) + ki; T_INT col = (wj * stride) + kj; const auto ch_high = ih + (ki * kernel_size + kj) * input_depth; const auto out_idx = ch_high * out_height * out_width * bits_per_input + wi * out_width * bits_per_input + wj * bits_per_input; const auto in_idx = ih * input_height * input_width * bits_per_input + row * input_width * bits_per_input + col * bits_per_input; #ifdef USE_NEON const auto in = vld1_u32(reinterpret_cast<uint32_t*>(input.data() + in_idx)); vst1_u32(reinterpret_cast<uint32_t*>(output.data() + out_idx), in); #else *reinterpret_cast<uint64_t*>(output.data() + out_idx) = *reinterpret_cast<uint64_t*>(input.data() + in_idx); #endif } } Measurement::Stop(); } #endif // DLK_FUNC_EXTRACT_IMAGE_PATCHES
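The NHWC overload at the top of this header is a pure gather: output channel kz + (ki*kernel_size + kj)*input_depth at output pixel (wi, wj) is copied from input channel kz at input pixel (wi*stride + ki, wj*stride + kj). Below is a minimal standalone sketch of that index mapping, using plain std::vector instead of TensorView and assuming the usual "valid" output size; the function name and shapes are illustrative only.

#include <cstddef>
#include <vector>

std::vector<float> extract_patches(const std::vector<float>& in,
                                   std::size_t in_h, std::size_t in_w, std::size_t depth,
                                   std::size_t kernel, std::size_t stride) {
  const std::size_t out_h = (in_h - kernel) / stride + 1;   // assumed "valid" sizing
  const std::size_t out_w = (in_w - kernel) / stride + 1;
  const std::size_t out_d = depth * kernel * kernel;
  std::vector<float> out(out_h * out_w * out_d);
  for (std::size_t kz = 0; kz < depth; ++kz)
    for (std::size_t wi = 0; wi < out_h; ++wi)
      for (std::size_t wj = 0; wj < out_w; ++wj)
        for (std::size_t ki = 0; ki < kernel; ++ki)
          for (std::size_t kj = 0; kj < kernel; ++kj) {
            const std::size_t row = wi * stride + ki;
            const std::size_t col = wj * stride + kj;
            const std::size_t ch = kz + (ki * kernel + kj) * depth;  // output channel
            out[(wi * out_w + wj) * out_d + ch] =
                in[(row * in_w + col) * depth + kz];
          }
  return out;
}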
#ifndef DLK_FUNC_EXTRACT_IMAGE_PATCHES #define DLK_FUNC_EXTRACT_IMAGE_PATCHES #include <algorithm> #include "global.h" #include "tensor_view.h" #include "time_measurement.h" #include "pack_input_to_qwords.h" #include <limits.h> #ifdef USE_NEON #include <arm_neon.h> #endif template < typename T > void func_ExtractImagePatches( const TensorView < T, MemoryLayout: :NHWC > &input, const TensorView < T, MemoryLayout: :NHWC > &output, T_UINT kernel_size, T_UINT stride) { Measurement: :Start("ExtractImagePatches"); const auto in_shape = input.get_shape(); const T_UINT input_width = in_shape[2]; const T_UINT input_depth = in_shape[3]; const auto out_shape = output.get_shape(); const T_UINT out_height = out_shape[1]; const T_UINT out_width = out_shape[2]; const T_UINT out_depth = out_shape[3]; for (T_UINT kz = 0; kz < input_depth; ++kz) for (T_UINT wi = 0; wi < out_height; wi++) for (T_UINT wj = 0; wj < out_width; wj++) for (T_UINT ki = 0; ki < kernel_size; ki++) for (T_UINT kj = 0; kj < kernel_size; kj++) { T_INT row = (wi * stride) + ki; T_INT col = (wj * stride) + kj; const auto ch = kz + (ki * kernel_size + kj) * input_depth; const auto out_idx = wi * out_width * out_depth + wj * out_depth + ch; const auto in_idx = row * input_width * input_depth + col * input_depth + kz; output.data()[out_idx] = input.data()[in_idx]; } Measurement: :Stop(); } inline void func_ExtractImagePatches( const TensorView < QUANTIZED_PACKED, MemoryLayout: :HWChBCl > &input, const TensorView < QUANTIZED_PACKED, MemoryLayout: :HWChBCl > &output, T_UINT kernel_size, T_UINT stride) { Measurement: :Start("ExtractImagePatches"); const auto in_shape = input.get_shape(); const T_UINT input_width = in_shape[1]; const T_UINT input_depth = in_shape[2]; const T_UINT bits_per_input = in_shape[3]; const auto out_shape = output.get_shape(); const T_UINT out_height = out_shape[0]; const T_UINT out_width = out_shape[1]; const T_UINT out_depth = out_shape[2]; T_UINT output_index = 0; if (out_depth < kernel_size * kernel_size) { int bit_shift = out_depth * QUANTIZED_PACKED::BitCount / (kernel_size * kernel_size); const QUANTIZED_PACKED::base_t mask((QUANTIZED_PACKED::base_t(1) << bit_shift) - 1); std: : fill(output.data(), output.data() + output.size(), QUANTIZED_PACKED(0)); for (T_UINT wi = 0; wi < out_height; wi++) for (T_UINT wj = 0; wj < out_width; wj++) for (T_UINT ki = 0; ki < kernel_size; ki++) for (T_UINT kj = 0; kj < kernel_size; kj++) { T_INT row = (wi * stride) + ki; T_INT col = (wj * stride) + kj; T_UINT ch = (ki * kernel_size + kj) * bit_shift; T_UINT ch_high = ch / QUANTIZED_PACKED::BitCount; T_UINT ch_low = ch % QUANTIZED_PACKED::BitCount; #ifdef USE_NEON const auto out_idx = wi * out_width * out_depth * bits_per_input + wj * out_depth * bits_per_input + ch_high * bits_per_input; const auto in_idx = row * input_width * input_depth * bits_per_input + col * input_depth * bits_per_input; const auto in = vld1_u32(reinterpret_cast < uint32_t * >(input.data() + in_idx)); const auto masked = vand_u32(vdup_n_u32(mask), in); #ifdef AARCH32 const auto shifted = vshl_u32(masked, vdup_n_s32(ch_low)); #else const auto shifted = vshl_n_u32(masked, ch_low); #endif const auto out_old = vld1_u32(reinterpret_cast < uint32_t * >(output.data() + out_idx)); const auto out_new = vorr_u32(out_old, shifted); vst1_u32(reinterpret_cast < uint32_t * >(output.data() + out_idx), out_new); #else for (T_UINT digit = 0; digit < bits_per_input; ++digit) { const auto out_idx = wi * out_width * out_depth * bits_per_input + wj * out_depth * bits_per_input + 
ch_high * bits_per_input + digit; const auto in_idx = row * input_width * input_depth * bits_per_input + col * input_depth * bits_per_input + digit; output.data()[out_idx] |= QUANTIZED_PACKED((mask & input.data()[in_idx].Raw()) << ch_low); } #endif } } else { for (T_UINT ih = 0; ih < input_depth; ++ih) for (T_UINT wi = 0; wi < out_height; wi++) for (T_UINT wj = 0; wj < out_width; wj++) for (T_UINT ki = 0; ki < kernel_size; ki++) for (T_UINT kj = 0; kj < kernel_size; kj++) { T_INT row = (wi * stride) + ki; T_INT col = (wj * stride) + kj; #ifdef USE_NEON const auto ch_high = ih + (ki * kernel_size + kj) * input_depth; const auto out_idx = wi * out_width * out_depth * bits_per_input + wj * out_depth * bits_per_input + ch_high * bits_per_input; const auto in_idx = row * input_width * input_depth * bits_per_input + col * input_depth * bits_per_input + ih * bits_per_input; const auto in = vld1_u32(reinterpret_cast < uint32_t * >(input.data() + in_idx)); vst1_u32(reinterpret_cast < uint32_t * >(output.data() + out_idx), in); #else for (T_UINT digit = 0; digit < bits_per_input; ++digit) { const auto ch_high = ih + (ki * kernel_size + kj) * input_depth; const auto out_idx = wi * out_width * out_depth * bits_per_input + wj * out_depth * bits_per_input + ch_high * bits_per_input + digit; const auto in_idx = row * input_width * input_depth * bits_per_input + col * input_depth * bits_per_input + ih * bits_per_input + digit; output.data()[out_idx] = input.data()[in_idx]; } #endif } } Measurement: :Stop(); } inline void func_ExtractImagePatches( const TensorView < QUANTIZED_PACKED, MemoryLayout: :ChHWBCl > &input, const TensorView < QUANTIZED_PACKED, MemoryLayout: :ChHWBCl > &output, T_UINT kernel_size, T_UINT stride) { Measurement: :Start("ExtractImagePatches"); const auto in_shape = input.get_shape(); const T_UINT input_height = in_shape[1]; const T_UINT input_width = in_shape[2]; const T_UINT input_depth = in_shape[0]; const T_UINT bits_per_input = in_shape[3]; const auto out_shape = output.get_shape(); const T_UINT out_height = out_shape[1]; const T_UINT out_width = out_shape[2]; const T_UINT out_depth = out_shape[0]; T_UINT output_index = 0; if (out_depth < kernel_size * kernel_size) { const T_UINT kernel_area = kernel_size * kernel_size; const T_UINT bit_shift = out_depth * QUANTIZED_PACKED::BitCount / kernel_area; const QUANTIZED_PACKED::base_t mask((QUANTIZED_PACKED::base_t(1) << bit_shift) - 1); const T_UINT lb_kernel_size = __builtin_ctz(kernel_size); const T_UINT kernel_mask = (1 << lb_kernel_size) - 1; #ifdef USE_NEON const auto shift_ref = vcombine_s32(vdup_n_s32(0), vdup_n_s32(bit_shift)); const auto add = vdupq_n_s32(bit_shift * 2); const auto mask_v = vdupq_n_u32(mask); #else const uint64_t mask64 = mask * 0x1 '0000' 0001ull; #endif const T_UINT blocks = kernel_area / out_depth; for (T_UINT wi = 0; wi < out_height; wi++) for (T_UINT wj = 0; wj < out_width; wj++) #ifdef USE_NEON for (T_UINT k = 0; k < out_depth; ++k) { auto tmp = vdupq_n_u32(0); auto shift = shift_ref; for (T_UINT i = 0; i < blocks; i += 2) { T_UINT ki = (k * blocks + i) >> lb_kernel_size; T_UINT kj = (k * blocks + i) & kernel_mask; T_INT row = (wi * stride) + ki; T_INT col = (wj * stride) + kj; const auto in_idx = row * input_width * bits_per_input + col * bits_per_input; const auto in = vld1q_u32(reinterpret_cast < uint32_t * >(input.data() + in_idx)); const auto masked = vandq_u32(mask_v, in); const auto shifted = vshlq_u32(masked, shift); shift += add; tmp |= shifted; } const auto out = vorr_u32(vget_low_u32(tmp), 
vget_high_u32(tmp)); const auto out_idx = k * out_height * out_width * bits_per_input + wi * out_width * bits_per_input + wj * bits_per_input; vst1_u32(reinterpret_cast < uint32_t * >(output.data() + out_idx), out); } #else for (T_UINT k = 0; k < out_depth; ++k) { uint64_t out = 0; for (T_UINT i = 0; i < blocks; ++i) { T_UINT ki = (k * blocks + i) >> lb_kernel_size; T_UINT kj = (k * blocks + i) & kernel_mask; T_INT row = (wi * stride) + ki; T_INT col = (wj * stride) + kj; const auto in_idx = row * input_width * bits_per_input + col * bits_per_input; const auto in = *reinterpret_cast < uint64_t * >(input.data() + in_idx); out |= (mask64 & in) << (i * bit_shift); } const auto out_idx = k * out_height * out_width * bits_per_input + wi * out_width * bits_per_input + wj * bits_per_input; *reinterpret_cast < uint64_t * >(output.data() + out_idx) = out; } #endif } else { for (T_UINT ih = 0; ih < input_depth; ++ih) for (T_UINT wi = 0; wi < out_height; wi++) for (T_UINT wj = 0; wj < out_width; wj++) for (T_UINT ki = 0; ki < kernel_size; ki++) for (T_UINT kj = 0; kj < kernel_size; kj++) { T_INT row = (wi * stride) + ki; T_INT col = (wj * stride) + kj; const auto ch_high = ih + (ki * kernel_size + kj) * input_depth; const auto out_idx = ch_high * out_height * out_width * bits_per_input + wi * out_width * bits_per_input + wj * bits_per_input; const auto in_idx = ih * input_height * input_width * bits_per_input + row * input_width * bits_per_input + col * bits_per_input; #ifdef USE_NEON const auto in = vld1_u32(reinterpret_cast < uint32_t * >(input.data() + in_idx)); vst1_u32(reinterpret_cast < uint32_t * >(output.data() + out_idx), in); #else *reinterpret_cast < uint64_t * >(output.data() + out_idx) = *reinterpret_cast < uint64_t * >(input.data() + in_idx); #endif } } Measurement: :Stop(); } #endif /* // DLK_FUNC_EXTRACT_IMAGE_PATCHES */
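The original header parallelizes the packed ChHWBCl fast path (out_depth < kernel_size*kernel_size) with a plain "#pragma omp parallel for" on the wi loop; the no_omp_formatted variant above drops that pragma. Each (wi, wj, k) triple writes its own output word, so the loop nest needs no synchronization. A much-simplified standalone sketch of that pattern follows; the element type, dimensions, and the way kernel offsets are folded into the inner loop are hypothetical simplifications, not the header's actual arithmetic.

#include <cstdint>
#include <vector>

// Simplified stand-in for the packed fast path: each (wi, wj, k) triple
// produces one 64-bit output word by OR-ing shifted, masked input words,
// so the outer loop can be a plain OpenMP parallel for.
void pack_patches(std::vector<std::uint64_t>& out, const std::vector<std::uint64_t>& in,
                  int out_h, int out_w, int out_d, int in_w,
                  int stride, int blocks, int bit_shift, std::uint64_t mask) {
  #pragma omp parallel for
  for (int wi = 0; wi < out_h; ++wi)
    for (int wj = 0; wj < out_w; ++wj)
      for (int k = 0; k < out_d; ++k) {
        std::uint64_t word = 0;
        for (int i = 0; i < blocks; ++i) {
          // The real code derives (ki, kj) kernel offsets from (k, i); here the
          // input word is simply taken at the strided pixel plus i.
          const int row = wi * stride;
          const int col = wj * stride + i;
          word |= (mask & in[row * in_w + col]) << (i * bit_shift);
        }
        out[(k * out_h + wi) * out_w + wj] = word;  // one writer per output word
      }
}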
#ifndef DLK_FUNC_EXTRACT_IMAGE_PATCHES #define DLK_FUNC_EXTRACT_IMAGE_PATCHES #include <algorithm> #include "global.h" #include "tensor_view.h" #include "time_measurement.h" #include "pack_input_to_qwords.h" #include <limits.h> #ifdef USE_NEON #include <arm_neon.h> #endif template < typename T > void func_ExtractImagePatches( const TensorView < T, MemoryLayout: :NHWC > &input, const TensorView < T, MemoryLayout: :NHWC > &output, T_UINT kernel_size, T_UINT stride) { Measurement: :Start("ExtractImagePatches"); const auto in_shape = input.get_shape(); const T_UINT input_width = in_shape[2]; const T_UINT input_depth = in_shape[3]; const auto out_shape = output.get_shape(); const T_UINT out_height = out_shape[1]; const T_UINT out_width = out_shape[2]; const T_UINT out_depth = out_shape[3]; for (T_UINT kz = 0; kz < input_depth; ++kz) for (T_UINT wi = 0; wi < out_height; wi++) for (T_UINT wj = 0; wj < out_width; wj++) for (T_UINT ki = 0; ki < kernel_size; ki++) for (T_UINT kj = 0; kj < kernel_size; kj++) { T_INT row = (wi * stride) + ki; T_INT col = (wj * stride) + kj; const auto ch = kz + (ki * kernel_size + kj) * input_depth; const auto out_idx = wi * out_width * out_depth + wj * out_depth + ch; const auto in_idx = row * input_width * input_depth + col * input_depth + kz; output.data()[out_idx] = input.data()[in_idx]; } Measurement: :Stop(); } inline void func_ExtractImagePatches( const TensorView < QUANTIZED_PACKED, MemoryLayout: :HWChBCl > &input, const TensorView < QUANTIZED_PACKED, MemoryLayout: :HWChBCl > &output, T_UINT kernel_size, T_UINT stride) { Measurement: :Start("ExtractImagePatches"); const auto in_shape = input.get_shape(); const T_UINT input_width = in_shape[1]; const T_UINT input_depth = in_shape[2]; const T_UINT bits_per_input = in_shape[3]; const auto out_shape = output.get_shape(); const T_UINT out_height = out_shape[0]; const T_UINT out_width = out_shape[1]; const T_UINT out_depth = out_shape[2]; T_UINT output_index = 0; if (out_depth < kernel_size * kernel_size) { int bit_shift = out_depth * QUANTIZED_PACKED::BitCount / (kernel_size * kernel_size); const QUANTIZED_PACKED::base_t mask((QUANTIZED_PACKED::base_t(1) << bit_shift) - 1); std: : fill(output.data(), output.data() + output.size(), QUANTIZED_PACKED(0)); for (T_UINT wi = 0; wi < out_height; wi++) for (T_UINT wj = 0; wj < out_width; wj++) for (T_UINT ki = 0; ki < kernel_size; ki++) for (T_UINT kj = 0; kj < kernel_size; kj++) { T_INT row = (wi * stride) + ki; T_INT col = (wj * stride) + kj; T_UINT ch = (ki * kernel_size + kj) * bit_shift; T_UINT ch_high = ch / QUANTIZED_PACKED::BitCount; T_UINT ch_low = ch % QUANTIZED_PACKED::BitCount; #ifdef USE_NEON const auto out_idx = wi * out_width * out_depth * bits_per_input + wj * out_depth * bits_per_input + ch_high * bits_per_input; const auto in_idx = row * input_width * input_depth * bits_per_input + col * input_depth * bits_per_input; const auto in = vld1_u32(reinterpret_cast < uint32_t * >(input.data() + in_idx)); const auto masked = vand_u32(vdup_n_u32(mask), in); #ifdef AARCH32 const auto shifted = vshl_u32(masked, vdup_n_s32(ch_low)); #else const auto shifted = vshl_n_u32(masked, ch_low); #endif const auto out_old = vld1_u32(reinterpret_cast < uint32_t * >(output.data() + out_idx)); const auto out_new = vorr_u32(out_old, shifted); vst1_u32(reinterpret_cast < uint32_t * >(output.data() + out_idx), out_new); #else for (T_UINT digit = 0; digit < bits_per_input; ++digit) { const auto out_idx = wi * out_width * out_depth * bits_per_input + wj * out_depth * bits_per_input + 
ch_high * bits_per_input + digit; const auto in_idx = row * input_width * input_depth * bits_per_input + col * input_depth * bits_per_input + digit; output.data()[out_idx] |= QUANTIZED_PACKED((mask & input.data()[in_idx].Raw()) << ch_low); } #endif } } else { for (T_UINT ih = 0; ih < input_depth; ++ih) for (T_UINT wi = 0; wi < out_height; wi++) for (T_UINT wj = 0; wj < out_width; wj++) for (T_UINT ki = 0; ki < kernel_size; ki++) for (T_UINT kj = 0; kj < kernel_size; kj++) { T_INT row = (wi * stride) + ki; T_INT col = (wj * stride) + kj; #ifdef USE_NEON const auto ch_high = ih + (ki * kernel_size + kj) * input_depth; const auto out_idx = wi * out_width * out_depth * bits_per_input + wj * out_depth * bits_per_input + ch_high * bits_per_input; const auto in_idx = row * input_width * input_depth * bits_per_input + col * input_depth * bits_per_input + ih * bits_per_input; const auto in = vld1_u32(reinterpret_cast < uint32_t * >(input.data() + in_idx)); vst1_u32(reinterpret_cast < uint32_t * >(output.data() + out_idx), in); #else for (T_UINT digit = 0; digit < bits_per_input; ++digit) { const auto ch_high = ih + (ki * kernel_size + kj) * input_depth; const auto out_idx = wi * out_width * out_depth * bits_per_input + wj * out_depth * bits_per_input + ch_high * bits_per_input + digit; const auto in_idx = row * input_width * input_depth * bits_per_input + col * input_depth * bits_per_input + ih * bits_per_input + digit; output.data()[out_idx] = input.data()[in_idx]; } #endif } } Measurement: :Stop(); } inline void func_ExtractImagePatches( const TensorView < QUANTIZED_PACKED, MemoryLayout: :ChHWBCl > &input, const TensorView < QUANTIZED_PACKED, MemoryLayout: :ChHWBCl > &output, T_UINT kernel_size, T_UINT stride) { Measurement: :Start("ExtractImagePatches"); const auto in_shape = input.get_shape(); const T_UINT input_height = in_shape[1]; const T_UINT input_width = in_shape[2]; const T_UINT input_depth = in_shape[0]; const T_UINT bits_per_input = in_shape[3]; const auto out_shape = output.get_shape(); const T_UINT out_height = out_shape[1]; const T_UINT out_width = out_shape[2]; const T_UINT out_depth = out_shape[0]; T_UINT output_index = 0; if (out_depth < kernel_size * kernel_size) { const T_UINT kernel_area = kernel_size * kernel_size; const T_UINT bit_shift = out_depth * QUANTIZED_PACKED::BitCount / kernel_area; const QUANTIZED_PACKED::base_t mask((QUANTIZED_PACKED::base_t(1) << bit_shift) - 1); const T_UINT lb_kernel_size = __builtin_ctz(kernel_size); const T_UINT kernel_mask = (1 << lb_kernel_size) - 1; #ifdef USE_NEON const auto shift_ref = vcombine_s32(vdup_n_s32(0), vdup_n_s32(bit_shift)); const auto add = vdupq_n_s32(bit_shift * 2); const auto mask_v = vdupq_n_u32(mask); #else const uint64_t mask64 = mask * 0x1 '0000' 0001ull; #endif const T_UINT blocks = kernel_area / out_depth; #pragma omp parallel for for (T_UINT wi = 0; wi < out_height; wi++) for (T_UINT wj = 0; wj < out_width; wj++) #ifdef USE_NEON for (T_UINT k = 0; k < out_depth; ++k) { auto tmp = vdupq_n_u32(0); auto shift = shift_ref; for (T_UINT i = 0; i < blocks; i += 2) { T_UINT ki = (k * blocks + i) >> lb_kernel_size; T_UINT kj = (k * blocks + i) & kernel_mask; T_INT row = (wi * stride) + ki; T_INT col = (wj * stride) + kj; const auto in_idx = row * input_width * bits_per_input + col * bits_per_input; const auto in = vld1q_u32(reinterpret_cast < uint32_t * >(input.data() + in_idx)); const auto masked = vandq_u32(mask_v, in); const auto shifted = vshlq_u32(masked, shift); shift += add; tmp |= shifted; } const auto out = 
vorr_u32(vget_low_u32(tmp), vget_high_u32(tmp)); const auto out_idx = k * out_height * out_width * bits_per_input + wi * out_width * bits_per_input + wj * bits_per_input; vst1_u32(reinterpret_cast < uint32_t * >(output.data() + out_idx), out); } #else for (T_UINT k = 0; k < out_depth; ++k) { uint64_t out = 0; for (T_UINT i = 0; i < blocks; ++i) { T_UINT ki = (k * blocks + i) >> lb_kernel_size; T_UINT kj = (k * blocks + i) & kernel_mask; T_INT row = (wi * stride) + ki; T_INT col = (wj * stride) + kj; const auto in_idx = row * input_width * bits_per_input + col * bits_per_input; const auto in = *reinterpret_cast < uint64_t * >(input.data() + in_idx); out |= (mask64 & in) << (i * bit_shift); } const auto out_idx = k * out_height * out_width * bits_per_input + wi * out_width * bits_per_input + wj * bits_per_input; *reinterpret_cast < uint64_t * >(output.data() + out_idx) = out; } #endif } else { for (T_UINT ih = 0; ih < input_depth; ++ih) for (T_UINT wi = 0; wi < out_height; wi++) for (T_UINT wj = 0; wj < out_width; wj++) for (T_UINT ki = 0; ki < kernel_size; ki++) for (T_UINT kj = 0; kj < kernel_size; kj++) { T_INT row = (wi * stride) + ki; T_INT col = (wj * stride) + kj; const auto ch_high = ih + (ki * kernel_size + kj) * input_depth; const auto out_idx = ch_high * out_height * out_width * bits_per_input + wi * out_width * bits_per_input + wj * bits_per_input; const auto in_idx = ih * input_height * input_width * bits_per_input + row * input_width * bits_per_input + col * bits_per_input; #ifdef USE_NEON const auto in = vld1_u32(reinterpret_cast < uint32_t * >(input.data() + in_idx)); vst1_u32(reinterpret_cast < uint32_t * >(output.data() + out_idx), in); #else *reinterpret_cast < uint64_t * >(output.data() + out_idx) = *reinterpret_cast < uint64_t * >(input.data() + in_idx); #endif } } Measurement: :Stop(); } #endif /* // DLK_FUNC_EXTRACT_IMAGE_PATCHES */
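The ChHWBCl variants above parallelize the outer output-row loop with #pragma omp parallel for, which is safe because every (wi, wj, k) combination writes a distinct output location. Below is a much-simplified dense sketch of the same patch-extraction loop nest; the plain float data and flat index layout are stand-ins for the TensorView/QUANTIZED_PACKED types, and the unsigned loop counters (mirroring T_UINT) require OpenMP 3.0 or later.

#include <stdio.h>

/* Dense ExtractImagePatches over an H x W single-channel image: each
   (wi, wj) output location gathers a k x k patch at the given stride. */
static void extract_patches(const float *in, float *out,
                            unsigned in_w, unsigned out_h, unsigned out_w,
                            unsigned k, unsigned stride) {
#pragma omp parallel for /* output rows are independent; unsigned counter needs OpenMP >= 3.0 */
    for (unsigned wi = 0; wi < out_h; wi++)
        for (unsigned wj = 0; wj < out_w; wj++)
            for (unsigned ki = 0; ki < k; ki++)
                for (unsigned kj = 0; kj < k; kj++) {
                    unsigned row = wi * stride + ki;
                    unsigned col = wj * stride + kj;
                    out[((wi * out_w + wj) * k + ki) * k + kj] =
                        in[row * in_w + col];
                }
}

int main(void) {
    enum { H = 6, W = 6, K = 2, S = 2, OH = (H - K) / S + 1, OW = (W - K) / S + 1 };
    float in[H * W], out[OH * OW * K * K];
    for (int i = 0; i < H * W; i++) in[i] = (float)i;
    extract_patches(in, out, W, OH, OW, K, S);
    printf("first patch: %g %g %g %g\n", out[0], out[1], out[2], out[3]);
    return 0;
}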
vectors.c
#include <stdio.h> #include <stdlib.h> #include <string.h> #include "timer.h" int g_matrix_n, g_matrix_m, g_num_threads; typedef struct matrix_t { int** values; int* row_size; } matrix_t; void print_matrix(matrix_t*); /* Sum each row of the provided matrix, using the row_size to determine how many items are in a row */ int* sum_rows(matrix_t* matrix) { int *sum_vector = malloc(sizeof(int) * g_matrix_n); if (!sum_vector) { fprintf(stderr, "Failed to malloc sum_vector\n"); return NULL; } #pragma omp parallel for schedule(static) num_threads(g_num_threads) for (int i = 0; i < g_matrix_n; i++) { sum_vector[i] = 0; } #pragma omp parallel for schedule(static) num_threads(g_num_threads) for (int n = 0; n < g_matrix_n; n++) { for (int m = 0; m < matrix->row_size[n]; m++) { sum_vector[n] += matrix->values[n][m]; } } return sum_vector; } /* Create a matrix, either triangular or non triangular and fill it with the appropriate values */ matrix_t* matrix(int is_triangular) { /* Malloc matrix struct */ matrix_t* matrix = malloc(sizeof(matrix_t*)); if (!matrix) { fprintf(stderr, "Failed to malloc struct matrix\n"); return NULL; } /* Malloc matrix values */ matrix->values = malloc(sizeof(int*) * g_matrix_m); if (!matrix->values) { fprintf(stderr, "Failed to malloc matrix\n"); return NULL; } /* Malloc matrix row sizes */ matrix->row_size = malloc(sizeof(int) * g_matrix_n); if (!matrix->row_size) { fprintf(stderr, "Failed to malloc row size\n"); return NULL; } /* Malloc matrix columns */ #pragma omp parallel for schedule(static) num_threads(g_num_threads) for (int i = 0; i < g_matrix_n; i++) { matrix->row_size[i] = g_matrix_n - (i * is_triangular); matrix->values[i] = malloc(sizeof(int) * matrix->row_size[i]); if (!matrix->values[i]) { fprintf(stderr, "Failed to malloc matrix[%d]\n", i); } } /* Matrix[n][m] n = vertical, m = horizontal. eg. Matrix[2][3] is 2nd row (from top) 3rd value. */ /* n is vert size m = hori size */ #pragma omp parallel for schedule(static) num_threads(g_num_threads) for (int n = 0; n < g_matrix_n; n++) { //#pragma omp parallel for schedule(static) num_threads(g_num_threads) for (int m = 0; m < matrix->row_size[n]; m++) { matrix->values[n][m] = n + (m + (g_matrix_m - matrix->row_size[n])); } } return matrix; } matrix_t* init_matrix() { /* Make a normal, non-triangular matrix */ return matrix(0); } matrix_t* init_matrix_triangular() { /* Make a triangular matrix */ return matrix(1); } /* Print a matrix */ void print_matrix(matrix_t* matrix) { for (int n = 0; n < g_matrix_n; n++) { for (int m = 0; m < matrix->row_size[n]; m++) { printf("%d ", matrix->values[n][m]); if (matrix->values[n][m] < 10) { printf(" "); } else if (matrix->values[n][m] < 100) { printf(" "); } else if (matrix->values[n][m] < 1000) { printf(" "); } else if (matrix->values[n][m] < 10000) { printf(" "); } } printf("\n"); } return; } int main(int argc, char* argv[]) { double time; int* sum_vector; /* We allow only square matrices */ g_matrix_n = g_matrix_m = atoi(argv[1]); g_num_threads = atoi(argv[2]); matrix_t* matrix; matrix = init_matrix_triangular(); if (!matrix) { return EXIT_FAILURE; } timer_start(); sum_vector = sum_rows(matrix); if (!sum_vector) { return EXIT_FAILURE; } time = timer_end(); printf("%d, %d, %lf\n", g_matrix_n, g_num_threads, time); /* print_matrix(matrix); */ /* Free this stupid shit */ for (int i = 0; i < g_matrix_n; i++) { free(matrix->values[i]); } free(matrix->values); free(matrix->row_size); free(matrix); free(sum_vector); return EXIT_SUCCESS; }
#include <stdio.h> #include <stdlib.h> #include <string.h> #include "timer.h" int g_matrix_n, g_matrix_m, g_num_threads; typedef struct matrix_t { int **values; int *row_size; } matrix_t; void print_matrix(matrix_t *); /* * Sum each row of the provided matrix, using the row_size to determine how * many items are in a row */ int * sum_rows(matrix_t * matrix) { int *sum_vector = malloc(sizeof(int) * g_matrix_n); if (!sum_vector) { fprintf(stderr, "Failed to malloc sum_vector\n"); return NULL; } for (int i = 0; i < g_matrix_n; i++) { sum_vector[i] = 0; } for (int n = 0; n < g_matrix_n; n++) { for (int m = 0; m < matrix->row_size[n]; m++) { sum_vector[n] += matrix->values[n][m]; } } return sum_vector; } /* * Create a matrix, either triangular or non triangular and fill it with the * appropriate values */ matrix_t * matrix(int is_triangular) { /* Malloc matrix struct */ matrix_t *matrix = malloc(sizeof(matrix_t *)); if (!matrix) { fprintf(stderr, "Failed to malloc struct matrix\n"); return NULL; } /* Malloc matrix values */ matrix->values = malloc(sizeof(int *) * g_matrix_m); if (!matrix->values) { fprintf(stderr, "Failed to malloc matrix\n"); return NULL; } /* Malloc matrix row sizes */ matrix->row_size = malloc(sizeof(int) * g_matrix_n); if (!matrix->row_size) { fprintf(stderr, "Failed to malloc row size\n"); return NULL; } /* Malloc matrix columns */ for (int i = 0; i < g_matrix_n; i++) { matrix->row_size[i] = g_matrix_n - (i * is_triangular); matrix->values[i] = malloc(sizeof(int) * matrix->row_size[i]); if (!matrix->values[i]) { fprintf(stderr, "Failed to malloc matrix[%d]\n", i); } } /* * Matrix[n][m] n = vertical, m = horizontal. eg. Matrix[2][3] is 2nd row * (from top) 3rd value. */ /* n is vert size m = hori size */ for (int n = 0; n < g_matrix_n; n++) { // for (int m = 0; m < matrix->row_size[n]; m++) { matrix->values[n][m] = n + (m + (g_matrix_m - matrix->row_size[n])); } } return matrix; } matrix_t * init_matrix() { /* Make a normal, non-triangular matrix */ return matrix(0); } matrix_t * init_matrix_triangular() { /* Make a triangular matrix */ return matrix(1); } /* Print a matrix */ void print_matrix(matrix_t * matrix) { for (int n = 0; n < g_matrix_n; n++) { for (int m = 0; m < matrix->row_size[n]; m++) { printf("%d ", matrix->values[n][m]); if (matrix->values[n][m] < 10) { printf(" "); } else if (matrix->values[n][m] < 100) { printf(" "); } else if (matrix->values[n][m] < 1000) { printf(" "); } else if (matrix->values[n][m] < 10000) { printf(" "); } } printf("\n"); } return; } int main(int argc, char *argv[]) { double time; int *sum_vector; /* We allow only square matrices */ g_matrix_n = g_matrix_m = atoi(argv[1]); g_num_threads = atoi(argv[2]); matrix_t *matrix; matrix = init_matrix_triangular(); if (!matrix) { return EXIT_FAILURE; } timer_start(); sum_vector = sum_rows(matrix); if (!sum_vector) { return EXIT_FAILURE; } time = timer_end(); printf("%d, %d, %lf\n", g_matrix_n, g_num_threads, time); /* print_matrix(matrix); */ /* Free this stupid shit */ for (int i = 0; i < g_matrix_n; i++) { free(matrix->values[i]); } free(matrix->values); free(matrix->row_size); free(matrix); free(sum_vector); return EXIT_SUCCESS; }
#include <stdio.h> #include <stdlib.h> #include <string.h> #include "timer.h" int g_matrix_n, g_matrix_m, g_num_threads; typedef struct matrix_t { int **values; int *row_size; } matrix_t; void print_matrix(matrix_t *); /* * Sum each row of the provided matrix, using the row_size to determine how * many items are in a row */ int * sum_rows(matrix_t * matrix) { int *sum_vector = malloc(sizeof(int) * g_matrix_n); if (!sum_vector) { fprintf(stderr, "Failed to malloc sum_vector\n"); return NULL; } #pragma omp parallel for schedule(static) num_threads(g_num_threads) for (int i = 0; i < g_matrix_n; i++) { sum_vector[i] = 0; } #pragma omp parallel for schedule(static) num_threads(g_num_threads) for (int n = 0; n < g_matrix_n; n++) { for (int m = 0; m < matrix->row_size[n]; m++) { sum_vector[n] += matrix->values[n][m]; } } return sum_vector; } /* * Create a matrix, either triangular or non triangular and fill it with the * appropriate values */ matrix_t * matrix(int is_triangular) { /* Malloc matrix struct */ matrix_t *matrix = malloc(sizeof(matrix_t *)); if (!matrix) { fprintf(stderr, "Failed to malloc struct matrix\n"); return NULL; } /* Malloc matrix values */ matrix->values = malloc(sizeof(int *) * g_matrix_m); if (!matrix->values) { fprintf(stderr, "Failed to malloc matrix\n"); return NULL; } /* Malloc matrix row sizes */ matrix->row_size = malloc(sizeof(int) * g_matrix_n); if (!matrix->row_size) { fprintf(stderr, "Failed to malloc row size\n"); return NULL; } /* Malloc matrix columns */ #pragma omp parallel for schedule(static) num_threads(g_num_threads) for (int i = 0; i < g_matrix_n; i++) { matrix->row_size[i] = g_matrix_n - (i * is_triangular); matrix->values[i] = malloc(sizeof(int) * matrix->row_size[i]); if (!matrix->values[i]) { fprintf(stderr, "Failed to malloc matrix[%d]\n", i); } } /* * Matrix[n][m] n = vertical, m = horizontal. eg. Matrix[2][3] is 2nd row * (from top) 3rd value. */ /* n is vert size m = hori size */ #pragma omp parallel for schedule(static) num_threads(g_num_threads) for (int n = 0; n < g_matrix_n; n++) { // #pragma omp parallel for schedule(static) num_threads(g_num_threads) for (int m = 0; m < matrix->row_size[n]; m++) { matrix->values[n][m] = n + (m + (g_matrix_m - matrix->row_size[n])); } } return matrix; } matrix_t * init_matrix() { /* Make a normal, non-triangular matrix */ return matrix(0); } matrix_t * init_matrix_triangular() { /* Make a triangular matrix */ return matrix(1); } /* Print a matrix */ void print_matrix(matrix_t * matrix) { for (int n = 0; n < g_matrix_n; n++) { for (int m = 0; m < matrix->row_size[n]; m++) { printf("%d ", matrix->values[n][m]); if (matrix->values[n][m] < 10) { printf(" "); } else if (matrix->values[n][m] < 100) { printf(" "); } else if (matrix->values[n][m] < 1000) { printf(" "); } else if (matrix->values[n][m] < 10000) { printf(" "); } } printf("\n"); } return; } int main(int argc, char *argv[]) { double time; int *sum_vector; /* We allow only square matrices */ g_matrix_n = g_matrix_m = atoi(argv[1]); g_num_threads = atoi(argv[2]); matrix_t *matrix; matrix = init_matrix_triangular(); if (!matrix) { return EXIT_FAILURE; } timer_start(); sum_vector = sum_rows(matrix); if (!sum_vector) { return EXIT_FAILURE; } time = timer_end(); printf("%d, %d, %lf\n", g_matrix_n, g_num_threads, time); /* print_matrix(matrix); */ /* Free this stupid shit */ for (int i = 0; i < g_matrix_n; i++) { free(matrix->values[i]); } free(matrix->values); free(matrix->row_size); free(matrix); free(sum_vector); return EXIT_SUCCESS; }
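In sum_rows above, the parallel loop writes a distinct sum_vector[n] in every iteration, so no reduction clause is needed; a reduction is only required when all iterations update one shared accumulator. A small self-contained example of both cases follows (the matrix size and contents are made up, not taken from vectors.c).

#include <stdio.h>

int main(void) {
    enum { N = 4 };
    int a[N][N] = { {1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}, {13, 14, 15, 16} };
    int sum[N];
    long total = 0;

    /* Row sums: each iteration owns sum[n], so no reduction clause. */
#pragma omp parallel for schedule(static)
    for (int n = 0; n < N; n++) {
        sum[n] = 0;
        for (int m = 0; m < N; m++)
            sum[n] += a[n][m];
    }

    /* Grand total: one shared accumulator, so a reduction is required. */
#pragma omp parallel for reduction(+ : total) schedule(static)
    for (int n = 0; n < N; n++)
        for (int m = 0; m < N; m++)
            total += a[n][m];

    for (int n = 0; n < N; n++) printf("row %d: %d\n", n, sum[n]);
    printf("total: %ld\n", total);
    return 0;
}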
merge_when_user_omp_pragma.c
int main() { int A[10][10]; int i,j; // This pragma used to lead to invalid omp code because we don't detect that an omp pragma already exists #pragma omp parallel for for(i=0; i<10; i++) { for(j=0; j<10; j++) { A[i][j] =0; } } }
int main() { int A[10][10]; int i,j; // This pragma used to lead to invalid omp code because we don't detect that an omp pragma already exists for(i=0; i<10; i++) { for(j=0; j<10; j++) { A[i][j] =0; } } }
int main() { int A[10][10]; int i,j; // This pragma used to lead to invalid omp code because we don't detect that an omp pragma already exists #pragma omp parallel for for(i=0; i<10; i++) { for(j=0; j<10; j++) { A[i][j] =0; } } }
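When both levels of such a nest are meant to be parallelized, the conforming form is a single directive with collapse(2) rather than stacking a second parallel for inside the already-annotated loop. The sketch below illustrates that alternative on the same 10x10 array; it is an illustration only, not output of the tool this test case exercises.

int main() {
    int A[10][10];
    int i, j;
    /* One directive covering both loop levels; the collapsed loop
       variables i and j are implicitly private. */
#pragma omp parallel for collapse(2)
    for (i = 0; i < 10; i++) {
        for (j = 0; j < 10; j++) {
            A[i][j] = 0;
        }
    }
    return 0;
}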
GB_binop__rdiv_uint64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__rdiv_uint64) // A.*B function (eWiseMult): GB (_AemultB_08__rdiv_uint64) // A.*B function (eWiseMult): GB (_AemultB_02__rdiv_uint64) // A.*B function (eWiseMult): GB (_AemultB_04__rdiv_uint64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__rdiv_uint64) // A*D function (colscale): GB (_AxD__rdiv_uint64) // D*A function (rowscale): GB (_DxB__rdiv_uint64) // C+=B function (dense accum): GB (_Cdense_accumB__rdiv_uint64) // C+=b function (dense accum): GB (_Cdense_accumb__rdiv_uint64) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rdiv_uint64) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rdiv_uint64) // C=scalar+B GB (_bind1st__rdiv_uint64) // C=scalar+B' GB (_bind1st_tran__rdiv_uint64) // C=A+scalar GB (_bind2nd__rdiv_uint64) // C=A'+scalar GB (_bind2nd_tran__rdiv_uint64) // C type: uint64_t // A type: uint64_t // A pattern? 0 // B type: uint64_t // B pattern? 0 // BinaryOp: cij = GB_IDIV_UNSIGNED (bij, aij, 64) #define GB_ATYPE \ uint64_t #define GB_BTYPE \ uint64_t #define GB_CTYPE \ uint64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint64_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint64_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_IDIV_UNSIGNED (y, x, 64) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_RDIV || GxB_NO_UINT64 || GxB_NO_RDIV_UINT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__rdiv_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__rdiv_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__rdiv_uint64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__rdiv_uint64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint64_t uint64_t bwork = (*((uint64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__rdiv_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *restrict Cx = (uint64_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__rdiv_uint64) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *restrict Cx = (uint64_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__rdiv_uint64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint64_t alpha_scalar ; uint64_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint64_t 
*) alpha_scalar_in)) ; beta_scalar = (*((uint64_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__rdiv_uint64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__rdiv_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__rdiv_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__rdiv_uint64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__rdiv_uint64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t x = (*((uint64_t *) x_input)) ; uint64_t *Bx = (uint64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint64_t bij = GBX (Bx, p, false) ; Cx [p] = GB_IDIV_UNSIGNED (bij, x, 64) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__rdiv_uint64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t *Ax = (uint64_t *) Ax_input ; uint64_t y = (*((uint64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint64_t aij = GBX (Ax, p, false) ; Cx [p] = GB_IDIV_UNSIGNED (y, aij, 64) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IDIV_UNSIGNED (aij, x, 64) ; \ } GrB_Info GB (_bind1st_tran__rdiv_uint64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t 
*restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t x = (*((const uint64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IDIV_UNSIGNED (y, aij, 64) ; \ } GrB_Info GB (_bind2nd_tran__rdiv_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t y = (*((const uint64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__rdiv_uint64) // A.*B function (eWiseMult): GB (_AemultB_08__rdiv_uint64) // A.*B function (eWiseMult): GB (_AemultB_02__rdiv_uint64) // A.*B function (eWiseMult): GB (_AemultB_04__rdiv_uint64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__rdiv_uint64) // A*D function (colscale): GB (_AxD__rdiv_uint64) // D*A function (rowscale): GB (_DxB__rdiv_uint64) // C+=B function (dense accum): GB (_Cdense_accumB__rdiv_uint64) // C+=b function (dense accum): GB (_Cdense_accumb__rdiv_uint64) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rdiv_uint64) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rdiv_uint64) // C=scalar+B GB (_bind1st__rdiv_uint64) // C=scalar+B' GB (_bind1st_tran__rdiv_uint64) // C=A+scalar GB (_bind2nd__rdiv_uint64) // C=A'+scalar GB (_bind2nd_tran__rdiv_uint64) // C type: uint64_t // A type: uint64_t // A pattern? 0 // B type: uint64_t // B pattern? 0 // BinaryOp: cij = GB_IDIV_UNSIGNED (bij, aij, 64) #define GB_ATYPE \ uint64_t #define GB_BTYPE \ uint64_t #define GB_CTYPE \ uint64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint64_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint64_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_IDIV_UNSIGNED (y, x, 64) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_RDIV || GxB_NO_UINT64 || GxB_NO_RDIV_UINT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__rdiv_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__rdiv_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__rdiv_uint64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__rdiv_uint64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint64_t uint64_t bwork = (*((uint64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__rdiv_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *restrict Cx = (uint64_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__rdiv_uint64) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *restrict Cx = (uint64_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__rdiv_uint64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint64_t alpha_scalar ; uint64_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint64_t 
*) alpha_scalar_in)) ; beta_scalar = (*((uint64_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__rdiv_uint64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__rdiv_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__rdiv_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__rdiv_uint64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__rdiv_uint64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t x = (*((uint64_t *) x_input)) ; uint64_t *Bx = (uint64_t *) Bx_input ; int64_t p ; for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint64_t bij = GBX (Bx, p, false) ; Cx [p] = GB_IDIV_UNSIGNED (bij, x, 64) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__rdiv_uint64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t *Ax = (uint64_t *) Ax_input ; uint64_t y = (*((uint64_t *) y_input)) ; for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint64_t aij = GBX (Ax, p, false) ; Cx [p] = GB_IDIV_UNSIGNED (y, aij, 64) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IDIV_UNSIGNED (aij, x, 64) ; \ } GrB_Info GB (_bind1st_tran__rdiv_uint64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A 
is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t x = (*((const uint64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IDIV_UNSIGNED (y, aij, 64) ; \ } GrB_Info GB (_bind2nd_tran__rdiv_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t y = (*((const uint64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__rdiv_uint64) // A.*B function (eWiseMult): GB (_AemultB_08__rdiv_uint64) // A.*B function (eWiseMult): GB (_AemultB_02__rdiv_uint64) // A.*B function (eWiseMult): GB (_AemultB_04__rdiv_uint64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__rdiv_uint64) // A*D function (colscale): GB (_AxD__rdiv_uint64) // D*A function (rowscale): GB (_DxB__rdiv_uint64) // C+=B function (dense accum): GB (_Cdense_accumB__rdiv_uint64) // C+=b function (dense accum): GB (_Cdense_accumb__rdiv_uint64) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rdiv_uint64) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rdiv_uint64) // C=scalar+B GB (_bind1st__rdiv_uint64) // C=scalar+B' GB (_bind1st_tran__rdiv_uint64) // C=A+scalar GB (_bind2nd__rdiv_uint64) // C=A'+scalar GB (_bind2nd_tran__rdiv_uint64) // C type: uint64_t // A type: uint64_t // A pattern? 0 // B type: uint64_t // B pattern? 0 // BinaryOp: cij = GB_IDIV_UNSIGNED (bij, aij, 64) #define GB_ATYPE \ uint64_t #define GB_BTYPE \ uint64_t #define GB_CTYPE \ uint64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint64_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint64_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_IDIV_UNSIGNED (y, x, 64) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_RDIV || GxB_NO_UINT64 || GxB_NO_RDIV_UINT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__rdiv_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__rdiv_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__rdiv_uint64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__rdiv_uint64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint64_t uint64_t bwork = (*((uint64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__rdiv_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *restrict Cx = (uint64_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__rdiv_uint64) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *restrict Cx = (uint64_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__rdiv_uint64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint64_t alpha_scalar ; uint64_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint64_t 
*) alpha_scalar_in)) ; beta_scalar = (*((uint64_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__rdiv_uint64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__rdiv_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__rdiv_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__rdiv_uint64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__rdiv_uint64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t x = (*((uint64_t *) x_input)) ; uint64_t *Bx = (uint64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint64_t bij = GBX (Bx, p, false) ; Cx [p] = GB_IDIV_UNSIGNED (bij, x, 64) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__rdiv_uint64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t *Ax = (uint64_t *) Ax_input ; uint64_t y = (*((uint64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint64_t aij = GBX (Ax, p, false) ; Cx [p] = GB_IDIV_UNSIGNED (y, aij, 64) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IDIV_UNSIGNED (aij, x, 64) ; \ } GrB_Info GB (_bind1st_tran__rdiv_uint64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t 
*restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t x = (*((const uint64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IDIV_UNSIGNED (y, aij, 64) ; \ } GrB_Info GB (_bind2nd_tran__rdiv_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t y = (*((const uint64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
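The two bind kernels above differ only in which operand the bound scalar takes: for rdiv, which computes z = y / x, the _bind1st kernel evaluates Cx [p] = Bx [p] / x while _bind2nd evaluates Cx [p] = y / Ax [p]. A minimal standalone sketch of that operand ordering follows; rdiv_u64 is a hypothetical stand-in (not the GraphBLAS API), and its divide-by-zero guard is chosen for this sketch only rather than taken from GB_IDIV_UNSIGNED.
#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>
/* hypothetical stand-in for the generated kernels, not part of GraphBLAS */
static uint64_t rdiv_u64 (uint64_t x, uint64_t y)   /* z = y / x */
{
    return ((x == 0) ? UINT64_MAX : (y / x)) ;      /* zero guard chosen for this sketch only */
}
int main (void)
{
    uint64_t bij = 20, s = 5 ;
    /* bind1st binds the scalar as x, so Cx [p] = Bx [p] / s, as in GB (_bind1st__rdiv_uint64) above */
    printf ("bind1st: %" PRIu64 "\n", rdiv_u64 (s, bij)) ;
    /* bind2nd binds the scalar as y, so Cx [p] = s / Ax [p], as in GB (_bind2nd__rdiv_uint64) above */
    printf ("bind2nd: %" PRIu64 "\n", rdiv_u64 (bij, s)) ;
    return (0) ;
}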
main_omp.c
// // main_serial.cc // Non-parallelized version of Mandelbrot set calculation // Created by Joshua Wallace on 2/12/15. // #include <stdio.h> #include <stdlib.h> #include <complex.h> #include <omp.h> /*The following function computes z_{n+1} given z_n and the real and imaginary component of c*/ double complex next_z(double complex z, double c_real, double c_img) { return z*z + c_real + c_img*I; } /*The following function is called if the wrong number of command line arguments is given*/ void intro_message(char* name) { printf("Wrong number of command line arguments given.\n"); printf("usage: %s number_of_threads\n",name); exit(1); } int main(int argc, char **argv) { if(argc!=2) //If not the right number of command line arguments given { intro_message(argv[0]); } if(atoi(argv[1])<1) //If the number of threads is set to be less than 1 { printf("You set the number of threads to be %d, which is less than 1\n",atoi(argv[1])); printf("Now quitting...\n"); exit(1); } double start_time = omp_get_wtime(); //Begin timing double realbounds[2] = {-2.0,0.5}; //Boundaries on real axis double imgbounds[2] = {-2,2}; //Boundaries on imaginary axis int resolution = 10000; //Number of resolution elements per axis int MAX_ITER = 1000; //Maximum number of iterations before //declaring "final" convergence double value_to_check_against = 4.0; //Value to check against for divergence //I use 4.0 since this is 2.0 squared int i,j,k; //indices int **divergence_array = (int **)malloc(resolution*sizeof(int *)); //Above is a pointer to pointer, to dynamically //allocate an array as follows for(i=0;i<resolution;i++) { divergence_array[i]=malloc(resolution*sizeof(int)); } //An array used to store the number of iterations //before a specific point on the grid diverges. //I set the array to be allocated dynamically so //that the resolution can be changed easily //in the code if desired. omp_set_num_threads(atoi(argv[1])); //Set the number of threads #pragma omp parallel for private(i,j,k) for(i=0;i<resolution;i++) //For each value on real axis { //First, calculate position on real axis double realposition = (realbounds[1] - realbounds[0])/resolution * (i+.5) + realbounds[0]; for(j=0;j<resolution;j++) //For each value on imaginary axis { //First, calculate position on imaginary axis double imgposition = (imgbounds[1] - imgbounds[0])/resolution * (j+.5) + imgbounds[0]; double complex z=0 + 0*I; //z is our z_n and here we initalize it to 0 + 0*I for(k=0;k<MAX_ITER;k++) { z = next_z(z,realposition,imgposition);//Calculate z_{n+1} //Then, check if result has diverged if( (creal(z)*creal(z) + cimag(z)*cimag(z)) > value_to_check_against) { break; //If result has diverged, break out of loop. We have our value. } } if(k==MAX_ITER) //If the value did not diverge after MAX_ITER //iterations, the final k value will be incremented one last //time by the for loop before it terminates. This corrects this. { k--; } //printf("Thread: %d, i: %d, j: %d\n",omp_get_thread_num(),i,j); divergence_array[i][j]=k; //Give number of iterations until divergence to // divergence_array } } if(0) //Allows me to turn output on and off: 0 is off, non-zero on { FILE *output; //FILE pointer to output file //Make 'output.dat' the output file and double check that it opened. if(!(output=fopen("output.dat","w"))) { //If file isn't open, let us know printf("File was unable to open! Didn't work. Sorry.\n"); } else //If file did open, write to it. 
Real axis is written row-wise, imaginary column-wise { fprintf(output,"#Mandelbrot set data\n"); fprintf(output,"#Columns imaginary axis, rows real axis\n"); fprintf(output,"#Number: one less than number of iterations before diverging. Max: %d\n",MAX_ITER-1); for(i=0;i<resolution;i++) //For all values on real axis { for(j=0;j<resolution;j++) //For all values on imaginary axis { fprintf(output,"%d ",divergence_array[i][j]); } fprintf(output,"\n"); } fclose(output); } }//closes if() statement //Free divergence_array for(i=0;i<resolution;i++) { free(divergence_array[i]); } free(divergence_array); //Print how long the code took. printf("threads: %d time in seconds: %e\n",atoi(argv[1]),omp_get_wtime()-start_time); return 0; }
// // main_serial.cc // Non-parallelized version of Mandelbrot set calculation // Created by Joshua Wallace on 2/12/15. // #include <stdio.h> #include <stdlib.h> #include <complex.h> #include <omp.h> /*The following function computes z_{n+1} given z_n and the real and imaginary component of c*/ double complex next_z(double complex z, double c_real, double c_img) { return z*z + c_real + c_img*I; } /*The following function is called if the wrong number of command line arguments is given*/ void intro_message(char* name) { printf("Wrong number of command line arguments given.\n"); printf("usage: %s number_of_threads\n",name); exit(1); } int main(int argc, char **argv) { if(argc!=2) //If not the right number of command line arguments given { intro_message(argv[0]); } if(atoi(argv[1])<1) //If the number of threads is set to be less than 1 { printf("You set the number of threads to be %d, which is less than 1\n",atoi(argv[1])); printf("Now quitting...\n"); exit(1); } double start_time = omp_get_wtime(); //Begin timing double realbounds[2] = {-2.0,0.5}; //Boundaries on real axis double imgbounds[2] = {-2,2}; //Boundaries on imaginary axis int resolution = 10000; //Number of resolution elements per axis int MAX_ITER = 1000; //Maximum number of iterations before //declaring "final" convergence double value_to_check_against = 4.0; //Value to check against for divergence //I use 4.0 since this is 2.0 squared int i,j,k; //indices int **divergence_array = (int **)malloc(resolution*sizeof(int *)); //Above is a pointer to pointer, to dynamically //allocate an array as follows for(i=0;i<resolution;i++) { divergence_array[i]=malloc(resolution*sizeof(int)); } //An array used to store the number of iterations //before a specific point on the grid diverges. //I set the array to be allocated dynamically so //that the resolution can be changed easily //in the code if desired. omp_set_num_threads(atoi(argv[1])); //Set the number of threads for(i=0;i<resolution;i++) //For each value on real axis { //First, calculate position on real axis double realposition = (realbounds[1] - realbounds[0])/resolution * (i+.5) + realbounds[0]; for(j=0;j<resolution;j++) //For each value on imaginary axis { //First, calculate position on imaginary axis double imgposition = (imgbounds[1] - imgbounds[0])/resolution * (j+.5) + imgbounds[0]; double complex z=0 + 0*I; //z is our z_n and here we initalize it to 0 + 0*I for(k=0;k<MAX_ITER;k++) { z = next_z(z,realposition,imgposition);//Calculate z_{n+1} //Then, check if result has diverged if( (creal(z)*creal(z) + cimag(z)*cimag(z)) > value_to_check_against) { break; //If result has diverged, break out of loop. We have our value. } } if(k==MAX_ITER) //If the value did not diverge after MAX_ITER //iterations, the final k value will be incremented one last //time by the for loop before it terminates. This corrects this. { k--; } //printf("Thread: %d, i: %d, j: %d\n",omp_get_thread_num(),i,j); divergence_array[i][j]=k; //Give number of iterations until divergence to // divergence_array } } if(0) //Allows me to turn output on and off: 0 is off, non-zero on { FILE *output; //FILE pointer to output file //Make 'output.dat' the output file and double check that it opened. if(!(output=fopen("output.dat","w"))) { //If file isn't open, let us know printf("File was unable to open! Didn't work. Sorry.\n"); } else //If file did open, write to it. 
Real axis is written row-wise, imaginary column-wise { fprintf(output,"#Mandelbrot set data\n"); fprintf(output,"#Columns imaginary axis, rows real axis\n"); fprintf(output,"#Number: one less than number of iterations before diverging. Max: %d\n",MAX_ITER-1); for(i=0;i<resolution;i++) //For all values on real axis { for(j=0;j<resolution;j++) //For all values on imaginary axis { fprintf(output,"%d ",divergence_array[i][j]); } fprintf(output,"\n"); } fclose(output); } }//closes if() statement //Free divergence_array for(i=0;i<resolution;i++) { free(divergence_array[i]); } free(divergence_array); //Print how long the code took. printf("threads: %d time in seconds: %e\n",atoi(argv[1]),omp_get_wtime()-start_time); return 0; }
// // main_serial.cc // Non-parallelized version of Mandelbrot set calculation // Created by Joshua Wallace on 2/12/15. // #include <stdio.h> #include <stdlib.h> #include <complex.h> #include <omp.h> /*The following function computes z_{n+1} given z_n and the real and imaginary component of c*/ double complex next_z(double complex z, double c_real, double c_img) { return z*z + c_real + c_img*I; } /*The following function is called if the wrong number of command line arguments is given*/ void intro_message(char* name) { printf("Wrong number of command line arguments given.\n"); printf("usage: %s number_of_threads\n",name); exit(1); } int main(int argc, char **argv) { if(argc!=2) //If not the right number of command line arguments given { intro_message(argv[0]); } if(atoi(argv[1])<1) //If the number of threads is set to be less than 1 { printf("You set the number of threads to be %d, which is less than 1\n",atoi(argv[1])); printf("Now quitting...\n"); exit(1); } double start_time = omp_get_wtime(); //Begin timing double realbounds[2] = {-2.0,0.5}; //Boundaries on real axis double imgbounds[2] = {-2,2}; //Boundaries on imaginary axis int resolution = 10000; //Number of resolution elements per axis int MAX_ITER = 1000; //Maximum number of iterations before //declaring "final" convergence double value_to_check_against = 4.0; //Value to check against for divergence //I use 4.0 since this is 2.0 squared int i,j,k; //indices int **divergence_array = (int **)malloc(resolution*sizeof(int *)); //Above is a pointer to pointer, to dynamically //allocate an array as follows for(i=0;i<resolution;i++) { divergence_array[i]=malloc(resolution*sizeof(int)); } //An array used to store the number of iterations //before a specific point on the grid diverges. //I set the array to be allocated dynamically so //that the resolution can be changed easily //in the code if desired. omp_set_num_threads(atoi(argv[1])); //Set the number of threads #pragma omp parallel for private(i,j,k) for(i=0;i<resolution;i++) //For each value on real axis { //First, calculate position on real axis double realposition = (realbounds[1] - realbounds[0])/resolution * (i+.5) + realbounds[0]; for(j=0;j<resolution;j++) //For each value on imaginary axis { //First, calculate position on imaginary axis double imgposition = (imgbounds[1] - imgbounds[0])/resolution * (j+.5) + imgbounds[0]; double complex z=0 + 0*I; //z is our z_n and here we initalize it to 0 + 0*I for(k=0;k<MAX_ITER;k++) { z = next_z(z,realposition,imgposition);//Calculate z_{n+1} //Then, check if result has diverged if( (creal(z)*creal(z) + cimag(z)*cimag(z)) > value_to_check_against) { break; //If result has diverged, break out of loop. We have our value. } } if(k==MAX_ITER) //If the value did not diverge after MAX_ITER //iterations, the final k value will be incremented one last //time by the for loop before it terminates. This corrects this. { k--; } //printf("Thread: %d, i: %d, j: %d\n",omp_get_thread_num(),i,j); divergence_array[i][j]=k; //Give number of iterations until divergence to // divergence_array } } if(0) //Allows me to turn output on and off: 0 is off, non-zero on { FILE *output; //FILE pointer to output file //Make 'output.dat' the output file and double check that it opened. if(!(output=fopen("output.dat","w"))) { //If file isn't open, let us know printf("File was unable to open! Didn't work. Sorry.\n"); } else //If file did open, write to it. 
Real axis is written row-wise, imaginary column-wise { fprintf(output,"#Mandelbrot set data\n"); fprintf(output,"#Columns imaginary axis, rows real axis\n"); fprintf(output,"#Number: one less than number of iterations before diverging. Max: %d\n",MAX_ITER-1); for(i=0;i<resolution;i++) //For all values on real axis { for(j=0;j<resolution;j++) //For all values on imaginary axis { fprintf(output,"%d ",divergence_array[i][j]); } fprintf(output,"\n"); } fclose(output); } }//closes if() statement //Free divergence_array for(i=0;i<resolution;i++) { free(divergence_array[i]); } free(divergence_array); //Print how long the code took. printf("threads: %d time in seconds: %e\n",atoi(argv[1]),omp_get_wtime()-start_time); return 0; }
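In the OpenMP version above, iterations of the outer loop do very uneven amounts of work: rows that pass near the body of the set run close to MAX_ITER iterations per pixel, while edge rows escape almost immediately, so the default static schedule can leave threads idle. A minimal standalone sketch of the same computation with rows handed out on demand; this is not the file itself, and the resolution, bounds helper, and chunk size are illustrative choices.
/* sketch only: same recurrence as next_z(), dynamic row scheduling for the
   unbalanced outer loop; compile with gcc -fopenmp */
#include <stdio.h>
#include <complex.h>
#include <omp.h>
static int escape_count(double cr, double ci, int max_iter)
{
    double complex z = 0;
    int k;
    for (k = 0; k < max_iter; k++) {
        z = z*z + cr + ci*I;                       /* z_{n+1} = z_n^2 + c */
        if (creal(z)*creal(z) + cimag(z)*cimag(z) > 4.0) break;
    }
    return k < max_iter ? k : max_iter - 1;        /* mirrors the k-- correction above */
}
int main(void)
{
    enum { RES = 512 };
    static int img[RES][RES];
    double t0 = omp_get_wtime();
    /* rows near the set cost ~max_iter iterations, edge rows are cheap, so
       hand rows out a few at a time instead of one fixed block per thread */
    #pragma omp parallel for schedule(dynamic, 4)
    for (int i = 0; i < RES; i++)
        for (int j = 0; j < RES; j++) {
            double cr = -2.0 + 2.5 * (i + 0.5) / RES;   /* real axis in [-2, 0.5] */
            double ci = -2.0 + 4.0 * (j + 0.5) / RES;   /* imaginary axis in [-2, 2] */
            img[i][j] = escape_count(cr, ci, 1000);
        }
    printf("time: %e s, sample: %d\n", omp_get_wtime() - t0, img[RES/2][RES/2]);
    return 0;
}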
ejercicio7.c
/*Para compilar usar (-lrt: real time library): gcc -O2 Sumavectores.c -o SumaVectores -lrt Para ejecutar use: SumaVectores longitud */ #include <stdlib.h> #include <stdio.h> #include <omp.h> #include <time.h> #define PRINTF_ALL //Sólo puede estar definida una de las tres constantes VECTOR_ (sólo uno de los ... //tres defines siguientes puede estar descomentado): //#define VECTOR_LOCAL // descomentar para que los vectores sean variables ... // locales (si se supera el tamaño de la pila se ... // generará el error "Violación de Segmento") //#define VECTOR_GLOBAL // descomentar para que los vectores sean variables ... // globales (su longitud no estará limitada por el ... // tamaño de la pila del programa) #define VECTOR_DYNAMIC //descomentar para que los vectores sean variables ... //dinámicas (memoria reautilizable durante la ejecución) #ifdef VECTOR_GLOBAL #define MAX 33554432 double v1[MAX], v2[MAX], v3[MAX]; #endif int main(int argc, char** argv){ int i; struct timespec cgt1,cgt2; double ncgt; //para tiempo de ejecución if(argc<2){ printf("Faltan nº componentes del vector\n"); exit(-1); } unsigned int N=atoi(argv[1]); #ifdef VECTOR_LOCAL double v1[N], v2[N], v3[N]; #endif #ifdef VECTOR_GLOBAL if(N>MAX) N=MAX; #endif #ifdef VECTOR_DYNAMIC double *v1, *v2, *v3; v1= (double*) malloc(N*sizeof(double)); v2= (double*) malloc(N*sizeof(double)); v3= (double*) malloc(N*sizeof(double)); if((v1==NULL) || (v2==NULL) || (v3==NULL)){ printf("Error en la reserva de espacio para los vectores\n"); exit(-2); } #endif //Inicializar vectores #pragma omp parallel { #pragma omp for for(i=0;i<N;i++){ v1[i]= N*0.1+i*0.1; v2[i]=N*0.1-i*0.1; //los valores dependen de N } } //clock_gettime(CLOCK_REALTIME,&cgt1); double a= omp_get_wtime(); //Calcular suma de vectores #pragma omp parallel for for(i=0;i<N;i++) v3[i]=v1[i] + v2[i]; //clock_gettime(CLOCK_REALTIME,&cgt2); ncgt=(double) (cgt2.tv_sec-cgt1.tv_sec)+ (double) ((cgt2.tv_nsec-cgt1.tv_nsec)/(1.e+9)); //Imprimir resultado de la suma y el tiempo de ejecución #ifdef PRINTF_ALL printf("Tiempo(seg.): %11.9f\t / Tamaño Vectores:%u\n",omp_get_wtime()/*ncgt*/,N); for(i=0;i<N;i++){ printf("thread %d ejecuta la iteración %d del bucle\n",omp_get_thread_num(),i); printf("Elapsed time: %11.9f\t\n",omp_get_wtime()-a); printf("/V1[%d]+V2[%d](%8.6f+%8.6f=%8.6f)/\n", i,i,i,v1[i],v2[i],v3[i]); } #else printf("Tiempo: %11.9f\t / Tamaño Vectores:%u\n", ncgt,N,v1[0],v2[0],v3[0],N-1,N-1,N-1,v1[N-1],v2[N-1],v3[N-1]); #endif #ifdef VECTOR_DYNAMIC free(v1); //libera el espacio reservado para v1 free(v2); //libera el espacio reservado para v2 free(v3); //libera el espacio reservado para v3 #endif return 0; }
/*Para compilar usar (-lrt: real time library): gcc -O2 Sumavectores.c -o SumaVectores -lrt Para ejecutar use: SumaVectores longitud */ #include <stdlib.h> #include <stdio.h> #include <omp.h> #include <time.h> #define PRINTF_ALL //Sólo puede estar definida una de las tres constantes VECTOR_ (sólo uno de los ... //tres defines siguientes puede estar descomentado): //#define VECTOR_LOCAL // descomentar para que los vectores sean variables ... // locales (si se supera el tamaño de la pila se ... // generará el error "Violación de Segmento") //#define VECTOR_GLOBAL // descomentar para que los vectores sean variables ... // globales (su longitud no estará limitada por el ... // tamaño de la pila del programa) #define VECTOR_DYNAMIC //descomentar para que los vectores sean variables ... //dinámicas (memoria reautilizable durante la ejecución) #ifdef VECTOR_GLOBAL #define MAX 33554432 double v1[MAX], v2[MAX], v3[MAX]; #endif int main(int argc, char** argv){ int i; struct timespec cgt1,cgt2; double ncgt; //para tiempo de ejecución if(argc<2){ printf("Faltan nº componentes del vector\n"); exit(-1); } unsigned int N=atoi(argv[1]); #ifdef VECTOR_LOCAL double v1[N], v2[N], v3[N]; #endif #ifdef VECTOR_GLOBAL if(N>MAX) N=MAX; #endif #ifdef VECTOR_DYNAMIC double *v1, *v2, *v3; v1= (double*) malloc(N*sizeof(double)); v2= (double*) malloc(N*sizeof(double)); v3= (double*) malloc(N*sizeof(double)); if((v1==NULL) || (v2==NULL) || (v3==NULL)){ printf("Error en la reserva de espacio para los vectores\n"); exit(-2); } #endif //Inicializar vectores for(i=0;i<N;i++){ v1[i]= N*0.1+i*0.1; v2[i]=N*0.1-i*0.1; //los valores dependen de N } //clock_gettime(CLOCK_REALTIME,&cgt1); double a= omp_get_wtime(); //Calcular suma de vectores for(i=0;i<N;i++) v3[i]=v1[i] + v2[i]; //clock_gettime(CLOCK_REALTIME,&cgt2); ncgt=(double) (cgt2.tv_sec-cgt1.tv_sec)+ (double) ((cgt2.tv_nsec-cgt1.tv_nsec)/(1.e+9)); //Imprimir resultado de la suma y el tiempo de ejecución #ifdef PRINTF_ALL printf("Tiempo(seg.): %11.9f\t / Tamaño Vectores:%u\n",omp_get_wtime()/*ncgt*/,N); for(i=0;i<N;i++){ printf("thread %d ejecuta la iteración %d del bucle\n",omp_get_thread_num(),i); printf("Elapsed time: %11.9f\t\n",omp_get_wtime()-a); printf("/V1[%d]+V2[%d](%8.6f+%8.6f=%8.6f)/\n", i,i,i,v1[i],v2[i],v3[i]); } #else printf("Tiempo: %11.9f\t / Tamaño Vectores:%u\n", ncgt,N,v1[0],v2[0],v3[0],N-1,N-1,N-1,v1[N-1],v2[N-1],v3[N-1]); #endif #ifdef VECTOR_DYNAMIC free(v1); //libera el espacio reservado para v1 free(v2); //libera el espacio reservado para v2 free(v3); //libera el espacio reservado para v3 #endif return 0; }
/*Para compilar usar (-lrt: real time library): gcc -O2 Sumavectores.c -o SumaVectores -lrt Para ejecutar use: SumaVectores longitud */ #include <stdlib.h> #include <stdio.h> #include <omp.h> #include <time.h> #define PRINTF_ALL //Sólo puede estar definida una de las tres constantes VECTOR_ (sólo uno de los ... //tres defines siguientes puede estar descomentado): //#define VECTOR_LOCAL // descomentar para que los vectores sean variables ... // locales (si se supera el tamaño de la pila se ... // generará el error "Violación de Segmento") //#define VECTOR_GLOBAL // descomentar para que los vectores sean variables ... // globales (su longitud no estará limitada por el ... // tamaño de la pila del programa) #define VECTOR_DYNAMIC //descomentar para que los vectores sean variables ... //dinámicas (memoria reautilizable durante la ejecución) #ifdef VECTOR_GLOBAL #define MAX 33554432 double v1[MAX], v2[MAX], v3[MAX]; #endif int main(int argc, char** argv){ int i; struct timespec cgt1,cgt2; double ncgt; //para tiempo de ejecución if(argc<2){ printf("Faltan nº componentes del vector\n"); exit(-1); } unsigned int N=atoi(argv[1]); #ifdef VECTOR_LOCAL double v1[N], v2[N], v3[N]; #endif #ifdef VECTOR_GLOBAL if(N>MAX) N=MAX; #endif #ifdef VECTOR_DYNAMIC double *v1, *v2, *v3; v1= (double*) malloc(N*sizeof(double)); v2= (double*) malloc(N*sizeof(double)); v3= (double*) malloc(N*sizeof(double)); if((v1==NULL) || (v2==NULL) || (v3==NULL)){ printf("Error en la reserva de espacio para los vectores\n"); exit(-2); } #endif //Inicializar vectores #pragma omp parallel { #pragma omp for for(i=0;i<N;i++){ v1[i]= N*0.1+i*0.1; v2[i]=N*0.1-i*0.1; //los valores dependen de N } } //clock_gettime(CLOCK_REALTIME,&cgt1); double a= omp_get_wtime(); //Calcular suma de vectores #pragma omp parallel for for(i=0;i<N;i++) v3[i]=v1[i] + v2[i]; //clock_gettime(CLOCK_REALTIME,&cgt2); ncgt=(double) (cgt2.tv_sec-cgt1.tv_sec)+ (double) ((cgt2.tv_nsec-cgt1.tv_nsec)/(1.e+9)); //Imprimir resultado de la suma y el tiempo de ejecución #ifdef PRINTF_ALL printf("Tiempo(seg.): %11.9f\t / Tamaño Vectores:%u\n",omp_get_wtime()/*ncgt*/,N); for(i=0;i<N;i++){ printf("thread %d ejecuta la iteración %d del bucle\n",omp_get_thread_num(),i); printf("Elapsed time: %11.9f\t\n",omp_get_wtime()-a); printf("/V1[%d]+V2[%d](%8.6f+%8.6f=%8.6f)/\n", i,i,i,v1[i],v2[i],v3[i]); } #else printf("Tiempo: %11.9f\t / Tamaño Vectores:%u\n", ncgt,N,v1[0],v2[0],v3[0],N-1,N-1,N-1,v1[N-1],v2[N-1],v3[N-1]); #endif #ifdef VECTOR_DYNAMIC free(v1); //libera el espacio reservado para v1 free(v2); //libera el espacio reservado para v2 free(v3); //libera el espacio reservado para v3 #endif return 0; }
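In the file above, the clock_gettime() calls are commented out but ncgt is still computed from the uninitialized cgt1/cgt2, and the PRINTF_ALL branch prints omp_get_wtime() directly, which is an absolute timestamp rather than an elapsed time. A minimal sketch of the start/stop pattern the code appears to intend, keeping the v1/v2/v3 names; the default vector length and output wording are my own choices.
/* sketch: measure only the vector sum, pairing omp_get_wtime() calls */
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
int main(int argc, char **argv)
{
    unsigned int N = (argc > 1) ? (unsigned int) atoi(argv[1]) : 1u << 20;
    double *v1 = malloc(N * sizeof *v1), *v2 = malloc(N * sizeof *v2), *v3 = malloc(N * sizeof *v3);
    if (!v1 || !v2 || !v3) { fprintf(stderr, "allocation failed\n"); return 2; }
    #pragma omp parallel for
    for (long i = 0; i < (long) N; i++) { v1[i] = N*0.1 + i*0.1; v2[i] = N*0.1 - i*0.1; }
    double t0 = omp_get_wtime();            /* start */
    #pragma omp parallel for
    for (long i = 0; i < (long) N; i++) v3[i] = v1[i] + v2[i];
    double elapsed = omp_get_wtime() - t0;  /* stop: elapsed seconds, not a timestamp */
    printf("Time (s): %11.9f / vector length: %u\n", elapsed, N);
    printf("v3[0] = %8.6f, v3[%u] = %8.6f\n", v3[0], N - 1, v3[N - 1]);
    free(v1); free(v2); free(v3);
    return 0;
}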
BaseFunc.h
bool IsPolarized(double &Polarization) { if (Polarization==-1.){return false;} else {return true;} } void PolarizationTerm(uint ThetaLength, double * ThetaPtr, double * CosTerm, double * SinTerm, bool & Polarized) { if (Polarized==true) { for (uint t = 0; t < ThetaLength; t++) { CosTerm[t] = cos(Polarization + ThetaPtr[t]) ; SinTerm[t] = sin(Polarization + ThetaPtr[t]) ; } } else { const double term = 1./sqrt(2); for (uint t = 0; t < ThetaLength; t++) { CosTerm[t] = term ; SinTerm[t] = term ; } } } void Unstructured(uint ThetaLength, uint PhiLength, complex128 *array0, double *array1, complex128 scalar, complex128 *output) { /* index by p instead of bumping a shared pointer, which would race across threads; p is the loop variable and must not appear in the shared clause */ #pragma omp parallel for default(none) shared(output, scalar, array0, array1, PhiLength) for (uint p=0; p < PhiLength; p++ ) { output[p] = scalar * array0[p] * array1[p]; } } void Structured(uint ThetaLength, uint PhiLength, complex128 *array0, double *array1, complex128 scalar, complex128 *output) { for (uint p=0; p < PhiLength; p++ ) { for (uint t=0; t < ThetaLength; t++ ) { *output = scalar * array0[p] * array1[t]; output++; } } } // -
bool IsPolarized(double &Polarization) { if (Polarization == -1.) { return false; } else { return true; } } void PolarizationTerm(uint ThetaLength, double *ThetaPtr, double *CosTerm, double *SinTerm, bool & Polarized) { if (Polarized == true) { for (uint t = 0; t < ThetaLength; t++) { CosTerm[t] = cos(Polarization + ThetaPtr[t]); SinTerm[t] = sin(Polarization + ThetaPtr[t]); } } else { const double term = 1. / sqrt(2); for (uint t = 0; t < ThetaLength; t++) { CosTerm[t] = term; SinTerm[t] = term; } } } void Unstructured(uint ThetaLength, uint PhiLength, complex128 * array0, double *array1, complex128 scalar, complex128 * output) { for (uint p = 0; p < PhiLength; p++) { *output = scalar * array0[p] * array1[p]; output++; } } void Structured(uint ThetaLength, uint PhiLength, complex128 * array0, double *array1, complex128 scalar, complex128 * output) { for (uint p = 0; p < PhiLength; p++) { for (uint t = 0; t < ThetaLength; t++) { *output = scalar * array0[p] * array1[t]; output++; } } } //-
bool IsPolarized(double &Polarization) { if (Polarization == -1.) { return false; } else { return true; } } void PolarizationTerm(uint ThetaLength, double *ThetaPtr, double *CosTerm, double *SinTerm, bool & Polarized) { if (Polarized == true) { for (uint t = 0; t < ThetaLength; t++) { CosTerm[t] = cos(Polarization + ThetaPtr[t]); SinTerm[t] = sin(Polarization + ThetaPtr[t]); } } else { const double term = 1. / sqrt(2); for (uint t = 0; t < ThetaLength; t++) { CosTerm[t] = term; SinTerm[t] = term; } } } void Unstructured(uint ThetaLength, uint PhiLength, complex128 * array0, double *array1, complex128 scalar, complex128 * output) { /* index by p instead of bumping a shared pointer, which would race across threads; p is the loop variable and must not appear in the shared clause */ #pragma omp parallel for default(none) shared(output, scalar, array0, array1, PhiLength) for (uint p = 0; p < PhiLength; p++) { output[p] = scalar * array0[p] * array1[p]; } } void Structured(uint ThetaLength, uint PhiLength, complex128 * array0, double *array1, complex128 scalar, complex128 * output) { for (uint p = 0; p < PhiLength; p++) { for (uint t = 0; t < ThetaLength; t++) { *output = scalar * array0[p] * array1[t]; output++; } } } //-
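The Structured() routine above keeps the serial pointer-bumping form; if it were to be parallelized in the same way, the flat output index can be computed from (p, t) instead. A minimal sketch in plain C, with double complex standing in for complex128 and a plain typedef for uint (neither definition appears in this fragment); the collapse(2) clause and the index arithmetic are my suggestion, not part of the original header.
#include <complex.h>
#include <stdio.h>
typedef unsigned int uint;
void structured_omp(uint ThetaLength, uint PhiLength,
                    const double complex *array0, const double *array1,
                    double complex scalar, double complex *output)
{
    /* collapse both loops and write through a computed index, so every
       (p, t) pair is independent and no pointer is shared between threads */
    #pragma omp parallel for collapse(2)
    for (uint p = 0; p < PhiLength; p++)
        for (uint t = 0; t < ThetaLength; t++)
            output[(size_t) p * ThetaLength + t] = scalar * array0[p] * array1[t];
}
int main(void)
{
    double complex a0[2] = { 1.0 + 1.0*I, 2.0 };
    double a1[3] = { 1.0, 2.0, 3.0 };
    double complex out[6];
    structured_omp(3, 2, a0, a1, 0.5, out);   /* out[p*3 + t] = 0.5 * a0[p] * a1[t] */
    printf("out[4] = %g%+gi\n", creal(out[4]), cimag(out[4]));
    return 0;
}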
guess.c
#include <stdio.h> #include <stdlib.h> #include <time.h> #include <omp.h> #define _DISP 0 void disp(int * num1){ #ifndef _DISP printf( "num: %d %d %d %d\n",num1[0],num1[1],num1[2],num1[3]); #endif } inline void num2p(int seqn, int * nump){ //数字转为数组,并用指针传出 int i; for(i=0; i<4; i++) nump[i]=0; i=3; while(seqn){ nump[i--]=seqn%10; seqn=seqn/10; } } int p2num(int * nump){ //数组转化为数字 int i,num=0; for(i=0;i<4;i++) num=10*num+nump[i]; return num; } inline int check1( int * nump){ //检查数组生成是否符合规则 //1: 符合规则 //0: 不符合规则 int i,j; for(i=0;i<4;i++){ for(j=i+1;j<4;j++){ if(nump[i]==nump[j]) return 0; } } return 1; } void randnum(int * nump){ //随机产生数字 int array[10]={0,1,2,3,4,5,6,7,8,9}; int i; int tmp,rd; for(i=0; i<4; i++){ rd=rand()%10; tmp = array[rd]; array[rd] = array[9-i]; array[9-i] = tmp; } for(i=0; i<4; i++){ nump[i]=array[6+i]; } } inline void seqnum(int * nump){ //顺序产生数字 static int cnt=0; cnt++; static int seqn=0; while(1){ seqn++; num2p(seqn,nump); if(check1(nump)==1) break; } } inline void check2(int * num0,int * numg,int * a, int *b){ //检查数组于猜测数组吻合程度 //a,数字相同,位置相同 //b,数字相同,位置不同 int i,j; *a=0; *b=0; for(i=0;i<4;i++){ if(num0[i]==numg[i]) (*a)++; for(j=0;j<4;j++){ if(num0[i]==numg[j]) (*b)++; } } (*b)=(*b)-(*a); } /* void getinput(int * nump){ static int cnt=0; while(1){ printf( "Please Input Your Number!\n"); int num; scanf("%d",&num); if(num<123 || num >9876){ printf( "Wrong Format Number!\n"); cnt++; if( cnt==3){ printf( "Wrong Formant Too Many!\nExit!\n"); exit(0); } continue; } num2p(num,nump); if(check1(nump)) return; cnt++; printf( "Wrong Format Number!\n"); if(cnt>=3){ printf( "Wrong Format Too Many!\nExit!\n"); exit(0); } } } void CPguess(){ //电脑随机数字,玩家进行游戏 int num[4]; srand(time(NULL)); randnum( num); disp(num); int numg[4]; int cnt=0; int a,b; while(1){ cnt++; getinput(numg); check2(num,numg,&a,&b); printf( "a:%d b:%d\n",a,b); if(a==4){ printf( "You Got it!\n%d times!\n",cnt); return; } if(cnt>10){ printf( "Too Many Times Guess!\n"); return; } } } */ int Division(int * array,int * nump){ //nump数组将array数组分类,返回分类的区分度 int hist[15]; int i; for(i=0;i<15;i++) hist[i]=0; int numt[4]; int ta,tb; int cnt=0; for(i=0;i<9999;i++){ if(array[i]==1){ num2p(i,numt); check2(numt,nump,&ta,&tb); hist[ta*(11-ta)/2+tb]++; cnt++; } } int div=0; cnt=cnt/14; for(i=0;i<13;i++) div+=(hist[i]-cnt)*(hist[i]-cnt); div+=(hist[14]-cnt)*(hist[14]-cnt); return div; } int BestDivision(int *array){ int best=100000000; int bestindex=-1; int new; int i; int numt[4]; int cnt=0; for(i=0;i<9999;i++){ if(array[i]==1) { num2p(i,numt); if(check1(numt)==1){ new=Division(array,numt); if(best>new){ best=new; bestindex=i; } } cnt++; } } #ifndef _DISP printf( "array cnt:%d\n",cnt); #endif return bestindex; } int CCguess(){ int num[4]; //srand(time(NULL));randnum(num); seqnum(num); disp(num); int numg[4]; int cnt=0,i; //int j,k; int a,b,ta,tb; int numt[4]; int array[9999]; for(i=0;i<9999;i++){ num2p(i,numt); array[i]=check1(numt); } for(i=0;i<4;i++) numg[i]=i; while(1){ disp(numg); check2(num,numg,&a,&b); cnt++; #ifndef _DISP printf( "cnt:%d a:%d b:%d\n",cnt,a,b); #endif if(a==4&&b==0){ #ifndef _DISP printf( "You Got it!\nFinal CNT: %d\n",cnt); #endif return cnt; } if( cnt>10){ #ifndef _DISP printf( "Too Many Times!\n"); #endif return 0; } //从array剔除不可能为真的数字 for(i=0;i<9999;i++){ if(array[i]){ num2p(i,numt); check2(numt,numg,&ta,&tb); //if(cnt==2)printf( "i: %d,ta: %d,tb: %d|",i,ta,tb); //if(i%6==5)printf( "\n"); if(ta!=a || tb!=b) array[i]=0; } } /* int tmp=0; for(i=0;i<9999;i++){ if(array[i]==1){ printf(" 1"); tmp++; } else 
printf(" "); if(i%70==69) printf("$\n"); } printf( "\ncnt 1: %d\n",tmp); */ //根据剩余的可能值,推测下一个numg int ans=BestDivision(array); num2p(ans,numg); } } int main(){ int i,cnt=0; int ans; int hist[11]; for(i=0;i<11;i++) hist[i]=0; #pragma omp parallel for for(i=0;i<5040;i++){ ans=CCguess(); if(ans==0){ printf("\nerror,%d,%d",i,ans); exit(1); } //printf( "%5d,ans:%d\n",i,ans); printf("%5d,%d",i,ans); if(i%10==9) printf("\n"); cnt+=ans; hist[ans]++; } printf( "average cnt:%f,cnt:%d,i:%d\n",cnt/(i+0.0),cnt,i); for(i=0;i<11;i++) printf("time:%3d:%4d\n",i,hist[i]); return 1; }
#include <stdio.h> #include <stdlib.h> #include <time.h> #include <omp.h> #define _DISP 0 void disp(int * num1){ #ifndef _DISP printf( "num: %d %d %d %d\n",num1[0],num1[1],num1[2],num1[3]); #endif } inline void num2p(int seqn, int * nump){ //数字转为数组,并用指针传出 int i; for(i=0; i<4; i++) nump[i]=0; i=3; while(seqn){ nump[i--]=seqn%10; seqn=seqn/10; } } int p2num(int * nump){ //数组转化为数字 int i,num=0; for(i=0;i<4;i++) num=10*num+nump[i]; return num; } inline int check1( int * nump){ //检查数组生成是否符合规则 //1: 符合规则 //0: 不符合规则 int i,j; for(i=0;i<4;i++){ for(j=i+1;j<4;j++){ if(nump[i]==nump[j]) return 0; } } return 1; } void randnum(int * nump){ //随机产生数字 int array[10]={0,1,2,3,4,5,6,7,8,9}; int i; int tmp,rd; for(i=0; i<4; i++){ rd=rand()%10; tmp = array[rd]; array[rd] = array[9-i]; array[9-i] = tmp; } for(i=0; i<4; i++){ nump[i]=array[6+i]; } } inline void seqnum(int * nump){ //顺序产生数字 static int cnt=0; cnt++; static int seqn=0; while(1){ seqn++; num2p(seqn,nump); if(check1(nump)==1) break; } } inline void check2(int * num0,int * numg,int * a, int *b){ //检查数组于猜测数组吻合程度 //a,数字相同,位置相同 //b,数字相同,位置不同 int i,j; *a=0; *b=0; for(i=0;i<4;i++){ if(num0[i]==numg[i]) (*a)++; for(j=0;j<4;j++){ if(num0[i]==numg[j]) (*b)++; } } (*b)=(*b)-(*a); } /* void getinput(int * nump){ static int cnt=0; while(1){ printf( "Please Input Your Number!\n"); int num; scanf("%d",&num); if(num<123 || num >9876){ printf( "Wrong Format Number!\n"); cnt++; if( cnt==3){ printf( "Wrong Formant Too Many!\nExit!\n"); exit(0); } continue; } num2p(num,nump); if(check1(nump)) return; cnt++; printf( "Wrong Format Number!\n"); if(cnt>=3){ printf( "Wrong Format Too Many!\nExit!\n"); exit(0); } } } void CPguess(){ //电脑随机数字,玩家进行游戏 int num[4]; srand(time(NULL)); randnum( num); disp(num); int numg[4]; int cnt=0; int a,b; while(1){ cnt++; getinput(numg); check2(num,numg,&a,&b); printf( "a:%d b:%d\n",a,b); if(a==4){ printf( "You Got it!\n%d times!\n",cnt); return; } if(cnt>10){ printf( "Too Many Times Guess!\n"); return; } } } */ int Division(int * array,int * nump){ //nump数组将array数组分类,返回分类的区分度 int hist[15]; int i; for(i=0;i<15;i++) hist[i]=0; int numt[4]; int ta,tb; int cnt=0; for(i=0;i<9999;i++){ if(array[i]==1){ num2p(i,numt); check2(numt,nump,&ta,&tb); hist[ta*(11-ta)/2+tb]++; cnt++; } } int div=0; cnt=cnt/14; for(i=0;i<13;i++) div+=(hist[i]-cnt)*(hist[i]-cnt); div+=(hist[14]-cnt)*(hist[14]-cnt); return div; } int BestDivision(int *array){ int best=100000000; int bestindex=-1; int new; int i; int numt[4]; int cnt=0; for(i=0;i<9999;i++){ if(array[i]==1) { num2p(i,numt); if(check1(numt)==1){ new=Division(array,numt); if(best>new){ best=new; bestindex=i; } } cnt++; } } #ifndef _DISP printf( "array cnt:%d\n",cnt); #endif return bestindex; } int CCguess(){ int num[4]; //srand(time(NULL));randnum(num); seqnum(num); disp(num); int numg[4]; int cnt=0,i; //int j,k; int a,b,ta,tb; int numt[4]; int array[9999]; for(i=0;i<9999;i++){ num2p(i,numt); array[i]=check1(numt); } for(i=0;i<4;i++) numg[i]=i; while(1){ disp(numg); check2(num,numg,&a,&b); cnt++; #ifndef _DISP printf( "cnt:%d a:%d b:%d\n",cnt,a,b); #endif if(a==4&&b==0){ #ifndef _DISP printf( "You Got it!\nFinal CNT: %d\n",cnt); #endif return cnt; } if( cnt>10){ #ifndef _DISP printf( "Too Many Times!\n"); #endif return 0; } //从array剔除不可能为真的数字 for(i=0;i<9999;i++){ if(array[i]){ num2p(i,numt); check2(numt,numg,&ta,&tb); //if(cnt==2)printf( "i: %d,ta: %d,tb: %d|",i,ta,tb); //if(i%6==5)printf( "\n"); if(ta!=a || tb!=b) array[i]=0; } } /* int tmp=0; for(i=0;i<9999;i++){ if(array[i]==1){ printf(" 1"); tmp++; } else 
printf(" "); if(i%70==69) printf("$\n"); } printf( "\ncnt 1: %d\n",tmp); */ //根据剩余的可能值,推测下一个numg int ans=BestDivision(array); num2p(ans,numg); } } int main(){ int i,cnt=0; int ans; int hist[11]; for(i=0;i<11;i++) hist[i]=0; for(i=0;i<5040;i++){ ans=CCguess(); if(ans==0){ printf("\nerror,%d,%d",i,ans); exit(1); } //printf( "%5d,ans:%d\n",i,ans); printf("%5d,%d",i,ans); if(i%10==9) printf("\n"); cnt+=ans; hist[ans]++; } printf( "average cnt:%f,cnt:%d,i:%d\n",cnt/(i+0.0),cnt,i); for(i=0;i<11;i++) printf("time:%3d:%4d\n",i,hist[i]); return 1; }
#include <stdio.h> #include <stdlib.h> #include <time.h> #include <omp.h> #define _DISP 0 void disp(int * num1){ #ifndef _DISP printf( "num: %d %d %d %d\n",num1[0],num1[1],num1[2],num1[3]); #endif } inline void num2p(int seqn, int * nump){ //数字转为数组,并用指针传出 int i; for(i=0; i<4; i++) nump[i]=0; i=3; while(seqn){ nump[i--]=seqn%10; seqn=seqn/10; } } int p2num(int * nump){ //数组转化为数字 int i,num=0; for(i=0;i<4;i++) num=10*num+nump[i]; return num; } inline int check1( int * nump){ //检查数组生成是否符合规则 //1: 符合规则 //0: 不符合规则 int i,j; for(i=0;i<4;i++){ for(j=i+1;j<4;j++){ if(nump[i]==nump[j]) return 0; } } return 1; } void randnum(int * nump){ //随机产生数字 int array[10]={0,1,2,3,4,5,6,7,8,9}; int i; int tmp,rd; for(i=0; i<4; i++){ rd=rand()%10; tmp = array[rd]; array[rd] = array[9-i]; array[9-i] = tmp; } for(i=0; i<4; i++){ nump[i]=array[6+i]; } } inline void seqnum(int * nump){ //顺序产生数字 static int cnt=0; cnt++; static int seqn=0; while(1){ seqn++; num2p(seqn,nump); if(check1(nump)==1) break; } } inline void check2(int * num0,int * numg,int * a, int *b){ //检查数组于猜测数组吻合程度 //a,数字相同,位置相同 //b,数字相同,位置不同 int i,j; *a=0; *b=0; for(i=0;i<4;i++){ if(num0[i]==numg[i]) (*a)++; for(j=0;j<4;j++){ if(num0[i]==numg[j]) (*b)++; } } (*b)=(*b)-(*a); } /* void getinput(int * nump){ static int cnt=0; while(1){ printf( "Please Input Your Number!\n"); int num; scanf("%d",&num); if(num<123 || num >9876){ printf( "Wrong Format Number!\n"); cnt++; if( cnt==3){ printf( "Wrong Formant Too Many!\nExit!\n"); exit(0); } continue; } num2p(num,nump); if(check1(nump)) return; cnt++; printf( "Wrong Format Number!\n"); if(cnt>=3){ printf( "Wrong Format Too Many!\nExit!\n"); exit(0); } } } void CPguess(){ //电脑随机数字,玩家进行游戏 int num[4]; srand(time(NULL)); randnum( num); disp(num); int numg[4]; int cnt=0; int a,b; while(1){ cnt++; getinput(numg); check2(num,numg,&a,&b); printf( "a:%d b:%d\n",a,b); if(a==4){ printf( "You Got it!\n%d times!\n",cnt); return; } if(cnt>10){ printf( "Too Many Times Guess!\n"); return; } } } */ int Division(int * array,int * nump){ //nump数组将array数组分类,返回分类的区分度 int hist[15]; int i; for(i=0;i<15;i++) hist[i]=0; int numt[4]; int ta,tb; int cnt=0; for(i=0;i<9999;i++){ if(array[i]==1){ num2p(i,numt); check2(numt,nump,&ta,&tb); hist[ta*(11-ta)/2+tb]++; cnt++; } } int div=0; cnt=cnt/14; for(i=0;i<13;i++) div+=(hist[i]-cnt)*(hist[i]-cnt); div+=(hist[14]-cnt)*(hist[14]-cnt); return div; } int BestDivision(int *array){ int best=100000000; int bestindex=-1; int new; int i; int numt[4]; int cnt=0; for(i=0;i<9999;i++){ if(array[i]==1) { num2p(i,numt); if(check1(numt)==1){ new=Division(array,numt); if(best>new){ best=new; bestindex=i; } } cnt++; } } #ifndef _DISP printf( "array cnt:%d\n",cnt); #endif return bestindex; } int CCguess(){ int num[4]; //srand(time(NULL));randnum(num); seqnum(num); disp(num); int numg[4]; int cnt=0,i; //int j,k; int a,b,ta,tb; int numt[4]; int array[9999]; for(i=0;i<9999;i++){ num2p(i,numt); array[i]=check1(numt); } for(i=0;i<4;i++) numg[i]=i; while(1){ disp(numg); check2(num,numg,&a,&b); cnt++; #ifndef _DISP printf( "cnt:%d a:%d b:%d\n",cnt,a,b); #endif if(a==4&&b==0){ #ifndef _DISP printf( "You Got it!\nFinal CNT: %d\n",cnt); #endif return cnt; } if( cnt>10){ #ifndef _DISP printf( "Too Many Times!\n"); #endif return 0; } //从array剔除不可能为真的数字 for(i=0;i<9999;i++){ if(array[i]){ num2p(i,numt); check2(numt,numg,&ta,&tb); //if(cnt==2)printf( "i: %d,ta: %d,tb: %d|",i,ta,tb); //if(i%6==5)printf( "\n"); if(ta!=a || tb!=b) array[i]=0; } } /* int tmp=0; for(i=0;i<9999;i++){ if(array[i]==1){ printf(" 1"); tmp++; } else 
printf(" "); if(i%70==69) printf("$\n"); } printf( "\ncnt 1: %d\n",tmp); */ //根据剩余的可能值,推测下一个numg int ans=BestDivision(array); num2p(ans,numg); } } int main(){ int i,cnt=0; int ans; int hist[11]; for(i=0;i<11;i++) hist[i]=0; #pragma omp parallel for for(i=0;i<5040;i++){ ans=CCguess(); if(ans==0){ printf("\nerror,%d,%d",i,ans); exit(1); } //printf( "%5d,ans:%d\n",i,ans); printf("%5d,%d",i,ans); if(i%10==9) printf("\n"); cnt+=ans; hist[ans]++; } printf( "average cnt:%f,cnt:%d,i:%d\n",cnt/(i+0.0),cnt,i); for(i=0;i<11;i++) printf("time:%3d:%4d\n",i,hist[i]); return 1; }
no_loop_1.c
#include <stdio.h> #include <omp.h> #pragma omp declare target int foo(int i) { return i+1; } #pragma omp end declare target int main() { int N = 100000; int a[N]; int b[N]; int i; for (i=0; i<N; i++) b[i]=i; for (i=0; i<N; i++) a[i]=0; int j; #pragma omp target teams distribute parallel for { for (j = 0; j< N; j++) a[j]=b[j]; } #pragma omp target teams distribute parallel for { for (int k = 0; k< N; k++) a[k]=b[k]; } #pragma omp target teams distribute parallel for { for (int k = 0; k< N; k++) { a[k]=b[k]; foo(k); } } #pragma omp target teams distribute parallel for { for (int k = 0; k< N; k++) { a[k]=b[k]; omp_get_num_teams(); } } #pragma omp target teams distribute parallel for { for (int k = 0; k< N; k++) { #pragma omp simd for (int p = 0; p < N; p++) a[k]=b[k]; } } int rc = 0; for (i=0; i<N; i++) if (a[i] != b[i] ) { rc++; printf ("Wrong value: a[%d]=%d\n", i, a[i]); } if (!rc) printf("Success\n"); return rc; } /// CHECK: DEVID:[[S:[ ]*]][[DEVID:[0-9]+]] SGN:4 /// CHECK: DEVID:[[S:[ ]*]][[DEVID:[0-9]+]] SGN:4 /// CHECK: DEVID:[[S:[ ]*]][[DEVID:[0-9]+]] SGN:4 /// CHECK: DEVID:[[S:[ ]*]][[DEVID:[0-9]+]] SGN:2 /// CHECK: DEVID:[[S:[ ]*]][[DEVID:[0-9]+]] SGN:2
#include <stdio.h> #include <omp.h> int foo(int i) { return i + 1; } int main() { int N = 100000; int a[N]; int b[N]; int i; for (i = 0; i < N; i++) b[i] = i; for (i = 0; i < N; i++) a[i] = 0; int j; for (j = 0; j < N; j++) a[j] = b[j]; for (int k = 0; k < N; k++) a[k] = b[k]; for (int k = 0; k < N; k++) { a[k] = b[k]; foo(k); } for (int k = 0; k < N; k++) { a[k] = b[k]; omp_get_num_teams(); } for (int k = 0; k < N; k++) { for (int p = 0; p < N; p++) a[k] = b[k]; } int rc = 0; for (i = 0; i < N; i++) if (a[i] != b[i]) { rc++; printf("Wrong value: a[%d]=%d\n", i, a[i]); } if (!rc) printf("Success\n"); return rc; } ///CHECK: DEVID: [[S: [] *]][[DEVID: [0 - 9] +]] SGN:4 /// CHECK: DEVID: [[S: [] *]][[DEVID: [0 - 9] +]] SGN:4 /// CHECK: DEVID: [[S: [] *]][[DEVID: [0 - 9] +]] SGN:4 /// CHECK: DEVID: [[S: [] *]][[DEVID: [0 - 9] +]] SGN:2 /// CHECK: DEVID: [[S: [] *]][[DEVID: [0 - 9] +]] SGN:2
#include <stdio.h> #include <omp.h> #pragma omp declare target int foo(int i) { return i + 1; } #pragma omp end declare target int main() { int N = 100000; int a[N]; int b[N]; int i; for (i = 0; i < N; i++) b[i] = i; for (i = 0; i < N; i++) a[i] = 0; int j; #pragma omp target teams distribute parallel for { for (j = 0; j < N; j++) a[j] = b[j]; } #pragma omp target teams distribute parallel for { for (int k = 0; k < N; k++) a[k] = b[k]; } #pragma omp target teams distribute parallel for { for (int k = 0; k < N; k++) { a[k] = b[k]; foo(k); } } #pragma omp target teams distribute parallel for { for (int k = 0; k < N; k++) { a[k] = b[k]; omp_get_num_teams(); } } #pragma omp target teams distribute parallel for { for (int k = 0; k < N; k++) { #pragma omp simd for (int p = 0; p < N; p++) a[k] = b[k]; } } int rc = 0; for (i = 0; i < N; i++) if (a[i] != b[i]) { rc++; printf("Wrong value: a[%d]=%d\n", i, a[i]); } if (!rc) printf("Success\n"); return rc; } ///CHECK: DEVID: [[S: [] *]][[DEVID: [0 - 9] +]] SGN:4 /// CHECK: DEVID: [[S: [] *]][[DEVID: [0 - 9] +]] SGN:4 /// CHECK: DEVID: [[S: [] *]][[DEVID: [0 - 9] +]] SGN:4 /// CHECK: DEVID: [[S: [] *]][[DEVID: [0 - 9] +]] SGN:2 /// CHECK: DEVID: [[S: [] *]][[DEVID: [0 - 9] +]] SGN:2
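Each target teams distribute parallel for directive above is followed by a brace-enclosed block that wraps the loop. A loop-associated combined construct has to bind directly to the for statement, so compilers that enforce the OpenMP grammar reject the block form. A minimal sketch of the conforming shape, using the same array-copy body with an illustrative size; arrays are implicitly mapped tofrom, and the region falls back to the host when no device is present.
#include <stdio.h>
int main(void)
{
    enum { N = 1024 };
    int a[N], b[N];
    for (int i = 0; i < N; i++) { a[i] = 0; b[i] = i; }
    /* the directive is attached directly to the for statement - no extra braces */
    #pragma omp target teams distribute parallel for
    for (int k = 0; k < N; k++)
        a[k] = b[k];
    int rc = 0;
    for (int i = 0; i < N; i++)
        if (a[i] != b[i]) rc++;
    if (!rc) printf("Success\n"); else printf("Mismatch: %d\n", rc);
    return rc;
}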
GB_unop__identity_uint8_int16.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_uint8_int16) // op(A') function: GB (_unop_tran__identity_uint8_int16) // C type: uint8_t // A type: int16_t // cast: uint8_t cij = (uint8_t) aij // unaryop: cij = aij #define GB_ATYPE \ int16_t #define GB_CTYPE \ uint8_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int16_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ uint8_t z = (uint8_t) aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ int16_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ uint8_t z = (uint8_t) aij ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_UINT8 || GxB_NO_INT16) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_uint8_int16) ( uint8_t *Cx, // Cx and Ax may be aliased const int16_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int16_t aij = Ax [p] ; uint8_t z = (uint8_t) aij ; Cx [p] = z ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; int16_t aij = Ax [p] ; uint8_t z = (uint8_t) aij ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_uint8_int16) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_uint8_int16) // op(A') function: GB (_unop_tran__identity_uint8_int16) // C type: uint8_t // A type: int16_t // cast: uint8_t cij = (uint8_t) aij // unaryop: cij = aij #define GB_ATYPE \ int16_t #define GB_CTYPE \ uint8_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int16_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ uint8_t z = (uint8_t) aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ int16_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ uint8_t z = (uint8_t) aij ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_UINT8 || GxB_NO_INT16) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_uint8_int16) ( uint8_t *Cx, // Cx and Ax may be aliased const int16_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { for (p = 0 ; p < anz ; p++) { int16_t aij = Ax [p] ; uint8_t z = (uint8_t) aij ; Cx [p] = z ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; int16_t aij = Ax [p] ; uint8_t z = (uint8_t) aij ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_uint8_int16) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_uint8_int16) // op(A') function: GB (_unop_tran__identity_uint8_int16) // C type: uint8_t // A type: int16_t // cast: uint8_t cij = (uint8_t) aij // unaryop: cij = aij #define GB_ATYPE \ int16_t #define GB_CTYPE \ uint8_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int16_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ uint8_t z = (uint8_t) aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ int16_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ uint8_t z = (uint8_t) aij ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_UINT8 || GxB_NO_INT16) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_uint8_int16) ( uint8_t *Cx, // Cx and Ax may be aliased const int16_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int16_t aij = Ax [p] ; uint8_t z = (uint8_t) aij ; Cx [p] = z ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; int16_t aij = Ax [p] ; uint8_t z = (uint8_t) aij ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_uint8_int16) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
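The cast in this kernel is a plain C conversion to an unsigned type, so int16_t values outside 0..255 wrap modulo 256 rather than saturate. A two-value illustration, standalone rather than GraphBLAS code:
#include <stdint.h>
#include <stdio.h>
int main (void)
{
    int16_t a [3] = { 300, -1, 42 } ;
    for (int p = 0 ; p < 3 ; p++)
    {
        uint8_t z = (uint8_t) a [p] ;               /* same cast as GB_CAST above: wraps mod 256 */
        printf ("%d -> %d\n", a [p], (int) z) ;     /* prints 300 -> 44, -1 -> 255, 42 -> 42 */
    }
    return (0) ;
}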
fastmarching_tree.h
//last change: by PHC, 2013-02-13. adjust memory allocation to make it more robust /***************************************************************** * file : fastmarching_tree.h, Hang Xiao, Jan 18, 2012 * * fastmarching_tree * fastmarching_tracing * * **************************************************************/ #ifndef __FAST_MARCHING_TREE_H__ #define __FAST_MARCHING_TREE_H__ #include <cstdlib> #include <cmath> #include <vector> #include <map> #include <iostream> #include "stackutil.h" #include "my_surf_objs.h" #include "heap.h" #include "upwind_solver.h" #include "fastmarching_macro.h" using namespace std; #ifndef ABS #define ABS(x) ((x) > 0 ? (x) : (-(x))) #endif #ifndef MAX #define MAX(x,y) ((x) > (y) ? (x) : (y)) #endif #ifndef MIN #define MIN(x,y) ((x) < (y) ? (x) : (y)) #endif double consin(double vec1[], double vec2[], int len){ double sum = 0; double A = 0; double B = 0; for (int i = 0; i < len; i++){ sum += vec1[i] * vec2[i]; A += vec1[i] * vec1[i]; B += vec2[i] * vec2[i]; } return ABS(sqrt(A*B) / sum); } struct HeapElemXX : public HeapElem { MyMarker * parent_marker; HeapElemXX(long _ind, double _value, MyMarker * _parent_marker) : HeapElem(_ind, _value) { parent_marker = _parent_marker; } }; /********************************************************************* * Function : fastmarching_linear_tree * * Features : * 1. Create fast marcing tree from root marker only * 2. Background (intensity less than bkg_thresh) will be ignored. * 3. The distance is the sum of intensity *** * * Input : root root marker * inimg1d original 8bit image * * Output : tree output swc * phi the distance for each pixels * *******************************************************************/ template<class T> bool fastmarching_linear_tree(MyMarker root, T * inimg1d, vector<MyMarker*> &outtree, int sz0, int sz1, int sz2, int cnn_type = 3, double bkg_thresh = 1) { enum{ALIVE = -1, TRIAL = 0, FAR = 1}; long tol_sz = sz0 * sz1 * sz2; long sz01 = sz0 * sz1; //int cnn_type = 3; // ? 
float * phi = new float[tol_sz]; for(long i = 0; i < tol_sz; i++){phi[i] = INF;} long * parent = new long[tol_sz]; for(long i = 0; i < tol_sz; i++) parent[i] = i; // each pixel point to itself at the beginning // GI parameter min_int, max_int, li double max_int = 0; // maximum intensity, used in GI double min_int = INF; for(long i = 0; i < tol_sz; i++) { if(inimg1d[i] > max_int) max_int = inimg1d[i]; if(inimg1d[i] < min_int) min_int = inimg1d[i]; } max_int -= min_int; double li = 10; // initialization char * state = new char[tol_sz]; for(long i = 0; i < tol_sz; i++) state[i] = FAR; // init state and phi for root int rootx = root.x + 0.5; int rooty = root.y + 0.5; int rootz = root.z + 0.5; long root_ind = rootz*sz01 + rooty*sz0 + rootx; state[root_ind] = ALIVE; phi[root_ind] = 0.0; BasicHeap<HeapElemX> heap; map<long, HeapElemX*> elems; // init heap { long index = root_ind; HeapElemX *elem = new HeapElemX(index, phi[index]); elem->prev_ind = index; heap.insert(elem); elems[index] = elem; } // loop int time_counter = 1; double process1 = 0; while(!heap.empty()) { double process2 = (time_counter++)*10000.0/tol_sz; if(process2 - process1 >= 1) { cout<<"\r"<<((int)process2)/100.0<<"%";cout.flush(); process1 = process2; } HeapElemX* min_elem = heap.delete_min(); elems.erase(min_elem->img_ind); long min_ind = min_elem->img_ind; long prev_ind = min_elem->prev_ind; delete min_elem; parent[min_ind] = prev_ind; state[min_ind] = ALIVE; int i = min_ind % sz0; int j = (min_ind/sz0) % sz1; int k = (min_ind/sz01) % sz2; int w, h, d; for(int kk = -1; kk <= 1; kk++) { d = k+kk; if(d < 0 || d >= sz2) continue; for(int jj = -1; jj <= 1; jj++) { h = j+jj; if(h < 0 || h >= sz1) continue; for(int ii = -1; ii <= 1; ii++) { w = i+ii; if(w < 0 || w >= sz0) continue; int offset = ABS(ii) + ABS(jj) + ABS(kk); if(offset == 0 || offset > cnn_type) continue; double factor = (offset == 1) ? 1.0 : ((offset == 2) ? 1.414214 : ((offset == 3) ? 
1.732051 : 0.0)); long index = d*sz01 + h*sz0 + w; if(inimg1d[index] <= bkg_thresh) continue; if(state[index] != ALIVE) { double new_dist = phi[min_ind] + (1.0 - (inimg1d[index] - min_ind)/max_int)*1000.0; long prev_ind = min_ind; if(state[index] == FAR) { phi[index] = new_dist; HeapElemX * elem = new HeapElemX(index, phi[index]); elem->prev_ind = prev_ind; heap.insert(elem); elems[index] = elem; state[index] = TRIAL; } else if(state[index] == TRIAL) { if(phi[index] > new_dist) { phi[index] = new_dist; HeapElemX * elem = elems[index]; heap.adjust(elem->heap_id, phi[index]); elem->prev_ind = prev_ind; } } } } } } } // save current swc tree if(1) { int i = -1, j = -1, k = -1; map<long, MyMarker*> tmp_map; for(long ind = 0; ind < tol_sz; ind++) { i++; if(i%sz0 == 0){i=0;j++; if(j%sz1 == 0) {j=0; k++;}} if(state[ind] != ALIVE) continue; MyMarker * marker = new MyMarker(i,j,k); tmp_map[ind] = marker; outtree.push_back(marker); } i=-1; j = -1; k = -1; for(long ind = 0; ind < tol_sz; ind++) { i++; if(i%sz0 == 0){i=0; j++; if(j%sz1==0){j=0; k++;}} if(state[ind] != ALIVE) continue; long ind2 = parent[ind]; MyMarker * marker1 = tmp_map[ind]; MyMarker * marker2 = tmp_map[ind2]; if(marker1 == marker2) marker1->parent = 0; else marker1->parent = marker2; //tmp_map[ind]->parent = tmp_map[ind2]; } } // over map<long, HeapElemX*>::iterator mit = elems.begin(); while(mit != elems.end()){HeapElemX * elem = mit->second; delete elem; mit++;} if(phi){delete [] phi; phi = 0;} if(parent){delete [] parent; parent = 0;} if(state) {delete [] state; state = 0;} return true; } /********************************************************************* * Function : fastmarching_tree * * Features : * 1. Create fast marcing tree from root marker only * 2. Background (intensity 0) will be ignored. * 3. 
Graph augumented distance is used * * Input : root root marker * inimg1d original 8bit image * * Output : tree output swc * phi the distance for each pixels * *******************************************************************/ template<class T> bool fastmarching_tree(MyMarker root, T * inimg1d, vector<MyMarker*> &outtree, long sz0, long sz1, long sz2, int cnn_type = 3, double bkg_thresh = 20, bool is_break_accept = false) { cout << "wp_debug: " << __FUNCTION__ << " " << __LINE__ << endl; //add dircetion T ***indata3d; indata3d = (T ***)malloc(sz2 * sizeof(T**)); for (int i = 0; i < sz2; i++){ indata3d[i] = (T **)malloc(sz1 * sizeof(T*)); for (int j = 0; j < sz1; j++){ indata3d[i][j] = (T *)malloc(sz0 * sizeof(T)); } } for (int z = 0; z < sz2; z++){ for (int y = 0; y < sz1; y++){ for (int x = 0; x < sz0; x++){ indata3d[z][y][x] = inimg1d[z*sz1 * sz0 + y*sz0 + x]; } } } double **direction; direction = (double **)malloc(sz0*sz1*sz2 * sizeof(double*)); for (int i = 0; i < sz0*sz1*sz2; i++){ direction[i] = (double *)malloc(3 * sizeof(double)); } LocationSimple pt; double sigma1, sigma2, sigma3; double vec1[3], vec2[3], vec3[3]; //endTime1 = GetTickCount(); omp_set_num_threads(10); #pragma omp parallel for private(pt,vec1,vec2,vec3) for (int i = 0; i < sz0 * sz1 * sz2; i++){ pt.x = i%sz0; pt.y = int(i/sz0)%sz1; pt.z = int(i/(sz0*sz1)); pt.radius = 8; //cout << pt.x << " " << pt.y << " " << pt.z << endl; compute_rgn_stat_new(pt, indata3d, 1, sz0, sz1, sz2, 1, V3D_UINT8, vec1, vec2, vec3, sigma1, sigma2, sigma3); direction[i][0] = vec1[0]; direction[i][1] = vec1[1]; direction[i][2] = vec1[2]; //cout << i << endl; } cout << "wp_debug: " << __FUNCTION__ << " " << __LINE__ << endl; //end dircetiond enum{ALIVE = -1, TRIAL = 0, FAR = 1}; long tol_sz = sz0 * sz1 * sz2; long sz01 = sz0 * sz1; long i; //int cnn_type = 3; // ? float * phi = 0; long * parent = 0; char * state = 0; try { phi = new float[tol_sz]; parent = new long[tol_sz]; state = new char[tol_sz]; for(i = 0; i < tol_sz; i++) { phi[i] = INF; parent[i] = i; // each pixel point to itself at the statements beginning state[i] = FAR; } } catch (...) { cout << "********* Fail to allocate memory. quit fastmarching_tree()." 
<< endl; if (phi) {delete []phi; phi=0;} if (parent) {delete []parent; parent=0;} if (state) {delete []state; state=0;} return false; } // GI parameter min_int, max_int, li double max_int = 0; // maximum intensity, used in GI double min_int = INF; for(i = 0; i < tol_sz; i++) { if (inimg1d[i] > max_int) max_int = inimg1d[i]; else if (inimg1d[i] < min_int) min_int = inimg1d[i]; } max_int -= min_int; double li = 10; // initialization // init state and phi for root long rootx = root.x + 0.5; long rooty = root.y + 0.5; long rootz = root.z + 0.5; long root_ind = rootz*sz01 + rooty*sz0 + rootx; state[root_ind] = ALIVE; phi[root_ind] = 0.0; BasicHeap<HeapElemX> heap; map<long, HeapElemX*> elems; // init heap { long index = root_ind; HeapElemX *elem = new HeapElemX(index, phi[index]); elem->prev_ind = index; heap.insert(elem); elems[index] = elem; } // loop int time_counter = 1; double process1 = 0; while(!heap.empty()) { double process2 = (time_counter++)*10000.0/tol_sz; //cout<<"\r"<<((int)process2)/100.0<<"%";cout.flush(); if(process2 - process1 >= 1) { cout<<"\r"<<((int)process2)/100.0<<"%";cout.flush(); process1 = process2; } HeapElemX* min_elem = heap.delete_min(); elems.erase(min_elem->img_ind); long min_ind = min_elem->img_ind; long prev_ind = min_elem->prev_ind; delete min_elem; parent[min_ind] = prev_ind; state[min_ind] = ALIVE; int i = min_ind % sz0; int j = (min_ind/sz0) % sz1; int k = (min_ind/sz01) % sz2; int w, h, d; for(int kk = -1; kk <= 1; kk++) { d = k+kk; if(d < 0 || d >= sz2) continue; for(int jj = -1; jj <= 1; jj++) { h = j+jj; if(h < 0 || h >= sz1) continue; for(int ii = -1; ii <= 1; ii++) { w = i+ii; if(w < 0 || w >= sz0) continue; int offset = ABS(ii) + ABS(jj) + ABS(kk); if(offset == 0 || offset > cnn_type) continue; double factor = (offset == 1) ? 1.0 : ((offset == 2) ? 1.414214 : ((offset == 3) ? 
1.732051 : 0.0)); long index = d*sz01 + h*sz0 + w; if (is_break_accept) { if(inimg1d[index] <= bkg_thresh && inimg1d[min_ind] <= bkg_thresh) continue; } else { if(inimg1d[index] <= bkg_thresh) continue; } if(state[index] != ALIVE) { //change by wp //double new_dist = phi[min_ind] + (GI(index) + GI(min_ind))*factor*0.5; double vec[3] = { ii, jj, kk }; double new_dist = phi[min_ind] + (GI(index) + GI(min_ind))*factor*0.5; //double consin_dist = consin(direction[index], direction[min_ind],3); double consin_dist = double(exp((consin(vec, direction[min_ind], 3) - 1)) * 2000); //consin_dist = 0; //delete[] vec; cout << "new_dist:" << (GI(index) + GI(min_ind))*factor*0.5 << "consin_dist:" << consin_dist << endl; new_dist += consin_dist; /*new_dist += consin_dist;*/ long prev_ind = min_ind; if(state[index] == FAR) { phi[index] = new_dist; HeapElemX * elem = new HeapElemX(index, phi[index]); elem->prev_ind = prev_ind; heap.insert(elem); elems[index] = elem; state[index] = TRIAL; } else if(state[index] == TRIAL) { if (phi[index] > new_dist) { phi[index] = new_dist; HeapElemX * elem = elems[index]; heap.adjust(elem->heap_id, phi[index]); elem->prev_ind = prev_ind; } } } } } } } // save current swc tree if (1) { int i = -1, j = -1, k = -1; map<long, MyMarker*> tmp_map; for(long ind = 0; ind < tol_sz; ind++) { i++; if(i%sz0 == 0){i=0;j++; if(j%sz1 == 0) {j=0; k++;}} if(state[ind] != ALIVE) continue; MyMarker * marker = new MyMarker(i,j,k); tmp_map[ind] = marker; outtree.push_back(marker); } i=-1; j = -1; k = -1; for(long ind = 0; ind < tol_sz; ind++) { i++; if(i%sz0 == 0){i=0; j++; if(j%sz1==0){j=0; k++;}} if(state[ind] != ALIVE) continue; long ind2 = parent[ind]; MyMarker * marker1 = tmp_map[ind]; MyMarker * marker2 = tmp_map[ind2]; if(marker1 == marker2) marker1->parent = 0; else marker1->parent = marker2; //tmp_map[ind]->parent = tmp_map[ind2]; } } // over map<long, HeapElemX*>::iterator mit = elems.begin(); while (mit != elems.end()) { HeapElemX * elem = mit->second; delete elem; mit++; } if(phi){delete [] phi; phi = 0;} if(parent){delete [] parent; parent = 0;} if(state) {delete [] state; state = 0;} return true; } /********************************************************************* * Function : fastmarching_tree * * Features : * 1. Create fast marcing tree from root marker * 2. Background (intensity 0) will be ignored. * 3. 
Euclidean distance is used * * Input : root root marker * inimg1d original 8bit image * * Output : tree output swc * phi the distance for each pixels * *******************************************************************/ // inimg1d is binary template<class T> bool fastmarching_tree_old(MyMarker root, T * inimg1d, vector<MyMarker*> &tree, double * & phi, int sz0, int sz1, int sz2) { enum{ALIVE = -1, TRIAL = 0, FAR = 1}; long tol_sz = sz0 * sz1 * sz2; long sz01 = sz0 * sz1; int rootx = (int)(root.x + 0.5); int rooty = (int)(root.y + 0.5); int rootz = (int)(root.z + 0.5); long root_ind = rootz*sz01 + rooty*sz0 + rootx; if(inimg1d[root_ind] == 0){cerr<<"the root position is not in forground"<<endl; return false;} int cnn_type = 1; // cnn_type should be 1 phi = new double[tol_sz]; for(long i = 0; i < tol_sz; i++) phi[i] = INF; // initialization char * state = new char[tol_sz]; for(long i = 0; i < tol_sz; i++) state[i] = FAR; state[root_ind] = ALIVE; phi[root_ind] = 0.0; BasicHeap<HeapElemXX> heap; map<long, HeapElemXX*> elems; // init state around root int i = rootx, j = rooty, k = rootz; MyMarker * root_node = new MyMarker(i,j,k); tree.push_back(root_node); int w, h, d; for(int kk = -1; kk <= 1; kk++) { d = k + kk; if(d < 0 || d >= sz2) continue; for(int jj = -1; jj <= 1; jj++) { h = j + jj; if(h < 0 || h >= sz1) continue; for(int ii = -1; ii <= 1; ii++) { w = i + ii; if(w < 0 || w >= sz0) continue; int offset = ABS(ii) + ABS(jj) + ABS(kk); if(offset == 0 || offset > cnn_type) continue; long index = d*sz01 + h*sz0 + w; if(inimg1d[index] == 0) continue; if(state[index] == FAR) { state[index] = TRIAL; double u1 = INF; double u2 = INF; double u3 = INF; if(w - 1 >= 0 && state[index - 1] == ALIVE) u1 = u1 < phi[index - 1]? u1: phi[index -1 ]; if(w + 1 < sz0 && state[index + 1] == ALIVE) u1 = u1 < phi[index + 1] ? u1: phi[index + 1]; if(h - 1 >= 0 && state[index - sz0] == ALIVE) u2 = u2 < phi[index - sz0] ? u2:phi[index-sz0]; if(h + 1 < sz1 && state[index + sz0] == ALIVE) u2 = u2 < phi[index + sz0] ? u2:phi[index + sz0]; if(d - 1 >=0 && state[index - sz0*sz1] == ALIVE) u3 = u3 < phi[index - sz0*sz1] ? u3: phi[index -sz0*sz1]; if(d + 1 < sz2 && state[index + sz0*sz1] == ALIVE) u3 = u3 < phi[index + sz0*sz1] ? 
u3: phi[index + sz0*sz1]; vector<double> parameters; if( u1 != INF) parameters.push_back(u1); if( u2 != INF) parameters.push_back(u2); if( u3 != INF) parameters.push_back(u3); phi[index] = upwind_solver(parameters); HeapElemXX *elem = new HeapElemXX(index, phi[index], root_node); heap.insert(elem); elems[index] = elem; } } } } // loop int time_counter = 1; int process1 = 0, process2 = 0; while(!heap.empty()) { double process2 = (time_counter++)*1000.0/tol_sz; if(process2 - process1 >= 1) { cout<<"\r"<<((int)process2)/10.0<<"%";cout.flush(); process1 = process2; } HeapElemXX* min_elem = heap.delete_min(); elems.erase(min_elem->img_ind); long min_ind = min_elem->img_ind; state[min_ind] = ALIVE; int i = min_ind % sz0; int j = (min_ind/sz0) % sz1; int k = (min_ind/sz01) % sz2; MyMarker * cur_marker = new MyMarker(i,j,k); cur_marker->parent = min_elem->parent_marker; tree.push_back(cur_marker); delete min_elem; int w, h, d; for(int kk = -1; kk <= 1; kk++) { d = k+kk; if(d < 0 || d >= sz2) continue; for(int jj = -1; jj <= 1; jj++) { h = j+jj; if(h < 0 || h >= sz1) continue; for(int ii = -1; ii <= 1; ii++) { w = i+ii; if(w < 0 || w >= sz0) continue; int offset = ABS(ii) + ABS(jj) + ABS(kk); if(offset == 0 || offset > cnn_type) continue; long index = d*sz01 + h*sz0 + w; if(inimg1d[index] == 0) continue; if(state[index] != ALIVE) { double u1 = INF; double u2 = INF; double u3 = INF; if(w - 1 >= 0 && state[index - 1] == ALIVE) u1 = u1 < phi[index - 1]? u1: phi[index -1 ]; if(w + 1 < sz0 && state[index + 1] == ALIVE) u1 = u1 < phi[index + 1] ? u1: phi[index + 1]; if(h - 1 >= 0 && state[index - sz0] == ALIVE) u2 = u2 < phi[index - sz0] ? u2:phi[index-sz0]; if(h + 1 < sz1 && state[index + sz0] == ALIVE) u2 = u2 < phi[index + sz0] ? u2:phi[index + sz0]; if(d - 1 >=0 && state[index - sz0*sz1] == ALIVE) u3 = u3 < phi[index - sz0*sz1] ? u3: phi[index -sz0*sz1]; if(d + 1 < sz2 && state[index + sz0*sz1] == ALIVE) u3 = u3 < phi[index + sz0*sz1] ? u3: phi[index + sz0*sz1]; vector<double> parameters; if( u1 != INF) parameters.push_back(u1); if( u2 != INF) parameters.push_back(u2); if( u3 != INF) parameters.push_back(u3); double solver_result = upwind_solver(parameters); if(state[index] == FAR) { phi[index] = solver_result; HeapElemXX * elem = new HeapElemXX(index, phi[index], cur_marker); heap.insert(elem); elems[index] = elem; state[index] = TRIAL; } else if(state[index] == TRIAL) { if(phi[index] > solver_result) { phi[index] = solver_result; HeapElemXX * elem = elems[index]; heap.adjust(elem->heap_id, phi[index]); elem->parent_marker = cur_marker; } } } } } } } for(long i = 0; i < tol_sz; i++) if(phi[i] == INF) phi[i] = 0; if(state) {delete [] state; state = 0;} return true; } /****************************************************************************** * Fast marching based tree construction * 1. use graph augmented distance (GD) * 2. stop when all target marker are marched * * Input : root root marker * target the set of target markers * inimg1d original input image * * Output : outtree output tracing result * * Notice : * 1. the input pixel number should not be larger than 2G if sizeof(long) == 4 * 2. target markers should not contain root marker * 3. the root marker in outswc, is point to itself * 4. 
the cnn_type is default 3 * *****************************************************************************/ template<class T> bool fastmarching_tree(MyMarker root, vector<MyMarker> &target, T * inimg1d, vector<MyMarker*> &outtree, long sz0, long sz1, long sz2, int cnn_type = 3) { enum{ALIVE = -1, TRIAL = 0, FAR = 1}; long tol_sz = sz0 * sz1 * sz2; long sz01 = sz0 * sz1; //int cnn_type = 3; // ? //float * phi = new float[tol_sz]; for(long i = 0; i < tol_sz; i++){phi[i] = INF;} //long * parent = new long[tol_sz]; for(long i = 0; i < tol_sz; i++) parent[i] = i; // each pixel point to itself at the beginning long i; float * phi = 0; long * parent = 0; char * state = 0; try { phi = new float[tol_sz]; parent = new long[tol_sz]; state = new char[tol_sz]; for(i = 0; i < tol_sz; i++) { phi[i] = INF; parent[i] = i; // each pixel point to itself at the statements beginning state[i] = FAR; } } catch (...) { cout << "********* Fail to allocate memory. quit fastmarching_tree()." << endl; if (phi) {delete []phi; phi=0;} if (parent) {delete []parent; parent=0;} if (state) {delete []state; state=0;} return false; } // GI parameter min_int, max_int, li double max_int = 0; // maximum intensity, used in GI double min_int = INF; for(long i = 0; i < tol_sz; i++) { if(inimg1d[i] > max_int) max_int = inimg1d[i]; if(inimg1d[i] < min_int) min_int = inimg1d[i]; } max_int -= min_int; double li = 10; // initialization //char * state = new char[tol_sz]; //for(long i = 0; i < tol_sz; i++) state[i] = FAR; // init state and phi for root int rootx = root.x + 0.5; int rooty = root.y + 0.5; int rootz = root.z + 0.5; long root_ind = rootz*sz01 + rooty*sz0 + rootx; state[root_ind] = ALIVE; phi[root_ind] = 0.0; vector<long> target_inds; for(long t = 0; t < target.size(); t++) { int i = target[t].x + 0.5; int j = target[t].y + 0.5; int k = target[t].z + 0.5; long ind = k*sz01 + j*sz0 + i; target_inds.push_back(ind); //if(ind == root_ind) {cerr<<"please remove root marker from target markers"<<endl; exit(0);} } BasicHeap<HeapElemX> heap; map<long, HeapElemX*> elems; // init heap { long index = root_ind; HeapElemX *elem = new HeapElemX(index, phi[index]); elem->prev_ind = index; heap.insert(elem); elems[index] = elem; } // loop int time_counter = 1; double process1 = 0; while(!heap.empty()) { double process2 = (time_counter++)*100000.0/tol_sz; if(process2 - process1 >= 1) { cout<<"\r"<<((int)process2)/1000.0<<"%";cout.flush(); process1 = process2; bool is_break = true; for(int t = 0; t < target_inds.size(); t++){long tind = target_inds[t]; if(parent[tind] == tind && tind != root_ind) {is_break = false; break;}} if(is_break) break; } HeapElemX* min_elem = heap.delete_min(); elems.erase(min_elem->img_ind); long min_ind = min_elem->img_ind; long prev_ind = min_elem->prev_ind; delete min_elem; parent[min_ind] = prev_ind; state[min_ind] = ALIVE; int i = min_ind % sz0; int j = (min_ind/sz0) % sz1; int k = (min_ind/sz01) % sz2; int w, h, d; for(int kk = -1; kk <= 1; kk++) { d = k+kk; if(d < 0 || d >= sz2) continue; for(int jj = -1; jj <= 1; jj++) { h = j+jj; if(h < 0 || h >= sz1) continue; for(int ii = -1; ii <= 1; ii++) { w = i+ii; if(w < 0 || w >= sz0) continue; int offset = ABS(ii) + ABS(jj) + ABS(kk); if(offset == 0 || offset > cnn_type) continue; double factor = (offset == 1) ? 1.0 : ((offset == 2) ? 1.414214 : ((offset == 3) ? 
1.732051 : 0.0)); long index = d*sz01 + h*sz0 + w; if(state[index] != ALIVE) { double new_dist = phi[min_ind] + (GI(index) + GI(min_ind))*factor*0.5; long prev_ind = min_ind; if(state[index] == FAR) { phi[index] = new_dist; HeapElemX * elem = new HeapElemX(index, phi[index]); elem->prev_ind = prev_ind; heap.insert(elem); elems[index] = elem; state[index] = TRIAL; } else if(state[index] == TRIAL) { if(phi[index] > new_dist) { phi[index] = new_dist; HeapElemX * elem = elems[index]; heap.adjust(elem->heap_id, phi[index]); elem->prev_ind = prev_ind; } } } } } } } // save current swc tree if(1) { int i = -1, j = -1, k = -1; map<long, MyMarker*> tmp_map; for(long ind = 0; ind < tol_sz; ind++) { i++; if(i%sz0 == 0){i=0;j++; if(j%sz1 == 0) {j=0; k++;}} if(state[ind] != ALIVE) continue; MyMarker * marker = new MyMarker(i,j,k); tmp_map[ind] = marker; outtree.push_back(marker); } i=-1; j = -1; k = -1; for(long ind = 0; ind < tol_sz; ind++) { i++; if(i%sz0 == 0){i=0; j++; if(j%sz1==0){j=0; k++;}} if(state[ind] != ALIVE) continue; long ind2 = parent[ind]; MyMarker * marker1 = tmp_map[ind]; MyMarker * marker2 = tmp_map[ind2]; if(marker1 == marker2) marker1->parent = 0; else marker1->parent = marker2; //tmp_map[ind]->parent = tmp_map[ind2]; } } // over map<long, HeapElemX*>::iterator mit = elems.begin(); while(mit != elems.end()){HeapElemX * elem = mit->second; delete elem; mit++;} if(phi){delete [] phi; phi = 0;} if(parent){delete [] parent; parent = 0;} if(state) {delete [] state; state = 0;} return true; } /****************************************************************************** * Fast marching based manual tracing, with root marker and a set of target marker * * Input : root root marker * target the set of target markers * inimg1d original input image * * Output : outswc output tracing result * phi finial distance value for each pixel [todo : replace INF to 0] * * Notice : * 1. the input pixel number should not be larger than 2G if sizeof(long) == 4 * 2. target markers should not contain root marker * 3. the root marker in outswc, is point to itself * 4. the cnn_type is default 3 * *****************************************************************************/ template<class T1, class T2> bool fastmarching_tracing(MyMarker root, vector<MyMarker> &target, T1 * inimg1d, vector<MyMarker*> &outswc, T2 * &phi, int sz0, int sz1, int sz2, int cnn_type = 3) { int rootx = root.x + 0.5; int rooty = root.y + 0.5; int rootz = root.z + 0.5; if(rootx < 0 || rootx >= sz0 || rooty < 0 || rooty >= sz1 || rootz < 0 || rootz >= sz2) { cerr<<"Invalid root marker("<<root.x<<","<<root.y<<","<<root.z<<")"<<endl; return false; } enum{ALIVE = -1, TRIAL = 0, FAR = 1}; long tol_sz = sz0 * sz1 * sz2; long sz01 = sz0 * sz1; //int cnn_type = 3; // ? 
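// Geodesic cost used below (GI is a macro from fastmarching_macro.h; its exact form is an
// assumption here, inferred from the APP2-style weighting that the li/min_int/max_int
// parameters suggest):
//     gI(x) ~ exp( li * (1 - (I(x) - min_int) / max_int)^2 ),  with li = 10,
// i.e. bright voxels are cheap to cross and dark voxels expensive. Each accepted step then
// costs 0.5 * (GI(index) + GI(min_ind)) * factor, where factor is the Euclidean length of the
// 26-neighbour offset (1, 1.414214 or 1.732051).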
if(phi == 0) phi = new T2[tol_sz]; for(long i = 0; i < tol_sz; i++){phi[i] = INF;} long * parent = new long[tol_sz]; for(long i = 0; i < tol_sz; i++) parent[i] = i; // each pixel point to itself at the beginning // GI parameter min_int, max_int, li double max_int = 0; // maximum intensity, used in GI double min_int = INF; for(long i = 0; i < tol_sz; i++) { if(inimg1d[i] > max_int) max_int = inimg1d[i]; if(inimg1d[i] < min_int) min_int = inimg1d[i]; } max_int -= min_int; double li = 10; // initialization char * state = new char[tol_sz]; for(long i = 0; i < tol_sz; i++) state[i] = FAR; long root_ind = rootz*sz01 + rooty*sz0 + rootx; state[root_ind] = ALIVE; phi[root_ind] = 0.0; vector<long> target_inds; for(long t = 0; t < target.size(); t++) { int i = target[t].x + 0.5; int j = target[t].y + 0.5; int k = target[t].z + 0.5; if(i < 0 || i >= sz0 || j < 0 || j >= sz1 || k < 0 || k >= sz2) { cerr<<"t = "<<t+1<<", invalid target marker("<<target[t].x<<","<<target[t].y<<","<<target[t].z<<")"<<endl; continue; } long ind = k*sz01 + j*sz0 + i; target_inds.push_back(ind); //if(ind == root_ind) {cerr<<"please remove root marker from target markers"<<endl; exit(0);} } BasicHeap<HeapElemX> heap; map<long, HeapElemX*> elems; // init heap { long index = root_ind; HeapElemX *elem = new HeapElemX(index, phi[index]); elem->prev_ind = index; heap.insert(elem); elems[index] = elem; } // loop int time_counter = 1; double process1 = 0; while(!heap.empty()) { double process2 = (time_counter++)*100000.0/tol_sz; if(process2 - process1 >= 1) { cout<<"\r"<<((int)process2)/1000.0<<"%";cout.flush(); process1 = process2; bool is_break = true; for(int t = 0; t < target_inds.size(); t++){long tind = target_inds[t]; if(parent[tind] == tind && tind != root_ind) {is_break = false; break;}} if(is_break) break; } HeapElemX* min_elem = heap.delete_min(); elems.erase(min_elem->img_ind); long min_ind = min_elem->img_ind; long prev_ind = min_elem->prev_ind; delete min_elem; parent[min_ind] = prev_ind; state[min_ind] = ALIVE; int i = min_ind % sz0; int j = (min_ind/sz0) % sz1; int k = (min_ind/sz01) % sz2; int w, h, d; for(int kk = -1; kk <= 1; kk++) { d = k+kk; if(d < 0 || d >= sz2) continue; for(int jj = -1; jj <= 1; jj++) { h = j+jj; if(h < 0 || h >= sz1) continue; for(int ii = -1; ii <= 1; ii++) { w = i+ii; if(w < 0 || w >= sz0) continue; int offset = ABS(ii) + ABS(jj) + ABS(kk); if(offset == 0 || offset > cnn_type) continue; double factor = (offset == 1) ? 1.0 : ((offset == 2) ? 1.414214 : ((offset == 3) ? 
1.732051 : 0.0)); long index = d*sz01 + h*sz0 + w; if(state[index] != ALIVE) { double new_dist = phi[min_ind] + (GI(index) + GI(min_ind))*factor*0.5; long prev_ind = min_ind; if(state[index] == FAR) { phi[index] = new_dist; HeapElemX * elem = new HeapElemX(index, phi[index]); elem->prev_ind = prev_ind; heap.insert(elem); elems[index] = elem; state[index] = TRIAL; } else if(state[index] == TRIAL) { if(phi[index] > new_dist) { phi[index] = new_dist; HeapElemX * elem = elems[index]; heap.adjust(elem->heap_id, phi[index]); elem->prev_ind = prev_ind; } } } } } } } // extract the tree nodes containing target markers map<long, MyMarker *> marker_map; for(int t = 0; t < target_inds.size(); t++) { long tind = target_inds[t]; long p = tind; while(true) { if(marker_map.find(p) != marker_map.end()) break; int i = p % sz0; int j = p/sz0 % sz1; int k = p/sz01 % sz2; MyMarker * marker = new MyMarker(i,j,k); marker_map[p] = marker; if(p == parent[p]) { assert(p == root.ind(sz0,sz01)); assert(marker_map.find(root.ind(sz0,sz01)) != marker_map.end()); break; } else p = parent[p]; } } if(marker_map.find(root.ind(sz0,sz01)) == marker_map.end()) { cout<<"break here"<<endl; } map<long, MyMarker*>::iterator it = marker_map.begin(); V3DLONG in_sz[4] = {sz0, sz1, sz2, 1}; while(it != marker_map.end()) { long tind = it->first; MyMarker * marker = it->second; MyMarker * parent_marker = marker_map[parent[tind]]; marker->parent = parent_marker; marker->radius = markerRadius(inimg1d, in_sz, *marker, 20); outswc.push_back(marker); it++; } map<long, HeapElemX*>::iterator mit = elems.begin(); while(mit != elems.end()){HeapElemX * elem = mit->second; delete elem; mit++;} if(parent){delete [] parent; parent = 0;} if(state) {delete [] state; state = 0;} return true; } #endif
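The functions above all share one core loop: grow a shortest-distance tree outward from a seed over a 6/18/26-connected voxel grid, keyed by a min-heap of tentative distances. Below is a minimal, self-contained sketch of that loop using std::priority_queue in place of the project's BasicHeap/HeapElemX and a plain lambda in place of the GI macro; the function name, the intensity weighting and all identifiers are illustrative assumptions, not the library API.

#include <cmath>
#include <cstdint>
#include <cstdlib>
#include <functional>
#include <queue>
#include <utility>
#include <vector>

// Dijkstra-style fast marching over a 3D grid: grows a shortest-distance tree from a seed,
// visiting 6/18/26-connected neighbours depending on cnn_type (1, 2 or 3).
std::vector<long> march_tree(const std::vector<uint8_t>& img,
                             long sz0, long sz1, long sz2,
                             long seed, int cnn_type = 3)
{
    const long sz01 = sz0 * sz1, tol = sz0 * sz1 * sz2;
    const double INF_D = 1e30;
    std::vector<double> phi(tol, INF_D);
    std::vector<long>   parent(tol);
    std::vector<char>   alive(tol, 0);
    for (long i = 0; i < tol; ++i) parent[i] = i;

    // Illustrative intensity weighting: darker voxels cost more to cross.
    auto g = [&](long ind) { return 1.0 + (255.0 - img[ind]); };

    using Node = std::pair<double, long>;                      // (distance, voxel index)
    std::priority_queue<Node, std::vector<Node>, std::greater<Node>> heap;
    phi[seed] = 0.0;
    heap.push({0.0, seed});

    const double step[4] = {0.0, 1.0, 1.414214, 1.732051};     // Euclidean length per offset
    while (!heap.empty()) {
        auto [dist, cur] = heap.top(); heap.pop();
        if (alive[cur]) continue;                              // stale heap entry
        alive[cur] = 1;
        long x = cur % sz0, y = (cur / sz0) % sz1, z = cur / sz01;
        for (int kk = -1; kk <= 1; ++kk)
            for (int jj = -1; jj <= 1; ++jj)
                for (int ii = -1; ii <= 1; ++ii) {
                    int off = std::abs(ii) + std::abs(jj) + std::abs(kk);
                    if (off == 0 || off > cnn_type) continue;
                    long nx = x + ii, ny = y + jj, nz = z + kk;
                    if (nx < 0 || nx >= sz0 || ny < 0 || ny >= sz1 || nz < 0 || nz >= sz2) continue;
                    long nb = nz * sz01 + ny * sz0 + nx;
                    if (alive[nb]) continue;
                    double nd = dist + 0.5 * (g(cur) + g(nb)) * step[off];
                    if (nd < phi[nb]) { phi[nb] = nd; parent[nb] = cur; heap.push({nd, nb}); }
                }
    }
    return parent;                                             // parent[seed] == seed marks the root
}

The parent array returned here plays the same role as parent[] in the functions above: walking parent[ind] repeatedly from any reached voxel leads back to the seed, which is exactly how the SWC tree and the traced branches are reconstructed.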
// last change:by PHC, 2013 - 02 - 13. adjust memory allocation to make it more robust /***************************************************************** * file : fastmarching_tree.h, Hang Xiao, Jan 18, 2012 * * fastmarching_tree * fastmarching_tracing * * **************************************************************/ #ifndef __FAST_MARCHING_TREE_H__ #define __FAST_MARCHING_TREE_H__ #include <cstdlib> #include <cmath> #include <vector> #include <map> #include <iostream> #include "stackutil.h" #include "my_surf_objs.h" #include "heap.h" #include "upwind_solver.h" #include "fastmarching_macro.h" using namespace std; #ifndef ABS #define ABS(x) ((x) > 0 ? (x) : (-(x))) #endif #ifndef MAX #define MAX(x,y) ((x) > (y) ? (x) : (y)) #endif #ifndef MIN #define MIN(x,y) ((x) < (y) ? (x) : (y)) #endif double consin(double vec1[], double vec2[], int len) { double sum = 0; double A = 0; double B = 0; for (int i = 0; i < len; i++) { sum += vec1[i] * vec2[i]; A += vec1[i] * vec1[i]; B += vec2[i] * vec2[i]; } return ABS(sqrt(A * B) / sum); } struct HeapElemXX:public HeapElem { MyMarker *parent_marker; HeapElemXX(long _ind, double _value, MyMarker * _parent_marker): HeapElem(_ind, _value) { parent_marker = _parent_marker; } }; /********************************************************************* * Function : fastmarching_linear_tree * * Features : * 1. Create fast marcing tree from root marker only * 2. Background (intensity less than bkg_thresh) will be ignored. * 3. The distance is the sum of intensity *** * * Input : root root marker * inimg1d original 8bit image * * Output : tree output swc * phi the distance for each pixels * *******************************************************************/ template < class T > bool fastmarching_linear_tree(MyMarker root, T * inimg1d, vector < MyMarker * >&outtree, int sz0, int sz1, int sz2, int cnn_type = 3, double bkg_thresh = 1) { enum { ALIVE = -1, TRIAL = 0, FAR = 1}; long tol_sz = sz0 * sz1 * sz2; long sz01 = sz0 * sz1; //int cnn_type = 3; //? 
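// Distance model used by this linear variant: every accepted step adds
//     1000 * (1 - (I(neighbour) - I_min) / (I_max - I_min)),
// so bright voxels extend the tree almost for free and dim voxels are heavily penalised.
// In the update inside the loop the subtracted term is min_ind (the index of the voxel just
// popped from the heap); the intensity minimum min_int appears to be the intended term.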
float *phi = new float[tol_sz]; for (long i = 0; i < tol_sz; i++) { phi[i] = INF; } long *parent = new long[tol_sz]; for (long i = 0; i < tol_sz; i++) parent[i] = i; //each pixel point to itself at the beginning // GI parameter min_int, max_int, li double max_int = 0; //maximum intensity, used in GI double min_int = INF; for (long i = 0; i < tol_sz; i++) { if (inimg1d[i] > max_int) max_int = inimg1d[i]; if (inimg1d[i] < min_int) min_int = inimg1d[i]; } max_int -= min_int; double li = 10; //initialization char *state = new char[tol_sz]; for (long i = 0; i < tol_sz; i++) state[i] = FAR; //init state and phi for root int rootx = root.x + 0.5; int rooty = root.y + 0.5; int rootz = root.z + 0.5; long root_ind = rootz * sz01 + rooty * sz0 + rootx; state[root_ind] = ALIVE; phi[root_ind] = 0.0; BasicHeap < HeapElemX > heap; map < long, HeapElemX * >elems; //init heap { long index = root_ind; HeapElemX *elem = new HeapElemX(index, phi[index]); elem->prev_ind = index; heap.insert(elem); elems[index] = elem; } //loop int time_counter = 1; double process1 = 0; while (!heap.empty()) { double process2 = (time_counter++) * 10000.0 / tol_sz; if (process2 - process1 >= 1) { cout << "\r" << ((int)process2) / 100.0 << "%"; cout.flush(); process1 = process2; } HeapElemX *min_elem = heap.delete_min(); elems.erase(min_elem->img_ind); long min_ind = min_elem->img_ind; long prev_ind = min_elem->prev_ind; delete min_elem; parent[min_ind] = prev_ind; state[min_ind] = ALIVE; int i = min_ind % sz0; int j = (min_ind / sz0) % sz1; int k = (min_ind / sz01) % sz2; int w, h, d; for (int kk = -1; kk <= 1; kk++) { d = k + kk; if (d < 0 || d >= sz2) continue; for (int jj = -1; jj <= 1; jj++) { h = j + jj; if (h < 0 || h >= sz1) continue; for (int ii = -1; ii <= 1; ii++) { w = i + ii; if (w < 0 || w >= sz0) continue; int offset = ABS(ii) + ABS(jj) + ABS(kk); if (offset == 0 || offset > cnn_type) continue; double factor = (offset == 1) ? 1.0 : ((offset == 2) ? 1.414214 : ((offset == 3) ? 
1.732051 : 0.0)); long index = d * sz01 + h * sz0 + w; if (inimg1d[index] <= bkg_thresh) continue; if (state[index] != ALIVE) { double new_dist = phi[min_ind] + (1.0 - (inimg1d[index] - min_ind) / max_int) * 1000.0; long prev_ind = min_ind; if (state[index] == FAR) { phi[index] = new_dist; HeapElemX *elem = new HeapElemX(index, phi[index]); elem->prev_ind = prev_ind; heap.insert(elem); elems[index] = elem; state[index] = TRIAL; } else if (state[index] == TRIAL) { if (phi[index] > new_dist) { phi[index] = new_dist; HeapElemX *elem = elems[index]; heap.adjust(elem->heap_id, phi[index]); elem->prev_ind = prev_ind; } } } } } } } //save current swc tree if (1) { int i = -1, j = -1, k = -1; map < long, MyMarker * >tmp_map; for (long ind = 0; ind < tol_sz; ind++) { i++; if (i % sz0 == 0) { i = 0; j++; if (j % sz1 == 0) { j = 0; k++; } } if (state[ind] != ALIVE) continue; MyMarker *marker = new MyMarker(i, j, k); tmp_map[ind] = marker; outtree.push_back(marker); } i = -1; j = -1; k = -1; for (long ind = 0; ind < tol_sz; ind++) { i++; if (i % sz0 == 0) { i = 0; j++; if (j % sz1 == 0) { j = 0; k++; } } if (state[ind] != ALIVE) continue; long ind2 = parent[ind]; MyMarker *marker1 = tmp_map[ind]; MyMarker *marker2 = tmp_map[ind2]; if (marker1 == marker2) marker1->parent = 0; else marker1->parent = marker2; //tmp_map[ind]->parent = tmp_map[ind2]; } } //over map < long, HeapElemX * >::iterator mit = elems.begin(); while (mit != elems.end()) { HeapElemX *elem = mit->second; delete elem; mit++; } if (phi) { delete[] phi; phi = 0; } if (parent) { delete[] parent; parent = 0; } if (state) { delete[] state; state = 0; } return true; } /********************************************************************* * Function : fastmarching_tree * * Features : * 1. Create fast marcing tree from root marker only * 2. Background (intensity 0) will be ignored. * 3. 
Graph augumented distance is used * * Input : root root marker * inimg1d original 8bit image * * Output : tree output swc * phi the distance for each pixels * *******************************************************************/ template < class T > bool fastmarching_tree(MyMarker root, T * inimg1d, vector < MyMarker * >&outtree, long sz0, long sz1, long sz2, int cnn_type = 3, double bkg_thresh = 20, bool is_break_accept = false) { cout << "wp_debug: " << __FUNCTION__ << " " << __LINE__ << endl; //add dircetion T *** indata3d; indata3d = (T ***) malloc(sz2 * sizeof(T **)); for (int i = 0; i < sz2; i++) { indata3d[i] = (T **) malloc(sz1 * sizeof(T *)); for (int j = 0; j < sz1; j++) { indata3d[i][j] = (T *) malloc(sz0 * sizeof(T)); } } for (int z = 0; z < sz2; z++) { for (int y = 0; y < sz1; y++) { for (int x = 0; x < sz0; x++) { indata3d[z][y][x] = inimg1d[z * sz1 * sz0 + y * sz0 + x]; } } } double **direction; direction = (double **)malloc(sz0 * sz1 * sz2 * sizeof(double *)); for (int i = 0; i < sz0 * sz1 * sz2; i++) { direction[i] = (double *)malloc(3 * sizeof(double)); } LocationSimple pt; double sigma1, sigma2, sigma3; double vec1[3], vec2[3], vec3[3]; //endTime1 = GetTickCount(); omp_set_num_threads(10); for (int i = 0; i < sz0 * sz1 * sz2; i++) { pt.x = i % sz0; pt.y = int (i / sz0) % sz1; pt.z = int (i / (sz0 * sz1)); pt.radius = 8; //cout << pt.x << " " << pt.y << " " << pt.z << endl; compute_rgn_stat_new(pt, indata3d, 1, sz0, sz1, sz2, 1, V3D_UINT8, vec1, vec2, vec3, sigma1, sigma2, sigma3); direction[i][0] = vec1[0]; direction[i][1] = vec1[1]; direction[i][2] = vec1[2]; //cout << i << endl; } cout << "wp_debug: " << __FUNCTION__ << " " << __LINE__ << endl; //end dircetiond enum { ALIVE = -1, TRIAL = 0, FAR = 1}; long tol_sz = sz0 * sz1 * sz2; long sz01 = sz0 * sz1; long i; //int cnn_type = 3; //? float *phi = 0; long *parent = 0; char *state = 0; try { phi = new float[tol_sz]; parent = new long[tol_sz]; state = new char[tol_sz]; for (i = 0; i < tol_sz; i++) { phi[i] = INF; parent[i] = i; //each pixel point to itself at the statements beginning state[i] = FAR; } } catch(...) { cout << "********* Fail to allocate memory. quit fastmarching_tree()." 
<< endl; if (phi) { delete[] phi; phi = 0; } if (parent) { delete[] parent; parent = 0; } if (state) { delete[] state; state = 0; } return false; } //GI parameter min_int, max_int, li double max_int = 0; //maximum intensity, used in GI double min_int = INF; for (i = 0; i < tol_sz; i++) { if (inimg1d[i] > max_int) max_int = inimg1d[i]; else if (inimg1d[i] < min_int) min_int = inimg1d[i]; } max_int -= min_int; double li = 10; //initialization // init state and phi for root long rootx = root.x + 0.5; long rooty = root.y + 0.5; long rootz = root.z + 0.5; long root_ind = rootz * sz01 + rooty * sz0 + rootx; state[root_ind] = ALIVE; phi[root_ind] = 0.0; BasicHeap < HeapElemX > heap; map < long, HeapElemX * >elems; //init heap { long index = root_ind; HeapElemX *elem = new HeapElemX(index, phi[index]); elem->prev_ind = index; heap.insert(elem); elems[index] = elem; } //loop int time_counter = 1; double process1 = 0; while (!heap.empty()) { double process2 = (time_counter++) * 10000.0 / tol_sz; //cout << "\r" << ((int)process2) / 100.0 << "%"; cout.flush(); if (process2 - process1 >= 1) { cout << "\r" << ((int)process2) / 100.0 << "%"; cout.flush(); process1 = process2; } HeapElemX *min_elem = heap.delete_min(); elems.erase(min_elem->img_ind); long min_ind = min_elem->img_ind; long prev_ind = min_elem->prev_ind; delete min_elem; parent[min_ind] = prev_ind; state[min_ind] = ALIVE; int i = min_ind % sz0; int j = (min_ind / sz0) % sz1; int k = (min_ind / sz01) % sz2; int w, h, d; for (int kk = -1; kk <= 1; kk++) { d = k + kk; if (d < 0 || d >= sz2) continue; for (int jj = -1; jj <= 1; jj++) { h = j + jj; if (h < 0 || h >= sz1) continue; for (int ii = -1; ii <= 1; ii++) { w = i + ii; if (w < 0 || w >= sz0) continue; int offset = ABS(ii) + ABS(jj) + ABS(kk); if (offset == 0 || offset > cnn_type) continue; double factor = (offset == 1) ? 1.0 : ((offset == 2) ? 1.414214 : ((offset == 3) ? 
1.732051 : 0.0)); long index = d * sz01 + h * sz0 + w; if (is_break_accept) { if (inimg1d[index] <= bkg_thresh && inimg1d[min_ind] <= bkg_thresh) continue; } else { if (inimg1d[index] <= bkg_thresh) continue; } if (state[index] != ALIVE) { //change by wp // double new_dist = phi[min_ind] + (GI(index) + GI(min_ind)) * factor * 0.5; double vec[3] = {ii, jj, kk}; double new_dist = phi[min_ind] + (GI(index) + GI(min_ind)) * factor * 0.5; //double consin_dist = consin(direction[index], direction[min_ind], 3); double consin_dist = double (exp((consin(vec, direction[min_ind], 3) - 1)) * 2000); //consin_dist = 0; //delete[] vec; cout << "new_dist:" << (GI(index) + GI(min_ind)) * factor * 0.5 << "consin_dist:" << consin_dist << endl; new_dist += consin_dist; /* new_dist += consin_dist; */ long prev_ind = min_ind; if (state[index] == FAR) { phi[index] = new_dist; HeapElemX *elem = new HeapElemX(index, phi[index]); elem->prev_ind = prev_ind; heap.insert(elem); elems[index] = elem; state[index] = TRIAL; } else if (state[index] == TRIAL) { if (phi[index] > new_dist) { phi[index] = new_dist; HeapElemX *elem = elems[index]; heap.adjust(elem->heap_id, phi[index]); elem->prev_ind = prev_ind; } } } } } } } //save current swc tree if (1) { int i = -1, j = -1, k = -1; map < long, MyMarker * >tmp_map; for (long ind = 0; ind < tol_sz; ind++) { i++; if (i % sz0 == 0) { i = 0; j++; if (j % sz1 == 0) { j = 0; k++; } } if (state[ind] != ALIVE) continue; MyMarker *marker = new MyMarker(i, j, k); tmp_map[ind] = marker; outtree.push_back(marker); } i = -1; j = -1; k = -1; for (long ind = 0; ind < tol_sz; ind++) { i++; if (i % sz0 == 0) { i = 0; j++; if (j % sz1 == 0) { j = 0; k++; } } if (state[ind] != ALIVE) continue; long ind2 = parent[ind]; MyMarker *marker1 = tmp_map[ind]; MyMarker *marker2 = tmp_map[ind2]; if (marker1 == marker2) marker1->parent = 0; else marker1->parent = marker2; //tmp_map[ind]->parent = tmp_map[ind2]; } } //over map < long, HeapElemX * >::iterator mit = elems.begin(); while (mit != elems.end()) { HeapElemX *elem = mit->second; delete elem; mit++; } if (phi) { delete[] phi; phi = 0; } if (parent) { delete[] parent; parent = 0; } if (state) { delete[] state; state = 0; } return true; } /********************************************************************* * Function : fastmarching_tree * * Features : * 1. Create fast marcing tree from root marker * 2. Background (intensity 0) will be ignored. * 3. 
Euclidean distance is used * * Input : root root marker * inimg1d original 8bit image * * Output : tree output swc * phi the distance for each pixels * *******************************************************************/ //inimg1d is binary template < class T > bool fastmarching_tree_old(MyMarker root, T * inimg1d, vector < MyMarker * >&tree, double *&phi, int sz0, int sz1, int sz2) { enum { ALIVE = -1, TRIAL = 0, FAR = 1}; long tol_sz = sz0 * sz1 * sz2; long sz01 = sz0 * sz1; int rootx = (int)(root.x + 0.5); int rooty = (int)(root.y + 0.5); int rootz = (int)(root.z + 0.5); long root_ind = rootz * sz01 + rooty * sz0 + rootx; if (inimg1d[root_ind] == 0) { cerr << "the root position is not in forground" << endl; return false; } int cnn_type = 1; //cnn_type should be 1 phi = new double[tol_sz]; for (long i = 0; i < tol_sz; i++) phi[i] = INF; //initialization char *state = new char[tol_sz]; for (long i = 0; i < tol_sz; i++) state[i] = FAR; state[root_ind] = ALIVE; phi[root_ind] = 0.0; BasicHeap < HeapElemXX > heap; map < long, HeapElemXX * >elems; //init state around root int i = rootx, j = rooty, k = rootz; MyMarker *root_node = new MyMarker(i, j, k); tree.push_back(root_node); int w, h, d; for (int kk = -1; kk <= 1; kk++) { d = k + kk; if (d < 0 || d >= sz2) continue; for (int jj = -1; jj <= 1; jj++) { h = j + jj; if (h < 0 || h >= sz1) continue; for (int ii = -1; ii <= 1; ii++) { w = i + ii; if (w < 0 || w >= sz0) continue; int offset = ABS(ii) + ABS(jj) + ABS(kk); if (offset == 0 || offset > cnn_type) continue; long index = d * sz01 + h * sz0 + w; if (inimg1d[index] == 0) continue; if (state[index] == FAR) { state[index] = TRIAL; double u1 = INF; double u2 = INF; double u3 = INF; if (w - 1 >= 0 && state[index - 1] == ALIVE) u1 = u1 < phi[index - 1] ? u1 : phi[index - 1]; if (w + 1 < sz0 && state[index + 1] == ALIVE) u1 = u1 < phi[index + 1] ? u1 : phi[index + 1]; if (h - 1 >= 0 && state[index - sz0] == ALIVE) u2 = u2 < phi[index - sz0] ? u2 : phi[index - sz0]; if (h + 1 < sz1 && state[index + sz0] == ALIVE) u2 = u2 < phi[index + sz0] ? u2 : phi[index + sz0]; if (d - 1 >= 0 && state[index - sz0 * sz1] == ALIVE) u3 = u3 < phi[index - sz0 * sz1] ? u3 : phi[index - sz0 * sz1]; if (d + 1 < sz2 && state[index + sz0 * sz1] == ALIVE) u3 = u3 < phi[index + sz0 * sz1] ? 
u3 : phi[index + sz0 * sz1]; vector < double >parameters; if (u1 != INF) parameters.push_back(u1); if (u2 != INF) parameters.push_back(u2); if (u3 != INF) parameters.push_back(u3); phi[index] = upwind_solver(parameters); HeapElemXX *elem = new HeapElemXX(index, phi[index], root_node); heap.insert(elem); elems[index] = elem; } } } } //loop int time_counter = 1; int process1 = 0, process2 = 0; while (!heap.empty()) { double process2 = (time_counter++) * 1000.0 / tol_sz; if (process2 - process1 >= 1) { cout << "\r" << ((int)process2) / 10.0 << "%"; cout.flush(); process1 = process2; } HeapElemXX *min_elem = heap.delete_min(); elems.erase(min_elem->img_ind); long min_ind = min_elem->img_ind; state[min_ind] = ALIVE; int i = min_ind % sz0; int j = (min_ind / sz0) % sz1; int k = (min_ind / sz01) % sz2; MyMarker *cur_marker = new MyMarker(i, j, k); cur_marker->parent = min_elem->parent_marker; tree.push_back(cur_marker); delete min_elem; int w, h, d; for (int kk = -1; kk <= 1; kk++) { d = k + kk; if (d < 0 || d >= sz2) continue; for (int jj = -1; jj <= 1; jj++) { h = j + jj; if (h < 0 || h >= sz1) continue; for (int ii = -1; ii <= 1; ii++) { w = i + ii; if (w < 0 || w >= sz0) continue; int offset = ABS(ii) + ABS(jj) + ABS(kk); if (offset == 0 || offset > cnn_type) continue; long index = d * sz01 + h * sz0 + w; if (inimg1d[index] == 0) continue; if (state[index] != ALIVE) { double u1 = INF; double u2 = INF; double u3 = INF; if (w - 1 >= 0 && state[index - 1] == ALIVE) u1 = u1 < phi[index - 1] ? u1 : phi[index - 1]; if (w + 1 < sz0 && state[index + 1] == ALIVE) u1 = u1 < phi[index + 1] ? u1 : phi[index + 1]; if (h - 1 >= 0 && state[index - sz0] == ALIVE) u2 = u2 < phi[index - sz0] ? u2 : phi[index - sz0]; if (h + 1 < sz1 && state[index + sz0] == ALIVE) u2 = u2 < phi[index + sz0] ? u2 : phi[index + sz0]; if (d - 1 >= 0 && state[index - sz0 * sz1] == ALIVE) u3 = u3 < phi[index - sz0 * sz1] ? u3 : phi[index - sz0 * sz1]; if (d + 1 < sz2 && state[index + sz0 * sz1] == ALIVE) u3 = u3 < phi[index + sz0 * sz1] ? u3 : phi[index + sz0 * sz1]; vector < double >parameters; if (u1 != INF) parameters.push_back(u1); if (u2 != INF) parameters.push_back(u2); if (u3 != INF) parameters.push_back(u3); double solver_result = upwind_solver(parameters); if (state[index] == FAR) { phi[index] = solver_result; HeapElemXX *elem = new HeapElemXX(index, phi[index], cur_marker); heap.insert(elem); elems[index] = elem; state[index] = TRIAL; } else if (state[index] == TRIAL) { if (phi[index] > solver_result) { phi[index] = solver_result; HeapElemXX *elem = elems[index]; heap.adjust(elem->heap_id, phi[index]); elem->parent_marker = cur_marker; } } } } } } } for (long i = 0; i < tol_sz; i++) if (phi[i] == INF) phi[i] = 0; if (state) { delete[] state; state = 0; } return true; } /****************************************************************************** * Fast marching based tree construction * 1. use graph augmented distance (GD) * 2. stop when all target marker are marched * * Input : root root marker * target the set of target markers * inimg1d original input image * * Output : outtree output tracing result * * Notice : * 1. the input pixel number should not be larger than 2G if sizeof(long) == 4 * 2. target markers should not contain root marker * 3. the root marker in outswc, is point to itself * 4. 
the cnn_type is default 3 * *****************************************************************************/ template < class T > bool fastmarching_tree(MyMarker root, vector < MyMarker > &target, T * inimg1d, vector < MyMarker * >&outtree, long sz0, long sz1, long sz2, int cnn_type = 3) { enum { ALIVE = -1, TRIAL = 0, FAR = 1}; long tol_sz = sz0 * sz1 * sz2; long sz01 = sz0 * sz1; //int cnn_type = 3; //? //float *phi = new float[tol_sz]; for (long i = 0; i < tol_sz; i++) { phi[i] = INF; } //long *parent = new long[tol_sz]; for (long i = 0; i < tol_sz; i++) parent[i] = i; //each pixel point to itself at the beginning long i; float *phi = 0; long *parent = 0; char *state = 0; try { phi = new float[tol_sz]; parent = new long[tol_sz]; state = new char[tol_sz]; for (i = 0; i < tol_sz; i++) { phi[i] = INF; parent[i] = i; //each pixel point to itself at the statements beginning state[i] = FAR; } } catch(...) { cout << "********* Fail to allocate memory. quit fastmarching_tree()." << endl; if (phi) { delete[] phi; phi = 0; } if (parent) { delete[] parent; parent = 0; } if (state) { delete[] state; state = 0; } return false; } //GI parameter min_int, max_int, li double max_int = 0; //maximum intensity, used in GI double min_int = INF; for (long i = 0; i < tol_sz; i++) { if (inimg1d[i] > max_int) max_int = inimg1d[i]; if (inimg1d[i] < min_int) min_int = inimg1d[i]; } max_int -= min_int; double li = 10; //initialization // char *state = new char[tol_sz]; //for (long i = 0; i < tol_sz; i++) state[i] = FAR; //init state and phi for root int rootx = root.x + 0.5; int rooty = root.y + 0.5; int rootz = root.z + 0.5; long root_ind = rootz * sz01 + rooty * sz0 + rootx; state[root_ind] = ALIVE; phi[root_ind] = 0.0; vector < long >target_inds; for (long t = 0; t < target.size(); t++) { int i = target[t].x + 0.5; int j = target[t].y + 0.5; int k = target[t].z + 0.5; long ind = k * sz01 + j * sz0 + i; target_inds.push_back(ind); //if (ind == root_ind) { cerr << "please remove root marker from target markers" << endl; exit(0); } } BasicHeap < HeapElemX > heap; map < long, HeapElemX * >elems; //init heap { long index = root_ind; HeapElemX *elem = new HeapElemX(index, phi[index]); elem->prev_ind = index; heap.insert(elem); elems[index] = elem; } //loop int time_counter = 1; double process1 = 0; while (!heap.empty()) { double process2 = (time_counter++) * 100000.0 / tol_sz; if (process2 - process1 >= 1) { cout << "\r" << ((int)process2) / 1000.0 << "%"; cout.flush(); process1 = process2; bool is_break = true; for (int t = 0; t < target_inds.size(); t++) { long tind = target_inds[t]; if (parent[tind] == tind && tind != root_ind) { is_break = false; break; } } if (is_break) break; } HeapElemX *min_elem = heap.delete_min(); elems.erase(min_elem->img_ind); long min_ind = min_elem->img_ind; long prev_ind = min_elem->prev_ind; delete min_elem; parent[min_ind] = prev_ind; state[min_ind] = ALIVE; int i = min_ind % sz0; int j = (min_ind / sz0) % sz1; int k = (min_ind / sz01) % sz2; int w, h, d; for (int kk = -1; kk <= 1; kk++) { d = k + kk; if (d < 0 || d >= sz2) continue; for (int jj = -1; jj <= 1; jj++) { h = j + jj; if (h < 0 || h >= sz1) continue; for (int ii = -1; ii <= 1; ii++) { w = i + ii; if (w < 0 || w >= sz0) continue; int offset = ABS(ii) + ABS(jj) + ABS(kk); if (offset == 0 || offset > cnn_type) continue; double factor = (offset == 1) ? 1.0 : ((offset == 2) ? 1.414214 : ((offset == 3) ? 
1.732051 : 0.0)); long index = d * sz01 + h * sz0 + w; if (state[index] != ALIVE) { double new_dist = phi[min_ind] + (GI(index) + GI(min_ind)) * factor * 0.5; long prev_ind = min_ind; if (state[index] == FAR) { phi[index] = new_dist; HeapElemX *elem = new HeapElemX(index, phi[index]); elem->prev_ind = prev_ind; heap.insert(elem); elems[index] = elem; state[index] = TRIAL; } else if (state[index] == TRIAL) { if (phi[index] > new_dist) { phi[index] = new_dist; HeapElemX *elem = elems[index]; heap.adjust(elem->heap_id, phi[index]); elem->prev_ind = prev_ind; } } } } } } } //save current swc tree if (1) { int i = -1, j = -1, k = -1; map < long, MyMarker * >tmp_map; for (long ind = 0; ind < tol_sz; ind++) { i++; if (i % sz0 == 0) { i = 0; j++; if (j % sz1 == 0) { j = 0; k++; } } if (state[ind] != ALIVE) continue; MyMarker *marker = new MyMarker(i, j, k); tmp_map[ind] = marker; outtree.push_back(marker); } i = -1; j = -1; k = -1; for (long ind = 0; ind < tol_sz; ind++) { i++; if (i % sz0 == 0) { i = 0; j++; if (j % sz1 == 0) { j = 0; k++; } } if (state[ind] != ALIVE) continue; long ind2 = parent[ind]; MyMarker *marker1 = tmp_map[ind]; MyMarker *marker2 = tmp_map[ind2]; if (marker1 == marker2) marker1->parent = 0; else marker1->parent = marker2; //tmp_map[ind]->parent = tmp_map[ind2]; } } //over map < long, HeapElemX * >::iterator mit = elems.begin(); while (mit != elems.end()) { HeapElemX *elem = mit->second; delete elem; mit++; } if (phi) { delete[] phi; phi = 0; } if (parent) { delete[] parent; parent = 0; } if (state) { delete[] state; state = 0; } return true; } /****************************************************************************** * Fast marching based manual tracing, with root marker and a set of target marker * * Input : root root marker * target the set of target markers * inimg1d original input image * * Output : outswc output tracing result * phi finial distance value for each pixel [todo : replace INF to 0] * * Notice : * 1. the input pixel number should not be larger than 2G if sizeof(long) == 4 * 2. target markers should not contain root marker * 3. the root marker in outswc, is point to itself * 4. the cnn_type is default 3 * *****************************************************************************/ template < class T1, class T2 > bool fastmarching_tracing(MyMarker root, vector < MyMarker > &target, T1 * inimg1d, vector < MyMarker * >&outswc, T2 * &phi, int sz0, int sz1, int sz2, int cnn_type = 3) { int rootx = root.x + 0.5; int rooty = root.y + 0.5; int rootz = root.z + 0.5; if (rootx < 0 || rootx >= sz0 || rooty < 0 || rooty >= sz1 || rootz < 0 || rootz >= sz2) { cerr << "Invalid root marker(" << root.x << "," << root.y << "," << root.z << ")" << endl; return false; } enum { ALIVE = -1, TRIAL = 0, FAR = 1}; long tol_sz = sz0 * sz1 * sz2; long sz01 = sz0 * sz1; //int cnn_type = 3; //? 
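// Tracing strategy: march outward from the root exactly as in fastmarching_tree(), but stop
// early once every target voxel has been reached (parent[tind] != tind), then walk the
// parent[] chain back from each target to the root so that only the branches carrying a
// target are emitted. markerRadius() is used afterwards to estimate a radius for each node.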
if (phi == 0) phi = new T2[tol_sz]; for (long i = 0; i < tol_sz; i++) { phi[i] = INF; } long *parent = new long[tol_sz]; for (long i = 0; i < tol_sz; i++) parent[i] = i; //each pixel point to itself at the beginning // GI parameter min_int, max_int, li double max_int = 0; //maximum intensity, used in GI double min_int = INF; for (long i = 0; i < tol_sz; i++) { if (inimg1d[i] > max_int) max_int = inimg1d[i]; if (inimg1d[i] < min_int) min_int = inimg1d[i]; } max_int -= min_int; double li = 10; //initialization char *state = new char[tol_sz]; for (long i = 0; i < tol_sz; i++) state[i] = FAR; long root_ind = rootz * sz01 + rooty * sz0 + rootx; state[root_ind] = ALIVE; phi[root_ind] = 0.0; vector < long >target_inds; for (long t = 0; t < target.size(); t++) { int i = target[t].x + 0.5; int j = target[t].y + 0.5; int k = target[t].z + 0.5; if (i < 0 || i >= sz0 || j < 0 || j >= sz1 || k < 0 || k >= sz2) { cerr << "t = " << t + 1 << ", invalid target marker(" << target[t].x << "," << target[t].y << "," << target[t].z << ")" << endl; continue; } long ind = k * sz01 + j * sz0 + i; target_inds.push_back(ind); //if (ind == root_ind) { cerr << "please remove root marker from target markers" << endl; exit(0); } } BasicHeap < HeapElemX > heap; map < long, HeapElemX * >elems; //init heap { long index = root_ind; HeapElemX *elem = new HeapElemX(index, phi[index]); elem->prev_ind = index; heap.insert(elem); elems[index] = elem; } //loop int time_counter = 1; double process1 = 0; while (!heap.empty()) { double process2 = (time_counter++) * 100000.0 / tol_sz; if (process2 - process1 >= 1) { cout << "\r" << ((int)process2) / 1000.0 << "%"; cout.flush(); process1 = process2; bool is_break = true; for (int t = 0; t < target_inds.size(); t++) { long tind = target_inds[t]; if (parent[tind] == tind && tind != root_ind) { is_break = false; break; } } if (is_break) break; } HeapElemX *min_elem = heap.delete_min(); elems.erase(min_elem->img_ind); long min_ind = min_elem->img_ind; long prev_ind = min_elem->prev_ind; delete min_elem; parent[min_ind] = prev_ind; state[min_ind] = ALIVE; int i = min_ind % sz0; int j = (min_ind / sz0) % sz1; int k = (min_ind / sz01) % sz2; int w, h, d; for (int kk = -1; kk <= 1; kk++) { d = k + kk; if (d < 0 || d >= sz2) continue; for (int jj = -1; jj <= 1; jj++) { h = j + jj; if (h < 0 || h >= sz1) continue; for (int ii = -1; ii <= 1; ii++) { w = i + ii; if (w < 0 || w >= sz0) continue; int offset = ABS(ii) + ABS(jj) + ABS(kk); if (offset == 0 || offset > cnn_type) continue; double factor = (offset == 1) ? 1.0 : ((offset == 2) ? 1.414214 : ((offset == 3) ? 
1.732051 : 0.0)); long index = d * sz01 + h * sz0 + w; if (state[index] != ALIVE) { double new_dist = phi[min_ind] + (GI(index) + GI(min_ind)) * factor * 0.5; long prev_ind = min_ind; if (state[index] == FAR) { phi[index] = new_dist; HeapElemX *elem = new HeapElemX(index, phi[index]); elem->prev_ind = prev_ind; heap.insert(elem); elems[index] = elem; state[index] = TRIAL; } else if (state[index] == TRIAL) { if (phi[index] > new_dist) { phi[index] = new_dist; HeapElemX *elem = elems[index]; heap.adjust(elem->heap_id, phi[index]); elem->prev_ind = prev_ind; } } } } } } } //extract the tree nodes containing target markers map < long, MyMarker * >marker_map; for (int t = 0; t < target_inds.size(); t++) { long tind = target_inds[t]; long p = tind; while (true) { if (marker_map.find(p) != marker_map.end()) break; int i = p % sz0; int j = p / sz0 % sz1; int k = p / sz01 % sz2; MyMarker *marker = new MyMarker(i, j, k); marker_map[p] = marker; if (p == parent[p]) { assert(p == root.ind(sz0, sz01)); assert(marker_map.find(root.ind(sz0, sz01)) != marker_map.end()); break; } else p = parent[p]; } } if (marker_map.find(root.ind(sz0, sz01)) == marker_map.end()) { cout << "break here" << endl; } map < long, MyMarker * >::iterator it = marker_map.begin(); V3DLONG in_sz[4] = {sz0, sz1, sz2, 1}; while (it != marker_map.end()) { long tind = it->first; MyMarker *marker = it->second; MyMarker *parent_marker = marker_map[parent[tind]]; marker->parent = parent_marker; marker->radius = markerRadius(inimg1d, in_sz, *marker, 20); outswc.push_back(marker); it++; } map < long, HeapElemX * >::iterator mit = elems.begin(); while (mit != elems.end()) { HeapElemX *elem = mit->second; delete elem; mit++; } if (parent) { delete[] parent; parent = 0; } if (state) { delete[] state; state = 0; } return true; } #endif
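The only functional difference between the two formatted variants is whether the per-voxel direction-field loop runs under OpenMP with the private(pt,vec1,vec2,vec3) clause. Below is a minimal sketch of that pattern with a stand-in computation instead of the project's compute_rgn_stat_new(); the function name and the stand-in math are illustrative assumptions only.

#include <omp.h>
#include <cmath>
#include <vector>

// Fill one 3-vector per voxel in parallel; the scratch variables are declared inside the
// loop body so each thread gets its own copy (equivalent to the private(...) clause).
void fill_direction(std::vector<double>& dir, long sz0, long sz1, long sz2)
{
    const long n = sz0 * sz1 * sz2;
    dir.assign(3 * n, 0.0);
    omp_set_num_threads(10);          // mirrors the hard-coded thread count in the original
#pragma omp parallel for
    for (long i = 0; i < n; ++i) {
        long x = i % sz0, y = (i / sz0) % sz1, z = i / (sz0 * sz1);
        // stand-in for compute_rgn_stat_new(): a unit vector derived from the coordinates
        double v[3] = {double(x), double(y), double(z)};
        double len = std::sqrt(v[0] * v[0] + v[1] * v[1] + v[2] * v[2]) + 1e-12;
        dir[3 * i + 0] = v[0] / len;
        dir[3 * i + 1] = v[1] / len;
        dir[3 * i + 2] = v[2] / len;
    }
}

Each iteration writes only its own dir[3*i .. 3*i+2] slots and reads no shared mutable state, so the loop is safe to parallelise without further synchronisation.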
// last change:by PHC, 2013 - 02 - 13. adjust memory allocation to make it more robust /***************************************************************** * file : fastmarching_tree.h, Hang Xiao, Jan 18, 2012 * * fastmarching_tree * fastmarching_tracing * * **************************************************************/ #ifndef __FAST_MARCHING_TREE_H__ #define __FAST_MARCHING_TREE_H__ #include <cstdlib> #include <cmath> #include <vector> #include <map> #include <iostream> #include "stackutil.h" #include "my_surf_objs.h" #include "heap.h" #include "upwind_solver.h" #include "fastmarching_macro.h" using namespace std; #ifndef ABS #define ABS(x) ((x) > 0 ? (x) : (-(x))) #endif #ifndef MAX #define MAX(x,y) ((x) > (y) ? (x) : (y)) #endif #ifndef MIN #define MIN(x,y) ((x) < (y) ? (x) : (y)) #endif double consin(double vec1[], double vec2[], int len) { double sum = 0; double A = 0; double B = 0; for (int i = 0; i < len; i++) { sum += vec1[i] * vec2[i]; A += vec1[i] * vec1[i]; B += vec2[i] * vec2[i]; } return ABS(sqrt(A * B) / sum); } struct HeapElemXX:public HeapElem { MyMarker *parent_marker; HeapElemXX(long _ind, double _value, MyMarker * _parent_marker): HeapElem(_ind, _value) { parent_marker = _parent_marker; } }; /********************************************************************* * Function : fastmarching_linear_tree * * Features : * 1. Create fast marcing tree from root marker only * 2. Background (intensity less than bkg_thresh) will be ignored. * 3. The distance is the sum of intensity *** * * Input : root root marker * inimg1d original 8bit image * * Output : tree output swc * phi the distance for each pixels * *******************************************************************/ template < class T > bool fastmarching_linear_tree(MyMarker root, T * inimg1d, vector < MyMarker * >&outtree, int sz0, int sz1, int sz2, int cnn_type = 3, double bkg_thresh = 1) { enum { ALIVE = -1, TRIAL = 0, FAR = 1}; long tol_sz = sz0 * sz1 * sz2; long sz01 = sz0 * sz1; //int cnn_type = 3; //? 
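// State machine used by the marching loop below: every voxel starts FAR; when first reached
// it becomes TRIAL and enters the min-heap keyed by its tentative distance phi; when popped
// with the smallest phi it is frozen as ALIVE and its parent pointer becomes final. TRIAL
// voxels may be relaxed (heap.adjust) any number of times before they are frozen.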
float *phi = new float[tol_sz]; for (long i = 0; i < tol_sz; i++) { phi[i] = INF; } long *parent = new long[tol_sz]; for (long i = 0; i < tol_sz; i++) parent[i] = i; //each pixel point to itself at the beginning // GI parameter min_int, max_int, li double max_int = 0; //maximum intensity, used in GI double min_int = INF; for (long i = 0; i < tol_sz; i++) { if (inimg1d[i] > max_int) max_int = inimg1d[i]; if (inimg1d[i] < min_int) min_int = inimg1d[i]; } max_int -= min_int; double li = 10; //initialization char *state = new char[tol_sz]; for (long i = 0; i < tol_sz; i++) state[i] = FAR; //init state and phi for root int rootx = root.x + 0.5; int rooty = root.y + 0.5; int rootz = root.z + 0.5; long root_ind = rootz * sz01 + rooty * sz0 + rootx; state[root_ind] = ALIVE; phi[root_ind] = 0.0; BasicHeap < HeapElemX > heap; map < long, HeapElemX * >elems; //init heap { long index = root_ind; HeapElemX *elem = new HeapElemX(index, phi[index]); elem->prev_ind = index; heap.insert(elem); elems[index] = elem; } //loop int time_counter = 1; double process1 = 0; while (!heap.empty()) { double process2 = (time_counter++) * 10000.0 / tol_sz; if (process2 - process1 >= 1) { cout << "\r" << ((int)process2) / 100.0 << "%"; cout.flush(); process1 = process2; } HeapElemX *min_elem = heap.delete_min(); elems.erase(min_elem->img_ind); long min_ind = min_elem->img_ind; long prev_ind = min_elem->prev_ind; delete min_elem; parent[min_ind] = prev_ind; state[min_ind] = ALIVE; int i = min_ind % sz0; int j = (min_ind / sz0) % sz1; int k = (min_ind / sz01) % sz2; int w, h, d; for (int kk = -1; kk <= 1; kk++) { d = k + kk; if (d < 0 || d >= sz2) continue; for (int jj = -1; jj <= 1; jj++) { h = j + jj; if (h < 0 || h >= sz1) continue; for (int ii = -1; ii <= 1; ii++) { w = i + ii; if (w < 0 || w >= sz0) continue; int offset = ABS(ii) + ABS(jj) + ABS(kk); if (offset == 0 || offset > cnn_type) continue; double factor = (offset == 1) ? 1.0 : ((offset == 2) ? 1.414214 : ((offset == 3) ? 
1.732051 : 0.0)); long index = d * sz01 + h * sz0 + w; if (inimg1d[index] <= bkg_thresh) continue; if (state[index] != ALIVE) { double new_dist = phi[min_ind] + (1.0 - (inimg1d[index] - min_ind) / max_int) * 1000.0; long prev_ind = min_ind; if (state[index] == FAR) { phi[index] = new_dist; HeapElemX *elem = new HeapElemX(index, phi[index]); elem->prev_ind = prev_ind; heap.insert(elem); elems[index] = elem; state[index] = TRIAL; } else if (state[index] == TRIAL) { if (phi[index] > new_dist) { phi[index] = new_dist; HeapElemX *elem = elems[index]; heap.adjust(elem->heap_id, phi[index]); elem->prev_ind = prev_ind; } } } } } } } //save current swc tree if (1) { int i = -1, j = -1, k = -1; map < long, MyMarker * >tmp_map; for (long ind = 0; ind < tol_sz; ind++) { i++; if (i % sz0 == 0) { i = 0; j++; if (j % sz1 == 0) { j = 0; k++; } } if (state[ind] != ALIVE) continue; MyMarker *marker = new MyMarker(i, j, k); tmp_map[ind] = marker; outtree.push_back(marker); } i = -1; j = -1; k = -1; for (long ind = 0; ind < tol_sz; ind++) { i++; if (i % sz0 == 0) { i = 0; j++; if (j % sz1 == 0) { j = 0; k++; } } if (state[ind] != ALIVE) continue; long ind2 = parent[ind]; MyMarker *marker1 = tmp_map[ind]; MyMarker *marker2 = tmp_map[ind2]; if (marker1 == marker2) marker1->parent = 0; else marker1->parent = marker2; //tmp_map[ind]->parent = tmp_map[ind2]; } } //over map < long, HeapElemX * >::iterator mit = elems.begin(); while (mit != elems.end()) { HeapElemX *elem = mit->second; delete elem; mit++; } if (phi) { delete[] phi; phi = 0; } if (parent) { delete[] parent; parent = 0; } if (state) { delete[] state; state = 0; } return true; } /********************************************************************* * Function : fastmarching_tree * * Features : * 1. Create fast marcing tree from root marker only * 2. Background (intensity 0) will be ignored. * 3. 
Graph augumented distance is used * * Input : root root marker * inimg1d original 8bit image * * Output : tree output swc * phi the distance for each pixels * *******************************************************************/ template < class T > bool fastmarching_tree(MyMarker root, T * inimg1d, vector < MyMarker * >&outtree, long sz0, long sz1, long sz2, int cnn_type = 3, double bkg_thresh = 20, bool is_break_accept = false) { cout << "wp_debug: " << __FUNCTION__ << " " << __LINE__ << endl; //add dircetion T *** indata3d; indata3d = (T ***) malloc(sz2 * sizeof(T **)); for (int i = 0; i < sz2; i++) { indata3d[i] = (T **) malloc(sz1 * sizeof(T *)); for (int j = 0; j < sz1; j++) { indata3d[i][j] = (T *) malloc(sz0 * sizeof(T)); } } for (int z = 0; z < sz2; z++) { for (int y = 0; y < sz1; y++) { for (int x = 0; x < sz0; x++) { indata3d[z][y][x] = inimg1d[z * sz1 * sz0 + y * sz0 + x]; } } } double **direction; direction = (double **)malloc(sz0 * sz1 * sz2 * sizeof(double *)); for (int i = 0; i < sz0 * sz1 * sz2; i++) { direction[i] = (double *)malloc(3 * sizeof(double)); } LocationSimple pt; double sigma1, sigma2, sigma3; double vec1[3], vec2[3], vec3[3]; //endTime1 = GetTickCount(); omp_set_num_threads(10); #pragma omp parallel for private(pt,vec1,vec2,vec3) for (int i = 0; i < sz0 * sz1 * sz2; i++) { pt.x = i % sz0; pt.y = int (i / sz0) % sz1; pt.z = int (i / (sz0 * sz1)); pt.radius = 8; //cout << pt.x << " " << pt.y << " " << pt.z << endl; compute_rgn_stat_new(pt, indata3d, 1, sz0, sz1, sz2, 1, V3D_UINT8, vec1, vec2, vec3, sigma1, sigma2, sigma3); direction[i][0] = vec1[0]; direction[i][1] = vec1[1]; direction[i][2] = vec1[2]; //cout << i << endl; } cout << "wp_debug: " << __FUNCTION__ << " " << __LINE__ << endl; //end dircetiond enum { ALIVE = -1, TRIAL = 0, FAR = 1}; long tol_sz = sz0 * sz1 * sz2; long sz01 = sz0 * sz1; long i; //int cnn_type = 3; //? float *phi = 0; long *parent = 0; char *state = 0; try { phi = new float[tol_sz]; parent = new long[tol_sz]; state = new char[tol_sz]; for (i = 0; i < tol_sz; i++) { phi[i] = INF; parent[i] = i; //each pixel point to itself at the statements beginning state[i] = FAR; } } catch(...) { cout << "********* Fail to allocate memory. quit fastmarching_tree()." 
<< endl; if (phi) { delete[] phi; phi = 0; } if (parent) { delete[] parent; parent = 0; } if (state) { delete[] state; state = 0; } return false; } //GI parameter min_int, max_int, li double max_int = 0; //maximum intensity, used in GI double min_int = INF; for (i = 0; i < tol_sz; i++) { if (inimg1d[i] > max_int) max_int = inimg1d[i]; else if (inimg1d[i] < min_int) min_int = inimg1d[i]; } max_int -= min_int; double li = 10; //initialization // init state and phi for root long rootx = root.x + 0.5; long rooty = root.y + 0.5; long rootz = root.z + 0.5; long root_ind = rootz * sz01 + rooty * sz0 + rootx; state[root_ind] = ALIVE; phi[root_ind] = 0.0; BasicHeap < HeapElemX > heap; map < long, HeapElemX * >elems; //init heap { long index = root_ind; HeapElemX *elem = new HeapElemX(index, phi[index]); elem->prev_ind = index; heap.insert(elem); elems[index] = elem; } //loop int time_counter = 1; double process1 = 0; while (!heap.empty()) { double process2 = (time_counter++) * 10000.0 / tol_sz; //cout << "\r" << ((int)process2) / 100.0 << "%"; cout.flush(); if (process2 - process1 >= 1) { cout << "\r" << ((int)process2) / 100.0 << "%"; cout.flush(); process1 = process2; } HeapElemX *min_elem = heap.delete_min(); elems.erase(min_elem->img_ind); long min_ind = min_elem->img_ind; long prev_ind = min_elem->prev_ind; delete min_elem; parent[min_ind] = prev_ind; state[min_ind] = ALIVE; int i = min_ind % sz0; int j = (min_ind / sz0) % sz1; int k = (min_ind / sz01) % sz2; int w, h, d; for (int kk = -1; kk <= 1; kk++) { d = k + kk; if (d < 0 || d >= sz2) continue; for (int jj = -1; jj <= 1; jj++) { h = j + jj; if (h < 0 || h >= sz1) continue; for (int ii = -1; ii <= 1; ii++) { w = i + ii; if (w < 0 || w >= sz0) continue; int offset = ABS(ii) + ABS(jj) + ABS(kk); if (offset == 0 || offset > cnn_type) continue; double factor = (offset == 1) ? 1.0 : ((offset == 2) ? 1.414214 : ((offset == 3) ? 
1.732051 : 0.0)); long index = d * sz01 + h * sz0 + w; if (is_break_accept) { if (inimg1d[index] <= bkg_thresh && inimg1d[min_ind] <= bkg_thresh) continue; } else { if (inimg1d[index] <= bkg_thresh) continue; } if (state[index] != ALIVE) { //change by wp // double new_dist = phi[min_ind] + (GI(index) + GI(min_ind)) * factor * 0.5; double vec[3] = {ii, jj, kk}; double new_dist = phi[min_ind] + (GI(index) + GI(min_ind)) * factor * 0.5; //double consin_dist = consin(direction[index], direction[min_ind], 3); double consin_dist = double (exp((consin(vec, direction[min_ind], 3) - 1)) * 2000); //consin_dist = 0; //delete[] vec; cout << "new_dist:" << (GI(index) + GI(min_ind)) * factor * 0.5 << "consin_dist:" << consin_dist << endl; new_dist += consin_dist; /* new_dist += consin_dist; */ long prev_ind = min_ind; if (state[index] == FAR) { phi[index] = new_dist; HeapElemX *elem = new HeapElemX(index, phi[index]); elem->prev_ind = prev_ind; heap.insert(elem); elems[index] = elem; state[index] = TRIAL; } else if (state[index] == TRIAL) { if (phi[index] > new_dist) { phi[index] = new_dist; HeapElemX *elem = elems[index]; heap.adjust(elem->heap_id, phi[index]); elem->prev_ind = prev_ind; } } } } } } } //save current swc tree if (1) { int i = -1, j = -1, k = -1; map < long, MyMarker * >tmp_map; for (long ind = 0; ind < tol_sz; ind++) { i++; if (i % sz0 == 0) { i = 0; j++; if (j % sz1 == 0) { j = 0; k++; } } if (state[ind] != ALIVE) continue; MyMarker *marker = new MyMarker(i, j, k); tmp_map[ind] = marker; outtree.push_back(marker); } i = -1; j = -1; k = -1; for (long ind = 0; ind < tol_sz; ind++) { i++; if (i % sz0 == 0) { i = 0; j++; if (j % sz1 == 0) { j = 0; k++; } } if (state[ind] != ALIVE) continue; long ind2 = parent[ind]; MyMarker *marker1 = tmp_map[ind]; MyMarker *marker2 = tmp_map[ind2]; if (marker1 == marker2) marker1->parent = 0; else marker1->parent = marker2; //tmp_map[ind]->parent = tmp_map[ind2]; } } //over map < long, HeapElemX * >::iterator mit = elems.begin(); while (mit != elems.end()) { HeapElemX *elem = mit->second; delete elem; mit++; } if (phi) { delete[] phi; phi = 0; } if (parent) { delete[] parent; parent = 0; } if (state) { delete[] state; state = 0; } return true; } /********************************************************************* * Function : fastmarching_tree * * Features : * 1. Create fast marcing tree from root marker * 2. Background (intensity 0) will be ignored. * 3. 
Euclidean distance is used * * Input : root root marker * inimg1d original 8bit image * * Output : tree output swc * phi the distance for each pixels * *******************************************************************/ //inimg1d is binary template < class T > bool fastmarching_tree_old(MyMarker root, T * inimg1d, vector < MyMarker * >&tree, double *&phi, int sz0, int sz1, int sz2) { enum { ALIVE = -1, TRIAL = 0, FAR = 1}; long tol_sz = sz0 * sz1 * sz2; long sz01 = sz0 * sz1; int rootx = (int)(root.x + 0.5); int rooty = (int)(root.y + 0.5); int rootz = (int)(root.z + 0.5); long root_ind = rootz * sz01 + rooty * sz0 + rootx; if (inimg1d[root_ind] == 0) { cerr << "the root position is not in forground" << endl; return false; } int cnn_type = 1; //cnn_type should be 1 phi = new double[tol_sz]; for (long i = 0; i < tol_sz; i++) phi[i] = INF; //initialization char *state = new char[tol_sz]; for (long i = 0; i < tol_sz; i++) state[i] = FAR; state[root_ind] = ALIVE; phi[root_ind] = 0.0; BasicHeap < HeapElemXX > heap; map < long, HeapElemXX * >elems; //init state around root int i = rootx, j = rooty, k = rootz; MyMarker *root_node = new MyMarker(i, j, k); tree.push_back(root_node); int w, h, d; for (int kk = -1; kk <= 1; kk++) { d = k + kk; if (d < 0 || d >= sz2) continue; for (int jj = -1; jj <= 1; jj++) { h = j + jj; if (h < 0 || h >= sz1) continue; for (int ii = -1; ii <= 1; ii++) { w = i + ii; if (w < 0 || w >= sz0) continue; int offset = ABS(ii) + ABS(jj) + ABS(kk); if (offset == 0 || offset > cnn_type) continue; long index = d * sz01 + h * sz0 + w; if (inimg1d[index] == 0) continue; if (state[index] == FAR) { state[index] = TRIAL; double u1 = INF; double u2 = INF; double u3 = INF; if (w - 1 >= 0 && state[index - 1] == ALIVE) u1 = u1 < phi[index - 1] ? u1 : phi[index - 1]; if (w + 1 < sz0 && state[index + 1] == ALIVE) u1 = u1 < phi[index + 1] ? u1 : phi[index + 1]; if (h - 1 >= 0 && state[index - sz0] == ALIVE) u2 = u2 < phi[index - sz0] ? u2 : phi[index - sz0]; if (h + 1 < sz1 && state[index + sz0] == ALIVE) u2 = u2 < phi[index + sz0] ? u2 : phi[index + sz0]; if (d - 1 >= 0 && state[index - sz0 * sz1] == ALIVE) u3 = u3 < phi[index - sz0 * sz1] ? u3 : phi[index - sz0 * sz1]; if (d + 1 < sz2 && state[index + sz0 * sz1] == ALIVE) u3 = u3 < phi[index + sz0 * sz1] ? 
u3 : phi[index + sz0 * sz1]; vector < double >parameters; if (u1 != INF) parameters.push_back(u1); if (u2 != INF) parameters.push_back(u2); if (u3 != INF) parameters.push_back(u3); phi[index] = upwind_solver(parameters); HeapElemXX *elem = new HeapElemXX(index, phi[index], root_node); heap.insert(elem); elems[index] = elem; } } } } //loop int time_counter = 1; int process1 = 0, process2 = 0; while (!heap.empty()) { double process2 = (time_counter++) * 1000.0 / tol_sz; if (process2 - process1 >= 1) { cout << "\r" << ((int)process2) / 10.0 << "%"; cout.flush(); process1 = process2; } HeapElemXX *min_elem = heap.delete_min(); elems.erase(min_elem->img_ind); long min_ind = min_elem->img_ind; state[min_ind] = ALIVE; int i = min_ind % sz0; int j = (min_ind / sz0) % sz1; int k = (min_ind / sz01) % sz2; MyMarker *cur_marker = new MyMarker(i, j, k); cur_marker->parent = min_elem->parent_marker; tree.push_back(cur_marker); delete min_elem; int w, h, d; for (int kk = -1; kk <= 1; kk++) { d = k + kk; if (d < 0 || d >= sz2) continue; for (int jj = -1; jj <= 1; jj++) { h = j + jj; if (h < 0 || h >= sz1) continue; for (int ii = -1; ii <= 1; ii++) { w = i + ii; if (w < 0 || w >= sz0) continue; int offset = ABS(ii) + ABS(jj) + ABS(kk); if (offset == 0 || offset > cnn_type) continue; long index = d * sz01 + h * sz0 + w; if (inimg1d[index] == 0) continue; if (state[index] != ALIVE) { double u1 = INF; double u2 = INF; double u3 = INF; if (w - 1 >= 0 && state[index - 1] == ALIVE) u1 = u1 < phi[index - 1] ? u1 : phi[index - 1]; if (w + 1 < sz0 && state[index + 1] == ALIVE) u1 = u1 < phi[index + 1] ? u1 : phi[index + 1]; if (h - 1 >= 0 && state[index - sz0] == ALIVE) u2 = u2 < phi[index - sz0] ? u2 : phi[index - sz0]; if (h + 1 < sz1 && state[index + sz0] == ALIVE) u2 = u2 < phi[index + sz0] ? u2 : phi[index + sz0]; if (d - 1 >= 0 && state[index - sz0 * sz1] == ALIVE) u3 = u3 < phi[index - sz0 * sz1] ? u3 : phi[index - sz0 * sz1]; if (d + 1 < sz2 && state[index + sz0 * sz1] == ALIVE) u3 = u3 < phi[index + sz0 * sz1] ? u3 : phi[index + sz0 * sz1]; vector < double >parameters; if (u1 != INF) parameters.push_back(u1); if (u2 != INF) parameters.push_back(u2); if (u3 != INF) parameters.push_back(u3); double solver_result = upwind_solver(parameters); if (state[index] == FAR) { phi[index] = solver_result; HeapElemXX *elem = new HeapElemXX(index, phi[index], cur_marker); heap.insert(elem); elems[index] = elem; state[index] = TRIAL; } else if (state[index] == TRIAL) { if (phi[index] > solver_result) { phi[index] = solver_result; HeapElemXX *elem = elems[index]; heap.adjust(elem->heap_id, phi[index]); elem->parent_marker = cur_marker; } } } } } } } for (long i = 0; i < tol_sz; i++) if (phi[i] == INF) phi[i] = 0; if (state) { delete[] state; state = 0; } return true; } /****************************************************************************** * Fast marching based tree construction * 1. use graph augmented distance (GD) * 2. stop when all target marker are marched * * Input : root root marker * target the set of target markers * inimg1d original input image * * Output : outtree output tracing result * * Notice : * 1. the input pixel number should not be larger than 2G if sizeof(long) == 4 * 2. target markers should not contain root marker * 3. the root marker in outswc, is point to itself * 4. 
the cnn_type is default 3 * *****************************************************************************/ template < class T > bool fastmarching_tree(MyMarker root, vector < MyMarker > &target, T * inimg1d, vector < MyMarker * >&outtree, long sz0, long sz1, long sz2, int cnn_type = 3) { enum { ALIVE = -1, TRIAL = 0, FAR = 1}; long tol_sz = sz0 * sz1 * sz2; long sz01 = sz0 * sz1; //int cnn_type = 3; //? //float *phi = new float[tol_sz]; for (long i = 0; i < tol_sz; i++) { phi[i] = INF; } //long *parent = new long[tol_sz]; for (long i = 0; i < tol_sz; i++) parent[i] = i; //each pixel point to itself at the beginning long i; float *phi = 0; long *parent = 0; char *state = 0; try { phi = new float[tol_sz]; parent = new long[tol_sz]; state = new char[tol_sz]; for (i = 0; i < tol_sz; i++) { phi[i] = INF; parent[i] = i; //each pixel point to itself at the statements beginning state[i] = FAR; } } catch(...) { cout << "********* Fail to allocate memory. quit fastmarching_tree()." << endl; if (phi) { delete[] phi; phi = 0; } if (parent) { delete[] parent; parent = 0; } if (state) { delete[] state; state = 0; } return false; } //GI parameter min_int, max_int, li double max_int = 0; //maximum intensity, used in GI double min_int = INF; for (long i = 0; i < tol_sz; i++) { if (inimg1d[i] > max_int) max_int = inimg1d[i]; if (inimg1d[i] < min_int) min_int = inimg1d[i]; } max_int -= min_int; double li = 10; //initialization // char *state = new char[tol_sz]; //for (long i = 0; i < tol_sz; i++) state[i] = FAR; //init state and phi for root int rootx = root.x + 0.5; int rooty = root.y + 0.5; int rootz = root.z + 0.5; long root_ind = rootz * sz01 + rooty * sz0 + rootx; state[root_ind] = ALIVE; phi[root_ind] = 0.0; vector < long >target_inds; for (long t = 0; t < target.size(); t++) { int i = target[t].x + 0.5; int j = target[t].y + 0.5; int k = target[t].z + 0.5; long ind = k * sz01 + j * sz0 + i; target_inds.push_back(ind); //if (ind == root_ind) { cerr << "please remove root marker from target markers" << endl; exit(0); } } BasicHeap < HeapElemX > heap; map < long, HeapElemX * >elems; //init heap { long index = root_ind; HeapElemX *elem = new HeapElemX(index, phi[index]); elem->prev_ind = index; heap.insert(elem); elems[index] = elem; } //loop int time_counter = 1; double process1 = 0; while (!heap.empty()) { double process2 = (time_counter++) * 100000.0 / tol_sz; if (process2 - process1 >= 1) { cout << "\r" << ((int)process2) / 1000.0 << "%"; cout.flush(); process1 = process2; bool is_break = true; for (int t = 0; t < target_inds.size(); t++) { long tind = target_inds[t]; if (parent[tind] == tind && tind != root_ind) { is_break = false; break; } } if (is_break) break; } HeapElemX *min_elem = heap.delete_min(); elems.erase(min_elem->img_ind); long min_ind = min_elem->img_ind; long prev_ind = min_elem->prev_ind; delete min_elem; parent[min_ind] = prev_ind; state[min_ind] = ALIVE; int i = min_ind % sz0; int j = (min_ind / sz0) % sz1; int k = (min_ind / sz01) % sz2; int w, h, d; for (int kk = -1; kk <= 1; kk++) { d = k + kk; if (d < 0 || d >= sz2) continue; for (int jj = -1; jj <= 1; jj++) { h = j + jj; if (h < 0 || h >= sz1) continue; for (int ii = -1; ii <= 1; ii++) { w = i + ii; if (w < 0 || w >= sz0) continue; int offset = ABS(ii) + ABS(jj) + ABS(kk); if (offset == 0 || offset > cnn_type) continue; double factor = (offset == 1) ? 1.0 : ((offset == 2) ? 1.414214 : ((offset == 3) ? 
1.732051 : 0.0)); long index = d * sz01 + h * sz0 + w; if (state[index] != ALIVE) { double new_dist = phi[min_ind] + (GI(index) + GI(min_ind)) * factor * 0.5; long prev_ind = min_ind; if (state[index] == FAR) { phi[index] = new_dist; HeapElemX *elem = new HeapElemX(index, phi[index]); elem->prev_ind = prev_ind; heap.insert(elem); elems[index] = elem; state[index] = TRIAL; } else if (state[index] == TRIAL) { if (phi[index] > new_dist) { phi[index] = new_dist; HeapElemX *elem = elems[index]; heap.adjust(elem->heap_id, phi[index]); elem->prev_ind = prev_ind; } } } } } } } //save current swc tree if (1) { int i = -1, j = -1, k = -1; map < long, MyMarker * >tmp_map; for (long ind = 0; ind < tol_sz; ind++) { i++; if (i % sz0 == 0) { i = 0; j++; if (j % sz1 == 0) { j = 0; k++; } } if (state[ind] != ALIVE) continue; MyMarker *marker = new MyMarker(i, j, k); tmp_map[ind] = marker; outtree.push_back(marker); } i = -1; j = -1; k = -1; for (long ind = 0; ind < tol_sz; ind++) { i++; if (i % sz0 == 0) { i = 0; j++; if (j % sz1 == 0) { j = 0; k++; } } if (state[ind] != ALIVE) continue; long ind2 = parent[ind]; MyMarker *marker1 = tmp_map[ind]; MyMarker *marker2 = tmp_map[ind2]; if (marker1 == marker2) marker1->parent = 0; else marker1->parent = marker2; //tmp_map[ind]->parent = tmp_map[ind2]; } } //over map < long, HeapElemX * >::iterator mit = elems.begin(); while (mit != elems.end()) { HeapElemX *elem = mit->second; delete elem; mit++; } if (phi) { delete[] phi; phi = 0; } if (parent) { delete[] parent; parent = 0; } if (state) { delete[] state; state = 0; } return true; } /****************************************************************************** * Fast marching based manual tracing, with root marker and a set of target marker * * Input : root root marker * target the set of target markers * inimg1d original input image * * Output : outswc output tracing result * phi finial distance value for each pixel [todo : replace INF to 0] * * Notice : * 1. the input pixel number should not be larger than 2G if sizeof(long) == 4 * 2. target markers should not contain root marker * 3. the root marker in outswc, is point to itself * 4. the cnn_type is default 3 * *****************************************************************************/ template < class T1, class T2 > bool fastmarching_tracing(MyMarker root, vector < MyMarker > &target, T1 * inimg1d, vector < MyMarker * >&outswc, T2 * &phi, int sz0, int sz1, int sz2, int cnn_type = 3) { int rootx = root.x + 0.5; int rooty = root.y + 0.5; int rootz = root.z + 0.5; if (rootx < 0 || rootx >= sz0 || rooty < 0 || rooty >= sz1 || rootz < 0 || rootz >= sz2) { cerr << "Invalid root marker(" << root.x << "," << root.y << "," << root.z << ")" << endl; return false; } enum { ALIVE = -1, TRIAL = 0, FAR = 1}; long tol_sz = sz0 * sz1 * sz2; long sz01 = sz0 * sz1; //int cnn_type = 3; //? 
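// Note on the propagation below (the same pattern is used by the fastmarching_tree
// variants above): every voxel starts FAR, enters the heap as TRIAL the first time a
// tentative distance is assigned, and becomes ALIVE with its final distance when it is
// popped, exactly as in Dijkstra's algorithm.  A neighbour's tentative distance is
//     new_dist = phi[min_ind] + (GI(index) + GI(min_ind)) * factor * 0.5,
// where factor is the Euclidean step length (1, 1.414214 or 1.732051 for face-, edge-
// and corner-neighbours, gated by cnn_type) and GI(.) is the intensity-to-cost mapping
// defined elsewhere in this file from the min_int / max_int / li parameters computed
// below, so that (with the usual choice of GI) brighter voxels are cheaper to traverse.
// parent[] records the predecessor of each voxel; the back-tracing at the end of this
// function simply follows parent[] from every reachable target marker up to the root.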
if (phi == 0) phi = new T2[tol_sz]; for (long i = 0; i < tol_sz; i++) { phi[i] = INF; } long *parent = new long[tol_sz]; for (long i = 0; i < tol_sz; i++) parent[i] = i; //each pixel point to itself at the beginning // GI parameter min_int, max_int, li double max_int = 0; //maximum intensity, used in GI double min_int = INF; for (long i = 0; i < tol_sz; i++) { if (inimg1d[i] > max_int) max_int = inimg1d[i]; if (inimg1d[i] < min_int) min_int = inimg1d[i]; } max_int -= min_int; double li = 10; //initialization char *state = new char[tol_sz]; for (long i = 0; i < tol_sz; i++) state[i] = FAR; long root_ind = rootz * sz01 + rooty * sz0 + rootx; state[root_ind] = ALIVE; phi[root_ind] = 0.0; vector < long >target_inds; for (long t = 0; t < target.size(); t++) { int i = target[t].x + 0.5; int j = target[t].y + 0.5; int k = target[t].z + 0.5; if (i < 0 || i >= sz0 || j < 0 || j >= sz1 || k < 0 || k >= sz2) { cerr << "t = " << t + 1 << ", invalid target marker(" << target[t].x << "," << target[t].y << "," << target[t].z << ")" << endl; continue; } long ind = k * sz01 + j * sz0 + i; target_inds.push_back(ind); //if (ind == root_ind) { cerr << "please remove root marker from target markers" << endl; exit(0); } } BasicHeap < HeapElemX > heap; map < long, HeapElemX * >elems; //init heap { long index = root_ind; HeapElemX *elem = new HeapElemX(index, phi[index]); elem->prev_ind = index; heap.insert(elem); elems[index] = elem; } //loop int time_counter = 1; double process1 = 0; while (!heap.empty()) { double process2 = (time_counter++) * 100000.0 / tol_sz; if (process2 - process1 >= 1) { cout << "\r" << ((int)process2) / 1000.0 << "%"; cout.flush(); process1 = process2; bool is_break = true; for (int t = 0; t < target_inds.size(); t++) { long tind = target_inds[t]; if (parent[tind] == tind && tind != root_ind) { is_break = false; break; } } if (is_break) break; } HeapElemX *min_elem = heap.delete_min(); elems.erase(min_elem->img_ind); long min_ind = min_elem->img_ind; long prev_ind = min_elem->prev_ind; delete min_elem; parent[min_ind] = prev_ind; state[min_ind] = ALIVE; int i = min_ind % sz0; int j = (min_ind / sz0) % sz1; int k = (min_ind / sz01) % sz2; int w, h, d; for (int kk = -1; kk <= 1; kk++) { d = k + kk; if (d < 0 || d >= sz2) continue; for (int jj = -1; jj <= 1; jj++) { h = j + jj; if (h < 0 || h >= sz1) continue; for (int ii = -1; ii <= 1; ii++) { w = i + ii; if (w < 0 || w >= sz0) continue; int offset = ABS(ii) + ABS(jj) + ABS(kk); if (offset == 0 || offset > cnn_type) continue; double factor = (offset == 1) ? 1.0 : ((offset == 2) ? 1.414214 : ((offset == 3) ? 
1.732051 : 0.0)); long index = d * sz01 + h * sz0 + w; if (state[index] != ALIVE) { double new_dist = phi[min_ind] + (GI(index) + GI(min_ind)) * factor * 0.5; long prev_ind = min_ind; if (state[index] == FAR) { phi[index] = new_dist; HeapElemX *elem = new HeapElemX(index, phi[index]); elem->prev_ind = prev_ind; heap.insert(elem); elems[index] = elem; state[index] = TRIAL; } else if (state[index] == TRIAL) { if (phi[index] > new_dist) { phi[index] = new_dist; HeapElemX *elem = elems[index]; heap.adjust(elem->heap_id, phi[index]); elem->prev_ind = prev_ind; } } } } } } } //extract the tree nodes containing target markers map < long, MyMarker * >marker_map; for (int t = 0; t < target_inds.size(); t++) { long tind = target_inds[t]; long p = tind; while (true) { if (marker_map.find(p) != marker_map.end()) break; int i = p % sz0; int j = p / sz0 % sz1; int k = p / sz01 % sz2; MyMarker *marker = new MyMarker(i, j, k); marker_map[p] = marker; if (p == parent[p]) { assert(p == root.ind(sz0, sz01)); assert(marker_map.find(root.ind(sz0, sz01)) != marker_map.end()); break; } else p = parent[p]; } } if (marker_map.find(root.ind(sz0, sz01)) == marker_map.end()) { cout << "break here" << endl; } map < long, MyMarker * >::iterator it = marker_map.begin(); V3DLONG in_sz[4] = {sz0, sz1, sz2, 1}; while (it != marker_map.end()) { long tind = it->first; MyMarker *marker = it->second; MyMarker *parent_marker = marker_map[parent[tind]]; marker->parent = parent_marker; marker->radius = markerRadius(inimg1d, in_sz, *marker, 20); outswc.push_back(marker); it++; } map < long, HeapElemX * >::iterator mit = elems.begin(); while (mit != elems.end()) { HeapElemX *elem = mit->second; delete elem; mit++; } if (parent) { delete[] parent; parent = 0; } if (state) { delete[] state; state = 0; } return true; } #endif
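The tracing routines above all share one propagation skeleton. The fragment below is a minimal, self-contained sketch of that skeleton for reference only; it is not part of the library. It replaces the BasicHeap/HeapElemX structures used above with std::priority_queue plus lazy deletion, and uses a plain Euclidean step length instead of the intensity-weighted GI() cost, so it computes ordinary shortest-path distances on the voxel grid. The function name grid_dijkstra and its signature are invented for this example.

#include <cmath>
#include <cstdlib>
#include <functional>
#include <limits>
#include <queue>
#include <utility>
#include <vector>

// Dijkstra-style propagation over a 6/18/26-connected voxel grid (cnn_type = 1, 2 or 3).
// dist[v] receives the geodesic distance from 'root'; parent[v] the predecessor index.
inline void grid_dijkstra(long sz0, long sz1, long sz2, long root, int cnn_type,
                          std::vector<double>& dist, std::vector<long>& parent)
{
    const long sz01 = sz0 * sz1, tol_sz = sz01 * sz2;
    const double INF = std::numeric_limits<double>::infinity();
    dist.assign(tol_sz, INF);
    parent.resize(tol_sz);
    for (long i = 0; i < tol_sz; ++i) parent[i] = i;   // every voxel is its own parent at start

    typedef std::pair<double, long> Elem;              // (tentative distance, voxel index)
    std::priority_queue<Elem, std::vector<Elem>, std::greater<Elem> > heap;
    dist[root] = 0.0;
    heap.push(Elem(0.0, root));

    while (!heap.empty()) {
        Elem top = heap.top(); heap.pop();
        long u = top.second;
        if (top.first > dist[u]) continue;             // stale heap entry: lazy deletion
        long x = u % sz0, y = (u / sz0) % sz1, z = u / sz01;   // voxel becomes ALIVE here
        for (int kk = -1; kk <= 1; ++kk)
          for (int jj = -1; jj <= 1; ++jj)
            for (int ii = -1; ii <= 1; ++ii) {
                int off = std::abs(ii) + std::abs(jj) + std::abs(kk);
                if (off == 0 || off > cnn_type) continue;
                long xx = x + ii, yy = y + jj, zz = z + kk;
                if (xx < 0 || xx >= sz0 || yy < 0 || yy >= sz1 || zz < 0 || zz >= sz2) continue;
                long v = zz * sz01 + yy * sz0 + xx;
                double w = std::sqrt(double(off));     // 1, 1.414214, 1.732051 as above
                if (dist[u] + w < dist[v]) {           // FAR -> TRIAL, or relax an existing TRIAL
                    dist[v] = dist[u] + w;
                    parent[v] = u;
                    heap.push(Elem(dist[v], v));
                }
            }
    }
}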
bspline_create.c
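bspline_create.c below builds interpolating B-spline coefficients by repeatedly solving small banded linear systems (find_coefs_1d_*, solve_*_interp_1d_*). As a reference for what those solves compute, here is a stand-alone sketch of the simplest case, a uniform 1-D grid with NATURAL (zero second derivative) boundary conditions; the interior rows use the same {1/6, 2/3, 1/6} stencil that the library loads into its bands[] array, and with natural boundaries the system reduces to a tridiagonal one solved by the Thomas algorithm. The helper name bspline_coefs_natural is invented for this example; the real solvers are more general because they also support FLAT/DERIV1/DERIV2, PERIODIC and ANTIPERIODIC boundaries.

#include <vector>

// Natural-BC cubic B-spline interpolation on a uniform grid: given M samples
// data[0..M-1], produce M+2 coefficients c[0..M+1] such that
//   (c[i] + 4*c[i+1] + c[i+2]) / 6 == data[i]                      (interpolation rows)
//   c[0] - 2*c[1] + c[2] == 0,  c[M-1] - 2*c[M] + c[M+1] == 0      (S'' = 0 at both ends;
//   the library scales these rows by delta_inv^2, which cancels because the RHS is 0).
// The end conditions collapse to c[1] = data[0] and c[M] = data[M-1]; the remaining
// unknowns c[2..M-1] satisfy a tridiagonal system solved here with the Thomas algorithm.
std::vector<double> bspline_coefs_natural(const std::vector<double>& data)
{
    const int M = static_cast<int>(data.size());
    std::vector<double> c(M + 2, 0.0);
    if (M == 0) return c;
    if (M == 1) { c[0] = c[1] = c[2] = data[0]; return c; }        // constant spline
    c[1] = data[0];
    c[M] = data[M - 1];
    if (M >= 3) {
        const int n = M - 2;                          // unknowns c[2] .. c[M-1]
        std::vector<double> diag(n, 4.0), rhs(n);
        for (int i = 0; i < n; ++i) rhs[i] = 6.0 * data[i + 1];
        rhs[0]     -= c[1];                           // move known c[1] to the right-hand side
        rhs[n - 1] -= c[M];                           // move known c[M] to the right-hand side
        for (int i = 1; i < n; ++i) {                 // forward elimination (off-diagonals are 1)
            double m = 1.0 / diag[i - 1];
            diag[i] -= m;
            rhs[i]  -= m * rhs[i - 1];
        }
        c[n + 1] = rhs[n - 1] / diag[n - 1];          // back substitution: c[M-1] first
        for (int i = n - 2; i >= 0; --i)
            c[i + 2] = (rhs[i] - c[i + 3]) / diag[i];
    }
    c[0]     = 2.0 * c[1] - c[2];                     // natural condition at the left end
    c[M + 1] = 2.0 * c[M] - c[M - 1];                 // natural condition at the right end
    return c;
}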
///////////////////////////////////////////////////////////////////////////// // einspline: a library for creating and evaluating B-splines // // Copyright (C) 2007 Kenneth P. Esler, Jr. // // // // This program is free software; you can redistribute it and/or modify // // it under the terms of the GNU General Public License as published by // // the Free Software Foundation; either version 2 of the License, or // // (at your option) any later version. // // // // This program is distributed in the hope that it will be useful, // // but WITHOUT ANY WARRANTY; without even the implied warranty of // // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // // GNU General Public License for more details. // // // // You should have received a copy of the GNU General Public License // // along with this program; if not, write to the Free Software // // Foundation, Inc., 51 Franklin Street, Fifth Floor, // // Boston, MA 02110-1301 USA // ///////////////////////////////////////////////////////////////////////////// #include "bspline_create.h" #ifndef _XOPEN_SOURCE #define _XOPEN_SOURCE 600 #endif #ifndef __USE_XOPEN2K #define __USE_XOPEN2K #endif #include <stdlib.h> #include <stdio.h> #include <inttypes.h> int posix_memalign(void **memptr, size_t alignment, size_t size); //////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////// //// Helper functions for spline creation //// //////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////// void init_sse_data(); void find_coefs_1d_d (Ugrid grid, BCtype_d bc, double *data, intptr_t dstride, double *coefs, intptr_t cstride); void solve_deriv_interp_1d_s (float bands[], float coefs[], int M, int cstride) { // Solve interpolating equations // First and last rows are different bands[4*(0)+1] /= bands[4*(0)+0]; bands[4*(0)+2] /= bands[4*(0)+0]; bands[4*(0)+3] /= bands[4*(0)+0]; bands[4*(0)+0] = 1.0; bands[4*(1)+1] -= bands[4*(1)+0]*bands[4*(0)+1]; bands[4*(1)+2] -= bands[4*(1)+0]*bands[4*(0)+2]; bands[4*(1)+3] -= bands[4*(1)+0]*bands[4*(0)+3]; bands[4*(0)+0] = 0.0; bands[4*(1)+2] /= bands[4*(1)+1]; bands[4*(1)+3] /= bands[4*(1)+1]; bands[4*(1)+1] = 1.0; // Now do rows 2 through M+1 for (int row=2; row < (M+1); row++) { bands[4*(row)+1] -= bands[4*(row)+0]*bands[4*(row-1)+2]; bands[4*(row)+3] -= bands[4*(row)+0]*bands[4*(row-1)+3]; bands[4*(row)+2] /= bands[4*(row)+1]; bands[4*(row)+3] /= bands[4*(row)+1]; bands[4*(row)+0] = 0.0; bands[4*(row)+1] = 1.0; } // Do last row bands[4*(M+1)+1] -= bands[4*(M+1)+0]*bands[4*(M-1)+2]; bands[4*(M+1)+3] -= bands[4*(M+1)+0]*bands[4*(M-1)+3]; bands[4*(M+1)+2] -= bands[4*(M+1)+1]*bands[4*(M)+2]; bands[4*(M+1)+3] -= bands[4*(M+1)+1]*bands[4*(M)+3]; bands[4*(M+1)+3] /= bands[4*(M+1)+2]; bands[4*(M+1)+2] = 1.0; coefs[(M+1)*cstride] = bands[4*(M+1)+3]; // Now back substitute up for (int row=M; row>0; row--) coefs[row*cstride] = bands[4*(row)+3] - bands[4*(row)+2]*coefs[cstride*(row+1)]; // Finish with first row coefs[0] = bands[4*(0)+3] - bands[4*(0)+1]*coefs[1*cstride] - bands[4*(0)+2]*coefs[2*cstride]; } // On input, bands should be filled with: // row 0 : abcdInitial from boundary conditions // rows 1:M: basis functions in first 3 cols, data in last // row M+1 : abcdFinal from boundary conditions // cstride gives the stride between values in coefs. 
// On exit, coefs with contain interpolating B-spline coefs void solve_periodic_interp_1d_s (float bands[], float coefs[], int M, size_t cstride) //int M, int cstride) { float lastCol[M]; // Now solve: // First and last rows are different bands[4*(0)+2] /= bands[4*(0)+1]; bands[4*(0)+0] /= bands[4*(0)+1]; bands[4*(0)+3] /= bands[4*(0)+1]; bands[4*(0)+1] = 1.0; bands[4*(M-1)+1] -= bands[4*(M-1)+2]*bands[4*(0)+0]; bands[4*(M-1)+3] -= bands[4*(M-1)+2]*bands[4*(0)+3]; bands[4*(M-1)+2] = -bands[4*(M-1)+2]*bands[4*(0)+2]; lastCol[0] = bands[4*(0)+0]; for (int row=1; row < (M-1); row++) { bands[4*(row)+1] -= bands[4*(row)+0] * bands[4*(row-1)+2]; bands[4*(row)+3] -= bands[4*(row)+0] * bands[4*(row-1)+3]; lastCol[row] = -bands[4*(row)+0] * lastCol[row-1]; bands[4*(row)+0] = 0.0; bands[4*(row)+2] /= bands[4*(row)+1]; bands[4*(row)+3] /= bands[4*(row)+1]; lastCol[row] /= bands[4*(row)+1]; bands[4*(row)+1] = 1.0; if (row < (M-2)) { bands[4*(M-1)+3] -= bands[4*(M-1)+2]*bands[4*(row)+3]; bands[4*(M-1)+1] -= bands[4*(M-1)+2]*lastCol[row]; bands[4*(M-1)+2] = -bands[4*(M-1)+2]*bands[4*(row)+2]; } } // Now do last row // The [2] element and [0] element are now on top of each other bands[4*(M-1)+0] += bands[4*(M-1)+2]; bands[4*(M-1)+1] -= bands[4*(M-1)+0] * (bands[4*(M-2)+2]+lastCol[M-2]); bands[4*(M-1)+3] -= bands[4*(M-1)+0] * bands[4*(M-2)+3]; bands[4*(M-1)+3] /= bands[4*(M-1)+1]; coefs[M*cstride] = bands[4*(M-1)+3]; for (int row=M-2; row>=0; row--) coefs[(row+1)*cstride] = bands[4*(row)+3] - bands[4*(row)+2]*coefs[(row+2)*cstride] - lastCol[row]*coefs[M*cstride]; coefs[0*cstride] = coefs[M*cstride]; coefs[(M+1)*cstride] = coefs[1*cstride]; coefs[(M+2)*cstride] = coefs[2*cstride]; } // On input, bands should be filled with: // row 0 : abcdInitial from boundary conditions // rows 1:M: basis functions in first 3 cols, data in last // row M+1 : abcdFinal from boundary conditions // cstride gives the stride between values in coefs. 
// On exit, coefs with contain interpolating B-spline coefs void solve_antiperiodic_interp_1d_s (float bands[], float coefs[], int M, int cstride) { bands[4*0+0] *= -1.0; bands[4*(M-1)+2] *= -1.0; float lastCol[M]; // Now solve: // First and last rows are different bands[4*(0)+2] /= bands[4*(0)+1]; bands[4*(0)+0] /= bands[4*(0)+1]; bands[4*(0)+3] /= bands[4*(0)+1]; bands[4*(0)+1] = 1.0; bands[4*(M-1)+1] -= bands[4*(M-1)+2]*bands[4*(0)+0]; bands[4*(M-1)+3] -= bands[4*(M-1)+2]*bands[4*(0)+3]; bands[4*(M-1)+2] = -bands[4*(M-1)+2]*bands[4*(0)+2]; lastCol[0] = bands[4*(0)+0]; for (int row=1; row < (M-1); row++) { bands[4*(row)+1] -= bands[4*(row)+0] * bands[4*(row-1)+2]; bands[4*(row)+3] -= bands[4*(row)+0] * bands[4*(row-1)+3]; lastCol[row] = -bands[4*(row)+0] * lastCol[row-1]; bands[4*(row)+0] = 0.0; bands[4*(row)+2] /= bands[4*(row)+1]; bands[4*(row)+3] /= bands[4*(row)+1]; lastCol[row] /= bands[4*(row)+1]; bands[4*(row)+1] = 1.0; if (row < (M-2)) { bands[4*(M-1)+3] -= bands[4*(M-1)+2]*bands[4*(row)+3]; bands[4*(M-1)+1] -= bands[4*(M-1)+2]*lastCol[row]; bands[4*(M-1)+2] = -bands[4*(M-1)+2]*bands[4*(row)+2]; } } // Now do last row // The [2] element and [0] element are now on top of each other bands[4*(M-1)+0] += bands[4*(M-1)+2]; bands[4*(M-1)+1] -= bands[4*(M-1)+0] * (bands[4*(M-2)+2]+lastCol[M-2]); bands[4*(M-1)+3] -= bands[4*(M-1)+0] * bands[4*(M-2)+3]; bands[4*(M-1)+3] /= bands[4*(M-1)+1]; coefs[M*cstride] = bands[4*(M-1)+3]; for (int row=M-2; row>=0; row--) coefs[(row+1)*cstride] = bands[4*(row)+3] - bands[4*(row)+2]*coefs[(row+2)*cstride] - lastCol[row]*coefs[M*cstride]; coefs[0*cstride] = -coefs[M*cstride]; coefs[(M+1)*cstride] = -coefs[1*cstride]; coefs[(M+2)*cstride] = -coefs[2*cstride]; } #ifdef HIGH_PRECISION void find_coefs_1d_s (Ugrid grid, BCtype_s bc, float *data, intptr_t dstride, float *coefs, intptr_t cstride) { BCtype_d d_bc; double *d_data, *d_coefs; d_bc.lCode = bc.lCode; d_bc.rCode = bc.rCode; d_bc.lVal = bc.lVal; d_bc.rVal = bc.rVal; int M = grid.num, N; if (bc.lCode == PERIODIC || bc.lCode == ANTIPERIODIC) N = M+3; else N = M+2; d_data = malloc (N*sizeof(double)); d_coefs = malloc (N*sizeof(double)); for (int i=0; i<M; i++) d_data[i] = data[i*dstride]; find_coefs_1d_d (grid, d_bc, d_data, 1, d_coefs, 1); for (int i=0; i<N; i++) coefs[i*cstride] = d_coefs[i]; free (d_data); free (d_coefs); } #else void find_coefs_1d_s (Ugrid grid, BCtype_s bc, float *data, intptr_t dstride, float *coefs, intptr_t cstride) { size_t M = grid.num; float basis[4] = {1.0/6.0, 2.0/3.0, 1.0/6.0, 0.0}; if (bc.lCode == PERIODIC || bc.lCode == ANTIPERIODIC) { #ifdef HAVE_C_VARARRAYS float bands[4*M]; #else float *bands = malloc(4*M*sizeof(float)); #endif for (size_t i=0; i<M; i++) { bands[4*i+0] = basis[0]; bands[4*i+1] = basis[1]; bands[4*i+2] = basis[2]; bands[4*i+3] = data[i*dstride]; } if (bc.lCode == PERIODIC) solve_periodic_interp_1d_s (bands, coefs, M, cstride); else solve_antiperiodic_interp_1d_s (bands, coefs, M, cstride); #ifndef HAVE_C_VARARRAYS free (bands); #endif } else { // Setup boundary conditions float abcd_left[4], abcd_right[4]; // Left boundary if (bc.lCode == FLAT || bc.lCode == NATURAL) bc.lVal = 0.0; if (bc.lCode == FLAT || bc.lCode == DERIV1) { abcd_left[0] = -0.5 * grid.delta_inv; abcd_left[1] = 0.0 * grid.delta_inv; abcd_left[2] = 0.5 * grid.delta_inv; abcd_left[3] = bc.lVal; } if (bc.lCode == NATURAL || bc.lCode == DERIV2) { abcd_left[0] = 1.0 * grid.delta_inv * grid.delta_inv; abcd_left[1] =-2.0 * grid.delta_inv * grid.delta_inv; abcd_left[2] = 1.0 * grid.delta_inv * 
grid.delta_inv; abcd_left[3] = bc.lVal; } // Right boundary if (bc.rCode == FLAT || bc.rCode == NATURAL) bc.rVal = 0.0; if (bc.rCode == FLAT || bc.rCode == DERIV1) { abcd_right[0] = -0.5 * grid.delta_inv; abcd_right[1] = 0.0 * grid.delta_inv; abcd_right[2] = 0.5 * grid.delta_inv; abcd_right[3] = bc.rVal; } if (bc.rCode == NATURAL || bc.rCode == DERIV2) { abcd_right[0] = 1.0 *grid.delta_inv * grid.delta_inv; abcd_right[1] =-2.0 *grid.delta_inv * grid.delta_inv; abcd_right[2] = 1.0 *grid.delta_inv * grid.delta_inv; abcd_right[3] = bc.rVal; } #ifdef HAVE_C_VARARRAYS float bands[4*(M+2)]; #else float *bands = malloc ((M+2)*4*sizeof(float)); #endif for (int i=0; i<4; i++) { bands[4*( 0 )+i] = abcd_left[i]; bands[4*(M+1)+i] = abcd_right[i]; } for (int i=0; i<M; i++) { for (int j=0; j<3; j++) bands[4*(i+1)+j] = basis[j]; bands[4*(i+1)+3] = data[i*dstride]; } // Now, solve for coefficients solve_deriv_interp_1d_s (bands, coefs, M, cstride); #ifndef HAVE_C_VARARRAYS free (bands); #endif } } #endif //////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////// //// Single-Precision, Real Creation Routines //// //////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////// // On input, bands should be filled with: // row 0 : abcdInitial from boundary conditions // rows 1:M: basis functions in first 3 cols, data in last // row M+1 : abcdFinal from boundary conditions // cstride gives the stride between values in coefs. // On exit, coefs with contain interpolating B-spline coefs UBspline_1d_s* create_UBspline_1d_s (Ugrid x_grid, BCtype_s xBC, float *data) { // Create new spline UBspline_1d_s* restrict spline = malloc (sizeof(UBspline_1d_s)); spline->spcode = U1D; spline->tcode = SINGLE_REAL; spline->xBC = xBC; spline->x_grid = x_grid; // Setup internal variables int M = x_grid.num; int N; if (xBC.lCode == PERIODIC || xBC.lCode == ANTIPERIODIC) { x_grid.delta = (x_grid.end-x_grid.start)/(double)(x_grid.num); N = M+3; } else { x_grid.delta = (x_grid.end-x_grid.start)/(double)(x_grid.num-1); N = M+2; } x_grid.delta_inv = 1.0/x_grid.delta; spline->x_grid = x_grid; #ifndef HAVE_SSE2 spline->coefs = malloc (sizeof(float)*N); #else posix_memalign ((void**)&spline->coefs, 16, (sizeof(float)*N)); #endif find_coefs_1d_s (spline->x_grid, xBC, data, 1, spline->coefs, 1); init_sse_data(); return spline; } void recompute_UBspline_1d_s (UBspline_1d_s* spline, float *data) { find_coefs_1d_s (spline->x_grid, spline->xBC, data, 1, spline->coefs, 1); } UBspline_2d_s* create_UBspline_2d_s (Ugrid x_grid, Ugrid y_grid, BCtype_s xBC, BCtype_s yBC, float *data) { // Create new spline UBspline_2d_s* restrict spline = malloc (sizeof(UBspline_2d_s)); spline->spcode = U2D; spline->tcode = SINGLE_REAL; spline->xBC = xBC; spline->yBC = yBC; // Setup internal variables int Mx = x_grid.num; int My = y_grid.num; int Nx, Ny; if (xBC.lCode == PERIODIC || xBC.lCode == ANTIPERIODIC) Nx = Mx+3; else Nx = Mx+2; x_grid.delta = (x_grid.end - x_grid.start)/(double)(Nx-3); x_grid.delta_inv = 1.0/x_grid.delta; spline->x_grid = x_grid; if (yBC.lCode == PERIODIC || yBC.lCode == ANTIPERIODIC) Ny = My+3; else Ny = My+2; y_grid.delta = (y_grid.end - y_grid.start)/(double)(Ny-3); y_grid.delta_inv = 1.0/y_grid.delta; spline->y_grid = y_grid; spline->x_stride = Ny; #ifndef HAVE_SSE2 spline->coefs = malloc (sizeof(float)*Nx*Ny); #else posix_memalign ((void**)&spline->coefs, 16, sizeof(float)*Nx*Ny); #endif // First, solve in the 
X-direction for (int iy=0; iy<My; iy++) { intptr_t doffset = iy; intptr_t coffset = iy; find_coefs_1d_s (spline->x_grid, spline->xBC, data+doffset, My, spline->coefs+coffset, Ny); } // Now, solve in the Y-direction for (int ix=0; ix<Nx; ix++) { intptr_t doffset = ix*Ny; intptr_t coffset = ix*Ny; find_coefs_1d_s (spline->y_grid, spline->yBC, spline->coefs+doffset, 1, spline->coefs+coffset, 1); } init_sse_data(); return spline; } void recompute_UBspline_2d_s (UBspline_2d_s* spline, float *data) { int Mx = spline->x_grid.num; int My = spline->y_grid.num; int Nx, Ny; if (spline->xBC.lCode == PERIODIC || spline->xBC.lCode == ANTIPERIODIC) Nx = Mx+3; else Nx = Mx+2; if (spline->yBC.lCode == PERIODIC || spline->yBC.lCode == ANTIPERIODIC) Ny = My+3; else Ny = My+2; // First, solve in the X-direction for (int iy=0; iy<My; iy++) { intptr_t doffset = iy; intptr_t coffset = iy; find_coefs_1d_s (spline->x_grid, spline->xBC, data+doffset, My, spline->coefs+coffset, Ny); } // Now, solve in the Y-direction for (int ix=0; ix<Nx; ix++) { intptr_t doffset = ix*Ny; intptr_t coffset = ix*Ny; find_coefs_1d_s (spline->y_grid, spline->yBC, spline->coefs+doffset, 1, spline->coefs+coffset, 1); } } UBspline_3d_s* create_UBspline_3d_s (Ugrid x_grid, Ugrid y_grid, Ugrid z_grid, BCtype_s xBC, BCtype_s yBC, BCtype_s zBC, float *data) { // Create new spline UBspline_3d_s* spline = malloc (sizeof(UBspline_3d_s)); spline->spcode = U3D; spline->tcode = SINGLE_REAL; spline->xBC = xBC; spline->yBC = yBC; spline->zBC = zBC; // Setup internal variables int Mx = x_grid.num; int My = y_grid.num; int Mz = z_grid.num; int Nx, Ny, Nz; if (xBC.lCode == PERIODIC || xBC.lCode == ANTIPERIODIC) Nx = Mx+3; else Nx = Mx+2; x_grid.delta = (x_grid.end - x_grid.start)/(double)(Nx-3); x_grid.delta_inv = 1.0/x_grid.delta; spline->x_grid = x_grid; if (yBC.lCode == PERIODIC || yBC.lCode == ANTIPERIODIC) Ny = My+3; else Ny = My+2; y_grid.delta = (y_grid.end - y_grid.start)/(double)(Ny-3); y_grid.delta_inv = 1.0/y_grid.delta; spline->y_grid = y_grid; if (zBC.lCode == PERIODIC || zBC.lCode == ANTIPERIODIC) Nz = Mz+3; else Nz = Mz+2; z_grid.delta = (z_grid.end - z_grid.start)/(double)(Nz-3); z_grid.delta_inv = 1.0/z_grid.delta; spline->z_grid = z_grid; spline->x_stride = Ny*Nz; spline->y_stride = Nz; spline->coefs_size=(size_t)Nx*(size_t)Ny*(size_t)Nz; #ifndef HAVE_SSE2 spline->coefs = malloc (sizeof(float)*spline->coefs_size); #else posix_memalign ((void**)&spline->coefs, 16, (sizeof(float)*spline->coefs_size)); #endif // First, solve in the X-direction for (int iy=0; iy<My; iy++) for (int iz=0; iz<Mz; iz++) { intptr_t doffset = iy*Mz+iz; intptr_t coffset = iy*Nz+iz; find_coefs_1d_s (spline->x_grid, xBC, data+doffset, My*Mz, spline->coefs+coffset, Ny*Nz); } // Now, solve in the Y-direction for (int ix=0; ix<Nx; ix++) for (int iz=0; iz<Nz; iz++) { intptr_t doffset = ix*Ny*Nz + iz; intptr_t coffset = ix*Ny*Nz + iz; find_coefs_1d_s (spline->y_grid, yBC, spline->coefs+doffset, Nz, spline->coefs+coffset, Nz); } // Now, solve in the Z-direction for (int ix=0; ix<Nx; ix++) for (int iy=0; iy<Ny; iy++) { intptr_t doffset = (ix*Ny+iy)*Nz; intptr_t coffset = (ix*Ny+iy)*Nz; find_coefs_1d_s (spline->z_grid, zBC, spline->coefs+doffset, 1, spline->coefs+coffset, 1); } init_sse_data(); return spline; } void recompute_UBspline_3d_s (UBspline_3d_s* spline, float *data) { int Mx = spline->x_grid.num; int My = spline->y_grid.num; int Mz = spline->z_grid.num; int Nx, Ny, Nz; if (spline->xBC.lCode == PERIODIC || spline->xBC.lCode == ANTIPERIODIC) Nx = Mx+3; else Nx = 
Mx+2; if (spline->yBC.lCode == PERIODIC || spline->yBC.lCode == ANTIPERIODIC) Ny = My+3; else Ny = My+2; if (spline->zBC.lCode == PERIODIC || spline->zBC.lCode == ANTIPERIODIC) Nz = Mz+3; else Nz = Mz+2; // First, solve in the X-direction for (int iy=0; iy<My; iy++) for (int iz=0; iz<Mz; iz++) { intptr_t doffset = iy*Mz+iz; intptr_t coffset = iy*Nz+iz; find_coefs_1d_s (spline->x_grid, spline->xBC, data+doffset, My*Mz, spline->coefs+coffset, Ny*Nz); } // Now, solve in the Y-direction for (int ix=0; ix<Nx; ix++) for (int iz=0; iz<Nz; iz++) { intptr_t doffset = ix*Ny*Nz + iz; intptr_t coffset = ix*Ny*Nz + iz; find_coefs_1d_s (spline->y_grid, spline->yBC, spline->coefs+doffset, Nz, spline->coefs+coffset, Nz); } // Now, solve in the Z-direction for (int ix=0; ix<Nx; ix++) for (int iy=0; iy<Ny; iy++) { intptr_t doffset = (ix*Ny+iy)*Nz; intptr_t coffset = (ix*Ny+iy)*Nz; find_coefs_1d_s (spline->z_grid, spline->zBC, spline->coefs+doffset, 1, spline->coefs+coffset, 1); } } //////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////// //// Single-Precision, Complex Creation Routines //// //////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////// // On input, bands should be filled with: // row 0 : abcdInitial from boundary conditions // rows 1:M: basis functions in first 3 cols, data in last // row M+1 : abcdFinal from boundary conditions // cstride gives the stride between values in coefs. // On exit, coefs with contain interpolating B-spline coefs UBspline_1d_c* create_UBspline_1d_c (Ugrid x_grid, BCtype_c xBC, complex_float *data) { // Create new spline UBspline_1d_c* restrict spline = malloc (sizeof(UBspline_1d_c)); spline->spcode = U1D; spline->tcode = SINGLE_COMPLEX; spline->xBC = xBC; // Setup internal variables int M = x_grid.num; int N; if (xBC.lCode == PERIODIC || xBC.lCode == ANTIPERIODIC) { x_grid.delta = (x_grid.end-x_grid.start)/(double)(x_grid.num); N = M+3; } else { x_grid.delta = (x_grid.end-x_grid.start)/(double)(x_grid.num-1); N = M+2; } x_grid.delta_inv = 1.0/x_grid.delta; spline->x_grid = x_grid; #ifndef HAVE_SSE2 spline->coefs = malloc (2*sizeof(float)*N); #else posix_memalign ((void**)&spline->coefs, 16, 2*sizeof(float)*N); #endif BCtype_s xBC_r, xBC_i; xBC_r.lCode = xBC.lCode; xBC_r.rCode = xBC.rCode; xBC_r.lVal = xBC.lVal_r; xBC_r.rVal = xBC.rVal_r; xBC_i.lCode = xBC.lCode; xBC_i.rCode = xBC.rCode; xBC_i.lVal = xBC.lVal_i; xBC_i.rVal = xBC.rVal_i; // Real part find_coefs_1d_s (spline->x_grid, xBC_r, (float*)data, 2, (float*)spline->coefs, 2); // Imaginarty part find_coefs_1d_s (spline->x_grid, xBC_i, ((float*)data)+1, 2, ((float*)spline->coefs+1), 2); init_sse_data(); return spline; } void recompute_UBspline_1d_c (UBspline_1d_c* spline, complex_float *data) { BCtype_s xBC_r, xBC_i; xBC_r.lCode = spline->xBC.lCode; xBC_r.rCode = spline->xBC.rCode; xBC_r.lVal = spline->xBC.lVal_r; xBC_r.rVal = spline->xBC.rVal_r; xBC_i.lCode = spline->xBC.lCode; xBC_i.rCode = spline->xBC.rCode; xBC_i.lVal = spline->xBC.lVal_i; xBC_i.rVal = spline->xBC.rVal_i; // Real part find_coefs_1d_s (spline->x_grid, xBC_r, (float*)data, 2, (float*)spline->coefs, 2); // Imaginarty part find_coefs_1d_s (spline->x_grid, xBC_i, ((float*)data)+1, 2, ((float*)spline->coefs+1), 2); } UBspline_2d_c* create_UBspline_2d_c (Ugrid x_grid, Ugrid y_grid, BCtype_c xBC, BCtype_c yBC, complex_float *data) { // Create new spline UBspline_2d_c* restrict spline = malloc (sizeof(UBspline_2d_c)); 
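// The multidimensional creation routines rely on the tensor-product B-spline being
// separable: create_UBspline_2d_c first runs the 1-D solver along X for every y column
// (reading the interleaved complex data with stride 2*My and writing into the coefficient
// array with stride 2*Ny), then re-runs it along Y in place with stride 2.  Real and
// imaginary parts are treated as two independent interleaved real splines, which is why
// every find_coefs_1d_s call appears twice, once at offset +0 and once at offset +1.
// The 3-D routines further below add a third pass along Z in the same way.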
spline->spcode = U2D; spline->tcode = SINGLE_COMPLEX; spline->xBC = xBC; spline->yBC = yBC; // Setup internal variables int Mx = x_grid.num; int My = y_grid.num; int Nx, Ny; if (xBC.lCode == PERIODIC || xBC.lCode == ANTIPERIODIC) Nx = Mx+3; else Nx = Mx+2; x_grid.delta = (x_grid.end - x_grid.start)/(double)(Nx-3); x_grid.delta_inv = 1.0/x_grid.delta; spline->x_grid = x_grid; if (yBC.lCode == PERIODIC || yBC.lCode == ANTIPERIODIC) Ny = My+3; else Ny = My+2; y_grid.delta = (y_grid.end - y_grid.start)/(double)(Ny-3); y_grid.delta_inv = 1.0/y_grid.delta; spline->y_grid = y_grid; spline->x_stride = Ny; #ifndef HAVE_SSE2 spline->coefs = malloc (2*sizeof(float)*Nx*Ny); #else posix_memalign ((void**)&spline->coefs, 16, 2*sizeof(float)*Nx*Ny); #endif BCtype_s xBC_r, xBC_i, yBC_r, yBC_i; xBC_r.lCode = xBC.lCode; xBC_r.rCode = xBC.rCode; xBC_r.lVal = xBC.lVal_r; xBC_r.rVal = xBC.rVal_r; xBC_i.lCode = xBC.lCode; xBC_i.rCode = xBC.rCode; xBC_i.lVal = xBC.lVal_i; xBC_i.rVal = xBC.rVal_i; yBC_r.lCode = yBC.lCode; yBC_r.rCode = yBC.rCode; yBC_r.lVal = yBC.lVal_r; yBC_r.rVal = yBC.rVal_r; yBC_i.lCode = yBC.lCode; yBC_i.rCode = yBC.rCode; yBC_i.lVal = yBC.lVal_i; yBC_i.rVal = yBC.rVal_i; // First, solve in the X-direction for (int iy=0; iy<My; iy++) { intptr_t doffset = 2*iy; intptr_t coffset = 2*iy; // Real part find_coefs_1d_s (spline->x_grid, xBC_r, ((float*)data)+doffset, 2*My, (float*)spline->coefs+coffset, 2*Ny); // Imag part find_coefs_1d_s (spline->x_grid, xBC_i, ((float*)data)+doffset+1, 2*My, ((float*)spline->coefs)+coffset+1, 2*Ny); } // Now, solve in the Y-direction for (int ix=0; ix<Nx; ix++) { intptr_t doffset = 2*ix*Ny; intptr_t coffset = 2*ix*Ny; // Real part find_coefs_1d_s (spline->y_grid, yBC_r, ((float*)spline->coefs)+doffset, 2, ((float*)spline->coefs)+coffset, 2); // Imag part find_coefs_1d_s (spline->y_grid, yBC_i, ((float*)spline->coefs)+doffset+1, 2, ((float*)spline->coefs)+coffset+1, 2); } init_sse_data(); return spline; } void recompute_UBspline_2d_c (UBspline_2d_c* spline, complex_float *data) { // Setup internal variables int Mx = spline->x_grid.num; int My = spline->y_grid.num; int Nx, Ny; if (spline->xBC.lCode == PERIODIC || spline->xBC.lCode == ANTIPERIODIC) Nx = Mx+3; else Nx = Mx+2; if (spline->yBC.lCode == PERIODIC || spline->yBC.lCode == ANTIPERIODIC) Ny = My+3; else Ny = My+2; BCtype_s xBC_r, xBC_i, yBC_r, yBC_i; xBC_r.lCode = spline->xBC.lCode; xBC_r.rCode = spline->xBC.rCode; xBC_r.lVal = spline->xBC.lVal_r; xBC_r.rVal = spline->xBC.rVal_r; xBC_i.lCode = spline->xBC.lCode; xBC_i.rCode = spline->xBC.rCode; xBC_i.lVal = spline->xBC.lVal_i; xBC_i.rVal = spline->xBC.rVal_i; yBC_r.lCode = spline->yBC.lCode; yBC_r.rCode = spline->yBC.rCode; yBC_r.lVal = spline->yBC.lVal_r; yBC_r.rVal = spline->yBC.rVal_r; yBC_i.lCode = spline->yBC.lCode; yBC_i.rCode = spline->yBC.rCode; yBC_i.lVal = spline->yBC.lVal_i; yBC_i.rVal = spline->yBC.rVal_i; // First, solve in the X-direction for (int iy=0; iy<My; iy++) { intptr_t doffset = 2*iy; intptr_t coffset = 2*iy; // Real part find_coefs_1d_s (spline->x_grid, xBC_r, ((float*)data)+doffset, 2*My, (float*)spline->coefs+coffset, 2*Ny); // Imag part find_coefs_1d_s (spline->x_grid, xBC_i, ((float*)data)+doffset+1, 2*My, ((float*)spline->coefs)+coffset+1, 2*Ny); } // Now, solve in the Y-direction for (int ix=0; ix<Nx; ix++) { intptr_t doffset = 2*ix*Ny; intptr_t coffset = 2*ix*Ny; // Real part find_coefs_1d_s (spline->y_grid, yBC_r, ((float*)spline->coefs)+doffset, 2, ((float*)spline->coefs)+coffset, 2); // Imag part find_coefs_1d_s 
(spline->y_grid, yBC_i, ((float*)spline->coefs)+doffset+1, 2, ((float*)spline->coefs)+coffset+1, 2); } } UBspline_3d_c* create_UBspline_3d_c (Ugrid x_grid, Ugrid y_grid, Ugrid z_grid, BCtype_c xBC, BCtype_c yBC, BCtype_c zBC, complex_float *data) { // Create new spline UBspline_3d_c* restrict spline = malloc (sizeof(UBspline_3d_c)); spline->spcode = U3D; spline->tcode = SINGLE_COMPLEX; spline->xBC = xBC; spline->yBC = yBC; spline->zBC = zBC; // Setup internal variables int Mx = x_grid.num; int My = y_grid.num; int Mz = z_grid.num; int Nx, Ny, Nz; if (xBC.lCode == PERIODIC || xBC.lCode == ANTIPERIODIC) Nx = Mx+3; else Nx = Mx+2; x_grid.delta = (x_grid.end - x_grid.start)/(double)(Nx-3); x_grid.delta_inv = 1.0/x_grid.delta; spline->x_grid = x_grid; if (yBC.lCode == PERIODIC || yBC.lCode == ANTIPERIODIC) Ny = My+3; else Ny = My+2; y_grid.delta = (y_grid.end - y_grid.start)/(double)(Ny-3); y_grid.delta_inv = 1.0/y_grid.delta; spline->y_grid = y_grid; if (zBC.lCode == PERIODIC || zBC.lCode == ANTIPERIODIC) Nz = Mz+3; else Nz = Mz+2; z_grid.delta = (z_grid.end - z_grid.start)/(double)(Nz-3); z_grid.delta_inv = 1.0/z_grid.delta; spline->z_grid = z_grid; spline->x_stride = Ny*Nz; spline->y_stride = Nz; #ifndef HAVE_SSE2 spline->coefs = malloc (2*sizeof(float)*Nx*Ny*Nz); #else posix_memalign ((void**)&spline->coefs, 16, 2*sizeof(float)*Nx*Ny*Nz); #endif BCtype_s xBC_r, xBC_i, yBC_r, yBC_i, zBC_r, zBC_i; xBC_r.lCode = xBC.lCode; xBC_r.rCode = xBC.rCode; xBC_r.lVal = xBC.lVal_r; xBC_r.rVal = xBC.rVal_r; xBC_i.lCode = xBC.lCode; xBC_i.rCode = xBC.rCode; xBC_i.lVal = xBC.lVal_i; xBC_i.rVal = xBC.rVal_i; yBC_r.lCode = yBC.lCode; yBC_r.rCode = yBC.rCode; yBC_r.lVal = yBC.lVal_r; yBC_r.rVal = yBC.rVal_r; yBC_i.lCode = yBC.lCode; yBC_i.rCode = yBC.rCode; yBC_i.lVal = yBC.lVal_i; yBC_i.rVal = yBC.rVal_i; zBC_r.lCode = zBC.lCode; zBC_r.rCode = zBC.rCode; zBC_r.lVal = zBC.lVal_r; zBC_r.rVal = zBC.rVal_r; zBC_i.lCode = zBC.lCode; zBC_i.rCode = zBC.rCode; zBC_i.lVal = zBC.lVal_i; zBC_i.rVal = zBC.rVal_i; // First, solve in the X-direction for (int iy=0; iy<My; iy++) for (int iz=0; iz<Mz; iz++) { intptr_t doffset = 2*(iy*Mz+iz); intptr_t coffset = 2*(iy*Nz+iz); // Real part find_coefs_1d_s (spline->x_grid, xBC_r, ((float*)data)+doffset, 2*My*Mz, ((float*)spline->coefs)+coffset, 2*Ny*Nz); // Imag part find_coefs_1d_s (spline->x_grid, xBC_i, ((float*)data)+doffset+1, 2*My*Mz, ((float*)spline->coefs)+coffset+1, 2*Ny*Nz); } // Now, solve in the Y-direction for (int ix=0; ix<Nx; ix++) for (int iz=0; iz<Nz; iz++) { intptr_t doffset = 2*(ix*Ny*Nz + iz); intptr_t coffset = 2*(ix*Ny*Nz + iz); // Real part find_coefs_1d_s (spline->y_grid, yBC_r, ((float*)spline->coefs)+doffset, 2*Nz, ((float*)spline->coefs)+coffset, 2*Nz); // Imag part find_coefs_1d_s (spline->y_grid, yBC_i, ((float*)spline->coefs)+doffset+1, 2*Nz, ((float*)spline->coefs)+coffset+1, 2*Nz); } // Now, solve in the Z-direction for (int ix=0; ix<Nx; ix++) for (int iy=0; iy<Ny; iy++) { intptr_t doffset = 2*((ix*Ny+iy)*Nz); intptr_t coffset = 2*((ix*Ny+iy)*Nz); // Real part find_coefs_1d_s (spline->z_grid, zBC_r, ((float*)spline->coefs)+doffset, 2, ((float*)spline->coefs)+coffset, 2); // Imag part find_coefs_1d_s (spline->z_grid, zBC_i, ((float*)spline->coefs)+doffset+1, 2, ((float*)spline->coefs)+coffset+1, 2); } init_sse_data(); return spline; } void recompute_UBspline_3d_c (UBspline_3d_c* spline, complex_float *data) { // Setup internal variables int Mx = spline->x_grid.num; int My = spline->y_grid.num; int Mz = spline->z_grid.num; int Nx, Ny, Nz; if 
(spline->xBC.lCode == PERIODIC || spline->xBC.lCode == ANTIPERIODIC) Nx = Mx+3; else Nx = Mx+2; if (spline->yBC.lCode == PERIODIC || spline->yBC.lCode == ANTIPERIODIC) Ny = My+3; else Ny = My+2; if (spline->zBC.lCode == PERIODIC || spline->zBC.lCode == ANTIPERIODIC) Nz = Mz+3; else Nz = Mz+2; BCtype_s xBC_r, xBC_i, yBC_r, yBC_i, zBC_r, zBC_i; xBC_r.lCode = spline->xBC.lCode; xBC_r.rCode = spline->xBC.rCode; xBC_r.lVal = spline->xBC.lVal_r; xBC_r.rVal = spline->xBC.rVal_r; xBC_i.lCode = spline->xBC.lCode; xBC_i.rCode = spline->xBC.rCode; xBC_i.lVal = spline->xBC.lVal_i; xBC_i.rVal = spline->xBC.rVal_i; yBC_r.lCode = spline->yBC.lCode; yBC_r.rCode = spline->yBC.rCode; yBC_r.lVal = spline->yBC.lVal_r; yBC_r.rVal = spline->yBC.rVal_r; yBC_i.lCode = spline->yBC.lCode; yBC_i.rCode = spline->yBC.rCode; yBC_i.lVal = spline->yBC.lVal_i; yBC_i.rVal = spline->yBC.rVal_i; zBC_r.lCode = spline->zBC.lCode; zBC_r.rCode = spline->zBC.rCode; zBC_r.lVal = spline->zBC.lVal_r; zBC_r.rVal = spline->zBC.rVal_r; zBC_i.lCode = spline->zBC.lCode; zBC_i.rCode = spline->zBC.rCode; zBC_i.lVal = spline->zBC.lVal_i; zBC_i.rVal = spline->zBC.rVal_i; // First, solve in the X-direction for (int iy=0; iy<My; iy++) for (int iz=0; iz<Mz; iz++) { intptr_t doffset = 2*(iy*Mz+iz); intptr_t coffset = 2*(iy*Nz+iz); // Real part find_coefs_1d_s (spline->x_grid, xBC_r, ((float*)data)+doffset, 2*My*Mz, ((float*)spline->coefs)+coffset, 2*Ny*Nz); // Imag part find_coefs_1d_s (spline->x_grid, xBC_i, ((float*)data)+doffset+1, 2*My*Mz, ((float*)spline->coefs)+coffset+1, 2*Ny*Nz); } // Now, solve in the Y-direction for (int ix=0; ix<Nx; ix++) for (int iz=0; iz<Nz; iz++) { intptr_t doffset = 2*(ix*Ny*Nz + iz); intptr_t coffset = 2*(ix*Ny*Nz + iz); // Real part find_coefs_1d_s (spline->y_grid, yBC_r, ((float*)spline->coefs)+doffset, 2*Nz, ((float*)spline->coefs)+coffset, 2*Nz); // Imag part find_coefs_1d_s (spline->y_grid, yBC_i, ((float*)spline->coefs)+doffset+1, 2*Nz, ((float*)spline->coefs)+coffset+1, 2*Nz); } // Now, solve in the Z-direction for (int ix=0; ix<Nx; ix++) for (int iy=0; iy<Ny; iy++) { intptr_t doffset = 2*((ix*Ny+iy)*Nz); intptr_t coffset = 2*((ix*Ny+iy)*Nz); // Real part find_coefs_1d_s (spline->z_grid, zBC_r, ((float*)spline->coefs)+doffset, 2, ((float*)spline->coefs)+coffset, 2); // Imag part find_coefs_1d_s (spline->z_grid, zBC_i, ((float*)spline->coefs)+doffset+1, 2, ((float*)spline->coefs)+coffset+1, 2); } } //////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////// //// Double-Precision, Real Creation Routines //// //////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////// // On input, bands should be filled with: // row 0 : abcdInitial from boundary conditions // rows 1:M: basis functions in first 3 cols, data in last // row M+1 : abcdFinal from boundary conditions // cstride gives the stride between values in coefs. 
// On exit, coefs with contain interpolating B-spline coefs void solve_deriv_interp_1d_d (double bands[], double coefs[], int M, int cstride) { // Solve interpolating equations // First and last rows are different bands[4*(0)+1] /= bands[4*(0)+0]; bands[4*(0)+2] /= bands[4*(0)+0]; bands[4*(0)+3] /= bands[4*(0)+0]; bands[4*(0)+0] = 1.0; bands[4*(1)+1] -= bands[4*(1)+0]*bands[4*(0)+1]; bands[4*(1)+2] -= bands[4*(1)+0]*bands[4*(0)+2]; bands[4*(1)+3] -= bands[4*(1)+0]*bands[4*(0)+3]; bands[4*(0)+0] = 0.0; bands[4*(1)+2] /= bands[4*(1)+1]; bands[4*(1)+3] /= bands[4*(1)+1]; bands[4*(1)+1] = 1.0; // Now do rows 2 through M+1 for (int row=2; row < (M+1); row++) { bands[4*(row)+1] -= bands[4*(row)+0]*bands[4*(row-1)+2]; bands[4*(row)+3] -= bands[4*(row)+0]*bands[4*(row-1)+3]; bands[4*(row)+2] /= bands[4*(row)+1]; bands[4*(row)+3] /= bands[4*(row)+1]; bands[4*(row)+0] = 0.0; bands[4*(row)+1] = 1.0; } // Do last row bands[4*(M+1)+1] -= bands[4*(M+1)+0]*bands[4*(M-1)+2]; bands[4*(M+1)+3] -= bands[4*(M+1)+0]*bands[4*(M-1)+3]; bands[4*(M+1)+2] -= bands[4*(M+1)+1]*bands[4*(M)+2]; bands[4*(M+1)+3] -= bands[4*(M+1)+1]*bands[4*(M)+3]; bands[4*(M+1)+3] /= bands[4*(M+1)+2]; bands[4*(M+1)+2] = 1.0; coefs[(M+1)*cstride] = bands[4*(M+1)+3]; // Now back substitute up for (int row=M; row>0; row--) coefs[row*cstride] = bands[4*(row)+3] - bands[4*(row)+2]*coefs[cstride*(row+1)]; // Finish with first row coefs[0] = bands[4*(0)+3] - bands[4*(0)+1]*coefs[1*cstride] - bands[4*(0)+2]*coefs[2*cstride]; } // On input, bands should be filled with: // row 0 : abcdInitial from boundary conditions // rows 1:M: basis functions in first 3 cols, data in last // row M+1 : abcdFinal from boundary conditions // cstride gives the stride between values in coefs. // On exit, coefs with contain interpolating B-spline coefs void solve_periodic_interp_1d_d (double bands[], double coefs[], int M, intptr_t cstride) { double lastCol[M]; // Now solve: // First and last rows are different bands[4*(0)+2] /= bands[4*(0)+1]; bands[4*(0)+0] /= bands[4*(0)+1]; bands[4*(0)+3] /= bands[4*(0)+1]; bands[4*(0)+1] = 1.0; bands[4*(M-1)+1] -= bands[4*(M-1)+2]*bands[4*(0)+0]; bands[4*(M-1)+3] -= bands[4*(M-1)+2]*bands[4*(0)+3]; bands[4*(M-1)+2] = -bands[4*(M-1)+2]*bands[4*(0)+2]; lastCol[0] = bands[4*(0)+0]; for (int row=1; row < (M-1); row++) { bands[4*(row)+1] -= bands[4*(row)+0] * bands[4*(row-1)+2]; bands[4*(row)+3] -= bands[4*(row)+0] * bands[4*(row-1)+3]; lastCol[row] = -bands[4*(row)+0] * lastCol[row-1]; bands[4*(row)+0] = 0.0; bands[4*(row)+2] /= bands[4*(row)+1]; bands[4*(row)+3] /= bands[4*(row)+1]; lastCol[row] /= bands[4*(row)+1]; bands[4*(row)+1] = 1.0; if (row < (M-2)) { bands[4*(M-1)+3] -= bands[4*(M-1)+2]*bands[4*(row)+3]; bands[4*(M-1)+1] -= bands[4*(M-1)+2]*lastCol[row]; bands[4*(M-1)+2] = -bands[4*(M-1)+2]*bands[4*(row)+2]; } } // Now do last row // The [2] element and [0] element are now on top of each other bands[4*(M-1)+0] += bands[4*(M-1)+2]; bands[4*(M-1)+1] -= bands[4*(M-1)+0] * (bands[4*(M-2)+2]+lastCol[M-2]); bands[4*(M-1)+3] -= bands[4*(M-1)+0] * bands[4*(M-2)+3]; bands[4*(M-1)+3] /= bands[4*(M-1)+1]; coefs[M*cstride] = bands[4*(M-1)+3]; for (int row=M-2; row>=0; row--) coefs[(row+1)*cstride] = bands[4*(row)+3] - bands[4*(row)+2]*coefs[(row+2)*cstride] - lastCol[row]*coefs[M*cstride]; coefs[0*cstride] = coefs[M*cstride]; coefs[(M+1)*cstride] = coefs[1*cstride]; coefs[(M+2)*cstride] = coefs[2*cstride]; } // On input, bands should be filled with: // row 0 : abcdInitial from boundary conditions // rows 1:M: basis functions in first 
3 cols, data in last // row M+1 : abcdFinal from boundary conditions // cstride gives the stride between values in coefs. // On exit, coefs with contain interpolating B-spline coefs void solve_antiperiodic_interp_1d_d (double bands[], double coefs[], int M, int cstride) { double lastCol[M]; bands[4*0+0] *= -1.0; bands[4*(M-1)+2] *= -1.0; // Now solve: // First and last rows are different bands[4*(0)+2] /= bands[4*(0)+1]; bands[4*(0)+0] /= bands[4*(0)+1]; bands[4*(0)+3] /= bands[4*(0)+1]; bands[4*(0)+1] = 1.0; bands[4*(M-1)+1] -= bands[4*(M-1)+2]*bands[4*(0)+0]; bands[4*(M-1)+3] -= bands[4*(M-1)+2]*bands[4*(0)+3]; bands[4*(M-1)+2] = -bands[4*(M-1)+2]*bands[4*(0)+2]; lastCol[0] = bands[4*(0)+0]; for (int row=1; row < (M-1); row++) { bands[4*(row)+1] -= bands[4*(row)+0] * bands[4*(row-1)+2]; bands[4*(row)+3] -= bands[4*(row)+0] * bands[4*(row-1)+3]; lastCol[row] = -bands[4*(row)+0] * lastCol[row-1]; bands[4*(row)+0] = 0.0; bands[4*(row)+2] /= bands[4*(row)+1]; bands[4*(row)+3] /= bands[4*(row)+1]; lastCol[row] /= bands[4*(row)+1]; bands[4*(row)+1] = 1.0; if (row < (M-2)) { bands[4*(M-1)+3] -= bands[4*(M-1)+2]*bands[4*(row)+3]; bands[4*(M-1)+1] -= bands[4*(M-1)+2]*lastCol[row]; bands[4*(M-1)+2] = -bands[4*(M-1)+2]*bands[4*(row)+2]; } } // Now do last row // The [2] element and [0] element are now on top of each other bands[4*(M-1)+0] += bands[4*(M-1)+2]; bands[4*(M-1)+1] -= bands[4*(M-1)+0] * (bands[4*(M-2)+2]+lastCol[M-2]); bands[4*(M-1)+3] -= bands[4*(M-1)+0] * bands[4*(M-2)+3]; bands[4*(M-1)+3] /= bands[4*(M-1)+1]; coefs[M*cstride] = bands[4*(M-1)+3]; for (int row=M-2; row>=0; row--) coefs[(row+1)*cstride] = bands[4*(row)+3] - bands[4*(row)+2]*coefs[(row+2)*cstride] - lastCol[row]*coefs[M*cstride]; coefs[0*cstride] = -coefs[M*cstride]; coefs[(M+1)*cstride] = -coefs[1*cstride]; coefs[(M+2)*cstride] = -coefs[2*cstride]; } void find_coefs_1d_d (Ugrid grid, BCtype_d bc, double *data, intptr_t dstride, double *coefs, intptr_t cstride) { int M = grid.num; double basis[4] = {1.0/6.0, 2.0/3.0, 1.0/6.0, 0.0}; if (bc.lCode == PERIODIC || bc.lCode == ANTIPERIODIC) { #ifdef HAVE_C_VARARRAYS double bands[M*4]; #else double *bands = malloc (4*M*sizeof(double)); #endif for (int i=0; i<M; i++) { bands[4*i+0] = basis[0]; bands[4*i+1] = basis[1]; bands[4*i+2] = basis[2]; bands[4*i+3] = data[i*dstride]; } if (bc.lCode == ANTIPERIODIC) solve_antiperiodic_interp_1d_d (bands, coefs, M, cstride); else solve_periodic_interp_1d_d (bands, coefs, M, cstride); #ifndef HAVE_C_VARARRAYS free (bands); #endif } else { // Setup boundary conditions double abcd_left[4], abcd_right[4]; // Left boundary if (bc.lCode == FLAT || bc.lCode == NATURAL) bc.lVal = 0.0; if (bc.lCode == FLAT || bc.lCode == DERIV1) { abcd_left[0] = -0.5 * grid.delta_inv; abcd_left[1] = 0.0 * grid.delta_inv; abcd_left[2] = 0.5 * grid.delta_inv; abcd_left[3] = bc.lVal; } if (bc.lCode == NATURAL || bc.lCode == DERIV2) { abcd_left[0] = 1.0 * grid.delta_inv * grid.delta_inv; abcd_left[1] =-2.0 * grid.delta_inv * grid.delta_inv; abcd_left[2] = 1.0 * grid.delta_inv * grid.delta_inv; abcd_left[3] = bc.lVal; } // Right boundary if (bc.rCode == FLAT || bc.rCode == NATURAL) bc.rVal = 0.0; if (bc.rCode == FLAT || bc.rCode == DERIV1) { abcd_right[0] = -0.5 * grid.delta_inv; abcd_right[1] = 0.0 * grid.delta_inv; abcd_right[2] = 0.5 * grid.delta_inv; abcd_right[3] = bc.rVal; } if (bc.rCode == NATURAL || bc.rCode == DERIV2) { abcd_right[0] = 1.0 *grid.delta_inv * grid.delta_inv; abcd_right[1] =-2.0 *grid.delta_inv * grid.delta_inv; abcd_right[2] = 1.0 *grid.delta_inv * 
grid.delta_inv; abcd_right[3] = bc.rVal; } #ifdef HAVE_C_VARARRAYS double bands[(M+2)*4]; #else double *bands = malloc ((M+2)*4*sizeof(double)); #endif for (int i=0; i<4; i++) { bands[4*( 0 )+i] = abcd_left[i]; bands[4*(M+1)+i] = abcd_right[i]; } for (int i=0; i<M; i++) { for (int j=0; j<3; j++) bands[4*(i+1)+j] = basis[j]; bands[4*(i+1)+3] = data[i*dstride]; } // Now, solve for coefficients solve_deriv_interp_1d_d (bands, coefs, M, cstride); #ifndef HAVE_C_VARARRAYS free (bands); #endif } } UBspline_1d_d* create_UBspline_1d_d (Ugrid x_grid, BCtype_d xBC, double *data) { // Create new spline UBspline_1d_d* restrict spline = malloc (sizeof(UBspline_1d_d)); spline->spcode = U1D; spline->tcode = DOUBLE_REAL; spline->xBC = xBC; // Setup internal variables int M = x_grid.num; int N; if (xBC.lCode == PERIODIC || xBC.lCode == ANTIPERIODIC) { x_grid.delta = (x_grid.end-x_grid.start)/(double)(x_grid.num); N = M+3; } else { x_grid.delta = (x_grid.end-x_grid.start)/(double)(x_grid.num-1); N = M+2; } x_grid.delta_inv = 1.0/x_grid.delta; spline->x_grid = x_grid; #ifndef HAVE_SSE2 spline->coefs = malloc (sizeof(double)*N); #else posix_memalign ((void**)&spline->coefs, 16, sizeof(double)*N); #endif find_coefs_1d_d (spline->x_grid, xBC, data, 1, spline->coefs, 1); init_sse_data(); return spline; } void recompute_UBspline_1d_d (UBspline_1d_d* spline, double *data) { find_coefs_1d_d (spline->x_grid, spline->xBC, data, 1, spline->coefs, 1); } UBspline_2d_d* create_UBspline_2d_d (Ugrid x_grid, Ugrid y_grid, BCtype_d xBC, BCtype_d yBC, double *data) { // Create new spline UBspline_2d_d* restrict spline = malloc (sizeof(UBspline_2d_d)); spline->spcode = U2D; spline->tcode = DOUBLE_REAL; spline->xBC = xBC; spline->yBC = yBC; // Setup internal variables int Mx = x_grid.num; int My = y_grid.num; int Nx, Ny; if (xBC.lCode == PERIODIC || xBC.lCode == ANTIPERIODIC) Nx = Mx+3; else Nx = Mx+2; x_grid.delta = (x_grid.end - x_grid.start)/(double)(Nx-3); x_grid.delta_inv = 1.0/x_grid.delta; spline->x_grid = x_grid; if (yBC.lCode == PERIODIC || yBC.lCode == ANTIPERIODIC) Ny = My+3; else Ny = My+2; y_grid.delta = (y_grid.end - y_grid.start)/(double)(Ny-3); y_grid.delta_inv = 1.0/y_grid.delta; spline->y_grid = y_grid; spline->x_stride = Ny; #ifndef HAVE_SSE2 spline->coefs = malloc (sizeof(double)*Nx*Ny); #else posix_memalign ((void**)&spline->coefs, 16, (sizeof(double)*Nx*Ny)); #endif // First, solve in the X-direction for (int iy=0; iy<My; iy++) { intptr_t doffset = iy; intptr_t coffset = iy; find_coefs_1d_d (spline->x_grid, xBC, data+doffset, My, spline->coefs+coffset, Ny); } // Now, solve in the Y-direction for (int ix=0; ix<Nx; ix++) { intptr_t doffset = ix*Ny; intptr_t coffset = ix*Ny; find_coefs_1d_d (spline->y_grid, yBC, spline->coefs+doffset, 1, spline->coefs+coffset, 1); } init_sse_data(); return spline; } void recompute_UBspline_2d_d (UBspline_2d_d* spline, double *data) { int Mx = spline->x_grid.num; int My = spline->y_grid.num; int Nx, Ny; if (spline->xBC.lCode == PERIODIC || spline->xBC.lCode == ANTIPERIODIC) Nx = Mx+3; else Nx = Mx+2; if (spline->yBC.lCode == PERIODIC || spline->yBC.lCode == ANTIPERIODIC) Ny = My+3; else Ny = My+2; // First, solve in the X-direction for (int iy=0; iy<My; iy++) { intptr_t doffset = iy; intptr_t coffset = iy; find_coefs_1d_d (spline->x_grid, spline->xBC, data+doffset, My, spline->coefs+coffset, Ny); } // Now, solve in the Y-direction for (int ix=0; ix<Nx; ix++) { intptr_t doffset = ix*Ny; intptr_t coffset = ix*Ny; find_coefs_1d_d (spline->y_grid, spline->yBC, 
spline->coefs+doffset, 1, spline->coefs+coffset, 1); } } UBspline_3d_d* create_UBspline_3d_d (Ugrid x_grid, Ugrid y_grid, Ugrid z_grid, BCtype_d xBC, BCtype_d yBC, BCtype_d zBC, double *data) { // Create new spline UBspline_3d_d* restrict spline = malloc (sizeof(UBspline_3d_d)); spline->spcode = U3D; spline->tcode = DOUBLE_REAL; spline->xBC = xBC; spline->yBC = yBC; spline->zBC = zBC; // Setup internal variables int Mx = x_grid.num; int My = y_grid.num; int Mz = z_grid.num; int Nx, Ny, Nz; if (xBC.lCode == PERIODIC || xBC.lCode == ANTIPERIODIC) Nx = Mx+3; else Nx = Mx+2; x_grid.delta = (x_grid.end - x_grid.start)/(double)(Nx-3); x_grid.delta_inv = 1.0/x_grid.delta; spline->x_grid = x_grid; if (yBC.lCode == PERIODIC || yBC.lCode == ANTIPERIODIC) Ny = My+3; else Ny = My+2; y_grid.delta = (y_grid.end - y_grid.start)/(double)(Ny-3); y_grid.delta_inv = 1.0/y_grid.delta; spline->y_grid = y_grid; if (zBC.lCode == PERIODIC || zBC.lCode == ANTIPERIODIC) Nz = Mz+3; else Nz = Mz+2; z_grid.delta = (z_grid.end - z_grid.start)/(double)(Nz-3); z_grid.delta_inv = 1.0/z_grid.delta; spline->z_grid = z_grid; spline->x_stride = Ny*Nz; spline->y_stride = Nz; spline->coefs_size=(size_t)Nx*(size_t)Ny*(size_t)Nz; #ifndef HAVE_SSE2 spline->coefs = malloc (sizeof(double)*spline->coefs_size); #else posix_memalign ((void**)&spline->coefs, 16, (sizeof(double)*spline->coefs_size)); #endif if(data != NULL) // only data is provided { // First, solve in the X-direction #pragma omp parallel for for (int iy=0; iy<My; iy++) for (int iz=0; iz<Mz; iz++) { intptr_t doffset = iy*Mz+iz; intptr_t coffset = iy*Nz+iz; find_coefs_1d_d (spline->x_grid, xBC, data+doffset, My*Mz, spline->coefs+coffset, Ny*Nz); } // Now, solve in the Y-direction #pragma omp parallel for for (int ix=0; ix<Nx; ix++) for (int iz=0; iz<Nz; iz++) { intptr_t doffset = ix*Ny*Nz + iz; intptr_t coffset = ix*Ny*Nz + iz; find_coefs_1d_d (spline->y_grid, yBC, spline->coefs+doffset, Nz, spline->coefs+coffset, Nz); } // Now, solve in the Z-direction #pragma omp parallel for for (int ix=0; ix<Nx; ix++) for (int iy=0; iy<Ny; iy++) { intptr_t doffset = (ix*Ny+iy)*Nz; intptr_t coffset = (ix*Ny+iy)*Nz; find_coefs_1d_d (spline->z_grid, zBC, spline->coefs+doffset, 1, spline->coefs+coffset, 1); } } init_sse_data(); return spline; } void recompute_UBspline_3d_d (UBspline_3d_d* spline, double *data) { int Mx = spline->x_grid.num; int My = spline->y_grid.num; int Mz = spline->z_grid.num; int Nx, Ny, Nz; if (spline->xBC.lCode == PERIODIC || spline->xBC.lCode == ANTIPERIODIC) Nx = Mx+3; else Nx = Mx+2; if (spline->yBC.lCode == PERIODIC || spline->yBC.lCode == ANTIPERIODIC) Ny = My+3; else Ny = My+2; if (spline->zBC.lCode == PERIODIC || spline->zBC.lCode == ANTIPERIODIC) Nz = Mz+3; else Nz = Mz+2; // First, solve in the X-direction #pragma omp parallel for for (int iy=0; iy<My; iy++) for (int iz=0; iz<Mz; iz++) { intptr_t doffset = iy*Mz+iz; intptr_t coffset = iy*Nz+iz; find_coefs_1d_d (spline->x_grid, spline->xBC, data+doffset, My*Mz, spline->coefs+coffset, Ny*Nz); } // Now, solve in the Y-direction #pragma omp parallel for for (int ix=0; ix<Nx; ix++) for (int iz=0; iz<Nz; iz++) { intptr_t doffset = ix*Ny*Nz + iz; intptr_t coffset = ix*Ny*Nz + iz; find_coefs_1d_d (spline->y_grid, spline->yBC, spline->coefs+doffset, Nz, spline->coefs+coffset, Nz); } // Now, solve in the Z-direction #pragma omp parallel for for (int ix=0; ix<Nx; ix++) for (int iy=0; iy<Ny; iy++) { intptr_t doffset = (ix*Ny+iy)*Nz; intptr_t coffset = (ix*Ny+iy)*Nz; find_coefs_1d_d (spline->z_grid, spline->zBC, 
spline->coefs+doffset, 1, spline->coefs+coffset, 1); } } //////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////// //// Double-Precision, Complex Creation Routines //// //////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////// // On input, bands should be filled with: // row 0 : abcdInitial from boundary conditions // rows 1:M: basis functions in first 3 cols, data in last // row M+1 : abcdFinal from boundary conditions // cstride gives the stride between values in coefs. // On exit, coefs with contain interpolating B-spline coefs UBspline_1d_z* create_UBspline_1d_z (Ugrid x_grid, BCtype_z xBC, complex_double *data) { // Create new spline UBspline_1d_z* restrict spline = malloc (sizeof(UBspline_1d_z)); spline->spcode = U1D; spline->tcode = DOUBLE_COMPLEX; spline->xBC = xBC; // Setup internal variables int M = x_grid.num; int N; if (xBC.lCode == PERIODIC || xBC.lCode == ANTIPERIODIC) { x_grid.delta = (x_grid.end-x_grid.start)/(double)(x_grid.num); N = M+3; } else { x_grid.delta = (x_grid.end-x_grid.start)/(double)(x_grid.num-1); N = M+2; } x_grid.delta_inv = 1.0/x_grid.delta; spline->x_grid = x_grid; #ifndef HAVE_SSE2 spline->coefs = malloc (2*sizeof(double)*N); #else posix_memalign ((void**)&spline->coefs, 16, 2*sizeof(double)*N); #endif BCtype_d xBC_r, xBC_i; xBC_r.lCode = xBC.lCode; xBC_r.rCode = xBC.rCode; xBC_r.lVal = xBC.lVal_r; xBC_r.rVal = xBC.rVal_r; xBC_i.lCode = xBC.lCode; xBC_i.rCode = xBC.rCode; xBC_i.lVal = xBC.lVal_i; xBC_i.rVal = xBC.rVal_i; // Real part find_coefs_1d_d (spline->x_grid, xBC_r, (double*)data, 2, (double*)spline->coefs, 2); // Imaginarty part find_coefs_1d_d (spline->x_grid, xBC_i, ((double*)data)+1, 2, ((double*)spline->coefs)+1, 2); init_sse_data(); return spline; } void recompute_UBspline_1d_z (UBspline_1d_z* spline, complex_double *data) { int M = spline->x_grid.num; int N; if (spline->xBC.lCode == PERIODIC || spline->xBC.lCode == ANTIPERIODIC) N = M+3; else N = M+2; BCtype_d xBC_r, xBC_i; xBC_r.lCode = spline->xBC.lCode; xBC_r.rCode = spline->xBC.rCode; xBC_r.lVal = spline->xBC.lVal_r; xBC_r.rVal = spline->xBC.rVal_r; xBC_i.lCode = spline->xBC.lCode; xBC_i.rCode = spline->xBC.rCode; xBC_i.lVal = spline->xBC.lVal_i; xBC_i.rVal = spline->xBC.rVal_i; // Real part find_coefs_1d_d (spline->x_grid, xBC_r, (double*)data, 2, (double*)spline->coefs, 2); // Imaginarty part find_coefs_1d_d (spline->x_grid, xBC_i, ((double*)data)+1, 2, ((double*)spline->coefs)+1, 2); } UBspline_2d_z* create_UBspline_2d_z (Ugrid x_grid, Ugrid y_grid, BCtype_z xBC, BCtype_z yBC, complex_double *data) { // Create new spline UBspline_2d_z* restrict spline = malloc (sizeof(UBspline_2d_z)); spline->spcode = U2D; spline->tcode = DOUBLE_COMPLEX; spline->xBC = xBC; spline->yBC = yBC; // Setup internal variables int Mx = x_grid.num; int My = y_grid.num; int Nx, Ny; if (xBC.lCode == PERIODIC || xBC.lCode == ANTIPERIODIC) Nx = Mx+3; else Nx = Mx+2; x_grid.delta = (x_grid.end - x_grid.start)/(double)(Nx-3); x_grid.delta_inv = 1.0/x_grid.delta; spline->x_grid = x_grid; if (yBC.lCode == PERIODIC || yBC.lCode == ANTIPERIODIC) Ny = My+3; else Ny = My+2; y_grid.delta = (y_grid.end - y_grid.start)/(double)(Ny-3); y_grid.delta_inv = 1.0/y_grid.delta; spline->y_grid = y_grid; spline->x_stride = Ny; #ifndef HAVE_SSE2 spline->coefs = malloc (2*sizeof(double)*Nx*Ny); #else posix_memalign ((void**)&spline->coefs, 16, 2*sizeof(double)*Nx*Ny); #endif BCtype_d xBC_r, xBC_i, yBC_r, 
yBC_i; xBC_r.lCode = xBC.lCode; xBC_r.rCode = xBC.rCode; xBC_r.lVal = xBC.lVal_r; xBC_r.rVal = xBC.rVal_r; xBC_i.lCode = xBC.lCode; xBC_i.rCode = xBC.rCode; xBC_i.lVal = xBC.lVal_i; xBC_i.rVal = xBC.rVal_i; yBC_r.lCode = yBC.lCode; yBC_r.rCode = yBC.rCode; yBC_r.lVal = yBC.lVal_r; yBC_r.rVal = yBC.rVal_r; yBC_i.lCode = yBC.lCode; yBC_i.rCode = yBC.rCode; yBC_i.lVal = yBC.lVal_i; yBC_i.rVal = yBC.rVal_i; // First, solve in the X-direction for (int iy=0; iy<My; iy++) { intptr_t doffset = 2*iy; intptr_t coffset = 2*iy; // Real part find_coefs_1d_d (spline->x_grid, xBC_r, ((double*)data+doffset), 2*My, (double*)spline->coefs+coffset, 2*Ny); // Imag part find_coefs_1d_d (spline->x_grid, xBC_i, ((double*)data)+doffset+1, 2*My, ((double*)spline->coefs)+coffset+1, 2*Ny); } // Now, solve in the Y-direction for (int ix=0; ix<Nx; ix++) { intptr_t doffset = 2*ix*Ny; intptr_t coffset = 2*ix*Ny; // Real part find_coefs_1d_d (spline->y_grid, yBC_r, ((double*)spline->coefs)+doffset, 2, (double*)spline->coefs+coffset, 2); // Imag part find_coefs_1d_d (spline->y_grid, yBC_i, (double*)spline->coefs+doffset+1, 2, ((double*)spline->coefs)+coffset+1, 2); } init_sse_data(); return spline; } void recompute_UBspline_2d_z (UBspline_2d_z* spline, complex_double *data) { int Mx = spline->x_grid.num; int My = spline->y_grid.num; int Nx, Ny; if (spline->xBC.lCode == PERIODIC || spline->xBC.lCode == ANTIPERIODIC) Nx = Mx+3; else Nx = Mx+2; if (spline->yBC.lCode == PERIODIC || spline->yBC.lCode == ANTIPERIODIC) Ny = My+3; else Ny = My+2; BCtype_d xBC_r, xBC_i, yBC_r, yBC_i; xBC_r.lCode = spline->xBC.lCode; xBC_r.rCode = spline->xBC.rCode; xBC_r.lVal = spline->xBC.lVal_r; xBC_r.rVal = spline->xBC.rVal_r; xBC_i.lCode = spline->xBC.lCode; xBC_i.rCode = spline->xBC.rCode; xBC_i.lVal = spline->xBC.lVal_i; xBC_i.rVal = spline->xBC.rVal_i; yBC_r.lCode = spline->yBC.lCode; yBC_r.rCode = spline->yBC.rCode; yBC_r.lVal = spline->yBC.lVal_r; yBC_r.rVal = spline->yBC.rVal_r; yBC_i.lCode = spline->yBC.lCode; yBC_i.rCode = spline->yBC.rCode; yBC_i.lVal = spline->yBC.lVal_i; yBC_i.rVal = spline->yBC.rVal_i; // First, solve in the X-direction for (int iy=0; iy<My; iy++) { intptr_t doffset = 2*iy; intptr_t coffset = 2*iy; // Real part find_coefs_1d_d (spline->x_grid, xBC_r, ((double*)data+doffset), 2*My, (double*)spline->coefs+coffset, 2*Ny); // Imag part find_coefs_1d_d (spline->x_grid, xBC_i, ((double*)data)+doffset+1, 2*My, ((double*)spline->coefs)+coffset+1, 2*Ny); } // Now, solve in the Y-direction for (int ix=0; ix<Nx; ix++) { intptr_t doffset = 2*ix*Ny; intptr_t coffset = 2*ix*Ny; // Real part find_coefs_1d_d (spline->y_grid, yBC_r, ((double*)spline->coefs)+doffset, 2, (double*)spline->coefs+coffset, 2); // Imag part find_coefs_1d_d (spline->y_grid, yBC_i, (double*)spline->coefs+doffset+1, 2, ((double*)spline->coefs)+coffset+1, 2); } } UBspline_3d_z* create_UBspline_3d_z (Ugrid x_grid, Ugrid y_grid, Ugrid z_grid, BCtype_z xBC, BCtype_z yBC, BCtype_z zBC, complex_double *data) { // Create new spline UBspline_3d_z* restrict spline = malloc (sizeof(UBspline_3d_z)); spline->spcode = U3D; spline->tcode = DOUBLE_COMPLEX; spline->xBC = xBC; spline->yBC = yBC; spline->zBC = zBC; // Setup internal variables int Mx = x_grid.num; int My = y_grid.num; int Mz = z_grid.num; int Nx, Ny, Nz; if (xBC.lCode == PERIODIC || xBC.lCode == ANTIPERIODIC) Nx = Mx+3; else Nx = Mx+2; x_grid.delta = (x_grid.end - x_grid.start)/(double)(Nx-3); x_grid.delta_inv = 1.0/x_grid.delta; spline->x_grid = x_grid; if (yBC.lCode == PERIODIC || yBC.lCode == ANTIPERIODIC) 
Ny = My+3; else Ny = My+2; y_grid.delta = (y_grid.end - y_grid.start)/(double)(Ny-3); y_grid.delta_inv = 1.0/y_grid.delta; spline->y_grid = y_grid; if (zBC.lCode == PERIODIC || zBC.lCode == ANTIPERIODIC) Nz = Mz+3; else Nz = Mz+2; z_grid.delta = (z_grid.end - z_grid.start)/(double)(Nz-3); z_grid.delta_inv = 1.0/z_grid.delta; spline->z_grid = z_grid; spline->x_stride = Ny*Nz; spline->y_stride = Nz; #ifndef HAVE_SSE2 spline->coefs = malloc (2*sizeof(double)*Nx*Ny*Nz); #else posix_memalign ((void**)&spline->coefs, 16, 2*sizeof(double)*Nx*Ny*Nz); #endif BCtype_d xBC_r, xBC_i, yBC_r, yBC_i, zBC_r, zBC_i; xBC_r.lCode = xBC.lCode; xBC_r.rCode = xBC.rCode; xBC_r.lVal = xBC.lVal_r; xBC_r.rVal = xBC.rVal_r; xBC_i.lCode = xBC.lCode; xBC_i.rCode = xBC.rCode; xBC_i.lVal = xBC.lVal_i; xBC_i.rVal = xBC.rVal_i; yBC_r.lCode = yBC.lCode; yBC_r.rCode = yBC.rCode; yBC_r.lVal = yBC.lVal_r; yBC_r.rVal = yBC.rVal_r; yBC_i.lCode = yBC.lCode; yBC_i.rCode = yBC.rCode; yBC_i.lVal = yBC.lVal_i; yBC_i.rVal = yBC.rVal_i; zBC_r.lCode = zBC.lCode; zBC_r.rCode = zBC.rCode; zBC_r.lVal = zBC.lVal_r; zBC_r.rVal = zBC.rVal_r; zBC_i.lCode = zBC.lCode; zBC_i.rCode = zBC.rCode; zBC_i.lVal = zBC.lVal_i; zBC_i.rVal = zBC.rVal_i; // First, solve in the X-direction for (int iy=0; iy<My; iy++) for (int iz=0; iz<Mz; iz++) { intptr_t doffset = 2*(iy*Mz+iz); intptr_t coffset = 2*(iy*Nz+iz); // Real part find_coefs_1d_d (spline->x_grid, xBC_r, ((double*)data)+doffset, 2*My*Mz, ((double*)spline->coefs)+coffset, 2*Ny*Nz); // Imag part find_coefs_1d_d (spline->x_grid, xBC_i, ((double*)data)+doffset+1, 2*My*Mz, ((double*)spline->coefs)+coffset+1, 2*Ny*Nz); } // Now, solve in the Y-direction for (int ix=0; ix<Nx; ix++) for (int iz=0; iz<Nz; iz++) { intptr_t doffset = 2*(ix*Ny*Nz + iz); intptr_t coffset = 2*(ix*Ny*Nz + iz); // Real part find_coefs_1d_d (spline->y_grid, yBC_r, ((double*)spline->coefs)+doffset, 2*Nz, ((double*)spline->coefs)+coffset, 2*Nz); // Imag part find_coefs_1d_d (spline->y_grid, yBC_i, ((double*)spline->coefs)+doffset+1, 2*Nz, ((double*)spline->coefs)+coffset+1, 2*Nz); } // Now, solve in the Z-direction for (int ix=0; ix<Nx; ix++) for (int iy=0; iy<Ny; iy++) { intptr_t doffset = 2*((ix*Ny+iy)*Nz); intptr_t coffset = 2*((ix*Ny+iy)*Nz); // Real part find_coefs_1d_d (spline->z_grid, zBC_r, ((double*)spline->coefs)+doffset, 2, ((double*)spline->coefs)+coffset, 2); // Imag part find_coefs_1d_d (spline->z_grid, zBC_i, ((double*)spline->coefs)+doffset+1, 2, ((double*)spline->coefs)+coffset+1, 2); } init_sse_data(); return spline; } void recompute_UBspline_3d_z (UBspline_3d_z* spline, complex_double *data) { // Setup internal variables int Mx = spline->x_grid.num; int My = spline->y_grid.num; int Mz = spline->z_grid.num; int Nx, Ny, Nz; if (spline->xBC.lCode == PERIODIC || spline->xBC.lCode == ANTIPERIODIC) Nx = Mx+3; else Nx = Mx+2; if (spline->yBC.lCode == PERIODIC || spline->yBC.lCode == ANTIPERIODIC) Ny = My+3; else Ny = My+2; if (spline->zBC.lCode == PERIODIC || spline->zBC.lCode == ANTIPERIODIC) Nz = Mz+3; else Nz = Mz+2; BCtype_d xBC_r, xBC_i, yBC_r, yBC_i, zBC_r, zBC_i; xBC_r.lCode = spline->xBC.lCode; xBC_r.rCode = spline->xBC.rCode; xBC_r.lVal = spline->xBC.lVal_r; xBC_r.rVal = spline->xBC.rVal_r; xBC_i.lCode = spline->xBC.lCode; xBC_i.rCode = spline->xBC.rCode; xBC_i.lVal = spline->xBC.lVal_i; xBC_i.rVal = spline->xBC.rVal_i; yBC_r.lCode = spline->yBC.lCode; yBC_r.rCode = spline->yBC.rCode; yBC_r.lVal = spline->yBC.lVal_r; yBC_r.rVal = spline->yBC.rVal_r; yBC_i.lCode = spline->yBC.lCode; yBC_i.rCode = 
spline->yBC.rCode; yBC_i.lVal = spline->yBC.lVal_i; yBC_i.rVal = spline->yBC.rVal_i; zBC_r.lCode = spline->zBC.lCode; zBC_r.rCode = spline->zBC.rCode; zBC_r.lVal = spline->zBC.lVal_r; zBC_r.rVal = spline->zBC.rVal_r; zBC_i.lCode = spline->zBC.lCode; zBC_i.rCode = spline->zBC.rCode; zBC_i.lVal = spline->zBC.lVal_i; zBC_i.rVal = spline->zBC.rVal_i; // First, solve in the X-direction for (int iy=0; iy<My; iy++) for (int iz=0; iz<Mz; iz++) { intptr_t doffset = 2*(iy*Mz+iz); intptr_t coffset = 2*(iy*Nz+iz); // Real part find_coefs_1d_d (spline->x_grid, xBC_r, ((double*)data)+doffset, 2*My*Mz, ((double*)spline->coefs)+coffset, 2*Ny*Nz); // Imag part find_coefs_1d_d (spline->x_grid, xBC_i, ((double*)data)+doffset+1, 2*My*Mz, ((double*)spline->coefs)+coffset+1, 2*Ny*Nz); } // Now, solve in the Y-direction for (int ix=0; ix<Nx; ix++) for (int iz=0; iz<Nz; iz++) { intptr_t doffset = 2*(ix*Ny*Nz + iz); intptr_t coffset = 2*(ix*Ny*Nz + iz); // Real part find_coefs_1d_d (spline->y_grid, yBC_r, ((double*)spline->coefs)+doffset, 2*Nz, ((double*)spline->coefs)+coffset, 2*Nz); // Imag part find_coefs_1d_d (spline->y_grid, yBC_i, ((double*)spline->coefs)+doffset+1, 2*Nz, ((double*)spline->coefs)+coffset+1, 2*Nz); } // Now, solve in the Z-direction for (int ix=0; ix<Nx; ix++) for (int iy=0; iy<Ny; iy++) { intptr_t doffset = 2*((ix*Ny+iy)*Nz); intptr_t coffset = 2*((ix*Ny+iy)*Nz); // Real part find_coefs_1d_d (spline->z_grid, zBC_r, ((double*)spline->coefs)+doffset, 2, ((double*)spline->coefs)+coffset, 2); // Imag part find_coefs_1d_d (spline->z_grid, zBC_i, ((double*)spline->coefs)+doffset+1, 2, ((double*)spline->coefs)+coffset+1, 2); } } void destroy_UBspline (Bspline *spline) { free (spline->coefs); free (spline); } void destroy_NUBspline (Bspline *spline); void destroy_multi_UBspline (Bspline *spline); void destroy_Bspline (void *spline) { Bspline *sp = (Bspline *)spline; if (sp->sp_code <= U3D) destroy_UBspline (sp); else if (sp->sp_code <= NU3D) destroy_NUBspline (sp); else if (sp->sp_code <= MULTI_U3D) destroy_multi_UBspline (sp); else fprintf (stderr, "Error in destroy_Bspline: invalide spline code %d.\n", sp->sp_code); }
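/*
 * Usage sketch (illustrative addition, not part of the original einspline
 * source): builds a small periodic 3-D double-precision spline with the
 * creation routine above and releases it with destroy_Bspline. The grid
 * sizes, bounds, and sample array are hypothetical; the expected data
 * layout follows the loops in create_UBspline_3d_d, i.e. x-major with
 * index (ix*My + iy)*Mz + iz.
 */
static void example_UBspline_3d_d_usage (double *samples)  /* Mx*My*Mz = 32*32*32 values */
{
  Ugrid xg, yg, zg;
  xg.start = 0.0;  xg.end = 1.0;  xg.num = 32;
  yg.start = 0.0;  yg.end = 1.0;  yg.num = 32;
  zg.start = 0.0;  zg.end = 1.0;  zg.num = 32;

  BCtype_d bc;
  bc.lCode = PERIODIC;  bc.rCode = PERIODIC;
  bc.lVal  = 0.0;       bc.rVal  = 0.0;     /* boundary values unused for PERIODIC */

  UBspline_3d_d *spline = create_UBspline_3d_d (xg, yg, zg, bc, bc, bc, samples);
  /* ... evaluate the spline with the matching eval routines, then ... */
  destroy_Bspline (spline);
}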
///////////////////////////////////////////////////////////////////////////// // einspline: a library for creating and evaluating B-splines // // Copyright (C) 2007 Kenneth P. Esler, Jr. // // // // This program is free software; you can redistribute it and/or modify // // it under the terms of the GNU General Public License as published by // // the Free Software Foundation; either version 2 of the License, or // // (at your option) any later version. // // // // This program is distributed in the hope that it will be useful, // // but WITHOUT ANY WARRANTY; without even the implied warranty of // // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // // GNU General Public License for more details. // // // // You should have received a copy of the GNU General Public License // // along with this program; if not, write to the Free Software // // Foundation, Inc., 51 Franklin Street, Fifth Floor, // // Boston, MA 02110-1301 USA // ///////////////////////////////////////////////////////////////////////////// #include "bspline_create.h" #ifndef _XOPEN_SOURCE #define _XOPEN_SOURCE 600 #endif #ifndef __USE_XOPEN2K #define __USE_XOPEN2K #endif #include <stdlib.h> #include <stdio.h> #include <inttypes.h> int posix_memalign(void **memptr, size_t alignment, size_t size); //////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////// //// Helper functions for spline creation //// //////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////// void init_sse_data(); void find_coefs_1d_d (Ugrid grid, BCtype_d bc, double *data, intptr_t dstride, double *coefs, intptr_t cstride); void solve_deriv_interp_1d_s (float bands[], float coefs[], int M, int cstride) { // Solve interpolating equations // First and last rows are different bands[4*(0)+1] /= bands[4*(0)+0]; bands[4*(0)+2] /= bands[4*(0)+0]; bands[4*(0)+3] /= bands[4*(0)+0]; bands[4*(0)+0] = 1.0; bands[4*(1)+1] -= bands[4*(1)+0]*bands[4*(0)+1]; bands[4*(1)+2] -= bands[4*(1)+0]*bands[4*(0)+2]; bands[4*(1)+3] -= bands[4*(1)+0]*bands[4*(0)+3]; bands[4*(0)+0] = 0.0; bands[4*(1)+2] /= bands[4*(1)+1]; bands[4*(1)+3] /= bands[4*(1)+1]; bands[4*(1)+1] = 1.0; // Now do rows 2 through M+1 for (int row=2; row < (M+1); row++) { bands[4*(row)+1] -= bands[4*(row)+0]*bands[4*(row-1)+2]; bands[4*(row)+3] -= bands[4*(row)+0]*bands[4*(row-1)+3]; bands[4*(row)+2] /= bands[4*(row)+1]; bands[4*(row)+3] /= bands[4*(row)+1]; bands[4*(row)+0] = 0.0; bands[4*(row)+1] = 1.0; } // Do last row bands[4*(M+1)+1] -= bands[4*(M+1)+0]*bands[4*(M-1)+2]; bands[4*(M+1)+3] -= bands[4*(M+1)+0]*bands[4*(M-1)+3]; bands[4*(M+1)+2] -= bands[4*(M+1)+1]*bands[4*(M)+2]; bands[4*(M+1)+3] -= bands[4*(M+1)+1]*bands[4*(M)+3]; bands[4*(M+1)+3] /= bands[4*(M+1)+2]; bands[4*(M+1)+2] = 1.0; coefs[(M+1)*cstride] = bands[4*(M+1)+3]; // Now back substitute up for (int row=M; row>0; row--) coefs[row*cstride] = bands[4*(row)+3] - bands[4*(row)+2]*coefs[cstride*(row+1)]; // Finish with first row coefs[0] = bands[4*(0)+3] - bands[4*(0)+1]*coefs[1*cstride] - bands[4*(0)+2]*coefs[2*cstride]; } // On input, bands should be filled with: // row 0 : abcdInitial from boundary conditions // rows 1:M: basis functions in first 3 cols, data in last // row M+1 : abcdFinal from boundary conditions // cstride gives the stride between values in coefs. 
// On exit, coefs with contain interpolating B-spline coefs void solve_periodic_interp_1d_s (float bands[], float coefs[], int M, size_t cstride) //int M, int cstride) { float lastCol[M]; // Now solve: // First and last rows are different bands[4*(0)+2] /= bands[4*(0)+1]; bands[4*(0)+0] /= bands[4*(0)+1]; bands[4*(0)+3] /= bands[4*(0)+1]; bands[4*(0)+1] = 1.0; bands[4*(M-1)+1] -= bands[4*(M-1)+2]*bands[4*(0)+0]; bands[4*(M-1)+3] -= bands[4*(M-1)+2]*bands[4*(0)+3]; bands[4*(M-1)+2] = -bands[4*(M-1)+2]*bands[4*(0)+2]; lastCol[0] = bands[4*(0)+0]; for (int row=1; row < (M-1); row++) { bands[4*(row)+1] -= bands[4*(row)+0] * bands[4*(row-1)+2]; bands[4*(row)+3] -= bands[4*(row)+0] * bands[4*(row-1)+3]; lastCol[row] = -bands[4*(row)+0] * lastCol[row-1]; bands[4*(row)+0] = 0.0; bands[4*(row)+2] /= bands[4*(row)+1]; bands[4*(row)+3] /= bands[4*(row)+1]; lastCol[row] /= bands[4*(row)+1]; bands[4*(row)+1] = 1.0; if (row < (M-2)) { bands[4*(M-1)+3] -= bands[4*(M-1)+2]*bands[4*(row)+3]; bands[4*(M-1)+1] -= bands[4*(M-1)+2]*lastCol[row]; bands[4*(M-1)+2] = -bands[4*(M-1)+2]*bands[4*(row)+2]; } } // Now do last row // The [2] element and [0] element are now on top of each other bands[4*(M-1)+0] += bands[4*(M-1)+2]; bands[4*(M-1)+1] -= bands[4*(M-1)+0] * (bands[4*(M-2)+2]+lastCol[M-2]); bands[4*(M-1)+3] -= bands[4*(M-1)+0] * bands[4*(M-2)+3]; bands[4*(M-1)+3] /= bands[4*(M-1)+1]; coefs[M*cstride] = bands[4*(M-1)+3]; for (int row=M-2; row>=0; row--) coefs[(row+1)*cstride] = bands[4*(row)+3] - bands[4*(row)+2]*coefs[(row+2)*cstride] - lastCol[row]*coefs[M*cstride]; coefs[0*cstride] = coefs[M*cstride]; coefs[(M+1)*cstride] = coefs[1*cstride]; coefs[(M+2)*cstride] = coefs[2*cstride]; } // On input, bands should be filled with: // row 0 : abcdInitial from boundary conditions // rows 1:M: basis functions in first 3 cols, data in last // row M+1 : abcdFinal from boundary conditions // cstride gives the stride between values in coefs. 
// On exit, coefs with contain interpolating B-spline coefs void solve_antiperiodic_interp_1d_s (float bands[], float coefs[], int M, int cstride) { bands[4*0+0] *= -1.0; bands[4*(M-1)+2] *= -1.0; float lastCol[M]; // Now solve: // First and last rows are different bands[4*(0)+2] /= bands[4*(0)+1]; bands[4*(0)+0] /= bands[4*(0)+1]; bands[4*(0)+3] /= bands[4*(0)+1]; bands[4*(0)+1] = 1.0; bands[4*(M-1)+1] -= bands[4*(M-1)+2]*bands[4*(0)+0]; bands[4*(M-1)+3] -= bands[4*(M-1)+2]*bands[4*(0)+3]; bands[4*(M-1)+2] = -bands[4*(M-1)+2]*bands[4*(0)+2]; lastCol[0] = bands[4*(0)+0]; for (int row=1; row < (M-1); row++) { bands[4*(row)+1] -= bands[4*(row)+0] * bands[4*(row-1)+2]; bands[4*(row)+3] -= bands[4*(row)+0] * bands[4*(row-1)+3]; lastCol[row] = -bands[4*(row)+0] * lastCol[row-1]; bands[4*(row)+0] = 0.0; bands[4*(row)+2] /= bands[4*(row)+1]; bands[4*(row)+3] /= bands[4*(row)+1]; lastCol[row] /= bands[4*(row)+1]; bands[4*(row)+1] = 1.0; if (row < (M-2)) { bands[4*(M-1)+3] -= bands[4*(M-1)+2]*bands[4*(row)+3]; bands[4*(M-1)+1] -= bands[4*(M-1)+2]*lastCol[row]; bands[4*(M-1)+2] = -bands[4*(M-1)+2]*bands[4*(row)+2]; } } // Now do last row // The [2] element and [0] element are now on top of each other bands[4*(M-1)+0] += bands[4*(M-1)+2]; bands[4*(M-1)+1] -= bands[4*(M-1)+0] * (bands[4*(M-2)+2]+lastCol[M-2]); bands[4*(M-1)+3] -= bands[4*(M-1)+0] * bands[4*(M-2)+3]; bands[4*(M-1)+3] /= bands[4*(M-1)+1]; coefs[M*cstride] = bands[4*(M-1)+3]; for (int row=M-2; row>=0; row--) coefs[(row+1)*cstride] = bands[4*(row)+3] - bands[4*(row)+2]*coefs[(row+2)*cstride] - lastCol[row]*coefs[M*cstride]; coefs[0*cstride] = -coefs[M*cstride]; coefs[(M+1)*cstride] = -coefs[1*cstride]; coefs[(M+2)*cstride] = -coefs[2*cstride]; } #ifdef HIGH_PRECISION void find_coefs_1d_s (Ugrid grid, BCtype_s bc, float *data, intptr_t dstride, float *coefs, intptr_t cstride) { BCtype_d d_bc; double *d_data, *d_coefs; d_bc.lCode = bc.lCode; d_bc.rCode = bc.rCode; d_bc.lVal = bc.lVal; d_bc.rVal = bc.rVal; int M = grid.num, N; if (bc.lCode == PERIODIC || bc.lCode == ANTIPERIODIC) N = M+3; else N = M+2; d_data = malloc (N*sizeof(double)); d_coefs = malloc (N*sizeof(double)); for (int i=0; i<M; i++) d_data[i] = data[i*dstride]; find_coefs_1d_d (grid, d_bc, d_data, 1, d_coefs, 1); for (int i=0; i<N; i++) coefs[i*cstride] = d_coefs[i]; free (d_data); free (d_coefs); } #else void find_coefs_1d_s (Ugrid grid, BCtype_s bc, float *data, intptr_t dstride, float *coefs, intptr_t cstride) { size_t M = grid.num; float basis[4] = {1.0/6.0, 2.0/3.0, 1.0/6.0, 0.0}; if (bc.lCode == PERIODIC || bc.lCode == ANTIPERIODIC) { #ifdef HAVE_C_VARARRAYS float bands[4*M]; #else float *bands = malloc(4*M*sizeof(float)); #endif for (size_t i=0; i<M; i++) { bands[4*i+0] = basis[0]; bands[4*i+1] = basis[1]; bands[4*i+2] = basis[2]; bands[4*i+3] = data[i*dstride]; } if (bc.lCode == PERIODIC) solve_periodic_interp_1d_s (bands, coefs, M, cstride); else solve_antiperiodic_interp_1d_s (bands, coefs, M, cstride); #ifndef HAVE_C_VARARRAYS free (bands); #endif } else { // Setup boundary conditions float abcd_left[4], abcd_right[4]; // Left boundary if (bc.lCode == FLAT || bc.lCode == NATURAL) bc.lVal = 0.0; if (bc.lCode == FLAT || bc.lCode == DERIV1) { abcd_left[0] = -0.5 * grid.delta_inv; abcd_left[1] = 0.0 * grid.delta_inv; abcd_left[2] = 0.5 * grid.delta_inv; abcd_left[3] = bc.lVal; } if (bc.lCode == NATURAL || bc.lCode == DERIV2) { abcd_left[0] = 1.0 * grid.delta_inv * grid.delta_inv; abcd_left[1] =-2.0 * grid.delta_inv * grid.delta_inv; abcd_left[2] = 1.0 * grid.delta_inv * 
grid.delta_inv; abcd_left[3] = bc.lVal; } // Right boundary if (bc.rCode == FLAT || bc.rCode == NATURAL) bc.rVal = 0.0; if (bc.rCode == FLAT || bc.rCode == DERIV1) { abcd_right[0] = -0.5 * grid.delta_inv; abcd_right[1] = 0.0 * grid.delta_inv; abcd_right[2] = 0.5 * grid.delta_inv; abcd_right[3] = bc.rVal; } if (bc.rCode == NATURAL || bc.rCode == DERIV2) { abcd_right[0] = 1.0 *grid.delta_inv * grid.delta_inv; abcd_right[1] =-2.0 *grid.delta_inv * grid.delta_inv; abcd_right[2] = 1.0 *grid.delta_inv * grid.delta_inv; abcd_right[3] = bc.rVal; } #ifdef HAVE_C_VARARRAYS float bands[4*(M+2)]; #else float *bands = malloc ((M+2)*4*sizeof(float)); #endif for (int i=0; i<4; i++) { bands[4*( 0 )+i] = abcd_left[i]; bands[4*(M+1)+i] = abcd_right[i]; } for (int i=0; i<M; i++) { for (int j=0; j<3; j++) bands[4*(i+1)+j] = basis[j]; bands[4*(i+1)+3] = data[i*dstride]; } // Now, solve for coefficients solve_deriv_interp_1d_s (bands, coefs, M, cstride); #ifndef HAVE_C_VARARRAYS free (bands); #endif } } #endif //////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////// //// Single-Precision, Real Creation Routines //// //////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////// // On input, bands should be filled with: // row 0 : abcdInitial from boundary conditions // rows 1:M: basis functions in first 3 cols, data in last // row M+1 : abcdFinal from boundary conditions // cstride gives the stride between values in coefs. // On exit, coefs with contain interpolating B-spline coefs UBspline_1d_s* create_UBspline_1d_s (Ugrid x_grid, BCtype_s xBC, float *data) { // Create new spline UBspline_1d_s* restrict spline = malloc (sizeof(UBspline_1d_s)); spline->spcode = U1D; spline->tcode = SINGLE_REAL; spline->xBC = xBC; spline->x_grid = x_grid; // Setup internal variables int M = x_grid.num; int N; if (xBC.lCode == PERIODIC || xBC.lCode == ANTIPERIODIC) { x_grid.delta = (x_grid.end-x_grid.start)/(double)(x_grid.num); N = M+3; } else { x_grid.delta = (x_grid.end-x_grid.start)/(double)(x_grid.num-1); N = M+2; } x_grid.delta_inv = 1.0/x_grid.delta; spline->x_grid = x_grid; #ifndef HAVE_SSE2 spline->coefs = malloc (sizeof(float)*N); #else posix_memalign ((void**)&spline->coefs, 16, (sizeof(float)*N)); #endif find_coefs_1d_s (spline->x_grid, xBC, data, 1, spline->coefs, 1); init_sse_data(); return spline; } void recompute_UBspline_1d_s (UBspline_1d_s* spline, float *data) { find_coefs_1d_s (spline->x_grid, spline->xBC, data, 1, spline->coefs, 1); } UBspline_2d_s* create_UBspline_2d_s (Ugrid x_grid, Ugrid y_grid, BCtype_s xBC, BCtype_s yBC, float *data) { // Create new spline UBspline_2d_s* restrict spline = malloc (sizeof(UBspline_2d_s)); spline->spcode = U2D; spline->tcode = SINGLE_REAL; spline->xBC = xBC; spline->yBC = yBC; // Setup internal variables int Mx = x_grid.num; int My = y_grid.num; int Nx, Ny; if (xBC.lCode == PERIODIC || xBC.lCode == ANTIPERIODIC) Nx = Mx+3; else Nx = Mx+2; x_grid.delta = (x_grid.end - x_grid.start)/(double)(Nx-3); x_grid.delta_inv = 1.0/x_grid.delta; spline->x_grid = x_grid; if (yBC.lCode == PERIODIC || yBC.lCode == ANTIPERIODIC) Ny = My+3; else Ny = My+2; y_grid.delta = (y_grid.end - y_grid.start)/(double)(Ny-3); y_grid.delta_inv = 1.0/y_grid.delta; spline->y_grid = y_grid; spline->x_stride = Ny; #ifndef HAVE_SSE2 spline->coefs = malloc (sizeof(float)*Nx*Ny); #else posix_memalign ((void**)&spline->coefs, 16, sizeof(float)*Nx*Ny); #endif // First, solve in the 
X-direction for (int iy=0; iy<My; iy++) { intptr_t doffset = iy; intptr_t coffset = iy; find_coefs_1d_s (spline->x_grid, spline->xBC, data+doffset, My, spline->coefs+coffset, Ny); } // Now, solve in the Y-direction for (int ix=0; ix<Nx; ix++) { intptr_t doffset = ix*Ny; intptr_t coffset = ix*Ny; find_coefs_1d_s (spline->y_grid, spline->yBC, spline->coefs+doffset, 1, spline->coefs+coffset, 1); } init_sse_data(); return spline; } void recompute_UBspline_2d_s (UBspline_2d_s* spline, float *data) { int Mx = spline->x_grid.num; int My = spline->y_grid.num; int Nx, Ny; if (spline->xBC.lCode == PERIODIC || spline->xBC.lCode == ANTIPERIODIC) Nx = Mx+3; else Nx = Mx+2; if (spline->yBC.lCode == PERIODIC || spline->yBC.lCode == ANTIPERIODIC) Ny = My+3; else Ny = My+2; // First, solve in the X-direction for (int iy=0; iy<My; iy++) { intptr_t doffset = iy; intptr_t coffset = iy; find_coefs_1d_s (spline->x_grid, spline->xBC, data+doffset, My, spline->coefs+coffset, Ny); } // Now, solve in the Y-direction for (int ix=0; ix<Nx; ix++) { intptr_t doffset = ix*Ny; intptr_t coffset = ix*Ny; find_coefs_1d_s (spline->y_grid, spline->yBC, spline->coefs+doffset, 1, spline->coefs+coffset, 1); } } UBspline_3d_s* create_UBspline_3d_s (Ugrid x_grid, Ugrid y_grid, Ugrid z_grid, BCtype_s xBC, BCtype_s yBC, BCtype_s zBC, float *data) { // Create new spline UBspline_3d_s* spline = malloc (sizeof(UBspline_3d_s)); spline->spcode = U3D; spline->tcode = SINGLE_REAL; spline->xBC = xBC; spline->yBC = yBC; spline->zBC = zBC; // Setup internal variables int Mx = x_grid.num; int My = y_grid.num; int Mz = z_grid.num; int Nx, Ny, Nz; if (xBC.lCode == PERIODIC || xBC.lCode == ANTIPERIODIC) Nx = Mx+3; else Nx = Mx+2; x_grid.delta = (x_grid.end - x_grid.start)/(double)(Nx-3); x_grid.delta_inv = 1.0/x_grid.delta; spline->x_grid = x_grid; if (yBC.lCode == PERIODIC || yBC.lCode == ANTIPERIODIC) Ny = My+3; else Ny = My+2; y_grid.delta = (y_grid.end - y_grid.start)/(double)(Ny-3); y_grid.delta_inv = 1.0/y_grid.delta; spline->y_grid = y_grid; if (zBC.lCode == PERIODIC || zBC.lCode == ANTIPERIODIC) Nz = Mz+3; else Nz = Mz+2; z_grid.delta = (z_grid.end - z_grid.start)/(double)(Nz-3); z_grid.delta_inv = 1.0/z_grid.delta; spline->z_grid = z_grid; spline->x_stride = Ny*Nz; spline->y_stride = Nz; spline->coefs_size=(size_t)Nx*(size_t)Ny*(size_t)Nz; #ifndef HAVE_SSE2 spline->coefs = malloc (sizeof(float)*spline->coefs_size); #else posix_memalign ((void**)&spline->coefs, 16, (sizeof(float)*spline->coefs_size)); #endif // First, solve in the X-direction for (int iy=0; iy<My; iy++) for (int iz=0; iz<Mz; iz++) { intptr_t doffset = iy*Mz+iz; intptr_t coffset = iy*Nz+iz; find_coefs_1d_s (spline->x_grid, xBC, data+doffset, My*Mz, spline->coefs+coffset, Ny*Nz); } // Now, solve in the Y-direction for (int ix=0; ix<Nx; ix++) for (int iz=0; iz<Nz; iz++) { intptr_t doffset = ix*Ny*Nz + iz; intptr_t coffset = ix*Ny*Nz + iz; find_coefs_1d_s (spline->y_grid, yBC, spline->coefs+doffset, Nz, spline->coefs+coffset, Nz); } // Now, solve in the Z-direction for (int ix=0; ix<Nx; ix++) for (int iy=0; iy<Ny; iy++) { intptr_t doffset = (ix*Ny+iy)*Nz; intptr_t coffset = (ix*Ny+iy)*Nz; find_coefs_1d_s (spline->z_grid, zBC, spline->coefs+doffset, 1, spline->coefs+coffset, 1); } init_sse_data(); return spline; } void recompute_UBspline_3d_s (UBspline_3d_s* spline, float *data) { int Mx = spline->x_grid.num; int My = spline->y_grid.num; int Mz = spline->z_grid.num; int Nx, Ny, Nz; if (spline->xBC.lCode == PERIODIC || spline->xBC.lCode == ANTIPERIODIC) Nx = Mx+3; else Nx = 
Mx+2; if (spline->yBC.lCode == PERIODIC || spline->yBC.lCode == ANTIPERIODIC) Ny = My+3; else Ny = My+2; if (spline->zBC.lCode == PERIODIC || spline->zBC.lCode == ANTIPERIODIC) Nz = Mz+3; else Nz = Mz+2; // First, solve in the X-direction for (int iy=0; iy<My; iy++) for (int iz=0; iz<Mz; iz++) { intptr_t doffset = iy*Mz+iz; intptr_t coffset = iy*Nz+iz; find_coefs_1d_s (spline->x_grid, spline->xBC, data+doffset, My*Mz, spline->coefs+coffset, Ny*Nz); } // Now, solve in the Y-direction for (int ix=0; ix<Nx; ix++) for (int iz=0; iz<Nz; iz++) { intptr_t doffset = ix*Ny*Nz + iz; intptr_t coffset = ix*Ny*Nz + iz; find_coefs_1d_s (spline->y_grid, spline->yBC, spline->coefs+doffset, Nz, spline->coefs+coffset, Nz); } // Now, solve in the Z-direction for (int ix=0; ix<Nx; ix++) for (int iy=0; iy<Ny; iy++) { intptr_t doffset = (ix*Ny+iy)*Nz; intptr_t coffset = (ix*Ny+iy)*Nz; find_coefs_1d_s (spline->z_grid, spline->zBC, spline->coefs+doffset, 1, spline->coefs+coffset, 1); } } //////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////// //// Single-Precision, Complex Creation Routines //// //////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////// // On input, bands should be filled with: // row 0 : abcdInitial from boundary conditions // rows 1:M: basis functions in first 3 cols, data in last // row M+1 : abcdFinal from boundary conditions // cstride gives the stride between values in coefs. // On exit, coefs with contain interpolating B-spline coefs UBspline_1d_c* create_UBspline_1d_c (Ugrid x_grid, BCtype_c xBC, complex_float *data) { // Create new spline UBspline_1d_c* restrict spline = malloc (sizeof(UBspline_1d_c)); spline->spcode = U1D; spline->tcode = SINGLE_COMPLEX; spline->xBC = xBC; // Setup internal variables int M = x_grid.num; int N; if (xBC.lCode == PERIODIC || xBC.lCode == ANTIPERIODIC) { x_grid.delta = (x_grid.end-x_grid.start)/(double)(x_grid.num); N = M+3; } else { x_grid.delta = (x_grid.end-x_grid.start)/(double)(x_grid.num-1); N = M+2; } x_grid.delta_inv = 1.0/x_grid.delta; spline->x_grid = x_grid; #ifndef HAVE_SSE2 spline->coefs = malloc (2*sizeof(float)*N); #else posix_memalign ((void**)&spline->coefs, 16, 2*sizeof(float)*N); #endif BCtype_s xBC_r, xBC_i; xBC_r.lCode = xBC.lCode; xBC_r.rCode = xBC.rCode; xBC_r.lVal = xBC.lVal_r; xBC_r.rVal = xBC.rVal_r; xBC_i.lCode = xBC.lCode; xBC_i.rCode = xBC.rCode; xBC_i.lVal = xBC.lVal_i; xBC_i.rVal = xBC.rVal_i; // Real part find_coefs_1d_s (spline->x_grid, xBC_r, (float*)data, 2, (float*)spline->coefs, 2); // Imaginarty part find_coefs_1d_s (spline->x_grid, xBC_i, ((float*)data)+1, 2, ((float*)spline->coefs+1), 2); init_sse_data(); return spline; } void recompute_UBspline_1d_c (UBspline_1d_c* spline, complex_float *data) { BCtype_s xBC_r, xBC_i; xBC_r.lCode = spline->xBC.lCode; xBC_r.rCode = spline->xBC.rCode; xBC_r.lVal = spline->xBC.lVal_r; xBC_r.rVal = spline->xBC.rVal_r; xBC_i.lCode = spline->xBC.lCode; xBC_i.rCode = spline->xBC.rCode; xBC_i.lVal = spline->xBC.lVal_i; xBC_i.rVal = spline->xBC.rVal_i; // Real part find_coefs_1d_s (spline->x_grid, xBC_r, (float*)data, 2, (float*)spline->coefs, 2); // Imaginarty part find_coefs_1d_s (spline->x_grid, xBC_i, ((float*)data)+1, 2, ((float*)spline->coefs+1), 2); } UBspline_2d_c* create_UBspline_2d_c (Ugrid x_grid, Ugrid y_grid, BCtype_c xBC, BCtype_c yBC, complex_float *data) { // Create new spline UBspline_2d_c* restrict spline = malloc (sizeof(UBspline_2d_c)); 
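  /* As in the 1-D complex case above, the complex spline is built as two
     interleaved real splines: the BCtype_c boundary conditions are split into
     real (xBC_r, yBC_r) and imaginary (xBC_i, yBC_i) BCtype_s parts, and
     find_coefs_1d_s is applied to the float views of data and coefs with the
     strides doubled so the real/imaginary components remain interleaved. */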
spline->spcode = U2D; spline->tcode = SINGLE_COMPLEX; spline->xBC = xBC; spline->yBC = yBC; // Setup internal variables int Mx = x_grid.num; int My = y_grid.num; int Nx, Ny; if (xBC.lCode == PERIODIC || xBC.lCode == ANTIPERIODIC) Nx = Mx+3; else Nx = Mx+2; x_grid.delta = (x_grid.end - x_grid.start)/(double)(Nx-3); x_grid.delta_inv = 1.0/x_grid.delta; spline->x_grid = x_grid; if (yBC.lCode == PERIODIC || yBC.lCode == ANTIPERIODIC) Ny = My+3; else Ny = My+2; y_grid.delta = (y_grid.end - y_grid.start)/(double)(Ny-3); y_grid.delta_inv = 1.0/y_grid.delta; spline->y_grid = y_grid; spline->x_stride = Ny; #ifndef HAVE_SSE2 spline->coefs = malloc (2*sizeof(float)*Nx*Ny); #else posix_memalign ((void**)&spline->coefs, 16, 2*sizeof(float)*Nx*Ny); #endif BCtype_s xBC_r, xBC_i, yBC_r, yBC_i; xBC_r.lCode = xBC.lCode; xBC_r.rCode = xBC.rCode; xBC_r.lVal = xBC.lVal_r; xBC_r.rVal = xBC.rVal_r; xBC_i.lCode = xBC.lCode; xBC_i.rCode = xBC.rCode; xBC_i.lVal = xBC.lVal_i; xBC_i.rVal = xBC.rVal_i; yBC_r.lCode = yBC.lCode; yBC_r.rCode = yBC.rCode; yBC_r.lVal = yBC.lVal_r; yBC_r.rVal = yBC.rVal_r; yBC_i.lCode = yBC.lCode; yBC_i.rCode = yBC.rCode; yBC_i.lVal = yBC.lVal_i; yBC_i.rVal = yBC.rVal_i; // First, solve in the X-direction for (int iy=0; iy<My; iy++) { intptr_t doffset = 2*iy; intptr_t coffset = 2*iy; // Real part find_coefs_1d_s (spline->x_grid, xBC_r, ((float*)data)+doffset, 2*My, (float*)spline->coefs+coffset, 2*Ny); // Imag part find_coefs_1d_s (spline->x_grid, xBC_i, ((float*)data)+doffset+1, 2*My, ((float*)spline->coefs)+coffset+1, 2*Ny); } // Now, solve in the Y-direction for (int ix=0; ix<Nx; ix++) { intptr_t doffset = 2*ix*Ny; intptr_t coffset = 2*ix*Ny; // Real part find_coefs_1d_s (spline->y_grid, yBC_r, ((float*)spline->coefs)+doffset, 2, ((float*)spline->coefs)+coffset, 2); // Imag part find_coefs_1d_s (spline->y_grid, yBC_i, ((float*)spline->coefs)+doffset+1, 2, ((float*)spline->coefs)+coffset+1, 2); } init_sse_data(); return spline; } void recompute_UBspline_2d_c (UBspline_2d_c* spline, complex_float *data) { // Setup internal variables int Mx = spline->x_grid.num; int My = spline->y_grid.num; int Nx, Ny; if (spline->xBC.lCode == PERIODIC || spline->xBC.lCode == ANTIPERIODIC) Nx = Mx+3; else Nx = Mx+2; if (spline->yBC.lCode == PERIODIC || spline->yBC.lCode == ANTIPERIODIC) Ny = My+3; else Ny = My+2; BCtype_s xBC_r, xBC_i, yBC_r, yBC_i; xBC_r.lCode = spline->xBC.lCode; xBC_r.rCode = spline->xBC.rCode; xBC_r.lVal = spline->xBC.lVal_r; xBC_r.rVal = spline->xBC.rVal_r; xBC_i.lCode = spline->xBC.lCode; xBC_i.rCode = spline->xBC.rCode; xBC_i.lVal = spline->xBC.lVal_i; xBC_i.rVal = spline->xBC.rVal_i; yBC_r.lCode = spline->yBC.lCode; yBC_r.rCode = spline->yBC.rCode; yBC_r.lVal = spline->yBC.lVal_r; yBC_r.rVal = spline->yBC.rVal_r; yBC_i.lCode = spline->yBC.lCode; yBC_i.rCode = spline->yBC.rCode; yBC_i.lVal = spline->yBC.lVal_i; yBC_i.rVal = spline->yBC.rVal_i; // First, solve in the X-direction for (int iy=0; iy<My; iy++) { intptr_t doffset = 2*iy; intptr_t coffset = 2*iy; // Real part find_coefs_1d_s (spline->x_grid, xBC_r, ((float*)data)+doffset, 2*My, (float*)spline->coefs+coffset, 2*Ny); // Imag part find_coefs_1d_s (spline->x_grid, xBC_i, ((float*)data)+doffset+1, 2*My, ((float*)spline->coefs)+coffset+1, 2*Ny); } // Now, solve in the Y-direction for (int ix=0; ix<Nx; ix++) { intptr_t doffset = 2*ix*Ny; intptr_t coffset = 2*ix*Ny; // Real part find_coefs_1d_s (spline->y_grid, yBC_r, ((float*)spline->coefs)+doffset, 2, ((float*)spline->coefs)+coffset, 2); // Imag part find_coefs_1d_s 
(spline->y_grid, yBC_i, ((float*)spline->coefs)+doffset+1, 2, ((float*)spline->coefs)+coffset+1, 2); } } UBspline_3d_c* create_UBspline_3d_c (Ugrid x_grid, Ugrid y_grid, Ugrid z_grid, BCtype_c xBC, BCtype_c yBC, BCtype_c zBC, complex_float *data) { // Create new spline UBspline_3d_c* restrict spline = malloc (sizeof(UBspline_3d_c)); spline->spcode = U3D; spline->tcode = SINGLE_COMPLEX; spline->xBC = xBC; spline->yBC = yBC; spline->zBC = zBC; // Setup internal variables int Mx = x_grid.num; int My = y_grid.num; int Mz = z_grid.num; int Nx, Ny, Nz; if (xBC.lCode == PERIODIC || xBC.lCode == ANTIPERIODIC) Nx = Mx+3; else Nx = Mx+2; x_grid.delta = (x_grid.end - x_grid.start)/(double)(Nx-3); x_grid.delta_inv = 1.0/x_grid.delta; spline->x_grid = x_grid; if (yBC.lCode == PERIODIC || yBC.lCode == ANTIPERIODIC) Ny = My+3; else Ny = My+2; y_grid.delta = (y_grid.end - y_grid.start)/(double)(Ny-3); y_grid.delta_inv = 1.0/y_grid.delta; spline->y_grid = y_grid; if (zBC.lCode == PERIODIC || zBC.lCode == ANTIPERIODIC) Nz = Mz+3; else Nz = Mz+2; z_grid.delta = (z_grid.end - z_grid.start)/(double)(Nz-3); z_grid.delta_inv = 1.0/z_grid.delta; spline->z_grid = z_grid; spline->x_stride = Ny*Nz; spline->y_stride = Nz; #ifndef HAVE_SSE2 spline->coefs = malloc (2*sizeof(float)*Nx*Ny*Nz); #else posix_memalign ((void**)&spline->coefs, 16, 2*sizeof(float)*Nx*Ny*Nz); #endif BCtype_s xBC_r, xBC_i, yBC_r, yBC_i, zBC_r, zBC_i; xBC_r.lCode = xBC.lCode; xBC_r.rCode = xBC.rCode; xBC_r.lVal = xBC.lVal_r; xBC_r.rVal = xBC.rVal_r; xBC_i.lCode = xBC.lCode; xBC_i.rCode = xBC.rCode; xBC_i.lVal = xBC.lVal_i; xBC_i.rVal = xBC.rVal_i; yBC_r.lCode = yBC.lCode; yBC_r.rCode = yBC.rCode; yBC_r.lVal = yBC.lVal_r; yBC_r.rVal = yBC.rVal_r; yBC_i.lCode = yBC.lCode; yBC_i.rCode = yBC.rCode; yBC_i.lVal = yBC.lVal_i; yBC_i.rVal = yBC.rVal_i; zBC_r.lCode = zBC.lCode; zBC_r.rCode = zBC.rCode; zBC_r.lVal = zBC.lVal_r; zBC_r.rVal = zBC.rVal_r; zBC_i.lCode = zBC.lCode; zBC_i.rCode = zBC.rCode; zBC_i.lVal = zBC.lVal_i; zBC_i.rVal = zBC.rVal_i; // First, solve in the X-direction for (int iy=0; iy<My; iy++) for (int iz=0; iz<Mz; iz++) { intptr_t doffset = 2*(iy*Mz+iz); intptr_t coffset = 2*(iy*Nz+iz); // Real part find_coefs_1d_s (spline->x_grid, xBC_r, ((float*)data)+doffset, 2*My*Mz, ((float*)spline->coefs)+coffset, 2*Ny*Nz); // Imag part find_coefs_1d_s (spline->x_grid, xBC_i, ((float*)data)+doffset+1, 2*My*Mz, ((float*)spline->coefs)+coffset+1, 2*Ny*Nz); } // Now, solve in the Y-direction for (int ix=0; ix<Nx; ix++) for (int iz=0; iz<Nz; iz++) { intptr_t doffset = 2*(ix*Ny*Nz + iz); intptr_t coffset = 2*(ix*Ny*Nz + iz); // Real part find_coefs_1d_s (spline->y_grid, yBC_r, ((float*)spline->coefs)+doffset, 2*Nz, ((float*)spline->coefs)+coffset, 2*Nz); // Imag part find_coefs_1d_s (spline->y_grid, yBC_i, ((float*)spline->coefs)+doffset+1, 2*Nz, ((float*)spline->coefs)+coffset+1, 2*Nz); } // Now, solve in the Z-direction for (int ix=0; ix<Nx; ix++) for (int iy=0; iy<Ny; iy++) { intptr_t doffset = 2*((ix*Ny+iy)*Nz); intptr_t coffset = 2*((ix*Ny+iy)*Nz); // Real part find_coefs_1d_s (spline->z_grid, zBC_r, ((float*)spline->coefs)+doffset, 2, ((float*)spline->coefs)+coffset, 2); // Imag part find_coefs_1d_s (spline->z_grid, zBC_i, ((float*)spline->coefs)+doffset+1, 2, ((float*)spline->coefs)+coffset+1, 2); } init_sse_data(); return spline; } void recompute_UBspline_3d_c (UBspline_3d_c* spline, complex_float *data) { // Setup internal variables int Mx = spline->x_grid.num; int My = spline->y_grid.num; int Mz = spline->z_grid.num; int Nx, Ny, Nz; if 
(spline->xBC.lCode == PERIODIC || spline->xBC.lCode == ANTIPERIODIC) Nx = Mx+3; else Nx = Mx+2; if (spline->yBC.lCode == PERIODIC || spline->yBC.lCode == ANTIPERIODIC) Ny = My+3; else Ny = My+2; if (spline->zBC.lCode == PERIODIC || spline->zBC.lCode == ANTIPERIODIC) Nz = Mz+3; else Nz = Mz+2; BCtype_s xBC_r, xBC_i, yBC_r, yBC_i, zBC_r, zBC_i; xBC_r.lCode = spline->xBC.lCode; xBC_r.rCode = spline->xBC.rCode; xBC_r.lVal = spline->xBC.lVal_r; xBC_r.rVal = spline->xBC.rVal_r; xBC_i.lCode = spline->xBC.lCode; xBC_i.rCode = spline->xBC.rCode; xBC_i.lVal = spline->xBC.lVal_i; xBC_i.rVal = spline->xBC.rVal_i; yBC_r.lCode = spline->yBC.lCode; yBC_r.rCode = spline->yBC.rCode; yBC_r.lVal = spline->yBC.lVal_r; yBC_r.rVal = spline->yBC.rVal_r; yBC_i.lCode = spline->yBC.lCode; yBC_i.rCode = spline->yBC.rCode; yBC_i.lVal = spline->yBC.lVal_i; yBC_i.rVal = spline->yBC.rVal_i; zBC_r.lCode = spline->zBC.lCode; zBC_r.rCode = spline->zBC.rCode; zBC_r.lVal = spline->zBC.lVal_r; zBC_r.rVal = spline->zBC.rVal_r; zBC_i.lCode = spline->zBC.lCode; zBC_i.rCode = spline->zBC.rCode; zBC_i.lVal = spline->zBC.lVal_i; zBC_i.rVal = spline->zBC.rVal_i; // First, solve in the X-direction for (int iy=0; iy<My; iy++) for (int iz=0; iz<Mz; iz++) { intptr_t doffset = 2*(iy*Mz+iz); intptr_t coffset = 2*(iy*Nz+iz); // Real part find_coefs_1d_s (spline->x_grid, xBC_r, ((float*)data)+doffset, 2*My*Mz, ((float*)spline->coefs)+coffset, 2*Ny*Nz); // Imag part find_coefs_1d_s (spline->x_grid, xBC_i, ((float*)data)+doffset+1, 2*My*Mz, ((float*)spline->coefs)+coffset+1, 2*Ny*Nz); } // Now, solve in the Y-direction for (int ix=0; ix<Nx; ix++) for (int iz=0; iz<Nz; iz++) { intptr_t doffset = 2*(ix*Ny*Nz + iz); intptr_t coffset = 2*(ix*Ny*Nz + iz); // Real part find_coefs_1d_s (spline->y_grid, yBC_r, ((float*)spline->coefs)+doffset, 2*Nz, ((float*)spline->coefs)+coffset, 2*Nz); // Imag part find_coefs_1d_s (spline->y_grid, yBC_i, ((float*)spline->coefs)+doffset+1, 2*Nz, ((float*)spline->coefs)+coffset+1, 2*Nz); } // Now, solve in the Z-direction for (int ix=0; ix<Nx; ix++) for (int iy=0; iy<Ny; iy++) { intptr_t doffset = 2*((ix*Ny+iy)*Nz); intptr_t coffset = 2*((ix*Ny+iy)*Nz); // Real part find_coefs_1d_s (spline->z_grid, zBC_r, ((float*)spline->coefs)+doffset, 2, ((float*)spline->coefs)+coffset, 2); // Imag part find_coefs_1d_s (spline->z_grid, zBC_i, ((float*)spline->coefs)+doffset+1, 2, ((float*)spline->coefs)+coffset+1, 2); } } //////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////// //// Double-Precision, Real Creation Routines //// //////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////// // On input, bands should be filled with: // row 0 : abcdInitial from boundary conditions // rows 1:M: basis functions in first 3 cols, data in last // row M+1 : abcdFinal from boundary conditions // cstride gives the stride between values in coefs. 
// On exit, coefs with contain interpolating B-spline coefs void solve_deriv_interp_1d_d (double bands[], double coefs[], int M, int cstride) { // Solve interpolating equations // First and last rows are different bands[4*(0)+1] /= bands[4*(0)+0]; bands[4*(0)+2] /= bands[4*(0)+0]; bands[4*(0)+3] /= bands[4*(0)+0]; bands[4*(0)+0] = 1.0; bands[4*(1)+1] -= bands[4*(1)+0]*bands[4*(0)+1]; bands[4*(1)+2] -= bands[4*(1)+0]*bands[4*(0)+2]; bands[4*(1)+3] -= bands[4*(1)+0]*bands[4*(0)+3]; bands[4*(0)+0] = 0.0; bands[4*(1)+2] /= bands[4*(1)+1]; bands[4*(1)+3] /= bands[4*(1)+1]; bands[4*(1)+1] = 1.0; // Now do rows 2 through M+1 for (int row=2; row < (M+1); row++) { bands[4*(row)+1] -= bands[4*(row)+0]*bands[4*(row-1)+2]; bands[4*(row)+3] -= bands[4*(row)+0]*bands[4*(row-1)+3]; bands[4*(row)+2] /= bands[4*(row)+1]; bands[4*(row)+3] /= bands[4*(row)+1]; bands[4*(row)+0] = 0.0; bands[4*(row)+1] = 1.0; } // Do last row bands[4*(M+1)+1] -= bands[4*(M+1)+0]*bands[4*(M-1)+2]; bands[4*(M+1)+3] -= bands[4*(M+1)+0]*bands[4*(M-1)+3]; bands[4*(M+1)+2] -= bands[4*(M+1)+1]*bands[4*(M)+2]; bands[4*(M+1)+3] -= bands[4*(M+1)+1]*bands[4*(M)+3]; bands[4*(M+1)+3] /= bands[4*(M+1)+2]; bands[4*(M+1)+2] = 1.0; coefs[(M+1)*cstride] = bands[4*(M+1)+3]; // Now back substitute up for (int row=M; row>0; row--) coefs[row*cstride] = bands[4*(row)+3] - bands[4*(row)+2]*coefs[cstride*(row+1)]; // Finish with first row coefs[0] = bands[4*(0)+3] - bands[4*(0)+1]*coefs[1*cstride] - bands[4*(0)+2]*coefs[2*cstride]; } // On input, bands should be filled with: // row 0 : abcdInitial from boundary conditions // rows 1:M: basis functions in first 3 cols, data in last // row M+1 : abcdFinal from boundary conditions // cstride gives the stride between values in coefs. // On exit, coefs with contain interpolating B-spline coefs void solve_periodic_interp_1d_d (double bands[], double coefs[], int M, intptr_t cstride) { double lastCol[M]; // Now solve: // First and last rows are different bands[4*(0)+2] /= bands[4*(0)+1]; bands[4*(0)+0] /= bands[4*(0)+1]; bands[4*(0)+3] /= bands[4*(0)+1]; bands[4*(0)+1] = 1.0; bands[4*(M-1)+1] -= bands[4*(M-1)+2]*bands[4*(0)+0]; bands[4*(M-1)+3] -= bands[4*(M-1)+2]*bands[4*(0)+3]; bands[4*(M-1)+2] = -bands[4*(M-1)+2]*bands[4*(0)+2]; lastCol[0] = bands[4*(0)+0]; for (int row=1; row < (M-1); row++) { bands[4*(row)+1] -= bands[4*(row)+0] * bands[4*(row-1)+2]; bands[4*(row)+3] -= bands[4*(row)+0] * bands[4*(row-1)+3]; lastCol[row] = -bands[4*(row)+0] * lastCol[row-1]; bands[4*(row)+0] = 0.0; bands[4*(row)+2] /= bands[4*(row)+1]; bands[4*(row)+3] /= bands[4*(row)+1]; lastCol[row] /= bands[4*(row)+1]; bands[4*(row)+1] = 1.0; if (row < (M-2)) { bands[4*(M-1)+3] -= bands[4*(M-1)+2]*bands[4*(row)+3]; bands[4*(M-1)+1] -= bands[4*(M-1)+2]*lastCol[row]; bands[4*(M-1)+2] = -bands[4*(M-1)+2]*bands[4*(row)+2]; } } // Now do last row // The [2] element and [0] element are now on top of each other bands[4*(M-1)+0] += bands[4*(M-1)+2]; bands[4*(M-1)+1] -= bands[4*(M-1)+0] * (bands[4*(M-2)+2]+lastCol[M-2]); bands[4*(M-1)+3] -= bands[4*(M-1)+0] * bands[4*(M-2)+3]; bands[4*(M-1)+3] /= bands[4*(M-1)+1]; coefs[M*cstride] = bands[4*(M-1)+3]; for (int row=M-2; row>=0; row--) coefs[(row+1)*cstride] = bands[4*(row)+3] - bands[4*(row)+2]*coefs[(row+2)*cstride] - lastCol[row]*coefs[M*cstride]; coefs[0*cstride] = coefs[M*cstride]; coefs[(M+1)*cstride] = coefs[1*cstride]; coefs[(M+2)*cstride] = coefs[2*cstride]; } // On input, bands should be filled with: // row 0 : abcdInitial from boundary conditions // rows 1:M: basis functions in first 
3 cols, data in last // row M+1 : abcdFinal from boundary conditions // cstride gives the stride between values in coefs. // On exit, coefs with contain interpolating B-spline coefs void solve_antiperiodic_interp_1d_d (double bands[], double coefs[], int M, int cstride) { double lastCol[M]; bands[4*0+0] *= -1.0; bands[4*(M-1)+2] *= -1.0; // Now solve: // First and last rows are different bands[4*(0)+2] /= bands[4*(0)+1]; bands[4*(0)+0] /= bands[4*(0)+1]; bands[4*(0)+3] /= bands[4*(0)+1]; bands[4*(0)+1] = 1.0; bands[4*(M-1)+1] -= bands[4*(M-1)+2]*bands[4*(0)+0]; bands[4*(M-1)+3] -= bands[4*(M-1)+2]*bands[4*(0)+3]; bands[4*(M-1)+2] = -bands[4*(M-1)+2]*bands[4*(0)+2]; lastCol[0] = bands[4*(0)+0]; for (int row=1; row < (M-1); row++) { bands[4*(row)+1] -= bands[4*(row)+0] * bands[4*(row-1)+2]; bands[4*(row)+3] -= bands[4*(row)+0] * bands[4*(row-1)+3]; lastCol[row] = -bands[4*(row)+0] * lastCol[row-1]; bands[4*(row)+0] = 0.0; bands[4*(row)+2] /= bands[4*(row)+1]; bands[4*(row)+3] /= bands[4*(row)+1]; lastCol[row] /= bands[4*(row)+1]; bands[4*(row)+1] = 1.0; if (row < (M-2)) { bands[4*(M-1)+3] -= bands[4*(M-1)+2]*bands[4*(row)+3]; bands[4*(M-1)+1] -= bands[4*(M-1)+2]*lastCol[row]; bands[4*(M-1)+2] = -bands[4*(M-1)+2]*bands[4*(row)+2]; } } // Now do last row // The [2] element and [0] element are now on top of each other bands[4*(M-1)+0] += bands[4*(M-1)+2]; bands[4*(M-1)+1] -= bands[4*(M-1)+0] * (bands[4*(M-2)+2]+lastCol[M-2]); bands[4*(M-1)+3] -= bands[4*(M-1)+0] * bands[4*(M-2)+3]; bands[4*(M-1)+3] /= bands[4*(M-1)+1]; coefs[M*cstride] = bands[4*(M-1)+3]; for (int row=M-2; row>=0; row--) coefs[(row+1)*cstride] = bands[4*(row)+3] - bands[4*(row)+2]*coefs[(row+2)*cstride] - lastCol[row]*coefs[M*cstride]; coefs[0*cstride] = -coefs[M*cstride]; coefs[(M+1)*cstride] = -coefs[1*cstride]; coefs[(M+2)*cstride] = -coefs[2*cstride]; } void find_coefs_1d_d (Ugrid grid, BCtype_d bc, double *data, intptr_t dstride, double *coefs, intptr_t cstride) { int M = grid.num; double basis[4] = {1.0/6.0, 2.0/3.0, 1.0/6.0, 0.0}; if (bc.lCode == PERIODIC || bc.lCode == ANTIPERIODIC) { #ifdef HAVE_C_VARARRAYS double bands[M*4]; #else double *bands = malloc (4*M*sizeof(double)); #endif for (int i=0; i<M; i++) { bands[4*i+0] = basis[0]; bands[4*i+1] = basis[1]; bands[4*i+2] = basis[2]; bands[4*i+3] = data[i*dstride]; } if (bc.lCode == ANTIPERIODIC) solve_antiperiodic_interp_1d_d (bands, coefs, M, cstride); else solve_periodic_interp_1d_d (bands, coefs, M, cstride); #ifndef HAVE_C_VARARRAYS free (bands); #endif } else { // Setup boundary conditions double abcd_left[4], abcd_right[4]; // Left boundary if (bc.lCode == FLAT || bc.lCode == NATURAL) bc.lVal = 0.0; if (bc.lCode == FLAT || bc.lCode == DERIV1) { abcd_left[0] = -0.5 * grid.delta_inv; abcd_left[1] = 0.0 * grid.delta_inv; abcd_left[2] = 0.5 * grid.delta_inv; abcd_left[3] = bc.lVal; } if (bc.lCode == NATURAL || bc.lCode == DERIV2) { abcd_left[0] = 1.0 * grid.delta_inv * grid.delta_inv; abcd_left[1] =-2.0 * grid.delta_inv * grid.delta_inv; abcd_left[2] = 1.0 * grid.delta_inv * grid.delta_inv; abcd_left[3] = bc.lVal; } // Right boundary if (bc.rCode == FLAT || bc.rCode == NATURAL) bc.rVal = 0.0; if (bc.rCode == FLAT || bc.rCode == DERIV1) { abcd_right[0] = -0.5 * grid.delta_inv; abcd_right[1] = 0.0 * grid.delta_inv; abcd_right[2] = 0.5 * grid.delta_inv; abcd_right[3] = bc.rVal; } if (bc.rCode == NATURAL || bc.rCode == DERIV2) { abcd_right[0] = 1.0 *grid.delta_inv * grid.delta_inv; abcd_right[1] =-2.0 *grid.delta_inv * grid.delta_inv; abcd_right[2] = 1.0 *grid.delta_inv * 
grid.delta_inv; abcd_right[3] = bc.rVal; } #ifdef HAVE_C_VARARRAYS double bands[(M+2)*4]; #else double *bands = malloc ((M+2)*4*sizeof(double)); #endif for (int i=0; i<4; i++) { bands[4*( 0 )+i] = abcd_left[i]; bands[4*(M+1)+i] = abcd_right[i]; } for (int i=0; i<M; i++) { for (int j=0; j<3; j++) bands[4*(i+1)+j] = basis[j]; bands[4*(i+1)+3] = data[i*dstride]; } // Now, solve for coefficients solve_deriv_interp_1d_d (bands, coefs, M, cstride); #ifndef HAVE_C_VARARRAYS free (bands); #endif } } UBspline_1d_d* create_UBspline_1d_d (Ugrid x_grid, BCtype_d xBC, double *data) { // Create new spline UBspline_1d_d* restrict spline = malloc (sizeof(UBspline_1d_d)); spline->spcode = U1D; spline->tcode = DOUBLE_REAL; spline->xBC = xBC; // Setup internal variables int M = x_grid.num; int N; if (xBC.lCode == PERIODIC || xBC.lCode == ANTIPERIODIC) { x_grid.delta = (x_grid.end-x_grid.start)/(double)(x_grid.num); N = M+3; } else { x_grid.delta = (x_grid.end-x_grid.start)/(double)(x_grid.num-1); N = M+2; } x_grid.delta_inv = 1.0/x_grid.delta; spline->x_grid = x_grid; #ifndef HAVE_SSE2 spline->coefs = malloc (sizeof(double)*N); #else posix_memalign ((void**)&spline->coefs, 16, sizeof(double)*N); #endif find_coefs_1d_d (spline->x_grid, xBC, data, 1, spline->coefs, 1); init_sse_data(); return spline; } void recompute_UBspline_1d_d (UBspline_1d_d* spline, double *data) { find_coefs_1d_d (spline->x_grid, spline->xBC, data, 1, spline->coefs, 1); } UBspline_2d_d* create_UBspline_2d_d (Ugrid x_grid, Ugrid y_grid, BCtype_d xBC, BCtype_d yBC, double *data) { // Create new spline UBspline_2d_d* restrict spline = malloc (sizeof(UBspline_2d_d)); spline->spcode = U2D; spline->tcode = DOUBLE_REAL; spline->xBC = xBC; spline->yBC = yBC; // Setup internal variables int Mx = x_grid.num; int My = y_grid.num; int Nx, Ny; if (xBC.lCode == PERIODIC || xBC.lCode == ANTIPERIODIC) Nx = Mx+3; else Nx = Mx+2; x_grid.delta = (x_grid.end - x_grid.start)/(double)(Nx-3); x_grid.delta_inv = 1.0/x_grid.delta; spline->x_grid = x_grid; if (yBC.lCode == PERIODIC || yBC.lCode == ANTIPERIODIC) Ny = My+3; else Ny = My+2; y_grid.delta = (y_grid.end - y_grid.start)/(double)(Ny-3); y_grid.delta_inv = 1.0/y_grid.delta; spline->y_grid = y_grid; spline->x_stride = Ny; #ifndef HAVE_SSE2 spline->coefs = malloc (sizeof(double)*Nx*Ny); #else posix_memalign ((void**)&spline->coefs, 16, (sizeof(double)*Nx*Ny)); #endif // First, solve in the X-direction for (int iy=0; iy<My; iy++) { intptr_t doffset = iy; intptr_t coffset = iy; find_coefs_1d_d (spline->x_grid, xBC, data+doffset, My, spline->coefs+coffset, Ny); } // Now, solve in the Y-direction for (int ix=0; ix<Nx; ix++) { intptr_t doffset = ix*Ny; intptr_t coffset = ix*Ny; find_coefs_1d_d (spline->y_grid, yBC, spline->coefs+doffset, 1, spline->coefs+coffset, 1); } init_sse_data(); return spline; } void recompute_UBspline_2d_d (UBspline_2d_d* spline, double *data) { int Mx = spline->x_grid.num; int My = spline->y_grid.num; int Nx, Ny; if (spline->xBC.lCode == PERIODIC || spline->xBC.lCode == ANTIPERIODIC) Nx = Mx+3; else Nx = Mx+2; if (spline->yBC.lCode == PERIODIC || spline->yBC.lCode == ANTIPERIODIC) Ny = My+3; else Ny = My+2; // First, solve in the X-direction for (int iy=0; iy<My; iy++) { intptr_t doffset = iy; intptr_t coffset = iy; find_coefs_1d_d (spline->x_grid, spline->xBC, data+doffset, My, spline->coefs+coffset, Ny); } // Now, solve in the Y-direction for (int ix=0; ix<Nx; ix++) { intptr_t doffset = ix*Ny; intptr_t coffset = ix*Ny; find_coefs_1d_d (spline->y_grid, spline->yBC, 
spline->coefs+doffset, 1, spline->coefs+coffset, 1); } } UBspline_3d_d* create_UBspline_3d_d (Ugrid x_grid, Ugrid y_grid, Ugrid z_grid, BCtype_d xBC, BCtype_d yBC, BCtype_d zBC, double *data) { // Create new spline UBspline_3d_d* restrict spline = malloc (sizeof(UBspline_3d_d)); spline->spcode = U3D; spline->tcode = DOUBLE_REAL; spline->xBC = xBC; spline->yBC = yBC; spline->zBC = zBC; // Setup internal variables int Mx = x_grid.num; int My = y_grid.num; int Mz = z_grid.num; int Nx, Ny, Nz; if (xBC.lCode == PERIODIC || xBC.lCode == ANTIPERIODIC) Nx = Mx+3; else Nx = Mx+2; x_grid.delta = (x_grid.end - x_grid.start)/(double)(Nx-3); x_grid.delta_inv = 1.0/x_grid.delta; spline->x_grid = x_grid; if (yBC.lCode == PERIODIC || yBC.lCode == ANTIPERIODIC) Ny = My+3; else Ny = My+2; y_grid.delta = (y_grid.end - y_grid.start)/(double)(Ny-3); y_grid.delta_inv = 1.0/y_grid.delta; spline->y_grid = y_grid; if (zBC.lCode == PERIODIC || zBC.lCode == ANTIPERIODIC) Nz = Mz+3; else Nz = Mz+2; z_grid.delta = (z_grid.end - z_grid.start)/(double)(Nz-3); z_grid.delta_inv = 1.0/z_grid.delta; spline->z_grid = z_grid; spline->x_stride = Ny*Nz; spline->y_stride = Nz; spline->coefs_size=(size_t)Nx*(size_t)Ny*(size_t)Nz; #ifndef HAVE_SSE2 spline->coefs = malloc (sizeof(double)*spline->coefs_size); #else posix_memalign ((void**)&spline->coefs, 16, (sizeof(double)*spline->coefs_size)); #endif if(data != NULL) // only data is provided { // First, solve in the X-direction for (int iy=0; iy<My; iy++) for (int iz=0; iz<Mz; iz++) { intptr_t doffset = iy*Mz+iz; intptr_t coffset = iy*Nz+iz; find_coefs_1d_d (spline->x_grid, xBC, data+doffset, My*Mz, spline->coefs+coffset, Ny*Nz); } // Now, solve in the Y-direction for (int ix=0; ix<Nx; ix++) for (int iz=0; iz<Nz; iz++) { intptr_t doffset = ix*Ny*Nz + iz; intptr_t coffset = ix*Ny*Nz + iz; find_coefs_1d_d (spline->y_grid, yBC, spline->coefs+doffset, Nz, spline->coefs+coffset, Nz); } // Now, solve in the Z-direction for (int ix=0; ix<Nx; ix++) for (int iy=0; iy<Ny; iy++) { intptr_t doffset = (ix*Ny+iy)*Nz; intptr_t coffset = (ix*Ny+iy)*Nz; find_coefs_1d_d (spline->z_grid, zBC, spline->coefs+doffset, 1, spline->coefs+coffset, 1); } } init_sse_data(); return spline; } void recompute_UBspline_3d_d (UBspline_3d_d* spline, double *data) { int Mx = spline->x_grid.num; int My = spline->y_grid.num; int Mz = spline->z_grid.num; int Nx, Ny, Nz; if (spline->xBC.lCode == PERIODIC || spline->xBC.lCode == ANTIPERIODIC) Nx = Mx+3; else Nx = Mx+2; if (spline->yBC.lCode == PERIODIC || spline->yBC.lCode == ANTIPERIODIC) Ny = My+3; else Ny = My+2; if (spline->zBC.lCode == PERIODIC || spline->zBC.lCode == ANTIPERIODIC) Nz = Mz+3; else Nz = Mz+2; // First, solve in the X-direction for (int iy=0; iy<My; iy++) for (int iz=0; iz<Mz; iz++) { intptr_t doffset = iy*Mz+iz; intptr_t coffset = iy*Nz+iz; find_coefs_1d_d (spline->x_grid, spline->xBC, data+doffset, My*Mz, spline->coefs+coffset, Ny*Nz); } // Now, solve in the Y-direction for (int ix=0; ix<Nx; ix++) for (int iz=0; iz<Nz; iz++) { intptr_t doffset = ix*Ny*Nz + iz; intptr_t coffset = ix*Ny*Nz + iz; find_coefs_1d_d (spline->y_grid, spline->yBC, spline->coefs+doffset, Nz, spline->coefs+coffset, Nz); } // Now, solve in the Z-direction for (int ix=0; ix<Nx; ix++) for (int iy=0; iy<Ny; iy++) { intptr_t doffset = (ix*Ny+iy)*Nz; intptr_t coffset = (ix*Ny+iy)*Nz; find_coefs_1d_d (spline->z_grid, spline->zBC, spline->coefs+doffset, 1, spline->coefs+coffset, 1); } } //////////////////////////////////////////////////////////// 
////////////////////////////////////////////////////////////
////     Double-Precision, Complex Creation Routines    ////
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////

// On input, bands should be filled with:
// row 0 : abcdInitial from boundary conditions
// rows 1:M: basis functions in first 3 cols, data in last
// row M+1 : abcdFinal from boundary conditions
// cstride gives the stride between values in coefs.
// On exit, coefs will contain interpolating B-spline coefs
UBspline_1d_z*
create_UBspline_1d_z (Ugrid x_grid, BCtype_z xBC, complex_double *data)
{
  // Create new spline
  UBspline_1d_z* restrict spline = malloc (sizeof(UBspline_1d_z));
  spline->spcode = U1D;
  spline->tcode  = DOUBLE_COMPLEX;
  spline->xBC    = xBC;
  // Setup internal variables
  int M = x_grid.num;
  int N;
  if (xBC.lCode == PERIODIC || xBC.lCode == ANTIPERIODIC) {
    x_grid.delta = (x_grid.end-x_grid.start)/(double)(x_grid.num);
    N = M+3;
  }
  else {
    x_grid.delta = (x_grid.end-x_grid.start)/(double)(x_grid.num-1);
    N = M+2;
  }
  x_grid.delta_inv = 1.0/x_grid.delta;
  spline->x_grid   = x_grid;
#ifndef HAVE_SSE2
  spline->coefs = malloc (2*sizeof(double)*N);
#else
  posix_memalign ((void**)&spline->coefs, 16, 2*sizeof(double)*N);
#endif
  BCtype_d xBC_r, xBC_i;
  xBC_r.lCode = xBC.lCode;  xBC_r.rCode = xBC.rCode;
  xBC_r.lVal  = xBC.lVal_r; xBC_r.rVal  = xBC.rVal_r;
  xBC_i.lCode = xBC.lCode;  xBC_i.rCode = xBC.rCode;
  xBC_i.lVal  = xBC.lVal_i; xBC_i.rVal  = xBC.rVal_i;
  // Real part
  find_coefs_1d_d (spline->x_grid, xBC_r, (double*)data, 2,
                   (double*)spline->coefs, 2);
  // Imaginary part
  find_coefs_1d_d (spline->x_grid, xBC_i, ((double*)data)+1, 2,
                   ((double*)spline->coefs)+1, 2);
  init_sse_data();
  return spline;
}

void
recompute_UBspline_1d_z (UBspline_1d_z* spline, complex_double *data)
{
  int M = spline->x_grid.num;
  int N;
  if (spline->xBC.lCode == PERIODIC || spline->xBC.lCode == ANTIPERIODIC)
    N = M+3;
  else
    N = M+2;
  BCtype_d xBC_r, xBC_i;
  xBC_r.lCode = spline->xBC.lCode;  xBC_r.rCode = spline->xBC.rCode;
  xBC_r.lVal  = spline->xBC.lVal_r; xBC_r.rVal  = spline->xBC.rVal_r;
  xBC_i.lCode = spline->xBC.lCode;  xBC_i.rCode = spline->xBC.rCode;
  xBC_i.lVal  = spline->xBC.lVal_i; xBC_i.rVal  = spline->xBC.rVal_i;
  // Real part
  find_coefs_1d_d (spline->x_grid, xBC_r, (double*)data, 2,
                   (double*)spline->coefs, 2);
  // Imaginary part
  find_coefs_1d_d (spline->x_grid, xBC_i, ((double*)data)+1, 2,
                   ((double*)spline->coefs)+1, 2);
}

UBspline_2d_z*
create_UBspline_2d_z (Ugrid x_grid, Ugrid y_grid,
                      BCtype_z xBC, BCtype_z yBC, complex_double *data)
{
  // Create new spline
  UBspline_2d_z* restrict spline = malloc (sizeof(UBspline_2d_z));
  spline->spcode = U2D;
  spline->tcode  = DOUBLE_COMPLEX;
  spline->xBC    = xBC;
  spline->yBC    = yBC;
  // Setup internal variables
  int Mx = x_grid.num;
  int My = y_grid.num;
  int Nx, Ny;
  if (xBC.lCode == PERIODIC || xBC.lCode == ANTIPERIODIC)  Nx = Mx+3;
  else                                                     Nx = Mx+2;
  x_grid.delta     = (x_grid.end - x_grid.start)/(double)(Nx-3);
  x_grid.delta_inv = 1.0/x_grid.delta;
  spline->x_grid   = x_grid;
  if (yBC.lCode == PERIODIC || yBC.lCode == ANTIPERIODIC)  Ny = My+3;
  else                                                     Ny = My+2;
  y_grid.delta     = (y_grid.end - y_grid.start)/(double)(Ny-3);
  y_grid.delta_inv = 1.0/y_grid.delta;
  spline->y_grid   = y_grid;
  spline->x_stride = Ny;
#ifndef HAVE_SSE2
  spline->coefs = malloc (2*sizeof(double)*Nx*Ny);
#else
  posix_memalign ((void**)&spline->coefs, 16, 2*sizeof(double)*Nx*Ny);
#endif
  BCtype_d xBC_r, xBC_i, yBC_r, yBC_i;
  xBC_r.lCode = xBC.lCode;  xBC_r.rCode = xBC.rCode;
  xBC_r.lVal  = xBC.lVal_r; xBC_r.rVal  = xBC.rVal_r;
  xBC_i.lCode =
xBC.lCode; xBC_i.rCode = xBC.rCode; xBC_i.lVal = xBC.lVal_i; xBC_i.rVal = xBC.rVal_i; yBC_r.lCode = yBC.lCode; yBC_r.rCode = yBC.rCode; yBC_r.lVal = yBC.lVal_r; yBC_r.rVal = yBC.rVal_r; yBC_i.lCode = yBC.lCode; yBC_i.rCode = yBC.rCode; yBC_i.lVal = yBC.lVal_i; yBC_i.rVal = yBC.rVal_i; // First, solve in the X-direction for (int iy=0; iy<My; iy++) { intptr_t doffset = 2*iy; intptr_t coffset = 2*iy; // Real part find_coefs_1d_d (spline->x_grid, xBC_r, ((double*)data+doffset), 2*My, (double*)spline->coefs+coffset, 2*Ny); // Imag part find_coefs_1d_d (spline->x_grid, xBC_i, ((double*)data)+doffset+1, 2*My, ((double*)spline->coefs)+coffset+1, 2*Ny); } // Now, solve in the Y-direction for (int ix=0; ix<Nx; ix++) { intptr_t doffset = 2*ix*Ny; intptr_t coffset = 2*ix*Ny; // Real part find_coefs_1d_d (spline->y_grid, yBC_r, ((double*)spline->coefs)+doffset, 2, (double*)spline->coefs+coffset, 2); // Imag part find_coefs_1d_d (spline->y_grid, yBC_i, (double*)spline->coefs+doffset+1, 2, ((double*)spline->coefs)+coffset+1, 2); } init_sse_data(); return spline; } void recompute_UBspline_2d_z (UBspline_2d_z* spline, complex_double *data) { int Mx = spline->x_grid.num; int My = spline->y_grid.num; int Nx, Ny; if (spline->xBC.lCode == PERIODIC || spline->xBC.lCode == ANTIPERIODIC) Nx = Mx+3; else Nx = Mx+2; if (spline->yBC.lCode == PERIODIC || spline->yBC.lCode == ANTIPERIODIC) Ny = My+3; else Ny = My+2; BCtype_d xBC_r, xBC_i, yBC_r, yBC_i; xBC_r.lCode = spline->xBC.lCode; xBC_r.rCode = spline->xBC.rCode; xBC_r.lVal = spline->xBC.lVal_r; xBC_r.rVal = spline->xBC.rVal_r; xBC_i.lCode = spline->xBC.lCode; xBC_i.rCode = spline->xBC.rCode; xBC_i.lVal = spline->xBC.lVal_i; xBC_i.rVal = spline->xBC.rVal_i; yBC_r.lCode = spline->yBC.lCode; yBC_r.rCode = spline->yBC.rCode; yBC_r.lVal = spline->yBC.lVal_r; yBC_r.rVal = spline->yBC.rVal_r; yBC_i.lCode = spline->yBC.lCode; yBC_i.rCode = spline->yBC.rCode; yBC_i.lVal = spline->yBC.lVal_i; yBC_i.rVal = spline->yBC.rVal_i; // First, solve in the X-direction for (int iy=0; iy<My; iy++) { intptr_t doffset = 2*iy; intptr_t coffset = 2*iy; // Real part find_coefs_1d_d (spline->x_grid, xBC_r, ((double*)data+doffset), 2*My, (double*)spline->coefs+coffset, 2*Ny); // Imag part find_coefs_1d_d (spline->x_grid, xBC_i, ((double*)data)+doffset+1, 2*My, ((double*)spline->coefs)+coffset+1, 2*Ny); } // Now, solve in the Y-direction for (int ix=0; ix<Nx; ix++) { intptr_t doffset = 2*ix*Ny; intptr_t coffset = 2*ix*Ny; // Real part find_coefs_1d_d (spline->y_grid, yBC_r, ((double*)spline->coefs)+doffset, 2, (double*)spline->coefs+coffset, 2); // Imag part find_coefs_1d_d (spline->y_grid, yBC_i, (double*)spline->coefs+doffset+1, 2, ((double*)spline->coefs)+coffset+1, 2); } } UBspline_3d_z* create_UBspline_3d_z (Ugrid x_grid, Ugrid y_grid, Ugrid z_grid, BCtype_z xBC, BCtype_z yBC, BCtype_z zBC, complex_double *data) { // Create new spline UBspline_3d_z* restrict spline = malloc (sizeof(UBspline_3d_z)); spline->spcode = U3D; spline->tcode = DOUBLE_COMPLEX; spline->xBC = xBC; spline->yBC = yBC; spline->zBC = zBC; // Setup internal variables int Mx = x_grid.num; int My = y_grid.num; int Mz = z_grid.num; int Nx, Ny, Nz; if (xBC.lCode == PERIODIC || xBC.lCode == ANTIPERIODIC) Nx = Mx+3; else Nx = Mx+2; x_grid.delta = (x_grid.end - x_grid.start)/(double)(Nx-3); x_grid.delta_inv = 1.0/x_grid.delta; spline->x_grid = x_grid; if (yBC.lCode == PERIODIC || yBC.lCode == ANTIPERIODIC) Ny = My+3; else Ny = My+2; y_grid.delta = (y_grid.end - y_grid.start)/(double)(Ny-3); y_grid.delta_inv = 
1.0/y_grid.delta; spline->y_grid = y_grid; if (zBC.lCode == PERIODIC || zBC.lCode == ANTIPERIODIC) Nz = Mz+3; else Nz = Mz+2; z_grid.delta = (z_grid.end - z_grid.start)/(double)(Nz-3); z_grid.delta_inv = 1.0/z_grid.delta; spline->z_grid = z_grid; spline->x_stride = Ny*Nz; spline->y_stride = Nz; #ifndef HAVE_SSE2 spline->coefs = malloc (2*sizeof(double)*Nx*Ny*Nz); #else posix_memalign ((void**)&spline->coefs, 16, 2*sizeof(double)*Nx*Ny*Nz); #endif BCtype_d xBC_r, xBC_i, yBC_r, yBC_i, zBC_r, zBC_i; xBC_r.lCode = xBC.lCode; xBC_r.rCode = xBC.rCode; xBC_r.lVal = xBC.lVal_r; xBC_r.rVal = xBC.rVal_r; xBC_i.lCode = xBC.lCode; xBC_i.rCode = xBC.rCode; xBC_i.lVal = xBC.lVal_i; xBC_i.rVal = xBC.rVal_i; yBC_r.lCode = yBC.lCode; yBC_r.rCode = yBC.rCode; yBC_r.lVal = yBC.lVal_r; yBC_r.rVal = yBC.rVal_r; yBC_i.lCode = yBC.lCode; yBC_i.rCode = yBC.rCode; yBC_i.lVal = yBC.lVal_i; yBC_i.rVal = yBC.rVal_i; zBC_r.lCode = zBC.lCode; zBC_r.rCode = zBC.rCode; zBC_r.lVal = zBC.lVal_r; zBC_r.rVal = zBC.rVal_r; zBC_i.lCode = zBC.lCode; zBC_i.rCode = zBC.rCode; zBC_i.lVal = zBC.lVal_i; zBC_i.rVal = zBC.rVal_i; // First, solve in the X-direction for (int iy=0; iy<My; iy++) for (int iz=0; iz<Mz; iz++) { intptr_t doffset = 2*(iy*Mz+iz); intptr_t coffset = 2*(iy*Nz+iz); // Real part find_coefs_1d_d (spline->x_grid, xBC_r, ((double*)data)+doffset, 2*My*Mz, ((double*)spline->coefs)+coffset, 2*Ny*Nz); // Imag part find_coefs_1d_d (spline->x_grid, xBC_i, ((double*)data)+doffset+1, 2*My*Mz, ((double*)spline->coefs)+coffset+1, 2*Ny*Nz); } // Now, solve in the Y-direction for (int ix=0; ix<Nx; ix++) for (int iz=0; iz<Nz; iz++) { intptr_t doffset = 2*(ix*Ny*Nz + iz); intptr_t coffset = 2*(ix*Ny*Nz + iz); // Real part find_coefs_1d_d (spline->y_grid, yBC_r, ((double*)spline->coefs)+doffset, 2*Nz, ((double*)spline->coefs)+coffset, 2*Nz); // Imag part find_coefs_1d_d (spline->y_grid, yBC_i, ((double*)spline->coefs)+doffset+1, 2*Nz, ((double*)spline->coefs)+coffset+1, 2*Nz); } // Now, solve in the Z-direction for (int ix=0; ix<Nx; ix++) for (int iy=0; iy<Ny; iy++) { intptr_t doffset = 2*((ix*Ny+iy)*Nz); intptr_t coffset = 2*((ix*Ny+iy)*Nz); // Real part find_coefs_1d_d (spline->z_grid, zBC_r, ((double*)spline->coefs)+doffset, 2, ((double*)spline->coefs)+coffset, 2); // Imag part find_coefs_1d_d (spline->z_grid, zBC_i, ((double*)spline->coefs)+doffset+1, 2, ((double*)spline->coefs)+coffset+1, 2); } init_sse_data(); return spline; } void recompute_UBspline_3d_z (UBspline_3d_z* spline, complex_double *data) { // Setup internal variables int Mx = spline->x_grid.num; int My = spline->y_grid.num; int Mz = spline->z_grid.num; int Nx, Ny, Nz; if (spline->xBC.lCode == PERIODIC || spline->xBC.lCode == ANTIPERIODIC) Nx = Mx+3; else Nx = Mx+2; if (spline->yBC.lCode == PERIODIC || spline->yBC.lCode == ANTIPERIODIC) Ny = My+3; else Ny = My+2; if (spline->zBC.lCode == PERIODIC || spline->zBC.lCode == ANTIPERIODIC) Nz = Mz+3; else Nz = Mz+2; BCtype_d xBC_r, xBC_i, yBC_r, yBC_i, zBC_r, zBC_i; xBC_r.lCode = spline->xBC.lCode; xBC_r.rCode = spline->xBC.rCode; xBC_r.lVal = spline->xBC.lVal_r; xBC_r.rVal = spline->xBC.rVal_r; xBC_i.lCode = spline->xBC.lCode; xBC_i.rCode = spline->xBC.rCode; xBC_i.lVal = spline->xBC.lVal_i; xBC_i.rVal = spline->xBC.rVal_i; yBC_r.lCode = spline->yBC.lCode; yBC_r.rCode = spline->yBC.rCode; yBC_r.lVal = spline->yBC.lVal_r; yBC_r.rVal = spline->yBC.rVal_r; yBC_i.lCode = spline->yBC.lCode; yBC_i.rCode = spline->yBC.rCode; yBC_i.lVal = spline->yBC.lVal_i; yBC_i.rVal = spline->yBC.rVal_i; zBC_r.lCode = spline->zBC.lCode; 
  zBC_r.rCode = spline->zBC.rCode;
  zBC_r.lVal  = spline->zBC.lVal_r; zBC_r.rVal  = spline->zBC.rVal_r;
  zBC_i.lCode = spline->zBC.lCode;  zBC_i.rCode = spline->zBC.rCode;
  zBC_i.lVal  = spline->zBC.lVal_i; zBC_i.rVal  = spline->zBC.rVal_i;
  // First, solve in the X-direction
  for (int iy=0; iy<My; iy++)
    for (int iz=0; iz<Mz; iz++) {
      intptr_t doffset = 2*(iy*Mz+iz);
      intptr_t coffset = 2*(iy*Nz+iz);
      // Real part
      find_coefs_1d_d (spline->x_grid, xBC_r, ((double*)data)+doffset, 2*My*Mz,
                       ((double*)spline->coefs)+coffset, 2*Ny*Nz);
      // Imag part
      find_coefs_1d_d (spline->x_grid, xBC_i, ((double*)data)+doffset+1, 2*My*Mz,
                       ((double*)spline->coefs)+coffset+1, 2*Ny*Nz);
    }
  // Now, solve in the Y-direction
  for (int ix=0; ix<Nx; ix++)
    for (int iz=0; iz<Nz; iz++) {
      intptr_t doffset = 2*(ix*Ny*Nz + iz);
      intptr_t coffset = 2*(ix*Ny*Nz + iz);
      // Real part
      find_coefs_1d_d (spline->y_grid, yBC_r, ((double*)spline->coefs)+doffset, 2*Nz,
                       ((double*)spline->coefs)+coffset, 2*Nz);
      // Imag part
      find_coefs_1d_d (spline->y_grid, yBC_i, ((double*)spline->coefs)+doffset+1, 2*Nz,
                       ((double*)spline->coefs)+coffset+1, 2*Nz);
    }
  // Now, solve in the Z-direction
  for (int ix=0; ix<Nx; ix++)
    for (int iy=0; iy<Ny; iy++) {
      intptr_t doffset = 2*((ix*Ny+iy)*Nz);
      intptr_t coffset = 2*((ix*Ny+iy)*Nz);
      // Real part
      find_coefs_1d_d (spline->z_grid, zBC_r, ((double*)spline->coefs)+doffset, 2,
                       ((double*)spline->coefs)+coffset, 2);
      // Imag part
      find_coefs_1d_d (spline->z_grid, zBC_i, ((double*)spline->coefs)+doffset+1, 2,
                       ((double*)spline->coefs)+coffset+1, 2);
    }
}

void
destroy_UBspline (Bspline *spline)
{
  free (spline->coefs);
  free (spline);
}

void destroy_NUBspline (Bspline *spline);
void destroy_multi_UBspline (Bspline *spline);

void
destroy_Bspline (void *spline)
{
  Bspline *sp = (Bspline *)spline;
  if (sp->sp_code <= U3D)
    destroy_UBspline (sp);
  else if (sp->sp_code <= NU3D)
    destroy_NUBspline (sp);
  else if (sp->sp_code <= MULTI_U3D)
    destroy_multi_UBspline (sp);
  else
    fprintf (stderr, "Error in destroy_Bspline: invalid spline code %d.\n",
             sp->sp_code);
}
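/////////////////////////////////////////////////////////////////////////////
// Illustrative usage sketch: a minimal example of how the creation,
// recompute, and destroy entry points above are typically called.  The grid
// size, boundary codes, and sample data are made-up example values, and the
// block is wrapped in #if 0 so it is never compiled with the library.
/////////////////////////////////////////////////////////////////////////////
#if 0
#include <math.h>
#include "bspline_create.h"

static void
example_spline_usage (void)
{
  // Uniform periodic grid on [0,1) with 32 samples of sin(2*pi*x)
  Ugrid grid;
  grid.start = 0.0;
  grid.end   = 1.0;
  grid.num   = 32;

  BCtype_d bc;
  bc.lCode = PERIODIC;   // lVal/rVal are unused for periodic BCs
  bc.rCode = PERIODIC;

  double data[32];
  for (int i = 0; i < 32; i++)
    data[i] = sin (2.0*3.14159265358979324*(double)i/32.0);

  // Solve the cyclic banded system for the interpolating coefficients
  UBspline_1d_d *spline = create_UBspline_1d_d (grid, bc, data);

  // ... evaluate via the eval_UBspline_1d_* routines from the separate
  //     evaluation headers (not shown here) ...

  // Refit the same spline to new data without reallocating the coefficients
  recompute_UBspline_1d_d (spline, data);

  // Generic destructor; dispatches on the spline code set at creation time
  destroy_Bspline (spline);
}
#endif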
///////////////////////////////////////////////////////////////////////////// // einspline: a library for creating and evaluating B-splines // // Copyright (C) 2007 Kenneth P. Esler, Jr. // // // // This program is free software; you can redistribute it and/or modify // // it under the terms of the GNU General Public License as published by // // the Free Software Foundation; either version 2 of the License, or // // (at your option) any later version. // // // // This program is distributed in the hope that it will be useful, // // but WITHOUT ANY WARRANTY; without even the implied warranty of // // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // // GNU General Public License for more details. // // // // You should have received a copy of the GNU General Public License // // along with this program; if not, write to the Free Software // // Foundation, Inc., 51 Franklin Street, Fifth Floor, // // Boston, MA 02110-1301 USA // ///////////////////////////////////////////////////////////////////////////// #include "bspline_create.h" #ifndef _XOPEN_SOURCE #define _XOPEN_SOURCE 600 #endif #ifndef __USE_XOPEN2K #define __USE_XOPEN2K #endif #include <stdlib.h> #include <stdio.h> #include <inttypes.h> int posix_memalign(void **memptr, size_t alignment, size_t size); //////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////// //// Helper functions for spline creation //// //////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////// void init_sse_data(); void find_coefs_1d_d (Ugrid grid, BCtype_d bc, double *data, intptr_t dstride, double *coefs, intptr_t cstride); void solve_deriv_interp_1d_s (float bands[], float coefs[], int M, int cstride) { // Solve interpolating equations // First and last rows are different bands[4*(0)+1] /= bands[4*(0)+0]; bands[4*(0)+2] /= bands[4*(0)+0]; bands[4*(0)+3] /= bands[4*(0)+0]; bands[4*(0)+0] = 1.0; bands[4*(1)+1] -= bands[4*(1)+0]*bands[4*(0)+1]; bands[4*(1)+2] -= bands[4*(1)+0]*bands[4*(0)+2]; bands[4*(1)+3] -= bands[4*(1)+0]*bands[4*(0)+3]; bands[4*(0)+0] = 0.0; bands[4*(1)+2] /= bands[4*(1)+1]; bands[4*(1)+3] /= bands[4*(1)+1]; bands[4*(1)+1] = 1.0; // Now do rows 2 through M+1 for (int row=2; row < (M+1); row++) { bands[4*(row)+1] -= bands[4*(row)+0]*bands[4*(row-1)+2]; bands[4*(row)+3] -= bands[4*(row)+0]*bands[4*(row-1)+3]; bands[4*(row)+2] /= bands[4*(row)+1]; bands[4*(row)+3] /= bands[4*(row)+1]; bands[4*(row)+0] = 0.0; bands[4*(row)+1] = 1.0; } // Do last row bands[4*(M+1)+1] -= bands[4*(M+1)+0]*bands[4*(M-1)+2]; bands[4*(M+1)+3] -= bands[4*(M+1)+0]*bands[4*(M-1)+3]; bands[4*(M+1)+2] -= bands[4*(M+1)+1]*bands[4*(M)+2]; bands[4*(M+1)+3] -= bands[4*(M+1)+1]*bands[4*(M)+3]; bands[4*(M+1)+3] /= bands[4*(M+1)+2]; bands[4*(M+1)+2] = 1.0; coefs[(M+1)*cstride] = bands[4*(M+1)+3]; // Now back substitute up for (int row=M; row>0; row--) coefs[row*cstride] = bands[4*(row)+3] - bands[4*(row)+2]*coefs[cstride*(row+1)]; // Finish with first row coefs[0] = bands[4*(0)+3] - bands[4*(0)+1]*coefs[1*cstride] - bands[4*(0)+2]*coefs[2*cstride]; } // On input, bands should be filled with: // row 0 : abcdInitial from boundary conditions // rows 1:M: basis functions in first 3 cols, data in last // row M+1 : abcdFinal from boundary conditions // cstride gives the stride between values in coefs. 
// On exit, coefs will contain interpolating B-spline coefs
void
solve_periodic_interp_1d_s (float bands[], float coefs[],
                            int M, size_t cstride) //int M, int cstride)
{
  float lastCol[M];
  // Now solve:
  // First and last rows are different
  bands[4*(0)+2] /= bands[4*(0)+1];
  bands[4*(0)+0] /= bands[4*(0)+1];
  bands[4*(0)+3] /= bands[4*(0)+1];
  bands[4*(0)+1]  = 1.0;
  bands[4*(M-1)+1] -= bands[4*(M-1)+2]*bands[4*(0)+0];
  bands[4*(M-1)+3] -= bands[4*(M-1)+2]*bands[4*(0)+3];
  bands[4*(M-1)+2]  = -bands[4*(M-1)+2]*bands[4*(0)+2];
  lastCol[0] = bands[4*(0)+0];
  for (int row=1; row < (M-1); row++) {
    bands[4*(row)+1] -= bands[4*(row)+0] * bands[4*(row-1)+2];
    bands[4*(row)+3] -= bands[4*(row)+0] * bands[4*(row-1)+3];
    lastCol[row]      = -bands[4*(row)+0] * lastCol[row-1];
    bands[4*(row)+0]  = 0.0;
    bands[4*(row)+2] /= bands[4*(row)+1];
    bands[4*(row)+3] /= bands[4*(row)+1];
    lastCol[row]     /= bands[4*(row)+1];
    bands[4*(row)+1]  = 1.0;
    if (row < (M-2)) {
      bands[4*(M-1)+3] -= bands[4*(M-1)+2]*bands[4*(row)+3];
      bands[4*(M-1)+1] -= bands[4*(M-1)+2]*lastCol[row];
      bands[4*(M-1)+2]  = -bands[4*(M-1)+2]*bands[4*(row)+2];
    }
  }
  // Now do last row
  // The [2] element and [0] element are now on top of each other
  bands[4*(M-1)+0] += bands[4*(M-1)+2];
  bands[4*(M-1)+1] -= bands[4*(M-1)+0] * (bands[4*(M-2)+2]+lastCol[M-2]);
  bands[4*(M-1)+3] -= bands[4*(M-1)+0] * bands[4*(M-2)+3];
  bands[4*(M-1)+3] /= bands[4*(M-1)+1];
  coefs[M*cstride]  = bands[4*(M-1)+3];
  for (int row=M-2; row>=0; row--)
    coefs[(row+1)*cstride] = bands[4*(row)+3] - bands[4*(row)+2]*coefs[(row+2)*cstride]
                             - lastCol[row]*coefs[M*cstride];
  coefs[0*cstride]     = coefs[M*cstride];
  coefs[(M+1)*cstride] = coefs[1*cstride];
  coefs[(M+2)*cstride] = coefs[2*cstride];
}

// On input, bands should be filled with:
// row 0 : abcdInitial from boundary conditions
// rows 1:M: basis functions in first 3 cols, data in last
// row M+1 : abcdFinal from boundary conditions
// cstride gives the stride between values in coefs.
// On exit, coefs with contain interpolating B-spline coefs void solve_antiperiodic_interp_1d_s (float bands[], float coefs[], int M, int cstride) { bands[4*0+0] *= -1.0; bands[4*(M-1)+2] *= -1.0; float lastCol[M]; // Now solve: // First and last rows are different bands[4*(0)+2] /= bands[4*(0)+1]; bands[4*(0)+0] /= bands[4*(0)+1]; bands[4*(0)+3] /= bands[4*(0)+1]; bands[4*(0)+1] = 1.0; bands[4*(M-1)+1] -= bands[4*(M-1)+2]*bands[4*(0)+0]; bands[4*(M-1)+3] -= bands[4*(M-1)+2]*bands[4*(0)+3]; bands[4*(M-1)+2] = -bands[4*(M-1)+2]*bands[4*(0)+2]; lastCol[0] = bands[4*(0)+0]; for (int row=1; row < (M-1); row++) { bands[4*(row)+1] -= bands[4*(row)+0] * bands[4*(row-1)+2]; bands[4*(row)+3] -= bands[4*(row)+0] * bands[4*(row-1)+3]; lastCol[row] = -bands[4*(row)+0] * lastCol[row-1]; bands[4*(row)+0] = 0.0; bands[4*(row)+2] /= bands[4*(row)+1]; bands[4*(row)+3] /= bands[4*(row)+1]; lastCol[row] /= bands[4*(row)+1]; bands[4*(row)+1] = 1.0; if (row < (M-2)) { bands[4*(M-1)+3] -= bands[4*(M-1)+2]*bands[4*(row)+3]; bands[4*(M-1)+1] -= bands[4*(M-1)+2]*lastCol[row]; bands[4*(M-1)+2] = -bands[4*(M-1)+2]*bands[4*(row)+2]; } } // Now do last row // The [2] element and [0] element are now on top of each other bands[4*(M-1)+0] += bands[4*(M-1)+2]; bands[4*(M-1)+1] -= bands[4*(M-1)+0] * (bands[4*(M-2)+2]+lastCol[M-2]); bands[4*(M-1)+3] -= bands[4*(M-1)+0] * bands[4*(M-2)+3]; bands[4*(M-1)+3] /= bands[4*(M-1)+1]; coefs[M*cstride] = bands[4*(M-1)+3]; for (int row=M-2; row>=0; row--) coefs[(row+1)*cstride] = bands[4*(row)+3] - bands[4*(row)+2]*coefs[(row+2)*cstride] - lastCol[row]*coefs[M*cstride]; coefs[0*cstride] = -coefs[M*cstride]; coefs[(M+1)*cstride] = -coefs[1*cstride]; coefs[(M+2)*cstride] = -coefs[2*cstride]; } #ifdef HIGH_PRECISION void find_coefs_1d_s (Ugrid grid, BCtype_s bc, float *data, intptr_t dstride, float *coefs, intptr_t cstride) { BCtype_d d_bc; double *d_data, *d_coefs; d_bc.lCode = bc.lCode; d_bc.rCode = bc.rCode; d_bc.lVal = bc.lVal; d_bc.rVal = bc.rVal; int M = grid.num, N; if (bc.lCode == PERIODIC || bc.lCode == ANTIPERIODIC) N = M+3; else N = M+2; d_data = malloc (N*sizeof(double)); d_coefs = malloc (N*sizeof(double)); for (int i=0; i<M; i++) d_data[i] = data[i*dstride]; find_coefs_1d_d (grid, d_bc, d_data, 1, d_coefs, 1); for (int i=0; i<N; i++) coefs[i*cstride] = d_coefs[i]; free (d_data); free (d_coefs); } #else void find_coefs_1d_s (Ugrid grid, BCtype_s bc, float *data, intptr_t dstride, float *coefs, intptr_t cstride) { size_t M = grid.num; float basis[4] = {1.0/6.0, 2.0/3.0, 1.0/6.0, 0.0}; if (bc.lCode == PERIODIC || bc.lCode == ANTIPERIODIC) { #ifdef HAVE_C_VARARRAYS float bands[4*M]; #else float *bands = malloc(4*M*sizeof(float)); #endif for (size_t i=0; i<M; i++) { bands[4*i+0] = basis[0]; bands[4*i+1] = basis[1]; bands[4*i+2] = basis[2]; bands[4*i+3] = data[i*dstride]; } if (bc.lCode == PERIODIC) solve_periodic_interp_1d_s (bands, coefs, M, cstride); else solve_antiperiodic_interp_1d_s (bands, coefs, M, cstride); #ifndef HAVE_C_VARARRAYS free (bands); #endif } else { // Setup boundary conditions float abcd_left[4], abcd_right[4]; // Left boundary if (bc.lCode == FLAT || bc.lCode == NATURAL) bc.lVal = 0.0; if (bc.lCode == FLAT || bc.lCode == DERIV1) { abcd_left[0] = -0.5 * grid.delta_inv; abcd_left[1] = 0.0 * grid.delta_inv; abcd_left[2] = 0.5 * grid.delta_inv; abcd_left[3] = bc.lVal; } if (bc.lCode == NATURAL || bc.lCode == DERIV2) { abcd_left[0] = 1.0 * grid.delta_inv * grid.delta_inv; abcd_left[1] =-2.0 * grid.delta_inv * grid.delta_inv; abcd_left[2] = 1.0 * grid.delta_inv * 
grid.delta_inv; abcd_left[3] = bc.lVal; } // Right boundary if (bc.rCode == FLAT || bc.rCode == NATURAL) bc.rVal = 0.0; if (bc.rCode == FLAT || bc.rCode == DERIV1) { abcd_right[0] = -0.5 * grid.delta_inv; abcd_right[1] = 0.0 * grid.delta_inv; abcd_right[2] = 0.5 * grid.delta_inv; abcd_right[3] = bc.rVal; } if (bc.rCode == NATURAL || bc.rCode == DERIV2) { abcd_right[0] = 1.0 *grid.delta_inv * grid.delta_inv; abcd_right[1] =-2.0 *grid.delta_inv * grid.delta_inv; abcd_right[2] = 1.0 *grid.delta_inv * grid.delta_inv; abcd_right[3] = bc.rVal; } #ifdef HAVE_C_VARARRAYS float bands[4*(M+2)]; #else float *bands = malloc ((M+2)*4*sizeof(float)); #endif for (int i=0; i<4; i++) { bands[4*( 0 )+i] = abcd_left[i]; bands[4*(M+1)+i] = abcd_right[i]; } for (int i=0; i<M; i++) { for (int j=0; j<3; j++) bands[4*(i+1)+j] = basis[j]; bands[4*(i+1)+3] = data[i*dstride]; } // Now, solve for coefficients solve_deriv_interp_1d_s (bands, coefs, M, cstride); #ifndef HAVE_C_VARARRAYS free (bands); #endif } } #endif //////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////// //// Single-Precision, Real Creation Routines //// //////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////// // On input, bands should be filled with: // row 0 : abcdInitial from boundary conditions // rows 1:M: basis functions in first 3 cols, data in last // row M+1 : abcdFinal from boundary conditions // cstride gives the stride between values in coefs. // On exit, coefs with contain interpolating B-spline coefs UBspline_1d_s* create_UBspline_1d_s (Ugrid x_grid, BCtype_s xBC, float *data) { // Create new spline UBspline_1d_s* restrict spline = malloc (sizeof(UBspline_1d_s)); spline->spcode = U1D; spline->tcode = SINGLE_REAL; spline->xBC = xBC; spline->x_grid = x_grid; // Setup internal variables int M = x_grid.num; int N; if (xBC.lCode == PERIODIC || xBC.lCode == ANTIPERIODIC) { x_grid.delta = (x_grid.end-x_grid.start)/(double)(x_grid.num); N = M+3; } else { x_grid.delta = (x_grid.end-x_grid.start)/(double)(x_grid.num-1); N = M+2; } x_grid.delta_inv = 1.0/x_grid.delta; spline->x_grid = x_grid; #ifndef HAVE_SSE2 spline->coefs = malloc (sizeof(float)*N); #else posix_memalign ((void**)&spline->coefs, 16, (sizeof(float)*N)); #endif find_coefs_1d_s (spline->x_grid, xBC, data, 1, spline->coefs, 1); init_sse_data(); return spline; } void recompute_UBspline_1d_s (UBspline_1d_s* spline, float *data) { find_coefs_1d_s (spline->x_grid, spline->xBC, data, 1, spline->coefs, 1); } UBspline_2d_s* create_UBspline_2d_s (Ugrid x_grid, Ugrid y_grid, BCtype_s xBC, BCtype_s yBC, float *data) { // Create new spline UBspline_2d_s* restrict spline = malloc (sizeof(UBspline_2d_s)); spline->spcode = U2D; spline->tcode = SINGLE_REAL; spline->xBC = xBC; spline->yBC = yBC; // Setup internal variables int Mx = x_grid.num; int My = y_grid.num; int Nx, Ny; if (xBC.lCode == PERIODIC || xBC.lCode == ANTIPERIODIC) Nx = Mx+3; else Nx = Mx+2; x_grid.delta = (x_grid.end - x_grid.start)/(double)(Nx-3); x_grid.delta_inv = 1.0/x_grid.delta; spline->x_grid = x_grid; if (yBC.lCode == PERIODIC || yBC.lCode == ANTIPERIODIC) Ny = My+3; else Ny = My+2; y_grid.delta = (y_grid.end - y_grid.start)/(double)(Ny-3); y_grid.delta_inv = 1.0/y_grid.delta; spline->y_grid = y_grid; spline->x_stride = Ny; #ifndef HAVE_SSE2 spline->coefs = malloc (sizeof(float)*Nx*Ny); #else posix_memalign ((void**)&spline->coefs, 16, sizeof(float)*Nx*Ny); #endif // First, solve in the 
X-direction for (int iy=0; iy<My; iy++) { intptr_t doffset = iy; intptr_t coffset = iy; find_coefs_1d_s (spline->x_grid, spline->xBC, data+doffset, My, spline->coefs+coffset, Ny); } // Now, solve in the Y-direction for (int ix=0; ix<Nx; ix++) { intptr_t doffset = ix*Ny; intptr_t coffset = ix*Ny; find_coefs_1d_s (spline->y_grid, spline->yBC, spline->coefs+doffset, 1, spline->coefs+coffset, 1); } init_sse_data(); return spline; } void recompute_UBspline_2d_s (UBspline_2d_s* spline, float *data) { int Mx = spline->x_grid.num; int My = spline->y_grid.num; int Nx, Ny; if (spline->xBC.lCode == PERIODIC || spline->xBC.lCode == ANTIPERIODIC) Nx = Mx+3; else Nx = Mx+2; if (spline->yBC.lCode == PERIODIC || spline->yBC.lCode == ANTIPERIODIC) Ny = My+3; else Ny = My+2; // First, solve in the X-direction for (int iy=0; iy<My; iy++) { intptr_t doffset = iy; intptr_t coffset = iy; find_coefs_1d_s (spline->x_grid, spline->xBC, data+doffset, My, spline->coefs+coffset, Ny); } // Now, solve in the Y-direction for (int ix=0; ix<Nx; ix++) { intptr_t doffset = ix*Ny; intptr_t coffset = ix*Ny; find_coefs_1d_s (spline->y_grid, spline->yBC, spline->coefs+doffset, 1, spline->coefs+coffset, 1); } } UBspline_3d_s* create_UBspline_3d_s (Ugrid x_grid, Ugrid y_grid, Ugrid z_grid, BCtype_s xBC, BCtype_s yBC, BCtype_s zBC, float *data) { // Create new spline UBspline_3d_s* spline = malloc (sizeof(UBspline_3d_s)); spline->spcode = U3D; spline->tcode = SINGLE_REAL; spline->xBC = xBC; spline->yBC = yBC; spline->zBC = zBC; // Setup internal variables int Mx = x_grid.num; int My = y_grid.num; int Mz = z_grid.num; int Nx, Ny, Nz; if (xBC.lCode == PERIODIC || xBC.lCode == ANTIPERIODIC) Nx = Mx+3; else Nx = Mx+2; x_grid.delta = (x_grid.end - x_grid.start)/(double)(Nx-3); x_grid.delta_inv = 1.0/x_grid.delta; spline->x_grid = x_grid; if (yBC.lCode == PERIODIC || yBC.lCode == ANTIPERIODIC) Ny = My+3; else Ny = My+2; y_grid.delta = (y_grid.end - y_grid.start)/(double)(Ny-3); y_grid.delta_inv = 1.0/y_grid.delta; spline->y_grid = y_grid; if (zBC.lCode == PERIODIC || zBC.lCode == ANTIPERIODIC) Nz = Mz+3; else Nz = Mz+2; z_grid.delta = (z_grid.end - z_grid.start)/(double)(Nz-3); z_grid.delta_inv = 1.0/z_grid.delta; spline->z_grid = z_grid; spline->x_stride = Ny*Nz; spline->y_stride = Nz; spline->coefs_size=(size_t)Nx*(size_t)Ny*(size_t)Nz; #ifndef HAVE_SSE2 spline->coefs = malloc (sizeof(float)*spline->coefs_size); #else posix_memalign ((void**)&spline->coefs, 16, (sizeof(float)*spline->coefs_size)); #endif // First, solve in the X-direction for (int iy=0; iy<My; iy++) for (int iz=0; iz<Mz; iz++) { intptr_t doffset = iy*Mz+iz; intptr_t coffset = iy*Nz+iz; find_coefs_1d_s (spline->x_grid, xBC, data+doffset, My*Mz, spline->coefs+coffset, Ny*Nz); } // Now, solve in the Y-direction for (int ix=0; ix<Nx; ix++) for (int iz=0; iz<Nz; iz++) { intptr_t doffset = ix*Ny*Nz + iz; intptr_t coffset = ix*Ny*Nz + iz; find_coefs_1d_s (spline->y_grid, yBC, spline->coefs+doffset, Nz, spline->coefs+coffset, Nz); } // Now, solve in the Z-direction for (int ix=0; ix<Nx; ix++) for (int iy=0; iy<Ny; iy++) { intptr_t doffset = (ix*Ny+iy)*Nz; intptr_t coffset = (ix*Ny+iy)*Nz; find_coefs_1d_s (spline->z_grid, zBC, spline->coefs+doffset, 1, spline->coefs+coffset, 1); } init_sse_data(); return spline; } void recompute_UBspline_3d_s (UBspline_3d_s* spline, float *data) { int Mx = spline->x_grid.num; int My = spline->y_grid.num; int Mz = spline->z_grid.num; int Nx, Ny, Nz; if (spline->xBC.lCode == PERIODIC || spline->xBC.lCode == ANTIPERIODIC) Nx = Mx+3; else Nx = 
Mx+2; if (spline->yBC.lCode == PERIODIC || spline->yBC.lCode == ANTIPERIODIC) Ny = My+3; else Ny = My+2; if (spline->zBC.lCode == PERIODIC || spline->zBC.lCode == ANTIPERIODIC) Nz = Mz+3; else Nz = Mz+2; // First, solve in the X-direction for (int iy=0; iy<My; iy++) for (int iz=0; iz<Mz; iz++) { intptr_t doffset = iy*Mz+iz; intptr_t coffset = iy*Nz+iz; find_coefs_1d_s (spline->x_grid, spline->xBC, data+doffset, My*Mz, spline->coefs+coffset, Ny*Nz); } // Now, solve in the Y-direction for (int ix=0; ix<Nx; ix++) for (int iz=0; iz<Nz; iz++) { intptr_t doffset = ix*Ny*Nz + iz; intptr_t coffset = ix*Ny*Nz + iz; find_coefs_1d_s (spline->y_grid, spline->yBC, spline->coefs+doffset, Nz, spline->coefs+coffset, Nz); } // Now, solve in the Z-direction for (int ix=0; ix<Nx; ix++) for (int iy=0; iy<Ny; iy++) { intptr_t doffset = (ix*Ny+iy)*Nz; intptr_t coffset = (ix*Ny+iy)*Nz; find_coefs_1d_s (spline->z_grid, spline->zBC, spline->coefs+doffset, 1, spline->coefs+coffset, 1); } } //////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////// //// Single-Precision, Complex Creation Routines //// //////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////// // On input, bands should be filled with: // row 0 : abcdInitial from boundary conditions // rows 1:M: basis functions in first 3 cols, data in last // row M+1 : abcdFinal from boundary conditions // cstride gives the stride between values in coefs. // On exit, coefs with contain interpolating B-spline coefs UBspline_1d_c* create_UBspline_1d_c (Ugrid x_grid, BCtype_c xBC, complex_float *data) { // Create new spline UBspline_1d_c* restrict spline = malloc (sizeof(UBspline_1d_c)); spline->spcode = U1D; spline->tcode = SINGLE_COMPLEX; spline->xBC = xBC; // Setup internal variables int M = x_grid.num; int N; if (xBC.lCode == PERIODIC || xBC.lCode == ANTIPERIODIC) { x_grid.delta = (x_grid.end-x_grid.start)/(double)(x_grid.num); N = M+3; } else { x_grid.delta = (x_grid.end-x_grid.start)/(double)(x_grid.num-1); N = M+2; } x_grid.delta_inv = 1.0/x_grid.delta; spline->x_grid = x_grid; #ifndef HAVE_SSE2 spline->coefs = malloc (2*sizeof(float)*N); #else posix_memalign ((void**)&spline->coefs, 16, 2*sizeof(float)*N); #endif BCtype_s xBC_r, xBC_i; xBC_r.lCode = xBC.lCode; xBC_r.rCode = xBC.rCode; xBC_r.lVal = xBC.lVal_r; xBC_r.rVal = xBC.rVal_r; xBC_i.lCode = xBC.lCode; xBC_i.rCode = xBC.rCode; xBC_i.lVal = xBC.lVal_i; xBC_i.rVal = xBC.rVal_i; // Real part find_coefs_1d_s (spline->x_grid, xBC_r, (float*)data, 2, (float*)spline->coefs, 2); // Imaginarty part find_coefs_1d_s (spline->x_grid, xBC_i, ((float*)data)+1, 2, ((float*)spline->coefs+1), 2); init_sse_data(); return spline; } void recompute_UBspline_1d_c (UBspline_1d_c* spline, complex_float *data) { BCtype_s xBC_r, xBC_i; xBC_r.lCode = spline->xBC.lCode; xBC_r.rCode = spline->xBC.rCode; xBC_r.lVal = spline->xBC.lVal_r; xBC_r.rVal = spline->xBC.rVal_r; xBC_i.lCode = spline->xBC.lCode; xBC_i.rCode = spline->xBC.rCode; xBC_i.lVal = spline->xBC.lVal_i; xBC_i.rVal = spline->xBC.rVal_i; // Real part find_coefs_1d_s (spline->x_grid, xBC_r, (float*)data, 2, (float*)spline->coefs, 2); // Imaginarty part find_coefs_1d_s (spline->x_grid, xBC_i, ((float*)data)+1, 2, ((float*)spline->coefs+1), 2); } UBspline_2d_c* create_UBspline_2d_c (Ugrid x_grid, Ugrid y_grid, BCtype_c xBC, BCtype_c yBC, complex_float *data) { // Create new spline UBspline_2d_c* restrict spline = malloc (sizeof(UBspline_2d_c)); 
spline->spcode = U2D; spline->tcode = SINGLE_COMPLEX; spline->xBC = xBC; spline->yBC = yBC; // Setup internal variables int Mx = x_grid.num; int My = y_grid.num; int Nx, Ny; if (xBC.lCode == PERIODIC || xBC.lCode == ANTIPERIODIC) Nx = Mx+3; else Nx = Mx+2; x_grid.delta = (x_grid.end - x_grid.start)/(double)(Nx-3); x_grid.delta_inv = 1.0/x_grid.delta; spline->x_grid = x_grid; if (yBC.lCode == PERIODIC || yBC.lCode == ANTIPERIODIC) Ny = My+3; else Ny = My+2; y_grid.delta = (y_grid.end - y_grid.start)/(double)(Ny-3); y_grid.delta_inv = 1.0/y_grid.delta; spline->y_grid = y_grid; spline->x_stride = Ny; #ifndef HAVE_SSE2 spline->coefs = malloc (2*sizeof(float)*Nx*Ny); #else posix_memalign ((void**)&spline->coefs, 16, 2*sizeof(float)*Nx*Ny); #endif BCtype_s xBC_r, xBC_i, yBC_r, yBC_i; xBC_r.lCode = xBC.lCode; xBC_r.rCode = xBC.rCode; xBC_r.lVal = xBC.lVal_r; xBC_r.rVal = xBC.rVal_r; xBC_i.lCode = xBC.lCode; xBC_i.rCode = xBC.rCode; xBC_i.lVal = xBC.lVal_i; xBC_i.rVal = xBC.rVal_i; yBC_r.lCode = yBC.lCode; yBC_r.rCode = yBC.rCode; yBC_r.lVal = yBC.lVal_r; yBC_r.rVal = yBC.rVal_r; yBC_i.lCode = yBC.lCode; yBC_i.rCode = yBC.rCode; yBC_i.lVal = yBC.lVal_i; yBC_i.rVal = yBC.rVal_i; // First, solve in the X-direction for (int iy=0; iy<My; iy++) { intptr_t doffset = 2*iy; intptr_t coffset = 2*iy; // Real part find_coefs_1d_s (spline->x_grid, xBC_r, ((float*)data)+doffset, 2*My, (float*)spline->coefs+coffset, 2*Ny); // Imag part find_coefs_1d_s (spline->x_grid, xBC_i, ((float*)data)+doffset+1, 2*My, ((float*)spline->coefs)+coffset+1, 2*Ny); } // Now, solve in the Y-direction for (int ix=0; ix<Nx; ix++) { intptr_t doffset = 2*ix*Ny; intptr_t coffset = 2*ix*Ny; // Real part find_coefs_1d_s (spline->y_grid, yBC_r, ((float*)spline->coefs)+doffset, 2, ((float*)spline->coefs)+coffset, 2); // Imag part find_coefs_1d_s (spline->y_grid, yBC_i, ((float*)spline->coefs)+doffset+1, 2, ((float*)spline->coefs)+coffset+1, 2); } init_sse_data(); return spline; } void recompute_UBspline_2d_c (UBspline_2d_c* spline, complex_float *data) { // Setup internal variables int Mx = spline->x_grid.num; int My = spline->y_grid.num; int Nx, Ny; if (spline->xBC.lCode == PERIODIC || spline->xBC.lCode == ANTIPERIODIC) Nx = Mx+3; else Nx = Mx+2; if (spline->yBC.lCode == PERIODIC || spline->yBC.lCode == ANTIPERIODIC) Ny = My+3; else Ny = My+2; BCtype_s xBC_r, xBC_i, yBC_r, yBC_i; xBC_r.lCode = spline->xBC.lCode; xBC_r.rCode = spline->xBC.rCode; xBC_r.lVal = spline->xBC.lVal_r; xBC_r.rVal = spline->xBC.rVal_r; xBC_i.lCode = spline->xBC.lCode; xBC_i.rCode = spline->xBC.rCode; xBC_i.lVal = spline->xBC.lVal_i; xBC_i.rVal = spline->xBC.rVal_i; yBC_r.lCode = spline->yBC.lCode; yBC_r.rCode = spline->yBC.rCode; yBC_r.lVal = spline->yBC.lVal_r; yBC_r.rVal = spline->yBC.rVal_r; yBC_i.lCode = spline->yBC.lCode; yBC_i.rCode = spline->yBC.rCode; yBC_i.lVal = spline->yBC.lVal_i; yBC_i.rVal = spline->yBC.rVal_i; // First, solve in the X-direction for (int iy=0; iy<My; iy++) { intptr_t doffset = 2*iy; intptr_t coffset = 2*iy; // Real part find_coefs_1d_s (spline->x_grid, xBC_r, ((float*)data)+doffset, 2*My, (float*)spline->coefs+coffset, 2*Ny); // Imag part find_coefs_1d_s (spline->x_grid, xBC_i, ((float*)data)+doffset+1, 2*My, ((float*)spline->coefs)+coffset+1, 2*Ny); } // Now, solve in the Y-direction for (int ix=0; ix<Nx; ix++) { intptr_t doffset = 2*ix*Ny; intptr_t coffset = 2*ix*Ny; // Real part find_coefs_1d_s (spline->y_grid, yBC_r, ((float*)spline->coefs)+doffset, 2, ((float*)spline->coefs)+coffset, 2); // Imag part find_coefs_1d_s 
(spline->y_grid, yBC_i, ((float*)spline->coefs)+doffset+1, 2, ((float*)spline->coefs)+coffset+1, 2); } } UBspline_3d_c* create_UBspline_3d_c (Ugrid x_grid, Ugrid y_grid, Ugrid z_grid, BCtype_c xBC, BCtype_c yBC, BCtype_c zBC, complex_float *data) { // Create new spline UBspline_3d_c* restrict spline = malloc (sizeof(UBspline_3d_c)); spline->spcode = U3D; spline->tcode = SINGLE_COMPLEX; spline->xBC = xBC; spline->yBC = yBC; spline->zBC = zBC; // Setup internal variables int Mx = x_grid.num; int My = y_grid.num; int Mz = z_grid.num; int Nx, Ny, Nz; if (xBC.lCode == PERIODIC || xBC.lCode == ANTIPERIODIC) Nx = Mx+3; else Nx = Mx+2; x_grid.delta = (x_grid.end - x_grid.start)/(double)(Nx-3); x_grid.delta_inv = 1.0/x_grid.delta; spline->x_grid = x_grid; if (yBC.lCode == PERIODIC || yBC.lCode == ANTIPERIODIC) Ny = My+3; else Ny = My+2; y_grid.delta = (y_grid.end - y_grid.start)/(double)(Ny-3); y_grid.delta_inv = 1.0/y_grid.delta; spline->y_grid = y_grid; if (zBC.lCode == PERIODIC || zBC.lCode == ANTIPERIODIC) Nz = Mz+3; else Nz = Mz+2; z_grid.delta = (z_grid.end - z_grid.start)/(double)(Nz-3); z_grid.delta_inv = 1.0/z_grid.delta; spline->z_grid = z_grid; spline->x_stride = Ny*Nz; spline->y_stride = Nz; #ifndef HAVE_SSE2 spline->coefs = malloc (2*sizeof(float)*Nx*Ny*Nz); #else posix_memalign ((void**)&spline->coefs, 16, 2*sizeof(float)*Nx*Ny*Nz); #endif BCtype_s xBC_r, xBC_i, yBC_r, yBC_i, zBC_r, zBC_i; xBC_r.lCode = xBC.lCode; xBC_r.rCode = xBC.rCode; xBC_r.lVal = xBC.lVal_r; xBC_r.rVal = xBC.rVal_r; xBC_i.lCode = xBC.lCode; xBC_i.rCode = xBC.rCode; xBC_i.lVal = xBC.lVal_i; xBC_i.rVal = xBC.rVal_i; yBC_r.lCode = yBC.lCode; yBC_r.rCode = yBC.rCode; yBC_r.lVal = yBC.lVal_r; yBC_r.rVal = yBC.rVal_r; yBC_i.lCode = yBC.lCode; yBC_i.rCode = yBC.rCode; yBC_i.lVal = yBC.lVal_i; yBC_i.rVal = yBC.rVal_i; zBC_r.lCode = zBC.lCode; zBC_r.rCode = zBC.rCode; zBC_r.lVal = zBC.lVal_r; zBC_r.rVal = zBC.rVal_r; zBC_i.lCode = zBC.lCode; zBC_i.rCode = zBC.rCode; zBC_i.lVal = zBC.lVal_i; zBC_i.rVal = zBC.rVal_i; // First, solve in the X-direction for (int iy=0; iy<My; iy++) for (int iz=0; iz<Mz; iz++) { intptr_t doffset = 2*(iy*Mz+iz); intptr_t coffset = 2*(iy*Nz+iz); // Real part find_coefs_1d_s (spline->x_grid, xBC_r, ((float*)data)+doffset, 2*My*Mz, ((float*)spline->coefs)+coffset, 2*Ny*Nz); // Imag part find_coefs_1d_s (spline->x_grid, xBC_i, ((float*)data)+doffset+1, 2*My*Mz, ((float*)spline->coefs)+coffset+1, 2*Ny*Nz); } // Now, solve in the Y-direction for (int ix=0; ix<Nx; ix++) for (int iz=0; iz<Nz; iz++) { intptr_t doffset = 2*(ix*Ny*Nz + iz); intptr_t coffset = 2*(ix*Ny*Nz + iz); // Real part find_coefs_1d_s (spline->y_grid, yBC_r, ((float*)spline->coefs)+doffset, 2*Nz, ((float*)spline->coefs)+coffset, 2*Nz); // Imag part find_coefs_1d_s (spline->y_grid, yBC_i, ((float*)spline->coefs)+doffset+1, 2*Nz, ((float*)spline->coefs)+coffset+1, 2*Nz); } // Now, solve in the Z-direction for (int ix=0; ix<Nx; ix++) for (int iy=0; iy<Ny; iy++) { intptr_t doffset = 2*((ix*Ny+iy)*Nz); intptr_t coffset = 2*((ix*Ny+iy)*Nz); // Real part find_coefs_1d_s (spline->z_grid, zBC_r, ((float*)spline->coefs)+doffset, 2, ((float*)spline->coefs)+coffset, 2); // Imag part find_coefs_1d_s (spline->z_grid, zBC_i, ((float*)spline->coefs)+doffset+1, 2, ((float*)spline->coefs)+coffset+1, 2); } init_sse_data(); return spline; } void recompute_UBspline_3d_c (UBspline_3d_c* spline, complex_float *data) { // Setup internal variables int Mx = spline->x_grid.num; int My = spline->y_grid.num; int Mz = spline->z_grid.num; int Nx, Ny, Nz; if 
(spline->xBC.lCode == PERIODIC || spline->xBC.lCode == ANTIPERIODIC) Nx = Mx+3; else Nx = Mx+2; if (spline->yBC.lCode == PERIODIC || spline->yBC.lCode == ANTIPERIODIC) Ny = My+3; else Ny = My+2; if (spline->zBC.lCode == PERIODIC || spline->zBC.lCode == ANTIPERIODIC) Nz = Mz+3; else Nz = Mz+2; BCtype_s xBC_r, xBC_i, yBC_r, yBC_i, zBC_r, zBC_i; xBC_r.lCode = spline->xBC.lCode; xBC_r.rCode = spline->xBC.rCode; xBC_r.lVal = spline->xBC.lVal_r; xBC_r.rVal = spline->xBC.rVal_r; xBC_i.lCode = spline->xBC.lCode; xBC_i.rCode = spline->xBC.rCode; xBC_i.lVal = spline->xBC.lVal_i; xBC_i.rVal = spline->xBC.rVal_i; yBC_r.lCode = spline->yBC.lCode; yBC_r.rCode = spline->yBC.rCode; yBC_r.lVal = spline->yBC.lVal_r; yBC_r.rVal = spline->yBC.rVal_r; yBC_i.lCode = spline->yBC.lCode; yBC_i.rCode = spline->yBC.rCode; yBC_i.lVal = spline->yBC.lVal_i; yBC_i.rVal = spline->yBC.rVal_i; zBC_r.lCode = spline->zBC.lCode; zBC_r.rCode = spline->zBC.rCode; zBC_r.lVal = spline->zBC.lVal_r; zBC_r.rVal = spline->zBC.rVal_r; zBC_i.lCode = spline->zBC.lCode; zBC_i.rCode = spline->zBC.rCode; zBC_i.lVal = spline->zBC.lVal_i; zBC_i.rVal = spline->zBC.rVal_i; // First, solve in the X-direction for (int iy=0; iy<My; iy++) for (int iz=0; iz<Mz; iz++) { intptr_t doffset = 2*(iy*Mz+iz); intptr_t coffset = 2*(iy*Nz+iz); // Real part find_coefs_1d_s (spline->x_grid, xBC_r, ((float*)data)+doffset, 2*My*Mz, ((float*)spline->coefs)+coffset, 2*Ny*Nz); // Imag part find_coefs_1d_s (spline->x_grid, xBC_i, ((float*)data)+doffset+1, 2*My*Mz, ((float*)spline->coefs)+coffset+1, 2*Ny*Nz); } // Now, solve in the Y-direction for (int ix=0; ix<Nx; ix++) for (int iz=0; iz<Nz; iz++) { intptr_t doffset = 2*(ix*Ny*Nz + iz); intptr_t coffset = 2*(ix*Ny*Nz + iz); // Real part find_coefs_1d_s (spline->y_grid, yBC_r, ((float*)spline->coefs)+doffset, 2*Nz, ((float*)spline->coefs)+coffset, 2*Nz); // Imag part find_coefs_1d_s (spline->y_grid, yBC_i, ((float*)spline->coefs)+doffset+1, 2*Nz, ((float*)spline->coefs)+coffset+1, 2*Nz); } // Now, solve in the Z-direction for (int ix=0; ix<Nx; ix++) for (int iy=0; iy<Ny; iy++) { intptr_t doffset = 2*((ix*Ny+iy)*Nz); intptr_t coffset = 2*((ix*Ny+iy)*Nz); // Real part find_coefs_1d_s (spline->z_grid, zBC_r, ((float*)spline->coefs)+doffset, 2, ((float*)spline->coefs)+coffset, 2); // Imag part find_coefs_1d_s (spline->z_grid, zBC_i, ((float*)spline->coefs)+doffset+1, 2, ((float*)spline->coefs)+coffset+1, 2); } } //////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////// //// Double-Precision, Real Creation Routines //// //////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////// // On input, bands should be filled with: // row 0 : abcdInitial from boundary conditions // rows 1:M: basis functions in first 3 cols, data in last // row M+1 : abcdFinal from boundary conditions // cstride gives the stride between values in coefs. 
// On exit, coefs with contain interpolating B-spline coefs void solve_deriv_interp_1d_d (double bands[], double coefs[], int M, int cstride) { // Solve interpolating equations // First and last rows are different bands[4*(0)+1] /= bands[4*(0)+0]; bands[4*(0)+2] /= bands[4*(0)+0]; bands[4*(0)+3] /= bands[4*(0)+0]; bands[4*(0)+0] = 1.0; bands[4*(1)+1] -= bands[4*(1)+0]*bands[4*(0)+1]; bands[4*(1)+2] -= bands[4*(1)+0]*bands[4*(0)+2]; bands[4*(1)+3] -= bands[4*(1)+0]*bands[4*(0)+3]; bands[4*(0)+0] = 0.0; bands[4*(1)+2] /= bands[4*(1)+1]; bands[4*(1)+3] /= bands[4*(1)+1]; bands[4*(1)+1] = 1.0; // Now do rows 2 through M+1 for (int row=2; row < (M+1); row++) { bands[4*(row)+1] -= bands[4*(row)+0]*bands[4*(row-1)+2]; bands[4*(row)+3] -= bands[4*(row)+0]*bands[4*(row-1)+3]; bands[4*(row)+2] /= bands[4*(row)+1]; bands[4*(row)+3] /= bands[4*(row)+1]; bands[4*(row)+0] = 0.0; bands[4*(row)+1] = 1.0; } // Do last row bands[4*(M+1)+1] -= bands[4*(M+1)+0]*bands[4*(M-1)+2]; bands[4*(M+1)+3] -= bands[4*(M+1)+0]*bands[4*(M-1)+3]; bands[4*(M+1)+2] -= bands[4*(M+1)+1]*bands[4*(M)+2]; bands[4*(M+1)+3] -= bands[4*(M+1)+1]*bands[4*(M)+3]; bands[4*(M+1)+3] /= bands[4*(M+1)+2]; bands[4*(M+1)+2] = 1.0; coefs[(M+1)*cstride] = bands[4*(M+1)+3]; // Now back substitute up for (int row=M; row>0; row--) coefs[row*cstride] = bands[4*(row)+3] - bands[4*(row)+2]*coefs[cstride*(row+1)]; // Finish with first row coefs[0] = bands[4*(0)+3] - bands[4*(0)+1]*coefs[1*cstride] - bands[4*(0)+2]*coefs[2*cstride]; } // On input, bands should be filled with: // row 0 : abcdInitial from boundary conditions // rows 1:M: basis functions in first 3 cols, data in last // row M+1 : abcdFinal from boundary conditions // cstride gives the stride between values in coefs. // On exit, coefs with contain interpolating B-spline coefs void solve_periodic_interp_1d_d (double bands[], double coefs[], int M, intptr_t cstride) { double lastCol[M]; // Now solve: // First and last rows are different bands[4*(0)+2] /= bands[4*(0)+1]; bands[4*(0)+0] /= bands[4*(0)+1]; bands[4*(0)+3] /= bands[4*(0)+1]; bands[4*(0)+1] = 1.0; bands[4*(M-1)+1] -= bands[4*(M-1)+2]*bands[4*(0)+0]; bands[4*(M-1)+3] -= bands[4*(M-1)+2]*bands[4*(0)+3]; bands[4*(M-1)+2] = -bands[4*(M-1)+2]*bands[4*(0)+2]; lastCol[0] = bands[4*(0)+0]; for (int row=1; row < (M-1); row++) { bands[4*(row)+1] -= bands[4*(row)+0] * bands[4*(row-1)+2]; bands[4*(row)+3] -= bands[4*(row)+0] * bands[4*(row-1)+3]; lastCol[row] = -bands[4*(row)+0] * lastCol[row-1]; bands[4*(row)+0] = 0.0; bands[4*(row)+2] /= bands[4*(row)+1]; bands[4*(row)+3] /= bands[4*(row)+1]; lastCol[row] /= bands[4*(row)+1]; bands[4*(row)+1] = 1.0; if (row < (M-2)) { bands[4*(M-1)+3] -= bands[4*(M-1)+2]*bands[4*(row)+3]; bands[4*(M-1)+1] -= bands[4*(M-1)+2]*lastCol[row]; bands[4*(M-1)+2] = -bands[4*(M-1)+2]*bands[4*(row)+2]; } } // Now do last row // The [2] element and [0] element are now on top of each other bands[4*(M-1)+0] += bands[4*(M-1)+2]; bands[4*(M-1)+1] -= bands[4*(M-1)+0] * (bands[4*(M-2)+2]+lastCol[M-2]); bands[4*(M-1)+3] -= bands[4*(M-1)+0] * bands[4*(M-2)+3]; bands[4*(M-1)+3] /= bands[4*(M-1)+1]; coefs[M*cstride] = bands[4*(M-1)+3]; for (int row=M-2; row>=0; row--) coefs[(row+1)*cstride] = bands[4*(row)+3] - bands[4*(row)+2]*coefs[(row+2)*cstride] - lastCol[row]*coefs[M*cstride]; coefs[0*cstride] = coefs[M*cstride]; coefs[(M+1)*cstride] = coefs[1*cstride]; coefs[(M+2)*cstride] = coefs[2*cstride]; } // On input, bands should be filled with: // row 0 : abcdInitial from boundary conditions // rows 1:M: basis functions in first 
3 cols, data in last // row M+1 : abcdFinal from boundary conditions // cstride gives the stride between values in coefs. // On exit, coefs with contain interpolating B-spline coefs void solve_antiperiodic_interp_1d_d (double bands[], double coefs[], int M, int cstride) { double lastCol[M]; bands[4*0+0] *= -1.0; bands[4*(M-1)+2] *= -1.0; // Now solve: // First and last rows are different bands[4*(0)+2] /= bands[4*(0)+1]; bands[4*(0)+0] /= bands[4*(0)+1]; bands[4*(0)+3] /= bands[4*(0)+1]; bands[4*(0)+1] = 1.0; bands[4*(M-1)+1] -= bands[4*(M-1)+2]*bands[4*(0)+0]; bands[4*(M-1)+3] -= bands[4*(M-1)+2]*bands[4*(0)+3]; bands[4*(M-1)+2] = -bands[4*(M-1)+2]*bands[4*(0)+2]; lastCol[0] = bands[4*(0)+0]; for (int row=1; row < (M-1); row++) { bands[4*(row)+1] -= bands[4*(row)+0] * bands[4*(row-1)+2]; bands[4*(row)+3] -= bands[4*(row)+0] * bands[4*(row-1)+3]; lastCol[row] = -bands[4*(row)+0] * lastCol[row-1]; bands[4*(row)+0] = 0.0; bands[4*(row)+2] /= bands[4*(row)+1]; bands[4*(row)+3] /= bands[4*(row)+1]; lastCol[row] /= bands[4*(row)+1]; bands[4*(row)+1] = 1.0; if (row < (M-2)) { bands[4*(M-1)+3] -= bands[4*(M-1)+2]*bands[4*(row)+3]; bands[4*(M-1)+1] -= bands[4*(M-1)+2]*lastCol[row]; bands[4*(M-1)+2] = -bands[4*(M-1)+2]*bands[4*(row)+2]; } } // Now do last row // The [2] element and [0] element are now on top of each other bands[4*(M-1)+0] += bands[4*(M-1)+2]; bands[4*(M-1)+1] -= bands[4*(M-1)+0] * (bands[4*(M-2)+2]+lastCol[M-2]); bands[4*(M-1)+3] -= bands[4*(M-1)+0] * bands[4*(M-2)+3]; bands[4*(M-1)+3] /= bands[4*(M-1)+1]; coefs[M*cstride] = bands[4*(M-1)+3]; for (int row=M-2; row>=0; row--) coefs[(row+1)*cstride] = bands[4*(row)+3] - bands[4*(row)+2]*coefs[(row+2)*cstride] - lastCol[row]*coefs[M*cstride]; coefs[0*cstride] = -coefs[M*cstride]; coefs[(M+1)*cstride] = -coefs[1*cstride]; coefs[(M+2)*cstride] = -coefs[2*cstride]; } void find_coefs_1d_d (Ugrid grid, BCtype_d bc, double *data, intptr_t dstride, double *coefs, intptr_t cstride) { int M = grid.num; double basis[4] = {1.0/6.0, 2.0/3.0, 1.0/6.0, 0.0}; if (bc.lCode == PERIODIC || bc.lCode == ANTIPERIODIC) { #ifdef HAVE_C_VARARRAYS double bands[M*4]; #else double *bands = malloc (4*M*sizeof(double)); #endif for (int i=0; i<M; i++) { bands[4*i+0] = basis[0]; bands[4*i+1] = basis[1]; bands[4*i+2] = basis[2]; bands[4*i+3] = data[i*dstride]; } if (bc.lCode == ANTIPERIODIC) solve_antiperiodic_interp_1d_d (bands, coefs, M, cstride); else solve_periodic_interp_1d_d (bands, coefs, M, cstride); #ifndef HAVE_C_VARARRAYS free (bands); #endif } else { // Setup boundary conditions double abcd_left[4], abcd_right[4]; // Left boundary if (bc.lCode == FLAT || bc.lCode == NATURAL) bc.lVal = 0.0; if (bc.lCode == FLAT || bc.lCode == DERIV1) { abcd_left[0] = -0.5 * grid.delta_inv; abcd_left[1] = 0.0 * grid.delta_inv; abcd_left[2] = 0.5 * grid.delta_inv; abcd_left[3] = bc.lVal; } if (bc.lCode == NATURAL || bc.lCode == DERIV2) { abcd_left[0] = 1.0 * grid.delta_inv * grid.delta_inv; abcd_left[1] =-2.0 * grid.delta_inv * grid.delta_inv; abcd_left[2] = 1.0 * grid.delta_inv * grid.delta_inv; abcd_left[3] = bc.lVal; } // Right boundary if (bc.rCode == FLAT || bc.rCode == NATURAL) bc.rVal = 0.0; if (bc.rCode == FLAT || bc.rCode == DERIV1) { abcd_right[0] = -0.5 * grid.delta_inv; abcd_right[1] = 0.0 * grid.delta_inv; abcd_right[2] = 0.5 * grid.delta_inv; abcd_right[3] = bc.rVal; } if (bc.rCode == NATURAL || bc.rCode == DERIV2) { abcd_right[0] = 1.0 *grid.delta_inv * grid.delta_inv; abcd_right[1] =-2.0 *grid.delta_inv * grid.delta_inv; abcd_right[2] = 1.0 *grid.delta_inv * 
grid.delta_inv; abcd_right[3] = bc.rVal; } #ifdef HAVE_C_VARARRAYS double bands[(M+2)*4]; #else double *bands = malloc ((M+2)*4*sizeof(double)); #endif for (int i=0; i<4; i++) { bands[4*( 0 )+i] = abcd_left[i]; bands[4*(M+1)+i] = abcd_right[i]; } for (int i=0; i<M; i++) { for (int j=0; j<3; j++) bands[4*(i+1)+j] = basis[j]; bands[4*(i+1)+3] = data[i*dstride]; } // Now, solve for coefficients solve_deriv_interp_1d_d (bands, coefs, M, cstride); #ifndef HAVE_C_VARARRAYS free (bands); #endif } } UBspline_1d_d* create_UBspline_1d_d (Ugrid x_grid, BCtype_d xBC, double *data) { // Create new spline UBspline_1d_d* restrict spline = malloc (sizeof(UBspline_1d_d)); spline->spcode = U1D; spline->tcode = DOUBLE_REAL; spline->xBC = xBC; // Setup internal variables int M = x_grid.num; int N; if (xBC.lCode == PERIODIC || xBC.lCode == ANTIPERIODIC) { x_grid.delta = (x_grid.end-x_grid.start)/(double)(x_grid.num); N = M+3; } else { x_grid.delta = (x_grid.end-x_grid.start)/(double)(x_grid.num-1); N = M+2; } x_grid.delta_inv = 1.0/x_grid.delta; spline->x_grid = x_grid; #ifndef HAVE_SSE2 spline->coefs = malloc (sizeof(double)*N); #else posix_memalign ((void**)&spline->coefs, 16, sizeof(double)*N); #endif find_coefs_1d_d (spline->x_grid, xBC, data, 1, spline->coefs, 1); init_sse_data(); return spline; } void recompute_UBspline_1d_d (UBspline_1d_d* spline, double *data) { find_coefs_1d_d (spline->x_grid, spline->xBC, data, 1, spline->coefs, 1); } UBspline_2d_d* create_UBspline_2d_d (Ugrid x_grid, Ugrid y_grid, BCtype_d xBC, BCtype_d yBC, double *data) { // Create new spline UBspline_2d_d* restrict spline = malloc (sizeof(UBspline_2d_d)); spline->spcode = U2D; spline->tcode = DOUBLE_REAL; spline->xBC = xBC; spline->yBC = yBC; // Setup internal variables int Mx = x_grid.num; int My = y_grid.num; int Nx, Ny; if (xBC.lCode == PERIODIC || xBC.lCode == ANTIPERIODIC) Nx = Mx+3; else Nx = Mx+2; x_grid.delta = (x_grid.end - x_grid.start)/(double)(Nx-3); x_grid.delta_inv = 1.0/x_grid.delta; spline->x_grid = x_grid; if (yBC.lCode == PERIODIC || yBC.lCode == ANTIPERIODIC) Ny = My+3; else Ny = My+2; y_grid.delta = (y_grid.end - y_grid.start)/(double)(Ny-3); y_grid.delta_inv = 1.0/y_grid.delta; spline->y_grid = y_grid; spline->x_stride = Ny; #ifndef HAVE_SSE2 spline->coefs = malloc (sizeof(double)*Nx*Ny); #else posix_memalign ((void**)&spline->coefs, 16, (sizeof(double)*Nx*Ny)); #endif // First, solve in the X-direction for (int iy=0; iy<My; iy++) { intptr_t doffset = iy; intptr_t coffset = iy; find_coefs_1d_d (spline->x_grid, xBC, data+doffset, My, spline->coefs+coffset, Ny); } // Now, solve in the Y-direction for (int ix=0; ix<Nx; ix++) { intptr_t doffset = ix*Ny; intptr_t coffset = ix*Ny; find_coefs_1d_d (spline->y_grid, yBC, spline->coefs+doffset, 1, spline->coefs+coffset, 1); } init_sse_data(); return spline; } void recompute_UBspline_2d_d (UBspline_2d_d* spline, double *data) { int Mx = spline->x_grid.num; int My = spline->y_grid.num; int Nx, Ny; if (spline->xBC.lCode == PERIODIC || spline->xBC.lCode == ANTIPERIODIC) Nx = Mx+3; else Nx = Mx+2; if (spline->yBC.lCode == PERIODIC || spline->yBC.lCode == ANTIPERIODIC) Ny = My+3; else Ny = My+2; // First, solve in the X-direction for (int iy=0; iy<My; iy++) { intptr_t doffset = iy; intptr_t coffset = iy; find_coefs_1d_d (spline->x_grid, spline->xBC, data+doffset, My, spline->coefs+coffset, Ny); } // Now, solve in the Y-direction for (int ix=0; ix<Nx; ix++) { intptr_t doffset = ix*Ny; intptr_t coffset = ix*Ny; find_coefs_1d_d (spline->y_grid, spline->yBC, 
spline->coefs+doffset, 1, spline->coefs+coffset, 1); } } UBspline_3d_d* create_UBspline_3d_d (Ugrid x_grid, Ugrid y_grid, Ugrid z_grid, BCtype_d xBC, BCtype_d yBC, BCtype_d zBC, double *data) { // Create new spline UBspline_3d_d* restrict spline = malloc (sizeof(UBspline_3d_d)); spline->spcode = U3D; spline->tcode = DOUBLE_REAL; spline->xBC = xBC; spline->yBC = yBC; spline->zBC = zBC; // Setup internal variables int Mx = x_grid.num; int My = y_grid.num; int Mz = z_grid.num; int Nx, Ny, Nz; if (xBC.lCode == PERIODIC || xBC.lCode == ANTIPERIODIC) Nx = Mx+3; else Nx = Mx+2; x_grid.delta = (x_grid.end - x_grid.start)/(double)(Nx-3); x_grid.delta_inv = 1.0/x_grid.delta; spline->x_grid = x_grid; if (yBC.lCode == PERIODIC || yBC.lCode == ANTIPERIODIC) Ny = My+3; else Ny = My+2; y_grid.delta = (y_grid.end - y_grid.start)/(double)(Ny-3); y_grid.delta_inv = 1.0/y_grid.delta; spline->y_grid = y_grid; if (zBC.lCode == PERIODIC || zBC.lCode == ANTIPERIODIC) Nz = Mz+3; else Nz = Mz+2; z_grid.delta = (z_grid.end - z_grid.start)/(double)(Nz-3); z_grid.delta_inv = 1.0/z_grid.delta; spline->z_grid = z_grid; spline->x_stride = Ny*Nz; spline->y_stride = Nz; spline->coefs_size=(size_t)Nx*(size_t)Ny*(size_t)Nz; #ifndef HAVE_SSE2 spline->coefs = malloc (sizeof(double)*spline->coefs_size); #else posix_memalign ((void**)&spline->coefs, 16, (sizeof(double)*spline->coefs_size)); #endif if(data != NULL) // only data is provided { // First, solve in the X-direction #pragma omp parallel for for (int iy=0; iy<My; iy++) for (int iz=0; iz<Mz; iz++) { intptr_t doffset = iy*Mz+iz; intptr_t coffset = iy*Nz+iz; find_coefs_1d_d (spline->x_grid, xBC, data+doffset, My*Mz, spline->coefs+coffset, Ny*Nz); } // Now, solve in the Y-direction #pragma omp parallel for for (int ix=0; ix<Nx; ix++) for (int iz=0; iz<Nz; iz++) { intptr_t doffset = ix*Ny*Nz + iz; intptr_t coffset = ix*Ny*Nz + iz; find_coefs_1d_d (spline->y_grid, yBC, spline->coefs+doffset, Nz, spline->coefs+coffset, Nz); } // Now, solve in the Z-direction #pragma omp parallel for for (int ix=0; ix<Nx; ix++) for (int iy=0; iy<Ny; iy++) { intptr_t doffset = (ix*Ny+iy)*Nz; intptr_t coffset = (ix*Ny+iy)*Nz; find_coefs_1d_d (spline->z_grid, zBC, spline->coefs+doffset, 1, spline->coefs+coffset, 1); } } init_sse_data(); return spline; } void recompute_UBspline_3d_d (UBspline_3d_d* spline, double *data) { int Mx = spline->x_grid.num; int My = spline->y_grid.num; int Mz = spline->z_grid.num; int Nx, Ny, Nz; if (spline->xBC.lCode == PERIODIC || spline->xBC.lCode == ANTIPERIODIC) Nx = Mx+3; else Nx = Mx+2; if (spline->yBC.lCode == PERIODIC || spline->yBC.lCode == ANTIPERIODIC) Ny = My+3; else Ny = My+2; if (spline->zBC.lCode == PERIODIC || spline->zBC.lCode == ANTIPERIODIC) Nz = Mz+3; else Nz = Mz+2; // First, solve in the X-direction #pragma omp parallel for for (int iy=0; iy<My; iy++) for (int iz=0; iz<Mz; iz++) { intptr_t doffset = iy*Mz+iz; intptr_t coffset = iy*Nz+iz; find_coefs_1d_d (spline->x_grid, spline->xBC, data+doffset, My*Mz, spline->coefs+coffset, Ny*Nz); } // Now, solve in the Y-direction #pragma omp parallel for for (int ix=0; ix<Nx; ix++) for (int iz=0; iz<Nz; iz++) { intptr_t doffset = ix*Ny*Nz + iz; intptr_t coffset = ix*Ny*Nz + iz; find_coefs_1d_d (spline->y_grid, spline->yBC, spline->coefs+doffset, Nz, spline->coefs+coffset, Nz); } // Now, solve in the Z-direction #pragma omp parallel for for (int ix=0; ix<Nx; ix++) for (int iy=0; iy<Ny; iy++) { intptr_t doffset = (ix*Ny+iy)*Nz; intptr_t coffset = (ix*Ny+iy)*Nz; find_coefs_1d_d (spline->z_grid, spline->zBC, 
spline->coefs+doffset, 1, spline->coefs+coffset, 1); } } //////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////// //// Double-Precision, Complex Creation Routines //// //////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////// // On input, bands should be filled with: // row 0 : abcdInitial from boundary conditions // rows 1:M: basis functions in first 3 cols, data in last // row M+1 : abcdFinal from boundary conditions // cstride gives the stride between values in coefs. // On exit, coefs with contain interpolating B-spline coefs UBspline_1d_z* create_UBspline_1d_z (Ugrid x_grid, BCtype_z xBC, complex_double *data) { // Create new spline UBspline_1d_z* restrict spline = malloc (sizeof(UBspline_1d_z)); spline->spcode = U1D; spline->tcode = DOUBLE_COMPLEX; spline->xBC = xBC; // Setup internal variables int M = x_grid.num; int N; if (xBC.lCode == PERIODIC || xBC.lCode == ANTIPERIODIC) { x_grid.delta = (x_grid.end-x_grid.start)/(double)(x_grid.num); N = M+3; } else { x_grid.delta = (x_grid.end-x_grid.start)/(double)(x_grid.num-1); N = M+2; } x_grid.delta_inv = 1.0/x_grid.delta; spline->x_grid = x_grid; #ifndef HAVE_SSE2 spline->coefs = malloc (2*sizeof(double)*N); #else posix_memalign ((void**)&spline->coefs, 16, 2*sizeof(double)*N); #endif BCtype_d xBC_r, xBC_i; xBC_r.lCode = xBC.lCode; xBC_r.rCode = xBC.rCode; xBC_r.lVal = xBC.lVal_r; xBC_r.rVal = xBC.rVal_r; xBC_i.lCode = xBC.lCode; xBC_i.rCode = xBC.rCode; xBC_i.lVal = xBC.lVal_i; xBC_i.rVal = xBC.rVal_i; // Real part find_coefs_1d_d (spline->x_grid, xBC_r, (double*)data, 2, (double*)spline->coefs, 2); // Imaginarty part find_coefs_1d_d (spline->x_grid, xBC_i, ((double*)data)+1, 2, ((double*)spline->coefs)+1, 2); init_sse_data(); return spline; } void recompute_UBspline_1d_z (UBspline_1d_z* spline, complex_double *data) { int M = spline->x_grid.num; int N; if (spline->xBC.lCode == PERIODIC || spline->xBC.lCode == ANTIPERIODIC) N = M+3; else N = M+2; BCtype_d xBC_r, xBC_i; xBC_r.lCode = spline->xBC.lCode; xBC_r.rCode = spline->xBC.rCode; xBC_r.lVal = spline->xBC.lVal_r; xBC_r.rVal = spline->xBC.rVal_r; xBC_i.lCode = spline->xBC.lCode; xBC_i.rCode = spline->xBC.rCode; xBC_i.lVal = spline->xBC.lVal_i; xBC_i.rVal = spline->xBC.rVal_i; // Real part find_coefs_1d_d (spline->x_grid, xBC_r, (double*)data, 2, (double*)spline->coefs, 2); // Imaginarty part find_coefs_1d_d (spline->x_grid, xBC_i, ((double*)data)+1, 2, ((double*)spline->coefs)+1, 2); } UBspline_2d_z* create_UBspline_2d_z (Ugrid x_grid, Ugrid y_grid, BCtype_z xBC, BCtype_z yBC, complex_double *data) { // Create new spline UBspline_2d_z* restrict spline = malloc (sizeof(UBspline_2d_z)); spline->spcode = U2D; spline->tcode = DOUBLE_COMPLEX; spline->xBC = xBC; spline->yBC = yBC; // Setup internal variables int Mx = x_grid.num; int My = y_grid.num; int Nx, Ny; if (xBC.lCode == PERIODIC || xBC.lCode == ANTIPERIODIC) Nx = Mx+3; else Nx = Mx+2; x_grid.delta = (x_grid.end - x_grid.start)/(double)(Nx-3); x_grid.delta_inv = 1.0/x_grid.delta; spline->x_grid = x_grid; if (yBC.lCode == PERIODIC || yBC.lCode == ANTIPERIODIC) Ny = My+3; else Ny = My+2; y_grid.delta = (y_grid.end - y_grid.start)/(double)(Ny-3); y_grid.delta_inv = 1.0/y_grid.delta; spline->y_grid = y_grid; spline->x_stride = Ny; #ifndef HAVE_SSE2 spline->coefs = malloc (2*sizeof(double)*Nx*Ny); #else posix_memalign ((void**)&spline->coefs, 16, 2*sizeof(double)*Nx*Ny); #endif BCtype_d xBC_r, xBC_i, yBC_r, 
yBC_i; xBC_r.lCode = xBC.lCode; xBC_r.rCode = xBC.rCode; xBC_r.lVal = xBC.lVal_r; xBC_r.rVal = xBC.rVal_r; xBC_i.lCode = xBC.lCode; xBC_i.rCode = xBC.rCode; xBC_i.lVal = xBC.lVal_i; xBC_i.rVal = xBC.rVal_i; yBC_r.lCode = yBC.lCode; yBC_r.rCode = yBC.rCode; yBC_r.lVal = yBC.lVal_r; yBC_r.rVal = yBC.rVal_r; yBC_i.lCode = yBC.lCode; yBC_i.rCode = yBC.rCode; yBC_i.lVal = yBC.lVal_i; yBC_i.rVal = yBC.rVal_i; // First, solve in the X-direction for (int iy=0; iy<My; iy++) { intptr_t doffset = 2*iy; intptr_t coffset = 2*iy; // Real part find_coefs_1d_d (spline->x_grid, xBC_r, ((double*)data+doffset), 2*My, (double*)spline->coefs+coffset, 2*Ny); // Imag part find_coefs_1d_d (spline->x_grid, xBC_i, ((double*)data)+doffset+1, 2*My, ((double*)spline->coefs)+coffset+1, 2*Ny); } // Now, solve in the Y-direction for (int ix=0; ix<Nx; ix++) { intptr_t doffset = 2*ix*Ny; intptr_t coffset = 2*ix*Ny; // Real part find_coefs_1d_d (spline->y_grid, yBC_r, ((double*)spline->coefs)+doffset, 2, (double*)spline->coefs+coffset, 2); // Imag part find_coefs_1d_d (spline->y_grid, yBC_i, (double*)spline->coefs+doffset+1, 2, ((double*)spline->coefs)+coffset+1, 2); } init_sse_data(); return spline; } void recompute_UBspline_2d_z (UBspline_2d_z* spline, complex_double *data) { int Mx = spline->x_grid.num; int My = spline->y_grid.num; int Nx, Ny; if (spline->xBC.lCode == PERIODIC || spline->xBC.lCode == ANTIPERIODIC) Nx = Mx+3; else Nx = Mx+2; if (spline->yBC.lCode == PERIODIC || spline->yBC.lCode == ANTIPERIODIC) Ny = My+3; else Ny = My+2; BCtype_d xBC_r, xBC_i, yBC_r, yBC_i; xBC_r.lCode = spline->xBC.lCode; xBC_r.rCode = spline->xBC.rCode; xBC_r.lVal = spline->xBC.lVal_r; xBC_r.rVal = spline->xBC.rVal_r; xBC_i.lCode = spline->xBC.lCode; xBC_i.rCode = spline->xBC.rCode; xBC_i.lVal = spline->xBC.lVal_i; xBC_i.rVal = spline->xBC.rVal_i; yBC_r.lCode = spline->yBC.lCode; yBC_r.rCode = spline->yBC.rCode; yBC_r.lVal = spline->yBC.lVal_r; yBC_r.rVal = spline->yBC.rVal_r; yBC_i.lCode = spline->yBC.lCode; yBC_i.rCode = spline->yBC.rCode; yBC_i.lVal = spline->yBC.lVal_i; yBC_i.rVal = spline->yBC.rVal_i; // First, solve in the X-direction for (int iy=0; iy<My; iy++) { intptr_t doffset = 2*iy; intptr_t coffset = 2*iy; // Real part find_coefs_1d_d (spline->x_grid, xBC_r, ((double*)data+doffset), 2*My, (double*)spline->coefs+coffset, 2*Ny); // Imag part find_coefs_1d_d (spline->x_grid, xBC_i, ((double*)data)+doffset+1, 2*My, ((double*)spline->coefs)+coffset+1, 2*Ny); } // Now, solve in the Y-direction for (int ix=0; ix<Nx; ix++) { intptr_t doffset = 2*ix*Ny; intptr_t coffset = 2*ix*Ny; // Real part find_coefs_1d_d (spline->y_grid, yBC_r, ((double*)spline->coefs)+doffset, 2, (double*)spline->coefs+coffset, 2); // Imag part find_coefs_1d_d (spline->y_grid, yBC_i, (double*)spline->coefs+doffset+1, 2, ((double*)spline->coefs)+coffset+1, 2); } } UBspline_3d_z* create_UBspline_3d_z (Ugrid x_grid, Ugrid y_grid, Ugrid z_grid, BCtype_z xBC, BCtype_z yBC, BCtype_z zBC, complex_double *data) { // Create new spline UBspline_3d_z* restrict spline = malloc (sizeof(UBspline_3d_z)); spline->spcode = U3D; spline->tcode = DOUBLE_COMPLEX; spline->xBC = xBC; spline->yBC = yBC; spline->zBC = zBC; // Setup internal variables int Mx = x_grid.num; int My = y_grid.num; int Mz = z_grid.num; int Nx, Ny, Nz; if (xBC.lCode == PERIODIC || xBC.lCode == ANTIPERIODIC) Nx = Mx+3; else Nx = Mx+2; x_grid.delta = (x_grid.end - x_grid.start)/(double)(Nx-3); x_grid.delta_inv = 1.0/x_grid.delta; spline->x_grid = x_grid; if (yBC.lCode == PERIODIC || yBC.lCode == ANTIPERIODIC) 
Ny = My+3; else Ny = My+2; y_grid.delta = (y_grid.end - y_grid.start)/(double)(Ny-3); y_grid.delta_inv = 1.0/y_grid.delta; spline->y_grid = y_grid; if (zBC.lCode == PERIODIC || zBC.lCode == ANTIPERIODIC) Nz = Mz+3; else Nz = Mz+2; z_grid.delta = (z_grid.end - z_grid.start)/(double)(Nz-3); z_grid.delta_inv = 1.0/z_grid.delta; spline->z_grid = z_grid; spline->x_stride = Ny*Nz; spline->y_stride = Nz; #ifndef HAVE_SSE2 spline->coefs = malloc (2*sizeof(double)*Nx*Ny*Nz); #else posix_memalign ((void**)&spline->coefs, 16, 2*sizeof(double)*Nx*Ny*Nz); #endif BCtype_d xBC_r, xBC_i, yBC_r, yBC_i, zBC_r, zBC_i; xBC_r.lCode = xBC.lCode; xBC_r.rCode = xBC.rCode; xBC_r.lVal = xBC.lVal_r; xBC_r.rVal = xBC.rVal_r; xBC_i.lCode = xBC.lCode; xBC_i.rCode = xBC.rCode; xBC_i.lVal = xBC.lVal_i; xBC_i.rVal = xBC.rVal_i; yBC_r.lCode = yBC.lCode; yBC_r.rCode = yBC.rCode; yBC_r.lVal = yBC.lVal_r; yBC_r.rVal = yBC.rVal_r; yBC_i.lCode = yBC.lCode; yBC_i.rCode = yBC.rCode; yBC_i.lVal = yBC.lVal_i; yBC_i.rVal = yBC.rVal_i; zBC_r.lCode = zBC.lCode; zBC_r.rCode = zBC.rCode; zBC_r.lVal = zBC.lVal_r; zBC_r.rVal = zBC.rVal_r; zBC_i.lCode = zBC.lCode; zBC_i.rCode = zBC.rCode; zBC_i.lVal = zBC.lVal_i; zBC_i.rVal = zBC.rVal_i; // First, solve in the X-direction for (int iy=0; iy<My; iy++) for (int iz=0; iz<Mz; iz++) { intptr_t doffset = 2*(iy*Mz+iz); intptr_t coffset = 2*(iy*Nz+iz); // Real part find_coefs_1d_d (spline->x_grid, xBC_r, ((double*)data)+doffset, 2*My*Mz, ((double*)spline->coefs)+coffset, 2*Ny*Nz); // Imag part find_coefs_1d_d (spline->x_grid, xBC_i, ((double*)data)+doffset+1, 2*My*Mz, ((double*)spline->coefs)+coffset+1, 2*Ny*Nz); } // Now, solve in the Y-direction for (int ix=0; ix<Nx; ix++) for (int iz=0; iz<Nz; iz++) { intptr_t doffset = 2*(ix*Ny*Nz + iz); intptr_t coffset = 2*(ix*Ny*Nz + iz); // Real part find_coefs_1d_d (spline->y_grid, yBC_r, ((double*)spline->coefs)+doffset, 2*Nz, ((double*)spline->coefs)+coffset, 2*Nz); // Imag part find_coefs_1d_d (spline->y_grid, yBC_i, ((double*)spline->coefs)+doffset+1, 2*Nz, ((double*)spline->coefs)+coffset+1, 2*Nz); } // Now, solve in the Z-direction for (int ix=0; ix<Nx; ix++) for (int iy=0; iy<Ny; iy++) { intptr_t doffset = 2*((ix*Ny+iy)*Nz); intptr_t coffset = 2*((ix*Ny+iy)*Nz); // Real part find_coefs_1d_d (spline->z_grid, zBC_r, ((double*)spline->coefs)+doffset, 2, ((double*)spline->coefs)+coffset, 2); // Imag part find_coefs_1d_d (spline->z_grid, zBC_i, ((double*)spline->coefs)+doffset+1, 2, ((double*)spline->coefs)+coffset+1, 2); } init_sse_data(); return spline; } void recompute_UBspline_3d_z (UBspline_3d_z* spline, complex_double *data) { // Setup internal variables int Mx = spline->x_grid.num; int My = spline->y_grid.num; int Mz = spline->z_grid.num; int Nx, Ny, Nz; if (spline->xBC.lCode == PERIODIC || spline->xBC.lCode == ANTIPERIODIC) Nx = Mx+3; else Nx = Mx+2; if (spline->yBC.lCode == PERIODIC || spline->yBC.lCode == ANTIPERIODIC) Ny = My+3; else Ny = My+2; if (spline->zBC.lCode == PERIODIC || spline->zBC.lCode == ANTIPERIODIC) Nz = Mz+3; else Nz = Mz+2; BCtype_d xBC_r, xBC_i, yBC_r, yBC_i, zBC_r, zBC_i; xBC_r.lCode = spline->xBC.lCode; xBC_r.rCode = spline->xBC.rCode; xBC_r.lVal = spline->xBC.lVal_r; xBC_r.rVal = spline->xBC.rVal_r; xBC_i.lCode = spline->xBC.lCode; xBC_i.rCode = spline->xBC.rCode; xBC_i.lVal = spline->xBC.lVal_i; xBC_i.rVal = spline->xBC.rVal_i; yBC_r.lCode = spline->yBC.lCode; yBC_r.rCode = spline->yBC.rCode; yBC_r.lVal = spline->yBC.lVal_r; yBC_r.rVal = spline->yBC.rVal_r; yBC_i.lCode = spline->yBC.lCode; yBC_i.rCode = 
spline->yBC.rCode; yBC_i.lVal = spline->yBC.lVal_i; yBC_i.rVal = spline->yBC.rVal_i; zBC_r.lCode = spline->zBC.lCode; zBC_r.rCode = spline->zBC.rCode; zBC_r.lVal = spline->zBC.lVal_r; zBC_r.rVal = spline->zBC.rVal_r; zBC_i.lCode = spline->zBC.lCode; zBC_i.rCode = spline->zBC.rCode; zBC_i.lVal = spline->zBC.lVal_i; zBC_i.rVal = spline->zBC.rVal_i; // First, solve in the X-direction for (int iy=0; iy<My; iy++) for (int iz=0; iz<Mz; iz++) { intptr_t doffset = 2*(iy*Mz+iz); intptr_t coffset = 2*(iy*Nz+iz); // Real part find_coefs_1d_d (spline->x_grid, xBC_r, ((double*)data)+doffset, 2*My*Mz, ((double*)spline->coefs)+coffset, 2*Ny*Nz); // Imag part find_coefs_1d_d (spline->x_grid, xBC_i, ((double*)data)+doffset+1, 2*My*Mz, ((double*)spline->coefs)+coffset+1, 2*Ny*Nz); } // Now, solve in the Y-direction for (int ix=0; ix<Nx; ix++) for (int iz=0; iz<Nz; iz++) { intptr_t doffset = 2*(ix*Ny*Nz + iz); intptr_t coffset = 2*(ix*Ny*Nz + iz); // Real part find_coefs_1d_d (spline->y_grid, yBC_r, ((double*)spline->coefs)+doffset, 2*Nz, ((double*)spline->coefs)+coffset, 2*Nz); // Imag part find_coefs_1d_d (spline->y_grid, yBC_i, ((double*)spline->coefs)+doffset+1, 2*Nz, ((double*)spline->coefs)+coffset+1, 2*Nz); } // Now, solve in the Z-direction for (int ix=0; ix<Nx; ix++) for (int iy=0; iy<Ny; iy++) { intptr_t doffset = 2*((ix*Ny+iy)*Nz); intptr_t coffset = 2*((ix*Ny+iy)*Nz); // Real part find_coefs_1d_d (spline->z_grid, zBC_r, ((double*)spline->coefs)+doffset, 2, ((double*)spline->coefs)+coffset, 2); // Imag part find_coefs_1d_d (spline->z_grid, zBC_i, ((double*)spline->coefs)+doffset+1, 2, ((double*)spline->coefs)+coffset+1, 2); } } void destroy_UBspline (Bspline *spline) { free (spline->coefs); free (spline); } void destroy_NUBspline (Bspline *spline); void destroy_multi_UBspline (Bspline *spline); void destroy_Bspline (void *spline) { Bspline *sp = (Bspline *)spline; if (sp->sp_code <= U3D) destroy_UBspline (sp); else if (sp->sp_code <= NU3D) destroy_NUBspline (sp); else if (sp->sp_code <= MULTI_U3D) destroy_multi_UBspline (sp); else fprintf (stderr, "Error in destroy_Bspline: invalide spline code %d.\n", sp->sp_code); }
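The creation routines above all follow the same recipe: pick the coefficient count N from the data count M and the boundary code (M+3 for PERIODIC/ANTIPERIODIC, otherwise M+2), allocate the coefficient array (16-byte aligned when SSE2 is available), then sweep find_coefs_1d_d over each direction in turn; the x sweep reads the raw data (stride My*Mz in 3-D, i.e. x varies slowest) and the later sweeps solve in place on the coefficients. A minimal usage sketch follows, assuming the public einspline header and an illustrative 32x32x32 periodic data set; the header path, sample size and evaluation point are assumptions, not taken from this file.

#include <einspline/bspline.h>   /* assumed install location of the public API */

/* Sketch: build and evaluate a 3-D double-precision spline over [0,1)^3.
   data must hold 32*32*32 samples laid out with x slowest: data[(ix*32+iy)*32+iz]. */
void example_3d_spline(double *data)
{
  Ugrid xg; xg.start = 0.0; xg.end = 1.0; xg.num = 32;
  Ugrid yg = xg, zg = xg;

  BCtype_d bc;                          /* same boundary condition on every face */
  bc.lCode = PERIODIC; bc.rCode = PERIODIC;
  bc.lVal = 0.0;       bc.rVal = 0.0;   /* unused for periodic boundaries */

  UBspline_3d_d *spl = create_UBspline_3d_d(xg, yg, zg, bc, bc, bc, data);

  double val;
  eval_UBspline_3d_d(spl, 0.25, 0.5, 0.75, &val);   /* interpolated value at a point */

  destroy_Bspline(spl);                 /* frees the coefficients and the struct */
}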
simpar-omp.c
/** * @file simpar.c * @authors: Filipe Marques, Luís Fonseca * @date 29 Abr 2019 * @brief Header Parallellized implementation of simpar.h containing the particle simulation functions's source and main. */ #include "simpar.h" #include "omp.h" cell_t ** dummy; void usg_err() { printf("\t[-] usage : ./simpar <seed> <ncside> <n_par> <n_step>"); printf("\t\t[-] int <seed> : seed for random number generation.\n"); printf("\t\t[-] int <ncside> : size of the grid (number of cells on the side.\n"); printf("\t\t[-] int <n_par> : number of particles\n"); printf("\t\t[-] int <n_par> : number of time-steps\n"); exit(1); } long long val_l(const char* arg) { char *endptr; long long x = strtol(arg, &endptr, 10); /*Parse long from *arg*/ if (endptr == arg) { printf("[-] ERROR: Invalid number: %s\n", arg); return 0; } else if (*endptr) { printf("[-] ERROR: Trailing characters after number: %s\n", arg); } else if (x <= 0) { printf("[-] ERROR: Number must be positive: %llu\n", x); return 0; } return x; } cell_t** init_grid(const long ncside) { cell_t** grid = (cell_t**) calloc(ncside, sizeof(cell_t*)); dummy = (cell_t**)calloc(ncside, sizeof(cell_t*)); for(long c=0; c<ncside; c++) { grid[c] = (cell_t*)calloc(ncside, sizeof(cell_t)); dummy[c] = (cell_t*)calloc(ncside, sizeof(cell_t)); if(grid[c] == NULL) exit(0); if(dummy[c] == NULL) exit(0); } return grid; } void free_grid(cell_t** grid, long ncside) { for(long c=0; c<ncside; c++) { free(grid[c]); free(dummy[c]); } free(grid); free(dummy); } void init_particles(long seed, long ncside, long long n_part, particle_t *par) { long long i; srandom(seed); for(i=0; i < n_part; i++) { par[i].x = RND0_1; par[i].y = RND0_1; par[i].vx = RND0_1 / ncside / 10.0; par[i].vy = RND0_1 / ncside / 10.0; par[i].m = RND0_1 * ncside / (G * 1e6 * n_part); } } void init_env(cell_t** grid, long ncside, particle_t* p, long long n_par) { #pragma parallel for for(long long i=0; i<n_par; i++) { p[i].cx = (long) p[i].x * ncside; p[i].cy = (long) p[i].y * ncside; #pragma omp atomic grid[p[i].cx][p[i].cy].M += p[i].m; dummy[p[i].cx][p[i].cy].M += p[i].m; #pragma omp atomic grid[p[i].cx][p[i].cy].x += p[i].m * p[i].x; dummy[p[i].cx][p[i].cy].x += p[i].m * p[i].x; #pragma omp atomic grid[p[i].cx][p[i].cy].y += p[i].m * p[i].y; dummy[p[i].cx][p[i].cy].y += p[i].m * p[i].y; } } void accellerate_p(double* ax, double* ay, const cell_t* c, double m, double x, double y) { // Avoid calculation when cell is empty if((c->M) == 0.0) return; //double dirx = 1.0, diry = 1.0, double magnitude; double dx = ((c->x)/(c->M)) - x; double dy = ((c->y)/(c->M)) - y; double d_2 = (dx*dx)+(dy*dy); if(sqrt(d_2) < EPSLON) return; //if(dx<0.0){ dirx = -1.0; }else if(dx == 0.0){ dirx = 0.0; } //if(dy<0.0){ diry = -1.0; }else if(dy == 0.0){ diry = 0.0; } magnitude = (((c->M)*G)/d_2); *ax += dx * magnitude; *ay += dy * magnitude; } void update_particles(cell_t** grid, long ncside, particle_t* par, long long n_par, long n_step, long step) { double m, px, py, ax, ay; long cx, cy, nx, ny, ux, uy, lx, ly; #pragma omp parallel if(n_par*n_step > 1000000) { #pragma omp for private(m, px, py, ax, ay, cx, cy, nx, ny, ux, uy, lx, ly), reduction(+:t_mass, t_cx, t_cy), schedule(dynamic, 1000) for(long long i=0; i<n_par; i++) { m = par[i].m; px = par[i].x; py = par[i].y; cx = (long) px * ncside, nx; cy = (long) py * ncside, ny; ux = cx+1; uy = cy+1; lx = cx-1; ly = cy-1; if(ux >= ncside) ux = 0; else if(lx < 0) lx = ncside-1; if(uy >= ncside) uy = 0; else if(ly < 0) ly = ncside-1; ax = 0.0; ay = 0.0; accellerate_p(&ax, &ay, 
&(dummy[cx][cy]), m, px, py); // current cell accellerate_p(&ax, &ay, &(dummy[ux][cy]), m, px, py); // right cell accellerate_p(&ax, &ay, &(dummy[lx][cy]), m, px, py); // left cell //upper adjacents accellerate_p(&ax, &ay, &(dummy[cx][uy]), m, px, py); // upper cell accellerate_p(&ax, &ay, &(dummy[lx][uy]), m, px, py); // upper left cell accellerate_p(&ax, &ay, &(dummy[ux][uy]), m, px, py); // upper right cell //lower adjacents accellerate_p(&ax, &ay, &(dummy[cx][ly]), m, px, py); // lower cell accellerate_p(&ax, &ay, &(dummy[lx][ly]), m, px, py); // lower left cell accellerate_p(&ax, &ay, &(dummy[ux][ly]), m, px, py); // lower right cell //update velocity par[i].vx += ax; par[i].vy += ay; //update position par[i].x += par[i].vx + ax*0.5; while(par[i].x >= 1.0) par[i].x -= 1.0; while(par[i].x < 0.0) par[i].x += 1.0; par[i].y += par[i].vy + ay*0.5; while(par[i].y >= 1.0) par[i].y -= 1.0; while(par[i].y < 0.0) par[i].y += 1.0; //update cells if cell changed maybe outside loop? nx = (long) par[i].x*ncside; ny = (long) par[i].y*ncside; if(cx-nx || cy-ny) { if(cx-nx) par[i].cx = nx; if(cy-ny) par[i].cy = ny; #pragma omp atomic grid[cx][cy].M -= m; #pragma omp atomic grid[cx][cy].x -= m * px; #pragma omp atomic grid[cx][cy].y -= m * py; #pragma omp atomic grid[nx][ny].M += m; #pragma omp atomic grid[nx][ny].x += m * par[i].x; #pragma omp atomic grid[nx][ny].y += m * par[i].y; } if(n_step-1-step == 0) { t_mass += par[i].m; t_cx += par[i].m * par[i].x; t_cy += par[i].m * par[i].y; } } } #pragma omp parallel for for(long c = 0; c<ncside; c++) { for(long l = 0; l<ncside; l++) { dummy[c][l] = grid[c][l]; } } } int main(int argc, const char * argv[]) { if(argc != 5) { printf("[-] ERROR: Invalid number of arguments... Expected 4 but got %d\n", argc-1); usg_err(); } const long seed = (long) val_l(argv[1]); const long ncside = (long) val_l(argv[2]); const long long n_par = val_l(argv[3]); const long n_step = (long) val_l(argv[4]); if(!(seed*ncside*n_par*n_step)) usg_err(); double start_t, end_t; double elapsed_t; start_t = omp_get_wtime(); particle_t* par = (particle_t*) calloc(n_par, sizeof(particle_t)); init_particles(seed, ncside, n_par, par); cell_t** grid = init_grid(ncside); if(grid==NULL || par == NULL) exit(0); init_env(grid, ncside, par, n_par); for(long step = 0; step < n_step; step++) { update_particles(grid, ncside, par, n_par, n_step, step); } t_cx /= t_mass; t_cy /= t_mass; printf("%.2f %.2f\n", par[0].x, par[0].y); printf("%.2f %.2f\n", t_cx, t_cy); end_t = omp_get_wtime(); elapsed_t = ((double) (end_t - start_t)); //printf("%f (s)\n", elapsed_t); free(par); free_grid(grid, ncside); return 0; }
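One detail in init_env above is easy to miss: the loop is prefixed with "#pragma parallel for", which is not an OpenMP directive (the omp keyword is missing), so compilers treat it as an unknown pragma and the loop runs serially even though the grid updates are marked atomic. Below is a hedged sketch of the same accumulation with the directive spelled out; the parentheses in the cell-index computation are an assumption about the intent, since casting x (a value in [0,1)) to long before multiplying appears to always yield cell 0.

#include <omp.h>

/* Sketch only, not the project's code as-is: the init_env accumulation with
   the OpenMP directive written in full. cell_t and particle_t are the structs
   declared in simpar.h; the parenthesised index is an assumed fix of intent. */
static void init_env_sketch(cell_t **grid, long ncside, particle_t *p, long long n_par)
{
    #pragma omp parallel for
    for (long long i = 0; i < n_par; i++) {
        p[i].cx = (long)(p[i].x * ncside);
        p[i].cy = (long)(p[i].y * ncside);
        #pragma omp atomic
        grid[p[i].cx][p[i].cy].M += p[i].m;
        #pragma omp atomic
        grid[p[i].cx][p[i].cy].x += p[i].m * p[i].x;
        #pragma omp atomic
        grid[p[i].cx][p[i].cy].y += p[i].m * p[i].y;
        /* the dummy[][] mirror would need the same atomic treatment if kept */
    }
}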
/** * @file simpar.c * @authors: Filipe Marques, Luís Fonseca * @date 29 Abr 2019 * @brief Header Parallellized implementation of simpar.h containing the particle simulation functions's source and main. */ #include "simpar.h" #include "omp.h" cell_t **dummy; void usg_err() { printf("\t[-] usage : ./simpar <seed> <ncside> <n_par> <n_step>"); printf("\t\t[-] int <seed> : seed for random number generation.\n"); printf("\t\t[-] int <ncside> : size of the grid (number of cells on the side.\n"); printf("\t\t[-] int <n_par> : number of particles\n"); printf("\t\t[-] int <n_par> : number of time-steps\n"); exit(1); } long long val_l(const char *arg) { char *endptr; long long x = strtol(arg, &endptr, 10); /* Parse long from *arg */ if (endptr == arg) { printf("[-] ERROR: Invalid number: %s\n", arg); return 0; } else if (*endptr) { printf("[-] ERROR: Trailing characters after number: %s\n", arg); } else if (x <= 0) { printf("[-] ERROR: Number must be positive: %llu\n", x); return 0; } return x; } cell_t ** init_grid(const long ncside) { cell_t **grid = (cell_t **) calloc(ncside, sizeof(cell_t *)); dummy = (cell_t **) calloc(ncside, sizeof(cell_t *)); for (long c = 0; c < ncside; c++) { grid[c] = (cell_t *) calloc(ncside, sizeof(cell_t)); dummy[c] = (cell_t *) calloc(ncside, sizeof(cell_t)); if (grid[c] == NULL) exit(0); if (dummy[c] == NULL) exit(0); } return grid; } void free_grid(cell_t ** grid, long ncside) { for (long c = 0; c < ncside; c++) { free(grid[c]); free(dummy[c]); } free(grid); free(dummy); } void init_particles(long seed, long ncside, long long n_part, particle_t * par) { long long i; srandom(seed); for (i = 0; i < n_part; i++) { par[i].x = RND0_1; par[i].y = RND0_1; par[i].vx = RND0_1 / ncside / 10.0; par[i].vy = RND0_1 / ncside / 10.0; par[i].m = RND0_1 * ncside / (G * 1e6 * n_part); } } void init_env(cell_t ** grid, long ncside, particle_t * p, long long n_par) { #pragma parallel for for (long long i = 0; i < n_par; i++) { p[i].cx = (long)p[i].x * ncside; p[i].cy = (long)p[i].y * ncside; grid[p[i].cx][p[i].cy].M += p[i].m; dummy[p[i].cx][p[i].cy].M += p[i].m; grid[p[i].cx][p[i].cy].x += p[i].m * p[i].x; dummy[p[i].cx][p[i].cy].x += p[i].m * p[i].x; grid[p[i].cx][p[i].cy].y += p[i].m * p[i].y; dummy[p[i].cx][p[i].cy].y += p[i].m * p[i].y; } } void accellerate_p(double *ax, double *ay, const cell_t * c, double m, double x, double y) { //Avoid calculation when cell is empty if ((c->M) == 0.0) return; //double dirx = 1.0, diry = 1.0, double magnitude; double dx = ((c->x) / (c->M)) - x; double dy = ((c->y) / (c->M)) - y; double d_2 = (dx * dx) + (dy * dy); if (sqrt(d_2) < EPSLON) return; //if (dx < 0.0) { dirx = -1.0; } else if (dx == 0.0) { dirx = 0.0; } //if (dy < 0.0) { diry = -1.0; } else if (dy == 0.0) { diry = 0.0; } magnitude = (((c->M) * G) / d_2); *ax += dx * magnitude; *ay += dy * magnitude; } void update_particles(cell_t ** grid, long ncside, particle_t * par, long long n_par, long n_step, long step) { double m, px, py, ax, ay; long cx, cy, nx, ny, ux, uy, lx, ly; for (long long i = 0; i < n_par; i++) { m = par[i].m; px = par[i].x; py = par[i].y; cx = (long)px *ncside, nx; cy = (long)py *ncside, ny; ux = cx + 1; uy = cy + 1; lx = cx - 1; ly = cy - 1; if (ux >= ncside) ux = 0; else if (lx < 0) lx = ncside - 1; if (uy >= ncside) uy = 0; else if (ly < 0) ly = ncside - 1; ax = 0.0; ay = 0.0; accellerate_p(&ax, &ay, &(dummy[cx][cy]), m, px, py); //current cell accellerate_p(&ax, &ay, &(dummy[ux][cy]), m, px, py); //right cell accellerate_p(&ax, &ay, &(dummy[lx][cy]), m, px, py); 
//left cell // upper adjacents accellerate_p(&ax, &ay, &(dummy[cx][uy]), m, px, py); //upper cell accellerate_p(&ax, &ay, &(dummy[lx][uy]), m, px, py); //upper left cell accellerate_p(&ax, &ay, &(dummy[ux][uy]), m, px, py); //upper right cell // lower adjacents accellerate_p(&ax, &ay, &(dummy[cx][ly]), m, px, py); //lower cell accellerate_p(&ax, &ay, &(dummy[lx][ly]), m, px, py); //lower left cell accellerate_p(&ax, &ay, &(dummy[ux][ly]), m, px, py); //lower right cell // update velocity par[i].vx += ax; par[i].vy += ay; //update position par[i].x += par[i].vx + ax * 0.5; while (par[i].x >= 1.0) par[i].x -= 1.0; while (par[i].x < 0.0) par[i].x += 1.0; par[i].y += par[i].vy + ay * 0.5; while (par[i].y >= 1.0) par[i].y -= 1.0; while (par[i].y < 0.0) par[i].y += 1.0; //update cells if cell changed maybe outside loop ? nx = (long)par[i].x * ncside; ny = (long)par[i].y * ncside; if (cx - nx || cy - ny) { if (cx - nx) par[i].cx = nx; if (cy - ny) par[i].cy = ny; grid[cx][cy].M -= m; grid[cx][cy].x -= m * px; grid[cx][cy].y -= m * py; grid[nx][ny].M += m; grid[nx][ny].x += m * par[i].x; grid[nx][ny].y += m * par[i].y; } if (n_step - 1 - step == 0) { t_mass += par[i].m; t_cx += par[i].m * par[i].x; t_cy += par[i].m * par[i].y; } } for (long c = 0; c < ncside; c++) { for (long l = 0; l < ncside; l++) { dummy[c][l] = grid[c][l]; } } } int main(int argc, const char *argv[]) { if (argc != 5) { printf("[-] ERROR: Invalid number of arguments... Expected 4 but got %d\n", argc - 1); usg_err(); } const long seed = (long)val_l(argv[1]); const long ncside = (long)val_l(argv[2]); const long long n_par = val_l(argv[3]); const long n_step = (long)val_l(argv[4]); if (!(seed * ncside * n_par * n_step)) usg_err(); double start_t, end_t; double elapsed_t; start_t = omp_get_wtime(); particle_t *par = (particle_t *) calloc(n_par, sizeof(particle_t)); init_particles(seed, ncside, n_par, par); cell_t **grid = init_grid(ncside); if (grid == NULL || par == NULL) exit(0); init_env(grid, ncside, par, n_par); for (long step = 0; step < n_step; step++) { update_particles(grid, ncside, par, n_par, n_step, step); } t_cx /= t_mass; t_cy /= t_mass; printf("%.2f %.2f\n", par[0].x, par[0].y); printf("%.2f %.2f\n", t_cx, t_cy); end_t = omp_get_wtime(); elapsed_t = ((double)(end_t - start_t)); //printf("%f (s)\n", elapsed_t); free(par); free_grid(grid, ncside); return 0; }
/** * @file simpar.c * @authors: Filipe Marques, Luís Fonseca * @date 29 Abr 2019 * @brief Header Parallellized implementation of simpar.h containing the particle simulation functions's source and main. */ #include "simpar.h" #include "omp.h" cell_t **dummy; void usg_err() { printf("\t[-] usage : ./simpar <seed> <ncside> <n_par> <n_step>"); printf("\t\t[-] int <seed> : seed for random number generation.\n"); printf("\t\t[-] int <ncside> : size of the grid (number of cells on the side.\n"); printf("\t\t[-] int <n_par> : number of particles\n"); printf("\t\t[-] int <n_par> : number of time-steps\n"); exit(1); } long long val_l(const char *arg) { char *endptr; long long x = strtol(arg, &endptr, 10); /* Parse long from *arg */ if (endptr == arg) { printf("[-] ERROR: Invalid number: %s\n", arg); return 0; } else if (*endptr) { printf("[-] ERROR: Trailing characters after number: %s\n", arg); } else if (x <= 0) { printf("[-] ERROR: Number must be positive: %llu\n", x); return 0; } return x; } cell_t ** init_grid(const long ncside) { cell_t **grid = (cell_t **) calloc(ncside, sizeof(cell_t *)); dummy = (cell_t **) calloc(ncside, sizeof(cell_t *)); for (long c = 0; c < ncside; c++) { grid[c] = (cell_t *) calloc(ncside, sizeof(cell_t)); dummy[c] = (cell_t *) calloc(ncside, sizeof(cell_t)); if (grid[c] == NULL) exit(0); if (dummy[c] == NULL) exit(0); } return grid; } void free_grid(cell_t ** grid, long ncside) { for (long c = 0; c < ncside; c++) { free(grid[c]); free(dummy[c]); } free(grid); free(dummy); } void init_particles(long seed, long ncside, long long n_part, particle_t * par) { long long i; srandom(seed); for (i = 0; i < n_part; i++) { par[i].x = RND0_1; par[i].y = RND0_1; par[i].vx = RND0_1 / ncside / 10.0; par[i].vy = RND0_1 / ncside / 10.0; par[i].m = RND0_1 * ncside / (G * 1e6 * n_part); } } void init_env(cell_t ** grid, long ncside, particle_t * p, long long n_par) { #pragma parallel for for (long long i = 0; i < n_par; i++) { p[i].cx = (long)p[i].x * ncside; p[i].cy = (long)p[i].y * ncside; #pragma omp atomic grid[p[i].cx][p[i].cy].M += p[i].m; dummy[p[i].cx][p[i].cy].M += p[i].m; #pragma omp atomic grid[p[i].cx][p[i].cy].x += p[i].m * p[i].x; dummy[p[i].cx][p[i].cy].x += p[i].m * p[i].x; #pragma omp atomic grid[p[i].cx][p[i].cy].y += p[i].m * p[i].y; dummy[p[i].cx][p[i].cy].y += p[i].m * p[i].y; } } void accellerate_p(double *ax, double *ay, const cell_t * c, double m, double x, double y) { //Avoid calculation when cell is empty if ((c->M) == 0.0) return; //double dirx = 1.0, diry = 1.0, double magnitude; double dx = ((c->x) / (c->M)) - x; double dy = ((c->y) / (c->M)) - y; double d_2 = (dx * dx) + (dy * dy); if (sqrt(d_2) < EPSLON) return; //if (dx < 0.0) { dirx = -1.0; } else if (dx == 0.0) { dirx = 0.0; } //if (dy < 0.0) { diry = -1.0; } else if (dy == 0.0) { diry = 0.0; } magnitude = (((c->M) * G) / d_2); *ax += dx * magnitude; *ay += dy * magnitude; } void update_particles(cell_t ** grid, long ncside, particle_t * par, long long n_par, long n_step, long step) { double m, px, py, ax, ay; long cx, cy, nx, ny, ux, uy, lx, ly; #pragma omp parallel if(n_par*n_step > 1000000) { #pragma omp for private(m, px, py, ax, ay, cx, cy, nx, ny, ux, uy, lx, ly), reduction(+:t_mass, t_cx, t_cy), schedule(dynamic, 1000) for (long long i = 0; i < n_par; i++) { m = par[i].m; px = par[i].x; py = par[i].y; cx = (long)px *ncside, nx; cy = (long)py *ncside, ny; ux = cx + 1; uy = cy + 1; lx = cx - 1; ly = cy - 1; if (ux >= ncside) ux = 0; else if (lx < 0) lx = ncside - 1; if (uy >= ncside) uy = 0; else 
if (ly < 0) ly = ncside - 1; ax = 0.0; ay = 0.0; accellerate_p(&ax, &ay, &(dummy[cx][cy]), m, px, py); //current cell accellerate_p(&ax, &ay, &(dummy[ux][cy]), m, px, py); //right cell accellerate_p(&ax, &ay, &(dummy[lx][cy]), m, px, py); //left cell // upper adjacents accellerate_p(&ax, &ay, &(dummy[cx][uy]), m, px, py); //upper cell accellerate_p(&ax, &ay, &(dummy[lx][uy]), m, px, py); //upper left cell accellerate_p(&ax, &ay, &(dummy[ux][uy]), m, px, py); //upper right cell // lower adjacents accellerate_p(&ax, &ay, &(dummy[cx][ly]), m, px, py); //lower cell accellerate_p(&ax, &ay, &(dummy[lx][ly]), m, px, py); //lower left cell accellerate_p(&ax, &ay, &(dummy[ux][ly]), m, px, py); //lower right cell // update velocity par[i].vx += ax; par[i].vy += ay; //update position par[i].x += par[i].vx + ax * 0.5; while (par[i].x >= 1.0) par[i].x -= 1.0; while (par[i].x < 0.0) par[i].x += 1.0; par[i].y += par[i].vy + ay * 0.5; while (par[i].y >= 1.0) par[i].y -= 1.0; while (par[i].y < 0.0) par[i].y += 1.0; //update cells if cell changed maybe outside loop ? nx = (long)par[i].x * ncside; ny = (long)par[i].y * ncside; if (cx - nx || cy - ny) { if (cx - nx) par[i].cx = nx; if (cy - ny) par[i].cy = ny; #pragma omp atomic grid[cx][cy].M -= m; #pragma omp atomic grid[cx][cy].x -= m * px; #pragma omp atomic grid[cx][cy].y -= m * py; #pragma omp atomic grid[nx][ny].M += m; #pragma omp atomic grid[nx][ny].x += m * par[i].x; #pragma omp atomic grid[nx][ny].y += m * par[i].y; } if (n_step - 1 - step == 0) { t_mass += par[i].m; t_cx += par[i].m * par[i].x; t_cy += par[i].m * par[i].y; } } } #pragma omp parallel for for (long c = 0; c < ncside; c++) { for (long l = 0; l < ncside; l++) { dummy[c][l] = grid[c][l]; } } } int main(int argc, const char *argv[]) { if (argc != 5) { printf("[-] ERROR: Invalid number of arguments... Expected 4 but got %d\n", argc - 1); usg_err(); } const long seed = (long)val_l(argv[1]); const long ncside = (long)val_l(argv[2]); const long long n_par = val_l(argv[3]); const long n_step = (long)val_l(argv[4]); if (!(seed * ncside * n_par * n_step)) usg_err(); double start_t, end_t; double elapsed_t; start_t = omp_get_wtime(); particle_t *par = (particle_t *) calloc(n_par, sizeof(particle_t)); init_particles(seed, ncside, n_par, par); cell_t **grid = init_grid(ncside); if (grid == NULL || par == NULL) exit(0); init_env(grid, ncside, par, n_par); for (long step = 0; step < n_step; step++) { update_particles(grid, ncside, par, n_par, n_step, step); } t_cx /= t_mass; t_cy /= t_mass; printf("%.2f %.2f\n", par[0].x, par[0].y); printf("%.2f %.2f\n", t_cx, t_cy); end_t = omp_get_wtime(); elapsed_t = ((double)(end_t - start_t)); //printf("%f (s)\n", elapsed_t); free(par); free_grid(grid, ncside); return 0; }
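The omp-annotated update_particles above leans on two OpenMP features: the if() clause on the parallel construct, which only spawns threads when n_par*n_step is large enough to amortise the fork/join cost, and a +: reduction over the accumulators t_mass, t_cx and t_cy. A small, self-contained illustration of the same pattern follows; the variable names, workload and threshold are illustrative only.

#include <omp.h>
#include <stdio.h>

int main(void)
{
    const long long n = 2000000;          /* stand-in for n_par * n_step */
    double total = 0.0;

    /* Threads are only forked when the if() condition holds; each thread keeps
       a private partial sum that OpenMP combines into total at the end. */
    #pragma omp parallel for reduction(+:total) schedule(dynamic, 1000) if(n > 1000000)
    for (long long i = 0; i < n; i++)
        total += 1.0 / (double)(i + 1);

    printf("harmonic(%lld) = %f\n", n, total);
    return 0;
}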
CPUMatrixTensorImpl.h
// Move some files out of CPUMatrixImpl.h to prevent compiler crash on out-of-heap #include "CPUMatrix.h" #include "TensorOps.h" namespace Microsoft { namespace MSR { namespace CNTK { // ======================================================================= // TensorView support // ======================================================================= // To save time, this makes extensive use of templates and macros. // ----------------------------------------------------------------------- // function to compute the value for a given output location (perform reduction if needed) // ----------------------------------------------------------------------- // perform loop over reduction index m // This function is declared inside a wrapper struct to allow partial specialization (m = -1). template <class ElemType, typename OPFN, typename ReductionOp, size_t N, int m> struct TensorOpReduction { // reduction case (non-reduction case is specialized) static inline ElemType Loop(array<ElemType*, N> pointers, const OPFN& opfn, const ReductionOp& reductionOp, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides) { array<ptrdiff_t, N - 1> strides; // N-1 because last one is the result pointer, which is unused in reduction for (size_t i = 0; i < N - 1; i++) // N = a small constant, this will be unrolled strides[i] = reducingStrides[i][(size_t) m]; double aggregate = TensorOpReduction<ElemType, OPFN, ReductionOp, N, m - 1>::Loop(pointers, opfn, reductionOp, reducingOpDims, reducingStrides); for (size_t dim = reducingOpDims[(size_t)m] - 1; dim-- > 0;) { // advance the pointers for (size_t i = 0; i < N - 1; i++) pointers[i] += strides[i]; // note: last pointer (result) is unused and untouched here // need to descend into one loop deeper aggregate = reductionOp(aggregate, TensorOpReduction<ElemType, OPFN, ReductionOp, N, m - 1>::Loop(pointers, opfn, reductionOp, reducingOpDims, reducingStrides)); } // Actually it would be nicer to return double but we keep ElementType so that test don't return different numbers than previous implementation. return static_cast<ElemType>(aggregate); } }; // perform loop over reduction index m // This is the specialized version for m = -1, which terminates the recursion. template <class ElemType, typename OPFN, typename ReductionOp, size_t N> struct TensorOpReduction<ElemType, OPFN, ReductionOp, N, -1> { static inline ElemType Loop(array<ElemType*, N> pointers, const OPFN& opfn, const ReductionOp& /*reductionOp*/, const SmallVector<size_t>&, const array<SmallVector<ptrdiff_t>, N>&) { return opfn(pointers); // finally we are doing some work!!! } }; // perform loop over reduction index m, while keeping track of the number of elements and their corresponding indices. // This function is declared inside a wrapper struct to allow partial specialization (m = -1). 
template <class ElemType, size_t N, int m> struct TensorArgOpReduction { static inline std::pair<ElemType, size_t> ReduceAll(array<ElemType*, N> pointers, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides, ElementWiseOperator reductionOp) { size_t counter = 0; size_t index = 0; ElemType val = (ElemType)0; switch (reducingOpDims.size()) { case 3: val = TensorArgOpReduction<ElemType, N, 2>::Loop(pointers, reducingOpDims, reducingStrides, reductionOp, counter, index); break; case 2: val = TensorArgOpReduction<ElemType, N, 1>::Loop(pointers, reducingOpDims, reducingStrides, reductionOp, counter, index); break; case 1: val = TensorArgOpReduction<ElemType, N, 0>::Loop(pointers, reducingOpDims, reducingStrides, reductionOp, counter, index); break; case 0: val = TensorArgOpReduction<ElemType, N, -1>::Loop(pointers, reducingOpDims, reducingStrides, reductionOp, counter, index); break; default: LogicError("TensorOp: %d non-flattened input dimensions are not supported.", (int)reducingOpDims.size()); } return make_pair(val, index); } // reduction case (non-reduction case is specialized) static inline ElemType Loop(array<ElemType*, N> pointers, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides, ElementWiseOperator reductionOp, size_t& counter, size_t& index) { array<ptrdiff_t, N - 1> strides; // N-1 because last one is the result pointer, which is unused in reduction for (size_t i = 0; i < N - 1; i++) // N = a small constant, this will be unrolled strides[i] = reducingStrides[i][(size_t)m]; ElemType aggregate = TensorArgOpReduction<ElemType, N, m - 1>::Loop(pointers, reducingOpDims, reducingStrides, reductionOp, counter, index); for (size_t dim = reducingOpDims[(size_t)m] - 1; dim-- > 0;) { // advance the pointers for (size_t i = 0; i < N - 1; i++) pointers[i] += strides[i]; // note: last pointer (result) is unused and untouched here ElemType val = TensorArgOpReduction<ElemType, N, m - 1>::Loop(pointers, reducingOpDims, reducingStrides, reductionOp, counter, index); bool update = false; switch (reductionOp) { case ElementWiseOperator::opArgmin: update = (aggregate > val); break; case ElementWiseOperator::opArgmax: update = (aggregate < val); break; } if (update) { aggregate = val; index = counter - 1; } } return aggregate; } }; // perform loop over reduction index m // This is the specialized version for m = -1, which terminates the recursion. template <class ElemType, size_t N> struct TensorArgOpReduction<ElemType, N, -1> { static inline ElemType Loop(array<ElemType*, N> pointers, const SmallVector<size_t>&, const array<SmallVector<ptrdiff_t>, N>&, ElementWiseOperator /*reductionOp*/, size_t& counter, size_t& /*index*/) { counter++; return *pointers[0]; // finally we are doing some work!!! 
} }; // ----------------------------------------------------------------------- // perform loop over regular index k for N-nary operations (N counting the output) // ----------------------------------------------------------------------- // perform loop over regular index k and reducing index m for N operands (counting the output) template <class ElemType, typename OPFN, typename ReductionOp, size_t N, bool vectorizable, int m, int k> struct TensorOpIteration { static inline void Loop(ElemType beta, array<ElemType*, N> pointers, ElemType alpha, const OPFN& opfn, const ReductionOp& reductionOp, const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, N>& regularStrides, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides) { // non-scalar case: still nested result loops left array<ptrdiff_t, N> strides; for (size_t i = 0; i < N; i++) // N = a small constant, this will be unrolled strides[i] = regularStrides[i][(size_t) k]; for (size_t dim = regularOpDims[(size_t) k]; dim-- > 0;) { // need to descend into one loop deeper TensorOpIteration<ElemType, OPFN, ReductionOp, N, vectorizable, m, k - 1>::Loop(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); // advance the pointers for (size_t i = 0; i < N; i++) pointers[i] += strides[i]; } } }; // Special version for innermost loop with strides all being 1 and no further reduction. Compiler can use SSE. // This is a very common case, e.g. adding vectors or computing the Sigmoid. template <class ElemType, typename OPFN, typename ReductionOp> struct TensorOpIteration<ElemType, OPFN, ReductionOp, 3, true /*vectorizable*/, -1 /*no reduction*/, 0 /*innermost loop*/> { static inline void Loop(ElemType beta, array<ElemType*, 3> pointers, ElemType alpha, const OPFN& opfn, const ReductionOp& reductionOp, const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 3>& regularStrides, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 3>& reducingStrides) { ElemType* pa = pointers[0]; ElemType* pb = pointers[1]; ElemType* pc = pointers[2]; size_t K = regularOpDims[0]; // special-case beta and alpha to allow the compiler to short-circuit it if (beta != 0) #pragma omp parallel for for (int k = 0; k < (int) K; k++) TensorOpIteration<ElemType, OPFN, ReductionOp, 3, true /*vectorizable*/, -1 /*no reduction*/, -1 /*scalar*/>::Loop(beta, array<ElemType*, 3>{pa + k, pb + k, pc + k}, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); else if (alpha != 1) #pragma omp parallel for for (int k = 0; k < (int) K; k++) TensorOpIteration<ElemType, OPFN, ReductionOp, 3, true /*vectorizable*/, -1 /*no reduction*/, -1 /*scalar*/>::Loop(0, array<ElemType*, 3>{pa + k, pb + k, pc + k}, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); else #pragma omp parallel for for (int k = 0; k < (int) K; k++) TensorOpIteration<ElemType, OPFN, ReductionOp, 3, true /*vectorizable*/, -1 /*no reduction*/, -1 /*scalar*/>::Loop(0, array<ElemType*, 3>{pa + k, pb + k, pc + k}, 1, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); // TODO: According to Amit, the VS compiler is not able to vectorize into lambdas. Solution: change the lambda to take an N, or to implement the loop inside (with 1 element by default). // TODO: The signedness of k (required for omp) causes an extra sign-extend. // TODO: OMP adds LOTS of overhead. 
Do we need a guard, a min size when to use it? } }; // and unary template <class ElemType, typename OPFN, typename ReductionOp> struct TensorOpIteration<ElemType, OPFN, ReductionOp, 2, true /*vectorizable*/, -1 /*no reduction*/, 0 /*innermost loop*/> { static inline void Loop(ElemType beta, array<ElemType*, 2> pointers, ElemType alpha, const OPFN& opfn, const ReductionOp& reductionOp, const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 2>& regularStrides, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 2>& reducingStrides) { ElemType* pa = pointers[0]; ElemType* pb = pointers[1]; size_t K = regularOpDims[0]; // special-case beta and alpha to allow the compiler to short-circuit it if (beta != 0) #pragma omp parallel for for (int k = 0; k < (int) K; k++) TensorOpIteration<ElemType, OPFN, ReductionOp, 2, true /*vectorizable*/, -1 /*no reduction*/, -1 /*scalar*/>::Loop(beta, array<ElemType*, 2>{pa + k, pb + k}, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); else if (alpha != 1) #pragma omp parallel for for (int k = 0; k < (int) K; k++) TensorOpIteration<ElemType, OPFN, ReductionOp, 2, true /*vectorizable*/, -1 /*no reduction*/, -1 /*scalar*/>::Loop(0, array<ElemType*, 2>{pa + k, pb + k}, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); else #pragma omp parallel for for (int k = 0; k < (int) K; k++) TensorOpIteration<ElemType, OPFN, ReductionOp, 2, true /*vectorizable*/, -1 /*no reduction*/, -1 /*scalar*/>::Loop(0, array<ElemType*, 2>{pa + k, pb + k}, 1, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); } }; template <class ElemType, typename OPFN, typename ReductionOp, size_t N, bool vectorizable, int m> struct TensorOpIteration<ElemType, OPFN, ReductionOp, N, vectorizable, m, -1> { static inline void Loop(ElemType beta, array<ElemType*, N> pointers, ElemType alpha, const OPFN& opfn, const ReductionOp& reductionOp, const SmallVector<size_t>&, const array<SmallVector<ptrdiff_t>, N>&, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides) { // we are at element level for the result: perform the op (there may still be reduction) ElemType val = TensorOpReduction<ElemType, OPFN, ReductionOp, N, m>::Loop(pointers, opfn, reductionOp, reducingOpDims, reducingStrides); // scale val *= alpha; // combine with previous value in target matrix, then write it out auto* pout = pointers.back(); if (beta != 0) val += beta * *pout; // save *pout = val; return; } }; // perform loop over regular index k and reducing index m for N operands (counting the output), the difference // between TensorOpIteration and TensorArgOpIteration, is that the latter store the index of the result, instead of // the result. The reason that they aren't combined is because of performance. 
template <class ElemType, size_t N, int k> struct TensorArgOpIteration { static inline void Loop(array<ElemType*, N> pointers, const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, N>& regularStrides, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides, ElementWiseOperator reductionOp) { // non-scalar case: still nested result loops left array<ptrdiff_t, N> strides; for (size_t i = 0; i < N; i++) // N = a small constant, this will be unrolled strides[i] = regularStrides[i][(size_t)k]; for (size_t dim = regularOpDims[(size_t)k]; dim-- > 0;) { // need to descend into one loop deeper TensorArgOpIteration<ElemType, N, k - 1>::Loop(pointers, regularOpDims, regularStrides, reducingOpDims, reducingStrides, reductionOp); // advance the pointers for (size_t i = 0; i < N; i++) pointers[i] += strides[i]; } } }; template <class ElemType, size_t N> struct TensorArgOpIteration<ElemType, N, -1> { static inline void Loop(array<ElemType*, N> pointers, const SmallVector<size_t>&, const array<SmallVector<ptrdiff_t>, N>&, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides, ElementWiseOperator reductionOp) { // we are at element level for the result: perform the op (there may still be reduction) auto val = TensorArgOpReduction<ElemType, N, 2>::ReduceAll(pointers, reducingOpDims, reducingStrides, reductionOp); auto* pout = pointers.back(); *pout = (ElemType)val.second; return; } }; // ----------------------------------------------------------------------- // map runtime parameters N to template parameters // ----------------------------------------------------------------------- // tensor operation with k+1 dimensions (-1 means scalar) template <class ElemType, typename OPFN, typename ReductionOp, size_t N, int k> static void TensorOpWithRegularLoop(ElemType beta, const array<ElemType*, N>& pointers, ElemType alpha, const OPFN& opfn, ReductionOp reductionOp, const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, N>& regularStrides, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides) { size_t dims = reducingOpDims.size(); switch (dims) { case 2: return TensorOpIteration<ElemType, OPFN, ReductionOp, N, false /*vectorizable*/, 1, k>::Loop(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); case 1: return TensorOpIteration<ElemType, OPFN, ReductionOp, N, false /*vectorizable*/, 0, k>::Loop(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); case 0: { // if all leading dimensions are 1, we can let the compiler do some unrolling bool leadingAllOne = true; for (size_t i = 0; i < N; i++) leadingAllOne &= k >= 0 && regularStrides[i][0] == 1; if (leadingAllOne) // special version that uses a hard-coded increment of 1 for all leading dimensions return TensorOpIteration<ElemType, OPFN, ReductionOp, N, true /*vectorizable*/, -1, k>::Loop(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); else return TensorOpIteration<ElemType, OPFN, ReductionOp, N, false /*vectorizable*/, -1, k>::Loop(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); } default: LogicError("TensorOp: %d non-flattened reduction dimensions are not supported.", (int) dims); } } // tensor operation, generalized in number of arguments, operation already 
provided as a lambda // This function now expands into different k. template <class ElemType, typename OPFN, typename ReductionOp, size_t N> static void TensorOpWithFnAndReduction(ElemType beta, array<ElemType*, N> pointers, ElemType alpha, const OPFN& opfn, const ReductionOp& reductionOp, const array<size_t, N>& offsets, const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, N>& regularStrides, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides) { for (size_t i = 0; i < N; i++) // N = a small constant, this will be unrolled pointers[i] += offsets[i]; size_t dims = regularOpDims.size(); switch (dims) { // N.B. consider code size impact when adding more cases. case 5: return TensorOpWithRegularLoop<ElemType, OPFN, ReductionOp, N, 4>(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); case 4: return TensorOpWithRegularLoop<ElemType, OPFN, ReductionOp, N, 3>(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); case 3: return TensorOpWithRegularLoop<ElemType, OPFN, ReductionOp, N, 2>(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); case 2: return TensorOpWithRegularLoop<ElemType, OPFN, ReductionOp, N, 1>(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); case 1: return TensorOpWithRegularLoop<ElemType, OPFN, ReductionOp, N, 0>(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); case 0: return TensorOpWithRegularLoop<ElemType, OPFN, ReductionOp, N, -1>(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); default: LogicError("TensorOp: %d non-flattened input dimensions are not supported.", (int)dims); } } // tensor operation, generalized in number of arguments, operation already provided as a lambda // This function now expands into different reductionOps template <class ElemType, typename OPFN, size_t N> static void TensorOpWithFn(ElemType beta, array<ElemType*, N> pointers, ElemType alpha, const OPFN& opfn, ElementWiseOperator reductionOp, const array<size_t, N>& offsets, const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, N>& regularStrides, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides) { // BUGBUG: Using always 'double' as type of aggregator even for ElemType==float. Reason: otherwise some e2e test would fail as historically we // used double for aggregator of sum. But: // * for min and max reductions this is meaningless. // * It is not consitent with what we do on GPU, there we aggregate on ElemType. // * It costs performance. // TODO: apdapt e2e tests to run with aggregator of type ElemType. 
#define CaseTensorOpWithFnAndReduction(oper) \ case ElementWiseOperator::op##oper: \ return TensorOpWithFnAndReduction(beta, pointers, alpha, opfn, [](double a, double b) \ { \ return Op##oper(a, b); \ }, \ offsets, regularOpDims, regularStrides, reducingOpDims, reducingStrides) switch (reductionOp) { CaseTensorOpWithFnAndReduction(Sum); CaseTensorOpWithFnAndReduction(LogSum); CaseTensorOpWithFnAndReduction(Min); CaseTensorOpWithFnAndReduction(Max); CaseTensorOpWithFnAndReduction(ElementwiseProduct); default: LogicError("Specified ElementWiseOperator op %d not supported as reduction operation.", (int)reductionOp); } } // ----------------------------------------------------------------------- // entry points from Matrix.cpp; also map op to a lambda // ----------------------------------------------------------------------- // special tensor ops for inference speed template <class ElemType> bool CPUMatrixSpecialUnaryTensorOpImpl(ElemType beta, const CPUMatrix<ElemType>& a, CPUMatrix<ElemType>& o, ElemType alpha, ElementWiseOperator op, ElementWiseOperator reductionOp, const array<size_t, 2>& offsets, const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 2>& regularStrides, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 2>& reducingStrides); template <class ElemType> bool CPUMatrixSpecialBinaryTensorOpImpl(ElemType beta, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& o, ElemType alpha, ElementWiseOperator op, ElementWiseOperator reductionOp, const array<size_t, 3>& offsets, const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 3>& regularStrides, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 3>& reducingStrides); template <class ElemType> bool CPUMatrixSpecialTernaryTensorOpImpl(ElemType beta, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, const CPUMatrix<ElemType>& c, CPUMatrix<ElemType>& o, ElemType alpha, ElementWiseOperator op, ElementWiseOperator reductionOp, const array<size_t, 4>& offsets, const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 4>& regularStrides, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 4>& reducingStrides); // perform unary operation 'op' on a giving 'this', reinterpreting the matrices as tensors as specified by the dims and strides // This maps 'op' to a lambda. 
template <class ElemType> void CPUMatrixTensorOpImpl(ElemType beta, const CPUMatrix<ElemType>& a, CPUMatrix<ElemType>& o, ElemType alpha, ElementWiseOperator op, ElementWiseOperator reductionOp, const array<size_t, 2>& offsets, const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 2>& regularStrides, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 2>& reducingStrides) { if (reductionOp != ElementWiseOperator::opSum && reductionOp != ElementWiseOperator::opLogSum && reductionOp != ElementWiseOperator::opMin && reductionOp != ElementWiseOperator::opMax && reductionOp != ElementWiseOperator::opElementwiseProduct) InvalidArgument("TensorOp: Unary reduction operations other than opMax, opMin, opSum, and opLogSum are not implemented."); #ifdef USE_MKL if (!!(CPUMatrix<ElemType>::GetOptimizationFlags() & CPUMatrix<ElemType>::OPT_EVAL_WITH_MKL) && CPUMatrixSpecialUnaryTensorOpImpl(beta, a, o, alpha, op, reductionOp, offsets, regularOpDims, regularStrides, reducingOpDims, reducingStrides)) return; #endif // TODO: Change the lambda to take a pointer and a number of elements, so that we can pass it 1 or 4 elements, in order for it to SSE-vectorize. #define CaseUnaryTensorOp(oper) \ case ElementWiseOperator::op##oper: \ return TensorOpWithFn(beta, pointers, alpha, [](const array<ElemType*, 2>& pp) \ { \ return Op##oper((*(pp[0]))); \ }, \ reductionOp, offsets, regularOpDims, regularStrides, reducingOpDims, reducingStrides) array<ElemType*, 2> pointers = {a.Data(), o.Data()}; switch (op) { ForAllUnaryOps(CaseUnaryTensorOp); default: LogicError("TensorOp: Unknown unary op code %d.", (int) op); } } // perform binary operation 'op' on a and b giving 'this', reinterpreting the matrices as tensors as specified by the dims and strides // This maps 'op' to a lambda. template <class ElemType> void CPUMatrixTensorOpImpl(ElemType beta, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& o, ElemType alpha, ElementWiseOperator op, ElementWiseOperator reductionOp, const array<size_t, 3>& offsets, const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 3>& regularStrides, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 3>& reducingStrides) { if (reductionOp != ElementWiseOperator::opSum) InvalidArgument("TensorOp (binary): The only permitted binary reduction operation is opSum."); #ifdef USE_MKL if (!!(CPUMatrix<ElemType>::GetOptimizationFlags() & CPUMatrix<ElemType>::OPT_EVAL_WITH_MKL) && CPUMatrixSpecialBinaryTensorOpImpl(beta, a, b, o, alpha, op, reductionOp, offsets, regularOpDims, regularStrides, reducingOpDims, reducingStrides)) return; #endif #define CaseBinaryTensorOp(oper) \ case ElementWiseOperator::op##oper: \ return TensorOpWithFn(beta, pointers, alpha, [](const array<ElemType*, 3>& pp) \ { \ return Op##oper((*(pp[0])), (*(pp[1]))); \ }, \ reductionOp, offsets, regularOpDims, regularStrides, reducingOpDims, reducingStrides) array<ElemType*, 3> pointers = {a.Data(), b.Data(), o.Data()}; switch (op) { ForAllBinaryOps(CaseBinaryTensorOp); default: LogicError("TensorOp: Unknown op binary code %d.", (int) op); } } // perform ternary operation 'op' on a, and c giving 'this', reinterpreting the matrices as tensors as specified by the dims and strides // This maps 'op' to a lambda. 
template <class ElemType> void CPUMatrixTensorOpImpl(ElemType beta, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, const CPUMatrix<ElemType>& c, CPUMatrix<ElemType>& o, ElemType alpha, ElementWiseOperator op, ElementWiseOperator reductionOp, const array<size_t, 4>& offsets, const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 4>& regularStrides, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 4>& reducingStrides) { if (reductionOp != ElementWiseOperator::opSum) InvalidArgument("TensorOp: The only permitted ternary reduction operation is opSum."); #ifdef USE_MKL if (!!(CPUMatrix<ElemType>::GetOptimizationFlags() & CPUMatrix<ElemType>::OPT_EVAL_WITH_MKL) && CPUMatrixSpecialTernaryTensorOpImpl(beta, a, b, c, o, alpha, op, reductionOp, offsets, regularOpDims, regularStrides, reducingOpDims, reducingStrides)) return; #endif #define CaseTernaryTensorOp(oper) \ case ElementWiseOperator::op##oper: \ return TensorOpWithFn(beta, pointers, alpha, [](const array<ElemType*, 4>& pp) \ { \ return Op##oper((*(pp[0])), (*(pp[1])), (*(pp[2]))); \ }, \ reductionOp, offsets, regularOpDims, regularStrides, reducingOpDims, reducingStrides) array<ElemType*, 4> pointers = {a.Data(), b.Data(), c.Data(), o.Data()}; switch (op) { ForAllTernaryOps(CaseTernaryTensorOp); default: LogicError("TensorOp: Unknown ternary op code %d.", (int) op); } } template <class ElemType> void CPUMatrixTensorArgOpImpl(const CPUMatrix<ElemType>& a, CPUMatrix<ElemType>& o, ElementWiseOperator reductionOp, const array<size_t, 2>& offsets, const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 2>& regularStrides, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 2>& reducingStrides) { if (reductionOp != ElementWiseOperator::opArgmin && reductionOp != ElementWiseOperator::opArgmax) InvalidArgument("TensorOp: Arg reduction operations other than opArgmax, and opArgmin are not implemented."); if (o.GetNumElements() == 1) { o.Data()[0] = (ElemType) a.ArgOp(reductionOp); } else { const size_t N = 2; array<ElemType*, N> pointers = { a.Data(), o.Data() }; for (size_t i = 0; i < N; i++) pointers[i] += offsets[i]; switch (regularOpDims.size()) { case 2: TensorArgOpIteration<ElemType, N, 1>::Loop(pointers, regularOpDims, regularStrides, reducingOpDims, reducingStrides, reductionOp); break; case 1: TensorArgOpIteration<ElemType, N, 0>::Loop(pointers, regularOpDims, regularStrides, reducingOpDims, reducingStrides, reductionOp); break; case 0: TensorArgOpIteration<ElemType, N, -1>::Loop(pointers, regularOpDims, regularStrides, reducingOpDims, reducingStrides, reductionOp); break; default: LogicError("TensorOp: %d non-flattened input dimensions are not supported.", (int)regularOpDims.size()); } } } }}}
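The implementation above hinges on one trick that is easy to lose in the macro noise: the runtime number of regular and reducing dimensions is translated, via switch statements (TensorOpWithFnAndReduction, TensorOpWithRegularLoop), into compile-time template parameters k and m, so every loop nest is instantiated at a fixed depth and the innermost cases can be specialised (and OpenMP-parallelised) separately. A stripped-down, self-contained sketch of that dispatch idea follows; it is not CNTK code and all names are illustrative.

// Minimal sketch of mapping a runtime rank onto a compile-time loop depth.
#include <array>
#include <cstddef>
#include <iostream>

template <int k>
struct NestedLoop
{
    // Recurse over dimension k; each instantiation has a fixed, inlinable depth.
    static void Run(const std::array<size_t, 3>& dims, size_t& count)
    {
        for (size_t i = 0; i < dims[k]; i++)
            NestedLoop<k - 1>::Run(dims, count);
    }
};

template <>
struct NestedLoop<-1>   // recursion terminator: the innermost "scalar" case
{
    static void Run(const std::array<size_t, 3>&, size_t& count) { count++; }
};

static void RunWithRank(size_t rank, const std::array<size_t, 3>& dims, size_t& count)
{
    switch (rank)       // runtime rank -> compile-time template parameter
    {
    case 3: return NestedLoop<2>::Run(dims, count);
    case 2: return NestedLoop<1>::Run(dims, count);
    case 1: return NestedLoop<0>::Run(dims, count);
    default: return NestedLoop<-1>::Run(dims, count);
    }
}

int main()
{
    std::array<size_t, 3> dims = {2, 3, 4};
    size_t count = 0;
    RunWithRank(3, dims, count);
    std::cout << count << "\n";   // prints 24 = 2*3*4
    return 0;
}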
// Move some files out of CPUMatrixImpl.h to prevent compiler crash on out-of-heap #include "CPUMatrix.h" #include "TensorOps.h" namespace Microsoft { namespace MSR { namespace CNTK { // ======================================================================= // TensorView support // ======================================================================= // To save time, this makes extensive use of templates and macros. // ----------------------------------------------------------------------- // function to compute the value for a given output location (perform reduction if needed) // ----------------------------------------------------------------------- // perform loop over reduction index m // This function is declared inside a wrapper struct to allow partial specialization (m = -1). template <class ElemType, typename OPFN, typename ReductionOp, size_t N, int m> struct TensorOpReduction { // reduction case (non-reduction case is specialized) static inline ElemType Loop(array<ElemType*, N> pointers, const OPFN& opfn, const ReductionOp& reductionOp, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides) { array<ptrdiff_t, N - 1> strides; // N-1 because last one is the result pointer, which is unused in reduction for (size_t i = 0; i < N - 1; i++) // N = a small constant, this will be unrolled strides[i] = reducingStrides[i][(size_t) m]; double aggregate = TensorOpReduction<ElemType, OPFN, ReductionOp, N, m - 1>::Loop(pointers, opfn, reductionOp, reducingOpDims, reducingStrides); for (size_t dim = reducingOpDims[(size_t)m] - 1; dim-- > 0;) { // advance the pointers for (size_t i = 0; i < N - 1; i++) pointers[i] += strides[i]; // note: last pointer (result) is unused and untouched here // need to descend into one loop deeper aggregate = reductionOp(aggregate, TensorOpReduction<ElemType, OPFN, ReductionOp, N, m - 1>::Loop(pointers, opfn, reductionOp, reducingOpDims, reducingStrides)); } // Actually it would be nicer to return double but we keep ElementType so that test don't return different numbers than previous implementation. return static_cast<ElemType>(aggregate); } }; // perform loop over reduction index m // This is the specialized version for m = -1, which terminates the recursion. template <class ElemType, typename OPFN, typename ReductionOp, size_t N> struct TensorOpReduction<ElemType, OPFN, ReductionOp, N, -1> { static inline ElemType Loop(array<ElemType*, N> pointers, const OPFN& opfn, const ReductionOp& /*reductionOp*/, const SmallVector<size_t>&, const array<SmallVector<ptrdiff_t>, N>&) { return opfn(pointers); // finally we are doing some work!!! } }; // perform loop over reduction index m, while keeping track of the number of elements and their corresponding indices. // This function is declared inside a wrapper struct to allow partial specialization (m = -1). 
template <class ElemType, size_t N, int m> struct TensorArgOpReduction { static inline std::pair<ElemType, size_t> ReduceAll(array<ElemType*, N> pointers, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides, ElementWiseOperator reductionOp) { size_t counter = 0; size_t index = 0; ElemType val = (ElemType)0; switch (reducingOpDims.size()) { case 3: val = TensorArgOpReduction<ElemType, N, 2>::Loop(pointers, reducingOpDims, reducingStrides, reductionOp, counter, index); break; case 2: val = TensorArgOpReduction<ElemType, N, 1>::Loop(pointers, reducingOpDims, reducingStrides, reductionOp, counter, index); break; case 1: val = TensorArgOpReduction<ElemType, N, 0>::Loop(pointers, reducingOpDims, reducingStrides, reductionOp, counter, index); break; case 0: val = TensorArgOpReduction<ElemType, N, -1>::Loop(pointers, reducingOpDims, reducingStrides, reductionOp, counter, index); break; default: LogicError("TensorOp: %d non-flattened input dimensions are not supported.", (int)reducingOpDims.size()); } return make_pair(val, index); } // reduction case (non-reduction case is specialized) static inline ElemType Loop(array<ElemType*, N> pointers, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides, ElementWiseOperator reductionOp, size_t& counter, size_t& index) { array<ptrdiff_t, N - 1> strides; // N-1 because last one is the result pointer, which is unused in reduction for (size_t i = 0; i < N - 1; i++) // N = a small constant, this will be unrolled strides[i] = reducingStrides[i][(size_t)m]; ElemType aggregate = TensorArgOpReduction<ElemType, N, m - 1>::Loop(pointers, reducingOpDims, reducingStrides, reductionOp, counter, index); for (size_t dim = reducingOpDims[(size_t)m] - 1; dim-- > 0;) { // advance the pointers for (size_t i = 0; i < N - 1; i++) pointers[i] += strides[i]; // note: last pointer (result) is unused and untouched here ElemType val = TensorArgOpReduction<ElemType, N, m - 1>::Loop(pointers, reducingOpDims, reducingStrides, reductionOp, counter, index); bool update = false; switch (reductionOp) { case ElementWiseOperator::opArgmin: update = (aggregate > val); break; case ElementWiseOperator::opArgmax: update = (aggregate < val); break; } if (update) { aggregate = val; index = counter - 1; } } return aggregate; } }; // perform loop over reduction index m // This is the specialized version for m = -1, which terminates the recursion. template <class ElemType, size_t N> struct TensorArgOpReduction<ElemType, N, -1> { static inline ElemType Loop(array<ElemType*, N> pointers, const SmallVector<size_t>&, const array<SmallVector<ptrdiff_t>, N>&, ElementWiseOperator /*reductionOp*/, size_t& counter, size_t& /*index*/) { counter++; return *pointers[0]; // finally we are doing some work!!! 
} }; // ----------------------------------------------------------------------- // perform loop over regular index k for N-nary operations (N counting the output) // ----------------------------------------------------------------------- // perform loop over regular index k and reducing index m for N operands (counting the output) template <class ElemType, typename OPFN, typename ReductionOp, size_t N, bool vectorizable, int m, int k> struct TensorOpIteration { static inline void Loop(ElemType beta, array<ElemType*, N> pointers, ElemType alpha, const OPFN& opfn, const ReductionOp& reductionOp, const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, N>& regularStrides, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides) { // non-scalar case: still nested result loops left array<ptrdiff_t, N> strides; for (size_t i = 0; i < N; i++) // N = a small constant, this will be unrolled strides[i] = regularStrides[i][(size_t) k]; for (size_t dim = regularOpDims[(size_t) k]; dim-- > 0;) { // need to descend into one loop deeper TensorOpIteration<ElemType, OPFN, ReductionOp, N, vectorizable, m, k - 1>::Loop(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); // advance the pointers for (size_t i = 0; i < N; i++) pointers[i] += strides[i]; } } }; // Special version for innermost loop with strides all being 1 and no further reduction. Compiler can use SSE. // This is a very common case, e.g. adding vectors or computing the Sigmoid. template <class ElemType, typename OPFN, typename ReductionOp> struct TensorOpIteration<ElemType, OPFN, ReductionOp, 3, true /*vectorizable*/, -1 /*no reduction*/, 0 /*innermost loop*/> { static inline void Loop(ElemType beta, array<ElemType*, 3> pointers, ElemType alpha, const OPFN& opfn, const ReductionOp& reductionOp, const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 3>& regularStrides, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 3>& reducingStrides) { ElemType* pa = pointers[0]; ElemType* pb = pointers[1]; ElemType* pc = pointers[2]; size_t K = regularOpDims[0]; // special-case beta and alpha to allow the compiler to short-circuit it if (beta != 0) for (int k = 0; k < (int) K; k++) TensorOpIteration<ElemType, OPFN, ReductionOp, 3, true /*vectorizable*/, -1 /*no reduction*/, -1 /*scalar*/>::Loop(beta, array<ElemType*, 3>{pa + k, pb + k, pc + k}, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); else if (alpha != 1) for (int k = 0; k < (int) K; k++) TensorOpIteration<ElemType, OPFN, ReductionOp, 3, true /*vectorizable*/, -1 /*no reduction*/, -1 /*scalar*/>::Loop(0, array<ElemType*, 3>{pa + k, pb + k, pc + k}, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); else for (int k = 0; k < (int) K; k++) TensorOpIteration<ElemType, OPFN, ReductionOp, 3, true /*vectorizable*/, -1 /*no reduction*/, -1 /*scalar*/>::Loop(0, array<ElemType*, 3>{pa + k, pb + k, pc + k}, 1, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); // TODO: According to Amit, the VS compiler is not able to vectorize into lambdas. Solution: change the lambda to take an N, or to implement the loop inside (with 1 element by default). // TODO: The signedness of k (required for omp) causes an extra sign-extend. // TODO: OMP adds LOTS of overhead. Do we need a guard, a min size when to use it? 
} }; // and unary template <class ElemType, typename OPFN, typename ReductionOp> struct TensorOpIteration<ElemType, OPFN, ReductionOp, 2, true /*vectorizable*/, -1 /*no reduction*/, 0 /*innermost loop*/> { static inline void Loop(ElemType beta, array<ElemType*, 2> pointers, ElemType alpha, const OPFN& opfn, const ReductionOp& reductionOp, const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 2>& regularStrides, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 2>& reducingStrides) { ElemType* pa = pointers[0]; ElemType* pb = pointers[1]; size_t K = regularOpDims[0]; // special-case beta and alpha to allow the compiler to short-circuit it if (beta != 0) for (int k = 0; k < (int) K; k++) TensorOpIteration<ElemType, OPFN, ReductionOp, 2, true /*vectorizable*/, -1 /*no reduction*/, -1 /*scalar*/>::Loop(beta, array<ElemType*, 2>{pa + k, pb + k}, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); else if (alpha != 1) for (int k = 0; k < (int) K; k++) TensorOpIteration<ElemType, OPFN, ReductionOp, 2, true /*vectorizable*/, -1 /*no reduction*/, -1 /*scalar*/>::Loop(0, array<ElemType*, 2>{pa + k, pb + k}, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); else for (int k = 0; k < (int) K; k++) TensorOpIteration<ElemType, OPFN, ReductionOp, 2, true /*vectorizable*/, -1 /*no reduction*/, -1 /*scalar*/>::Loop(0, array<ElemType*, 2>{pa + k, pb + k}, 1, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); } }; template <class ElemType, typename OPFN, typename ReductionOp, size_t N, bool vectorizable, int m> struct TensorOpIteration<ElemType, OPFN, ReductionOp, N, vectorizable, m, -1> { static inline void Loop(ElemType beta, array<ElemType*, N> pointers, ElemType alpha, const OPFN& opfn, const ReductionOp& reductionOp, const SmallVector<size_t>&, const array<SmallVector<ptrdiff_t>, N>&, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides) { // we are at element level for the result: perform the op (there may still be reduction) ElemType val = TensorOpReduction<ElemType, OPFN, ReductionOp, N, m>::Loop(pointers, opfn, reductionOp, reducingOpDims, reducingStrides); // scale val *= alpha; // combine with previous value in target matrix, then write it out auto* pout = pointers.back(); if (beta != 0) val += beta * *pout; // save *pout = val; return; } }; // perform loop over regular index k and reducing index m for N operands (counting the output), the difference // between TensorOpIteration and TensorArgOpIteration, is that the latter store the index of the result, instead of // the result. The reason that they aren't combined is because of performance. 
template <class ElemType, size_t N, int k> struct TensorArgOpIteration { static inline void Loop(array<ElemType*, N> pointers, const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, N>& regularStrides, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides, ElementWiseOperator reductionOp) { // non-scalar case: still nested result loops left array<ptrdiff_t, N> strides; for (size_t i = 0; i < N; i++) // N = a small constant, this will be unrolled strides[i] = regularStrides[i][(size_t)k]; for (size_t dim = regularOpDims[(size_t)k]; dim-- > 0;) { // need to descend into one loop deeper TensorArgOpIteration<ElemType, N, k - 1>::Loop(pointers, regularOpDims, regularStrides, reducingOpDims, reducingStrides, reductionOp); // advance the pointers for (size_t i = 0; i < N; i++) pointers[i] += strides[i]; } } }; template <class ElemType, size_t N> struct TensorArgOpIteration<ElemType, N, -1> { static inline void Loop(array<ElemType*, N> pointers, const SmallVector<size_t>&, const array<SmallVector<ptrdiff_t>, N>&, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides, ElementWiseOperator reductionOp) { // we are at element level for the result: perform the op (there may still be reduction) auto val = TensorArgOpReduction<ElemType, N, 2>::ReduceAll(pointers, reducingOpDims, reducingStrides, reductionOp); auto* pout = pointers.back(); *pout = (ElemType)val.second; return; } }; // ----------------------------------------------------------------------- // map runtime parameters N to template parameters // ----------------------------------------------------------------------- // tensor operation with k+1 dimensions (-1 means scalar) template <class ElemType, typename OPFN, typename ReductionOp, size_t N, int k> static void TensorOpWithRegularLoop(ElemType beta, const array<ElemType*, N>& pointers, ElemType alpha, const OPFN& opfn, ReductionOp reductionOp, const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, N>& regularStrides, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides) { size_t dims = reducingOpDims.size(); switch (dims) { case 2: return TensorOpIteration<ElemType, OPFN, ReductionOp, N, false /*vectorizable*/, 1, k>::Loop(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); case 1: return TensorOpIteration<ElemType, OPFN, ReductionOp, N, false /*vectorizable*/, 0, k>::Loop(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); case 0: { // if all leading dimensions are 1, we can let the compiler do some unrolling bool leadingAllOne = true; for (size_t i = 0; i < N; i++) leadingAllOne &= k >= 0 && regularStrides[i][0] == 1; if (leadingAllOne) // special version that uses a hard-coded increment of 1 for all leading dimensions return TensorOpIteration<ElemType, OPFN, ReductionOp, N, true /*vectorizable*/, -1, k>::Loop(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); else return TensorOpIteration<ElemType, OPFN, ReductionOp, N, false /*vectorizable*/, -1, k>::Loop(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); } default: LogicError("TensorOp: %d non-flattened reduction dimensions are not supported.", (int) dims); } } // tensor operation, generalized in number of arguments, operation already 
provided as a lambda // This function now expands into different k. template <class ElemType, typename OPFN, typename ReductionOp, size_t N> static void TensorOpWithFnAndReduction(ElemType beta, array<ElemType*, N> pointers, ElemType alpha, const OPFN& opfn, const ReductionOp& reductionOp, const array<size_t, N>& offsets, const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, N>& regularStrides, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides) { for (size_t i = 0; i < N; i++) // N = a small constant, this will be unrolled pointers[i] += offsets[i]; size_t dims = regularOpDims.size(); switch (dims) { // N.B. consider code size impact when adding more cases. case 5: return TensorOpWithRegularLoop<ElemType, OPFN, ReductionOp, N, 4>(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); case 4: return TensorOpWithRegularLoop<ElemType, OPFN, ReductionOp, N, 3>(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); case 3: return TensorOpWithRegularLoop<ElemType, OPFN, ReductionOp, N, 2>(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); case 2: return TensorOpWithRegularLoop<ElemType, OPFN, ReductionOp, N, 1>(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); case 1: return TensorOpWithRegularLoop<ElemType, OPFN, ReductionOp, N, 0>(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); case 0: return TensorOpWithRegularLoop<ElemType, OPFN, ReductionOp, N, -1>(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); default: LogicError("TensorOp: %d non-flattened input dimensions are not supported.", (int)dims); } } // tensor operation, generalized in number of arguments, operation already provided as a lambda // This function now expands into different reductionOps template <class ElemType, typename OPFN, size_t N> static void TensorOpWithFn(ElemType beta, array<ElemType*, N> pointers, ElemType alpha, const OPFN& opfn, ElementWiseOperator reductionOp, const array<size_t, N>& offsets, const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, N>& regularStrides, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides) { // BUGBUG: Using always 'double' as type of aggregator even for ElemType==float. Reason: otherwise some e2e test would fail as historically we // used double for aggregator of sum. But: // * for min and max reductions this is meaningless. // * It is not consitent with what we do on GPU, there we aggregate on ElemType. // * It costs performance. // TODO: apdapt e2e tests to run with aggregator of type ElemType. 
#define CaseTensorOpWithFnAndReduction(oper) \ case ElementWiseOperator::op##oper: \ return TensorOpWithFnAndReduction(beta, pointers, alpha, opfn, [](double a, double b) \ { \ return Op##oper(a, b); \ }, \ offsets, regularOpDims, regularStrides, reducingOpDims, reducingStrides) switch (reductionOp) { CaseTensorOpWithFnAndReduction(Sum); CaseTensorOpWithFnAndReduction(LogSum); CaseTensorOpWithFnAndReduction(Min); CaseTensorOpWithFnAndReduction(Max); CaseTensorOpWithFnAndReduction(ElementwiseProduct); default: LogicError("Specified ElementWiseOperator op %d not supported as reduction operation.", (int)reductionOp); } } // ----------------------------------------------------------------------- // entry points from Matrix.cpp; also map op to a lambda // ----------------------------------------------------------------------- // special tensor ops for inference speed template <class ElemType> bool CPUMatrixSpecialUnaryTensorOpImpl(ElemType beta, const CPUMatrix<ElemType>& a, CPUMatrix<ElemType>& o, ElemType alpha, ElementWiseOperator op, ElementWiseOperator reductionOp, const array<size_t, 2>& offsets, const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 2>& regularStrides, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 2>& reducingStrides); template <class ElemType> bool CPUMatrixSpecialBinaryTensorOpImpl(ElemType beta, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& o, ElemType alpha, ElementWiseOperator op, ElementWiseOperator reductionOp, const array<size_t, 3>& offsets, const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 3>& regularStrides, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 3>& reducingStrides); template <class ElemType> bool CPUMatrixSpecialTernaryTensorOpImpl(ElemType beta, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, const CPUMatrix<ElemType>& c, CPUMatrix<ElemType>& o, ElemType alpha, ElementWiseOperator op, ElementWiseOperator reductionOp, const array<size_t, 4>& offsets, const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 4>& regularStrides, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 4>& reducingStrides); // perform unary operation 'op' on a giving 'this', reinterpreting the matrices as tensors as specified by the dims and strides // This maps 'op' to a lambda. 
template <class ElemType> void CPUMatrixTensorOpImpl(ElemType beta, const CPUMatrix<ElemType>& a, CPUMatrix<ElemType>& o, ElemType alpha, ElementWiseOperator op, ElementWiseOperator reductionOp, const array<size_t, 2>& offsets, const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 2>& regularStrides, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 2>& reducingStrides) { if (reductionOp != ElementWiseOperator::opSum && reductionOp != ElementWiseOperator::opLogSum && reductionOp != ElementWiseOperator::opMin && reductionOp != ElementWiseOperator::opMax && reductionOp != ElementWiseOperator::opElementwiseProduct) InvalidArgument("TensorOp: Unary reduction operations other than opMax, opMin, opSum, and opLogSum are not implemented."); #ifdef USE_MKL if (!!(CPUMatrix<ElemType>::GetOptimizationFlags() & CPUMatrix<ElemType>::OPT_EVAL_WITH_MKL) && CPUMatrixSpecialUnaryTensorOpImpl(beta, a, o, alpha, op, reductionOp, offsets, regularOpDims, regularStrides, reducingOpDims, reducingStrides)) return; #endif // TODO: Change the lambda to take a pointer and a number of elements, so that we can pass it 1 or 4 elements, in order for it to SSE-vectorize. #define CaseUnaryTensorOp(oper) \ case ElementWiseOperator::op##oper: \ return TensorOpWithFn(beta, pointers, alpha, [](const array<ElemType*, 2>& pp) \ { \ return Op##oper((*(pp[0]))); \ }, \ reductionOp, offsets, regularOpDims, regularStrides, reducingOpDims, reducingStrides) array<ElemType*, 2> pointers = {a.Data(), o.Data()}; switch (op) { ForAllUnaryOps(CaseUnaryTensorOp); default: LogicError("TensorOp: Unknown unary op code %d.", (int) op); } } // perform binary operation 'op' on a and b giving 'this', reinterpreting the matrices as tensors as specified by the dims and strides // This maps 'op' to a lambda. template <class ElemType> void CPUMatrixTensorOpImpl(ElemType beta, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& o, ElemType alpha, ElementWiseOperator op, ElementWiseOperator reductionOp, const array<size_t, 3>& offsets, const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 3>& regularStrides, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 3>& reducingStrides) { if (reductionOp != ElementWiseOperator::opSum) InvalidArgument("TensorOp (binary): The only permitted binary reduction operation is opSum."); #ifdef USE_MKL if (!!(CPUMatrix<ElemType>::GetOptimizationFlags() & CPUMatrix<ElemType>::OPT_EVAL_WITH_MKL) && CPUMatrixSpecialBinaryTensorOpImpl(beta, a, b, o, alpha, op, reductionOp, offsets, regularOpDims, regularStrides, reducingOpDims, reducingStrides)) return; #endif #define CaseBinaryTensorOp(oper) \ case ElementWiseOperator::op##oper: \ return TensorOpWithFn(beta, pointers, alpha, [](const array<ElemType*, 3>& pp) \ { \ return Op##oper((*(pp[0])), (*(pp[1]))); \ }, \ reductionOp, offsets, regularOpDims, regularStrides, reducingOpDims, reducingStrides) array<ElemType*, 3> pointers = {a.Data(), b.Data(), o.Data()}; switch (op) { ForAllBinaryOps(CaseBinaryTensorOp); default: LogicError("TensorOp: Unknown op binary code %d.", (int) op); } } // perform ternary operation 'op' on a, and c giving 'this', reinterpreting the matrices as tensors as specified by the dims and strides // This maps 'op' to a lambda. 
template <class ElemType> void CPUMatrixTensorOpImpl(ElemType beta, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, const CPUMatrix<ElemType>& c, CPUMatrix<ElemType>& o, ElemType alpha, ElementWiseOperator op, ElementWiseOperator reductionOp, const array<size_t, 4>& offsets, const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 4>& regularStrides, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 4>& reducingStrides) { if (reductionOp != ElementWiseOperator::opSum) InvalidArgument("TensorOp: The only permitted ternary reduction operation is opSum."); #ifdef USE_MKL if (!!(CPUMatrix<ElemType>::GetOptimizationFlags() & CPUMatrix<ElemType>::OPT_EVAL_WITH_MKL) && CPUMatrixSpecialTernaryTensorOpImpl(beta, a, b, c, o, alpha, op, reductionOp, offsets, regularOpDims, regularStrides, reducingOpDims, reducingStrides)) return; #endif #define CaseTernaryTensorOp(oper) \ case ElementWiseOperator::op##oper: \ return TensorOpWithFn(beta, pointers, alpha, [](const array<ElemType*, 4>& pp) \ { \ return Op##oper((*(pp[0])), (*(pp[1])), (*(pp[2]))); \ }, \ reductionOp, offsets, regularOpDims, regularStrides, reducingOpDims, reducingStrides) array<ElemType*, 4> pointers = {a.Data(), b.Data(), c.Data(), o.Data()}; switch (op) { ForAllTernaryOps(CaseTernaryTensorOp); default: LogicError("TensorOp: Unknown ternary op code %d.", (int) op); } } template <class ElemType> void CPUMatrixTensorArgOpImpl(const CPUMatrix<ElemType>& a, CPUMatrix<ElemType>& o, ElementWiseOperator reductionOp, const array<size_t, 2>& offsets, const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 2>& regularStrides, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 2>& reducingStrides) { if (reductionOp != ElementWiseOperator::opArgmin && reductionOp != ElementWiseOperator::opArgmax) InvalidArgument("TensorOp: Arg reduction operations other than opArgmax, and opArgmin are not implemented."); if (o.GetNumElements() == 1) { o.Data()[0] = (ElemType) a.ArgOp(reductionOp); } else { const size_t N = 2; array<ElemType*, N> pointers = { a.Data(), o.Data() }; for (size_t i = 0; i < N; i++) pointers[i] += offsets[i]; switch (regularOpDims.size()) { case 2: TensorArgOpIteration<ElemType, N, 1>::Loop(pointers, regularOpDims, regularStrides, reducingOpDims, reducingStrides, reductionOp); break; case 1: TensorArgOpIteration<ElemType, N, 0>::Loop(pointers, regularOpDims, regularStrides, reducingOpDims, reducingStrides, reductionOp); break; case 0: TensorArgOpIteration<ElemType, N, -1>::Loop(pointers, regularOpDims, regularStrides, reducingOpDims, reducingStrides, reductionOp); break; default: LogicError("TensorOp: %d non-flattened input dimensions are not supported.", (int)regularOpDims.size()); } } } }}}
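The rendering above implements reduction by compile-time recursion: TensorOpReduction carries the current reducing dimension as a template int parameter m, each level walks one reducing dimension by its stride, and the m = -1 specialization terminates the recursion by applying the op to the current element. A minimal standalone sketch of that pattern, with an assumed name (StridedSum), summing a strided block:

#include <array>
#include <cstddef>
#include <cstdio>
#include <vector>

// each level m loops over one reducing dimension, advancing by that dimension's stride
template <int m>
struct StridedSum
{
    static double Loop(const float* p,
                       const std::vector<size_t>& dims,
                       const std::vector<ptrdiff_t>& strides)
    {
        double aggregate = 0;
        for (size_t i = 0; i < dims[m]; i++, p += strides[m])
            aggregate += StridedSum<m - 1>::Loop(p, dims, strides);
        return aggregate;
    }
};

// m = -1 terminates the recursion: read the element itself
template <>
struct StridedSum<-1>
{
    static double Loop(const float* p, const std::vector<size_t>&, const std::vector<ptrdiff_t>&)
    {
        return *p;
    }
};

int main()
{
    // reduce a 2 x 3 row-major block: dims = {3, 2}, strides = {1, 3} (innermost first)
    float data[6] = {1, 2, 3, 4, 5, 6};
    double s = StridedSum<1>::Loop(data, {3, 2}, {1, 3});
    std::printf("%g\n", s); // prints: 21
}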
// Move some files out of CPUMatrixImpl.h to prevent compiler crash on out-of-heap #include "CPUMatrix.h" #include "TensorOps.h" namespace Microsoft { namespace MSR { namespace CNTK { // ======================================================================= // TensorView support // ======================================================================= // To save time, this makes extensive use of templates and macros. // ----------------------------------------------------------------------- // function to compute the value for a given output location (perform reduction if needed) // ----------------------------------------------------------------------- // perform loop over reduction index m // This function is declared inside a wrapper struct to allow partial specialization (m = -1). template <class ElemType, typename OPFN, typename ReductionOp, size_t N, int m> struct TensorOpReduction { // reduction case (non-reduction case is specialized) static inline ElemType Loop(array<ElemType*, N> pointers, const OPFN& opfn, const ReductionOp& reductionOp, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides) { array<ptrdiff_t, N - 1> strides; // N-1 because last one is the result pointer, which is unused in reduction for (size_t i = 0; i < N - 1; i++) // N = a small constant, this will be unrolled strides[i] = reducingStrides[i][(size_t) m]; double aggregate = TensorOpReduction<ElemType, OPFN, ReductionOp, N, m - 1>::Loop(pointers, opfn, reductionOp, reducingOpDims, reducingStrides); for (size_t dim = reducingOpDims[(size_t)m] - 1; dim-- > 0;) { // advance the pointers for (size_t i = 0; i < N - 1; i++) pointers[i] += strides[i]; // note: last pointer (result) is unused and untouched here // need to descend into one loop deeper aggregate = reductionOp(aggregate, TensorOpReduction<ElemType, OPFN, ReductionOp, N, m - 1>::Loop(pointers, opfn, reductionOp, reducingOpDims, reducingStrides)); } // Actually it would be nicer to return double but we keep ElementType so that test don't return different numbers than previous implementation. return static_cast<ElemType>(aggregate); } }; // perform loop over reduction index m // This is the specialized version for m = -1, which terminates the recursion. template <class ElemType, typename OPFN, typename ReductionOp, size_t N> struct TensorOpReduction<ElemType, OPFN, ReductionOp, N, -1> { static inline ElemType Loop(array<ElemType*, N> pointers, const OPFN& opfn, const ReductionOp& /*reductionOp*/, const SmallVector<size_t>&, const array<SmallVector<ptrdiff_t>, N>&) { return opfn(pointers); // finally we are doing some work!!! } }; // perform loop over reduction index m, while keeping track of the number of elements and their corresponding indices. // This function is declared inside a wrapper struct to allow partial specialization (m = -1). 
template <class ElemType, size_t N, int m> struct TensorArgOpReduction { static inline std::pair<ElemType, size_t> ReduceAll(array<ElemType*, N> pointers, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides, ElementWiseOperator reductionOp) { size_t counter = 0; size_t index = 0; ElemType val = (ElemType)0; switch (reducingOpDims.size()) { case 3: val = TensorArgOpReduction<ElemType, N, 2>::Loop(pointers, reducingOpDims, reducingStrides, reductionOp, counter, index); break; case 2: val = TensorArgOpReduction<ElemType, N, 1>::Loop(pointers, reducingOpDims, reducingStrides, reductionOp, counter, index); break; case 1: val = TensorArgOpReduction<ElemType, N, 0>::Loop(pointers, reducingOpDims, reducingStrides, reductionOp, counter, index); break; case 0: val = TensorArgOpReduction<ElemType, N, -1>::Loop(pointers, reducingOpDims, reducingStrides, reductionOp, counter, index); break; default: LogicError("TensorOp: %d non-flattened input dimensions are not supported.", (int)reducingOpDims.size()); } return make_pair(val, index); } // reduction case (non-reduction case is specialized) static inline ElemType Loop(array<ElemType*, N> pointers, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides, ElementWiseOperator reductionOp, size_t& counter, size_t& index) { array<ptrdiff_t, N - 1> strides; // N-1 because last one is the result pointer, which is unused in reduction for (size_t i = 0; i < N - 1; i++) // N = a small constant, this will be unrolled strides[i] = reducingStrides[i][(size_t)m]; ElemType aggregate = TensorArgOpReduction<ElemType, N, m - 1>::Loop(pointers, reducingOpDims, reducingStrides, reductionOp, counter, index); for (size_t dim = reducingOpDims[(size_t)m] - 1; dim-- > 0;) { // advance the pointers for (size_t i = 0; i < N - 1; i++) pointers[i] += strides[i]; // note: last pointer (result) is unused and untouched here ElemType val = TensorArgOpReduction<ElemType, N, m - 1>::Loop(pointers, reducingOpDims, reducingStrides, reductionOp, counter, index); bool update = false; switch (reductionOp) { case ElementWiseOperator::opArgmin: update = (aggregate > val); break; case ElementWiseOperator::opArgmax: update = (aggregate < val); break; } if (update) { aggregate = val; index = counter - 1; } } return aggregate; } }; // perform loop over reduction index m // This is the specialized version for m = -1, which terminates the recursion. template <class ElemType, size_t N> struct TensorArgOpReduction<ElemType, N, -1> { static inline ElemType Loop(array<ElemType*, N> pointers, const SmallVector<size_t>&, const array<SmallVector<ptrdiff_t>, N>&, ElementWiseOperator /*reductionOp*/, size_t& counter, size_t& /*index*/) { counter++; return *pointers[0]; // finally we are doing some work!!! 
} }; // ----------------------------------------------------------------------- // perform loop over regular index k for N-nary operations (N counting the output) // ----------------------------------------------------------------------- // perform loop over regular index k and reducing index m for N operands (counting the output) template <class ElemType, typename OPFN, typename ReductionOp, size_t N, bool vectorizable, int m, int k> struct TensorOpIteration { static inline void Loop(ElemType beta, array<ElemType*, N> pointers, ElemType alpha, const OPFN& opfn, const ReductionOp& reductionOp, const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, N>& regularStrides, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides) { // non-scalar case: still nested result loops left array<ptrdiff_t, N> strides; for (size_t i = 0; i < N; i++) // N = a small constant, this will be unrolled strides[i] = regularStrides[i][(size_t) k]; for (size_t dim = regularOpDims[(size_t) k]; dim-- > 0;) { // need to descend into one loop deeper TensorOpIteration<ElemType, OPFN, ReductionOp, N, vectorizable, m, k - 1>::Loop(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); // advance the pointers for (size_t i = 0; i < N; i++) pointers[i] += strides[i]; } } }; // Special version for innermost loop with strides all being 1 and no further reduction. Compiler can use SSE. // This is a very common case, e.g. adding vectors or computing the Sigmoid. template <class ElemType, typename OPFN, typename ReductionOp> struct TensorOpIteration<ElemType, OPFN, ReductionOp, 3, true /*vectorizable*/, -1 /*no reduction*/, 0 /*innermost loop*/> { static inline void Loop(ElemType beta, array<ElemType*, 3> pointers, ElemType alpha, const OPFN& opfn, const ReductionOp& reductionOp, const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 3>& regularStrides, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 3>& reducingStrides) { ElemType* pa = pointers[0]; ElemType* pb = pointers[1]; ElemType* pc = pointers[2]; size_t K = regularOpDims[0]; // special-case beta and alpha to allow the compiler to short-circuit it if (beta != 0) #pragma omp parallel for for (int k = 0; k < (int) K; k++) TensorOpIteration<ElemType, OPFN, ReductionOp, 3, true /*vectorizable*/, -1 /*no reduction*/, -1 /*scalar*/>::Loop(beta, array<ElemType*, 3>{pa + k, pb + k, pc + k}, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); else if (alpha != 1) #pragma omp parallel for for (int k = 0; k < (int) K; k++) TensorOpIteration<ElemType, OPFN, ReductionOp, 3, true /*vectorizable*/, -1 /*no reduction*/, -1 /*scalar*/>::Loop(0, array<ElemType*, 3>{pa + k, pb + k, pc + k}, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); else #pragma omp parallel for for (int k = 0; k < (int) K; k++) TensorOpIteration<ElemType, OPFN, ReductionOp, 3, true /*vectorizable*/, -1 /*no reduction*/, -1 /*scalar*/>::Loop(0, array<ElemType*, 3>{pa + k, pb + k, pc + k}, 1, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); // TODO: According to Amit, the VS compiler is not able to vectorize into lambdas. Solution: change the lambda to take an N, or to implement the loop inside (with 1 element by default). // TODO: The signedness of k (required for omp) causes an extra sign-extend. // TODO: OMP adds LOTS of overhead. 
Do we need a guard, a min size when to use it? } }; // and unary template <class ElemType, typename OPFN, typename ReductionOp> struct TensorOpIteration<ElemType, OPFN, ReductionOp, 2, true /*vectorizable*/, -1 /*no reduction*/, 0 /*innermost loop*/> { static inline void Loop(ElemType beta, array<ElemType*, 2> pointers, ElemType alpha, const OPFN& opfn, const ReductionOp& reductionOp, const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 2>& regularStrides, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 2>& reducingStrides) { ElemType* pa = pointers[0]; ElemType* pb = pointers[1]; size_t K = regularOpDims[0]; // special-case beta and alpha to allow the compiler to short-circuit it if (beta != 0) #pragma omp parallel for for (int k = 0; k < (int) K; k++) TensorOpIteration<ElemType, OPFN, ReductionOp, 2, true /*vectorizable*/, -1 /*no reduction*/, -1 /*scalar*/>::Loop(beta, array<ElemType*, 2>{pa + k, pb + k}, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); else if (alpha != 1) #pragma omp parallel for for (int k = 0; k < (int) K; k++) TensorOpIteration<ElemType, OPFN, ReductionOp, 2, true /*vectorizable*/, -1 /*no reduction*/, -1 /*scalar*/>::Loop(0, array<ElemType*, 2>{pa + k, pb + k}, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); else #pragma omp parallel for for (int k = 0; k < (int) K; k++) TensorOpIteration<ElemType, OPFN, ReductionOp, 2, true /*vectorizable*/, -1 /*no reduction*/, -1 /*scalar*/>::Loop(0, array<ElemType*, 2>{pa + k, pb + k}, 1, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); } }; template <class ElemType, typename OPFN, typename ReductionOp, size_t N, bool vectorizable, int m> struct TensorOpIteration<ElemType, OPFN, ReductionOp, N, vectorizable, m, -1> { static inline void Loop(ElemType beta, array<ElemType*, N> pointers, ElemType alpha, const OPFN& opfn, const ReductionOp& reductionOp, const SmallVector<size_t>&, const array<SmallVector<ptrdiff_t>, N>&, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides) { // we are at element level for the result: perform the op (there may still be reduction) ElemType val = TensorOpReduction<ElemType, OPFN, ReductionOp, N, m>::Loop(pointers, opfn, reductionOp, reducingOpDims, reducingStrides); // scale val *= alpha; // combine with previous value in target matrix, then write it out auto* pout = pointers.back(); if (beta != 0) val += beta * *pout; // save *pout = val; return; } }; // perform loop over regular index k and reducing index m for N operands (counting the output), the difference // between TensorOpIteration and TensorArgOpIteration, is that the latter store the index of the result, instead of // the result. The reason that they aren't combined is because of performance. 
template <class ElemType, size_t N, int k> struct TensorArgOpIteration { static inline void Loop(array<ElemType*, N> pointers, const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, N>& regularStrides, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides, ElementWiseOperator reductionOp) { // non-scalar case: still nested result loops left array<ptrdiff_t, N> strides; for (size_t i = 0; i < N; i++) // N = a small constant, this will be unrolled strides[i] = regularStrides[i][(size_t)k]; for (size_t dim = regularOpDims[(size_t)k]; dim-- > 0;) { // need to descend into one loop deeper TensorArgOpIteration<ElemType, N, k - 1>::Loop(pointers, regularOpDims, regularStrides, reducingOpDims, reducingStrides, reductionOp); // advance the pointers for (size_t i = 0; i < N; i++) pointers[i] += strides[i]; } } }; template <class ElemType, size_t N> struct TensorArgOpIteration<ElemType, N, -1> { static inline void Loop(array<ElemType*, N> pointers, const SmallVector<size_t>&, const array<SmallVector<ptrdiff_t>, N>&, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides, ElementWiseOperator reductionOp) { // we are at element level for the result: perform the op (there may still be reduction) auto val = TensorArgOpReduction<ElemType, N, 2>::ReduceAll(pointers, reducingOpDims, reducingStrides, reductionOp); auto* pout = pointers.back(); *pout = (ElemType)val.second; return; } }; // ----------------------------------------------------------------------- // map runtime parameters N to template parameters // ----------------------------------------------------------------------- // tensor operation with k+1 dimensions (-1 means scalar) template <class ElemType, typename OPFN, typename ReductionOp, size_t N, int k> static void TensorOpWithRegularLoop(ElemType beta, const array<ElemType*, N>& pointers, ElemType alpha, const OPFN& opfn, ReductionOp reductionOp, const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, N>& regularStrides, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides) { size_t dims = reducingOpDims.size(); switch (dims) { case 2: return TensorOpIteration<ElemType, OPFN, ReductionOp, N, false /*vectorizable*/, 1, k>::Loop(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); case 1: return TensorOpIteration<ElemType, OPFN, ReductionOp, N, false /*vectorizable*/, 0, k>::Loop(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); case 0: { // if all leading dimensions are 1, we can let the compiler do some unrolling bool leadingAllOne = true; for (size_t i = 0; i < N; i++) leadingAllOne &= k >= 0 && regularStrides[i][0] == 1; if (leadingAllOne) // special version that uses a hard-coded increment of 1 for all leading dimensions return TensorOpIteration<ElemType, OPFN, ReductionOp, N, true /*vectorizable*/, -1, k>::Loop(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); else return TensorOpIteration<ElemType, OPFN, ReductionOp, N, false /*vectorizable*/, -1, k>::Loop(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); } default: LogicError("TensorOp: %d non-flattened reduction dimensions are not supported.", (int) dims); } } // tensor operation, generalized in number of arguments, operation already 
provided as a lambda // This function now expands into different k. template <class ElemType, typename OPFN, typename ReductionOp, size_t N> static void TensorOpWithFnAndReduction(ElemType beta, array<ElemType*, N> pointers, ElemType alpha, const OPFN& opfn, const ReductionOp& reductionOp, const array<size_t, N>& offsets, const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, N>& regularStrides, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides) { for (size_t i = 0; i < N; i++) // N = a small constant, this will be unrolled pointers[i] += offsets[i]; size_t dims = regularOpDims.size(); switch (dims) { // N.B. consider code size impact when adding more cases. case 5: return TensorOpWithRegularLoop<ElemType, OPFN, ReductionOp, N, 4>(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); case 4: return TensorOpWithRegularLoop<ElemType, OPFN, ReductionOp, N, 3>(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); case 3: return TensorOpWithRegularLoop<ElemType, OPFN, ReductionOp, N, 2>(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); case 2: return TensorOpWithRegularLoop<ElemType, OPFN, ReductionOp, N, 1>(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); case 1: return TensorOpWithRegularLoop<ElemType, OPFN, ReductionOp, N, 0>(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); case 0: return TensorOpWithRegularLoop<ElemType, OPFN, ReductionOp, N, -1>(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); default: LogicError("TensorOp: %d non-flattened input dimensions are not supported.", (int)dims); } } // tensor operation, generalized in number of arguments, operation already provided as a lambda // This function now expands into different reductionOps template <class ElemType, typename OPFN, size_t N> static void TensorOpWithFn(ElemType beta, array<ElemType*, N> pointers, ElemType alpha, const OPFN& opfn, ElementWiseOperator reductionOp, const array<size_t, N>& offsets, const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, N>& regularStrides, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides) { // BUGBUG: Using always 'double' as type of aggregator even for ElemType==float. Reason: otherwise some e2e test would fail as historically we // used double for aggregator of sum. But: // * for min and max reductions this is meaningless. // * It is not consitent with what we do on GPU, there we aggregate on ElemType. // * It costs performance. // TODO: apdapt e2e tests to run with aggregator of type ElemType. 
#define CaseTensorOpWithFnAndReduction(oper) \ case ElementWiseOperator::op##oper: \ return TensorOpWithFnAndReduction(beta, pointers, alpha, opfn, [](double a, double b) \ { \ return Op##oper(a, b); \ }, \ offsets, regularOpDims, regularStrides, reducingOpDims, reducingStrides) switch (reductionOp) { CaseTensorOpWithFnAndReduction(Sum); CaseTensorOpWithFnAndReduction(LogSum); CaseTensorOpWithFnAndReduction(Min); CaseTensorOpWithFnAndReduction(Max); CaseTensorOpWithFnAndReduction(ElementwiseProduct); default: LogicError("Specified ElementWiseOperator op %d not supported as reduction operation.", (int)reductionOp); } } // ----------------------------------------------------------------------- // entry points from Matrix.cpp; also map op to a lambda // ----------------------------------------------------------------------- // special tensor ops for inference speed template <class ElemType> bool CPUMatrixSpecialUnaryTensorOpImpl(ElemType beta, const CPUMatrix<ElemType>& a, CPUMatrix<ElemType>& o, ElemType alpha, ElementWiseOperator op, ElementWiseOperator reductionOp, const array<size_t, 2>& offsets, const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 2>& regularStrides, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 2>& reducingStrides); template <class ElemType> bool CPUMatrixSpecialBinaryTensorOpImpl(ElemType beta, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& o, ElemType alpha, ElementWiseOperator op, ElementWiseOperator reductionOp, const array<size_t, 3>& offsets, const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 3>& regularStrides, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 3>& reducingStrides); template <class ElemType> bool CPUMatrixSpecialTernaryTensorOpImpl(ElemType beta, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, const CPUMatrix<ElemType>& c, CPUMatrix<ElemType>& o, ElemType alpha, ElementWiseOperator op, ElementWiseOperator reductionOp, const array<size_t, 4>& offsets, const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 4>& regularStrides, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 4>& reducingStrides); // perform unary operation 'op' on a giving 'this', reinterpreting the matrices as tensors as specified by the dims and strides // This maps 'op' to a lambda. 
template <class ElemType> void CPUMatrixTensorOpImpl(ElemType beta, const CPUMatrix<ElemType>& a, CPUMatrix<ElemType>& o, ElemType alpha, ElementWiseOperator op, ElementWiseOperator reductionOp, const array<size_t, 2>& offsets, const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 2>& regularStrides, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 2>& reducingStrides) { if (reductionOp != ElementWiseOperator::opSum && reductionOp != ElementWiseOperator::opLogSum && reductionOp != ElementWiseOperator::opMin && reductionOp != ElementWiseOperator::opMax && reductionOp != ElementWiseOperator::opElementwiseProduct) InvalidArgument("TensorOp: Unary reduction operations other than opMax, opMin, opSum, and opLogSum are not implemented."); #ifdef USE_MKL if (!!(CPUMatrix<ElemType>::GetOptimizationFlags() & CPUMatrix<ElemType>::OPT_EVAL_WITH_MKL) && CPUMatrixSpecialUnaryTensorOpImpl(beta, a, o, alpha, op, reductionOp, offsets, regularOpDims, regularStrides, reducingOpDims, reducingStrides)) return; #endif // TODO: Change the lambda to take a pointer and a number of elements, so that we can pass it 1 or 4 elements, in order for it to SSE-vectorize. #define CaseUnaryTensorOp(oper) \ case ElementWiseOperator::op##oper: \ return TensorOpWithFn(beta, pointers, alpha, [](const array<ElemType*, 2>& pp) \ { \ return Op##oper((*(pp[0]))); \ }, \ reductionOp, offsets, regularOpDims, regularStrides, reducingOpDims, reducingStrides) array<ElemType*, 2> pointers = {a.Data(), o.Data()}; switch (op) { ForAllUnaryOps(CaseUnaryTensorOp); default: LogicError("TensorOp: Unknown unary op code %d.", (int) op); } } // perform binary operation 'op' on a and b giving 'this', reinterpreting the matrices as tensors as specified by the dims and strides // This maps 'op' to a lambda. template <class ElemType> void CPUMatrixTensorOpImpl(ElemType beta, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& o, ElemType alpha, ElementWiseOperator op, ElementWiseOperator reductionOp, const array<size_t, 3>& offsets, const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 3>& regularStrides, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 3>& reducingStrides) { if (reductionOp != ElementWiseOperator::opSum) InvalidArgument("TensorOp (binary): The only permitted binary reduction operation is opSum."); #ifdef USE_MKL if (!!(CPUMatrix<ElemType>::GetOptimizationFlags() & CPUMatrix<ElemType>::OPT_EVAL_WITH_MKL) && CPUMatrixSpecialBinaryTensorOpImpl(beta, a, b, o, alpha, op, reductionOp, offsets, regularOpDims, regularStrides, reducingOpDims, reducingStrides)) return; #endif #define CaseBinaryTensorOp(oper) \ case ElementWiseOperator::op##oper: \ return TensorOpWithFn(beta, pointers, alpha, [](const array<ElemType*, 3>& pp) \ { \ return Op##oper((*(pp[0])), (*(pp[1]))); \ }, \ reductionOp, offsets, regularOpDims, regularStrides, reducingOpDims, reducingStrides) array<ElemType*, 3> pointers = {a.Data(), b.Data(), o.Data()}; switch (op) { ForAllBinaryOps(CaseBinaryTensorOp); default: LogicError("TensorOp: Unknown op binary code %d.", (int) op); } } // perform ternary operation 'op' on a, and c giving 'this', reinterpreting the matrices as tensors as specified by the dims and strides // This maps 'op' to a lambda. 
template <class ElemType> void CPUMatrixTensorOpImpl(ElemType beta, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, const CPUMatrix<ElemType>& c, CPUMatrix<ElemType>& o, ElemType alpha, ElementWiseOperator op, ElementWiseOperator reductionOp, const array<size_t, 4>& offsets, const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 4>& regularStrides, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 4>& reducingStrides) { if (reductionOp != ElementWiseOperator::opSum) InvalidArgument("TensorOp: The only permitted ternary reduction operation is opSum."); #ifdef USE_MKL if (!!(CPUMatrix<ElemType>::GetOptimizationFlags() & CPUMatrix<ElemType>::OPT_EVAL_WITH_MKL) && CPUMatrixSpecialTernaryTensorOpImpl(beta, a, b, c, o, alpha, op, reductionOp, offsets, regularOpDims, regularStrides, reducingOpDims, reducingStrides)) return; #endif #define CaseTernaryTensorOp(oper) \ case ElementWiseOperator::op##oper: \ return TensorOpWithFn(beta, pointers, alpha, [](const array<ElemType*, 4>& pp) \ { \ return Op##oper((*(pp[0])), (*(pp[1])), (*(pp[2]))); \ }, \ reductionOp, offsets, regularOpDims, regularStrides, reducingOpDims, reducingStrides) array<ElemType*, 4> pointers = {a.Data(), b.Data(), c.Data(), o.Data()}; switch (op) { ForAllTernaryOps(CaseTernaryTensorOp); default: LogicError("TensorOp: Unknown ternary op code %d.", (int) op); } } template <class ElemType> void CPUMatrixTensorArgOpImpl(const CPUMatrix<ElemType>& a, CPUMatrix<ElemType>& o, ElementWiseOperator reductionOp, const array<size_t, 2>& offsets, const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 2>& regularStrides, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 2>& reducingStrides) { if (reductionOp != ElementWiseOperator::opArgmin && reductionOp != ElementWiseOperator::opArgmax) InvalidArgument("TensorOp: Arg reduction operations other than opArgmax, and opArgmin are not implemented."); if (o.GetNumElements() == 1) { o.Data()[0] = (ElemType) a.ArgOp(reductionOp); } else { const size_t N = 2; array<ElemType*, N> pointers = { a.Data(), o.Data() }; for (size_t i = 0; i < N; i++) pointers[i] += offsets[i]; switch (regularOpDims.size()) { case 2: TensorArgOpIteration<ElemType, N, 1>::Loop(pointers, regularOpDims, regularStrides, reducingOpDims, reducingStrides, reductionOp); break; case 1: TensorArgOpIteration<ElemType, N, 0>::Loop(pointers, regularOpDims, regularStrides, reducingOpDims, reducingStrides, reductionOp); break; case 0: TensorArgOpIteration<ElemType, N, -1>::Loop(pointers, regularOpDims, regularStrides, reducingOpDims, reducingStrides, reductionOp); break; default: LogicError("TensorOp: %d non-flattened input dimensions are not supported.", (int)regularOpDims.size()); } } } }}}
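This second rendering differs from the previous one only in the `#pragma omp parallel for` placed on the innermost, stride-1, no-reduction loops (which is also why the loop index is a signed int, as the TODO notes). A minimal standalone sketch of that pattern, with illustrative names and not the CNTK code itself: beta and alpha are special-cased outside the loop so the compiler can simplify the body, and each branch gets its own parallel loop.

#include <cstdio>
#include <vector>

static void AxpyLike(float beta, float alpha, const float* a, float* c, int K)
{
    // special-case beta so the hot loop has no dead multiply-add
    if (beta != 0)
    {
#pragma omp parallel for
        for (int k = 0; k < K; k++)      // signed index as required by OpenMP
            c[k] = beta * c[k] + alpha * a[k];
    }
    else
    {
#pragma omp parallel for
        for (int k = 0; k < K; k++)
            c[k] = alpha * a[k];
    }
}

int main()
{
    std::vector<float> a(8, 2.0f), c(8, 1.0f);
    AxpyLike(0.5f, 3.0f, a.data(), c.data(), (int) a.size());
    std::printf("%g\n", c[0]); // prints: 6.5 (the pragma is ignored if OpenMP is off)
}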
GB_binop__ldexp_fp32.c
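This row is a generated GraphBLAS kernel: the operator body is supplied as the GB_BINOP macro (cij = ldexpf(aij, bij) on float), the shared GB_*_template.c files included below expand that macro into the concrete eWiseAdd/eWiseMult loops, and GB_DISABLE compiles the kernel out when the operator or type is disabled. A minimal standalone sketch of the macro idiom only, with assumed names, not the actual GraphBLAS template machinery (the sketch uses std::ldexp with an explicit int cast where the generated kernel writes ldexpf(x, y) directly):

#include <cmath>
#include <cstdio>

// operator body as a macro, expanded by a shared loop (sketch of the GB_BINOP role)
#define GB_BINOP_SKETCH(z, x, y) z = std::ldexp(x, (int) (y))

static void ewise_ldexp(float* Cx, const float* Ax, const float* Bx, int n)
{
    for (int p = 0; p < n; p++)
    {
        float aij = Ax[p];               // sketch of GB_GETA
        float bij = Bx[p];               // sketch of GB_GETB
        GB_BINOP_SKETCH(Cx[p], aij, bij);
    }
}

int main()
{
    float a[3] = {1.0f, 1.5f, 3.0f};
    float b[3] = {1.0f, 2.0f, 3.0f};     // exponents
    float c[3];
    ewise_ldexp(c, a, b, 3);
    std::printf("%g %g %g\n", c[0], c[1], c[2]); // prints: 2 6 24
}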
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__ldexp_fp32) // A.*B function (eWiseMult): GB (_AemultB_08__ldexp_fp32) // A.*B function (eWiseMult): GB (_AemultB_02__ldexp_fp32) // A.*B function (eWiseMult): GB (_AemultB_04__ldexp_fp32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__ldexp_fp32) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__ldexp_fp32) // C+=b function (dense accum): GB (_Cdense_accumb__ldexp_fp32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ldexp_fp32) // C=scalar+B GB (_bind1st__ldexp_fp32) // C=scalar+B' GB (_bind1st_tran__ldexp_fp32) // C=A+scalar GB (_bind2nd__ldexp_fp32) // C=A'+scalar GB (_bind2nd_tran__ldexp_fp32) // C type: float // A type: float // B,b type: float // BinaryOp: cij = ldexpf (aij, bij) #define GB_ATYPE \ float #define GB_BTYPE \ float #define GB_CTYPE \ float // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ float aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ float bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ float t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = ldexpf (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 1 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LDEXP || GxB_NO_FP32 || GxB_NO_LDEXP_FP32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__ldexp_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__ldexp_fp32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__ldexp_fp32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type float float bwork = (*((float *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *restrict Cx = (float *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *restrict Cx = (float *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__ldexp_fp32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; 
#endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__ldexp_fp32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__ldexp_fp32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__ldexp_fp32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__ldexp_fp32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__ldexp_fp32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *Cx = (float *) Cx_output ; float x = (*((float *) x_input)) ; float *Bx = (float *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; float bij = GBX (Bx, p, false) ; Cx [p] = ldexpf (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__ldexp_fp32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; float *Cx = (float *) Cx_output ; float *Ax = (float *) Ax_input ; float y = (*((float *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; float aij = GBX (Ax, p, false) ; Cx [p] = ldexpf (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = GBX (Ax, pA, false) ; \ Cx [pC] = ldexpf (x, aij) ; \ } GrB_Info GB (_bind1st_tran__ldexp_fp32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // 
GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ float #if GB_DISABLE return (GrB_NO_VALUE) ; #else float x = (*((const float *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ float } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = GBX (Ax, pA, false) ; \ Cx [pC] = ldexpf (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__ldexp_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float y = (*((const float *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
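// Editor's note (not part of the generated GraphBLAS file above): the
// LDEXP_FP32 operator computes cij = ldexpf (aij, bij), i.e. aij scaled by
// 2 raised to the integer value of bij (the float exponent is converted to
// int at the call).  A minimal standalone check of that semantics:
#include <math.h>
#include <assert.h>

int main (void)
{
    // ldexpf (1.5f, 3) == 1.5f * 2^3 == 12.0f, exact for power-of-two scaling
    assert (ldexpf (1.5f, 3) == 12.0f) ;
    return (0) ;
}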
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__ldexp_fp32) // A.*B function (eWiseMult): GB (_AemultB_08__ldexp_fp32) // A.*B function (eWiseMult): GB (_AemultB_02__ldexp_fp32) // A.*B function (eWiseMult): GB (_AemultB_04__ldexp_fp32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__ldexp_fp32) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__ldexp_fp32) // C+=b function (dense accum): GB (_Cdense_accumb__ldexp_fp32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ldexp_fp32) // C=scalar+B GB (_bind1st__ldexp_fp32) // C=scalar+B' GB (_bind1st_tran__ldexp_fp32) // C=A+scalar GB (_bind2nd__ldexp_fp32) // C=A'+scalar GB (_bind2nd_tran__ldexp_fp32) // C type: float // A type: float // B,b type: float // BinaryOp: cij = ldexpf (aij, bij) #define GB_ATYPE \ float #define GB_BTYPE \ float #define GB_CTYPE \ float // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ float aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ float bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ float t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = ldexpf (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 1 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LDEXP || GxB_NO_FP32 || GxB_NO_LDEXP_FP32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__ldexp_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__ldexp_fp32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__ldexp_fp32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type float float bwork = (*((float *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *restrict Cx = (float *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *restrict Cx = (float *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__ldexp_fp32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; 
#endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__ldexp_fp32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__ldexp_fp32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__ldexp_fp32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__ldexp_fp32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__ldexp_fp32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *Cx = (float *) Cx_output ; float x = (*((float *) x_input)) ; float *Bx = (float *) Bx_input ; int64_t p ; for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; float bij = GBX (Bx, p, false) ; Cx [p] = ldexpf (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__ldexp_fp32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; float *Cx = (float *) Cx_output ; float *Ax = (float *) Ax_input ; float y = (*((float *) y_input)) ; for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; float aij = GBX (Ax, p, false) ; Cx [p] = ldexpf (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = GBX (Ax, pA, false) ; \ Cx [pC] = ldexpf (x, aij) ; \ } GrB_Info GB (_bind1st_tran__ldexp_fp32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ float #if GB_DISABLE return (GrB_NO_VALUE) ; #else float x = (*((const float *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ float } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = GBX (Ax, pA, false) ; \ Cx [pC] = ldexpf (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__ldexp_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float y = (*((const float *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__ldexp_fp32) // A.*B function (eWiseMult): GB (_AemultB_08__ldexp_fp32) // A.*B function (eWiseMult): GB (_AemultB_02__ldexp_fp32) // A.*B function (eWiseMult): GB (_AemultB_04__ldexp_fp32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__ldexp_fp32) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__ldexp_fp32) // C+=b function (dense accum): GB (_Cdense_accumb__ldexp_fp32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ldexp_fp32) // C=scalar+B GB (_bind1st__ldexp_fp32) // C=scalar+B' GB (_bind1st_tran__ldexp_fp32) // C=A+scalar GB (_bind2nd__ldexp_fp32) // C=A'+scalar GB (_bind2nd_tran__ldexp_fp32) // C type: float // A type: float // B,b type: float // BinaryOp: cij = ldexpf (aij, bij) #define GB_ATYPE \ float #define GB_BTYPE \ float #define GB_CTYPE \ float // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ float aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ float bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ float t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = ldexpf (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 1 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LDEXP || GxB_NO_FP32 || GxB_NO_LDEXP_FP32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__ldexp_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__ldexp_fp32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__ldexp_fp32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type float float bwork = (*((float *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *restrict Cx = (float *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *restrict Cx = (float *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__ldexp_fp32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; 
#endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__ldexp_fp32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__ldexp_fp32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__ldexp_fp32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__ldexp_fp32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__ldexp_fp32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *Cx = (float *) Cx_output ; float x = (*((float *) x_input)) ; float *Bx = (float *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; float bij = GBX (Bx, p, false) ; Cx [p] = ldexpf (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__ldexp_fp32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; float *Cx = (float *) Cx_output ; float *Ax = (float *) Ax_input ; float y = (*((float *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; float aij = GBX (Ax, p, false) ; Cx [p] = ldexpf (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = GBX (Ax, pA, false) ; \ Cx [pC] = ldexpf (x, aij) ; \ } GrB_Info GB (_bind1st_tran__ldexp_fp32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // 
GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ float #if GB_DISABLE return (GrB_NO_VALUE) ; #else float x = (*((const float *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ float } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = GBX (Ax, pA, false) ; \ Cx [pC] = ldexpf (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__ldexp_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float y = (*((const float *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
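// Editor's note: the visible difference between the two preceding renderings
// of GB_binop__ldexp_fp32.c is whether the bind1st/bind2nd entry loops carry
// the "#pragma omp parallel for" directive; the rest of the text is the same.
// A hypothetical standalone form of that parallel loop (names are
// illustrative, not GraphBLAS API) shows why static scheduling is safe: every
// entry of Cx is written independently.
#include <stdint.h>
#include <math.h>

void bind2nd_ldexp_sketch (float *Cx, const float *Ax, float y, int64_t anz,
    int nthreads)
{
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        Cx [p] = ldexpf (Ax [p], (int) y) ;   // no cross-iteration dependency
    }
}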
comm.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /** * Copyright (c) 2015 by Contributors */ #ifndef MXNET_KVSTORE_COMM_H_ #define MXNET_KVSTORE_COMM_H_ #include <dmlc/omp.h> #include <string> #include <algorithm> #include <utility> #include <limits> #include <vector> #include <tuple> #include <thread> #include "mxnet/ndarray.h" #include "gradient_compression.h" #include "../ndarray/ndarray_function.h" #include "../operator/tensor/sparse_retain-inl.h" #include "./kvstore_utils.h" namespace mxnet { namespace kvstore { /** * \brief multiple device commmunication */ class Comm { public: Comm() { pinned_ctx_ = Context::CPUPinned(0); } virtual ~Comm() { } /** * \brief init key with the data shape and storage shape */ virtual void Init(int key, const NDArrayStorageType stype, const TShape& shape, int dtype = mshadow::kFloat32) = 0; /** * \brief returns src[0] + .. + src[src.size()-1] */ virtual const NDArray& Reduce( int key, const std::vector<NDArray>& src, int priority) = 0; /** * \brief copy from src to dst[i] for every i */ virtual void Broadcast( int key, const NDArray& src, const std::vector<NDArray*> dst, int priority) = 0; /** * \brief broadcast src to dst[i] with target row_ids for every i * \param key the identifier key for the stored ndarray * \param src the source row_sparse ndarray to broadcast * \param dst a list of destination row_sparse NDArray and its target row_ids to broadcast, where the row_ids are expected to be unique and sorted in row_id.data() * \param priority the priority of the operation */ virtual void BroadcastRowSparse(int key, const NDArray& src, const std::vector<std::pair<NDArray*, NDArray>>& dst, const int priority) = 0; /** * \brief return a pinned contex */ Context pinned_ctx() const { return pinned_ctx_; } /** * \brief Sets gradient compression parameters to be able to * perform reduce with compressed gradients */ void SetGradientCompression(std::shared_ptr<GradientCompression> gc) { gc_ = gc; } protected: Context pinned_ctx_; std::shared_ptr<GradientCompression> gc_; }; /** * \brief an implemention of Comm that first copy data to CPU memeory, and then * reduce there */ class CommCPU : public Comm { public: CommCPU() { nthread_reduction_ = dmlc::GetEnv("MXNET_KVSTORE_REDUCTION_NTHREADS", 4); bigarray_bound_ = dmlc::GetEnv("MXNET_KVSTORE_BIGARRAY_BOUND", 1000 * 1000); // TODO(junwu) delete the following data member, now for benchmark only is_serial_push_ = dmlc::GetEnv("MXNET_KVSTORE_SERIAL_PUSH", 0); } virtual ~CommCPU() { } void Init(int key, const NDArrayStorageType stype, const TShape& shape, int type = mshadow::kFloat32) override { // Delayed allocation - the dense merged buffer might not be used at all if push() // only sees sparse arrays bool delay_alloc = true; merge_buf_[key].merged = NDArray(shape, pinned_ctx_, delay_alloc, 
type); } const NDArray& Reduce(int key, const std::vector<NDArray>& src, int priority) override { auto& buf = merge_buf_[key]; const auto stype = src[0].storage_type(); // avoid extra copy for single device, but it may bring problems for // abnormal usage of kvstore if (src.size() == 1) { if (stype == kDefaultStorage) { return src[0]; } else { // With 'local' kvstore, we could store the weight on CPU while compute // the gradient on GPU when the weight is extremely large. // To avoiding copying the weight to the same context of the gradient, // we always copy the gradient to merged buf. NDArray& merged = buf.merged_buf(stype); CopyFromTo(src[0], &merged, priority); return merged; } } NDArray& buf_merged = buf.merged_buf(stype); // normal dense reduce if (stype == kDefaultStorage) { std::vector<Engine::VarHandle> const_vars(src.size() - 1); std::vector<NDArray> reduce(src.size()); CopyFromTo(src[0], &buf_merged, priority); reduce[0] = buf_merged; if (buf.copy_buf.empty()) { buf.copy_buf.resize(src.size()-1); for (size_t j = 0; j < src.size() - 1; ++j) { // allocate copy buffer buf.copy_buf[j] = NDArray( src[0].shape(), pinned_ctx_, false, src[0].dtype()); } } CHECK(stype == buf.copy_buf[0].storage_type()) << "Storage type mismatch detected. " << stype << "(src) vs. " << buf.copy_buf[0].storage_type() << "(buf.copy_buf)"; for (size_t i = 1; i < src.size(); ++i) { CopyFromTo(src[i], &(buf.copy_buf[i-1]), priority); reduce[i] = buf.copy_buf[i-1]; const_vars[i-1] = reduce[i].var(); } Engine::Get()->PushAsync( [reduce, this](RunContext rctx, Engine::CallbackOnComplete on_complete) { ReduceSumCPU(reduce); on_complete(); }, Context::CPU(), const_vars, {reduce[0].var()}, FnProperty::kCPUPrioritized, priority, "KVStoreReduce"); } else { // sparse reduce std::vector<Engine::VarHandle> const_vars(src.size()); std::vector<NDArray> reduce(src.size()); if (buf.copy_buf.empty()) { buf.copy_buf.resize(src.size()); for (size_t j = 0; j < src.size(); ++j) { buf.copy_buf[j] = NDArray( src[0].storage_type(), src[0].shape(), pinned_ctx_, true, src[0].dtype()); } } CHECK(stype == buf.copy_buf[0].storage_type()) << "Storage type mismatch detected. " << stype << "(src) vs. " << buf.copy_buf[0].storage_type() << "(buf.copy_buf)"; for (size_t i = 0; i < src.size(); ++i) { CopyFromTo(src[i], &(buf.copy_buf[i]), priority); reduce[i] = buf.copy_buf[i]; const_vars[i] = reduce[i].var(); } Resource rsc = ResourceManager::Get()->Request(buf_merged.ctx(), ResourceRequest(ResourceRequest::kTempSpace)); Engine::Get()->PushAsync( [reduce, buf_merged, rsc, this](RunContext rctx, Engine::CallbackOnComplete on_complete) { NDArray out = buf_merged; is_serial_push_? ReduceSumCPUExSerial(reduce, &out) : mxnet::ndarray::ElementwiseSum(rctx.get_stream<cpu>(), rsc, reduce, &out); on_complete(); }, Context::CPU(), const_vars, {buf_merged.var(), rsc.var}, FnProperty::kCPUPrioritized, priority, "KVStoreReduce"); } return buf_merged; } void Broadcast(int key, const NDArray& src, const std::vector<NDArray*> dst, int priority) override { int mask = src.ctx().dev_mask(); if (mask == Context::kCPU) { for (auto d : dst) CopyFromTo(src, d, priority); } else { // First copy data to pinned_ctx, then broadcast. // Note that kv.init initializes the data on pinned_ctx. // This branch indicates push() with ndarrays on gpus were called, // and the source is copied to gpu ctx. // Also indicates that buffers are already initialized during push(). 
auto& buf = merge_buf_[key].merged_buf(src.storage_type()); CopyFromTo(src, &buf, priority); for (auto d : dst) CopyFromTo(buf, d, priority); } } void BroadcastRowSparse(int key, const NDArray& src, const std::vector<std::pair<NDArray*, NDArray>>& dst, const int priority) override { using namespace mshadow; CHECK_EQ(src.storage_type(), kRowSparseStorage) << "BroadcastRowSparse expects row-sparse src NDArray"; CHECK_EQ(src.ctx().dev_mask(), Context::kCPU) << "BroadcastRowSparse with src on gpu context not supported"; for (const auto& dst_kv : dst) { NDArray* out = dst_kv.first; NDArray row_id = dst_kv.second; CHECK_EQ(out->storage_type(), kRowSparseStorage) << "BroadcastRowSparse expects row_sparse dst NDArray"; CHECK_EQ(row_id.ctx().dev_mask(), Context::kCPU) << "BroadcastRowSparse with row_indices on gpu context not supported"; // retain according to unique indices const bool is_same_ctx = out->ctx() == src.ctx(); const bool is_diff_var = out->var() != src.var(); NDArray retained_cpu = (is_same_ctx && is_diff_var) ? *out : NDArray(kRowSparseStorage, src.shape(), src.ctx(), true, src.dtype(), src.aux_types()); if (!is_diff_var) { common::LogOnce("The output of row_sparse_pull() on key " + std::to_string(key) + "refers to the same NDArray as the one stored in KVStore." "Performing row_sparse_pull() with such output is going to change the " "data stored in KVStore. Incorrect result may be generated " "next time row_sparse_pull() is called. To avoid such an issue," "consider create a new NDArray buffer to store the output."); } Engine::Get()->PushAsync( [=](RunContext rctx, Engine::CallbackOnComplete on_complete) { const TBlob& indices = row_id.data(); NDArray temp = retained_cpu; // get rid the of const qualifier op::SparseRetainOpForwardRspImpl<cpu>(rctx.get_stream<cpu>(), src, indices, kWriteTo, &temp); on_complete(); }, Context::CPU(), {src.var(), row_id.var()}, {retained_cpu.var()}, FnProperty::kNormal, priority, "KVStoreSparseRetain"); // if retained_cpu == out, CopyFromTo will ignore the copy operation CopyFromTo(retained_cpu, out, priority); } } private: // reduce sum into val[0] inline void ReduceSumCPU(const std::vector<NDArray> &in_data) { MSHADOW_TYPE_SWITCH(in_data[0].dtype(), DType, { std::vector<DType*> dptr(in_data.size()); for (size_t i = 0; i < in_data.size(); ++i) { TBlob data = in_data[i].data(); CHECK(data.CheckContiguous()); dptr[i] = data.FlatTo2D<cpu, DType>().dptr_; } size_t total = in_data[0].shape().Size(); ReduceSumCPUImpl(dptr, total); }); } // serial implementation of reduce sum for row sparse NDArray. 
inline void ReduceSumCPUExSerial(const std::vector<NDArray> &in, NDArray *out) { using namespace rowsparse; using namespace mshadow; auto stype = out->storage_type(); CHECK_EQ(stype, kRowSparseStorage) << "Unexpected storage type " << stype; size_t total_num_rows = 0; size_t num_in = in.size(); // skip the ones with empty indices and values std::vector<bool> skip(num_in, false); // the values tensor of the inputs MSHADOW_TYPE_SWITCH(out->dtype(), DType, { MSHADOW_IDX_TYPE_SWITCH(out->aux_type(kIdx), IType, { std::vector<Tensor<cpu, 2, DType>> in_vals(num_in); std::vector<Tensor<cpu, 1, IType>> in_indices(num_in); // offset to the values tensor of all inputs std::vector<size_t> offsets(num_in, 0); std::vector<size_t> num_rows(num_in, 0); for (size_t i = 0; i < num_in; i++) { if (!in[i].storage_initialized()) { skip[i] = true; continue; } auto size = in[i].aux_shape(kIdx).Size(); num_rows[i] = size; total_num_rows += size; in_vals[i] = in[i].data().FlatTo2D<cpu, DType>(); in_indices[i] = in[i].aux_data(kIdx).FlatTo1D<cpu, IType>(); } std::vector<IType> indices; indices.reserve(total_num_rows); // gather indices from all inputs for (size_t i = 0; i < num_in; i++) { for (size_t j = 0; j < num_rows[i]; j++) { indices.emplace_back(in_indices[i][j]); } } CHECK_EQ(indices.size(), total_num_rows); // dedup indices std::sort(indices.begin(), indices.end()); indices.resize(std::unique(indices.begin(), indices.end()) - indices.begin()); // the one left are unique non-zero rows size_t nnr = indices.size(); // allocate memory for output out->CheckAndAlloc({Shape1(nnr)}); auto idx_data = out->aux_data(kIdx).FlatTo1D<cpu, IType>(); auto val_data = out->data().FlatTo2D<cpu, DType>(); for (size_t i = 0; i < nnr; i++) { // copy indices back idx_data[i] = indices[i]; bool zeros = true; for (size_t j = 0; j < num_in; j++) { if (skip[j]) continue; size_t offset = offsets[j]; if (offset < num_rows[j]) { if (indices[i] == in_indices[j][offset]) { if (zeros) { Copy(val_data[i], in_vals[j][offset], nullptr); zeros = false; } else { val_data[i] += in_vals[j][offset]; } offsets[j] += 1; } } } } }); }); } template<typename DType> inline static void ReduceSumCPU( const std::vector<DType*> &dptr, size_t offset, index_t size) { using namespace mshadow; // NOLINT(*) Tensor<cpu, 1, DType> in_0(dptr[0] + offset, Shape1(size)); for (size_t i = 1; i < dptr.size(); i+=4) { switch (dptr.size() - i) { case 1: { Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size)); in_0 += in_1; break; } case 2: { Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size)); Tensor<cpu, 1, DType> in_2(dptr[i+1] + offset, Shape1(size)); in_0 += in_1 + in_2; break; } case 3: { Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size)); Tensor<cpu, 1, DType> in_2(dptr[i+1] + offset, Shape1(size)); Tensor<cpu, 1, DType> in_3(dptr[i+2] + offset, Shape1(size)); in_0 += in_1 + in_2 + in_3; break; } default: { Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size)); Tensor<cpu, 1, DType> in_2(dptr[i+1] + offset, Shape1(size)); Tensor<cpu, 1, DType> in_3(dptr[i+2] + offset, Shape1(size)); Tensor<cpu, 1, DType> in_4(dptr[i+3] + offset, Shape1(size)); in_0 += in_1 + in_2 + in_3 + in_4; break; } } } } template<typename DType> inline void ReduceSumCPUImpl(std::vector<DType*> dptr, size_t total) { const size_t step = std::min(bigarray_bound_, static_cast<size_t>(4 << 10)); long ntask = (total + step - 1) / step; // NOLINT(*) if (total < bigarray_bound_ || nthread_reduction_ <= 1) { ReduceSumCPU(dptr, 0, total); } else { #pragma omp parallel for 
schedule(static) num_threads(nthread_reduction_) for (long j = 0; j < ntask; ++j) { // NOLINT(*) size_t k = static_cast<size_t>(j); size_t begin = std::min(k * step, total); size_t end = std::min((k + 1) * step, total); if (j == ntask - 1) CHECK_EQ(end, total); ReduceSumCPU(dptr, begin, static_cast<index_t>(end - begin)); } } } /// \brief temporal space for pushing and pulling struct BufferEntry { /// \brief the merged value NDArray merged; /// \brief the cpu buffer for gpu data std::vector<NDArray> copy_buf; /// \brief the merged buffer for the given storage type inline NDArray& merged_buf(NDArrayStorageType stype) { if (stype == kDefaultStorage) { return merged; } CHECK(stype == kRowSparseStorage) << "unexpected storage type " << stype; // check if sparse_merged is initialized if (sparse_merged.is_none()) { CHECK(!merged.is_none()); sparse_merged = NDArray(kRowSparseStorage, merged.shape(), merged.ctx(), true, merged.dtype()); } return sparse_merged; } private: /// \brief the sparse merged value NDArray sparse_merged; }; std::unordered_map<int, BufferEntry> merge_buf_; size_t bigarray_bound_; int nthread_reduction_; bool is_serial_push_; }; /** * \brief an implementation of Comm that performs reduction on device * directly. * * It is faster if the total device-to-device bandwidths is larger than * device-to-cpu, which is often true for 4 or 8 GPUs. But it uses more device * memory. */ class CommDevice : public Comm { public: CommDevice() { inited_ = false; } virtual ~CommDevice() { } void Init(int key, const NDArrayStorageType stype, const TShape& shape, int dtype = mshadow::kFloat32) override { sorted_key_attrs_.emplace_back(key, shape, dtype); inited_ = false; } void InitBuffersAndComm(const std::vector<NDArray>& src) { if (!inited_) { std::vector<Context> devs; for (const auto& a : src) { devs.push_back(a.ctx()); } InitMergeBuffer(devs); if (dmlc::GetEnv("MXNET_ENABLE_GPU_P2P", 1)) { EnableP2P(devs); } } } const NDArray& ReduceRowSparse(int key, const std::vector<NDArray>& src, int priority) { auto& buf = merge_buf_[key]; std::vector<NDArray> reduce(src.size()); const NDArrayStorageType stype = src[0].storage_type(); NDArray& buf_merged = buf.merged_buf(stype); if (buf.copy_buf.empty()) { // initialize buffer for copying during reduce buf.copy_buf.resize(src.size()); for (size_t j = 0; j < src.size(); ++j) { buf.copy_buf[j] = NDArray(stype, src[0].shape(), buf_merged.ctx(), true, src[0].dtype()); } } CHECK(src[0].storage_type() == buf.copy_buf[0].storage_type()) << "Storage type mismatch detected. " << src[0].storage_type() << "(src) vs. 
" << buf.copy_buf[0].storage_type() << "(buf.copy_buf)"; for (size_t i = 0; i < src.size(); ++i) { CopyFromTo(src[i], &(buf.copy_buf[i]), priority); reduce[i] = buf.copy_buf[i]; } ElementwiseSum(reduce, &buf_merged, priority); return buf_merged; } const NDArray& Reduce(int key, const std::vector<NDArray>& src, int priority) override { // when this reduce is called from kvstore_dist, gc is not set // we don't do compression twice in dist_sync_device if ((gc_ != nullptr) && (gc_->get_type() != CompressionType::kNone)) { return ReduceCompressed(key, src, priority); } // avoid extra copy for single device, but it may bring problems for // abnormal usage of kvstore if (src.size() == 1) { return src[0]; } InitBuffersAndComm(src); auto& buf = merge_buf_[key]; const NDArrayStorageType stype = src[0].storage_type(); NDArray& buf_merged = buf.merged_buf(stype); // normal dense reduce if (stype == kDefaultStorage) { CopyFromTo(src[0], &buf_merged, priority); std::vector<NDArray> reduce(src.size()); reduce[0] = buf_merged; if (buf.copy_buf.empty()) { // TODO(mli) this results in large device memory usage for huge ndarray, // such as the largest fullc in VGG. consider to do segment reduce with // NDArray.Slice or gpu direct memory access. for the latter, we need to // remove some ctx check, and also it reduces 20% perf buf.copy_buf.resize(src.size()-1); for (size_t i = 0; i < src.size()-1; ++i) { buf.copy_buf[i] = NDArray( buf_merged.shape(), buf_merged.ctx(), false, buf_merged.dtype()); } } for (size_t i = 0; i < src.size()-1; ++i) { CopyFromTo(src[i+1], &(buf.copy_buf[i]), priority); reduce[i+1] = buf.copy_buf[i]; } ElementwiseSum(reduce, &buf_merged, priority); } else { // sparse reduce buf_merged = ReduceRowSparse(key, src, priority); } return buf_merged; } const NDArray& ReduceCompressed(int key, const std::vector<NDArray>& src, int priority) { InitBuffersAndComm(src); auto& buf = merge_buf_[key]; std::vector<NDArray> reduce(src.size()); if (buf.copy_buf.empty()) { // one buf for each context buf.copy_buf.resize(src.size()); buf.compressed_recv_buf.resize(src.size()); buf.compressed_send_buf.resize(src.size()); buf.residual.resize(src.size()); for (size_t i = 0; i < src.size(); ++i) { buf.copy_buf[i] = NDArray(buf.merged.shape(), buf.merged.ctx(), false, buf.merged.dtype()); buf.residual[i] = NDArray(buf.merged.shape(), src[i].ctx(), false, buf.merged.dtype()); buf.residual[i] = 0; int64_t small_size = gc_->GetCompressedSize(buf.merged.shape().Size()); buf.compressed_recv_buf[i] = NDArray(TShape{small_size}, buf.merged.ctx(), false, buf.merged.dtype()); buf.compressed_send_buf[i] = NDArray(TShape{small_size}, src[i].ctx(), false, buf.merged.dtype()); } } for (size_t i = 0; i < src.size(); ++i) { // compress before copy // this is done even if the data is on same context as copy_buf because // we don't want the training to be biased towards data on this GPU gc_->Quantize(src[i], &(buf.compressed_send_buf[i]), &(buf.residual[i]), priority); if (buf.compressed_send_buf[i].ctx() != buf.compressed_recv_buf[i].ctx()) { CopyFromTo(buf.compressed_send_buf[i], &(buf.compressed_recv_buf[i]), priority); } else { // avoid memory copy when they are on same context buf.compressed_recv_buf[i] = buf.compressed_send_buf[i]; } gc_->Dequantize(buf.compressed_recv_buf[i], &(buf.copy_buf[i]), priority); reduce[i] = buf.copy_buf[i]; } ElementwiseSum(reduce, &buf.merged); return buf.merged; } void Broadcast(int key, const NDArray& src, const std::vector<NDArray*> dst, int priority) override { if (!inited_) { // copy to 
a random device first int dev_id = key % dst.size(); CopyFromTo(src, dst[dev_id], priority); for (size_t i = 0; i < dst.size(); ++i) { if (i != static_cast<size_t>(dev_id)) { CopyFromTo(*dst[dev_id], dst[i], priority); } } } else { auto& buf_merged = merge_buf_[key].merged_buf(src.storage_type()); CopyFromTo(src, &buf_merged, priority); for (auto d : dst) { CopyFromTo(buf_merged, d, priority); } } } void BroadcastRowSparse(int key, const NDArray& src, const std::vector<std::pair<NDArray*, NDArray>>& dst, const int priority) override { CHECK_EQ(src.storage_type(), kRowSparseStorage) << "BroadcastRowSparse expects row-sparse src NDArray"; for (const auto& dst_kv : dst) { NDArray* out = dst_kv.first; NDArray row_id = dst_kv.second; CHECK_EQ(out->storage_type(), kRowSparseStorage) << "BroadcastRowSparse expects row_sparse dst NDArray"; CHECK_EQ(row_id.ctx(), src.ctx()) << "row_id and src are expected to be on the same context"; // retain according to indices const bool is_same_ctx = out->ctx() == src.ctx(); const bool is_diff_var = out->var() != src.var(); NDArray retained_gpu = (is_same_ctx && is_diff_var) ? *out : NDArray(kRowSparseStorage, out->shape(), src.ctx(), true, out->dtype(), out->aux_types()); if (!is_diff_var) { common::LogOnce("The output of row_sparse_pull() on key " + std::to_string(key) + "refers to the same NDArray as the one stored in KVStore." "Performing row_sparse_pull() with such output is going to change the " "data stored in KVStore. Incorrect result may be generated " "next time row_sparse_pull() is called. To avoid such an issue," "consider create a new NDArray buffer to store the output."); } bool is_gpu = retained_gpu.ctx().dev_mask() == gpu::kDevMask; Engine::Get()->PushAsync([=](RunContext rctx, Engine::CallbackOnComplete on_complete) { const TBlob& indices = row_id.data(); using namespace mxnet::common; NDArray temp = retained_gpu; switch (temp.ctx().dev_mask()) { case cpu::kDevMask: { SparseRetainOpForwardRspWrapper<cpu>(rctx.get_stream<cpu>(), src, indices, kWriteTo, &temp); break; } #if MXNET_USE_GPU case gpu::kDevMask: { SparseRetainOpForwardRspWrapper<gpu>(rctx.get_stream<gpu>(), src, indices, kWriteTo, &temp); // wait for GPU operations to complete rctx.get_stream<gpu>()->Wait(); break; } #endif default: LOG(FATAL) << MXNET_GPU_NOT_ENABLED_ERROR; } on_complete(); }, retained_gpu.ctx(), {src.var(), row_id.var()}, {retained_gpu.var()}, is_gpu ? 
FnProperty::kGPUPrioritized : FnProperty::kCPUPrioritized, priority, "KVStoreSparseRetain"); CopyFromTo(retained_gpu, out, priority); } } using KeyAttrs = std::tuple<int, TShape, int>; // try to allocate buff on device evenly void InitMergeBuffer(const std::vector<Context>& devs) { std::sort(sorted_key_attrs_.begin(), sorted_key_attrs_.end(), []( const KeyAttrs& a, const KeyAttrs& b) { return std::get<1>(a).Size() > std::get<1>(b).Size(); }); std::unordered_map<int, std::pair<Context, size_t>> ctx_info; for (auto d : devs) { ctx_info[d.dev_id] = std::make_pair(d, 0); } for (auto& sorted_key_attr : sorted_key_attrs_) { const int key = std::get<0>(sorted_key_attr); const TShape& shape = std::get<1>(sorted_key_attr); const int type = std::get<2>(sorted_key_attr); auto& buf = merge_buf_[key]; Context ctx; size_t min_size = std::numeric_limits<size_t>::max(); for (auto& ctx_info_kv : ctx_info) { size_t size = ctx_info_kv.second.second; if (size <= min_size) { ctx = ctx_info_kv.second.first; min_size = size; } } // Delayed allocation - as the dense merged buffer might not be used at all if push() // only sees sparse arrays if (buf.merged.is_none()) { bool delay_alloc = true; buf.merged = NDArray(shape, ctx, delay_alloc, type); } ctx_info[ctx.dev_id].second += shape.Size(); } inited_ = true; } private: void EnableP2P(const std::vector<Context>& devs) { #if MXNET_USE_GPU std::vector<int> gpus; for (const auto& d : devs) { if (d.dev_mask() == gpu::kDevMask) { gpus.push_back(d.dev_id); } } int n = static_cast<int>(gpus.size()); int enabled = 0; std::vector<int> p2p(n*n); // Restores active device to what it was before EnableP2P mxnet::common::cuda::DeviceStore device_store; for (int i = 0; i < n; ++i) { device_store.SetDevice(gpus[i]); for (int j = 0; j < n; j++) { int access; hipDeviceCanAccessPeer(&access, gpus[i], gpus[j]); if (access) { hipError_t e = hipDeviceEnablePeerAccess(gpus[j], 0); if (e == hipSuccess || e == hipErrorPeerAccessAlreadyEnabled) { ++enabled; p2p[i*n+j] = 1; } } } } if (enabled != n*(n-1)) { // print warning info if not fully enabled LOG(WARNING) << "only " << enabled << " out of " << n*(n-1) << " GPU pairs are enabled direct access. " << "It may affect the performance. " << "You can set MXNET_ENABLE_GPU_P2P=0 to turn it off"; std::string access(n, '.'); for (int i = 0; i < n; ++i) { for (int j = 0; j < n; ++j) { access[j] = p2p[i*n+j] ? 
'v' : '.'; } LOG(WARNING) << access; } } #endif } /// \brief temporal space for pushing and pulling struct BufferEntry { /// \brief the dense merged value for reduce and broadcast operations NDArray merged; /// \brief the gpu buffer for copy during reduce operation std::vector<NDArray> copy_buf; /// \brief the residual buffer for gradient compression std::vector<NDArray> residual; /// \brief the small buffer for compressed data in sender std::vector<NDArray> compressed_send_buf; /// \brief the small buffer for compressed data in receiver std::vector<NDArray> compressed_recv_buf; /// \brief the merged buffer for the given storage type (could be either dense or row_sparse) inline NDArray& merged_buf(NDArrayStorageType stype) { if (stype == kDefaultStorage) { CHECK(!merged.is_none()) << "unintialized merge buffer detected"; return merged; } CHECK(stype == kRowSparseStorage) << "unexpected storage type " << stype; // check if sparse_merged is initialized if (sparse_merged.is_none()) { CHECK(!merged.is_none()); sparse_merged = NDArray(kRowSparseStorage, merged.shape(), merged.ctx(), true, merged.dtype()); } return sparse_merged; } private: /// \brief the sparse merged value for reduce and rowsparse broadcast operations NDArray sparse_merged; }; std::unordered_map<int, BufferEntry> merge_buf_; public: bool inited_; std::vector<KeyAttrs> sorted_key_attrs_; }; } // namespace kvstore } // namespace mxnet #endif // MXNET_KVSTORE_COMM_H_
/** * Copyright (c) 2015 by Contributors */ #ifndef MXNET_KVSTORE_COMM_H_ #define MXNET_KVSTORE_COMM_H_ #include <dmlc/omp.h> #include <string> #include <algorithm> #include <utility> #include <limits> #include <vector> #include <tuple> #include <thread> #include "mxnet/ndarray.h" #include "gradient_compression.h" #include "../ndarray/ndarray_function.h" #include "../operator/tensor/sparse_retain-inl.h" #include "./kvstore_utils.h" namespace mxnet { namespace kvstore { /** * \brief multiple device commmunication */ class Comm { public: Comm() { pinned_ctx_ = Context::CPUPinned(0); } virtual ~Comm() { } /** * \brief init key with the data shape and storage shape */ virtual void Init(int key, const NDArrayStorageType stype, const TShape& shape, int dtype = mshadow::kFloat32) = 0; /** * \brief returns src[0] + .. + src[src.size()-1] */ virtual const NDArray& Reduce( int key, const std::vector<NDArray>& src, int priority) = 0; /** * \brief copy from src to dst[i] for every i */ virtual void Broadcast( int key, const NDArray& src, const std::vector<NDArray*> dst, int priority) = 0; /** * \brief broadcast src to dst[i] with target row_ids for every i * \param key the identifier key for the stored ndarray * \param src the source row_sparse ndarray to broadcast * \param dst a list of destination row_sparse NDArray and its target row_ids to broadcast, where the row_ids are expected to be unique and sorted in row_id.data() * \param priority the priority of the operation */ virtual void BroadcastRowSparse(int key, const NDArray& src, const std::vector<std::pair<NDArray*, NDArray>>& dst, const int priority) = 0; /** * \brief return a pinned contex */ Context pinned_ctx() const { return pinned_ctx_; } /** * \brief Sets gradient compression parameters to be able to * perform reduce with compressed gradients */ void SetGradientCompression(std::shared_ptr<GradientCompression> gc) { gc_ = gc; } protected: Context pinned_ctx_; std::shared_ptr<GradientCompression> gc_; }; /** * \brief an implemention of Comm that first copy data to CPU memeory, and then * reduce there */ class CommCPU : public Comm { public: CommCPU() { nthread_reduction_ = dmlc::GetEnv("MXNET_KVSTORE_REDUCTION_NTHREADS", 4); bigarray_bound_ = dmlc::GetEnv("MXNET_KVSTORE_BIGARRAY_BOUND", 1000 * 1000); // TODO(junwu) delete the following data member, now for benchmark only is_serial_push_ = dmlc::GetEnv("MXNET_KVSTORE_SERIAL_PUSH", 0); } virtual ~CommCPU() { } void Init(int key, const NDArrayStorageType stype, const TShape& shape, int type = mshadow::kFloat32) override { // Delayed allocation - the dense merged buffer might not be used at all if push() // only sees sparse arrays bool delay_alloc = true; merge_buf_[key].merged = NDArray(shape, pinned_ctx_, delay_alloc, type); } const NDArray& Reduce(int key, const std::vector<NDArray>& src, int priority) override { auto& buf = merge_buf_[key]; const auto stype = src[0].storage_type(); // avoid extra copy for single device, but it may bring problems for // abnormal usage of kvstore if (src.size() == 1) { if (stype == kDefaultStorage) { return src[0]; } else { // With 'local' kvstore, we could store the weight on CPU while compute // the gradient on GPU when the weight is extremely large. // To avoiding copying the weight to the same context of the gradient, // we always copy the gradient to merged buf. 
NDArray& merged = buf.merged_buf(stype); CopyFromTo(src[0], &merged, priority); return merged; } } NDArray& buf_merged = buf.merged_buf(stype); // normal dense reduce if (stype == kDefaultStorage) { std::vector<Engine::VarHandle> const_vars(src.size() - 1); std::vector<NDArray> reduce(src.size()); CopyFromTo(src[0], &buf_merged, priority); reduce[0] = buf_merged; if (buf.copy_buf.empty()) { buf.copy_buf.resize(src.size()-1); for (size_t j = 0; j < src.size() - 1; ++j) { // allocate copy buffer buf.copy_buf[j] = NDArray( src[0].shape(), pinned_ctx_, false, src[0].dtype()); } } CHECK(stype == buf.copy_buf[0].storage_type()) << "Storage type mismatch detected. " << stype << "(src) vs. " << buf.copy_buf[0].storage_type() << "(buf.copy_buf)"; for (size_t i = 1; i < src.size(); ++i) { CopyFromTo(src[i], &(buf.copy_buf[i-1]), priority); reduce[i] = buf.copy_buf[i-1]; const_vars[i-1] = reduce[i].var(); } Engine::Get()->PushAsync( [reduce, this](RunContext rctx, Engine::CallbackOnComplete on_complete) { ReduceSumCPU(reduce); on_complete(); }, Context::CPU(), const_vars, {reduce[0].var()}, FnProperty::kCPUPrioritized, priority, "KVStoreReduce"); } else { // sparse reduce std::vector<Engine::VarHandle> const_vars(src.size()); std::vector<NDArray> reduce(src.size()); if (buf.copy_buf.empty()) { buf.copy_buf.resize(src.size()); for (size_t j = 0; j < src.size(); ++j) { buf.copy_buf[j] = NDArray( src[0].storage_type(), src[0].shape(), pinned_ctx_, true, src[0].dtype()); } } CHECK(stype == buf.copy_buf[0].storage_type()) << "Storage type mismatch detected. " << stype << "(src) vs. " << buf.copy_buf[0].storage_type() << "(buf.copy_buf)"; for (size_t i = 0; i < src.size(); ++i) { CopyFromTo(src[i], &(buf.copy_buf[i]), priority); reduce[i] = buf.copy_buf[i]; const_vars[i] = reduce[i].var(); } Resource rsc = ResourceManager::Get()->Request(buf_merged.ctx(), ResourceRequest(ResourceRequest::kTempSpace)); Engine::Get()->PushAsync( [reduce, buf_merged, rsc, this](RunContext rctx, Engine::CallbackOnComplete on_complete) { NDArray out = buf_merged; is_serial_push_? ReduceSumCPUExSerial(reduce, &out) : mxnet::ndarray::ElementwiseSum(rctx.get_stream<cpu>(), rsc, reduce, &out); on_complete(); }, Context::CPU(), const_vars, {buf_merged.var(), rsc.var}, FnProperty::kCPUPrioritized, priority, "KVStoreReduce"); } return buf_merged; } void Broadcast(int key, const NDArray& src, const std::vector<NDArray*> dst, int priority) override { int mask = src.ctx().dev_mask(); if (mask == Context::kCPU) { for (auto d : dst) CopyFromTo(src, d, priority); } else { // First copy data to pinned_ctx, then broadcast. // Note that kv.init initializes the data on pinned_ctx. // This branch indicates push() with ndarrays on gpus were called, // and the source is copied to gpu ctx. // Also indicates that buffers are already initialized during push(). 
auto& buf = merge_buf_[key].merged_buf(src.storage_type()); CopyFromTo(src, &buf, priority); for (auto d : dst) CopyFromTo(buf, d, priority); } } void BroadcastRowSparse(int key, const NDArray& src, const std::vector<std::pair<NDArray*, NDArray>>& dst, const int priority) override { using namespace mshadow; CHECK_EQ(src.storage_type(), kRowSparseStorage) << "BroadcastRowSparse expects row-sparse src NDArray"; CHECK_EQ(src.ctx().dev_mask(), Context::kCPU) << "BroadcastRowSparse with src on gpu context not supported"; for (const auto& dst_kv : dst) { NDArray* out = dst_kv.first; NDArray row_id = dst_kv.second; CHECK_EQ(out->storage_type(), kRowSparseStorage) << "BroadcastRowSparse expects row_sparse dst NDArray"; CHECK_EQ(row_id.ctx().dev_mask(), Context::kCPU) << "BroadcastRowSparse with row_indices on gpu context not supported"; // retain according to unique indices const bool is_same_ctx = out->ctx() == src.ctx(); const bool is_diff_var = out->var() != src.var(); NDArray retained_cpu = (is_same_ctx && is_diff_var) ? *out : NDArray(kRowSparseStorage, src.shape(), src.ctx(), true, src.dtype(), src.aux_types()); if (!is_diff_var) { common::LogOnce("The output of row_sparse_pull() on key " + std::to_string(key) + "refers to the same NDArray as the one stored in KVStore." "Performing row_sparse_pull() with such output is going to change the " "data stored in KVStore. Incorrect result may be generated " "next time row_sparse_pull() is called. To avoid such an issue," "consider create a new NDArray buffer to store the output."); } Engine::Get()->PushAsync( [=](RunContext rctx, Engine::CallbackOnComplete on_complete) { const TBlob& indices = row_id.data(); NDArray temp = retained_cpu; // get rid the of const qualifier op::SparseRetainOpForwardRspImpl<cpu>(rctx.get_stream<cpu>(), src, indices, kWriteTo, &temp); on_complete(); }, Context::CPU(), {src.var(), row_id.var()}, {retained_cpu.var()}, FnProperty::kNormal, priority, "KVStoreSparseRetain"); // if retained_cpu == out, CopyFromTo will ignore the copy operation CopyFromTo(retained_cpu, out, priority); } } private: // reduce sum into val[0] inline void ReduceSumCPU(const std::vector<NDArray> &in_data) { MSHADOW_TYPE_SWITCH(in_data[0].dtype(), DType, { std::vector<DType*> dptr(in_data.size()); for (size_t i = 0; i < in_data.size(); ++i) { TBlob data = in_data[i].data(); CHECK(data.CheckContiguous()); dptr[i] = data.FlatTo2D<cpu, DType>().dptr_; } size_t total = in_data[0].shape().Size(); ReduceSumCPUImpl(dptr, total); }); } // serial implementation of reduce sum for row sparse NDArray. 
inline void ReduceSumCPUExSerial(const std::vector<NDArray> &in, NDArray *out) { using namespace rowsparse; using namespace mshadow; auto stype = out->storage_type(); CHECK_EQ(stype, kRowSparseStorage) << "Unexpected storage type " << stype; size_t total_num_rows = 0; size_t num_in = in.size(); // skip the ones with empty indices and values std::vector<bool> skip(num_in, false); // the values tensor of the inputs MSHADOW_TYPE_SWITCH(out->dtype(), DType, { MSHADOW_IDX_TYPE_SWITCH(out->aux_type(kIdx), IType, { std::vector<Tensor<cpu, 2, DType>> in_vals(num_in); std::vector<Tensor<cpu, 1, IType>> in_indices(num_in); // offset to the values tensor of all inputs std::vector<size_t> offsets(num_in, 0); std::vector<size_t> num_rows(num_in, 0); for (size_t i = 0; i < num_in; i++) { if (!in[i].storage_initialized()) { skip[i] = true; continue; } auto size = in[i].aux_shape(kIdx).Size(); num_rows[i] = size; total_num_rows += size; in_vals[i] = in[i].data().FlatTo2D<cpu, DType>(); in_indices[i] = in[i].aux_data(kIdx).FlatTo1D<cpu, IType>(); } std::vector<IType> indices; indices.reserve(total_num_rows); // gather indices from all inputs for (size_t i = 0; i < num_in; i++) { for (size_t j = 0; j < num_rows[i]; j++) { indices.emplace_back(in_indices[i][j]); } } CHECK_EQ(indices.size(), total_num_rows); // dedup indices std::sort(indices.begin(), indices.end()); indices.resize(std::unique(indices.begin(), indices.end()) - indices.begin()); // the one left are unique non-zero rows size_t nnr = indices.size(); // allocate memory for output out->CheckAndAlloc({Shape1(nnr)}); auto idx_data = out->aux_data(kIdx).FlatTo1D<cpu, IType>(); auto val_data = out->data().FlatTo2D<cpu, DType>(); for (size_t i = 0; i < nnr; i++) { // copy indices back idx_data[i] = indices[i]; bool zeros = true; for (size_t j = 0; j < num_in; j++) { if (skip[j]) continue; size_t offset = offsets[j]; if (offset < num_rows[j]) { if (indices[i] == in_indices[j][offset]) { if (zeros) { Copy(val_data[i], in_vals[j][offset], nullptr); zeros = false; } else { val_data[i] += in_vals[j][offset]; } offsets[j] += 1; } } } } }); }); } template<typename DType> inline static void ReduceSumCPU( const std::vector<DType*> &dptr, size_t offset, index_t size) { using namespace mshadow; // NOLINT(*) Tensor<cpu, 1, DType> in_0(dptr[0] + offset, Shape1(size)); for (size_t i = 1; i < dptr.size(); i+=4) { switch (dptr.size() - i) { case 1: { Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size)); in_0 += in_1; break; } case 2: { Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size)); Tensor<cpu, 1, DType> in_2(dptr[i+1] + offset, Shape1(size)); in_0 += in_1 + in_2; break; } case 3: { Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size)); Tensor<cpu, 1, DType> in_2(dptr[i+1] + offset, Shape1(size)); Tensor<cpu, 1, DType> in_3(dptr[i+2] + offset, Shape1(size)); in_0 += in_1 + in_2 + in_3; break; } default: { Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size)); Tensor<cpu, 1, DType> in_2(dptr[i+1] + offset, Shape1(size)); Tensor<cpu, 1, DType> in_3(dptr[i+2] + offset, Shape1(size)); Tensor<cpu, 1, DType> in_4(dptr[i+3] + offset, Shape1(size)); in_0 += in_1 + in_2 + in_3 + in_4; break; } } } } template<typename DType> inline void ReduceSumCPUImpl(std::vector<DType*> dptr, size_t total) { const size_t step = std::min(bigarray_bound_, static_cast<size_t>(4 << 10)); long ntask = (total + step - 1) / step; // NOLINT(*) if (total < bigarray_bound_ || nthread_reduction_ <= 1) { ReduceSumCPU(dptr, 0, total); } else { for (long j = 0; j < ntask; ++j) 
{ // NOLINT(*) size_t k = static_cast<size_t>(j); size_t begin = std::min(k * step, total); size_t end = std::min((k + 1) * step, total); if (j == ntask - 1) CHECK_EQ(end, total); ReduceSumCPU(dptr, begin, static_cast<index_t>(end - begin)); } } } /// \brief temporal space for pushing and pulling struct BufferEntry { /// \brief the merged value NDArray merged; /// \brief the cpu buffer for gpu data std::vector<NDArray> copy_buf; /// \brief the merged buffer for the given storage type inline NDArray& merged_buf(NDArrayStorageType stype) { if (stype == kDefaultStorage) { return merged; } CHECK(stype == kRowSparseStorage) << "unexpected storage type " << stype; // check if sparse_merged is initialized if (sparse_merged.is_none()) { CHECK(!merged.is_none()); sparse_merged = NDArray(kRowSparseStorage, merged.shape(), merged.ctx(), true, merged.dtype()); } return sparse_merged; } private: /// \brief the sparse merged value NDArray sparse_merged; }; std::unordered_map<int, BufferEntry> merge_buf_; size_t bigarray_bound_; int nthread_reduction_; bool is_serial_push_; }; /** * \brief an implementation of Comm that performs reduction on device * directly. * * It is faster if the total device-to-device bandwidths is larger than * device-to-cpu, which is often true for 4 or 8 GPUs. But it uses more device * memory. */ class CommDevice : public Comm { public: CommDevice() { inited_ = false; } virtual ~CommDevice() { } void Init(int key, const NDArrayStorageType stype, const TShape& shape, int dtype = mshadow::kFloat32) override { sorted_key_attrs_.emplace_back(key, shape, dtype); inited_ = false; } void InitBuffersAndComm(const std::vector<NDArray>& src) { if (!inited_) { std::vector<Context> devs; for (const auto& a : src) { devs.push_back(a.ctx()); } InitMergeBuffer(devs); if (dmlc::GetEnv("MXNET_ENABLE_GPU_P2P", 1)) { EnableP2P(devs); } } } const NDArray& ReduceRowSparse(int key, const std::vector<NDArray>& src, int priority) { auto& buf = merge_buf_[key]; std::vector<NDArray> reduce(src.size()); const NDArrayStorageType stype = src[0].storage_type(); NDArray& buf_merged = buf.merged_buf(stype); if (buf.copy_buf.empty()) { // initialize buffer for copying during reduce buf.copy_buf.resize(src.size()); for (size_t j = 0; j < src.size(); ++j) { buf.copy_buf[j] = NDArray(stype, src[0].shape(), buf_merged.ctx(), true, src[0].dtype()); } } CHECK(src[0].storage_type() == buf.copy_buf[0].storage_type()) << "Storage type mismatch detected. " << src[0].storage_type() << "(src) vs. 
" << buf.copy_buf[0].storage_type() << "(buf.copy_buf)"; for (size_t i = 0; i < src.size(); ++i) { CopyFromTo(src[i], &(buf.copy_buf[i]), priority); reduce[i] = buf.copy_buf[i]; } ElementwiseSum(reduce, &buf_merged, priority); return buf_merged; } const NDArray& Reduce(int key, const std::vector<NDArray>& src, int priority) override { // when this reduce is called from kvstore_dist, gc is not set // we don't do compression twice in dist_sync_device if ((gc_ != nullptr) && (gc_->get_type() != CompressionType::kNone)) { return ReduceCompressed(key, src, priority); } // avoid extra copy for single device, but it may bring problems for // abnormal usage of kvstore if (src.size() == 1) { return src[0]; } InitBuffersAndComm(src); auto& buf = merge_buf_[key]; const NDArrayStorageType stype = src[0].storage_type(); NDArray& buf_merged = buf.merged_buf(stype); // normal dense reduce if (stype == kDefaultStorage) { CopyFromTo(src[0], &buf_merged, priority); std::vector<NDArray> reduce(src.size()); reduce[0] = buf_merged; if (buf.copy_buf.empty()) { // TODO(mli) this results in large device memory usage for huge ndarray, // such as the largest fullc in VGG. consider to do segment reduce with // NDArray.Slice or gpu direct memory access. for the latter, we need to // remove some ctx check, and also it reduces 20% perf buf.copy_buf.resize(src.size()-1); for (size_t i = 0; i < src.size()-1; ++i) { buf.copy_buf[i] = NDArray( buf_merged.shape(), buf_merged.ctx(), false, buf_merged.dtype()); } } for (size_t i = 0; i < src.size()-1; ++i) { CopyFromTo(src[i+1], &(buf.copy_buf[i]), priority); reduce[i+1] = buf.copy_buf[i]; } ElementwiseSum(reduce, &buf_merged, priority); } else { // sparse reduce buf_merged = ReduceRowSparse(key, src, priority); } return buf_merged; } const NDArray& ReduceCompressed(int key, const std::vector<NDArray>& src, int priority) { InitBuffersAndComm(src); auto& buf = merge_buf_[key]; std::vector<NDArray> reduce(src.size()); if (buf.copy_buf.empty()) { // one buf for each context buf.copy_buf.resize(src.size()); buf.compressed_recv_buf.resize(src.size()); buf.compressed_send_buf.resize(src.size()); buf.residual.resize(src.size()); for (size_t i = 0; i < src.size(); ++i) { buf.copy_buf[i] = NDArray(buf.merged.shape(), buf.merged.ctx(), false, buf.merged.dtype()); buf.residual[i] = NDArray(buf.merged.shape(), src[i].ctx(), false, buf.merged.dtype()); buf.residual[i] = 0; int64_t small_size = gc_->GetCompressedSize(buf.merged.shape().Size()); buf.compressed_recv_buf[i] = NDArray(TShape{small_size}, buf.merged.ctx(), false, buf.merged.dtype()); buf.compressed_send_buf[i] = NDArray(TShape{small_size}, src[i].ctx(), false, buf.merged.dtype()); } } for (size_t i = 0; i < src.size(); ++i) { // compress before copy // this is done even if the data is on same context as copy_buf because // we don't want the training to be biased towards data on this GPU gc_->Quantize(src[i], &(buf.compressed_send_buf[i]), &(buf.residual[i]), priority); if (buf.compressed_send_buf[i].ctx() != buf.compressed_recv_buf[i].ctx()) { CopyFromTo(buf.compressed_send_buf[i], &(buf.compressed_recv_buf[i]), priority); } else { // avoid memory copy when they are on same context buf.compressed_recv_buf[i] = buf.compressed_send_buf[i]; } gc_->Dequantize(buf.compressed_recv_buf[i], &(buf.copy_buf[i]), priority); reduce[i] = buf.copy_buf[i]; } ElementwiseSum(reduce, &buf.merged); return buf.merged; } void Broadcast(int key, const NDArray& src, const std::vector<NDArray*> dst, int priority) override { if (!inited_) { // copy to 
a random device first int dev_id = key % dst.size(); CopyFromTo(src, dst[dev_id], priority); for (size_t i = 0; i < dst.size(); ++i) { if (i != static_cast<size_t>(dev_id)) { CopyFromTo(*dst[dev_id], dst[i], priority); } } } else { auto& buf_merged = merge_buf_[key].merged_buf(src.storage_type()); CopyFromTo(src, &buf_merged, priority); for (auto d : dst) { CopyFromTo(buf_merged, d, priority); } } } void BroadcastRowSparse(int key, const NDArray& src, const std::vector<std::pair<NDArray*, NDArray>>& dst, const int priority) override { CHECK_EQ(src.storage_type(), kRowSparseStorage) << "BroadcastRowSparse expects row-sparse src NDArray"; for (const auto& dst_kv : dst) { NDArray* out = dst_kv.first; NDArray row_id = dst_kv.second; CHECK_EQ(out->storage_type(), kRowSparseStorage) << "BroadcastRowSparse expects row_sparse dst NDArray"; CHECK_EQ(row_id.ctx(), src.ctx()) << "row_id and src are expected to be on the same context"; // retain according to indices const bool is_same_ctx = out->ctx() == src.ctx(); const bool is_diff_var = out->var() != src.var(); NDArray retained_gpu = (is_same_ctx && is_diff_var) ? *out : NDArray(kRowSparseStorage, out->shape(), src.ctx(), true, out->dtype(), out->aux_types()); if (!is_diff_var) { common::LogOnce("The output of row_sparse_pull() on key " + std::to_string(key) + "refers to the same NDArray as the one stored in KVStore." "Performing row_sparse_pull() with such output is going to change the " "data stored in KVStore. Incorrect result may be generated " "next time row_sparse_pull() is called. To avoid such an issue," "consider create a new NDArray buffer to store the output."); } bool is_gpu = retained_gpu.ctx().dev_mask() == gpu::kDevMask; Engine::Get()->PushAsync([=](RunContext rctx, Engine::CallbackOnComplete on_complete) { const TBlob& indices = row_id.data(); using namespace mxnet::common; NDArray temp = retained_gpu; switch (temp.ctx().dev_mask()) { case cpu::kDevMask: { SparseRetainOpForwardRspWrapper<cpu>(rctx.get_stream<cpu>(), src, indices, kWriteTo, &temp); break; } #if MXNET_USE_GPU case gpu::kDevMask: { SparseRetainOpForwardRspWrapper<gpu>(rctx.get_stream<gpu>(), src, indices, kWriteTo, &temp); // wait for GPU operations to complete rctx.get_stream<gpu>()->Wait(); break; } #endif default: LOG(FATAL) << MXNET_GPU_NOT_ENABLED_ERROR; } on_complete(); }, retained_gpu.ctx(), {src.var(), row_id.var()}, {retained_gpu.var()}, is_gpu ? 
FnProperty::kGPUPrioritized : FnProperty::kCPUPrioritized, priority, "KVStoreSparseRetain"); CopyFromTo(retained_gpu, out, priority); } } using KeyAttrs = std::tuple<int, TShape, int>; // try to allocate buff on device evenly void InitMergeBuffer(const std::vector<Context>& devs) { std::sort(sorted_key_attrs_.begin(), sorted_key_attrs_.end(), []( const KeyAttrs& a, const KeyAttrs& b) { return std::get<1>(a).Size() > std::get<1>(b).Size(); }); std::unordered_map<int, std::pair<Context, size_t>> ctx_info; for (auto d : devs) { ctx_info[d.dev_id] = std::make_pair(d, 0); } for (auto& sorted_key_attr : sorted_key_attrs_) { const int key = std::get<0>(sorted_key_attr); const TShape& shape = std::get<1>(sorted_key_attr); const int type = std::get<2>(sorted_key_attr); auto& buf = merge_buf_[key]; Context ctx; size_t min_size = std::numeric_limits<size_t>::max(); for (auto& ctx_info_kv : ctx_info) { size_t size = ctx_info_kv.second.second; if (size <= min_size) { ctx = ctx_info_kv.second.first; min_size = size; } } // Delayed allocation - as the dense merged buffer might not be used at all if push() // only sees sparse arrays if (buf.merged.is_none()) { bool delay_alloc = true; buf.merged = NDArray(shape, ctx, delay_alloc, type); } ctx_info[ctx.dev_id].second += shape.Size(); } inited_ = true; } private: void EnableP2P(const std::vector<Context>& devs) { #if MXNET_USE_GPU std::vector<int> gpus; for (const auto& d : devs) { if (d.dev_mask() == gpu::kDevMask) { gpus.push_back(d.dev_id); } } int n = static_cast<int>(gpus.size()); int enabled = 0; std::vector<int> p2p(n*n); // Restores active device to what it was before EnableP2P mxnet::common::cuda::DeviceStore device_store; for (int i = 0; i < n; ++i) { device_store.SetDevice(gpus[i]); for (int j = 0; j < n; j++) { int access; hipDeviceCanAccessPeer(&access, gpus[i], gpus[j]); if (access) { hipError_t e = hipDeviceEnablePeerAccess(gpus[j], 0); if (e == hipSuccess || e == hipErrorPeerAccessAlreadyEnabled) { ++enabled; p2p[i*n+j] = 1; } } } } if (enabled != n*(n-1)) { // print warning info if not fully enabled LOG(WARNING) << "only " << enabled << " out of " << n*(n-1) << " GPU pairs are enabled direct access. " << "It may affect the performance. " << "You can set MXNET_ENABLE_GPU_P2P=0 to turn it off"; std::string access(n, '.'); for (int i = 0; i < n; ++i) { for (int j = 0; j < n; ++j) { access[j] = p2p[i*n+j] ? 
'v' : '.'; } LOG(WARNING) << access; } } #endif } /// \brief temporal space for pushing and pulling struct BufferEntry { /// \brief the dense merged value for reduce and broadcast operations NDArray merged; /// \brief the gpu buffer for copy during reduce operation std::vector<NDArray> copy_buf; /// \brief the residual buffer for gradient compression std::vector<NDArray> residual; /// \brief the small buffer for compressed data in sender std::vector<NDArray> compressed_send_buf; /// \brief the small buffer for compressed data in receiver std::vector<NDArray> compressed_recv_buf; /// \brief the merged buffer for the given storage type (could be either dense or row_sparse) inline NDArray& merged_buf(NDArrayStorageType stype) { if (stype == kDefaultStorage) { CHECK(!merged.is_none()) << "unintialized merge buffer detected"; return merged; } CHECK(stype == kRowSparseStorage) << "unexpected storage type " << stype; // check if sparse_merged is initialized if (sparse_merged.is_none()) { CHECK(!merged.is_none()); sparse_merged = NDArray(kRowSparseStorage, merged.shape(), merged.ctx(), true, merged.dtype()); } return sparse_merged; } private: /// \brief the sparse merged value for reduce and rowsparse broadcast operations NDArray sparse_merged; }; std::unordered_map<int, BufferEntry> merge_buf_; public: bool inited_; std::vector<KeyAttrs> sorted_key_attrs_; }; } // namespace kvstore } // namespace mxnet #endif // MXNET_KVSTORE_COMM_H_
/** * Copyright (c) 2015 by Contributors */ #ifndef MXNET_KVSTORE_COMM_H_ #define MXNET_KVSTORE_COMM_H_ #include <dmlc/omp.h> #include <string> #include <algorithm> #include <utility> #include <limits> #include <vector> #include <tuple> #include <thread> #include "mxnet/ndarray.h" #include "gradient_compression.h" #include "../ndarray/ndarray_function.h" #include "../operator/tensor/sparse_retain-inl.h" #include "./kvstore_utils.h" namespace mxnet { namespace kvstore { /** * \brief multiple device commmunication */ class Comm { public: Comm() { pinned_ctx_ = Context::CPUPinned(0); } virtual ~Comm() { } /** * \brief init key with the data shape and storage shape */ virtual void Init(int key, const NDArrayStorageType stype, const TShape& shape, int dtype = mshadow::kFloat32) = 0; /** * \brief returns src[0] + .. + src[src.size()-1] */ virtual const NDArray& Reduce( int key, const std::vector<NDArray>& src, int priority) = 0; /** * \brief copy from src to dst[i] for every i */ virtual void Broadcast( int key, const NDArray& src, const std::vector<NDArray*> dst, int priority) = 0; /** * \brief broadcast src to dst[i] with target row_ids for every i * \param key the identifier key for the stored ndarray * \param src the source row_sparse ndarray to broadcast * \param dst a list of destination row_sparse NDArray and its target row_ids to broadcast, where the row_ids are expected to be unique and sorted in row_id.data() * \param priority the priority of the operation */ virtual void BroadcastRowSparse(int key, const NDArray& src, const std::vector<std::pair<NDArray*, NDArray>>& dst, const int priority) = 0; /** * \brief return a pinned contex */ Context pinned_ctx() const { return pinned_ctx_; } /** * \brief Sets gradient compression parameters to be able to * perform reduce with compressed gradients */ void SetGradientCompression(std::shared_ptr<GradientCompression> gc) { gc_ = gc; } protected: Context pinned_ctx_; std::shared_ptr<GradientCompression> gc_; }; /** * \brief an implemention of Comm that first copy data to CPU memeory, and then * reduce there */ class CommCPU : public Comm { public: CommCPU() { nthread_reduction_ = dmlc::GetEnv("MXNET_KVSTORE_REDUCTION_NTHREADS", 4); bigarray_bound_ = dmlc::GetEnv("MXNET_KVSTORE_BIGARRAY_BOUND", 1000 * 1000); // TODO(junwu) delete the following data member, now for benchmark only is_serial_push_ = dmlc::GetEnv("MXNET_KVSTORE_SERIAL_PUSH", 0); } virtual ~CommCPU() { } void Init(int key, const NDArrayStorageType stype, const TShape& shape, int type = mshadow::kFloat32) override { // Delayed allocation - the dense merged buffer might not be used at all if push() // only sees sparse arrays bool delay_alloc = true; merge_buf_[key].merged = NDArray(shape, pinned_ctx_, delay_alloc, type); } const NDArray& Reduce(int key, const std::vector<NDArray>& src, int priority) override { auto& buf = merge_buf_[key]; const auto stype = src[0].storage_type(); // avoid extra copy for single device, but it may bring problems for // abnormal usage of kvstore if (src.size() == 1) { if (stype == kDefaultStorage) { return src[0]; } else { // With 'local' kvstore, we could store the weight on CPU while compute // the gradient on GPU when the weight is extremely large. // To avoiding copying the weight to the same context of the gradient, // we always copy the gradient to merged buf. 
NDArray& merged = buf.merged_buf(stype); CopyFromTo(src[0], &merged, priority); return merged; } } NDArray& buf_merged = buf.merged_buf(stype); // normal dense reduce if (stype == kDefaultStorage) { std::vector<Engine::VarHandle> const_vars(src.size() - 1); std::vector<NDArray> reduce(src.size()); CopyFromTo(src[0], &buf_merged, priority); reduce[0] = buf_merged; if (buf.copy_buf.empty()) { buf.copy_buf.resize(src.size()-1); for (size_t j = 0; j < src.size() - 1; ++j) { // allocate copy buffer buf.copy_buf[j] = NDArray( src[0].shape(), pinned_ctx_, false, src[0].dtype()); } } CHECK(stype == buf.copy_buf[0].storage_type()) << "Storage type mismatch detected. " << stype << "(src) vs. " << buf.copy_buf[0].storage_type() << "(buf.copy_buf)"; for (size_t i = 1; i < src.size(); ++i) { CopyFromTo(src[i], &(buf.copy_buf[i-1]), priority); reduce[i] = buf.copy_buf[i-1]; const_vars[i-1] = reduce[i].var(); } Engine::Get()->PushAsync( [reduce, this](RunContext rctx, Engine::CallbackOnComplete on_complete) { ReduceSumCPU(reduce); on_complete(); }, Context::CPU(), const_vars, {reduce[0].var()}, FnProperty::kCPUPrioritized, priority, "KVStoreReduce"); } else { // sparse reduce std::vector<Engine::VarHandle> const_vars(src.size()); std::vector<NDArray> reduce(src.size()); if (buf.copy_buf.empty()) { buf.copy_buf.resize(src.size()); for (size_t j = 0; j < src.size(); ++j) { buf.copy_buf[j] = NDArray( src[0].storage_type(), src[0].shape(), pinned_ctx_, true, src[0].dtype()); } } CHECK(stype == buf.copy_buf[0].storage_type()) << "Storage type mismatch detected. " << stype << "(src) vs. " << buf.copy_buf[0].storage_type() << "(buf.copy_buf)"; for (size_t i = 0; i < src.size(); ++i) { CopyFromTo(src[i], &(buf.copy_buf[i]), priority); reduce[i] = buf.copy_buf[i]; const_vars[i] = reduce[i].var(); } Resource rsc = ResourceManager::Get()->Request(buf_merged.ctx(), ResourceRequest(ResourceRequest::kTempSpace)); Engine::Get()->PushAsync( [reduce, buf_merged, rsc, this](RunContext rctx, Engine::CallbackOnComplete on_complete) { NDArray out = buf_merged; is_serial_push_? ReduceSumCPUExSerial(reduce, &out) : mxnet::ndarray::ElementwiseSum(rctx.get_stream<cpu>(), rsc, reduce, &out); on_complete(); }, Context::CPU(), const_vars, {buf_merged.var(), rsc.var}, FnProperty::kCPUPrioritized, priority, "KVStoreReduce"); } return buf_merged; } void Broadcast(int key, const NDArray& src, const std::vector<NDArray*> dst, int priority) override { int mask = src.ctx().dev_mask(); if (mask == Context::kCPU) { for (auto d : dst) CopyFromTo(src, d, priority); } else { // First copy data to pinned_ctx, then broadcast. // Note that kv.init initializes the data on pinned_ctx. // This branch indicates push() with ndarrays on gpus were called, // and the source is copied to gpu ctx. // Also indicates that buffers are already initialized during push(). 
auto& buf = merge_buf_[key].merged_buf(src.storage_type()); CopyFromTo(src, &buf, priority); for (auto d : dst) CopyFromTo(buf, d, priority); } } void BroadcastRowSparse(int key, const NDArray& src, const std::vector<std::pair<NDArray*, NDArray>>& dst, const int priority) override { using namespace mshadow; CHECK_EQ(src.storage_type(), kRowSparseStorage) << "BroadcastRowSparse expects row-sparse src NDArray"; CHECK_EQ(src.ctx().dev_mask(), Context::kCPU) << "BroadcastRowSparse with src on gpu context not supported"; for (const auto& dst_kv : dst) { NDArray* out = dst_kv.first; NDArray row_id = dst_kv.second; CHECK_EQ(out->storage_type(), kRowSparseStorage) << "BroadcastRowSparse expects row_sparse dst NDArray"; CHECK_EQ(row_id.ctx().dev_mask(), Context::kCPU) << "BroadcastRowSparse with row_indices on gpu context not supported"; // retain according to unique indices const bool is_same_ctx = out->ctx() == src.ctx(); const bool is_diff_var = out->var() != src.var(); NDArray retained_cpu = (is_same_ctx && is_diff_var) ? *out : NDArray(kRowSparseStorage, src.shape(), src.ctx(), true, src.dtype(), src.aux_types()); if (!is_diff_var) { common::LogOnce("The output of row_sparse_pull() on key " + std::to_string(key) + "refers to the same NDArray as the one stored in KVStore." "Performing row_sparse_pull() with such output is going to change the " "data stored in KVStore. Incorrect result may be generated " "next time row_sparse_pull() is called. To avoid such an issue," "consider create a new NDArray buffer to store the output."); } Engine::Get()->PushAsync( [=](RunContext rctx, Engine::CallbackOnComplete on_complete) { const TBlob& indices = row_id.data(); NDArray temp = retained_cpu; // get rid the of const qualifier op::SparseRetainOpForwardRspImpl<cpu>(rctx.get_stream<cpu>(), src, indices, kWriteTo, &temp); on_complete(); }, Context::CPU(), {src.var(), row_id.var()}, {retained_cpu.var()}, FnProperty::kNormal, priority, "KVStoreSparseRetain"); // if retained_cpu == out, CopyFromTo will ignore the copy operation CopyFromTo(retained_cpu, out, priority); } } private: // reduce sum into val[0] inline void ReduceSumCPU(const std::vector<NDArray> &in_data) { MSHADOW_TYPE_SWITCH(in_data[0].dtype(), DType, { std::vector<DType*> dptr(in_data.size()); for (size_t i = 0; i < in_data.size(); ++i) { TBlob data = in_data[i].data(); CHECK(data.CheckContiguous()); dptr[i] = data.FlatTo2D<cpu, DType>().dptr_; } size_t total = in_data[0].shape().Size(); ReduceSumCPUImpl(dptr, total); }); } // serial implementation of reduce sum for row sparse NDArray. 
inline void ReduceSumCPUExSerial(const std::vector<NDArray> &in, NDArray *out) { using namespace rowsparse; using namespace mshadow; auto stype = out->storage_type(); CHECK_EQ(stype, kRowSparseStorage) << "Unexpected storage type " << stype; size_t total_num_rows = 0; size_t num_in = in.size(); // skip the ones with empty indices and values std::vector<bool> skip(num_in, false); // the values tensor of the inputs MSHADOW_TYPE_SWITCH(out->dtype(), DType, { MSHADOW_IDX_TYPE_SWITCH(out->aux_type(kIdx), IType, { std::vector<Tensor<cpu, 2, DType>> in_vals(num_in); std::vector<Tensor<cpu, 1, IType>> in_indices(num_in); // offset to the values tensor of all inputs std::vector<size_t> offsets(num_in, 0); std::vector<size_t> num_rows(num_in, 0); for (size_t i = 0; i < num_in; i++) { if (!in[i].storage_initialized()) { skip[i] = true; continue; } auto size = in[i].aux_shape(kIdx).Size(); num_rows[i] = size; total_num_rows += size; in_vals[i] = in[i].data().FlatTo2D<cpu, DType>(); in_indices[i] = in[i].aux_data(kIdx).FlatTo1D<cpu, IType>(); } std::vector<IType> indices; indices.reserve(total_num_rows); // gather indices from all inputs for (size_t i = 0; i < num_in; i++) { for (size_t j = 0; j < num_rows[i]; j++) { indices.emplace_back(in_indices[i][j]); } } CHECK_EQ(indices.size(), total_num_rows); // dedup indices std::sort(indices.begin(), indices.end()); indices.resize(std::unique(indices.begin(), indices.end()) - indices.begin()); // the one left are unique non-zero rows size_t nnr = indices.size(); // allocate memory for output out->CheckAndAlloc({Shape1(nnr)}); auto idx_data = out->aux_data(kIdx).FlatTo1D<cpu, IType>(); auto val_data = out->data().FlatTo2D<cpu, DType>(); for (size_t i = 0; i < nnr; i++) { // copy indices back idx_data[i] = indices[i]; bool zeros = true; for (size_t j = 0; j < num_in; j++) { if (skip[j]) continue; size_t offset = offsets[j]; if (offset < num_rows[j]) { if (indices[i] == in_indices[j][offset]) { if (zeros) { Copy(val_data[i], in_vals[j][offset], nullptr); zeros = false; } else { val_data[i] += in_vals[j][offset]; } offsets[j] += 1; } } } } }); }); } template<typename DType> inline static void ReduceSumCPU( const std::vector<DType*> &dptr, size_t offset, index_t size) { using namespace mshadow; // NOLINT(*) Tensor<cpu, 1, DType> in_0(dptr[0] + offset, Shape1(size)); for (size_t i = 1; i < dptr.size(); i+=4) { switch (dptr.size() - i) { case 1: { Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size)); in_0 += in_1; break; } case 2: { Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size)); Tensor<cpu, 1, DType> in_2(dptr[i+1] + offset, Shape1(size)); in_0 += in_1 + in_2; break; } case 3: { Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size)); Tensor<cpu, 1, DType> in_2(dptr[i+1] + offset, Shape1(size)); Tensor<cpu, 1, DType> in_3(dptr[i+2] + offset, Shape1(size)); in_0 += in_1 + in_2 + in_3; break; } default: { Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size)); Tensor<cpu, 1, DType> in_2(dptr[i+1] + offset, Shape1(size)); Tensor<cpu, 1, DType> in_3(dptr[i+2] + offset, Shape1(size)); Tensor<cpu, 1, DType> in_4(dptr[i+3] + offset, Shape1(size)); in_0 += in_1 + in_2 + in_3 + in_4; break; } } } } template<typename DType> inline void ReduceSumCPUImpl(std::vector<DType*> dptr, size_t total) { const size_t step = std::min(bigarray_bound_, static_cast<size_t>(4 << 10)); long ntask = (total + step - 1) / step; // NOLINT(*) if (total < bigarray_bound_ || nthread_reduction_ <= 1) { ReduceSumCPU(dptr, 0, total); } else { #pragma omp parallel for 
schedule(static) num_threads(nthread_reduction_) for (long j = 0; j < ntask; ++j) { // NOLINT(*) size_t k = static_cast<size_t>(j); size_t begin = std::min(k * step, total); size_t end = std::min((k + 1) * step, total); if (j == ntask - 1) CHECK_EQ(end, total); ReduceSumCPU(dptr, begin, static_cast<index_t>(end - begin)); } } } /// \brief temporal space for pushing and pulling struct BufferEntry { /// \brief the merged value NDArray merged; /// \brief the cpu buffer for gpu data std::vector<NDArray> copy_buf; /// \brief the merged buffer for the given storage type inline NDArray& merged_buf(NDArrayStorageType stype) { if (stype == kDefaultStorage) { return merged; } CHECK(stype == kRowSparseStorage) << "unexpected storage type " << stype; // check if sparse_merged is initialized if (sparse_merged.is_none()) { CHECK(!merged.is_none()); sparse_merged = NDArray(kRowSparseStorage, merged.shape(), merged.ctx(), true, merged.dtype()); } return sparse_merged; } private: /// \brief the sparse merged value NDArray sparse_merged; }; std::unordered_map<int, BufferEntry> merge_buf_; size_t bigarray_bound_; int nthread_reduction_; bool is_serial_push_; }; /** * \brief an implementation of Comm that performs reduction on device * directly. * * It is faster if the total device-to-device bandwidths is larger than * device-to-cpu, which is often true for 4 or 8 GPUs. But it uses more device * memory. */ class CommDevice : public Comm { public: CommDevice() { inited_ = false; } virtual ~CommDevice() { } void Init(int key, const NDArrayStorageType stype, const TShape& shape, int dtype = mshadow::kFloat32) override { sorted_key_attrs_.emplace_back(key, shape, dtype); inited_ = false; } void InitBuffersAndComm(const std::vector<NDArray>& src) { if (!inited_) { std::vector<Context> devs; for (const auto& a : src) { devs.push_back(a.ctx()); } InitMergeBuffer(devs); if (dmlc::GetEnv("MXNET_ENABLE_GPU_P2P", 1)) { EnableP2P(devs); } } } const NDArray& ReduceRowSparse(int key, const std::vector<NDArray>& src, int priority) { auto& buf = merge_buf_[key]; std::vector<NDArray> reduce(src.size()); const NDArrayStorageType stype = src[0].storage_type(); NDArray& buf_merged = buf.merged_buf(stype); if (buf.copy_buf.empty()) { // initialize buffer for copying during reduce buf.copy_buf.resize(src.size()); for (size_t j = 0; j < src.size(); ++j) { buf.copy_buf[j] = NDArray(stype, src[0].shape(), buf_merged.ctx(), true, src[0].dtype()); } } CHECK(src[0].storage_type() == buf.copy_buf[0].storage_type()) << "Storage type mismatch detected. " << src[0].storage_type() << "(src) vs. 
" << buf.copy_buf[0].storage_type() << "(buf.copy_buf)"; for (size_t i = 0; i < src.size(); ++i) { CopyFromTo(src[i], &(buf.copy_buf[i]), priority); reduce[i] = buf.copy_buf[i]; } ElementwiseSum(reduce, &buf_merged, priority); return buf_merged; } const NDArray& Reduce(int key, const std::vector<NDArray>& src, int priority) override { // when this reduce is called from kvstore_dist, gc is not set // we don't do compression twice in dist_sync_device if ((gc_ != nullptr) && (gc_->get_type() != CompressionType::kNone)) { return ReduceCompressed(key, src, priority); } // avoid extra copy for single device, but it may bring problems for // abnormal usage of kvstore if (src.size() == 1) { return src[0]; } InitBuffersAndComm(src); auto& buf = merge_buf_[key]; const NDArrayStorageType stype = src[0].storage_type(); NDArray& buf_merged = buf.merged_buf(stype); // normal dense reduce if (stype == kDefaultStorage) { CopyFromTo(src[0], &buf_merged, priority); std::vector<NDArray> reduce(src.size()); reduce[0] = buf_merged; if (buf.copy_buf.empty()) { // TODO(mli) this results in large device memory usage for huge ndarray, // such as the largest fullc in VGG. consider to do segment reduce with // NDArray.Slice or gpu direct memory access. for the latter, we need to // remove some ctx check, and also it reduces 20% perf buf.copy_buf.resize(src.size()-1); for (size_t i = 0; i < src.size()-1; ++i) { buf.copy_buf[i] = NDArray( buf_merged.shape(), buf_merged.ctx(), false, buf_merged.dtype()); } } for (size_t i = 0; i < src.size()-1; ++i) { CopyFromTo(src[i+1], &(buf.copy_buf[i]), priority); reduce[i+1] = buf.copy_buf[i]; } ElementwiseSum(reduce, &buf_merged, priority); } else { // sparse reduce buf_merged = ReduceRowSparse(key, src, priority); } return buf_merged; } const NDArray& ReduceCompressed(int key, const std::vector<NDArray>& src, int priority) { InitBuffersAndComm(src); auto& buf = merge_buf_[key]; std::vector<NDArray> reduce(src.size()); if (buf.copy_buf.empty()) { // one buf for each context buf.copy_buf.resize(src.size()); buf.compressed_recv_buf.resize(src.size()); buf.compressed_send_buf.resize(src.size()); buf.residual.resize(src.size()); for (size_t i = 0; i < src.size(); ++i) { buf.copy_buf[i] = NDArray(buf.merged.shape(), buf.merged.ctx(), false, buf.merged.dtype()); buf.residual[i] = NDArray(buf.merged.shape(), src[i].ctx(), false, buf.merged.dtype()); buf.residual[i] = 0; int64_t small_size = gc_->GetCompressedSize(buf.merged.shape().Size()); buf.compressed_recv_buf[i] = NDArray(TShape{small_size}, buf.merged.ctx(), false, buf.merged.dtype()); buf.compressed_send_buf[i] = NDArray(TShape{small_size}, src[i].ctx(), false, buf.merged.dtype()); } } for (size_t i = 0; i < src.size(); ++i) { // compress before copy // this is done even if the data is on same context as copy_buf because // we don't want the training to be biased towards data on this GPU gc_->Quantize(src[i], &(buf.compressed_send_buf[i]), &(buf.residual[i]), priority); if (buf.compressed_send_buf[i].ctx() != buf.compressed_recv_buf[i].ctx()) { CopyFromTo(buf.compressed_send_buf[i], &(buf.compressed_recv_buf[i]), priority); } else { // avoid memory copy when they are on same context buf.compressed_recv_buf[i] = buf.compressed_send_buf[i]; } gc_->Dequantize(buf.compressed_recv_buf[i], &(buf.copy_buf[i]), priority); reduce[i] = buf.copy_buf[i]; } ElementwiseSum(reduce, &buf.merged); return buf.merged; } void Broadcast(int key, const NDArray& src, const std::vector<NDArray*> dst, int priority) override { if (!inited_) { // copy to 
a random device first int dev_id = key % dst.size(); CopyFromTo(src, dst[dev_id], priority); for (size_t i = 0; i < dst.size(); ++i) { if (i != static_cast<size_t>(dev_id)) { CopyFromTo(*dst[dev_id], dst[i], priority); } } } else { auto& buf_merged = merge_buf_[key].merged_buf(src.storage_type()); CopyFromTo(src, &buf_merged, priority); for (auto d : dst) { CopyFromTo(buf_merged, d, priority); } } } void BroadcastRowSparse(int key, const NDArray& src, const std::vector<std::pair<NDArray*, NDArray>>& dst, const int priority) override { CHECK_EQ(src.storage_type(), kRowSparseStorage) << "BroadcastRowSparse expects row-sparse src NDArray"; for (const auto& dst_kv : dst) { NDArray* out = dst_kv.first; NDArray row_id = dst_kv.second; CHECK_EQ(out->storage_type(), kRowSparseStorage) << "BroadcastRowSparse expects row_sparse dst NDArray"; CHECK_EQ(row_id.ctx(), src.ctx()) << "row_id and src are expected to be on the same context"; // retain according to indices const bool is_same_ctx = out->ctx() == src.ctx(); const bool is_diff_var = out->var() != src.var(); NDArray retained_gpu = (is_same_ctx && is_diff_var) ? *out : NDArray(kRowSparseStorage, out->shape(), src.ctx(), true, out->dtype(), out->aux_types()); if (!is_diff_var) { common::LogOnce("The output of row_sparse_pull() on key " + std::to_string(key) + "refers to the same NDArray as the one stored in KVStore." "Performing row_sparse_pull() with such output is going to change the " "data stored in KVStore. Incorrect result may be generated " "next time row_sparse_pull() is called. To avoid such an issue," "consider create a new NDArray buffer to store the output."); } bool is_gpu = retained_gpu.ctx().dev_mask() == gpu::kDevMask; Engine::Get()->PushAsync([=](RunContext rctx, Engine::CallbackOnComplete on_complete) { const TBlob& indices = row_id.data(); using namespace mxnet::common; NDArray temp = retained_gpu; switch (temp.ctx().dev_mask()) { case cpu::kDevMask: { SparseRetainOpForwardRspWrapper<cpu>(rctx.get_stream<cpu>(), src, indices, kWriteTo, &temp); break; } #if MXNET_USE_GPU case gpu::kDevMask: { SparseRetainOpForwardRspWrapper<gpu>(rctx.get_stream<gpu>(), src, indices, kWriteTo, &temp); // wait for GPU operations to complete rctx.get_stream<gpu>()->Wait(); break; } #endif default: LOG(FATAL) << MXNET_GPU_NOT_ENABLED_ERROR; } on_complete(); }, retained_gpu.ctx(), {src.var(), row_id.var()}, {retained_gpu.var()}, is_gpu ? 
FnProperty::kGPUPrioritized : FnProperty::kCPUPrioritized, priority, "KVStoreSparseRetain"); CopyFromTo(retained_gpu, out, priority); } } using KeyAttrs = std::tuple<int, TShape, int>; // try to allocate buff on device evenly void InitMergeBuffer(const std::vector<Context>& devs) { std::sort(sorted_key_attrs_.begin(), sorted_key_attrs_.end(), []( const KeyAttrs& a, const KeyAttrs& b) { return std::get<1>(a).Size() > std::get<1>(b).Size(); }); std::unordered_map<int, std::pair<Context, size_t>> ctx_info; for (auto d : devs) { ctx_info[d.dev_id] = std::make_pair(d, 0); } for (auto& sorted_key_attr : sorted_key_attrs_) { const int key = std::get<0>(sorted_key_attr); const TShape& shape = std::get<1>(sorted_key_attr); const int type = std::get<2>(sorted_key_attr); auto& buf = merge_buf_[key]; Context ctx; size_t min_size = std::numeric_limits<size_t>::max(); for (auto& ctx_info_kv : ctx_info) { size_t size = ctx_info_kv.second.second; if (size <= min_size) { ctx = ctx_info_kv.second.first; min_size = size; } } // Delayed allocation - as the dense merged buffer might not be used at all if push() // only sees sparse arrays if (buf.merged.is_none()) { bool delay_alloc = true; buf.merged = NDArray(shape, ctx, delay_alloc, type); } ctx_info[ctx.dev_id].second += shape.Size(); } inited_ = true; } private: void EnableP2P(const std::vector<Context>& devs) { #if MXNET_USE_GPU std::vector<int> gpus; for (const auto& d : devs) { if (d.dev_mask() == gpu::kDevMask) { gpus.push_back(d.dev_id); } } int n = static_cast<int>(gpus.size()); int enabled = 0; std::vector<int> p2p(n*n); // Restores active device to what it was before EnableP2P mxnet::common::cuda::DeviceStore device_store; for (int i = 0; i < n; ++i) { device_store.SetDevice(gpus[i]); for (int j = 0; j < n; j++) { int access; hipDeviceCanAccessPeer(&access, gpus[i], gpus[j]); if (access) { hipError_t e = hipDeviceEnablePeerAccess(gpus[j], 0); if (e == hipSuccess || e == hipErrorPeerAccessAlreadyEnabled) { ++enabled; p2p[i*n+j] = 1; } } } } if (enabled != n*(n-1)) { // print warning info if not fully enabled LOG(WARNING) << "only " << enabled << " out of " << n*(n-1) << " GPU pairs are enabled direct access. " << "It may affect the performance. " << "You can set MXNET_ENABLE_GPU_P2P=0 to turn it off"; std::string access(n, '.'); for (int i = 0; i < n; ++i) { for (int j = 0; j < n; ++j) { access[j] = p2p[i*n+j] ? 
'v' : '.'; } LOG(WARNING) << access; } } #endif } /// \brief temporal space for pushing and pulling struct BufferEntry { /// \brief the dense merged value for reduce and broadcast operations NDArray merged; /// \brief the gpu buffer for copy during reduce operation std::vector<NDArray> copy_buf; /// \brief the residual buffer for gradient compression std::vector<NDArray> residual; /// \brief the small buffer for compressed data in sender std::vector<NDArray> compressed_send_buf; /// \brief the small buffer for compressed data in receiver std::vector<NDArray> compressed_recv_buf; /// \brief the merged buffer for the given storage type (could be either dense or row_sparse) inline NDArray& merged_buf(NDArrayStorageType stype) { if (stype == kDefaultStorage) { CHECK(!merged.is_none()) << "unintialized merge buffer detected"; return merged; } CHECK(stype == kRowSparseStorage) << "unexpected storage type " << stype; // check if sparse_merged is initialized if (sparse_merged.is_none()) { CHECK(!merged.is_none()); sparse_merged = NDArray(kRowSparseStorage, merged.shape(), merged.ctx(), true, merged.dtype()); } return sparse_merged; } private: /// \brief the sparse merged value for reduce and rowsparse broadcast operations NDArray sparse_merged; }; std::unordered_map<int, BufferEntry> merge_buf_; public: bool inited_; std::vector<KeyAttrs> sorted_key_attrs_; }; } // namespace kvstore } // namespace mxnet #endif // MXNET_KVSTORE_COMM_H_
shared-clause.c
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#endif

int main() {
  int i, n = 7;
  int a[n];
  for (i=0; i<n; i++) a[i] = i+1;

  /* n must be listed explicitly because of default(none) */
  #pragma omp parallel for shared(a, n) default(none)
  for (i=0; i<n; i++) a[i] += i;

  printf("Después de parallel for:\n");
  for (i=0; i<n; i++) printf("a[%d] = %d\n",i,a[i]);
}
#include <stdio.h>

int main() {
  int i, n = 7;
  int a[n];
  for (i = 0; i < n; i++) a[i] = i + 1;
  for (i = 0; i < n; i++) a[i] += i;
  printf("Después de parallel for:\n");
  for (i = 0; i < n; i++) printf("a[%d] = %d\n", i, a[i]);
}
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#endif

int main() {
  int i, n = 7;
  int a[n];
  for (i = 0; i < n; i++) a[i] = i + 1;

  /* default(none) requires an explicit data-sharing attribute for n;
     the loop index i is predetermined private */
  #pragma omp parallel for shared(a, n) default(none)
  for (i = 0; i < n; i++) a[i] += i;

  printf("Después de parallel for:\n");
  for (i = 0; i < n; i++) printf("a[%d] = %d\n", i, a[i]);
}
q2.c
#include <math.h> #include <omp.h> #include <stdio.h> #include <stdlib.h> #include "common.h" const i32 REPEAT_COUNT = 512; const i32 ARRAY_SIZE = 8192; const i32 RECURSION_SIZE_LIMIT = 32; const i32 FILE_ARRAY_SIZE = 100000; i32* get_random_array(i32 size) { i32* arr = malloc(sizeof(i32) * size); for (i32 i = 0; i < size; i++) { arr[i] = rand() % 500; } return arr; } i32* get_array_from_file() { i32* arr = malloc(sizeof(i32) * FILE_ARRAY_SIZE); FILE* file = fopen("input1.txt", "r"); for (i32 i = 0; i < FILE_ARRAY_SIZE; i++) { i32 elem; fscanf(file, "%d", &elem); arr[i] = elem; } fclose(file); return arr; } void output_array_to_file(i32* arr) { FILE* file = fopen("output1.txt", "w"); for (i32 i = 0; i < FILE_ARRAY_SIZE; i++) { fprintf(file, "%d\n", arr[i]); } fclose(file); } void insertion_sort(i32* arr, i32 l, i32 r) { for (i32 i = l + 1; i < r; i++) { i32 key = arr[i]; i32 j = i - 1; while (j >= l && arr[j] > key) { arr[j + 1] = arr[j]; j--; } arr[j + 1] = key; } } void merge(i32* arr, i32 l, i32 m, i32 r) { i32 n1 = m - l; i32 n2 = r - m; i32* L = malloc(sizeof(i32) * n1); i32* R = malloc(sizeof(i32) * n2); for (i32 i = 0; i < n1; i++) { L[i] = arr[l + i]; } for (i32 i = 0; i < n2; i++) { R[i] = arr[m + i]; } i32 i = 0, j = 0; i32 k = l; while (i < n1 && j < n2) { if (L[i] <= R[j]) { arr[k] = L[i]; i++; } else { arr[k] = R[j]; j++; } k++; } while (i < n1) { arr[k] = L[i]; i++; k++; } while (j < n2) { arr[k] = R[j]; j++; k++; } free(L); free(R); } void merge_sort(i32* arr, i32 l, i32 r) { if ((r - l) > RECURSION_SIZE_LIMIT) { i32 m = l + (r - l) / 2; #pragma omp task merge_sort(arr, l, m); #pragma omp task merge_sort(arr, m, r); #pragma omp taskwait merge(arr, l, m, r); } else if (r - l > 1) { insertion_sort(arr, l, r); } } f64 do_it(i32* arr) { f64 before = omp_get_wtime(); #pragma omp parallel #pragma omp single merge_sort(arr, 0, FILE_ARRAY_SIZE); f64 after = omp_get_wtime(); return after - before; } i32 main(i32 argc, char** argv) { omp_set_nested(1); if (argc > 1) { i32 thread_count = atoi(argv[1]); omp_set_num_threads(thread_count); } // i32* arr = get_random_array(ARRAY_SIZE); i32* arr = malloc(sizeof(i32) * FILE_ARRAY_SIZE); i32* orig_arr = get_array_from_file(); f64 time_sum = 0; for (i32 i = 0; i < REPEAT_COUNT; i++) { for (i32 k = 0; k < FILE_ARRAY_SIZE; k++) { arr[k] = orig_arr[k]; } time_sum += do_it(arr); } output_array_to_file(arr); printf("%fus\n", (time_sum * 1000000) / REPEAT_COUNT); return 0; }
#include <math.h> #include <omp.h> #include <stdio.h> #include <stdlib.h> #include "common.h" const i32 REPEAT_COUNT = 512; const i32 ARRAY_SIZE = 8192; const i32 RECURSION_SIZE_LIMIT = 32; const i32 FILE_ARRAY_SIZE = 100000; i32* get_random_array(i32 size) { i32* arr = malloc(sizeof(i32) * size); for (i32 i = 0; i < size; i++) { arr[i] = rand() % 500; } return arr; } i32* get_array_from_file() { i32* arr = malloc(sizeof(i32) * FILE_ARRAY_SIZE); FILE* file = fopen("input1.txt", "r"); for (i32 i = 0; i < FILE_ARRAY_SIZE; i++) { i32 elem; fscanf(file, "%d", &elem); arr[i] = elem; } fclose(file); return arr; } void output_array_to_file(i32* arr) { FILE* file = fopen("output1.txt", "w"); for (i32 i = 0; i < FILE_ARRAY_SIZE; i++) { fprintf(file, "%d\n", arr[i]); } fclose(file); } void insertion_sort(i32* arr, i32 l, i32 r) { for (i32 i = l + 1; i < r; i++) { i32 key = arr[i]; i32 j = i - 1; while (j >= l && arr[j] > key) { arr[j + 1] = arr[j]; j--; } arr[j + 1] = key; } } void merge(i32* arr, i32 l, i32 m, i32 r) { i32 n1 = m - l; i32 n2 = r - m; i32* L = malloc(sizeof(i32) * n1); i32* R = malloc(sizeof(i32) * n2); for (i32 i = 0; i < n1; i++) { L[i] = arr[l + i]; } for (i32 i = 0; i < n2; i++) { R[i] = arr[m + i]; } i32 i = 0, j = 0; i32 k = l; while (i < n1 && j < n2) { if (L[i] <= R[j]) { arr[k] = L[i]; i++; } else { arr[k] = R[j]; j++; } k++; } while (i < n1) { arr[k] = L[i]; i++; k++; } while (j < n2) { arr[k] = R[j]; j++; k++; } free(L); free(R); } void merge_sort(i32* arr, i32 l, i32 r) { if ((r - l) > RECURSION_SIZE_LIMIT) { i32 m = l + (r - l) / 2; merge_sort(arr, l, m); merge_sort(arr, m, r); merge(arr, l, m, r); } else if (r - l > 1) { insertion_sort(arr, l, r); } } f64 do_it(i32* arr) { f64 before = omp_get_wtime(); merge_sort(arr, 0, FILE_ARRAY_SIZE); f64 after = omp_get_wtime(); return after - before; } i32 main(i32 argc, char** argv) { omp_set_nested(1); if (argc > 1) { i32 thread_count = atoi(argv[1]); omp_set_num_threads(thread_count); } // i32* arr = get_random_array(ARRAY_SIZE); i32* arr = malloc(sizeof(i32) * FILE_ARRAY_SIZE); i32* orig_arr = get_array_from_file(); f64 time_sum = 0; for (i32 i = 0; i < REPEAT_COUNT; i++) { for (i32 k = 0; k < FILE_ARRAY_SIZE; k++) { arr[k] = orig_arr[k]; } time_sum += do_it(arr); } output_array_to_file(arr); printf("%fus\n", (time_sum * 1000000) / REPEAT_COUNT); return 0; }
#include <math.h> #include <omp.h> #include <stdio.h> #include <stdlib.h> #include "common.h" const i32 REPEAT_COUNT = 512; const i32 ARRAY_SIZE = 8192; const i32 RECURSION_SIZE_LIMIT = 32; const i32 FILE_ARRAY_SIZE = 100000; i32* get_random_array(i32 size) { i32* arr = malloc(sizeof(i32) * size); for (i32 i = 0; i < size; i++) { arr[i] = rand() % 500; } return arr; } i32* get_array_from_file() { i32* arr = malloc(sizeof(i32) * FILE_ARRAY_SIZE); FILE* file = fopen("input1.txt", "r"); for (i32 i = 0; i < FILE_ARRAY_SIZE; i++) { i32 elem; fscanf(file, "%d", &elem); arr[i] = elem; } fclose(file); return arr; } void output_array_to_file(i32* arr) { FILE* file = fopen("output1.txt", "w"); for (i32 i = 0; i < FILE_ARRAY_SIZE; i++) { fprintf(file, "%d\n", arr[i]); } fclose(file); } void insertion_sort(i32* arr, i32 l, i32 r) { for (i32 i = l + 1; i < r; i++) { i32 key = arr[i]; i32 j = i - 1; while (j >= l && arr[j] > key) { arr[j + 1] = arr[j]; j--; } arr[j + 1] = key; } } void merge(i32* arr, i32 l, i32 m, i32 r) { i32 n1 = m - l; i32 n2 = r - m; i32* L = malloc(sizeof(i32) * n1); i32* R = malloc(sizeof(i32) * n2); for (i32 i = 0; i < n1; i++) { L[i] = arr[l + i]; } for (i32 i = 0; i < n2; i++) { R[i] = arr[m + i]; } i32 i = 0, j = 0; i32 k = l; while (i < n1 && j < n2) { if (L[i] <= R[j]) { arr[k] = L[i]; i++; } else { arr[k] = R[j]; j++; } k++; } while (i < n1) { arr[k] = L[i]; i++; k++; } while (j < n2) { arr[k] = R[j]; j++; k++; } free(L); free(R); } void merge_sort(i32* arr, i32 l, i32 r) { if ((r - l) > RECURSION_SIZE_LIMIT) { i32 m = l + (r - l) / 2; #pragma omp task merge_sort(arr, l, m); #pragma omp task merge_sort(arr, m, r); #pragma omp taskwait merge(arr, l, m, r); } else if (r - l > 1) { insertion_sort(arr, l, r); } } f64 do_it(i32* arr) { f64 before = omp_get_wtime(); #pragma omp parallel #pragma omp single merge_sort(arr, 0, FILE_ARRAY_SIZE); f64 after = omp_get_wtime(); return after - before; } i32 main(i32 argc, char** argv) { omp_set_nested(1); if (argc > 1) { i32 thread_count = atoi(argv[1]); omp_set_num_threads(thread_count); } // i32* arr = get_random_array(ARRAY_SIZE); i32* arr = malloc(sizeof(i32) * FILE_ARRAY_SIZE); i32* orig_arr = get_array_from_file(); f64 time_sum = 0; for (i32 i = 0; i < REPEAT_COUNT; i++) { for (i32 k = 0; k < FILE_ARRAY_SIZE; k++) { arr[k] = orig_arr[k]; } time_sum += do_it(arr); } output_array_to_file(arr); printf("%fus\n", (time_sum * 1000000) / REPEAT_COUNT); return 0; }
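The three cells above contrast a task-parallel merge sort (original and omp_formatted) with its serial counterpart (no_omp_formatted). What follows is a minimal, self-contained sketch of the same OpenMP idiom, assuming a cutoff to a serial sort for small ranges; the names CUTOFF, sort_serial, merge_halves, sort_tasks and parallel_sort are illustrative and not taken from the dataset.

/* Minimal sketch of the task-based merge sort idiom used in the row above:
 * one thread enters the region via "single" and spawns a task per half;
 * "taskwait" joins both halves before merging.  All names here are
 * illustrative stand-ins, not identifiers from the dataset row. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define CUTOFF 32                       /* fall back to insertion sort below this size */

static void sort_serial(int *a, int l, int r) {  /* insertion sort on [l, r) */
  for (int i = l + 1; i < r; i++) {
    int key = a[i], j = i - 1;
    while (j >= l && a[j] > key) { a[j + 1] = a[j]; j--; }
    a[j + 1] = key;
  }
}

static void merge_halves(int *a, int l, int m, int r) {  /* merge [l, m) and [m, r) */
  int *tmp = malloc(sizeof(int) * (r - l));
  int i = l, j = m, k = 0;
  while (i < m && j < r) tmp[k++] = (a[i] <= a[j]) ? a[i++] : a[j++];
  while (i < m) tmp[k++] = a[i++];
  while (j < r) tmp[k++] = a[j++];
  memcpy(a + l, tmp, sizeof(int) * (r - l));
  free(tmp);
}

static void sort_tasks(int *a, int l, int r) {
  if (r - l <= CUTOFF) { sort_serial(a, l, r); return; }
  int m = l + (r - l) / 2;
  #pragma omp task shared(a)            /* sort the left half concurrently */
  sort_tasks(a, l, m);
  #pragma omp task shared(a)            /* sort the right half concurrently */
  sort_tasks(a, m, r);
  #pragma omp taskwait                  /* both halves must be done before merging */
  merge_halves(a, l, m, r);
}

void parallel_sort(int *a, int n) {
  #pragma omp parallel                  /* create the thread team once */
  #pragma omp single                    /* one thread seeds the task tree */
  sort_tasks(a, 0, n);
}

int main(void) {
  enum { N = 100000 };
  int *a = malloc(sizeof(int) * N);
  for (int i = 0; i < N; i++) a[i] = rand() % 500;
  parallel_sort(a, N);
  for (int i = 1; i < N; i++)
    if (a[i - 1] > a[i]) { puts("not sorted"); free(a); return 1; }
  puts("sorted");
  free(a);
  return 0;
}

Built with an OpenMP-capable compiler (e.g. cc -fopenmp), the parallel/single pair creates the thread team once and lets a single thread seed the recursion, while taskwait ensures both halves are sorted before merging; the cutoff keeps task overhead from dominating on small subranges, which is the same role RECURSION_SIZE_LIMIT plays in the row above.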
channel.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC H H AAA N N N N EEEEE L % % C H H A A NN N NN N E L % % C HHHHH AAAAA N N N N N N EEE L % % C H H A A N NN N NN E L % % CCCC H H A A N N N N EEEEE LLLLL % % % % % % MagickCore Image Channel Methods % % % % Software Design % % Cristy % % December 2003 % % % % % % Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/cache-private.h" #include "magick/channel.h" #include "magick/color-private.h" #include "magick/colorspace-private.h" #include "magick/composite-private.h" #include "magick/exception-private.h" #include "magick/enhance.h" #include "magick/image.h" #include "magick/list.h" #include "magick/log.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/pixel-accessor.h" #include "magick/resource_.h" #include "magick/string-private.h" #include "magick/thread-private.h" #include "magick/token.h" #include "magick/utility.h" #include "magick/version.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o m b i n e I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CombineImages() combines one or more images into a single image. The % grayscale value of the pixels of each image in the sequence is assigned in % order to the specified channels of the combined image. The typical % ordering would be image 1 => Red, 2 => Green, 3 => Blue, etc. % % The format of the CombineImages method is: % % Image *CombineImages(const Image *image,const ChannelType channel, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *CombineImages(const Image *image,const ChannelType channel, ExceptionInfo *exception) { #define CombineImageTag "Combine/Image" CacheView *combine_view; const Image *next; Image *combine_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; /* Ensure the image are the same size. 
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); for (next=image; next != (Image *) NULL; next=GetNextImageInList(next)) { if ((next->columns != image->columns) || (next->rows != image->rows)) ThrowImageException(OptionError,"ImagesAreNotTheSameSize"); } combine_image=CloneImage(image,0,0,MagickTrue,exception); if (combine_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(combine_image,DirectClass) == MagickFalse) { InheritException(exception,&combine_image->exception); combine_image=DestroyImage(combine_image); return((Image *) NULL); } if (IssRGBCompatibleColorspace(image->colorspace) != MagickFalse) { if (fabs(image->gamma-1.0) <= MagickEpsilon) (void) SetImageColorspace(combine_image,RGBColorspace); else (void) SetImageColorspace(combine_image,sRGBColorspace); } if ((channel & OpacityChannel) != 0) combine_image->matte=MagickTrue; (void) SetImageBackgroundColor(combine_image); /* Combine images. */ status=MagickTrue; progress=0; combine_view=AcquireAuthenticCacheView(combine_image,exception); for (y=0; y < (ssize_t) combine_image->rows; y++) { CacheView *image_view; const Image *next; PixelPacket *pixels; const PixelPacket *magick_restrict p; PixelPacket *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; pixels=GetCacheViewAuthenticPixels(combine_view,0,y,combine_image->columns, 1,exception); if (pixels == (PixelPacket *) NULL) { status=MagickFalse; continue; } next=image; if (((channel & RedChannel) != 0) && (next != (Image *) NULL)) { image_view=AcquireVirtualCacheView(next,exception); p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception); if (p == (const PixelPacket *) NULL) continue; q=pixels; for (x=0; x < (ssize_t) combine_image->columns; x++) { SetPixelRed(q,ClampToQuantum(GetPixelIntensity(image,p))); p++; q++; } image_view=DestroyCacheView(image_view); next=GetNextImageInList(next); } if (((channel & GreenChannel) != 0) && (next != (Image *) NULL)) { image_view=AcquireVirtualCacheView(next,exception); p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception); if (p == (const PixelPacket *) NULL) continue; q=pixels; for (x=0; x < (ssize_t) combine_image->columns; x++) { SetPixelGreen(q,ClampToQuantum(GetPixelIntensity(image,p))); p++; q++; } image_view=DestroyCacheView(image_view); next=GetNextImageInList(next); } if (((channel & BlueChannel) != 0) && (next != (Image *) NULL)) { image_view=AcquireVirtualCacheView(next,exception); p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception); if (p == (const PixelPacket *) NULL) continue; q=pixels; for (x=0; x < (ssize_t) combine_image->columns; x++) { SetPixelBlue(q,ClampToQuantum(GetPixelIntensity(image,p))); p++; q++; } image_view=DestroyCacheView(image_view); next=GetNextImageInList(next); } if (((channel & OpacityChannel) != 0) && (next != (Image *) NULL)) { image_view=AcquireVirtualCacheView(next,exception); p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception); if (p == (const PixelPacket *) NULL) continue; q=pixels; for (x=0; x < (ssize_t) combine_image->columns; x++) { SetPixelAlpha(q,ClampToQuantum(GetPixelIntensity(image,p))); p++; q++; } image_view=DestroyCacheView(image_view); next=GetNextImageInList(next); } if (((channel & IndexChannel) != 0) && (image->colorspace == 
CMYKColorspace) && (next != (Image *) NULL)) { IndexPacket *indexes; image_view=AcquireVirtualCacheView(next,exception); p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception); if (p == (const PixelPacket *) NULL) continue; indexes=GetCacheViewAuthenticIndexQueue(combine_view); for (x=0; x < (ssize_t) combine_image->columns; x++) { SetPixelIndex(indexes+x,ClampToQuantum(GetPixelIntensity(image,p))); p++; } image_view=DestroyCacheView(image_view); next=GetNextImageInList(next); } if (SyncCacheViewAuthenticPixels(combine_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,CombineImageTag,progress, combine_image->rows); if (proceed == MagickFalse) status=MagickFalse; } } combine_view=DestroyCacheView(combine_view); if (IsGrayColorspace(combine_image->colorspace) != MagickFalse) (void) TransformImageColorspace(combine_image,sRGBColorspace); if (status == MagickFalse) combine_image=DestroyImage(combine_image); return(combine_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e A l p h a C h a n n e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageAlphaChannel() returns MagickFalse if the image alpha channel is % not activated. That is, the image is RGB rather than RGBA or CMYK rather % than CMYKA. % % The format of the GetImageAlphaChannel method is: % % MagickBooleanType GetImageAlphaChannel(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport MagickBooleanType GetImageAlphaChannel(const Image *image) { assert(image != (const Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); return(image->matte); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e p a r a t e I m a g e C h a n n e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SeparateImageChannel() separates a channel from the image and returns it as % a grayscale image. A channel is a particular color component of each pixel % in the image. % % The format of the SeparateImageChannel method is: % % MagickBooleanType SeparateImageChannel(Image *image, % const ChannelType channel) % % A description of each parameter follows: % % o image: the image. % % o channel: Identify which channel to extract: RedChannel, GreenChannel, % BlueChannel, OpacityChannel, CyanChannel, MagentaChannel, % YellowChannel, or BlackChannel. % */ MagickExport Image *SeparateImage(const Image *image,const ChannelType channel, ExceptionInfo *exception) { Image *separate_image; MagickBooleanType status; /* Initialize separate image attributes. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); separate_image=CloneImage(image,0,0,MagickTrue,exception); if (separate_image == (Image *) NULL) return((Image *) NULL); status=SeparateImageChannel(separate_image,channel); if (status == MagickFalse) separate_image=DestroyImage(separate_image); return(separate_image); } MagickExport MagickBooleanType SeparateImageChannel(Image *image, const ChannelType channel) { #define SeparateImageTag "Separate/Image" CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); if (channel == GrayChannels) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel); /* Separate image channels. */ status=MagickTrue; progress=0; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { IndexPacket *magick_restrict indexes; PixelPacket *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); switch (channel) { case RedChannel: { for (x=0; x < (ssize_t) image->columns; x++) { SetPixelGreen(q,GetPixelRed(q)); SetPixelBlue(q,GetPixelRed(q)); q++; } break; } case GreenChannel: { for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,GetPixelGreen(q)); SetPixelBlue(q,GetPixelGreen(q)); q++; } break; } case BlueChannel: { for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,GetPixelBlue(q)); SetPixelGreen(q,GetPixelBlue(q)); q++; } break; } case OpacityChannel: { for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,GetPixelOpacity(q)); SetPixelGreen(q,GetPixelOpacity(q)); SetPixelBlue(q,GetPixelOpacity(q)); q++; } break; } case BlackChannel: { if ((image->storage_class != PseudoClass) && (image->colorspace != CMYKColorspace)) break; for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,GetPixelIndex(indexes+x)); SetPixelGreen(q,GetPixelIndex(indexes+x)); SetPixelBlue(q,GetPixelIndex(indexes+x)); q++; } break; } case TrueAlphaChannel: { for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,GetPixelAlpha(q)); SetPixelGreen(q,GetPixelAlpha(q)); SetPixelBlue(q,GetPixelAlpha(q)); q++; } break; } case GrayChannels: { for (x=0; x < (ssize_t) image->columns; x++) { SetPixelAlpha(q,ClampToQuantum(GetPixelIntensity(image,q))); q++; } break; } default: break; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,SeparateImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } 
image_view=DestroyCacheView(image_view); if (channel != GrayChannels) { image->matte=MagickFalse; (void) SetImageColorspace(image,GRAYColorspace); } return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e p a r a t e I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SeparateImages() returns a separate grayscale image for each channel % specified. % % The format of the SeparateImages method is: % % MagickBooleanType SeparateImages(const Image *image, % const ChannelType channel,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: Identify which channels to extract: RedChannel, GreenChannel, % BlueChannel, OpacityChannel, CyanChannel, MagentaChannel, % YellowChannel, or BlackChannel. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *SeparateImages(const Image *image,const ChannelType channel, ExceptionInfo *exception) { Image *images, *separate_image; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); images=NewImageList(); if ((channel & RedChannel) != 0) { separate_image=CloneImage(image,0,0,MagickTrue,exception); (void) SeparateImageChannel(separate_image,RedChannel); AppendImageToList(&images,separate_image); } if ((channel & GreenChannel) != 0) { separate_image=CloneImage(image,0,0,MagickTrue,exception); (void) SeparateImageChannel(separate_image,GreenChannel); AppendImageToList(&images,separate_image); } if ((channel & BlueChannel) != 0) { separate_image=CloneImage(image,0,0,MagickTrue,exception); (void) SeparateImageChannel(separate_image,BlueChannel); AppendImageToList(&images,separate_image); } if (((channel & BlackChannel) != 0) && (image->colorspace == CMYKColorspace)) { separate_image=CloneImage(image,0,0,MagickTrue,exception); (void) SeparateImageChannel(separate_image,BlackChannel); AppendImageToList(&images,separate_image); } if ((channel & AlphaChannel) != 0) { separate_image=CloneImage(image,0,0,MagickTrue,exception); (void) SeparateImageChannel(separate_image,TrueAlphaChannel); AppendImageToList(&images,separate_image); } return(images); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e A l p h a C h a n n e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageAlphaChannel() activates, deactivates, resets, or sets the alpha % channel. % % The format of the SetImageAlphaChannel method is: % % MagickBooleanType SetImageAlphaChannel(Image *image, % const AlphaChannelType alpha_type) % % A description of each parameter follows: % % o image: the image. % % o alpha_type: The alpha channel type: ActivateAlphaChannel, % AssociateAlphaChannel, CopyAlphaChannel, Disassociate, % DeactivateAlphaChannel, ExtractAlphaChannel, OpaqueAlphaChannel, % ResetAlphaChannel, SetAlphaChannel, ShapeAlphaChannel, and % TransparentAlphaChannel. 
% */ MagickExport MagickBooleanType SetImageAlphaChannel(Image *image, const AlphaChannelType alpha_type) { CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; ssize_t y; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); exception=(&image->exception); status=MagickTrue; switch (alpha_type) { case ActivateAlphaChannel: { if (image->matte == MagickTrue) return(status); image->matte=MagickTrue; break; } case AssociateAlphaChannel: { /* Associate alpha. */ status=SetImageStorageClass(image,DirectClass); if (status == MagickFalse) break; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { PixelPacket *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double gamma; gamma=QuantumScale*GetPixelAlpha(q); SetPixelRed(q,ClampToQuantum(gamma*GetPixelRed(q))); SetPixelGreen(q,ClampToQuantum(gamma*GetPixelGreen(q))); SetPixelBlue(q,ClampToQuantum(gamma*GetPixelBlue(q))); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); image->matte=MagickFalse; break; } case BackgroundAlphaChannel: { IndexPacket index; MagickBooleanType status; MagickPixelPacket background; PixelPacket pixel; /* Set transparent pixels to background color. */ if (image->matte == MagickFalse) break; status=SetImageStorageClass(image,DirectClass); if (status == MagickFalse) break; GetMagickPixelPacket(image,&background); SetMagickPixelPacket(image,&image->background_color,(const IndexPacket *) NULL,&background); if (image->colorspace == CMYKColorspace) ConvertRGBToCMYK(&background); index=0; SetPixelPacket(image,&background,&pixel,&index); status=MagickTrue; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { IndexPacket *magick_restrict indexes; PixelPacket *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { if (q->opacity == TransparentOpacity) { SetPixelRed(q,pixel.red); SetPixelGreen(q,pixel.green); SetPixelBlue(q,pixel.blue); } q++; } if (image->colorspace == CMYKColorspace) { indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) SetPixelIndex(indexes+x,index); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); return(status); } case CopyAlphaChannel: case ShapeAlphaChannel: { /* Special usage case for SeparateImageChannel(): copy grayscale color to the alpha channel. */ status=SeparateImageChannel(image,GrayChannels); image->matte=MagickTrue; /* make sure transparency is now on! 
*/ if (alpha_type == ShapeAlphaChannel) { MagickPixelPacket background; /* Reset all color channels to background color. */ GetMagickPixelPacket(image,&background); SetMagickPixelPacket(image,&(image->background_color),(IndexPacket *) NULL,&background); (void) LevelColorsImage(image,&background,&background,MagickTrue); } break; } case DeactivateAlphaChannel: { if (image->matte == MagickFalse) return(status); image->matte=MagickFalse; break; } case DisassociateAlphaChannel: { status=SetImageStorageClass(image,DirectClass); if (status == MagickFalse) break; image->matte=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { PixelPacket *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double alpha, gamma; alpha=QuantumScale*GetPixelAlpha(q); gamma=PerceptibleReciprocal(alpha); SetPixelRed(q,ClampToQuantum(gamma*GetPixelRed(q))); SetPixelGreen(q,ClampToQuantum(gamma*GetPixelGreen(q))); SetPixelBlue(q,ClampToQuantum(gamma*GetPixelBlue(q))); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); image->matte=MagickFalse; break; } case ExtractAlphaChannel: { status=SeparateImageChannel(image,TrueAlphaChannel); image->matte=MagickFalse; break; } case RemoveAlphaChannel: case FlattenAlphaChannel: { IndexPacket index; MagickPixelPacket background; PixelPacket pixel; /* Flatten image pixels over the background pixels. 
*/ if (image->matte == MagickFalse) break; if (SetImageStorageClass(image,DirectClass) == MagickFalse) break; GetMagickPixelPacket(image,&background); SetMagickPixelPacket(image,&image->background_color,(const IndexPacket *) NULL,&background); if (image->colorspace == CMYKColorspace) ConvertRGBToCMYK(&background); (void) memset(&pixel,0,sizeof(pixel)); index=0; SetPixelPacket(image,&background,&pixel,&index); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { IndexPacket *magick_restrict indexes; PixelPacket *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double gamma, opacity; gamma=1.0-QuantumScale*QuantumScale*q->opacity*pixel.opacity; opacity=(double) QuantumRange*(1.0-gamma); gamma=PerceptibleReciprocal(gamma); q->red=ClampToQuantum(gamma*MagickOver_((MagickRealType) q->red, (MagickRealType) q->opacity,(MagickRealType) pixel.red, (MagickRealType) pixel.opacity)); q->green=ClampToQuantum(gamma*MagickOver_((MagickRealType) q->green, (MagickRealType) q->opacity,(MagickRealType) pixel.green, (MagickRealType) pixel.opacity)); q->blue=ClampToQuantum(gamma*MagickOver_((MagickRealType) q->blue, (MagickRealType) q->opacity,(MagickRealType) pixel.blue, (MagickRealType) pixel.opacity)); q->opacity=ClampToQuantum(opacity); q++; } if (image->colorspace == CMYKColorspace) { indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) SetPixelIndex(indexes+x,index); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); return(status); } case ResetAlphaChannel: /* deprecated */ case OpaqueAlphaChannel: { status=SetImageOpacity(image,OpaqueOpacity); break; } case SetAlphaChannel: { if (image->matte == MagickFalse) status=SetImageOpacity(image,OpaqueOpacity); break; } case TransparentAlphaChannel: { status=SetImageOpacity(image,TransparentOpacity); break; } case UndefinedAlphaChannel: break; } if (status == MagickFalse) return(status); return(SyncImagePixelCache(image,&image->exception)); }
/* * Include declarations. */ #include "magick/studio.h" #include "magick/cache-private.h" #include "magick/channel.h" #include "magick/color-private.h" #include "magick/colorspace-private.h" #include "magick/composite-private.h" #include "magick/exception-private.h" #include "magick/enhance.h" #include "magick/image.h" #include "magick/list.h" #include "magick/log.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/pixel-accessor.h" #include "magick/resource_.h" #include "magick/string-private.h" #include "magick/thread-private.h" #include "magick/token.h" #include "magick/utility.h" #include "magick/version.h" /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % C o m b i n e I m a g e s * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % CombineImages() combines one or more images into a single image. * The % grayscale value of the pixels of each image in the sequence is * assigned in % order to the specified channels of the combined image. * The typical % ordering would be image 1 => Red, 2 => Green, 3 => Blue, * etc. % % The format of the CombineImages method is: % % Image * *CombineImages(const Image *image,const ChannelType channel, % * ExceptionInfo *exception) % % A description of each parameter follows: % * % o image: the image. % % o exception: return any errors or warnings * in this structure. % */ MagickExport Image * CombineImages(const Image * image, const ChannelType channel, ExceptionInfo * exception) { #define CombineImageTag "Combine/Image" CacheView * combine_view; const Image * next; Image * combine_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; /* * Ensure the image are the same size. */ assert(image != (const Image *)NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); for (next = image; next != (Image *) NULL; next = GetNextImageInList(next)) { if ((next->columns != image->columns) || (next->rows != image->rows)) ThrowImageException(OptionError, "ImagesAreNotTheSameSize"); } combine_image = CloneImage(image, 0, 0, MagickTrue, exception); if (combine_image == (Image *) NULL) return ((Image *) NULL); if (SetImageStorageClass(combine_image, DirectClass) == MagickFalse) { InheritException(exception, &combine_image->exception); combine_image = DestroyImage(combine_image); return ((Image *) NULL); } if (IssRGBCompatibleColorspace(image->colorspace) != MagickFalse) { if (fabs(image->gamma - 1.0) <= MagickEpsilon) (void)SetImageColorspace(combine_image, RGBColorspace); else (void)SetImageColorspace(combine_image, sRGBColorspace); } if ((channel & OpacityChannel) != 0) combine_image->matte = MagickTrue; (void)SetImageBackgroundColor(combine_image); /* * Combine images. 
*/ status = MagickTrue; progress = 0; combine_view = AcquireAuthenticCacheView(combine_image, exception); for (y = 0; y < (ssize_t) combine_image->rows; y++) { CacheView * image_view; const Image * next; PixelPacket * pixels; const PixelPacket * magick_restrict p; PixelPacket * magick_restrict q; ssize_t x; if (status == MagickFalse) continue; pixels = GetCacheViewAuthenticPixels(combine_view, 0, y, combine_image->columns, 1, exception); if (pixels == (PixelPacket *) NULL) { status = MagickFalse; continue; } next = image; if (((channel & RedChannel) != 0) && (next != (Image *) NULL)) { image_view = AcquireVirtualCacheView(next, exception); p = GetCacheViewVirtualPixels(image_view, 0, y, next->columns, 1, exception); if (p == (const PixelPacket *)NULL) continue; q = pixels; for (x = 0; x < (ssize_t) combine_image->columns; x++) { SetPixelRed(q, ClampToQuantum(GetPixelIntensity(image, p))); p++; q++; } image_view = DestroyCacheView(image_view); next = GetNextImageInList(next); } if (((channel & GreenChannel) != 0) && (next != (Image *) NULL)) { image_view = AcquireVirtualCacheView(next, exception); p = GetCacheViewVirtualPixels(image_view, 0, y, next->columns, 1, exception); if (p == (const PixelPacket *)NULL) continue; q = pixels; for (x = 0; x < (ssize_t) combine_image->columns; x++) { SetPixelGreen(q, ClampToQuantum(GetPixelIntensity(image, p))); p++; q++; } image_view = DestroyCacheView(image_view); next = GetNextImageInList(next); } if (((channel & BlueChannel) != 0) && (next != (Image *) NULL)) { image_view = AcquireVirtualCacheView(next, exception); p = GetCacheViewVirtualPixels(image_view, 0, y, next->columns, 1, exception); if (p == (const PixelPacket *)NULL) continue; q = pixels; for (x = 0; x < (ssize_t) combine_image->columns; x++) { SetPixelBlue(q, ClampToQuantum(GetPixelIntensity(image, p))); p++; q++; } image_view = DestroyCacheView(image_view); next = GetNextImageInList(next); } if (((channel & OpacityChannel) != 0) && (next != (Image *) NULL)) { image_view = AcquireVirtualCacheView(next, exception); p = GetCacheViewVirtualPixels(image_view, 0, y, next->columns, 1, exception); if (p == (const PixelPacket *)NULL) continue; q = pixels; for (x = 0; x < (ssize_t) combine_image->columns; x++) { SetPixelAlpha(q, ClampToQuantum(GetPixelIntensity(image, p))); p++; q++; } image_view = DestroyCacheView(image_view); next = GetNextImageInList(next); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace) && (next != (Image *) NULL)) { IndexPacket * indexes; image_view = AcquireVirtualCacheView(next, exception); p = GetCacheViewVirtualPixels(image_view, 0, y, next->columns, 1, exception); if (p == (const PixelPacket *)NULL) continue; indexes = GetCacheViewAuthenticIndexQueue(combine_view); for (x = 0; x < (ssize_t) combine_image->columns; x++) { SetPixelIndex(indexes + x, ClampToQuantum(GetPixelIntensity(image, p))); p++; } image_view = DestroyCacheView(image_view); next = GetNextImageInList(next); } if (SyncCacheViewAuthenticPixels(combine_view, exception) == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; progress++; proceed = SetImageProgress(image, CombineImageTag, progress, combine_image->rows); if (proceed == MagickFalse) status = MagickFalse; } } combine_view = DestroyCacheView(combine_view); if (IsGrayColorspace(combine_image->colorspace) != MagickFalse) (void)TransformImageColorspace(combine_image, sRGBColorspace); if (status == MagickFalse) combine_image = 
DestroyImage(combine_image); return (combine_image); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % G e t I m a g e A l p h a C h a n n e l * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % GetImageAlphaChannel() returns MagickFalse if the image alpha * channel is % not activated. That is, the image is RGB rather than RGBA * or CMYK rather % than CMYKA. % % The format of the GetImageAlphaChannel * method is: % % MagickBooleanType GetImageAlphaChannel(const Image * *image) % % A description of each parameter follows: % % o image: the * image. % */ MagickExport MagickBooleanType GetImageAlphaChannel(const Image * image) { assert(image != (const Image *)NULL); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "..."); assert(image->signature == MagickCoreSignature); return (image->matte); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % S e p a r a t e I m a g e C h a n n e l * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % SeparateImageChannel() separates a channel from the image and * returns it as % a grayscale image. A channel is a particular color * component of each pixel % in the image. % % The format of the * SeparateImageChannel method is: % % MagickBooleanType * SeparateImageChannel(Image *image, % const ChannelType channel) % % * A description of each parameter follows: % % o image: the image. % % * o channel: Identify which channel to extract: RedChannel, GreenChannel, % * BlueChannel, OpacityChannel, CyanChannel, MagentaChannel, % * YellowChannel, or BlackChannel. % */ MagickExport Image * SeparateImage(const Image * image, const ChannelType channel, ExceptionInfo * exception) { Image * separate_image; MagickBooleanType status; /* * Initialize separate image attributes. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); separate_image = CloneImage(image, 0, 0, MagickTrue, exception); if (separate_image == (Image *) NULL) return ((Image *) NULL); status = SeparateImageChannel(separate_image, channel); if (status == MagickFalse) separate_image = DestroyImage(separate_image); return (separate_image); } MagickExport MagickBooleanType SeparateImageChannel(Image * image, const ChannelType channel) { #define SeparateImageTag "Separate/Image" CacheView * image_view; ExceptionInfo * exception; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); if (SetImageStorageClass(image, DirectClass) == MagickFalse) return (MagickFalse); if (channel == GrayChannels) (void)SetImageAlphaChannel(image, OpaqueAlphaChannel); /* * Separate image channels. 
*/ status = MagickTrue; progress = 0; exception = (&image->exception); image_view = AcquireAuthenticCacheView(image, exception); for (y = 0; y < (ssize_t) image->rows; y++) { IndexPacket * magick_restrict indexes; PixelPacket * magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (PixelPacket *) NULL) { status = MagickFalse; continue; } indexes = GetCacheViewAuthenticIndexQueue(image_view); switch (channel) { case RedChannel: { for (x = 0; x < (ssize_t) image->columns; x++) { SetPixelGreen(q, GetPixelRed(q)); SetPixelBlue(q, GetPixelRed(q)); q++; } break; } case GreenChannel: { for (x = 0; x < (ssize_t) image->columns; x++) { SetPixelRed(q, GetPixelGreen(q)); SetPixelBlue(q, GetPixelGreen(q)); q++; } break; } case BlueChannel: { for (x = 0; x < (ssize_t) image->columns; x++) { SetPixelRed(q, GetPixelBlue(q)); SetPixelGreen(q, GetPixelBlue(q)); q++; } break; } case OpacityChannel: { for (x = 0; x < (ssize_t) image->columns; x++) { SetPixelRed(q, GetPixelOpacity(q)); SetPixelGreen(q, GetPixelOpacity(q)); SetPixelBlue(q, GetPixelOpacity(q)); q++; } break; } case BlackChannel: { if ((image->storage_class != PseudoClass) && (image->colorspace != CMYKColorspace)) break; for (x = 0; x < (ssize_t) image->columns; x++) { SetPixelRed(q, GetPixelIndex(indexes + x)); SetPixelGreen(q, GetPixelIndex(indexes + x)); SetPixelBlue(q, GetPixelIndex(indexes + x)); q++; } break; } case TrueAlphaChannel: { for (x = 0; x < (ssize_t) image->columns; x++) { SetPixelRed(q, GetPixelAlpha(q)); SetPixelGreen(q, GetPixelAlpha(q)); SetPixelBlue(q, GetPixelAlpha(q)); q++; } break; } case GrayChannels: { for (x = 0; x < (ssize_t) image->columns; x++) { SetPixelAlpha(q, ClampToQuantum(GetPixelIntensity(image, q))); q++; } break; } default: break; } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; progress++; proceed = SetImageProgress(image, SeparateImageTag, progress, image->rows); if (proceed == MagickFalse) status = MagickFalse; } } image_view = DestroyCacheView(image_view); if (channel != GrayChannels) { image->matte = MagickFalse; (void)SetImageColorspace(image, GRAYColorspace); } return (status); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % S e p a r a t e I m a g e s * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % SeparateImages() returns a separate grayscale image for each * channel % specified. % % The format of the SeparateImages method is: % % * MagickBooleanType SeparateImages(const Image *image, % const * ChannelType channel,ExceptionInfo *exception) % % A description of each * parameter follows: % % o image: the image. % % o channel: Identify * which channels to extract: RedChannel, GreenChannel, % BlueChannel, * OpacityChannel, CyanChannel, MagentaChannel, % YellowChannel, or * BlackChannel. % % o exception: return any errors or warnings in this * structure. 
% */ MagickExport Image * SeparateImages(const Image * image, const ChannelType channel, ExceptionInfo * exception) { Image * images, *separate_image; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); images = NewImageList(); if ((channel & RedChannel) != 0) { separate_image = CloneImage(image, 0, 0, MagickTrue, exception); (void)SeparateImageChannel(separate_image, RedChannel); AppendImageToList(&images, separate_image); } if ((channel & GreenChannel) != 0) { separate_image = CloneImage(image, 0, 0, MagickTrue, exception); (void)SeparateImageChannel(separate_image, GreenChannel); AppendImageToList(&images, separate_image); } if ((channel & BlueChannel) != 0) { separate_image = CloneImage(image, 0, 0, MagickTrue, exception); (void)SeparateImageChannel(separate_image, BlueChannel); AppendImageToList(&images, separate_image); } if (((channel & BlackChannel) != 0) && (image->colorspace == CMYKColorspace)) { separate_image = CloneImage(image, 0, 0, MagickTrue, exception); (void)SeparateImageChannel(separate_image, BlackChannel); AppendImageToList(&images, separate_image); } if ((channel & AlphaChannel) != 0) { separate_image = CloneImage(image, 0, 0, MagickTrue, exception); (void)SeparateImageChannel(separate_image, TrueAlphaChannel); AppendImageToList(&images, separate_image); } return (images); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % S e t I m a g e A l p h a C h a n n e l * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % SetImageAlphaChannel() activates, deactivates, resets, or sets * the alpha % channel. % % The format of the SetImageAlphaChannel method * is: % % MagickBooleanType SetImageAlphaChannel(Image *image, % * const AlphaChannelType alpha_type) % % A description of each parameter * follows: % % o image: the image. % % o alpha_type: The alpha * channel type: ActivateAlphaChannel, % AssociateAlphaChannel, * CopyAlphaChannel, Disassociate, % DeactivateAlphaChannel, * ExtractAlphaChannel, OpaqueAlphaChannel, % ResetAlphaChannel, * SetAlphaChannel, ShapeAlphaChannel, and % TransparentAlphaChannel. % */ MagickExport MagickBooleanType SetImageAlphaChannel(Image * image, const AlphaChannelType alpha_type) { CacheView * image_view; ExceptionInfo * exception; MagickBooleanType status; ssize_t y; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "..."); assert(image->signature == MagickCoreSignature); exception = (&image->exception); status = MagickTrue; switch (alpha_type) { case ActivateAlphaChannel: { if (image->matte == MagickTrue) return (status); image->matte = MagickTrue; break; } case AssociateAlphaChannel: { /* * Associate alpha. 
*/ status = SetImageStorageClass(image, DirectClass); if (status == MagickFalse) break; image_view = AcquireAuthenticCacheView(image, exception); for (y = 0; y < (ssize_t) image->rows; y++) { PixelPacket * magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (PixelPacket *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { double gamma; gamma = QuantumScale * GetPixelAlpha(q); SetPixelRed(q, ClampToQuantum(gamma * GetPixelRed(q))); SetPixelGreen(q, ClampToQuantum(gamma * GetPixelGreen(q))); SetPixelBlue(q, ClampToQuantum(gamma * GetPixelBlue(q))); q++; } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; } image_view = DestroyCacheView(image_view); image->matte = MagickFalse; break; } case BackgroundAlphaChannel: { IndexPacket index; MagickBooleanType status; MagickPixelPacket background; PixelPacket pixel; /* * Set transparent pixels to background color. */ if (image->matte == MagickFalse) break; status = SetImageStorageClass(image, DirectClass); if (status == MagickFalse) break; GetMagickPixelPacket(image, &background); SetMagickPixelPacket(image, &image->background_color, (const IndexPacket *) NULL, &background); if (image->colorspace == CMYKColorspace) ConvertRGBToCMYK(&background); index = 0; SetPixelPacket(image, &background, &pixel, &index); status = MagickTrue; exception = (&image->exception); image_view = AcquireAuthenticCacheView(image, exception); for (y = 0; y < (ssize_t) image->rows; y++) { IndexPacket * magick_restrict indexes; PixelPacket * magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (PixelPacket *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { if (q->opacity == TransparentOpacity) { SetPixelRed(q, pixel.red); SetPixelGreen(q, pixel.green); SetPixelBlue(q, pixel.blue); } q++; } if (image->colorspace == CMYKColorspace) { indexes = GetCacheViewAuthenticIndexQueue(image_view); for (x = 0; x < (ssize_t) image->columns; x++) SetPixelIndex(indexes + x, index); } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; } image_view = DestroyCacheView(image_view); return (status); } case CopyAlphaChannel: case ShapeAlphaChannel: { /* * Special usage case for SeparateImageChannel(): copy grayscale * color to the alpha channel. */ status = SeparateImageChannel(image, GrayChannels); image->matte = MagickTrue; /* make sure transparency is now on! */ if (alpha_type == ShapeAlphaChannel) { MagickPixelPacket background; /* * Reset all color channels to background color. 
*/ GetMagickPixelPacket(image, &background); SetMagickPixelPacket(image, &(image->background_color), (IndexPacket *) NULL, &background); (void)LevelColorsImage(image, &background, &background, MagickTrue); } break; } case DeactivateAlphaChannel: { if (image->matte == MagickFalse) return (status); image->matte = MagickFalse; break; } case DisassociateAlphaChannel: { status = SetImageStorageClass(image, DirectClass); if (status == MagickFalse) break; image->matte = MagickTrue; image_view = AcquireAuthenticCacheView(image, exception); for (y = 0; y < (ssize_t) image->rows; y++) { PixelPacket * magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (PixelPacket *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { double alpha, gamma; alpha = QuantumScale * GetPixelAlpha(q); gamma = PerceptibleReciprocal(alpha); SetPixelRed(q, ClampToQuantum(gamma * GetPixelRed(q))); SetPixelGreen(q, ClampToQuantum(gamma * GetPixelGreen(q))); SetPixelBlue(q, ClampToQuantum(gamma * GetPixelBlue(q))); q++; } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; } image_view = DestroyCacheView(image_view); image->matte = MagickFalse; break; } case ExtractAlphaChannel: { status = SeparateImageChannel(image, TrueAlphaChannel); image->matte = MagickFalse; break; } case RemoveAlphaChannel: case FlattenAlphaChannel: { IndexPacket index; MagickPixelPacket background; PixelPacket pixel; /* * Flatten image pixels over the background pixels. */ if (image->matte == MagickFalse) break; if (SetImageStorageClass(image, DirectClass) == MagickFalse) break; GetMagickPixelPacket(image, &background); SetMagickPixelPacket(image, &image->background_color, (const IndexPacket *) NULL, &background); if (image->colorspace == CMYKColorspace) ConvertRGBToCMYK(&background); (void)memset(&pixel, 0, sizeof(pixel)); index = 0; SetPixelPacket(image, &background, &pixel, &index); image_view = AcquireAuthenticCacheView(image, exception); for (y = 0; y < (ssize_t) image->rows; y++) { IndexPacket * magick_restrict indexes; PixelPacket * magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (PixelPacket *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { double gamma, opacity; gamma = 1.0 - QuantumScale * QuantumScale * q->opacity * pixel.opacity; opacity = (double)QuantumRange *(1.0 - gamma); gamma = PerceptibleReciprocal(gamma); q->red = ClampToQuantum(gamma * MagickOver_((MagickRealType) q->red, (MagickRealType) q->opacity, (MagickRealType) pixel.red, (MagickRealType) pixel.opacity)); q->green = ClampToQuantum(gamma * MagickOver_((MagickRealType) q->green, (MagickRealType) q->opacity, (MagickRealType) pixel.green, (MagickRealType) pixel.opacity)); q->blue = ClampToQuantum(gamma * MagickOver_((MagickRealType) q->blue, (MagickRealType) q->opacity, (MagickRealType) pixel.blue, (MagickRealType) pixel.opacity)); q->opacity = ClampToQuantum(opacity); q++; } if (image->colorspace == CMYKColorspace) { indexes = GetCacheViewAuthenticIndexQueue(image_view); for (x = 0; x < (ssize_t) image->columns; x++) SetPixelIndex(indexes + x, index); } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; } image_view = DestroyCacheView(image_view); return (status); } case ResetAlphaChannel: /* deprecated */ 
case OpaqueAlphaChannel: { status = SetImageOpacity(image, OpaqueOpacity); break; } case SetAlphaChannel: { if (image->matte == MagickFalse) status = SetImageOpacity(image, OpaqueOpacity); break; } case TransparentAlphaChannel: { status = SetImageOpacity(image, TransparentOpacity); break; } case UndefinedAlphaChannel: break; } if (status == MagickFalse) return (status); return (SyncImagePixelCache(image, &image->exception)); }
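The omp_formatted cells of channel.c differ from the no_omp_formatted cell above mainly in the row loops: a #pragma omp parallel for over image rows, a shared status flag for early bail-out, and a progress counter incremented under #pragma omp atomic, all guarded by MAGICKCORE_OPENMP_SUPPORT. Below is a minimal sketch of that pattern, assuming a stand-in process_row() and a plain _OPENMP guard instead of MagickCore's macros (magick_number_threads is omitted); it is illustrative and not the MagickCore API.

/* Minimal sketch of the row-parallel pattern used in the omp_formatted
 * channel.c cells: independent work per image row, a shared status flag
 * for best-effort early bail-out, and a progress counter bumped under
 * "omp atomic".  process_row(), ROWS and the _OPENMP guard are
 * illustrative stand-ins, not MagickCore names. */
#include <stdio.h>

#define ROWS 1024

static int process_row(long y) {
  (void) y;                             /* stand-in for per-row pixel work */
  return 1;                             /* report success */
}

int main(void) {
  int  status   = 1;                    /* MagickTrue-style success flag */
  long progress = 0;

#if defined(_OPENMP)
  #pragma omp parallel for schedule(static) shared(progress, status)
#endif
  for (long y = 0; y < ROWS; y++) {
    if (status == 0)
      continue;                         /* best-effort early-out, as in the source */
    if (process_row(y) == 0) {
#if defined(_OPENMP)
      #pragma omp atomic write          /* clear the shared flag atomically */
#endif
      status = 0;
    }
#if defined(_OPENMP)
    #pragma omp atomic                  /* shared counter, so update atomically */
#endif
    progress++;
  }
  printf("status=%d progress=%ld/%d\n", status, progress, ROWS);
  return status == 1 ? 0 : 1;
}

As in the source, the status check at the top of each iteration is only a best-effort early-out: the sketch clears the flag under omp atomic write, while the unguarded read mirrors the original's pattern of letting later rows skip work once any row has failed.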
/* * Include declarations. */ #include "magick/studio.h" #include "magick/cache-private.h" #include "magick/channel.h" #include "magick/color-private.h" #include "magick/colorspace-private.h" #include "magick/composite-private.h" #include "magick/exception-private.h" #include "magick/enhance.h" #include "magick/image.h" #include "magick/list.h" #include "magick/log.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/pixel-accessor.h" #include "magick/resource_.h" #include "magick/string-private.h" #include "magick/thread-private.h" #include "magick/token.h" #include "magick/utility.h" #include "magick/version.h" /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % C o m b i n e I m a g e s * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % CombineImages() combines one or more images into a single image. * The % grayscale value of the pixels of each image in the sequence is * assigned in % order to the specified channels of the combined image. * The typical % ordering would be image 1 => Red, 2 => Green, 3 => Blue, * etc. % % The format of the CombineImages method is: % % Image * *CombineImages(const Image *image,const ChannelType channel, % * ExceptionInfo *exception) % % A description of each parameter follows: % * % o image: the image. % % o exception: return any errors or warnings * in this structure. % */ MagickExport Image * CombineImages(const Image * image, const ChannelType channel, ExceptionInfo * exception) { #define CombineImageTag "Combine/Image" CacheView * combine_view; const Image * next; Image * combine_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; /* * Ensure the image are the same size. */ assert(image != (const Image *)NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); for (next = image; next != (Image *) NULL; next = GetNextImageInList(next)) { if ((next->columns != image->columns) || (next->rows != image->rows)) ThrowImageException(OptionError, "ImagesAreNotTheSameSize"); } combine_image = CloneImage(image, 0, 0, MagickTrue, exception); if (combine_image == (Image *) NULL) return ((Image *) NULL); if (SetImageStorageClass(combine_image, DirectClass) == MagickFalse) { InheritException(exception, &combine_image->exception); combine_image = DestroyImage(combine_image); return ((Image *) NULL); } if (IssRGBCompatibleColorspace(image->colorspace) != MagickFalse) { if (fabs(image->gamma - 1.0) <= MagickEpsilon) (void)SetImageColorspace(combine_image, RGBColorspace); else (void)SetImageColorspace(combine_image, sRGBColorspace); } if ((channel & OpacityChannel) != 0) combine_image->matte = MagickTrue; (void)SetImageBackgroundColor(combine_image); /* * Combine images. 
*/ status = MagickTrue; progress = 0; combine_view = AcquireAuthenticCacheView(combine_image, exception); for (y = 0; y < (ssize_t) combine_image->rows; y++) { CacheView * image_view; const Image * next; PixelPacket * pixels; const PixelPacket * magick_restrict p; PixelPacket * magick_restrict q; ssize_t x; if (status == MagickFalse) continue; pixels = GetCacheViewAuthenticPixels(combine_view, 0, y, combine_image->columns, 1, exception); if (pixels == (PixelPacket *) NULL) { status = MagickFalse; continue; } next = image; if (((channel & RedChannel) != 0) && (next != (Image *) NULL)) { image_view = AcquireVirtualCacheView(next, exception); p = GetCacheViewVirtualPixels(image_view, 0, y, next->columns, 1, exception); if (p == (const PixelPacket *)NULL) continue; q = pixels; for (x = 0; x < (ssize_t) combine_image->columns; x++) { SetPixelRed(q, ClampToQuantum(GetPixelIntensity(image, p))); p++; q++; } image_view = DestroyCacheView(image_view); next = GetNextImageInList(next); } if (((channel & GreenChannel) != 0) && (next != (Image *) NULL)) { image_view = AcquireVirtualCacheView(next, exception); p = GetCacheViewVirtualPixels(image_view, 0, y, next->columns, 1, exception); if (p == (const PixelPacket *)NULL) continue; q = pixels; for (x = 0; x < (ssize_t) combine_image->columns; x++) { SetPixelGreen(q, ClampToQuantum(GetPixelIntensity(image, p))); p++; q++; } image_view = DestroyCacheView(image_view); next = GetNextImageInList(next); } if (((channel & BlueChannel) != 0) && (next != (Image *) NULL)) { image_view = AcquireVirtualCacheView(next, exception); p = GetCacheViewVirtualPixels(image_view, 0, y, next->columns, 1, exception); if (p == (const PixelPacket *)NULL) continue; q = pixels; for (x = 0; x < (ssize_t) combine_image->columns; x++) { SetPixelBlue(q, ClampToQuantum(GetPixelIntensity(image, p))); p++; q++; } image_view = DestroyCacheView(image_view); next = GetNextImageInList(next); } if (((channel & OpacityChannel) != 0) && (next != (Image *) NULL)) { image_view = AcquireVirtualCacheView(next, exception); p = GetCacheViewVirtualPixels(image_view, 0, y, next->columns, 1, exception); if (p == (const PixelPacket *)NULL) continue; q = pixels; for (x = 0; x < (ssize_t) combine_image->columns; x++) { SetPixelAlpha(q, ClampToQuantum(GetPixelIntensity(image, p))); p++; q++; } image_view = DestroyCacheView(image_view); next = GetNextImageInList(next); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace) && (next != (Image *) NULL)) { IndexPacket * indexes; image_view = AcquireVirtualCacheView(next, exception); p = GetCacheViewVirtualPixels(image_view, 0, y, next->columns, 1, exception); if (p == (const PixelPacket *)NULL) continue; indexes = GetCacheViewAuthenticIndexQueue(combine_view); for (x = 0; x < (ssize_t) combine_image->columns; x++) { SetPixelIndex(indexes + x, ClampToQuantum(GetPixelIntensity(image, p))); p++; } image_view = DestroyCacheView(image_view); next = GetNextImageInList(next); } if (SyncCacheViewAuthenticPixels(combine_view, exception) == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed = SetImageProgress(image, CombineImageTag, progress, combine_image->rows); if (proceed == MagickFalse) status = MagickFalse; } } combine_view = DestroyCacheView(combine_view); if (IsGrayColorspace(combine_image->colorspace) != MagickFalse) (void)TransformImageColorspace(combine_image, sRGBColorspace); if 
(status == MagickFalse) combine_image = DestroyImage(combine_image); return (combine_image); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % G e t I m a g e A l p h a C h a n n e l * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % GetImageAlphaChannel() returns MagickFalse if the image alpha * channel is % not activated. That is, the image is RGB rather than RGBA * or CMYK rather % than CMYKA. % % The format of the GetImageAlphaChannel * method is: % % MagickBooleanType GetImageAlphaChannel(const Image * *image) % % A description of each parameter follows: % % o image: the * image. % */ MagickExport MagickBooleanType GetImageAlphaChannel(const Image * image) { assert(image != (const Image *)NULL); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "..."); assert(image->signature == MagickCoreSignature); return (image->matte); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % S e p a r a t e I m a g e C h a n n e l * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % SeparateImageChannel() separates a channel from the image and * returns it as % a grayscale image. A channel is a particular color * component of each pixel % in the image. % % The format of the * SeparateImageChannel method is: % % MagickBooleanType * SeparateImageChannel(Image *image, % const ChannelType channel) % % * A description of each parameter follows: % % o image: the image. % % * o channel: Identify which channel to extract: RedChannel, GreenChannel, % * BlueChannel, OpacityChannel, CyanChannel, MagentaChannel, % * YellowChannel, or BlackChannel. % */ MagickExport Image * SeparateImage(const Image * image, const ChannelType channel, ExceptionInfo * exception) { Image * separate_image; MagickBooleanType status; /* * Initialize separate image attributes. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); separate_image = CloneImage(image, 0, 0, MagickTrue, exception); if (separate_image == (Image *) NULL) return ((Image *) NULL); status = SeparateImageChannel(separate_image, channel); if (status == MagickFalse) separate_image = DestroyImage(separate_image); return (separate_image); } MagickExport MagickBooleanType SeparateImageChannel(Image * image, const ChannelType channel) { #define SeparateImageTag "Separate/Image" CacheView * image_view; ExceptionInfo * exception; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); if (SetImageStorageClass(image, DirectClass) == MagickFalse) return (MagickFalse); if (channel == GrayChannels) (void)SetImageAlphaChannel(image, OpaqueAlphaChannel); /* * Separate image channels. 
*/ status = MagickTrue; progress = 0; exception = (&image->exception); image_view = AcquireAuthenticCacheView(image, exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y = 0; y < (ssize_t) image->rows; y++) { IndexPacket * magick_restrict indexes; PixelPacket * magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (PixelPacket *) NULL) { status = MagickFalse; continue; } indexes = GetCacheViewAuthenticIndexQueue(image_view); switch (channel) { case RedChannel: { for (x = 0; x < (ssize_t) image->columns; x++) { SetPixelGreen(q, GetPixelRed(q)); SetPixelBlue(q, GetPixelRed(q)); q++; } break; } case GreenChannel: { for (x = 0; x < (ssize_t) image->columns; x++) { SetPixelRed(q, GetPixelGreen(q)); SetPixelBlue(q, GetPixelGreen(q)); q++; } break; } case BlueChannel: { for (x = 0; x < (ssize_t) image->columns; x++) { SetPixelRed(q, GetPixelBlue(q)); SetPixelGreen(q, GetPixelBlue(q)); q++; } break; } case OpacityChannel: { for (x = 0; x < (ssize_t) image->columns; x++) { SetPixelRed(q, GetPixelOpacity(q)); SetPixelGreen(q, GetPixelOpacity(q)); SetPixelBlue(q, GetPixelOpacity(q)); q++; } break; } case BlackChannel: { if ((image->storage_class != PseudoClass) && (image->colorspace != CMYKColorspace)) break; for (x = 0; x < (ssize_t) image->columns; x++) { SetPixelRed(q, GetPixelIndex(indexes + x)); SetPixelGreen(q, GetPixelIndex(indexes + x)); SetPixelBlue(q, GetPixelIndex(indexes + x)); q++; } break; } case TrueAlphaChannel: { for (x = 0; x < (ssize_t) image->columns; x++) { SetPixelRed(q, GetPixelAlpha(q)); SetPixelGreen(q, GetPixelAlpha(q)); SetPixelBlue(q, GetPixelAlpha(q)); q++; } break; } case GrayChannels: { for (x = 0; x < (ssize_t) image->columns; x++) { SetPixelAlpha(q, ClampToQuantum(GetPixelIntensity(image, q))); q++; } break; } default: break; } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed = SetImageProgress(image, SeparateImageTag, progress, image->rows); if (proceed == MagickFalse) status = MagickFalse; } } image_view = DestroyCacheView(image_view); if (channel != GrayChannels) { image->matte = MagickFalse; (void)SetImageColorspace(image, GRAYColorspace); } return (status); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % S e p a r a t e I m a g e s * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % SeparateImages() returns a separate grayscale image for each * channel % specified. % % The format of the SeparateImages method is: % % * MagickBooleanType SeparateImages(const Image *image, % const * ChannelType channel,ExceptionInfo *exception) % % A description of each * parameter follows: % % o image: the image. % % o channel: Identify * which channels to extract: RedChannel, GreenChannel, % BlueChannel, * OpacityChannel, CyanChannel, MagentaChannel, % YellowChannel, or * BlackChannel. % % o exception: return any errors or warnings in this * structure. 
% */ MagickExport Image * SeparateImages(const Image * image, const ChannelType channel, ExceptionInfo * exception) { Image * images, *separate_image; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); images = NewImageList(); if ((channel & RedChannel) != 0) { separate_image = CloneImage(image, 0, 0, MagickTrue, exception); (void)SeparateImageChannel(separate_image, RedChannel); AppendImageToList(&images, separate_image); } if ((channel & GreenChannel) != 0) { separate_image = CloneImage(image, 0, 0, MagickTrue, exception); (void)SeparateImageChannel(separate_image, GreenChannel); AppendImageToList(&images, separate_image); } if ((channel & BlueChannel) != 0) { separate_image = CloneImage(image, 0, 0, MagickTrue, exception); (void)SeparateImageChannel(separate_image, BlueChannel); AppendImageToList(&images, separate_image); } if (((channel & BlackChannel) != 0) && (image->colorspace == CMYKColorspace)) { separate_image = CloneImage(image, 0, 0, MagickTrue, exception); (void)SeparateImageChannel(separate_image, BlackChannel); AppendImageToList(&images, separate_image); } if ((channel & AlphaChannel) != 0) { separate_image = CloneImage(image, 0, 0, MagickTrue, exception); (void)SeparateImageChannel(separate_image, TrueAlphaChannel); AppendImageToList(&images, separate_image); } return (images); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % S e t I m a g e A l p h a C h a n n e l * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % SetImageAlphaChannel() activates, deactivates, resets, or sets * the alpha % channel. % % The format of the SetImageAlphaChannel method * is: % % MagickBooleanType SetImageAlphaChannel(Image *image, % * const AlphaChannelType alpha_type) % % A description of each parameter * follows: % % o image: the image. % % o alpha_type: The alpha * channel type: ActivateAlphaChannel, % AssociateAlphaChannel, * CopyAlphaChannel, Disassociate, % DeactivateAlphaChannel, * ExtractAlphaChannel, OpaqueAlphaChannel, % ResetAlphaChannel, * SetAlphaChannel, ShapeAlphaChannel, and % TransparentAlphaChannel. % */ MagickExport MagickBooleanType SetImageAlphaChannel(Image * image, const AlphaChannelType alpha_type) { CacheView * image_view; ExceptionInfo * exception; MagickBooleanType status; ssize_t y; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "..."); assert(image->signature == MagickCoreSignature); exception = (&image->exception); status = MagickTrue; switch (alpha_type) { case ActivateAlphaChannel: { if (image->matte == MagickTrue) return (status); image->matte = MagickTrue; break; } case AssociateAlphaChannel: { /* * Associate alpha. 
*/ status = SetImageStorageClass(image, DirectClass); if (status == MagickFalse) break; image_view = AcquireAuthenticCacheView(image, exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y = 0; y < (ssize_t) image->rows; y++) { PixelPacket * magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (PixelPacket *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { double gamma; gamma = QuantumScale * GetPixelAlpha(q); SetPixelRed(q, ClampToQuantum(gamma * GetPixelRed(q))); SetPixelGreen(q, ClampToQuantum(gamma * GetPixelGreen(q))); SetPixelBlue(q, ClampToQuantum(gamma * GetPixelBlue(q))); q++; } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; } image_view = DestroyCacheView(image_view); image->matte = MagickFalse; break; } case BackgroundAlphaChannel: { IndexPacket index; MagickBooleanType status; MagickPixelPacket background; PixelPacket pixel; /* * Set transparent pixels to background color. */ if (image->matte == MagickFalse) break; status = SetImageStorageClass(image, DirectClass); if (status == MagickFalse) break; GetMagickPixelPacket(image, &background); SetMagickPixelPacket(image, &image->background_color, (const IndexPacket *) NULL, &background); if (image->colorspace == CMYKColorspace) ConvertRGBToCMYK(&background); index = 0; SetPixelPacket(image, &background, &pixel, &index); status = MagickTrue; exception = (&image->exception); image_view = AcquireAuthenticCacheView(image, exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y = 0; y < (ssize_t) image->rows; y++) { IndexPacket * magick_restrict indexes; PixelPacket * magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (PixelPacket *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { if (q->opacity == TransparentOpacity) { SetPixelRed(q, pixel.red); SetPixelGreen(q, pixel.green); SetPixelBlue(q, pixel.blue); } q++; } if (image->colorspace == CMYKColorspace) { indexes = GetCacheViewAuthenticIndexQueue(image_view); for (x = 0; x < (ssize_t) image->columns; x++) SetPixelIndex(indexes + x, index); } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; } image_view = DestroyCacheView(image_view); return (status); } case CopyAlphaChannel: case ShapeAlphaChannel: { /* * Special usage case for SeparateImageChannel(): copy grayscale * color to the alpha channel. */ status = SeparateImageChannel(image, GrayChannels); image->matte = MagickTrue; /* make sure transparency is now on! */ if (alpha_type == ShapeAlphaChannel) { MagickPixelPacket background; /* * Reset all color channels to background color. 
*/ GetMagickPixelPacket(image, &background); SetMagickPixelPacket(image, &(image->background_color), (IndexPacket *) NULL, &background); (void)LevelColorsImage(image, &background, &background, MagickTrue); } break; } case DeactivateAlphaChannel: { if (image->matte == MagickFalse) return (status); image->matte = MagickFalse; break; } case DisassociateAlphaChannel: { status = SetImageStorageClass(image, DirectClass); if (status == MagickFalse) break; image->matte = MagickTrue; image_view = AcquireAuthenticCacheView(image, exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y = 0; y < (ssize_t) image->rows; y++) { PixelPacket * magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (PixelPacket *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { double alpha, gamma; alpha = QuantumScale * GetPixelAlpha(q); gamma = PerceptibleReciprocal(alpha); SetPixelRed(q, ClampToQuantum(gamma * GetPixelRed(q))); SetPixelGreen(q, ClampToQuantum(gamma * GetPixelGreen(q))); SetPixelBlue(q, ClampToQuantum(gamma * GetPixelBlue(q))); q++; } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; } image_view = DestroyCacheView(image_view); image->matte = MagickFalse; break; } case ExtractAlphaChannel: { status = SeparateImageChannel(image, TrueAlphaChannel); image->matte = MagickFalse; break; } case RemoveAlphaChannel: case FlattenAlphaChannel: { IndexPacket index; MagickPixelPacket background; PixelPacket pixel; /* * Flatten image pixels over the background pixels. */ if (image->matte == MagickFalse) break; if (SetImageStorageClass(image, DirectClass) == MagickFalse) break; GetMagickPixelPacket(image, &background); SetMagickPixelPacket(image, &image->background_color, (const IndexPacket *) NULL, &background); if (image->colorspace == CMYKColorspace) ConvertRGBToCMYK(&background); (void)memset(&pixel, 0, sizeof(pixel)); index = 0; SetPixelPacket(image, &background, &pixel, &index); image_view = AcquireAuthenticCacheView(image, exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y = 0; y < (ssize_t) image->rows; y++) { IndexPacket * magick_restrict indexes; PixelPacket * magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (PixelPacket *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { double gamma, opacity; gamma = 1.0 - QuantumScale * QuantumScale * q->opacity * pixel.opacity; opacity = (double)QuantumRange *(1.0 - gamma); gamma = PerceptibleReciprocal(gamma); q->red = ClampToQuantum(gamma * MagickOver_((MagickRealType) q->red, (MagickRealType) q->opacity, (MagickRealType) pixel.red, (MagickRealType) pixel.opacity)); q->green = ClampToQuantum(gamma * MagickOver_((MagickRealType) q->green, (MagickRealType) q->opacity, (MagickRealType) pixel.green, (MagickRealType) pixel.opacity)); q->blue = ClampToQuantum(gamma * MagickOver_((MagickRealType) q->blue, (MagickRealType) q->opacity, (MagickRealType) pixel.blue, (MagickRealType) pixel.opacity)); q->opacity = ClampToQuantum(opacity); q++; } if (image->colorspace == CMYKColorspace) { indexes = 
GetCacheViewAuthenticIndexQueue(image_view); for (x = 0; x < (ssize_t) image->columns; x++) SetPixelIndex(indexes + x, index); } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; } image_view = DestroyCacheView(image_view); return (status); } case ResetAlphaChannel: /* deprecated */ case OpaqueAlphaChannel: { status = SetImageOpacity(image, OpaqueOpacity); break; } case SetAlphaChannel: { if (image->matte == MagickFalse) status = SetImageOpacity(image, OpaqueOpacity); break; } case TransparentAlphaChannel: { status = SetImageOpacity(image, TransparentOpacity); break; } case UndefinedAlphaChannel: break; } if (status == MagickFalse) return (status); return (SyncImagePixelCache(image, &image->exception)); }
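A minimal usage sketch of the channel API defined in the code above (GetImageAlphaChannel, SeparateImage, SetImageAlphaChannel). This is illustrative only and not part of the library sources: the wrapper name ExtractAlphaAsGray is invented for the example, the header path is an assumption that varies between ImageMagick builds, and error handling is reduced to NULL checks.

/* Illustrative sketch, not library code: pull the alpha channel out of an
   image as a grayscale clone using the functions defined above, then force
   the source image fully opaque.  Header path is an assumption. */
#include <magick/MagickCore.h>

Image *ExtractAlphaAsGray(Image *image, ExceptionInfo *exception)
{
  Image
    *alpha_image;

  /* GetImageAlphaChannel() simply reports image->matte. */
  if (GetImageAlphaChannel(image) == MagickFalse)
    return((Image *) NULL);
  /* SeparateImage() clones the image and reduces the clone to grayscale. */
  alpha_image=SeparateImage(image,TrueAlphaChannel,exception);
  if (alpha_image == (Image *) NULL)
    return((Image *) NULL);
  /* Reset the source's alpha channel to fully opaque in place. */
  (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
  return(alpha_image);
}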
j3d27pt.c
#define BENCH_DIM 3 #define BENCH_FPP 54 #define BENCH_RAD 1 #include "common.h" double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop) { double start_time = sb_time(), end_time = 0.0; int dimsize = compsize + BENCH_RAD * 2; SB_TYPE (*A)[dimsize][dimsize][dimsize] = (SB_TYPE (*)[dimsize][dimsize][dimsize])A1; if (scop) { #pragma scop for (int t = 0; t < timestep; t++) for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++) for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++) for (int k = BENCH_RAD; k < dimsize - BENCH_RAD; k++) A[(t+1)%2][i][j][k] = (1.500f*A[t%2][i-1][j][k] + 0.500f*A[t%2][i-1][j-1][k-1] + 0.700f*A[t%2][i-1][j-1][k] + 0.900f*A[t%2][i-1][j-1][k+1] + 1.200f*A[t%2][i-1][j][k-1] + 1.201f*A[t%2][i-1][j][k+1] + 0.901f*A[t%2][i-1][j+1][k-1] + 0.701f*A[t%2][i-1][j+1][k] + 0.501f*A[t%2][i-1][j+1][k+1] + 1.510f*A[t%2][i][j][k] + 0.510f*A[t%2][i][j-1][k-1] + 0.710f*A[t%2][i][j-1][k] + 0.910f*A[t%2][i][j-1][k+1] + 1.210f*A[t%2][i][j][k-1] + 1.211f*A[t%2][i][j][k+1] + 0.911f*A[t%2][i][j+1][k-1] + 0.711f*A[t%2][i][j+1][k] + 0.511f*A[t%2][i][j+1][k+1] + 1.520f*A[t%2][i+1][j][k] + 0.520f*A[t%2][i+1][j-1][k-1] + 0.720f*A[t%2][i+1][j-1][k] + 0.920f*A[t%2][i+1][j-1][k+1] + 1.220f*A[t%2][i+1][j][k-1] + 1.221f*A[t%2][i+1][j][k+1] + 0.921f*A[t%2][i+1][j+1][k-1] + 0.721f*A[t%2][i+1][j+1][k] + 0.521f*A[t%2][i+1][j+1][k+1]) / 159; #pragma endscop } else { for (int t = 0; t < timestep; t++) #pragma omp parallel for for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++) for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++) for (int k = BENCH_RAD; k < dimsize - BENCH_RAD; k++) A[(t+1)%2][i][j][k] = (1.500f*A[t%2][i-1][j][k] + 0.500f*A[t%2][i-1][j-1][k-1] + 0.700f*A[t%2][i-1][j-1][k] + 0.900f*A[t%2][i-1][j-1][k+1] + 1.200f*A[t%2][i-1][j][k-1] + 1.201f*A[t%2][i-1][j][k+1] + 0.901f*A[t%2][i-1][j+1][k-1] + 0.701f*A[t%2][i-1][j+1][k] + 0.501f*A[t%2][i-1][j+1][k+1] + 1.510f*A[t%2][i][j][k] + 0.510f*A[t%2][i][j-1][k-1] + 0.710f*A[t%2][i][j-1][k] + 0.910f*A[t%2][i][j-1][k+1] + 1.210f*A[t%2][i][j][k-1] + 1.211f*A[t%2][i][j][k+1] + 0.911f*A[t%2][i][j+1][k-1] + 0.711f*A[t%2][i][j+1][k] + 0.511f*A[t%2][i][j+1][k+1] + 1.520f*A[t%2][i+1][j][k] + 0.520f*A[t%2][i+1][j-1][k-1] + 0.720f*A[t%2][i+1][j-1][k] + 0.920f*A[t%2][i+1][j-1][k+1] + 1.220f*A[t%2][i+1][j][k-1] + 1.221f*A[t%2][i+1][j][k+1] + 0.921f*A[t%2][i+1][j+1][k-1] + 0.721f*A[t%2][i+1][j+1][k] + 0.521f*A[t%2][i+1][j+1][k+1]) / 159; } return (((end_time != 0.0) ? end_time : sb_time()) - start_time); }
#define BENCH_DIM 3 #define BENCH_FPP 54 #define BENCH_RAD 1 #include "common.h" double kernel_stencil(SB_TYPE * A1, int compsize, int timestep, bool scop) { double start_time = sb_time(), end_time = 0.0; int dimsize = compsize + BENCH_RAD * 2; SB_TYPE(*A)[dimsize][dimsize][dimsize] = (SB_TYPE(*)[dimsize][dimsize][dimsize]) A1; if (scop) { #pragma scop for (int t = 0; t < timestep; t++) for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++) for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++) for (int k = BENCH_RAD; k < dimsize - BENCH_RAD; k++) A[(t + 1) % 2][i][j][k] = (1.500f * A[t % 2][i - 1][j][k] + 0.500f * A[t % 2][i - 1][j - 1][k - 1] + 0.700f * A[t % 2][i - 1][j - 1][k] + 0.900f * A[t % 2][i - 1][j - 1][k + 1] + 1.200f * A[t % 2][i - 1][j][k - 1] + 1.201f * A[t % 2][i - 1][j][k + 1] + 0.901f * A[t % 2][i - 1][j + 1][k - 1] + 0.701f * A[t % 2][i - 1][j + 1][k] + 0.501f * A[t % 2][i - 1][j + 1][k + 1] + 1.510f * A[t % 2][i][j][k] + 0.510f * A[t % 2][i][j - 1][k - 1] + 0.710f * A[t % 2][i][j - 1][k] + 0.910f * A[t % 2][i][j - 1][k + 1] + 1.210f * A[t % 2][i][j][k - 1] + 1.211f * A[t % 2][i][j][k + 1] + 0.911f * A[t % 2][i][j + 1][k - 1] + 0.711f * A[t % 2][i][j + 1][k] + 0.511f * A[t % 2][i][j + 1][k + 1] + 1.520f * A[t % 2][i + 1][j][k] + 0.520f * A[t % 2][i + 1][j - 1][k - 1] + 0.720f * A[t % 2][i + 1][j - 1][k] + 0.920f * A[t % 2][i + 1][j - 1][k + 1] + 1.220f * A[t % 2][i + 1][j][k - 1] + 1.221f * A[t % 2][i + 1][j][k + 1] + 0.921f * A[t % 2][i + 1][j + 1][k - 1] + 0.721f * A[t % 2][i + 1][j + 1][k] + 0.521f * A[t % 2][i + 1][j + 1][k + 1]) / 159; #pragma endscop } else { for (int t = 0; t < timestep; t++) for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++) for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++) for (int k = BENCH_RAD; k < dimsize - BENCH_RAD; k++) A[(t + 1) % 2][i][j][k] = (1.500f * A[t % 2][i - 1][j][k] + 0.500f * A[t % 2][i - 1][j - 1][k - 1] + 0.700f * A[t % 2][i - 1][j - 1][k] + 0.900f * A[t % 2][i - 1][j - 1][k + 1] + 1.200f * A[t % 2][i - 1][j][k - 1] + 1.201f * A[t % 2][i - 1][j][k + 1] + 0.901f * A[t % 2][i - 1][j + 1][k - 1] + 0.701f * A[t % 2][i - 1][j + 1][k] + 0.501f * A[t % 2][i - 1][j + 1][k + 1] + 1.510f * A[t % 2][i][j][k] + 0.510f * A[t % 2][i][j - 1][k - 1] + 0.710f * A[t % 2][i][j - 1][k] + 0.910f * A[t % 2][i][j - 1][k + 1] + 1.210f * A[t % 2][i][j][k - 1] + 1.211f * A[t % 2][i][j][k + 1] + 0.911f * A[t % 2][i][j + 1][k - 1] + 0.711f * A[t % 2][i][j + 1][k] + 0.511f * A[t % 2][i][j + 1][k + 1] + 1.520f * A[t % 2][i + 1][j][k] + 0.520f * A[t % 2][i + 1][j - 1][k - 1] + 0.720f * A[t % 2][i + 1][j - 1][k] + 0.920f * A[t % 2][i + 1][j - 1][k + 1] + 1.220f * A[t % 2][i + 1][j][k - 1] + 1.221f * A[t % 2][i + 1][j][k + 1] + 0.921f * A[t % 2][i + 1][j + 1][k - 1] + 0.721f * A[t % 2][i + 1][j + 1][k] + 0.521f * A[t % 2][i + 1][j + 1][k + 1]) / 159; } return (((end_time != 0.0) ? end_time : sb_time()) - start_time); }
#define BENCH_DIM 3 #define BENCH_FPP 54 #define BENCH_RAD 1 #include "common.h" double kernel_stencil(SB_TYPE * A1, int compsize, int timestep, bool scop) { double start_time = sb_time(), end_time = 0.0; int dimsize = compsize + BENCH_RAD * 2; SB_TYPE(*A)[dimsize][dimsize][dimsize] = (SB_TYPE(*)[dimsize][dimsize][dimsize]) A1; if (scop) { #pragma scop for (int t = 0; t < timestep; t++) for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++) for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++) for (int k = BENCH_RAD; k < dimsize - BENCH_RAD; k++) A[(t + 1) % 2][i][j][k] = (1.500f * A[t % 2][i - 1][j][k] + 0.500f * A[t % 2][i - 1][j - 1][k - 1] + 0.700f * A[t % 2][i - 1][j - 1][k] + 0.900f * A[t % 2][i - 1][j - 1][k + 1] + 1.200f * A[t % 2][i - 1][j][k - 1] + 1.201f * A[t % 2][i - 1][j][k + 1] + 0.901f * A[t % 2][i - 1][j + 1][k - 1] + 0.701f * A[t % 2][i - 1][j + 1][k] + 0.501f * A[t % 2][i - 1][j + 1][k + 1] + 1.510f * A[t % 2][i][j][k] + 0.510f * A[t % 2][i][j - 1][k - 1] + 0.710f * A[t % 2][i][j - 1][k] + 0.910f * A[t % 2][i][j - 1][k + 1] + 1.210f * A[t % 2][i][j][k - 1] + 1.211f * A[t % 2][i][j][k + 1] + 0.911f * A[t % 2][i][j + 1][k - 1] + 0.711f * A[t % 2][i][j + 1][k] + 0.511f * A[t % 2][i][j + 1][k + 1] + 1.520f * A[t % 2][i + 1][j][k] + 0.520f * A[t % 2][i + 1][j - 1][k - 1] + 0.720f * A[t % 2][i + 1][j - 1][k] + 0.920f * A[t % 2][i + 1][j - 1][k + 1] + 1.220f * A[t % 2][i + 1][j][k - 1] + 1.221f * A[t % 2][i + 1][j][k + 1] + 0.921f * A[t % 2][i + 1][j + 1][k - 1] + 0.721f * A[t % 2][i + 1][j + 1][k] + 0.521f * A[t % 2][i + 1][j + 1][k + 1]) / 159; #pragma endscop } else { for (int t = 0; t < timestep; t++) #pragma omp parallel for for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++) for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++) for (int k = BENCH_RAD; k < dimsize - BENCH_RAD; k++) A[(t + 1) % 2][i][j][k] = (1.500f * A[t % 2][i - 1][j][k] + 0.500f * A[t % 2][i - 1][j - 1][k - 1] + 0.700f * A[t % 2][i - 1][j - 1][k] + 0.900f * A[t % 2][i - 1][j - 1][k + 1] + 1.200f * A[t % 2][i - 1][j][k - 1] + 1.201f * A[t % 2][i - 1][j][k + 1] + 0.901f * A[t % 2][i - 1][j + 1][k - 1] + 0.701f * A[t % 2][i - 1][j + 1][k] + 0.501f * A[t % 2][i - 1][j + 1][k + 1] + 1.510f * A[t % 2][i][j][k] + 0.510f * A[t % 2][i][j - 1][k - 1] + 0.710f * A[t % 2][i][j - 1][k] + 0.910f * A[t % 2][i][j - 1][k + 1] + 1.210f * A[t % 2][i][j][k - 1] + 1.211f * A[t % 2][i][j][k + 1] + 0.911f * A[t % 2][i][j + 1][k - 1] + 0.711f * A[t % 2][i][j + 1][k] + 0.511f * A[t % 2][i][j + 1][k + 1] + 1.520f * A[t % 2][i + 1][j][k] + 0.520f * A[t % 2][i + 1][j - 1][k - 1] + 0.720f * A[t % 2][i + 1][j - 1][k] + 0.920f * A[t % 2][i + 1][j - 1][k + 1] + 1.220f * A[t % 2][i + 1][j][k - 1] + 1.221f * A[t % 2][i + 1][j][k + 1] + 0.921f * A[t % 2][i + 1][j + 1][k - 1] + 0.721f * A[t % 2][i + 1][j + 1][k] + 0.521f * A[t % 2][i + 1][j + 1][k + 1]) / 159; } return (((end_time != 0.0) ? end_time : sb_time()) - start_time); }
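The three j3d27pt.c columns above differ only in how the non-scop branch is parallelized: the OpenMP variant puts a plain parallel-for pragma on the outermost spatial loop, while the time loop stays serial because step t+1 reads the plane written at step t. A stripped-down sketch of that pattern follows; the 64^3 grid and the simple 6-point average are placeholders, not the benchmark's 27-point stencil or coefficients.

/* Sketch of the parallelization pattern used in the omp variant above:
   two time planes, serial time loop, OpenMP-parallel outer spatial loop.
   Grid size and stencil are placeholders chosen for brevity. */
void jacobi_sketch(double A[2][64][64][64], int timestep)
{
  for (int t = 0; t < timestep; t++)   /* time steps stay serial */
#pragma omp parallel for               /* i-planes split across threads */
    for (int i = 1; i < 63; i++)
      for (int j = 1; j < 63; j++)
        for (int k = 1; k < 63; k++)
          A[(t + 1) % 2][i][j][k] =
            (A[t % 2][i - 1][j][k] + A[t % 2][i + 1][j][k] +
             A[t % 2][i][j - 1][k] + A[t % 2][i][j + 1][k] +
             A[t % 2][i][j][k - 1] + A[t % 2][i][j][k + 1]) / 6.0;
}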
draw.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % DDDD RRRR AAA W W % % D D R R A A W W % % D D RRRR AAAAA W W W % % D D R RN A A WW WW % % DDDD R R A A W W % % % % % % MagickCore Image Drawing Methods % % % % % % Software Design % % Cristy % % July 1998 % % % % % % Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Bill Radcliffe of Corbis (www.corbis.com) contributed the polygon % rendering code based on Paul Heckbert's "Concave Polygon Scan Conversion", % Graphics Gems, 1990. Leonard Rosenthal and David Harr of Appligent % (www.appligent.com) contributed the dash pattern, linecap stroking % algorithm, and minor rendering improvements. % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/annotate.h" #include "MagickCore/artifact.h" #include "MagickCore/blob.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/color.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/constitute.h" #include "MagickCore/draw.h" #include "MagickCore/draw-private.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/geometry.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/paint.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/property.h" #include "MagickCore/resample.h" #include "MagickCore/resample-private.h" #include "MagickCore/resource_.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/token.h" #include "MagickCore/transform-private.h" #include "MagickCore/utility.h" /* Define declarations. */ #define BezierQuantum 200 #define DrawEpsilon (1.0e-10) /* Typedef declarations. */ typedef struct _EdgeInfo { SegmentInfo bounds; double scanline; PointInfo *points; size_t number_points; ssize_t direction; MagickBooleanType ghostline; size_t highwater; } EdgeInfo; typedef struct _ElementInfo { double cx, cy, major, minor, angle; } ElementInfo; typedef struct _PolygonInfo { EdgeInfo *edges; size_t number_edges; } PolygonInfo; typedef enum { MoveToCode, OpenCode, GhostlineCode, LineToCode, EndCode } PathInfoCode; typedef struct _PathInfo { PointInfo point; PathInfoCode code; } PathInfo; /* Forward declarations. 
*/ static MagickBooleanType DrawStrokePolygon(Image *,const DrawInfo *,const PrimitiveInfo *, ExceptionInfo *); static PrimitiveInfo *TraceStrokePolygon(const DrawInfo *,const PrimitiveInfo *); static size_t TracePath(PrimitiveInfo *,const char *); static void TraceArc(PrimitiveInfo *,const PointInfo,const PointInfo,const PointInfo), TraceArcPath(PrimitiveInfo *,const PointInfo,const PointInfo,const PointInfo, const double,const MagickBooleanType,const MagickBooleanType), TraceBezier(PrimitiveInfo *,const size_t), TraceCircle(PrimitiveInfo *,const PointInfo,const PointInfo), TraceEllipse(PrimitiveInfo *,const PointInfo,const PointInfo, const PointInfo), TraceLine(PrimitiveInfo *,const PointInfo,const PointInfo), TraceRectangle(PrimitiveInfo *,const PointInfo,const PointInfo), TraceRoundRectangle(PrimitiveInfo *,const PointInfo,const PointInfo, PointInfo), TraceSquareLinecap(PrimitiveInfo *,const size_t,const double); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e D r a w I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireDrawInfo() returns a DrawInfo structure properly initialized. % % The format of the AcquireDrawInfo method is: % % DrawInfo *AcquireDrawInfo(void) % */ MagickExport DrawInfo *AcquireDrawInfo(void) { DrawInfo *draw_info; draw_info=(DrawInfo *) AcquireMagickMemory(sizeof(*draw_info)); if (draw_info == (DrawInfo *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); GetDrawInfo((ImageInfo *) NULL,draw_info); return(draw_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e D r a w I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneDrawInfo() makes a copy of the given draw_info structure. If NULL % is specified, a new DrawInfo structure is created initialized to default % values. % % The format of the CloneDrawInfo method is: % % DrawInfo *CloneDrawInfo(const ImageInfo *image_info, % const DrawInfo *draw_info) % % A description of each parameter follows: % % o image_info: the image info. % % o draw_info: the draw info. 
% */ MagickExport DrawInfo *CloneDrawInfo(const ImageInfo *image_info, const DrawInfo *draw_info) { DrawInfo *clone_info; ExceptionInfo *exception; clone_info=(DrawInfo *) AcquireMagickMemory(sizeof(*clone_info)); if (clone_info == (DrawInfo *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); GetDrawInfo(image_info,clone_info); if (draw_info == (DrawInfo *) NULL) return(clone_info); exception=AcquireExceptionInfo(); if (clone_info->primitive != (char *) NULL) (void) CloneString(&clone_info->primitive,draw_info->primitive); if (draw_info->geometry != (char *) NULL) (void) CloneString(&clone_info->geometry,draw_info->geometry); clone_info->viewbox=draw_info->viewbox; clone_info->affine=draw_info->affine; clone_info->gravity=draw_info->gravity; clone_info->fill=draw_info->fill; clone_info->stroke=draw_info->stroke; clone_info->stroke_width=draw_info->stroke_width; if (draw_info->fill_pattern != (Image *) NULL) clone_info->fill_pattern=CloneImage(draw_info->fill_pattern,0,0,MagickTrue, exception); if (draw_info->stroke_pattern != (Image *) NULL) clone_info->stroke_pattern=CloneImage(draw_info->stroke_pattern,0,0, MagickTrue,exception); clone_info->stroke_antialias=draw_info->stroke_antialias; clone_info->text_antialias=draw_info->text_antialias; clone_info->fill_rule=draw_info->fill_rule; clone_info->linecap=draw_info->linecap; clone_info->linejoin=draw_info->linejoin; clone_info->miterlimit=draw_info->miterlimit; clone_info->dash_offset=draw_info->dash_offset; clone_info->decorate=draw_info->decorate; clone_info->compose=draw_info->compose; if (draw_info->text != (char *) NULL) (void) CloneString(&clone_info->text,draw_info->text); if (draw_info->font != (char *) NULL) (void) CloneString(&clone_info->font,draw_info->font); if (draw_info->metrics != (char *) NULL) (void) CloneString(&clone_info->metrics,draw_info->metrics); if (draw_info->family != (char *) NULL) (void) CloneString(&clone_info->family,draw_info->family); clone_info->style=draw_info->style; clone_info->stretch=draw_info->stretch; clone_info->weight=draw_info->weight; if (draw_info->encoding != (char *) NULL) (void) CloneString(&clone_info->encoding,draw_info->encoding); clone_info->pointsize=draw_info->pointsize; clone_info->kerning=draw_info->kerning; clone_info->interline_spacing=draw_info->interline_spacing; clone_info->interword_spacing=draw_info->interword_spacing; clone_info->direction=draw_info->direction; if (draw_info->density != (char *) NULL) (void) CloneString(&clone_info->density,draw_info->density); clone_info->align=draw_info->align; clone_info->undercolor=draw_info->undercolor; clone_info->border_color=draw_info->border_color; if (draw_info->server_name != (char *) NULL) (void) CloneString(&clone_info->server_name,draw_info->server_name); if (draw_info->dash_pattern != (double *) NULL) { register ssize_t x; for (x=0; fabs(draw_info->dash_pattern[x]) >= DrawEpsilon; x++) ; clone_info->dash_pattern=(double *) AcquireQuantumMemory((size_t) x+1UL, sizeof(*clone_info->dash_pattern)); if (clone_info->dash_pattern == (double *) NULL) ThrowFatalException(ResourceLimitFatalError, "UnableToAllocateDashPattern"); (void) CopyMagickMemory(clone_info->dash_pattern,draw_info->dash_pattern, (size_t) (x+1)*sizeof(*clone_info->dash_pattern)); } clone_info->gradient=draw_info->gradient; if (draw_info->gradient.stops != (StopInfo *) NULL) { size_t number_stops; number_stops=clone_info->gradient.number_stops; clone_info->gradient.stops=(StopInfo *) AcquireQuantumMemory((size_t) 
number_stops,sizeof(*clone_info->gradient.stops)); if (clone_info->gradient.stops == (StopInfo *) NULL) ThrowFatalException(ResourceLimitFatalError, "UnableToAllocateDashPattern"); (void) CopyMagickMemory(clone_info->gradient.stops, draw_info->gradient.stops,(size_t) number_stops* sizeof(*clone_info->gradient.stops)); } if (draw_info->clip_mask != (char *) NULL) (void) CloneString(&clone_info->clip_mask,draw_info->clip_mask); clone_info->bounds=draw_info->bounds; clone_info->clip_units=draw_info->clip_units; clone_info->render=draw_info->render; clone_info->fill_alpha=draw_info->fill_alpha; clone_info->stroke_alpha=draw_info->stroke_alpha; clone_info->element_reference=draw_info->element_reference; clone_info->debug=IsEventLogging(); exception=DestroyExceptionInfo(exception); return(clone_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C o n v e r t P a t h T o P o l y g o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ConvertPathToPolygon() converts a path to the more efficient sorted % rendering form. % % The format of the ConvertPathToPolygon method is: % % PolygonInfo *ConvertPathToPolygon(const DrawInfo *draw_info, % const PathInfo *path_info) % % A description of each parameter follows: % % o Method ConvertPathToPolygon returns the path in a more efficient sorted % rendering form of type PolygonInfo. % % o draw_info: Specifies a pointer to an DrawInfo structure. % % o path_info: Specifies a pointer to an PathInfo structure. % % */ #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static int CompareEdges(const void *x,const void *y) { register const EdgeInfo *p, *q; /* Compare two edges. */ p=(const EdgeInfo *) x; q=(const EdgeInfo *) y; if ((p->points[0].y-DrawEpsilon) > q->points[0].y) return(1); if ((p->points[0].y+DrawEpsilon) < q->points[0].y) return(-1); if ((p->points[0].x-DrawEpsilon) > q->points[0].x) return(1); if ((p->points[0].x+DrawEpsilon) < q->points[0].x) return(-1); if (((p->points[1].x-p->points[0].x)*(q->points[1].y-q->points[0].y)- (p->points[1].y-p->points[0].y)*(q->points[1].x-q->points[0].x)) > 0.0) return(1); return(-1); } #if defined(__cplusplus) || defined(c_plusplus) } #endif static void LogPolygonInfo(const PolygonInfo *polygon_info) { register EdgeInfo *p; register ssize_t i, j; (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin active-edge"); p=polygon_info->edges; for (i=0; i < (ssize_t) polygon_info->number_edges; i++) { (void) LogMagickEvent(DrawEvent,GetMagickModule()," edge %.20g:", (double) i); (void) LogMagickEvent(DrawEvent,GetMagickModule()," direction: %s", p->direction != MagickFalse ? "down" : "up"); (void) LogMagickEvent(DrawEvent,GetMagickModule()," ghostline: %s", p->ghostline != MagickFalse ? 
"transparent" : "opaque"); (void) LogMagickEvent(DrawEvent,GetMagickModule(), " bounds: %g,%g - %g,%g",p->bounds.x1,p->bounds.y1, p->bounds.x2,p->bounds.y2); for (j=0; j < (ssize_t) p->number_points; j++) (void) LogMagickEvent(DrawEvent,GetMagickModule()," %g,%g", p->points[j].x,p->points[j].y); p++; } (void) LogMagickEvent(DrawEvent,GetMagickModule()," end active-edge"); } static void ReversePoints(PointInfo *points,const size_t number_points) { PointInfo point; register ssize_t i; for (i=0; i < (ssize_t) (number_points >> 1); i++) { point=points[i]; points[i]=points[number_points-(i+1)]; points[number_points-(i+1)]=point; } } static PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info) { long direction, next_direction; PointInfo point, *points; PolygonInfo *polygon_info; SegmentInfo bounds; register ssize_t i, n; MagickBooleanType ghostline; size_t edge, number_edges, number_points; /* Convert a path to the more efficient sorted rendering form. */ polygon_info=(PolygonInfo *) AcquireMagickMemory(sizeof(*polygon_info)); if (polygon_info == (PolygonInfo *) NULL) return((PolygonInfo *) NULL); number_edges=16; polygon_info->edges=(EdgeInfo *) AcquireQuantumMemory(number_edges, sizeof(*polygon_info->edges)); if (polygon_info->edges == (EdgeInfo *) NULL) return((PolygonInfo *) NULL); (void) ResetMagickMemory(polygon_info->edges,0,number_edges* sizeof(*polygon_info->edges)); direction=0; edge=0; ghostline=MagickFalse; n=0; number_points=0; points=(PointInfo *) NULL; (void) ResetMagickMemory(&point,0,sizeof(point)); (void) ResetMagickMemory(&bounds,0,sizeof(bounds)); for (i=0; path_info[i].code != EndCode; i++) { if ((path_info[i].code == MoveToCode) || (path_info[i].code == OpenCode) || (path_info[i].code == GhostlineCode)) { /* Move to. */ if ((points != (PointInfo *) NULL) && (n >= 2)) { if (edge == number_edges) { number_edges<<=1; polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory( polygon_info->edges,(size_t) number_edges, sizeof(*polygon_info->edges)); if (polygon_info->edges == (EdgeInfo *) NULL) return((PolygonInfo *) NULL); } polygon_info->edges[edge].number_points=(size_t) n; polygon_info->edges[edge].scanline=(-1.0); polygon_info->edges[edge].highwater=0; polygon_info->edges[edge].ghostline=ghostline; polygon_info->edges[edge].direction=(ssize_t) (direction > 0); if (direction < 0) ReversePoints(points,(size_t) n); polygon_info->edges[edge].points=points; polygon_info->edges[edge].bounds=bounds; polygon_info->edges[edge].bounds.y1=points[0].y; polygon_info->edges[edge].bounds.y2=points[n-1].y; points=(PointInfo *) NULL; ghostline=MagickFalse; edge++; } if (points == (PointInfo *) NULL) { number_points=16; points=(PointInfo *) AcquireQuantumMemory((size_t) number_points, sizeof(*points)); if (points == (PointInfo *) NULL) return((PolygonInfo *) NULL); } ghostline=path_info[i].code == GhostlineCode ? MagickTrue : MagickFalse; point=path_info[i].point; points[0]=point; bounds.x1=point.x; bounds.x2=point.x; direction=0; n=1; continue; } /* Line to. */ next_direction=((path_info[i].point.y > point.y) || ((fabs(path_info[i].point.y-point.y) < DrawEpsilon) && (path_info[i].point.x > point.x))) ? 1 : -1; if ((points != (PointInfo *) NULL) && (direction != 0) && (direction != next_direction)) { /* New edge. 
*/ point=points[n-1]; if (edge == number_edges) { number_edges<<=1; polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory( polygon_info->edges,(size_t) number_edges, sizeof(*polygon_info->edges)); if (polygon_info->edges == (EdgeInfo *) NULL) return((PolygonInfo *) NULL); } polygon_info->edges[edge].number_points=(size_t) n; polygon_info->edges[edge].scanline=(-1.0); polygon_info->edges[edge].highwater=0; polygon_info->edges[edge].ghostline=ghostline; polygon_info->edges[edge].direction=(ssize_t) (direction > 0); if (direction < 0) ReversePoints(points,(size_t) n); polygon_info->edges[edge].points=points; polygon_info->edges[edge].bounds=bounds; polygon_info->edges[edge].bounds.y1=points[0].y; polygon_info->edges[edge].bounds.y2=points[n-1].y; number_points=16; points=(PointInfo *) AcquireQuantumMemory((size_t) number_points, sizeof(*points)); if (points == (PointInfo *) NULL) return((PolygonInfo *) NULL); n=1; ghostline=MagickFalse; points[0]=point; bounds.x1=point.x; bounds.x2=point.x; edge++; } direction=next_direction; if (points == (PointInfo *) NULL) continue; if (n == (ssize_t) number_points) { number_points<<=1; points=(PointInfo *) ResizeQuantumMemory(points,(size_t) number_points, sizeof(*points)); if (points == (PointInfo *) NULL) return((PolygonInfo *) NULL); } point=path_info[i].point; points[n]=point; if (point.x < bounds.x1) bounds.x1=point.x; if (point.x > bounds.x2) bounds.x2=point.x; n++; } if (points != (PointInfo *) NULL) { if (n < 2) points=(PointInfo *) RelinquishMagickMemory(points); else { if (edge == number_edges) { number_edges<<=1; polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory( polygon_info->edges,(size_t) number_edges, sizeof(*polygon_info->edges)); if (polygon_info->edges == (EdgeInfo *) NULL) return((PolygonInfo *) NULL); } polygon_info->edges[edge].number_points=(size_t) n; polygon_info->edges[edge].scanline=(-1.0); polygon_info->edges[edge].highwater=0; polygon_info->edges[edge].ghostline=ghostline; polygon_info->edges[edge].direction=(ssize_t) (direction > 0); if (direction < 0) ReversePoints(points,(size_t) n); polygon_info->edges[edge].points=points; polygon_info->edges[edge].bounds=bounds; polygon_info->edges[edge].bounds.y1=points[0].y; polygon_info->edges[edge].bounds.y2=points[n-1].y; ghostline=MagickFalse; edge++; } } polygon_info->number_edges=edge; qsort(polygon_info->edges,(size_t) polygon_info->number_edges, sizeof(*polygon_info->edges),CompareEdges); if (IsEventLogging() != MagickFalse) LogPolygonInfo(polygon_info); return(polygon_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C o n v e r t P r i m i t i v e T o P a t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ConvertPrimitiveToPath() converts a PrimitiveInfo structure into a vector % path structure. % % The format of the ConvertPrimitiveToPath method is: % % PathInfo *ConvertPrimitiveToPath(const DrawInfo *draw_info, % const PrimitiveInfo *primitive_info) % % A description of each parameter follows: % % o Method ConvertPrimitiveToPath returns a vector path structure of type % PathInfo. % % o draw_info: a structure of type DrawInfo. % % o primitive_info: Specifies a pointer to an PrimitiveInfo structure. 
% % */ static void LogPathInfo(const PathInfo *path_info) { register const PathInfo *p; (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin vector-path"); for (p=path_info; p->code != EndCode; p++) (void) LogMagickEvent(DrawEvent,GetMagickModule(), " %g,%g %s",p->point.x,p->point.y,p->code == GhostlineCode ? "moveto ghostline" : p->code == OpenCode ? "moveto open" : p->code == MoveToCode ? "moveto" : p->code == LineToCode ? "lineto" : "?"); (void) LogMagickEvent(DrawEvent,GetMagickModule()," end vector-path"); } static PathInfo *ConvertPrimitiveToPath(const PrimitiveInfo *primitive_info) { PathInfo *path_info; PathInfoCode code; PointInfo p, q; register ssize_t i, n; ssize_t coordinates, start; /* Converts a PrimitiveInfo structure into a vector path structure. */ switch (primitive_info->primitive) { case AlphaPrimitive: case ColorPrimitive: case ImagePrimitive: case PointPrimitive: case TextPrimitive: return((PathInfo *) NULL); default: break; } for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ; path_info=(PathInfo *) AcquireQuantumMemory((size_t) (2UL*i+3UL), sizeof(*path_info)); if (path_info == (PathInfo *) NULL) return((PathInfo *) NULL); coordinates=0; n=0; p.x=(-1.0); p.y=(-1.0); q.x=(-1.0); q.y=(-1.0); start=0; for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) { code=LineToCode; if (coordinates <= 0) { coordinates=(ssize_t) primitive_info[i].coordinates; p=primitive_info[i].point; start=n; code=MoveToCode; } coordinates--; /* Eliminate duplicate points. */ if ((i == 0) || (fabs(q.x-primitive_info[i].point.x) >= DrawEpsilon) || (fabs(q.y-primitive_info[i].point.y) >= DrawEpsilon)) { path_info[n].code=code; path_info[n].point=primitive_info[i].point; q=primitive_info[i].point; n++; } if (coordinates > 0) continue; if ((fabs(p.x-primitive_info[i].point.x) < DrawEpsilon) && (fabs(p.y-primitive_info[i].point.y) < DrawEpsilon)) continue; /* Mark the p point as open if it does not match the q. */ path_info[start].code=OpenCode; path_info[n].code=GhostlineCode; path_info[n].point=primitive_info[i].point; n++; path_info[n].code=LineToCode; path_info[n].point=p; n++; } path_info[n].code=EndCode; path_info[n].point.x=0.0; path_info[n].point.y=0.0; if (IsEventLogging() != MagickFalse) LogPathInfo(path_info); return(path_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s t r o y D r a w I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyDrawInfo() deallocates memory associated with an DrawInfo % structure. % % The format of the DestroyDrawInfo method is: % % DrawInfo *DestroyDrawInfo(DrawInfo *draw_info) % % A description of each parameter follows: % % o draw_info: the draw info. 
% */ MagickExport DrawInfo *DestroyDrawInfo(DrawInfo *draw_info) { if (draw_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(draw_info != (DrawInfo *) NULL); assert(draw_info->signature == MagickCoreSignature); if (draw_info->primitive != (char *) NULL) draw_info->primitive=DestroyString(draw_info->primitive); if (draw_info->text != (char *) NULL) draw_info->text=DestroyString(draw_info->text); if (draw_info->geometry != (char *) NULL) draw_info->geometry=DestroyString(draw_info->geometry); if (draw_info->fill_pattern != (Image *) NULL) draw_info->fill_pattern=DestroyImage(draw_info->fill_pattern); if (draw_info->stroke_pattern != (Image *) NULL) draw_info->stroke_pattern=DestroyImage(draw_info->stroke_pattern); if (draw_info->font != (char *) NULL) draw_info->font=DestroyString(draw_info->font); if (draw_info->metrics != (char *) NULL) draw_info->metrics=DestroyString(draw_info->metrics); if (draw_info->family != (char *) NULL) draw_info->family=DestroyString(draw_info->family); if (draw_info->encoding != (char *) NULL) draw_info->encoding=DestroyString(draw_info->encoding); if (draw_info->density != (char *) NULL) draw_info->density=DestroyString(draw_info->density); if (draw_info->server_name != (char *) NULL) draw_info->server_name=(char *) RelinquishMagickMemory(draw_info->server_name); if (draw_info->dash_pattern != (double *) NULL) draw_info->dash_pattern=(double *) RelinquishMagickMemory( draw_info->dash_pattern); if (draw_info->gradient.stops != (StopInfo *) NULL) draw_info->gradient.stops=(StopInfo *) RelinquishMagickMemory( draw_info->gradient.stops); if (draw_info->clip_mask != (char *) NULL) draw_info->clip_mask=DestroyString(draw_info->clip_mask); draw_info->signature=(~MagickCoreSignature); draw_info=(DrawInfo *) RelinquishMagickMemory(draw_info); return(draw_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y E d g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyEdge() destroys the specified polygon edge. % % The format of the DestroyEdge method is: % % ssize_t DestroyEdge(PolygonInfo *polygon_info,const int edge) % % A description of each parameter follows: % % o polygon_info: Specifies a pointer to an PolygonInfo structure. % % o edge: the polygon edge number to destroy. % */ static size_t DestroyEdge(PolygonInfo *polygon_info, const size_t edge) { assert(edge < polygon_info->number_edges); polygon_info->edges[edge].points=(PointInfo *) RelinquishMagickMemory( polygon_info->edges[edge].points); polygon_info->number_edges--; if (edge < polygon_info->number_edges) (void) CopyMagickMemory(polygon_info->edges+edge,polygon_info->edges+edge+1, (size_t) (polygon_info->number_edges-edge)*sizeof(*polygon_info->edges)); return(polygon_info->number_edges); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y P o l y g o n I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyPolygonInfo() destroys the PolygonInfo data structure. % % The format of the DestroyPolygonInfo method is: % % PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info) % % A description of each parameter follows: % % o polygon_info: Specifies a pointer to an PolygonInfo structure. 
% */ static PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info) { register ssize_t i; for (i=0; i < (ssize_t) polygon_info->number_edges; i++) polygon_info->edges[i].points=(PointInfo *) RelinquishMagickMemory(polygon_info->edges[i].points); polygon_info->edges=(EdgeInfo *) RelinquishMagickMemory(polygon_info->edges); return((PolygonInfo *) RelinquishMagickMemory(polygon_info)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w A f f i n e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawAffineImage() composites the source over the destination image as % dictated by the affine transform. % % The format of the DrawAffineImage method is: % % MagickBooleanType DrawAffineImage(Image *image,const Image *source, % const AffineMatrix *affine,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o source: the source image. % % o affine: the affine transform. % % o exception: return any errors or warnings in this structure. % */ static SegmentInfo AffineEdge(const Image *image,const AffineMatrix *affine, const double y,const SegmentInfo *edge) { double intercept, z; register double x; SegmentInfo inverse_edge; /* Determine left and right edges. */ inverse_edge.x1=edge->x1; inverse_edge.y1=edge->y1; inverse_edge.x2=edge->x2; inverse_edge.y2=edge->y2; z=affine->ry*y+affine->tx; if (affine->sx >= DrawEpsilon) { intercept=(-z/affine->sx); x=intercept; if (x > inverse_edge.x1) inverse_edge.x1=x; intercept=(-z+(double) image->columns)/affine->sx; x=intercept; if (x < inverse_edge.x2) inverse_edge.x2=x; } else if (affine->sx < -DrawEpsilon) { intercept=(-z+(double) image->columns)/affine->sx; x=intercept; if (x > inverse_edge.x1) inverse_edge.x1=x; intercept=(-z/affine->sx); x=intercept; if (x < inverse_edge.x2) inverse_edge.x2=x; } else if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->columns)) { inverse_edge.x2=edge->x1; return(inverse_edge); } /* Determine top and bottom edges. 
*/ z=affine->sy*y+affine->ty; if (affine->rx >= DrawEpsilon) { intercept=(-z/affine->rx); x=intercept; if (x > inverse_edge.x1) inverse_edge.x1=x; intercept=(-z+(double) image->rows)/affine->rx; x=intercept; if (x < inverse_edge.x2) inverse_edge.x2=x; } else if (affine->rx < -DrawEpsilon) { intercept=(-z+(double) image->rows)/affine->rx; x=intercept; if (x > inverse_edge.x1) inverse_edge.x1=x; intercept=(-z/affine->rx); x=intercept; if (x < inverse_edge.x2) inverse_edge.x2=x; } else if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->rows)) { inverse_edge.x2=edge->x2; return(inverse_edge); } return(inverse_edge); } static AffineMatrix InverseAffineMatrix(const AffineMatrix *affine) { AffineMatrix inverse_affine; double determinant; determinant=PerceptibleReciprocal(affine->sx*affine->sy-affine->rx* affine->ry); inverse_affine.sx=determinant*affine->sy; inverse_affine.rx=determinant*(-affine->rx); inverse_affine.ry=determinant*(-affine->ry); inverse_affine.sy=determinant*affine->sx; inverse_affine.tx=(-affine->tx)*inverse_affine.sx-affine->ty* inverse_affine.ry; inverse_affine.ty=(-affine->tx)*inverse_affine.rx-affine->ty* inverse_affine.sy; return(inverse_affine); } MagickExport MagickBooleanType DrawAffineImage(Image *image, const Image *source,const AffineMatrix *affine,ExceptionInfo *exception) { AffineMatrix inverse_affine; CacheView *image_view, *source_view; MagickBooleanType status; PixelInfo zero; PointInfo extent[4], min, max; register ssize_t i; SegmentInfo edge; ssize_t start, stop, y; /* Determine bounding box. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(source != (const Image *) NULL); assert(source->signature == MagickCoreSignature); assert(affine != (AffineMatrix *) NULL); extent[0].x=0.0; extent[0].y=0.0; extent[1].x=(double) source->columns-1.0; extent[1].y=0.0; extent[2].x=(double) source->columns-1.0; extent[2].y=(double) source->rows-1.0; extent[3].x=0.0; extent[3].y=(double) source->rows-1.0; for (i=0; i < 4; i++) { PointInfo point; point=extent[i]; extent[i].x=point.x*affine->sx+point.y*affine->ry+affine->tx; extent[i].y=point.x*affine->rx+point.y*affine->sy+affine->ty; } min=extent[0]; max=extent[0]; for (i=1; i < 4; i++) { if (min.x > extent[i].x) min.x=extent[i].x; if (min.y > extent[i].y) min.y=extent[i].y; if (max.x < extent[i].x) max.x=extent[i].x; if (max.y < extent[i].y) max.y=extent[i].y; } /* Affine transform image. 
*/ if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); status=MagickTrue; edge.x1=MagickMax(min.x,0.0); edge.y1=MagickMax(min.y,0.0); edge.x2=MagickMin(max.x,(double) image->columns-1.0); edge.y2=MagickMin(max.y,(double) image->rows-1.0); inverse_affine=InverseAffineMatrix(affine); GetPixelInfo(image,&zero); start=(ssize_t) ceil(edge.y1-0.5); stop=(ssize_t) floor(edge.y2+0.5); source_view=AcquireVirtualCacheView(source,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(source,image,1,1) #endif for (y=start; y <= stop; y++) { PixelInfo composite, pixel; PointInfo point; register ssize_t x; register Quantum *magick_restrict q; SegmentInfo inverse_edge; ssize_t x_offset; inverse_edge=AffineEdge(source,&inverse_affine,(double) y,&edge); if (inverse_edge.x2 < inverse_edge.x1) continue; q=GetCacheViewAuthenticPixels(image_view,(ssize_t) ceil(inverse_edge.x1- 0.5),y,(size_t) (floor(inverse_edge.x2+0.5)-ceil(inverse_edge.x1-0.5)+1), 1,exception); if (q == (Quantum *) NULL) continue; pixel=zero; composite=zero; x_offset=0; for (x=(ssize_t) ceil(inverse_edge.x1-0.5); x <= (ssize_t) floor(inverse_edge.x2+0.5); x++) { point.x=(double) x*inverse_affine.sx+y*inverse_affine.ry+ inverse_affine.tx; point.y=(double) x*inverse_affine.rx+y*inverse_affine.sy+ inverse_affine.ty; (void) InterpolatePixelInfo(source,source_view,UndefinedInterpolatePixel, point.x,point.y,&pixel,exception); GetPixelInfoPixel(image,q,&composite); CompositePixelInfoOver(&pixel,pixel.alpha,&composite,composite.alpha, &composite); SetPixelViaPixelInfo(image,&composite,q); x_offset++; q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D r a w B o u n d i n g R e c t a n g l e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawBoundingRectangles() draws the bounding rectangles on the image. This % is only useful for developers debugging the rendering algorithm. % % The format of the DrawBoundingRectangles method is: % % void DrawBoundingRectangles(Image *image,const DrawInfo *draw_info, % PolygonInfo *polygon_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o polygon_info: Specifies a pointer to a PolygonInfo structure. % % o exception: return any errors or warnings in this structure. 
% */ static void DrawBoundingRectangles(Image *image,const DrawInfo *draw_info, const PolygonInfo *polygon_info,ExceptionInfo *exception) { DrawInfo *clone_info; double mid; PointInfo end, resolution, start; PrimitiveInfo primitive_info[6]; register ssize_t i; SegmentInfo bounds; ssize_t coordinates; clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); (void) QueryColorCompliance("#000F",AllCompliance,&clone_info->fill, exception); resolution.x=DefaultResolution; resolution.y=DefaultResolution; if (clone_info->density != (char *) NULL) { GeometryInfo geometry_info; MagickStatusType flags; flags=ParseGeometry(clone_info->density,&geometry_info); resolution.x=geometry_info.rho; resolution.y=geometry_info.sigma; if ((flags & SigmaValue) == MagickFalse) resolution.y=resolution.x; } mid=(resolution.x/72.0)*ExpandAffine(&clone_info->affine)* clone_info->stroke_width/2.0; bounds.x1=0.0; bounds.y1=0.0; bounds.x2=0.0; bounds.y2=0.0; if (polygon_info != (PolygonInfo *) NULL) { bounds=polygon_info->edges[0].bounds; for (i=1; i < (ssize_t) polygon_info->number_edges; i++) { if (polygon_info->edges[i].bounds.x1 < (double) bounds.x1) bounds.x1=polygon_info->edges[i].bounds.x1; if (polygon_info->edges[i].bounds.y1 < (double) bounds.y1) bounds.y1=polygon_info->edges[i].bounds.y1; if (polygon_info->edges[i].bounds.x2 > (double) bounds.x2) bounds.x2=polygon_info->edges[i].bounds.x2; if (polygon_info->edges[i].bounds.y2 > (double) bounds.y2) bounds.y2=polygon_info->edges[i].bounds.y2; } bounds.x1-=mid; bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double) image->columns ? (double) image->columns-1 : bounds.x1; bounds.y1-=mid; bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double) image->rows ? (double) image->rows-1 : bounds.y1; bounds.x2+=mid; bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double) image->columns ? (double) image->columns-1 : bounds.x2; bounds.y2+=mid; bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double) image->rows ? 
(double) image->rows-1 : bounds.y2; for (i=0; i < (ssize_t) polygon_info->number_edges; i++) { if (polygon_info->edges[i].direction != 0) (void) QueryColorCompliance("red",AllCompliance,&clone_info->stroke, exception); else (void) QueryColorCompliance("green",AllCompliance,&clone_info->stroke, exception); start.x=(double) (polygon_info->edges[i].bounds.x1-mid); start.y=(double) (polygon_info->edges[i].bounds.y1-mid); end.x=(double) (polygon_info->edges[i].bounds.x2+mid); end.y=(double) (polygon_info->edges[i].bounds.y2+mid); primitive_info[0].primitive=RectanglePrimitive; TraceRectangle(primitive_info,start,end); primitive_info[0].method=ReplaceMethod; coordinates=(ssize_t) primitive_info[0].coordinates; primitive_info[coordinates].primitive=UndefinedPrimitive; (void) DrawPrimitive(image,clone_info,primitive_info,exception); } } (void) QueryColorCompliance("blue",AllCompliance,&clone_info->stroke, exception); start.x=(double) (bounds.x1-mid); start.y=(double) (bounds.y1-mid); end.x=(double) (bounds.x2+mid); end.y=(double) (bounds.y2+mid); primitive_info[0].primitive=RectanglePrimitive; TraceRectangle(primitive_info,start,end); primitive_info[0].method=ReplaceMethod; coordinates=(ssize_t) primitive_info[0].coordinates; primitive_info[coordinates].primitive=UndefinedPrimitive; (void) DrawPrimitive(image,clone_info,primitive_info,exception); clone_info=DestroyDrawInfo(clone_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w C l i p P a t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawClipPath() draws the clip path on the image mask. % % The format of the DrawClipPath method is: % % MagickBooleanType DrawClipPath(Image *image,const DrawInfo *draw_info, % const char *name,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o name: the name of the clip path. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType DrawClipPath(Image *image, const DrawInfo *draw_info,const char *name,ExceptionInfo *exception) { char filename[MagickPathExtent]; Image *clip_mask; const char *value; DrawInfo *clone_info; MagickStatusType status; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(draw_info != (const DrawInfo *) NULL); (void) FormatLocaleString(filename,MagickPathExtent,"%s",name); value=GetImageArtifact(image,filename); if (value == (const char *) NULL) return(MagickFalse); clip_mask=CloneImage(image,image->columns,image->rows,MagickTrue,exception); if (clip_mask == (Image *) NULL) return(MagickFalse); (void) QueryColorCompliance("#0000",AllCompliance, &clip_mask->background_color,exception); clip_mask->background_color.alpha=(MagickRealType) TransparentAlpha; (void) SetImageBackgroundColor(clip_mask,exception); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin clip-path %s", draw_info->clip_mask); clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); (void) CloneString(&clone_info->primitive,value); (void) QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill, exception); clone_info->clip_mask=(char *) NULL; status=NegateImage(clip_mask,MagickFalse,exception); (void) SetImageMask(image,ReadPixelMask,clip_mask,exception); clip_mask=DestroyImage(clip_mask); status&=DrawImage(image,clone_info,exception); clone_info=DestroyDrawInfo(clone_info); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end clip-path"); return(status != 0 ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D r a w D a s h P o l y g o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawDashPolygon() draws a dashed polygon (line, rectangle, ellipse) on the % image while respecting the dash offset and dash pattern attributes. % % The format of the DrawDashPolygon method is: % % MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info, % const PrimitiveInfo *primitive_info,Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o draw_info: the draw info. % % o primitive_info: Specifies a pointer to a PrimitiveInfo structure. % % o image: the image. % % o exception: return any errors or warnings in this structure. 
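%
%  This routine is internal and is reached when the active graphic context
%  carries a dash pattern.  A hedged sketch of that state (the lengths are
%  arbitrary; the array is zero-terminated, which is what the scanning loop
%  below relies on; error checking is elided):
%
%      draw_info->dash_pattern=(double *) AcquireQuantumMemory(3,
%        sizeof(*draw_info->dash_pattern));
%      draw_info->dash_pattern[0]=6.0;  /* "on" run, in user-space units */
%      draw_info->dash_pattern[1]=3.0;  /* "off" run */
%      draw_info->dash_pattern[2]=0.0;  /* terminator */
%      draw_info->dash_offset=0.0;      /* start at the head of the pattern */
%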
% */ static MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info, const PrimitiveInfo *primitive_info,Image *image,ExceptionInfo *exception) { DrawInfo *clone_info; double length, maximum_length, offset, scale, total_length; MagickStatusType status; PrimitiveInfo *dash_polygon; register ssize_t i; register double dx, dy; size_t number_vertices; ssize_t j, n; assert(draw_info != (const DrawInfo *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-dash"); for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ; number_vertices=(size_t) i; dash_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t) (2UL*number_vertices+1UL),sizeof(*dash_polygon)); if (dash_polygon == (PrimitiveInfo *) NULL) return(MagickFalse); clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); clone_info->miterlimit=0; dash_polygon[0]=primitive_info[0]; scale=ExpandAffine(&draw_info->affine); length=scale*(draw_info->dash_pattern[0]-0.5); offset=fabs(draw_info->dash_offset) >= DrawEpsilon ? scale*draw_info->dash_offset : 0.0; j=1; for (n=0; offset > 0.0; j=0) { if (draw_info->dash_pattern[n] <= 0.0) break; length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5)); if (offset > length) { offset-=length; n++; length=scale*(draw_info->dash_pattern[n]+0.5); continue; } if (offset < length) { length-=offset; offset=0.0; break; } offset=0.0; n++; } status=MagickTrue; maximum_length=0.0; total_length=0.0; for (i=1; (i < (ssize_t) number_vertices) && (length >= 0.0); i++) { dx=primitive_info[i].point.x-primitive_info[i-1].point.x; dy=primitive_info[i].point.y-primitive_info[i-1].point.y; maximum_length=hypot((double) dx,dy); if (fabs(length) < DrawEpsilon) { n++; if (fabs(draw_info->dash_pattern[n]) < DrawEpsilon) n=0; length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5)); } for (total_length=0.0; (length >= 0.0) && (maximum_length >= (total_length+length)); ) { total_length+=length; if ((n & 0x01) != 0) { dash_polygon[0]=primitive_info[0]; dash_polygon[0].point.x=(double) (primitive_info[i-1].point.x+dx* total_length/maximum_length); dash_polygon[0].point.y=(double) (primitive_info[i-1].point.y+dy* total_length/maximum_length); j=1; } else { if ((j+1) > (ssize_t) (2*number_vertices)) break; dash_polygon[j]=primitive_info[i-1]; dash_polygon[j].point.x=(double) (primitive_info[i-1].point.x+dx* total_length/maximum_length); dash_polygon[j].point.y=(double) (primitive_info[i-1].point.y+dy* total_length/maximum_length); dash_polygon[j].coordinates=1; j++; dash_polygon[0].coordinates=(size_t) j; dash_polygon[j].primitive=UndefinedPrimitive; status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception); } n++; if (fabs(draw_info->dash_pattern[n]) < DrawEpsilon) n=0; length=scale*(draw_info->dash_pattern[n]+(n == 0 ? 
-0.5 : 0.5)); } length-=(maximum_length-total_length); if ((n & 0x01) != 0) continue; dash_polygon[j]=primitive_info[i]; dash_polygon[j].coordinates=1; j++; } if ((total_length <= maximum_length) && ((n & 0x01) == 0) && (j > 1)) { dash_polygon[j]=primitive_info[i-1]; dash_polygon[j].point.x+=DrawEpsilon; dash_polygon[j].point.y+=DrawEpsilon; dash_polygon[j].coordinates=1; j++; dash_polygon[0].coordinates=(size_t) j; dash_polygon[j].primitive=UndefinedPrimitive; status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception); } dash_polygon=(PrimitiveInfo *) RelinquishMagickMemory(dash_polygon); clone_info=DestroyDrawInfo(clone_info); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-dash"); return(status != 0 ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawImage() draws a graphic primitive on your image. The primitive % may be represented as a string or filename. Precede the filename with an % "at" sign (@) and the contents of the file are drawn on the image. You % can affect how text is drawn by setting one or more members of the draw % info structure. % % The format of the DrawImage method is: % % MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o exception: return any errors or warnings in this structure. % */ static inline MagickBooleanType IsPoint(const char *point) { char *p; double value; value=StringToDouble(point,&p); return((fabs(value) < DrawEpsilon) && (p == point) ? 
MagickFalse : MagickTrue); } static inline void TracePoint(PrimitiveInfo *primitive_info, const PointInfo point) { primitive_info->coordinates=1; primitive_info->point=point; } MagickExport MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info, ExceptionInfo *exception) { #define RenderImageTag "Render/Image" AffineMatrix affine, current; char keyword[MagickPathExtent], geometry[MagickPathExtent], *next_token, pattern[MagickPathExtent], *primitive, *token; const char *q; double angle, factor, primitive_extent; DrawInfo **graphic_context; MagickBooleanType proceed; MagickSizeType length, number_points; MagickStatusType status; PointInfo point; PrimitiveInfo *primitive_info; PrimitiveType primitive_type; register const char *p; register ssize_t i, x; SegmentInfo bounds; size_t extent, number_stops; ssize_t j, k, n; StopInfo *stops; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(draw_info != (DrawInfo *) NULL); assert(draw_info->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); if ((draw_info->primitive == (char *) NULL) || (*draw_info->primitive == '\0')) return(MagickFalse); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"begin draw-image"); if (*draw_info->primitive != '@') primitive=AcquireString(draw_info->primitive); else primitive=FileToString(draw_info->primitive+1,~0UL,exception); if (primitive == (char *) NULL) return(MagickFalse); primitive_extent=(double) strlen(primitive); (void) SetImageArtifact(image,"MVG",primitive); n=0; number_stops=0; stops=(StopInfo *) NULL; /* Allocate primitive info memory. */ graphic_context=(DrawInfo **) AcquireMagickMemory(sizeof(*graphic_context)); if (graphic_context == (DrawInfo **) NULL) { primitive=DestroyString(primitive); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } number_points=6553; primitive_info=(PrimitiveInfo *) AcquireQuantumMemory((size_t) number_points, sizeof(*primitive_info)); if (primitive_info == (PrimitiveInfo *) NULL) { primitive=DestroyString(primitive); for ( ; n >= 0; n--) graphic_context[n]=DestroyDrawInfo(graphic_context[n]); graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,draw_info); graphic_context[n]->viewbox=image->page; if ((image->page.width == 0) || (image->page.height == 0)) { graphic_context[n]->viewbox.width=image->columns; graphic_context[n]->viewbox.height=image->rows; } token=AcquireString(primitive); extent=strlen(token)+MagickPathExtent; if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); status=MagickTrue; for (q=primitive; *q != '\0'; ) { /* Interpret graphic primitive. */ GetNextToken(q,&q,MagickPathExtent,keyword); if (*keyword == '\0') break; if (*keyword == '#') { /* Comment. 
*/ while ((*q != '\n') && (*q != '\0')) q++; continue; } p=q-strlen(keyword)-1; primitive_type=UndefinedPrimitive; current=graphic_context[n]->affine; GetAffineMatrix(&affine); switch (*keyword) { case ';': break; case 'a': case 'A': { if (LocaleCompare("affine",keyword) == 0) { GetNextToken(q,&q,extent,token); affine.sx=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); affine.rx=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); affine.ry=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); affine.sy=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); affine.tx=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); affine.ty=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; break; } if (LocaleCompare("alpha",keyword) == 0) { primitive_type=AlphaPrimitive; break; } if (LocaleCompare("arc",keyword) == 0) { primitive_type=ArcPrimitive; break; } status=MagickFalse; break; } case 'b': case 'B': { if (LocaleCompare("bezier",keyword) == 0) { primitive_type=BezierPrimitive; break; } if (LocaleCompare("border-color",keyword) == 0) { GetNextToken(q,&q,extent,token); (void) QueryColorCompliance(token,AllCompliance, &graphic_context[n]->border_color,exception); break; } status=MagickFalse; break; } case 'c': case 'C': { if (LocaleCompare("clip-path",keyword) == 0) { /* Create clip mask. 
*/ GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->clip_mask,token); (void) DrawClipPath(image,graphic_context[n], graphic_context[n]->clip_mask,exception); break; } if (LocaleCompare("clip-rule",keyword) == 0) { ssize_t fill_rule; GetNextToken(q,&q,extent,token); fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse, token); if (fill_rule == -1) status=MagickFalse; else graphic_context[n]->fill_rule=(FillRule) fill_rule; break; } if (LocaleCompare("clip-units",keyword) == 0) { ssize_t clip_units; GetNextToken(q,&q,extent,token); clip_units=ParseCommandOption(MagickClipPathOptions,MagickFalse, token); if (clip_units == -1) { status=MagickFalse; break; } graphic_context[n]->clip_units=(ClipPathUnits) clip_units; if (clip_units == ObjectBoundingBox) { GetAffineMatrix(&current); affine.sx=draw_info->bounds.x2; affine.sy=draw_info->bounds.y2; affine.tx=draw_info->bounds.x1; affine.ty=draw_info->bounds.y1; break; } break; } if (LocaleCompare("circle",keyword) == 0) { primitive_type=CirclePrimitive; break; } if (LocaleCompare("color",keyword) == 0) { primitive_type=ColorPrimitive; break; } status=MagickFalse; break; } case 'd': case 'D': { if (LocaleCompare("decorate",keyword) == 0) { ssize_t decorate; GetNextToken(q,&q,extent,token); decorate=ParseCommandOption(MagickDecorateOptions,MagickFalse, token); if (decorate == -1) status=MagickFalse; else graphic_context[n]->decorate=(DecorationType) decorate; break; } if (LocaleCompare("density",keyword) == 0) { GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->density,token); break; } if (LocaleCompare("direction",keyword) == 0) { ssize_t direction; GetNextToken(q,&q,extent,token); direction=ParseCommandOption(MagickDirectionOptions,MagickFalse, token); if (direction == -1) status=MagickFalse; else graphic_context[n]->direction=(DirectionType) direction; break; } status=MagickFalse; break; } case 'e': case 'E': { if (LocaleCompare("ellipse",keyword) == 0) { primitive_type=EllipsePrimitive; break; } if (LocaleCompare("encoding",keyword) == 0) { GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->encoding,token); break; } status=MagickFalse; break; } case 'f': case 'F': { if (LocaleCompare("fill",keyword) == 0) { GetNextToken(q,&q,extent,token); (void) FormatLocaleString(pattern,MagickPathExtent,"%s",token); if (GetImageArtifact(image,pattern) != (const char *) NULL) (void) DrawPatternPath(image,draw_info,token, &graphic_context[n]->fill_pattern,exception); else { status&=QueryColorCompliance(token,AllCompliance, &graphic_context[n]->fill,exception); if (graphic_context[n]->fill_alpha != OpaqueAlpha) graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha; if (status == MagickFalse) { ImageInfo *pattern_info; pattern_info=AcquireImageInfo(); (void) CopyMagickString(pattern_info->filename,token, MagickPathExtent); graphic_context[n]->fill_pattern=ReadImage(pattern_info, exception); CatchException(exception); pattern_info=DestroyImageInfo(pattern_info); } } break; } if (LocaleCompare("fill-opacity",keyword) == 0) { GetNextToken(q,&q,extent,token); factor=strchr(token,'%') != (char *) NULL ? 
0.01 : 1.0; graphic_context[n]->fill.alpha=QuantumRange-ClampToQuantum( (MagickRealType) QuantumRange*(1.0-factor*StringToDouble(token, &next_token))); if (token == next_token) status=MagickFalse; break; } if (LocaleCompare("fill-rule",keyword) == 0) { ssize_t fill_rule; GetNextToken(q,&q,extent,token); fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse, token); if (fill_rule == -1) status=MagickFalse; else graphic_context[n]->fill_rule=(FillRule) fill_rule; break; } if (LocaleCompare("font",keyword) == 0) { GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->font,token); if (LocaleCompare("none",token) == 0) graphic_context[n]->font=(char *) RelinquishMagickMemory( graphic_context[n]->font); break; } if (LocaleCompare("font-family",keyword) == 0) { GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->family,token); break; } if (LocaleCompare("font-size",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->pointsize=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; break; } if (LocaleCompare("font-stretch",keyword) == 0) { ssize_t stretch; GetNextToken(q,&q,extent,token); stretch=ParseCommandOption(MagickStretchOptions,MagickFalse,token); if (stretch == -1) status=MagickFalse; else graphic_context[n]->stretch=(StretchType) stretch; break; } if (LocaleCompare("font-style",keyword) == 0) { ssize_t style; GetNextToken(q,&q,extent,token); style=ParseCommandOption(MagickStyleOptions,MagickFalse,token); if (style == -1) status=MagickFalse; else graphic_context[n]->style=(StyleType) style; break; } if (LocaleCompare("font-weight",keyword) == 0) { ssize_t weight; GetNextToken(q,&q,extent,token); weight=ParseCommandOption(MagickWeightOptions,MagickFalse,token); if (weight == -1) weight=(ssize_t) StringToUnsignedLong(token); graphic_context[n]->weight=(size_t) weight; break; } status=MagickFalse; break; } case 'g': case 'G': { if (LocaleCompare("gradient-units",keyword) == 0) { GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("gravity",keyword) == 0) { ssize_t gravity; GetNextToken(q,&q,extent,token); gravity=ParseCommandOption(MagickGravityOptions,MagickFalse,token); if (gravity == -1) status=MagickFalse; else graphic_context[n]->gravity=(GravityType) gravity; break; } status=MagickFalse; break; } case 'i': case 'I': { if (LocaleCompare("image",keyword) == 0) { ssize_t compose; primitive_type=ImagePrimitive; GetNextToken(q,&q,extent,token); compose=ParseCommandOption(MagickComposeOptions,MagickFalse,token); if (compose == -1) status=MagickFalse; else graphic_context[n]->compose=(CompositeOperator) compose; break; } if (LocaleCompare("interline-spacing",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->interline_spacing=StringToDouble(token, &next_token); if (token == next_token) status=MagickFalse; break; } if (LocaleCompare("interword-spacing",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->interword_spacing=StringToDouble(token, &next_token); if (token == next_token) status=MagickFalse; break; } status=MagickFalse; break; } case 'k': case 'K': { if (LocaleCompare("kerning",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->kerning=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; break; } status=MagickFalse; break; } case 'l': case 'L': { if (LocaleCompare("line",keyword) == 0) primitive_type=LinePrimitive; else status=MagickFalse; break; } case 'o': case 'O': { if (LocaleCompare("offset",keyword) 
== 0) { GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("opacity",keyword) == 0) { GetNextToken(q,&q,extent,token); factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0; graphic_context[n]->alpha=QuantumRange*(1.0-(QuantumScale* graphic_context[n]->alpha*(1.0-factor*StringToDouble(token, &next_token)))); graphic_context[n]->fill_alpha=QuantumRange*(1.0-(QuantumScale* graphic_context[n]->fill_alpha*(1.0-factor*StringToDouble(token, &next_token)))); graphic_context[n]->stroke_alpha=QuantumRange*(1.0-(QuantumScale* graphic_context[n]->stroke_alpha*(1.0-factor*StringToDouble(token, &next_token)))); if (token == next_token) status=MagickFalse; break; } status=MagickFalse; break; } case 'p': case 'P': { if (LocaleCompare("path",keyword) == 0) { primitive_type=PathPrimitive; break; } if (LocaleCompare("point",keyword) == 0) { primitive_type=PointPrimitive; break; } if (LocaleCompare("polyline",keyword) == 0) { primitive_type=PolylinePrimitive; break; } if (LocaleCompare("polygon",keyword) == 0) { primitive_type=PolygonPrimitive; break; } if (LocaleCompare("pop",keyword) == 0) { GetNextToken(q,&q,extent,token); if (LocaleCompare("clip-path",token) == 0) break; if (LocaleCompare("defs",token) == 0) break; if (LocaleCompare("gradient",token) == 0) break; if (LocaleCompare("graphic-context",token) == 0) { if (n <= 0) { (void) ThrowMagickException(exception,GetMagickModule(), DrawError,"UnbalancedGraphicContextPushPop","`%s'",token); status=MagickFalse; n=0; break; } if (graphic_context[n]->clip_mask != (char *) NULL) if (LocaleCompare(graphic_context[n]->clip_mask, graphic_context[n-1]->clip_mask) != 0) (void) SetImageMask(image,ReadPixelMask,(Image *) NULL, exception); graphic_context[n]=DestroyDrawInfo(graphic_context[n]); n--; break; } if (LocaleCompare("pattern",token) == 0) break; status=MagickFalse; break; } if (LocaleCompare("push",keyword) == 0) { GetNextToken(q,&q,extent,token); if (LocaleCompare("clip-path",token) == 0) { char name[MagickPathExtent]; GetNextToken(q,&q,extent,token); (void) FormatLocaleString(name,MagickPathExtent,"%s",token); for (p=q; *q != '\0'; ) { GetNextToken(q,&q,extent,token); if (LocaleCompare(token,"pop") != 0) continue; GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"clip-path") != 0) continue; break; } (void) CopyMagickString(token,p,(size_t) (q-p-4+1)); (void) SetImageArtifact(image,name,token); GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("gradient",token) == 0) { char key[2*MagickPathExtent], name[MagickPathExtent], type[MagickPathExtent]; SegmentInfo segment; GetNextToken(q,&q,extent,token); (void) CopyMagickString(name,token,MagickPathExtent); GetNextToken(q,&q,extent,token); (void) CopyMagickString(type,token,MagickPathExtent); GetNextToken(q,&q,extent,token); segment.x1=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); segment.y1=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); segment.x2=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); segment.y2=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; if (LocaleCompare(type,"radial") == 0) { GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); } for 
(p=q; *q != '\0'; ) { GetNextToken(q,&q,extent,token); if (LocaleCompare(token,"pop") != 0) continue; GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"gradient") != 0) continue; break; } (void) CopyMagickString(token,p,(size_t) (q-p-4+1)); bounds.x1=graphic_context[n]->affine.sx*segment.x1+ graphic_context[n]->affine.ry*segment.y1+ graphic_context[n]->affine.tx; bounds.y1=graphic_context[n]->affine.rx*segment.x1+ graphic_context[n]->affine.sy*segment.y1+ graphic_context[n]->affine.ty; bounds.x2=graphic_context[n]->affine.sx*segment.x2+ graphic_context[n]->affine.ry*segment.y2+ graphic_context[n]->affine.tx; bounds.y2=graphic_context[n]->affine.rx*segment.x2+ graphic_context[n]->affine.sy*segment.y2+ graphic_context[n]->affine.ty; (void) FormatLocaleString(key,MagickPathExtent,"%s",name); (void) SetImageArtifact(image,key,token); (void) FormatLocaleString(key,MagickPathExtent,"%s-type",name); (void) SetImageArtifact(image,key,type); (void) FormatLocaleString(key,MagickPathExtent,"%s-geometry", name); (void) FormatLocaleString(geometry,MagickPathExtent, "%gx%g%+.15g%+.15g", MagickMax(fabs(bounds.x2-bounds.x1+1.0),1.0), MagickMax(fabs(bounds.y2-bounds.y1+1.0),1.0), bounds.x1,bounds.y1); (void) SetImageArtifact(image,key,geometry); GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("pattern",token) == 0) { char key[2*MagickPathExtent], name[MagickPathExtent]; RectangleInfo pattern_bounds; GetNextToken(q,&q,extent,token); (void) CopyMagickString(name,token,MagickPathExtent); GetNextToken(q,&q,extent,token); pattern_bounds.x=(ssize_t) ceil(StringToDouble(token, &next_token)-0.5); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); pattern_bounds.y=(ssize_t) ceil(StringToDouble(token, &next_token)-0.5); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); pattern_bounds.width=(size_t) floor(StringToDouble(token, &next_token)+0.5); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); pattern_bounds.height=(size_t) floor(StringToDouble(token, &next_token)+0.5); if (token == next_token) status=MagickFalse; for (p=q; *q != '\0'; ) { GetNextToken(q,&q,extent,token); if (LocaleCompare(token,"pop") != 0) continue; GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"pattern") != 0) continue; break; } (void) CopyMagickString(token,p,(size_t) (q-p-4+1)); (void) FormatLocaleString(key,MagickPathExtent,"%s",name); (void) SetImageArtifact(image,key,token); (void) FormatLocaleString(key,MagickPathExtent,"%s-geometry", name); (void) FormatLocaleString(geometry,MagickPathExtent, "%.20gx%.20g%+.20g%+.20g",(double)pattern_bounds.width, (double)pattern_bounds.height,(double)pattern_bounds.x, (double)pattern_bounds.y); (void) SetImageArtifact(image,key,geometry); GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("graphic-context",token) == 0) { n++; graphic_context=(DrawInfo **) ResizeQuantumMemory( graphic_context,(size_t) (n+1),sizeof(*graphic_context)); if (graphic_context == (DrawInfo **) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); break; } graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL, graphic_context[n-1]); break; } if (LocaleCompare("defs",token) == 0) break; status=MagickFalse; break; } status=MagickFalse; break; } 
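/*
    Hedged illustration of the MVG block syntax handled by the "push"/"pop"
    branch above (the names, coordinates, and colors are arbitrary examples):

      push clip-path myclip
        rectangle 10,10 100,100
      pop clip-path

      push gradient fade linear 0,0 0,64
        stop-color white 0
        stop-color black 1
      pop gradient

      push pattern hatch 0,0 8,8
        line 0,0 8,8
      pop pattern

    Each block body is copied verbatim into an image artifact keyed by the
    given name (patterns and gradients also record a "-geometry", and
    gradients a "-type", companion artifact) and is rendered on demand by
    DrawPatternPath() or DrawClipPath().
  */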
case 'r': case 'R': { if (LocaleCompare("rectangle",keyword) == 0) { primitive_type=RectanglePrimitive; break; } if (LocaleCompare("rotate",keyword) == 0) { GetNextToken(q,&q,extent,token); angle=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; affine.sx=cos(DegreesToRadians(fmod((double) angle,360.0))); affine.rx=sin(DegreesToRadians(fmod((double) angle,360.0))); affine.ry=(-sin(DegreesToRadians(fmod((double) angle,360.0)))); affine.sy=cos(DegreesToRadians(fmod((double) angle,360.0))); break; } if (LocaleCompare("roundRectangle",keyword) == 0) { primitive_type=RoundRectanglePrimitive; break; } status=MagickFalse; break; } case 's': case 'S': { if (LocaleCompare("scale",keyword) == 0) { GetNextToken(q,&q,extent,token); affine.sx=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); affine.sy=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; break; } if (LocaleCompare("skewX",keyword) == 0) { GetNextToken(q,&q,extent,token); angle=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; affine.ry=sin(DegreesToRadians(angle)); break; } if (LocaleCompare("skewY",keyword) == 0) { GetNextToken(q,&q,extent,token); angle=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; affine.rx=(-tan(DegreesToRadians(angle)/2.0)); break; } if (LocaleCompare("stop-color",keyword) == 0) { PixelInfo stop_color; number_stops++; if (number_stops == 1) stops=(StopInfo *) AcquireQuantumMemory(2,sizeof(*stops)); else if (number_stops > 2) stops=(StopInfo *) ResizeQuantumMemory(stops,number_stops, sizeof(*stops)); if (stops == (StopInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); break; } GetNextToken(q,&q,extent,token); (void) QueryColorCompliance(token,AllCompliance,&stop_color, exception); stops[number_stops-1].color=stop_color; GetNextToken(q,&q,extent,token); stops[number_stops-1].offset=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; break; } if (LocaleCompare("stroke",keyword) == 0) { GetNextToken(q,&q,extent,token); (void) FormatLocaleString(pattern,MagickPathExtent,"%s",token); if (GetImageArtifact(image,pattern) != (const char *) NULL) (void) DrawPatternPath(image,draw_info,token, &graphic_context[n]->stroke_pattern,exception); else { status&=QueryColorCompliance(token,AllCompliance, &graphic_context[n]->stroke,exception); if (graphic_context[n]->stroke_alpha != OpaqueAlpha) graphic_context[n]->stroke.alpha= graphic_context[n]->stroke_alpha; if (status == MagickFalse) { ImageInfo *pattern_info; pattern_info=AcquireImageInfo(); (void) CopyMagickString(pattern_info->filename,token, MagickPathExtent); graphic_context[n]->stroke_pattern=ReadImage(pattern_info, exception); CatchException(exception); pattern_info=DestroyImageInfo(pattern_info); } } break; } if (LocaleCompare("stroke-antialias",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->stroke_antialias= StringToLong(token) != 0 ? 
MagickTrue : MagickFalse; break; } if (LocaleCompare("stroke-dasharray",keyword) == 0) { if (graphic_context[n]->dash_pattern != (double *) NULL) graphic_context[n]->dash_pattern=(double *) RelinquishMagickMemory(graphic_context[n]->dash_pattern); if (IsPoint(q) != MagickFalse) { const char *r; r=q; GetNextToken(r,&r,extent,token); if (*token == ',') GetNextToken(r,&r,extent,token); for (x=0; IsPoint(token) != MagickFalse; x++) { GetNextToken(r,&r,extent,token); if (*token == ',') GetNextToken(r,&r,extent,token); } graphic_context[n]->dash_pattern=(double *) AcquireQuantumMemory((size_t) (2UL*x+1UL), sizeof(*graphic_context[n]->dash_pattern)); if (graphic_context[n]->dash_pattern == (double *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); status=MagickFalse; break; } for (j=0; j < x; j++) { GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); graphic_context[n]->dash_pattern[j]=StringToDouble(token, &next_token); if (token == next_token) status=MagickFalse; if (graphic_context[n]->dash_pattern[j] < 0.0) status=MagickFalse; } if ((x & 0x01) != 0) for ( ; j < (2*x); j++) graphic_context[n]->dash_pattern[j]= graphic_context[n]->dash_pattern[j-x]; graphic_context[n]->dash_pattern[j]=0.0; break; } GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("stroke-dashoffset",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->dash_offset=StringToDouble(token, &next_token); if (token == next_token) status=MagickFalse; break; } if (LocaleCompare("stroke-linecap",keyword) == 0) { ssize_t linecap; GetNextToken(q,&q,extent,token); linecap=ParseCommandOption(MagickLineCapOptions,MagickFalse,token); if (linecap == -1) status=MagickFalse; else graphic_context[n]->linecap=(LineCap) linecap; break; } if (LocaleCompare("stroke-linejoin",keyword) == 0) { ssize_t linejoin; GetNextToken(q,&q,extent,token); linejoin=ParseCommandOption(MagickLineJoinOptions,MagickFalse, token); if (linejoin == -1) status=MagickFalse; else graphic_context[n]->linejoin=(LineJoin) linejoin; break; } if (LocaleCompare("stroke-miterlimit",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->miterlimit=StringToUnsignedLong(token); break; } if (LocaleCompare("stroke-opacity",keyword) == 0) { GetNextToken(q,&q,extent,token); factor=strchr(token,'%') != (char *) NULL ? 
0.01 : 1.0; graphic_context[n]->stroke.alpha=QuantumRange-ClampToQuantum( (MagickRealType) QuantumRange*(1.0-factor*StringToDouble(token, &next_token))); if (token == next_token) status=MagickFalse; break; } if (LocaleCompare("stroke-width",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->stroke_width=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; break; } status=MagickFalse; break; } case 't': case 'T': { if (LocaleCompare("text",keyword) == 0) { primitive_type=TextPrimitive; break; } if (LocaleCompare("text-align",keyword) == 0) { ssize_t align; GetNextToken(q,&q,extent,token); align=ParseCommandOption(MagickAlignOptions,MagickFalse,token); if (align == -1) status=MagickFalse; else graphic_context[n]->align=(AlignType) align; break; } if (LocaleCompare("text-anchor",keyword) == 0) { ssize_t align; GetNextToken(q,&q,extent,token); align=ParseCommandOption(MagickAlignOptions,MagickFalse,token); if (align == -1) status=MagickFalse; else graphic_context[n]->align=(AlignType) align; break; } if (LocaleCompare("text-antialias",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->text_antialias=StringToLong(token) != 0 ? MagickTrue : MagickFalse; break; } if (LocaleCompare("text-undercolor",keyword) == 0) { GetNextToken(q,&q,extent,token); (void) QueryColorCompliance(token,AllCompliance, &graphic_context[n]->undercolor,exception); break; } if (LocaleCompare("translate",keyword) == 0) { GetNextToken(q,&q,extent,token); affine.tx=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); affine.ty=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; break; } status=MagickFalse; break; } case 'v': case 'V': { if (LocaleCompare("viewbox",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.x=(ssize_t) ceil(StringToDouble(token, &next_token)-0.5); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.y=(ssize_t) ceil(StringToDouble(token, &next_token)-0.5); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.width=(size_t) floor(StringToDouble( token,&next_token)+0.5); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.height=(size_t) floor(StringToDouble( token,&next_token)+0.5); if (token == next_token) status=MagickFalse; break; } status=MagickFalse; break; } default: { status=MagickFalse; break; } } if (status == MagickFalse) break; if ((fabs(affine.sx-1.0) >= DrawEpsilon) || (fabs(affine.rx) >= DrawEpsilon) || (fabs(affine.ry) >= DrawEpsilon) || (fabs(affine.sy-1.0) >= DrawEpsilon) || (fabs(affine.tx) >= DrawEpsilon) || (fabs(affine.ty) >= DrawEpsilon)) { graphic_context[n]->affine.sx=current.sx*affine.sx+current.ry*affine.rx; graphic_context[n]->affine.rx=current.rx*affine.sx+current.sy*affine.rx; graphic_context[n]->affine.ry=current.sx*affine.ry+current.ry*affine.sy; graphic_context[n]->affine.sy=current.rx*affine.ry+current.sy*affine.sy; graphic_context[n]->affine.tx=current.sx*affine.tx+current.ry*affine.ty+ current.tx; graphic_context[n]->affine.ty=current.rx*affine.tx+current.sy*affine.ty+ current.ty; } if (primitive_type == UndefinedPrimitive) { if 
(*q == '\0') { if (number_stops > 1) { GradientType type; type=LinearGradient; if (draw_info->gradient.type == RadialGradient) type=RadialGradient; (void) GradientImage(image,type,PadSpread,stops,number_stops, exception); } if (number_stops > 0) stops=(StopInfo *) RelinquishMagickMemory(stops); } if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p),p); continue; } /* Parse the primitive attributes. */ i=0; j=0; primitive_info[0].point.x=0.0; primitive_info[0].point.y=0.0; for (x=0; *q != '\0'; x++) { /* Define points. */ if (IsPoint(q) == MagickFalse) break; GetNextToken(q,&q,extent,token); point.x=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); point.y=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; GetNextToken(q,(const char **) NULL,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); primitive_info[i].primitive=primitive_type; primitive_info[i].point=point; primitive_info[i].coordinates=0; primitive_info[i].method=FloodfillMethod; i++; if (i < (ssize_t) number_points) continue; number_points<<=1; primitive_info=(PrimitiveInfo *) ResizeQuantumMemory(primitive_info, (size_t) number_points,sizeof(*primitive_info)); if ((primitive_info == (PrimitiveInfo *) NULL) || (number_points != (MagickSizeType) ((size_t) number_points))) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } primitive_info[j].primitive=primitive_type; primitive_info[j].coordinates=(size_t) x; primitive_info[j].method=FloodfillMethod; primitive_info[j].text=(char *) NULL; /* Circumscribe primitive within a circle. */ bounds.x1=primitive_info[j].point.x; bounds.y1=primitive_info[j].point.y; bounds.x2=primitive_info[j].point.x; bounds.y2=primitive_info[j].point.y; for (k=1; k < (ssize_t) primitive_info[j].coordinates; k++) { point=primitive_info[j+k].point; if (point.x < bounds.x1) bounds.x1=point.x; if (point.y < bounds.y1) bounds.y1=point.y; if (point.x > bounds.x2) bounds.x2=point.x; if (point.y > bounds.y2) bounds.y2=point.y; } /* Speculate how many points our primitive might consume. */ length=primitive_info[j].coordinates; switch (primitive_type) { case RectanglePrimitive: { length*=5; break; } case RoundRectanglePrimitive: { double alpha, beta, radius; alpha=bounds.x2-bounds.x1; beta=bounds.y2-bounds.y1; radius=hypot((double) alpha,(double) beta); length*=5; length+=2*((size_t) ceil((double) MagickPI*radius))+6*BezierQuantum+360; break; } case BezierPrimitive: { if (primitive_info[j].coordinates > 107) (void) ThrowMagickException(exception,GetMagickModule(),DrawError, "TooManyBezierCoordinates","`%s'",token); length=BezierQuantum*primitive_info[j].coordinates; break; } case PathPrimitive: { char *s, *t; GetNextToken(q,&q,extent,token); length=1; t=token; for (s=token; *s != '\0'; s=t) { double value; value=StringToDouble(s,&t); (void) value; if (s == t) { t++; continue; } length++; } length=length*BezierQuantum; break; } case CirclePrimitive: case ArcPrimitive: case EllipsePrimitive: { double alpha, beta, radius; alpha=bounds.x2-bounds.x1; beta=bounds.y2-bounds.y1; radius=hypot((double) alpha,(double) beta); length=2*((size_t) ceil((double) MagickPI*radius))+6*BezierQuantum+360; break; } default: break; } if ((i+length) >= number_points) { /* Resize based on speculative points required by primitive. 
*/ number_points+=length+1; primitive_info=(PrimitiveInfo *) ResizeQuantumMemory(primitive_info, (size_t) number_points,sizeof(*primitive_info)); if ((primitive_info == (PrimitiveInfo *) NULL) || (number_points != (MagickSizeType) ((size_t) number_points))) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); break; } } switch (primitive_type) { case PointPrimitive: default: { if (primitive_info[j].coordinates != 1) { status=MagickFalse; break; } TracePoint(primitive_info+j,primitive_info[j].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case LinePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } TraceLine(primitive_info+j,primitive_info[j].point, primitive_info[j+1].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case RectanglePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } TraceRectangle(primitive_info+j,primitive_info[j].point, primitive_info[j+1].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case RoundRectanglePrimitive: { if (primitive_info[j].coordinates != 3) { status=MagickFalse; break; } TraceRoundRectangle(primitive_info+j,primitive_info[j].point, primitive_info[j+1].point,primitive_info[j+2].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case ArcPrimitive: { if (primitive_info[j].coordinates != 3) { primitive_type=UndefinedPrimitive; break; } TraceArc(primitive_info+j,primitive_info[j].point, primitive_info[j+1].point,primitive_info[j+2].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case EllipsePrimitive: { if (primitive_info[j].coordinates != 3) { status=MagickFalse; break; } TraceEllipse(primitive_info+j,primitive_info[j].point, primitive_info[j+1].point,primitive_info[j+2].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case CirclePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } TraceCircle(primitive_info+j,primitive_info[j].point, primitive_info[j+1].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case PolylinePrimitive: break; case PolygonPrimitive: { primitive_info[i]=primitive_info[j]; primitive_info[i].coordinates=0; primitive_info[j].coordinates++; i++; break; } case BezierPrimitive: { if (primitive_info[j].coordinates < 3) { status=MagickFalse; break; } TraceBezier(primitive_info+j,primitive_info[j].coordinates); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case PathPrimitive: { i=(ssize_t) (j+TracePath(primitive_info+j,token)); break; } case AlphaPrimitive: case ColorPrimitive: { ssize_t method; if (primitive_info[j].coordinates != 1) { status=MagickFalse; break; } GetNextToken(q,&q,extent,token); method=ParseCommandOption(MagickMethodOptions,MagickFalse,token); if (method == -1) status=MagickFalse; else primitive_info[j].method=(PaintMethod) method; break; } case TextPrimitive: { if (primitive_info[j].coordinates != 1) { status=MagickFalse; break; } if (*token != ',') GetNextToken(q,&q,extent,token); primitive_info[j].text=AcquireString(token); break; } case ImagePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } GetNextToken(q,&q,extent,token); primitive_info[j].text=AcquireString(token); break; } } if (primitive_info == (PrimitiveInfo *) NULL) break; if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p),p); if (status == MagickFalse) break; 
primitive_info[i].primitive=UndefinedPrimitive; if (i == 0) continue; /* Transform points. */ for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) { point=primitive_info[i].point; primitive_info[i].point.x=graphic_context[n]->affine.sx*point.x+ graphic_context[n]->affine.ry*point.y+graphic_context[n]->affine.tx; primitive_info[i].point.y=graphic_context[n]->affine.rx*point.x+ graphic_context[n]->affine.sy*point.y+graphic_context[n]->affine.ty; point=primitive_info[i].point; if (point.x < graphic_context[n]->bounds.x1) graphic_context[n]->bounds.x1=point.x; if (point.y < graphic_context[n]->bounds.y1) graphic_context[n]->bounds.y1=point.y; if (point.x > graphic_context[n]->bounds.x2) graphic_context[n]->bounds.x2=point.x; if (point.y > graphic_context[n]->bounds.y2) graphic_context[n]->bounds.y2=point.y; if (primitive_info[i].primitive == ImagePrimitive) break; if (i >= (ssize_t) number_points) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); } if (graphic_context[n]->render != MagickFalse) { if ((n != 0) && (graphic_context[n]->clip_mask != (char *) NULL) && (LocaleCompare(graphic_context[n]->clip_mask, graphic_context[n-1]->clip_mask) != 0)) status&=DrawClipPath(image,graphic_context[n], graphic_context[n]->clip_mask,exception); status&=DrawPrimitive(image,graphic_context[n],primitive_info, exception); } if (primitive_info->text != (char *) NULL) primitive_info->text=(char *) RelinquishMagickMemory( primitive_info->text); proceed=SetImageProgress(image,RenderImageTag,q-primitive,(MagickSizeType) primitive_extent); if (proceed == MagickFalse) break; if (status == 0) break; } if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end draw-image"); /* Relinquish resources. */ token=DestroyString(token); if (primitive_info != (PrimitiveInfo *) NULL) primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(primitive_info); primitive=DestroyString(primitive); for ( ; n >= 0; n--) graphic_context[n]=DestroyDrawInfo(graphic_context[n]); graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context); if (status == MagickFalse) ThrowBinaryException(DrawError,"NonconformingDrawingPrimitiveDefinition", keyword); return(status != 0 ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w G r a d i e n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawGradientImage() draws a linear gradient on the image. % % The format of the DrawGradientImage method is: % % MagickBooleanType DrawGradientImage(Image *image, % const DrawInfo *draw_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o exception: return any errors or warnings in this structure. 
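%
%  A hedged sketch of the gradient state this routine consumes (the literal
%  values are illustrative only; a real caller fills the gradient member of
%  the draw info from a gradient definition before invoking the routine):
%
%      clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
%      clone_info->gradient.type=LinearGradient;
%      clone_info->gradient.spread=PadSpread;
%      clone_info->gradient.bounding_box.width=image->columns;
%      clone_info->gradient.bounding_box.height=image->rows;
%      clone_info->gradient.gradient_vector.x1=0.0;
%      clone_info->gradient.gradient_vector.y1=0.0;
%      clone_info->gradient.gradient_vector.x2=0.0;
%      clone_info->gradient.gradient_vector.y2=(double) image->rows-1.0;
%      /* gradient.stops/gradient.number_stops must hold the color stops;
%         they are sorted below before use */
%      (void) DrawGradientImage(image,clone_info,exception);
%      clone_info=DestroyDrawInfo(clone_info);
%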
% */ static inline double GetStopColorOffset(const GradientInfo *gradient, const ssize_t x,const ssize_t y) { switch (gradient->type) { case UndefinedGradient: case LinearGradient: { double gamma, length, offset, scale; PointInfo p, q; const SegmentInfo *gradient_vector; gradient_vector=(&gradient->gradient_vector); p.x=gradient_vector->x2-gradient_vector->x1; p.y=gradient_vector->y2-gradient_vector->y1; q.x=(double) x-gradient_vector->x1; q.y=(double) y-gradient_vector->y1; length=sqrt(q.x*q.x+q.y*q.y); gamma=sqrt(p.x*p.x+p.y*p.y)*length; gamma=PerceptibleReciprocal(gamma); scale=p.x*q.x+p.y*q.y; offset=gamma*scale*length; return(offset); } case RadialGradient: { PointInfo v; if (gradient->spread == RepeatSpread) { v.x=(double) x-gradient->center.x; v.y=(double) y-gradient->center.y; return(sqrt(v.x*v.x+v.y*v.y)); } v.x=(double) (((x-gradient->center.x)*cos(DegreesToRadians( gradient->angle)))+((y-gradient->center.y)*sin(DegreesToRadians( gradient->angle))))/gradient->radii.x; v.y=(double) (((x-gradient->center.x)*sin(DegreesToRadians( gradient->angle)))-((y-gradient->center.y)*cos(DegreesToRadians( gradient->angle))))/gradient->radii.y; return(sqrt(v.x*v.x+v.y*v.y)); } } return(0.0); } static int StopInfoCompare(const void *x,const void *y) { StopInfo *stop_1, *stop_2; stop_1=(StopInfo *) x; stop_2=(StopInfo *) y; if (stop_1->offset > stop_2->offset) return(1); if (fabs(stop_1->offset-stop_2->offset) <= DrawEpsilon) return(0); return(-1); } MagickExport MagickBooleanType DrawGradientImage(Image *image, const DrawInfo *draw_info,ExceptionInfo *exception) { CacheView *image_view; const GradientInfo *gradient; const SegmentInfo *gradient_vector; double length; MagickBooleanType status; PixelInfo zero; PointInfo point; RectangleInfo bounding_box; ssize_t y; /* Draw linear or radial gradient on image. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(draw_info != (const DrawInfo *) NULL); gradient=(&draw_info->gradient); qsort(gradient->stops,gradient->number_stops,sizeof(StopInfo), StopInfoCompare); gradient_vector=(&gradient->gradient_vector); point.x=gradient_vector->x2-gradient_vector->x1; point.y=gradient_vector->y2-gradient_vector->y1; length=sqrt(point.x*point.x+point.y*point.y); bounding_box=gradient->bounding_box; status=MagickTrue; GetPixelInfo(image,&zero); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,1,1) #endif for (y=bounding_box.y; y < (ssize_t) bounding_box.height; y++) { PixelInfo composite, pixel; double alpha, offset; register Quantum *magick_restrict q; register ssize_t i, x; ssize_t j; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } pixel=zero; composite=zero; offset=GetStopColorOffset(gradient,0,y); if (gradient->type != RadialGradient) offset/=length; for (x=bounding_box.x; x < (ssize_t) bounding_box.width; x++) { GetPixelInfoPixel(image,q,&pixel); switch (gradient->spread) { case UndefinedSpread: case PadSpread: { if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) || (y != (ssize_t) ceil(gradient_vector->y1-0.5))) { offset=GetStopColorOffset(gradient,x,y); if (gradient->type != RadialGradient) offset/=length; } for (i=0; i < (ssize_t) gradient->number_stops; i++) if (offset < gradient->stops[i].offset) break; if ((offset < 0.0) || (i == 0)) composite=gradient->stops[0].color; else if ((offset > 1.0) || (i == (ssize_t) gradient->number_stops)) composite=gradient->stops[gradient->number_stops-1].color; else { j=i; i--; alpha=(offset-gradient->stops[i].offset)/ (gradient->stops[j].offset-gradient->stops[i].offset); CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha, &gradient->stops[j].color,alpha,&composite); } break; } case ReflectSpread: { if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) || (y != (ssize_t) ceil(gradient_vector->y1-0.5))) { offset=GetStopColorOffset(gradient,x,y); if (gradient->type != RadialGradient) offset/=length; } if (offset < 0.0) offset=(-offset); if ((ssize_t) fmod(offset,2.0) == 0) offset=fmod(offset,1.0); else offset=1.0-fmod(offset,1.0); for (i=0; i < (ssize_t) gradient->number_stops; i++) if (offset < gradient->stops[i].offset) break; if (i == 0) composite=gradient->stops[0].color; else if (i == (ssize_t) gradient->number_stops) composite=gradient->stops[gradient->number_stops-1].color; else { j=i; i--; alpha=(offset-gradient->stops[i].offset)/ (gradient->stops[j].offset-gradient->stops[i].offset); CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha, &gradient->stops[j].color,alpha,&composite); } break; } case RepeatSpread: { MagickBooleanType antialias; double repeat; antialias=MagickFalse; repeat=0.0; if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) || (y != (ssize_t) ceil(gradient_vector->y1-0.5))) { offset=GetStopColorOffset(gradient,x,y); if (gradient->type == LinearGradient) { repeat=fmod(offset,length); if (repeat < 0.0) repeat=length-fmod(-repeat,length); else repeat=fmod(offset,length); antialias=(repeat < length) && ((repeat+1.0) > length) ? 
MagickTrue : MagickFalse; offset=repeat/length; } else { repeat=fmod(offset,gradient->radius); if (repeat < 0.0) repeat=gradient->radius-fmod(-repeat,gradient->radius); else repeat=fmod(offset,gradient->radius); antialias=repeat+1.0 > gradient->radius ? MagickTrue : MagickFalse; offset=repeat/gradient->radius; } } for (i=0; i < (ssize_t) gradient->number_stops; i++) if (offset < gradient->stops[i].offset) break; if (i == 0) composite=gradient->stops[0].color; else if (i == (ssize_t) gradient->number_stops) composite=gradient->stops[gradient->number_stops-1].color; else { j=i; i--; alpha=(offset-gradient->stops[i].offset)/ (gradient->stops[j].offset-gradient->stops[i].offset); if (antialias != MagickFalse) { if (gradient->type == LinearGradient) alpha=length-repeat; else alpha=gradient->radius-repeat; i=0; j=(ssize_t) gradient->number_stops-1L; } CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha, &gradient->stops[j].color,alpha,&composite); } break; } } CompositePixelInfoOver(&composite,composite.alpha,&pixel,pixel.alpha, &pixel); SetPixelViaPixelInfo(image,&pixel,q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w P a t t e r n P a t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawPatternPath() draws a pattern. % % The format of the DrawPatternPath method is: % % MagickBooleanType DrawPatternPath(Image *image,const DrawInfo *draw_info, % const char *name,Image **pattern,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o name: the pattern name. % % o image: the image. % % o exception: return any errors or warnings in this structure. 
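%
%  For illustration only ("hatch" is an assumed name; the pattern must have
%  been registered beforehand as "hatch" and "hatch-geometry" image
%  artifacts, e.g. by an MVG "push pattern hatch ..." block):
%
%      Image
%        *tile;
%
%      tile=(Image *) NULL;
%      if (DrawPatternPath(image,draw_info,"hatch",&tile,exception) != MagickFalse)
%        {
%          /* tile now holds the rendered pattern cell */
%          tile=DestroyImage(tile);
%        }
%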
% */ MagickExport MagickBooleanType DrawPatternPath(Image *image, const DrawInfo *draw_info,const char *name,Image **pattern, ExceptionInfo *exception) { char property[MagickPathExtent]; const char *geometry, *path, *type; DrawInfo *clone_info; ImageInfo *image_info; MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(draw_info != (const DrawInfo *) NULL); assert(name != (const char *) NULL); (void) FormatLocaleString(property,MagickPathExtent,"%s",name); path=GetImageArtifact(image,property); if (path == (const char *) NULL) return(MagickFalse); (void) FormatLocaleString(property,MagickPathExtent,"%s-geometry",name); geometry=GetImageArtifact(image,property); if (geometry == (const char *) NULL) return(MagickFalse); if ((*pattern) != (Image *) NULL) *pattern=DestroyImage(*pattern); image_info=AcquireImageInfo(); image_info->size=AcquireString(geometry); *pattern=AcquireImage(image_info,exception); image_info=DestroyImageInfo(image_info); (void) QueryColorCompliance("#000000ff",AllCompliance, &(*pattern)->background_color,exception); (void) SetImageBackgroundColor(*pattern,exception); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(), "begin pattern-path %s %s",name,geometry); clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); clone_info->fill_pattern=NewImageList(); clone_info->stroke_pattern=NewImageList(); (void) FormatLocaleString(property,MagickPathExtent,"%s-type",name); type=GetImageArtifact(image,property); if (type != (const char *) NULL) clone_info->gradient.type=(GradientType) ParseCommandOption( MagickGradientOptions,MagickFalse,type); (void) CloneString(&clone_info->primitive,path); status=DrawImage(*pattern,clone_info,exception); clone_info=DestroyDrawInfo(clone_info); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end pattern-path"); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D r a w P o l y g o n P r i m i t i v e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawPolygonPrimitive() draws a polygon on the image. % % The format of the DrawPolygonPrimitive method is: % % MagickBooleanType DrawPolygonPrimitive(Image *image, % const DrawInfo *draw_info,const PrimitiveInfo *primitive_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o primitive_info: Specifies a pointer to a PrimitiveInfo structure. % % o exception: return any errors or warnings in this structure. 
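%
%  This routine is internal; DrawPrimitive() hands it a traced coordinate
%  list terminated by UndefinedPrimitive.  A hedged sketch of a minimal
%  input (a filled triangle; the coordinates are arbitrary):
%
%      PrimitiveInfo
%        points[5];
%
%      (void) ResetMagickMemory(points,0,sizeof(points));
%      points[0].point.x=10.0; points[0].point.y=10.0;
%      points[1].point.x=90.0; points[1].point.y=10.0;
%      points[2].point.x=50.0; points[2].point.y=80.0;
%      points[3]=points[0];                     /* close the subpath */
%      points[0].primitive=PolygonPrimitive;
%      points[1].primitive=PolygonPrimitive;
%      points[2].primitive=PolygonPrimitive;
%      points[3].primitive=PolygonPrimitive;
%      points[0].coordinates=4;                 /* first element carries the count */
%      points[0].method=FloodfillMethod;        /* request a fill, as DrawImage() does */
%      points[4].primitive=UndefinedPrimitive;  /* terminates the list */
%      (void) DrawPolygonPrimitive(image,draw_info,points,exception);
%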
% */ static PolygonInfo **DestroyPolygonThreadSet(PolygonInfo **polygon_info) { register ssize_t i; assert(polygon_info != (PolygonInfo **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (polygon_info[i] != (PolygonInfo *) NULL) polygon_info[i]=DestroyPolygonInfo(polygon_info[i]); polygon_info=(PolygonInfo **) RelinquishMagickMemory(polygon_info); return(polygon_info); } static PolygonInfo **AcquirePolygonThreadSet( const PrimitiveInfo *primitive_info) { PathInfo *magick_restrict path_info; PolygonInfo **polygon_info; register ssize_t i; size_t number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); polygon_info=(PolygonInfo **) AcquireQuantumMemory(number_threads, sizeof(*polygon_info)); if (polygon_info == (PolygonInfo **) NULL) return((PolygonInfo **) NULL); (void) ResetMagickMemory(polygon_info,0,number_threads*sizeof(*polygon_info)); path_info=ConvertPrimitiveToPath(primitive_info); if (path_info == (PathInfo *) NULL) return(DestroyPolygonThreadSet(polygon_info)); for (i=0; i < (ssize_t) number_threads; i++) { polygon_info[i]=ConvertPathToPolygon(path_info); if (polygon_info[i] == (PolygonInfo *) NULL) return(DestroyPolygonThreadSet(polygon_info)); } path_info=(PathInfo *) RelinquishMagickMemory(path_info); return(polygon_info); } static double GetFillAlpha(PolygonInfo *polygon_info,const double mid, const MagickBooleanType fill,const FillRule fill_rule,const ssize_t x, const ssize_t y,double *stroke_alpha) { double alpha, beta, distance, subpath_alpha; PointInfo delta; register const PointInfo *q; register EdgeInfo *p; register ssize_t i; ssize_t j, winding_number; /* Compute fill & stroke opacity for this (x,y) point. */ *stroke_alpha=0.0; subpath_alpha=0.0; p=polygon_info->edges; for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++) { if ((double) y <= (p->bounds.y1-mid-0.5)) break; if ((double) y > (p->bounds.y2+mid+0.5)) { (void) DestroyEdge(polygon_info,(size_t) j); continue; } if (((double) x <= (p->bounds.x1-mid-0.5)) || ((double) x > (p->bounds.x2+mid+0.5))) continue; i=(ssize_t) MagickMax((double) p->highwater,1.0); for ( ; i < (ssize_t) p->number_points; i++) { if ((double) y <= (p->points[i-1].y-mid-0.5)) break; if ((double) y > (p->points[i].y+mid+0.5)) continue; if (p->scanline != (double) y) { p->scanline=(double) y; p->highwater=(size_t) i; } /* Compute distance between a point and an edge. */ q=p->points+i-1; delta.x=(q+1)->x-q->x; delta.y=(q+1)->y-q->y; beta=delta.x*(x-q->x)+delta.y*(y-q->y); if (beta < 0.0) { delta.x=(double) x-q->x; delta.y=(double) y-q->y; distance=delta.x*delta.x+delta.y*delta.y; } else { alpha=delta.x*delta.x+delta.y*delta.y; if (beta > alpha) { delta.x=(double) x-(q+1)->x; delta.y=(double) y-(q+1)->y; distance=delta.x*delta.x+delta.y*delta.y; } else { alpha=1.0/alpha; beta=delta.x*(y-q->y)-delta.y*(x-q->x); distance=alpha*beta*beta; } } /* Compute stroke & subpath opacity. 
*/ beta=0.0; if (p->ghostline == MagickFalse) { alpha=mid+0.5; if ((*stroke_alpha < 1.0) && (distance <= ((alpha+0.25)*(alpha+0.25)))) { alpha=mid-0.5; if (distance <= ((alpha+0.25)*(alpha+0.25))) *stroke_alpha=1.0; else { beta=1.0; if (fabs(distance-1.0) >= DrawEpsilon) beta=sqrt((double) distance); alpha=beta-mid-0.5; if (*stroke_alpha < ((alpha-0.25)*(alpha-0.25))) *stroke_alpha=(alpha-0.25)*(alpha-0.25); } } } if ((fill == MagickFalse) || (distance > 1.0) || (subpath_alpha >= 1.0)) continue; if (distance <= 0.0) { subpath_alpha=1.0; continue; } if (distance > 1.0) continue; if (fabs(beta) < DrawEpsilon) { beta=1.0; if (fabs(distance-1.0) >= DrawEpsilon) beta=sqrt(distance); } alpha=beta-1.0; if (subpath_alpha < (alpha*alpha)) subpath_alpha=alpha*alpha; } } /* Compute fill opacity. */ if (fill == MagickFalse) return(0.0); if (subpath_alpha >= 1.0) return(1.0); /* Determine winding number. */ winding_number=0; p=polygon_info->edges; for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++) { if ((double) y <= p->bounds.y1) break; if (((double) y > p->bounds.y2) || ((double) x <= p->bounds.x1)) continue; if ((double) x > p->bounds.x2) { winding_number+=p->direction ? 1 : -1; continue; } i=(ssize_t) MagickMax((double) p->highwater,1.0); for ( ; i < (ssize_t) p->number_points; i++) if ((double) y <= p->points[i].y) break; q=p->points+i-1; if ((((q+1)->x-q->x)*(y-q->y)) <= (((q+1)->y-q->y)*(x-q->x))) winding_number+=p->direction ? 1 : -1; } if (fill_rule != NonZeroRule) { if ((MagickAbsoluteValue(winding_number) & 0x01) != 0) return(1.0); } else if (MagickAbsoluteValue(winding_number) != 0) return(1.0); return(subpath_alpha); } static MagickBooleanType DrawPolygonPrimitive(Image *image, const DrawInfo *draw_info,const PrimitiveInfo *primitive_info, ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType fill, status; double mid; PolygonInfo **magick_restrict polygon_info; register EdgeInfo *p; register ssize_t i; SegmentInfo bounds; ssize_t start_y, stop_y, y; /* Compute bounding box. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(draw_info != (DrawInfo *) NULL); assert(draw_info->signature == MagickCoreSignature); assert(primitive_info != (PrimitiveInfo *) NULL); if (primitive_info->coordinates == 0) return(MagickTrue); polygon_info=AcquirePolygonThreadSet(primitive_info); if (polygon_info == (PolygonInfo **) NULL) return(MagickFalse); DisableMSCWarning(4127) if (0) DrawBoundingRectangles(image,draw_info,polygon_info[0],exception); RestoreMSCWarning if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-polygon"); fill=(primitive_info->method == FillToBorderMethod) || (primitive_info->method == FloodfillMethod) ? MagickTrue : MagickFalse; mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0; bounds=polygon_info[0]->edges[0].bounds; for (i=1; i < (ssize_t) polygon_info[0]->number_edges; i++) { p=polygon_info[0]->edges+i; if (p->bounds.x1 < bounds.x1) bounds.x1=p->bounds.x1; if (p->bounds.y1 < bounds.y1) bounds.y1=p->bounds.y1; if (p->bounds.x2 > bounds.x2) bounds.x2=p->bounds.x2; if (p->bounds.y2 > bounds.y2) bounds.y2=p->bounds.y2; } bounds.x1-=(mid+1.0); bounds.x1=bounds.x1 < 0.0 ? 0.0 : (size_t) ceil(bounds.x1-0.5) >= image->columns ? (double) image->columns-1 : bounds.x1; bounds.y1-=(mid+1.0); bounds.y1=bounds.y1 < 0.0 ? 
0.0 : (size_t) ceil(bounds.y1-0.5) >= image->rows ? (double) image->rows-1 : bounds.y1; bounds.x2+=(mid+1.0); bounds.x2=bounds.x2 < 0.0 ? 0.0 : (size_t) floor(bounds.x2+0.5) >= image->columns ? (double) image->columns-1 : bounds.x2; bounds.y2+=(mid+1.0); bounds.y2=bounds.y2 < 0.0 ? 0.0 : (size_t) floor(bounds.y2+0.5) >= image->rows ? (double) image->rows-1 : bounds.y2; status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); if ((primitive_info->coordinates == 1) || (polygon_info[0]->number_edges == 0)) { /* Draw point. */ start_y=(ssize_t) ceil(bounds.y1-0.5); stop_y=(ssize_t) floor(bounds.y2+0.5); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,1,1) #endif for (y=start_y; y <= stop_y; y++) { MagickBooleanType sync; PixelInfo pixel; register ssize_t x; register Quantum *magick_restrict q; ssize_t start_x, stop_x; if (status == MagickFalse) continue; start_x=(ssize_t) ceil(bounds.x1-0.5); stop_x=(ssize_t) floor(bounds.x2+0.5); x=start_x; q=GetCacheViewAuthenticPixels(image_view,x,y,(size_t) (stop_x-x+1),1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } GetPixelInfo(image,&pixel); for ( ; x <= stop_x; x++) { if ((x == (ssize_t) ceil(primitive_info->point.x-0.5)) && (y == (ssize_t) ceil(primitive_info->point.y-0.5))) { GetFillColor(draw_info,x-start_x,y-start_y,&pixel,exception); SetPixelViaPixelInfo(image,&pixel,q); } q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); polygon_info=DestroyPolygonThreadSet(polygon_info); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(), " end draw-polygon"); return(status); } /* Draw polygon or line. */ if (image->alpha_trait == UndefinedPixelTrait) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); start_y=(ssize_t) ceil(bounds.y1-0.5); stop_y=(ssize_t) floor(bounds.y2+0.5); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,1,1) #endif for (y=start_y; y <= stop_y; y++) { const int id = GetOpenMPThreadId(); double fill_alpha, stroke_alpha; PixelInfo fill_color, stroke_color; register Quantum *magick_restrict q; register ssize_t x; ssize_t start_x, stop_x; if (status == MagickFalse) continue; start_x=(ssize_t) ceil(bounds.x1-0.5); stop_x=(ssize_t) floor(bounds.x2+0.5); q=GetCacheViewAuthenticPixels(image_view,start_x,y,(size_t) (stop_x-start_x+ 1),1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=start_x; x <= stop_x; x++) { /* Fill and/or stroke. */ fill_alpha=GetFillAlpha(polygon_info[id],mid,fill,draw_info->fill_rule, x,y,&stroke_alpha); if (draw_info->stroke_antialias == MagickFalse) { fill_alpha=fill_alpha > 0.25 ? 1.0 : 0.0; stroke_alpha=stroke_alpha > 0.25 ? 
1.0 : 0.0; } GetFillColor(draw_info,x-start_x,y-start_y,&fill_color,exception); fill_alpha=fill_alpha*fill_color.alpha; CompositePixelOver(image,&fill_color,fill_alpha,q,(double) GetPixelAlpha(image,q),q); GetStrokeColor(draw_info,x-start_x,y-start_y,&stroke_color,exception); stroke_alpha=stroke_alpha*stroke_color.alpha; CompositePixelOver(image,&stroke_color,stroke_alpha,q,(double) GetPixelAlpha(image,q),q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); polygon_info=DestroyPolygonThreadSet(polygon_info); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-polygon"); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w P r i m i t i v e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawPrimitive() draws a primitive (line, rectangle, ellipse) on the image. % % The format of the DrawPrimitive method is: % % MagickBooleanType DrawPrimitive(Image *image,const DrawInfo *draw_info, % PrimitiveInfo *primitive_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o primitive_info: Specifies a pointer to a PrimitiveInfo structure. % % o exception: return any errors or warnings in this structure. % */ static void LogPrimitiveInfo(const PrimitiveInfo *primitive_info) { const char *methods[] = { "point", "replace", "floodfill", "filltoborder", "reset", "?" }; PointInfo p, q, point; register ssize_t i, x; ssize_t coordinates, y; x=(ssize_t) ceil(primitive_info->point.x-0.5); y=(ssize_t) ceil(primitive_info->point.y-0.5); switch (primitive_info->primitive) { case AlphaPrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "AlphaPrimitive %.20g,%.20g %s",(double) x,(double) y, methods[primitive_info->method]); return; } case ColorPrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "ColorPrimitive %.20g,%.20g %s",(double) x,(double) y, methods[primitive_info->method]); return; } case ImagePrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "ImagePrimitive %.20g,%.20g",(double) x,(double) y); return; } case PointPrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "PointPrimitive %.20g,%.20g %s",(double) x,(double) y, methods[primitive_info->method]); return; } case TextPrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "TextPrimitive %.20g,%.20g",(double) x,(double) y); return; } default: break; } coordinates=0; p=primitive_info[0].point; q.x=(-1.0); q.y=(-1.0); for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) { point=primitive_info[i].point; if (coordinates <= 0) { coordinates=(ssize_t) primitive_info[i].coordinates; (void) LogMagickEvent(DrawEvent,GetMagickModule(), " begin open (%.20g)",(double) coordinates); p=point; } point=primitive_info[i].point; if ((fabs(q.x-point.x) >= DrawEpsilon) || (fabs(q.y-point.y) >= DrawEpsilon)) (void) LogMagickEvent(DrawEvent,GetMagickModule(), " %.20g: %.18g,%.18g",(double) coordinates,point.x,point.y); else (void) LogMagickEvent(DrawEvent,GetMagickModule(), " %.20g: %g %g (duplicate)",(double) coordinates,point.x,point.y); q=point; coordinates--; if (coordinates > 0) continue; if ((fabs(p.x-point.x) >= DrawEpsilon) || (fabs(p.y-point.y) >= DrawEpsilon)) (void) LogMagickEvent(DrawEvent,GetMagickModule()," end last (%.20g)", (double) 
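%
% A minimal, illustrative call (image, draw_info, and exception are assumed
% to already exist; they are not part of this sketch):
%
%   PrimitiveInfo primitive_info[2];
%
%   (void) ResetMagickMemory(primitive_info,0,sizeof(primitive_info));
%   primitive_info[0].primitive=PointPrimitive;
%   primitive_info[0].point.x=10.0;
%   primitive_info[0].point.y=10.0;
%   primitive_info[0].coordinates=1;
%   primitive_info[1].primitive=UndefinedPrimitive;
%   (void) DrawPrimitive(image,draw_info,primitive_info,exception);
%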
coordinates); else (void) LogMagickEvent(DrawEvent,GetMagickModule()," end open (%.20g)", (double) coordinates); } } MagickExport MagickBooleanType DrawPrimitive(Image *image, const DrawInfo *draw_info,const PrimitiveInfo *primitive_info, ExceptionInfo *exception) { CacheView *image_view; MagickStatusType status; register ssize_t i, x; ssize_t y; if (image->debug != MagickFalse) { (void) LogMagickEvent(DrawEvent,GetMagickModule(), " begin draw-primitive"); (void) LogMagickEvent(DrawEvent,GetMagickModule(), " affine: %g,%g,%g,%g,%g,%g",draw_info->affine.sx, draw_info->affine.rx,draw_info->affine.ry,draw_info->affine.sy, draw_info->affine.tx,draw_info->affine.ty); } if ((IsGrayColorspace(image->colorspace) != MagickFalse) && ((IsPixelInfoGray(&draw_info->fill) == MagickFalse) || (IsPixelInfoGray(&draw_info->stroke) == MagickFalse))) (void) SetImageColorspace(image,sRGBColorspace,exception); status=MagickTrue; x=(ssize_t) ceil(primitive_info->point.x-0.5); y=(ssize_t) ceil(primitive_info->point.y-0.5); image_view=AcquireAuthenticCacheView(image,exception); switch (primitive_info->primitive) { case AlphaPrimitive: { if (image->alpha_trait == UndefinedPixelTrait) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); switch (primitive_info->method) { case PointMethod: default: { PixelInfo pixel; register Quantum *q; q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception); if (q == (Quantum *) NULL) break; GetFillColor(draw_info,x,y,&pixel,exception); SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q); (void) SyncCacheViewAuthenticPixels(image_view,exception); break; } case ReplaceMethod: { MagickBooleanType sync; PixelInfo pixel, target; (void) GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target, exception); GetPixelInfo(image,&pixel); for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image,q,&pixel); if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse) { q+=GetPixelChannels(image); continue; } GetFillColor(draw_info,x,y,&pixel,exception); SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) break; } break; } case FloodfillMethod: case FillToBorderMethod: { ChannelType channel_mask; PixelInfo target; (void) GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y, &target,exception); if (primitive_info->method == FillToBorderMethod) { target.red=(double) draw_info->border_color.red; target.green=(double) draw_info->border_color.green; target.blue=(double) draw_info->border_color.blue; } channel_mask=SetImageChannelMask(image,AlphaChannel); status&=FloodfillPaintImage(image,draw_info,&target,x,y, primitive_info->method == FloodfillMethod ? 
MagickFalse : MagickTrue,exception); (void) SetImageChannelMask(image,channel_mask); break; } case ResetMethod: { MagickBooleanType sync; PixelInfo pixel; for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { GetFillColor(draw_info,x,y,&pixel,exception); SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) break; } break; } } break; } case ColorPrimitive: { switch (primitive_info->method) { case PointMethod: default: { PixelInfo pixel; register Quantum *q; q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception); if (q == (Quantum *) NULL) break; GetPixelInfo(image,&pixel); GetFillColor(draw_info,x,y,&pixel,exception); SetPixelViaPixelInfo(image,&pixel,q); (void) SyncCacheViewAuthenticPixels(image_view,exception); break; } case ReplaceMethod: { MagickBooleanType sync; PixelInfo pixel, target; (void) GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target, exception); for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image,q,&pixel); if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse) { q+=GetPixelChannels(image); continue; } GetFillColor(draw_info,x,y,&pixel,exception); SetPixelViaPixelInfo(image,&pixel,q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) break; } break; } case FloodfillMethod: case FillToBorderMethod: { PixelInfo target; (void) GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y, &target,exception); if (primitive_info->method == FillToBorderMethod) { target.red=(double) draw_info->border_color.red; target.green=(double) draw_info->border_color.green; target.blue=(double) draw_info->border_color.blue; } status&=FloodfillPaintImage(image,draw_info,&target,x,y, primitive_info->method == FloodfillMethod ? 
MagickFalse : MagickTrue,exception); break; } case ResetMethod: { MagickBooleanType sync; PixelInfo pixel; GetPixelInfo(image,&pixel); for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { GetFillColor(draw_info,x,y,&pixel,exception); SetPixelViaPixelInfo(image,&pixel,q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) break; } break; } } break; } case ImagePrimitive: { AffineMatrix affine; char composite_geometry[MagickPathExtent]; Image *composite_image; ImageInfo *clone_info; RectangleInfo geometry; ssize_t x1, y1; if (primitive_info->text == (char *) NULL) break; clone_info=AcquireImageInfo(); if (LocaleNCompare(primitive_info->text,"data:",5) == 0) composite_image=ReadInlineImage(clone_info,primitive_info->text, exception); else { (void) CopyMagickString(clone_info->filename,primitive_info->text, MagickPathExtent); composite_image=ReadImage(clone_info,exception); } clone_info=DestroyImageInfo(clone_info); if (composite_image == (Image *) NULL) break; (void) SetImageProgressMonitor(composite_image,(MagickProgressMonitor) NULL,(void *) NULL); x1=(ssize_t) ceil(primitive_info[1].point.x-0.5); y1=(ssize_t) ceil(primitive_info[1].point.y-0.5); if (((x1 != 0L) && (x1 != (ssize_t) composite_image->columns)) || ((y1 != 0L) && (y1 != (ssize_t) composite_image->rows))) { /* Resize image. */ (void) FormatLocaleString(composite_geometry,MagickPathExtent, "%gx%g!",primitive_info[1].point.x,primitive_info[1].point.y); composite_image->filter=image->filter; (void) TransformImage(&composite_image,(char *) NULL, composite_geometry,exception); } if (composite_image->alpha_trait == UndefinedPixelTrait) (void) SetImageAlphaChannel(composite_image,OpaqueAlphaChannel, exception); if (draw_info->alpha != OpaqueAlpha) (void) SetImageAlpha(composite_image,draw_info->alpha,exception); SetGeometry(image,&geometry); image->gravity=draw_info->gravity; geometry.x=x; geometry.y=y; (void) FormatLocaleString(composite_geometry,MagickPathExtent, "%.20gx%.20g%+.20g%+.20g",(double) composite_image->columns,(double) composite_image->rows,(double) geometry.x,(double) geometry.y); (void) ParseGravityGeometry(image,composite_geometry,&geometry,exception); affine=draw_info->affine; affine.tx=(double) geometry.x; affine.ty=(double) geometry.y; composite_image->interpolate=image->interpolate; if (draw_info->compose == OverCompositeOp) (void) DrawAffineImage(image,composite_image,&affine,exception); else (void) CompositeImage(image,composite_image,draw_info->compose, MagickTrue,geometry.x,geometry.y,exception); composite_image=DestroyImage(composite_image); break; } case PointPrimitive: { PixelInfo fill_color; register Quantum *q; if ((y < 0) || (y >= (ssize_t) image->rows)) break; if ((x < 0) || (x >= (ssize_t) image->columns)) break; q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception); if (q == (Quantum *) NULL) break; GetFillColor(draw_info,x,y,&fill_color,exception); CompositePixelOver(image,&fill_color,(double) fill_color.alpha,q, (double) GetPixelAlpha(image,q),q); (void) SyncCacheViewAuthenticPixels(image_view,exception); break; } case TextPrimitive: { char geometry[MagickPathExtent]; DrawInfo *clone_info; if (primitive_info->text == (char *) NULL) break; clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); (void) CloneString(&clone_info->text,primitive_info->text); 
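      /*
        Text is not rasterized here: the primitive's point is formatted into
        the cloned draw info's geometry below and AnnotateImage() performs
        the actual text rendering.
      */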
(void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f", primitive_info->point.x,primitive_info->point.y); (void) CloneString(&clone_info->geometry,geometry); status&=AnnotateImage(image,clone_info,exception); clone_info=DestroyDrawInfo(clone_info); break; } default: { double mid, scale; DrawInfo *clone_info; if (IsEventLogging() != MagickFalse) LogPrimitiveInfo(primitive_info); scale=ExpandAffine(&draw_info->affine); if ((draw_info->dash_pattern != (double *) NULL) && (fabs(draw_info->dash_pattern[0]) >= DrawEpsilon) && (fabs(scale*draw_info->stroke_width) >= DrawEpsilon) && (draw_info->stroke.alpha != (Quantum) TransparentAlpha)) { /* Draw dash polygon. */ clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); clone_info->stroke_width=0.0; clone_info->stroke.alpha=(MagickRealType) TransparentAlpha; status&=DrawPolygonPrimitive(image,clone_info,primitive_info, exception); clone_info=DestroyDrawInfo(clone_info); (void) DrawDashPolygon(draw_info,primitive_info,image,exception); break; } mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0; if ((mid > 1.0) && ((draw_info->stroke.alpha != (Quantum) TransparentAlpha) || (draw_info->stroke_pattern != (Image *) NULL))) { MagickBooleanType closed_path; /* Draw strokes while respecting line cap/join attributes. */ for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ; closed_path= (fabs(primitive_info[i-1].point.x-primitive_info[0].point.x) < DrawEpsilon) && (fabs(primitive_info[i-1].point.y-primitive_info[0].point.y) < DrawEpsilon) ? MagickTrue : MagickFalse; i=(ssize_t) primitive_info[0].coordinates; if (((closed_path != MagickFalse) && (draw_info->linejoin == RoundJoin)) || (primitive_info[i].primitive != UndefinedPrimitive)) { (void) DrawPolygonPrimitive(image,draw_info,primitive_info, exception); break; } if (draw_info->linecap == RoundCap) { (void) DrawPolygonPrimitive(image,draw_info,primitive_info, exception); break; } clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); clone_info->stroke_width=0.0; clone_info->stroke.alpha=(MagickRealType) TransparentAlpha; status&=DrawPolygonPrimitive(image,clone_info,primitive_info, exception); clone_info=DestroyDrawInfo(clone_info); status&=DrawStrokePolygon(image,draw_info,primitive_info,exception); break; } status&=DrawPolygonPrimitive(image,draw_info,primitive_info,exception); break; } } image_view=DestroyCacheView(image_view); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-primitive"); return(status != 0 ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D r a w S t r o k e P o l y g o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawStrokePolygon() draws a stroked polygon (line, rectangle, ellipse) on % the image while respecting the line cap and join attributes. % % The format of the DrawStrokePolygon method is: % % MagickBooleanType DrawStrokePolygon(Image *image, % const DrawInfo *draw_info,const PrimitiveInfo *primitive_info) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o primitive_info: Specifies a pointer to a PrimitiveInfo structure. 
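%
% o exception: return any errors or warnings in this structure.
%
% For each subpath, TraceStrokePolygon() (later in this file) builds the
% outline polygon of the stroke from the stroke width, line join, and miter
% limit, and that outline is filled through DrawPolygonPrimitive() with the
% stroke color used as the fill; open subpaths drawn with a round line cap
% additionally receive a DrawRoundLinecap() at each end.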
% % */ static void DrawRoundLinecap(Image *image,const DrawInfo *draw_info, const PrimitiveInfo *primitive_info,ExceptionInfo *exception) { PrimitiveInfo linecap[5]; register ssize_t i; for (i=0; i < 4; i++) linecap[i]=(*primitive_info); linecap[0].coordinates=4; linecap[1].point.x+=2.0*DrawEpsilon; linecap[2].point.x+=2.0*DrawEpsilon; linecap[2].point.y+=2.0*DrawEpsilon; linecap[3].point.y+=2.0*DrawEpsilon; linecap[4].primitive=UndefinedPrimitive; (void) DrawPolygonPrimitive(image,draw_info,linecap,exception); } static MagickBooleanType DrawStrokePolygon(Image *image, const DrawInfo *draw_info,const PrimitiveInfo *primitive_info, ExceptionInfo *exception) { DrawInfo *clone_info; MagickBooleanType closed_path; MagickStatusType status; PrimitiveInfo *stroke_polygon; register const PrimitiveInfo *p, *q; /* Draw stroked polygon. */ if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(), " begin draw-stroke-polygon"); clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); clone_info->fill=draw_info->stroke; if (clone_info->fill_pattern != (Image *) NULL) clone_info->fill_pattern=DestroyImage(clone_info->fill_pattern); if (clone_info->stroke_pattern != (Image *) NULL) clone_info->fill_pattern=CloneImage(clone_info->stroke_pattern,0,0, MagickTrue,exception); clone_info->stroke.alpha=(MagickRealType) TransparentAlpha; clone_info->stroke_width=0.0; clone_info->fill_rule=NonZeroRule; status=MagickTrue; for (p=primitive_info; p->primitive != UndefinedPrimitive; p+=p->coordinates) { stroke_polygon=TraceStrokePolygon(draw_info,p); status&=DrawPolygonPrimitive(image,clone_info,stroke_polygon,exception); if (status == 0) break; stroke_polygon=(PrimitiveInfo *) RelinquishMagickMemory(stroke_polygon); q=p+p->coordinates-1; closed_path=(fabs(q->point.x-p->point.x) < DrawEpsilon) && (fabs(q->point.y-p->point.y) < DrawEpsilon) ? MagickTrue : MagickFalse; if ((draw_info->linecap == RoundCap) && (closed_path == MagickFalse)) { DrawRoundLinecap(image,draw_info,p,exception); DrawRoundLinecap(image,draw_info,q,exception); } } clone_info=DestroyDrawInfo(clone_info); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(), " end draw-stroke-polygon"); return(status != 0 ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t A f f i n e M a t r i x % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAffineMatrix() returns an AffineMatrix initialized to the identity % matrix. % % The format of the GetAffineMatrix method is: % % void GetAffineMatrix(AffineMatrix *affine_matrix) % % A description of each parameter follows: % % o affine_matrix: the affine matrix. % */ MagickExport void GetAffineMatrix(AffineMatrix *affine_matrix) { (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(affine_matrix != (AffineMatrix *) NULL); (void) ResetMagickMemory(affine_matrix,0,sizeof(*affine_matrix)); affine_matrix->sx=1.0; affine_matrix->sy=1.0; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t D r a w I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetDrawInfo() initializes draw_info to default values from image_info. % % The format of the GetDrawInfo method is: % % void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info) % % A description of each parameter follows: % % o image_info: the image info.. 
% % o draw_info: the draw info. % */ MagickExport void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info) { char *next_token; const char *option; ExceptionInfo *exception; ImageInfo *clone_info; /* Initialize draw attributes. */ (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(draw_info != (DrawInfo *) NULL); (void) ResetMagickMemory(draw_info,0,sizeof(*draw_info)); clone_info=CloneImageInfo(image_info); GetAffineMatrix(&draw_info->affine); exception=AcquireExceptionInfo(); (void) QueryColorCompliance("#000F",AllCompliance,&draw_info->fill, exception); (void) QueryColorCompliance("#0000",AllCompliance,&draw_info->stroke, exception); draw_info->stroke_width=1.0; draw_info->fill_rule=EvenOddRule; draw_info->alpha=OpaqueAlpha; draw_info->fill_alpha=OpaqueAlpha; draw_info->stroke_alpha=OpaqueAlpha; draw_info->linecap=ButtCap; draw_info->linejoin=MiterJoin; draw_info->miterlimit=10; draw_info->decorate=NoDecoration; draw_info->pointsize=12.0; draw_info->undercolor.alpha=(MagickRealType) TransparentAlpha; draw_info->compose=OverCompositeOp; draw_info->render=MagickTrue; draw_info->debug=IsEventLogging(); draw_info->stroke_antialias=clone_info->antialias; if (clone_info->font != (char *) NULL) draw_info->font=AcquireString(clone_info->font); if (clone_info->density != (char *) NULL) draw_info->density=AcquireString(clone_info->density); draw_info->text_antialias=clone_info->antialias; if (fabs(clone_info->pointsize) >= DrawEpsilon) draw_info->pointsize=clone_info->pointsize; draw_info->border_color=clone_info->border_color; if (clone_info->server_name != (char *) NULL) draw_info->server_name=AcquireString(clone_info->server_name); option=GetImageOption(clone_info,"direction"); if (option != (const char *) NULL) draw_info->direction=(DirectionType) ParseCommandOption( MagickDirectionOptions,MagickFalse,option); else draw_info->direction=UndefinedDirection; option=GetImageOption(clone_info,"encoding"); if (option != (const char *) NULL) (void) CloneString(&draw_info->encoding,option); option=GetImageOption(clone_info,"family"); if (option != (const char *) NULL) (void) CloneString(&draw_info->family,option); option=GetImageOption(clone_info,"fill"); if (option != (const char *) NULL) (void) QueryColorCompliance(option,AllCompliance,&draw_info->fill, exception); option=GetImageOption(clone_info,"gravity"); if (option != (const char *) NULL) draw_info->gravity=(GravityType) ParseCommandOption(MagickGravityOptions, MagickFalse,option); option=GetImageOption(clone_info,"interline-spacing"); if (option != (const char *) NULL) draw_info->interline_spacing=StringToDouble(option,&next_token); option=GetImageOption(clone_info,"interword-spacing"); if (option != (const char *) NULL) draw_info->interword_spacing=StringToDouble(option,&next_token); option=GetImageOption(clone_info,"kerning"); if (option != (const char *) NULL) draw_info->kerning=StringToDouble(option,&next_token); option=GetImageOption(clone_info,"stroke"); if (option != (const char *) NULL) (void) QueryColorCompliance(option,AllCompliance,&draw_info->stroke, exception); option=GetImageOption(clone_info,"strokewidth"); if (option != (const char *) NULL) draw_info->stroke_width=StringToDouble(option,&next_token); option=GetImageOption(clone_info,"style"); if (option != (const char *) NULL) draw_info->style=(StyleType) ParseCommandOption(MagickStyleOptions, MagickFalse,option); option=GetImageOption(clone_info,"undercolor"); if (option != (const char *) NULL) (void) 
QueryColorCompliance(option,AllCompliance,&draw_info->undercolor, exception); option=GetImageOption(clone_info,"weight"); if (option != (const char *) NULL) { ssize_t weight; weight=ParseCommandOption(MagickWeightOptions,MagickFalse,option); if (weight == -1) weight=(ssize_t) StringToUnsignedLong(option); draw_info->weight=(size_t) weight; } exception=DestroyExceptionInfo(exception); draw_info->signature=MagickCoreSignature; clone_info=DestroyImageInfo(clone_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + P e r m u t a t e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Permutate() returns the permuation of the (n,k). % % The format of the Permutate method is: % % void Permutate(ssize_t n,ssize_t k) % % A description of each parameter follows: % % o n: % % o k: % % */ static inline double Permutate(const ssize_t n,const ssize_t k) { double r; register ssize_t i; r=1.0; for (i=k+1; i <= n; i++) r*=i; for (i=1; i <= (n-k); i++) r/=i; return(r); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + T r a c e P r i m i t i v e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TracePrimitive is a collection of methods for generating graphic % primitives such as arcs, ellipses, paths, etc. % */ static void TraceArc(PrimitiveInfo *primitive_info,const PointInfo start, const PointInfo end,const PointInfo degrees) { PointInfo center, radii; center.x=0.5*(end.x+start.x); center.y=0.5*(end.y+start.y); radii.x=fabs(center.x-start.x); radii.y=fabs(center.y-start.y); TraceEllipse(primitive_info,center,radii,degrees); } static void TraceArcPath(PrimitiveInfo *primitive_info,const PointInfo start, const PointInfo end,const PointInfo arc,const double angle, const MagickBooleanType large_arc,const MagickBooleanType sweep) { double alpha, beta, delta, factor, gamma, theta; PointInfo center, points[3], radii; register double cosine, sine; register PrimitiveInfo *p; register ssize_t i; size_t arc_segments; if ((fabs(start.x-end.x) < DrawEpsilon) && (fabs(start.y-end.y) < DrawEpsilon)) { TracePoint(primitive_info,end); return; } radii.x=fabs(arc.x); radii.y=fabs(arc.y); if ((fabs(radii.x) < DrawEpsilon) || (fabs(radii.y) < DrawEpsilon)) { TraceLine(primitive_info,start,end); return; } cosine=cos(DegreesToRadians(fmod((double) angle,360.0))); sine=sin(DegreesToRadians(fmod((double) angle,360.0))); center.x=(double) (cosine*(end.x-start.x)/2+sine*(end.y-start.y)/2); center.y=(double) (cosine*(end.y-start.y)/2-sine*(end.x-start.x)/2); delta=(center.x*center.x)/(radii.x*radii.x)+(center.y*center.y)/ (radii.y*radii.y); if (delta < DrawEpsilon) { TraceLine(primitive_info,start,end); return; } if (delta > 1.0) { radii.x*=sqrt((double) delta); radii.y*=sqrt((double) delta); } points[0].x=(double) (cosine*start.x/radii.x+sine*start.y/radii.x); points[0].y=(double) (cosine*start.y/radii.y-sine*start.x/radii.y); points[1].x=(double) (cosine*end.x/radii.x+sine*end.y/radii.x); points[1].y=(double) (cosine*end.y/radii.y-sine*end.x/radii.y); alpha=points[1].x-points[0].x; beta=points[1].y-points[0].y; factor=PerceptibleReciprocal(alpha*alpha+beta*beta)-0.25; if (factor <= 0.0) factor=0.0; else { factor=sqrt((double) factor); if (sweep == large_arc) factor=(-factor); } center.x=(double) ((points[0].x+points[1].x)/2-factor*beta); center.y=(double) ((points[0].y+points[1].y)/2+factor*alpha); 
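  /*
    The SVG endpoint parameterization has now been reduced to a center and
    radii; the code below computes the start angle and sweep, splits the
    sweep into segments of at most a quarter turn, and approximates each
    segment with one cubic Bezier whose control points are offset by gamma.
  */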
alpha=atan2(points[0].y-center.y,points[0].x-center.x); theta=atan2(points[1].y-center.y,points[1].x-center.x)-alpha; if ((theta < 0.0) && (sweep != MagickFalse)) theta+=2.0*MagickPI; else if ((theta > 0.0) && (sweep == MagickFalse)) theta-=2.0*MagickPI; arc_segments=(size_t) ceil(fabs((double) (theta/(0.5*MagickPI+DrawEpsilon)))); p=primitive_info; for (i=0; i < (ssize_t) arc_segments; i++) { beta=0.5*((alpha+(i+1)*theta/arc_segments)-(alpha+i*theta/arc_segments)); gamma=(8.0/3.0)*sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))* sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))/ sin(fmod((double) beta,DegreesToRadians(360.0))); points[0].x=(double) (center.x+cos(fmod((double) (alpha+(double) i*theta/ arc_segments),DegreesToRadians(360.0)))-gamma*sin(fmod((double) (alpha+ (double) i*theta/arc_segments),DegreesToRadians(360.0)))); points[0].y=(double) (center.y+sin(fmod((double) (alpha+(double) i*theta/ arc_segments),DegreesToRadians(360.0)))+gamma*cos(fmod((double) (alpha+ (double) i*theta/arc_segments),DegreesToRadians(360.0)))); points[2].x=(double) (center.x+cos(fmod((double) (alpha+(double) (i+1)* theta/arc_segments),DegreesToRadians(360.0)))); points[2].y=(double) (center.y+sin(fmod((double) (alpha+(double) (i+1)* theta/arc_segments),DegreesToRadians(360.0)))); points[1].x=(double) (points[2].x+gamma*sin(fmod((double) (alpha+(double) (i+1)*theta/arc_segments),DegreesToRadians(360.0)))); points[1].y=(double) (points[2].y-gamma*cos(fmod((double) (alpha+(double) (i+1)*theta/arc_segments),DegreesToRadians(360.0)))); p->point.x=(p == primitive_info) ? start.x : (p-1)->point.x; p->point.y=(p == primitive_info) ? start.y : (p-1)->point.y; (p+1)->point.x=(double) (cosine*radii.x*points[0].x-sine*radii.y* points[0].y); (p+1)->point.y=(double) (sine*radii.x*points[0].x+cosine*radii.y* points[0].y); (p+2)->point.x=(double) (cosine*radii.x*points[1].x-sine*radii.y* points[1].y); (p+2)->point.y=(double) (sine*radii.x*points[1].x+cosine*radii.y* points[1].y); (p+3)->point.x=(double) (cosine*radii.x*points[2].x-sine*radii.y* points[2].y); (p+3)->point.y=(double) (sine*radii.x*points[2].x+cosine*radii.y* points[2].y); if (i == (ssize_t) (arc_segments-1)) (p+3)->point=end; TraceBezier(p,4); p+=p->coordinates; } primitive_info->coordinates=(size_t) (p-primitive_info); for (i=0; i < (ssize_t) primitive_info->coordinates; i++) { p->primitive=primitive_info->primitive; p--; } } static void TraceBezier(PrimitiveInfo *primitive_info, const size_t number_coordinates) { double alpha, *coefficients, weight; PointInfo end, point, *points; register PrimitiveInfo *p; register ssize_t i, j; size_t control_points, quantum; /* Allocate coeficients. */ quantum=number_coordinates; for (i=0; i < (ssize_t) number_coordinates; i++) { for (j=i+1; j < (ssize_t) number_coordinates; j++) { alpha=fabs(primitive_info[j].point.x-primitive_info[i].point.x); if (alpha > (double) quantum) quantum=(size_t) alpha; alpha=fabs(primitive_info[j].point.y-primitive_info[i].point.y); if (alpha > (double) quantum) quantum=(size_t) alpha; } } quantum=(size_t) MagickMin((double) quantum/number_coordinates, (double) BezierQuantum); control_points=quantum*number_coordinates; coefficients=(double *) AcquireQuantumMemory((size_t) number_coordinates,sizeof(*coefficients)); points=(PointInfo *) AcquireQuantumMemory((size_t) control_points, sizeof(*points)); if ((coefficients == (double *) NULL) || (points == (PointInfo *) NULL)) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); /* Compute bezier points. 
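    Each output point is the Bernstein-form evaluation of the control points,

      B(t) = sum_j C(N-1,j) * t^j * (1-t)^(N-1-j) * P_j,  N = number_coordinates,

    where the binomial coefficients come from Permutate() and the running
    'alpha' term supplies the t^j*(1-t)^(N-1-j) weight as j advances.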
*/ end=primitive_info[number_coordinates-1].point; for (i=0; i < (ssize_t) number_coordinates; i++) coefficients[i]=Permutate((ssize_t) number_coordinates-1,i); weight=0.0; for (i=0; i < (ssize_t) control_points; i++) { p=primitive_info; point.x=0.0; point.y=0.0; alpha=pow((double) (1.0-weight),(double) number_coordinates-1.0); for (j=0; j < (ssize_t) number_coordinates; j++) { point.x+=alpha*coefficients[j]*p->point.x; point.y+=alpha*coefficients[j]*p->point.y; alpha*=weight/(1.0-weight); p++; } points[i]=point; weight+=1.0/control_points; } /* Bezier curves are just short segmented polys. */ p=primitive_info; for (i=0; i < (ssize_t) control_points; i++) { TracePoint(p,points[i]); p+=p->coordinates; } TracePoint(p,end); p+=p->coordinates; primitive_info->coordinates=(size_t) (p-primitive_info); for (i=0; i < (ssize_t) primitive_info->coordinates; i++) { p->primitive=primitive_info->primitive; p--; } points=(PointInfo *) RelinquishMagickMemory(points); coefficients=(double *) RelinquishMagickMemory(coefficients); } static void TraceCircle(PrimitiveInfo *primitive_info,const PointInfo start, const PointInfo end) { double alpha, beta, radius; PointInfo offset, degrees; alpha=end.x-start.x; beta=end.y-start.y; radius=hypot((double) alpha,(double) beta); offset.x=(double) radius; offset.y=(double) radius; degrees.x=0.0; degrees.y=360.0; TraceEllipse(primitive_info,start,offset,degrees); } static void TraceEllipse(PrimitiveInfo *primitive_info,const PointInfo start, const PointInfo stop,const PointInfo degrees) { double delta, step, y; PointInfo angle, point; register PrimitiveInfo *p; register ssize_t i; /* Ellipses are just short segmented polys. */ if ((fabs(stop.x) < DrawEpsilon) && (fabs(stop.y) < DrawEpsilon)) { TracePoint(primitive_info,start); return; } delta=2.0/MagickMax(stop.x,stop.y); step=MagickPI/8.0; if ((delta >= 0.0) && (delta < (MagickPI/8.0))) step=MagickPI/(4*(MagickPI/delta/2+0.5)); angle.x=DegreesToRadians(degrees.x); y=degrees.y; while (y < degrees.x) y+=360.0; angle.y=DegreesToRadians(y); for (p=primitive_info; angle.x < angle.y; angle.x+=step) { point.x=cos(fmod(angle.x,DegreesToRadians(360.0)))*stop.x+start.x; point.y=sin(fmod(angle.x,DegreesToRadians(360.0)))*stop.y+start.y; TracePoint(p,point); p+=p->coordinates; } point.x=cos(fmod(angle.y,DegreesToRadians(360.0)))*stop.x+start.x; point.y=sin(fmod(angle.y,DegreesToRadians(360.0)))*stop.y+start.y; TracePoint(p,point); p+=p->coordinates; primitive_info->coordinates=(size_t) (p-primitive_info); for (i=0; i < (ssize_t) primitive_info->coordinates; i++) { p->primitive=primitive_info->primitive; p--; } } static void TraceLine(PrimitiveInfo *primitive_info,const PointInfo start, const PointInfo end) { TracePoint(primitive_info,start); if ((fabs(start.x-end.x) < DrawEpsilon) && (fabs(start.y-end.y) < DrawEpsilon)) { primitive_info->primitive=PointPrimitive; primitive_info->coordinates=1; return; } TracePoint(primitive_info+1,end); (primitive_info+1)->primitive=primitive_info->primitive; primitive_info->coordinates=2; } static size_t TracePath(PrimitiveInfo *primitive_info,const char *path) { char *next_token, token[MagickPathExtent]; const char *p; double x, y; int attribute, last_attribute; PointInfo end = {0.0, 0.0}, points[4] = { {0.0,0.0}, {0.0,0.0}, {0.0,0.0}, {0.0,0.0} }, point = {0.0, 0.0}, start = {0.0, 0.0}; PrimitiveType primitive_type; register PrimitiveInfo *q; register ssize_t i; size_t number_coordinates, z_count; attribute=0; number_coordinates=0; z_count=0; primitive_type=primitive_info->primitive; 
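  /*
    Parse an SVG-style path string: each command letter (M, L, H, V, C, S,
    Q, T, A, Z and their relative lower-case forms) consumes its coordinates
    and emits points through the Trace* helpers, while 'z'/'Z' closes the
    current subpath back to 'start'.
  */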
q=primitive_info; for (p=path; *p != '\0'; ) { while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == '\0') break; last_attribute=attribute; attribute=(int) (*p++); switch (attribute) { case 'a': case 'A': { double angle; MagickBooleanType large_arc, sweep; PointInfo arc; /* Compute arc points. */ do { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); arc.x=StringToDouble(token,&next_token); GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); arc.y=StringToDouble(token,&next_token); GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); angle=StringToDouble(token,&next_token); GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); large_arc=StringToLong(token) != 0 ? MagickTrue : MagickFalse; GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); sweep=StringToLong(token) != 0 ? MagickTrue : MagickFalse; GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); x=StringToDouble(token,&next_token); GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); y=StringToDouble(token,&next_token); end.x=(double) (attribute == (int) 'A' ? x : point.x+x); end.y=(double) (attribute == (int) 'A' ? y : point.y+y); TraceArcPath(q,point,end,arc,angle,large_arc,sweep); q+=q->coordinates; point=end; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'c': case 'C': { /* Compute bezier points. */ do { points[0]=point; for (i=1; i < 4; i++) { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); x=StringToDouble(token,&next_token); GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); y=StringToDouble(token,&next_token); end.x=(double) (attribute == (int) 'C' ? x : point.x+x); end.y=(double) (attribute == (int) 'C' ? y : point.y+y); points[i]=end; } for (i=0; i < 4; i++) (q+i)->point=points[i]; TraceBezier(q,4); q+=q->coordinates; point=end; } while (IsPoint(p) != MagickFalse); break; } case 'H': case 'h': { do { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); x=StringToDouble(token,&next_token); point.x=(double) (attribute == (int) 'H' ? x: point.x+x); TracePoint(q,point); q+=q->coordinates; } while (IsPoint(p) != MagickFalse); break; } case 'l': case 'L': { do { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); x=StringToDouble(token,&next_token); GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); y=StringToDouble(token,&next_token); point.x=(double) (attribute == (int) 'L' ? x : point.x+x); point.y=(double) (attribute == (int) 'L' ? 
y : point.y+y); TracePoint(q,point); q+=q->coordinates; } while (IsPoint(p) != MagickFalse); break; } case 'M': case 'm': { if (q != primitive_info) { primitive_info->coordinates=(size_t) (q-primitive_info); number_coordinates+=primitive_info->coordinates; primitive_info=q; } i=0; do { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); x=StringToDouble(token,&next_token); GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); y=StringToDouble(token,&next_token); point.x=(double) (attribute == (int) 'M' ? x : point.x+x); point.y=(double) (attribute == (int) 'M' ? y : point.y+y); if (i == 0) start=point; i++; TracePoint(q,point); q+=q->coordinates; if ((i != 0) && (attribute == (int) 'M')) { TracePoint(q,point); q+=q->coordinates; } } while (IsPoint(p) != MagickFalse); break; } case 'q': case 'Q': { /* Compute bezier points. */ do { points[0]=point; for (i=1; i < 3; i++) { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); x=StringToDouble(token,&next_token); GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); y=StringToDouble(token,&next_token); if (*p == ',') p++; end.x=(double) (attribute == (int) 'Q' ? x : point.x+x); end.y=(double) (attribute == (int) 'Q' ? y : point.y+y); points[i]=end; } for (i=0; i < 3; i++) (q+i)->point=points[i]; TraceBezier(q,3); q+=q->coordinates; point=end; } while (IsPoint(p) != MagickFalse); break; } case 's': case 'S': { /* Compute bezier points. */ do { points[0]=points[3]; points[1].x=2.0*points[3].x-points[2].x; points[1].y=2.0*points[3].y-points[2].y; for (i=2; i < 4; i++) { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); x=StringToDouble(token,&next_token); GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); y=StringToDouble(token,&next_token); if (*p == ',') p++; end.x=(double) (attribute == (int) 'S' ? x : point.x+x); end.y=(double) (attribute == (int) 'S' ? y : point.y+y); points[i]=end; } if (strchr("CcSs",last_attribute) == (char *) NULL) { points[0]=point; points[1]=point; } for (i=0; i < 4; i++) (q+i)->point=points[i]; TraceBezier(q,4); q+=q->coordinates; point=end; } while (IsPoint(p) != MagickFalse); break; } case 't': case 'T': { /* Compute bezier points. */ do { points[0]=points[2]; points[1].x=2.0*points[2].x-points[1].x; points[1].y=2.0*points[2].y-points[1].y; for (i=2; i < 3; i++) { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); x=StringToDouble(token,&next_token); GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); y=StringToDouble(token,&next_token); end.x=(double) (attribute == (int) 'T' ? x : point.x+x); end.y=(double) (attribute == (int) 'T' ? y : point.y+y); points[i]=end; } if (strchr("QqTt",last_attribute) == (char *) NULL) { points[0]=point; points[1]=point; } for (i=0; i < 3; i++) (q+i)->point=points[i]; TraceBezier(q,3); q+=q->coordinates; point=end; } while (IsPoint(p) != MagickFalse); break; } case 'v': case 'V': { do { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); y=StringToDouble(token,&next_token); point.y=(double) (attribute == (int) 'V' ? 
y : point.y+y); TracePoint(q,point); q+=q->coordinates; } while (IsPoint(p) != MagickFalse); break; } case 'z': case 'Z': { point=start; TracePoint(q,point); q+=q->coordinates; primitive_info->coordinates=(size_t) (q-primitive_info); number_coordinates+=primitive_info->coordinates; primitive_info=q; z_count++; break; } default: { if (isalpha((int) ((unsigned char) attribute)) != 0) (void) FormatLocaleFile(stderr,"attribute not recognized: %c\n", attribute); break; } } } primitive_info->coordinates=(size_t) (q-primitive_info); number_coordinates+=primitive_info->coordinates; for (i=0; i < (ssize_t) number_coordinates; i++) { q--; q->primitive=primitive_type; if (z_count > 1) q->method=FillToBorderMethod; } q=primitive_info; return(number_coordinates); } static void TraceRectangle(PrimitiveInfo *primitive_info,const PointInfo start, const PointInfo end) { PointInfo point; register PrimitiveInfo *p; register ssize_t i; p=primitive_info; TracePoint(p,start); p+=p->coordinates; point.x=start.x; point.y=end.y; TracePoint(p,point); p+=p->coordinates; TracePoint(p,end); p+=p->coordinates; point.x=end.x; point.y=start.y; TracePoint(p,point); p+=p->coordinates; TracePoint(p,start); p+=p->coordinates; primitive_info->coordinates=(size_t) (p-primitive_info); for (i=0; i < (ssize_t) primitive_info->coordinates; i++) { p->primitive=primitive_info->primitive; p--; } } static void TraceRoundRectangle(PrimitiveInfo *primitive_info, const PointInfo start,const PointInfo end,PointInfo arc) { PointInfo degrees, offset, point; register PrimitiveInfo *p; register ssize_t i; p=primitive_info; offset.x=fabs(end.x-start.x); offset.y=fabs(end.y-start.y); if (arc.x > (0.5*offset.x)) arc.x=0.5*offset.x; if (arc.y > (0.5*offset.y)) arc.y=0.5*offset.y; point.x=start.x+offset.x-arc.x; point.y=start.y+arc.y; degrees.x=270.0; degrees.y=360.0; TraceEllipse(p,point,arc,degrees); p+=p->coordinates; point.x=start.x+offset.x-arc.x; point.y=start.y+offset.y-arc.y; degrees.x=0.0; degrees.y=90.0; TraceEllipse(p,point,arc,degrees); p+=p->coordinates; point.x=start.x+arc.x; point.y=start.y+offset.y-arc.y; degrees.x=90.0; degrees.y=180.0; TraceEllipse(p,point,arc,degrees); p+=p->coordinates; point.x=start.x+arc.x; point.y=start.y+arc.y; degrees.x=180.0; degrees.y=270.0; TraceEllipse(p,point,arc,degrees); p+=p->coordinates; TracePoint(p,primitive_info->point); p+=p->coordinates; primitive_info->coordinates=(size_t) (p-primitive_info); for (i=0; i < (ssize_t) primitive_info->coordinates; i++) { p->primitive=primitive_info->primitive; p--; } } static void TraceSquareLinecap(PrimitiveInfo *primitive_info, const size_t number_vertices,const double offset) { double distance; register double dx, dy; register ssize_t i; ssize_t j; dx=0.0; dy=0.0; for (i=1; i < (ssize_t) number_vertices; i++) { dx=primitive_info[0].point.x-primitive_info[i].point.x; dy=primitive_info[0].point.y-primitive_info[i].point.y; if ((fabs((double) dx) >= DrawEpsilon) || (fabs((double) dy) >= DrawEpsilon)) break; } if (i == (ssize_t) number_vertices) i=(ssize_t) number_vertices-1L; distance=hypot((double) dx,(double) dy); primitive_info[0].point.x=(double) (primitive_info[i].point.x+ dx*(distance+offset)/distance); primitive_info[0].point.y=(double) (primitive_info[i].point.y+ dy*(distance+offset)/distance); for (j=(ssize_t) number_vertices-2; j >= 0; j--) { dx=primitive_info[number_vertices-1].point.x-primitive_info[j].point.x; dy=primitive_info[number_vertices-1].point.y-primitive_info[j].point.y; if ((fabs((double) dx) >= DrawEpsilon) || (fabs((double) dy) >= 
DrawEpsilon)) break; } distance=hypot((double) dx,(double) dy); primitive_info[number_vertices-1].point.x=(double) (primitive_info[j].point.x+ dx*(distance+offset)/distance); primitive_info[number_vertices-1].point.y=(double) (primitive_info[j].point.y+ dy*(distance+offset)/distance); } static PrimitiveInfo *TraceStrokePolygon(const DrawInfo *draw_info, const PrimitiveInfo *primitive_info) { typedef struct _LineSegment { double p, q; } LineSegment; double delta_theta, dot_product, mid, miterlimit; LineSegment dx, dy, inverse_slope, slope, theta; MagickBooleanType closed_path; PointInfo box_p[5], box_q[5], center, offset, *path_p, *path_q; PrimitiveInfo *polygon_primitive, *stroke_polygon; register ssize_t i; size_t arc_segments, max_strokes, number_vertices; ssize_t j, n, p, q; /* Allocate paths. */ number_vertices=primitive_info->coordinates; max_strokes=2*number_vertices+6*BezierQuantum+360; path_p=(PointInfo *) AcquireQuantumMemory((size_t) max_strokes, sizeof(*path_p)); path_q=(PointInfo *) AcquireQuantumMemory((size_t) max_strokes, sizeof(*path_q)); polygon_primitive=(PrimitiveInfo *) AcquireQuantumMemory((size_t) number_vertices+2UL,sizeof(*polygon_primitive)); if ((path_p == (PointInfo *) NULL) || (path_q == (PointInfo *) NULL) || (polygon_primitive == (PrimitiveInfo *) NULL)) return((PrimitiveInfo *) NULL); (void) CopyMagickMemory(polygon_primitive,primitive_info,(size_t) number_vertices*sizeof(*polygon_primitive)); closed_path= (fabs(primitive_info[number_vertices-1].point.x-primitive_info[0].point.x) < DrawEpsilon) && (fabs(primitive_info[number_vertices-1].point.y-primitive_info[0].point.y) < DrawEpsilon) ? MagickTrue : MagickFalse; if (((draw_info->linejoin == RoundJoin) || (draw_info->linejoin == MiterJoin)) && (closed_path != MagickFalse)) { polygon_primitive[number_vertices]=primitive_info[1]; number_vertices++; } polygon_primitive[number_vertices].primitive=UndefinedPrimitive; /* Compute the slope for the first line segment, p. */ dx.p=0.0; dy.p=0.0; for (n=1; n < (ssize_t) number_vertices; n++) { dx.p=polygon_primitive[n].point.x-polygon_primitive[0].point.x; dy.p=polygon_primitive[n].point.y-polygon_primitive[0].point.y; if ((fabs(dx.p) >= DrawEpsilon) || (fabs(dy.p) >= DrawEpsilon)) break; } if (n == (ssize_t) number_vertices) n=(ssize_t) number_vertices-1L; slope.p=0.0; inverse_slope.p=0.0; if (fabs(dx.p) < DrawEpsilon) { if (dx.p >= 0.0) slope.p=dy.p < 0.0 ? -1.0/DrawEpsilon : 1.0/DrawEpsilon; else slope.p=dy.p < 0.0 ? 1.0/DrawEpsilon : -1.0/DrawEpsilon; } else if (fabs(dy.p) < DrawEpsilon) { if (dy.p >= 0.0) inverse_slope.p=dx.p < 0.0 ? -1.0/DrawEpsilon : 1.0/DrawEpsilon; else inverse_slope.p=dx.p < 0.0 ? 
1.0/DrawEpsilon : -1.0/DrawEpsilon; } else { slope.p=dy.p/dx.p; inverse_slope.p=(-1.0/slope.p); } mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0; miterlimit=(double) (draw_info->miterlimit*draw_info->miterlimit*mid*mid); if ((draw_info->linecap == SquareCap) && (closed_path == MagickFalse)) TraceSquareLinecap(polygon_primitive,number_vertices,mid); offset.x=sqrt((double) (mid*mid/(inverse_slope.p*inverse_slope.p+1.0))); offset.y=(double) (offset.x*inverse_slope.p); if ((dy.p*offset.x-dx.p*offset.y) > 0.0) { box_p[0].x=polygon_primitive[0].point.x-offset.x; box_p[0].y=polygon_primitive[0].point.y-offset.x*inverse_slope.p; box_p[1].x=polygon_primitive[n].point.x-offset.x; box_p[1].y=polygon_primitive[n].point.y-offset.x*inverse_slope.p; box_q[0].x=polygon_primitive[0].point.x+offset.x; box_q[0].y=polygon_primitive[0].point.y+offset.x*inverse_slope.p; box_q[1].x=polygon_primitive[n].point.x+offset.x; box_q[1].y=polygon_primitive[n].point.y+offset.x*inverse_slope.p; } else { box_p[0].x=polygon_primitive[0].point.x+offset.x; box_p[0].y=polygon_primitive[0].point.y+offset.y; box_p[1].x=polygon_primitive[n].point.x+offset.x; box_p[1].y=polygon_primitive[n].point.y+offset.y; box_q[0].x=polygon_primitive[0].point.x-offset.x; box_q[0].y=polygon_primitive[0].point.y-offset.y; box_q[1].x=polygon_primitive[n].point.x-offset.x; box_q[1].y=polygon_primitive[n].point.y-offset.y; } /* Create strokes for the line join attribute: bevel, miter, round. */ p=0; q=0; path_q[p++]=box_q[0]; path_p[q++]=box_p[0]; for (i=(ssize_t) n+1; i < (ssize_t) number_vertices; i++) { /* Compute the slope for this line segment, q. */ dx.q=polygon_primitive[i].point.x-polygon_primitive[n].point.x; dy.q=polygon_primitive[i].point.y-polygon_primitive[n].point.y; dot_product=dx.q*dx.q+dy.q*dy.q; if (dot_product < 0.25) continue; slope.q=0.0; inverse_slope.q=0.0; if (fabs(dx.q) < DrawEpsilon) { if (dx.q >= 0.0) slope.q=dy.q < 0.0 ? -1.0/DrawEpsilon : 1.0/DrawEpsilon; else slope.q=dy.q < 0.0 ? 1.0/DrawEpsilon : -1.0/DrawEpsilon; } else if (fabs(dy.q) < DrawEpsilon) { if (dy.q >= 0.0) inverse_slope.q=dx.q < 0.0 ? -1.0/DrawEpsilon : 1.0/DrawEpsilon; else inverse_slope.q=dx.q < 0.0 ? 
1.0/DrawEpsilon : -1.0/DrawEpsilon; } else { slope.q=dy.q/dx.q; inverse_slope.q=(-1.0/slope.q); } offset.x=sqrt((double) (mid*mid/(inverse_slope.q*inverse_slope.q+1.0))); offset.y=(double) (offset.x*inverse_slope.q); dot_product=dy.q*offset.x-dx.q*offset.y; if (dot_product > 0.0) { box_p[2].x=polygon_primitive[n].point.x-offset.x; box_p[2].y=polygon_primitive[n].point.y-offset.y; box_p[3].x=polygon_primitive[i].point.x-offset.x; box_p[3].y=polygon_primitive[i].point.y-offset.y; box_q[2].x=polygon_primitive[n].point.x+offset.x; box_q[2].y=polygon_primitive[n].point.y+offset.y; box_q[3].x=polygon_primitive[i].point.x+offset.x; box_q[3].y=polygon_primitive[i].point.y+offset.y; } else { box_p[2].x=polygon_primitive[n].point.x+offset.x; box_p[2].y=polygon_primitive[n].point.y+offset.y; box_p[3].x=polygon_primitive[i].point.x+offset.x; box_p[3].y=polygon_primitive[i].point.y+offset.y; box_q[2].x=polygon_primitive[n].point.x-offset.x; box_q[2].y=polygon_primitive[n].point.y-offset.y; box_q[3].x=polygon_primitive[i].point.x-offset.x; box_q[3].y=polygon_primitive[i].point.y-offset.y; } if (fabs((double) (slope.p-slope.q)) < DrawEpsilon) { box_p[4]=box_p[1]; box_q[4]=box_q[1]; } else { box_p[4].x=(double) ((slope.p*box_p[0].x-box_p[0].y-slope.q*box_p[3].x+ box_p[3].y)/(slope.p-slope.q)); box_p[4].y=(double) (slope.p*(box_p[4].x-box_p[0].x)+box_p[0].y); box_q[4].x=(double) ((slope.p*box_q[0].x-box_q[0].y-slope.q*box_q[3].x+ box_q[3].y)/(slope.p-slope.q)); box_q[4].y=(double) (slope.p*(box_q[4].x-box_q[0].x)+box_q[0].y); } if (q >= (ssize_t) (max_strokes-6*BezierQuantum-360)) { if (~max_strokes < (6*BezierQuantum+360)) { path_p=(PointInfo *) RelinquishMagickMemory(path_p); path_q=(PointInfo *) RelinquishMagickMemory(path_q); } else { max_strokes+=6*BezierQuantum+360; path_p=(PointInfo *) ResizeQuantumMemory(path_p,max_strokes, sizeof(*path_p)); path_q=(PointInfo *) ResizeQuantumMemory(path_q,max_strokes, sizeof(*path_q)); } if ((path_p == (PointInfo *) NULL) || (path_q == (PointInfo *) NULL)) { if (path_p != (PointInfo *) NULL) path_p=(PointInfo *) RelinquishMagickMemory(path_p); if (path_q != (PointInfo *) NULL) path_q=(PointInfo *) RelinquishMagickMemory(path_q); polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive); return((PrimitiveInfo *) NULL); } } dot_product=dx.q*dy.p-dx.p*dy.q; if (dot_product <= 0.0) switch (draw_info->linejoin) { case BevelJoin: { path_q[q++]=box_q[1]; path_q[q++]=box_q[2]; dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) path_p[p++]=box_p[4]; else { path_p[p++]=box_p[1]; path_p[p++]=box_p[2]; } break; } case MiterJoin: { dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) { path_q[q++]=box_q[4]; path_p[p++]=box_p[4]; } else { path_q[q++]=box_q[1]; path_q[q++]=box_q[2]; path_p[p++]=box_p[1]; path_p[p++]=box_p[2]; } break; } case RoundJoin: { dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) path_p[p++]=box_p[4]; else { path_p[p++]=box_p[1]; path_p[p++]=box_p[2]; } center=polygon_primitive[n].point; theta.p=atan2(box_q[1].y-center.y,box_q[1].x-center.x); theta.q=atan2(box_q[2].y-center.y,box_q[2].x-center.x); if (theta.q < theta.p) theta.q+=2.0*MagickPI; arc_segments=(size_t) ceil((double) ((theta.q-theta.p)/ (2.0*sqrt((double) (1.0/mid))))); path_q[q].x=box_q[1].x; 
path_q[q].y=box_q[1].y; q++; for (j=1; j < (ssize_t) arc_segments; j++) { delta_theta=(double) (j*(theta.q-theta.p)/arc_segments); path_q[q].x=(double) (center.x+mid*cos(fmod((double) (theta.p+delta_theta),DegreesToRadians(360.0)))); path_q[q].y=(double) (center.y+mid*sin(fmod((double) (theta.p+delta_theta),DegreesToRadians(360.0)))); q++; } path_q[q++]=box_q[2]; break; } default: break; } else switch (draw_info->linejoin) { case BevelJoin: { path_p[p++]=box_p[1]; path_p[p++]=box_p[2]; dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) path_q[q++]=box_q[4]; else { path_q[q++]=box_q[1]; path_q[q++]=box_q[2]; } break; } case MiterJoin: { dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) { path_q[q++]=box_q[4]; path_p[p++]=box_p[4]; } else { path_q[q++]=box_q[1]; path_q[q++]=box_q[2]; path_p[p++]=box_p[1]; path_p[p++]=box_p[2]; } break; } case RoundJoin: { dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) path_q[q++]=box_q[4]; else { path_q[q++]=box_q[1]; path_q[q++]=box_q[2]; } center=polygon_primitive[n].point; theta.p=atan2(box_p[1].y-center.y,box_p[1].x-center.x); theta.q=atan2(box_p[2].y-center.y,box_p[2].x-center.x); if (theta.p < theta.q) theta.p+=2.0*MagickPI; arc_segments=(size_t) ceil((double) ((theta.p-theta.q)/ (2.0*sqrt((double) (1.0/mid))))); path_p[p++]=box_p[1]; for (j=1; j < (ssize_t) arc_segments; j++) { delta_theta=(double) (j*(theta.q-theta.p)/arc_segments); path_p[p].x=(double) (center.x+mid*cos(fmod((double) (theta.p+delta_theta),DegreesToRadians(360.0)))); path_p[p].y=(double) (center.y+mid*sin(fmod((double) (theta.p+delta_theta),DegreesToRadians(360.0)))); p++; } path_p[p++]=box_p[2]; break; } default: break; } slope.p=slope.q; inverse_slope.p=inverse_slope.q; box_p[0]=box_p[2]; box_p[1]=box_p[3]; box_q[0]=box_q[2]; box_q[1]=box_q[3]; dx.p=dx.q; dy.p=dy.q; n=i; } path_p[p++]=box_p[1]; path_q[q++]=box_q[1]; /* Trace stroked polygon. */ stroke_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t) (p+q+2UL*closed_path+2UL),sizeof(*stroke_polygon)); if (stroke_polygon != (PrimitiveInfo *) NULL) { for (i=0; i < (ssize_t) p; i++) { stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=path_p[i]; } if (closed_path != MagickFalse) { stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_polygon[0].point; i++; } for ( ; i < (ssize_t) (p+q+closed_path); i++) { stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=path_q[p+q+closed_path-(i+1)]; } if (closed_path != MagickFalse) { stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_polygon[p+closed_path].point; i++; } stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_polygon[0].point; i++; stroke_polygon[i].primitive=UndefinedPrimitive; stroke_polygon[0].coordinates=(size_t) (p+q+2*closed_path+1); } path_p=(PointInfo *) RelinquishMagickMemory(path_p); path_q=(PointInfo *) RelinquishMagickMemory(path_q); polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive); return(stroke_polygon); }
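/*
  Illustrative sketch only (not part of the original source): the stroke
  outline above displaces each vertex perpendicular to its segment by half
  the stroke width.  The helper below, whose name is hypothetical, restates
  that offset computation in isolation.
*/
static inline PointInfo StrokePerpendicularOffset(const double inverse_slope,
  const double mid)
{
  PointInfo
    offset;

  /* mid is ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0 */
  offset.x=sqrt(mid*mid/(inverse_slope*inverse_slope+1.0));
  offset.y=offset.x*inverse_slope;
  return(offset);
}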
/* * Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/annotate.h" #include "MagickCore/artifact.h" #include "MagickCore/blob.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/color.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/constitute.h" #include "MagickCore/draw.h" #include "MagickCore/draw-private.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/geometry.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/paint.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/property.h" #include "MagickCore/resample.h" #include "MagickCore/resample-private.h" #include "MagickCore/resource_.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/token.h" #include "MagickCore/transform-private.h" #include "MagickCore/utility.h" /* * Define declarations. */ #define BezierQuantum 200 #define DrawEpsilon (1.0e-10) /* * Typedef declarations. */ typedef struct _EdgeInfo { SegmentInfo bounds; double scanline; PointInfo * points; size_t number_points; ssize_t direction; MagickBooleanType ghostline; size_t highwater; } EdgeInfo; typedef struct _ElementInfo { double cx, cy, major, minor, angle; } ElementInfo; typedef struct _PolygonInfo { EdgeInfo * edges; size_t number_edges; } PolygonInfo; typedef enum { MoveToCode, OpenCode, GhostlineCode, LineToCode, EndCode } PathInfoCode; typedef struct _PathInfo { PointInfo point; PathInfoCode code; } PathInfo; /* * Forward declarations. */ static MagickBooleanType DrawStrokePolygon(Image *, const DrawInfo *, const PrimitiveInfo *, ExceptionInfo *); static PrimitiveInfo * TraceStrokePolygon(const DrawInfo *, const PrimitiveInfo *); static size_t TracePath(PrimitiveInfo *, const char *); static void TraceArc(PrimitiveInfo *, const PointInfo, const PointInfo, const PointInfo), TraceArcPath(PrimitiveInfo *, const PointInfo, const PointInfo, const PointInfo, const double, const MagickBooleanType, const MagickBooleanType), TraceBezier(PrimitiveInfo *, const size_t), TraceCircle(PrimitiveInfo *, const PointInfo, const PointInfo), TraceEllipse(PrimitiveInfo *, const PointInfo, const PointInfo, const PointInfo), TraceLine(PrimitiveInfo *, const PointInfo, const PointInfo), TraceRectangle(PrimitiveInfo *, const PointInfo, const PointInfo), TraceRoundRectangle(PrimitiveInfo *, const PointInfo, const PointInfo, PointInfo), TraceSquareLinecap(PrimitiveInfo *, const size_t, const double); /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % A c q u i r e D r a w I n f o * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % AcquireDrawInfo() returns a DrawInfo structure properly * initialized. 
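%
%  A hedged usage sketch (illustrative values, not part of the original
%  documentation): acquire, populate, draw, destroy.
%
%      DrawInfo *draw_info = AcquireDrawInfo();
%      (void) CloneString(&draw_info->primitive,"fill red circle 50,50 50,10");
%      (void) DrawImage(image,draw_info,exception);
%      draw_info=DestroyDrawInfo(draw_info);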
% % The format of the AcquireDrawInfo method is: % % * DrawInfo *AcquireDrawInfo(void) % */ MagickExport DrawInfo * AcquireDrawInfo(void) { DrawInfo * draw_info; draw_info = (DrawInfo *) AcquireMagickMemory(sizeof(*draw_info)); if (draw_info == (DrawInfo *) NULL) ThrowFatalException(ResourceLimitFatalError, "MemoryAllocationFailed"); GetDrawInfo((ImageInfo *) NULL, draw_info); return (draw_info); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % C l o n e D r a w I n f o * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % CloneDrawInfo() makes a copy of the given draw_info structure. * If NULL % is specified, a new DrawInfo structure is created initialized * to default % values. % % The format of the CloneDrawInfo method is: % % * DrawInfo *CloneDrawInfo(const ImageInfo *image_info, % const * DrawInfo *draw_info) % % A description of each parameter follows: % % * o image_info: the image info. % % o draw_info: the draw info. % */ MagickExport DrawInfo * CloneDrawInfo(const ImageInfo * image_info, const DrawInfo * draw_info) { DrawInfo * clone_info; ExceptionInfo * exception; clone_info = (DrawInfo *) AcquireMagickMemory(sizeof(*clone_info)); if (clone_info == (DrawInfo *) NULL) ThrowFatalException(ResourceLimitFatalError, "MemoryAllocationFailed"); GetDrawInfo(image_info, clone_info); if (draw_info == (DrawInfo *) NULL) return (clone_info); exception = AcquireExceptionInfo(); if (clone_info->primitive != (char *)NULL) (void)CloneString(&clone_info->primitive, draw_info->primitive); if (draw_info->geometry != (char *)NULL) (void)CloneString(&clone_info->geometry, draw_info->geometry); clone_info->viewbox = draw_info->viewbox; clone_info->affine = draw_info->affine; clone_info->gravity = draw_info->gravity; clone_info->fill = draw_info->fill; clone_info->stroke = draw_info->stroke; clone_info->stroke_width = draw_info->stroke_width; if (draw_info->fill_pattern != (Image *) NULL) clone_info->fill_pattern = CloneImage(draw_info->fill_pattern, 0, 0, MagickTrue, exception); if (draw_info->stroke_pattern != (Image *) NULL) clone_info->stroke_pattern = CloneImage(draw_info->stroke_pattern, 0, 0, MagickTrue, exception); clone_info->stroke_antialias = draw_info->stroke_antialias; clone_info->text_antialias = draw_info->text_antialias; clone_info->fill_rule = draw_info->fill_rule; clone_info->linecap = draw_info->linecap; clone_info->linejoin = draw_info->linejoin; clone_info->miterlimit = draw_info->miterlimit; clone_info->dash_offset = draw_info->dash_offset; clone_info->decorate = draw_info->decorate; clone_info->compose = draw_info->compose; if (draw_info->text != (char *)NULL) (void)CloneString(&clone_info->text, draw_info->text); if (draw_info->font != (char *)NULL) (void)CloneString(&clone_info->font, draw_info->font); if (draw_info->metrics != (char *)NULL) (void)CloneString(&clone_info->metrics, draw_info->metrics); if (draw_info->family != (char *)NULL) (void)CloneString(&clone_info->family, draw_info->family); clone_info->style = draw_info->style; clone_info->stretch = draw_info->stretch; clone_info->weight = draw_info->weight; if (draw_info->encoding != (char *)NULL) (void)CloneString(&clone_info->encoding, draw_info->encoding); clone_info->pointsize = draw_info->pointsize; clone_info->kerning = draw_info->kerning; clone_info->interline_spacing = draw_info->interline_spacing; clone_info->interword_spacing = draw_info->interword_spacing; clone_info->direction = 
draw_info->direction; if (draw_info->density != (char *)NULL) (void)CloneString(&clone_info->density, draw_info->density); clone_info->align = draw_info->align; clone_info->undercolor = draw_info->undercolor; clone_info->border_color = draw_info->border_color; if (draw_info->server_name != (char *)NULL) (void)CloneString(&clone_info->server_name, draw_info->server_name); if (draw_info->dash_pattern != (double *)NULL) { register ssize_t x; for (x = 0; fabs(draw_info->dash_pattern[x]) >= DrawEpsilon; x++); clone_info->dash_pattern = (double *)AcquireQuantumMemory((size_t) x + 1UL, sizeof(*clone_info->dash_pattern)); if (clone_info->dash_pattern == (double *)NULL) ThrowFatalException(ResourceLimitFatalError, "UnableToAllocateDashPattern"); (void)CopyMagickMemory(clone_info->dash_pattern, draw_info->dash_pattern, (size_t) (x + 1) * sizeof(*clone_info->dash_pattern)); } clone_info->gradient = draw_info->gradient; if (draw_info->gradient.stops != (StopInfo *) NULL) { size_t number_stops; number_stops = clone_info->gradient.number_stops; clone_info->gradient.stops = (StopInfo *) AcquireQuantumMemory((size_t) number_stops, sizeof(*clone_info->gradient.stops)); if (clone_info->gradient.stops == (StopInfo *) NULL) ThrowFatalException(ResourceLimitFatalError, "UnableToAllocateDashPattern"); (void)CopyMagickMemory(clone_info->gradient.stops, draw_info->gradient.stops, (size_t) number_stops * sizeof(*clone_info->gradient.stops)); } if (draw_info->clip_mask != (char *)NULL) (void)CloneString(&clone_info->clip_mask, draw_info->clip_mask); clone_info->bounds = draw_info->bounds; clone_info->clip_units = draw_info->clip_units; clone_info->render = draw_info->render; clone_info->fill_alpha = draw_info->fill_alpha; clone_info->stroke_alpha = draw_info->stroke_alpha; clone_info->element_reference = draw_info->element_reference; clone_info->debug = IsEventLogging(); exception = DestroyExceptionInfo(exception); return (clone_info); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % + C o n v e r t P a t h T o P o l y g o n * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % ConvertPathToPolygon() converts a path to the more efficient * sorted % rendering form. % % The format of the ConvertPathToPolygon * method is: % % PolygonInfo *ConvertPathToPolygon(const DrawInfo * *draw_info, % const PathInfo *path_info) % % A description of each * parameter follows: % % o Method ConvertPathToPolygon returns the path * in a more efficient sorted % rendering form of type PolygonInfo. % % * o draw_info: Specifies a pointer to an DrawInfo structure. % % o * path_info: Specifies a pointer to an PathInfo structure. % % */ #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static int CompareEdges(const void *x, const void *y) { register const EdgeInfo * p, *q; /* * Compare two edges. 
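   Edges sort by the y, then the x, of their first point; ties fall back to
   the sign of the cross product of the two leading segments, so edges that
   start at the same point are ordered by their initial direction.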
*/ p = (const EdgeInfo *)x; q = (const EdgeInfo *)y; if ((p->points[0].y - DrawEpsilon) > q->points[0].y) return (1); if ((p->points[0].y + DrawEpsilon) < q->points[0].y) return (-1); if ((p->points[0].x - DrawEpsilon) > q->points[0].x) return (1); if ((p->points[0].x + DrawEpsilon) < q->points[0].x) return (-1); if (((p->points[1].x - p->points[0].x) * (q->points[1].y - q->points[0].y) - (p->points[1].y - p->points[0].y) * (q->points[1].x - q->points[0].x)) > 0.0) return (1); return (-1); } #if defined(__cplusplus) || defined(c_plusplus) } #endif static void LogPolygonInfo(const PolygonInfo * polygon_info) { register EdgeInfo * p; register ssize_t i, j; (void)LogMagickEvent(DrawEvent, GetMagickModule(), " begin active-edge"); p = polygon_info->edges; for (i = 0; i < (ssize_t) polygon_info->number_edges; i++) { (void)LogMagickEvent(DrawEvent, GetMagickModule(), " edge %.20g:", (double)i); (void)LogMagickEvent(DrawEvent, GetMagickModule(), " direction: %s", p->direction != MagickFalse ? "down" : "up"); (void)LogMagickEvent(DrawEvent, GetMagickModule(), " ghostline: %s", p->ghostline != MagickFalse ? "transparent" : "opaque"); (void)LogMagickEvent(DrawEvent, GetMagickModule(), " bounds: %g,%g - %g,%g", p->bounds.x1, p->bounds.y1, p->bounds.x2, p->bounds.y2); for (j = 0; j < (ssize_t) p->number_points; j++) (void)LogMagickEvent(DrawEvent, GetMagickModule(), " %g,%g", p->points[j].x, p->points[j].y); p++; } (void)LogMagickEvent(DrawEvent, GetMagickModule(), " end active-edge"); } static void ReversePoints(PointInfo * points, const size_t number_points) { PointInfo point; register ssize_t i; for (i = 0; i < (ssize_t) (number_points >> 1); i++) { point = points[i]; points[i] = points[number_points - (i + 1)]; points[number_points - (i + 1)] = point; } } static PolygonInfo * ConvertPathToPolygon(const PathInfo * path_info) { long direction, next_direction; PointInfo point, *points; PolygonInfo * polygon_info; SegmentInfo bounds; register ssize_t i, n; MagickBooleanType ghostline; size_t edge, number_edges, number_points; /* * Convert a path to the more efficient sorted rendering form. */ polygon_info = (PolygonInfo *) AcquireMagickMemory(sizeof(*polygon_info)); if (polygon_info == (PolygonInfo *) NULL) return ((PolygonInfo *) NULL); number_edges = 16; polygon_info->edges = (EdgeInfo *) AcquireQuantumMemory(number_edges, sizeof(*polygon_info->edges)); if (polygon_info->edges == (EdgeInfo *) NULL) return ((PolygonInfo *) NULL); (void)ResetMagickMemory(polygon_info->edges, 0, number_edges * sizeof(*polygon_info->edges)); direction = 0; edge = 0; ghostline = MagickFalse; n = 0; number_points = 0; points = (PointInfo *) NULL; (void)ResetMagickMemory(&point, 0, sizeof(point)); (void)ResetMagickMemory(&bounds, 0, sizeof(bounds)); for (i = 0; path_info[i].code != EndCode; i++) { if ((path_info[i].code == MoveToCode) || (path_info[i].code == OpenCode) || (path_info[i].code == GhostlineCode)) { /* * Move to. 
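   A move-to (or open/ghostline) code flushes the edge collected so far,
   provided it holds at least two points, then starts a fresh point list at
   the new location with the direction reset.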
*/ if ((points != (PointInfo *) NULL) && (n >= 2)) { if (edge == number_edges) { number_edges <<= 1; polygon_info->edges = (EdgeInfo *) ResizeQuantumMemory( polygon_info->edges, (size_t) number_edges, sizeof(*polygon_info->edges)); if (polygon_info->edges == (EdgeInfo *) NULL) return ((PolygonInfo *) NULL); } polygon_info->edges[edge].number_points = (size_t) n; polygon_info->edges[edge].scanline = (-1.0); polygon_info->edges[edge].highwater = 0; polygon_info->edges[edge].ghostline = ghostline; polygon_info->edges[edge].direction = (ssize_t) (direction > 0); if (direction < 0) ReversePoints(points, (size_t) n); polygon_info->edges[edge].points = points; polygon_info->edges[edge].bounds = bounds; polygon_info->edges[edge].bounds.y1 = points[0].y; polygon_info->edges[edge].bounds.y2 = points[n - 1].y; points = (PointInfo *) NULL; ghostline = MagickFalse; edge++; } if (points == (PointInfo *) NULL) { number_points = 16; points = (PointInfo *) AcquireQuantumMemory((size_t) number_points, sizeof(*points)); if (points == (PointInfo *) NULL) return ((PolygonInfo *) NULL); } ghostline = path_info[i].code == GhostlineCode ? MagickTrue : MagickFalse; point = path_info[i].point; points[0] = point; bounds.x1 = point.x; bounds.x2 = point.x; direction = 0; n = 1; continue; } /* * Line to. */ next_direction = ((path_info[i].point.y > point.y) || ((fabs(path_info[i].point.y - point.y) < DrawEpsilon) && (path_info[i].point.x > point.x))) ? 1 : -1; if ((points != (PointInfo *) NULL) && (direction != 0) && (direction != next_direction)) { /* * New edge. */ point = points[n - 1]; if (edge == number_edges) { number_edges <<= 1; polygon_info->edges = (EdgeInfo *) ResizeQuantumMemory( polygon_info->edges, (size_t) number_edges, sizeof(*polygon_info->edges)); if (polygon_info->edges == (EdgeInfo *) NULL) return ((PolygonInfo *) NULL); } polygon_info->edges[edge].number_points = (size_t) n; polygon_info->edges[edge].scanline = (-1.0); polygon_info->edges[edge].highwater = 0; polygon_info->edges[edge].ghostline = ghostline; polygon_info->edges[edge].direction = (ssize_t) (direction > 0); if (direction < 0) ReversePoints(points, (size_t) n); polygon_info->edges[edge].points = points; polygon_info->edges[edge].bounds = bounds; polygon_info->edges[edge].bounds.y1 = points[0].y; polygon_info->edges[edge].bounds.y2 = points[n - 1].y; number_points = 16; points = (PointInfo *) AcquireQuantumMemory((size_t) number_points, sizeof(*points)); if (points == (PointInfo *) NULL) return ((PolygonInfo *) NULL); n = 1; ghostline = MagickFalse; points[0] = point; bounds.x1 = point.x; bounds.x2 = point.x; edge++; } direction = next_direction; if (points == (PointInfo *) NULL) continue; if (n == (ssize_t) number_points) { number_points <<= 1; points = (PointInfo *) ResizeQuantumMemory(points, (size_t) number_points, sizeof(*points)); if (points == (PointInfo *) NULL) return ((PolygonInfo *) NULL); } point = path_info[i].point; points[n] = point; if (point.x < bounds.x1) bounds.x1 = point.x; if (point.x > bounds.x2) bounds.x2 = point.x; n++; } if (points != (PointInfo *) NULL) { if (n < 2) points = (PointInfo *) RelinquishMagickMemory(points); else { if (edge == number_edges) { number_edges <<= 1; polygon_info->edges = (EdgeInfo *) ResizeQuantumMemory( polygon_info->edges, (size_t) number_edges, sizeof(*polygon_info->edges)); if (polygon_info->edges == (EdgeInfo *) NULL) return ((PolygonInfo *) NULL); } polygon_info->edges[edge].number_points = (size_t) n; polygon_info->edges[edge].scanline = (-1.0); 
polygon_info->edges[edge].highwater = 0; polygon_info->edges[edge].ghostline = ghostline; polygon_info->edges[edge].direction = (ssize_t) (direction > 0); if (direction < 0) ReversePoints(points, (size_t) n); polygon_info->edges[edge].points = points; polygon_info->edges[edge].bounds = bounds; polygon_info->edges[edge].bounds.y1 = points[0].y; polygon_info->edges[edge].bounds.y2 = points[n - 1].y; ghostline = MagickFalse; edge++; } } polygon_info->number_edges = edge; qsort(polygon_info->edges, (size_t) polygon_info->number_edges, sizeof(*polygon_info->edges), CompareEdges); if (IsEventLogging() != MagickFalse) LogPolygonInfo(polygon_info); return (polygon_info); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % + C o n v e r t P r i m i t i v e T o P a t h * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % ConvertPrimitiveToPath() converts a PrimitiveInfo structure into * a vector % path structure. % % The format of the ConvertPrimitiveToPath * method is: % % PathInfo *ConvertPrimitiveToPath(const DrawInfo * *draw_info, % const PrimitiveInfo *primitive_info) % % A * description of each parameter follows: % % o Method * ConvertPrimitiveToPath returns a vector path structure of type % * PathInfo. % % o draw_info: a structure of type DrawInfo. % % o * primitive_info: Specifies a pointer to an PrimitiveInfo structure. % % */ static void LogPathInfo(const PathInfo * path_info) { register const PathInfo * p; (void)LogMagickEvent(DrawEvent, GetMagickModule(), " begin vector-path"); for (p = path_info; p->code != EndCode; p++) (void)LogMagickEvent(DrawEvent, GetMagickModule(), " %g,%g %s", p->point.x, p->point.y, p->code == GhostlineCode ? "moveto ghostline" : p->code == OpenCode ? "moveto open" : p->code == MoveToCode ? "moveto" : p->code == LineToCode ? "lineto" : "?"); (void)LogMagickEvent(DrawEvent, GetMagickModule(), " end vector-path"); } static PathInfo * ConvertPrimitiveToPath(const PrimitiveInfo * primitive_info) { PathInfo * path_info; PathInfoCode code; PointInfo p, q; register ssize_t i, n; ssize_t coordinates, start; /* * Converts a PrimitiveInfo structure into a vector path structure. */ switch (primitive_info->primitive) { case AlphaPrimitive: case ColorPrimitive: case ImagePrimitive: case PointPrimitive: case TextPrimitive: return ((PathInfo *) NULL); default: break; } for (i = 0; primitive_info[i].primitive != UndefinedPrimitive; i++); path_info = (PathInfo *) AcquireQuantumMemory((size_t) (2UL * i + 3UL), sizeof(*path_info)); if (path_info == (PathInfo *) NULL) return ((PathInfo *) NULL); coordinates = 0; n = 0; p.x = (-1.0); p.y = (-1.0); q.x = (-1.0); q.y = (-1.0); start = 0; for (i = 0; primitive_info[i].primitive != UndefinedPrimitive; i++) { code = LineToCode; if (coordinates <= 0) { coordinates = (ssize_t) primitive_info[i].coordinates; p = primitive_info[i].point; start = n; code = MoveToCode; } coordinates--; /* * Eliminate duplicate points. */ if ((i == 0) || (fabs(q.x - primitive_info[i].point.x) >= DrawEpsilon) || (fabs(q.y - primitive_info[i].point.y) >= DrawEpsilon)) { path_info[n].code = code; path_info[n].point = primitive_info[i].point; q = primitive_info[i].point; n++; } if (coordinates > 0) continue; if ((fabs(p.x - primitive_info[i].point.x) < DrawEpsilon) && (fabs(p.y - primitive_info[i].point.y) < DrawEpsilon)) continue; /* * Mark the p point as open if it does not match the q. 
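   point: the subpath start is flagged OpenCode and a ghostline segment is
   appended back to p, so the contour is still closed for filling while the
   synthetic closing edge is treated as transparent when stroked.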
*/ path_info[start].code = OpenCode; path_info[n].code = GhostlineCode; path_info[n].point = primitive_info[i].point; n++; path_info[n].code = LineToCode; path_info[n].point = p; n++; } path_info[n].code = EndCode; path_info[n].point.x = 0.0; path_info[n].point.y = 0.0; if (IsEventLogging() != MagickFalse) LogPathInfo(path_info); return (path_info); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % D e s t r o y D r a w I n f o * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % DestroyDrawInfo() deallocates memory associated with an DrawInfo * % structure. % % The format of the DestroyDrawInfo method is: % % * DrawInfo *DestroyDrawInfo(DrawInfo *draw_info) % % A description of each * parameter follows: % % o draw_info: the draw info. % */ MagickExport DrawInfo * DestroyDrawInfo(DrawInfo * draw_info) { if (draw_info->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "..."); assert(draw_info != (DrawInfo *) NULL); assert(draw_info->signature == MagickCoreSignature); if (draw_info->primitive != (char *)NULL) draw_info->primitive = DestroyString(draw_info->primitive); if (draw_info->text != (char *)NULL) draw_info->text = DestroyString(draw_info->text); if (draw_info->geometry != (char *)NULL) draw_info->geometry = DestroyString(draw_info->geometry); if (draw_info->fill_pattern != (Image *) NULL) draw_info->fill_pattern = DestroyImage(draw_info->fill_pattern); if (draw_info->stroke_pattern != (Image *) NULL) draw_info->stroke_pattern = DestroyImage(draw_info->stroke_pattern); if (draw_info->font != (char *)NULL) draw_info->font = DestroyString(draw_info->font); if (draw_info->metrics != (char *)NULL) draw_info->metrics = DestroyString(draw_info->metrics); if (draw_info->family != (char *)NULL) draw_info->family = DestroyString(draw_info->family); if (draw_info->encoding != (char *)NULL) draw_info->encoding = DestroyString(draw_info->encoding); if (draw_info->density != (char *)NULL) draw_info->density = DestroyString(draw_info->density); if (draw_info->server_name != (char *)NULL) draw_info->server_name = (char *) RelinquishMagickMemory(draw_info->server_name); if (draw_info->dash_pattern != (double *)NULL) draw_info->dash_pattern = (double *)RelinquishMagickMemory( draw_info->dash_pattern); if (draw_info->gradient.stops != (StopInfo *) NULL) draw_info->gradient.stops = (StopInfo *) RelinquishMagickMemory( draw_info->gradient.stops); if (draw_info->clip_mask != (char *)NULL) draw_info->clip_mask = DestroyString(draw_info->clip_mask); draw_info->signature = (~MagickCoreSignature); draw_info = (DrawInfo *) RelinquishMagickMemory(draw_info); return (draw_info); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % + D e s t r o y E d g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % DestroyEdge() destroys the specified polygon edge. % % The * format of the DestroyEdge method is: % % ssize_t * DestroyEdge(PolygonInfo *polygon_info,const int edge) % % A description * of each parameter follows: % % o polygon_info: Specifies a pointer to * an PolygonInfo structure. % % o edge: the polygon edge number to * destroy. 
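%
%    Note: in this source both the edge index and the return value are
%    size_t; the function returns the number of edges remaining after the
%    given edge's points are released and the tail of the array is shifted
%    down over it.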
% */ static size_t DestroyEdge(PolygonInfo * polygon_info, const size_t edge) { assert(edge < polygon_info->number_edges); polygon_info->edges[edge].points = (PointInfo *) RelinquishMagickMemory( polygon_info->edges[edge].points); polygon_info->number_edges--; if (edge < polygon_info->number_edges) (void)CopyMagickMemory(polygon_info->edges + edge, polygon_info->edges + edge + 1, (size_t) (polygon_info->number_edges - edge) * sizeof(*polygon_info->edges)); return (polygon_info->number_edges); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % + D e s t r o y P o l y g o n I n f o * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % DestroyPolygonInfo() destroys the PolygonInfo data structure. % * % The format of the DestroyPolygonInfo method is: % % PolygonInfo * *DestroyPolygonInfo(PolygonInfo *polygon_info) % % A description of each * parameter follows: % % o polygon_info: Specifies a pointer to an * PolygonInfo structure. % */ static PolygonInfo * DestroyPolygonInfo(PolygonInfo * polygon_info) { register ssize_t i; for (i = 0; i < (ssize_t) polygon_info->number_edges; i++) polygon_info->edges[i].points = (PointInfo *) RelinquishMagickMemory(polygon_info->edges[i].points); polygon_info->edges = (EdgeInfo *) RelinquishMagickMemory(polygon_info->edges); return ((PolygonInfo *) RelinquishMagickMemory(polygon_info)); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % D r a w A f f i n e I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % DrawAffineImage() composites the source over the destination * image as % dictated by the affine transform. % % The format of the * DrawAffineImage method is: % % MagickBooleanType * DrawAffineImage(Image *image,const Image *source, % const * AffineMatrix *affine,ExceptionInfo *exception) % % A description of each * parameter follows: % % o image: the image. % % o source: the source * image. % % o affine: the affine transform. % % o exception: return * any errors or warnings in this structure. % */ static SegmentInfo AffineEdge(const Image * image, const AffineMatrix * affine, const double y, const SegmentInfo * edge) { double intercept, z; register double x; SegmentInfo inverse_edge; /* * Determine left and right edges. */ inverse_edge.x1 = edge->x1; inverse_edge.y1 = edge->y1; inverse_edge.x2 = edge->x2; inverse_edge.y2 = edge->y2; z = affine->ry * y + affine->tx; if (affine->sx >= DrawEpsilon) { intercept = (-z / affine->sx); x = intercept; if (x > inverse_edge.x1) inverse_edge.x1 = x; intercept = (-z + (double)image->columns) / affine->sx; x = intercept; if (x < inverse_edge.x2) inverse_edge.x2 = x; } else if (affine->sx < -DrawEpsilon) { intercept = (-z + (double)image->columns) / affine->sx; x = intercept; if (x > inverse_edge.x1) inverse_edge.x1 = x; intercept = (-z / affine->sx); x = intercept; if (x < inverse_edge.x2) inverse_edge.x2 = x; } else if ((z < 0.0) || ((size_t) floor(z + 0.5) >= image->columns)) { inverse_edge.x2 = edge->x1; return (inverse_edge); } /* * Determine top and bottom edges. 
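   The row constraint rx*x+sy*y+ty in [0,rows) is solved for x just as the
   column constraint was above, further narrowing inverse_edge so the
   returned span only covers destination pixels whose mapping lands inside
   the source image.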
*/ z = affine->sy * y + affine->ty; if (affine->rx >= DrawEpsilon) { intercept = (-z / affine->rx); x = intercept; if (x > inverse_edge.x1) inverse_edge.x1 = x; intercept = (-z + (double)image->rows) / affine->rx; x = intercept; if (x < inverse_edge.x2) inverse_edge.x2 = x; } else if (affine->rx < -DrawEpsilon) { intercept = (-z + (double)image->rows) / affine->rx; x = intercept; if (x > inverse_edge.x1) inverse_edge.x1 = x; intercept = (-z / affine->rx); x = intercept; if (x < inverse_edge.x2) inverse_edge.x2 = x; } else if ((z < 0.0) || ((size_t) floor(z + 0.5) >= image->rows)) { inverse_edge.x2 = edge->x2; return (inverse_edge); } return (inverse_edge); } static AffineMatrix InverseAffineMatrix(const AffineMatrix * affine) { AffineMatrix inverse_affine; double determinant; determinant = PerceptibleReciprocal(affine->sx * affine->sy - affine->rx * affine->ry); inverse_affine.sx = determinant * affine->sy; inverse_affine.rx = determinant * (-affine->rx); inverse_affine.ry = determinant * (-affine->ry); inverse_affine.sy = determinant * affine->sx; inverse_affine.tx = (-affine->tx) * inverse_affine.sx - affine->ty * inverse_affine.ry; inverse_affine.ty = (-affine->tx) * inverse_affine.rx - affine->ty * inverse_affine.sy; return (inverse_affine); } MagickExport MagickBooleanType DrawAffineImage(Image * image, const Image * source, const AffineMatrix * affine, ExceptionInfo * exception) { AffineMatrix inverse_affine; CacheView * image_view, *source_view; MagickBooleanType status; PixelInfo zero; PointInfo extent[4], min, max; register ssize_t i; SegmentInfo edge; ssize_t start, stop, y; /* * Determine bounding box. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(source != (const Image *)NULL); assert(source->signature == MagickCoreSignature); assert(affine != (AffineMatrix *) NULL); extent[0].x = 0.0; extent[0].y = 0.0; extent[1].x = (double)source->columns - 1.0; extent[1].y = 0.0; extent[2].x = (double)source->columns - 1.0; extent[2].y = (double)source->rows - 1.0; extent[3].x = 0.0; extent[3].y = (double)source->rows - 1.0; for (i = 0; i < 4; i++) { PointInfo point; point = extent[i]; extent[i].x = point.x * affine->sx + point.y * affine->ry + affine->tx; extent[i].y = point.x * affine->rx + point.y * affine->sy + affine->ty; } min = extent[0]; max = extent[0]; for (i = 1; i < 4; i++) { if (min.x > extent[i].x) min.x = extent[i].x; if (min.y > extent[i].y) min.y = extent[i].y; if (max.x < extent[i].x) max.x = extent[i].x; if (max.y < extent[i].y) max.y = extent[i].y; } /* * Affine transform image. 
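   For each destination scanline between the transformed extent's minimum
   and maximum y, AffineEdge() yields the valid x span; every pixel in that
   span is mapped through the inverse affine, the source is interpolated at
   the mapped point, and the result is composited over the destination.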
*/ if (SetImageStorageClass(image, DirectClass, exception) == MagickFalse) return (MagickFalse); status = MagickTrue; edge.x1 = MagickMax(min.x, 0.0); edge.y1 = MagickMax(min.y, 0.0); edge.x2 = MagickMin(max.x, (double)image->columns - 1.0); edge.y2 = MagickMin(max.y, (double)image->rows - 1.0); inverse_affine = InverseAffineMatrix(affine); GetPixelInfo(image, &zero); start = (ssize_t) ceil(edge.y1 - 0.5); stop = (ssize_t) floor(edge.y2 + 0.5); source_view = AcquireVirtualCacheView(source, exception); image_view = AcquireAuthenticCacheView(image, exception); for (y = start; y <= stop; y++) { PixelInfo composite, pixel; PointInfo point; register ssize_t x; register Quantum * magick_restrict q; SegmentInfo inverse_edge; ssize_t x_offset; inverse_edge = AffineEdge(source, &inverse_affine, (double)y, &edge); if (inverse_edge.x2 < inverse_edge.x1) continue; q = GetCacheViewAuthenticPixels(image_view, (ssize_t) ceil(inverse_edge.x1 - 0.5), y, (size_t) (floor(inverse_edge.x2 + 0.5) - ceil(inverse_edge.x1 - 0.5) + 1), 1, exception); if (q == (Quantum *) NULL) continue; pixel = zero; composite = zero; x_offset = 0; for (x = (ssize_t) ceil(inverse_edge.x1 - 0.5); x <= (ssize_t) floor(inverse_edge.x2 + 0.5); x++) { point.x = (double)x *inverse_affine.sx + y * inverse_affine.ry + inverse_affine.tx; point.y = (double)x *inverse_affine.rx + y * inverse_affine.sy + inverse_affine.ty; (void)InterpolatePixelInfo(source, source_view, UndefinedInterpolatePixel, point.x, point.y, &pixel, exception); GetPixelInfoPixel(image, q, &composite); CompositePixelInfoOver(&pixel, pixel.alpha, &composite, composite.alpha, &composite); SetPixelViaPixelInfo(image, &composite, q); x_offset++; q += GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; } source_view = DestroyCacheView(source_view); image_view = DestroyCacheView(image_view); return (status); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % + D r a w B o u n d i n g R e c t a n g l e s * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % DrawBoundingRectangles() draws the bounding rectangles on the * image. This % is only useful for developers debugging the rendering * algorithm. % % The format of the DrawBoundingRectangles method is: % % * void DrawBoundingRectangles(Image *image,const DrawInfo *draw_info, % * PolygonInfo *polygon_info,ExceptionInfo *exception) % % A description of * each parameter follows: % % o image: the image. % % o draw_info: the * draw info. % % o polygon_info: Specifies a pointer to a PolygonInfo * structure. % % o exception: return any errors or warnings in this * structure. 
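%
%    Each polygon edge is outlined with a rectangle stroked red for downward
%    edges and green for upward edges, and the combined bounds of all edges
%    are stroked blue, making the rasterizer's working regions visible.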
% */ static void DrawBoundingRectangles(Image * image, const DrawInfo * draw_info, const PolygonInfo * polygon_info, ExceptionInfo * exception) { DrawInfo * clone_info; double mid; PointInfo end, resolution, start; PrimitiveInfo primitive_info[6]; register ssize_t i; SegmentInfo bounds; ssize_t coordinates; clone_info = CloneDrawInfo((ImageInfo *) NULL, draw_info); (void)QueryColorCompliance("#000F", AllCompliance, &clone_info->fill, exception); resolution.x = DefaultResolution; resolution.y = DefaultResolution; if (clone_info->density != (char *)NULL) { GeometryInfo geometry_info; MagickStatusType flags; flags = ParseGeometry(clone_info->density, &geometry_info); resolution.x = geometry_info.rho; resolution.y = geometry_info.sigma; if ((flags & SigmaValue) == MagickFalse) resolution.y = resolution.x; } mid = (resolution.x / 72.0) * ExpandAffine(&clone_info->affine) * clone_info->stroke_width / 2.0; bounds.x1 = 0.0; bounds.y1 = 0.0; bounds.x2 = 0.0; bounds.y2 = 0.0; if (polygon_info != (PolygonInfo *) NULL) { bounds = polygon_info->edges[0].bounds; for (i = 1; i < (ssize_t) polygon_info->number_edges; i++) { if (polygon_info->edges[i].bounds.x1 < (double)bounds.x1) bounds.x1 = polygon_info->edges[i].bounds.x1; if (polygon_info->edges[i].bounds.y1 < (double)bounds.y1) bounds.y1 = polygon_info->edges[i].bounds.y1; if (polygon_info->edges[i].bounds.x2 > (double)bounds.x2) bounds.x2 = polygon_info->edges[i].bounds.x2; if (polygon_info->edges[i].bounds.y2 > (double)bounds.y2) bounds.y2 = polygon_info->edges[i].bounds.y2; } bounds.x1 -= mid; bounds.x1 = bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double) image->columns ? (double)image->columns - 1 : bounds.x1; bounds.y1 -= mid; bounds.y1 = bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double) image->rows ? (double)image->rows - 1 : bounds.y1; bounds.x2 += mid; bounds.x2 = bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double) image->columns ? (double)image->columns - 1 : bounds.x2; bounds.y2 += mid; bounds.y2 = bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double) image->rows ? 
(double)image->rows - 1 : bounds.y2; for (i = 0; i < (ssize_t) polygon_info->number_edges; i++) { if (polygon_info->edges[i].direction != 0) (void)QueryColorCompliance("red", AllCompliance, &clone_info->stroke, exception); else (void)QueryColorCompliance("green", AllCompliance, &clone_info->stroke, exception); start.x = (double)(polygon_info->edges[i].bounds.x1 - mid); start.y = (double)(polygon_info->edges[i].bounds.y1 - mid); end.x = (double)(polygon_info->edges[i].bounds.x2 + mid); end.y = (double)(polygon_info->edges[i].bounds.y2 + mid); primitive_info[0].primitive = RectanglePrimitive; TraceRectangle(primitive_info, start, end); primitive_info[0].method = ReplaceMethod; coordinates = (ssize_t) primitive_info[0].coordinates; primitive_info[coordinates].primitive = UndefinedPrimitive; (void)DrawPrimitive(image, clone_info, primitive_info, exception); } } (void)QueryColorCompliance("blue", AllCompliance, &clone_info->stroke, exception); start.x = (double)(bounds.x1 - mid); start.y = (double)(bounds.y1 - mid); end.x = (double)(bounds.x2 + mid); end.y = (double)(bounds.y2 + mid); primitive_info[0].primitive = RectanglePrimitive; TraceRectangle(primitive_info, start, end); primitive_info[0].method = ReplaceMethod; coordinates = (ssize_t) primitive_info[0].coordinates; primitive_info[coordinates].primitive = UndefinedPrimitive; (void)DrawPrimitive(image, clone_info, primitive_info, exception); clone_info = DestroyDrawInfo(clone_info); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % D r a w C l i p P a t h * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % DrawClipPath() draws the clip path on the image mask. % % The * format of the DrawClipPath method is: % % MagickBooleanType * DrawClipPath(Image *image,const DrawInfo *draw_info, % const char * *name,ExceptionInfo *exception) % % A description of each parameter * follows: % % o image: the image. % % o draw_info: the draw info. % % * o name: the name of the clip path. % % o exception: return any errors * or warnings in this structure. 
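%
%  A hedged usage sketch (illustrative values): the named clip path is looked
%  up as an image artifact holding MVG, rendered in white on a transparent
%  canvas, negated, and installed as the image read mask.
%
%      (void) SetImageArtifact(image,"myclip","circle 60,60 60,20");
%      (void) DrawClipPath(image,draw_info,"myclip",exception);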
% */ MagickExport MagickBooleanType DrawClipPath(Image * image, const DrawInfo * draw_info, const char *name, ExceptionInfo * exception) { char filename[MagickPathExtent]; Image * clip_mask; const char *value; DrawInfo * clone_info; MagickStatusType status; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(draw_info != (const DrawInfo *)NULL); (void)FormatLocaleString(filename, MagickPathExtent, "%s", name); value = GetImageArtifact(image, filename); if (value == (const char *)NULL) return (MagickFalse); clip_mask = CloneImage(image, image->columns, image->rows, MagickTrue, exception); if (clip_mask == (Image *) NULL) return (MagickFalse); (void)QueryColorCompliance("#0000", AllCompliance, &clip_mask->background_color, exception); clip_mask->background_color.alpha = (MagickRealType) TransparentAlpha; (void)SetImageBackgroundColor(clip_mask, exception); if (image->debug != MagickFalse) (void)LogMagickEvent(DrawEvent, GetMagickModule(), "\nbegin clip-path %s", draw_info->clip_mask); clone_info = CloneDrawInfo((ImageInfo *) NULL, draw_info); (void)CloneString(&clone_info->primitive, value); (void)QueryColorCompliance("#ffffff", AllCompliance, &clone_info->fill, exception); clone_info->clip_mask = (char *)NULL; status = NegateImage(clip_mask, MagickFalse, exception); (void)SetImageMask(image, ReadPixelMask, clip_mask, exception); clip_mask = DestroyImage(clip_mask); status &= DrawImage(image, clone_info, exception); clone_info = DestroyDrawInfo(clone_info); if (image->debug != MagickFalse) (void)LogMagickEvent(DrawEvent, GetMagickModule(), "end clip-path"); return (status != 0 ? MagickTrue : MagickFalse); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % + D r a w D a s h P o l y g o n * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % DrawDashPolygon() draws a dashed polygon (line, rectangle, * ellipse) on the % image while respecting the dash offset and dash pattern * attributes. % % The format of the DrawDashPolygon method is: % % * MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info, % * const PrimitiveInfo *primitive_info,Image *image, % ExceptionInfo * *exception) % % A description of each parameter follows: % % o * draw_info: the draw info. % % o primitive_info: Specifies a pointer to * a PrimitiveInfo structure. % % o image: the image. % % o exception: * return any errors or warnings in this structure. 
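%
%    The dash pattern lengths are scaled by ExpandAffine(&draw_info->affine);
%    even-indexed runs are drawn and odd-indexed runs are gaps, each drawn
%    run being stroked as its own short polygon, while dash_offset consumes
%    pattern length before the first run begins.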
% */ static MagickBooleanType DrawDashPolygon(const DrawInfo * draw_info, const PrimitiveInfo * primitive_info, Image * image, ExceptionInfo * exception) { DrawInfo * clone_info; double length, maximum_length, offset, scale, total_length; MagickStatusType status; PrimitiveInfo * dash_polygon; register ssize_t i; register double dx, dy; size_t number_vertices; ssize_t j, n; assert(draw_info != (const DrawInfo *)NULL); if (image->debug != MagickFalse) (void)LogMagickEvent(DrawEvent, GetMagickModule(), " begin draw-dash"); for (i = 0; primitive_info[i].primitive != UndefinedPrimitive; i++); number_vertices = (size_t) i; dash_polygon = (PrimitiveInfo *) AcquireQuantumMemory((size_t) (2UL * number_vertices + 1UL), sizeof(*dash_polygon)); if (dash_polygon == (PrimitiveInfo *) NULL) return (MagickFalse); clone_info = CloneDrawInfo((ImageInfo *) NULL, draw_info); clone_info->miterlimit = 0; dash_polygon[0] = primitive_info[0]; scale = ExpandAffine(&draw_info->affine); length = scale * (draw_info->dash_pattern[0] - 0.5); offset = fabs(draw_info->dash_offset) >= DrawEpsilon ? scale * draw_info->dash_offset : 0.0; j = 1; for (n = 0; offset > 0.0; j = 0) { if (draw_info->dash_pattern[n] <= 0.0) break; length = scale * (draw_info->dash_pattern[n] + (n == 0 ? -0.5 : 0.5)); if (offset > length) { offset -= length; n++; length = scale * (draw_info->dash_pattern[n] + 0.5); continue; } if (offset < length) { length -= offset; offset = 0.0; break; } offset = 0.0; n++; } status = MagickTrue; maximum_length = 0.0; total_length = 0.0; for (i = 1; (i < (ssize_t) number_vertices) && (length >= 0.0); i++) { dx = primitive_info[i].point.x - primitive_info[i - 1].point.x; dy = primitive_info[i].point.y - primitive_info[i - 1].point.y; maximum_length = hypot((double)dx, dy); if (fabs(length) < DrawEpsilon) { n++; if (fabs(draw_info->dash_pattern[n]) < DrawEpsilon) n = 0; length = scale * (draw_info->dash_pattern[n] + (n == 0 ? -0.5 : 0.5)); } for (total_length = 0.0; (length >= 0.0) && (maximum_length >= (total_length + length));) { total_length += length; if ((n & 0x01) != 0) { dash_polygon[0] = primitive_info[0]; dash_polygon[0].point.x = (double)(primitive_info[i - 1].point.x + dx * total_length / maximum_length); dash_polygon[0].point.y = (double)(primitive_info[i - 1].point.y + dy * total_length / maximum_length); j = 1; } else { if ((j + 1) > (ssize_t) (2 * number_vertices)) break; dash_polygon[j] = primitive_info[i - 1]; dash_polygon[j].point.x = (double)(primitive_info[i - 1].point.x + dx * total_length / maximum_length); dash_polygon[j].point.y = (double)(primitive_info[i - 1].point.y + dy * total_length / maximum_length); dash_polygon[j].coordinates = 1; j++; dash_polygon[0].coordinates = (size_t) j; dash_polygon[j].primitive = UndefinedPrimitive; status &= DrawStrokePolygon(image, clone_info, dash_polygon, exception); } n++; if (fabs(draw_info->dash_pattern[n]) < DrawEpsilon) n = 0; length = scale * (draw_info->dash_pattern[n] + (n == 0 ? 
-0.5 : 0.5)); } length -= (maximum_length - total_length); if ((n & 0x01) != 0) continue; dash_polygon[j] = primitive_info[i]; dash_polygon[j].coordinates = 1; j++; } if ((total_length <= maximum_length) && ((n & 0x01) == 0) && (j > 1)) { dash_polygon[j] = primitive_info[i - 1]; dash_polygon[j].point.x += DrawEpsilon; dash_polygon[j].point.y += DrawEpsilon; dash_polygon[j].coordinates = 1; j++; dash_polygon[0].coordinates = (size_t) j; dash_polygon[j].primitive = UndefinedPrimitive; status &= DrawStrokePolygon(image, clone_info, dash_polygon, exception); } dash_polygon = (PrimitiveInfo *) RelinquishMagickMemory(dash_polygon); clone_info = DestroyDrawInfo(clone_info); if (image->debug != MagickFalse) (void)LogMagickEvent(DrawEvent, GetMagickModule(), " end draw-dash"); return (status != 0 ? MagickTrue : MagickFalse); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % D r a w I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % DrawImage() draws a graphic primitive on your image. The * primitive % may be represented as a string or filename. Precede the * filename with an % "at" sign (@) and the contents of the file are drawn * on the image. You % can affect how text is drawn by setting one or more * members of the draw % info structure. % % The format of the DrawImage * method is: % % MagickBooleanType DrawImage(Image *image,const * DrawInfo *draw_info, % ExceptionInfo *exception) % % A description * of each parameter follows: % % o image: the image. % % o draw_info: * the draw info. % % o exception: return any errors or warnings in this * structure. % */ static inline MagickBooleanType IsPoint(const char *point) { char *p; double value; value = StringToDouble(point, &p); return ((fabs(value) < DrawEpsilon) && (p == point) ? 
MagickFalse : MagickTrue); } static inline void TracePoint(PrimitiveInfo * primitive_info, const PointInfo point) { primitive_info->coordinates = 1; primitive_info->point = point; } MagickExport MagickBooleanType DrawImage(Image * image, const DrawInfo * draw_info, ExceptionInfo * exception) { #define RenderImageTag "Render/Image" AffineMatrix affine, current; char keyword[MagickPathExtent], geometry[MagickPathExtent], *next_token, pattern[MagickPathExtent], *primitive, *token; const char *q; double angle, factor, primitive_extent; DrawInfo ** graphic_context; MagickBooleanType proceed; MagickSizeType length, number_points; MagickStatusType status; PointInfo point; PrimitiveInfo * primitive_info; PrimitiveType primitive_type; register const char *p; register ssize_t i, x; SegmentInfo bounds; size_t extent, number_stops; ssize_t j, k, n; StopInfo * stops; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(draw_info != (DrawInfo *) NULL); assert(draw_info->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "..."); if ((draw_info->primitive == (char *)NULL) || (*draw_info->primitive == '\0')) return (MagickFalse); if (image->debug != MagickFalse) (void)LogMagickEvent(DrawEvent, GetMagickModule(), "begin draw-image"); if (*draw_info->primitive != '@') primitive = AcquireString(draw_info->primitive); else primitive = FileToString(draw_info->primitive + 1, ~0UL, exception); if (primitive == (char *)NULL) return (MagickFalse); primitive_extent = (double)strlen(primitive); (void)SetImageArtifact(image, "MVG", primitive); n = 0; number_stops = 0; stops = (StopInfo *) NULL; /* * Allocate primitive info memory. */ graphic_context = (DrawInfo **) AcquireMagickMemory(sizeof(*graphic_context)); if (graphic_context == (DrawInfo **) NULL) { primitive = DestroyString(primitive); ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); } number_points = 6553; primitive_info = (PrimitiveInfo *) AcquireQuantumMemory((size_t) number_points, sizeof(*primitive_info)); if (primitive_info == (PrimitiveInfo *) NULL) { primitive = DestroyString(primitive); for (; n >= 0; n--) graphic_context[n] = DestroyDrawInfo(graphic_context[n]); graphic_context = (DrawInfo **) RelinquishMagickMemory(graphic_context); ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); } graphic_context[n] = CloneDrawInfo((ImageInfo *) NULL, draw_info); graphic_context[n]->viewbox = image->page; if ((image->page.width == 0) || (image->page.height == 0)) { graphic_context[n]->viewbox.width = image->columns; graphic_context[n]->viewbox.height = image->rows; } token = AcquireString(primitive); extent = strlen(token) + MagickPathExtent; if (SetImageStorageClass(image, DirectClass, exception) == MagickFalse) return (MagickFalse); status = MagickTrue; for (q = primitive; *q != '\0';) { /* * Interpret graphic primitive. */ GetNextToken(q, &q, MagickPathExtent, keyword); if (*keyword == '\0') break; if (*keyword == '#') { /* * Comment. 
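   A sharp character introduces an MVG comment; the parser discards the rest
   of the line and continues with the next primitive.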
*/ while ((*q != '\n') && (*q != '\0')) q++; continue; } p = q - strlen(keyword) - 1; primitive_type = UndefinedPrimitive; current = graphic_context[n]->affine; GetAffineMatrix(&affine); switch (*keyword) { case ';': break; case 'a': case 'A': { if (LocaleCompare("affine", keyword) == 0) { GetNextToken(q, &q, extent, token); affine.sx = StringToDouble(token, &next_token); if (token == next_token) status = MagickFalse; GetNextToken(q, &q, extent, token); if (*token == ',') GetNextToken(q, &q, extent, token); affine.rx = StringToDouble(token, &next_token); if (token == next_token) status = MagickFalse; GetNextToken(q, &q, extent, token); if (*token == ',') GetNextToken(q, &q, extent, token); affine.ry = StringToDouble(token, &next_token); if (token == next_token) status = MagickFalse; GetNextToken(q, &q, extent, token); if (*token == ',') GetNextToken(q, &q, extent, token); affine.sy = StringToDouble(token, &next_token); if (token == next_token) status = MagickFalse; GetNextToken(q, &q, extent, token); if (*token == ',') GetNextToken(q, &q, extent, token); affine.tx = StringToDouble(token, &next_token); if (token == next_token) status = MagickFalse; GetNextToken(q, &q, extent, token); if (*token == ',') GetNextToken(q, &q, extent, token); affine.ty = StringToDouble(token, &next_token); if (token == next_token) status = MagickFalse; break; } if (LocaleCompare("alpha", keyword) == 0) { primitive_type = AlphaPrimitive; break; } if (LocaleCompare("arc", keyword) == 0) { primitive_type = ArcPrimitive; break; } status = MagickFalse; break; } case 'b': case 'B': { if (LocaleCompare("bezier", keyword) == 0) { primitive_type = BezierPrimitive; break; } if (LocaleCompare("border-color", keyword) == 0) { GetNextToken(q, &q, extent, token); (void)QueryColorCompliance(token, AllCompliance, &graphic_context[n]->border_color, exception); break; } status = MagickFalse; break; } case 'c': case 'C': { if (LocaleCompare("clip-path", keyword) == 0) { /* * Create clip mask. 
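   The clip-path keyword names MVG stored earlier by push clip-path as an
   image artifact; DrawClipPath() renders that path into the image read mask
   so subsequent primitives are clipped to it.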
*/ GetNextToken(q, &q, extent, token); (void)CloneString(&graphic_context[n]->clip_mask, token); (void)DrawClipPath(image, graphic_context[n], graphic_context[n]->clip_mask, exception); break; } if (LocaleCompare("clip-rule", keyword) == 0) { ssize_t fill_rule; GetNextToken(q, &q, extent, token); fill_rule = ParseCommandOption(MagickFillRuleOptions, MagickFalse, token); if (fill_rule == -1) status = MagickFalse; else graphic_context[n]->fill_rule = (FillRule) fill_rule; break; } if (LocaleCompare("clip-units", keyword) == 0) { ssize_t clip_units; GetNextToken(q, &q, extent, token); clip_units = ParseCommandOption(MagickClipPathOptions, MagickFalse, token); if (clip_units == -1) { status = MagickFalse; break; } graphic_context[n]->clip_units = (ClipPathUnits) clip_units; if (clip_units == ObjectBoundingBox) { GetAffineMatrix(&current); affine.sx = draw_info->bounds.x2; affine.sy = draw_info->bounds.y2; affine.tx = draw_info->bounds.x1; affine.ty = draw_info->bounds.y1; break; } break; } if (LocaleCompare("circle", keyword) == 0) { primitive_type = CirclePrimitive; break; } if (LocaleCompare("color", keyword) == 0) { primitive_type = ColorPrimitive; break; } status = MagickFalse; break; } case 'd': case 'D': { if (LocaleCompare("decorate", keyword) == 0) { ssize_t decorate; GetNextToken(q, &q, extent, token); decorate = ParseCommandOption(MagickDecorateOptions, MagickFalse, token); if (decorate == -1) status = MagickFalse; else graphic_context[n]->decorate = (DecorationType) decorate; break; } if (LocaleCompare("density", keyword) == 0) { GetNextToken(q, &q, extent, token); (void)CloneString(&graphic_context[n]->density, token); break; } if (LocaleCompare("direction", keyword) == 0) { ssize_t direction; GetNextToken(q, &q, extent, token); direction = ParseCommandOption(MagickDirectionOptions, MagickFalse, token); if (direction == -1) status = MagickFalse; else graphic_context[n]->direction = (DirectionType) direction; break; } status = MagickFalse; break; } case 'e': case 'E': { if (LocaleCompare("ellipse", keyword) == 0) { primitive_type = EllipsePrimitive; break; } if (LocaleCompare("encoding", keyword) == 0) { GetNextToken(q, &q, extent, token); (void)CloneString(&graphic_context[n]->encoding, token); break; } status = MagickFalse; break; } case 'f': case 'F': { if (LocaleCompare("fill", keyword) == 0) { GetNextToken(q, &q, extent, token); (void)FormatLocaleString(pattern, MagickPathExtent, "%s", token); if (GetImageArtifact(image, pattern) != (const char *)NULL) (void)DrawPatternPath(image, draw_info, token, &graphic_context[n]->fill_pattern, exception); else { status &= QueryColorCompliance(token, AllCompliance, &graphic_context[n]->fill, exception); if (graphic_context[n]->fill_alpha != OpaqueAlpha) graphic_context[n]->fill.alpha = graphic_context[n]->fill_alpha; if (status == MagickFalse) { ImageInfo * pattern_info; pattern_info = AcquireImageInfo(); (void)CopyMagickString(pattern_info->filename, token, MagickPathExtent); graphic_context[n]->fill_pattern = ReadImage(pattern_info, exception); CatchException(exception); pattern_info = DestroyImageInfo(pattern_info); } } break; } if (LocaleCompare("fill-opacity", keyword) == 0) { GetNextToken(q, &q, extent, token); factor = strchr(token, '%') != (char *)NULL ? 
0.01 : 1.0; graphic_context[n]->fill.alpha = QuantumRange - ClampToQuantum( (MagickRealType) QuantumRange * (1.0 - factor * StringToDouble(token, &next_token))); if (token == next_token) status = MagickFalse; break; } if (LocaleCompare("fill-rule", keyword) == 0) { ssize_t fill_rule; GetNextToken(q, &q, extent, token); fill_rule = ParseCommandOption(MagickFillRuleOptions, MagickFalse, token); if (fill_rule == -1) status = MagickFalse; else graphic_context[n]->fill_rule = (FillRule) fill_rule; break; } if (LocaleCompare("font", keyword) == 0) { GetNextToken(q, &q, extent, token); (void)CloneString(&graphic_context[n]->font, token); if (LocaleCompare("none", token) == 0) graphic_context[n]->font = (char *)RelinquishMagickMemory( graphic_context[n]->font); break; } if (LocaleCompare("font-family", keyword) == 0) { GetNextToken(q, &q, extent, token); (void)CloneString(&graphic_context[n]->family, token); break; } if (LocaleCompare("font-size", keyword) == 0) { GetNextToken(q, &q, extent, token); graphic_context[n]->pointsize = StringToDouble(token, &next_token); if (token == next_token) status = MagickFalse; break; } if (LocaleCompare("font-stretch", keyword) == 0) { ssize_t stretch; GetNextToken(q, &q, extent, token); stretch = ParseCommandOption(MagickStretchOptions, MagickFalse, token); if (stretch == -1) status = MagickFalse; else graphic_context[n]->stretch = (StretchType) stretch; break; } if (LocaleCompare("font-style", keyword) == 0) { ssize_t style; GetNextToken(q, &q, extent, token); style = ParseCommandOption(MagickStyleOptions, MagickFalse, token); if (style == -1) status = MagickFalse; else graphic_context[n]->style = (StyleType) style; break; } if (LocaleCompare("font-weight", keyword) == 0) { ssize_t weight; GetNextToken(q, &q, extent, token); weight = ParseCommandOption(MagickWeightOptions, MagickFalse, token); if (weight == -1) weight = (ssize_t) StringToUnsignedLong(token); graphic_context[n]->weight = (size_t) weight; break; } status = MagickFalse; break; } case 'g': case 'G': { if (LocaleCompare("gradient-units", keyword) == 0) { GetNextToken(q, &q, extent, token); break; } if (LocaleCompare("gravity", keyword) == 0) { ssize_t gravity; GetNextToken(q, &q, extent, token); gravity = ParseCommandOption(MagickGravityOptions, MagickFalse, token); if (gravity == -1) status = MagickFalse; else graphic_context[n]->gravity = (GravityType) gravity; break; } status = MagickFalse; break; } case 'i': case 'I': { if (LocaleCompare("image", keyword) == 0) { ssize_t compose; primitive_type = ImagePrimitive; GetNextToken(q, &q, extent, token); compose = ParseCommandOption(MagickComposeOptions, MagickFalse, token); if (compose == -1) status = MagickFalse; else graphic_context[n]->compose = (CompositeOperator) compose; break; } if (LocaleCompare("interline-spacing", keyword) == 0) { GetNextToken(q, &q, extent, token); graphic_context[n]->interline_spacing = StringToDouble(token, &next_token); if (token == next_token) status = MagickFalse; break; } if (LocaleCompare("interword-spacing", keyword) == 0) { GetNextToken(q, &q, extent, token); graphic_context[n]->interword_spacing = StringToDouble(token, &next_token); if (token == next_token) status = MagickFalse; break; } status = MagickFalse; break; } case 'k': case 'K': { if (LocaleCompare("kerning", keyword) == 0) { GetNextToken(q, &q, extent, token); graphic_context[n]->kerning = StringToDouble(token, &next_token); if (token == next_token) status = MagickFalse; break; } status = MagickFalse; break; } case 'l': case 'L': { if 
(LocaleCompare("line", keyword) == 0) primitive_type = LinePrimitive; else status = MagickFalse; break; } case 'o': case 'O': { if (LocaleCompare("offset", keyword) == 0) { GetNextToken(q, &q, extent, token); break; } if (LocaleCompare("opacity", keyword) == 0) { GetNextToken(q, &q, extent, token); factor = strchr(token, '%') != (char *)NULL ? 0.01 : 1.0; graphic_context[n]->alpha = QuantumRange * (1.0 - (QuantumScale * graphic_context[n]->alpha * (1.0 - factor * StringToDouble(token, &next_token)))); graphic_context[n]->fill_alpha = QuantumRange * (1.0 - (QuantumScale * graphic_context[n]->fill_alpha * (1.0 - factor * StringToDouble(token, &next_token)))); graphic_context[n]->stroke_alpha = QuantumRange * (1.0 - (QuantumScale * graphic_context[n]->stroke_alpha * (1.0 - factor * StringToDouble(token, &next_token)))); if (token == next_token) status = MagickFalse; break; } status = MagickFalse; break; } case 'p': case 'P': { if (LocaleCompare("path", keyword) == 0) { primitive_type = PathPrimitive; break; } if (LocaleCompare("point", keyword) == 0) { primitive_type = PointPrimitive; break; } if (LocaleCompare("polyline", keyword) == 0) { primitive_type = PolylinePrimitive; break; } if (LocaleCompare("polygon", keyword) == 0) { primitive_type = PolygonPrimitive; break; } if (LocaleCompare("pop", keyword) == 0) { GetNextToken(q, &q, extent, token); if (LocaleCompare("clip-path", token) == 0) break; if (LocaleCompare("defs", token) == 0) break; if (LocaleCompare("gradient", token) == 0) break; if (LocaleCompare("graphic-context", token) == 0) { if (n <= 0) { (void)ThrowMagickException(exception, GetMagickModule(), DrawError, "UnbalancedGraphicContextPushPop", "`%s'", token); status = MagickFalse; n = 0; break; } if (graphic_context[n]->clip_mask != (char *)NULL) if (LocaleCompare(graphic_context[n]->clip_mask, graphic_context[n - 1]->clip_mask) != 0) (void)SetImageMask(image, ReadPixelMask, (Image *) NULL, exception); graphic_context[n] = DestroyDrawInfo(graphic_context[n]); n--; break; } if (LocaleCompare("pattern", token) == 0) break; status = MagickFalse; break; } if (LocaleCompare("push", keyword) == 0) { GetNextToken(q, &q, extent, token); if (LocaleCompare("clip-path", token) == 0) { char name[MagickPathExtent]; GetNextToken(q, &q, extent, token); (void)FormatLocaleString(name, MagickPathExtent, "%s", token); for (p = q; *q != '\0';) { GetNextToken(q, &q, extent, token); if (LocaleCompare(token, "pop") != 0) continue; GetNextToken(q, (const char **)NULL, extent, token); if (LocaleCompare(token, "clip-path") != 0) continue; break; } (void)CopyMagickString(token, p, (size_t) (q - p - 4 + 1)); (void)SetImageArtifact(image, name, token); GetNextToken(q, &q, extent, token); break; } if (LocaleCompare("gradient", token) == 0) { char key[2 * MagickPathExtent], name[MagickPathExtent], type[MagickPathExtent]; SegmentInfo segment; GetNextToken(q, &q, extent, token); (void)CopyMagickString(name, token, MagickPathExtent); GetNextToken(q, &q, extent, token); (void)CopyMagickString(type, token, MagickPathExtent); GetNextToken(q, &q, extent, token); segment.x1 = StringToDouble(token, &next_token); if (token == next_token) status = MagickFalse; GetNextToken(q, &q, extent, token); if (*token == ',') GetNextToken(q, &q, extent, token); segment.y1 = StringToDouble(token, &next_token); if (token == next_token) status = MagickFalse; GetNextToken(q, &q, extent, token); if (*token == ',') GetNextToken(q, &q, extent, token); segment.x2 = StringToDouble(token, &next_token); if (token == next_token) status = 
MagickFalse; GetNextToken(q, &q, extent, token); if (*token == ',') GetNextToken(q, &q, extent, token); segment.y2 = StringToDouble(token, &next_token); if (token == next_token) status = MagickFalse; if (LocaleCompare(type, "radial") == 0) { GetNextToken(q, &q, extent, token); if (*token == ',') GetNextToken(q, &q, extent, token); } for (p = q; *q != '\0';) { GetNextToken(q, &q, extent, token); if (LocaleCompare(token, "pop") != 0) continue; GetNextToken(q, (const char **)NULL, extent, token); if (LocaleCompare(token, "gradient") != 0) continue; break; } (void)CopyMagickString(token, p, (size_t) (q - p - 4 + 1)); bounds.x1 = graphic_context[n]->affine.sx * segment.x1 + graphic_context[n]->affine.ry * segment.y1 + graphic_context[n]->affine.tx; bounds.y1 = graphic_context[n]->affine.rx * segment.x1 + graphic_context[n]->affine.sy * segment.y1 + graphic_context[n]->affine.ty; bounds.x2 = graphic_context[n]->affine.sx * segment.x2 + graphic_context[n]->affine.ry * segment.y2 + graphic_context[n]->affine.tx; bounds.y2 = graphic_context[n]->affine.rx * segment.x2 + graphic_context[n]->affine.sy * segment.y2 + graphic_context[n]->affine.ty; (void)FormatLocaleString(key, MagickPathExtent, "%s", name); (void)SetImageArtifact(image, key, token); (void)FormatLocaleString(key, MagickPathExtent, "%s-type", name); (void)SetImageArtifact(image, key, type); (void)FormatLocaleString(key, MagickPathExtent, "%s-geometry", name); (void)FormatLocaleString(geometry, MagickPathExtent, "%gx%g%+.15g%+.15g", MagickMax(fabs(bounds.x2 - bounds.x1 + 1.0), 1.0), MagickMax(fabs(bounds.y2 - bounds.y1 + 1.0), 1.0), bounds.x1, bounds.y1); (void)SetImageArtifact(image, key, geometry); GetNextToken(q, &q, extent, token); break; } if (LocaleCompare("pattern", token) == 0) { char key[2 * MagickPathExtent], name[MagickPathExtent]; RectangleInfo pattern_bounds; GetNextToken(q, &q, extent, token); (void)CopyMagickString(name, token, MagickPathExtent); GetNextToken(q, &q, extent, token); pattern_bounds.x = (ssize_t) ceil(StringToDouble(token, &next_token) - 0.5); if (token == next_token) status = MagickFalse; GetNextToken(q, &q, extent, token); if (*token == ',') GetNextToken(q, &q, extent, token); pattern_bounds.y = (ssize_t) ceil(StringToDouble(token, &next_token) - 0.5); if (token == next_token) status = MagickFalse; GetNextToken(q, &q, extent, token); if (*token == ',') GetNextToken(q, &q, extent, token); pattern_bounds.width = (size_t) floor(StringToDouble(token, &next_token) + 0.5); if (token == next_token) status = MagickFalse; GetNextToken(q, &q, extent, token); if (*token == ',') GetNextToken(q, &q, extent, token); pattern_bounds.height = (size_t) floor(StringToDouble(token, &next_token) + 0.5); if (token == next_token) status = MagickFalse; for (p = q; *q != '\0';) { GetNextToken(q, &q, extent, token); if (LocaleCompare(token, "pop") != 0) continue; GetNextToken(q, (const char **)NULL, extent, token); if (LocaleCompare(token, "pattern") != 0) continue; break; } (void)CopyMagickString(token, p, (size_t) (q - p - 4 + 1)); (void)FormatLocaleString(key, MagickPathExtent, "%s", name); (void)SetImageArtifact(image, key, token); (void)FormatLocaleString(key, MagickPathExtent, "%s-geometry", name); (void)FormatLocaleString(geometry, MagickPathExtent, "%.20gx%.20g%+.20g%+.20g", (double)pattern_bounds.width, (double)pattern_bounds.height, (double)pattern_bounds.x, (double)pattern_bounds.y); (void)SetImageArtifact(image, key, geometry); GetNextToken(q, &q, extent, token); break; } if (LocaleCompare("graphic-context", token) == 0) 
{ n++; graphic_context = (DrawInfo **) ResizeQuantumMemory( graphic_context, (size_t) (n + 1), sizeof(*graphic_context)); if (graphic_context == (DrawInfo **) NULL) { (void)ThrowMagickException(exception, GetMagickModule(), ResourceLimitError, "MemoryAllocationFailed", "`%s'", image->filename); break; } graphic_context[n] = CloneDrawInfo((ImageInfo *) NULL, graphic_context[n - 1]); break; } if (LocaleCompare("defs", token) == 0) break; status = MagickFalse; break; } status = MagickFalse; break; } case 'r': case 'R': { if (LocaleCompare("rectangle", keyword) == 0) { primitive_type = RectanglePrimitive; break; } if (LocaleCompare("rotate", keyword) == 0) { GetNextToken(q, &q, extent, token); angle = StringToDouble(token, &next_token); if (token == next_token) status = MagickFalse; affine.sx = cos(DegreesToRadians(fmod((double)angle, 360.0))); affine.rx = sin(DegreesToRadians(fmod((double)angle, 360.0))); affine.ry = (-sin(DegreesToRadians(fmod((double)angle, 360.0)))); affine.sy = cos(DegreesToRadians(fmod((double)angle, 360.0))); break; } if (LocaleCompare("roundRectangle", keyword) == 0) { primitive_type = RoundRectanglePrimitive; break; } status = MagickFalse; break; } case 's': case 'S': { if (LocaleCompare("scale", keyword) == 0) { GetNextToken(q, &q, extent, token); affine.sx = StringToDouble(token, &next_token); if (token == next_token) status = MagickFalse; GetNextToken(q, &q, extent, token); if (*token == ',') GetNextToken(q, &q, extent, token); affine.sy = StringToDouble(token, &next_token); if (token == next_token) status = MagickFalse; break; } if (LocaleCompare("skewX", keyword) == 0) { GetNextToken(q, &q, extent, token); angle = StringToDouble(token, &next_token); if (token == next_token) status = MagickFalse; affine.ry = sin(DegreesToRadians(angle)); break; } if (LocaleCompare("skewY", keyword) == 0) { GetNextToken(q, &q, extent, token); angle = StringToDouble(token, &next_token); if (token == next_token) status = MagickFalse; affine.rx = (-tan(DegreesToRadians(angle) / 2.0)); break; } if (LocaleCompare("stop-color", keyword) == 0) { PixelInfo stop_color; number_stops++; if (number_stops == 1) stops = (StopInfo *) AcquireQuantumMemory(2, sizeof(*stops)); else if (number_stops > 2) stops = (StopInfo *) ResizeQuantumMemory(stops, number_stops, sizeof(*stops)); if (stops == (StopInfo *) NULL) { (void)ThrowMagickException(exception, GetMagickModule(), ResourceLimitError, "MemoryAllocationFailed", "`%s'", image->filename); break; } GetNextToken(q, &q, extent, token); (void)QueryColorCompliance(token, AllCompliance, &stop_color, exception); stops[number_stops - 1].color = stop_color; GetNextToken(q, &q, extent, token); stops[number_stops - 1].offset = StringToDouble(token, &next_token); if (token == next_token) status = MagickFalse; break; } if (LocaleCompare("stroke", keyword) == 0) { GetNextToken(q, &q, extent, token); (void)FormatLocaleString(pattern, MagickPathExtent, "%s", token); if (GetImageArtifact(image, pattern) != (const char *)NULL) (void)DrawPatternPath(image, draw_info, token, &graphic_context[n]->stroke_pattern, exception); else { status &= QueryColorCompliance(token, AllCompliance, &graphic_context[n]->stroke, exception); if (graphic_context[n]->stroke_alpha != OpaqueAlpha) graphic_context[n]->stroke.alpha = graphic_context[n]->stroke_alpha; if (status == MagickFalse) { ImageInfo * pattern_info; pattern_info = AcquireImageInfo(); (void)CopyMagickString(pattern_info->filename, token, MagickPathExtent); graphic_context[n]->stroke_pattern = ReadImage(pattern_info, 
exception); CatchException(exception); pattern_info = DestroyImageInfo(pattern_info); } } break; } if (LocaleCompare("stroke-antialias", keyword) == 0) { GetNextToken(q, &q, extent, token); graphic_context[n]->stroke_antialias = StringToLong(token) != 0 ? MagickTrue : MagickFalse; break; } if (LocaleCompare("stroke-dasharray", keyword) == 0) { if (graphic_context[n]->dash_pattern != (double *)NULL) graphic_context[n]->dash_pattern = (double *) RelinquishMagickMemory(graphic_context[n]->dash_pattern); if (IsPoint(q) != MagickFalse) { const char *r; r = q; GetNextToken(r, &r, extent, token); if (*token == ',') GetNextToken(r, &r, extent, token); for (x = 0; IsPoint(token) != MagickFalse; x++) { GetNextToken(r, &r, extent, token); if (*token == ',') GetNextToken(r, &r, extent, token); } graphic_context[n]->dash_pattern = (double *) AcquireQuantumMemory((size_t) (2UL * x + 1UL), sizeof(*graphic_context[n]->dash_pattern)); if (graphic_context[n]->dash_pattern == (double *)NULL) { (void)ThrowMagickException(exception, GetMagickModule(), ResourceLimitError, "MemoryAllocationFailed", "`%s'", image->filename); status = MagickFalse; break; } for (j = 0; j < x; j++) { GetNextToken(q, &q, extent, token); if (*token == ',') GetNextToken(q, &q, extent, token); graphic_context[n]->dash_pattern[j] = StringToDouble(token, &next_token); if (token == next_token) status = MagickFalse; if (graphic_context[n]->dash_pattern[j] < 0.0) status = MagickFalse; } if ((x & 0x01) != 0) for (; j < (2 * x); j++) graphic_context[n]->dash_pattern[j] = graphic_context[n]->dash_pattern[j - x]; graphic_context[n]->dash_pattern[j] = 0.0; break; } GetNextToken(q, &q, extent, token); break; } if (LocaleCompare("stroke-dashoffset", keyword) == 0) { GetNextToken(q, &q, extent, token); graphic_context[n]->dash_offset = StringToDouble(token, &next_token); if (token == next_token) status = MagickFalse; break; } if (LocaleCompare("stroke-linecap", keyword) == 0) { ssize_t linecap; GetNextToken(q, &q, extent, token); linecap = ParseCommandOption(MagickLineCapOptions, MagickFalse, token); if (linecap == -1) status = MagickFalse; else graphic_context[n]->linecap = (LineCap) linecap; break; } if (LocaleCompare("stroke-linejoin", keyword) == 0) { ssize_t linejoin; GetNextToken(q, &q, extent, token); linejoin = ParseCommandOption(MagickLineJoinOptions, MagickFalse, token); if (linejoin == -1) status = MagickFalse; else graphic_context[n]->linejoin = (LineJoin) linejoin; break; } if (LocaleCompare("stroke-miterlimit", keyword) == 0) { GetNextToken(q, &q, extent, token); graphic_context[n]->miterlimit = StringToUnsignedLong(token); break; } if (LocaleCompare("stroke-opacity", keyword) == 0) { GetNextToken(q, &q, extent, token); factor = strchr(token, '%') != (char *)NULL ? 
0.01 : 1.0; graphic_context[n]->stroke.alpha = QuantumRange - ClampToQuantum( (MagickRealType) QuantumRange * (1.0 - factor * StringToDouble(token, &next_token))); if (token == next_token) status = MagickFalse; break; } if (LocaleCompare("stroke-width", keyword) == 0) { GetNextToken(q, &q, extent, token); graphic_context[n]->stroke_width = StringToDouble(token, &next_token); if (token == next_token) status = MagickFalse; break; } status = MagickFalse; break; } case 't': case 'T': { if (LocaleCompare("text", keyword) == 0) { primitive_type = TextPrimitive; break; } if (LocaleCompare("text-align", keyword) == 0) { ssize_t align; GetNextToken(q, &q, extent, token); align = ParseCommandOption(MagickAlignOptions, MagickFalse, token); if (align == -1) status = MagickFalse; else graphic_context[n]->align = (AlignType) align; break; } if (LocaleCompare("text-anchor", keyword) == 0) { ssize_t align; GetNextToken(q, &q, extent, token); align = ParseCommandOption(MagickAlignOptions, MagickFalse, token); if (align == -1) status = MagickFalse; else graphic_context[n]->align = (AlignType) align; break; } if (LocaleCompare("text-antialias", keyword) == 0) { GetNextToken(q, &q, extent, token); graphic_context[n]->text_antialias = StringToLong(token) != 0 ? MagickTrue : MagickFalse; break; } if (LocaleCompare("text-undercolor", keyword) == 0) { GetNextToken(q, &q, extent, token); (void)QueryColorCompliance(token, AllCompliance, &graphic_context[n]->undercolor, exception); break; } if (LocaleCompare("translate", keyword) == 0) { GetNextToken(q, &q, extent, token); affine.tx = StringToDouble(token, &next_token); if (token == next_token) status = MagickFalse; GetNextToken(q, &q, extent, token); if (*token == ',') GetNextToken(q, &q, extent, token); affine.ty = StringToDouble(token, &next_token); if (token == next_token) status = MagickFalse; break; } status = MagickFalse; break; } case 'v': case 'V': { if (LocaleCompare("viewbox", keyword) == 0) { GetNextToken(q, &q, extent, token); graphic_context[n]->viewbox.x = (ssize_t) ceil(StringToDouble(token, &next_token) - 0.5); if (token == next_token) status = MagickFalse; GetNextToken(q, &q, extent, token); if (*token == ',') GetNextToken(q, &q, extent, token); graphic_context[n]->viewbox.y = (ssize_t) ceil(StringToDouble(token, &next_token) - 0.5); if (token == next_token) status = MagickFalse; GetNextToken(q, &q, extent, token); if (*token == ',') GetNextToken(q, &q, extent, token); graphic_context[n]->viewbox.width = (size_t) floor(StringToDouble( token, &next_token) + 0.5); if (token == next_token) status = MagickFalse; GetNextToken(q, &q, extent, token); if (*token == ',') GetNextToken(q, &q, extent, token); graphic_context[n]->viewbox.height = (size_t) floor(StringToDouble( token, &next_token) + 0.5); if (token == next_token) status = MagickFalse; break; } status = MagickFalse; break; } default: { status = MagickFalse; break; } } if (status == MagickFalse) break; if ((fabs(affine.sx - 1.0) >= DrawEpsilon) || (fabs(affine.rx) >= DrawEpsilon) || (fabs(affine.ry) >= DrawEpsilon) || (fabs(affine.sy - 1.0) >= DrawEpsilon) || (fabs(affine.tx) >= DrawEpsilon) || (fabs(affine.ty) >= DrawEpsilon)) { graphic_context[n]->affine.sx = current.sx * affine.sx + current.ry * affine.rx; graphic_context[n]->affine.rx = current.rx * affine.sx + current.sy * affine.rx; graphic_context[n]->affine.ry = current.sx * affine.ry + current.ry * affine.sy; graphic_context[n]->affine.sy = current.rx * affine.ry + current.sy * affine.sy; graphic_context[n]->affine.tx = current.sx * 
affine.tx + current.ry * affine.ty + current.tx; graphic_context[n]->affine.ty = current.rx * affine.tx + current.sy * affine.ty + current.ty; } if (primitive_type == UndefinedPrimitive) { if (*q == '\0') { if (number_stops > 1) { GradientType type; type = LinearGradient; if (draw_info->gradient.type == RadialGradient) type = RadialGradient; (void)GradientImage(image, type, PadSpread, stops, number_stops, exception); } if (number_stops > 0) stops = (StopInfo *) RelinquishMagickMemory(stops); } if (image->debug != MagickFalse) (void)LogMagickEvent(DrawEvent, GetMagickModule(), " %.*s", (int) (q - p), p); continue; } /* * Parse the primitive attributes. */ i = 0; j = 0; primitive_info[0].point.x = 0.0; primitive_info[0].point.y = 0.0; for (x = 0; *q != '\0'; x++) { /* * Define points. */ if (IsPoint(q) == MagickFalse) break; GetNextToken(q, &q, extent, token); point.x = StringToDouble(token, &next_token); if (token == next_token) status = MagickFalse; GetNextToken(q, &q, extent, token); if (*token == ',') GetNextToken(q, &q, extent, token); point.y = StringToDouble(token, &next_token); if (token == next_token) status = MagickFalse; GetNextToken(q, (const char **)NULL, extent, token); if (*token == ',') GetNextToken(q, &q, extent, token); primitive_info[i].primitive = primitive_type; primitive_info[i].point = point; primitive_info[i].coordinates = 0; primitive_info[i].method = FloodfillMethod; i++; if (i < (ssize_t) number_points) continue; number_points <<= 1; primitive_info = (PrimitiveInfo *) ResizeQuantumMemory(primitive_info, (size_t) number_points, sizeof(*primitive_info)); if ((primitive_info == (PrimitiveInfo *) NULL) || (number_points != (MagickSizeType) ((size_t) number_points))) ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); } primitive_info[j].primitive = primitive_type; primitive_info[j].coordinates = (size_t) x; primitive_info[j].method = FloodfillMethod; primitive_info[j].text = (char *)NULL; /* * Circumscribe primitive within a circle. */ bounds.x1 = primitive_info[j].point.x; bounds.y1 = primitive_info[j].point.y; bounds.x2 = primitive_info[j].point.x; bounds.y2 = primitive_info[j].point.y; for (k = 1; k < (ssize_t) primitive_info[j].coordinates; k++) { point = primitive_info[j + k].point; if (point.x < bounds.x1) bounds.x1 = point.x; if (point.y < bounds.y1) bounds.y1 = point.y; if (point.x > bounds.x2) bounds.x2 = point.x; if (point.y > bounds.y2) bounds.y2 = point.y; } /* * Speculate how many points our primitive might consume. 
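* The estimates below are deliberately generous upper bounds: rectangles
* reserve five slots per given point, curved primitives are sized from the
* radius of the circle circumscribing their bounding box plus a
* BezierQuantum cushion, and paths count the numeric tokens in their
* argument string.  The worst case is then used to grow primitive_info
* before any trace routine writes into it.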
*/ length = primitive_info[j].coordinates; switch (primitive_type) { case RectanglePrimitive: { length *= 5; break; } case RoundRectanglePrimitive: { double alpha, beta, radius; alpha = bounds.x2 - bounds.x1; beta = bounds.y2 - bounds.y1; radius = hypot((double)alpha, (double)beta); length *= 5; length += 2 * ((size_t) ceil((double)MagickPI * radius)) + 6 * BezierQuantum + 360; break; } case BezierPrimitive: { if (primitive_info[j].coordinates > 107) (void)ThrowMagickException(exception, GetMagickModule(), DrawError, "TooManyBezierCoordinates", "`%s'", token); length = BezierQuantum * primitive_info[j].coordinates; break; } case PathPrimitive: { char *s, *t; GetNextToken(q, &q, extent, token); length = 1; t = token; for (s = token; *s != '\0'; s = t) { double value; value = StringToDouble(s, &t); (void)value; if (s == t) { t++; continue; } length++; } length = length * BezierQuantum; break; } case CirclePrimitive: case ArcPrimitive: case EllipsePrimitive: { double alpha, beta, radius; alpha = bounds.x2 - bounds.x1; beta = bounds.y2 - bounds.y1; radius = hypot((double)alpha, (double)beta); length = 2 * ((size_t) ceil((double)MagickPI * radius)) + 6 * BezierQuantum + 360; break; } default: break; } if ((i + length) >= number_points) { /* * Resize based on speculative points required by primitive. */ number_points += length + 1; primitive_info = (PrimitiveInfo *) ResizeQuantumMemory(primitive_info, (size_t) number_points, sizeof(*primitive_info)); if ((primitive_info == (PrimitiveInfo *) NULL) || (number_points != (MagickSizeType) ((size_t) number_points))) { (void)ThrowMagickException(exception, GetMagickModule(), ResourceLimitError, "MemoryAllocationFailed", "`%s'", image->filename); break; } } switch (primitive_type) { case PointPrimitive: default: { if (primitive_info[j].coordinates != 1) { status = MagickFalse; break; } TracePoint(primitive_info + j, primitive_info[j].point); i = (ssize_t) (j + primitive_info[j].coordinates); break; } case LinePrimitive: { if (primitive_info[j].coordinates != 2) { status = MagickFalse; break; } TraceLine(primitive_info + j, primitive_info[j].point, primitive_info[j + 1].point); i = (ssize_t) (j + primitive_info[j].coordinates); break; } case RectanglePrimitive: { if (primitive_info[j].coordinates != 2) { status = MagickFalse; break; } TraceRectangle(primitive_info + j, primitive_info[j].point, primitive_info[j + 1].point); i = (ssize_t) (j + primitive_info[j].coordinates); break; } case RoundRectanglePrimitive: { if (primitive_info[j].coordinates != 3) { status = MagickFalse; break; } TraceRoundRectangle(primitive_info + j, primitive_info[j].point, primitive_info[j + 1].point, primitive_info[j + 2].point); i = (ssize_t) (j + primitive_info[j].coordinates); break; } case ArcPrimitive: { if (primitive_info[j].coordinates != 3) { primitive_type = UndefinedPrimitive; break; } TraceArc(primitive_info + j, primitive_info[j].point, primitive_info[j + 1].point, primitive_info[j + 2].point); i = (ssize_t) (j + primitive_info[j].coordinates); break; } case EllipsePrimitive: { if (primitive_info[j].coordinates != 3) { status = MagickFalse; break; } TraceEllipse(primitive_info + j, primitive_info[j].point, primitive_info[j + 1].point, primitive_info[j + 2].point); i = (ssize_t) (j + primitive_info[j].coordinates); break; } case CirclePrimitive: { if (primitive_info[j].coordinates != 2) { status = MagickFalse; break; } TraceCircle(primitive_info + j, primitive_info[j].point, primitive_info[j + 1].point); i = (ssize_t) (j + primitive_info[j].coordinates); break; } 
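/*
  Polyline points are consumed as given; a polygon additionally closes
  itself below by appending a copy of its first point.
*/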
case PolylinePrimitive: break; case PolygonPrimitive: { primitive_info[i] = primitive_info[j]; primitive_info[i].coordinates = 0; primitive_info[j].coordinates++; i++; break; } case BezierPrimitive: { if (primitive_info[j].coordinates < 3) { status = MagickFalse; break; } TraceBezier(primitive_info + j, primitive_info[j].coordinates); i = (ssize_t) (j + primitive_info[j].coordinates); break; } case PathPrimitive: { i = (ssize_t) (j + TracePath(primitive_info + j, token)); break; } case AlphaPrimitive: case ColorPrimitive: { ssize_t method; if (primitive_info[j].coordinates != 1) { status = MagickFalse; break; } GetNextToken(q, &q, extent, token); method = ParseCommandOption(MagickMethodOptions, MagickFalse, token); if (method == -1) status = MagickFalse; else primitive_info[j].method = (PaintMethod) method; break; } case TextPrimitive: { if (primitive_info[j].coordinates != 1) { status = MagickFalse; break; } if (*token != ',') GetNextToken(q, &q, extent, token); primitive_info[j].text = AcquireString(token); break; } case ImagePrimitive: { if (primitive_info[j].coordinates != 2) { status = MagickFalse; break; } GetNextToken(q, &q, extent, token); primitive_info[j].text = AcquireString(token); break; } } if (primitive_info == (PrimitiveInfo *) NULL) break; if (image->debug != MagickFalse) (void)LogMagickEvent(DrawEvent, GetMagickModule(), " %.*s", (int)(q - p), p); if (status == MagickFalse) break; primitive_info[i].primitive = UndefinedPrimitive; if (i == 0) continue; /* * Transform points. */ for (i = 0; primitive_info[i].primitive != UndefinedPrimitive; i++) { point = primitive_info[i].point; primitive_info[i].point.x = graphic_context[n]->affine.sx * point.x + graphic_context[n]->affine.ry * point.y + graphic_context[n]->affine.tx; primitive_info[i].point.y = graphic_context[n]->affine.rx * point.x + graphic_context[n]->affine.sy * point.y + graphic_context[n]->affine.ty; point = primitive_info[i].point; if (point.x < graphic_context[n]->bounds.x1) graphic_context[n]->bounds.x1 = point.x; if (point.y < graphic_context[n]->bounds.y1) graphic_context[n]->bounds.y1 = point.y; if (point.x > graphic_context[n]->bounds.x2) graphic_context[n]->bounds.x2 = point.x; if (point.y > graphic_context[n]->bounds.y2) graphic_context[n]->bounds.y2 = point.y; if (primitive_info[i].primitive == ImagePrimitive) break; if (i >= (ssize_t) number_points) ThrowFatalException(ResourceLimitFatalError, "MemoryAllocationFailed"); } if (graphic_context[n]->render != MagickFalse) { if ((n != 0) && (graphic_context[n]->clip_mask != (char *)NULL) && (LocaleCompare(graphic_context[n]->clip_mask, graphic_context[n - 1]->clip_mask) != 0)) status &= DrawClipPath(image, graphic_context[n], graphic_context[n]->clip_mask, exception); status &= DrawPrimitive(image, graphic_context[n], primitive_info, exception); } if (primitive_info->text != (char *)NULL) primitive_info->text = (char *)RelinquishMagickMemory( primitive_info->text); proceed = SetImageProgress(image, RenderImageTag, q - primitive, (MagickSizeType) primitive_extent); if (proceed == MagickFalse) break; if (status == 0) break; } if (image->debug != MagickFalse) (void)LogMagickEvent(DrawEvent, GetMagickModule(), "end draw-image"); /* * Relinquish resources. 
*/ token = DestroyString(token); if (primitive_info != (PrimitiveInfo *) NULL) primitive_info = (PrimitiveInfo *) RelinquishMagickMemory(primitive_info); primitive = DestroyString(primitive); for (; n >= 0; n--) graphic_context[n] = DestroyDrawInfo(graphic_context[n]); graphic_context = (DrawInfo **) RelinquishMagickMemory(graphic_context); if (status == MagickFalse) ThrowBinaryException(DrawError, "NonconformingDrawingPrimitiveDefinition", keyword); return (status != 0 ? MagickTrue : MagickFalse); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % D r a w G r a d i e n t I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % DrawGradientImage() draws a linear gradient on the image. % % * The format of the DrawGradientImage method is: % % MagickBooleanType * DrawGradientImage(Image *image, % const DrawInfo * *draw_info,ExceptionInfo *exception) % % A description of each parameter * follows: % % o image: the image. % % o draw_info: the draw info. % % * o exception: return any errors or warnings in this structure. % */ static inline double GetStopColorOffset(const GradientInfo * gradient, const ssize_t x, const ssize_t y) { switch (gradient->type) { case UndefinedGradient: case LinearGradient: { double gamma, length, offset, scale; PointInfo p, q; const SegmentInfo * gradient_vector; gradient_vector = (&gradient->gradient_vector); p.x = gradient_vector->x2 - gradient_vector->x1; p.y = gradient_vector->y2 - gradient_vector->y1; q.x = (double)x - gradient_vector->x1; q.y = (double)y - gradient_vector->y1; length = sqrt(q.x * q.x + q.y * q.y); gamma = sqrt(p.x * p.x + p.y * p.y) * length; gamma = PerceptibleReciprocal(gamma); scale = p.x * q.x + p.y * q.y; offset = gamma * scale * length; return (offset); } case RadialGradient: { PointInfo v; if (gradient->spread == RepeatSpread) { v.x = (double)x - gradient->center.x; v.y = (double)y - gradient->center.y; return (sqrt(v.x * v.x + v.y * v.y)); } v.x = (double)(((x - gradient->center.x) * cos(DegreesToRadians( gradient->angle))) + ((y - gradient->center.y) * sin(DegreesToRadians( gradient->angle)))) / gradient->radii.x; v.y = (double)(((x - gradient->center.x) * sin(DegreesToRadians( gradient->angle))) - ((y - gradient->center.y) * cos(DegreesToRadians( gradient->angle)))) / gradient->radii.y; return (sqrt(v.x * v.x + v.y * v.y)); } } return (0.0); } static int StopInfoCompare(const void *x, const void *y) { StopInfo * stop_1, *stop_2; stop_1 = (StopInfo *) x; stop_2 = (StopInfo *) y; if (stop_1->offset > stop_2->offset) return (1); if (fabs(stop_1->offset - stop_2->offset) <= DrawEpsilon) return (0); return (-1); } MagickExport MagickBooleanType DrawGradientImage(Image * image, const DrawInfo * draw_info, ExceptionInfo * exception) { CacheView * image_view; const GradientInfo * gradient; const SegmentInfo * gradient_vector; double length; MagickBooleanType status; PixelInfo zero; PointInfo point; RectangleInfo bounding_box; ssize_t y; /* * Draw linear or radial gradient on image. 
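* The color stops are first sorted by offset.  For each pixel the offset
* along the gradient vector (or the radial distance) is computed by
* GetStopColorOffset(), mapped through the pad, reflect, or repeat spread
* method, and the two bracketing stops are blended with
* CompositePixelInfoBlend() before being composited over the existing pixel.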
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(draw_info != (const DrawInfo *)NULL); gradient = (&draw_info->gradient); qsort(gradient->stops, gradient->number_stops, sizeof(StopInfo), StopInfoCompare); gradient_vector = (&gradient->gradient_vector); point.x = gradient_vector->x2 - gradient_vector->x1; point.y = gradient_vector->y2 - gradient_vector->y1; length = sqrt(point.x * point.x + point.y * point.y); bounding_box = gradient->bounding_box; status = MagickTrue; GetPixelInfo(image, &zero); image_view = AcquireAuthenticCacheView(image, exception); for (y = bounding_box.y; y < (ssize_t) bounding_box.height; y++) { PixelInfo composite, pixel; double alpha, offset; register Quantum * magick_restrict q; register ssize_t i, x; ssize_t j; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } pixel = zero; composite = zero; offset = GetStopColorOffset(gradient, 0, y); if (gradient->type != RadialGradient) offset /= length; for (x = bounding_box.x; x < (ssize_t) bounding_box.width; x++) { GetPixelInfoPixel(image, q, &pixel); switch (gradient->spread) { case UndefinedSpread: case PadSpread: { if ((x != (ssize_t) ceil(gradient_vector->x1 - 0.5)) || (y != (ssize_t) ceil(gradient_vector->y1 - 0.5))) { offset = GetStopColorOffset(gradient, x, y); if (gradient->type != RadialGradient) offset /= length; } for (i = 0; i < (ssize_t) gradient->number_stops; i++) if (offset < gradient->stops[i].offset) break; if ((offset < 0.0) || (i == 0)) composite = gradient->stops[0].color; else if ((offset > 1.0) || (i == (ssize_t) gradient->number_stops)) composite = gradient->stops[gradient->number_stops - 1].color; else { j = i; i--; alpha = (offset - gradient->stops[i].offset) / (gradient->stops[j].offset - gradient->stops[i].offset); CompositePixelInfoBlend(&gradient->stops[i].color, 1.0 - alpha, &gradient->stops[j].color, alpha, &composite); } break; } case ReflectSpread: { if ((x != (ssize_t) ceil(gradient_vector->x1 - 0.5)) || (y != (ssize_t) ceil(gradient_vector->y1 - 0.5))) { offset = GetStopColorOffset(gradient, x, y); if (gradient->type != RadialGradient) offset /= length; } if (offset < 0.0) offset = (-offset); if ((ssize_t) fmod(offset, 2.0) == 0) offset = fmod(offset, 1.0); else offset = 1.0 - fmod(offset, 1.0); for (i = 0; i < (ssize_t) gradient->number_stops; i++) if (offset < gradient->stops[i].offset) break; if (i == 0) composite = gradient->stops[0].color; else if (i == (ssize_t) gradient->number_stops) composite = gradient->stops[gradient->number_stops - 1].color; else { j = i; i--; alpha = (offset - gradient->stops[i].offset) / (gradient->stops[j].offset - gradient->stops[i].offset); CompositePixelInfoBlend(&gradient->stops[i].color, 1.0 - alpha, &gradient->stops[j].color, alpha, &composite); } break; } case RepeatSpread: { MagickBooleanType antialias; double repeat; antialias = MagickFalse; repeat = 0.0; if ((x != (ssize_t) ceil(gradient_vector->x1 - 0.5)) || (y != (ssize_t) ceil(gradient_vector->y1 - 0.5))) { offset = GetStopColorOffset(gradient, x, y); if (gradient->type == LinearGradient) { repeat = fmod(offset, length); if (repeat < 0.0) repeat = length - fmod(-repeat, length); else repeat = fmod(offset, length); antialias = (repeat < length) && ((repeat + 1.0) > length) ? 
MagickTrue : MagickFalse; offset = repeat / length; } else { repeat = fmod(offset, gradient->radius); if (repeat < 0.0) repeat = gradient->radius - fmod(-repeat, gradient->radius); else repeat = fmod(offset, gradient->radius); antialias = repeat + 1.0 > gradient->radius ? MagickTrue : MagickFalse; offset = repeat / gradient->radius; } } for (i = 0; i < (ssize_t) gradient->number_stops; i++) if (offset < gradient->stops[i].offset) break; if (i == 0) composite = gradient->stops[0].color; else if (i == (ssize_t) gradient->number_stops) composite = gradient->stops[gradient->number_stops - 1].color; else { j = i; i--; alpha = (offset - gradient->stops[i].offset) / (gradient->stops[j].offset - gradient->stops[i].offset); if (antialias != MagickFalse) { if (gradient->type == LinearGradient) alpha = length - repeat; else alpha = gradient->radius - repeat; i = 0; j = (ssize_t) gradient->number_stops - 1L; } CompositePixelInfoBlend(&gradient->stops[i].color, 1.0 - alpha, &gradient->stops[j].color, alpha, &composite); } break; } } CompositePixelInfoOver(&composite, composite.alpha, &pixel, pixel.alpha, &pixel); SetPixelViaPixelInfo(image, &pixel, q); q += GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; } image_view = DestroyCacheView(image_view); return (status); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % D r a w P a t t e r n P a t h * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % DrawPatternPath() draws a pattern. % % The format of the * DrawPatternPath method is: % % MagickBooleanType * DrawPatternPath(Image *image,const DrawInfo *draw_info, % const * char *name,Image **pattern,ExceptionInfo *exception) % % A description of * each parameter follows: % % o image: the image. % % o draw_info: the * draw info. % % o name: the pattern name. % % o image: the image. % % * o exception: return any errors or warnings in this structure. 
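%
%  A minimal usage sketch (illustrative only; the artifact name "hatch" and
%  its associated "-geometry" artifact are assumed to have been registered
%  beforehand, as DrawImage() does when it encounters a "push pattern" block):
%
%    Image *pattern = (Image *) NULL;
%    if (DrawPatternPath(image,draw_info,"hatch",&pattern,exception) != MagickFalse)
%      {
%        ... use pattern as a fill or stroke tile ...
%        pattern=DestroyImage(pattern);
%      }
%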
% */ MagickExport MagickBooleanType DrawPatternPath(Image * image, const DrawInfo * draw_info, const char *name, Image ** pattern, ExceptionInfo * exception) { char property[MagickPathExtent]; const char *geometry, *path, *type; DrawInfo * clone_info; ImageInfo * image_info; MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(draw_info != (const DrawInfo *)NULL); assert(name != (const char *)NULL); (void)FormatLocaleString(property, MagickPathExtent, "%s", name); path = GetImageArtifact(image, property); if (path == (const char *)NULL) return (MagickFalse); (void)FormatLocaleString(property, MagickPathExtent, "%s-geometry", name); geometry = GetImageArtifact(image, property); if (geometry == (const char *)NULL) return (MagickFalse); if ((*pattern) != (Image *) NULL) *pattern = DestroyImage(*pattern); image_info = AcquireImageInfo(); image_info->size = AcquireString(geometry); *pattern = AcquireImage(image_info, exception); image_info = DestroyImageInfo(image_info); (void)QueryColorCompliance("#000000ff", AllCompliance, &(*pattern)->background_color, exception); (void)SetImageBackgroundColor(*pattern, exception); if (image->debug != MagickFalse) (void)LogMagickEvent(DrawEvent, GetMagickModule(), "begin pattern-path %s %s", name, geometry); clone_info = CloneDrawInfo((ImageInfo *) NULL, draw_info); clone_info->fill_pattern = NewImageList(); clone_info->stroke_pattern = NewImageList(); (void)FormatLocaleString(property, MagickPathExtent, "%s-type", name); type = GetImageArtifact(image, property); if (type != (const char *)NULL) clone_info->gradient.type = (GradientType) ParseCommandOption( MagickGradientOptions, MagickFalse, type); (void)CloneString(&clone_info->primitive, path); status = DrawImage(*pattern, clone_info, exception); clone_info = DestroyDrawInfo(clone_info); if (image->debug != MagickFalse) (void)LogMagickEvent(DrawEvent, GetMagickModule(), "end pattern-path"); return (status); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % + D r a w P o l y g o n P r i m i t i v e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % DrawPolygonPrimitive() draws a polygon on the image. % % The * format of the DrawPolygonPrimitive method is: % % MagickBooleanType * DrawPolygonPrimitive(Image *image, % const DrawInfo * *draw_info,const PrimitiveInfo *primitive_info, % ExceptionInfo * *exception) % % A description of each parameter follows: % % o image: * the image. % % o draw_info: the draw info. % % o primitive_info: * Specifies a pointer to a PrimitiveInfo structure. % % o exception: * return any errors or warnings in this structure. 
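%
%  Note: the polygon is rasterized one scanline at a time.  A per-thread set
%  of PolygonInfo structures (see AcquirePolygonThreadSet() below) gives each
%  row its own edge list, and GetFillAlpha() supplies the antialiased fill
%  and stroke coverage for every pixel in the row.
%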
% */ static PolygonInfo ** DestroyPolygonThreadSet(PolygonInfo ** polygon_info) { register ssize_t i; assert(polygon_info != (PolygonInfo **) NULL); for (i = 0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (polygon_info[i] != (PolygonInfo *) NULL) polygon_info[i] = DestroyPolygonInfo(polygon_info[i]); polygon_info = (PolygonInfo **) RelinquishMagickMemory(polygon_info); return (polygon_info); } static PolygonInfo ** AcquirePolygonThreadSet( const PrimitiveInfo * primitive_info) { PathInfo * magick_restrict path_info; PolygonInfo ** polygon_info; register ssize_t i; size_t number_threads; number_threads = (size_t) GetMagickResourceLimit(ThreadResource); polygon_info = (PolygonInfo **) AcquireQuantumMemory(number_threads, sizeof(*polygon_info)); if (polygon_info == (PolygonInfo **) NULL) return ((PolygonInfo **) NULL); (void)ResetMagickMemory(polygon_info, 0, number_threads * sizeof(*polygon_info)); path_info = ConvertPrimitiveToPath(primitive_info); if (path_info == (PathInfo *) NULL) return (DestroyPolygonThreadSet(polygon_info)); for (i = 0; i < (ssize_t) number_threads; i++) { polygon_info[i] = ConvertPathToPolygon(path_info); if (polygon_info[i] == (PolygonInfo *) NULL) return (DestroyPolygonThreadSet(polygon_info)); } path_info = (PathInfo *) RelinquishMagickMemory(path_info); return (polygon_info); } static double GetFillAlpha(PolygonInfo * polygon_info, const double mid, const MagickBooleanType fill, const FillRule fill_rule, const ssize_t x, const ssize_t y, double *stroke_alpha) { double alpha, beta, distance, subpath_alpha; PointInfo delta; register const PointInfo * q; register EdgeInfo * p; register ssize_t i; ssize_t j, winding_number; /* * Compute fill & stroke opacity for this (x,y) point. */ *stroke_alpha = 0.0; subpath_alpha = 0.0; p = polygon_info->edges; for (j = 0; j < (ssize_t) polygon_info->number_edges; j++, p++) { if ((double)y <= (p->bounds.y1 - mid - 0.5)) break; if ((double)y > (p->bounds.y2 + mid + 0.5)) { (void)DestroyEdge(polygon_info, (size_t) j); continue; } if (((double)x <= (p->bounds.x1 - mid - 0.5)) || ((double)x > (p->bounds.x2 + mid + 0.5))) continue; i = (ssize_t) MagickMax((double)p->highwater, 1.0); for (; i < (ssize_t) p->number_points; i++) { if ((double)y <= (p->points[i - 1].y - mid - 0.5)) break; if ((double)y > (p->points[i].y + mid + 0.5)) continue; if (p->scanline != (double)y) { p->scanline = (double)y; p->highwater = (size_t) i; } /* * Compute distance between a point and an edge. */ q = p->points + i - 1; delta.x = (q + 1)->x - q->x; delta.y = (q + 1)->y - q->y; beta = delta.x * (x - q->x) + delta.y * (y - q->y); if (beta < 0.0) { delta.x = (double)x - q->x; delta.y = (double)y - q->y; distance = delta.x * delta.x + delta.y * delta.y; } else { alpha = delta.x * delta.x + delta.y * delta.y; if (beta > alpha) { delta.x = (double)x - (q + 1)->x; delta.y = (double)y - (q + 1)->y; distance = delta.x * delta.x + delta.y * delta.y; } else { alpha = 1.0 / alpha; beta = delta.x * (y - q->y) - delta.y * (x - q->x); distance = alpha * beta * beta; } } /* * Compute stroke & subpath opacity. 
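* Distances computed here are squared point-to-edge distances.  A pixel well
* inside the stroke band (within about mid-0.5 of an edge) receives full
* stroke coverage; between that and roughly mid+0.5 the coverage is derived
* from the square root of the distance, which antialiases the stroke
* boundary.  The subpath term accumulated alongside uses a unit-wide band
* and supplies the antialiased fill coverage near edges.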
*/ beta = 0.0; if (p->ghostline == MagickFalse) { alpha = mid + 0.5; if ((*stroke_alpha < 1.0) && (distance <= ((alpha + 0.25) * (alpha + 0.25)))) { alpha = mid - 0.5; if (distance <= ((alpha + 0.25) * (alpha + 0.25))) *stroke_alpha = 1.0; else { beta = 1.0; if (fabs(distance - 1.0) >= DrawEpsilon) beta = sqrt((double)distance); alpha = beta - mid - 0.5; if (*stroke_alpha < ((alpha - 0.25) * (alpha - 0.25))) *stroke_alpha = (alpha - 0.25) * (alpha - 0.25); } } } if ((fill == MagickFalse) || (distance > 1.0) || (subpath_alpha >= 1.0)) continue; if (distance <= 0.0) { subpath_alpha = 1.0; continue; } if (distance > 1.0) continue; if (fabs(beta) < DrawEpsilon) { beta = 1.0; if (fabs(distance - 1.0) >= DrawEpsilon) beta = sqrt(distance); } alpha = beta - 1.0; if (subpath_alpha < (alpha * alpha)) subpath_alpha = alpha * alpha; } } /* * Compute fill opacity. */ if (fill == MagickFalse) return (0.0); if (subpath_alpha >= 1.0) return (1.0); /* * Determine winding number. */ winding_number = 0; p = polygon_info->edges; for (j = 0; j < (ssize_t) polygon_info->number_edges; j++, p++) { if ((double)y <= p->bounds.y1) break; if (((double)y > p->bounds.y2) || ((double)x <= p->bounds.x1)) continue; if ((double)x > p->bounds.x2) { winding_number += p->direction ? 1 : -1; continue; } i = (ssize_t) MagickMax((double)p->highwater, 1.0); for (; i < (ssize_t) p->number_points; i++) if ((double)y <= p->points[i].y) break; q = p->points + i - 1; if ((((q + 1)->x - q->x) * (y - q->y)) <= (((q + 1)->y - q->y) * (x - q->x))) winding_number += p->direction ? 1 : -1; } if (fill_rule != NonZeroRule) { if ((MagickAbsoluteValue(winding_number) & 0x01) != 0) return (1.0); } else if (MagickAbsoluteValue(winding_number) != 0) return (1.0); return (subpath_alpha); } static MagickBooleanType DrawPolygonPrimitive(Image * image, const DrawInfo * draw_info, const PrimitiveInfo * primitive_info, ExceptionInfo * exception) { CacheView * image_view; MagickBooleanType fill, status; double mid; PolygonInfo ** magick_restrict polygon_info; register EdgeInfo * p; register ssize_t i; SegmentInfo bounds; ssize_t start_y, stop_y, y; /* * Compute bounding box. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(draw_info != (DrawInfo *) NULL); assert(draw_info->signature == MagickCoreSignature); assert(primitive_info != (PrimitiveInfo *) NULL); if (primitive_info->coordinates == 0) return (MagickTrue); polygon_info = AcquirePolygonThreadSet(primitive_info); if (polygon_info == (PolygonInfo **) NULL) return (MagickFalse); DisableMSCWarning(4127) if (0) DrawBoundingRectangles(image, draw_info, polygon_info[0], exception); RestoreMSCWarning if (image->debug != MagickFalse) (void)LogMagickEvent(DrawEvent, GetMagickModule(), " begin draw-polygon"); fill = (primitive_info->method == FillToBorderMethod) || (primitive_info->method == FloodfillMethod) ? MagickTrue : MagickFalse; mid = ExpandAffine(&draw_info->affine) * draw_info->stroke_width / 2.0; bounds = polygon_info[0]->edges[0].bounds; for (i = 1; i < (ssize_t) polygon_info[0]->number_edges; i++) { p = polygon_info[0]->edges + i; if (p->bounds.x1 < bounds.x1) bounds.x1 = p->bounds.x1; if (p->bounds.y1 < bounds.y1) bounds.y1 = p->bounds.y1; if (p->bounds.x2 > bounds.x2) bounds.x2 = p->bounds.x2; if (p->bounds.y2 > bounds.y2) bounds.y2 = p->bounds.y2; } bounds.x1 -= (mid + 1.0); bounds.x1 = bounds.x1 < 0.0 ? 
0.0 : (size_t) ceil(bounds.x1 - 0.5) >= image->columns ? (double)image->columns - 1 : bounds.x1; bounds.y1 -= (mid + 1.0); bounds.y1 = bounds.y1 < 0.0 ? 0.0 : (size_t) ceil(bounds.y1 - 0.5) >= image->rows ? (double)image->rows - 1 : bounds.y1; bounds.x2 += (mid + 1.0); bounds.x2 = bounds.x2 < 0.0 ? 0.0 : (size_t) floor(bounds.x2 + 0.5) >= image->columns ? (double)image->columns - 1 : bounds.x2; bounds.y2 += (mid + 1.0); bounds.y2 = bounds.y2 < 0.0 ? 0.0 : (size_t) floor(bounds.y2 + 0.5) >= image->rows ? (double)image->rows - 1 : bounds.y2; status = MagickTrue; image_view = AcquireAuthenticCacheView(image, exception); if ((primitive_info->coordinates == 1) || (polygon_info[0]->number_edges == 0)) { /* * Draw point. */ start_y = (ssize_t) ceil(bounds.y1 - 0.5); stop_y = (ssize_t) floor(bounds.y2 + 0.5); for (y = start_y; y <= stop_y; y++) { MagickBooleanType sync; PixelInfo pixel; register ssize_t x; register Quantum * magick_restrict q; ssize_t start_x, stop_x; if (status == MagickFalse) continue; start_x = (ssize_t) ceil(bounds.x1 - 0.5); stop_x = (ssize_t) floor(bounds.x2 + 0.5); x = start_x; q = GetCacheViewAuthenticPixels(image_view, x, y, (size_t) (stop_x - x + 1), 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } GetPixelInfo(image, &pixel); for (; x <= stop_x; x++) { if ((x == (ssize_t) ceil(primitive_info->point.x - 0.5)) && (y == (ssize_t) ceil(primitive_info->point.y - 0.5))) { GetFillColor(draw_info, x - start_x, y - start_y, &pixel, exception); SetPixelViaPixelInfo(image, &pixel, q); } q += GetPixelChannels(image); } sync = SyncCacheViewAuthenticPixels(image_view, exception); if (sync == MagickFalse) status = MagickFalse; } image_view = DestroyCacheView(image_view); polygon_info = DestroyPolygonThreadSet(polygon_info); if (image->debug != MagickFalse) (void)LogMagickEvent(DrawEvent, GetMagickModule(), " end draw-polygon"); return (status); } /* * Draw polygon or line. */ if (image->alpha_trait == UndefinedPixelTrait) (void)SetImageAlphaChannel(image, OpaqueAlphaChannel, exception); start_y = (ssize_t) ceil(bounds.y1 - 0.5); stop_y = (ssize_t) floor(bounds.y2 + 0.5); for (y = start_y; y <= stop_y; y++) { const int id = GetOpenMPThreadId(); double fill_alpha, stroke_alpha; PixelInfo fill_color, stroke_color; register Quantum * magick_restrict q; register ssize_t x; ssize_t start_x, stop_x; if (status == MagickFalse) continue; start_x = (ssize_t) ceil(bounds.x1 - 0.5); stop_x = (ssize_t) floor(bounds.x2 + 0.5); q = GetCacheViewAuthenticPixels(image_view, start_x, y, (size_t) (stop_x - start_x + 1), 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } for (x = start_x; x <= stop_x; x++) { /* * Fill and/or stroke. */ fill_alpha = GetFillAlpha(polygon_info[id], mid, fill, draw_info->fill_rule, x, y, &stroke_alpha); if (draw_info->stroke_antialias == MagickFalse) { fill_alpha = fill_alpha > 0.25 ? 1.0 : 0.0; stroke_alpha = stroke_alpha > 0.25 ? 
1.0 : 0.0; } GetFillColor(draw_info, x - start_x, y - start_y, &fill_color, exception); fill_alpha = fill_alpha * fill_color.alpha; CompositePixelOver(image, &fill_color, fill_alpha, q, (double) GetPixelAlpha(image, q), q); GetStrokeColor(draw_info, x - start_x, y - start_y, &stroke_color, exception); stroke_alpha = stroke_alpha * stroke_color.alpha; CompositePixelOver(image, &stroke_color, stroke_alpha, q, (double) GetPixelAlpha(image, q), q); q += GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; } image_view = DestroyCacheView(image_view); polygon_info = DestroyPolygonThreadSet(polygon_info); if (image->debug != MagickFalse) (void)LogMagickEvent(DrawEvent, GetMagickModule(), " end draw-polygon"); return (status); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % D r a w P r i m i t i v e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % DrawPrimitive() draws a primitive (line, rectangle, ellipse) on * the image. % % The format of the DrawPrimitive method is: % % * MagickBooleanType DrawPrimitive(Image *image,const DrawInfo *draw_info, % * PrimitiveInfo *primitive_info,ExceptionInfo *exception) % % A description * of each parameter follows: % % o image: the image. % % o draw_info: * the draw info. % % o primitive_info: Specifies a pointer to a * PrimitiveInfo structure. % % o exception: return any errors or warnings * in this structure. % */ static void LogPrimitiveInfo(const PrimitiveInfo * primitive_info) { const char *methods[] = { "point", "replace", "floodfill", "filltoborder", "reset", "?" }; PointInfo p, q, point; register ssize_t i, x; ssize_t coordinates, y; x = (ssize_t) ceil(primitive_info->point.x - 0.5); y = (ssize_t) ceil(primitive_info->point.y - 0.5); switch (primitive_info->primitive) { case AlphaPrimitive: { (void)LogMagickEvent(DrawEvent, GetMagickModule(), "AlphaPrimitive %.20g,%.20g %s", (double)x, (double)y, methods[primitive_info->method]); return; } case ColorPrimitive: { (void)LogMagickEvent(DrawEvent, GetMagickModule(), "ColorPrimitive %.20g,%.20g %s", (double)x, (double)y, methods[primitive_info->method]); return; } case ImagePrimitive: { (void)LogMagickEvent(DrawEvent, GetMagickModule(), "ImagePrimitive %.20g,%.20g", (double)x, (double)y); return; } case PointPrimitive: { (void)LogMagickEvent(DrawEvent, GetMagickModule(), "PointPrimitive %.20g,%.20g %s", (double)x, (double)y, methods[primitive_info->method]); return; } case TextPrimitive: { (void)LogMagickEvent(DrawEvent, GetMagickModule(), "TextPrimitive %.20g,%.20g", (double)x, (double)y); return; } default: break; } coordinates = 0; p = primitive_info[0].point; q.x = (-1.0); q.y = (-1.0); for (i = 0; primitive_info[i].primitive != UndefinedPrimitive; i++) { point = primitive_info[i].point; if (coordinates <= 0) { coordinates = (ssize_t) primitive_info[i].coordinates; (void)LogMagickEvent(DrawEvent, GetMagickModule(), " begin open (%.20g)", (double)coordinates); p = point; } point = primitive_info[i].point; if ((fabs(q.x - point.x) >= DrawEpsilon) || (fabs(q.y - point.y) >= DrawEpsilon)) (void)LogMagickEvent(DrawEvent, GetMagickModule(), " %.20g: %.18g,%.18g", (double)coordinates, point.x, point.y); else (void)LogMagickEvent(DrawEvent, GetMagickModule(), " %.20g: %g %g (duplicate)", (double)coordinates, point.x, point.y); q = point; coordinates--; if (coordinates > 0) continue; if ((fabs(p.x - point.x) >= DrawEpsilon) 
|| (fabs(p.y - point.y) >= DrawEpsilon)) (void)LogMagickEvent(DrawEvent, GetMagickModule(), " end last (%.20g)", (double)coordinates); else (void)LogMagickEvent(DrawEvent, GetMagickModule(), " end open (%.20g)", (double)coordinates); } } MagickExport MagickBooleanType DrawPrimitive(Image * image, const DrawInfo * draw_info, const PrimitiveInfo * primitive_info, ExceptionInfo * exception) { CacheView * image_view; MagickStatusType status; register ssize_t i, x; ssize_t y; if (image->debug != MagickFalse) { (void)LogMagickEvent(DrawEvent, GetMagickModule(), " begin draw-primitive"); (void)LogMagickEvent(DrawEvent, GetMagickModule(), " affine: %g,%g,%g,%g,%g,%g", draw_info->affine.sx, draw_info->affine.rx, draw_info->affine.ry, draw_info->affine.sy, draw_info->affine.tx, draw_info->affine.ty); } if ((IsGrayColorspace(image->colorspace) != MagickFalse) && ((IsPixelInfoGray(&draw_info->fill) == MagickFalse) || (IsPixelInfoGray(&draw_info->stroke) == MagickFalse))) (void)SetImageColorspace(image, sRGBColorspace, exception); status = MagickTrue; x = (ssize_t) ceil(primitive_info->point.x - 0.5); y = (ssize_t) ceil(primitive_info->point.y - 0.5); image_view = AcquireAuthenticCacheView(image, exception); switch (primitive_info->primitive) { case AlphaPrimitive: { if (image->alpha_trait == UndefinedPixelTrait) (void)SetImageAlphaChannel(image, OpaqueAlphaChannel, exception); switch (primitive_info->method) { case PointMethod: default: { PixelInfo pixel; register Quantum * q; q = GetCacheViewAuthenticPixels(image_view, x, y, 1, 1, exception); if (q == (Quantum *) NULL) break; GetFillColor(draw_info, x, y, &pixel, exception); SetPixelAlpha(image, ClampToQuantum(pixel.alpha), q); (void)SyncCacheViewAuthenticPixels(image_view, exception); break; } case ReplaceMethod: { MagickBooleanType sync; PixelInfo pixel, target; (void)GetOneCacheViewVirtualPixelInfo(image_view, x, y, &target, exception); GetPixelInfo(image, &pixel); for (y = 0; y < (ssize_t) image->rows; y++) { register Quantum * magick_restrict q; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) break; for (x = 0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image, q, &pixel); if (IsFuzzyEquivalencePixelInfo(&pixel, &target) == MagickFalse) { q += GetPixelChannels(image); continue; } GetFillColor(draw_info, x, y, &pixel, exception); SetPixelAlpha(image, ClampToQuantum(pixel.alpha), q); q += GetPixelChannels(image); } sync = SyncCacheViewAuthenticPixels(image_view, exception); if (sync == MagickFalse) break; } break; } case FloodfillMethod: case FillToBorderMethod: { ChannelType channel_mask; PixelInfo target; (void)GetOneVirtualPixelInfo(image, TileVirtualPixelMethod, x, y, &target, exception); if (primitive_info->method == FillToBorderMethod) { target.red = (double)draw_info->border_color.red; target.green = (double)draw_info->border_color.green; target.blue = (double)draw_info->border_color.blue; } channel_mask = SetImageChannelMask(image, AlphaChannel); status &= FloodfillPaintImage(image, draw_info, &target, x, y, primitive_info->method == FloodfillMethod ? 
MagickFalse : MagickTrue, exception); (void)SetImageChannelMask(image, channel_mask); break; } case ResetMethod: { MagickBooleanType sync; PixelInfo pixel; for (y = 0; y < (ssize_t) image->rows; y++) { register Quantum * magick_restrict q; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) break; for (x = 0; x < (ssize_t) image->columns; x++) { GetFillColor(draw_info, x, y, &pixel, exception); SetPixelAlpha(image, ClampToQuantum(pixel.alpha), q); q += GetPixelChannels(image); } sync = SyncCacheViewAuthenticPixels(image_view, exception); if (sync == MagickFalse) break; } break; } } break; } case ColorPrimitive: { switch (primitive_info->method) { case PointMethod: default: { PixelInfo pixel; register Quantum * q; q = GetCacheViewAuthenticPixels(image_view, x, y, 1, 1, exception); if (q == (Quantum *) NULL) break; GetPixelInfo(image, &pixel); GetFillColor(draw_info, x, y, &pixel, exception); SetPixelViaPixelInfo(image, &pixel, q); (void)SyncCacheViewAuthenticPixels(image_view, exception); break; } case ReplaceMethod: { MagickBooleanType sync; PixelInfo pixel, target; (void)GetOneCacheViewVirtualPixelInfo(image_view, x, y, &target, exception); for (y = 0; y < (ssize_t) image->rows; y++) { register Quantum * magick_restrict q; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) break; for (x = 0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image, q, &pixel); if (IsFuzzyEquivalencePixelInfo(&pixel, &target) == MagickFalse) { q += GetPixelChannels(image); continue; } GetFillColor(draw_info, x, y, &pixel, exception); SetPixelViaPixelInfo(image, &pixel, q); q += GetPixelChannels(image); } sync = SyncCacheViewAuthenticPixels(image_view, exception); if (sync == MagickFalse) break; } break; } case FloodfillMethod: case FillToBorderMethod: { PixelInfo target; (void)GetOneVirtualPixelInfo(image, TileVirtualPixelMethod, x, y, &target, exception); if (primitive_info->method == FillToBorderMethod) { target.red = (double)draw_info->border_color.red; target.green = (double)draw_info->border_color.green; target.blue = (double)draw_info->border_color.blue; } status &= FloodfillPaintImage(image, draw_info, &target, x, y, primitive_info->method == FloodfillMethod ? 
MagickFalse : MagickTrue, exception); break; } case ResetMethod: { MagickBooleanType sync; PixelInfo pixel; GetPixelInfo(image, &pixel); for (y = 0; y < (ssize_t) image->rows; y++) { register Quantum * magick_restrict q; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) break; for (x = 0; x < (ssize_t) image->columns; x++) { GetFillColor(draw_info, x, y, &pixel, exception); SetPixelViaPixelInfo(image, &pixel, q); q += GetPixelChannels(image); } sync = SyncCacheViewAuthenticPixels(image_view, exception); if (sync == MagickFalse) break; } break; } } break; } case ImagePrimitive: { AffineMatrix affine; char composite_geometry[MagickPathExtent]; Image * composite_image; ImageInfo * clone_info; RectangleInfo geometry; ssize_t x1, y1; if (primitive_info->text == (char *)NULL) break; clone_info = AcquireImageInfo(); if (LocaleNCompare(primitive_info->text, "data:", 5) == 0) composite_image = ReadInlineImage(clone_info, primitive_info->text, exception); else { (void)CopyMagickString(clone_info->filename, primitive_info->text, MagickPathExtent); composite_image = ReadImage(clone_info, exception); } clone_info = DestroyImageInfo(clone_info); if (composite_image == (Image *) NULL) break; (void)SetImageProgressMonitor(composite_image, (MagickProgressMonitor) NULL, (void *)NULL); x1 = (ssize_t) ceil(primitive_info[1].point.x - 0.5); y1 = (ssize_t) ceil(primitive_info[1].point.y - 0.5); if (((x1 != 0L) && (x1 != (ssize_t) composite_image->columns)) || ((y1 != 0L) && (y1 != (ssize_t) composite_image->rows))) { /* * Resize image. */ (void)FormatLocaleString(composite_geometry, MagickPathExtent, "%gx%g!", primitive_info[1].point.x, primitive_info[1].point.y); composite_image->filter = image->filter; (void)TransformImage(&composite_image, (char *)NULL, composite_geometry, exception); } if (composite_image->alpha_trait == UndefinedPixelTrait) (void)SetImageAlphaChannel(composite_image, OpaqueAlphaChannel, exception); if (draw_info->alpha != OpaqueAlpha) (void)SetImageAlpha(composite_image, draw_info->alpha, exception); SetGeometry(image, &geometry); image->gravity = draw_info->gravity; geometry.x = x; geometry.y = y; (void)FormatLocaleString(composite_geometry, MagickPathExtent, "%.20gx%.20g%+.20g%+.20g", (double)composite_image->columns, (double) composite_image->rows, (double)geometry.x, (double)geometry.y); (void)ParseGravityGeometry(image, composite_geometry, &geometry, exception); affine = draw_info->affine; affine.tx = (double)geometry.x; affine.ty = (double)geometry.y; composite_image->interpolate = image->interpolate; if (draw_info->compose == OverCompositeOp) (void)DrawAffineImage(image, composite_image, &affine, exception); else (void)CompositeImage(image, composite_image, draw_info->compose, MagickTrue, geometry.x, geometry.y, exception); composite_image = DestroyImage(composite_image); break; } case PointPrimitive: { PixelInfo fill_color; register Quantum * q; if ((y < 0) || (y >= (ssize_t) image->rows)) break; if ((x < 0) || (x >= (ssize_t) image->columns)) break; q = GetCacheViewAuthenticPixels(image_view, x, y, 1, 1, exception); if (q == (Quantum *) NULL) break; GetFillColor(draw_info, x, y, &fill_color, exception); CompositePixelOver(image, &fill_color, (double)fill_color.alpha, q, (double)GetPixelAlpha(image, q), q); (void)SyncCacheViewAuthenticPixels(image_view, exception); break; } case TextPrimitive: { char geometry[MagickPathExtent]; DrawInfo * clone_info; if (primitive_info->text == (char *)NULL) break; clone_info = 
CloneDrawInfo((ImageInfo *) NULL, draw_info); (void)CloneString(&clone_info->text, primitive_info->text); (void)FormatLocaleString(geometry, MagickPathExtent, "%+f%+f", primitive_info->point.x, primitive_info->point.y); (void)CloneString(&clone_info->geometry, geometry); status &= AnnotateImage(image, clone_info, exception); clone_info = DestroyDrawInfo(clone_info); break; } default: { double mid, scale; DrawInfo * clone_info; if (IsEventLogging() != MagickFalse) LogPrimitiveInfo(primitive_info); scale = ExpandAffine(&draw_info->affine); if ((draw_info->dash_pattern != (double *)NULL) && (fabs(draw_info->dash_pattern[0]) >= DrawEpsilon) && (fabs(scale * draw_info->stroke_width) >= DrawEpsilon) && (draw_info->stroke.alpha != (Quantum) TransparentAlpha)) { /* * Draw dash polygon. */ clone_info = CloneDrawInfo((ImageInfo *) NULL, draw_info); clone_info->stroke_width = 0.0; clone_info->stroke.alpha = (MagickRealType) TransparentAlpha; status &= DrawPolygonPrimitive(image, clone_info, primitive_info, exception); clone_info = DestroyDrawInfo(clone_info); (void)DrawDashPolygon(draw_info, primitive_info, image, exception); break; } mid = ExpandAffine(&draw_info->affine) * draw_info->stroke_width / 2.0; if ((mid > 1.0) && ((draw_info->stroke.alpha != (Quantum) TransparentAlpha) || (draw_info->stroke_pattern != (Image *) NULL))) { MagickBooleanType closed_path; /* * Draw strokes while respecting line cap/join attributes. */ for (i = 0; primitive_info[i].primitive != UndefinedPrimitive; i++); closed_path = (fabs(primitive_info[i - 1].point.x - primitive_info[0].point.x) < DrawEpsilon) && (fabs(primitive_info[i - 1].point.y - primitive_info[0].point.y) < DrawEpsilon) ? MagickTrue : MagickFalse; i = (ssize_t) primitive_info[0].coordinates; if (((closed_path != MagickFalse) && (draw_info->linejoin == RoundJoin)) || (primitive_info[i].primitive != UndefinedPrimitive)) { (void)DrawPolygonPrimitive(image, draw_info, primitive_info, exception); break; } if (draw_info->linecap == RoundCap) { (void)DrawPolygonPrimitive(image, draw_info, primitive_info, exception); break; } clone_info = CloneDrawInfo((ImageInfo *) NULL, draw_info); clone_info->stroke_width = 0.0; clone_info->stroke.alpha = (MagickRealType) TransparentAlpha; status &= DrawPolygonPrimitive(image, clone_info, primitive_info, exception); clone_info = DestroyDrawInfo(clone_info); status &= DrawStrokePolygon(image, draw_info, primitive_info, exception); break; } status &= DrawPolygonPrimitive(image, draw_info, primitive_info, exception); break; } } image_view = DestroyCacheView(image_view); if (image->debug != MagickFalse) (void)LogMagickEvent(DrawEvent, GetMagickModule(), " end draw-primitive"); return (status != 0 ? MagickTrue : MagickFalse); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % + D r a w S t r o k e P o l y g o n * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % DrawStrokePolygon() draws a stroked polygon (line, rectangle, * ellipse) on % the image while respecting the line cap and join * attributes. % % The format of the DrawStrokePolygon method is: % % * MagickBooleanType DrawStrokePolygon(Image *image, % const DrawInfo * *draw_info,const PrimitiveInfo *primitive_info) % % A description of each * parameter follows: % % o image: the image. % % o draw_info: the draw * info. % % o primitive_info: Specifies a pointer to a PrimitiveInfo * structure. 
% % */ static void DrawRoundLinecap(Image * image, const DrawInfo * draw_info, const PrimitiveInfo * primitive_info, ExceptionInfo * exception) { PrimitiveInfo linecap[5]; register ssize_t i; for (i = 0; i < 4; i++) linecap[i] = (*primitive_info); linecap[0].coordinates = 4; linecap[1].point.x += 2.0 * DrawEpsilon; linecap[2].point.x += 2.0 * DrawEpsilon; linecap[2].point.y += 2.0 * DrawEpsilon; linecap[3].point.y += 2.0 * DrawEpsilon; linecap[4].primitive = UndefinedPrimitive; (void)DrawPolygonPrimitive(image, draw_info, linecap, exception); } static MagickBooleanType DrawStrokePolygon(Image * image, const DrawInfo * draw_info, const PrimitiveInfo * primitive_info, ExceptionInfo * exception) { DrawInfo * clone_info; MagickBooleanType closed_path; MagickStatusType status; PrimitiveInfo * stroke_polygon; register const PrimitiveInfo * p, *q; /* * Draw stroked polygon. */ if (image->debug != MagickFalse) (void)LogMagickEvent(DrawEvent, GetMagickModule(), " begin draw-stroke-polygon"); clone_info = CloneDrawInfo((ImageInfo *) NULL, draw_info); clone_info->fill = draw_info->stroke; if (clone_info->fill_pattern != (Image *) NULL) clone_info->fill_pattern = DestroyImage(clone_info->fill_pattern); if (clone_info->stroke_pattern != (Image *) NULL) clone_info->fill_pattern = CloneImage(clone_info->stroke_pattern, 0, 0, MagickTrue, exception); clone_info->stroke.alpha = (MagickRealType) TransparentAlpha; clone_info->stroke_width = 0.0; clone_info->fill_rule = NonZeroRule; status = MagickTrue; for (p = primitive_info; p->primitive != UndefinedPrimitive; p += p->coordinates) { stroke_polygon = TraceStrokePolygon(draw_info, p); status &= DrawPolygonPrimitive(image, clone_info, stroke_polygon, exception); if (status == 0) break; stroke_polygon = (PrimitiveInfo *) RelinquishMagickMemory(stroke_polygon); q = p + p->coordinates - 1; closed_path = (fabs(q->point.x - p->point.x) < DrawEpsilon) && (fabs(q->point.y - p->point.y) < DrawEpsilon) ? MagickTrue : MagickFalse; if ((draw_info->linecap == RoundCap) && (closed_path == MagickFalse)) { DrawRoundLinecap(image, draw_info, p, exception); DrawRoundLinecap(image, draw_info, q, exception); } } clone_info = DestroyDrawInfo(clone_info); if (image->debug != MagickFalse) (void)LogMagickEvent(DrawEvent, GetMagickModule(), " end draw-stroke-polygon"); return (status != 0 ? MagickTrue : MagickFalse); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % G e t A f f i n e M a t r i x * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % GetAffineMatrix() returns an AffineMatrix initialized to the * identity % matrix. % % The format of the GetAffineMatrix method is: % % * void GetAffineMatrix(AffineMatrix *affine_matrix) % % A description of * each parameter follows: % % o affine_matrix: the affine matrix. % */ MagickExport void GetAffineMatrix(AffineMatrix * affine_matrix) { (void)LogMagickEvent(TraceEvent, GetMagickModule(), "..."); assert(affine_matrix != (AffineMatrix *) NULL); (void)ResetMagickMemory(affine_matrix, 0, sizeof(*affine_matrix)); affine_matrix->sx = 1.0; affine_matrix->sy = 1.0; } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % + G e t D r a w I n f o * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % GetDrawInfo() initializes draw_info to default values from * image_info. 
% % The format of the GetDrawInfo method is: % % void * GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info) % % A * description of each parameter follows: % % o image_info: the image * info.. % % o draw_info: the draw info. % */ MagickExport void GetDrawInfo(const ImageInfo * image_info, DrawInfo * draw_info) { char *next_token; const char *option; ExceptionInfo * exception; ImageInfo * clone_info; /* * Initialize draw attributes. */ (void)LogMagickEvent(TraceEvent, GetMagickModule(), "..."); assert(draw_info != (DrawInfo *) NULL); (void)ResetMagickMemory(draw_info, 0, sizeof(*draw_info)); clone_info = CloneImageInfo(image_info); GetAffineMatrix(&draw_info->affine); exception = AcquireExceptionInfo(); (void)QueryColorCompliance("#000F", AllCompliance, &draw_info->fill, exception); (void)QueryColorCompliance("#0000", AllCompliance, &draw_info->stroke, exception); draw_info->stroke_width = 1.0; draw_info->fill_rule = EvenOddRule; draw_info->alpha = OpaqueAlpha; draw_info->fill_alpha = OpaqueAlpha; draw_info->stroke_alpha = OpaqueAlpha; draw_info->linecap = ButtCap; draw_info->linejoin = MiterJoin; draw_info->miterlimit = 10; draw_info->decorate = NoDecoration; draw_info->pointsize = 12.0; draw_info->undercolor.alpha = (MagickRealType) TransparentAlpha; draw_info->compose = OverCompositeOp; draw_info->render = MagickTrue; draw_info->debug = IsEventLogging(); draw_info->stroke_antialias = clone_info->antialias; if (clone_info->font != (char *)NULL) draw_info->font = AcquireString(clone_info->font); if (clone_info->density != (char *)NULL) draw_info->density = AcquireString(clone_info->density); draw_info->text_antialias = clone_info->antialias; if (fabs(clone_info->pointsize) >= DrawEpsilon) draw_info->pointsize = clone_info->pointsize; draw_info->border_color = clone_info->border_color; if (clone_info->server_name != (char *)NULL) draw_info->server_name = AcquireString(clone_info->server_name); option = GetImageOption(clone_info, "direction"); if (option != (const char *)NULL) draw_info->direction = (DirectionType) ParseCommandOption( MagickDirectionOptions, MagickFalse, option); else draw_info->direction = UndefinedDirection; option = GetImageOption(clone_info, "encoding"); if (option != (const char *)NULL) (void)CloneString(&draw_info->encoding, option); option = GetImageOption(clone_info, "family"); if (option != (const char *)NULL) (void)CloneString(&draw_info->family, option); option = GetImageOption(clone_info, "fill"); if (option != (const char *)NULL) (void)QueryColorCompliance(option, AllCompliance, &draw_info->fill, exception); option = GetImageOption(clone_info, "gravity"); if (option != (const char *)NULL) draw_info->gravity = (GravityType) ParseCommandOption(MagickGravityOptions, MagickFalse, option); option = GetImageOption(clone_info, "interline-spacing"); if (option != (const char *)NULL) draw_info->interline_spacing = StringToDouble(option, &next_token); option = GetImageOption(clone_info, "interword-spacing"); if (option != (const char *)NULL) draw_info->interword_spacing = StringToDouble(option, &next_token); option = GetImageOption(clone_info, "kerning"); if (option != (const char *)NULL) draw_info->kerning = StringToDouble(option, &next_token); option = GetImageOption(clone_info, "stroke"); if (option != (const char *)NULL) (void)QueryColorCompliance(option, AllCompliance, &draw_info->stroke, exception); option = GetImageOption(clone_info, "strokewidth"); if (option != (const char *)NULL) draw_info->stroke_width = StringToDouble(option, &next_token); option = 
GetImageOption(clone_info, "style"); if (option != (const char *)NULL) draw_info->style = (StyleType) ParseCommandOption(MagickStyleOptions, MagickFalse, option); option = GetImageOption(clone_info, "undercolor"); if (option != (const char *)NULL) (void)QueryColorCompliance(option, AllCompliance, &draw_info->undercolor, exception); option = GetImageOption(clone_info, "weight"); if (option != (const char *)NULL) { ssize_t weight; weight = ParseCommandOption(MagickWeightOptions, MagickFalse, option); if (weight == -1) weight = (ssize_t) StringToUnsignedLong(option); draw_info->weight = (size_t) weight; } exception = DestroyExceptionInfo(exception); draw_info->signature = MagickCoreSignature; clone_info = DestroyImageInfo(clone_info); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % + P e r m u t a t e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % Permutate() returns the permuation of the (n,k). % % The format * of the Permutate method is: % % void Permutate(ssize_t n,ssize_t k) % * % A description of each parameter follows: % % o n: % % o k: % % */ static inline double Permutate(const ssize_t n, const ssize_t k) { double r; register ssize_t i; r = 1.0; for (i = k + 1; i <= n; i++) r *= i; for (i = 1; i <= (n - k); i++) r /= i; return (r); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % + T r a c e P r i m i t i v e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % TracePrimitive is a collection of methods for generating graphic * % primitives such as arcs, ellipses, paths, etc. % */ static void TraceArc(PrimitiveInfo * primitive_info, const PointInfo start, const PointInfo end, const PointInfo degrees) { PointInfo center, radii; center.x = 0.5 * (end.x + start.x); center.y = 0.5 * (end.y + start.y); radii.x = fabs(center.x - start.x); radii.y = fabs(center.y - start.y); TraceEllipse(primitive_info, center, radii, degrees); } static void TraceArcPath(PrimitiveInfo * primitive_info, const PointInfo start, const PointInfo end, const PointInfo arc, const double angle, const MagickBooleanType large_arc, const MagickBooleanType sweep) { double alpha, beta, delta, factor, gamma, theta; PointInfo center, points[3], radii; register double cosine, sine; register PrimitiveInfo * p; register ssize_t i; size_t arc_segments; if ((fabs(start.x - end.x) < DrawEpsilon) && (fabs(start.y - end.y) < DrawEpsilon)) { TracePoint(primitive_info, end); return; } radii.x = fabs(arc.x); radii.y = fabs(arc.y); if ((fabs(radii.x) < DrawEpsilon) || (fabs(radii.y) < DrawEpsilon)) { TraceLine(primitive_info, start, end); return; } cosine = cos(DegreesToRadians(fmod((double)angle, 360.0))); sine = sin(DegreesToRadians(fmod((double)angle, 360.0))); center.x = (double)(cosine * (end.x - start.x) / 2 + sine * (end.y - start.y) / 2); center.y = (double)(cosine * (end.y - start.y) / 2 - sine * (end.x - start.x) / 2); delta = (center.x * center.x) / (radii.x * radii.x) + (center.y * center.y) / (radii.y * radii.y); if (delta < DrawEpsilon) { TraceLine(primitive_info, start, end); return; } if (delta > 1.0) { radii.x *= sqrt((double)delta); radii.y *= sqrt((double)delta); } points[0].x = (double)(cosine * start.x / radii.x + sine * start.y / radii.x); points[0].y = (double)(cosine * start.y / radii.y - sine * start.x / radii.y); points[1].x = (double)(cosine * end.x / radii.x + sine * 
end.y / radii.x); points[1].y = (double)(cosine * end.y / radii.y - sine * end.x / radii.y); alpha = points[1].x - points[0].x; beta = points[1].y - points[0].y; factor = PerceptibleReciprocal(alpha * alpha + beta * beta) - 0.25; if (factor <= 0.0) factor = 0.0; else { factor = sqrt((double)factor); if (sweep == large_arc) factor = (-factor); } center.x = (double)((points[0].x + points[1].x) / 2 - factor * beta); center.y = (double)((points[0].y + points[1].y) / 2 + factor * alpha); alpha = atan2(points[0].y - center.y, points[0].x - center.x); theta = atan2(points[1].y - center.y, points[1].x - center.x) - alpha; if ((theta < 0.0) && (sweep != MagickFalse)) theta += 2.0 * MagickPI; else if ((theta > 0.0) && (sweep == MagickFalse)) theta -= 2.0 * MagickPI; arc_segments = (size_t) ceil(fabs((double)(theta / (0.5 * MagickPI + DrawEpsilon)))); p = primitive_info; for (i = 0; i < (ssize_t) arc_segments; i++) { beta = 0.5 * ((alpha + (i + 1) * theta / arc_segments) - (alpha + i * theta / arc_segments)); gamma = (8.0 / 3.0) * sin(fmod((double)(0.5 * beta), DegreesToRadians(360.0))) * sin(fmod((double)(0.5 * beta), DegreesToRadians(360.0))) / sin(fmod((double)beta, DegreesToRadians(360.0))); points[0].x = (double)(center.x + cos(fmod((double)(alpha + (double)i * theta / arc_segments), DegreesToRadians(360.0))) - gamma * sin(fmod((double)(alpha + (double)i * theta / arc_segments), DegreesToRadians(360.0)))); points[0].y = (double)(center.y + sin(fmod((double)(alpha + (double)i * theta / arc_segments), DegreesToRadians(360.0))) + gamma * cos(fmod((double)(alpha + (double)i * theta / arc_segments), DegreesToRadians(360.0)))); points[2].x = (double)(center.x + cos(fmod((double)(alpha + (double)(i + 1) * theta / arc_segments), DegreesToRadians(360.0)))); points[2].y = (double)(center.y + sin(fmod((double)(alpha + (double)(i + 1) * theta / arc_segments), DegreesToRadians(360.0)))); points[1].x = (double)(points[2].x + gamma * sin(fmod((double)(alpha + (double) (i + 1) * theta / arc_segments), DegreesToRadians(360.0)))); points[1].y = (double)(points[2].y - gamma * cos(fmod((double)(alpha + (double) (i + 1) * theta / arc_segments), DegreesToRadians(360.0)))); p->point.x = (p == primitive_info) ? start.x : (p - 1)->point.x; p->point.y = (p == primitive_info) ? start.y : (p - 1)->point.y; (p + 1)->point.x = (double)(cosine * radii.x * points[0].x - sine * radii.y * points[0].y); (p + 1)->point.y = (double)(sine * radii.x * points[0].x + cosine * radii.y * points[0].y); (p + 2)->point.x = (double)(cosine * radii.x * points[1].x - sine * radii.y * points[1].y); (p + 2)->point.y = (double)(sine * radii.x * points[1].x + cosine * radii.y * points[1].y); (p + 3)->point.x = (double)(cosine * radii.x * points[2].x - sine * radii.y * points[2].y); (p + 3)->point.y = (double)(sine * radii.x * points[2].x + cosine * radii.y * points[2].y); if (i == (ssize_t) (arc_segments - 1)) (p + 3)->point = end; TraceBezier(p, 4); p += p->coordinates; } primitive_info->coordinates = (size_t) (p - primitive_info); for (i = 0; i < (ssize_t) primitive_info->coordinates; i++) { p->primitive = primitive_info->primitive; p--; } } static void TraceBezier(PrimitiveInfo * primitive_info, const size_t number_coordinates) { double alpha, *coefficients, weight; PointInfo end, point, *points; register PrimitiveInfo * p; register ssize_t i, j; size_t control_points, quantum; /* * Allocate coeficients. 
*/ quantum = number_coordinates; for (i = 0; i < (ssize_t) number_coordinates; i++) { for (j = i + 1; j < (ssize_t) number_coordinates; j++) { alpha = fabs(primitive_info[j].point.x - primitive_info[i].point.x); if (alpha > (double)quantum) quantum = (size_t) alpha; alpha = fabs(primitive_info[j].point.y - primitive_info[i].point.y); if (alpha > (double)quantum) quantum = (size_t) alpha; } } quantum = (size_t) MagickMin((double)quantum / number_coordinates, (double)BezierQuantum); control_points = quantum * number_coordinates; coefficients = (double *)AcquireQuantumMemory((size_t) number_coordinates, sizeof(*coefficients)); points = (PointInfo *) AcquireQuantumMemory((size_t) control_points, sizeof(*points)); if ((coefficients == (double *)NULL) || (points == (PointInfo *) NULL)) ThrowFatalException(ResourceLimitFatalError, "MemoryAllocationFailed"); /* * Compute bezier points. */ end = primitive_info[number_coordinates - 1].point; for (i = 0; i < (ssize_t) number_coordinates; i++) coefficients[i] = Permutate((ssize_t) number_coordinates - 1, i); weight = 0.0; for (i = 0; i < (ssize_t) control_points; i++) { p = primitive_info; point.x = 0.0; point.y = 0.0; alpha = pow((double)(1.0 - weight), (double)number_coordinates - 1.0); for (j = 0; j < (ssize_t) number_coordinates; j++) { point.x += alpha * coefficients[j] * p->point.x; point.y += alpha * coefficients[j] * p->point.y; alpha *= weight / (1.0 - weight); p++; } points[i] = point; weight += 1.0 / control_points; } /* * Bezier curves are just short segmented polys. */ p = primitive_info; for (i = 0; i < (ssize_t) control_points; i++) { TracePoint(p, points[i]); p += p->coordinates; } TracePoint(p, end); p += p->coordinates; primitive_info->coordinates = (size_t) (p - primitive_info); for (i = 0; i < (ssize_t) primitive_info->coordinates; i++) { p->primitive = primitive_info->primitive; p--; } points = (PointInfo *) RelinquishMagickMemory(points); coefficients = (double *)RelinquishMagickMemory(coefficients); } static void TraceCircle(PrimitiveInfo * primitive_info, const PointInfo start, const PointInfo end) { double alpha, beta, radius; PointInfo offset, degrees; alpha = end.x - start.x; beta = end.y - start.y; radius = hypot((double)alpha, (double)beta); offset.x = (double)radius; offset.y = (double)radius; degrees.x = 0.0; degrees.y = 360.0; TraceEllipse(primitive_info, start, offset, degrees); } static void TraceEllipse(PrimitiveInfo * primitive_info, const PointInfo start, const PointInfo stop, const PointInfo degrees) { double delta, step, y; PointInfo angle, point; register PrimitiveInfo * p; register ssize_t i; /* * Ellipses are just short segmented polys. 
*/ if ((fabs(stop.x) < DrawEpsilon) && (fabs(stop.y) < DrawEpsilon)) { TracePoint(primitive_info, start); return; } delta = 2.0 / MagickMax(stop.x, stop.y); step = MagickPI / 8.0; if ((delta >= 0.0) && (delta < (MagickPI / 8.0))) step = MagickPI / (4 * (MagickPI / delta / 2 + 0.5)); angle.x = DegreesToRadians(degrees.x); y = degrees.y; while (y < degrees.x) y += 360.0; angle.y = DegreesToRadians(y); for (p = primitive_info; angle.x < angle.y; angle.x += step) { point.x = cos(fmod(angle.x, DegreesToRadians(360.0))) * stop.x + start.x; point.y = sin(fmod(angle.x, DegreesToRadians(360.0))) * stop.y + start.y; TracePoint(p, point); p += p->coordinates; } point.x = cos(fmod(angle.y, DegreesToRadians(360.0))) * stop.x + start.x; point.y = sin(fmod(angle.y, DegreesToRadians(360.0))) * stop.y + start.y; TracePoint(p, point); p += p->coordinates; primitive_info->coordinates = (size_t) (p - primitive_info); for (i = 0; i < (ssize_t) primitive_info->coordinates; i++) { p->primitive = primitive_info->primitive; p--; } } static void TraceLine(PrimitiveInfo * primitive_info, const PointInfo start, const PointInfo end) { TracePoint(primitive_info, start); if ((fabs(start.x - end.x) < DrawEpsilon) && (fabs(start.y - end.y) < DrawEpsilon)) { primitive_info->primitive = PointPrimitive; primitive_info->coordinates = 1; return; } TracePoint(primitive_info + 1, end); (primitive_info + 1)->primitive = primitive_info->primitive; primitive_info->coordinates = 2; } static size_t TracePath(PrimitiveInfo * primitive_info, const char *path) { char *next_token, token[MagickPathExtent]; const char *p; double x, y; int attribute, last_attribute; PointInfo end = { 0.0, 0.0 }, points[4] = { { 0.0, 0.0 }, { 0.0, 0.0 }, { 0.0, 0.0 }, { 0.0, 0.0 } }, point = { 0.0, 0.0 }, start = { 0.0, 0.0 }; PrimitiveType primitive_type; register PrimitiveInfo * q; register ssize_t i; size_t number_coordinates, z_count; attribute = 0; number_coordinates = 0; z_count = 0; primitive_type = primitive_info->primitive; q = primitive_info; for (p = path; *p != '\0';) { while (isspace((int)((unsigned char)*p)) != 0) p++; if (*p == '\0') break; last_attribute = attribute; attribute = (int)(*p++); switch (attribute) { case 'a': case 'A': { double angle; MagickBooleanType large_arc, sweep; PointInfo arc; /* * Compute arc points. */ do { GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') GetNextToken(p, &p, MagickPathExtent, token); arc.x = StringToDouble(token, &next_token); GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') GetNextToken(p, &p, MagickPathExtent, token); arc.y = StringToDouble(token, &next_token); GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') GetNextToken(p, &p, MagickPathExtent, token); angle = StringToDouble(token, &next_token); GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') GetNextToken(p, &p, MagickPathExtent, token); large_arc = StringToLong(token) != 0 ? MagickTrue : MagickFalse; GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') GetNextToken(p, &p, MagickPathExtent, token); sweep = StringToLong(token) != 0 ? MagickTrue : MagickFalse; GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') GetNextToken(p, &p, MagickPathExtent, token); x = StringToDouble(token, &next_token); GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') GetNextToken(p, &p, MagickPathExtent, token); y = StringToDouble(token, &next_token); end.x = (double)(attribute == (int)'A' ? x : point.x + x); end.y = (double)(attribute == (int)'A' ? 
y : point.y + y); TraceArcPath(q, point, end, arc, angle, large_arc, sweep); q += q->coordinates; point = end; while (isspace((int)((unsigned char)*p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'c': case 'C': { /* * Compute bezier points. */ do { points[0] = point; for (i = 1; i < 4; i++) { GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') GetNextToken(p, &p, MagickPathExtent, token); x = StringToDouble(token, &next_token); GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') GetNextToken(p, &p, MagickPathExtent, token); y = StringToDouble(token, &next_token); end.x = (double)(attribute == (int)'C' ? x : point.x + x); end.y = (double)(attribute == (int)'C' ? y : point.y + y); points[i] = end; } for (i = 0; i < 4; i++) (q + i)->point = points[i]; TraceBezier(q, 4); q += q->coordinates; point = end; } while (IsPoint(p) != MagickFalse); break; } case 'H': case 'h': { do { GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') GetNextToken(p, &p, MagickPathExtent, token); x = StringToDouble(token, &next_token); point.x = (double)(attribute == (int)'H' ? x : point.x + x); TracePoint(q, point); q += q->coordinates; } while (IsPoint(p) != MagickFalse); break; } case 'l': case 'L': { do { GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') GetNextToken(p, &p, MagickPathExtent, token); x = StringToDouble(token, &next_token); GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') GetNextToken(p, &p, MagickPathExtent, token); y = StringToDouble(token, &next_token); point.x = (double)(attribute == (int)'L' ? x : point.x + x); point.y = (double)(attribute == (int)'L' ? y : point.y + y); TracePoint(q, point); q += q->coordinates; } while (IsPoint(p) != MagickFalse); break; } case 'M': case 'm': { if (q != primitive_info) { primitive_info->coordinates = (size_t) (q - primitive_info); number_coordinates += primitive_info->coordinates; primitive_info = q; } i = 0; do { GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') GetNextToken(p, &p, MagickPathExtent, token); x = StringToDouble(token, &next_token); GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') GetNextToken(p, &p, MagickPathExtent, token); y = StringToDouble(token, &next_token); point.x = (double)(attribute == (int)'M' ? x : point.x + x); point.y = (double)(attribute == (int)'M' ? y : point.y + y); if (i == 0) start = point; i++; TracePoint(q, point); q += q->coordinates; if ((i != 0) && (attribute == (int)'M')) { TracePoint(q, point); q += q->coordinates; } } while (IsPoint(p) != MagickFalse); break; } case 'q': case 'Q': { /* * Compute bezier points. */ do { points[0] = point; for (i = 1; i < 3; i++) { GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') GetNextToken(p, &p, MagickPathExtent, token); x = StringToDouble(token, &next_token); GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') GetNextToken(p, &p, MagickPathExtent, token); y = StringToDouble(token, &next_token); if (*p == ',') p++; end.x = (double)(attribute == (int)'Q' ? x : point.x + x); end.y = (double)(attribute == (int)'Q' ? y : point.y + y); points[i] = end; } for (i = 0; i < 3; i++) (q + i)->point = points[i]; TraceBezier(q, 3); q += q->coordinates; point = end; } while (IsPoint(p) != MagickFalse); break; } case 's': case 'S': { /* * Compute bezier points. 
*/ do { points[0] = points[3]; points[1].x = 2.0 * points[3].x - points[2].x; points[1].y = 2.0 * points[3].y - points[2].y; for (i = 2; i < 4; i++) { GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') GetNextToken(p, &p, MagickPathExtent, token); x = StringToDouble(token, &next_token); GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') GetNextToken(p, &p, MagickPathExtent, token); y = StringToDouble(token, &next_token); if (*p == ',') p++; end.x = (double)(attribute == (int)'S' ? x : point.x + x); end.y = (double)(attribute == (int)'S' ? y : point.y + y); points[i] = end; } if (strchr("CcSs", last_attribute) == (char *)NULL) { points[0] = point; points[1] = point; } for (i = 0; i < 4; i++) (q + i)->point = points[i]; TraceBezier(q, 4); q += q->coordinates; point = end; } while (IsPoint(p) != MagickFalse); break; } case 't': case 'T': { /* * Compute bezier points. */ do { points[0] = points[2]; points[1].x = 2.0 * points[2].x - points[1].x; points[1].y = 2.0 * points[2].y - points[1].y; for (i = 2; i < 3; i++) { GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') GetNextToken(p, &p, MagickPathExtent, token); x = StringToDouble(token, &next_token); GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') GetNextToken(p, &p, MagickPathExtent, token); y = StringToDouble(token, &next_token); end.x = (double)(attribute == (int)'T' ? x : point.x + x); end.y = (double)(attribute == (int)'T' ? y : point.y + y); points[i] = end; } if (strchr("QqTt", last_attribute) == (char *)NULL) { points[0] = point; points[1] = point; } for (i = 0; i < 3; i++) (q + i)->point = points[i]; TraceBezier(q, 3); q += q->coordinates; point = end; } while (IsPoint(p) != MagickFalse); break; } case 'v': case 'V': { do { GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') GetNextToken(p, &p, MagickPathExtent, token); y = StringToDouble(token, &next_token); point.y = (double)(attribute == (int)'V' ? 
y : point.y + y); TracePoint(q, point); q += q->coordinates; } while (IsPoint(p) != MagickFalse); break; } case 'z': case 'Z': { point = start; TracePoint(q, point); q += q->coordinates; primitive_info->coordinates = (size_t) (q - primitive_info); number_coordinates += primitive_info->coordinates; primitive_info = q; z_count++; break; } default: { if (isalpha((int)((unsigned char)attribute)) != 0) (void)FormatLocaleFile(stderr, "attribute not recognized: %c\n", attribute); break; } } } primitive_info->coordinates = (size_t) (q - primitive_info); number_coordinates += primitive_info->coordinates; for (i = 0; i < (ssize_t) number_coordinates; i++) { q--; q->primitive = primitive_type; if (z_count > 1) q->method = FillToBorderMethod; } q = primitive_info; return (number_coordinates); } static void TraceRectangle(PrimitiveInfo * primitive_info, const PointInfo start, const PointInfo end) { PointInfo point; register PrimitiveInfo * p; register ssize_t i; p = primitive_info; TracePoint(p, start); p += p->coordinates; point.x = start.x; point.y = end.y; TracePoint(p, point); p += p->coordinates; TracePoint(p, end); p += p->coordinates; point.x = end.x; point.y = start.y; TracePoint(p, point); p += p->coordinates; TracePoint(p, start); p += p->coordinates; primitive_info->coordinates = (size_t) (p - primitive_info); for (i = 0; i < (ssize_t) primitive_info->coordinates; i++) { p->primitive = primitive_info->primitive; p--; } } static void TraceRoundRectangle(PrimitiveInfo * primitive_info, const PointInfo start, const PointInfo end, PointInfo arc) { PointInfo degrees, offset, point; register PrimitiveInfo * p; register ssize_t i; p = primitive_info; offset.x = fabs(end.x - start.x); offset.y = fabs(end.y - start.y); if (arc.x > (0.5 * offset.x)) arc.x = 0.5 * offset.x; if (arc.y > (0.5 * offset.y)) arc.y = 0.5 * offset.y; point.x = start.x + offset.x - arc.x; point.y = start.y + arc.y; degrees.x = 270.0; degrees.y = 360.0; TraceEllipse(p, point, arc, degrees); p += p->coordinates; point.x = start.x + offset.x - arc.x; point.y = start.y + offset.y - arc.y; degrees.x = 0.0; degrees.y = 90.0; TraceEllipse(p, point, arc, degrees); p += p->coordinates; point.x = start.x + arc.x; point.y = start.y + offset.y - arc.y; degrees.x = 90.0; degrees.y = 180.0; TraceEllipse(p, point, arc, degrees); p += p->coordinates; point.x = start.x + arc.x; point.y = start.y + arc.y; degrees.x = 180.0; degrees.y = 270.0; TraceEllipse(p, point, arc, degrees); p += p->coordinates; TracePoint(p, primitive_info->point); p += p->coordinates; primitive_info->coordinates = (size_t) (p - primitive_info); for (i = 0; i < (ssize_t) primitive_info->coordinates; i++) { p->primitive = primitive_info->primitive; p--; } } static void TraceSquareLinecap(PrimitiveInfo * primitive_info, const size_t number_vertices, const double offset) { double distance; register double dx, dy; register ssize_t i; ssize_t j; dx = 0.0; dy = 0.0; for (i = 1; i < (ssize_t) number_vertices; i++) { dx = primitive_info[0].point.x - primitive_info[i].point.x; dy = primitive_info[0].point.y - primitive_info[i].point.y; if ((fabs((double)dx) >= DrawEpsilon) || (fabs((double)dy) >= DrawEpsilon)) break; } if (i == (ssize_t) number_vertices) i = (ssize_t) number_vertices - 1L; distance = hypot((double)dx, (double)dy); primitive_info[0].point.x = (double)(primitive_info[i].point.x + dx * (distance + offset) / distance); primitive_info[0].point.y = (double)(primitive_info[i].point.y + dy * (distance + offset) / distance); for (j = (ssize_t) number_vertices - 2; j >= 0; 
j--) { dx = primitive_info[number_vertices - 1].point.x - primitive_info[j].point.x; dy = primitive_info[number_vertices - 1].point.y - primitive_info[j].point.y; if ((fabs((double)dx) >= DrawEpsilon) || (fabs((double)dy) >= DrawEpsilon)) break; } distance = hypot((double)dx, (double)dy); primitive_info[number_vertices - 1].point.x = (double)(primitive_info[j].point.x + dx * (distance + offset) / distance); primitive_info[number_vertices - 1].point.y = (double)(primitive_info[j].point.y + dy * (distance + offset) / distance); } static PrimitiveInfo * TraceStrokePolygon(const DrawInfo * draw_info, const PrimitiveInfo * primitive_info) { typedef struct _LineSegment { double p, q; } LineSegment; double delta_theta, dot_product, mid, miterlimit; LineSegment dx, dy, inverse_slope, slope, theta; MagickBooleanType closed_path; PointInfo box_p[5], box_q[5], center, offset, *path_p, *path_q; PrimitiveInfo * polygon_primitive, *stroke_polygon; register ssize_t i; size_t arc_segments, max_strokes, number_vertices; ssize_t j, n, p, q; /* * Allocate paths. */ number_vertices = primitive_info->coordinates; max_strokes = 2 * number_vertices + 6 * BezierQuantum + 360; path_p = (PointInfo *) AcquireQuantumMemory((size_t) max_strokes, sizeof(*path_p)); path_q = (PointInfo *) AcquireQuantumMemory((size_t) max_strokes, sizeof(*path_q)); polygon_primitive = (PrimitiveInfo *) AcquireQuantumMemory((size_t) number_vertices + 2UL, sizeof(*polygon_primitive)); if ((path_p == (PointInfo *) NULL) || (path_q == (PointInfo *) NULL) || (polygon_primitive == (PrimitiveInfo *) NULL)) return ((PrimitiveInfo *) NULL); (void)CopyMagickMemory(polygon_primitive, primitive_info, (size_t) number_vertices * sizeof(*polygon_primitive)); closed_path = (fabs(primitive_info[number_vertices - 1].point.x - primitive_info[0].point.x) < DrawEpsilon) && (fabs(primitive_info[number_vertices - 1].point.y - primitive_info[0].point.y) < DrawEpsilon) ? MagickTrue : MagickFalse; if (((draw_info->linejoin == RoundJoin) || (draw_info->linejoin == MiterJoin)) && (closed_path != MagickFalse)) { polygon_primitive[number_vertices] = primitive_info[1]; number_vertices++; } polygon_primitive[number_vertices].primitive = UndefinedPrimitive; /* * Compute the slope for the first line segment, p. */ dx.p = 0.0; dy.p = 0.0; for (n = 1; n < (ssize_t) number_vertices; n++) { dx.p = polygon_primitive[n].point.x - polygon_primitive[0].point.x; dy.p = polygon_primitive[n].point.y - polygon_primitive[0].point.y; if ((fabs(dx.p) >= DrawEpsilon) || (fabs(dy.p) >= DrawEpsilon)) break; } if (n == (ssize_t) number_vertices) n = (ssize_t) number_vertices - 1L; slope.p = 0.0; inverse_slope.p = 0.0; if (fabs(dx.p) < DrawEpsilon) { if (dx.p >= 0.0) slope.p = dy.p < 0.0 ? -1.0 / DrawEpsilon : 1.0 / DrawEpsilon; else slope.p = dy.p < 0.0 ? 1.0 / DrawEpsilon : -1.0 / DrawEpsilon; } else if (fabs(dy.p) < DrawEpsilon) { if (dy.p >= 0.0) inverse_slope.p = dx.p < 0.0 ? -1.0 / DrawEpsilon : 1.0 / DrawEpsilon; else inverse_slope.p = dx.p < 0.0 ? 
1.0 / DrawEpsilon : -1.0 / DrawEpsilon; } else { slope.p = dy.p / dx.p; inverse_slope.p = (-1.0 / slope.p); } mid = ExpandAffine(&draw_info->affine) * draw_info->stroke_width / 2.0; miterlimit = (double)(draw_info->miterlimit * draw_info->miterlimit * mid * mid); if ((draw_info->linecap == SquareCap) && (closed_path == MagickFalse)) TraceSquareLinecap(polygon_primitive, number_vertices, mid); offset.x = sqrt((double)(mid * mid / (inverse_slope.p * inverse_slope.p + 1.0))); offset.y = (double)(offset.x * inverse_slope.p); if ((dy.p * offset.x - dx.p * offset.y) > 0.0) { box_p[0].x = polygon_primitive[0].point.x - offset.x; box_p[0].y = polygon_primitive[0].point.y - offset.x * inverse_slope.p; box_p[1].x = polygon_primitive[n].point.x - offset.x; box_p[1].y = polygon_primitive[n].point.y - offset.x * inverse_slope.p; box_q[0].x = polygon_primitive[0].point.x + offset.x; box_q[0].y = polygon_primitive[0].point.y + offset.x * inverse_slope.p; box_q[1].x = polygon_primitive[n].point.x + offset.x; box_q[1].y = polygon_primitive[n].point.y + offset.x * inverse_slope.p; } else { box_p[0].x = polygon_primitive[0].point.x + offset.x; box_p[0].y = polygon_primitive[0].point.y + offset.y; box_p[1].x = polygon_primitive[n].point.x + offset.x; box_p[1].y = polygon_primitive[n].point.y + offset.y; box_q[0].x = polygon_primitive[0].point.x - offset.x; box_q[0].y = polygon_primitive[0].point.y - offset.y; box_q[1].x = polygon_primitive[n].point.x - offset.x; box_q[1].y = polygon_primitive[n].point.y - offset.y; } /* * Create strokes for the line join attribute: bevel, miter, round. */ p = 0; q = 0; path_q[p++] = box_q[0]; path_p[q++] = box_p[0]; for (i = (ssize_t) n + 1; i < (ssize_t) number_vertices; i++) { /* * Compute the slope for this line segment, q. */ dx.q = polygon_primitive[i].point.x - polygon_primitive[n].point.x; dy.q = polygon_primitive[i].point.y - polygon_primitive[n].point.y; dot_product = dx.q * dx.q + dy.q * dy.q; if (dot_product < 0.25) continue; slope.q = 0.0; inverse_slope.q = 0.0; if (fabs(dx.q) < DrawEpsilon) { if (dx.q >= 0.0) slope.q = dy.q < 0.0 ? -1.0 / DrawEpsilon : 1.0 / DrawEpsilon; else slope.q = dy.q < 0.0 ? 1.0 / DrawEpsilon : -1.0 / DrawEpsilon; } else if (fabs(dy.q) < DrawEpsilon) { if (dy.q >= 0.0) inverse_slope.q = dx.q < 0.0 ? -1.0 / DrawEpsilon : 1.0 / DrawEpsilon; else inverse_slope.q = dx.q < 0.0 ? 
1.0 / DrawEpsilon : -1.0 / DrawEpsilon; } else { slope.q = dy.q / dx.q; inverse_slope.q = (-1.0 / slope.q); } offset.x = sqrt((double)(mid * mid / (inverse_slope.q * inverse_slope.q + 1.0))); offset.y = (double)(offset.x * inverse_slope.q); dot_product = dy.q * offset.x - dx.q * offset.y; if (dot_product > 0.0) { box_p[2].x = polygon_primitive[n].point.x - offset.x; box_p[2].y = polygon_primitive[n].point.y - offset.y; box_p[3].x = polygon_primitive[i].point.x - offset.x; box_p[3].y = polygon_primitive[i].point.y - offset.y; box_q[2].x = polygon_primitive[n].point.x + offset.x; box_q[2].y = polygon_primitive[n].point.y + offset.y; box_q[3].x = polygon_primitive[i].point.x + offset.x; box_q[3].y = polygon_primitive[i].point.y + offset.y; } else { box_p[2].x = polygon_primitive[n].point.x + offset.x; box_p[2].y = polygon_primitive[n].point.y + offset.y; box_p[3].x = polygon_primitive[i].point.x + offset.x; box_p[3].y = polygon_primitive[i].point.y + offset.y; box_q[2].x = polygon_primitive[n].point.x - offset.x; box_q[2].y = polygon_primitive[n].point.y - offset.y; box_q[3].x = polygon_primitive[i].point.x - offset.x; box_q[3].y = polygon_primitive[i].point.y - offset.y; } if (fabs((double)(slope.p - slope.q)) < DrawEpsilon) { box_p[4] = box_p[1]; box_q[4] = box_q[1]; } else { box_p[4].x = (double)((slope.p * box_p[0].x - box_p[0].y - slope.q * box_p[3].x + box_p[3].y) / (slope.p - slope.q)); box_p[4].y = (double)(slope.p * (box_p[4].x - box_p[0].x) + box_p[0].y); box_q[4].x = (double)((slope.p * box_q[0].x - box_q[0].y - slope.q * box_q[3].x + box_q[3].y) / (slope.p - slope.q)); box_q[4].y = (double)(slope.p * (box_q[4].x - box_q[0].x) + box_q[0].y); } if (q >= (ssize_t) (max_strokes - 6 * BezierQuantum - 360)) { if (~max_strokes < (6 * BezierQuantum + 360)) { path_p = (PointInfo *) RelinquishMagickMemory(path_p); path_q = (PointInfo *) RelinquishMagickMemory(path_q); } else { max_strokes += 6 * BezierQuantum + 360; path_p = (PointInfo *) ResizeQuantumMemory(path_p, max_strokes, sizeof(*path_p)); path_q = (PointInfo *) ResizeQuantumMemory(path_q, max_strokes, sizeof(*path_q)); } if ((path_p == (PointInfo *) NULL) || (path_q == (PointInfo *) NULL)) { if (path_p != (PointInfo *) NULL) path_p = (PointInfo *) RelinquishMagickMemory(path_p); if (path_q != (PointInfo *) NULL) path_q = (PointInfo *) RelinquishMagickMemory(path_q); polygon_primitive = (PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive); return ((PrimitiveInfo *) NULL); } } dot_product = dx.q * dy.p - dx.p * dy.q; if (dot_product <= 0.0) switch (draw_info->linejoin) { case BevelJoin: { path_q[q++] = box_q[1]; path_q[q++] = box_q[2]; dot_product = (box_q[4].x - box_p[4].x) * (box_q[4].x - box_p[4].x) + (box_q[4].y - box_p[4].y) * (box_q[4].y - box_p[4].y); if (dot_product <= miterlimit) path_p[p++] = box_p[4]; else { path_p[p++] = box_p[1]; path_p[p++] = box_p[2]; } break; } case MiterJoin: { dot_product = (box_q[4].x - box_p[4].x) * (box_q[4].x - box_p[4].x) + (box_q[4].y - box_p[4].y) * (box_q[4].y - box_p[4].y); if (dot_product <= miterlimit) { path_q[q++] = box_q[4]; path_p[p++] = box_p[4]; } else { path_q[q++] = box_q[1]; path_q[q++] = box_q[2]; path_p[p++] = box_p[1]; path_p[p++] = box_p[2]; } break; } case RoundJoin: { dot_product = (box_q[4].x - box_p[4].x) * (box_q[4].x - box_p[4].x) + (box_q[4].y - box_p[4].y) * (box_q[4].y - box_p[4].y); if (dot_product <= miterlimit) path_p[p++] = box_p[4]; else { path_p[p++] = box_p[1]; path_p[p++] = box_p[2]; } center = polygon_primitive[n].point; theta.p = atan2(box_q[1].y - 
center.y, box_q[1].x - center.x); theta.q = atan2(box_q[2].y - center.y, box_q[2].x - center.x); if (theta.q < theta.p) theta.q += 2.0 * MagickPI; arc_segments = (size_t) ceil((double)((theta.q - theta.p) / (2.0 * sqrt((double)(1.0 / mid))))); path_q[q].x = box_q[1].x; path_q[q].y = box_q[1].y; q++; for (j = 1; j < (ssize_t) arc_segments; j++) { delta_theta = (double)(j * (theta.q - theta.p) / arc_segments); path_q[q].x = (double)(center.x + mid * cos(fmod((double) (theta.p + delta_theta), DegreesToRadians(360.0)))); path_q[q].y = (double)(center.y + mid * sin(fmod((double) (theta.p + delta_theta), DegreesToRadians(360.0)))); q++; } path_q[q++] = box_q[2]; break; } default: break; } else switch (draw_info->linejoin) { case BevelJoin: { path_p[p++] = box_p[1]; path_p[p++] = box_p[2]; dot_product = (box_q[4].x - box_p[4].x) * (box_q[4].x - box_p[4].x) + (box_q[4].y - box_p[4].y) * (box_q[4].y - box_p[4].y); if (dot_product <= miterlimit) path_q[q++] = box_q[4]; else { path_q[q++] = box_q[1]; path_q[q++] = box_q[2]; } break; } case MiterJoin: { dot_product = (box_q[4].x - box_p[4].x) * (box_q[4].x - box_p[4].x) + (box_q[4].y - box_p[4].y) * (box_q[4].y - box_p[4].y); if (dot_product <= miterlimit) { path_q[q++] = box_q[4]; path_p[p++] = box_p[4]; } else { path_q[q++] = box_q[1]; path_q[q++] = box_q[2]; path_p[p++] = box_p[1]; path_p[p++] = box_p[2]; } break; } case RoundJoin: { dot_product = (box_q[4].x - box_p[4].x) * (box_q[4].x - box_p[4].x) + (box_q[4].y - box_p[4].y) * (box_q[4].y - box_p[4].y); if (dot_product <= miterlimit) path_q[q++] = box_q[4]; else { path_q[q++] = box_q[1]; path_q[q++] = box_q[2]; } center = polygon_primitive[n].point; theta.p = atan2(box_p[1].y - center.y, box_p[1].x - center.x); theta.q = atan2(box_p[2].y - center.y, box_p[2].x - center.x); if (theta.p < theta.q) theta.p += 2.0 * MagickPI; arc_segments = (size_t) ceil((double)((theta.p - theta.q) / (2.0 * sqrt((double)(1.0 / mid))))); path_p[p++] = box_p[1]; for (j = 1; j < (ssize_t) arc_segments; j++) { delta_theta = (double)(j * (theta.q - theta.p) / arc_segments); path_p[p].x = (double)(center.x + mid * cos(fmod((double) (theta.p + delta_theta), DegreesToRadians(360.0)))); path_p[p].y = (double)(center.y + mid * sin(fmod((double) (theta.p + delta_theta), DegreesToRadians(360.0)))); p++; } path_p[p++] = box_p[2]; break; } default: break; } slope.p = slope.q; inverse_slope.p = inverse_slope.q; box_p[0] = box_p[2]; box_p[1] = box_p[3]; box_q[0] = box_q[2]; box_q[1] = box_q[3]; dx.p = dx.q; dy.p = dy.q; n = i; } path_p[p++] = box_p[1]; path_q[q++] = box_q[1]; /* * Trace stroked polygon. 
*/ stroke_polygon = (PrimitiveInfo *) AcquireQuantumMemory((size_t) (p + q + 2UL * closed_path + 2UL), sizeof(*stroke_polygon)); if (stroke_polygon != (PrimitiveInfo *) NULL) { for (i = 0; i < (ssize_t) p; i++) { stroke_polygon[i] = polygon_primitive[0]; stroke_polygon[i].point = path_p[i]; } if (closed_path != MagickFalse) { stroke_polygon[i] = polygon_primitive[0]; stroke_polygon[i].point = stroke_polygon[0].point; i++; } for (; i < (ssize_t) (p + q + closed_path); i++) { stroke_polygon[i] = polygon_primitive[0]; stroke_polygon[i].point = path_q[p + q + closed_path - (i + 1)]; } if (closed_path != MagickFalse) { stroke_polygon[i] = polygon_primitive[0]; stroke_polygon[i].point = stroke_polygon[p + closed_path].point; i++; } stroke_polygon[i] = polygon_primitive[0]; stroke_polygon[i].point = stroke_polygon[0].point; i++; stroke_polygon[i].primitive = UndefinedPrimitive; stroke_polygon[0].coordinates = (size_t) (p + q + 2 * closed_path + 1); } path_p = (PointInfo *) RelinquishMagickMemory(path_p); path_q = (PointInfo *) RelinquishMagickMemory(path_q); polygon_primitive = (PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive); return (stroke_polygon); }
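/*
 * Illustrative sketch, not part of draw.c: TraceStrokePolygon() above offsets
 * every polygon segment by half the stroke width ("mid") along the segment
 * normal, then decides between a miter and a bevel join by comparing the
 * squared distance between the two candidate join points against
 * miterlimit^2 * mid^2.  The helper names below (DemoPoint, SegmentOffset,
 * MiterWithinLimit) are hypothetical and exist only for this standalone
 * example; they are not ImageMagick APIs.  Compile with: cc demo.c -lm
 */
#include <math.h>
#include <stdio.h>

typedef struct { double x, y; } DemoPoint;

/* Offset both endpoints of the segment a->b by mid along its unit normal. */
static void SegmentOffset(const DemoPoint a, const DemoPoint b, const double mid,
  DemoPoint *a_offset, DemoPoint *b_offset)
{
  double dx = b.x - a.x, dy = b.y - a.y, length = hypot(dx, dy);

  if (length == 0.0)
    length = 1.0;                      /* degenerate segment: no direction */
  a_offset->x = a.x - mid * dy / length; a_offset->y = a.y + mid * dx / length;
  b_offset->x = b.x - mid * dy / length; b_offset->y = b.y + mid * dx / length;
}

/* Same squared-distance comparison the stroker uses for the miter decision. */
static int MiterWithinLimit(const DemoPoint p, const DemoPoint q,
  const double miterlimit, const double mid)
{
  double dx = q.x - p.x, dy = q.y - p.y;

  return ((dx * dx + dy * dy) <= (miterlimit * miterlimit * mid * mid));
}

int main(void)
{
  DemoPoint a = { 0.0, 0.0 }, b = { 10.0, 0.0 }, p, q;
  double mid = 2.0;                    /* stroke_width / 2 */

  SegmentOffset(a, b, mid, &p, &q);
  (void) printf("offset edge: (%g,%g) -> (%g,%g)\n", p.x, p.y, q.x, q.y);
  (void) printf("miter within limit: %d\n", MiterWithinLimit(p, q, 10.0, mid));
  return (0);
}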
/* * Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/annotate.h" #include "MagickCore/artifact.h" #include "MagickCore/blob.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/color.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/constitute.h" #include "MagickCore/draw.h" #include "MagickCore/draw-private.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/geometry.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/paint.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/property.h" #include "MagickCore/resample.h" #include "MagickCore/resample-private.h" #include "MagickCore/resource_.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/token.h" #include "MagickCore/transform-private.h" #include "MagickCore/utility.h" /* * Define declarations. */ #define BezierQuantum 200 #define DrawEpsilon (1.0e-10) /* * Typedef declarations. */ typedef struct _EdgeInfo { SegmentInfo bounds; double scanline; PointInfo * points; size_t number_points; ssize_t direction; MagickBooleanType ghostline; size_t highwater; } EdgeInfo; typedef struct _ElementInfo { double cx, cy, major, minor, angle; } ElementInfo; typedef struct _PolygonInfo { EdgeInfo * edges; size_t number_edges; } PolygonInfo; typedef enum { MoveToCode, OpenCode, GhostlineCode, LineToCode, EndCode } PathInfoCode; typedef struct _PathInfo { PointInfo point; PathInfoCode code; } PathInfo; /* * Forward declarations. */ static MagickBooleanType DrawStrokePolygon(Image *, const DrawInfo *, const PrimitiveInfo *, ExceptionInfo *); static PrimitiveInfo * TraceStrokePolygon(const DrawInfo *, const PrimitiveInfo *); static size_t TracePath(PrimitiveInfo *, const char *); static void TraceArc(PrimitiveInfo *, const PointInfo, const PointInfo, const PointInfo), TraceArcPath(PrimitiveInfo *, const PointInfo, const PointInfo, const PointInfo, const double, const MagickBooleanType, const MagickBooleanType), TraceBezier(PrimitiveInfo *, const size_t), TraceCircle(PrimitiveInfo *, const PointInfo, const PointInfo), TraceEllipse(PrimitiveInfo *, const PointInfo, const PointInfo, const PointInfo), TraceLine(PrimitiveInfo *, const PointInfo, const PointInfo), TraceRectangle(PrimitiveInfo *, const PointInfo, const PointInfo), TraceRoundRectangle(PrimitiveInfo *, const PointInfo, const PointInfo, PointInfo), TraceSquareLinecap(PrimitiveInfo *, const size_t, const double); /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % A c q u i r e D r a w I n f o * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % AcquireDrawInfo() returns a DrawInfo structure properly * initialized. 
% % The format of the AcquireDrawInfo method is: % % * DrawInfo *AcquireDrawInfo(void) % */ MagickExport DrawInfo * AcquireDrawInfo(void) { DrawInfo * draw_info; draw_info = (DrawInfo *) AcquireMagickMemory(sizeof(*draw_info)); if (draw_info == (DrawInfo *) NULL) ThrowFatalException(ResourceLimitFatalError, "MemoryAllocationFailed"); GetDrawInfo((ImageInfo *) NULL, draw_info); return (draw_info); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % C l o n e D r a w I n f o * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % CloneDrawInfo() makes a copy of the given draw_info structure. * If NULL % is specified, a new DrawInfo structure is created initialized * to default % values. % % The format of the CloneDrawInfo method is: % % * DrawInfo *CloneDrawInfo(const ImageInfo *image_info, % const * DrawInfo *draw_info) % % A description of each parameter follows: % % * o image_info: the image info. % % o draw_info: the draw info. % */ MagickExport DrawInfo * CloneDrawInfo(const ImageInfo * image_info, const DrawInfo * draw_info) { DrawInfo * clone_info; ExceptionInfo * exception; clone_info = (DrawInfo *) AcquireMagickMemory(sizeof(*clone_info)); if (clone_info == (DrawInfo *) NULL) ThrowFatalException(ResourceLimitFatalError, "MemoryAllocationFailed"); GetDrawInfo(image_info, clone_info); if (draw_info == (DrawInfo *) NULL) return (clone_info); exception = AcquireExceptionInfo(); if (clone_info->primitive != (char *)NULL) (void)CloneString(&clone_info->primitive, draw_info->primitive); if (draw_info->geometry != (char *)NULL) (void)CloneString(&clone_info->geometry, draw_info->geometry); clone_info->viewbox = draw_info->viewbox; clone_info->affine = draw_info->affine; clone_info->gravity = draw_info->gravity; clone_info->fill = draw_info->fill; clone_info->stroke = draw_info->stroke; clone_info->stroke_width = draw_info->stroke_width; if (draw_info->fill_pattern != (Image *) NULL) clone_info->fill_pattern = CloneImage(draw_info->fill_pattern, 0, 0, MagickTrue, exception); if (draw_info->stroke_pattern != (Image *) NULL) clone_info->stroke_pattern = CloneImage(draw_info->stroke_pattern, 0, 0, MagickTrue, exception); clone_info->stroke_antialias = draw_info->stroke_antialias; clone_info->text_antialias = draw_info->text_antialias; clone_info->fill_rule = draw_info->fill_rule; clone_info->linecap = draw_info->linecap; clone_info->linejoin = draw_info->linejoin; clone_info->miterlimit = draw_info->miterlimit; clone_info->dash_offset = draw_info->dash_offset; clone_info->decorate = draw_info->decorate; clone_info->compose = draw_info->compose; if (draw_info->text != (char *)NULL) (void)CloneString(&clone_info->text, draw_info->text); if (draw_info->font != (char *)NULL) (void)CloneString(&clone_info->font, draw_info->font); if (draw_info->metrics != (char *)NULL) (void)CloneString(&clone_info->metrics, draw_info->metrics); if (draw_info->family != (char *)NULL) (void)CloneString(&clone_info->family, draw_info->family); clone_info->style = draw_info->style; clone_info->stretch = draw_info->stretch; clone_info->weight = draw_info->weight; if (draw_info->encoding != (char *)NULL) (void)CloneString(&clone_info->encoding, draw_info->encoding); clone_info->pointsize = draw_info->pointsize; clone_info->kerning = draw_info->kerning; clone_info->interline_spacing = draw_info->interline_spacing; clone_info->interword_spacing = draw_info->interword_spacing; clone_info->direction = 
draw_info->direction; if (draw_info->density != (char *)NULL) (void)CloneString(&clone_info->density, draw_info->density); clone_info->align = draw_info->align; clone_info->undercolor = draw_info->undercolor; clone_info->border_color = draw_info->border_color; if (draw_info->server_name != (char *)NULL) (void)CloneString(&clone_info->server_name, draw_info->server_name); if (draw_info->dash_pattern != (double *)NULL) { register ssize_t x; for (x = 0; fabs(draw_info->dash_pattern[x]) >= DrawEpsilon; x++); clone_info->dash_pattern = (double *)AcquireQuantumMemory((size_t) x + 1UL, sizeof(*clone_info->dash_pattern)); if (clone_info->dash_pattern == (double *)NULL) ThrowFatalException(ResourceLimitFatalError, "UnableToAllocateDashPattern"); (void)CopyMagickMemory(clone_info->dash_pattern, draw_info->dash_pattern, (size_t) (x + 1) * sizeof(*clone_info->dash_pattern)); } clone_info->gradient = draw_info->gradient; if (draw_info->gradient.stops != (StopInfo *) NULL) { size_t number_stops; number_stops = clone_info->gradient.number_stops; clone_info->gradient.stops = (StopInfo *) AcquireQuantumMemory((size_t) number_stops, sizeof(*clone_info->gradient.stops)); if (clone_info->gradient.stops == (StopInfo *) NULL) ThrowFatalException(ResourceLimitFatalError, "UnableToAllocateDashPattern"); (void)CopyMagickMemory(clone_info->gradient.stops, draw_info->gradient.stops, (size_t) number_stops * sizeof(*clone_info->gradient.stops)); } if (draw_info->clip_mask != (char *)NULL) (void)CloneString(&clone_info->clip_mask, draw_info->clip_mask); clone_info->bounds = draw_info->bounds; clone_info->clip_units = draw_info->clip_units; clone_info->render = draw_info->render; clone_info->fill_alpha = draw_info->fill_alpha; clone_info->stroke_alpha = draw_info->stroke_alpha; clone_info->element_reference = draw_info->element_reference; clone_info->debug = IsEventLogging(); exception = DestroyExceptionInfo(exception); return (clone_info); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % + C o n v e r t P a t h T o P o l y g o n * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % ConvertPathToPolygon() converts a path to the more efficient * sorted % rendering form. % % The format of the ConvertPathToPolygon * method is: % % PolygonInfo *ConvertPathToPolygon(const DrawInfo * *draw_info, % const PathInfo *path_info) % % A description of each * parameter follows: % % o Method ConvertPathToPolygon returns the path * in a more efficient sorted % rendering form of type PolygonInfo. % % * o draw_info: Specifies a pointer to an DrawInfo structure. % % o * path_info: Specifies a pointer to an PathInfo structure. % % */ #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static int CompareEdges(const void *x, const void *y) { register const EdgeInfo * p, *q; /* * Compare two edges. 
*/ p = (const EdgeInfo *)x; q = (const EdgeInfo *)y; if ((p->points[0].y - DrawEpsilon) > q->points[0].y) return (1); if ((p->points[0].y + DrawEpsilon) < q->points[0].y) return (-1); if ((p->points[0].x - DrawEpsilon) > q->points[0].x) return (1); if ((p->points[0].x + DrawEpsilon) < q->points[0].x) return (-1); if (((p->points[1].x - p->points[0].x) * (q->points[1].y - q->points[0].y) - (p->points[1].y - p->points[0].y) * (q->points[1].x - q->points[0].x)) > 0.0) return (1); return (-1); } #if defined(__cplusplus) || defined(c_plusplus) } #endif static void LogPolygonInfo(const PolygonInfo * polygon_info) { register EdgeInfo * p; register ssize_t i, j; (void)LogMagickEvent(DrawEvent, GetMagickModule(), " begin active-edge"); p = polygon_info->edges; for (i = 0; i < (ssize_t) polygon_info->number_edges; i++) { (void)LogMagickEvent(DrawEvent, GetMagickModule(), " edge %.20g:", (double)i); (void)LogMagickEvent(DrawEvent, GetMagickModule(), " direction: %s", p->direction != MagickFalse ? "down" : "up"); (void)LogMagickEvent(DrawEvent, GetMagickModule(), " ghostline: %s", p->ghostline != MagickFalse ? "transparent" : "opaque"); (void)LogMagickEvent(DrawEvent, GetMagickModule(), " bounds: %g,%g - %g,%g", p->bounds.x1, p->bounds.y1, p->bounds.x2, p->bounds.y2); for (j = 0; j < (ssize_t) p->number_points; j++) (void)LogMagickEvent(DrawEvent, GetMagickModule(), " %g,%g", p->points[j].x, p->points[j].y); p++; } (void)LogMagickEvent(DrawEvent, GetMagickModule(), " end active-edge"); } static void ReversePoints(PointInfo * points, const size_t number_points) { PointInfo point; register ssize_t i; for (i = 0; i < (ssize_t) (number_points >> 1); i++) { point = points[i]; points[i] = points[number_points - (i + 1)]; points[number_points - (i + 1)] = point; } } static PolygonInfo * ConvertPathToPolygon(const PathInfo * path_info) { long direction, next_direction; PointInfo point, *points; PolygonInfo * polygon_info; SegmentInfo bounds; register ssize_t i, n; MagickBooleanType ghostline; size_t edge, number_edges, number_points; /* * Convert a path to the more efficient sorted rendering form. */ polygon_info = (PolygonInfo *) AcquireMagickMemory(sizeof(*polygon_info)); if (polygon_info == (PolygonInfo *) NULL) return ((PolygonInfo *) NULL); number_edges = 16; polygon_info->edges = (EdgeInfo *) AcquireQuantumMemory(number_edges, sizeof(*polygon_info->edges)); if (polygon_info->edges == (EdgeInfo *) NULL) return ((PolygonInfo *) NULL); (void)ResetMagickMemory(polygon_info->edges, 0, number_edges * sizeof(*polygon_info->edges)); direction = 0; edge = 0; ghostline = MagickFalse; n = 0; number_points = 0; points = (PointInfo *) NULL; (void)ResetMagickMemory(&point, 0, sizeof(point)); (void)ResetMagickMemory(&bounds, 0, sizeof(bounds)); for (i = 0; path_info[i].code != EndCode; i++) { if ((path_info[i].code == MoveToCode) || (path_info[i].code == OpenCode) || (path_info[i].code == GhostlineCode)) { /* * Move to. 
*/ if ((points != (PointInfo *) NULL) && (n >= 2)) { if (edge == number_edges) { number_edges <<= 1; polygon_info->edges = (EdgeInfo *) ResizeQuantumMemory( polygon_info->edges, (size_t) number_edges, sizeof(*polygon_info->edges)); if (polygon_info->edges == (EdgeInfo *) NULL) return ((PolygonInfo *) NULL); } polygon_info->edges[edge].number_points = (size_t) n; polygon_info->edges[edge].scanline = (-1.0); polygon_info->edges[edge].highwater = 0; polygon_info->edges[edge].ghostline = ghostline; polygon_info->edges[edge].direction = (ssize_t) (direction > 0); if (direction < 0) ReversePoints(points, (size_t) n); polygon_info->edges[edge].points = points; polygon_info->edges[edge].bounds = bounds; polygon_info->edges[edge].bounds.y1 = points[0].y; polygon_info->edges[edge].bounds.y2 = points[n - 1].y; points = (PointInfo *) NULL; ghostline = MagickFalse; edge++; } if (points == (PointInfo *) NULL) { number_points = 16; points = (PointInfo *) AcquireQuantumMemory((size_t) number_points, sizeof(*points)); if (points == (PointInfo *) NULL) return ((PolygonInfo *) NULL); } ghostline = path_info[i].code == GhostlineCode ? MagickTrue : MagickFalse; point = path_info[i].point; points[0] = point; bounds.x1 = point.x; bounds.x2 = point.x; direction = 0; n = 1; continue; } /* * Line to. */ next_direction = ((path_info[i].point.y > point.y) || ((fabs(path_info[i].point.y - point.y) < DrawEpsilon) && (path_info[i].point.x > point.x))) ? 1 : -1; if ((points != (PointInfo *) NULL) && (direction != 0) && (direction != next_direction)) { /* * New edge. */ point = points[n - 1]; if (edge == number_edges) { number_edges <<= 1; polygon_info->edges = (EdgeInfo *) ResizeQuantumMemory( polygon_info->edges, (size_t) number_edges, sizeof(*polygon_info->edges)); if (polygon_info->edges == (EdgeInfo *) NULL) return ((PolygonInfo *) NULL); } polygon_info->edges[edge].number_points = (size_t) n; polygon_info->edges[edge].scanline = (-1.0); polygon_info->edges[edge].highwater = 0; polygon_info->edges[edge].ghostline = ghostline; polygon_info->edges[edge].direction = (ssize_t) (direction > 0); if (direction < 0) ReversePoints(points, (size_t) n); polygon_info->edges[edge].points = points; polygon_info->edges[edge].bounds = bounds; polygon_info->edges[edge].bounds.y1 = points[0].y; polygon_info->edges[edge].bounds.y2 = points[n - 1].y; number_points = 16; points = (PointInfo *) AcquireQuantumMemory((size_t) number_points, sizeof(*points)); if (points == (PointInfo *) NULL) return ((PolygonInfo *) NULL); n = 1; ghostline = MagickFalse; points[0] = point; bounds.x1 = point.x; bounds.x2 = point.x; edge++; } direction = next_direction; if (points == (PointInfo *) NULL) continue; if (n == (ssize_t) number_points) { number_points <<= 1; points = (PointInfo *) ResizeQuantumMemory(points, (size_t) number_points, sizeof(*points)); if (points == (PointInfo *) NULL) return ((PolygonInfo *) NULL); } point = path_info[i].point; points[n] = point; if (point.x < bounds.x1) bounds.x1 = point.x; if (point.x > bounds.x2) bounds.x2 = point.x; n++; } if (points != (PointInfo *) NULL) { if (n < 2) points = (PointInfo *) RelinquishMagickMemory(points); else { if (edge == number_edges) { number_edges <<= 1; polygon_info->edges = (EdgeInfo *) ResizeQuantumMemory( polygon_info->edges, (size_t) number_edges, sizeof(*polygon_info->edges)); if (polygon_info->edges == (EdgeInfo *) NULL) return ((PolygonInfo *) NULL); } polygon_info->edges[edge].number_points = (size_t) n; polygon_info->edges[edge].scanline = (-1.0); 
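#if 0
  /*
    Illustrative sketch (added for exposition; not part of the original
    source): the intended call chain for the static helpers in this
    translation unit, as implied by the code above.  A traced primitive
    list is first flattened into a vector path and then into the sorted
    edge table consumed by the scan-line rasterizer.  The helper name
    AcquirePolygonFromPrimitive() is hypothetical; error handling is
    reduced to early returns.
  */
  static PolygonInfo *AcquirePolygonFromPrimitive(
    const PrimitiveInfo *primitive_info)
  {
    PathInfo
      *path_info;

    PolygonInfo
      *polygon_info;

    path_info = ConvertPrimitiveToPath(primitive_info);
    if (path_info == (PathInfo *) NULL)
      return ((PolygonInfo *) NULL);
    polygon_info = ConvertPathToPolygon(path_info);
    path_info = (PathInfo *) RelinquishMagickMemory(path_info);
    return (polygon_info);  /* release later with DestroyPolygonInfo() */
  }
#endif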
polygon_info->edges[edge].highwater = 0; polygon_info->edges[edge].ghostline = ghostline; polygon_info->edges[edge].direction = (ssize_t) (direction > 0); if (direction < 0) ReversePoints(points, (size_t) n); polygon_info->edges[edge].points = points; polygon_info->edges[edge].bounds = bounds; polygon_info->edges[edge].bounds.y1 = points[0].y; polygon_info->edges[edge].bounds.y2 = points[n - 1].y; ghostline = MagickFalse; edge++; } } polygon_info->number_edges = edge; qsort(polygon_info->edges, (size_t) polygon_info->number_edges, sizeof(*polygon_info->edges), CompareEdges); if (IsEventLogging() != MagickFalse) LogPolygonInfo(polygon_info); return (polygon_info); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % + C o n v e r t P r i m i t i v e T o P a t h * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % ConvertPrimitiveToPath() converts a PrimitiveInfo structure into * a vector % path structure. % % The format of the ConvertPrimitiveToPath * method is: % % PathInfo *ConvertPrimitiveToPath(const DrawInfo * *draw_info, % const PrimitiveInfo *primitive_info) % % A * description of each parameter follows: % % o Method * ConvertPrimitiveToPath returns a vector path structure of type % * PathInfo. % % o draw_info: a structure of type DrawInfo. % % o * primitive_info: Specifies a pointer to an PrimitiveInfo structure. % % */ static void LogPathInfo(const PathInfo * path_info) { register const PathInfo * p; (void)LogMagickEvent(DrawEvent, GetMagickModule(), " begin vector-path"); for (p = path_info; p->code != EndCode; p++) (void)LogMagickEvent(DrawEvent, GetMagickModule(), " %g,%g %s", p->point.x, p->point.y, p->code == GhostlineCode ? "moveto ghostline" : p->code == OpenCode ? "moveto open" : p->code == MoveToCode ? "moveto" : p->code == LineToCode ? "lineto" : "?"); (void)LogMagickEvent(DrawEvent, GetMagickModule(), " end vector-path"); } static PathInfo * ConvertPrimitiveToPath(const PrimitiveInfo * primitive_info) { PathInfo * path_info; PathInfoCode code; PointInfo p, q; register ssize_t i, n; ssize_t coordinates, start; /* * Converts a PrimitiveInfo structure into a vector path structure. */ switch (primitive_info->primitive) { case AlphaPrimitive: case ColorPrimitive: case ImagePrimitive: case PointPrimitive: case TextPrimitive: return ((PathInfo *) NULL); default: break; } for (i = 0; primitive_info[i].primitive != UndefinedPrimitive; i++); path_info = (PathInfo *) AcquireQuantumMemory((size_t) (2UL * i + 3UL), sizeof(*path_info)); if (path_info == (PathInfo *) NULL) return ((PathInfo *) NULL); coordinates = 0; n = 0; p.x = (-1.0); p.y = (-1.0); q.x = (-1.0); q.y = (-1.0); start = 0; for (i = 0; primitive_info[i].primitive != UndefinedPrimitive; i++) { code = LineToCode; if (coordinates <= 0) { coordinates = (ssize_t) primitive_info[i].coordinates; p = primitive_info[i].point; start = n; code = MoveToCode; } coordinates--; /* * Eliminate duplicate points. */ if ((i == 0) || (fabs(q.x - primitive_info[i].point.x) >= DrawEpsilon) || (fabs(q.y - primitive_info[i].point.y) >= DrawEpsilon)) { path_info[n].code = code; path_info[n].point = primitive_info[i].point; q = primitive_info[i].point; n++; } if (coordinates > 0) continue; if ((fabs(p.x - primitive_info[i].point.x) < DrawEpsilon) && (fabs(p.y - primitive_info[i].point.y) < DrawEpsilon)) continue; /* * Mark the p point as open if it does not match the q. 
  */
    path_info[start].code = OpenCode;
    path_info[n].code = GhostlineCode;
    path_info[n].point = primitive_info[i].point;
    n++;
    path_info[n].code = LineToCode;
    path_info[n].point = p;
    n++;
  }
  path_info[n].code = EndCode;
  path_info[n].point.x = 0.0;
  path_info[n].point.y = 0.0;
  if (IsEventLogging() != MagickFalse)
    LogPathInfo(path_info);
  return (path_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   D e s t r o y D r a w I n f o                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyDrawInfo() deallocates memory associated with a DrawInfo structure.
%
%  The format of the DestroyDrawInfo method is:
%
%      DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
%
%  A description of each parameter follows:
%
%    o draw_info: the draw info.
%
*/
MagickExport DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
{
  /*
    Validate the structure before dereferencing it; the debug trace below
    reads draw_info->debug, so the NULL assertion must come first.
  */
  assert(draw_info != (DrawInfo *) NULL);
  assert(draw_info->signature == MagickCoreSignature);
  if (draw_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent, GetMagickModule(), "...");
  if (draw_info->primitive != (char *) NULL)
    draw_info->primitive = DestroyString(draw_info->primitive);
  if (draw_info->text != (char *) NULL)
    draw_info->text = DestroyString(draw_info->text);
  if (draw_info->geometry != (char *) NULL)
    draw_info->geometry = DestroyString(draw_info->geometry);
  if (draw_info->fill_pattern != (Image *) NULL)
    draw_info->fill_pattern = DestroyImage(draw_info->fill_pattern);
  if (draw_info->stroke_pattern != (Image *) NULL)
    draw_info->stroke_pattern = DestroyImage(draw_info->stroke_pattern);
  if (draw_info->font != (char *) NULL)
    draw_info->font = DestroyString(draw_info->font);
  if (draw_info->metrics != (char *) NULL)
    draw_info->metrics = DestroyString(draw_info->metrics);
  if (draw_info->family != (char *) NULL)
    draw_info->family = DestroyString(draw_info->family);
  if (draw_info->encoding != (char *) NULL)
    draw_info->encoding = DestroyString(draw_info->encoding);
  if (draw_info->density != (char *) NULL)
    draw_info->density = DestroyString(draw_info->density);
  if (draw_info->server_name != (char *) NULL)
    draw_info->server_name = (char *) RelinquishMagickMemory(
      draw_info->server_name);
  if (draw_info->dash_pattern != (double *) NULL)
    draw_info->dash_pattern = (double *) RelinquishMagickMemory(
      draw_info->dash_pattern);
  if (draw_info->gradient.stops != (StopInfo *) NULL)
    draw_info->gradient.stops = (StopInfo *) RelinquishMagickMemory(
      draw_info->gradient.stops);
  if (draw_info->clip_mask != (char *) NULL)
    draw_info->clip_mask = DestroyString(draw_info->clip_mask);
  draw_info->signature = (~MagickCoreSignature);
  draw_info = (DrawInfo *) RelinquishMagickMemory(draw_info);
  return (draw_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   D e s t r o y E d g e                                                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyEdge() destroys the specified polygon edge.
%
%  The format of the DestroyEdge method is:
%
%      ssize_t DestroyEdge(PolygonInfo *polygon_info,const int edge)
%
%  A description of each parameter follows:
%
%    o polygon_info: Specifies a pointer to a PolygonInfo structure.
%
%    o edge: the polygon edge number to destroy.
% */ static size_t DestroyEdge(PolygonInfo * polygon_info, const size_t edge) { assert(edge < polygon_info->number_edges); polygon_info->edges[edge].points = (PointInfo *) RelinquishMagickMemory( polygon_info->edges[edge].points); polygon_info->number_edges--; if (edge < polygon_info->number_edges) (void)CopyMagickMemory(polygon_info->edges + edge, polygon_info->edges + edge + 1, (size_t) (polygon_info->number_edges - edge) * sizeof(*polygon_info->edges)); return (polygon_info->number_edges); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % + D e s t r o y P o l y g o n I n f o * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % DestroyPolygonInfo() destroys the PolygonInfo data structure. % * % The format of the DestroyPolygonInfo method is: % % PolygonInfo * *DestroyPolygonInfo(PolygonInfo *polygon_info) % % A description of each * parameter follows: % % o polygon_info: Specifies a pointer to an * PolygonInfo structure. % */ static PolygonInfo * DestroyPolygonInfo(PolygonInfo * polygon_info) { register ssize_t i; for (i = 0; i < (ssize_t) polygon_info->number_edges; i++) polygon_info->edges[i].points = (PointInfo *) RelinquishMagickMemory(polygon_info->edges[i].points); polygon_info->edges = (EdgeInfo *) RelinquishMagickMemory(polygon_info->edges); return ((PolygonInfo *) RelinquishMagickMemory(polygon_info)); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % D r a w A f f i n e I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % DrawAffineImage() composites the source over the destination * image as % dictated by the affine transform. % % The format of the * DrawAffineImage method is: % % MagickBooleanType * DrawAffineImage(Image *image,const Image *source, % const * AffineMatrix *affine,ExceptionInfo *exception) % % A description of each * parameter follows: % % o image: the image. % % o source: the source * image. % % o affine: the affine transform. % % o exception: return * any errors or warnings in this structure. % */ static SegmentInfo AffineEdge(const Image * image, const AffineMatrix * affine, const double y, const SegmentInfo * edge) { double intercept, z; register double x; SegmentInfo inverse_edge; /* * Determine left and right edges. */ inverse_edge.x1 = edge->x1; inverse_edge.y1 = edge->y1; inverse_edge.x2 = edge->x2; inverse_edge.y2 = edge->y2; z = affine->ry * y + affine->tx; if (affine->sx >= DrawEpsilon) { intercept = (-z / affine->sx); x = intercept; if (x > inverse_edge.x1) inverse_edge.x1 = x; intercept = (-z + (double)image->columns) / affine->sx; x = intercept; if (x < inverse_edge.x2) inverse_edge.x2 = x; } else if (affine->sx < -DrawEpsilon) { intercept = (-z + (double)image->columns) / affine->sx; x = intercept; if (x > inverse_edge.x1) inverse_edge.x1 = x; intercept = (-z / affine->sx); x = intercept; if (x < inverse_edge.x2) inverse_edge.x2 = x; } else if ((z < 0.0) || ((size_t) floor(z + 0.5) >= image->columns)) { inverse_edge.x2 = edge->x1; return (inverse_edge); } /* * Determine top and bottom edges. 
*/ z = affine->sy * y + affine->ty; if (affine->rx >= DrawEpsilon) { intercept = (-z / affine->rx); x = intercept; if (x > inverse_edge.x1) inverse_edge.x1 = x; intercept = (-z + (double)image->rows) / affine->rx; x = intercept; if (x < inverse_edge.x2) inverse_edge.x2 = x; } else if (affine->rx < -DrawEpsilon) { intercept = (-z + (double)image->rows) / affine->rx; x = intercept; if (x > inverse_edge.x1) inverse_edge.x1 = x; intercept = (-z / affine->rx); x = intercept; if (x < inverse_edge.x2) inverse_edge.x2 = x; } else if ((z < 0.0) || ((size_t) floor(z + 0.5) >= image->rows)) { inverse_edge.x2 = edge->x2; return (inverse_edge); } return (inverse_edge); } static AffineMatrix InverseAffineMatrix(const AffineMatrix * affine) { AffineMatrix inverse_affine; double determinant; determinant = PerceptibleReciprocal(affine->sx * affine->sy - affine->rx * affine->ry); inverse_affine.sx = determinant * affine->sy; inverse_affine.rx = determinant * (-affine->rx); inverse_affine.ry = determinant * (-affine->ry); inverse_affine.sy = determinant * affine->sx; inverse_affine.tx = (-affine->tx) * inverse_affine.sx - affine->ty * inverse_affine.ry; inverse_affine.ty = (-affine->tx) * inverse_affine.rx - affine->ty * inverse_affine.sy; return (inverse_affine); } MagickExport MagickBooleanType DrawAffineImage(Image * image, const Image * source, const AffineMatrix * affine, ExceptionInfo * exception) { AffineMatrix inverse_affine; CacheView * image_view, *source_view; MagickBooleanType status; PixelInfo zero; PointInfo extent[4], min, max; register ssize_t i; SegmentInfo edge; ssize_t start, stop, y; /* * Determine bounding box. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(source != (const Image *)NULL); assert(source->signature == MagickCoreSignature); assert(affine != (AffineMatrix *) NULL); extent[0].x = 0.0; extent[0].y = 0.0; extent[1].x = (double)source->columns - 1.0; extent[1].y = 0.0; extent[2].x = (double)source->columns - 1.0; extent[2].y = (double)source->rows - 1.0; extent[3].x = 0.0; extent[3].y = (double)source->rows - 1.0; for (i = 0; i < 4; i++) { PointInfo point; point = extent[i]; extent[i].x = point.x * affine->sx + point.y * affine->ry + affine->tx; extent[i].y = point.x * affine->rx + point.y * affine->sy + affine->ty; } min = extent[0]; max = extent[0]; for (i = 1; i < 4; i++) { if (min.x > extent[i].x) min.x = extent[i].x; if (min.y > extent[i].y) min.y = extent[i].y; if (max.x < extent[i].x) max.x = extent[i].x; if (max.y < extent[i].y) max.y = extent[i].y; } /* * Affine transform image. 
*/ if (SetImageStorageClass(image, DirectClass, exception) == MagickFalse) return (MagickFalse); status = MagickTrue; edge.x1 = MagickMax(min.x, 0.0); edge.y1 = MagickMax(min.y, 0.0); edge.x2 = MagickMin(max.x, (double)image->columns - 1.0); edge.y2 = MagickMin(max.y, (double)image->rows - 1.0); inverse_affine = InverseAffineMatrix(affine); GetPixelInfo(image, &zero); start = (ssize_t) ceil(edge.y1 - 0.5); stop = (ssize_t) floor(edge.y2 + 0.5); source_view = AcquireVirtualCacheView(source, exception); image_view = AcquireAuthenticCacheView(image, exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(source,image,1,1) #endif for (y = start; y <= stop; y++) { PixelInfo composite, pixel; PointInfo point; register ssize_t x; register Quantum * magick_restrict q; SegmentInfo inverse_edge; ssize_t x_offset; inverse_edge = AffineEdge(source, &inverse_affine, (double)y, &edge); if (inverse_edge.x2 < inverse_edge.x1) continue; q = GetCacheViewAuthenticPixels(image_view, (ssize_t) ceil(inverse_edge.x1 - 0.5), y, (size_t) (floor(inverse_edge.x2 + 0.5) - ceil(inverse_edge.x1 - 0.5) + 1), 1, exception); if (q == (Quantum *) NULL) continue; pixel = zero; composite = zero; x_offset = 0; for (x = (ssize_t) ceil(inverse_edge.x1 - 0.5); x <= (ssize_t) floor(inverse_edge.x2 + 0.5); x++) { point.x = (double)x *inverse_affine.sx + y * inverse_affine.ry + inverse_affine.tx; point.y = (double)x *inverse_affine.rx + y * inverse_affine.sy + inverse_affine.ty; (void)InterpolatePixelInfo(source, source_view, UndefinedInterpolatePixel, point.x, point.y, &pixel, exception); GetPixelInfoPixel(image, q, &composite); CompositePixelInfoOver(&pixel, pixel.alpha, &composite, composite.alpha, &composite); SetPixelViaPixelInfo(image, &composite, q); x_offset++; q += GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; } source_view = DestroyCacheView(source_view); image_view = DestroyCacheView(image_view); return (status); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % + D r a w B o u n d i n g R e c t a n g l e s * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % DrawBoundingRectangles() draws the bounding rectangles on the * image. This % is only useful for developers debugging the rendering * algorithm. % % The format of the DrawBoundingRectangles method is: % % * void DrawBoundingRectangles(Image *image,const DrawInfo *draw_info, % * PolygonInfo *polygon_info,ExceptionInfo *exception) % % A description of * each parameter follows: % % o image: the image. % % o draw_info: the * draw info. % % o polygon_info: Specifies a pointer to a PolygonInfo * structure. % % o exception: return any errors or warnings in this * structure. 
% */ static void DrawBoundingRectangles(Image * image, const DrawInfo * draw_info, const PolygonInfo * polygon_info, ExceptionInfo * exception) { DrawInfo * clone_info; double mid; PointInfo end, resolution, start; PrimitiveInfo primitive_info[6]; register ssize_t i; SegmentInfo bounds; ssize_t coordinates; clone_info = CloneDrawInfo((ImageInfo *) NULL, draw_info); (void)QueryColorCompliance("#000F", AllCompliance, &clone_info->fill, exception); resolution.x = DefaultResolution; resolution.y = DefaultResolution; if (clone_info->density != (char *)NULL) { GeometryInfo geometry_info; MagickStatusType flags; flags = ParseGeometry(clone_info->density, &geometry_info); resolution.x = geometry_info.rho; resolution.y = geometry_info.sigma; if ((flags & SigmaValue) == MagickFalse) resolution.y = resolution.x; } mid = (resolution.x / 72.0) * ExpandAffine(&clone_info->affine) * clone_info->stroke_width / 2.0; bounds.x1 = 0.0; bounds.y1 = 0.0; bounds.x2 = 0.0; bounds.y2 = 0.0; if (polygon_info != (PolygonInfo *) NULL) { bounds = polygon_info->edges[0].bounds; for (i = 1; i < (ssize_t) polygon_info->number_edges; i++) { if (polygon_info->edges[i].bounds.x1 < (double)bounds.x1) bounds.x1 = polygon_info->edges[i].bounds.x1; if (polygon_info->edges[i].bounds.y1 < (double)bounds.y1) bounds.y1 = polygon_info->edges[i].bounds.y1; if (polygon_info->edges[i].bounds.x2 > (double)bounds.x2) bounds.x2 = polygon_info->edges[i].bounds.x2; if (polygon_info->edges[i].bounds.y2 > (double)bounds.y2) bounds.y2 = polygon_info->edges[i].bounds.y2; } bounds.x1 -= mid; bounds.x1 = bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double) image->columns ? (double)image->columns - 1 : bounds.x1; bounds.y1 -= mid; bounds.y1 = bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double) image->rows ? (double)image->rows - 1 : bounds.y1; bounds.x2 += mid; bounds.x2 = bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double) image->columns ? (double)image->columns - 1 : bounds.x2; bounds.y2 += mid; bounds.y2 = bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double) image->rows ? 
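#if 0
  /*
    Clarifying sketch (added for exposition; not part of the original
    source): the nested ternaries immediately above and below pad a box by
    `mid' and clamp each ordinate to the image frame.  ClampOrdinate() is a
    hypothetical helper that states the same rule explicitly.
  */
  static inline double ClampOrdinate(const double value, const double limit)
  {
    if (value < 0.0)
      return (0.0);
    if (value >= limit)
      return (limit - 1.0);
    return (value);
  }
  /* e.g.  bounds.x1 = ClampOrdinate(bounds.x1 - mid, (double) image->columns); */
#endif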
        (double) image->rows - 1 : bounds.y2;
      for (i = 0; i < (ssize_t) polygon_info->number_edges; i++)
      {
        if (polygon_info->edges[i].direction != 0)
          (void) QueryColorCompliance("red", AllCompliance,
            &clone_info->stroke, exception);
        else
          (void) QueryColorCompliance("green", AllCompliance,
            &clone_info->stroke, exception);
        start.x = (double) (polygon_info->edges[i].bounds.x1 - mid);
        start.y = (double) (polygon_info->edges[i].bounds.y1 - mid);
        end.x = (double) (polygon_info->edges[i].bounds.x2 + mid);
        end.y = (double) (polygon_info->edges[i].bounds.y2 + mid);
        primitive_info[0].primitive = RectanglePrimitive;
        TraceRectangle(primitive_info, start, end);
        primitive_info[0].method = ReplaceMethod;
        coordinates = (ssize_t) primitive_info[0].coordinates;
        primitive_info[coordinates].primitive = UndefinedPrimitive;
        (void) DrawPrimitive(image, clone_info, primitive_info, exception);
      }
    }
  (void) QueryColorCompliance("blue", AllCompliance, &clone_info->stroke,
    exception);
  start.x = (double) (bounds.x1 - mid);
  start.y = (double) (bounds.y1 - mid);
  end.x = (double) (bounds.x2 + mid);
  end.y = (double) (bounds.y2 + mid);
  primitive_info[0].primitive = RectanglePrimitive;
  TraceRectangle(primitive_info, start, end);
  primitive_info[0].method = ReplaceMethod;
  coordinates = (ssize_t) primitive_info[0].coordinates;
  primitive_info[coordinates].primitive = UndefinedPrimitive;
  (void) DrawPrimitive(image, clone_info, primitive_info, exception);
  clone_info = DestroyDrawInfo(clone_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   D r a w C l i p P a t h                                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawClipPath() draws the clip path on the image mask.
%
%  The format of the DrawClipPath method is:
%
%      MagickBooleanType DrawClipPath(Image *image,const DrawInfo *draw_info,
%        const char *name,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o name: the name of the clip path.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport MagickBooleanType DrawClipPath(Image * image, const DrawInfo * draw_info, const char *name, ExceptionInfo * exception) { char filename[MagickPathExtent]; Image * clip_mask; const char *value; DrawInfo * clone_info; MagickStatusType status; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(draw_info != (const DrawInfo *)NULL); (void)FormatLocaleString(filename, MagickPathExtent, "%s", name); value = GetImageArtifact(image, filename); if (value == (const char *)NULL) return (MagickFalse); clip_mask = CloneImage(image, image->columns, image->rows, MagickTrue, exception); if (clip_mask == (Image *) NULL) return (MagickFalse); (void)QueryColorCompliance("#0000", AllCompliance, &clip_mask->background_color, exception); clip_mask->background_color.alpha = (MagickRealType) TransparentAlpha; (void)SetImageBackgroundColor(clip_mask, exception); if (image->debug != MagickFalse) (void)LogMagickEvent(DrawEvent, GetMagickModule(), "\nbegin clip-path %s", draw_info->clip_mask); clone_info = CloneDrawInfo((ImageInfo *) NULL, draw_info); (void)CloneString(&clone_info->primitive, value); (void)QueryColorCompliance("#ffffff", AllCompliance, &clone_info->fill, exception); clone_info->clip_mask = (char *)NULL; status = NegateImage(clip_mask, MagickFalse, exception); (void)SetImageMask(image, ReadPixelMask, clip_mask, exception); clip_mask = DestroyImage(clip_mask); status &= DrawImage(image, clone_info, exception); clone_info = DestroyDrawInfo(clone_info); if (image->debug != MagickFalse) (void)LogMagickEvent(DrawEvent, GetMagickModule(), "end clip-path"); return (status != 0 ? MagickTrue : MagickFalse); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % + D r a w D a s h P o l y g o n * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % DrawDashPolygon() draws a dashed polygon (line, rectangle, * ellipse) on the % image while respecting the dash offset and dash pattern * attributes. % % The format of the DrawDashPolygon method is: % % * MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info, % * const PrimitiveInfo *primitive_info,Image *image, % ExceptionInfo * *exception) % % A description of each parameter follows: % % o * draw_info: the draw info. % % o primitive_info: Specifies a pointer to * a PrimitiveInfo structure. % % o image: the image. % % o exception: * return any errors or warnings in this structure. 
% */ static MagickBooleanType DrawDashPolygon(const DrawInfo * draw_info, const PrimitiveInfo * primitive_info, Image * image, ExceptionInfo * exception) { DrawInfo * clone_info; double length, maximum_length, offset, scale, total_length; MagickStatusType status; PrimitiveInfo * dash_polygon; register ssize_t i; register double dx, dy; size_t number_vertices; ssize_t j, n; assert(draw_info != (const DrawInfo *)NULL); if (image->debug != MagickFalse) (void)LogMagickEvent(DrawEvent, GetMagickModule(), " begin draw-dash"); for (i = 0; primitive_info[i].primitive != UndefinedPrimitive; i++); number_vertices = (size_t) i; dash_polygon = (PrimitiveInfo *) AcquireQuantumMemory((size_t) (2UL * number_vertices + 1UL), sizeof(*dash_polygon)); if (dash_polygon == (PrimitiveInfo *) NULL) return (MagickFalse); clone_info = CloneDrawInfo((ImageInfo *) NULL, draw_info); clone_info->miterlimit = 0; dash_polygon[0] = primitive_info[0]; scale = ExpandAffine(&draw_info->affine); length = scale * (draw_info->dash_pattern[0] - 0.5); offset = fabs(draw_info->dash_offset) >= DrawEpsilon ? scale * draw_info->dash_offset : 0.0; j = 1; for (n = 0; offset > 0.0; j = 0) { if (draw_info->dash_pattern[n] <= 0.0) break; length = scale * (draw_info->dash_pattern[n] + (n == 0 ? -0.5 : 0.5)); if (offset > length) { offset -= length; n++; length = scale * (draw_info->dash_pattern[n] + 0.5); continue; } if (offset < length) { length -= offset; offset = 0.0; break; } offset = 0.0; n++; } status = MagickTrue; maximum_length = 0.0; total_length = 0.0; for (i = 1; (i < (ssize_t) number_vertices) && (length >= 0.0); i++) { dx = primitive_info[i].point.x - primitive_info[i - 1].point.x; dy = primitive_info[i].point.y - primitive_info[i - 1].point.y; maximum_length = hypot((double)dx, dy); if (fabs(length) < DrawEpsilon) { n++; if (fabs(draw_info->dash_pattern[n]) < DrawEpsilon) n = 0; length = scale * (draw_info->dash_pattern[n] + (n == 0 ? -0.5 : 0.5)); } for (total_length = 0.0; (length >= 0.0) && (maximum_length >= (total_length + length));) { total_length += length; if ((n & 0x01) != 0) { dash_polygon[0] = primitive_info[0]; dash_polygon[0].point.x = (double)(primitive_info[i - 1].point.x + dx * total_length / maximum_length); dash_polygon[0].point.y = (double)(primitive_info[i - 1].point.y + dy * total_length / maximum_length); j = 1; } else { if ((j + 1) > (ssize_t) (2 * number_vertices)) break; dash_polygon[j] = primitive_info[i - 1]; dash_polygon[j].point.x = (double)(primitive_info[i - 1].point.x + dx * total_length / maximum_length); dash_polygon[j].point.y = (double)(primitive_info[i - 1].point.y + dy * total_length / maximum_length); dash_polygon[j].coordinates = 1; j++; dash_polygon[0].coordinates = (size_t) j; dash_polygon[j].primitive = UndefinedPrimitive; status &= DrawStrokePolygon(image, clone_info, dash_polygon, exception); } n++; if (fabs(draw_info->dash_pattern[n]) < DrawEpsilon) n = 0; length = scale * (draw_info->dash_pattern[n] + (n == 0 ? 
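#if 0
  /*
    Usage sketch (added for exposition; not part of the original source).
    DrawDashPolygon() takes its dash lengths from draw_info->dash_pattern,
    a zero-terminated array of doubles (see the termination test in
    CloneDrawInfo() earlier in this file), scales them by the expanded
    affine and applies draw_info->dash_offset.  Building a 4-on/2-off
    pattern on a DrawInfo might look like this; AcquireDrawInfo() is the
    public constructor declared in MagickCore/draw.h.
  */
  DrawInfo
    *dash_info;

  dash_info = AcquireDrawInfo();
  dash_info->dash_pattern = (double *) AcquireQuantumMemory(3,
    sizeof(*dash_info->dash_pattern));
  if (dash_info->dash_pattern != (double *) NULL)
    {
      dash_info->dash_pattern[0] = 4.0;  /* dash length */
      dash_info->dash_pattern[1] = 2.0;  /* gap length */
      dash_info->dash_pattern[2] = 0.0;  /* sentinel terminator */
      dash_info->dash_offset = 0.0;
    }
#endif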
          -0.5 : 0.5));
      }
      length -= (maximum_length - total_length);
      if ((n & 0x01) != 0)
        continue;
      dash_polygon[j] = primitive_info[i];
      dash_polygon[j].coordinates = 1;
      j++;
    }
  if ((total_length <= maximum_length) && ((n & 0x01) == 0) && (j > 1))
    {
      dash_polygon[j] = primitive_info[i - 1];
      dash_polygon[j].point.x += DrawEpsilon;
      dash_polygon[j].point.y += DrawEpsilon;
      dash_polygon[j].coordinates = 1;
      j++;
      dash_polygon[0].coordinates = (size_t) j;
      dash_polygon[j].primitive = UndefinedPrimitive;
      status &= DrawStrokePolygon(image, clone_info, dash_polygon, exception);
    }
  dash_polygon = (PrimitiveInfo *) RelinquishMagickMemory(dash_polygon);
  clone_info = DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent, GetMagickModule(), " end draw-dash");
  return (status != 0 ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   D r a w I m a g e                                                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawImage() draws a graphic primitive on your image.  The primitive may be
%  represented as a string or filename.  Precede the filename with an "at"
%  sign (@) and the contents of the file are drawn on the image.  You can
%  affect how text is drawn by setting one or more members of the draw info
%  structure.
%
%  The format of the DrawImage method is:
%
%      MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static inline MagickBooleanType IsPoint(const char *point)
{
  char
    *p;

  double
    value;

  value = StringToDouble(point, &p);
  return ((fabs(value) < DrawEpsilon) && (p == point) ?
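#if 0
  /*
    Usage sketch (added for exposition; not part of the original source).
    A minimal caller of the public DrawImage() entry point documented
    above.  `image' and `exception' are assumed to have been obtained
    elsewhere (for example via ReadImage() and AcquireExceptionInfo()),
    and AcquireDrawInfo() is the public DrawInfo constructor declared in
    MagickCore/draw.h.  The MVG string uses only keywords this parser
    recognizes.
  */
  DrawInfo
    *draw_info;

  draw_info = AcquireDrawInfo();
  (void) CloneString(&draw_info->primitive,
    "stroke blue stroke-width 2 fill none rectangle 20,20 120,80");
  if (DrawImage(image, draw_info, exception) == MagickFalse)
    CatchException(exception);
  draw_info = DestroyDrawInfo(draw_info);
#endif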
MagickFalse : MagickTrue); } static inline void TracePoint(PrimitiveInfo * primitive_info, const PointInfo point) { primitive_info->coordinates = 1; primitive_info->point = point; } MagickExport MagickBooleanType DrawImage(Image * image, const DrawInfo * draw_info, ExceptionInfo * exception) { #define RenderImageTag "Render/Image" AffineMatrix affine, current; char keyword[MagickPathExtent], geometry[MagickPathExtent], *next_token, pattern[MagickPathExtent], *primitive, *token; const char *q; double angle, factor, primitive_extent; DrawInfo ** graphic_context; MagickBooleanType proceed; MagickSizeType length, number_points; MagickStatusType status; PointInfo point; PrimitiveInfo * primitive_info; PrimitiveType primitive_type; register const char *p; register ssize_t i, x; SegmentInfo bounds; size_t extent, number_stops; ssize_t j, k, n; StopInfo * stops; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(draw_info != (DrawInfo *) NULL); assert(draw_info->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "..."); if ((draw_info->primitive == (char *)NULL) || (*draw_info->primitive == '\0')) return (MagickFalse); if (image->debug != MagickFalse) (void)LogMagickEvent(DrawEvent, GetMagickModule(), "begin draw-image"); if (*draw_info->primitive != '@') primitive = AcquireString(draw_info->primitive); else primitive = FileToString(draw_info->primitive + 1, ~0UL, exception); if (primitive == (char *)NULL) return (MagickFalse); primitive_extent = (double)strlen(primitive); (void)SetImageArtifact(image, "MVG", primitive); n = 0; number_stops = 0; stops = (StopInfo *) NULL; /* * Allocate primitive info memory. */ graphic_context = (DrawInfo **) AcquireMagickMemory(sizeof(*graphic_context)); if (graphic_context == (DrawInfo **) NULL) { primitive = DestroyString(primitive); ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); } number_points = 6553; primitive_info = (PrimitiveInfo *) AcquireQuantumMemory((size_t) number_points, sizeof(*primitive_info)); if (primitive_info == (PrimitiveInfo *) NULL) { primitive = DestroyString(primitive); for (; n >= 0; n--) graphic_context[n] = DestroyDrawInfo(graphic_context[n]); graphic_context = (DrawInfo **) RelinquishMagickMemory(graphic_context); ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); } graphic_context[n] = CloneDrawInfo((ImageInfo *) NULL, draw_info); graphic_context[n]->viewbox = image->page; if ((image->page.width == 0) || (image->page.height == 0)) { graphic_context[n]->viewbox.width = image->columns; graphic_context[n]->viewbox.height = image->rows; } token = AcquireString(primitive); extent = strlen(token) + MagickPathExtent; if (SetImageStorageClass(image, DirectClass, exception) == MagickFalse) return (MagickFalse); status = MagickTrue; for (q = primitive; *q != '\0';) { /* * Interpret graphic primitive. */ GetNextToken(q, &q, MagickPathExtent, keyword); if (*keyword == '\0') break; if (*keyword == '#') { /* * Comment. 
*/ while ((*q != '\n') && (*q != '\0')) q++; continue; } p = q - strlen(keyword) - 1; primitive_type = UndefinedPrimitive; current = graphic_context[n]->affine; GetAffineMatrix(&affine); switch (*keyword) { case ';': break; case 'a': case 'A': { if (LocaleCompare("affine", keyword) == 0) { GetNextToken(q, &q, extent, token); affine.sx = StringToDouble(token, &next_token); if (token == next_token) status = MagickFalse; GetNextToken(q, &q, extent, token); if (*token == ',') GetNextToken(q, &q, extent, token); affine.rx = StringToDouble(token, &next_token); if (token == next_token) status = MagickFalse; GetNextToken(q, &q, extent, token); if (*token == ',') GetNextToken(q, &q, extent, token); affine.ry = StringToDouble(token, &next_token); if (token == next_token) status = MagickFalse; GetNextToken(q, &q, extent, token); if (*token == ',') GetNextToken(q, &q, extent, token); affine.sy = StringToDouble(token, &next_token); if (token == next_token) status = MagickFalse; GetNextToken(q, &q, extent, token); if (*token == ',') GetNextToken(q, &q, extent, token); affine.tx = StringToDouble(token, &next_token); if (token == next_token) status = MagickFalse; GetNextToken(q, &q, extent, token); if (*token == ',') GetNextToken(q, &q, extent, token); affine.ty = StringToDouble(token, &next_token); if (token == next_token) status = MagickFalse; break; } if (LocaleCompare("alpha", keyword) == 0) { primitive_type = AlphaPrimitive; break; } if (LocaleCompare("arc", keyword) == 0) { primitive_type = ArcPrimitive; break; } status = MagickFalse; break; } case 'b': case 'B': { if (LocaleCompare("bezier", keyword) == 0) { primitive_type = BezierPrimitive; break; } if (LocaleCompare("border-color", keyword) == 0) { GetNextToken(q, &q, extent, token); (void)QueryColorCompliance(token, AllCompliance, &graphic_context[n]->border_color, exception); break; } status = MagickFalse; break; } case 'c': case 'C': { if (LocaleCompare("clip-path", keyword) == 0) { /* * Create clip mask. 
*/ GetNextToken(q, &q, extent, token); (void)CloneString(&graphic_context[n]->clip_mask, token); (void)DrawClipPath(image, graphic_context[n], graphic_context[n]->clip_mask, exception); break; } if (LocaleCompare("clip-rule", keyword) == 0) { ssize_t fill_rule; GetNextToken(q, &q, extent, token); fill_rule = ParseCommandOption(MagickFillRuleOptions, MagickFalse, token); if (fill_rule == -1) status = MagickFalse; else graphic_context[n]->fill_rule = (FillRule) fill_rule; break; } if (LocaleCompare("clip-units", keyword) == 0) { ssize_t clip_units; GetNextToken(q, &q, extent, token); clip_units = ParseCommandOption(MagickClipPathOptions, MagickFalse, token); if (clip_units == -1) { status = MagickFalse; break; } graphic_context[n]->clip_units = (ClipPathUnits) clip_units; if (clip_units == ObjectBoundingBox) { GetAffineMatrix(&current); affine.sx = draw_info->bounds.x2; affine.sy = draw_info->bounds.y2; affine.tx = draw_info->bounds.x1; affine.ty = draw_info->bounds.y1; break; } break; } if (LocaleCompare("circle", keyword) == 0) { primitive_type = CirclePrimitive; break; } if (LocaleCompare("color", keyword) == 0) { primitive_type = ColorPrimitive; break; } status = MagickFalse; break; } case 'd': case 'D': { if (LocaleCompare("decorate", keyword) == 0) { ssize_t decorate; GetNextToken(q, &q, extent, token); decorate = ParseCommandOption(MagickDecorateOptions, MagickFalse, token); if (decorate == -1) status = MagickFalse; else graphic_context[n]->decorate = (DecorationType) decorate; break; } if (LocaleCompare("density", keyword) == 0) { GetNextToken(q, &q, extent, token); (void)CloneString(&graphic_context[n]->density, token); break; } if (LocaleCompare("direction", keyword) == 0) { ssize_t direction; GetNextToken(q, &q, extent, token); direction = ParseCommandOption(MagickDirectionOptions, MagickFalse, token); if (direction == -1) status = MagickFalse; else graphic_context[n]->direction = (DirectionType) direction; break; } status = MagickFalse; break; } case 'e': case 'E': { if (LocaleCompare("ellipse", keyword) == 0) { primitive_type = EllipsePrimitive; break; } if (LocaleCompare("encoding", keyword) == 0) { GetNextToken(q, &q, extent, token); (void)CloneString(&graphic_context[n]->encoding, token); break; } status = MagickFalse; break; } case 'f': case 'F': { if (LocaleCompare("fill", keyword) == 0) { GetNextToken(q, &q, extent, token); (void)FormatLocaleString(pattern, MagickPathExtent, "%s", token); if (GetImageArtifact(image, pattern) != (const char *)NULL) (void)DrawPatternPath(image, draw_info, token, &graphic_context[n]->fill_pattern, exception); else { status &= QueryColorCompliance(token, AllCompliance, &graphic_context[n]->fill, exception); if (graphic_context[n]->fill_alpha != OpaqueAlpha) graphic_context[n]->fill.alpha = graphic_context[n]->fill_alpha; if (status == MagickFalse) { ImageInfo * pattern_info; pattern_info = AcquireImageInfo(); (void)CopyMagickString(pattern_info->filename, token, MagickPathExtent); graphic_context[n]->fill_pattern = ReadImage(pattern_info, exception); CatchException(exception); pattern_info = DestroyImageInfo(pattern_info); } } break; } if (LocaleCompare("fill-opacity", keyword) == 0) { GetNextToken(q, &q, extent, token); factor = strchr(token, '%') != (char *)NULL ? 
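  /*
    Worked example (added for exposition; not part of the original source)
    for the fill-opacity handling that continues below: a trailing '%' on
    the token selects factor=0.01, otherwise factor=1.0, and the value is
    mapped to  alpha = QuantumRange - QuantumRange*(1 - factor*value).
    So "fill-opacity 0.25" and "fill-opacity 25%" both yield an alpha of
    QuantumRange/4, i.e. 25% opaque.
  */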
0.01 : 1.0; graphic_context[n]->fill.alpha = QuantumRange - ClampToQuantum( (MagickRealType) QuantumRange * (1.0 - factor * StringToDouble(token, &next_token))); if (token == next_token) status = MagickFalse; break; } if (LocaleCompare("fill-rule", keyword) == 0) { ssize_t fill_rule; GetNextToken(q, &q, extent, token); fill_rule = ParseCommandOption(MagickFillRuleOptions, MagickFalse, token); if (fill_rule == -1) status = MagickFalse; else graphic_context[n]->fill_rule = (FillRule) fill_rule; break; } if (LocaleCompare("font", keyword) == 0) { GetNextToken(q, &q, extent, token); (void)CloneString(&graphic_context[n]->font, token); if (LocaleCompare("none", token) == 0) graphic_context[n]->font = (char *)RelinquishMagickMemory( graphic_context[n]->font); break; } if (LocaleCompare("font-family", keyword) == 0) { GetNextToken(q, &q, extent, token); (void)CloneString(&graphic_context[n]->family, token); break; } if (LocaleCompare("font-size", keyword) == 0) { GetNextToken(q, &q, extent, token); graphic_context[n]->pointsize = StringToDouble(token, &next_token); if (token == next_token) status = MagickFalse; break; } if (LocaleCompare("font-stretch", keyword) == 0) { ssize_t stretch; GetNextToken(q, &q, extent, token); stretch = ParseCommandOption(MagickStretchOptions, MagickFalse, token); if (stretch == -1) status = MagickFalse; else graphic_context[n]->stretch = (StretchType) stretch; break; } if (LocaleCompare("font-style", keyword) == 0) { ssize_t style; GetNextToken(q, &q, extent, token); style = ParseCommandOption(MagickStyleOptions, MagickFalse, token); if (style == -1) status = MagickFalse; else graphic_context[n]->style = (StyleType) style; break; } if (LocaleCompare("font-weight", keyword) == 0) { ssize_t weight; GetNextToken(q, &q, extent, token); weight = ParseCommandOption(MagickWeightOptions, MagickFalse, token); if (weight == -1) weight = (ssize_t) StringToUnsignedLong(token); graphic_context[n]->weight = (size_t) weight; break; } status = MagickFalse; break; } case 'g': case 'G': { if (LocaleCompare("gradient-units", keyword) == 0) { GetNextToken(q, &q, extent, token); break; } if (LocaleCompare("gravity", keyword) == 0) { ssize_t gravity; GetNextToken(q, &q, extent, token); gravity = ParseCommandOption(MagickGravityOptions, MagickFalse, token); if (gravity == -1) status = MagickFalse; else graphic_context[n]->gravity = (GravityType) gravity; break; } status = MagickFalse; break; } case 'i': case 'I': { if (LocaleCompare("image", keyword) == 0) { ssize_t compose; primitive_type = ImagePrimitive; GetNextToken(q, &q, extent, token); compose = ParseCommandOption(MagickComposeOptions, MagickFalse, token); if (compose == -1) status = MagickFalse; else graphic_context[n]->compose = (CompositeOperator) compose; break; } if (LocaleCompare("interline-spacing", keyword) == 0) { GetNextToken(q, &q, extent, token); graphic_context[n]->interline_spacing = StringToDouble(token, &next_token); if (token == next_token) status = MagickFalse; break; } if (LocaleCompare("interword-spacing", keyword) == 0) { GetNextToken(q, &q, extent, token); graphic_context[n]->interword_spacing = StringToDouble(token, &next_token); if (token == next_token) status = MagickFalse; break; } status = MagickFalse; break; } case 'k': case 'K': { if (LocaleCompare("kerning", keyword) == 0) { GetNextToken(q, &q, extent, token); graphic_context[n]->kerning = StringToDouble(token, &next_token); if (token == next_token) status = MagickFalse; break; } status = MagickFalse; break; } case 'l': case 'L': { if 
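  /*
    Illustrative MVG fragment (added for exposition; not part of the
    original source) using the font and layout keywords handled above; a
    complete drawing would normally follow it with a text primitive, which
    DrawImage() parses further on:

      font-family Helvetica
      font-size 24
      font-weight 700
      gravity NorthWest
      kerning 1.5
  */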
(LocaleCompare("line", keyword) == 0) primitive_type = LinePrimitive; else status = MagickFalse; break; } case 'o': case 'O': { if (LocaleCompare("offset", keyword) == 0) { GetNextToken(q, &q, extent, token); break; } if (LocaleCompare("opacity", keyword) == 0) { GetNextToken(q, &q, extent, token); factor = strchr(token, '%') != (char *)NULL ? 0.01 : 1.0; graphic_context[n]->alpha = QuantumRange * (1.0 - (QuantumScale * graphic_context[n]->alpha * (1.0 - factor * StringToDouble(token, &next_token)))); graphic_context[n]->fill_alpha = QuantumRange * (1.0 - (QuantumScale * graphic_context[n]->fill_alpha * (1.0 - factor * StringToDouble(token, &next_token)))); graphic_context[n]->stroke_alpha = QuantumRange * (1.0 - (QuantumScale * graphic_context[n]->stroke_alpha * (1.0 - factor * StringToDouble(token, &next_token)))); if (token == next_token) status = MagickFalse; break; } status = MagickFalse; break; } case 'p': case 'P': { if (LocaleCompare("path", keyword) == 0) { primitive_type = PathPrimitive; break; } if (LocaleCompare("point", keyword) == 0) { primitive_type = PointPrimitive; break; } if (LocaleCompare("polyline", keyword) == 0) { primitive_type = PolylinePrimitive; break; } if (LocaleCompare("polygon", keyword) == 0) { primitive_type = PolygonPrimitive; break; } if (LocaleCompare("pop", keyword) == 0) { GetNextToken(q, &q, extent, token); if (LocaleCompare("clip-path", token) == 0) break; if (LocaleCompare("defs", token) == 0) break; if (LocaleCompare("gradient", token) == 0) break; if (LocaleCompare("graphic-context", token) == 0) { if (n <= 0) { (void)ThrowMagickException(exception, GetMagickModule(), DrawError, "UnbalancedGraphicContextPushPop", "`%s'", token); status = MagickFalse; n = 0; break; } if (graphic_context[n]->clip_mask != (char *)NULL) if (LocaleCompare(graphic_context[n]->clip_mask, graphic_context[n - 1]->clip_mask) != 0) (void)SetImageMask(image, ReadPixelMask, (Image *) NULL, exception); graphic_context[n] = DestroyDrawInfo(graphic_context[n]); n--; break; } if (LocaleCompare("pattern", token) == 0) break; status = MagickFalse; break; } if (LocaleCompare("push", keyword) == 0) { GetNextToken(q, &q, extent, token); if (LocaleCompare("clip-path", token) == 0) { char name[MagickPathExtent]; GetNextToken(q, &q, extent, token); (void)FormatLocaleString(name, MagickPathExtent, "%s", token); for (p = q; *q != '\0';) { GetNextToken(q, &q, extent, token); if (LocaleCompare(token, "pop") != 0) continue; GetNextToken(q, (const char **)NULL, extent, token); if (LocaleCompare(token, "clip-path") != 0) continue; break; } (void)CopyMagickString(token, p, (size_t) (q - p - 4 + 1)); (void)SetImageArtifact(image, name, token); GetNextToken(q, &q, extent, token); break; } if (LocaleCompare("gradient", token) == 0) { char key[2 * MagickPathExtent], name[MagickPathExtent], type[MagickPathExtent]; SegmentInfo segment; GetNextToken(q, &q, extent, token); (void)CopyMagickString(name, token, MagickPathExtent); GetNextToken(q, &q, extent, token); (void)CopyMagickString(type, token, MagickPathExtent); GetNextToken(q, &q, extent, token); segment.x1 = StringToDouble(token, &next_token); if (token == next_token) status = MagickFalse; GetNextToken(q, &q, extent, token); if (*token == ',') GetNextToken(q, &q, extent, token); segment.y1 = StringToDouble(token, &next_token); if (token == next_token) status = MagickFalse; GetNextToken(q, &q, extent, token); if (*token == ',') GetNextToken(q, &q, extent, token); segment.x2 = StringToDouble(token, &next_token); if (token == next_token) status = 
MagickFalse; GetNextToken(q, &q, extent, token); if (*token == ',') GetNextToken(q, &q, extent, token); segment.y2 = StringToDouble(token, &next_token); if (token == next_token) status = MagickFalse; if (LocaleCompare(type, "radial") == 0) { GetNextToken(q, &q, extent, token); if (*token == ',') GetNextToken(q, &q, extent, token); } for (p = q; *q != '\0';) { GetNextToken(q, &q, extent, token); if (LocaleCompare(token, "pop") != 0) continue; GetNextToken(q, (const char **)NULL, extent, token); if (LocaleCompare(token, "gradient") != 0) continue; break; } (void)CopyMagickString(token, p, (size_t) (q - p - 4 + 1)); bounds.x1 = graphic_context[n]->affine.sx * segment.x1 + graphic_context[n]->affine.ry * segment.y1 + graphic_context[n]->affine.tx; bounds.y1 = graphic_context[n]->affine.rx * segment.x1 + graphic_context[n]->affine.sy * segment.y1 + graphic_context[n]->affine.ty; bounds.x2 = graphic_context[n]->affine.sx * segment.x2 + graphic_context[n]->affine.ry * segment.y2 + graphic_context[n]->affine.tx; bounds.y2 = graphic_context[n]->affine.rx * segment.x2 + graphic_context[n]->affine.sy * segment.y2 + graphic_context[n]->affine.ty; (void)FormatLocaleString(key, MagickPathExtent, "%s", name); (void)SetImageArtifact(image, key, token); (void)FormatLocaleString(key, MagickPathExtent, "%s-type", name); (void)SetImageArtifact(image, key, type); (void)FormatLocaleString(key, MagickPathExtent, "%s-geometry", name); (void)FormatLocaleString(geometry, MagickPathExtent, "%gx%g%+.15g%+.15g", MagickMax(fabs(bounds.x2 - bounds.x1 + 1.0), 1.0), MagickMax(fabs(bounds.y2 - bounds.y1 + 1.0), 1.0), bounds.x1, bounds.y1); (void)SetImageArtifact(image, key, geometry); GetNextToken(q, &q, extent, token); break; } if (LocaleCompare("pattern", token) == 0) { char key[2 * MagickPathExtent], name[MagickPathExtent]; RectangleInfo pattern_bounds; GetNextToken(q, &q, extent, token); (void)CopyMagickString(name, token, MagickPathExtent); GetNextToken(q, &q, extent, token); pattern_bounds.x = (ssize_t) ceil(StringToDouble(token, &next_token) - 0.5); if (token == next_token) status = MagickFalse; GetNextToken(q, &q, extent, token); if (*token == ',') GetNextToken(q, &q, extent, token); pattern_bounds.y = (ssize_t) ceil(StringToDouble(token, &next_token) - 0.5); if (token == next_token) status = MagickFalse; GetNextToken(q, &q, extent, token); if (*token == ',') GetNextToken(q, &q, extent, token); pattern_bounds.width = (size_t) floor(StringToDouble(token, &next_token) + 0.5); if (token == next_token) status = MagickFalse; GetNextToken(q, &q, extent, token); if (*token == ',') GetNextToken(q, &q, extent, token); pattern_bounds.height = (size_t) floor(StringToDouble(token, &next_token) + 0.5); if (token == next_token) status = MagickFalse; for (p = q; *q != '\0';) { GetNextToken(q, &q, extent, token); if (LocaleCompare(token, "pop") != 0) continue; GetNextToken(q, (const char **)NULL, extent, token); if (LocaleCompare(token, "pattern") != 0) continue; break; } (void)CopyMagickString(token, p, (size_t) (q - p - 4 + 1)); (void)FormatLocaleString(key, MagickPathExtent, "%s", name); (void)SetImageArtifact(image, key, token); (void)FormatLocaleString(key, MagickPathExtent, "%s-geometry", name); (void)FormatLocaleString(geometry, MagickPathExtent, "%.20gx%.20g%+.20g%+.20g", (double)pattern_bounds.width, (double)pattern_bounds.height, (double)pattern_bounds.x, (double)pattern_bounds.y); (void)SetImageArtifact(image, key, geometry); GetNextToken(q, &q, extent, token); break; } if (LocaleCompare("graphic-context", token) == 0) 
{ n++; graphic_context = (DrawInfo **) ResizeQuantumMemory( graphic_context, (size_t) (n + 1), sizeof(*graphic_context)); if (graphic_context == (DrawInfo **) NULL) { (void)ThrowMagickException(exception, GetMagickModule(), ResourceLimitError, "MemoryAllocationFailed", "`%s'", image->filename); break; } graphic_context[n] = CloneDrawInfo((ImageInfo *) NULL, graphic_context[n - 1]); break; } if (LocaleCompare("defs", token) == 0) break; status = MagickFalse; break; } status = MagickFalse; break; } case 'r': case 'R': { if (LocaleCompare("rectangle", keyword) == 0) { primitive_type = RectanglePrimitive; break; } if (LocaleCompare("rotate", keyword) == 0) { GetNextToken(q, &q, extent, token); angle = StringToDouble(token, &next_token); if (token == next_token) status = MagickFalse; affine.sx = cos(DegreesToRadians(fmod((double)angle, 360.0))); affine.rx = sin(DegreesToRadians(fmod((double)angle, 360.0))); affine.ry = (-sin(DegreesToRadians(fmod((double)angle, 360.0)))); affine.sy = cos(DegreesToRadians(fmod((double)angle, 360.0))); break; } if (LocaleCompare("roundRectangle", keyword) == 0) { primitive_type = RoundRectanglePrimitive; break; } status = MagickFalse; break; } case 's': case 'S': { if (LocaleCompare("scale", keyword) == 0) { GetNextToken(q, &q, extent, token); affine.sx = StringToDouble(token, &next_token); if (token == next_token) status = MagickFalse; GetNextToken(q, &q, extent, token); if (*token == ',') GetNextToken(q, &q, extent, token); affine.sy = StringToDouble(token, &next_token); if (token == next_token) status = MagickFalse; break; } if (LocaleCompare("skewX", keyword) == 0) { GetNextToken(q, &q, extent, token); angle = StringToDouble(token, &next_token); if (token == next_token) status = MagickFalse; affine.ry = sin(DegreesToRadians(angle)); break; } if (LocaleCompare("skewY", keyword) == 0) { GetNextToken(q, &q, extent, token); angle = StringToDouble(token, &next_token); if (token == next_token) status = MagickFalse; affine.rx = (-tan(DegreesToRadians(angle) / 2.0)); break; } if (LocaleCompare("stop-color", keyword) == 0) { PixelInfo stop_color; number_stops++; if (number_stops == 1) stops = (StopInfo *) AcquireQuantumMemory(2, sizeof(*stops)); else if (number_stops > 2) stops = (StopInfo *) ResizeQuantumMemory(stops, number_stops, sizeof(*stops)); if (stops == (StopInfo *) NULL) { (void)ThrowMagickException(exception, GetMagickModule(), ResourceLimitError, "MemoryAllocationFailed", "`%s'", image->filename); break; } GetNextToken(q, &q, extent, token); (void)QueryColorCompliance(token, AllCompliance, &stop_color, exception); stops[number_stops - 1].color = stop_color; GetNextToken(q, &q, extent, token); stops[number_stops - 1].offset = StringToDouble(token, &next_token); if (token == next_token) status = MagickFalse; break; } if (LocaleCompare("stroke", keyword) == 0) { GetNextToken(q, &q, extent, token); (void)FormatLocaleString(pattern, MagickPathExtent, "%s", token); if (GetImageArtifact(image, pattern) != (const char *)NULL) (void)DrawPatternPath(image, draw_info, token, &graphic_context[n]->stroke_pattern, exception); else { status &= QueryColorCompliance(token, AllCompliance, &graphic_context[n]->stroke, exception); if (graphic_context[n]->stroke_alpha != OpaqueAlpha) graphic_context[n]->stroke.alpha = graphic_context[n]->stroke_alpha; if (status == MagickFalse) { ImageInfo * pattern_info; pattern_info = AcquireImageInfo(); (void)CopyMagickString(pattern_info->filename, token, MagickPathExtent); graphic_context[n]->stroke_pattern = ReadImage(pattern_info, 
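  /*
    Clarifying note (added for exposition; not part of the original
    source): as with `fill' earlier, a `stroke' value is resolved in three
    steps: first a previously pushed pattern artifact with the same name,
    then a color via QueryColorCompliance(), and finally, if the color
    lookup fails, the token is treated as an image filename and read with
    ReadImage() to become the stroke pattern.
  */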
exception); CatchException(exception); pattern_info = DestroyImageInfo(pattern_info); } } break; } if (LocaleCompare("stroke-antialias", keyword) == 0) { GetNextToken(q, &q, extent, token); graphic_context[n]->stroke_antialias = StringToLong(token) != 0 ? MagickTrue : MagickFalse; break; } if (LocaleCompare("stroke-dasharray", keyword) == 0) { if (graphic_context[n]->dash_pattern != (double *)NULL) graphic_context[n]->dash_pattern = (double *) RelinquishMagickMemory(graphic_context[n]->dash_pattern); if (IsPoint(q) != MagickFalse) { const char *r; r = q; GetNextToken(r, &r, extent, token); if (*token == ',') GetNextToken(r, &r, extent, token); for (x = 0; IsPoint(token) != MagickFalse; x++) { GetNextToken(r, &r, extent, token); if (*token == ',') GetNextToken(r, &r, extent, token); } graphic_context[n]->dash_pattern = (double *) AcquireQuantumMemory((size_t) (2UL * x + 1UL), sizeof(*graphic_context[n]->dash_pattern)); if (graphic_context[n]->dash_pattern == (double *)NULL) { (void)ThrowMagickException(exception, GetMagickModule(), ResourceLimitError, "MemoryAllocationFailed", "`%s'", image->filename); status = MagickFalse; break; } for (j = 0; j < x; j++) { GetNextToken(q, &q, extent, token); if (*token == ',') GetNextToken(q, &q, extent, token); graphic_context[n]->dash_pattern[j] = StringToDouble(token, &next_token); if (token == next_token) status = MagickFalse; if (graphic_context[n]->dash_pattern[j] < 0.0) status = MagickFalse; } if ((x & 0x01) != 0) for (; j < (2 * x); j++) graphic_context[n]->dash_pattern[j] = graphic_context[n]->dash_pattern[j - x]; graphic_context[n]->dash_pattern[j] = 0.0; break; } GetNextToken(q, &q, extent, token); break; } if (LocaleCompare("stroke-dashoffset", keyword) == 0) { GetNextToken(q, &q, extent, token); graphic_context[n]->dash_offset = StringToDouble(token, &next_token); if (token == next_token) status = MagickFalse; break; } if (LocaleCompare("stroke-linecap", keyword) == 0) { ssize_t linecap; GetNextToken(q, &q, extent, token); linecap = ParseCommandOption(MagickLineCapOptions, MagickFalse, token); if (linecap == -1) status = MagickFalse; else graphic_context[n]->linecap = (LineCap) linecap; break; } if (LocaleCompare("stroke-linejoin", keyword) == 0) { ssize_t linejoin; GetNextToken(q, &q, extent, token); linejoin = ParseCommandOption(MagickLineJoinOptions, MagickFalse, token); if (linejoin == -1) status = MagickFalse; else graphic_context[n]->linejoin = (LineJoin) linejoin; break; } if (LocaleCompare("stroke-miterlimit", keyword) == 0) { GetNextToken(q, &q, extent, token); graphic_context[n]->miterlimit = StringToUnsignedLong(token); break; } if (LocaleCompare("stroke-opacity", keyword) == 0) { GetNextToken(q, &q, extent, token); factor = strchr(token, '%') != (char *)NULL ? 
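  /*
    Clarifying note with an example (added for exposition; not part of the
    original source): the `stroke-dasharray' handler above accepts a list
    such as

      stroke-dasharray 4,2,1,2

    Every entry must be a non-negative number.  If the list has an odd
    number of entries it is repeated once so the pattern length becomes
    even, and a trailing 0.0 is appended as the sentinel that
    CloneDrawInfo() and DrawDashPolygon() rely on.  A non-numeric token
    (for example "none") simply clears any existing pattern.
  */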
0.01 : 1.0; graphic_context[n]->stroke.alpha = QuantumRange - ClampToQuantum( (MagickRealType) QuantumRange * (1.0 - factor * StringToDouble(token, &next_token))); if (token == next_token) status = MagickFalse; break; } if (LocaleCompare("stroke-width", keyword) == 0) { GetNextToken(q, &q, extent, token); graphic_context[n]->stroke_width = StringToDouble(token, &next_token); if (token == next_token) status = MagickFalse; break; } status = MagickFalse; break; } case 't': case 'T': { if (LocaleCompare("text", keyword) == 0) { primitive_type = TextPrimitive; break; } if (LocaleCompare("text-align", keyword) == 0) { ssize_t align; GetNextToken(q, &q, extent, token); align = ParseCommandOption(MagickAlignOptions, MagickFalse, token); if (align == -1) status = MagickFalse; else graphic_context[n]->align = (AlignType) align; break; } if (LocaleCompare("text-anchor", keyword) == 0) { ssize_t align; GetNextToken(q, &q, extent, token); align = ParseCommandOption(MagickAlignOptions, MagickFalse, token); if (align == -1) status = MagickFalse; else graphic_context[n]->align = (AlignType) align; break; } if (LocaleCompare("text-antialias", keyword) == 0) { GetNextToken(q, &q, extent, token); graphic_context[n]->text_antialias = StringToLong(token) != 0 ? MagickTrue : MagickFalse; break; } if (LocaleCompare("text-undercolor", keyword) == 0) { GetNextToken(q, &q, extent, token); (void)QueryColorCompliance(token, AllCompliance, &graphic_context[n]->undercolor, exception); break; } if (LocaleCompare("translate", keyword) == 0) { GetNextToken(q, &q, extent, token); affine.tx = StringToDouble(token, &next_token); if (token == next_token) status = MagickFalse; GetNextToken(q, &q, extent, token); if (*token == ',') GetNextToken(q, &q, extent, token); affine.ty = StringToDouble(token, &next_token); if (token == next_token) status = MagickFalse; break; } status = MagickFalse; break; } case 'v': case 'V': { if (LocaleCompare("viewbox", keyword) == 0) { GetNextToken(q, &q, extent, token); graphic_context[n]->viewbox.x = (ssize_t) ceil(StringToDouble(token, &next_token) - 0.5); if (token == next_token) status = MagickFalse; GetNextToken(q, &q, extent, token); if (*token == ',') GetNextToken(q, &q, extent, token); graphic_context[n]->viewbox.y = (ssize_t) ceil(StringToDouble(token, &next_token) - 0.5); if (token == next_token) status = MagickFalse; GetNextToken(q, &q, extent, token); if (*token == ',') GetNextToken(q, &q, extent, token); graphic_context[n]->viewbox.width = (size_t) floor(StringToDouble( token, &next_token) + 0.5); if (token == next_token) status = MagickFalse; GetNextToken(q, &q, extent, token); if (*token == ',') GetNextToken(q, &q, extent, token); graphic_context[n]->viewbox.height = (size_t) floor(StringToDouble( token, &next_token) + 0.5); if (token == next_token) status = MagickFalse; break; } status = MagickFalse; break; } default: { status = MagickFalse; break; } } if (status == MagickFalse) break; if ((fabs(affine.sx - 1.0) >= DrawEpsilon) || (fabs(affine.rx) >= DrawEpsilon) || (fabs(affine.ry) >= DrawEpsilon) || (fabs(affine.sy - 1.0) >= DrawEpsilon) || (fabs(affine.tx) >= DrawEpsilon) || (fabs(affine.ty) >= DrawEpsilon)) { graphic_context[n]->affine.sx = current.sx * affine.sx + current.ry * affine.rx; graphic_context[n]->affine.rx = current.rx * affine.sx + current.sy * affine.rx; graphic_context[n]->affine.ry = current.sx * affine.ry + current.ry * affine.sy; graphic_context[n]->affine.sy = current.rx * affine.ry + current.sy * affine.sy; graphic_context[n]->affine.tx = current.sx * 
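  /*
    Clarifying note (added for exposition; not part of the original
    source): the six assignments around this point compose the newly
    parsed transform with the transform already in effect.  Viewing an
    AffineMatrix as the homogeneous matrix

        [ sx  ry  tx ]
        [ rx  sy  ty ]
        [  0   0   1 ]

    the update is  graphic_context[n]->affine = current * affine,  so the
    new transform is applied to coordinates first and the pre-existing one
    second.
  */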
affine.tx + current.ry * affine.ty + current.tx; graphic_context[n]->affine.ty = current.rx * affine.tx + current.sy * affine.ty + current.ty; } if (primitive_type == UndefinedPrimitive) { if (*q == '\0') { if (number_stops > 1) { GradientType type; type = LinearGradient; if (draw_info->gradient.type == RadialGradient) type = RadialGradient; (void)GradientImage(image, type, PadSpread, stops, number_stops, exception); } if (number_stops > 0) stops = (StopInfo *) RelinquishMagickMemory(stops); } if (image->debug != MagickFalse) (void)LogMagickEvent(DrawEvent, GetMagickModule(), " %.*s", (int) (q - p), p); continue; } /* * Parse the primitive attributes. */ i = 0; j = 0; primitive_info[0].point.x = 0.0; primitive_info[0].point.y = 0.0; for (x = 0; *q != '\0'; x++) { /* * Define points. */ if (IsPoint(q) == MagickFalse) break; GetNextToken(q, &q, extent, token); point.x = StringToDouble(token, &next_token); if (token == next_token) status = MagickFalse; GetNextToken(q, &q, extent, token); if (*token == ',') GetNextToken(q, &q, extent, token); point.y = StringToDouble(token, &next_token); if (token == next_token) status = MagickFalse; GetNextToken(q, (const char **)NULL, extent, token); if (*token == ',') GetNextToken(q, &q, extent, token); primitive_info[i].primitive = primitive_type; primitive_info[i].point = point; primitive_info[i].coordinates = 0; primitive_info[i].method = FloodfillMethod; i++; if (i < (ssize_t) number_points) continue; number_points <<= 1; primitive_info = (PrimitiveInfo *) ResizeQuantumMemory(primitive_info, (size_t) number_points, sizeof(*primitive_info)); if ((primitive_info == (PrimitiveInfo *) NULL) || (number_points != (MagickSizeType) ((size_t) number_points))) ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); } primitive_info[j].primitive = primitive_type; primitive_info[j].coordinates = (size_t) x; primitive_info[j].method = FloodfillMethod; primitive_info[j].text = (char *)NULL; /* * Circumscribe primitive within a circle. */ bounds.x1 = primitive_info[j].point.x; bounds.y1 = primitive_info[j].point.y; bounds.x2 = primitive_info[j].point.x; bounds.y2 = primitive_info[j].point.y; for (k = 1; k < (ssize_t) primitive_info[j].coordinates; k++) { point = primitive_info[j + k].point; if (point.x < bounds.x1) bounds.x1 = point.x; if (point.y < bounds.y1) bounds.y1 = point.y; if (point.x > bounds.x2) bounds.x2 = point.x; if (point.y > bounds.y2) bounds.y2 = point.y; } /* * Speculate how many points our primitive might consume. 
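 * As a concrete reading of the estimates below: a rectangle is traced as a
 * closed polyline (four corners plus the repeated start point), hence the
 * factor of 5; circles, arcs, ellipses and rounded rectangles are budgeted
 * from the circumference of their bounding circle plus a BezierQuantum
 * safety margin; paths reserve BezierQuantum points per numeric token.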
*/ length = primitive_info[j].coordinates; switch (primitive_type) { case RectanglePrimitive: { length *= 5; break; } case RoundRectanglePrimitive: { double alpha, beta, radius; alpha = bounds.x2 - bounds.x1; beta = bounds.y2 - bounds.y1; radius = hypot((double)alpha, (double)beta); length *= 5; length += 2 * ((size_t) ceil((double)MagickPI * radius)) + 6 * BezierQuantum + 360; break; } case BezierPrimitive: { if (primitive_info[j].coordinates > 107) (void)ThrowMagickException(exception, GetMagickModule(), DrawError, "TooManyBezierCoordinates", "`%s'", token); length = BezierQuantum * primitive_info[j].coordinates; break; } case PathPrimitive: { char *s, *t; GetNextToken(q, &q, extent, token); length = 1; t = token; for (s = token; *s != '\0'; s = t) { double value; value = StringToDouble(s, &t); (void)value; if (s == t) { t++; continue; } length++; } length = length * BezierQuantum; break; } case CirclePrimitive: case ArcPrimitive: case EllipsePrimitive: { double alpha, beta, radius; alpha = bounds.x2 - bounds.x1; beta = bounds.y2 - bounds.y1; radius = hypot((double)alpha, (double)beta); length = 2 * ((size_t) ceil((double)MagickPI * radius)) + 6 * BezierQuantum + 360; break; } default: break; } if ((i + length) >= number_points) { /* * Resize based on speculative points required by primitive. */ number_points += length + 1; primitive_info = (PrimitiveInfo *) ResizeQuantumMemory(primitive_info, (size_t) number_points, sizeof(*primitive_info)); if ((primitive_info == (PrimitiveInfo *) NULL) || (number_points != (MagickSizeType) ((size_t) number_points))) { (void)ThrowMagickException(exception, GetMagickModule(), ResourceLimitError, "MemoryAllocationFailed", "`%s'", image->filename); break; } } switch (primitive_type) { case PointPrimitive: default: { if (primitive_info[j].coordinates != 1) { status = MagickFalse; break; } TracePoint(primitive_info + j, primitive_info[j].point); i = (ssize_t) (j + primitive_info[j].coordinates); break; } case LinePrimitive: { if (primitive_info[j].coordinates != 2) { status = MagickFalse; break; } TraceLine(primitive_info + j, primitive_info[j].point, primitive_info[j + 1].point); i = (ssize_t) (j + primitive_info[j].coordinates); break; } case RectanglePrimitive: { if (primitive_info[j].coordinates != 2) { status = MagickFalse; break; } TraceRectangle(primitive_info + j, primitive_info[j].point, primitive_info[j + 1].point); i = (ssize_t) (j + primitive_info[j].coordinates); break; } case RoundRectanglePrimitive: { if (primitive_info[j].coordinates != 3) { status = MagickFalse; break; } TraceRoundRectangle(primitive_info + j, primitive_info[j].point, primitive_info[j + 1].point, primitive_info[j + 2].point); i = (ssize_t) (j + primitive_info[j].coordinates); break; } case ArcPrimitive: { if (primitive_info[j].coordinates != 3) { primitive_type = UndefinedPrimitive; break; } TraceArc(primitive_info + j, primitive_info[j].point, primitive_info[j + 1].point, primitive_info[j + 2].point); i = (ssize_t) (j + primitive_info[j].coordinates); break; } case EllipsePrimitive: { if (primitive_info[j].coordinates != 3) { status = MagickFalse; break; } TraceEllipse(primitive_info + j, primitive_info[j].point, primitive_info[j + 1].point, primitive_info[j + 2].point); i = (ssize_t) (j + primitive_info[j].coordinates); break; } case CirclePrimitive: { if (primitive_info[j].coordinates != 2) { status = MagickFalse; break; } TraceCircle(primitive_info + j, primitive_info[j].point, primitive_info[j + 1].point); i = (ssize_t) (j + primitive_info[j].coordinates); break; } 
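      /*
        Polyline and polygon primitives keep the points exactly as parsed;
        the polygon case below additionally closes the path by appending a
        copy of its first point and bumping the coordinate count.
      */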
case PolylinePrimitive: break; case PolygonPrimitive: { primitive_info[i] = primitive_info[j]; primitive_info[i].coordinates = 0; primitive_info[j].coordinates++; i++; break; } case BezierPrimitive: { if (primitive_info[j].coordinates < 3) { status = MagickFalse; break; } TraceBezier(primitive_info + j, primitive_info[j].coordinates); i = (ssize_t) (j + primitive_info[j].coordinates); break; } case PathPrimitive: { i = (ssize_t) (j + TracePath(primitive_info + j, token)); break; } case AlphaPrimitive: case ColorPrimitive: { ssize_t method; if (primitive_info[j].coordinates != 1) { status = MagickFalse; break; } GetNextToken(q, &q, extent, token); method = ParseCommandOption(MagickMethodOptions, MagickFalse, token); if (method == -1) status = MagickFalse; else primitive_info[j].method = (PaintMethod) method; break; } case TextPrimitive: { if (primitive_info[j].coordinates != 1) { status = MagickFalse; break; } if (*token != ',') GetNextToken(q, &q, extent, token); primitive_info[j].text = AcquireString(token); break; } case ImagePrimitive: { if (primitive_info[j].coordinates != 2) { status = MagickFalse; break; } GetNextToken(q, &q, extent, token); primitive_info[j].text = AcquireString(token); break; } } if (primitive_info == (PrimitiveInfo *) NULL) break; if (image->debug != MagickFalse) (void)LogMagickEvent(DrawEvent, GetMagickModule(), " %.*s", (int)(q - p), p); if (status == MagickFalse) break; primitive_info[i].primitive = UndefinedPrimitive; if (i == 0) continue; /* * Transform points. */ for (i = 0; primitive_info[i].primitive != UndefinedPrimitive; i++) { point = primitive_info[i].point; primitive_info[i].point.x = graphic_context[n]->affine.sx * point.x + graphic_context[n]->affine.ry * point.y + graphic_context[n]->affine.tx; primitive_info[i].point.y = graphic_context[n]->affine.rx * point.x + graphic_context[n]->affine.sy * point.y + graphic_context[n]->affine.ty; point = primitive_info[i].point; if (point.x < graphic_context[n]->bounds.x1) graphic_context[n]->bounds.x1 = point.x; if (point.y < graphic_context[n]->bounds.y1) graphic_context[n]->bounds.y1 = point.y; if (point.x > graphic_context[n]->bounds.x2) graphic_context[n]->bounds.x2 = point.x; if (point.y > graphic_context[n]->bounds.y2) graphic_context[n]->bounds.y2 = point.y; if (primitive_info[i].primitive == ImagePrimitive) break; if (i >= (ssize_t) number_points) ThrowFatalException(ResourceLimitFatalError, "MemoryAllocationFailed"); } if (graphic_context[n]->render != MagickFalse) { if ((n != 0) && (graphic_context[n]->clip_mask != (char *)NULL) && (LocaleCompare(graphic_context[n]->clip_mask, graphic_context[n - 1]->clip_mask) != 0)) status &= DrawClipPath(image, graphic_context[n], graphic_context[n]->clip_mask, exception); status &= DrawPrimitive(image, graphic_context[n], primitive_info, exception); } if (primitive_info->text != (char *)NULL) primitive_info->text = (char *)RelinquishMagickMemory( primitive_info->text); proceed = SetImageProgress(image, RenderImageTag, q - primitive, (MagickSizeType) primitive_extent); if (proceed == MagickFalse) break; if (status == 0) break; } if (image->debug != MagickFalse) (void)LogMagickEvent(DrawEvent, GetMagickModule(), "end draw-image"); /* * Relinquish resources. 
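 * Everything acquired by the parser is released here: the token buffer, the
 * primitive point array, the primitive string, and every graphic context
 * pushed onto the stack; a MagickFalse status is then reported as a
 * NonconformingDrawingPrimitiveDefinition exception.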
*/ token = DestroyString(token); if (primitive_info != (PrimitiveInfo *) NULL) primitive_info = (PrimitiveInfo *) RelinquishMagickMemory(primitive_info); primitive = DestroyString(primitive); for (; n >= 0; n--) graphic_context[n] = DestroyDrawInfo(graphic_context[n]); graphic_context = (DrawInfo **) RelinquishMagickMemory(graphic_context); if (status == MagickFalse) ThrowBinaryException(DrawError, "NonconformingDrawingPrimitiveDefinition", keyword); return (status != 0 ? MagickTrue : MagickFalse); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % D r a w G r a d i e n t I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % DrawGradientImage() draws a linear gradient on the image. % % * The format of the DrawGradientImage method is: % % MagickBooleanType * DrawGradientImage(Image *image, % const DrawInfo * *draw_info,ExceptionInfo *exception) % % A description of each parameter * follows: % % o image: the image. % % o draw_info: the draw info. % % * o exception: return any errors or warnings in this structure. % */ static inline double GetStopColorOffset(const GradientInfo * gradient, const ssize_t x, const ssize_t y) { switch (gradient->type) { case UndefinedGradient: case LinearGradient: { double gamma, length, offset, scale; PointInfo p, q; const SegmentInfo * gradient_vector; gradient_vector = (&gradient->gradient_vector); p.x = gradient_vector->x2 - gradient_vector->x1; p.y = gradient_vector->y2 - gradient_vector->y1; q.x = (double)x - gradient_vector->x1; q.y = (double)y - gradient_vector->y1; length = sqrt(q.x * q.x + q.y * q.y); gamma = sqrt(p.x * p.x + p.y * p.y) * length; gamma = PerceptibleReciprocal(gamma); scale = p.x * q.x + p.y * q.y; offset = gamma * scale * length; return (offset); } case RadialGradient: { PointInfo v; if (gradient->spread == RepeatSpread) { v.x = (double)x - gradient->center.x; v.y = (double)y - gradient->center.y; return (sqrt(v.x * v.x + v.y * v.y)); } v.x = (double)(((x - gradient->center.x) * cos(DegreesToRadians( gradient->angle))) + ((y - gradient->center.y) * sin(DegreesToRadians( gradient->angle)))) / gradient->radii.x; v.y = (double)(((x - gradient->center.x) * sin(DegreesToRadians( gradient->angle))) - ((y - gradient->center.y) * cos(DegreesToRadians( gradient->angle)))) / gradient->radii.y; return (sqrt(v.x * v.x + v.y * v.y)); } } return (0.0); } static int StopInfoCompare(const void *x, const void *y) { StopInfo * stop_1, *stop_2; stop_1 = (StopInfo *) x; stop_2 = (StopInfo *) y; if (stop_1->offset > stop_2->offset) return (1); if (fabs(stop_1->offset - stop_2->offset) <= DrawEpsilon) return (0); return (-1); } MagickExport MagickBooleanType DrawGradientImage(Image * image, const DrawInfo * draw_info, ExceptionInfo * exception) { CacheView * image_view; const GradientInfo * gradient; const SegmentInfo * gradient_vector; double length; MagickBooleanType status; PixelInfo zero; PointInfo point; RectangleInfo bounding_box; ssize_t y; /* * Draw linear or radial gradient on image. 
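 * A minimal calling sketch, hedged: it assumes the caller has already
 * populated draw_info->gradient (type, stops, bounding box), for example
 * the way the drawing parser fills it in for gradient definitions; the
 * allocation pattern below is illustrative only.
 *
 *   ExceptionInfo *exception = AcquireExceptionInfo();
 *   DrawInfo *draw_info = CloneDrawInfo((ImageInfo *) NULL,
 *     (DrawInfo *) NULL);
 *   ... fill in draw_info->gradient and its stops ...
 *   (void) DrawGradientImage(image, draw_info, exception);
 *   draw_info = DestroyDrawInfo(draw_info);
 *   exception = DestroyExceptionInfo(exception);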
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(draw_info != (const DrawInfo *)NULL); gradient = (&draw_info->gradient); qsort(gradient->stops, gradient->number_stops, sizeof(StopInfo), StopInfoCompare); gradient_vector = (&gradient->gradient_vector); point.x = gradient_vector->x2 - gradient_vector->x1; point.y = gradient_vector->y2 - gradient_vector->y1; length = sqrt(point.x * point.x + point.y * point.y); bounding_box = gradient->bounding_box; status = MagickTrue; GetPixelInfo(image, &zero); image_view = AcquireAuthenticCacheView(image, exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,1,1) #endif for (y = bounding_box.y; y < (ssize_t) bounding_box.height; y++) { PixelInfo composite, pixel; double alpha, offset; register Quantum * magick_restrict q; register ssize_t i, x; ssize_t j; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } pixel = zero; composite = zero; offset = GetStopColorOffset(gradient, 0, y); if (gradient->type != RadialGradient) offset /= length; for (x = bounding_box.x; x < (ssize_t) bounding_box.width; x++) { GetPixelInfoPixel(image, q, &pixel); switch (gradient->spread) { case UndefinedSpread: case PadSpread: { if ((x != (ssize_t) ceil(gradient_vector->x1 - 0.5)) || (y != (ssize_t) ceil(gradient_vector->y1 - 0.5))) { offset = GetStopColorOffset(gradient, x, y); if (gradient->type != RadialGradient) offset /= length; } for (i = 0; i < (ssize_t) gradient->number_stops; i++) if (offset < gradient->stops[i].offset) break; if ((offset < 0.0) || (i == 0)) composite = gradient->stops[0].color; else if ((offset > 1.0) || (i == (ssize_t) gradient->number_stops)) composite = gradient->stops[gradient->number_stops - 1].color; else { j = i; i--; alpha = (offset - gradient->stops[i].offset) / (gradient->stops[j].offset - gradient->stops[i].offset); CompositePixelInfoBlend(&gradient->stops[i].color, 1.0 - alpha, &gradient->stops[j].color, alpha, &composite); } break; } case ReflectSpread: { if ((x != (ssize_t) ceil(gradient_vector->x1 - 0.5)) || (y != (ssize_t) ceil(gradient_vector->y1 - 0.5))) { offset = GetStopColorOffset(gradient, x, y); if (gradient->type != RadialGradient) offset /= length; } if (offset < 0.0) offset = (-offset); if ((ssize_t) fmod(offset, 2.0) == 0) offset = fmod(offset, 1.0); else offset = 1.0 - fmod(offset, 1.0); for (i = 0; i < (ssize_t) gradient->number_stops; i++) if (offset < gradient->stops[i].offset) break; if (i == 0) composite = gradient->stops[0].color; else if (i == (ssize_t) gradient->number_stops) composite = gradient->stops[gradient->number_stops - 1].color; else { j = i; i--; alpha = (offset - gradient->stops[i].offset) / (gradient->stops[j].offset - gradient->stops[i].offset); CompositePixelInfoBlend(&gradient->stops[i].color, 1.0 - alpha, &gradient->stops[j].color, alpha, &composite); } break; } case RepeatSpread: { MagickBooleanType antialias; double repeat; antialias = MagickFalse; repeat = 0.0; if ((x != (ssize_t) ceil(gradient_vector->x1 - 0.5)) || (y != (ssize_t) ceil(gradient_vector->y1 - 0.5))) { offset = GetStopColorOffset(gradient, x, y); if (gradient->type == LinearGradient) { repeat = fmod(offset, length); if (repeat < 0.0) repeat = length - fmod(-repeat, length); 
else repeat = fmod(offset, length); antialias = (repeat < length) && ((repeat + 1.0) > length) ? MagickTrue : MagickFalse; offset = repeat / length; } else { repeat = fmod(offset, gradient->radius); if (repeat < 0.0) repeat = gradient->radius - fmod(-repeat, gradient->radius); else repeat = fmod(offset, gradient->radius); antialias = repeat + 1.0 > gradient->radius ? MagickTrue : MagickFalse; offset = repeat / gradient->radius; } } for (i = 0; i < (ssize_t) gradient->number_stops; i++) if (offset < gradient->stops[i].offset) break; if (i == 0) composite = gradient->stops[0].color; else if (i == (ssize_t) gradient->number_stops) composite = gradient->stops[gradient->number_stops - 1].color; else { j = i; i--; alpha = (offset - gradient->stops[i].offset) / (gradient->stops[j].offset - gradient->stops[i].offset); if (antialias != MagickFalse) { if (gradient->type == LinearGradient) alpha = length - repeat; else alpha = gradient->radius - repeat; i = 0; j = (ssize_t) gradient->number_stops - 1L; } CompositePixelInfoBlend(&gradient->stops[i].color, 1.0 - alpha, &gradient->stops[j].color, alpha, &composite); } break; } } CompositePixelInfoOver(&composite, composite.alpha, &pixel, pixel.alpha, &pixel); SetPixelViaPixelInfo(image, &pixel, q); q += GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; } image_view = DestroyCacheView(image_view); return (status); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % D r a w P a t t e r n P a t h * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % DrawPatternPath() draws a pattern. % % The format of the * DrawPatternPath method is: % % MagickBooleanType * DrawPatternPath(Image *image,const DrawInfo *draw_info, % const * char *name,Image **pattern,ExceptionInfo *exception) % % A description of * each parameter follows: % % o image: the image. % % o draw_info: the * draw info. % % o name: the pattern name. % % o image: the image. % % * o exception: return any errors or warnings in this structure. 
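%
%  A hedged calling sketch: DrawPatternPath() looks the pattern up through
%  the image artifacts "<name>" (an MVG primitive string) and
%  "<name>-geometry" (the tile size), so both must exist beforehand; the
%  name "hatch" and the path below are illustrative only.
%
%      Image
%        *pattern = (Image *) NULL;
%
%      (void) SetImageArtifact(image,"hatch","path 'M 0,0 L 7,7'");
%      (void) SetImageArtifact(image,"hatch-geometry","8x8");
%      if (DrawPatternPath(image,draw_info,"hatch",&pattern,exception) != MagickFalse)
%        draw_info->fill_pattern=pattern;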
% */ MagickExport MagickBooleanType DrawPatternPath(Image * image, const DrawInfo * draw_info, const char *name, Image ** pattern, ExceptionInfo * exception) { char property[MagickPathExtent]; const char *geometry, *path, *type; DrawInfo * clone_info; ImageInfo * image_info; MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(draw_info != (const DrawInfo *)NULL); assert(name != (const char *)NULL); (void)FormatLocaleString(property, MagickPathExtent, "%s", name); path = GetImageArtifact(image, property); if (path == (const char *)NULL) return (MagickFalse); (void)FormatLocaleString(property, MagickPathExtent, "%s-geometry", name); geometry = GetImageArtifact(image, property); if (geometry == (const char *)NULL) return (MagickFalse); if ((*pattern) != (Image *) NULL) *pattern = DestroyImage(*pattern); image_info = AcquireImageInfo(); image_info->size = AcquireString(geometry); *pattern = AcquireImage(image_info, exception); image_info = DestroyImageInfo(image_info); (void)QueryColorCompliance("#000000ff", AllCompliance, &(*pattern)->background_color, exception); (void)SetImageBackgroundColor(*pattern, exception); if (image->debug != MagickFalse) (void)LogMagickEvent(DrawEvent, GetMagickModule(), "begin pattern-path %s %s", name, geometry); clone_info = CloneDrawInfo((ImageInfo *) NULL, draw_info); clone_info->fill_pattern = NewImageList(); clone_info->stroke_pattern = NewImageList(); (void)FormatLocaleString(property, MagickPathExtent, "%s-type", name); type = GetImageArtifact(image, property); if (type != (const char *)NULL) clone_info->gradient.type = (GradientType) ParseCommandOption( MagickGradientOptions, MagickFalse, type); (void)CloneString(&clone_info->primitive, path); status = DrawImage(*pattern, clone_info, exception); clone_info = DestroyDrawInfo(clone_info); if (image->debug != MagickFalse) (void)LogMagickEvent(DrawEvent, GetMagickModule(), "end pattern-path"); return (status); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % + D r a w P o l y g o n P r i m i t i v e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % DrawPolygonPrimitive() draws a polygon on the image. % % The * format of the DrawPolygonPrimitive method is: % % MagickBooleanType * DrawPolygonPrimitive(Image *image, % const DrawInfo * *draw_info,const PrimitiveInfo *primitive_info, % ExceptionInfo * *exception) % % A description of each parameter follows: % % o image: * the image. % % o draw_info: the draw info. % % o primitive_info: * Specifies a pointer to a PrimitiveInfo structure. % % o exception: * return any errors or warnings in this structure. 
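%
%  In outline, the routine converts the primitive into per-thread polygon
%  edge lists (AcquirePolygonThreadSet), clips their joint bounding box to
%  the image, and then, row by row, derives a fill opacity and a stroke
%  opacity for every pixel with GetFillAlpha() before compositing the fill
%  and stroke colors over the existing pixels.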
% */ static PolygonInfo ** DestroyPolygonThreadSet(PolygonInfo ** polygon_info) { register ssize_t i; assert(polygon_info != (PolygonInfo **) NULL); for (i = 0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (polygon_info[i] != (PolygonInfo *) NULL) polygon_info[i] = DestroyPolygonInfo(polygon_info[i]); polygon_info = (PolygonInfo **) RelinquishMagickMemory(polygon_info); return (polygon_info); } static PolygonInfo ** AcquirePolygonThreadSet( const PrimitiveInfo * primitive_info) { PathInfo * magick_restrict path_info; PolygonInfo ** polygon_info; register ssize_t i; size_t number_threads; number_threads = (size_t) GetMagickResourceLimit(ThreadResource); polygon_info = (PolygonInfo **) AcquireQuantumMemory(number_threads, sizeof(*polygon_info)); if (polygon_info == (PolygonInfo **) NULL) return ((PolygonInfo **) NULL); (void)ResetMagickMemory(polygon_info, 0, number_threads * sizeof(*polygon_info)); path_info = ConvertPrimitiveToPath(primitive_info); if (path_info == (PathInfo *) NULL) return (DestroyPolygonThreadSet(polygon_info)); for (i = 0; i < (ssize_t) number_threads; i++) { polygon_info[i] = ConvertPathToPolygon(path_info); if (polygon_info[i] == (PolygonInfo *) NULL) return (DestroyPolygonThreadSet(polygon_info)); } path_info = (PathInfo *) RelinquishMagickMemory(path_info); return (polygon_info); } static double GetFillAlpha(PolygonInfo * polygon_info, const double mid, const MagickBooleanType fill, const FillRule fill_rule, const ssize_t x, const ssize_t y, double *stroke_alpha) { double alpha, beta, distance, subpath_alpha; PointInfo delta; register const PointInfo * q; register EdgeInfo * p; register ssize_t i; ssize_t j, winding_number; /* * Compute fill & stroke opacity for this (x,y) point. */ *stroke_alpha = 0.0; subpath_alpha = 0.0; p = polygon_info->edges; for (j = 0; j < (ssize_t) polygon_info->number_edges; j++, p++) { if ((double)y <= (p->bounds.y1 - mid - 0.5)) break; if ((double)y > (p->bounds.y2 + mid + 0.5)) { (void)DestroyEdge(polygon_info, (size_t) j); continue; } if (((double)x <= (p->bounds.x1 - mid - 0.5)) || ((double)x > (p->bounds.x2 + mid + 0.5))) continue; i = (ssize_t) MagickMax((double)p->highwater, 1.0); for (; i < (ssize_t) p->number_points; i++) { if ((double)y <= (p->points[i - 1].y - mid - 0.5)) break; if ((double)y > (p->points[i].y + mid + 0.5)) continue; if (p->scanline != (double)y) { p->scanline = (double)y; p->highwater = (size_t) i; } /* * Compute distance between a point and an edge. */ q = p->points + i - 1; delta.x = (q + 1)->x - q->x; delta.y = (q + 1)->y - q->y; beta = delta.x * (x - q->x) + delta.y * (y - q->y); if (beta < 0.0) { delta.x = (double)x - q->x; delta.y = (double)y - q->y; distance = delta.x * delta.x + delta.y * delta.y; } else { alpha = delta.x * delta.x + delta.y * delta.y; if (beta > alpha) { delta.x = (double)x - (q + 1)->x; delta.y = (double)y - (q + 1)->y; distance = delta.x * delta.x + delta.y * delta.y; } else { alpha = 1.0 / alpha; beta = delta.x * (y - q->y) - delta.y * (x - q->x); distance = alpha * beta * beta; } } /* * Compute stroke & subpath opacity. 
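 * Here "distance" is the squared distance from the pixel to the nearest
 * point of the current edge segment and "mid" is half the (affine-scaled)
 * stroke width: pixels within roughly mid-0.5 of the path receive full
 * stroke opacity, and the opacity tapers off toward mid+0.5 to antialias
 * the stroke boundary.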
*/ beta = 0.0; if (p->ghostline == MagickFalse) { alpha = mid + 0.5; if ((*stroke_alpha < 1.0) && (distance <= ((alpha + 0.25) * (alpha + 0.25)))) { alpha = mid - 0.5; if (distance <= ((alpha + 0.25) * (alpha + 0.25))) *stroke_alpha = 1.0; else { beta = 1.0; if (fabs(distance - 1.0) >= DrawEpsilon) beta = sqrt((double)distance); alpha = beta - mid - 0.5; if (*stroke_alpha < ((alpha - 0.25) * (alpha - 0.25))) *stroke_alpha = (alpha - 0.25) * (alpha - 0.25); } } } if ((fill == MagickFalse) || (distance > 1.0) || (subpath_alpha >= 1.0)) continue; if (distance <= 0.0) { subpath_alpha = 1.0; continue; } if (distance > 1.0) continue; if (fabs(beta) < DrawEpsilon) { beta = 1.0; if (fabs(distance - 1.0) >= DrawEpsilon) beta = sqrt(distance); } alpha = beta - 1.0; if (subpath_alpha < (alpha * alpha)) subpath_alpha = alpha * alpha; } } /* * Compute fill opacity. */ if (fill == MagickFalse) return (0.0); if (subpath_alpha >= 1.0) return (1.0); /* * Determine winding number. */ winding_number = 0; p = polygon_info->edges; for (j = 0; j < (ssize_t) polygon_info->number_edges; j++, p++) { if ((double)y <= p->bounds.y1) break; if (((double)y > p->bounds.y2) || ((double)x <= p->bounds.x1)) continue; if ((double)x > p->bounds.x2) { winding_number += p->direction ? 1 : -1; continue; } i = (ssize_t) MagickMax((double)p->highwater, 1.0); for (; i < (ssize_t) p->number_points; i++) if ((double)y <= p->points[i].y) break; q = p->points + i - 1; if ((((q + 1)->x - q->x) * (y - q->y)) <= (((q + 1)->y - q->y) * (x - q->x))) winding_number += p->direction ? 1 : -1; } if (fill_rule != NonZeroRule) { if ((MagickAbsoluteValue(winding_number) & 0x01) != 0) return (1.0); } else if (MagickAbsoluteValue(winding_number) != 0) return (1.0); return (subpath_alpha); } static MagickBooleanType DrawPolygonPrimitive(Image * image, const DrawInfo * draw_info, const PrimitiveInfo * primitive_info, ExceptionInfo * exception) { CacheView * image_view; MagickBooleanType fill, status; double mid; PolygonInfo ** magick_restrict polygon_info; register EdgeInfo * p; register ssize_t i; SegmentInfo bounds; ssize_t start_y, stop_y, y; /* * Compute bounding box. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(draw_info != (DrawInfo *) NULL); assert(draw_info->signature == MagickCoreSignature); assert(primitive_info != (PrimitiveInfo *) NULL); if (primitive_info->coordinates == 0) return (MagickTrue); polygon_info = AcquirePolygonThreadSet(primitive_info); if (polygon_info == (PolygonInfo **) NULL) return (MagickFalse); DisableMSCWarning(4127) if (0) DrawBoundingRectangles(image, draw_info, polygon_info[0], exception); RestoreMSCWarning if (image->debug != MagickFalse) (void)LogMagickEvent(DrawEvent, GetMagickModule(), " begin draw-polygon"); fill = (primitive_info->method == FillToBorderMethod) || (primitive_info->method == FloodfillMethod) ? MagickTrue : MagickFalse; mid = ExpandAffine(&draw_info->affine) * draw_info->stroke_width / 2.0; bounds = polygon_info[0]->edges[0].bounds; for (i = 1; i < (ssize_t) polygon_info[0]->number_edges; i++) { p = polygon_info[0]->edges + i; if (p->bounds.x1 < bounds.x1) bounds.x1 = p->bounds.x1; if (p->bounds.y1 < bounds.y1) bounds.y1 = p->bounds.y1; if (p->bounds.x2 > bounds.x2) bounds.x2 = p->bounds.x2; if (p->bounds.y2 > bounds.y2) bounds.y2 = p->bounds.y2; } bounds.x1 -= (mid + 1.0); bounds.x1 = bounds.x1 < 0.0 ? 
0.0 : (size_t) ceil(bounds.x1 - 0.5) >= image->columns ? (double)image->columns - 1 : bounds.x1; bounds.y1 -= (mid + 1.0); bounds.y1 = bounds.y1 < 0.0 ? 0.0 : (size_t) ceil(bounds.y1 - 0.5) >= image->rows ? (double)image->rows - 1 : bounds.y1; bounds.x2 += (mid + 1.0); bounds.x2 = bounds.x2 < 0.0 ? 0.0 : (size_t) floor(bounds.x2 + 0.5) >= image->columns ? (double)image->columns - 1 : bounds.x2; bounds.y2 += (mid + 1.0); bounds.y2 = bounds.y2 < 0.0 ? 0.0 : (size_t) floor(bounds.y2 + 0.5) >= image->rows ? (double)image->rows - 1 : bounds.y2; status = MagickTrue; image_view = AcquireAuthenticCacheView(image, exception); if ((primitive_info->coordinates == 1) || (polygon_info[0]->number_edges == 0)) { /* * Draw point. */ start_y = (ssize_t) ceil(bounds.y1 - 0.5); stop_y = (ssize_t) floor(bounds.y2 + 0.5); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,1,1) #endif for (y = start_y; y <= stop_y; y++) { MagickBooleanType sync; PixelInfo pixel; register ssize_t x; register Quantum * magick_restrict q; ssize_t start_x, stop_x; if (status == MagickFalse) continue; start_x = (ssize_t) ceil(bounds.x1 - 0.5); stop_x = (ssize_t) floor(bounds.x2 + 0.5); x = start_x; q = GetCacheViewAuthenticPixels(image_view, x, y, (size_t) (stop_x - x + 1), 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } GetPixelInfo(image, &pixel); for (; x <= stop_x; x++) { if ((x == (ssize_t) ceil(primitive_info->point.x - 0.5)) && (y == (ssize_t) ceil(primitive_info->point.y - 0.5))) { GetFillColor(draw_info, x - start_x, y - start_y, &pixel, exception); SetPixelViaPixelInfo(image, &pixel, q); } q += GetPixelChannels(image); } sync = SyncCacheViewAuthenticPixels(image_view, exception); if (sync == MagickFalse) status = MagickFalse; } image_view = DestroyCacheView(image_view); polygon_info = DestroyPolygonThreadSet(polygon_info); if (image->debug != MagickFalse) (void)LogMagickEvent(DrawEvent, GetMagickModule(), " end draw-polygon"); return (status); } /* * Draw polygon or line. */ if (image->alpha_trait == UndefinedPixelTrait) (void)SetImageAlphaChannel(image, OpaqueAlphaChannel, exception); start_y = (ssize_t) ceil(bounds.y1 - 0.5); stop_y = (ssize_t) floor(bounds.y2 + 0.5); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,1,1) #endif for (y = start_y; y <= stop_y; y++) { const int id = GetOpenMPThreadId(); double fill_alpha, stroke_alpha; PixelInfo fill_color, stroke_color; register Quantum * magick_restrict q; register ssize_t x; ssize_t start_x, stop_x; if (status == MagickFalse) continue; start_x = (ssize_t) ceil(bounds.x1 - 0.5); stop_x = (ssize_t) floor(bounds.x2 + 0.5); q = GetCacheViewAuthenticPixels(image_view, start_x, y, (size_t) (stop_x - start_x + 1), 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } for (x = start_x; x <= stop_x; x++) { /* * Fill and/or stroke. */ fill_alpha = GetFillAlpha(polygon_info[id], mid, fill, draw_info->fill_rule, x, y, &stroke_alpha); if (draw_info->stroke_antialias == MagickFalse) { fill_alpha = fill_alpha > 0.25 ? 1.0 : 0.0; stroke_alpha = stroke_alpha > 0.25 ? 
1.0 : 0.0; } GetFillColor(draw_info, x - start_x, y - start_y, &fill_color, exception); fill_alpha = fill_alpha * fill_color.alpha; CompositePixelOver(image, &fill_color, fill_alpha, q, (double) GetPixelAlpha(image, q), q); GetStrokeColor(draw_info, x - start_x, y - start_y, &stroke_color, exception); stroke_alpha = stroke_alpha * stroke_color.alpha; CompositePixelOver(image, &stroke_color, stroke_alpha, q, (double) GetPixelAlpha(image, q), q); q += GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; } image_view = DestroyCacheView(image_view); polygon_info = DestroyPolygonThreadSet(polygon_info); if (image->debug != MagickFalse) (void)LogMagickEvent(DrawEvent, GetMagickModule(), " end draw-polygon"); return (status); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % D r a w P r i m i t i v e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % DrawPrimitive() draws a primitive (line, rectangle, ellipse) on * the image. % % The format of the DrawPrimitive method is: % % * MagickBooleanType DrawPrimitive(Image *image,const DrawInfo *draw_info, % * PrimitiveInfo *primitive_info,ExceptionInfo *exception) % % A description * of each parameter follows: % % o image: the image. % % o draw_info: * the draw info. % % o primitive_info: Specifies a pointer to a * PrimitiveInfo structure. % % o exception: return any errors or warnings * in this structure. % */ static void LogPrimitiveInfo(const PrimitiveInfo * primitive_info) { const char *methods[] = { "point", "replace", "floodfill", "filltoborder", "reset", "?" }; PointInfo p, q, point; register ssize_t i, x; ssize_t coordinates, y; x = (ssize_t) ceil(primitive_info->point.x - 0.5); y = (ssize_t) ceil(primitive_info->point.y - 0.5); switch (primitive_info->primitive) { case AlphaPrimitive: { (void)LogMagickEvent(DrawEvent, GetMagickModule(), "AlphaPrimitive %.20g,%.20g %s", (double)x, (double)y, methods[primitive_info->method]); return; } case ColorPrimitive: { (void)LogMagickEvent(DrawEvent, GetMagickModule(), "ColorPrimitive %.20g,%.20g %s", (double)x, (double)y, methods[primitive_info->method]); return; } case ImagePrimitive: { (void)LogMagickEvent(DrawEvent, GetMagickModule(), "ImagePrimitive %.20g,%.20g", (double)x, (double)y); return; } case PointPrimitive: { (void)LogMagickEvent(DrawEvent, GetMagickModule(), "PointPrimitive %.20g,%.20g %s", (double)x, (double)y, methods[primitive_info->method]); return; } case TextPrimitive: { (void)LogMagickEvent(DrawEvent, GetMagickModule(), "TextPrimitive %.20g,%.20g", (double)x, (double)y); return; } default: break; } coordinates = 0; p = primitive_info[0].point; q.x = (-1.0); q.y = (-1.0); for (i = 0; primitive_info[i].primitive != UndefinedPrimitive; i++) { point = primitive_info[i].point; if (coordinates <= 0) { coordinates = (ssize_t) primitive_info[i].coordinates; (void)LogMagickEvent(DrawEvent, GetMagickModule(), " begin open (%.20g)", (double)coordinates); p = point; } point = primitive_info[i].point; if ((fabs(q.x - point.x) >= DrawEpsilon) || (fabs(q.y - point.y) >= DrawEpsilon)) (void)LogMagickEvent(DrawEvent, GetMagickModule(), " %.20g: %.18g,%.18g", (double)coordinates, point.x, point.y); else (void)LogMagickEvent(DrawEvent, GetMagickModule(), " %.20g: %g %g (duplicate)", (double)coordinates, point.x, point.y); q = point; coordinates--; if (coordinates > 0) continue; if ((fabs(p.x - point.x) >= DrawEpsilon) 
|| (fabs(p.y - point.y) >= DrawEpsilon)) (void)LogMagickEvent(DrawEvent, GetMagickModule(), " end last (%.20g)", (double)coordinates); else (void)LogMagickEvent(DrawEvent, GetMagickModule(), " end open (%.20g)", (double)coordinates); } } MagickExport MagickBooleanType DrawPrimitive(Image * image, const DrawInfo * draw_info, const PrimitiveInfo * primitive_info, ExceptionInfo * exception) { CacheView * image_view; MagickStatusType status; register ssize_t i, x; ssize_t y; if (image->debug != MagickFalse) { (void)LogMagickEvent(DrawEvent, GetMagickModule(), " begin draw-primitive"); (void)LogMagickEvent(DrawEvent, GetMagickModule(), " affine: %g,%g,%g,%g,%g,%g", draw_info->affine.sx, draw_info->affine.rx, draw_info->affine.ry, draw_info->affine.sy, draw_info->affine.tx, draw_info->affine.ty); } if ((IsGrayColorspace(image->colorspace) != MagickFalse) && ((IsPixelInfoGray(&draw_info->fill) == MagickFalse) || (IsPixelInfoGray(&draw_info->stroke) == MagickFalse))) (void)SetImageColorspace(image, sRGBColorspace, exception); status = MagickTrue; x = (ssize_t) ceil(primitive_info->point.x - 0.5); y = (ssize_t) ceil(primitive_info->point.y - 0.5); image_view = AcquireAuthenticCacheView(image, exception); switch (primitive_info->primitive) { case AlphaPrimitive: { if (image->alpha_trait == UndefinedPixelTrait) (void)SetImageAlphaChannel(image, OpaqueAlphaChannel, exception); switch (primitive_info->method) { case PointMethod: default: { PixelInfo pixel; register Quantum * q; q = GetCacheViewAuthenticPixels(image_view, x, y, 1, 1, exception); if (q == (Quantum *) NULL) break; GetFillColor(draw_info, x, y, &pixel, exception); SetPixelAlpha(image, ClampToQuantum(pixel.alpha), q); (void)SyncCacheViewAuthenticPixels(image_view, exception); break; } case ReplaceMethod: { MagickBooleanType sync; PixelInfo pixel, target; (void)GetOneCacheViewVirtualPixelInfo(image_view, x, y, &target, exception); GetPixelInfo(image, &pixel); for (y = 0; y < (ssize_t) image->rows; y++) { register Quantum * magick_restrict q; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) break; for (x = 0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image, q, &pixel); if (IsFuzzyEquivalencePixelInfo(&pixel, &target) == MagickFalse) { q += GetPixelChannels(image); continue; } GetFillColor(draw_info, x, y, &pixel, exception); SetPixelAlpha(image, ClampToQuantum(pixel.alpha), q); q += GetPixelChannels(image); } sync = SyncCacheViewAuthenticPixels(image_view, exception); if (sync == MagickFalse) break; } break; } case FloodfillMethod: case FillToBorderMethod: { ChannelType channel_mask; PixelInfo target; (void)GetOneVirtualPixelInfo(image, TileVirtualPixelMethod, x, y, &target, exception); if (primitive_info->method == FillToBorderMethod) { target.red = (double)draw_info->border_color.red; target.green = (double)draw_info->border_color.green; target.blue = (double)draw_info->border_color.blue; } channel_mask = SetImageChannelMask(image, AlphaChannel); status &= FloodfillPaintImage(image, draw_info, &target, x, y, primitive_info->method == FloodfillMethod ? 
MagickFalse : MagickTrue, exception); (void)SetImageChannelMask(image, channel_mask); break; } case ResetMethod: { MagickBooleanType sync; PixelInfo pixel; for (y = 0; y < (ssize_t) image->rows; y++) { register Quantum * magick_restrict q; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) break; for (x = 0; x < (ssize_t) image->columns; x++) { GetFillColor(draw_info, x, y, &pixel, exception); SetPixelAlpha(image, ClampToQuantum(pixel.alpha), q); q += GetPixelChannels(image); } sync = SyncCacheViewAuthenticPixels(image_view, exception); if (sync == MagickFalse) break; } break; } } break; } case ColorPrimitive: { switch (primitive_info->method) { case PointMethod: default: { PixelInfo pixel; register Quantum * q; q = GetCacheViewAuthenticPixels(image_view, x, y, 1, 1, exception); if (q == (Quantum *) NULL) break; GetPixelInfo(image, &pixel); GetFillColor(draw_info, x, y, &pixel, exception); SetPixelViaPixelInfo(image, &pixel, q); (void)SyncCacheViewAuthenticPixels(image_view, exception); break; } case ReplaceMethod: { MagickBooleanType sync; PixelInfo pixel, target; (void)GetOneCacheViewVirtualPixelInfo(image_view, x, y, &target, exception); for (y = 0; y < (ssize_t) image->rows; y++) { register Quantum * magick_restrict q; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) break; for (x = 0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image, q, &pixel); if (IsFuzzyEquivalencePixelInfo(&pixel, &target) == MagickFalse) { q += GetPixelChannels(image); continue; } GetFillColor(draw_info, x, y, &pixel, exception); SetPixelViaPixelInfo(image, &pixel, q); q += GetPixelChannels(image); } sync = SyncCacheViewAuthenticPixels(image_view, exception); if (sync == MagickFalse) break; } break; } case FloodfillMethod: case FillToBorderMethod: { PixelInfo target; (void)GetOneVirtualPixelInfo(image, TileVirtualPixelMethod, x, y, &target, exception); if (primitive_info->method == FillToBorderMethod) { target.red = (double)draw_info->border_color.red; target.green = (double)draw_info->border_color.green; target.blue = (double)draw_info->border_color.blue; } status &= FloodfillPaintImage(image, draw_info, &target, x, y, primitive_info->method == FloodfillMethod ? 
MagickFalse : MagickTrue, exception); break; } case ResetMethod: { MagickBooleanType sync; PixelInfo pixel; GetPixelInfo(image, &pixel); for (y = 0; y < (ssize_t) image->rows; y++) { register Quantum * magick_restrict q; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) break; for (x = 0; x < (ssize_t) image->columns; x++) { GetFillColor(draw_info, x, y, &pixel, exception); SetPixelViaPixelInfo(image, &pixel, q); q += GetPixelChannels(image); } sync = SyncCacheViewAuthenticPixels(image_view, exception); if (sync == MagickFalse) break; } break; } } break; } case ImagePrimitive: { AffineMatrix affine; char composite_geometry[MagickPathExtent]; Image * composite_image; ImageInfo * clone_info; RectangleInfo geometry; ssize_t x1, y1; if (primitive_info->text == (char *)NULL) break; clone_info = AcquireImageInfo(); if (LocaleNCompare(primitive_info->text, "data:", 5) == 0) composite_image = ReadInlineImage(clone_info, primitive_info->text, exception); else { (void)CopyMagickString(clone_info->filename, primitive_info->text, MagickPathExtent); composite_image = ReadImage(clone_info, exception); } clone_info = DestroyImageInfo(clone_info); if (composite_image == (Image *) NULL) break; (void)SetImageProgressMonitor(composite_image, (MagickProgressMonitor) NULL, (void *)NULL); x1 = (ssize_t) ceil(primitive_info[1].point.x - 0.5); y1 = (ssize_t) ceil(primitive_info[1].point.y - 0.5); if (((x1 != 0L) && (x1 != (ssize_t) composite_image->columns)) || ((y1 != 0L) && (y1 != (ssize_t) composite_image->rows))) { /* * Resize image. */ (void)FormatLocaleString(composite_geometry, MagickPathExtent, "%gx%g!", primitive_info[1].point.x, primitive_info[1].point.y); composite_image->filter = image->filter; (void)TransformImage(&composite_image, (char *)NULL, composite_geometry, exception); } if (composite_image->alpha_trait == UndefinedPixelTrait) (void)SetImageAlphaChannel(composite_image, OpaqueAlphaChannel, exception); if (draw_info->alpha != OpaqueAlpha) (void)SetImageAlpha(composite_image, draw_info->alpha, exception); SetGeometry(image, &geometry); image->gravity = draw_info->gravity; geometry.x = x; geometry.y = y; (void)FormatLocaleString(composite_geometry, MagickPathExtent, "%.20gx%.20g%+.20g%+.20g", (double)composite_image->columns, (double) composite_image->rows, (double)geometry.x, (double)geometry.y); (void)ParseGravityGeometry(image, composite_geometry, &geometry, exception); affine = draw_info->affine; affine.tx = (double)geometry.x; affine.ty = (double)geometry.y; composite_image->interpolate = image->interpolate; if (draw_info->compose == OverCompositeOp) (void)DrawAffineImage(image, composite_image, &affine, exception); else (void)CompositeImage(image, composite_image, draw_info->compose, MagickTrue, geometry.x, geometry.y, exception); composite_image = DestroyImage(composite_image); break; } case PointPrimitive: { PixelInfo fill_color; register Quantum * q; if ((y < 0) || (y >= (ssize_t) image->rows)) break; if ((x < 0) || (x >= (ssize_t) image->columns)) break; q = GetCacheViewAuthenticPixels(image_view, x, y, 1, 1, exception); if (q == (Quantum *) NULL) break; GetFillColor(draw_info, x, y, &fill_color, exception); CompositePixelOver(image, &fill_color, (double)fill_color.alpha, q, (double)GetPixelAlpha(image, q), q); (void)SyncCacheViewAuthenticPixels(image_view, exception); break; } case TextPrimitive: { char geometry[MagickPathExtent]; DrawInfo * clone_info; if (primitive_info->text == (char *)NULL) break; clone_info = 
CloneDrawInfo((ImageInfo *) NULL, draw_info); (void)CloneString(&clone_info->text, primitive_info->text); (void)FormatLocaleString(geometry, MagickPathExtent, "%+f%+f", primitive_info->point.x, primitive_info->point.y); (void)CloneString(&clone_info->geometry, geometry); status &= AnnotateImage(image, clone_info, exception); clone_info = DestroyDrawInfo(clone_info); break; } default: { double mid, scale; DrawInfo * clone_info; if (IsEventLogging() != MagickFalse) LogPrimitiveInfo(primitive_info); scale = ExpandAffine(&draw_info->affine); if ((draw_info->dash_pattern != (double *)NULL) && (fabs(draw_info->dash_pattern[0]) >= DrawEpsilon) && (fabs(scale * draw_info->stroke_width) >= DrawEpsilon) && (draw_info->stroke.alpha != (Quantum) TransparentAlpha)) { /* * Draw dash polygon. */ clone_info = CloneDrawInfo((ImageInfo *) NULL, draw_info); clone_info->stroke_width = 0.0; clone_info->stroke.alpha = (MagickRealType) TransparentAlpha; status &= DrawPolygonPrimitive(image, clone_info, primitive_info, exception); clone_info = DestroyDrawInfo(clone_info); (void)DrawDashPolygon(draw_info, primitive_info, image, exception); break; } mid = ExpandAffine(&draw_info->affine) * draw_info->stroke_width / 2.0; if ((mid > 1.0) && ((draw_info->stroke.alpha != (Quantum) TransparentAlpha) || (draw_info->stroke_pattern != (Image *) NULL))) { MagickBooleanType closed_path; /* * Draw strokes while respecting line cap/join attributes. */ for (i = 0; primitive_info[i].primitive != UndefinedPrimitive; i++); closed_path = (fabs(primitive_info[i - 1].point.x - primitive_info[0].point.x) < DrawEpsilon) && (fabs(primitive_info[i - 1].point.y - primitive_info[0].point.y) < DrawEpsilon) ? MagickTrue : MagickFalse; i = (ssize_t) primitive_info[0].coordinates; if (((closed_path != MagickFalse) && (draw_info->linejoin == RoundJoin)) || (primitive_info[i].primitive != UndefinedPrimitive)) { (void)DrawPolygonPrimitive(image, draw_info, primitive_info, exception); break; } if (draw_info->linecap == RoundCap) { (void)DrawPolygonPrimitive(image, draw_info, primitive_info, exception); break; } clone_info = CloneDrawInfo((ImageInfo *) NULL, draw_info); clone_info->stroke_width = 0.0; clone_info->stroke.alpha = (MagickRealType) TransparentAlpha; status &= DrawPolygonPrimitive(image, clone_info, primitive_info, exception); clone_info = DestroyDrawInfo(clone_info); status &= DrawStrokePolygon(image, draw_info, primitive_info, exception); break; } status &= DrawPolygonPrimitive(image, draw_info, primitive_info, exception); break; } } image_view = DestroyCacheView(image_view); if (image->debug != MagickFalse) (void)LogMagickEvent(DrawEvent, GetMagickModule(), " end draw-primitive"); return (status != 0 ? MagickTrue : MagickFalse); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % + D r a w S t r o k e P o l y g o n * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % DrawStrokePolygon() draws a stroked polygon (line, rectangle, * ellipse) on % the image while respecting the line cap and join * attributes. % % The format of the DrawStrokePolygon method is: % % * MagickBooleanType DrawStrokePolygon(Image *image, % const DrawInfo * *draw_info,const PrimitiveInfo *primitive_info) % % A description of each * parameter follows: % % o image: the image. % % o draw_info: the draw * info. % % o primitive_info: Specifies a pointer to a PrimitiveInfo * structure. 
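%
%  In outline, each subpath is expanded into an outline polygon with
%  TraceStrokePolygon() and that outline is filled with the stroke color
%  (stroking is disabled on the cloned draw info); open subpaths
%  additionally receive round line caps via DrawRoundLinecap() when the
%  line cap is RoundCap.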
% % */ static void DrawRoundLinecap(Image * image, const DrawInfo * draw_info, const PrimitiveInfo * primitive_info, ExceptionInfo * exception) { PrimitiveInfo linecap[5]; register ssize_t i; for (i = 0; i < 4; i++) linecap[i] = (*primitive_info); linecap[0].coordinates = 4; linecap[1].point.x += 2.0 * DrawEpsilon; linecap[2].point.x += 2.0 * DrawEpsilon; linecap[2].point.y += 2.0 * DrawEpsilon; linecap[3].point.y += 2.0 * DrawEpsilon; linecap[4].primitive = UndefinedPrimitive; (void)DrawPolygonPrimitive(image, draw_info, linecap, exception); } static MagickBooleanType DrawStrokePolygon(Image * image, const DrawInfo * draw_info, const PrimitiveInfo * primitive_info, ExceptionInfo * exception) { DrawInfo * clone_info; MagickBooleanType closed_path; MagickStatusType status; PrimitiveInfo * stroke_polygon; register const PrimitiveInfo * p, *q; /* * Draw stroked polygon. */ if (image->debug != MagickFalse) (void)LogMagickEvent(DrawEvent, GetMagickModule(), " begin draw-stroke-polygon"); clone_info = CloneDrawInfo((ImageInfo *) NULL, draw_info); clone_info->fill = draw_info->stroke; if (clone_info->fill_pattern != (Image *) NULL) clone_info->fill_pattern = DestroyImage(clone_info->fill_pattern); if (clone_info->stroke_pattern != (Image *) NULL) clone_info->fill_pattern = CloneImage(clone_info->stroke_pattern, 0, 0, MagickTrue, exception); clone_info->stroke.alpha = (MagickRealType) TransparentAlpha; clone_info->stroke_width = 0.0; clone_info->fill_rule = NonZeroRule; status = MagickTrue; for (p = primitive_info; p->primitive != UndefinedPrimitive; p += p->coordinates) { stroke_polygon = TraceStrokePolygon(draw_info, p); status &= DrawPolygonPrimitive(image, clone_info, stroke_polygon, exception); if (status == 0) break; stroke_polygon = (PrimitiveInfo *) RelinquishMagickMemory(stroke_polygon); q = p + p->coordinates - 1; closed_path = (fabs(q->point.x - p->point.x) < DrawEpsilon) && (fabs(q->point.y - p->point.y) < DrawEpsilon) ? MagickTrue : MagickFalse; if ((draw_info->linecap == RoundCap) && (closed_path == MagickFalse)) { DrawRoundLinecap(image, draw_info, p, exception); DrawRoundLinecap(image, draw_info, q, exception); } } clone_info = DestroyDrawInfo(clone_info); if (image->debug != MagickFalse) (void)LogMagickEvent(DrawEvent, GetMagickModule(), " end draw-stroke-polygon"); return (status != 0 ? MagickTrue : MagickFalse); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % G e t A f f i n e M a t r i x * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % GetAffineMatrix() returns an AffineMatrix initialized to the * identity % matrix. % % The format of the GetAffineMatrix method is: % % * void GetAffineMatrix(AffineMatrix *affine_matrix) % % A description of * each parameter follows: % % o affine_matrix: the affine matrix. % */ MagickExport void GetAffineMatrix(AffineMatrix * affine_matrix) { (void)LogMagickEvent(TraceEvent, GetMagickModule(), "..."); assert(affine_matrix != (AffineMatrix *) NULL); (void)ResetMagickMemory(affine_matrix, 0, sizeof(*affine_matrix)); affine_matrix->sx = 1.0; affine_matrix->sy = 1.0; } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % + G e t D r a w I n f o * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % GetDrawInfo() initializes draw_info to default values from * image_info. 
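%
%  A hedged usage sketch (this mirrors how a caller-allocated structure is
%  typically initialized; AcquireDrawInfo() or CloneDrawInfo() are the more
%  common conveniences):
%
%      DrawInfo
%        *draw_info;
%
%      draw_info=(DrawInfo *) AcquireMagickMemory(sizeof(*draw_info));
%      if (draw_info == (DrawInfo *) NULL)
%        ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
%      GetDrawInfo(image_info,draw_info);
%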
% % The format of the GetDrawInfo method is: % % void * GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info) % % A * description of each parameter follows: % % o image_info: the image * info.. % % o draw_info: the draw info. % */ MagickExport void GetDrawInfo(const ImageInfo * image_info, DrawInfo * draw_info) { char *next_token; const char *option; ExceptionInfo * exception; ImageInfo * clone_info; /* * Initialize draw attributes. */ (void)LogMagickEvent(TraceEvent, GetMagickModule(), "..."); assert(draw_info != (DrawInfo *) NULL); (void)ResetMagickMemory(draw_info, 0, sizeof(*draw_info)); clone_info = CloneImageInfo(image_info); GetAffineMatrix(&draw_info->affine); exception = AcquireExceptionInfo(); (void)QueryColorCompliance("#000F", AllCompliance, &draw_info->fill, exception); (void)QueryColorCompliance("#0000", AllCompliance, &draw_info->stroke, exception); draw_info->stroke_width = 1.0; draw_info->fill_rule = EvenOddRule; draw_info->alpha = OpaqueAlpha; draw_info->fill_alpha = OpaqueAlpha; draw_info->stroke_alpha = OpaqueAlpha; draw_info->linecap = ButtCap; draw_info->linejoin = MiterJoin; draw_info->miterlimit = 10; draw_info->decorate = NoDecoration; draw_info->pointsize = 12.0; draw_info->undercolor.alpha = (MagickRealType) TransparentAlpha; draw_info->compose = OverCompositeOp; draw_info->render = MagickTrue; draw_info->debug = IsEventLogging(); draw_info->stroke_antialias = clone_info->antialias; if (clone_info->font != (char *)NULL) draw_info->font = AcquireString(clone_info->font); if (clone_info->density != (char *)NULL) draw_info->density = AcquireString(clone_info->density); draw_info->text_antialias = clone_info->antialias; if (fabs(clone_info->pointsize) >= DrawEpsilon) draw_info->pointsize = clone_info->pointsize; draw_info->border_color = clone_info->border_color; if (clone_info->server_name != (char *)NULL) draw_info->server_name = AcquireString(clone_info->server_name); option = GetImageOption(clone_info, "direction"); if (option != (const char *)NULL) draw_info->direction = (DirectionType) ParseCommandOption( MagickDirectionOptions, MagickFalse, option); else draw_info->direction = UndefinedDirection; option = GetImageOption(clone_info, "encoding"); if (option != (const char *)NULL) (void)CloneString(&draw_info->encoding, option); option = GetImageOption(clone_info, "family"); if (option != (const char *)NULL) (void)CloneString(&draw_info->family, option); option = GetImageOption(clone_info, "fill"); if (option != (const char *)NULL) (void)QueryColorCompliance(option, AllCompliance, &draw_info->fill, exception); option = GetImageOption(clone_info, "gravity"); if (option != (const char *)NULL) draw_info->gravity = (GravityType) ParseCommandOption(MagickGravityOptions, MagickFalse, option); option = GetImageOption(clone_info, "interline-spacing"); if (option != (const char *)NULL) draw_info->interline_spacing = StringToDouble(option, &next_token); option = GetImageOption(clone_info, "interword-spacing"); if (option != (const char *)NULL) draw_info->interword_spacing = StringToDouble(option, &next_token); option = GetImageOption(clone_info, "kerning"); if (option != (const char *)NULL) draw_info->kerning = StringToDouble(option, &next_token); option = GetImageOption(clone_info, "stroke"); if (option != (const char *)NULL) (void)QueryColorCompliance(option, AllCompliance, &draw_info->stroke, exception); option = GetImageOption(clone_info, "strokewidth"); if (option != (const char *)NULL) draw_info->stroke_width = StringToDouble(option, &next_token); option = 
GetImageOption(clone_info, "style"); if (option != (const char *)NULL) draw_info->style = (StyleType) ParseCommandOption(MagickStyleOptions, MagickFalse, option); option = GetImageOption(clone_info, "undercolor"); if (option != (const char *)NULL) (void)QueryColorCompliance(option, AllCompliance, &draw_info->undercolor, exception); option = GetImageOption(clone_info, "weight"); if (option != (const char *)NULL) { ssize_t weight; weight = ParseCommandOption(MagickWeightOptions, MagickFalse, option); if (weight == -1) weight = (ssize_t) StringToUnsignedLong(option); draw_info->weight = (size_t) weight; } exception = DestroyExceptionInfo(exception); draw_info->signature = MagickCoreSignature; clone_info = DestroyImageInfo(clone_info); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % + P e r m u t a t e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % Permutate() returns the permuation of the (n,k). % % The format * of the Permutate method is: % % void Permutate(ssize_t n,ssize_t k) % * % A description of each parameter follows: % % o n: % % o k: % % */ static inline double Permutate(const ssize_t n, const ssize_t k) { double r; register ssize_t i; r = 1.0; for (i = k + 1; i <= n; i++) r *= i; for (i = 1; i <= (n - k); i++) r /= i; return (r); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % + T r a c e P r i m i t i v e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % TracePrimitive is a collection of methods for generating graphic * % primitives such as arcs, ellipses, paths, etc. % */ static void TraceArc(PrimitiveInfo * primitive_info, const PointInfo start, const PointInfo end, const PointInfo degrees) { PointInfo center, radii; center.x = 0.5 * (end.x + start.x); center.y = 0.5 * (end.y + start.y); radii.x = fabs(center.x - start.x); radii.y = fabs(center.y - start.y); TraceEllipse(primitive_info, center, radii, degrees); } static void TraceArcPath(PrimitiveInfo * primitive_info, const PointInfo start, const PointInfo end, const PointInfo arc, const double angle, const MagickBooleanType large_arc, const MagickBooleanType sweep) { double alpha, beta, delta, factor, gamma, theta; PointInfo center, points[3], radii; register double cosine, sine; register PrimitiveInfo * p; register ssize_t i; size_t arc_segments; if ((fabs(start.x - end.x) < DrawEpsilon) && (fabs(start.y - end.y) < DrawEpsilon)) { TracePoint(primitive_info, end); return; } radii.x = fabs(arc.x); radii.y = fabs(arc.y); if ((fabs(radii.x) < DrawEpsilon) || (fabs(radii.y) < DrawEpsilon)) { TraceLine(primitive_info, start, end); return; } cosine = cos(DegreesToRadians(fmod((double)angle, 360.0))); sine = sin(DegreesToRadians(fmod((double)angle, 360.0))); center.x = (double)(cosine * (end.x - start.x) / 2 + sine * (end.y - start.y) / 2); center.y = (double)(cosine * (end.y - start.y) / 2 - sine * (end.x - start.x) / 2); delta = (center.x * center.x) / (radii.x * radii.x) + (center.y * center.y) / (radii.y * radii.y); if (delta < DrawEpsilon) { TraceLine(primitive_info, start, end); return; } if (delta > 1.0) { radii.x *= sqrt((double)delta); radii.y *= sqrt((double)delta); } points[0].x = (double)(cosine * start.x / radii.x + sine * start.y / radii.x); points[0].y = (double)(cosine * start.y / radii.y - sine * start.x / radii.y); points[1].x = (double)(cosine * end.x / radii.x + sine * 
end.y / radii.x); points[1].y = (double)(cosine * end.y / radii.y - sine * end.x / radii.y); alpha = points[1].x - points[0].x; beta = points[1].y - points[0].y; factor = PerceptibleReciprocal(alpha * alpha + beta * beta) - 0.25; if (factor <= 0.0) factor = 0.0; else { factor = sqrt((double)factor); if (sweep == large_arc) factor = (-factor); } center.x = (double)((points[0].x + points[1].x) / 2 - factor * beta); center.y = (double)((points[0].y + points[1].y) / 2 + factor * alpha); alpha = atan2(points[0].y - center.y, points[0].x - center.x); theta = atan2(points[1].y - center.y, points[1].x - center.x) - alpha; if ((theta < 0.0) && (sweep != MagickFalse)) theta += 2.0 * MagickPI; else if ((theta > 0.0) && (sweep == MagickFalse)) theta -= 2.0 * MagickPI; arc_segments = (size_t) ceil(fabs((double)(theta / (0.5 * MagickPI + DrawEpsilon)))); p = primitive_info; for (i = 0; i < (ssize_t) arc_segments; i++) { beta = 0.5 * ((alpha + (i + 1) * theta / arc_segments) - (alpha + i * theta / arc_segments)); gamma = (8.0 / 3.0) * sin(fmod((double)(0.5 * beta), DegreesToRadians(360.0))) * sin(fmod((double)(0.5 * beta), DegreesToRadians(360.0))) / sin(fmod((double)beta, DegreesToRadians(360.0))); points[0].x = (double)(center.x + cos(fmod((double)(alpha + (double)i * theta / arc_segments), DegreesToRadians(360.0))) - gamma * sin(fmod((double)(alpha + (double)i * theta / arc_segments), DegreesToRadians(360.0)))); points[0].y = (double)(center.y + sin(fmod((double)(alpha + (double)i * theta / arc_segments), DegreesToRadians(360.0))) + gamma * cos(fmod((double)(alpha + (double)i * theta / arc_segments), DegreesToRadians(360.0)))); points[2].x = (double)(center.x + cos(fmod((double)(alpha + (double)(i + 1) * theta / arc_segments), DegreesToRadians(360.0)))); points[2].y = (double)(center.y + sin(fmod((double)(alpha + (double)(i + 1) * theta / arc_segments), DegreesToRadians(360.0)))); points[1].x = (double)(points[2].x + gamma * sin(fmod((double)(alpha + (double) (i + 1) * theta / arc_segments), DegreesToRadians(360.0)))); points[1].y = (double)(points[2].y - gamma * cos(fmod((double)(alpha + (double) (i + 1) * theta / arc_segments), DegreesToRadians(360.0)))); p->point.x = (p == primitive_info) ? start.x : (p - 1)->point.x; p->point.y = (p == primitive_info) ? start.y : (p - 1)->point.y; (p + 1)->point.x = (double)(cosine * radii.x * points[0].x - sine * radii.y * points[0].y); (p + 1)->point.y = (double)(sine * radii.x * points[0].x + cosine * radii.y * points[0].y); (p + 2)->point.x = (double)(cosine * radii.x * points[1].x - sine * radii.y * points[1].y); (p + 2)->point.y = (double)(sine * radii.x * points[1].x + cosine * radii.y * points[1].y); (p + 3)->point.x = (double)(cosine * radii.x * points[2].x - sine * radii.y * points[2].y); (p + 3)->point.y = (double)(sine * radii.x * points[2].x + cosine * radii.y * points[2].y); if (i == (ssize_t) (arc_segments - 1)) (p + 3)->point = end; TraceBezier(p, 4); p += p->coordinates; } primitive_info->coordinates = (size_t) (p - primitive_info); for (i = 0; i < (ssize_t) primitive_info->coordinates; i++) { p->primitive = primitive_info->primitive; p--; } } static void TraceBezier(PrimitiveInfo * primitive_info, const size_t number_coordinates) { double alpha, *coefficients, weight; PointInfo end, point, *points; register PrimitiveInfo * p; register ssize_t i, j; size_t control_points, quantum; /* * Allocate coeficients. 
*/ quantum = number_coordinates; for (i = 0; i < (ssize_t) number_coordinates; i++) { for (j = i + 1; j < (ssize_t) number_coordinates; j++) { alpha = fabs(primitive_info[j].point.x - primitive_info[i].point.x); if (alpha > (double)quantum) quantum = (size_t) alpha; alpha = fabs(primitive_info[j].point.y - primitive_info[i].point.y); if (alpha > (double)quantum) quantum = (size_t) alpha; } } quantum = (size_t) MagickMin((double)quantum / number_coordinates, (double)BezierQuantum); control_points = quantum * number_coordinates; coefficients = (double *)AcquireQuantumMemory((size_t) number_coordinates, sizeof(*coefficients)); points = (PointInfo *) AcquireQuantumMemory((size_t) control_points, sizeof(*points)); if ((coefficients == (double *)NULL) || (points == (PointInfo *) NULL)) ThrowFatalException(ResourceLimitFatalError, "MemoryAllocationFailed"); /* * Compute bezier points. */ end = primitive_info[number_coordinates - 1].point; for (i = 0; i < (ssize_t) number_coordinates; i++) coefficients[i] = Permutate((ssize_t) number_coordinates - 1, i); weight = 0.0; for (i = 0; i < (ssize_t) control_points; i++) { p = primitive_info; point.x = 0.0; point.y = 0.0; alpha = pow((double)(1.0 - weight), (double)number_coordinates - 1.0); for (j = 0; j < (ssize_t) number_coordinates; j++) { point.x += alpha * coefficients[j] * p->point.x; point.y += alpha * coefficients[j] * p->point.y; alpha *= weight / (1.0 - weight); p++; } points[i] = point; weight += 1.0 / control_points; } /* * Bezier curves are just short segmented polys. */ p = primitive_info; for (i = 0; i < (ssize_t) control_points; i++) { TracePoint(p, points[i]); p += p->coordinates; } TracePoint(p, end); p += p->coordinates; primitive_info->coordinates = (size_t) (p - primitive_info); for (i = 0; i < (ssize_t) primitive_info->coordinates; i++) { p->primitive = primitive_info->primitive; p--; } points = (PointInfo *) RelinquishMagickMemory(points); coefficients = (double *)RelinquishMagickMemory(coefficients); } static void TraceCircle(PrimitiveInfo * primitive_info, const PointInfo start, const PointInfo end) { double alpha, beta, radius; PointInfo offset, degrees; alpha = end.x - start.x; beta = end.y - start.y; radius = hypot((double)alpha, (double)beta); offset.x = (double)radius; offset.y = (double)radius; degrees.x = 0.0; degrees.y = 360.0; TraceEllipse(primitive_info, start, offset, degrees); } static void TraceEllipse(PrimitiveInfo * primitive_info, const PointInfo start, const PointInfo stop, const PointInfo degrees) { double delta, step, y; PointInfo angle, point; register PrimitiveInfo * p; register ssize_t i; /* * Ellipses are just short segmented polys. 
*/ if ((fabs(stop.x) < DrawEpsilon) && (fabs(stop.y) < DrawEpsilon)) { TracePoint(primitive_info, start); return; } delta = 2.0 / MagickMax(stop.x, stop.y); step = MagickPI / 8.0; if ((delta >= 0.0) && (delta < (MagickPI / 8.0))) step = MagickPI / (4 * (MagickPI / delta / 2 + 0.5)); angle.x = DegreesToRadians(degrees.x); y = degrees.y; while (y < degrees.x) y += 360.0; angle.y = DegreesToRadians(y); for (p = primitive_info; angle.x < angle.y; angle.x += step) { point.x = cos(fmod(angle.x, DegreesToRadians(360.0))) * stop.x + start.x; point.y = sin(fmod(angle.x, DegreesToRadians(360.0))) * stop.y + start.y; TracePoint(p, point); p += p->coordinates; } point.x = cos(fmod(angle.y, DegreesToRadians(360.0))) * stop.x + start.x; point.y = sin(fmod(angle.y, DegreesToRadians(360.0))) * stop.y + start.y; TracePoint(p, point); p += p->coordinates; primitive_info->coordinates = (size_t) (p - primitive_info); for (i = 0; i < (ssize_t) primitive_info->coordinates; i++) { p->primitive = primitive_info->primitive; p--; } } static void TraceLine(PrimitiveInfo * primitive_info, const PointInfo start, const PointInfo end) { TracePoint(primitive_info, start); if ((fabs(start.x - end.x) < DrawEpsilon) && (fabs(start.y - end.y) < DrawEpsilon)) { primitive_info->primitive = PointPrimitive; primitive_info->coordinates = 1; return; } TracePoint(primitive_info + 1, end); (primitive_info + 1)->primitive = primitive_info->primitive; primitive_info->coordinates = 2; } static size_t TracePath(PrimitiveInfo * primitive_info, const char *path) { char *next_token, token[MagickPathExtent]; const char *p; double x, y; int attribute, last_attribute; PointInfo end = { 0.0, 0.0 }, points[4] = { { 0.0, 0.0 }, { 0.0, 0.0 }, { 0.0, 0.0 }, { 0.0, 0.0 } }, point = { 0.0, 0.0 }, start = { 0.0, 0.0 }; PrimitiveType primitive_type; register PrimitiveInfo * q; register ssize_t i; size_t number_coordinates, z_count; attribute = 0; number_coordinates = 0; z_count = 0; primitive_type = primitive_info->primitive; q = primitive_info; for (p = path; *p != '\0';) { while (isspace((int)((unsigned char)*p)) != 0) p++; if (*p == '\0') break; last_attribute = attribute; attribute = (int)(*p++); switch (attribute) { case 'a': case 'A': { double angle; MagickBooleanType large_arc, sweep; PointInfo arc; /* * Compute arc points. */ do { GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') GetNextToken(p, &p, MagickPathExtent, token); arc.x = StringToDouble(token, &next_token); GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') GetNextToken(p, &p, MagickPathExtent, token); arc.y = StringToDouble(token, &next_token); GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') GetNextToken(p, &p, MagickPathExtent, token); angle = StringToDouble(token, &next_token); GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') GetNextToken(p, &p, MagickPathExtent, token); large_arc = StringToLong(token) != 0 ? MagickTrue : MagickFalse; GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') GetNextToken(p, &p, MagickPathExtent, token); sweep = StringToLong(token) != 0 ? MagickTrue : MagickFalse; GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') GetNextToken(p, &p, MagickPathExtent, token); x = StringToDouble(token, &next_token); GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') GetNextToken(p, &p, MagickPathExtent, token); y = StringToDouble(token, &next_token); end.x = (double)(attribute == (int)'A' ? x : point.x + x); end.y = (double)(attribute == (int)'A' ? 
y : point.y + y); TraceArcPath(q, point, end, arc, angle, large_arc, sweep); q += q->coordinates; point = end; while (isspace((int)((unsigned char)*p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'c': case 'C': { /* * Compute bezier points. */ do { points[0] = point; for (i = 1; i < 4; i++) { GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') GetNextToken(p, &p, MagickPathExtent, token); x = StringToDouble(token, &next_token); GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') GetNextToken(p, &p, MagickPathExtent, token); y = StringToDouble(token, &next_token); end.x = (double)(attribute == (int)'C' ? x : point.x + x); end.y = (double)(attribute == (int)'C' ? y : point.y + y); points[i] = end; } for (i = 0; i < 4; i++) (q + i)->point = points[i]; TraceBezier(q, 4); q += q->coordinates; point = end; } while (IsPoint(p) != MagickFalse); break; } case 'H': case 'h': { do { GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') GetNextToken(p, &p, MagickPathExtent, token); x = StringToDouble(token, &next_token); point.x = (double)(attribute == (int)'H' ? x : point.x + x); TracePoint(q, point); q += q->coordinates; } while (IsPoint(p) != MagickFalse); break; } case 'l': case 'L': { do { GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') GetNextToken(p, &p, MagickPathExtent, token); x = StringToDouble(token, &next_token); GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') GetNextToken(p, &p, MagickPathExtent, token); y = StringToDouble(token, &next_token); point.x = (double)(attribute == (int)'L' ? x : point.x + x); point.y = (double)(attribute == (int)'L' ? y : point.y + y); TracePoint(q, point); q += q->coordinates; } while (IsPoint(p) != MagickFalse); break; } case 'M': case 'm': { if (q != primitive_info) { primitive_info->coordinates = (size_t) (q - primitive_info); number_coordinates += primitive_info->coordinates; primitive_info = q; } i = 0; do { GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') GetNextToken(p, &p, MagickPathExtent, token); x = StringToDouble(token, &next_token); GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') GetNextToken(p, &p, MagickPathExtent, token); y = StringToDouble(token, &next_token); point.x = (double)(attribute == (int)'M' ? x : point.x + x); point.y = (double)(attribute == (int)'M' ? y : point.y + y); if (i == 0) start = point; i++; TracePoint(q, point); q += q->coordinates; if ((i != 0) && (attribute == (int)'M')) { TracePoint(q, point); q += q->coordinates; } } while (IsPoint(p) != MagickFalse); break; } case 'q': case 'Q': { /* * Compute bezier points. */ do { points[0] = point; for (i = 1; i < 3; i++) { GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') GetNextToken(p, &p, MagickPathExtent, token); x = StringToDouble(token, &next_token); GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') GetNextToken(p, &p, MagickPathExtent, token); y = StringToDouble(token, &next_token); if (*p == ',') p++; end.x = (double)(attribute == (int)'Q' ? x : point.x + x); end.y = (double)(attribute == (int)'Q' ? y : point.y + y); points[i] = end; } for (i = 0; i < 3; i++) (q + i)->point = points[i]; TraceBezier(q, 3); q += q->coordinates; point = end; } while (IsPoint(p) != MagickFalse); break; } case 's': case 'S': { /* * Compute bezier points. 
*/ do { points[0] = points[3]; points[1].x = 2.0 * points[3].x - points[2].x; points[1].y = 2.0 * points[3].y - points[2].y; for (i = 2; i < 4; i++) { GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') GetNextToken(p, &p, MagickPathExtent, token); x = StringToDouble(token, &next_token); GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') GetNextToken(p, &p, MagickPathExtent, token); y = StringToDouble(token, &next_token); if (*p == ',') p++; end.x = (double)(attribute == (int)'S' ? x : point.x + x); end.y = (double)(attribute == (int)'S' ? y : point.y + y); points[i] = end; } if (strchr("CcSs", last_attribute) == (char *)NULL) { points[0] = point; points[1] = point; } for (i = 0; i < 4; i++) (q + i)->point = points[i]; TraceBezier(q, 4); q += q->coordinates; point = end; } while (IsPoint(p) != MagickFalse); break; } case 't': case 'T': { /* * Compute bezier points. */ do { points[0] = points[2]; points[1].x = 2.0 * points[2].x - points[1].x; points[1].y = 2.0 * points[2].y - points[1].y; for (i = 2; i < 3; i++) { GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') GetNextToken(p, &p, MagickPathExtent, token); x = StringToDouble(token, &next_token); GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') GetNextToken(p, &p, MagickPathExtent, token); y = StringToDouble(token, &next_token); end.x = (double)(attribute == (int)'T' ? x : point.x + x); end.y = (double)(attribute == (int)'T' ? y : point.y + y); points[i] = end; } if (strchr("QqTt", last_attribute) == (char *)NULL) { points[0] = point; points[1] = point; } for (i = 0; i < 3; i++) (q + i)->point = points[i]; TraceBezier(q, 3); q += q->coordinates; point = end; } while (IsPoint(p) != MagickFalse); break; } case 'v': case 'V': { do { GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') GetNextToken(p, &p, MagickPathExtent, token); y = StringToDouble(token, &next_token); point.y = (double)(attribute == (int)'V' ? 
y : point.y + y); TracePoint(q, point); q += q->coordinates; } while (IsPoint(p) != MagickFalse); break; } case 'z': case 'Z': { point = start; TracePoint(q, point); q += q->coordinates; primitive_info->coordinates = (size_t) (q - primitive_info); number_coordinates += primitive_info->coordinates; primitive_info = q; z_count++; break; } default: { if (isalpha((int)((unsigned char)attribute)) != 0) (void)FormatLocaleFile(stderr, "attribute not recognized: %c\n", attribute); break; } } } primitive_info->coordinates = (size_t) (q - primitive_info); number_coordinates += primitive_info->coordinates; for (i = 0; i < (ssize_t) number_coordinates; i++) { q--; q->primitive = primitive_type; if (z_count > 1) q->method = FillToBorderMethod; } q = primitive_info; return (number_coordinates); } static void TraceRectangle(PrimitiveInfo * primitive_info, const PointInfo start, const PointInfo end) { PointInfo point; register PrimitiveInfo * p; register ssize_t i; p = primitive_info; TracePoint(p, start); p += p->coordinates; point.x = start.x; point.y = end.y; TracePoint(p, point); p += p->coordinates; TracePoint(p, end); p += p->coordinates; point.x = end.x; point.y = start.y; TracePoint(p, point); p += p->coordinates; TracePoint(p, start); p += p->coordinates; primitive_info->coordinates = (size_t) (p - primitive_info); for (i = 0; i < (ssize_t) primitive_info->coordinates; i++) { p->primitive = primitive_info->primitive; p--; } } static void TraceRoundRectangle(PrimitiveInfo * primitive_info, const PointInfo start, const PointInfo end, PointInfo arc) { PointInfo degrees, offset, point; register PrimitiveInfo * p; register ssize_t i; p = primitive_info; offset.x = fabs(end.x - start.x); offset.y = fabs(end.y - start.y); if (arc.x > (0.5 * offset.x)) arc.x = 0.5 * offset.x; if (arc.y > (0.5 * offset.y)) arc.y = 0.5 * offset.y; point.x = start.x + offset.x - arc.x; point.y = start.y + arc.y; degrees.x = 270.0; degrees.y = 360.0; TraceEllipse(p, point, arc, degrees); p += p->coordinates; point.x = start.x + offset.x - arc.x; point.y = start.y + offset.y - arc.y; degrees.x = 0.0; degrees.y = 90.0; TraceEllipse(p, point, arc, degrees); p += p->coordinates; point.x = start.x + arc.x; point.y = start.y + offset.y - arc.y; degrees.x = 90.0; degrees.y = 180.0; TraceEllipse(p, point, arc, degrees); p += p->coordinates; point.x = start.x + arc.x; point.y = start.y + arc.y; degrees.x = 180.0; degrees.y = 270.0; TraceEllipse(p, point, arc, degrees); p += p->coordinates; TracePoint(p, primitive_info->point); p += p->coordinates; primitive_info->coordinates = (size_t) (p - primitive_info); for (i = 0; i < (ssize_t) primitive_info->coordinates; i++) { p->primitive = primitive_info->primitive; p--; } } static void TraceSquareLinecap(PrimitiveInfo * primitive_info, const size_t number_vertices, const double offset) { double distance; register double dx, dy; register ssize_t i; ssize_t j; dx = 0.0; dy = 0.0; for (i = 1; i < (ssize_t) number_vertices; i++) { dx = primitive_info[0].point.x - primitive_info[i].point.x; dy = primitive_info[0].point.y - primitive_info[i].point.y; if ((fabs((double)dx) >= DrawEpsilon) || (fabs((double)dy) >= DrawEpsilon)) break; } if (i == (ssize_t) number_vertices) i = (ssize_t) number_vertices - 1L; distance = hypot((double)dx, (double)dy); primitive_info[0].point.x = (double)(primitive_info[i].point.x + dx * (distance + offset) / distance); primitive_info[0].point.y = (double)(primitive_info[i].point.y + dy * (distance + offset) / distance); for (j = (ssize_t) number_vertices - 2; j >= 0; 
j--) { dx = primitive_info[number_vertices - 1].point.x - primitive_info[j].point.x; dy = primitive_info[number_vertices - 1].point.y - primitive_info[j].point.y; if ((fabs((double)dx) >= DrawEpsilon) || (fabs((double)dy) >= DrawEpsilon)) break; } distance = hypot((double)dx, (double)dy); primitive_info[number_vertices - 1].point.x = (double)(primitive_info[j].point.x + dx * (distance + offset) / distance); primitive_info[number_vertices - 1].point.y = (double)(primitive_info[j].point.y + dy * (distance + offset) / distance); } static PrimitiveInfo * TraceStrokePolygon(const DrawInfo * draw_info, const PrimitiveInfo * primitive_info) { typedef struct _LineSegment { double p, q; } LineSegment; double delta_theta, dot_product, mid, miterlimit; LineSegment dx, dy, inverse_slope, slope, theta; MagickBooleanType closed_path; PointInfo box_p[5], box_q[5], center, offset, *path_p, *path_q; PrimitiveInfo * polygon_primitive, *stroke_polygon; register ssize_t i; size_t arc_segments, max_strokes, number_vertices; ssize_t j, n, p, q; /* * Allocate paths. */ number_vertices = primitive_info->coordinates; max_strokes = 2 * number_vertices + 6 * BezierQuantum + 360; path_p = (PointInfo *) AcquireQuantumMemory((size_t) max_strokes, sizeof(*path_p)); path_q = (PointInfo *) AcquireQuantumMemory((size_t) max_strokes, sizeof(*path_q)); polygon_primitive = (PrimitiveInfo *) AcquireQuantumMemory((size_t) number_vertices + 2UL, sizeof(*polygon_primitive)); if ((path_p == (PointInfo *) NULL) || (path_q == (PointInfo *) NULL) || (polygon_primitive == (PrimitiveInfo *) NULL)) return ((PrimitiveInfo *) NULL); (void)CopyMagickMemory(polygon_primitive, primitive_info, (size_t) number_vertices * sizeof(*polygon_primitive)); closed_path = (fabs(primitive_info[number_vertices - 1].point.x - primitive_info[0].point.x) < DrawEpsilon) && (fabs(primitive_info[number_vertices - 1].point.y - primitive_info[0].point.y) < DrawEpsilon) ? MagickTrue : MagickFalse; if (((draw_info->linejoin == RoundJoin) || (draw_info->linejoin == MiterJoin)) && (closed_path != MagickFalse)) { polygon_primitive[number_vertices] = primitive_info[1]; number_vertices++; } polygon_primitive[number_vertices].primitive = UndefinedPrimitive; /* * Compute the slope for the first line segment, p. */ dx.p = 0.0; dy.p = 0.0; for (n = 1; n < (ssize_t) number_vertices; n++) { dx.p = polygon_primitive[n].point.x - polygon_primitive[0].point.x; dy.p = polygon_primitive[n].point.y - polygon_primitive[0].point.y; if ((fabs(dx.p) >= DrawEpsilon) || (fabs(dy.p) >= DrawEpsilon)) break; } if (n == (ssize_t) number_vertices) n = (ssize_t) number_vertices - 1L; slope.p = 0.0; inverse_slope.p = 0.0; if (fabs(dx.p) < DrawEpsilon) { if (dx.p >= 0.0) slope.p = dy.p < 0.0 ? -1.0 / DrawEpsilon : 1.0 / DrawEpsilon; else slope.p = dy.p < 0.0 ? 1.0 / DrawEpsilon : -1.0 / DrawEpsilon; } else if (fabs(dy.p) < DrawEpsilon) { if (dy.p >= 0.0) inverse_slope.p = dx.p < 0.0 ? -1.0 / DrawEpsilon : 1.0 / DrawEpsilon; else inverse_slope.p = dx.p < 0.0 ? 
1.0 / DrawEpsilon : -1.0 / DrawEpsilon; } else { slope.p = dy.p / dx.p; inverse_slope.p = (-1.0 / slope.p); } mid = ExpandAffine(&draw_info->affine) * draw_info->stroke_width / 2.0; miterlimit = (double)(draw_info->miterlimit * draw_info->miterlimit * mid * mid); if ((draw_info->linecap == SquareCap) && (closed_path == MagickFalse)) TraceSquareLinecap(polygon_primitive, number_vertices, mid); offset.x = sqrt((double)(mid * mid / (inverse_slope.p * inverse_slope.p + 1.0))); offset.y = (double)(offset.x * inverse_slope.p); if ((dy.p * offset.x - dx.p * offset.y) > 0.0) { box_p[0].x = polygon_primitive[0].point.x - offset.x; box_p[0].y = polygon_primitive[0].point.y - offset.x * inverse_slope.p; box_p[1].x = polygon_primitive[n].point.x - offset.x; box_p[1].y = polygon_primitive[n].point.y - offset.x * inverse_slope.p; box_q[0].x = polygon_primitive[0].point.x + offset.x; box_q[0].y = polygon_primitive[0].point.y + offset.x * inverse_slope.p; box_q[1].x = polygon_primitive[n].point.x + offset.x; box_q[1].y = polygon_primitive[n].point.y + offset.x * inverse_slope.p; } else { box_p[0].x = polygon_primitive[0].point.x + offset.x; box_p[0].y = polygon_primitive[0].point.y + offset.y; box_p[1].x = polygon_primitive[n].point.x + offset.x; box_p[1].y = polygon_primitive[n].point.y + offset.y; box_q[0].x = polygon_primitive[0].point.x - offset.x; box_q[0].y = polygon_primitive[0].point.y - offset.y; box_q[1].x = polygon_primitive[n].point.x - offset.x; box_q[1].y = polygon_primitive[n].point.y - offset.y; } /* * Create strokes for the line join attribute: bevel, miter, round. */ p = 0; q = 0; path_q[p++] = box_q[0]; path_p[q++] = box_p[0]; for (i = (ssize_t) n + 1; i < (ssize_t) number_vertices; i++) { /* * Compute the slope for this line segment, q. */ dx.q = polygon_primitive[i].point.x - polygon_primitive[n].point.x; dy.q = polygon_primitive[i].point.y - polygon_primitive[n].point.y; dot_product = dx.q * dx.q + dy.q * dy.q; if (dot_product < 0.25) continue; slope.q = 0.0; inverse_slope.q = 0.0; if (fabs(dx.q) < DrawEpsilon) { if (dx.q >= 0.0) slope.q = dy.q < 0.0 ? -1.0 / DrawEpsilon : 1.0 / DrawEpsilon; else slope.q = dy.q < 0.0 ? 1.0 / DrawEpsilon : -1.0 / DrawEpsilon; } else if (fabs(dy.q) < DrawEpsilon) { if (dy.q >= 0.0) inverse_slope.q = dx.q < 0.0 ? -1.0 / DrawEpsilon : 1.0 / DrawEpsilon; else inverse_slope.q = dx.q < 0.0 ? 
1.0 / DrawEpsilon : -1.0 / DrawEpsilon; } else { slope.q = dy.q / dx.q; inverse_slope.q = (-1.0 / slope.q); } offset.x = sqrt((double)(mid * mid / (inverse_slope.q * inverse_slope.q + 1.0))); offset.y = (double)(offset.x * inverse_slope.q); dot_product = dy.q * offset.x - dx.q * offset.y; if (dot_product > 0.0) { box_p[2].x = polygon_primitive[n].point.x - offset.x; box_p[2].y = polygon_primitive[n].point.y - offset.y; box_p[3].x = polygon_primitive[i].point.x - offset.x; box_p[3].y = polygon_primitive[i].point.y - offset.y; box_q[2].x = polygon_primitive[n].point.x + offset.x; box_q[2].y = polygon_primitive[n].point.y + offset.y; box_q[3].x = polygon_primitive[i].point.x + offset.x; box_q[3].y = polygon_primitive[i].point.y + offset.y; } else { box_p[2].x = polygon_primitive[n].point.x + offset.x; box_p[2].y = polygon_primitive[n].point.y + offset.y; box_p[3].x = polygon_primitive[i].point.x + offset.x; box_p[3].y = polygon_primitive[i].point.y + offset.y; box_q[2].x = polygon_primitive[n].point.x - offset.x; box_q[2].y = polygon_primitive[n].point.y - offset.y; box_q[3].x = polygon_primitive[i].point.x - offset.x; box_q[3].y = polygon_primitive[i].point.y - offset.y; } if (fabs((double)(slope.p - slope.q)) < DrawEpsilon) { box_p[4] = box_p[1]; box_q[4] = box_q[1]; } else { box_p[4].x = (double)((slope.p * box_p[0].x - box_p[0].y - slope.q * box_p[3].x + box_p[3].y) / (slope.p - slope.q)); box_p[4].y = (double)(slope.p * (box_p[4].x - box_p[0].x) + box_p[0].y); box_q[4].x = (double)((slope.p * box_q[0].x - box_q[0].y - slope.q * box_q[3].x + box_q[3].y) / (slope.p - slope.q)); box_q[4].y = (double)(slope.p * (box_q[4].x - box_q[0].x) + box_q[0].y); } if (q >= (ssize_t) (max_strokes - 6 * BezierQuantum - 360)) { if (~max_strokes < (6 * BezierQuantum + 360)) { path_p = (PointInfo *) RelinquishMagickMemory(path_p); path_q = (PointInfo *) RelinquishMagickMemory(path_q); } else { max_strokes += 6 * BezierQuantum + 360; path_p = (PointInfo *) ResizeQuantumMemory(path_p, max_strokes, sizeof(*path_p)); path_q = (PointInfo *) ResizeQuantumMemory(path_q, max_strokes, sizeof(*path_q)); } if ((path_p == (PointInfo *) NULL) || (path_q == (PointInfo *) NULL)) { if (path_p != (PointInfo *) NULL) path_p = (PointInfo *) RelinquishMagickMemory(path_p); if (path_q != (PointInfo *) NULL) path_q = (PointInfo *) RelinquishMagickMemory(path_q); polygon_primitive = (PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive); return ((PrimitiveInfo *) NULL); } } dot_product = dx.q * dy.p - dx.p * dy.q; if (dot_product <= 0.0) switch (draw_info->linejoin) { case BevelJoin: { path_q[q++] = box_q[1]; path_q[q++] = box_q[2]; dot_product = (box_q[4].x - box_p[4].x) * (box_q[4].x - box_p[4].x) + (box_q[4].y - box_p[4].y) * (box_q[4].y - box_p[4].y); if (dot_product <= miterlimit) path_p[p++] = box_p[4]; else { path_p[p++] = box_p[1]; path_p[p++] = box_p[2]; } break; } case MiterJoin: { dot_product = (box_q[4].x - box_p[4].x) * (box_q[4].x - box_p[4].x) + (box_q[4].y - box_p[4].y) * (box_q[4].y - box_p[4].y); if (dot_product <= miterlimit) { path_q[q++] = box_q[4]; path_p[p++] = box_p[4]; } else { path_q[q++] = box_q[1]; path_q[q++] = box_q[2]; path_p[p++] = box_p[1]; path_p[p++] = box_p[2]; } break; } case RoundJoin: { dot_product = (box_q[4].x - box_p[4].x) * (box_q[4].x - box_p[4].x) + (box_q[4].y - box_p[4].y) * (box_q[4].y - box_p[4].y); if (dot_product <= miterlimit) path_p[p++] = box_p[4]; else { path_p[p++] = box_p[1]; path_p[p++] = box_p[2]; } center = polygon_primitive[n].point; theta.p = atan2(box_q[1].y - 
center.y, box_q[1].x - center.x); theta.q = atan2(box_q[2].y - center.y, box_q[2].x - center.x); if (theta.q < theta.p) theta.q += 2.0 * MagickPI; arc_segments = (size_t) ceil((double)((theta.q - theta.p) / (2.0 * sqrt((double)(1.0 / mid))))); path_q[q].x = box_q[1].x; path_q[q].y = box_q[1].y; q++; for (j = 1; j < (ssize_t) arc_segments; j++) { delta_theta = (double)(j * (theta.q - theta.p) / arc_segments); path_q[q].x = (double)(center.x + mid * cos(fmod((double) (theta.p + delta_theta), DegreesToRadians(360.0)))); path_q[q].y = (double)(center.y + mid * sin(fmod((double) (theta.p + delta_theta), DegreesToRadians(360.0)))); q++; } path_q[q++] = box_q[2]; break; } default: break; } else switch (draw_info->linejoin) { case BevelJoin: { path_p[p++] = box_p[1]; path_p[p++] = box_p[2]; dot_product = (box_q[4].x - box_p[4].x) * (box_q[4].x - box_p[4].x) + (box_q[4].y - box_p[4].y) * (box_q[4].y - box_p[4].y); if (dot_product <= miterlimit) path_q[q++] = box_q[4]; else { path_q[q++] = box_q[1]; path_q[q++] = box_q[2]; } break; } case MiterJoin: { dot_product = (box_q[4].x - box_p[4].x) * (box_q[4].x - box_p[4].x) + (box_q[4].y - box_p[4].y) * (box_q[4].y - box_p[4].y); if (dot_product <= miterlimit) { path_q[q++] = box_q[4]; path_p[p++] = box_p[4]; } else { path_q[q++] = box_q[1]; path_q[q++] = box_q[2]; path_p[p++] = box_p[1]; path_p[p++] = box_p[2]; } break; } case RoundJoin: { dot_product = (box_q[4].x - box_p[4].x) * (box_q[4].x - box_p[4].x) + (box_q[4].y - box_p[4].y) * (box_q[4].y - box_p[4].y); if (dot_product <= miterlimit) path_q[q++] = box_q[4]; else { path_q[q++] = box_q[1]; path_q[q++] = box_q[2]; } center = polygon_primitive[n].point; theta.p = atan2(box_p[1].y - center.y, box_p[1].x - center.x); theta.q = atan2(box_p[2].y - center.y, box_p[2].x - center.x); if (theta.p < theta.q) theta.p += 2.0 * MagickPI; arc_segments = (size_t) ceil((double)((theta.p - theta.q) / (2.0 * sqrt((double)(1.0 / mid))))); path_p[p++] = box_p[1]; for (j = 1; j < (ssize_t) arc_segments; j++) { delta_theta = (double)(j * (theta.q - theta.p) / arc_segments); path_p[p].x = (double)(center.x + mid * cos(fmod((double) (theta.p + delta_theta), DegreesToRadians(360.0)))); path_p[p].y = (double)(center.y + mid * sin(fmod((double) (theta.p + delta_theta), DegreesToRadians(360.0)))); p++; } path_p[p++] = box_p[2]; break; } default: break; } slope.p = slope.q; inverse_slope.p = inverse_slope.q; box_p[0] = box_p[2]; box_p[1] = box_p[3]; box_q[0] = box_q[2]; box_q[1] = box_q[3]; dx.p = dx.q; dy.p = dy.q; n = i; } path_p[p++] = box_p[1]; path_q[q++] = box_q[1]; /* * Trace stroked polygon. 
*/ stroke_polygon = (PrimitiveInfo *) AcquireQuantumMemory((size_t) (p + q + 2UL * closed_path + 2UL), sizeof(*stroke_polygon)); if (stroke_polygon != (PrimitiveInfo *) NULL) { for (i = 0; i < (ssize_t) p; i++) { stroke_polygon[i] = polygon_primitive[0]; stroke_polygon[i].point = path_p[i]; } if (closed_path != MagickFalse) { stroke_polygon[i] = polygon_primitive[0]; stroke_polygon[i].point = stroke_polygon[0].point; i++; } for (; i < (ssize_t) (p + q + closed_path); i++) { stroke_polygon[i] = polygon_primitive[0]; stroke_polygon[i].point = path_q[p + q + closed_path - (i + 1)]; } if (closed_path != MagickFalse) { stroke_polygon[i] = polygon_primitive[0]; stroke_polygon[i].point = stroke_polygon[p + closed_path].point; i++; } stroke_polygon[i] = polygon_primitive[0]; stroke_polygon[i].point = stroke_polygon[0].point; i++; stroke_polygon[i].primitive = UndefinedPrimitive; stroke_polygon[0].coordinates = (size_t) (p + q + 2 * closed_path + 1); } path_p = (PointInfo *) RelinquishMagickMemory(path_p); path_q = (PointInfo *) RelinquishMagickMemory(path_q); polygon_primitive = (PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive); return (stroke_polygon); }
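For reference, Permutate() above computes the binomial coefficient C(n,k) = n!/(k!(n-k)!), and TraceBezier() uses it to evaluate the Bernstein form of a Bezier curve: the coefficient for control point j of an n-point curve is C(n-1,j), while the running alpha term steps through (1-t)^(n-1-j) * t^j. The following stand-alone sketch is not ImageMagick code; it only mirrors that arithmetic, and the control points are made up for illustration. For a 3-point curve P0=(0,0), P1=(1,2), P2=(2,0) evaluated at t = 0.5 it prints B(0.5) = (1, 1).

#include <cmath>
#include <cstdio>

// Same arithmetic as Permutate() above: the binomial coefficient C(n, k).
static double Binomial(const long n, const long k)
{
  double r = 1.0;
  for (long i = k + 1; i <= n; i++) r *= i;
  for (long i = 1; i <= (n - k); i++) r /= i;
  return r;
}

int main(void)
{
  // Hypothetical control points of a 3-point (quadratic) Bezier curve.
  const double px[3] = { 0.0, 1.0, 2.0 };
  const double py[3] = { 0.0, 2.0, 0.0 };
  const long n = 3;       // plays the role of number_coordinates in TraceBezier
  const double t = 0.5;   // plays the role of 'weight'
  double x = 0.0, y = 0.0;
  double alpha = std::pow(1.0 - t, (double) (n - 1));  // (1-t)^(n-1), as in TraceBezier
  for (long j = 0; j < n; j++)
  {
    const double c = Binomial(n - 1, j);               // Bernstein coefficient C(n-1, j)
    x += alpha * c * px[j];
    y += alpha * c * py[j];
    alpha *= t / (1.0 - t);                            // advance to (1-t)^(n-1-(j+1)) * t^(j+1)
  }
  std::printf("B(0.5) = (%g, %g)\n", x, y);            // prints B(0.5) = (1, 1)
  return 0;
}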
openmp_utils.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ \. // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Riccardo Rossi // #ifndef KRATOS_OPENMP_UTILS_H #define KRATOS_OPENMP_UTILS_H #include <stdio.h> #include <vector> #include <iostream> #ifdef _OPENMP #include <omp.h> #else #include <ctime> #endif namespace Kratos { ///@addtogroup KratosCore ///@{ ///@name Kratos Classes ///@{ /// Implements basic tasks for OpenMP parallelism and suitable scalar alternatives /** This class defines utility functions that implement some basic OpenMP capabilities and an equivalent scalar alternative to use in compilations where OpenMP is not enabled. The idea is to allow Kratos developers to design their code in parallel, knowing that it will work in scalar runs as well. */ class OpenMPUtils { public: ///@name Type definitions ///@{ /// Vector type for the output of DivideInPartitions method /** * @see OpenMPUtils::DivideInPartitions */ typedef std::vector<int> PartitionVector; ///@} ///@name Operations ///@{ /// Wrapper for omp_get_max_threads(). /** @return Maximum number of OpenMP threads that will be used in parallel regions. */ static inline int GetNumThreads() { #ifdef _OPENMP return omp_get_max_threads(); #else return 1; #endif } /// Wrapper for omp_get_thread_num(). /** @return The thread number for this thread, 0 if scalar run. */ static inline int ThisThread() { #ifdef _OPENMP return omp_get_thread_num(); #else return 0; #endif } /// Wrapper for omp_in_parallel(). /** @return Maximum number of OpenMP threads that will be used in parallel regions. */ static inline int IsInParallel() { #ifdef _OPENMP return omp_in_parallel(); #else return 0; #endif } /// Timing routine. /** Determine the current time by calling an appropiate (scalar or parallel) timer class. @return Current time */ static double GetCurrentTime() { #ifndef _OPENMP return std::clock()/static_cast<double>(CLOCKS_PER_SEC); #else return omp_get_wtime(); #endif } /// Divide an array of length NumTerms between NumThreads threads. /** Creates a std::vector containing NumThreads + 1 terms, where term k is the first and position of the array that corresponds to thread k. The k+1 term is the end of the array, so that the vector can be used to iterate the array between 'k' and 'k+1' in each thread. @param NumTerms Number of objects to be divided between the threads. @param NumThreads The number of parallel threads that will be used. @param Partitions This object will contain the begin and end positions for each thread. */ static inline void DivideInPartitions( const int NumTerms, const int NumThreads, PartitionVector& Partitions) { Partitions.resize(NumThreads + 1); int PartitionSize = NumTerms / NumThreads; Partitions[0] = 0; Partitions[NumThreads] = NumTerms; for(int i = 1; i < NumThreads; i++) Partitions[i] = Partitions[i-1] + PartitionSize ; } /// Generate a partition for an std::vector-like array, providing iterators to the begin and end positions for each thread. /** This function assumes that the vector class will have an iterator type and implement begin(), end() and size() methods. * @param rVector An arary containing the elements to be distributed between the threads. * @param rBegin Iterator pointing to the first element in rVector to be used in the current thread. * @param rEnd Iterator pointing to the end position for the current thread in rVector. 
*/ template< class TVector > static void PartitionedIterators(TVector& rVector, typename TVector::iterator& rBegin, typename TVector::iterator& rEnd) { #ifdef _OPENMP int NumTerms = rVector.size(); int ThreadNum = omp_get_thread_num(); int NumThreads = omp_get_max_threads(); int PartitionSize = NumTerms / NumThreads; // Set Partition start rBegin = rVector.begin() + ThreadNum * PartitionSize; // Partition ends after 'PartitionSize' terms, except if this is the last partition if ( (ThreadNum + 1) != NumThreads ) rEnd = rBegin + PartitionSize; else rEnd = rVector.end(); #else rBegin = rVector.begin(); rEnd = rVector.end(); #endif } /// A function to set the number of threads from Python. /** This is an auxiliary mainly intended for test purposes, to help with the detection of race conditions. @param NumThreads Number of threads to use in parallel regions. Note that values greater than the environment variable OMP_NUM_THREADS will be ignored. */ static inline void SetNumThreads(int NumThreads = 1) { #ifdef _OPENMP int procs = omp_get_num_procs(); if( procs < NumThreads ){ std::cout<<" WARNING: Maximimun number of threads is EXCEEDED "<<std::endl; /* Set thread number */ omp_set_num_threads(procs); std::cout<<" Number of Threads Set To : "<<procs<<std::endl; } else{ /* Set thread number */ omp_set_num_threads(NumThreads); } #endif } /** A method to print the OMP information */ static inline void PrintOMPInfo() { #ifdef _OPENMP int nthreads,tid, procs, maxt, inpar, dynamic, nested; /* Start parallel region */ #pragma omp parallel private(nthreads, tid) { /* Obtain thread number */ tid = omp_get_thread_num(); /* Only master thread does this */ if (tid == 0) { printf(" Thread %d getting environment info...\n", tid); /* Get environment information */ procs = omp_get_num_procs(); nthreads = omp_get_num_threads(); maxt = omp_get_max_threads(); inpar = omp_in_parallel(); //omp_set_dynamic(true); dynamic = omp_get_dynamic(); //omp_set_nested(true); nested = omp_get_nested(); /* Print environment information */ printf( " | ------------ OMP IN USE --------- |\n"); printf( " | Machine number of processors = %d |\n", procs); printf( " | Number of threads set = %d |\n", nthreads); printf( " | Max threads in use = %d |\n", maxt); printf( " | In parallel? = %d |\n", inpar); printf( " | Dynamic threads enabled? = %d |\n", dynamic); printf( " | Nested parallelism supported? = %d |\n", nested); printf( " | --------------------------------- |\n"); if( procs < nthreads ) std::cout<<" ( WARNING: Maximimun number of threads is EXCEEDED )"<<std::endl; } } #endif } template<class T> static inline void CreatePartition(unsigned int number_of_threads, const int number_of_rows, T& partitions) { partitions.resize(number_of_threads+1); int partition_size = number_of_rows / number_of_threads; partitions[0] = 0; partitions[number_of_threads] = number_of_rows; for(unsigned int i = 1; i<number_of_threads; i++) partitions[i] = partitions[i-1] + partition_size ; } ///@} //Operations }; ///@} //Kratos classes ///@} addtogroup block } #endif /* KRATOS_OPENMP_UTILS_H */
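To make the partitioning concrete: DivideInPartitions() above hands thread k the half-open range [Partitions[k], Partitions[k+1]), and because Partitions[NumThreads] is pinned to NumTerms, any remainder of the integer division lands in the last partition. The stand-alone sketch below reproduces the same arithmetic outside Kratos; for 10 terms on 3 threads it prints 0 3 6 10, so the last thread processes 4 items while the others get 3.

#include <iostream>
#include <vector>

// Same arithmetic as OpenMPUtils::DivideInPartitions above, reproduced here so it
// can be compiled and run on its own.
void DivideInPartitions(const int NumTerms, const int NumThreads,
                        std::vector<int>& Partitions)
{
  Partitions.resize(NumThreads + 1);
  const int PartitionSize = NumTerms / NumThreads;
  Partitions[0] = 0;
  Partitions[NumThreads] = NumTerms;
  for (int i = 1; i < NumThreads; i++)
    Partitions[i] = Partitions[i - 1] + PartitionSize;
}

int main()
{
  std::vector<int> partitions;
  DivideInPartitions(10, 3, partitions);
  for (int p : partitions) std::cout << p << ' ';   // prints: 0 3 6 10
  std::cout << '\n';
  return 0;
}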
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ \. // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Riccardo Rossi // #ifndef KRATOS_OPENMP_UTILS_H #define KRATOS_OPENMP_UTILS_H #include <stdio.h> #include <vector> #include <iostream> namespace Kratos { ///@addtogroup KratosCore ///@{ ///@name Kratos Classes ///@{ /// Implements basic tasks for OpenMP parallelism and suitable scalar alternatives /** This class defines utility functions that implement some basic OpenMP capabilities and an equivalent scalar alternative to use in compilations where OpenMP is not enabled. The idea is to allow Kratos developers to design their code in parallel, knowing that it will work in scalar runs as well. */ class OpenMPUtils { public: ///@name Type definitions ///@{ /// Vector type for the output of DivideInPartitions method /** * @see OpenMPUtils::DivideInPartitions */ typedef std::vector<int> PartitionVector; ///@} ///@name Operations ///@{ /// Wrapper for omp_get_max_threads(). /** @return Maximum number of OpenMP threads that will be used in parallel regions. */ static inline int GetNumThreads() { } /// Wrapper for omp_get_thread_num(). /** @return The thread number for this thread, 0 if scalar run. */ static inline int ThisThread() { } /// Wrapper for omp_in_parallel(). /** @return Maximum number of OpenMP threads that will be used in parallel regions. */ static inline int IsInParallel() { } /// Timing routine. /** Determine the current time by calling an appropiate (scalar or parallel) timer class. @return Current time */ static double GetCurrentTime() { #ifndef _OPENMP return std::clock()/static_cast<double>(CLOCKS_PER_SEC); #else return omp_get_wtime(); #endif } /// Divide an array of length NumTerms between NumThreads threads. /** Creates a std::vector containing NumThreads + 1 terms, where term k is the first and position of the array that corresponds to thread k. The k+1 term is the end of the array, so that the vector can be used to iterate the array between 'k' and 'k+1' in each thread. @param NumTerms Number of objects to be divided between the threads. @param NumThreads The number of parallel threads that will be used. @param Partitions This object will contain the begin and end positions for each thread. */ static inline void DivideInPartitions( const int NumTerms, const int NumThreads, PartitionVector& Partitions) { Partitions.resize(NumThreads + 1); int PartitionSize = NumTerms / NumThreads; Partitions[0] = 0; Partitions[NumThreads] = NumTerms; for(int i = 1; i < NumThreads; i++) Partitions[i] = Partitions[i-1] + PartitionSize ; } /// Generate a partition for an std::vector-like array, providing iterators to the begin and end positions for each thread. /** This function assumes that the vector class will have an iterator type and implement begin(), end() and size() methods. * @param rVector An arary containing the elements to be distributed between the threads. * @param rBegin Iterator pointing to the first element in rVector to be used in the current thread. * @param rEnd Iterator pointing to the end position for the current thread in rVector. */ template< class TVector > static void PartitionedIterators(TVector& rVector, typename TVector::iterator& rBegin, typename TVector::iterator& rEnd) { } /// A function to set the number of threads from Python. /** This is an auxiliary mainly intended for test purposes, to help with the detection of race conditions. 
@param NumThreads Number of threads to use in parallel regions. Note that values greater than the environment variable OMP_NUM_THREADS will be ignored. */ static inline void SetNumThreads(int NumThreads = 1) { } /** A method to print the OMP information */ static inline void PrintOMPInfo() { } template<class T> static inline void CreatePartition(unsigned int number_of_threads, const int number_of_rows, T& partitions) { partitions.resize(number_of_threads+1); int partition_size = number_of_rows / number_of_threads; partitions[0] = 0; partitions[number_of_threads] = number_of_rows; for(unsigned int i = 1; i<number_of_threads; i++) partitions[i] = partitions[i-1] + partition_size ; } ///@} //Operations }; ///@} //Kratos classes ///@} addtogroup block } #endif /* KRATOS_OPENMP_UTILS_H */
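GetCurrentTime() is the one wrapper that keeps its #ifdef-selected body in this OpenMP-stripped variant: omp_get_wtime() (wall-clock time) when _OPENMP is defined, std::clock() (CPU time) otherwise. The sketch below is not Kratos code; it just shows that timing pattern in a self-contained, compilable form, with a filler loop standing in for the work being timed.

#include <iostream>
#ifdef _OPENMP
#include <omp.h>
#else
#include <ctime>
#endif

static double GetCurrentTime()
{
#ifdef _OPENMP
  return omp_get_wtime();                                      // wall-clock time
#else
  return std::clock() / static_cast<double>(CLOCKS_PER_SEC);   // CPU-time fallback
#endif
}

int main()
{
  const double t0 = GetCurrentTime();
  double sum = 0.0;
  for (int i = 1; i <= 1000000; ++i) sum += 1.0 / i;           // filler work to time
  const double t1 = GetCurrentTime();
  std::cout << "sum = " << sum << ", elapsed = " << (t1 - t0) << " s\n";
  return 0;
}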
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ \. // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Riccardo Rossi // #ifndef KRATOS_OPENMP_UTILS_H #define KRATOS_OPENMP_UTILS_H #include <stdio.h> #include <vector> #include <iostream> #ifdef _OPENMP #include <omp.h> #else #include <ctime> #endif namespace Kratos { ///@addtogroup KratosCore ///@{ ///@name Kratos Classes ///@{ /// Implements basic tasks for OpenMP parallelism and suitable scalar alternatives /** This class defines utility functions that implement some basic OpenMP capabilities and an equivalent scalar alternative to use in compilations where OpenMP is not enabled. The idea is to allow Kratos developers to design their code in parallel, knowing that it will work in scalar runs as well. */ class OpenMPUtils { public: ///@name Type definitions ///@{ /// Vector type for the output of DivideInPartitions method /** * @see OpenMPUtils::DivideInPartitions */ typedef std::vector<int> PartitionVector; ///@} ///@name Operations ///@{ /// Wrapper for omp_get_max_threads(). /** @return Maximum number of OpenMP threads that will be used in parallel regions. */ static inline int GetNumThreads() { #ifdef _OPENMP return omp_get_max_threads(); #else return 1; #endif } /// Wrapper for omp_get_thread_num(). /** @return The thread number for this thread, 0 if scalar run. */ static inline int ThisThread() { #ifdef _OPENMP return omp_get_thread_num(); #else return 0; #endif } /// Wrapper for omp_in_parallel(). /** @return Maximum number of OpenMP threads that will be used in parallel regions. */ static inline int IsInParallel() { #ifdef _OPENMP return omp_in_parallel(); #else return 0; #endif } /// Timing routine. /** Determine the current time by calling an appropiate (scalar or parallel) timer class. @return Current time */ static double GetCurrentTime() { #ifndef _OPENMP return std::clock()/static_cast<double>(CLOCKS_PER_SEC); #else return omp_get_wtime(); #endif } /// Divide an array of length NumTerms between NumThreads threads. /** Creates a std::vector containing NumThreads + 1 terms, where term k is the first and position of the array that corresponds to thread k. The k+1 term is the end of the array, so that the vector can be used to iterate the array between 'k' and 'k+1' in each thread. @param NumTerms Number of objects to be divided between the threads. @param NumThreads The number of parallel threads that will be used. @param Partitions This object will contain the begin and end positions for each thread. */ static inline void DivideInPartitions( const int NumTerms, const int NumThreads, PartitionVector& Partitions) { Partitions.resize(NumThreads + 1); int PartitionSize = NumTerms / NumThreads; Partitions[0] = 0; Partitions[NumThreads] = NumTerms; for(int i = 1; i < NumThreads; i++) Partitions[i] = Partitions[i-1] + PartitionSize ; } /// Generate a partition for an std::vector-like array, providing iterators to the begin and end positions for each thread. /** This function assumes that the vector class will have an iterator type and implement begin(), end() and size() methods. * @param rVector An arary containing the elements to be distributed between the threads. * @param rBegin Iterator pointing to the first element in rVector to be used in the current thread. * @param rEnd Iterator pointing to the end position for the current thread in rVector. 
*/ template< class TVector > static void PartitionedIterators(TVector& rVector, typename TVector::iterator& rBegin, typename TVector::iterator& rEnd) { #ifdef _OPENMP int NumTerms = rVector.size(); int ThreadNum = omp_get_thread_num(); int NumThreads = omp_get_max_threads(); int PartitionSize = NumTerms / NumThreads; // Set Partition start rBegin = rVector.begin() + ThreadNum * PartitionSize; // Partition ends after 'PartitionSize' terms, except if this is the last partition if ( (ThreadNum + 1) != NumThreads ) rEnd = rBegin + PartitionSize; else rEnd = rVector.end(); #else rBegin = rVector.begin(); rEnd = rVector.end(); #endif } /// A function to set the number of threads from Python. /** This is an auxiliary mainly intended for test purposes, to help with the detection of race conditions. @param NumThreads Number of threads to use in parallel regions. Note that values greater than the environment variable OMP_NUM_THREADS will be ignored. */ static inline void SetNumThreads(int NumThreads = 1) { #ifdef _OPENMP int procs = omp_get_num_procs(); if( procs < NumThreads ){ std::cout<<" WARNING: Maximimun number of threads is EXCEEDED "<<std::endl; /* Set thread number */ omp_set_num_threads(procs); std::cout<<" Number of Threads Set To : "<<procs<<std::endl; } else{ /* Set thread number */ omp_set_num_threads(NumThreads); } #endif } /** A method to print the OMP information */ static inline void PrintOMPInfo() { #ifdef _OPENMP int nthreads,tid, procs, maxt, inpar, dynamic, nested; /* Start parallel region */ #pragma omp parallel private(nthreads, tid) { /* Obtain thread number */ tid = omp_get_thread_num(); /* Only master thread does this */ if (tid == 0) { printf(" Thread %d getting environment info...\n", tid); /* Get environment information */ procs = omp_get_num_procs(); nthreads = omp_get_num_threads(); maxt = omp_get_max_threads(); inpar = omp_in_parallel(); //omp_set_dynamic(true); dynamic = omp_get_dynamic(); //omp_set_nested(true); nested = omp_get_nested(); /* Print environment information */ printf( " | ------------ OMP IN USE --------- |\n"); printf( " | Machine number of processors = %d |\n", procs); printf( " | Number of threads set = %d |\n", nthreads); printf( " | Max threads in use = %d |\n", maxt); printf( " | In parallel? = %d |\n", inpar); printf( " | Dynamic threads enabled? = %d |\n", dynamic); printf( " | Nested parallelism supported? = %d |\n", nested); printf( " | --------------------------------- |\n"); if( procs < nthreads ) std::cout<<" ( WARNING: Maximimun number of threads is EXCEEDED )"<<std::endl; } } #endif } template<class T> static inline void CreatePartition(unsigned int number_of_threads, const int number_of_rows, T& partitions) { partitions.resize(number_of_threads+1); int partition_size = number_of_rows / number_of_threads; partitions[0] = 0; partitions[number_of_threads] = number_of_rows; for(unsigned int i = 1; i<number_of_threads; i++) partitions[i] = partitions[i-1] + partition_size ; } ///@} //Operations }; ///@} //Kratos classes ///@} addtogroup block } #endif /* KRATOS_OPENMP_UTILS_H */
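These wrappers are typically combined the way the Bossak scheme further below uses them: the node range is divided once outside the parallel region, then each thread calls ThisThread() and walks only its own [begin, end) slice. The following is a hedged, self-contained sketch of that pattern using plain OpenMP calls instead of the Kratos wrappers; ProcessNode is a hypothetical stand-in for the per-node work.

#include <vector>
#ifdef _OPENMP
#include <omp.h>
#endif

void ProcessNode(int /*node*/) { /* per-node work would go here */ }

void ParallelNodeLoop(const int NumNodes)
{
#ifdef _OPENMP
  const int NumThreads = omp_get_max_threads();
#else
  const int NumThreads = 1;
#endif
  // Same partitioning arithmetic as OpenMPUtils::DivideInPartitions above.
  std::vector<int> Partition(NumThreads + 1);
  const int PartitionSize = NumNodes / NumThreads;
  Partition[0] = 0;
  Partition[NumThreads] = NumNodes;
  for (int i = 1; i < NumThreads; i++)
    Partition[i] = Partition[i - 1] + PartitionSize;

  // Each thread processes only its own slice; the slices do not overlap.
#pragma omp parallel
  {
#ifdef _OPENMP
    const int k = omp_get_thread_num();
#else
    const int k = 0;
#endif
    for (int i = Partition[k]; i < Partition[k + 1]; i++)
      ProcessNode(i);
  }
}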
residualbased_predictorcorrector_velocity_bossak_scheme_turbulent.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Jordi Cotela // #if !defined(KRATOS_RESIDUALBASED_PREDICTOR_CORRECTOR_VELOCITY_BOSSAK_TURBULENT_SCHEME ) #define KRATOS_RESIDUALBASED_PREDICTOR_CORRECTOR_VELOCITY_BOSSAK_TURBULENT_SCHEME /* System includes */ /* External includes */ #include "boost/smart_ptr.hpp" /* Project includes */ #include "includes/define.h" #include "includes/model_part.h" #include "includes/deprecated_variables.h" #include "solving_strategies/schemes/scheme.h" #include "includes/variables.h" #include "includes/cfd_variables.h" #include "containers/array_1d.h" #include "utilities/openmp_utils.h" #include "utilities/dof_updater.h" #include "utilities/coordinate_transformation_utilities.h" #include "processes/process.h" namespace Kratos { /**@name Kratos Globals */ /*@{ */ /*@} */ /**@name Type Definitions */ /*@{ */ /*@} */ /**@name Enum's */ /*@{ */ /*@} */ /**@name Functions */ /*@{ */ /*@} */ /**@name Kratos Classes */ /*@{ */ /// Bossak time scheme for the incompressible flow problem. /** This class provides a second order time scheme of the generalized-alpha Newmark family of methods. It also includes code required to implement slip conditions on the incompressible flow problem and provides the possibility of using a RANS model by passing a turbulence model as an argument to the constructor. This time scheme is intended to be used in combination with elements of type ASGS2D, ASGS3D, VMS or derived classes. To use the slip condition, set the SLIP flag on slip wall nodes. To use a wall law in combination with the slip condition, use MonolithicWallCondition to mesh the boundary @see ASGS2D, ASGS3D, VMS, MonolithicWallConditon */ template<class TSparseSpace, class TDenseSpace //= DenseSpace<double> > class ResidualBasedPredictorCorrectorVelocityBossakSchemeTurbulent : public Scheme<TSparseSpace, TDenseSpace> { public: /**@name Type Definitions */ /*@{ */ KRATOS_CLASS_POINTER_DEFINITION(ResidualBasedPredictorCorrectorVelocityBossakSchemeTurbulent); typedef Scheme<TSparseSpace, TDenseSpace> BaseType; typedef typename BaseType::TDataType TDataType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename Element::DofsVectorType DofsVectorType; typedef typename BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType; typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType; typedef Element::GeometryType GeometryType; /*@} */ /**@name Life Cycle */ /*@{ */ /** Constructor without a turbulence model */ ResidualBasedPredictorCorrectorVelocityBossakSchemeTurbulent( double NewAlphaBossak, double MoveMeshStrategy, unsigned int DomainSize) : Scheme<TSparseSpace, TDenseSpace>(), mRotationTool(DomainSize,DomainSize+1,SLIP), // Second argument is number of matrix rows per node: monolithic elements have velocity and pressure dofs. 
mrPeriodicIdVar(Kratos::Variable<int>::StaticObject()) { //default values for the Newmark Scheme mAlphaBossak = NewAlphaBossak; mBetaNewmark = 0.25 * pow((1.00 - mAlphaBossak), 2); mGammaNewmark = 0.5 - mAlphaBossak; mMeshVelocity = MoveMeshStrategy; //Allocate auxiliary memory int NumThreads = ParallelUtilities::GetNumThreads(); mMass.resize(NumThreads); mDamp.resize(NumThreads); mvel.resize(NumThreads); macc.resize(NumThreads); maccold.resize(NumThreads); } /** Constructor without a turbulence model with periodic conditions */ ResidualBasedPredictorCorrectorVelocityBossakSchemeTurbulent( double NewAlphaBossak, unsigned int DomainSize, const Variable<int>& rPeriodicIdVar) : Scheme<TSparseSpace, TDenseSpace>(), mRotationTool(DomainSize,DomainSize+1,SLIP), // Second argument is number of matrix rows per node: monolithic elements have velocity and pressure dofs. mrPeriodicIdVar(rPeriodicIdVar) { //default values for the Newmark Scheme mAlphaBossak = NewAlphaBossak; mBetaNewmark = 0.25 * pow((1.00 - mAlphaBossak), 2); mGammaNewmark = 0.5 - mAlphaBossak; mMeshVelocity = 0.0; //Allocate auxiliary memory int NumThreads = ParallelUtilities::GetNumThreads(); mMass.resize(NumThreads); mDamp.resize(NumThreads); mvel.resize(NumThreads); macc.resize(NumThreads); maccold.resize(NumThreads); } /** Constructor without a turbulence model */ ResidualBasedPredictorCorrectorVelocityBossakSchemeTurbulent( double NewAlphaBossak, double MoveMeshStrategy, unsigned int DomainSize, Kratos::Flags& rSlipFlag) : Scheme<TSparseSpace, TDenseSpace>(), mRotationTool(DomainSize,DomainSize+1,rSlipFlag), // Second argument is number of matrix rows per node: monolithic elements have velocity and pressure dofs. mrPeriodicIdVar(Kratos::Variable<int>::StaticObject()) { //default values for the Newmark Scheme mAlphaBossak = NewAlphaBossak; mBetaNewmark = 0.25 * pow((1.00 - mAlphaBossak), 2); mGammaNewmark = 0.5 - mAlphaBossak; mMeshVelocity = MoveMeshStrategy; //Allocate auxiliary memory int NumThreads = ParallelUtilities::GetNumThreads(); mMass.resize(NumThreads); mDamp.resize(NumThreads); mvel.resize(NumThreads); macc.resize(NumThreads); maccold.resize(NumThreads); } /** Constructor with a turbulence model */ ResidualBasedPredictorCorrectorVelocityBossakSchemeTurbulent( double NewAlphaBossak, double MoveMeshStrategy, unsigned int DomainSize, Process::Pointer pTurbulenceModel) : Scheme<TSparseSpace, TDenseSpace>(), mRotationTool(DomainSize,DomainSize+1,SLIP), // Second argument is number of matrix rows per node: monolithic elements have velocity and pressure dofs mrPeriodicIdVar(Kratos::Variable<int>::StaticObject()), mpTurbulenceModel(pTurbulenceModel) { //default values for the Newmark Scheme mAlphaBossak = NewAlphaBossak; mBetaNewmark = 0.25 * pow((1.00 - mAlphaBossak), 2); mGammaNewmark = 0.5 - mAlphaBossak; mMeshVelocity = MoveMeshStrategy; //Allocate auxiliary memory int NumThreads = ParallelUtilities::GetNumThreads(); mMass.resize(NumThreads); mDamp.resize(NumThreads); mvel.resize(NumThreads); macc.resize(NumThreads); maccold.resize(NumThreads); } /** Constructor with a turbulence model and relaxation factor */ ResidualBasedPredictorCorrectorVelocityBossakSchemeTurbulent( double NewAlphaBossak, double MoveMeshStrategy, unsigned int DomainSize, const double RelaxationFactor, Process::Pointer pTurbulenceModel) : Scheme<TSparseSpace, TDenseSpace>(), mRotationTool(DomainSize,DomainSize+1,SLIP), // Second argument is number of matrix rows per node: monolithic elements have velocity and pressure dofs 
mrPeriodicIdVar(Kratos::Variable<int>::StaticObject()), mpTurbulenceModel(pTurbulenceModel) { //default values for the Newmark Scheme mAlphaBossak = NewAlphaBossak; mBetaNewmark = 0.25 * pow((1.00 - mAlphaBossak), 2); mGammaNewmark = 0.5 - mAlphaBossak; mMeshVelocity = MoveMeshStrategy; mRelaxationFactor = RelaxationFactor; //Allocate auxiliary memory int NumThreads = ParallelUtilities::GetNumThreads(); mMass.resize(NumThreads); mDamp.resize(NumThreads); mvel.resize(NumThreads); macc.resize(NumThreads); maccold.resize(NumThreads); } /** Destructor. */ ~ResidualBasedPredictorCorrectorVelocityBossakSchemeTurbulent() override { } /*@} */ /**@name Operators */ /*@{ */ /** Performing the update of the solution. */ //*************************************************************************** void Update(ModelPart& r_model_part, DofsArrayType& rDofSet, TSystemMatrixType& A, TSystemVectorType& Dv, TSystemVectorType& b) override { KRATOS_TRY; mRotationTool.RotateVelocities(r_model_part); TSparseSpace::InplaceMult(Dv, mRelaxationFactor); mpDofUpdater->UpdateDofs(rDofSet,Dv); mRotationTool.RecoverVelocities(r_model_part); AdditionalUpdateOperations(r_model_part, rDofSet, A, Dv, b); KRATOS_CATCH("") } //*************************************************************************** void AdditionalUpdateOperations(ModelPart& rModelPart, DofsArrayType& rDofSet, TSystemMatrixType& A, TSystemVectorType& Dv, TSystemVectorType& b) { KRATOS_TRY int NumThreads = ParallelUtilities::GetNumThreads(); OpenMPUtils::PartitionVector NodePartition; OpenMPUtils::DivideInPartitions(rModelPart.Nodes().size(), NumThreads, NodePartition); //updating time derivatives (nodally for efficiency) #pragma omp parallel { array_1d<double, 3 > DeltaVel; int k = OpenMPUtils::ThisThread(); ModelPart::NodeIterator NodesBegin = rModelPart.NodesBegin() + NodePartition[k]; ModelPart::NodeIterator NodesEnd = rModelPart.NodesBegin() + NodePartition[k + 1]; for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; itNode++) { noalias(DeltaVel) = (itNode)->FastGetSolutionStepValue(VELOCITY) - (itNode)->FastGetSolutionStepValue(VELOCITY, 1); array_1d<double, 3 > & CurrentAcceleration = (itNode)->FastGetSolutionStepValue(ACCELERATION, 0); array_1d<double, 3 > & OldAcceleration = (itNode)->FastGetSolutionStepValue(ACCELERATION, 1); UpdateAcceleration(CurrentAcceleration, DeltaVel, OldAcceleration); if (mMeshVelocity == 2)//Lagrangian { if((itNode)->FastGetSolutionStepValue(IS_LAGRANGIAN_INLET) < 1e-15) { array_1d<double, 3 > & CurrentDisplacement = (itNode)->FastGetSolutionStepValue(DISPLACEMENT, 0); array_1d<double, 3 > & OldDisplacement = (itNode)->FastGetSolutionStepValue(DISPLACEMENT, 1); array_1d<double, 3 > & OldVelocity = (itNode)->FastGetSolutionStepValue(VELOCITY, 1); noalias(itNode->FastGetSolutionStepValue(MESH_VELOCITY)) = itNode->FastGetSolutionStepValue(VELOCITY); UpdateDisplacement(CurrentDisplacement, OldDisplacement, OldVelocity, OldAcceleration, CurrentAcceleration); } else { noalias(itNode->FastGetSolutionStepValue(MESH_VELOCITY)) = ZeroVector(3); noalias(itNode->FastGetSolutionStepValue(DISPLACEMENT)) = ZeroVector(3); } } } } KRATOS_CATCH("") } //*************************************************************************** //predicts the solution at the current step as // v = vold void Predict(ModelPart& rModelPart, DofsArrayType& rDofSet, TSystemMatrixType& A, TSystemVectorType& Dv, TSystemVectorType& b) override { // if (rModelPart.GetCommunicator().MyPID() == 0) // std::cout << "prediction" << std::endl; int 
NumThreads = ParallelUtilities::GetNumThreads(); OpenMPUtils::PartitionVector NodePartition; OpenMPUtils::DivideInPartitions(rModelPart.Nodes().size(), NumThreads, NodePartition); #pragma omp parallel { //array_1d<double, 3 > DeltaDisp; int k = OpenMPUtils::ThisThread(); ModelPart::NodeIterator NodesBegin = rModelPart.NodesBegin() + NodePartition[k]; ModelPart::NodeIterator NodesEnd = rModelPart.NodesBegin() + NodePartition[k + 1]; for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; itNode++) { array_1d<double, 3 > & OldVelocity = (itNode)->FastGetSolutionStepValue(VELOCITY, 1); double& OldPressure = (itNode)->FastGetSolutionStepValue(PRESSURE, 1); //predicting velocity //ATTENTION::: the prediction is performed only on free nodes array_1d<double, 3 > & CurrentVelocity = (itNode)->FastGetSolutionStepValue(VELOCITY); double& CurrentPressure = (itNode)->FastGetSolutionStepValue(PRESSURE); if ((itNode->pGetDof(VELOCITY_X))->IsFree()) (CurrentVelocity[0]) = OldVelocity[0]; if (itNode->pGetDof(VELOCITY_Y)->IsFree()) (CurrentVelocity[1]) = OldVelocity[1]; if (itNode->HasDofFor(VELOCITY_Z)) if (itNode->pGetDof(VELOCITY_Z)->IsFree()) (CurrentVelocity[2]) = OldVelocity[2]; if (itNode->pGetDof(PRESSURE)->IsFree()) CurrentPressure = OldPressure; // updating time derivatives ::: please note that displacements and // their time derivatives can not be consistently fixed separately array_1d<double, 3 > DeltaVel; noalias(DeltaVel) = CurrentVelocity - OldVelocity; array_1d<double, 3 > & OldAcceleration = (itNode)->FastGetSolutionStepValue(ACCELERATION, 1); array_1d<double, 3 > & CurrentAcceleration = (itNode)->FastGetSolutionStepValue(ACCELERATION); UpdateAcceleration(CurrentAcceleration, DeltaVel, OldAcceleration); if (mMeshVelocity == 2) //Lagrangian { array_1d<double, 3 > & OldDisplacement = (itNode)->FastGetSolutionStepValue(DISPLACEMENT, 1); array_1d<double, 3 > & CurrentDisplacement = (itNode)->FastGetSolutionStepValue(DISPLACEMENT, 0); if((itNode)->FastGetSolutionStepValue(IS_LAGRANGIAN_INLET) < 1e-15) { noalias(itNode->FastGetSolutionStepValue(MESH_VELOCITY)) = itNode->FastGetSolutionStepValue(VELOCITY); UpdateDisplacement(CurrentDisplacement, OldDisplacement, OldVelocity, OldAcceleration, CurrentAcceleration); } else { itNode->FastGetSolutionStepValue(MESH_VELOCITY_X) = 0.0; itNode->FastGetSolutionStepValue(MESH_VELOCITY_Y) = 0.0; itNode->FastGetSolutionStepValue(DISPLACEMENT_X) = 0.0; itNode->FastGetSolutionStepValue(DISPLACEMENT_Y) = 0.0; } } } } // if (rModelPart.GetCommunicator().MyPID() == 0) // std::cout << "end of prediction" << std::endl; } //*************************************************************************** /** this function is designed to be called in the builder and solver to introduce the selected time integration scheme. It "asks" the matrix needed to the element and performs the operations needed to introduce the seected time integration scheme. 
this function calculates at the same time the contribution to the LHS and to the RHS of the system */ void CalculateSystemContributions( Element& rCurrentElement, LocalSystemMatrixType& LHS_Contribution, LocalSystemVectorType& RHS_Contribution, Element::EquationIdVectorType& EquationId, const ProcessInfo& CurrentProcessInfo) override { KRATOS_TRY int k = OpenMPUtils::ThisThread(); //basic operations for the element considered rCurrentElement.CalculateLocalSystem(LHS_Contribution, RHS_Contribution, CurrentProcessInfo); rCurrentElement.CalculateMassMatrix(mMass[k], CurrentProcessInfo); rCurrentElement.CalculateLocalVelocityContribution(mDamp[k], RHS_Contribution, CurrentProcessInfo); rCurrentElement.EquationIdVector(EquationId, CurrentProcessInfo); //adding the dynamic contributions (statics is already included) AddDynamicsToLHS(LHS_Contribution, mDamp[k], mMass[k], CurrentProcessInfo); AddDynamicsToRHS(rCurrentElement, RHS_Contribution, mDamp[k], mMass[k], CurrentProcessInfo); // If there is a slip condition, apply it on a rotated system of coordinates mRotationTool.Rotate(LHS_Contribution,RHS_Contribution,rCurrentElement.GetGeometry()); mRotationTool.ApplySlipCondition(LHS_Contribution,RHS_Contribution,rCurrentElement.GetGeometry()); KRATOS_CATCH("") } void CalculateRHSContribution( Element& rCurrentElement, LocalSystemVectorType& RHS_Contribution, Element::EquationIdVectorType& EquationId, const ProcessInfo& CurrentProcessInfo) override { int k = OpenMPUtils::ThisThread(); //basic operations for the element considered rCurrentElement.CalculateRightHandSide(RHS_Contribution, CurrentProcessInfo); rCurrentElement.CalculateMassMatrix(mMass[k], CurrentProcessInfo); rCurrentElement.CalculateLocalVelocityContribution(mDamp[k], RHS_Contribution, CurrentProcessInfo); rCurrentElement.EquationIdVector(EquationId, CurrentProcessInfo); //adding the dynamic contributions (static is already included) AddDynamicsToRHS(rCurrentElement, RHS_Contribution, mDamp[k], mMass[k], CurrentProcessInfo); // If there is a slip condition, apply it on a rotated system of coordinates mRotationTool.Rotate(RHS_Contribution,rCurrentElement.GetGeometry()); mRotationTool.ApplySlipCondition(RHS_Contribution,rCurrentElement.GetGeometry()); } /** functions totally analogous to the precedent but applied to the "condition" objects */ void CalculateSystemContributions( Condition& rCurrentCondition, LocalSystemMatrixType& LHS_Contribution, LocalSystemVectorType& RHS_Contribution, Element::EquationIdVectorType& EquationId, const ProcessInfo& CurrentProcessInfo) override { KRATOS_TRY int k = OpenMPUtils::ThisThread(); rCurrentCondition.CalculateLocalSystem(LHS_Contribution, RHS_Contribution, CurrentProcessInfo); rCurrentCondition.CalculateMassMatrix(mMass[k], CurrentProcessInfo); //rCurrentCondition.CalculateDampingMatrix(VelocityBossakAuxiliaries::mDamp,CurrentProcessInfo); rCurrentCondition.CalculateLocalVelocityContribution(mDamp[k], RHS_Contribution, CurrentProcessInfo); rCurrentCondition.EquationIdVector(EquationId, CurrentProcessInfo); AddDynamicsToLHS(LHS_Contribution, mDamp[k], mMass[k], CurrentProcessInfo); AddDynamicsToRHS(rCurrentCondition, RHS_Contribution, mDamp[k], mMass[k], CurrentProcessInfo); // Rotate contributions (to match coordinates for slip conditions) mRotationTool.Rotate(LHS_Contribution,RHS_Contribution,rCurrentCondition.GetGeometry()); mRotationTool.ApplySlipCondition(LHS_Contribution,RHS_Contribution,rCurrentCondition.GetGeometry()); KRATOS_CATCH("") } void CalculateRHSContribution( Condition& 
rCurrentCondition, LocalSystemVectorType& RHS_Contribution, Element::EquationIdVectorType& EquationId, const ProcessInfo& rCurrentProcessInfo) override { KRATOS_TRY; int k = OpenMPUtils::ThisThread(); //basic operations for the element considered rCurrentCondition.CalculateRightHandSide(RHS_Contribution,rCurrentProcessInfo); rCurrentCondition.CalculateMassMatrix(mMass[k],rCurrentProcessInfo); //rCurrentCondition.CalculateDampingMatrix(VelocityBossakAuxiliaries::mDamp,CurrentProcessInfo); rCurrentCondition.CalculateLocalVelocityContribution(mDamp[k], RHS_Contribution,rCurrentProcessInfo); rCurrentCondition.EquationIdVector(EquationId,rCurrentProcessInfo); //adding the dynamic contributions (static is already included) AddDynamicsToRHS(rCurrentCondition, RHS_Contribution, mDamp[k], mMass[k],rCurrentProcessInfo); // Rotate contributions (to match coordinates for slip conditions) mRotationTool.Rotate(RHS_Contribution,rCurrentCondition.GetGeometry()); mRotationTool.ApplySlipCondition(RHS_Contribution,rCurrentCondition.GetGeometry()); KRATOS_CATCH(""); } //************************************************************************************* //************************************************************************************* void InitializeSolutionStep(ModelPart& r_model_part, TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b) override { ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo(); Scheme<TSparseSpace, TDenseSpace>::InitializeSolutionStep(r_model_part, A, Dx, b); double DeltaTime = CurrentProcessInfo[DELTA_TIME]; KRATOS_ERROR_IF(DeltaTime < 1.0e-12) << "Detected delta_time = 0 in the Bossak scheme. Check if the time step is created correctly for the current model part" << std::endl; //initializing constants ma0 = 1.0 / (mGammaNewmark * DeltaTime); ma1 = DeltaTime * mBetaNewmark / mGammaNewmark; ma2 = (-1 + mGammaNewmark) / mGammaNewmark; ma3 = DeltaTime; ma4 = pow(DeltaTime, 2)*(-2.0 * mBetaNewmark + 1.0) / 2.0; ma5 = pow(DeltaTime, 2) * mBetaNewmark; mam = (1.0 - mAlphaBossak) / (mGammaNewmark * DeltaTime); } //************************************************************************************* //************************************************************************************* void FinalizeNonLinIteration(ModelPart &rModelPart, TSystemMatrixType &A, TSystemVectorType &Dx, TSystemVectorType &b) override { const auto& r_current_process_info = rModelPart.GetProcessInfo(); if (mpTurbulenceModel) // If not null mpTurbulenceModel->Execute(); //if orthogonal subscales are computed if (r_current_process_info[OSS_SWITCH] == 1.0) { KRATOS_INFO("Bossak Scheme") << "Computing OSS projections" << std::endl; const int nnodes = static_cast<int>(rModelPart.Nodes().size()); auto nbegin = rModelPart.NodesBegin(); #pragma omp parallel for firstprivate(nbegin,nnodes) for(int i=0; i<nnodes; ++i) { auto ind = nbegin + i; noalias(ind->FastGetSolutionStepValue(ADVPROJ)) = ZeroVector(3); ind->FastGetSolutionStepValue(DIVPROJ) = 0.0; ind->FastGetSolutionStepValue(NODAL_AREA) = 0.0; }//end of loop over nodes //loop on nodes to compute ADVPROJ CONVPROJ NODALAREA array_1d<double, 3 > output = ZeroVector(3); const int nel = static_cast<int>(rModelPart.Elements().size()); auto elbegin = rModelPart.ElementsBegin(); #pragma omp parallel for firstprivate(elbegin,nel,output) for(int i=0; i<nel; ++i) { auto elem = elbegin + i; elem->Calculate(ADVPROJ, output, r_current_process_info); } rModelPart.GetCommunicator().AssembleCurrentData(NODAL_AREA); 
rModelPart.GetCommunicator().AssembleCurrentData(DIVPROJ); rModelPart.GetCommunicator().AssembleCurrentData(ADVPROJ); // Correction for periodic conditions this->PeriodicConditionProjectionCorrection(rModelPart); #pragma omp parallel for firstprivate(nbegin,nnodes) for(int i=0; i<nnodes; ++i) { auto ind = nbegin + i; if (ind->FastGetSolutionStepValue(NODAL_AREA) == 0.0) { ind->FastGetSolutionStepValue(NODAL_AREA) = 1.0; //KRATOS_WATCH("*********ATTENTION: NODAL AREA IS ZERRROOOO************"); } const double Area = ind->FastGetSolutionStepValue(NODAL_AREA); ind->FastGetSolutionStepValue(ADVPROJ) /= Area; ind->FastGetSolutionStepValue(DIVPROJ) /= Area; } } } void FinalizeSolutionStep(ModelPart &rModelPart, TSystemMatrixType &A, TSystemVectorType &Dx, TSystemVectorType &b) override { Element::EquationIdVectorType EquationId; LocalSystemVectorType RHS_Contribution; LocalSystemMatrixType LHS_Contribution; const ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo(); //for (ModelPart::NodeIterator itNode = rModelPart.NodesBegin(); itNode != rModelPart.NodesEnd(); ++itNode) #pragma omp parallel for for(int k = 0; k<static_cast<int>(rModelPart.Nodes().size()); k++) { auto itNode = rModelPart.NodesBegin() + k; (itNode->FastGetSolutionStepValue(REACTION)).clear(); // calculating relaxed acceleration const array_1d<double, 3 > & CurrentAcceleration = (itNode)->FastGetSolutionStepValue(ACCELERATION, 0); const array_1d<double, 3 > & OldAcceleration = (itNode)->FastGetSolutionStepValue(ACCELERATION, 1); const array_1d<double, 3> relaxed_acceleration = (1 - mAlphaBossak) * CurrentAcceleration + mAlphaBossak * OldAcceleration; (itNode)->SetValue(RELAXED_ACCELERATION, relaxed_acceleration); } //for (ModelPart::ElementsContainerType::ptr_iterator itElem = rModelPart.Elements().ptr_begin(); itElem != rModelPart.Elements().ptr_end(); ++itElem) #pragma omp parallel for firstprivate(EquationId,RHS_Contribution,LHS_Contribution) for(int k = 0; k<static_cast<int>(rModelPart.Elements().size()); k++) { auto itElem = rModelPart.Elements().ptr_begin()+k; int thread_id = OpenMPUtils::ThisThread(); //KRATOS_WATCH(LHS_Contribution); //basic operations for the element considered (*itElem)->CalculateLocalSystem(LHS_Contribution, RHS_Contribution, CurrentProcessInfo); //std::cout << rCurrentElement->Id() << " RHS = " << RHS_Contribution << std::endl; (*itElem)->CalculateMassMatrix(mMass[thread_id], CurrentProcessInfo); (*itElem)->CalculateLocalVelocityContribution(mDamp[thread_id], RHS_Contribution, CurrentProcessInfo); (*itElem)->EquationIdVector(EquationId, CurrentProcessInfo); //adding the dynamic contributions (statics is already included) AddDynamicsToLHS(LHS_Contribution, mDamp[thread_id], mMass[thread_id], CurrentProcessInfo); AddDynamicsToRHS(**itElem, RHS_Contribution, mDamp[thread_id], mMass[thread_id], CurrentProcessInfo); GeometryType& rGeom = (*itElem)->GetGeometry(); unsigned int NumNodes = rGeom.PointsNumber(); unsigned int Dimension = rGeom.WorkingSpaceDimension(); unsigned int index = 0; for (unsigned int i = 0; i < NumNodes; i++) { auto& reaction = rGeom[i].FastGetSolutionStepValue(REACTION); double& target_value0 = reaction[0]; const double& origin_value0 = RHS_Contribution[index++]; #pragma omp atomic target_value0 -= origin_value0; double& target_value1 = reaction[1]; const double& origin_value1 = RHS_Contribution[index++]; #pragma omp atomic target_value1 -= origin_value1; if (Dimension == 3) { double& target_value2 = reaction[2]; const double& origin_value2 = RHS_Contribution[index++]; 
#pragma omp atomic target_value2 -= origin_value2; } // rGeom[i].FastGetSolutionStepValue(REACTION_X,0) -= RHS_Contribution[index++]; // rGeom[i].FastGetSolutionStepValue(REACTION_Y,0) -= RHS_Contribution[index++]; // if (Dimension == 3) rGeom[i].FastGetSolutionStepValue(REACTION_Z,0) -= RHS_Contribution[index++]; index++; // skip pressure dof } } rModelPart.GetCommunicator().AssembleCurrentData(REACTION); // Base scheme calls FinalizeSolutionStep method of elements and conditions Scheme<TSparseSpace, TDenseSpace>::FinalizeSolutionStep(rModelPart, A, Dx, b); } //************************************************************************************************ //************************************************************************************************ /// Free memory allocated by this object. void Clear() override { this->mpDofUpdater->Clear(); } /*@} */ /**@name Operations */ /*@{ */ /*@} */ /**@name Access */ /*@{ */ /*@} */ /**@name Inquiry */ /*@{ */ /*@} */ /**@name Friends */ /*@{ */ /*@} */ protected: /**@name Protected static Member Variables */ /*@{ */ /*@} */ /**@name Protected member Variables */ /*@{ */ double mAlphaBossak; double mBetaNewmark; double mGammaNewmark; double mMeshVelocity; double mRelaxationFactor = 1.0; double ma0; double ma1; double ma2; double ma3; double ma4; double ma5; double mam; std::vector< Matrix > mMass; std::vector< Matrix > mDamp; std::vector< Vector > mvel; std::vector< Vector > macc; std::vector< Vector > maccold; /*@} */ /**@name Protected Operators*/ /*@{ */ /** On periodic boundaries, the nodal area and the values to project need to take into account contributions from elements on * both sides of the boundary. This is done using the conditions and the non-historical nodal data containers as follows:\n * 1- The partition that owns the PeriodicCondition adds the values on both nodes to their non-historical containers.\n * 2- The non-historical containers are added across processes, communicating the right value from the condition owner to all partitions.\n * 3- The value on all periodic nodes is replaced by the one received in step 2. 
*/ void PeriodicConditionProjectionCorrection(ModelPart& rModelPart) { const int num_nodes = rModelPart.NumberOfNodes(); const int num_conditions = rModelPart.NumberOfConditions(); #pragma omp parallel for for (int i = 0; i < num_nodes; i++) { auto it_node = rModelPart.NodesBegin() + i; it_node->SetValue(NODAL_AREA,0.0); it_node->SetValue(ADVPROJ,ZeroVector(3)); it_node->SetValue(DIVPROJ,0.0); } #pragma omp parallel for for (int i = 0; i < num_conditions; i++) { auto it_cond = rModelPart.ConditionsBegin() + i; if(it_cond->Is(PERIODIC)) { this->AssemblePeriodicContributionToProjections(it_cond->GetGeometry()); } } rModelPart.GetCommunicator().AssembleNonHistoricalData(NODAL_AREA); rModelPart.GetCommunicator().AssembleNonHistoricalData(ADVPROJ); rModelPart.GetCommunicator().AssembleNonHistoricalData(DIVPROJ); #pragma omp parallel for for (int i = 0; i < num_nodes; i++) { auto it_node = rModelPart.NodesBegin() + i; this->CorrectContributionsOnPeriodicNode(*it_node); } } void AssemblePeriodicContributionToProjections(Geometry< Node<3> >& rGeometry) { unsigned int nodes_in_cond = rGeometry.PointsNumber(); double nodal_area = 0.0; array_1d<double,3> momentum_projection = ZeroVector(3); double mass_projection = 0.0; for ( unsigned int i = 0; i < nodes_in_cond; i++ ) { auto& r_node = rGeometry[i]; nodal_area += r_node.FastGetSolutionStepValue(NODAL_AREA); noalias(momentum_projection) += r_node.FastGetSolutionStepValue(ADVPROJ); mass_projection += r_node.FastGetSolutionStepValue(DIVPROJ); } for ( unsigned int i = 0; i < nodes_in_cond; i++ ) { auto& r_node = rGeometry[i]; /* Note that this loop is expected to be threadsafe in normal conditions, * since each node should belong to a single periodic link. However, I am * setting the locks for openmp in case that we try more complicated things * in the future (like having different periodic conditions for different * coordinate directions). */ r_node.SetLock(); r_node.GetValue(NODAL_AREA) = nodal_area; noalias(r_node.GetValue(ADVPROJ)) = momentum_projection; r_node.GetValue(DIVPROJ) = mass_projection; r_node.UnSetLock(); } } void CorrectContributionsOnPeriodicNode(Node<3>& rNode) { if (rNode.GetValue(NODAL_AREA) != 0.0) // Only periodic nodes will have a non-historical NODAL_AREA set. 
{ rNode.FastGetSolutionStepValue(NODAL_AREA) = rNode.GetValue(NODAL_AREA); noalias(rNode.FastGetSolutionStepValue(ADVPROJ)) = rNode.GetValue(ADVPROJ); rNode.FastGetSolutionStepValue(DIVPROJ) = rNode.GetValue(DIVPROJ); } } //********************************************************************************* //Updating first time Derivative //********************************************************************************* void UpdateDisplacement(array_1d<double, 3 > & CurrentDisplacement, const array_1d<double, 3 > & OldDisplacement, const array_1d<double, 3 > & OldVelocity, const array_1d<double, 3 > & OldAcceleration, const array_1d<double, 3 > & CurrentAcceleration) { noalias(CurrentDisplacement) = OldDisplacement + ma3 * OldVelocity + ma4 * OldAcceleration + ma5*CurrentAcceleration; } //************************************************************************** void UpdateAcceleration(array_1d<double, 3 > & CurrentAcceleration, const array_1d<double, 3 > & DeltaVel, const array_1d<double, 3 > & OldAcceleration) { noalias(CurrentAcceleration) = ma0 * DeltaVel + ma2 * OldAcceleration; } //**************************************************************************** /** Kdyn = am*M + D + a1*K */ void AddDynamicsToLHS(LocalSystemMatrixType& LHS_Contribution, LocalSystemMatrixType& D, LocalSystemMatrixType& M, const ProcessInfo& CurrentProcessInfo) { //multipling time scheme factor LHS_Contribution *= ma1; // adding mass contribution to the dynamic stiffness if (M.size1() != 0) // if M matrix declared { noalias(LHS_Contribution) += mam*M; } //adding damping contribution if (D.size1() != 0) // if M matrix declared { noalias(LHS_Contribution) += D; } } //**************************************************************************** /// Add Bossak contributions from the inertial term to the RHS vector. /** This essentially performs bdyn = b - M*acc for the current element. * Note that viscous/pressure contributions to the RHS are expected to be added by the element itself. * @param[in] rCurrentElement The fluid element we are assembling. * @param[in/out] rRHS_Contribution The right hand side term where the contribution will be added. * @param[in] rD The elemental velocity/pressure LHS matrix. * @param[in] rM The elemental acceleration LHS matrix. * @param[in] rCurrentProcessInfo ProcessInfo instance for the containing ModelPart. */ void AddDynamicsToRHS( Element& rCurrentElement, LocalSystemVectorType& rRHS_Contribution, LocalSystemMatrixType& rD, LocalSystemMatrixType& rM, const ProcessInfo& rCurrentProcessInfo) { //adding inertia contribution if (rM.size1() != 0) { const auto& r_const_elem_ref = rCurrentElement; int k = OpenMPUtils::ThisThread(); r_const_elem_ref.GetSecondDerivativesVector(macc[k], 0); (macc[k]) *= (1.00 - mAlphaBossak); r_const_elem_ref.GetSecondDerivativesVector(maccold[k], 1); noalias(macc[k]) += mAlphaBossak * maccold[k]; noalias(rRHS_Contribution) -= prod(rM, macc[k]); } } /// Add Bossak contributions from the inertial term to the RHS vector. /** This essentially performs bdyn = b - M*acc for the current condition. * Note that viscous/pressure contributions to the RHS are expected to be added by the element condition. * @param[in] rCurrentCondition The fluid condition we are assembling. * @param[in/out] rRHS_Contribution The right hand side term where the contribution will be added. * @param[in] rD The elemental velocity/pressure LHS matrix. * @param[in] rM The elemental acceleration LHS matrix. 
* @param[in] rCurrentProcessInfo ProcessInfo instance for the containing ModelPart. */ void AddDynamicsToRHS( Condition& rCurrentCondition, LocalSystemVectorType& rRHS_Contribution, LocalSystemMatrixType& D, LocalSystemMatrixType& rM, const ProcessInfo& rCurrentProcessInfo) { //adding inertia contribution if (rM.size1() != 0) { const auto& r_const_cond_ref = rCurrentCondition; int k = OpenMPUtils::ThisThread(); r_const_cond_ref.GetSecondDerivativesVector(macc[k], 0); (macc[k]) *= (1.00 - mAlphaBossak); r_const_cond_ref.GetSecondDerivativesVector(maccold[k], 1); noalias(macc[k]) += mAlphaBossak * maccold[k]; noalias(rRHS_Contribution) -= prod(rM, macc[k]); } } /*@} */ /**@name Protected Operations*/ /*@{ */ /*@} */ /**@name Protected Access */ /*@{ */ /*@} */ /**@name Protected Inquiry */ /*@{ */ /*@} */ /**@name Protected LifeCycle */ /*@{ */ /*@} */ private: /**@name Static Member Variables */ /*@{ */ /*@} */ /**@name Member Variables */ /*@{ */ CoordinateTransformationUtils<LocalSystemMatrixType,LocalSystemVectorType,double> mRotationTool; const Variable<int>& mrPeriodicIdVar; Process::Pointer mpTurbulenceModel; typename TSparseSpace::DofUpdaterPointerType mpDofUpdater = TSparseSpace::CreateDofUpdater(); /*@} */ /**@name Private Operators*/ /*@{ */ /*@} */ /**@name Private Operations*/ /*@{ */ /*@} */ /**@name Private Access */ /*@{ */ /*@} */ /**@name Private Inquiry */ /*@{ */ /*@} */ /**@name Un accessible methods */ /*@{ */ /*@} */ }; /* Class Scheme */ /*@} */ /**@name Type Definitions */ /*@{ */ /*@} */ } /* namespace Kratos.*/ #endif /* KRATOS_RESIDUALBASED_PREDICTOR_CORRECTOR_BOSSAK_SCHEME defined */
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Jordi Cotela // #if !defined(KRATOS_RESIDUALBASED_PREDICTOR_CORRECTOR_VELOCITY_BOSSAK_TURBULENT_SCHEME ) #define KRATOS_RESIDUALBASED_PREDICTOR_CORRECTOR_VELOCITY_BOSSAK_TURBULENT_SCHEME /* System includes */ /* External includes */ #include "boost/smart_ptr.hpp" /* Project includes */ #include "includes/define.h" #include "includes/model_part.h" #include "includes/deprecated_variables.h" #include "solving_strategies/schemes/scheme.h" #include "includes/variables.h" #include "includes/cfd_variables.h" #include "containers/array_1d.h" #include "utilities/openmp_utils.h" #include "utilities/dof_updater.h" #include "utilities/coordinate_transformation_utilities.h" #include "processes/process.h" namespace Kratos { /**@name Kratos Globals */ /*@{ */ /*@} */ /**@name Type Definitions */ /*@{ */ /*@} */ /**@name Enum's */ /*@{ */ /*@} */ /**@name Functions */ /*@{ */ /*@} */ /**@name Kratos Classes */ /*@{ */ /// Bossak time scheme for the incompressible flow problem. /** This class provides a second order time scheme of the generalized-alpha Newmark family of methods. It also includes code required to implement slip conditions on the incompressible flow problem and provides the possibility of using a RANS model by passing a turbulence model as an argument to the constructor. This time scheme is intended to be used in combination with elements of type ASGS2D, ASGS3D, VMS or derived classes. To use the slip condition, set the SLIP flag on slip wall nodes. To use a wall law in combination with the slip condition, use MonolithicWallCondition to mesh the boundary @see ASGS2D, ASGS3D, VMS, MonolithicWallConditon */ template<class TSparseSpace, class TDenseSpace //= DenseSpace<double> > class ResidualBasedPredictorCorrectorVelocityBossakSchemeTurbulent : public Scheme<TSparseSpace, TDenseSpace> { public: /**@name Type Definitions */ /*@{ */ KRATOS_CLASS_POINTER_DEFINITION(ResidualBasedPredictorCorrectorVelocityBossakSchemeTurbulent); typedef Scheme<TSparseSpace, TDenseSpace> BaseType; typedef typename BaseType::TDataType TDataType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename Element::DofsVectorType DofsVectorType; typedef typename BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType; typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType; typedef Element::GeometryType GeometryType; /*@} */ /**@name Life Cycle */ /*@{ */ /** Constructor without a turbulence model */ ResidualBasedPredictorCorrectorVelocityBossakSchemeTurbulent( double NewAlphaBossak, double MoveMeshStrategy, unsigned int DomainSize) : Scheme<TSparseSpace, TDenseSpace>(), mRotationTool(DomainSize,DomainSize+1,SLIP), // Second argument is number of matrix rows per node: monolithic elements have velocity and pressure dofs. 
mrPeriodicIdVar(Kratos::Variable<int>::StaticObject()) { //default values for the Newmark Scheme mAlphaBossak = NewAlphaBossak; mBetaNewmark = 0.25 * pow((1.00 - mAlphaBossak), 2); mGammaNewmark = 0.5 - mAlphaBossak; mMeshVelocity = MoveMeshStrategy; //Allocate auxiliary memory int NumThreads = ParallelUtilities::GetNumThreads(); mMass.resize(NumThreads); mDamp.resize(NumThreads); mvel.resize(NumThreads); macc.resize(NumThreads); maccold.resize(NumThreads); } /** Constructor without a turbulence model with periodic conditions */ ResidualBasedPredictorCorrectorVelocityBossakSchemeTurbulent( double NewAlphaBossak, unsigned int DomainSize, const Variable<int>& rPeriodicIdVar) : Scheme<TSparseSpace, TDenseSpace>(), mRotationTool(DomainSize,DomainSize+1,SLIP), // Second argument is number of matrix rows per node: monolithic elements have velocity and pressure dofs. mrPeriodicIdVar(rPeriodicIdVar) { //default values for the Newmark Scheme mAlphaBossak = NewAlphaBossak; mBetaNewmark = 0.25 * pow((1.00 - mAlphaBossak), 2); mGammaNewmark = 0.5 - mAlphaBossak; mMeshVelocity = 0.0; //Allocate auxiliary memory int NumThreads = ParallelUtilities::GetNumThreads(); mMass.resize(NumThreads); mDamp.resize(NumThreads); mvel.resize(NumThreads); macc.resize(NumThreads); maccold.resize(NumThreads); } /** Constructor without a turbulence model */ ResidualBasedPredictorCorrectorVelocityBossakSchemeTurbulent( double NewAlphaBossak, double MoveMeshStrategy, unsigned int DomainSize, Kratos::Flags& rSlipFlag) : Scheme<TSparseSpace, TDenseSpace>(), mRotationTool(DomainSize,DomainSize+1,rSlipFlag), // Second argument is number of matrix rows per node: monolithic elements have velocity and pressure dofs. mrPeriodicIdVar(Kratos::Variable<int>::StaticObject()) { //default values for the Newmark Scheme mAlphaBossak = NewAlphaBossak; mBetaNewmark = 0.25 * pow((1.00 - mAlphaBossak), 2); mGammaNewmark = 0.5 - mAlphaBossak; mMeshVelocity = MoveMeshStrategy; //Allocate auxiliary memory int NumThreads = ParallelUtilities::GetNumThreads(); mMass.resize(NumThreads); mDamp.resize(NumThreads); mvel.resize(NumThreads); macc.resize(NumThreads); maccold.resize(NumThreads); } /** Constructor with a turbulence model */ ResidualBasedPredictorCorrectorVelocityBossakSchemeTurbulent( double NewAlphaBossak, double MoveMeshStrategy, unsigned int DomainSize, Process::Pointer pTurbulenceModel) : Scheme<TSparseSpace, TDenseSpace>(), mRotationTool(DomainSize,DomainSize+1,SLIP), // Second argument is number of matrix rows per node: monolithic elements have velocity and pressure dofs mrPeriodicIdVar(Kratos::Variable<int>::StaticObject()), mpTurbulenceModel(pTurbulenceModel) { //default values for the Newmark Scheme mAlphaBossak = NewAlphaBossak; mBetaNewmark = 0.25 * pow((1.00 - mAlphaBossak), 2); mGammaNewmark = 0.5 - mAlphaBossak; mMeshVelocity = MoveMeshStrategy; //Allocate auxiliary memory int NumThreads = ParallelUtilities::GetNumThreads(); mMass.resize(NumThreads); mDamp.resize(NumThreads); mvel.resize(NumThreads); macc.resize(NumThreads); maccold.resize(NumThreads); } /** Constructor with a turbulence model and relaxation factor */ ResidualBasedPredictorCorrectorVelocityBossakSchemeTurbulent( double NewAlphaBossak, double MoveMeshStrategy, unsigned int DomainSize, const double RelaxationFactor, Process::Pointer pTurbulenceModel) : Scheme<TSparseSpace, TDenseSpace>(), mRotationTool(DomainSize,DomainSize+1,SLIP), // Second argument is number of matrix rows per node: monolithic elements have velocity and pressure dofs 
mrPeriodicIdVar(Kratos::Variable<int>::StaticObject()), mpTurbulenceModel(pTurbulenceModel) { //default values for the Newmark Scheme mAlphaBossak = NewAlphaBossak; mBetaNewmark = 0.25 * pow((1.00 - mAlphaBossak), 2); mGammaNewmark = 0.5 - mAlphaBossak; mMeshVelocity = MoveMeshStrategy; mRelaxationFactor = RelaxationFactor; //Allocate auxiliary memory int NumThreads = ParallelUtilities::GetNumThreads(); mMass.resize(NumThreads); mDamp.resize(NumThreads); mvel.resize(NumThreads); macc.resize(NumThreads); maccold.resize(NumThreads); } /** Destructor. */ ~ResidualBasedPredictorCorrectorVelocityBossakSchemeTurbulent() override { } /*@} */ /**@name Operators */ /*@{ */ /** Performing the update of the solution. */ //*************************************************************************** void Update(ModelPart& r_model_part, DofsArrayType& rDofSet, TSystemMatrixType& A, TSystemVectorType& Dv, TSystemVectorType& b) override { KRATOS_TRY; mRotationTool.RotateVelocities(r_model_part); TSparseSpace::InplaceMult(Dv, mRelaxationFactor); mpDofUpdater->UpdateDofs(rDofSet,Dv); mRotationTool.RecoverVelocities(r_model_part); AdditionalUpdateOperations(r_model_part, rDofSet, A, Dv, b); KRATOS_CATCH("") } //*************************************************************************** void AdditionalUpdateOperations(ModelPart& rModelPart, DofsArrayType& rDofSet, TSystemMatrixType& A, TSystemVectorType& Dv, TSystemVectorType& b) { KRATOS_TRY int NumThreads = ParallelUtilities::GetNumThreads(); OpenMPUtils::PartitionVector NodePartition; OpenMPUtils::DivideInPartitions(rModelPart.Nodes().size(), NumThreads, NodePartition); //updating time derivatives (nodally for efficiency) array_1d<double, 3 > DeltaVel; int k = OpenMPUtils::ThisThread(); ModelPart::NodeIterator NodesBegin = rModelPart.NodesBegin() + NodePartition[k]; ModelPart::NodeIterator NodesEnd = rModelPart.NodesBegin() + NodePartition[k + 1]; for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; itNode++) { noalias(DeltaVel) = (itNode)->FastGetSolutionStepValue(VELOCITY) - (itNode)->FastGetSolutionStepValue(VELOCITY, 1); array_1d<double, 3 > & CurrentAcceleration = (itNode)->FastGetSolutionStepValue(ACCELERATION, 0); array_1d<double, 3 > & OldAcceleration = (itNode)->FastGetSolutionStepValue(ACCELERATION, 1); UpdateAcceleration(CurrentAcceleration, DeltaVel, OldAcceleration); if (mMeshVelocity == 2)//Lagrangian { if((itNode)->FastGetSolutionStepValue(IS_LAGRANGIAN_INLET) < 1e-15) { array_1d<double, 3 > & CurrentDisplacement = (itNode)->FastGetSolutionStepValue(DISPLACEMENT, 0); array_1d<double, 3 > & OldDisplacement = (itNode)->FastGetSolutionStepValue(DISPLACEMENT, 1); array_1d<double, 3 > & OldVelocity = (itNode)->FastGetSolutionStepValue(VELOCITY, 1); noalias(itNode->FastGetSolutionStepValue(MESH_VELOCITY)) = itNode->FastGetSolutionStepValue(VELOCITY); UpdateDisplacement(CurrentDisplacement, OldDisplacement, OldVelocity, OldAcceleration, CurrentAcceleration); } else { noalias(itNode->FastGetSolutionStepValue(MESH_VELOCITY)) = ZeroVector(3); noalias(itNode->FastGetSolutionStepValue(DISPLACEMENT)) = ZeroVector(3); } } } KRATOS_CATCH("") } //*************************************************************************** //predicts the solution at the current step as // v = vold void Predict(ModelPart& rModelPart, DofsArrayType& rDofSet, TSystemMatrixType& A, TSystemVectorType& Dv, TSystemVectorType& b) override { // if (rModelPart.GetCommunicator().MyPID() == 0) // std::cout << "prediction" << std::endl; int NumThreads = 
ParallelUtilities::GetNumThreads(); OpenMPUtils::PartitionVector NodePartition; OpenMPUtils::DivideInPartitions(rModelPart.Nodes().size(), NumThreads, NodePartition); //array_1d<double, 3 > DeltaDisp; int k = OpenMPUtils::ThisThread(); ModelPart::NodeIterator NodesBegin = rModelPart.NodesBegin() + NodePartition[k]; ModelPart::NodeIterator NodesEnd = rModelPart.NodesBegin() + NodePartition[k + 1]; for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; itNode++) { array_1d<double, 3 > & OldVelocity = (itNode)->FastGetSolutionStepValue(VELOCITY, 1); double& OldPressure = (itNode)->FastGetSolutionStepValue(PRESSURE, 1); //predicting velocity //ATTENTION::: the prediction is performed only on free nodes array_1d<double, 3 > & CurrentVelocity = (itNode)->FastGetSolutionStepValue(VELOCITY); double& CurrentPressure = (itNode)->FastGetSolutionStepValue(PRESSURE); if ((itNode->pGetDof(VELOCITY_X))->IsFree()) (CurrentVelocity[0]) = OldVelocity[0]; if (itNode->pGetDof(VELOCITY_Y)->IsFree()) (CurrentVelocity[1]) = OldVelocity[1]; if (itNode->HasDofFor(VELOCITY_Z)) if (itNode->pGetDof(VELOCITY_Z)->IsFree()) (CurrentVelocity[2]) = OldVelocity[2]; if (itNode->pGetDof(PRESSURE)->IsFree()) CurrentPressure = OldPressure; // updating time derivatives ::: please note that displacements and // their time derivatives can not be consistently fixed separately array_1d<double, 3 > DeltaVel; noalias(DeltaVel) = CurrentVelocity - OldVelocity; array_1d<double, 3 > & OldAcceleration = (itNode)->FastGetSolutionStepValue(ACCELERATION, 1); array_1d<double, 3 > & CurrentAcceleration = (itNode)->FastGetSolutionStepValue(ACCELERATION); UpdateAcceleration(CurrentAcceleration, DeltaVel, OldAcceleration); if (mMeshVelocity == 2) //Lagrangian { array_1d<double, 3 > & OldDisplacement = (itNode)->FastGetSolutionStepValue(DISPLACEMENT, 1); array_1d<double, 3 > & CurrentDisplacement = (itNode)->FastGetSolutionStepValue(DISPLACEMENT, 0); if((itNode)->FastGetSolutionStepValue(IS_LAGRANGIAN_INLET) < 1e-15) { noalias(itNode->FastGetSolutionStepValue(MESH_VELOCITY)) = itNode->FastGetSolutionStepValue(VELOCITY); UpdateDisplacement(CurrentDisplacement, OldDisplacement, OldVelocity, OldAcceleration, CurrentAcceleration); } else { itNode->FastGetSolutionStepValue(MESH_VELOCITY_X) = 0.0; itNode->FastGetSolutionStepValue(MESH_VELOCITY_Y) = 0.0; itNode->FastGetSolutionStepValue(DISPLACEMENT_X) = 0.0; itNode->FastGetSolutionStepValue(DISPLACEMENT_Y) = 0.0; } } } // if (rModelPart.GetCommunicator().MyPID() == 0) // std::cout << "end of prediction" << std::endl; } //*************************************************************************** /** this function is designed to be called in the builder and solver to introduce the selected time integration scheme. It "asks" the matrix needed to the element and performs the operations needed to introduce the seected time integration scheme. 
this function calculates at the same time the contribution to the LHS and to the RHS of the system */ void CalculateSystemContributions( Element& rCurrentElement, LocalSystemMatrixType& LHS_Contribution, LocalSystemVectorType& RHS_Contribution, Element::EquationIdVectorType& EquationId, const ProcessInfo& CurrentProcessInfo) override { KRATOS_TRY int k = OpenMPUtils::ThisThread(); //basic operations for the element considered rCurrentElement.CalculateLocalSystem(LHS_Contribution, RHS_Contribution, CurrentProcessInfo); rCurrentElement.CalculateMassMatrix(mMass[k], CurrentProcessInfo); rCurrentElement.CalculateLocalVelocityContribution(mDamp[k], RHS_Contribution, CurrentProcessInfo); rCurrentElement.EquationIdVector(EquationId, CurrentProcessInfo); //adding the dynamic contributions (statics is already included) AddDynamicsToLHS(LHS_Contribution, mDamp[k], mMass[k], CurrentProcessInfo); AddDynamicsToRHS(rCurrentElement, RHS_Contribution, mDamp[k], mMass[k], CurrentProcessInfo); // If there is a slip condition, apply it on a rotated system of coordinates mRotationTool.Rotate(LHS_Contribution,RHS_Contribution,rCurrentElement.GetGeometry()); mRotationTool.ApplySlipCondition(LHS_Contribution,RHS_Contribution,rCurrentElement.GetGeometry()); KRATOS_CATCH("") } void CalculateRHSContribution( Element& rCurrentElement, LocalSystemVectorType& RHS_Contribution, Element::EquationIdVectorType& EquationId, const ProcessInfo& CurrentProcessInfo) override { int k = OpenMPUtils::ThisThread(); //basic operations for the element considered rCurrentElement.CalculateRightHandSide(RHS_Contribution, CurrentProcessInfo); rCurrentElement.CalculateMassMatrix(mMass[k], CurrentProcessInfo); rCurrentElement.CalculateLocalVelocityContribution(mDamp[k], RHS_Contribution, CurrentProcessInfo); rCurrentElement.EquationIdVector(EquationId, CurrentProcessInfo); //adding the dynamic contributions (static is already included) AddDynamicsToRHS(rCurrentElement, RHS_Contribution, mDamp[k], mMass[k], CurrentProcessInfo); // If there is a slip condition, apply it on a rotated system of coordinates mRotationTool.Rotate(RHS_Contribution,rCurrentElement.GetGeometry()); mRotationTool.ApplySlipCondition(RHS_Contribution,rCurrentElement.GetGeometry()); } /** functions totally analogous to the precedent but applied to the "condition" objects */ void CalculateSystemContributions( Condition& rCurrentCondition, LocalSystemMatrixType& LHS_Contribution, LocalSystemVectorType& RHS_Contribution, Element::EquationIdVectorType& EquationId, const ProcessInfo& CurrentProcessInfo) override { KRATOS_TRY int k = OpenMPUtils::ThisThread(); rCurrentCondition.CalculateLocalSystem(LHS_Contribution, RHS_Contribution, CurrentProcessInfo); rCurrentCondition.CalculateMassMatrix(mMass[k], CurrentProcessInfo); //rCurrentCondition.CalculateDampingMatrix(VelocityBossakAuxiliaries::mDamp,CurrentProcessInfo); rCurrentCondition.CalculateLocalVelocityContribution(mDamp[k], RHS_Contribution, CurrentProcessInfo); rCurrentCondition.EquationIdVector(EquationId, CurrentProcessInfo); AddDynamicsToLHS(LHS_Contribution, mDamp[k], mMass[k], CurrentProcessInfo); AddDynamicsToRHS(rCurrentCondition, RHS_Contribution, mDamp[k], mMass[k], CurrentProcessInfo); // Rotate contributions (to match coordinates for slip conditions) mRotationTool.Rotate(LHS_Contribution,RHS_Contribution,rCurrentCondition.GetGeometry()); mRotationTool.ApplySlipCondition(LHS_Contribution,RHS_Contribution,rCurrentCondition.GetGeometry()); KRATOS_CATCH("") } void CalculateRHSContribution( Condition& 
rCurrentCondition, LocalSystemVectorType& RHS_Contribution, Element::EquationIdVectorType& EquationId, const ProcessInfo& rCurrentProcessInfo) override { KRATOS_TRY; int k = OpenMPUtils::ThisThread(); //basic operations for the element considered rCurrentCondition.CalculateRightHandSide(RHS_Contribution,rCurrentProcessInfo); rCurrentCondition.CalculateMassMatrix(mMass[k],rCurrentProcessInfo); //rCurrentCondition.CalculateDampingMatrix(VelocityBossakAuxiliaries::mDamp,CurrentProcessInfo); rCurrentCondition.CalculateLocalVelocityContribution(mDamp[k], RHS_Contribution,rCurrentProcessInfo); rCurrentCondition.EquationIdVector(EquationId,rCurrentProcessInfo); //adding the dynamic contributions (static is already included) AddDynamicsToRHS(rCurrentCondition, RHS_Contribution, mDamp[k], mMass[k],rCurrentProcessInfo); // Rotate contributions (to match coordinates for slip conditions) mRotationTool.Rotate(RHS_Contribution,rCurrentCondition.GetGeometry()); mRotationTool.ApplySlipCondition(RHS_Contribution,rCurrentCondition.GetGeometry()); KRATOS_CATCH(""); } //************************************************************************************* //************************************************************************************* void InitializeSolutionStep(ModelPart& r_model_part, TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b) override { ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo(); Scheme<TSparseSpace, TDenseSpace>::InitializeSolutionStep(r_model_part, A, Dx, b); double DeltaTime = CurrentProcessInfo[DELTA_TIME]; KRATOS_ERROR_IF(DeltaTime < 1.0e-12) << "Detected delta_time = 0 in the Bossak scheme. Check if the time step is created correctly for the current model part" << std::endl; //initializing constants ma0 = 1.0 / (mGammaNewmark * DeltaTime); ma1 = DeltaTime * mBetaNewmark / mGammaNewmark; ma2 = (-1 + mGammaNewmark) / mGammaNewmark; ma3 = DeltaTime; ma4 = pow(DeltaTime, 2)*(-2.0 * mBetaNewmark + 1.0) / 2.0; ma5 = pow(DeltaTime, 2) * mBetaNewmark; mam = (1.0 - mAlphaBossak) / (mGammaNewmark * DeltaTime); } //************************************************************************************* //************************************************************************************* void FinalizeNonLinIteration(ModelPart &rModelPart, TSystemMatrixType &A, TSystemVectorType &Dx, TSystemVectorType &b) override { const auto& r_current_process_info = rModelPart.GetProcessInfo(); if (mpTurbulenceModel) // If not null mpTurbulenceModel->Execute(); //if orthogonal subscales are computed if (r_current_process_info[OSS_SWITCH] == 1.0) { KRATOS_INFO("Bossak Scheme") << "Computing OSS projections" << std::endl; const int nnodes = static_cast<int>(rModelPart.Nodes().size()); auto nbegin = rModelPart.NodesBegin(); for(int i=0; i<nnodes; ++i) { auto ind = nbegin + i; noalias(ind->FastGetSolutionStepValue(ADVPROJ)) = ZeroVector(3); ind->FastGetSolutionStepValue(DIVPROJ) = 0.0; ind->FastGetSolutionStepValue(NODAL_AREA) = 0.0; }//end of loop over nodes //loop on nodes to compute ADVPROJ CONVPROJ NODALAREA array_1d<double, 3 > output = ZeroVector(3); const int nel = static_cast<int>(rModelPart.Elements().size()); auto elbegin = rModelPart.ElementsBegin(); for(int i=0; i<nel; ++i) { auto elem = elbegin + i; elem->Calculate(ADVPROJ, output, r_current_process_info); } rModelPart.GetCommunicator().AssembleCurrentData(NODAL_AREA); rModelPart.GetCommunicator().AssembleCurrentData(DIVPROJ); rModelPart.GetCommunicator().AssembleCurrentData(ADVPROJ); // Correction for periodic 
conditions this->PeriodicConditionProjectionCorrection(rModelPart); for(int i=0; i<nnodes; ++i) { auto ind = nbegin + i; if (ind->FastGetSolutionStepValue(NODAL_AREA) == 0.0) { ind->FastGetSolutionStepValue(NODAL_AREA) = 1.0; //KRATOS_WATCH("*********ATTENTION: NODAL AREA IS ZERRROOOO************"); } const double Area = ind->FastGetSolutionStepValue(NODAL_AREA); ind->FastGetSolutionStepValue(ADVPROJ) /= Area; ind->FastGetSolutionStepValue(DIVPROJ) /= Area; } } } void FinalizeSolutionStep(ModelPart &rModelPart, TSystemMatrixType &A, TSystemVectorType &Dx, TSystemVectorType &b) override { Element::EquationIdVectorType EquationId; LocalSystemVectorType RHS_Contribution; LocalSystemMatrixType LHS_Contribution; const ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo(); //for (ModelPart::NodeIterator itNode = rModelPart.NodesBegin(); itNode != rModelPart.NodesEnd(); ++itNode) for(int k = 0; k<static_cast<int>(rModelPart.Nodes().size()); k++) { auto itNode = rModelPart.NodesBegin() + k; (itNode->FastGetSolutionStepValue(REACTION)).clear(); // calculating relaxed acceleration const array_1d<double, 3 > & CurrentAcceleration = (itNode)->FastGetSolutionStepValue(ACCELERATION, 0); const array_1d<double, 3 > & OldAcceleration = (itNode)->FastGetSolutionStepValue(ACCELERATION, 1); const array_1d<double, 3> relaxed_acceleration = (1 - mAlphaBossak) * CurrentAcceleration + mAlphaBossak * OldAcceleration; (itNode)->SetValue(RELAXED_ACCELERATION, relaxed_acceleration); } //for (ModelPart::ElementsContainerType::ptr_iterator itElem = rModelPart.Elements().ptr_begin(); itElem != rModelPart.Elements().ptr_end(); ++itElem) for(int k = 0; k<static_cast<int>(rModelPart.Elements().size()); k++) { auto itElem = rModelPart.Elements().ptr_begin()+k; int thread_id = OpenMPUtils::ThisThread(); //KRATOS_WATCH(LHS_Contribution); //basic operations for the element considered (*itElem)->CalculateLocalSystem(LHS_Contribution, RHS_Contribution, CurrentProcessInfo); //std::cout << rCurrentElement->Id() << " RHS = " << RHS_Contribution << std::endl; (*itElem)->CalculateMassMatrix(mMass[thread_id], CurrentProcessInfo); (*itElem)->CalculateLocalVelocityContribution(mDamp[thread_id], RHS_Contribution, CurrentProcessInfo); (*itElem)->EquationIdVector(EquationId, CurrentProcessInfo); //adding the dynamic contributions (statics is already included) AddDynamicsToLHS(LHS_Contribution, mDamp[thread_id], mMass[thread_id], CurrentProcessInfo); AddDynamicsToRHS(**itElem, RHS_Contribution, mDamp[thread_id], mMass[thread_id], CurrentProcessInfo); GeometryType& rGeom = (*itElem)->GetGeometry(); unsigned int NumNodes = rGeom.PointsNumber(); unsigned int Dimension = rGeom.WorkingSpaceDimension(); unsigned int index = 0; for (unsigned int i = 0; i < NumNodes; i++) { auto& reaction = rGeom[i].FastGetSolutionStepValue(REACTION); double& target_value0 = reaction[0]; const double& origin_value0 = RHS_Contribution[index++]; target_value0 -= origin_value0; double& target_value1 = reaction[1]; const double& origin_value1 = RHS_Contribution[index++]; target_value1 -= origin_value1; if (Dimension == 3) { double& target_value2 = reaction[2]; const double& origin_value2 = RHS_Contribution[index++]; target_value2 -= origin_value2; } // rGeom[i].FastGetSolutionStepValue(REACTION_X,0) -= RHS_Contribution[index++]; // rGeom[i].FastGetSolutionStepValue(REACTION_Y,0) -= RHS_Contribution[index++]; // if (Dimension == 3) rGeom[i].FastGetSolutionStepValue(REACTION_Z,0) -= RHS_Contribution[index++]; index++; // skip pressure dof } } 
rModelPart.GetCommunicator().AssembleCurrentData(REACTION); // Base scheme calls FinalizeSolutionStep method of elements and conditions Scheme<TSparseSpace, TDenseSpace>::FinalizeSolutionStep(rModelPart, A, Dx, b); } //************************************************************************************************ //************************************************************************************************ /// Free memory allocated by this object. void Clear() override { this->mpDofUpdater->Clear(); } /*@} */ /**@name Operations */ /*@{ */ /*@} */ /**@name Access */ /*@{ */ /*@} */ /**@name Inquiry */ /*@{ */ /*@} */ /**@name Friends */ /*@{ */ /*@} */ protected: /**@name Protected static Member Variables */ /*@{ */ /*@} */ /**@name Protected member Variables */ /*@{ */ double mAlphaBossak; double mBetaNewmark; double mGammaNewmark; double mMeshVelocity; double mRelaxationFactor = 1.0; double ma0; double ma1; double ma2; double ma3; double ma4; double ma5; double mam; std::vector< Matrix > mMass; std::vector< Matrix > mDamp; std::vector< Vector > mvel; std::vector< Vector > macc; std::vector< Vector > maccold; /*@} */ /**@name Protected Operators*/ /*@{ */ /** On periodic boundaries, the nodal area and the values to project need to take into account contributions from elements on * both sides of the boundary. This is done using the conditions and the non-historical nodal data containers as follows:\n * 1- The partition that owns the PeriodicCondition adds the values on both nodes to their non-historical containers.\n * 2- The non-historical containers are added across processes, communicating the right value from the condition owner to all partitions.\n * 3- The value on all periodic nodes is replaced by the one received in step 2. */ void PeriodicConditionProjectionCorrection(ModelPart& rModelPart) { const int num_nodes = rModelPart.NumberOfNodes(); const int num_conditions = rModelPart.NumberOfConditions(); for (int i = 0; i < num_nodes; i++) { auto it_node = rModelPart.NodesBegin() + i; it_node->SetValue(NODAL_AREA,0.0); it_node->SetValue(ADVPROJ,ZeroVector(3)); it_node->SetValue(DIVPROJ,0.0); } for (int i = 0; i < num_conditions; i++) { auto it_cond = rModelPart.ConditionsBegin() + i; if(it_cond->Is(PERIODIC)) { this->AssemblePeriodicContributionToProjections(it_cond->GetGeometry()); } } rModelPart.GetCommunicator().AssembleNonHistoricalData(NODAL_AREA); rModelPart.GetCommunicator().AssembleNonHistoricalData(ADVPROJ); rModelPart.GetCommunicator().AssembleNonHistoricalData(DIVPROJ); for (int i = 0; i < num_nodes; i++) { auto it_node = rModelPart.NodesBegin() + i; this->CorrectContributionsOnPeriodicNode(*it_node); } } void AssemblePeriodicContributionToProjections(Geometry< Node<3> >& rGeometry) { unsigned int nodes_in_cond = rGeometry.PointsNumber(); double nodal_area = 0.0; array_1d<double,3> momentum_projection = ZeroVector(3); double mass_projection = 0.0; for ( unsigned int i = 0; i < nodes_in_cond; i++ ) { auto& r_node = rGeometry[i]; nodal_area += r_node.FastGetSolutionStepValue(NODAL_AREA); noalias(momentum_projection) += r_node.FastGetSolutionStepValue(ADVPROJ); mass_projection += r_node.FastGetSolutionStepValue(DIVPROJ); } for ( unsigned int i = 0; i < nodes_in_cond; i++ ) { auto& r_node = rGeometry[i]; /* Note that this loop is expected to be threadsafe in normal conditions, * since each node should belong to a single periodic link. 
However, I am * setting the locks for openmp in case that we try more complicated things * in the future (like having different periodic conditions for different * coordinate directions). */ r_node.SetLock(); r_node.GetValue(NODAL_AREA) = nodal_area; noalias(r_node.GetValue(ADVPROJ)) = momentum_projection; r_node.GetValue(DIVPROJ) = mass_projection; r_node.UnSetLock(); } } void CorrectContributionsOnPeriodicNode(Node<3>& rNode) { if (rNode.GetValue(NODAL_AREA) != 0.0) // Only periodic nodes will have a non-historical NODAL_AREA set. { rNode.FastGetSolutionStepValue(NODAL_AREA) = rNode.GetValue(NODAL_AREA); noalias(rNode.FastGetSolutionStepValue(ADVPROJ)) = rNode.GetValue(ADVPROJ); rNode.FastGetSolutionStepValue(DIVPROJ) = rNode.GetValue(DIVPROJ); } } //********************************************************************************* //Updating first time Derivative //********************************************************************************* void UpdateDisplacement(array_1d<double, 3 > & CurrentDisplacement, const array_1d<double, 3 > & OldDisplacement, const array_1d<double, 3 > & OldVelocity, const array_1d<double, 3 > & OldAcceleration, const array_1d<double, 3 > & CurrentAcceleration) { noalias(CurrentDisplacement) = OldDisplacement + ma3 * OldVelocity + ma4 * OldAcceleration + ma5*CurrentAcceleration; } //************************************************************************** void UpdateAcceleration(array_1d<double, 3 > & CurrentAcceleration, const array_1d<double, 3 > & DeltaVel, const array_1d<double, 3 > & OldAcceleration) { noalias(CurrentAcceleration) = ma0 * DeltaVel + ma2 * OldAcceleration; } //**************************************************************************** /** Kdyn = am*M + D + a1*K */ void AddDynamicsToLHS(LocalSystemMatrixType& LHS_Contribution, LocalSystemMatrixType& D, LocalSystemMatrixType& M, const ProcessInfo& CurrentProcessInfo) { //multipling time scheme factor LHS_Contribution *= ma1; // adding mass contribution to the dynamic stiffness if (M.size1() != 0) // if M matrix declared { noalias(LHS_Contribution) += mam*M; } //adding damping contribution if (D.size1() != 0) // if M matrix declared { noalias(LHS_Contribution) += D; } } //**************************************************************************** /// Add Bossak contributions from the inertial term to the RHS vector. /** This essentially performs bdyn = b - M*acc for the current element. * Note that viscous/pressure contributions to the RHS are expected to be added by the element itself. * @param[in] rCurrentElement The fluid element we are assembling. * @param[in/out] rRHS_Contribution The right hand side term where the contribution will be added. * @param[in] rD The elemental velocity/pressure LHS matrix. * @param[in] rM The elemental acceleration LHS matrix. * @param[in] rCurrentProcessInfo ProcessInfo instance for the containing ModelPart. */ void AddDynamicsToRHS( Element& rCurrentElement, LocalSystemVectorType& rRHS_Contribution, LocalSystemMatrixType& rD, LocalSystemMatrixType& rM, const ProcessInfo& rCurrentProcessInfo) { //adding inertia contribution if (rM.size1() != 0) { const auto& r_const_elem_ref = rCurrentElement; int k = OpenMPUtils::ThisThread(); r_const_elem_ref.GetSecondDerivativesVector(macc[k], 0); (macc[k]) *= (1.00 - mAlphaBossak); r_const_elem_ref.GetSecondDerivativesVector(maccold[k], 1); noalias(macc[k]) += mAlphaBossak * maccold[k]; noalias(rRHS_Contribution) -= prod(rM, macc[k]); } } /// Add Bossak contributions from the inertial term to the RHS vector. 
/** This essentially performs bdyn = b - M*acc for the current condition. * Note that viscous/pressure contributions to the RHS are expected to be added by the element condition. * @param[in] rCurrentCondition The fluid condition we are assembling. * @param[in/out] rRHS_Contribution The right hand side term where the contribution will be added. * @param[in] rD The elemental velocity/pressure LHS matrix. * @param[in] rM The elemental acceleration LHS matrix. * @param[in] rCurrentProcessInfo ProcessInfo instance for the containing ModelPart. */ void AddDynamicsToRHS( Condition& rCurrentCondition, LocalSystemVectorType& rRHS_Contribution, LocalSystemMatrixType& D, LocalSystemMatrixType& rM, const ProcessInfo& rCurrentProcessInfo) { //adding inertia contribution if (rM.size1() != 0) { const auto& r_const_cond_ref = rCurrentCondition; int k = OpenMPUtils::ThisThread(); r_const_cond_ref.GetSecondDerivativesVector(macc[k], 0); (macc[k]) *= (1.00 - mAlphaBossak); r_const_cond_ref.GetSecondDerivativesVector(maccold[k], 1); noalias(macc[k]) += mAlphaBossak * maccold[k]; noalias(rRHS_Contribution) -= prod(rM, macc[k]); } } /*@} */ /**@name Protected Operations*/ /*@{ */ /*@} */ /**@name Protected Access */ /*@{ */ /*@} */ /**@name Protected Inquiry */ /*@{ */ /*@} */ /**@name Protected LifeCycle */ /*@{ */ /*@} */ private: /**@name Static Member Variables */ /*@{ */ /*@} */ /**@name Member Variables */ /*@{ */ CoordinateTransformationUtils<LocalSystemMatrixType,LocalSystemVectorType,double> mRotationTool; const Variable<int>& mrPeriodicIdVar; Process::Pointer mpTurbulenceModel; typename TSparseSpace::DofUpdaterPointerType mpDofUpdater = TSparseSpace::CreateDofUpdater(); /*@} */ /**@name Private Operators*/ /*@{ */ /*@} */ /**@name Private Operations*/ /*@{ */ /*@} */ /**@name Private Access */ /*@{ */ /*@} */ /**@name Private Inquiry */ /*@{ */ /*@} */ /**@name Un accessible methods */ /*@{ */ /*@} */ }; /* Class Scheme */ /*@} */ /**@name Type Definitions */ /*@{ */ /*@} */ } /* namespace Kratos.*/ #endif /* KRATOS_RESIDUALBASED_PREDICTOR_CORRECTOR_BOSSAK_SCHEME defined */
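The serial variant above differs from the OpenMP-formatted variant that follows only in the parallel constructs: the parallel version precomputes a node partition (OpenMPUtils::DivideInPartitions) and lets each thread walk its own contiguous range inside a #pragma omp parallel region, as in AdditionalUpdateOperations and Predict. The sketch below reproduces only that looping pattern under stated assumptions: the Kratos utilities and node container are replaced by hypothetical stand-ins (a hand-rolled DivideInPartitions, plain OpenMP runtime calls, and a std::vector<double> of node values), so it illustrates the partitioning scheme rather than the actual Kratos API.

// Minimal sketch (assumptions noted above) of the thread-partitioned node loop.
#include <omp.h>
#include <cstdio>
#include <vector>

// Stand-in for OpenMPUtils::DivideInPartitions: partition[k]..partition[k+1]
// is the contiguous index range handled by thread k.
std::vector<int> DivideInPartitions(int NumItems, int NumThreads) {
    std::vector<int> partition(NumThreads + 1);
    partition[0] = 0;
    for (int k = 1; k <= NumThreads; ++k)
        partition[k] = (NumItems * k) / NumThreads; // last entry equals NumItems
    return partition;
}

int main() {
    std::vector<double> node_values(1000, 1.0);    // stand-in for the ModelPart node container

    omp_set_dynamic(0);                            // pin the team size so it matches the partition
    const int num_threads = omp_get_max_threads(); // stand-in for ParallelUtilities::GetNumThreads()
    const std::vector<int> partition =
        DivideInPartitions(static_cast<int>(node_values.size()), num_threads);

    #pragma omp parallel num_threads(num_threads)
    {
        const int k = omp_get_thread_num();        // stand-in for OpenMPUtils::ThisThread()
        for (int i = partition[k]; i < partition[k + 1]; ++i) {
            node_values[i] *= 2.0;                 // placeholder for the per-node update
        }
    }

    std::printf("node_values[0] = %f\n", node_values[0]);
    return 0;
}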
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Jordi Cotela // #if !defined(KRATOS_RESIDUALBASED_PREDICTOR_CORRECTOR_VELOCITY_BOSSAK_TURBULENT_SCHEME ) #define KRATOS_RESIDUALBASED_PREDICTOR_CORRECTOR_VELOCITY_BOSSAK_TURBULENT_SCHEME /* System includes */ /* External includes */ #include "boost/smart_ptr.hpp" /* Project includes */ #include "includes/define.h" #include "includes/model_part.h" #include "includes/deprecated_variables.h" #include "solving_strategies/schemes/scheme.h" #include "includes/variables.h" #include "includes/cfd_variables.h" #include "containers/array_1d.h" #include "utilities/openmp_utils.h" #include "utilities/dof_updater.h" #include "utilities/coordinate_transformation_utilities.h" #include "processes/process.h" namespace Kratos { /**@name Kratos Globals */ /*@{ */ /*@} */ /**@name Type Definitions */ /*@{ */ /*@} */ /**@name Enum's */ /*@{ */ /*@} */ /**@name Functions */ /*@{ */ /*@} */ /**@name Kratos Classes */ /*@{ */ /// Bossak time scheme for the incompressible flow problem. /** This class provides a second order time scheme of the generalized-alpha Newmark family of methods. It also includes code required to implement slip conditions on the incompressible flow problem and provides the possibility of using a RANS model by passing a turbulence model as an argument to the constructor. This time scheme is intended to be used in combination with elements of type ASGS2D, ASGS3D, VMS or derived classes. To use the slip condition, set the SLIP flag on slip wall nodes. To use a wall law in combination with the slip condition, use MonolithicWallCondition to mesh the boundary @see ASGS2D, ASGS3D, VMS, MonolithicWallConditon */ template<class TSparseSpace, class TDenseSpace //= DenseSpace<double> > class ResidualBasedPredictorCorrectorVelocityBossakSchemeTurbulent : public Scheme<TSparseSpace, TDenseSpace> { public: /**@name Type Definitions */ /*@{ */ KRATOS_CLASS_POINTER_DEFINITION(ResidualBasedPredictorCorrectorVelocityBossakSchemeTurbulent); typedef Scheme<TSparseSpace, TDenseSpace> BaseType; typedef typename BaseType::TDataType TDataType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename Element::DofsVectorType DofsVectorType; typedef typename BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType; typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType; typedef Element::GeometryType GeometryType; /*@} */ /**@name Life Cycle */ /*@{ */ /** Constructor without a turbulence model */ ResidualBasedPredictorCorrectorVelocityBossakSchemeTurbulent( double NewAlphaBossak, double MoveMeshStrategy, unsigned int DomainSize) : Scheme<TSparseSpace, TDenseSpace>(), mRotationTool(DomainSize,DomainSize+1,SLIP), // Second argument is number of matrix rows per node: monolithic elements have velocity and pressure dofs. 
mrPeriodicIdVar(Kratos::Variable<int>::StaticObject()) { //default values for the Newmark Scheme mAlphaBossak = NewAlphaBossak; mBetaNewmark = 0.25 * pow((1.00 - mAlphaBossak), 2); mGammaNewmark = 0.5 - mAlphaBossak; mMeshVelocity = MoveMeshStrategy; //Allocate auxiliary memory int NumThreads = ParallelUtilities::GetNumThreads(); mMass.resize(NumThreads); mDamp.resize(NumThreads); mvel.resize(NumThreads); macc.resize(NumThreads); maccold.resize(NumThreads); } /** Constructor without a turbulence model with periodic conditions */ ResidualBasedPredictorCorrectorVelocityBossakSchemeTurbulent( double NewAlphaBossak, unsigned int DomainSize, const Variable<int>& rPeriodicIdVar) : Scheme<TSparseSpace, TDenseSpace>(), mRotationTool(DomainSize,DomainSize+1,SLIP), // Second argument is number of matrix rows per node: monolithic elements have velocity and pressure dofs. mrPeriodicIdVar(rPeriodicIdVar) { //default values for the Newmark Scheme mAlphaBossak = NewAlphaBossak; mBetaNewmark = 0.25 * pow((1.00 - mAlphaBossak), 2); mGammaNewmark = 0.5 - mAlphaBossak; mMeshVelocity = 0.0; //Allocate auxiliary memory int NumThreads = ParallelUtilities::GetNumThreads(); mMass.resize(NumThreads); mDamp.resize(NumThreads); mvel.resize(NumThreads); macc.resize(NumThreads); maccold.resize(NumThreads); } /** Constructor without a turbulence model */ ResidualBasedPredictorCorrectorVelocityBossakSchemeTurbulent( double NewAlphaBossak, double MoveMeshStrategy, unsigned int DomainSize, Kratos::Flags& rSlipFlag) : Scheme<TSparseSpace, TDenseSpace>(), mRotationTool(DomainSize,DomainSize+1,rSlipFlag), // Second argument is number of matrix rows per node: monolithic elements have velocity and pressure dofs. mrPeriodicIdVar(Kratos::Variable<int>::StaticObject()) { //default values for the Newmark Scheme mAlphaBossak = NewAlphaBossak; mBetaNewmark = 0.25 * pow((1.00 - mAlphaBossak), 2); mGammaNewmark = 0.5 - mAlphaBossak; mMeshVelocity = MoveMeshStrategy; //Allocate auxiliary memory int NumThreads = ParallelUtilities::GetNumThreads(); mMass.resize(NumThreads); mDamp.resize(NumThreads); mvel.resize(NumThreads); macc.resize(NumThreads); maccold.resize(NumThreads); } /** Constructor with a turbulence model */ ResidualBasedPredictorCorrectorVelocityBossakSchemeTurbulent( double NewAlphaBossak, double MoveMeshStrategy, unsigned int DomainSize, Process::Pointer pTurbulenceModel) : Scheme<TSparseSpace, TDenseSpace>(), mRotationTool(DomainSize,DomainSize+1,SLIP), // Second argument is number of matrix rows per node: monolithic elements have velocity and pressure dofs mrPeriodicIdVar(Kratos::Variable<int>::StaticObject()), mpTurbulenceModel(pTurbulenceModel) { //default values for the Newmark Scheme mAlphaBossak = NewAlphaBossak; mBetaNewmark = 0.25 * pow((1.00 - mAlphaBossak), 2); mGammaNewmark = 0.5 - mAlphaBossak; mMeshVelocity = MoveMeshStrategy; //Allocate auxiliary memory int NumThreads = ParallelUtilities::GetNumThreads(); mMass.resize(NumThreads); mDamp.resize(NumThreads); mvel.resize(NumThreads); macc.resize(NumThreads); maccold.resize(NumThreads); } /** Constructor with a turbulence model and relaxation factor */ ResidualBasedPredictorCorrectorVelocityBossakSchemeTurbulent( double NewAlphaBossak, double MoveMeshStrategy, unsigned int DomainSize, const double RelaxationFactor, Process::Pointer pTurbulenceModel) : Scheme<TSparseSpace, TDenseSpace>(), mRotationTool(DomainSize,DomainSize+1,SLIP), // Second argument is number of matrix rows per node: monolithic elements have velocity and pressure dofs 
mrPeriodicIdVar(Kratos::Variable<int>::StaticObject()), mpTurbulenceModel(pTurbulenceModel) { //default values for the Newmark Scheme mAlphaBossak = NewAlphaBossak; mBetaNewmark = 0.25 * pow((1.00 - mAlphaBossak), 2); mGammaNewmark = 0.5 - mAlphaBossak; mMeshVelocity = MoveMeshStrategy; mRelaxationFactor = RelaxationFactor; //Allocate auxiliary memory int NumThreads = ParallelUtilities::GetNumThreads(); mMass.resize(NumThreads); mDamp.resize(NumThreads); mvel.resize(NumThreads); macc.resize(NumThreads); maccold.resize(NumThreads); } /** Destructor. */ ~ResidualBasedPredictorCorrectorVelocityBossakSchemeTurbulent() override { } /*@} */ /**@name Operators */ /*@{ */ /** Performing the update of the solution. */ //*************************************************************************** void Update(ModelPart& r_model_part, DofsArrayType& rDofSet, TSystemMatrixType& A, TSystemVectorType& Dv, TSystemVectorType& b) override { KRATOS_TRY; mRotationTool.RotateVelocities(r_model_part); TSparseSpace::InplaceMult(Dv, mRelaxationFactor); mpDofUpdater->UpdateDofs(rDofSet,Dv); mRotationTool.RecoverVelocities(r_model_part); AdditionalUpdateOperations(r_model_part, rDofSet, A, Dv, b); KRATOS_CATCH("") } //*************************************************************************** void AdditionalUpdateOperations(ModelPart& rModelPart, DofsArrayType& rDofSet, TSystemMatrixType& A, TSystemVectorType& Dv, TSystemVectorType& b) { KRATOS_TRY int NumThreads = ParallelUtilities::GetNumThreads(); OpenMPUtils::PartitionVector NodePartition; OpenMPUtils::DivideInPartitions(rModelPart.Nodes().size(), NumThreads, NodePartition); //updating time derivatives (nodally for efficiency) #pragma omp parallel { array_1d<double, 3 > DeltaVel; int k = OpenMPUtils::ThisThread(); ModelPart::NodeIterator NodesBegin = rModelPart.NodesBegin() + NodePartition[k]; ModelPart::NodeIterator NodesEnd = rModelPart.NodesBegin() + NodePartition[k + 1]; for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; itNode++) { noalias(DeltaVel) = (itNode)->FastGetSolutionStepValue(VELOCITY) - (itNode)->FastGetSolutionStepValue(VELOCITY, 1); array_1d<double, 3 > & CurrentAcceleration = (itNode)->FastGetSolutionStepValue(ACCELERATION, 0); array_1d<double, 3 > & OldAcceleration = (itNode)->FastGetSolutionStepValue(ACCELERATION, 1); UpdateAcceleration(CurrentAcceleration, DeltaVel, OldAcceleration); if (mMeshVelocity == 2)//Lagrangian { if((itNode)->FastGetSolutionStepValue(IS_LAGRANGIAN_INLET) < 1e-15) { array_1d<double, 3 > & CurrentDisplacement = (itNode)->FastGetSolutionStepValue(DISPLACEMENT, 0); array_1d<double, 3 > & OldDisplacement = (itNode)->FastGetSolutionStepValue(DISPLACEMENT, 1); array_1d<double, 3 > & OldVelocity = (itNode)->FastGetSolutionStepValue(VELOCITY, 1); noalias(itNode->FastGetSolutionStepValue(MESH_VELOCITY)) = itNode->FastGetSolutionStepValue(VELOCITY); UpdateDisplacement(CurrentDisplacement, OldDisplacement, OldVelocity, OldAcceleration, CurrentAcceleration); } else { noalias(itNode->FastGetSolutionStepValue(MESH_VELOCITY)) = ZeroVector(3); noalias(itNode->FastGetSolutionStepValue(DISPLACEMENT)) = ZeroVector(3); } } } } KRATOS_CATCH("") } //*************************************************************************** //predicts the solution at the current step as // v = vold void Predict(ModelPart& rModelPart, DofsArrayType& rDofSet, TSystemMatrixType& A, TSystemVectorType& Dv, TSystemVectorType& b) override { // if (rModelPart.GetCommunicator().MyPID() == 0) // std::cout << "prediction" << std::endl; int 
NumThreads = ParallelUtilities::GetNumThreads(); OpenMPUtils::PartitionVector NodePartition; OpenMPUtils::DivideInPartitions(rModelPart.Nodes().size(), NumThreads, NodePartition); #pragma omp parallel { //array_1d<double, 3 > DeltaDisp; int k = OpenMPUtils::ThisThread(); ModelPart::NodeIterator NodesBegin = rModelPart.NodesBegin() + NodePartition[k]; ModelPart::NodeIterator NodesEnd = rModelPart.NodesBegin() + NodePartition[k + 1]; for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; itNode++) { array_1d<double, 3 > & OldVelocity = (itNode)->FastGetSolutionStepValue(VELOCITY, 1); double& OldPressure = (itNode)->FastGetSolutionStepValue(PRESSURE, 1); //predicting velocity //ATTENTION::: the prediction is performed only on free nodes array_1d<double, 3 > & CurrentVelocity = (itNode)->FastGetSolutionStepValue(VELOCITY); double& CurrentPressure = (itNode)->FastGetSolutionStepValue(PRESSURE); if ((itNode->pGetDof(VELOCITY_X))->IsFree()) (CurrentVelocity[0]) = OldVelocity[0]; if (itNode->pGetDof(VELOCITY_Y)->IsFree()) (CurrentVelocity[1]) = OldVelocity[1]; if (itNode->HasDofFor(VELOCITY_Z)) if (itNode->pGetDof(VELOCITY_Z)->IsFree()) (CurrentVelocity[2]) = OldVelocity[2]; if (itNode->pGetDof(PRESSURE)->IsFree()) CurrentPressure = OldPressure; // updating time derivatives ::: please note that displacements and // their time derivatives can not be consistently fixed separately array_1d<double, 3 > DeltaVel; noalias(DeltaVel) = CurrentVelocity - OldVelocity; array_1d<double, 3 > & OldAcceleration = (itNode)->FastGetSolutionStepValue(ACCELERATION, 1); array_1d<double, 3 > & CurrentAcceleration = (itNode)->FastGetSolutionStepValue(ACCELERATION); UpdateAcceleration(CurrentAcceleration, DeltaVel, OldAcceleration); if (mMeshVelocity == 2) //Lagrangian { array_1d<double, 3 > & OldDisplacement = (itNode)->FastGetSolutionStepValue(DISPLACEMENT, 1); array_1d<double, 3 > & CurrentDisplacement = (itNode)->FastGetSolutionStepValue(DISPLACEMENT, 0); if((itNode)->FastGetSolutionStepValue(IS_LAGRANGIAN_INLET) < 1e-15) { noalias(itNode->FastGetSolutionStepValue(MESH_VELOCITY)) = itNode->FastGetSolutionStepValue(VELOCITY); UpdateDisplacement(CurrentDisplacement, OldDisplacement, OldVelocity, OldAcceleration, CurrentAcceleration); } else { itNode->FastGetSolutionStepValue(MESH_VELOCITY_X) = 0.0; itNode->FastGetSolutionStepValue(MESH_VELOCITY_Y) = 0.0; itNode->FastGetSolutionStepValue(DISPLACEMENT_X) = 0.0; itNode->FastGetSolutionStepValue(DISPLACEMENT_Y) = 0.0; } } } } // if (rModelPart.GetCommunicator().MyPID() == 0) // std::cout << "end of prediction" << std::endl; } //*************************************************************************** /** this function is designed to be called in the builder and solver to introduce the selected time integration scheme. It "asks" the matrix needed to the element and performs the operations needed to introduce the seected time integration scheme. 
this function calculates at the same time the contribution to the LHS and to the RHS of the system */ void CalculateSystemContributions( Element& rCurrentElement, LocalSystemMatrixType& LHS_Contribution, LocalSystemVectorType& RHS_Contribution, Element::EquationIdVectorType& EquationId, const ProcessInfo& CurrentProcessInfo) override { KRATOS_TRY int k = OpenMPUtils::ThisThread(); //basic operations for the element considered rCurrentElement.CalculateLocalSystem(LHS_Contribution, RHS_Contribution, CurrentProcessInfo); rCurrentElement.CalculateMassMatrix(mMass[k], CurrentProcessInfo); rCurrentElement.CalculateLocalVelocityContribution(mDamp[k], RHS_Contribution, CurrentProcessInfo); rCurrentElement.EquationIdVector(EquationId, CurrentProcessInfo); //adding the dynamic contributions (statics is already included) AddDynamicsToLHS(LHS_Contribution, mDamp[k], mMass[k], CurrentProcessInfo); AddDynamicsToRHS(rCurrentElement, RHS_Contribution, mDamp[k], mMass[k], CurrentProcessInfo); // If there is a slip condition, apply it on a rotated system of coordinates mRotationTool.Rotate(LHS_Contribution,RHS_Contribution,rCurrentElement.GetGeometry()); mRotationTool.ApplySlipCondition(LHS_Contribution,RHS_Contribution,rCurrentElement.GetGeometry()); KRATOS_CATCH("") } void CalculateRHSContribution( Element& rCurrentElement, LocalSystemVectorType& RHS_Contribution, Element::EquationIdVectorType& EquationId, const ProcessInfo& CurrentProcessInfo) override { int k = OpenMPUtils::ThisThread(); //basic operations for the element considered rCurrentElement.CalculateRightHandSide(RHS_Contribution, CurrentProcessInfo); rCurrentElement.CalculateMassMatrix(mMass[k], CurrentProcessInfo); rCurrentElement.CalculateLocalVelocityContribution(mDamp[k], RHS_Contribution, CurrentProcessInfo); rCurrentElement.EquationIdVector(EquationId, CurrentProcessInfo); //adding the dynamic contributions (static is already included) AddDynamicsToRHS(rCurrentElement, RHS_Contribution, mDamp[k], mMass[k], CurrentProcessInfo); // If there is a slip condition, apply it on a rotated system of coordinates mRotationTool.Rotate(RHS_Contribution,rCurrentElement.GetGeometry()); mRotationTool.ApplySlipCondition(RHS_Contribution,rCurrentElement.GetGeometry()); } /** functions totally analogous to the precedent but applied to the "condition" objects */ void CalculateSystemContributions( Condition& rCurrentCondition, LocalSystemMatrixType& LHS_Contribution, LocalSystemVectorType& RHS_Contribution, Element::EquationIdVectorType& EquationId, const ProcessInfo& CurrentProcessInfo) override { KRATOS_TRY int k = OpenMPUtils::ThisThread(); rCurrentCondition.CalculateLocalSystem(LHS_Contribution, RHS_Contribution, CurrentProcessInfo); rCurrentCondition.CalculateMassMatrix(mMass[k], CurrentProcessInfo); //rCurrentCondition.CalculateDampingMatrix(VelocityBossakAuxiliaries::mDamp,CurrentProcessInfo); rCurrentCondition.CalculateLocalVelocityContribution(mDamp[k], RHS_Contribution, CurrentProcessInfo); rCurrentCondition.EquationIdVector(EquationId, CurrentProcessInfo); AddDynamicsToLHS(LHS_Contribution, mDamp[k], mMass[k], CurrentProcessInfo); AddDynamicsToRHS(rCurrentCondition, RHS_Contribution, mDamp[k], mMass[k], CurrentProcessInfo); // Rotate contributions (to match coordinates for slip conditions) mRotationTool.Rotate(LHS_Contribution,RHS_Contribution,rCurrentCondition.GetGeometry()); mRotationTool.ApplySlipCondition(LHS_Contribution,RHS_Contribution,rCurrentCondition.GetGeometry()); KRATOS_CATCH("") } void CalculateRHSContribution( Condition& 
rCurrentCondition, LocalSystemVectorType& RHS_Contribution, Element::EquationIdVectorType& EquationId, const ProcessInfo& rCurrentProcessInfo) override { KRATOS_TRY; int k = OpenMPUtils::ThisThread(); //basic operations for the element considered rCurrentCondition.CalculateRightHandSide(RHS_Contribution,rCurrentProcessInfo); rCurrentCondition.CalculateMassMatrix(mMass[k],rCurrentProcessInfo); //rCurrentCondition.CalculateDampingMatrix(VelocityBossakAuxiliaries::mDamp,CurrentProcessInfo); rCurrentCondition.CalculateLocalVelocityContribution(mDamp[k], RHS_Contribution,rCurrentProcessInfo); rCurrentCondition.EquationIdVector(EquationId,rCurrentProcessInfo); //adding the dynamic contributions (static is already included) AddDynamicsToRHS(rCurrentCondition, RHS_Contribution, mDamp[k], mMass[k],rCurrentProcessInfo); // Rotate contributions (to match coordinates for slip conditions) mRotationTool.Rotate(RHS_Contribution,rCurrentCondition.GetGeometry()); mRotationTool.ApplySlipCondition(RHS_Contribution,rCurrentCondition.GetGeometry()); KRATOS_CATCH(""); } //************************************************************************************* //************************************************************************************* void InitializeSolutionStep(ModelPart& r_model_part, TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b) override { ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo(); Scheme<TSparseSpace, TDenseSpace>::InitializeSolutionStep(r_model_part, A, Dx, b); double DeltaTime = CurrentProcessInfo[DELTA_TIME]; KRATOS_ERROR_IF(DeltaTime < 1.0e-12) << "Detected delta_time = 0 in the Bossak scheme. Check if the time step is created correctly for the current model part" << std::endl; //initializing constants ma0 = 1.0 / (mGammaNewmark * DeltaTime); ma1 = DeltaTime * mBetaNewmark / mGammaNewmark; ma2 = (-1 + mGammaNewmark) / mGammaNewmark; ma3 = DeltaTime; ma4 = pow(DeltaTime, 2)*(-2.0 * mBetaNewmark + 1.0) / 2.0; ma5 = pow(DeltaTime, 2) * mBetaNewmark; mam = (1.0 - mAlphaBossak) / (mGammaNewmark * DeltaTime); } //************************************************************************************* //************************************************************************************* void FinalizeNonLinIteration(ModelPart &rModelPart, TSystemMatrixType &A, TSystemVectorType &Dx, TSystemVectorType &b) override { const auto& r_current_process_info = rModelPart.GetProcessInfo(); if (mpTurbulenceModel) // If not null mpTurbulenceModel->Execute(); //if orthogonal subscales are computed if (r_current_process_info[OSS_SWITCH] == 1.0) { KRATOS_INFO("Bossak Scheme") << "Computing OSS projections" << std::endl; const int nnodes = static_cast<int>(rModelPart.Nodes().size()); auto nbegin = rModelPart.NodesBegin(); #pragma omp parallel for firstprivate(nbegin,nnodes) for(int i=0; i<nnodes; ++i) { auto ind = nbegin + i; noalias(ind->FastGetSolutionStepValue(ADVPROJ)) = ZeroVector(3); ind->FastGetSolutionStepValue(DIVPROJ) = 0.0; ind->FastGetSolutionStepValue(NODAL_AREA) = 0.0; }//end of loop over nodes //loop on nodes to compute ADVPROJ CONVPROJ NODALAREA array_1d<double, 3 > output = ZeroVector(3); const int nel = static_cast<int>(rModelPart.Elements().size()); auto elbegin = rModelPart.ElementsBegin(); #pragma omp parallel for firstprivate(elbegin,nel,output) for(int i=0; i<nel; ++i) { auto elem = elbegin + i; elem->Calculate(ADVPROJ, output, r_current_process_info); } rModelPart.GetCommunicator().AssembleCurrentData(NODAL_AREA); 
rModelPart.GetCommunicator().AssembleCurrentData(DIVPROJ); rModelPart.GetCommunicator().AssembleCurrentData(ADVPROJ); // Correction for periodic conditions this->PeriodicConditionProjectionCorrection(rModelPart); #pragma omp parallel for firstprivate(nbegin,nnodes) for(int i=0; i<nnodes; ++i) { auto ind = nbegin + i; if (ind->FastGetSolutionStepValue(NODAL_AREA) == 0.0) { ind->FastGetSolutionStepValue(NODAL_AREA) = 1.0; //KRATOS_WATCH("*********ATTENTION: NODAL AREA IS ZERRROOOO************"); } const double Area = ind->FastGetSolutionStepValue(NODAL_AREA); ind->FastGetSolutionStepValue(ADVPROJ) /= Area; ind->FastGetSolutionStepValue(DIVPROJ) /= Area; } } } void FinalizeSolutionStep(ModelPart &rModelPart, TSystemMatrixType &A, TSystemVectorType &Dx, TSystemVectorType &b) override { Element::EquationIdVectorType EquationId; LocalSystemVectorType RHS_Contribution; LocalSystemMatrixType LHS_Contribution; const ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo(); //for (ModelPart::NodeIterator itNode = rModelPart.NodesBegin(); itNode != rModelPart.NodesEnd(); ++itNode) #pragma omp parallel for for(int k = 0; k<static_cast<int>(rModelPart.Nodes().size()); k++) { auto itNode = rModelPart.NodesBegin() + k; (itNode->FastGetSolutionStepValue(REACTION)).clear(); // calculating relaxed acceleration const array_1d<double, 3 > & CurrentAcceleration = (itNode)->FastGetSolutionStepValue(ACCELERATION, 0); const array_1d<double, 3 > & OldAcceleration = (itNode)->FastGetSolutionStepValue(ACCELERATION, 1); const array_1d<double, 3> relaxed_acceleration = (1 - mAlphaBossak) * CurrentAcceleration + mAlphaBossak * OldAcceleration; (itNode)->SetValue(RELAXED_ACCELERATION, relaxed_acceleration); } //for (ModelPart::ElementsContainerType::ptr_iterator itElem = rModelPart.Elements().ptr_begin(); itElem != rModelPart.Elements().ptr_end(); ++itElem) #pragma omp parallel for firstprivate(EquationId,RHS_Contribution,LHS_Contribution) for(int k = 0; k<static_cast<int>(rModelPart.Elements().size()); k++) { auto itElem = rModelPart.Elements().ptr_begin()+k; int thread_id = OpenMPUtils::ThisThread(); //KRATOS_WATCH(LHS_Contribution); //basic operations for the element considered (*itElem)->CalculateLocalSystem(LHS_Contribution, RHS_Contribution, CurrentProcessInfo); //std::cout << rCurrentElement->Id() << " RHS = " << RHS_Contribution << std::endl; (*itElem)->CalculateMassMatrix(mMass[thread_id], CurrentProcessInfo); (*itElem)->CalculateLocalVelocityContribution(mDamp[thread_id], RHS_Contribution, CurrentProcessInfo); (*itElem)->EquationIdVector(EquationId, CurrentProcessInfo); //adding the dynamic contributions (statics is already included) AddDynamicsToLHS(LHS_Contribution, mDamp[thread_id], mMass[thread_id], CurrentProcessInfo); AddDynamicsToRHS(**itElem, RHS_Contribution, mDamp[thread_id], mMass[thread_id], CurrentProcessInfo); GeometryType& rGeom = (*itElem)->GetGeometry(); unsigned int NumNodes = rGeom.PointsNumber(); unsigned int Dimension = rGeom.WorkingSpaceDimension(); unsigned int index = 0; for (unsigned int i = 0; i < NumNodes; i++) { auto& reaction = rGeom[i].FastGetSolutionStepValue(REACTION); double& target_value0 = reaction[0]; const double& origin_value0 = RHS_Contribution[index++]; #pragma omp atomic target_value0 -= origin_value0; double& target_value1 = reaction[1]; const double& origin_value1 = RHS_Contribution[index++]; #pragma omp atomic target_value1 -= origin_value1; if (Dimension == 3) { double& target_value2 = reaction[2]; const double& origin_value2 = RHS_Contribution[index++]; 
#pragma omp atomic target_value2 -= origin_value2; } // rGeom[i].FastGetSolutionStepValue(REACTION_X,0) -= RHS_Contribution[index++]; // rGeom[i].FastGetSolutionStepValue(REACTION_Y,0) -= RHS_Contribution[index++]; // if (Dimension == 3) rGeom[i].FastGetSolutionStepValue(REACTION_Z,0) -= RHS_Contribution[index++]; index++; // skip pressure dof } } rModelPart.GetCommunicator().AssembleCurrentData(REACTION); // Base scheme calls FinalizeSolutionStep method of elements and conditions Scheme<TSparseSpace, TDenseSpace>::FinalizeSolutionStep(rModelPart, A, Dx, b); } //************************************************************************************************ //************************************************************************************************ /// Free memory allocated by this object. void Clear() override { this->mpDofUpdater->Clear(); } /*@} */ /**@name Operations */ /*@{ */ /*@} */ /**@name Access */ /*@{ */ /*@} */ /**@name Inquiry */ /*@{ */ /*@} */ /**@name Friends */ /*@{ */ /*@} */ protected: /**@name Protected static Member Variables */ /*@{ */ /*@} */ /**@name Protected member Variables */ /*@{ */ double mAlphaBossak; double mBetaNewmark; double mGammaNewmark; double mMeshVelocity; double mRelaxationFactor = 1.0; double ma0; double ma1; double ma2; double ma3; double ma4; double ma5; double mam; std::vector< Matrix > mMass; std::vector< Matrix > mDamp; std::vector< Vector > mvel; std::vector< Vector > macc; std::vector< Vector > maccold; /*@} */ /**@name Protected Operators*/ /*@{ */ /** On periodic boundaries, the nodal area and the values to project need to take into account contributions from elements on * both sides of the boundary. This is done using the conditions and the non-historical nodal data containers as follows:\n * 1- The partition that owns the PeriodicCondition adds the values on both nodes to their non-historical containers.\n * 2- The non-historical containers are added across processes, communicating the right value from the condition owner to all partitions.\n * 3- The value on all periodic nodes is replaced by the one received in step 2. 
*/ void PeriodicConditionProjectionCorrection(ModelPart& rModelPart) { const int num_nodes = rModelPart.NumberOfNodes(); const int num_conditions = rModelPart.NumberOfConditions(); #pragma omp parallel for for (int i = 0; i < num_nodes; i++) { auto it_node = rModelPart.NodesBegin() + i; it_node->SetValue(NODAL_AREA,0.0); it_node->SetValue(ADVPROJ,ZeroVector(3)); it_node->SetValue(DIVPROJ,0.0); } #pragma omp parallel for for (int i = 0; i < num_conditions; i++) { auto it_cond = rModelPart.ConditionsBegin() + i; if(it_cond->Is(PERIODIC)) { this->AssemblePeriodicContributionToProjections(it_cond->GetGeometry()); } } rModelPart.GetCommunicator().AssembleNonHistoricalData(NODAL_AREA); rModelPart.GetCommunicator().AssembleNonHistoricalData(ADVPROJ); rModelPart.GetCommunicator().AssembleNonHistoricalData(DIVPROJ); #pragma omp parallel for for (int i = 0; i < num_nodes; i++) { auto it_node = rModelPart.NodesBegin() + i; this->CorrectContributionsOnPeriodicNode(*it_node); } } void AssemblePeriodicContributionToProjections(Geometry< Node<3> >& rGeometry) { unsigned int nodes_in_cond = rGeometry.PointsNumber(); double nodal_area = 0.0; array_1d<double,3> momentum_projection = ZeroVector(3); double mass_projection = 0.0; for ( unsigned int i = 0; i < nodes_in_cond; i++ ) { auto& r_node = rGeometry[i]; nodal_area += r_node.FastGetSolutionStepValue(NODAL_AREA); noalias(momentum_projection) += r_node.FastGetSolutionStepValue(ADVPROJ); mass_projection += r_node.FastGetSolutionStepValue(DIVPROJ); } for ( unsigned int i = 0; i < nodes_in_cond; i++ ) { auto& r_node = rGeometry[i]; /* Note that this loop is expected to be threadsafe in normal conditions, * since each node should belong to a single periodic link. However, I am * setting the locks for openmp in case that we try more complicated things * in the future (like having different periodic conditions for different * coordinate directions). */ r_node.SetLock(); r_node.GetValue(NODAL_AREA) = nodal_area; noalias(r_node.GetValue(ADVPROJ)) = momentum_projection; r_node.GetValue(DIVPROJ) = mass_projection; r_node.UnSetLock(); } } void CorrectContributionsOnPeriodicNode(Node<3>& rNode) { if (rNode.GetValue(NODAL_AREA) != 0.0) // Only periodic nodes will have a non-historical NODAL_AREA set. 
{ rNode.FastGetSolutionStepValue(NODAL_AREA) = rNode.GetValue(NODAL_AREA); noalias(rNode.FastGetSolutionStepValue(ADVPROJ)) = rNode.GetValue(ADVPROJ); rNode.FastGetSolutionStepValue(DIVPROJ) = rNode.GetValue(DIVPROJ); } } //********************************************************************************* //Updating first time Derivative //********************************************************************************* void UpdateDisplacement(array_1d<double, 3 > & CurrentDisplacement, const array_1d<double, 3 > & OldDisplacement, const array_1d<double, 3 > & OldVelocity, const array_1d<double, 3 > & OldAcceleration, const array_1d<double, 3 > & CurrentAcceleration) { noalias(CurrentDisplacement) = OldDisplacement + ma3 * OldVelocity + ma4 * OldAcceleration + ma5*CurrentAcceleration; } //************************************************************************** void UpdateAcceleration(array_1d<double, 3 > & CurrentAcceleration, const array_1d<double, 3 > & DeltaVel, const array_1d<double, 3 > & OldAcceleration) { noalias(CurrentAcceleration) = ma0 * DeltaVel + ma2 * OldAcceleration; } //**************************************************************************** /** Kdyn = am*M + D + a1*K */ void AddDynamicsToLHS(LocalSystemMatrixType& LHS_Contribution, LocalSystemMatrixType& D, LocalSystemMatrixType& M, const ProcessInfo& CurrentProcessInfo) { //multipling time scheme factor LHS_Contribution *= ma1; // adding mass contribution to the dynamic stiffness if (M.size1() != 0) // if M matrix declared { noalias(LHS_Contribution) += mam*M; } //adding damping contribution if (D.size1() != 0) // if M matrix declared { noalias(LHS_Contribution) += D; } } //**************************************************************************** /// Add Bossak contributions from the inertial term to the RHS vector. /** This essentially performs bdyn = b - M*acc for the current element. * Note that viscous/pressure contributions to the RHS are expected to be added by the element itself. * @param[in] rCurrentElement The fluid element we are assembling. * @param[in/out] rRHS_Contribution The right hand side term where the contribution will be added. * @param[in] rD The elemental velocity/pressure LHS matrix. * @param[in] rM The elemental acceleration LHS matrix. * @param[in] rCurrentProcessInfo ProcessInfo instance for the containing ModelPart. */ void AddDynamicsToRHS( Element& rCurrentElement, LocalSystemVectorType& rRHS_Contribution, LocalSystemMatrixType& rD, LocalSystemMatrixType& rM, const ProcessInfo& rCurrentProcessInfo) { //adding inertia contribution if (rM.size1() != 0) { const auto& r_const_elem_ref = rCurrentElement; int k = OpenMPUtils::ThisThread(); r_const_elem_ref.GetSecondDerivativesVector(macc[k], 0); (macc[k]) *= (1.00 - mAlphaBossak); r_const_elem_ref.GetSecondDerivativesVector(maccold[k], 1); noalias(macc[k]) += mAlphaBossak * maccold[k]; noalias(rRHS_Contribution) -= prod(rM, macc[k]); } } /// Add Bossak contributions from the inertial term to the RHS vector. /** This essentially performs bdyn = b - M*acc for the current condition. * Note that viscous/pressure contributions to the RHS are expected to be added by the element condition. * @param[in] rCurrentCondition The fluid condition we are assembling. * @param[in/out] rRHS_Contribution The right hand side term where the contribution will be added. * @param[in] rD The elemental velocity/pressure LHS matrix. * @param[in] rM The elemental acceleration LHS matrix. 
* @param[in] rCurrentProcessInfo ProcessInfo instance for the containing ModelPart. */ void AddDynamicsToRHS( Condition& rCurrentCondition, LocalSystemVectorType& rRHS_Contribution, LocalSystemMatrixType& D, LocalSystemMatrixType& rM, const ProcessInfo& rCurrentProcessInfo) { //adding inertia contribution if (rM.size1() != 0) { const auto& r_const_cond_ref = rCurrentCondition; int k = OpenMPUtils::ThisThread(); r_const_cond_ref.GetSecondDerivativesVector(macc[k], 0); (macc[k]) *= (1.00 - mAlphaBossak); r_const_cond_ref.GetSecondDerivativesVector(maccold[k], 1); noalias(macc[k]) += mAlphaBossak * maccold[k]; noalias(rRHS_Contribution) -= prod(rM, macc[k]); } } /*@} */ /**@name Protected Operations*/ /*@{ */ /*@} */ /**@name Protected Access */ /*@{ */ /*@} */ /**@name Protected Inquiry */ /*@{ */ /*@} */ /**@name Protected LifeCycle */ /*@{ */ /*@} */ private: /**@name Static Member Variables */ /*@{ */ /*@} */ /**@name Member Variables */ /*@{ */ CoordinateTransformationUtils<LocalSystemMatrixType,LocalSystemVectorType,double> mRotationTool; const Variable<int>& mrPeriodicIdVar; Process::Pointer mpTurbulenceModel; typename TSparseSpace::DofUpdaterPointerType mpDofUpdater = TSparseSpace::CreateDofUpdater(); /*@} */ /**@name Private Operators*/ /*@{ */ /*@} */ /**@name Private Operations*/ /*@{ */ /*@} */ /**@name Private Access */ /*@{ */ /*@} */ /**@name Private Inquiry */ /*@{ */ /*@} */ /**@name Un accessible methods */ /*@{ */ /*@} */ }; /* Class Scheme */ /*@} */ /**@name Type Definitions */ /*@{ */ /*@} */ } /* namespace Kratos.*/ #endif /* KRATOS_RESIDUALBASED_PREDICTOR_CORRECTOR_BOSSAK_SCHEME defined */
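The coefficients set in InitializeSolutionStep and consumed by UpdateAcceleration / UpdateDisplacement above follow the standard Bossak-Newmark relations (gamma = 0.5 - alpha, beta = 0.25 * (1 - alpha)^2). Below is a minimal sketch of those relations written outside the scheme class; the struct and function names are illustrative and chosen here for clarity, not taken from Kratos.

#include <array>
#include <cmath>

// Bossak-Newmark coefficients, mirroring ma0, ma2, ma3, ma4, ma5 of the scheme above.
struct BossakCoefficients {
    double a0, a2, a3, a4, a5;
};

BossakCoefficients ComputeCoefficients(double alpha_bossak, double dt)
{
    const double gamma = 0.5 - alpha_bossak;
    const double beta  = 0.25 * std::pow(1.0 - alpha_bossak, 2);
    BossakCoefficients c;
    c.a0 = 1.0 / (gamma * dt);                  // multiplies the velocity increment
    c.a2 = (gamma - 1.0) / gamma;               // multiplies the old acceleration
    c.a3 = dt;                                  // multiplies the old velocity
    c.a4 = dt * dt * (1.0 - 2.0 * beta) / 2.0;  // multiplies the old acceleration (displacement update)
    c.a5 = dt * dt * beta;                      // multiplies the new acceleration (displacement update)
    return c;
}

// a_{n+1} = a0 * (v_{n+1} - v_n) + a2 * a_n
std::array<double, 3> NewAcceleration(const BossakCoefficients& c,
                                      const std::array<double, 3>& delta_v,
                                      const std::array<double, 3>& acc_old)
{
    return { c.a0 * delta_v[0] + c.a2 * acc_old[0],
             c.a0 * delta_v[1] + c.a2 * acc_old[1],
             c.a0 * delta_v[2] + c.a2 * acc_old[2] };
}

// d_{n+1} = d_n + a3 * v_n + a4 * a_n + a5 * a_{n+1}
std::array<double, 3> NewDisplacement(const BossakCoefficients& c,
                                      const std::array<double, 3>& disp_old,
                                      const std::array<double, 3>& vel_old,
                                      const std::array<double, 3>& acc_old,
                                      const std::array<double, 3>& acc_new)
{
    std::array<double, 3> d;
    for (int i = 0; i < 3; ++i)
        d[i] = disp_old[i] + c.a3 * vel_old[i] + c.a4 * acc_old[i] + c.a5 * acc_new[i];
    return d;
}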
46a_so12_itt.c
#define _POSIX_C_SOURCE 200809L #include "stdlib.h" #include "math.h" #include "sys/time.h" #include "ittnotify.h" #include "xmmintrin.h" #include "pmmintrin.h" #include <stdio.h> #include "omp.h" #define min(a, b) (((a) < (b)) ? (a) : (b)) #define max(a, b) (((a) > (b)) ? (a) : (b)) struct dataobj { void *restrict data; int *size; int *npsize; int *dsize; int *hsize; int *hofs; int *oofs; }; int Kernel(struct dataobj *restrict block_sizes_vec, struct dataobj *restrict damp_vec, const float dt, const float h_x, const float h_y, const float h_z, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict save_src_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict usol_vec, struct dataobj *restrict vp_vec, const int sp_zi_m, const int time_M, const int time_m, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads) { int (*restrict block_sizes) __attribute__ ((aligned (64))) = (int (*)) block_sizes_vec->data; float(*restrict damp)[damp_vec->size[1]][damp_vec->size[2]] __attribute__((aligned(64))) = (float(*)[damp_vec->size[1]][damp_vec->size[2]])damp_vec->data; int(*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__((aligned(64))) = (int(*)[nnz_sp_source_mask_vec->size[1]])nnz_sp_source_mask_vec->data; float(*restrict save_src)[save_src_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_vec->size[1]])save_src_vec->data; int(*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_id_vec->size[1]][source_id_vec->size[2]])source_id_vec->data; float(*restrict source_mask)[source_mask_vec->size[1]][source_mask_vec->size[2]] __attribute__((aligned(64))) = (float(*)[source_mask_vec->size[1]][source_mask_vec->size[2]])source_mask_vec->data; int(*restrict sp_source_mask)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]])sp_source_mask_vec->data; float(*restrict usol)[usol_vec->size[1]][usol_vec->size[2]][usol_vec->size[3]] __attribute__((aligned(64))) = (float(*)[usol_vec->size[1]][usol_vec->size[2]][usol_vec->size[3]])usol_vec->data; float(*restrict vp)[vp_vec->size[1]][vp_vec->size[2]] __attribute__((aligned(64))) = (float(*)[vp_vec->size[1]][vp_vec->size[2]])vp_vec->data; /* Flush denormal numbers to zero in hardware */ _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON); _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON); __itt_resume(); int xb_size = block_sizes[0]; int y0_blk0_size = block_sizes[3]; int x0_blk0_size = block_sizes[2]; int yb_size = block_sizes[1]; printf(" Tiles: %d, %d ::: Blocks %d, %d \n", x0_blk0_size, y0_blk0_size, xb_size, yb_size); int sf = 6; int t_blk_size = 2 * sf * (time_M - time_m); for (int t_blk = time_m; t_blk < sf * (time_M - time_m); t_blk += sf * t_blk_size) // for each t block { for (int xb = x_m; xb <= (x_M + sf * (time_M - time_m)); xb += xb_size + 1) { for (int yb = y_m; yb <= (y_M + sf * (time_M - time_m)); yb += yb_size + 1) { for (int time = t_blk, t0 = (time + 1) % (3), t1 = (time) % (3), t2 = (time + 2) % (3); time <= 1 + min(t_blk + t_blk_size - 1, sf * (time_M - time_m)); time += sf, t0 = (((time / sf) % (time_M - time_m + 1)) + 1) % (3), t1 = (((time / sf) % (time_M - time_m + 1))) % (3), t2 = (((time / sf) % (time_M - time_m + 1)) + 2) % (3)) { int tw = ((time / sf) % (time_M - time_m + 
1)); /* Begin section0 */ #pragma omp parallel num_threads(nthreads) { #pragma omp for collapse(2) schedule(dynamic, 1) for (int x0_blk0 = max((x_m + time), xb); x0_blk0 <= min((x_M + time), (xb + xb_size)); x0_blk0 += x0_blk0_size) { for (int y0_blk0 = max((y_m + time), yb); y0_blk0 <= min((y_M + time), (yb + yb_size)); y0_blk0 += y0_blk0_size) { for (int x = x0_blk0; x <= min(min((x_M + time), (xb + xb_size)), (x0_blk0 + x0_blk0_size - 1)); x++) { for (int y = y0_blk0; y <= min(min((y_M + time), (yb + yb_size)), (y0_blk0 + y0_blk0_size - 1)); y++) { #pragma omp simd aligned(damp, usol, vp : 32) for (int z = z_m; z <= z_M; z += 1) { float r7 = -2.98277778F * usol[t1][x - time + 12][y - time + 12][z + 12]; float r6 = 1.0 / dt; float r5 = 1.0 / (dt * dt); float r4 = 1.0 / (vp[x - time + 12][y - time + 12][z + 12] * vp[x - time + 12][y - time + 12][z + 12]); usol[t0][x - time + 12][y - time + 12][z + 12] = (r4 * (-r5 * (-2.0F * usol[t1][x - time + 12][y - time + 12][z + 12] + usol[t2][x - time + 12][y - time + 12][z + 12])) + r6 * (damp[x - time + 1][y - time + 1][z + 1] * usol[t1][x - time + 12][y - time + 12][z + 12]) + (r7 - 6.01250601e-5F * (usol[t1][x - time + 12][y - time + 12][z + 6] + usol[t1][x - time + 12][y - time + 12][z + 18]) + 1.03896104e-3F * (usol[t1][x - time + 12][y - time + 12][z + 7] + usol[t1][x - time + 12][y - time + 12][z + 17]) - 8.92857143e-3F * (usol[t1][x - time + 12][y - time + 12][z + 8] + usol[t1][x - time + 12][y - time + 12][z + 16]) + 5.29100529e-2F * (usol[t1][x - time + 12][y - time + 12][z + 9] + usol[t1][x - time + 12][y - time + 12][z + 15]) - 2.67857143e-1F * (usol[t1][x - time + 12][y - time + 12][z + 10] + usol[t1][x - time + 12][y - time + 12][z + 14]) + 1.71428571F * (usol[t1][x - time + 12][y - time + 12][z + 11] + usol[t1][x - time + 12][y - time + 12][z + 13])) / ((h_z * h_z)) + (r7 - 6.01250601e-5F * (usol[t1][x - time + 12][y - time + 6][z + 12] + usol[t1][x - time + 12][y - time + 18][z + 12]) + 1.03896104e-3F * (usol[t1][x - time + 12][y - time + 7][z + 12] + usol[t1][x - time + 12][y - time + 17][z + 12]) - 8.92857143e-3F * (usol[t1][x - time + 12][y - time + 8][z + 12] + usol[t1][x - time + 12][y - time + 16][z + 12]) + 5.29100529e-2F * (usol[t1][x - time + 12][y - time + 9][z + 12] + usol[t1][x - time + 12][y - time + 15][z + 12]) - 2.67857143e-1F * (usol[t1][x - time + 12][y - time + 10][z + 12] + usol[t1][x - time + 12][y - time + 14][z + 12]) + 1.71428571F * (usol[t1][x - time + 12][y - time + 11][z + 12] + usol[t1][x - time + 12][y - time + 13][z + 12])) / ((h_y * h_y)) + (r7 - 6.01250601e-5F * (usol[t1][x - time + 6][y - time + 12][z + 12] + usol[t1][x - time + 18][y - time + 12][z + 12]) + 1.03896104e-3F * (usol[t1][x - time + 7][y - time + 12][z + 12] + usol[t1][x - time + 17][y - time + 12][z + 12]) - 8.92857143e-3F * (usol[t1][x - time + 8][y - time + 12][z + 12] + usol[t1][x - time + 16][y - time + 12][z + 12]) + 5.29100529e-2F * (usol[t1][x - time + 9][y - time + 12][z + 12] + usol[t1][x - time + 15][y - time + 12][z + 12]) - 2.67857143e-1F * (usol[t1][x - time + 10][y - time + 12][z + 12] + usol[t1][x - time + 14][y - time + 12][z + 12]) + 1.71428571F * (usol[t1][x - time + 11][y - time + 12][z + 12] + usol[t1][x - time + 13][y - time + 12][z + 12])) / ((h_x * h_x))) / (r4 * r5 + r6 * damp[x - time + 1][y - time + 1][z + 1]); } #pragma omp simd aligned(damp, usol, vp : 32) for (int sp_zi = sp_zi_m; sp_zi <= nnz_sp_source_mask[x - time][y - time] - 1; sp_zi += 1) { int zind = sp_source_mask[x - time][y - time][sp_zi]; float r0 
= save_src[((time / sf) % (time_M - time_m + 1))][source_id[x - time][y - time][zind]] * source_mask[x - time][y - time][zind]; usol[t0][x - time + 12][y - time + 12][zind + 12] += r0;} } } } } } } } } } /* End section0 */ __itt_pause(); return 0; }
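The kernel above keeps the temporal-blocking and wavefront offset loops serial, distributes the (x, y) tile loops across threads with a collapse(2), dynamically scheduled OpenMP worksharing loop, and vectorizes the innermost z loop with omp simd. The skeleton below strips that pattern down to a toy update on a fixed-size array; the dimensions, block sizes, and the update itself are placeholders rather than the generated stencil.

#include <omp.h>

#define NX 64
#define NY 64
#define NZ 64

static float u[NX][NY][NZ];

void blocked_update(int x_blk_size, int y_blk_size, int nthreads)
{
    #pragma omp parallel num_threads(nthreads)
    {
        // Threads grab (x, y) tiles dynamically, mirroring the kernel's
        // "#pragma omp for collapse(2) schedule(dynamic, 1)" over x0_blk0 / y0_blk0.
        #pragma omp for collapse(2) schedule(dynamic, 1)
        for (int xb = 0; xb < NX; xb += x_blk_size) {
            for (int yb = 0; yb < NY; yb += y_blk_size) {
                for (int x = xb; x < xb + x_blk_size && x < NX; ++x) {
                    for (int y = yb; y < yb + y_blk_size && y < NY; ++y) {
                        // The contiguous z dimension is handed to the vectorizer.
                        #pragma omp simd
                        for (int z = 0; z < NZ; ++z)
                            u[x][y][z] += 1.0f;  // placeholder for the stencil update
                    }
                }
            }
        }
    }
}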
#define _POSIX_C_SOURCE 200809L #include "stdlib.h" #include "math.h" #include "sys/time.h" #include "ittnotify.h" #include "xmmintrin.h" #include "pmmintrin.h" #include <stdio.h> #include "omp.h" #define min(a, b) (((a) < (b)) ? (a) : (b)) #define max(a, b) (((a) > (b)) ? (a) : (b)) struct dataobj { void *restrict data; int *size; int *npsize; int *dsize; int *hsize; int *hofs; int *oofs; }; int Kernel(struct dataobj *restrict block_sizes_vec, struct dataobj *restrict damp_vec, const float dt, const float h_x, const float h_y, const float h_z, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict save_src_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict usol_vec, struct dataobj *restrict vp_vec, const int sp_zi_m, const int time_M, const int time_m, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads) { int (*restrict block_sizes) __attribute__((aligned(64))) = (int (*))block_sizes_vec->data; float (*restrict damp)[damp_vec->size[1]][damp_vec->size[2]] __attribute__((aligned(64))) = (float (*)[damp_vec->size[1]][damp_vec->size[2]])damp_vec->data; int (*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__((aligned(64))) = (int (*)[nnz_sp_source_mask_vec->size[1]])nnz_sp_source_mask_vec->data; float (*restrict save_src)[save_src_vec->size[1]] __attribute__((aligned(64))) = (float (*)[save_src_vec->size[1]])save_src_vec->data; int (*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__((aligned(64))) = (int (*)[source_id_vec->size[1]][source_id_vec->size[2]])source_id_vec->data; float (*restrict source_mask)[source_mask_vec->size[1]][source_mask_vec->size[2]] __attribute__((aligned(64))) = (float (*)[source_mask_vec->size[1]][source_mask_vec->size[2]])source_mask_vec->data; int (*restrict sp_source_mask)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]] __attribute__((aligned(64))) = (int (*)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]])sp_source_mask_vec->data; float (*restrict usol)[usol_vec->size[1]][usol_vec->size[2]][usol_vec->size[3]] __attribute__((aligned(64))) = (float (*)[usol_vec->size[1]][usol_vec->size[2]][usol_vec->size[3]])usol_vec->data; float (*restrict vp)[vp_vec->size[1]][vp_vec->size[2]] __attribute__((aligned(64))) = (float (*)[vp_vec->size[1]][vp_vec->size[2]])vp_vec->data; /* Flush denormal numbers to zero in hardware */ _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON); _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON); __itt_resume(); int xb_size = block_sizes[0]; int y0_blk0_size = block_sizes[3]; int x0_blk0_size = block_sizes[2]; int yb_size = block_sizes[1]; printf(" Tiles: %d, %d ::: Blocks %d, %d \n", x0_blk0_size, y0_blk0_size, xb_size, yb_size); int sf = 6; int t_blk_size = 2 * sf * (time_M - time_m); for (int t_blk = time_m; t_blk < sf * (time_M - time_m); t_blk += sf * t_blk_size) //for each t block { for (int xb = x_m; xb <= (x_M + sf * (time_M - time_m)); xb += xb_size + 1) { for (int yb = y_m; yb <= (y_M + sf * (time_M - time_m)); yb += yb_size + 1) { for (int time = t_blk, t0 = (time + 1) % (3), t1 = (time) % (3), t2 = (time + 2) % (3); time <= 1 + min(t_blk + t_blk_size - 1, sf * (time_M - time_m)); time += sf, t0 = (((time / sf) % (time_M - time_m + 1)) + 1) % (3), t1 = (((time / sf) % (time_M - time_m + 1))) % (3), t2 = (((time / sf) % (time_M - time_m + 1)) + 2) % (3)) { int tw = ((time / sf) % (time_M 
- time_m + 1)); /* Begin section0 */ for (int x0_blk0 = max((x_m + time), xb); x0_blk0 <= min((x_M + time), (xb + xb_size)); x0_blk0 += x0_blk0_size) { for (int y0_blk0 = max((y_m + time), yb); y0_blk0 <= min((y_M + time), (yb + yb_size)); y0_blk0 += y0_blk0_size) { for (int x = x0_blk0; x <= min(min((x_M + time), (xb + xb_size)), (x0_blk0 + x0_blk0_size - 1)); x++) { for (int y = y0_blk0; y <= min(min((y_M + time), (yb + yb_size)), (y0_blk0 + y0_blk0_size - 1)); y++) { for (int z = z_m; z <= z_M; z += 1) { float r7 = -2.98277778 F * usol[t1][x - time + 12][y - time + 12][z + 12]; float r6 = 1.0 / dt; float r5 = 1.0 / (dt * dt); float r4 = 1.0 / (vp[x - time + 12][y - time + 12][z + 12] * vp[x - time + 12][y - time + 12][z + 12]); usol[t0][x - time + 12][y - time + 12][z + 12] = (r4 * (-r5 * (-2.0 F * usol[t1][x - time + 12][y - time + 12][z + 12] + usol[t2][x - time + 12][y - time + 12][z + 12])) + r6 * (damp[x - time + 1][y - time + 1][z + 1] * usol[t1][x - time + 12][y - time + 12][z + 12]) + (r7 - 6.01250601e-5 F * (usol[t1][x - time + 12][y - time + 12][z + 6] + usol[t1][x - time + 12][y - time + 12][z + 18]) + 1.03896104e-3 F * (usol[t1][x - time + 12][y - time + 12][z + 7] + usol[t1][x - time + 12][y - time + 12][z + 17]) - 8.92857143e-3 F * (usol[t1][x - time + 12][y - time + 12][z + 8] + usol[t1][x - time + 12][y - time + 12][z + 16]) + 5.29100529e-2 F * (usol[t1][x - time + 12][y - time + 12][z + 9] + usol[t1][x - time + 12][y - time + 12][z + 15]) - 2.67857143e-1 F * (usol[t1][x - time + 12][y - time + 12][z + 10] + usol[t1][x - time + 12][y - time + 12][z + 14]) + 1.71428571 F * (usol[t1][x - time + 12][y - time + 12][z + 11] + usol[t1][x - time + 12][y - time + 12][z + 13])) / ((h_z * h_z)) + (r7 - 6.01250601e-5 F * (usol[t1][x - time + 12][y - time + 6][z + 12] + usol[t1][x - time + 12][y - time + 18][z + 12]) + 1.03896104e-3 F * (usol[t1][x - time + 12][y - time + 7][z + 12] + usol[t1][x - time + 12][y - time + 17][z + 12]) - 8.92857143e-3 F * (usol[t1][x - time + 12][y - time + 8][z + 12] + usol[t1][x - time + 12][y - time + 16][z + 12]) + 5.29100529e-2 F * (usol[t1][x - time + 12][y - time + 9][z + 12] + usol[t1][x - time + 12][y - time + 15][z + 12]) - 2.67857143e-1 F * (usol[t1][x - time + 12][y - time + 10][z + 12] + usol[t1][x - time + 12][y - time + 14][z + 12]) + 1.71428571 F * (usol[t1][x - time + 12][y - time + 11][z + 12] + usol[t1][x - time + 12][y - time + 13][z + 12])) / ((h_y * h_y)) + (r7 - 6.01250601e-5 F * (usol[t1][x - time + 6][y - time + 12][z + 12] + usol[t1][x - time + 18][y - time + 12][z + 12]) + 1.03896104e-3 F * (usol[t1][x - time + 7][y - time + 12][z + 12] + usol[t1][x - time + 17][y - time + 12][z + 12]) - 8.92857143e-3 F * (usol[t1][x - time + 8][y - time + 12][z + 12] + usol[t1][x - time + 16][y - time + 12][z + 12]) + 5.29100529e-2 F * (usol[t1][x - time + 9][y - time + 12][z + 12] + usol[t1][x - time + 15][y - time + 12][z + 12]) - 2.67857143e-1 F * (usol[t1][x - time + 10][y - time + 12][z + 12] + usol[t1][x - time + 14][y - time + 12][z + 12]) + 1.71428571 F * (usol[t1][x - time + 11][y - time + 12][z + 12] + usol[t1][x - time + 13][y - time + 12][z + 12])) / ((h_x * h_x))) / (r4 * r5 + r6 * damp[x - time + 1][y - time + 1][z + 1]); } for (int sp_zi = sp_zi_m; sp_zi <= nnz_sp_source_mask[x - time][y - time] - 1; sp_zi += 1) { int zind = sp_source_mask[x - time][y - time][sp_zi]; float r0 = save_src[((time / sf) % (time_M - time_m + 1))][source_id[x - time][y - time][zind]] * source_mask[x - time][y - time][zind]; usol[t0][x - time + 12][y 
- time + 12][zind + 12] += r0; } } } } } } } } } /* End section0 */ __itt_pause(); return 0; }
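Both variants of this kernel disable denormal arithmetic before entering the stencil loops through the SSE control-register macros from xmmintrin.h / pmmintrin.h, since denormal operands in the damped absorbing layer can slow the floating-point units down dramatically. A minimal standalone illustration of that setup follows; the wrapper function is an example only, and note that the MXCSR register is per-thread state, so it only affects the thread that executes these calls.

#include <xmmintrin.h>   // _MM_SET_FLUSH_ZERO_MODE
#include <pmmintrin.h>   // _MM_SET_DENORMALS_ZERO_MODE (SSE3)

void enable_flush_to_zero(void)
{
    // Treat denormal inputs as zero and flush denormal results to zero
    // for all SSE floating-point operations on the calling thread.
    _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
    _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);
}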
#define _POSIX_C_SOURCE 200809L #include "stdlib.h" #include "math.h" #include "sys/time.h" #include "ittnotify.h" #include "xmmintrin.h" #include "pmmintrin.h" #include <stdio.h> #include "omp.h" #define min(a, b) (((a) < (b)) ? (a) : (b)) #define max(a, b) (((a) > (b)) ? (a) : (b)) struct dataobj { void *restrict data; int *size; int *npsize; int *dsize; int *hsize; int *hofs; int *oofs; }; int Kernel(struct dataobj *restrict block_sizes_vec, struct dataobj *restrict damp_vec, const float dt, const float h_x, const float h_y, const float h_z, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict save_src_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict usol_vec, struct dataobj *restrict vp_vec, const int sp_zi_m, const int time_M, const int time_m, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads) { int (*restrict block_sizes) __attribute__((aligned(64))) = (int (*))block_sizes_vec->data; float (*restrict damp)[damp_vec->size[1]][damp_vec->size[2]] __attribute__((aligned(64))) = (float (*)[damp_vec->size[1]][damp_vec->size[2]])damp_vec->data; int (*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__((aligned(64))) = (int (*)[nnz_sp_source_mask_vec->size[1]])nnz_sp_source_mask_vec->data; float (*restrict save_src)[save_src_vec->size[1]] __attribute__((aligned(64))) = (float (*)[save_src_vec->size[1]])save_src_vec->data; int (*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__((aligned(64))) = (int (*)[source_id_vec->size[1]][source_id_vec->size[2]])source_id_vec->data; float (*restrict source_mask)[source_mask_vec->size[1]][source_mask_vec->size[2]] __attribute__((aligned(64))) = (float (*)[source_mask_vec->size[1]][source_mask_vec->size[2]])source_mask_vec->data; int (*restrict sp_source_mask)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]] __attribute__((aligned(64))) = (int (*)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]])sp_source_mask_vec->data; float (*restrict usol)[usol_vec->size[1]][usol_vec->size[2]][usol_vec->size[3]] __attribute__((aligned(64))) = (float (*)[usol_vec->size[1]][usol_vec->size[2]][usol_vec->size[3]])usol_vec->data; float (*restrict vp)[vp_vec->size[1]][vp_vec->size[2]] __attribute__((aligned(64))) = (float (*)[vp_vec->size[1]][vp_vec->size[2]])vp_vec->data; /* Flush denormal numbers to zero in hardware */ _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON); _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON); __itt_resume(); int xb_size = block_sizes[0]; int y0_blk0_size = block_sizes[3]; int x0_blk0_size = block_sizes[2]; int yb_size = block_sizes[1]; printf(" Tiles: %d, %d ::: Blocks %d, %d \n", x0_blk0_size, y0_blk0_size, xb_size, yb_size); int sf = 6; int t_blk_size = 2 * sf * (time_M - time_m); for (int t_blk = time_m; t_blk < sf * (time_M - time_m); t_blk += sf * t_blk_size) //for each t block { for (int xb = x_m; xb <= (x_M + sf * (time_M - time_m)); xb += xb_size + 1) { for (int yb = y_m; yb <= (y_M + sf * (time_M - time_m)); yb += yb_size + 1) { for (int time = t_blk, t0 = (time + 1) % (3), t1 = (time) % (3), t2 = (time + 2) % (3); time <= 1 + min(t_blk + t_blk_size - 1, sf * (time_M - time_m)); time += sf, t0 = (((time / sf) % (time_M - time_m + 1)) + 1) % (3), t1 = (((time / sf) % (time_M - time_m + 1))) % (3), t2 = (((time / sf) % (time_M - time_m + 1)) + 2) % (3)) { int tw = ((time / sf) % (time_M 
- time_m + 1)); /* Begin section0 */ #pragma omp parallel num_threads(nthreads) { #pragma omp for collapse(2) schedule(dynamic, 1) for (int x0_blk0 = max((x_m + time), xb); x0_blk0 <= min((x_M + time), (xb + xb_size)); x0_blk0 += x0_blk0_size) { for (int y0_blk0 = max((y_m + time), yb); y0_blk0 <= min((y_M + time), (yb + yb_size)); y0_blk0 += y0_blk0_size) { for (int x = x0_blk0; x <= min(min((x_M + time), (xb + xb_size)), (x0_blk0 + x0_blk0_size - 1)); x++) { for (int y = y0_blk0; y <= min(min((y_M + time), (yb + yb_size)), (y0_blk0 + y0_blk0_size - 1)); y++) { #pragma omp simd aligned(damp, usol, vp : 32) for (int z = z_m; z <= z_M; z += 1) { float r7 = -2.98277778 F * usol[t1][x - time + 12][y - time + 12][z + 12]; float r6 = 1.0 / dt; float r5 = 1.0 / (dt * dt); float r4 = 1.0 / (vp[x - time + 12][y - time + 12][z + 12] * vp[x - time + 12][y - time + 12][z + 12]); usol[t0][x - time + 12][y - time + 12][z + 12] = (r4 * (-r5 * (-2.0 F * usol[t1][x - time + 12][y - time + 12][z + 12] + usol[t2][x - time + 12][y - time + 12][z + 12])) + r6 * (damp[x - time + 1][y - time + 1][z + 1] * usol[t1][x - time + 12][y - time + 12][z + 12]) + (r7 - 6.01250601e-5 F * (usol[t1][x - time + 12][y - time + 12][z + 6] + usol[t1][x - time + 12][y - time + 12][z + 18]) + 1.03896104e-3 F * (usol[t1][x - time + 12][y - time + 12][z + 7] + usol[t1][x - time + 12][y - time + 12][z + 17]) - 8.92857143e-3 F * (usol[t1][x - time + 12][y - time + 12][z + 8] + usol[t1][x - time + 12][y - time + 12][z + 16]) + 5.29100529e-2 F * (usol[t1][x - time + 12][y - time + 12][z + 9] + usol[t1][x - time + 12][y - time + 12][z + 15]) - 2.67857143e-1 F * (usol[t1][x - time + 12][y - time + 12][z + 10] + usol[t1][x - time + 12][y - time + 12][z + 14]) + 1.71428571 F * (usol[t1][x - time + 12][y - time + 12][z + 11] + usol[t1][x - time + 12][y - time + 12][z + 13])) / ((h_z * h_z)) + (r7 - 6.01250601e-5 F * (usol[t1][x - time + 12][y - time + 6][z + 12] + usol[t1][x - time + 12][y - time + 18][z + 12]) + 1.03896104e-3 F * (usol[t1][x - time + 12][y - time + 7][z + 12] + usol[t1][x - time + 12][y - time + 17][z + 12]) - 8.92857143e-3 F * (usol[t1][x - time + 12][y - time + 8][z + 12] + usol[t1][x - time + 12][y - time + 16][z + 12]) + 5.29100529e-2 F * (usol[t1][x - time + 12][y - time + 9][z + 12] + usol[t1][x - time + 12][y - time + 15][z + 12]) - 2.67857143e-1 F * (usol[t1][x - time + 12][y - time + 10][z + 12] + usol[t1][x - time + 12][y - time + 14][z + 12]) + 1.71428571 F * (usol[t1][x - time + 12][y - time + 11][z + 12] + usol[t1][x - time + 12][y - time + 13][z + 12])) / ((h_y * h_y)) + (r7 - 6.01250601e-5 F * (usol[t1][x - time + 6][y - time + 12][z + 12] + usol[t1][x - time + 18][y - time + 12][z + 12]) + 1.03896104e-3 F * (usol[t1][x - time + 7][y - time + 12][z + 12] + usol[t1][x - time + 17][y - time + 12][z + 12]) - 8.92857143e-3 F * (usol[t1][x - time + 8][y - time + 12][z + 12] + usol[t1][x - time + 16][y - time + 12][z + 12]) + 5.29100529e-2 F * (usol[t1][x - time + 9][y - time + 12][z + 12] + usol[t1][x - time + 15][y - time + 12][z + 12]) - 2.67857143e-1 F * (usol[t1][x - time + 10][y - time + 12][z + 12] + usol[t1][x - time + 14][y - time + 12][z + 12]) + 1.71428571 F * (usol[t1][x - time + 11][y - time + 12][z + 12] + usol[t1][x - time + 13][y - time + 12][z + 12])) / ((h_x * h_x))) / (r4 * r5 + r6 * damp[x - time + 1][y - time + 1][z + 1]); } #pragma omp simd aligned(damp, usol, vp : 32) for (int sp_zi = sp_zi_m; sp_zi <= nnz_sp_source_mask[x - time][y - time] - 1; sp_zi += 1) { int zind = sp_source_mask[x - 
time][y - time][sp_zi]; float r0 = save_src[((time / sf) % (time_M - time_m + 1))][source_id[x - time][y - time][zind]] * source_mask[x - time][y - time][zind]; usol[t0][x - time + 12][y - time + 12][zind + 12] += r0; } } } } } } } } } } /* End section0 */ __itt_pause(); return 0; }
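After the stencil update, the kernel injects point sources through an indirection chain: nnz_sp_source_mask stores, per (x, y) column, how many masked z positions exist; sp_source_mask stores those z indices; source_id maps each position to a row of save_src; and source_mask scales the sample before it is added to the wavefield. The sketch below reproduces that lookup with plain fixed-size arrays; all sizes and names here are illustrative stand-ins for the dataobj-backed arrays of the real kernel.

#define SNX 16
#define SNY 16
#define SNZ 16
#define NSRC 4
#define NSAMPLES 128

static int   nnz_mask[SNX][SNY];            // number of masked z positions per column
static int   z_index[SNX][SNY][SNZ];        // the masked z positions themselves
static int   src_id[SNX][SNY][SNZ];         // which source a position belongs to
static float src_scale[SNX][SNY][SNZ];      // interpolation weight of that position
static float src_wavelet[NSAMPLES][NSRC];   // time series of every source
static float u_next[SNX][SNY][SNZ];         // wavefield buffer being written

void inject_sources(int time_index)
{
    for (int x = 0; x < SNX; ++x) {
        for (int y = 0; y < SNY; ++y) {
            for (int k = 0; k < nnz_mask[x][y]; ++k) {
                const int z = z_index[x][y][k];
                const float sample = src_wavelet[time_index][src_id[x][y][z]]
                                   * src_scale[x][y][z];
                u_next[x][y][z] += sample;   // add the source sample to the new wavefield
            }
        }
    }
}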
streamingbc.c
#include <omp.h> #include <stdint.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <assert.h> #include <math.h> #include <unistd.h> #include "stinger.h" #include "streamingbc.h" #include "streamingbc_aux.h" #include "timer.h" bcForest * streamingBCCreateForestExact(int64_t NV) { return CreateForestExact(NV); } bcForest * streamingBCCreateForestApproximate(int64_t NV, int64_t NK, int64_t * rootArray) { return CreateForestApproximate(NV, rootArray, NK); } extraArraysPerThread ** streamingBCCreateAuxilary(int64_t threadCount, int64_t NV) { return createExtraArraysForThreads(threadCount, NV); } void streamingBCInitStaticExact(bcForest * forest, struct stinger * stingerGraph, int64_t NT, extraArraysPerThread ** auxilary) { BrandesExactParallel(forest, stingerGraph, NT, auxilary); } void streamingBCInitStaticApproximate(bcForest * forest, struct stinger * stingerGraph, int64_t NT, extraArraysPerThread ** auxilary, int64_t NK, int64_t * rootArray) { BrandesApproxCaseParallel(forest, stingerGraph, rootArray, NK, auxilary, NT); } void streamingBCDeleteAuxilary(extraArraysPerThread ** parallelExtra, int64_t threadCount, int64_t NV) { destroyExtraArraysForThreads(parallelExtra, threadCount, NV); } void streamingBCDeleteForestExact(bcForestPtr * deadForest) { DestroyForestExact(deadForest); } void streamingBCDeleteForestApproximate(bcForestPtr * deadForest, int64_t rootArraySize, int64_t * rootArray) { DestroyForestApproximate(deadForest, rootArray, rootArraySize); } StreamingExtraInfo insertVertexStreamingBC(bcForest * forest, struct stinger * sStinger, int64_t src, int64_t * adjacencyArray, int64_t adjacencySize, int64_t * rootArrayForApproximation, int64_t NK, int64_t NV, int64_t NT, extraArraysPerThread ** eAPT) { StreamingExtraInfo oneSEI, returnsei; returnsei.adjacent = 0; returnsei.movement = 0; returnsei.sameLevel = 0; for (int64_t d = 0; d < adjacencySize; d++) { int64_t dest = adjacencyArray[d]; stinger_insert_edge(sStinger, 0, src, dest, 0, 0); stinger_insert_edge(sStinger, 0, dest, src, 0, 0); // Set to load balancing and coarse-grained implementation. oneSEI = insertEdgeStreamingBC(forest, sStinger, src, dest, rootArrayForApproximation, NK, NV, NT, eAPT, 1, 1); returnsei.adjacent += oneSEI.adjacent; returnsei.movement += oneSEI.movement; returnsei.sameLevel += oneSEI.sameLevel; } return returnsei; } int compareArrays(const void * arr1, const void * arr2) { const int64_t * one = (const int64_t *) arr1; const int64_t * two = (const int64_t *) arr2; return two[1] - one[1]; } StreamingExtraInfo insertEdgeStreamingBC(bcForest * forest, struct stinger * sStinger, int64_t newU, int64_t newV, int64_t * rootArrayForApproximation, int64_t NK, int64_t NV, int64_t NT, extraArraysPerThread ** eAPT, uint32_t loadBalancing, uint32_t granularity) { int64_t workPerVertex[NK][2]; // First column has vertex ids, second col has work values per id. 
int64_t currRoot = 0; int64_t samelevel = 0, compConn = 0, adjacent = 0, movement = 0; int64_t workIndex = 0; for (currRoot = 0; currRoot < NK; currRoot++) { int64_t i = rootArrayForApproximation[currRoot]; int64_t thread = 0; extraArraysPerThread * myExtraArrays = eAPT[thread]; bcTree * tree = forest->forest[i]; if (loadBalancing == BALANCE) workPerVertex[workIndex][0] = i; int64_t diff = tree->vArr[newU].level - tree->vArr[newV].level; if (loadBalancing == BALANCE) { if (diff < 0) { workPerVertex[workIndex++][1] = 2 * tree->vArr[newV].edgesBelow + tree->vArr[newV].edgesAbove; } else if (diff > 0) { workPerVertex[workIndex++][1] = 2 * tree->vArr[newU].edgesBelow + tree->vArr[newU].edgesAbove; } else { workPerVertex[workIndex++][1] = 0; } } } if (loadBalancing == BALANCE) { qsort((void *)&workPerVertex, workIndex, sizeof(int64_t[2]), compareArrays); } if (granularity == FINE) { // fine-grain portion. for (int64_t r = 0; r < NK; r++) { int64_t i = workPerVertex[r][0]; if (loadBalancing == 0) { i = rootArrayForApproximation[r]; } int64_t thread = omp_get_thread_num(); extraArraysPerThread * myExtraArrays = eAPT[thread]; bcTree * tree = forest->forest[i]; int64_t diff = tree->vArr[newU].level - tree->vArr[newV].level; if (diff < -1 || diff > 1) { // Case 3 -- non-adjacent level insertion if (diff < -1) { moveUpTreeBrandesFG(forest, sStinger, i, newV, newU, (-diff) - 1, myExtraArrays, (int64_t)NT); } else { moveUpTreeBrandesFG(forest, sStinger, i, newU, newV, (diff) - 1, myExtraArrays, (int64_t)NT); } eAPT[thread]->movementCounter++; } // Newly inserted edge is connecting vertices that were in adjacent levels before insertions else if (diff == -1 || diff == 1) { // Case 2 -- adjacent level insertion if (diff == -1) { addEdgeWithoutMovementBrandesFG(forest, sStinger, i, newV, newU, tree->vArr[newU].sigma, myExtraArrays, (int64_t)NT); } else { addEdgeWithoutMovementBrandesFG(forest, sStinger, i, newU, newV, tree->vArr[newV].sigma, myExtraArrays, (int64_t)NT); } eAPT[thread]->adjacentCounter++; } } } else { omp_set_num_threads(NT); // coarse-grain portion. 
#pragma omp parallel for schedule(dynamic,1) for (int64_t r = 0; r < NK; r++) { int64_t i = workPerVertex[r][0]; if (loadBalancing == 0) { i = rootArrayForApproximation[r]; } int64_t thread = omp_get_thread_num(); extraArraysPerThread * myExtraArrays = eAPT[thread]; bcTree * tree = forest->forest[i]; int64_t diff = tree->vArr[newU].level - tree->vArr[newV].level; if (diff < -1 || diff > 1) { if (diff < -1) { moveUpTreeBrandes(forest, sStinger, i, newV, newU, (-diff) - 1, myExtraArrays); } else { moveUpTreeBrandes(forest, sStinger, i, newU, newV, (diff) - 1, myExtraArrays); } eAPT[thread]->movementCounter++; } // Newly inserted edge is connecting vertices that were in adjacent levels before insertions else if (diff == -1 || diff == 1) { if (diff == -1) { addEdgeWithoutMovementBrandes(forest, sStinger, i, newV, newU, tree->vArr[newU].sigma, myExtraArrays); } else { addEdgeWithoutMovementBrandes(forest, sStinger, i, newU, newV, tree->vArr[newV].sigma, myExtraArrays); } eAPT[thread]->adjacentCounter++; } } } #pragma omp parallel for for (int64_t v = 0; v < NV; v++) { for (int64_t t = 0; t < NT; t++) { forest->totalBC[v] += eAPT[t]->sV[v].totalBC; eAPT[t]->sV[v].totalBC = 0.0; } } StreamingExtraInfo returnSEI = {0, 0, 0, 0}; returnSEI.sameLevel = samelevel; returnSEI.adjacent = adjacent; returnSEI.movement = movement; return returnSEI; } StreamingExtraInfo deleteVertexStreamingBC(bcForest * forest, struct stinger * sStinger, int64_t src, int64_t * adjacencyArray, int64_t * adjacencySize, int64_t * rootArrayForApproximation, int64_t NK, int64_t NV, int64_t NT, extraArraysPerThread ** eAPT) { StreamingExtraInfo oneSEI, returnsei; returnsei.adjacent = 0; returnsei.movement = 0; returnsei.sameLevel = 0; int64_t d = 0; STINGER_FORALL_EDGES_OF_VTX_BEGIN(sStinger, src) { int64_t dest = STINGER_EDGE_DEST; adjacencyArray[d] = dest; stinger_remove_edge(sStinger, 0, src, dest); stinger_remove_edge(sStinger, 0, dest, src); // Force to be load balancing and coarse-grained implementation. 
oneSEI = deleteEdgeStreamingBC(forest, sStinger, src, dest, rootArrayForApproximation, NK, NV, NT, eAPT, 1, 1); returnsei.adjacent += oneSEI.adjacent; returnsei.movement += oneSEI.movement; returnsei.sameLevel += oneSEI.sameLevel; } STINGER_FORALL_EDGES_OF_VTX_END(); *adjacencySize = d; return returnsei; } StreamingExtraInfo deleteEdgeStreamingBC(bcForest * forest, struct stinger * sStinger, int64_t oldU, int64_t oldV, int64_t * rootArrayForApproximation, int64_t NK, int64_t NV, int64_t NT, extraArraysPerThread ** eAPT, uint32_t loadBalancing, uint32_t granularity) { omp_set_num_threads(NT); int64_t currRoot = 0; int64_t samelevel = 0, compConn = 0, adjacent = 0, movement = 0; int64_t thread = 0; int64_t workPerVertex[NK][2]; int64_t workIndex = 0; if (loadBalancing == BALANCE) { for (int64_t r = 0; r < NK; r++) { int64_t i = rootArrayForApproximation[r]; bcTree * tree = forest->forest[i]; int64_t diff = tree->vArr[oldU].level - tree->vArr[oldV].level; workPerVertex[workIndex][0] = i; if (diff < 0) { workPerVertex[workIndex++][1] = 2 * tree->vArr[oldV].edgesBelow + tree->vArr[oldV].edgesAbove; } else if (diff > 0) { workPerVertex[workIndex++][1] = 2 * tree->vArr[oldU].edgesBelow + tree->vArr[oldU].edgesAbove; } else { workPerVertex[workIndex++][1] = 0; } } } if (loadBalancing == BALANCE) { qsort((void *)&workPerVertex, workIndex, sizeof(int64_t[2]), compareArrays); } if (granularity == COARSE) { #pragma omp parallel for schedule(dynamic,1) for (int64_t r = 0; r < NK; r++) { int64_t i = workPerVertex[r][0]; if (loadBalancing == 0) { i = rootArrayForApproximation[r]; } int64_t thread = omp_get_thread_num(); extraArraysPerThread * myExtraArrays = eAPT[thread]; bcTree * tree = forest->forest[i]; int64_t extraParents = 0; int64_t childVertex = oldU; int64_t parentVertex = oldV; int64_t diff = tree->vArr[oldU].level - tree->vArr[oldV].level; if (diff == 0) { eAPT[thread]->samelevelCounter++; samelevel++; continue; } if (tree->vArr[oldU].level < tree->vArr[oldV].level) { childVertex = oldV; parentVertex = oldU; } STINGER_FORALL_EDGES_OF_VTX_BEGIN(sStinger, childVertex) { int64_t neighbor = STINGER_EDGE_DEST; if (tree->vArr[neighbor].level + 1 == tree->vArr[childVertex].level) { extraParents++; } } STINGER_FORALL_EDGES_OF_VTX_END(); if (extraParents >= 1) { // Case 2 -- adjacent level deletion. removeEdgeWithoutMovementBrandes(forest, sStinger, i, childVertex, parentVertex, tree->vArr[parentVertex].sigma, myExtraArrays); eAPT[thread]->adjacentCounter++; adjacent++; } else { // Case 3 -- non-adjacent level deletion. 
moveDownTreeBrandes(forest, sStinger, i, childVertex, parentVertex, myExtraArrays); eAPT[thread]->movementCounter++; movement++; } } } else { for (int64_t r = 0; r < NK; r++) { int64_t i = workPerVertex[r][0]; if (loadBalancing == 0) { i = rootArrayForApproximation[r]; } int64_t thread = omp_get_thread_num(); extraArraysPerThread * myExtraArrays = eAPT[thread]; bcTree * tree = forest->forest[i]; int64_t extraParents = 0; int64_t childVertex = oldU; int64_t parentVertex = oldV; int64_t diff = tree->vArr[oldU].level - tree->vArr[oldV].level; if (diff == 0) { eAPT[thread]->samelevelCounter++; samelevel++; continue; } if (tree->vArr[oldU].level < tree->vArr[oldV].level) { childVertex = oldV; parentVertex = oldU; } STINGER_FORALL_EDGES_OF_VTX_BEGIN(sStinger, childVertex) { int64_t neighbor = STINGER_EDGE_DEST; if (tree->vArr[neighbor].level + 1 == tree->vArr[childVertex].level) { extraParents++; } } STINGER_FORALL_EDGES_OF_VTX_END(); if (extraParents >= 1) { removeEdgeWithoutMovementBrandesFG(forest, sStinger, i, childVertex, parentVertex, tree->vArr[parentVertex].sigma, myExtraArrays, NT); eAPT[thread]->adjacentCounter++; adjacent++; } else { moveDownTreeBrandes(forest, sStinger, i, childVertex, parentVertex, myExtraArrays); eAPT[thread]->movementCounter++; movement++; } } } int64_t tlow = (NV * thread) / NT; int64_t thigh = (NV * (thread + 1)) / NT ; for (int64_t v = tlow; v < NV; v++) { for (int64_t t = 0; t < NT; t++) { forest->totalBC[v] += eAPT[t]->sV[v].totalBC; eAPT[t]->sV[v].totalBC = 0.0; } } StreamingExtraInfo returnSEI = {0, 0, 0, 0}; returnSEI.sameLevel = samelevel; returnSEI.adjacent = adjacent; returnSEI.movement = movement; return returnSEI; }
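A note on the helpers above, with a minimal sketch assuming only the C standard library: compareArrays returns the int64_t difference two[1] - one[1] truncated to int, which can flip sign for large work values, and in deleteVertexStreamingBC the adjacency index d does not appear to be advanced inside the edge loop before *adjacencySize = d is set. An overflow-safe descending comparator for the (vertex, work) pairs could look like the following; this is an illustration, not the project's code.

#include <stdint.h>
#include <stdlib.h>

/* Descending sort of (vertex, work) pairs by the work column.
 * Explicit comparisons avoid the int64_t -> int truncation that a
 * plain subtraction-based comparator can suffer. */
static int compareWorkDescending(const void *arr1, const void *arr2)
{
    const int64_t *one = (const int64_t *) arr1;
    const int64_t *two = (const int64_t *) arr2;

    if (two[1] > one[1]) return  1;   /* put the larger work value first */
    if (two[1] < one[1]) return -1;
    return 0;
}

/* Usage mirroring the BALANCE path: sort a small work table. */
int main(void)
{
    int64_t workPerVertex[3][2] = { {7, 10}, {2, 300}, {9, 25} };
    qsort(workPerVertex, 3, sizeof(int64_t[2]), compareWorkDescending);
    /* order is now vertex 2 (300), vertex 9 (25), vertex 7 (10) */
    return 0;
}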
#include <omp.h> #include <stdint.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <assert.h> #include <math.h> #include <unistd.h> #include "stinger.h" #include "streamingbc.h" #include "streamingbc_aux.h" #include "timer.h" bcForest * streamingBCCreateForestExact(int64_t NV) { return CreateForestExact(NV); } bcForest * streamingBCCreateForestApproximate(int64_t NV, int64_t NK, int64_t * rootArray) { return CreateForestApproximate(NV, rootArray, NK); } extraArraysPerThread ** streamingBCCreateAuxilary(int64_t threadCount, int64_t NV) { return createExtraArraysForThreads(threadCount, NV); } void streamingBCInitStaticExact(bcForest * forest, struct stinger *stingerGraph, int64_t NT, extraArraysPerThread ** auxilary) { BrandesExactParallel(forest, stingerGraph, NT, auxilary); } void streamingBCInitStaticApproximate(bcForest * forest, struct stinger *stingerGraph, int64_t NT, extraArraysPerThread ** auxilary, int64_t NK, int64_t * rootArray) { BrandesApproxCaseParallel(forest, stingerGraph, rootArray, NK, auxilary, NT); } void streamingBCDeleteAuxilary(extraArraysPerThread ** parallelExtra, int64_t threadCount, int64_t NV) { destroyExtraArraysForThreads(parallelExtra, threadCount, NV); } void streamingBCDeleteForestExact(bcForestPtr * deadForest) { DestroyForestExact(deadForest); } void streamingBCDeleteForestApproximate(bcForestPtr * deadForest, int64_t rootArraySize, int64_t * rootArray) { DestroyForestApproximate(deadForest, rootArray, rootArraySize); } StreamingExtraInfo insertVertexStreamingBC(bcForest * forest, struct stinger *sStinger, int64_t src, int64_t * adjacencyArray, int64_t adjacencySize, int64_t * rootArrayForApproximation, int64_t NK, int64_t NV, int64_t NT, extraArraysPerThread ** eAPT) { StreamingExtraInfo oneSEI, returnsei; returnsei.adjacent = 0; returnsei.movement = 0; returnsei.sameLevel = 0; for (int64_t d = 0; d < adjacencySize; d++) { int64_t dest = adjacencyArray[d]; stinger_insert_edge(sStinger, 0, src, dest, 0, 0); stinger_insert_edge(sStinger, 0, dest, src, 0, 0); //Set to load balancing and coarse - grained implementation. oneSEI = insertEdgeStreamingBC(forest, sStinger, src, dest, rootArrayForApproximation, NK, NV, NT, eAPT, 1, 1); returnsei.adjacent += oneSEI.adjacent; returnsei.movement += oneSEI.movement; returnsei.sameLevel += oneSEI.sameLevel; } return returnsei; } int compareArrays(const void *arr1, const void *arr2) { const int64_t *one = (const int64_t *)arr1; const int64_t *two = (const int64_t *)arr2; return two[1] - one[1]; } StreamingExtraInfo insertEdgeStreamingBC(bcForest * forest, struct stinger *sStinger, int64_t newU, int64_t newV, int64_t * rootArrayForApproximation, int64_t NK, int64_t NV, int64_t NT, extraArraysPerThread ** eAPT, uint32_t loadBalancing, uint32_t granularity) { int64_t workPerVertex[NK][2]; //First column has vertex ids, second col has work values per id. 
int64_t currRoot = 0; int64_t samelevel = 0, compConn = 0, adjacent = 0, movement = 0; int64_t workIndex = 0; for (currRoot = 0; currRoot < NK; currRoot++) { int64_t i = rootArrayForApproximation[currRoot]; int64_t thread = 0; extraArraysPerThread *myExtraArrays = eAPT[thread]; bcTree *tree = forest->forest[i]; if (loadBalancing == BALANCE) workPerVertex[workIndex][0] = i; int64_t diff = tree->vArr[newU].level - tree->vArr[newV].level; if (loadBalancing == BALANCE) { if (diff < 0) { workPerVertex[workIndex++][1] = 2 * tree->vArr[newV].edgesBelow + tree->vArr[newV].edgesAbove; } else if (diff > 0) { workPerVertex[workIndex++][1] = 2 * tree->vArr[newU].edgesBelow + tree->vArr[newU].edgesAbove; } else { workPerVertex[workIndex++][1] = 0; } } } if (loadBalancing == BALANCE) { qsort((void *)&workPerVertex, workIndex, sizeof(int64_t[2]), compareArrays); } if (granularity == FINE) { //fine - grain portion. for (int64_t r = 0; r < NK; r++) { int64_t i = workPerVertex[r][0]; if (loadBalancing == 0) { i = rootArrayForApproximation[r]; } int64_t thread = omp_get_thread_num(); extraArraysPerThread *myExtraArrays = eAPT[thread]; bcTree *tree = forest->forest[i]; int64_t diff = tree->vArr[newU].level - tree->vArr[newV].level; if (diff < -1 || diff > 1) { //Case 3-- non - adjacent level insertion if (diff < -1) { moveUpTreeBrandesFG(forest, sStinger, i, newV, newU, (-diff) - 1, myExtraArrays, (int64_t) NT); } else { moveUpTreeBrandesFG(forest, sStinger, i, newU, newV, (diff) - 1, myExtraArrays, (int64_t) NT); } eAPT[thread]->movementCounter++; } //Newly inserted edge is connecting vertices that were in adjacent levels before insertions else if (diff == -1 || diff == 1) { //Case 2-- adjacent level insertion if (diff == -1) { addEdgeWithoutMovementBrandesFG(forest, sStinger, i, newV, newU, tree->vArr[newU].sigma, myExtraArrays, (int64_t) NT); } else { addEdgeWithoutMovementBrandesFG(forest, sStinger, i, newU, newV, tree->vArr[newV].sigma, myExtraArrays, (int64_t) NT); } eAPT[thread]->adjacentCounter++; } } } else { omp_set_num_threads(NT); //coarse - grain portion. 
for (int64_t r = 0; r < NK; r++) { int64_t i = workPerVertex[r][0]; if (loadBalancing == 0) { i = rootArrayForApproximation[r]; } int64_t thread = omp_get_thread_num(); extraArraysPerThread *myExtraArrays = eAPT[thread]; bcTree *tree = forest->forest[i]; int64_t diff = tree->vArr[newU].level - tree->vArr[newV].level; if (diff < -1 || diff > 1) { if (diff < -1) { moveUpTreeBrandes(forest, sStinger, i, newV, newU, (-diff) - 1, myExtraArrays); } else { moveUpTreeBrandes(forest, sStinger, i, newU, newV, (diff) - 1, myExtraArrays); } eAPT[thread]->movementCounter++; } //Newly inserted edge is connecting vertices that were in adjacent levels before insertions else if (diff == -1 || diff == 1) { if (diff == -1) { addEdgeWithoutMovementBrandes(forest, sStinger, i, newV, newU, tree->vArr[newU].sigma, myExtraArrays); } else { addEdgeWithoutMovementBrandes(forest, sStinger, i, newU, newV, tree->vArr[newV].sigma, myExtraArrays); } eAPT[thread]->adjacentCounter++; } } } for (int64_t v = 0; v < NV; v++) { for (int64_t t = 0; t < NT; t++) { forest->totalBC[v] += eAPT[t]->sV[v].totalBC; eAPT[t]->sV[v].totalBC = 0.0; } } StreamingExtraInfo returnSEI = {0, 0, 0, 0}; returnSEI.sameLevel = samelevel; returnSEI.adjacent = adjacent; returnSEI.movement = movement; return returnSEI; } StreamingExtraInfo deleteVertexStreamingBC(bcForest * forest, struct stinger *sStinger, int64_t src, int64_t * adjacencyArray, int64_t * adjacencySize, int64_t * rootArrayForApproximation, int64_t NK, int64_t NV, int64_t NT, extraArraysPerThread ** eAPT) { StreamingExtraInfo oneSEI, returnsei; returnsei.adjacent = 0; returnsei.movement = 0; returnsei.sameLevel = 0; int64_t d = 0; STINGER_FORALL_EDGES_OF_VTX_BEGIN(sStinger, src) { int64_t dest = STINGER_EDGE_DEST; adjacencyArray[d] = dest; stinger_remove_edge(sStinger, 0, src, dest); stinger_remove_edge(sStinger, 0, dest, src); //Force to be load balancing and coarse - grained implementation. 
oneSEI = deleteEdgeStreamingBC(forest, sStinger, src, dest, rootArrayForApproximation, NK, NV, NT, eAPT, 1, 1); returnsei.adjacent += oneSEI.adjacent; returnsei.movement += oneSEI.movement; returnsei.sameLevel += oneSEI.sameLevel; } STINGER_FORALL_EDGES_OF_VTX_END(); *adjacencySize = d; return returnsei; } StreamingExtraInfo deleteEdgeStreamingBC(bcForest * forest, struct stinger *sStinger, int64_t oldU, int64_t oldV, int64_t * rootArrayForApproximation, int64_t NK, int64_t NV, int64_t NT, extraArraysPerThread ** eAPT, uint32_t loadBalancing, uint32_t granularity) { omp_set_num_threads(NT); int64_t currRoot = 0; int64_t samelevel = 0, compConn = 0, adjacent = 0, movement = 0; int64_t thread = 0; int64_t workPerVertex[NK][2]; int64_t workIndex = 0; if (loadBalancing == BALANCE) { for (int64_t r = 0; r < NK; r++) { int64_t i = rootArrayForApproximation[r]; bcTree *tree = forest->forest[i]; int64_t diff = tree->vArr[oldU].level - tree->vArr[oldV].level; workPerVertex[workIndex][0] = i; if (diff < 0) { workPerVertex[workIndex++][1] = 2 * tree->vArr[oldV].edgesBelow + tree->vArr[oldV].edgesAbove; } else if (diff > 0) { workPerVertex[workIndex++][1] = 2 * tree->vArr[oldU].edgesBelow + tree->vArr[oldU].edgesAbove; } else { workPerVertex[workIndex++][1] = 0; } } } if (loadBalancing == BALANCE) { qsort((void *)&workPerVertex, workIndex, sizeof(int64_t[2]), compareArrays); } if (granularity == COARSE) { for (int64_t r = 0; r < NK; r++) { int64_t i = workPerVertex[r][0]; if (loadBalancing == 0) { i = rootArrayForApproximation[r]; } int64_t thread = omp_get_thread_num(); extraArraysPerThread *myExtraArrays = eAPT[thread]; bcTree *tree = forest->forest[i]; int64_t extraParents = 0; int64_t childVertex = oldU; int64_t parentVertex = oldV; int64_t diff = tree->vArr[oldU].level - tree->vArr[oldV].level; if (diff == 0) { eAPT[thread]->samelevelCounter++; samelevel++; continue; } if (tree->vArr[oldU].level < tree->vArr[oldV].level) { childVertex = oldV; parentVertex = oldU; } STINGER_FORALL_EDGES_OF_VTX_BEGIN(sStinger, childVertex) { int64_t neighbor = STINGER_EDGE_DEST; if (tree->vArr[neighbor].level + 1 == tree->vArr[childVertex].level) { extraParents++; } } STINGER_FORALL_EDGES_OF_VTX_END(); if (extraParents >= 1) { //Case 2-- adjacent level deletion. removeEdgeWithoutMovementBrandes(forest, sStinger, i, childVertex, parentVertex, tree->vArr[parentVertex].sigma, myExtraArrays); eAPT[thread]->adjacentCounter++; adjacent++; } else { //Case 3-- non - adjacent level deletion. 
moveDownTreeBrandes(forest, sStinger, i, childVertex, parentVertex, myExtraArrays); eAPT[thread]->movementCounter++; movement++; } } } else { for (int64_t r = 0; r < NK; r++) { int64_t i = workPerVertex[r][0]; if (loadBalancing == 0) { i = rootArrayForApproximation[r]; } int64_t thread = omp_get_thread_num(); extraArraysPerThread *myExtraArrays = eAPT[thread]; bcTree *tree = forest->forest[i]; int64_t extraParents = 0; int64_t childVertex = oldU; int64_t parentVertex = oldV; int64_t diff = tree->vArr[oldU].level - tree->vArr[oldV].level; if (diff == 0) { eAPT[thread]->samelevelCounter++; samelevel++; continue; } if (tree->vArr[oldU].level < tree->vArr[oldV].level) { childVertex = oldV; parentVertex = oldU; } STINGER_FORALL_EDGES_OF_VTX_BEGIN(sStinger, childVertex) { int64_t neighbor = STINGER_EDGE_DEST; if (tree->vArr[neighbor].level + 1 == tree->vArr[childVertex].level) { extraParents++; } } STINGER_FORALL_EDGES_OF_VTX_END(); if (extraParents >= 1) { removeEdgeWithoutMovementBrandesFG(forest, sStinger, i, childVertex, parentVertex, tree->vArr[parentVertex].sigma, myExtraArrays, NT); eAPT[thread]->adjacentCounter++; adjacent++; } else { moveDownTreeBrandes(forest, sStinger, i, childVertex, parentVertex, myExtraArrays); eAPT[thread]->movementCounter++; movement++; } } } int64_t tlow = (NV * thread) / NT; int64_t thigh = (NV * (thread + 1)) / NT; for (int64_t v = tlow; v < NV; v++) { for (int64_t t = 0; t < NT; t++) { forest->totalBC[v] += eAPT[t]->sV[v].totalBC; eAPT[t]->sV[v].totalBC = 0.0; } } StreamingExtraInfo returnSEI = {0, 0, 0, 0}; returnSEI.sameLevel = samelevel; returnSEI.adjacent = adjacent; returnSEI.movement = movement; return returnSEI; }
#include <omp.h> #include <stdint.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <assert.h> #include <math.h> #include <unistd.h> #include "stinger.h" #include "streamingbc.h" #include "streamingbc_aux.h" #include "timer.h" bcForest * streamingBCCreateForestExact(int64_t NV) { return CreateForestExact(NV); } bcForest * streamingBCCreateForestApproximate(int64_t NV, int64_t NK, int64_t * rootArray) { return CreateForestApproximate(NV, rootArray, NK); } extraArraysPerThread ** streamingBCCreateAuxilary(int64_t threadCount, int64_t NV) { return createExtraArraysForThreads(threadCount, NV); } void streamingBCInitStaticExact(bcForest * forest, struct stinger *stingerGraph, int64_t NT, extraArraysPerThread ** auxilary) { BrandesExactParallel(forest, stingerGraph, NT, auxilary); } void streamingBCInitStaticApproximate(bcForest * forest, struct stinger *stingerGraph, int64_t NT, extraArraysPerThread ** auxilary, int64_t NK, int64_t * rootArray) { BrandesApproxCaseParallel(forest, stingerGraph, rootArray, NK, auxilary, NT); } void streamingBCDeleteAuxilary(extraArraysPerThread ** parallelExtra, int64_t threadCount, int64_t NV) { destroyExtraArraysForThreads(parallelExtra, threadCount, NV); } void streamingBCDeleteForestExact(bcForestPtr * deadForest) { DestroyForestExact(deadForest); } void streamingBCDeleteForestApproximate(bcForestPtr * deadForest, int64_t rootArraySize, int64_t * rootArray) { DestroyForestApproximate(deadForest, rootArray, rootArraySize); } StreamingExtraInfo insertVertexStreamingBC(bcForest * forest, struct stinger *sStinger, int64_t src, int64_t * adjacencyArray, int64_t adjacencySize, int64_t * rootArrayForApproximation, int64_t NK, int64_t NV, int64_t NT, extraArraysPerThread ** eAPT) { StreamingExtraInfo oneSEI, returnsei; returnsei.adjacent = 0; returnsei.movement = 0; returnsei.sameLevel = 0; for (int64_t d = 0; d < adjacencySize; d++) { int64_t dest = adjacencyArray[d]; stinger_insert_edge(sStinger, 0, src, dest, 0, 0); stinger_insert_edge(sStinger, 0, dest, src, 0, 0); //Set to load balancing and coarse - grained implementation. oneSEI = insertEdgeStreamingBC(forest, sStinger, src, dest, rootArrayForApproximation, NK, NV, NT, eAPT, 1, 1); returnsei.adjacent += oneSEI.adjacent; returnsei.movement += oneSEI.movement; returnsei.sameLevel += oneSEI.sameLevel; } return returnsei; } int compareArrays(const void *arr1, const void *arr2) { const int64_t *one = (const int64_t *)arr1; const int64_t *two = (const int64_t *)arr2; return two[1] - one[1]; } StreamingExtraInfo insertEdgeStreamingBC(bcForest * forest, struct stinger *sStinger, int64_t newU, int64_t newV, int64_t * rootArrayForApproximation, int64_t NK, int64_t NV, int64_t NT, extraArraysPerThread ** eAPT, uint32_t loadBalancing, uint32_t granularity) { int64_t workPerVertex[NK][2]; //First column has vertex ids, second col has work values per id. 
int64_t currRoot = 0; int64_t samelevel = 0, compConn = 0, adjacent = 0, movement = 0; int64_t workIndex = 0; for (currRoot = 0; currRoot < NK; currRoot++) { int64_t i = rootArrayForApproximation[currRoot]; int64_t thread = 0; extraArraysPerThread *myExtraArrays = eAPT[thread]; bcTree *tree = forest->forest[i]; if (loadBalancing == BALANCE) workPerVertex[workIndex][0] = i; int64_t diff = tree->vArr[newU].level - tree->vArr[newV].level; if (loadBalancing == BALANCE) { if (diff < 0) { workPerVertex[workIndex++][1] = 2 * tree->vArr[newV].edgesBelow + tree->vArr[newV].edgesAbove; } else if (diff > 0) { workPerVertex[workIndex++][1] = 2 * tree->vArr[newU].edgesBelow + tree->vArr[newU].edgesAbove; } else { workPerVertex[workIndex++][1] = 0; } } } if (loadBalancing == BALANCE) { qsort((void *)&workPerVertex, workIndex, sizeof(int64_t[2]), compareArrays); } if (granularity == FINE) { //fine - grain portion. for (int64_t r = 0; r < NK; r++) { int64_t i = workPerVertex[r][0]; if (loadBalancing == 0) { i = rootArrayForApproximation[r]; } int64_t thread = omp_get_thread_num(); extraArraysPerThread *myExtraArrays = eAPT[thread]; bcTree *tree = forest->forest[i]; int64_t diff = tree->vArr[newU].level - tree->vArr[newV].level; if (diff < -1 || diff > 1) { //Case 3-- non - adjacent level insertion if (diff < -1) { moveUpTreeBrandesFG(forest, sStinger, i, newV, newU, (-diff) - 1, myExtraArrays, (int64_t) NT); } else { moveUpTreeBrandesFG(forest, sStinger, i, newU, newV, (diff) - 1, myExtraArrays, (int64_t) NT); } eAPT[thread]->movementCounter++; } //Newly inserted edge is connecting vertices that were in adjacent levels before insertions else if (diff == -1 || diff == 1) { //Case 2-- adjacent level insertion if (diff == -1) { addEdgeWithoutMovementBrandesFG(forest, sStinger, i, newV, newU, tree->vArr[newU].sigma, myExtraArrays, (int64_t) NT); } else { addEdgeWithoutMovementBrandesFG(forest, sStinger, i, newU, newV, tree->vArr[newV].sigma, myExtraArrays, (int64_t) NT); } eAPT[thread]->adjacentCounter++; } } } else { omp_set_num_threads(NT); //coarse - grain portion. 
#pragma omp parallel for schedule(dynamic,1) for (int64_t r = 0; r < NK; r++) { int64_t i = workPerVertex[r][0]; if (loadBalancing == 0) { i = rootArrayForApproximation[r]; } int64_t thread = omp_get_thread_num(); extraArraysPerThread *myExtraArrays = eAPT[thread]; bcTree *tree = forest->forest[i]; int64_t diff = tree->vArr[newU].level - tree->vArr[newV].level; if (diff < -1 || diff > 1) { if (diff < -1) { moveUpTreeBrandes(forest, sStinger, i, newV, newU, (-diff) - 1, myExtraArrays); } else { moveUpTreeBrandes(forest, sStinger, i, newU, newV, (diff) - 1, myExtraArrays); } eAPT[thread]->movementCounter++; } //Newly inserted edge is connecting vertices that were in adjacent levels before insertions else if (diff == -1 || diff == 1) { if (diff == -1) { addEdgeWithoutMovementBrandes(forest, sStinger, i, newV, newU, tree->vArr[newU].sigma, myExtraArrays); } else { addEdgeWithoutMovementBrandes(forest, sStinger, i, newU, newV, tree->vArr[newV].sigma, myExtraArrays); } eAPT[thread]->adjacentCounter++; } } } #pragma omp parallel for for (int64_t v = 0; v < NV; v++) { for (int64_t t = 0; t < NT; t++) { forest->totalBC[v] += eAPT[t]->sV[v].totalBC; eAPT[t]->sV[v].totalBC = 0.0; } } StreamingExtraInfo returnSEI = {0, 0, 0, 0}; returnSEI.sameLevel = samelevel; returnSEI.adjacent = adjacent; returnSEI.movement = movement; return returnSEI; } StreamingExtraInfo deleteVertexStreamingBC(bcForest * forest, struct stinger *sStinger, int64_t src, int64_t * adjacencyArray, int64_t * adjacencySize, int64_t * rootArrayForApproximation, int64_t NK, int64_t NV, int64_t NT, extraArraysPerThread ** eAPT) { StreamingExtraInfo oneSEI, returnsei; returnsei.adjacent = 0; returnsei.movement = 0; returnsei.sameLevel = 0; int64_t d = 0; STINGER_FORALL_EDGES_OF_VTX_BEGIN(sStinger, src) { int64_t dest = STINGER_EDGE_DEST; adjacencyArray[d] = dest; stinger_remove_edge(sStinger, 0, src, dest); stinger_remove_edge(sStinger, 0, dest, src); //Force to be load balancing and coarse - grained implementation. 
oneSEI = deleteEdgeStreamingBC(forest, sStinger, src, dest, rootArrayForApproximation, NK, NV, NT, eAPT, 1, 1); returnsei.adjacent += oneSEI.adjacent; returnsei.movement += oneSEI.movement; returnsei.sameLevel += oneSEI.sameLevel; } STINGER_FORALL_EDGES_OF_VTX_END(); *adjacencySize = d; return returnsei; } StreamingExtraInfo deleteEdgeStreamingBC(bcForest * forest, struct stinger *sStinger, int64_t oldU, int64_t oldV, int64_t * rootArrayForApproximation, int64_t NK, int64_t NV, int64_t NT, extraArraysPerThread ** eAPT, uint32_t loadBalancing, uint32_t granularity) { omp_set_num_threads(NT); int64_t currRoot = 0; int64_t samelevel = 0, compConn = 0, adjacent = 0, movement = 0; int64_t thread = 0; int64_t workPerVertex[NK][2]; int64_t workIndex = 0; if (loadBalancing == BALANCE) { for (int64_t r = 0; r < NK; r++) { int64_t i = rootArrayForApproximation[r]; bcTree *tree = forest->forest[i]; int64_t diff = tree->vArr[oldU].level - tree->vArr[oldV].level; workPerVertex[workIndex][0] = i; if (diff < 0) { workPerVertex[workIndex++][1] = 2 * tree->vArr[oldV].edgesBelow + tree->vArr[oldV].edgesAbove; } else if (diff > 0) { workPerVertex[workIndex++][1] = 2 * tree->vArr[oldU].edgesBelow + tree->vArr[oldU].edgesAbove; } else { workPerVertex[workIndex++][1] = 0; } } } if (loadBalancing == BALANCE) { qsort((void *)&workPerVertex, workIndex, sizeof(int64_t[2]), compareArrays); } if (granularity == COARSE) { #pragma omp parallel for schedule(dynamic,1) for (int64_t r = 0; r < NK; r++) { int64_t i = workPerVertex[r][0]; if (loadBalancing == 0) { i = rootArrayForApproximation[r]; } int64_t thread = omp_get_thread_num(); extraArraysPerThread *myExtraArrays = eAPT[thread]; bcTree *tree = forest->forest[i]; int64_t extraParents = 0; int64_t childVertex = oldU; int64_t parentVertex = oldV; int64_t diff = tree->vArr[oldU].level - tree->vArr[oldV].level; if (diff == 0) { eAPT[thread]->samelevelCounter++; samelevel++; continue; } if (tree->vArr[oldU].level < tree->vArr[oldV].level) { childVertex = oldV; parentVertex = oldU; } STINGER_FORALL_EDGES_OF_VTX_BEGIN(sStinger, childVertex) { int64_t neighbor = STINGER_EDGE_DEST; if (tree->vArr[neighbor].level + 1 == tree->vArr[childVertex].level) { extraParents++; } } STINGER_FORALL_EDGES_OF_VTX_END(); if (extraParents >= 1) { //Case 2-- adjacent level deletion. removeEdgeWithoutMovementBrandes(forest, sStinger, i, childVertex, parentVertex, tree->vArr[parentVertex].sigma, myExtraArrays); eAPT[thread]->adjacentCounter++; adjacent++; } else { //Case 3-- non - adjacent level deletion. 
moveDownTreeBrandes(forest, sStinger, i, childVertex, parentVertex, myExtraArrays); eAPT[thread]->movementCounter++; movement++; } } } else { for (int64_t r = 0; r < NK; r++) { int64_t i = workPerVertex[r][0]; if (loadBalancing == 0) { i = rootArrayForApproximation[r]; } int64_t thread = omp_get_thread_num(); extraArraysPerThread *myExtraArrays = eAPT[thread]; bcTree *tree = forest->forest[i]; int64_t extraParents = 0; int64_t childVertex = oldU; int64_t parentVertex = oldV; int64_t diff = tree->vArr[oldU].level - tree->vArr[oldV].level; if (diff == 0) { eAPT[thread]->samelevelCounter++; samelevel++; continue; } if (tree->vArr[oldU].level < tree->vArr[oldV].level) { childVertex = oldV; parentVertex = oldU; } STINGER_FORALL_EDGES_OF_VTX_BEGIN(sStinger, childVertex) { int64_t neighbor = STINGER_EDGE_DEST; if (tree->vArr[neighbor].level + 1 == tree->vArr[childVertex].level) { extraParents++; } } STINGER_FORALL_EDGES_OF_VTX_END(); if (extraParents >= 1) { removeEdgeWithoutMovementBrandesFG(forest, sStinger, i, childVertex, parentVertex, tree->vArr[parentVertex].sigma, myExtraArrays, NT); eAPT[thread]->adjacentCounter++; adjacent++; } else { moveDownTreeBrandes(forest, sStinger, i, childVertex, parentVertex, myExtraArrays); eAPT[thread]->movementCounter++; movement++; } } } int64_t tlow = (NV * thread) / NT; int64_t thigh = (NV * (thread + 1)) / NT; for (int64_t v = tlow; v < NV; v++) { for (int64_t t = 0; t < NT; t++) { forest->totalBC[v] += eAPT[t]->sV[v].totalBC; eAPT[t]->sV[v].totalBC = 0.0; } } StreamingExtraInfo returnSEI = {0, 0, 0, 0}; returnSEI.sameLevel = samelevel; returnSEI.adjacent = adjacent; returnSEI.movement = movement; return returnSEI; }
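All three variants above expose the same entry points; the sketch below shows one plausible calling sequence for the approximate streaming path. Only the streamingbc function names and stinger_insert_edge come from the code above. buildGraph is a hypothetical stand-in for whatever populates the STINGER structure, the root array, vertex count, and thread count are placeholder assumptions, and bcForestPtr is assumed to be a typedef for bcForest *.

#include <stdint.h>
#include "stinger.h"
#include "streamingbc.h"

/* Hypothetical: builds and populates the graph; not part of the code above. */
extern struct stinger * buildGraph(void);

int main(void)
{
    int64_t NV = 1000;                 /* number of vertices (assumed)  */
    int64_t NK = 64;                   /* number of BFS roots (assumed) */
    int64_t NT = 8;                    /* thread count (assumed)        */
    int64_t rootArray[64];

    for (int64_t k = 0; k < NK; k++) rootArray[k] = k;   /* placeholder roots */

    struct stinger * G = buildGraph();

    bcForest * forest = streamingBCCreateForestApproximate(NV, NK, rootArray);
    extraArraysPerThread ** aux = streamingBCCreateAuxilary(NT, NV);

    /* Static Brandes pass to seed the trees. */
    streamingBCInitStaticApproximate(forest, G, NT, aux, NK, rootArray);

    /* As in insertVertexStreamingBC: put the edge into the graph first,
     * then update betweenness incrementally (the 1, 1 arguments match the
     * load-balanced, coarse-grained setting used above). */
    stinger_insert_edge(G, 0, 3, 17, 0, 0);
    stinger_insert_edge(G, 0, 17, 3, 0, 0);
    StreamingExtraInfo sei = insertEdgeStreamingBC(forest, G, 3, 17,
        rootArray, NK, NV, NT, aux, 1, 1);
    (void) sei;

    streamingBCDeleteAuxilary(aux, NT, NV);
    streamingBCDeleteForestApproximate(&forest, NK, rootArray);
    return 0;
}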
GB_binop__isne_uint32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__isne_uint32 // A.*B function (eWiseMult): GB_AemultB__isne_uint32 // A*D function (colscale): GB_AxD__isne_uint32 // D*A function (rowscale): GB_DxB__isne_uint32 // C+=B function (dense accum): GB_Cdense_accumB__isne_uint32 // C+=b function (dense accum): GB_Cdense_accumb__isne_uint32 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__isne_uint32 // C=scalar+B GB_bind1st__isne_uint32 // C=scalar+B' GB_bind1st_tran__isne_uint32 // C=A+scalar GB_bind2nd__isne_uint32 // C=A'+scalar GB_bind2nd_tran__isne_uint32 // C type: uint32_t // A type: uint32_t // B,b type: uint32_t // BinaryOp: cij = (aij != bij) #define GB_ATYPE \ uint32_t #define GB_BTYPE \ uint32_t #define GB_CTYPE \ uint32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint32_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint32_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = (x != y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISNE || GxB_NO_UINT32 || GxB_NO_ISNE_UINT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__isne_uint32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__isne_uint32 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__isne_uint32 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint32_t uint32_t bwork = (*((uint32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__isne_uint32 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__isne_uint32 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__isne_uint32 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__isne_uint32 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__isne_uint32 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t x = (*((uint32_t *) x_input)) ; uint32_t *Bx = (uint32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint32_t bij = Bx [p] ; Cx [p] = (x != bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__isne_uint32 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t *Ax = (uint32_t *) Ax_input ; uint32_t y = (*((uint32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint32_t aij = Ax [p] ; Cx [p] = (aij != y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = Ax [pA] ; \ Cx [pC] = (x != aij) ; \ } GrB_Info GB_bind1st_tran__isne_uint32 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t x = (*((const uint32_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = Ax [pA] ; \ Cx [pC] = (aij != y) ; \ } GrB_Info GB_bind2nd_tran__isne_uint32 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t y = (*((const uint32_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
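The kernels in this file are specialized entirely through macros (GB_ATYPE, GB_GETA, GB_BINOP, ...) that shared template files, pulled in with #include "..._template.c", expand into type- and operator-specific loops. The fragment below is a self-contained imitation of that idea with made-up MY_* names and the template body inlined as a function; it is not GraphBLAS code.

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

/* Stand-ins for the GB_* specialization macros: the "template" only sees
 * these names, so redefining them produces a new hard-coded kernel
 * (here, the ISNE operator on uint32_t). */
#define MY_TYPE           uint32_t
#define MY_BINOP(z, x, y) z = ((x) != (y))

/* In GraphBLAS the body of this loop lives in a *_template.c file that each
 * generated kernel #includes; inlining it keeps this sketch to one file. */
static void my_ewise_kernel(MY_TYPE *Cx, const MY_TYPE *Ax, const MY_TYPE *Bx,
                            int64_t n)
{
    for (int64_t p = 0; p < n; p++)
    {
        MY_BINOP(Cx[p], Ax[p], Bx[p]);
    }
}

int main(void)
{
    MY_TYPE A[4] = {1, 2, 3, 4};
    MY_TYPE B[4] = {1, 0, 3, 9};
    MY_TYPE C[4];
    my_ewise_kernel(C, A, B, 4);
    for (int p = 0; p < 4; p++) printf("%" PRIu32 " ", C[p]);   /* 0 1 0 1 */
    printf("\n");
    return 0;
}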
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__isne_uint32 // A.*B function (eWiseMult): GB_AemultB__isne_uint32 // A*D function (colscale): GB_AxD__isne_uint32 // D*A function (rowscale): GB_DxB__isne_uint32 // C+=B function (dense accum): GB_Cdense_accumB__isne_uint32 // C+=b function (dense accum): GB_Cdense_accumb__isne_uint32 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__isne_uint32 // C=scalar+B GB_bind1st__isne_uint32 // C=scalar+B' GB_bind1st_tran__isne_uint32 // C=A+scalar GB_bind2nd__isne_uint32 // C=A'+scalar GB_bind2nd_tran__isne_uint32 // C type: uint32_t // A type: uint32_t // B,b type: uint32_t // BinaryOp: cij = (aij != bij) #define GB_ATYPE \ uint32_t #define GB_BTYPE \ uint32_t #define GB_CTYPE \ uint32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint32_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint32_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = (x != y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISNE || GxB_NO_UINT32 || GxB_NO_ISNE_UINT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__isne_uint32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__isne_uint32 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__isne_uint32 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint32_t uint32_t bwork = (*((uint32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__isne_uint32 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__isne_uint32 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__isne_uint32 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__isne_uint32 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__isne_uint32 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t x = (*((uint32_t *) x_input)) ; uint32_t *Bx = (uint32_t *) Bx_input ; int64_t p ; for (p = 0 ; p < anz ; p++) { uint32_t bij = Bx [p] ; Cx [p] = (x != bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__isne_uint32 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t *Ax = (uint32_t *) Ax_input ; uint32_t y = (*((uint32_t *) y_input)) ; for (p = 0 ; p < anz ; p++) { uint32_t aij = Ax [p] ; Cx [p] = (aij != y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = Ax [pA] ; \ Cx [pC] = (x != aij) ; \ } GrB_Info GB_bind1st_tran__isne_uint32 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t x = (*((const uint32_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = Ax [pA] ; \ Cx [pC] = (aij != y) ; \ } GrB_Info GB_bind2nd_tran__isne_uint32 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t y = (*((const uint32_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__isne_uint32 // A.*B function (eWiseMult): GB_AemultB__isne_uint32 // A*D function (colscale): GB_AxD__isne_uint32 // D*A function (rowscale): GB_DxB__isne_uint32 // C+=B function (dense accum): GB_Cdense_accumB__isne_uint32 // C+=b function (dense accum): GB_Cdense_accumb__isne_uint32 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__isne_uint32 // C=scalar+B GB_bind1st__isne_uint32 // C=scalar+B' GB_bind1st_tran__isne_uint32 // C=A+scalar GB_bind2nd__isne_uint32 // C=A'+scalar GB_bind2nd_tran__isne_uint32 // C type: uint32_t // A type: uint32_t // B,b type: uint32_t // BinaryOp: cij = (aij != bij) #define GB_ATYPE \ uint32_t #define GB_BTYPE \ uint32_t #define GB_CTYPE \ uint32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint32_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint32_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = (x != y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISNE || GxB_NO_UINT32 || GxB_NO_ISNE_UINT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__isne_uint32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__isne_uint32 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__isne_uint32 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint32_t uint32_t bwork = (*((uint32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__isne_uint32 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__isne_uint32 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__isne_uint32 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__isne_uint32 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__isne_uint32 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t x = (*((uint32_t *) x_input)) ; uint32_t *Bx = (uint32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint32_t bij = Bx [p] ; Cx [p] = (x != bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__isne_uint32 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t *Ax = (uint32_t *) Ax_input ; uint32_t y = (*((uint32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint32_t aij = Ax [p] ; Cx [p] = (aij != y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = Ax [pA] ; \ Cx [pC] = (x != aij) ; \ } GrB_Info GB_bind1st_tran__isne_uint32 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t x = (*((const uint32_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = Ax [pA] ; \ Cx [pC] = (aij != y) ; \ } GrB_Info GB_bind2nd_tran__isne_uint32 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t y = (*((const uint32_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
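The bind1st/bind2nd kernels above reduce to a flat pass over the value array with one operand held fixed. Below is a minimal standalone sketch of that pattern in plain C (hypothetical array contents, no GraphBLAS plumbing, GB_DISABLE guard, or OpenMP split), showing the ISNE comparison with the first operand bound:

#include <stdint.h>
#include <stdio.h>

/* Standalone illustration of the bind1st pattern used by
 * GB_bind1st__isne_uint32: Cx [p] = (x != Bx [p]) with x bound.
 * Hypothetical sizes; the real kernel splits this loop across
 * OpenMP threads with schedule(static). */
int main (void)
{
    uint32_t Bx [6] = { 1, 2, 3, 2, 1, 7 } ;
    uint32_t Cx [6] ;
    uint32_t x = 2 ;                      /* the bound first operand */
    int64_t anz = 6 ;
    for (int64_t p = 0 ; p < anz ; p++)
    {
        uint32_t bij = Bx [p] ;
        Cx [p] = (x != bij) ;             /* ISNE: 1 if different, else 0 */
    }
    for (int64_t p = 0 ; p < anz ; p++) printf ("%u ", Cx [p]) ;
    printf ("\n") ;
    return (0) ;
}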
core_cgeqrt.c
/** * * @file * * PLASMA is a software package provided by: * University of Tennessee, US, * University of Manchester, UK. * * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/core_blas/core_zgeqrt.c, normal z -> c, Fri Sep 28 17:38:20 2018 * **/ #include <plasma_core_blas.h> #include "plasma_types.h" #include "plasma_internal.h" #include "core_lapack.h" #include <omp.h> /***************************************************************************//** * * @ingroup core_geqrt * * Computes a QR factorization of an m-by-n tile A: * The factorization has the form * \f[ * A = Q \times R * \f] * The tile Q is represented as a product of elementary reflectors * \f[ * Q = H(1) H(2) ... H(k), * \f] * where \f$ k = min(m,n) \f$. * * Each \f$ H(i) \f$ has the form * \f[ * H(i) = I - \tau \times v \times v^H * \f] * where \f$ tau \f$ is a scalar, and \f$ v \f$ is a vector with * v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i), * and \f$ tau \f$ in tau(i). * ******************************************************************************* * * @param[in] m * The number of rows of the tile A. m >= 0. * * @param[in] n * The number of columns of the tile A. n >= 0. * * @param[in] ib * The inner-blocking size. ib >= 0. * * @param[in,out] A * On entry, the m-by-n tile A. * On exit, the elements on and above the diagonal of the array * contain the min(m,n)-by-n upper trapezoidal tile R (R is * upper triangular if m >= n); the elements below the diagonal, * with the array tau, represent the unitary tile Q as a * product of elementary reflectors (see Further Details). * * @param[in] lda * The leading dimension of the array A. lda >= max(1,m). * * @param[out] T * The ib-by-n triangular factor T of the block reflector. * T is upper triangular by block (economic storage); * The rest of the array is not referenced. * * @param[in] ldt * The leading dimension of the array T. ldt >= ib. * * @param tau * Auxiliary workspace array of length n. * * @param work * Auxiliary workspace array of length ib*n. * * @param[in] lwork * Size of the array work. Should be at least ib*n. * ******************************************************************************* * * @retval PlasmaSuccess successful exit * @retval < 0 if -i, the i-th argument had an illegal value * ******************************************************************************/ __attribute__((weak)) int plasma_core_cgeqrt(int m, int n, int ib, plasma_complex32_t *A, int lda, plasma_complex32_t *T, int ldt, plasma_complex32_t *tau, plasma_complex32_t *work) { // Check input arguments. 
if (m < 0) { plasma_coreblas_error("illegal value of m"); return -1; } if (n < 0) { plasma_coreblas_error("illegal value of n"); return -2; } if ((ib < 0) || ( (ib == 0) && (m > 0) && (n > 0) )) { plasma_coreblas_error("illegal value of ib"); return -3; } if (A == NULL) { plasma_coreblas_error("NULL A"); return -4; } if (lda < imax(1, m) && m > 0) { plasma_coreblas_error("illegal value of lda"); return -5; } if (T == NULL) { plasma_coreblas_error("NULL T"); return -6; } if (ldt < imax(1, ib) && ib > 0) { plasma_coreblas_error("illegal value of ldt"); return -7; } if (tau == NULL) { plasma_coreblas_error("NULL tau"); return -8; } if (work == NULL) { plasma_coreblas_error("NULL work"); return -9; } // quick return if (m == 0 || n == 0 || ib == 0) return PlasmaSuccess; int k = imin(m, n); for (int i = 0; i < k; i += ib) { int sb = imin(ib, k-i); LAPACKE_cgeqr2_work(LAPACK_COL_MAJOR, m-i, sb, &A[lda*i+i], lda, &tau[i], work); LAPACKE_clarft_work(LAPACK_COL_MAJOR, lapack_const(PlasmaForward), lapack_const(PlasmaColumnwise), m-i, sb, &A[lda*i+i], lda, &tau[i], &T[ldt*i], ldt); if (n > i+sb) { LAPACKE_clarfb_work(LAPACK_COL_MAJOR, lapack_const(PlasmaLeft), lapack_const(Plasma_ConjTrans), lapack_const(PlasmaForward), lapack_const(PlasmaColumnwise), m-i, n-i-sb, sb, &A[lda*i+i], lda, &T[ldt*i], ldt, &A[lda*(i+sb)+i], lda, work, n-i-sb); } } return PlasmaSuccess; } /******************************************************************************/ void plasma_core_omp_cgeqrt(int m, int n, int ib, plasma_complex32_t *A, int lda, plasma_complex32_t *T, int ldt, plasma_workspace_t work, plasma_sequence_t *sequence, plasma_request_t *request) { #pragma omp task depend(inout:A[0:lda*n]) \ depend(out:T[0:ib*n]) { if (sequence->status == PlasmaSuccess) { // Prepare workspaces. int tid = omp_get_thread_num(); plasma_complex32_t *tau = ((plasma_complex32_t*)work.spaces[tid]); // Call the kernel. int info = plasma_core_cgeqrt(m, n, ib, A, lda, T, ldt, tau, tau+n); if (info != PlasmaSuccess) { plasma_error("core_cgeqrt() failed"); plasma_request_fail(sequence, request, PlasmaErrorInternal); } } } }
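plasma_core_omp_cgeqrt wraps the sequential kernel in an OpenMP task whose depend clauses order it against other tasks touching the same tiles. The following is a minimal standalone sketch of that task-dependence pattern, not PLASMA's actual runtime; the arrays, sizes, and work per task are hypothetical:

#include <omp.h>
#include <stdio.h>

/* Standalone sketch of the task-dependence pattern used by
 * plasma_core_omp_cgeqrt: a task reading/writing tile A and writing tile T
 * is ordered after earlier writers of A and before later readers of T.
 * Array sections in depend clauses use the same [start:length] syntax. */
int main (void)
{
    enum { N = 4 } ;
    float A[N], T[N] ;
    #pragma omp parallel
    #pragma omp single
    {
        #pragma omp task depend(out: A[0:N])
        for (int i = 0 ; i < N ; i++) A[i] = i ;              /* produce A */

        #pragma omp task depend(inout: A[0:N]) depend(out: T[0:N])
        for (int i = 0 ; i < N ; i++) T[i] = 2.0f * A[i] ;    /* "factor" A */

        #pragma omp task depend(in: T[0:N])
        for (int i = 0 ; i < N ; i++) printf ("%g ", T[i]) ;  /* consume T */
    }
    printf ("\n") ;
    return 0 ;
}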
/** * * @file * * PLASMA is a software package provided by: * University of Tennessee, US, * University of Manchester, UK. * * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/core_blas/core_zgeqrt.c, normal z -> c, Fri Sep 28 17:38:20 2018 * **/ #include <plasma_core_blas.h> #include "plasma_types.h" #include "plasma_internal.h" #include "core_lapack.h" #include <omp.h> /***************************************************************************//** * * @ingroup core_geqrt * * Computes a QR factorization of an m-by-n tile A: * The factorization has the form * \f[ * A = Q \times R * \f] * The tile Q is represented as a product of elementary reflectors * \f[ * Q = H(1) H(2) ... H(k), * \f] * where \f$ k = min(m,n) \f$. * * Each \f$ H(i) \f$ has the form * \f[ * H(i) = I - \tau \times v \times v^H * \f] * where \f$ tau \f$ is a scalar, and \f$ v \f$ is a vector with * v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i), * and \f$ tau \f$ in tau(i). * ******************************************************************************* * * @param[in] m * The number of rows of the tile A. m >= 0. * * @param[in] n * The number of columns of the tile A. n >= 0. * * @param[in] ib * The inner-blocking size. ib >= 0. * * @param[in,out] A * On entry, the m-by-n tile A. * On exit, the elements on and above the diagonal of the array * contain the min(m,n)-by-n upper trapezoidal tile R (R is * upper triangular if m >= n); the elements below the diagonal, * with the array tau, represent the unitary tile Q as a * product of elementary reflectors (see Further Details). * * @param[in] lda * The leading dimension of the array A. lda >= max(1,m). * * @param[out] T * The ib-by-n triangular factor T of the block reflector. * T is upper triangular by block (economic storage); * The rest of the array is not referenced. * * @param[in] ldt * The leading dimension of the array T. ldt >= ib. * * @param tau * Auxiliary workspace array of length n. * * @param work * Auxiliary workspace array of length ib*n. * * @param[in] lwork * Size of the array work. Should be at least ib*n. * ******************************************************************************* * * @retval PlasmaSuccess successful exit * @retval < 0 if -i, the i-th argument had an illegal value * ******************************************************************************/ __attribute__((weak)) int plasma_core_cgeqrt(int m, int n, int ib, plasma_complex32_t * A, int lda, plasma_complex32_t * T, int ldt, plasma_complex32_t * tau, plasma_complex32_t * work) { //Check input arguments. 
if (m < 0) { plasma_coreblas_error("illegal value of m"); return -1; } if (n < 0) { plasma_coreblas_error("illegal value of n"); return -2; } if ((ib < 0) || ((ib == 0) && (m > 0) && (n > 0))) { plasma_coreblas_error("illegal value of ib"); return -3; } if (A == NULL) { plasma_coreblas_error("NULL A"); return -4; } if (lda < imax(1, m) && m > 0) { plasma_coreblas_error("illegal value of lda"); return -5; } if (T == NULL) { plasma_coreblas_error("NULL T"); return -6; } if (ldt < imax(1, ib) && ib > 0) { plasma_coreblas_error("illegal value of ldt"); return -7; } if (tau == NULL) { plasma_coreblas_error("NULL tau"); return -8; } if (work == NULL) { plasma_coreblas_error("NULL work"); return -9; } //quick return if (m == 0 || n == 0 || ib == 0) return PlasmaSuccess; int k = imin(m, n); for (int i = 0; i < k; i += ib) { int sb = imin(ib, k - i); LAPACKE_cgeqr2_work(LAPACK_COL_MAJOR, m - i, sb, &A[lda * i + i], lda, &tau[i], work); LAPACKE_clarft_work(LAPACK_COL_MAJOR, lapack_const(PlasmaForward), lapack_const(PlasmaColumnwise), m - i, sb, &A[lda * i + i], lda, &tau[i], &T[ldt * i], ldt); if (n > i + sb) { LAPACKE_clarfb_work(LAPACK_COL_MAJOR, lapack_const(PlasmaLeft), lapack_const(Plasma_ConjTrans), lapack_const(PlasmaForward), lapack_const(PlasmaColumnwise), m - i, n - i - sb, sb, &A[lda * i + i], lda, &T[ldt * i], ldt, &A[lda * (i + sb) + i], lda, work, n - i - sb); } } return PlasmaSuccess; } /******************************************************************************/ void plasma_core_omp_cgeqrt(int m, int n, int ib, plasma_complex32_t * A, int lda, plasma_complex32_t * T, int ldt, plasma_workspace_t work, plasma_sequence_t * sequence, plasma_request_t * request) { { if (sequence->status == PlasmaSuccess) { //Prepare workspaces. int tid = omp_get_thread_num(); plasma_complex32_t *tau = ((plasma_complex32_t *) work.spaces[tid]); //Call the kernel. int info = plasma_core_cgeqrt(m, n, ib, A, lda, T, ldt, tau, tau + n); if (info != PlasmaSuccess) { plasma_error("core_cgeqrt() failed"); plasma_request_fail(sequence, request, PlasmaErrorInternal); } } } }
/** * * @file * * PLASMA is a software package provided by: * University of Tennessee, US, * University of Manchester, UK. * * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/core_blas/core_zgeqrt.c, normal z -> c, Fri Sep 28 17:38:20 2018 * **/ #include <plasma_core_blas.h> #include "plasma_types.h" #include "plasma_internal.h" #include "core_lapack.h" #include <omp.h> /***************************************************************************//** * * @ingroup core_geqrt * * Computes a QR factorization of an m-by-n tile A: * The factorization has the form * \f[ * A = Q \times R * \f] * The tile Q is represented as a product of elementary reflectors * \f[ * Q = H(1) H(2) ... H(k), * \f] * where \f$ k = min(m,n) \f$. * * Each \f$ H(i) \f$ has the form * \f[ * H(i) = I - \tau \times v \times v^H * \f] * where \f$ tau \f$ is a scalar, and \f$ v \f$ is a vector with * v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i), * and \f$ tau \f$ in tau(i). * ******************************************************************************* * * @param[in] m * The number of rows of the tile A. m >= 0. * * @param[in] n * The number of columns of the tile A. n >= 0. * * @param[in] ib * The inner-blocking size. ib >= 0. * * @param[in,out] A * On entry, the m-by-n tile A. * On exit, the elements on and above the diagonal of the array * contain the min(m,n)-by-n upper trapezoidal tile R (R is * upper triangular if m >= n); the elements below the diagonal, * with the array tau, represent the unitary tile Q as a * product of elementary reflectors (see Further Details). * * @param[in] lda * The leading dimension of the array A. lda >= max(1,m). * * @param[out] T * The ib-by-n triangular factor T of the block reflector. * T is upper triangular by block (economic storage); * The rest of the array is not referenced. * * @param[in] ldt * The leading dimension of the array T. ldt >= ib. * * @param tau * Auxiliary workspace array of length n. * * @param work * Auxiliary workspace array of length ib*n. * * @param[in] lwork * Size of the array work. Should be at least ib*n. * ******************************************************************************* * * @retval PlasmaSuccess successful exit * @retval < 0 if -i, the i-th argument had an illegal value * ******************************************************************************/ __attribute__((weak)) int plasma_core_cgeqrt(int m, int n, int ib, plasma_complex32_t * A, int lda, plasma_complex32_t * T, int ldt, plasma_complex32_t * tau, plasma_complex32_t * work) { //Check input arguments. 
if (m < 0) { plasma_coreblas_error("illegal value of m"); return -1; } if (n < 0) { plasma_coreblas_error("illegal value of n"); return -2; } if ((ib < 0) || ((ib == 0) && (m > 0) && (n > 0))) { plasma_coreblas_error("illegal value of ib"); return -3; } if (A == NULL) { plasma_coreblas_error("NULL A"); return -4; } if (lda < imax(1, m) && m > 0) { plasma_coreblas_error("illegal value of lda"); return -5; } if (T == NULL) { plasma_coreblas_error("NULL T"); return -6; } if (ldt < imax(1, ib) && ib > 0) { plasma_coreblas_error("illegal value of ldt"); return -7; } if (tau == NULL) { plasma_coreblas_error("NULL tau"); return -8; } if (work == NULL) { plasma_coreblas_error("NULL work"); return -9; } //quick return if (m == 0 || n == 0 || ib == 0) return PlasmaSuccess; int k = imin(m, n); for (int i = 0; i < k; i += ib) { int sb = imin(ib, k - i); LAPACKE_cgeqr2_work(LAPACK_COL_MAJOR, m - i, sb, &A[lda * i + i], lda, &tau[i], work); LAPACKE_clarft_work(LAPACK_COL_MAJOR, lapack_const(PlasmaForward), lapack_const(PlasmaColumnwise), m - i, sb, &A[lda * i + i], lda, &tau[i], &T[ldt * i], ldt); if (n > i + sb) { LAPACKE_clarfb_work(LAPACK_COL_MAJOR, lapack_const(PlasmaLeft), lapack_const(Plasma_ConjTrans), lapack_const(PlasmaForward), lapack_const(PlasmaColumnwise), m - i, n - i - sb, sb, &A[lda * i + i], lda, &T[ldt * i], ldt, &A[lda * (i + sb) + i], lda, work, n - i - sb); } } return PlasmaSuccess; } /******************************************************************************/ void plasma_core_omp_cgeqrt(int m, int n, int ib, plasma_complex32_t * A, int lda, plasma_complex32_t * T, int ldt, plasma_workspace_t work, plasma_sequence_t * sequence, plasma_request_t * request) { #pragma omp task depend(inout:A[0:lda*n]) \ depend(out:T[0:ib*n]) { if (sequence->status == PlasmaSuccess) { //Prepare workspaces. int tid = omp_get_thread_num(); plasma_complex32_t *tau = ((plasma_complex32_t *) work.spaces[tid]); //Call the kernel. int info = plasma_core_cgeqrt(m, n, ib, A, lda, T, ldt, tau, tau + n); if (info != PlasmaSuccess) { plasma_error("core_cgeqrt() failed"); plasma_request_fail(sequence, request, PlasmaErrorInternal); } } } }
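For reference, here is a minimal caller sketch for the sequential kernel above, assuming the PLASMA headers and libraries are available; the tile sizes are hypothetical and the workspace lengths follow the documented requirements (tau of length n, work of length ib*n, ldt >= ib):

/* Hedged caller sketch for plasma_core_cgeqrt; not part of PLASMA itself. */
#include <plasma_core_blas.h>
#include <stdlib.h>

int factor_tile(plasma_complex32_t *A, int m, int n, int lda, int ib)
{
    plasma_complex32_t *T    = calloc((size_t)ib * n, sizeof(*T));
    plasma_complex32_t *tau  = calloc((size_t)n,      sizeof(*tau));
    plasma_complex32_t *work = calloc((size_t)ib * n, sizeof(*work));
    if (!T || !tau || !work) { free(T); free(tau); free(work); return -1; }

    /* A is overwritten with R (on and above the diagonal) and the
     * Householder vectors (below the diagonal); T holds the block
     * reflector factors with leading dimension ib. */
    int info = plasma_core_cgeqrt(m, n, ib, A, lda, T, ib, tau, work);

    free(T); free(tau); free(work);
    return info;   /* PlasmaSuccess (0) on success, -i on illegal argument i */
}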
GB_unaryop__minv_uint16_int32.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__minv_uint16_int32 // op(A') function: GB_tran__minv_uint16_int32 // C type: uint16_t // A type: int32_t // cast: uint16_t cij = (uint16_t) aij // unaryop: cij = GB_IMINV_UNSIGNED (aij, 16) #define GB_ATYPE \ int32_t #define GB_CTYPE \ uint16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IMINV_UNSIGNED (x, 16) ; // casting #define GB_CASTING(z, x) \ uint16_t z = (uint16_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINV || GxB_NO_UINT16 || GxB_NO_INT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__minv_uint16_int32 ( uint16_t *restrict Cx, const int32_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__minv_uint16_int32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__minv_uint16_int32 // op(A') function: GB_tran__minv_uint16_int32 // C type: uint16_t // A type: int32_t // cast: uint16_t cij = (uint16_t) aij // unaryop: cij = GB_IMINV_UNSIGNED (aij, 16) #define GB_ATYPE \ int32_t #define GB_CTYPE \ uint16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IMINV_UNSIGNED (x, 16) ; // casting #define GB_CASTING(z, x) \ uint16_t z = (uint16_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINV || GxB_NO_UINT16 || GxB_NO_INT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__minv_uint16_int32 ( uint16_t *restrict Cx, const int32_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__minv_uint16_int32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__minv_uint16_int32 // op(A') function: GB_tran__minv_uint16_int32 // C type: uint16_t // A type: int32_t // cast: uint16_t cij = (uint16_t) aij // unaryop: cij = GB_IMINV_UNSIGNED (aij, 16) #define GB_ATYPE \ int32_t #define GB_CTYPE \ uint16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IMINV_UNSIGNED (x, 16) ; // casting #define GB_CASTING(z, x) \ uint16_t z = (uint16_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINV || GxB_NO_UINT16 || GxB_NO_INT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__minv_uint16_int32 ( uint16_t *restrict Cx, const int32_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__minv_uint16_int32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
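The apply kernel above is just a typecast followed by the unary op at every entry. A standalone sketch of that Cx = op(cast(Ax)) loop follows; the iminv_u16 stand-in for GB_IMINV_UNSIGNED (including the 0 -> UINT16_MAX convention for a zero divisor) is an assumption for illustration only, since the real macro is defined in GB.h and the real loop is split across OpenMP threads:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for GB_IMINV_UNSIGNED (x, 16): integer multiplicative inverse.
 * The zero-divide convention used here is an assumption for illustration. */
static uint16_t iminv_u16 (uint16_t x)
{
    return (x == 0) ? UINT16_MAX : (uint16_t) (1u / x) ;
}

int main (void)
{
    int32_t  Ax [4] = { 0, 1, 2, -1 } ;
    uint16_t Cx [4] ;
    int64_t  anz = 4 ;
    for (int64_t p = 0 ; p < anz ; p++)
    {
        uint16_t x = (uint16_t) Ax [p] ;    /* cast: int32_t -> uint16_t */
        Cx [p] = iminv_u16 (x) ;            /* unary op */
    }
    for (int64_t p = 0 ; p < anz ; p++) printf ("%u ", Cx [p]) ;
    printf ("\n") ;
    return (0) ;
}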
krb5pa-sha1_fmt_plug.c
/* * Kerberos 5 "PA ENC TIMESTAMP" by magnum (modified by Dhiru) * * Pcap file -> input file: * 1. tshark -r capture.pcapng -T pdml > ~/capture.pdml * 2. krbng2john.py ~/capture.pdml > krb5.in * 3. Run john on krb5.in * * http://www.ietf.org/rfc/rfc4757.txt * http://www.securiteam.com/windowsntfocus/5BP0H0A6KM.html * * Input format is 'user:$krb5pa$etype$user$realm$salt$timestamp+checksum' * * NOTE: Checksum implies last 12 bytes of PA_ENC_TIMESTAMP value in AS-REQ * packet. * * Default Salt: realm + user * * AES-256 encryption & decryption of AS-REQ timestamp in Kerberos v5 * See the following RFC for more details about the crypto & algorithms used: * * RFC3961 - Encryption and Checksum Specifications for Kerberos 5 * RFC3962 - Advanced Encryption Standard (AES) Encryption for Kerberos 5 * * march 09 / kevin devine <wyse101 0x40 gmail.com> * * This software is Copyright (c) 2011 magnum, and it is hereby released to the * general public under the following terms: Redistribution and use in source * and binary forms, with or without modification, are permitted. * * This software is Copyright (c) 2012 Dhiru Kholia (dhiru at openwall.com) and * released under same terms as above */ #if FMT_EXTERNS_H extern struct fmt_main fmt_krb5pa; #elif FMT_REGISTERS_H john_register_one(&fmt_krb5pa); #else #include <errno.h> #include <string.h> #include <stdlib.h> #include <ctype.h> #ifdef _OPENMP static int omp_t = 1; #include <omp.h> #ifndef OMP_SCALE #define OMP_SCALE 64 #endif #endif #include "arch.h" #include "misc.h" #include "formats.h" #include "options.h" #include "common.h" #include "unicode.h" #include "johnswap.h" #include "aes.h" #include "hmac_sha.h" #include "pbkdf2_hmac_sha1.h" #include "loader.h" #include "memdbg.h" #define FORMAT_LABEL "krb5pa-sha1" #define FORMAT_NAME "Kerberos 5 AS-REQ Pre-Auth etype 17/18" /* aes-cts-hmac-sha1-96 */ #define FORMAT_TAG "$krb5pa$" #define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1) #ifdef SIMD_COEF_32 #define ALGORITHM_NAME "PBKDF2-SHA1 " SHA1_ALGORITHM_NAME #else #define ALGORITHM_NAME "PBKDF2-SHA1 32/" ARCH_BITS_STR #endif #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define BINARY_SIZE 12 #define BINARY_ALIGN 4 #define PLAINTEXT_LENGTH 125 #define SALT_SIZE sizeof(struct custom_salt) #define SALT_ALIGN 4 #ifdef SIMD_COEF_32 #define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1 #define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1 #else #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #endif #define MAX_SALTLEN 128 #define MAX_REALMLEN 64 #define MAX_USERLEN 64 #define TIMESTAMP_SIZE 44 #define CHECKSUM_SIZE BINARY_SIZE #define TOTAL_LENGTH (14 + 2 * (CHECKSUM_SIZE + TIMESTAMP_SIZE) + MAX_REALMLEN + MAX_USERLEN + MAX_SALTLEN) static struct fmt_tests tests[] = { {"$krb5pa$18$user1$EXAMPLE.COM$$2a0e68168d1eac344da458599c3a2b33ff326a061449fcbc242b212504e484d45903c6a16e2d593912f56c93883bf697b325193d62a8be9c", "openwall"}, {"$krb5pa$18$user1$EXAMPLE.COM$$a3918bd0381107feedec8db0022bdf3ac56e534ed54d13c62a7013a47713cfc31ef4e7e572f912fa4164f76b335e588bf29c2d17b11c5caa", "openwall"}, {"$krb5pa$18$l33t$EXAMPLE.COM$$98f732b309a1d7ef2355a974842a32894d911e97150f5d57f248e1c2632fbd3735c5f156532ccae0341e6a2d779ca83a06021fe57dafa464", "openwall"}, {"$krb5pa$18$aduser$AD.EXAMPLE.COM$$64dfeee04be2b2e0423814e0df4d0f960885aca4efffe6cb5694c4d34690406071c4968abd2c153ee42d258c5e09a41269bbcd7799f478d3", "password@123"}, {"$krb5pa$18$aduser$AD.EXAMPLE.COM$$f94f755a8b4493d925094a4eb1cec630ac40411a14c9733a853516fe426637d9daefdedc0567e2bb5a83d4f89a0ad1a4b178662b6106c0ff", 
"password@12345678"}, {"$krb5pa$18$aduser$AD.EXAMPLE.COM$AD.EXAMPLE.COMaduser$f94f755a8b4493d925094a4eb1cec630ac40411a14c9733a853516fe426637d9daefdedc0567e2bb5a83d4f89a0ad1a4b178662b6106c0ff", "password@12345678"}, /* etype 17 hash obtained using MiTM etype downgrade attack */ {"$krb5pa$17$user1$EXAMPLE.COM$$c5461873dc13665771b98ba80be53939e906d90ae1ba79cf2e21f0395e50ee56379fbef4d0298cfccfd6cf8f907329120048fd05e8ae5df4", "openwall"}, {NULL}, }; static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static uint32_t (*crypt_out)[BINARY_SIZE / sizeof(uint32_t)]; static struct custom_salt { int etype; unsigned char realm[64]; unsigned char user[64]; unsigned char salt[128]; /* realm + user */ unsigned char ct[44]; } *cur_salt; static unsigned char constant[16]; static unsigned char ke_input[16]; static unsigned char ki_input[16]; /* n-fold(k-bits): * l = lcm(n,k) * r = l/k * s = k-bits | k-bits rot 13 | k-bits rot 13*2 | ... | k-bits rot 13*(r-1) * compute the 1's complement sum: * n-fold = s[0..n-1]+s[n..2n-1]+s[2n..3n-1]+..+s[(k-1)*n..k*n-1] */ /* representation: msb first, assume n and k are multiples of 8, and * that k>=16. this is the case of all the cryptosystems which are * likely to be used. this function can be replaced if that * assumption ever fails. */ /* input length is in bits */ static void nfold(unsigned int inbits, const unsigned char *in, unsigned int outbits,unsigned char *out) { int a,b,c,lcm; int byte, i, msbit; /* the code below is more readable if I make these bytes * instead of bits */ inbits >>= 3; outbits >>= 3; /* first compute lcm(n,k) */ a = outbits; b = inbits; while (b != 0) { c = b; b = a % b; a = c; } lcm = outbits*inbits/a; /* now do the real work */ memset(out, 0, outbits); byte = 0; /* this will end up cycling through k lcm(k,n)/k times, which * is correct */ for (i = lcm - 1; i >= 0; i--) { /* compute the msbit in k which gets added into this byte */ msbit = (/* first, start with the msbit in the first, unrotated byte */ ((inbits << 3) - 1) /* then, for each byte, shift to the right for each * repetition */ +(((inbits << 3) + 13) * (i / inbits)) /* last, pick out the correct byte within that * shifted repetition */ +((inbits - (i % inbits)) << 3) ) % (inbits << 3); /* pull out the byte value itself */ byte += (((in[((inbits - 1) - (msbit >> 3)) % inbits] << 8)| (in[((inbits) - (msbit>>3)) % inbits])) >>((msbit & 7) + 1)) & 0xff; /* do the addition */ byte += out[i % outbits]; out[i % outbits] = byte & 0xff; /* keep around the carry bit, if any */ byte >>= 8; } /* if there's a carry bit left over, add it back in */ if (byte) { for (i = outbits - 1; i >= 0; i--) { /* do the addition */ byte += out[i]; out[i] = byte & 0xff; /* keep around the carry bit, if any */ byte >>= 8;\ } } } static void init(struct fmt_main *self) { unsigned char usage[5]; #ifdef _OPENMP omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt); crypt_out = mem_calloc(sizeof(*crypt_out), self->params.max_keys_per_crypt); // generate 128 bits from 40 bits of "kerberos" string nfold(8 * 8, (unsigned char*)"kerberos", 128, constant); memset(usage,0,sizeof(usage)); usage[3] = 0x01; // key number in big-endian format usage[4] = 0xAA; // used to derive Ke nfold(sizeof(usage)*8,usage,sizeof(ke_input)*8,ke_input); memset(usage,0,sizeof(usage)); usage[3] = 0x01; // key number in big-endian format usage[4] = 0x55; // used to derive Ki 
nfold(sizeof(usage)*8,usage,sizeof(ki_input)*8,ki_input); } static void done(void) { MEM_FREE(crypt_out); MEM_FREE(saved_key); } static int valid(char *ciphertext, struct fmt_main *self) { char *p, *data = ciphertext; int type, saltlen = 0; // tag is mandatory if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0) return 0; data += FORMAT_TAG_LEN; // etype field, 17 or 18 p = strchr(data, '$'); if (!p || p - data != 2) return 0; type = atoi(data); if (type < 17 || type > 18) return 0; data = p + 1; // user field p = strchr(data, '$'); if (!p || p - data > MAX_USERLEN) return 0; saltlen += p - data; data = p + 1; // realm field p = strchr(data, '$'); if (!p || p - data > MAX_REALMLEN) return 0; saltlen += p - data; data = p + 1; // salt field p = strchr(data, '$'); if (!p) return 0; // if salt is empty, realm.user is used instead if (p - data) saltlen = p - data; data = p + 1; // We support a max. total salt length of 52. // We could opt to emit a warning if rejected here. if (saltlen > MAX_SALTLEN) { static int warned = 0; if (!ldr_in_pot) if (!warned++) fprintf(stderr, "%s: One or more hashes rejected due to salt length limitation\n", FORMAT_LABEL); return 0; } // 56 bytes (112 hex chars) encrypted timestamp + checksum if (strlen(data) != 2 * (TIMESTAMP_SIZE + CHECKSUM_SIZE) || strspn(data, HEXCHARS_all) != strlen(data)) return 0; return 1; } static void *get_salt(char *ciphertext) { char *ctcopy = strdup(ciphertext); char *keeptr = ctcopy; char *p; int i; static struct custom_salt cs; memset(&cs, 0, sizeof(cs)); ctcopy += FORMAT_TAG_LEN; p = strtokm(ctcopy, "$"); cs.etype = atoi(p); p = strtokm(NULL, "$"); if (p[-1] == '$') cs.user[0] = 0; else { strcpy((char*)cs.user, p); p = strtokm(NULL, "$"); } if (p[-1] == '$') cs.realm[0] = 0; else { strcpy((char*)cs.realm, p); p = strtokm(NULL, "$"); } if (p[-1] == '$') { strcpy((char*)cs.salt, (char*)cs.realm); strcat((char*)cs.salt, (char*)cs.user); } else { strcpy((char*)cs.salt, p); p = strtokm(NULL, "$"); } for (i = 0; i < TIMESTAMP_SIZE; i++) cs.ct[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; MEM_FREE(keeptr); return (void *)&cs; } static void set_key(char *key, int index) { int saved_len = strlen(key); if (saved_len > PLAINTEXT_LENGTH) saved_len = PLAINTEXT_LENGTH; memcpy(saved_key[index], key, saved_len); saved_key[index][saved_len] = 0; } static char *split(char *ciphertext, int index, struct fmt_main *pFmt) { static char out[TOTAL_LENGTH + 1]; char in[TOTAL_LENGTH + 1]; char salt[MAX_SALTLEN + 1]; char *data; char *e, *u, *r, *s, *tc; strnzcpy(in, ciphertext, sizeof(in)); tc = strrchr(in, '$'); *tc++ = 0; s = strrchr(in, '$'); *s++ = 0; r = strrchr(in, '$'); *r++ = 0; u = strrchr(in, '$'); *u++ = 0; e = in + 8; /* Default salt is user.realm */ if (!*s) { snprintf(salt, sizeof(salt), "%s%s", r, u); s = salt; } snprintf(out, sizeof(out), "%s%s$%s$%s$%s$%s", FORMAT_TAG, e, u, r, s, tc); data = out + strlen(out) - 2 * (CHECKSUM_SIZE + TIMESTAMP_SIZE) - 1; strlwr(data); return out; } static void *get_binary(char *ciphertext) { static union { unsigned char c[BINARY_SIZE]; ARCH_WORD dummy; } buf; unsigned char *out = buf.c; char *p; int i; p = strrchr(ciphertext, '$') + 1 + TIMESTAMP_SIZE * 2; /* skip to checksum field */ for (i = 0; i < BINARY_SIZE; i++) { out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } return out; } static char *get_key(int index) { return saved_key[index]; } static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; } static int get_hash_1(int 
index) { return crypt_out[index][0] & PH_MASK_1; } static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; } static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; } static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; } static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; } static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; } static void set_salt(void *salt) { cur_salt = (struct custom_salt *)salt; } static void AES_cts_encrypt(const unsigned char *in, unsigned char *out, size_t len, const AES_KEY *key, unsigned char *ivec, const int encryptp) { unsigned char tmp[AES_BLOCK_SIZE]; unsigned int i; if (encryptp) { while(len > AES_BLOCK_SIZE) { for (i = 0; i < AES_BLOCK_SIZE; i++) tmp[i] = in[i] ^ ivec[i]; AES_encrypt(tmp, out, key); memcpy(ivec, out, AES_BLOCK_SIZE); len -= AES_BLOCK_SIZE; in += AES_BLOCK_SIZE; out += AES_BLOCK_SIZE; } for (i = 0; i < len; i++) tmp[i] = in[i] ^ ivec[i]; for (; i < AES_BLOCK_SIZE; i++) tmp[i] = 0 ^ ivec[i]; AES_encrypt(tmp, out - AES_BLOCK_SIZE, key); memcpy(out, ivec, len); memcpy(ivec, out - AES_BLOCK_SIZE, AES_BLOCK_SIZE); } else { unsigned char tmp2[AES_BLOCK_SIZE]; unsigned char tmp3[AES_BLOCK_SIZE]; while(len > AES_BLOCK_SIZE * 2) { memcpy(tmp, in, AES_BLOCK_SIZE); AES_decrypt(in, out, key); for (i = 0; i < AES_BLOCK_SIZE; i++) out[i] ^= ivec[i]; memcpy(ivec, tmp, AES_BLOCK_SIZE); len -= AES_BLOCK_SIZE; in += AES_BLOCK_SIZE; out += AES_BLOCK_SIZE; } len -= AES_BLOCK_SIZE; memcpy(tmp, in, AES_BLOCK_SIZE); /* save last iv */ AES_decrypt(in, tmp2, key); memcpy(tmp3, in + AES_BLOCK_SIZE, len); memcpy(tmp3 + len, tmp2 + len, AES_BLOCK_SIZE - len); /* xor 0 */ for (i = 0; i < len; i++) out[i + AES_BLOCK_SIZE] = tmp2[i] ^ tmp3[i]; AES_decrypt(tmp3, out, key); for (i = 0; i < AES_BLOCK_SIZE; i++) out[i] ^= ivec[i]; memcpy(ivec, tmp, AES_BLOCK_SIZE); } } // keysize = 32 for 256 bits, 16 for 128 bits static void dk(unsigned char key_out[], unsigned char key_in[], size_t key_size, unsigned char ptext[], size_t ptext_size) { unsigned char iv[32]; unsigned char plaintext[32]; AES_KEY ekey; memset(iv,0,sizeof(iv)); memset(plaintext,0,sizeof(plaintext)); memcpy(plaintext,ptext,16); AES_set_encrypt_key(key_in,key_size*8,&ekey); AES_cbc_encrypt(plaintext,key_out,key_size,&ekey,iv,AES_ENCRYPT); } static void krb_decrypt(const unsigned char ciphertext[], size_t ctext_size, unsigned char plaintext[], const unsigned char key[], size_t key_size) { unsigned char iv[32]; AES_KEY ekey; memset(iv,0,sizeof(iv)); AES_set_decrypt_key(key,key_size*8,&ekey); AES_cts_encrypt(ciphertext,plaintext,ctext_size,&ekey,iv,AES_DECRYPT); } #if 0 /* This is not used */ static void krb_encrypt(const unsigned char ciphertext[], size_t ctext_size, unsigned char plaintext[], const unsigned char key[], size_t key_size) { unsigned char iv[32]; AES_KEY ekey; memset(iv,0,sizeof(iv)); AES_set_encrypt_key(key,key_size*8,&ekey); AES_cts_encrypt(ciphertext,plaintext,ctext_size,&ekey,iv,AES_ENCRYPT); } #endif static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; #ifdef _OPENMP #pragma omp parallel for for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT) #endif { unsigned char tkey[MAX_KEYS_PER_CRYPT][32]; unsigned char base_key[32]; unsigned char Ke[32]; unsigned char plaintext[44]; int key_size, i; int len[MAX_KEYS_PER_CRYPT]; #ifdef SIMD_COEF_32 unsigned char *pin[MAX_KEYS_PER_CRYPT], *pout[MAX_KEYS_PER_CRYPT]; for (i = 0; i < MAX_KEYS_PER_CRYPT; 
++i) { len[i] = strlen(saved_key[i+index]); pin[i] = (unsigned char*)saved_key[i+index]; pout[i] = tkey[i]; } pbkdf2_sha1_sse((const unsigned char **)pin, len, cur_salt->salt,strlen((char*)cur_salt->salt), 4096, pout, 32, 0); #else for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) { len[i] = strlen(saved_key[index+i]); } pbkdf2_sha1((const unsigned char*)saved_key[index], len[0], cur_salt->salt,strlen((char*)cur_salt->salt), 4096, tkey[0], 32, 0); #endif for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) { // generate 128 bits from 40 bits of "kerberos" string // This is precomputed in init() //nfold(8 * 8, (unsigned char*)"kerberos", 128, constant); if (cur_salt->etype == 17) key_size = 16; else key_size = 32; dk(base_key, tkey[i], key_size, constant, 32); /* The "well-known constant" used for the DK function is the key usage number, * expressed as four octets in big-endian order, followed by one octet indicated below. * Kc = DK(base-key, usage | 0x99); * Ke = DK(base-key, usage | 0xAA); * Ki = DK(base-key, usage | 0x55); */ // derive Ke for decryption/encryption // This is precomputed in init() //memset(usage,0,sizeof(usage)); //usage[3] = 0x01; // key number in big-endian format //usage[4] = 0xAA; // used to derive Ke //nfold(sizeof(usage)*8,usage,sizeof(ke_input)*8,ke_input); dk(Ke, base_key, key_size, ke_input, 32); // decrypt the AS-REQ timestamp encrypted with 256-bit AES // here is enough to check the string, further computation below is required // to fully verify the checksum krb_decrypt(cur_salt->ct,44,plaintext,Ke, key_size); // Check a couple bytes from known plain (YYYYMMDDHHMMSSZ) and // bail out if we are out of luck. if (plaintext[22] == '2' && plaintext[23] == '0' && plaintext[36] == 'Z') { unsigned char Ki[32]; unsigned char checksum[20]; // derive Ki used in HMAC-SHA-1 checksum // This is precomputed in init() //memset(usage,0,sizeof(usage)); //usage[3] = 0x01; // key number in big-endian format //usage[4] = 0x55; // used to derive Ki //nfold(sizeof(usage)*8,usage,sizeof(ki_input)*8,ki_input); dk(Ki,base_key, key_size, ki_input, 32); // derive checksum of plaintext hmac_sha1(Ki, key_size, plaintext, 44, checksum, 20); memcpy(crypt_out[index+i], checksum, BINARY_SIZE); } else { memset(crypt_out[index+i], 0, BINARY_SIZE); } } } return count; } static int cmp_all(void *binary, int count) { int index = 0; for (; index < count; index++) if (!memcmp(binary, crypt_out[index], ARCH_SIZE)) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_out[index], BINARY_SIZE); } static int cmp_exact(char *source, int index) { return 1; } struct fmt_main fmt_krb5pa = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_OMP, { NULL }, { FORMAT_TAG }, tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, split, get_binary, get_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, fmt_default_salt_hash, NULL, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
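crypt_all() avoids most of the HMAC-SHA-1 work by first checking a few known bytes of the decrypted PA-ENC-TIMESTAMP (the ASN.1 KerberosTime string YYYYMMDDHHMMSSZ). Below is a standalone sketch of that early-reject test, with a hypothetical buffer and the same byte offsets used by the format:

#include <stdio.h>
#include <string.h>

/* Standalone sketch of the cheap early-reject used before the expensive
 * HMAC-SHA-1 checksum: a wrong key is discarded if the decrypted 44-byte
 * PA-ENC-TIMESTAMP does not look like "20......Z" at offsets 22, 23, 36. */
static int timestamp_plausible (const unsigned char plaintext[44])
{
    return plaintext[22] == '2' && plaintext[23] == '0' &&
           plaintext[36] == 'Z' ;
}

int main (void)
{
    unsigned char pt[44] ;
    memset (pt, 0xAA, sizeof (pt)) ;           /* looks like a bad decrypt */
    printf ("bad key:  %d\n", timestamp_plausible (pt)) ;
    memcpy (pt + 22, "20250101010101Z", 15) ;  /* plausible timestamp bytes */
    printf ("good key: %d\n", timestamp_plausible (pt)) ;
    return 0 ;
}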
#if FMT_EXTERNS_H extern struct fmt_main fmt_krb5pa; #elif FMT_REGISTERS_H john_register_one(&fmt_krb5pa); #else #include <errno.h> #include <string.h> #include <stdlib.h> #include <ctype.h> #include "arch.h" #include "misc.h" #include "formats.h" #include "options.h" #include "common.h" #include "unicode.h" #include "johnswap.h" #include "aes.h" #include "hmac_sha.h" #include "pbkdf2_hmac_sha1.h" #include "loader.h" #include "memdbg.h" #define FORMAT_LABEL "krb5pa-sha1" #define FORMAT_NAME "Kerberos 5 AS-REQ Pre-Auth etype 17/18" /* aes-cts-hmac-sha1-96 */ #define FORMAT_TAG "$krb5pa$" #define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1) #ifdef SIMD_COEF_32 #define ALGORITHM_NAME "PBKDF2-SHA1 " SHA1_ALGORITHM_NAME #else #define ALGORITHM_NAME "PBKDF2-SHA1 32/" ARCH_BITS_STR #endif #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define BINARY_SIZE 12 #define BINARY_ALIGN 4 #define PLAINTEXT_LENGTH 125 #define SALT_SIZE sizeof(struct custom_salt) #define SALT_ALIGN 4 #ifdef SIMD_COEF_32 #define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1 #define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1 #else #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #endif #define MAX_SALTLEN 128 #define MAX_REALMLEN 64 #define MAX_USERLEN 64 #define TIMESTAMP_SIZE 44 #define CHECKSUM_SIZE BINARY_SIZE #define TOTAL_LENGTH (14 + 2 * (CHECKSUM_SIZE + TIMESTAMP_SIZE) + MAX_REALMLEN + MAX_USERLEN + MAX_SALTLEN) static struct fmt_tests tests[] = { {"$krb5pa$18$user1$EXAMPLE.COM$$2a0e68168d1eac344da458599c3a2b33ff326a061449fcbc242b212504e484d45903c6a16e2d593912f56c93883bf697b325193d62a8be9c", "openwall"}, {"$krb5pa$18$user1$EXAMPLE.COM$$a3918bd0381107feedec8db0022bdf3ac56e534ed54d13c62a7013a47713cfc31ef4e7e572f912fa4164f76b335e588bf29c2d17b11c5caa", "openwall"}, {"$krb5pa$18$l33t$EXAMPLE.COM$$98f732b309a1d7ef2355a974842a32894d911e97150f5d57f248e1c2632fbd3735c5f156532ccae0341e6a2d779ca83a06021fe57dafa464", "openwall"}, {"$krb5pa$18$aduser$AD.EXAMPLE.COM$$64dfeee04be2b2e0423814e0df4d0f960885aca4efffe6cb5694c4d34690406071c4968abd2c153ee42d258c5e09a41269bbcd7799f478d3", "password@123"}, {"$krb5pa$18$aduser$AD.EXAMPLE.COM$$f94f755a8b4493d925094a4eb1cec630ac40411a14c9733a853516fe426637d9daefdedc0567e2bb5a83d4f89a0ad1a4b178662b6106c0ff", "password@12345678"}, {"$krb5pa$18$aduser$AD.EXAMPLE.COM$AD.EXAMPLE.COMaduser$f94f755a8b4493d925094a4eb1cec630ac40411a14c9733a853516fe426637d9daefdedc0567e2bb5a83d4f89a0ad1a4b178662b6106c0ff", "password@12345678"}, /* etype 17 hash obtained using MiTM etype downgrade attack */ {"$krb5pa$17$user1$EXAMPLE.COM$$c5461873dc13665771b98ba80be53939e906d90ae1ba79cf2e21f0395e50ee56379fbef4d0298cfccfd6cf8f907329120048fd05e8ae5df4", "openwall"}, {NULL}, }; static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static uint32_t(*crypt_out)[BINARY_SIZE / sizeof(uint32_t)]; static struct custom_salt { int etype; unsigned char realm[64]; unsigned char user[64]; unsigned char salt[128];/* realm + user */ unsigned char ct[44]; } *cur_salt; static unsigned char constant[16]; static unsigned char ke_input[16]; static unsigned char ki_input[16]; /* * n-fold(k-bits): l = lcm(n,k) r = l/k s = k-bits | k-bits rot 13 | k-bits * rot 13*2 | ... | k-bits rot 13*(r-1) compute the 1's complement sum: * n-fold = s[0..n-1]+s[n..2n-1]+s[2n..3n-1]+..+s[(k-1)*n..k*n-1] */ /* * representation: msb first, assume n and k are multiples of 8, and that * k>=16. this is the case of all the cryptosystems which are likely to be * used. this function can be replaced if that assumption ever fails. 
*/ /* input length is in bits */ static void nfold(unsigned int inbits, const unsigned char *in, unsigned int outbits, unsigned char *out) { int a, b, c, lcm; int byte, i, msbit; /* * the code below is more readable if I make these bytes instead of bits */ inbits >>= 3; outbits >>= 3; /* first compute lcm(n,k) */ a = outbits; b = inbits; while (b != 0) { c = b; b = a % b; a = c; } lcm = outbits * inbits / a; /* now do the real work */ memset(out, 0, outbits); byte = 0; /* * this will end up cycling through k lcm(k,n)/k times, which is correct */ for (i = lcm - 1; i >= 0; i--) { /* compute the msbit in k which gets added into this byte */ msbit = ( /* first, start with the msbit in the first, * unrotated byte */ ((inbits << 3) - 1) /* * then, for each byte, shift to the right for each repetition */ + (((inbits << 3) + 13) * (i / inbits)) /* * last, pick out the correct byte within that shifted repetition */ + ((inbits - (i % inbits)) << 3) ) % (inbits << 3); /* pull out the byte value itself */ byte += (((in[((inbits - 1) - (msbit >> 3)) % inbits] << 8) | (in[((inbits) - (msbit >> 3)) % inbits])) >> ((msbit & 7) + 1)) & 0xff; /* do the addition */ byte += out[i % outbits]; out[i % outbits] = byte & 0xff; /* keep around the carry bit, if any */ byte >>= 8; } /* if there's a carry bit left over, add it back in */ if (byte) { for (i = outbits - 1; i >= 0; i--) { /* do the addition */ byte += out[i]; out[i] = byte & 0xff; /* keep around the carry bit, if any */ byte >>= 8; \ } } } static void init(struct fmt_main *self) { unsigned char usage[5]; saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt); crypt_out = mem_calloc(sizeof(*crypt_out), self->params.max_keys_per_crypt); //generate 128 bits from 40 bits of "kerberos" string nfold(8 * 8, (unsigned char *)"kerberos", 128, constant); memset(usage, 0, sizeof(usage)); usage[3] = 0x01; //key number in big - endian format usage[4] = 0xAA; //used to derive Ke nfold(sizeof(usage) * 8, usage, sizeof(ke_input) * 8, ke_input); memset(usage, 0, sizeof(usage)); usage[3] = 0x01; //key number in big - endian format usage[4] = 0x55; //used to derive Ki nfold(sizeof(usage) * 8, usage, sizeof(ki_input) * 8, ki_input); } static void done(void) { MEM_FREE(crypt_out); MEM_FREE(saved_key); } static int valid(char *ciphertext, struct fmt_main *self) { char *p, *data = ciphertext; int type, saltlen = 0; //tag is mandatory if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0) return 0; data += FORMAT_TAG_LEN; //etype field, 17 or 18 p = strchr(data, '$'); if (!p || p - data != 2) return 0; type = atoi(data); if (type < 17 || type > 18) return 0; data = p + 1; //user field p = strchr(data, '$'); if (!p || p - data > MAX_USERLEN) return 0; saltlen += p - data; data = p + 1; //realm field p = strchr(data, '$'); if (!p || p - data > MAX_REALMLEN) return 0; saltlen += p - data; data = p + 1; //salt field p = strchr(data, '$'); if (!p) return 0; //if salt is empty, realm.user is used instead if (p - data) saltlen = p - data; data = p + 1; //We support a max.total salt length of 52. // We could opt to emit a warning if rejected here. 
if (saltlen > MAX_SALTLEN) { static int warned = 0; if (!ldr_in_pot) if (!warned++) fprintf(stderr, "%s: One or more hashes rejected due to salt length limitation\n", FORMAT_LABEL); return 0; } //56 bytes(112 hex chars) encrypted timestamp + checksum if (strlen(data) != 2 * (TIMESTAMP_SIZE + CHECKSUM_SIZE) || strspn(data, HEXCHARS_all) != strlen(data)) return 0; return 1; } static void * get_salt(char *ciphertext) { char *ctcopy = strdup(ciphertext); char *keeptr = ctcopy; char *p; int i; static struct custom_salt cs; memset(&cs, 0, sizeof(cs)); ctcopy += FORMAT_TAG_LEN; p = strtokm(ctcopy, "$"); cs.etype = atoi(p); p = strtokm(NULL, "$"); if (p[-1] == '$') cs.user[0] = 0; else { strcpy((char *)cs.user, p); p = strtokm(NULL, "$"); } if (p[-1] == '$') cs.realm[0] = 0; else { strcpy((char *)cs.realm, p); p = strtokm(NULL, "$"); } if (p[-1] == '$') { strcpy((char *)cs.salt, (char *)cs.realm); strcat((char *)cs.salt, (char *)cs.user); } else { strcpy((char *)cs.salt, p); p = strtokm(NULL, "$"); } for (i = 0; i < TIMESTAMP_SIZE; i++) cs.ct[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; MEM_FREE(keeptr); return (void *)&cs; } static void set_key(char *key, int index) { int saved_len = strlen(key); if (saved_len > PLAINTEXT_LENGTH) saved_len = PLAINTEXT_LENGTH; memcpy(saved_key[index], key, saved_len); saved_key[index][saved_len] = 0; } static char * split(char *ciphertext, int index, struct fmt_main *pFmt) { static char out[TOTAL_LENGTH + 1]; char in[TOTAL_LENGTH + 1]; char salt[MAX_SALTLEN + 1]; char *data; char *e, *u, *r, *s, *tc; strnzcpy(in, ciphertext, sizeof(in)); tc = strrchr(in, '$'); *tc++ = 0; s = strrchr(in, '$'); *s++ = 0; r = strrchr(in, '$'); *r++ = 0; u = strrchr(in, '$'); *u++ = 0; e = in + 8; /* Default salt is user.realm */ if (!*s) { snprintf(salt, sizeof(salt), "%s%s", r, u); s = salt; } snprintf(out, sizeof(out), "%s%s$%s$%s$%s$%s", FORMAT_TAG, e, u, r, s, tc); data = out + strlen(out) - 2 * (CHECKSUM_SIZE + TIMESTAMP_SIZE) - 1; strlwr(data); return out; } static void * get_binary(char *ciphertext) { static union { unsigned char c[BINARY_SIZE]; ARCH_WORD dummy; } buf; unsigned char *out = buf.c; char *p; int i; p = strrchr(ciphertext, '$') + 1 + TIMESTAMP_SIZE * 2; /* skip to checksum * field */ for (i = 0; i < BINARY_SIZE; i++) { out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } return out; } static char * get_key(int index) { return saved_key[index]; } static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; } static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; } static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; } static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; } static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; } static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; } static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; } static void set_salt(void *salt) { cur_salt = (struct custom_salt *)salt; } static void AES_cts_encrypt(const unsigned char *in, unsigned char *out, size_t len, const AES_KEY * key, unsigned char *ivec, const int encryptp) { unsigned char tmp[AES_BLOCK_SIZE]; unsigned int i; if (encryptp) { while (len > AES_BLOCK_SIZE) { for (i = 0; i < AES_BLOCK_SIZE; i++) tmp[i] = in[i] ^ ivec[i]; AES_encrypt(tmp, out, key); memcpy(ivec, out, AES_BLOCK_SIZE); len -= AES_BLOCK_SIZE; in += AES_BLOCK_SIZE; out += AES_BLOCK_SIZE; } for (i = 0; i < len; i++) 
tmp[i] = in[i] ^ ivec[i]; for (; i < AES_BLOCK_SIZE; i++) tmp[i] = 0 ^ ivec[i]; AES_encrypt(tmp, out - AES_BLOCK_SIZE, key); memcpy(out, ivec, len); memcpy(ivec, out - AES_BLOCK_SIZE, AES_BLOCK_SIZE); } else { unsigned char tmp2[AES_BLOCK_SIZE]; unsigned char tmp3[AES_BLOCK_SIZE]; while (len > AES_BLOCK_SIZE * 2) { memcpy(tmp, in, AES_BLOCK_SIZE); AES_decrypt(in, out, key); for (i = 0; i < AES_BLOCK_SIZE; i++) out[i] ^= ivec[i]; memcpy(ivec, tmp, AES_BLOCK_SIZE); len -= AES_BLOCK_SIZE; in += AES_BLOCK_SIZE; out += AES_BLOCK_SIZE; } len -= AES_BLOCK_SIZE; memcpy(tmp, in, AES_BLOCK_SIZE); /* save last iv */ AES_decrypt(in, tmp2, key); memcpy(tmp3, in + AES_BLOCK_SIZE, len); memcpy(tmp3 + len, tmp2 + len, AES_BLOCK_SIZE - len); /* xor 0 */ for (i = 0; i < len; i++) out[i + AES_BLOCK_SIZE] = tmp2[i] ^ tmp3[i]; AES_decrypt(tmp3, out, key); for (i = 0; i < AES_BLOCK_SIZE; i++) out[i] ^= ivec[i]; memcpy(ivec, tmp, AES_BLOCK_SIZE); } } //keysize = 32 for 256 bits, 16 for 128 bits static void dk(unsigned char key_out[], unsigned char key_in[], size_t key_size, unsigned char ptext[], size_t ptext_size) { unsigned char iv[32]; unsigned char plaintext[32]; AES_KEY ekey; memset(iv, 0, sizeof(iv)); memset(plaintext, 0, sizeof(plaintext)); memcpy(plaintext, ptext, 16); AES_set_encrypt_key(key_in, key_size * 8, &ekey); AES_cbc_encrypt(plaintext, key_out, key_size, &ekey, iv, AES_ENCRYPT); } static void krb_decrypt(const unsigned char ciphertext[], size_t ctext_size, unsigned char plaintext[], const unsigned char key[], size_t key_size) { unsigned char iv[32]; AES_KEY ekey; memset(iv, 0, sizeof(iv)); AES_set_decrypt_key(key, key_size * 8, &ekey); AES_cts_encrypt(ciphertext, plaintext, ctext_size, &ekey, iv, AES_DECRYPT); } #if 0 /* This is not used */ static void krb_encrypt(const unsigned char ciphertext[], size_t ctext_size, unsigned char plaintext[], const unsigned char key[], size_t key_size) { unsigned char iv[32]; AES_KEY ekey; memset(iv, 0, sizeof(iv)); AES_set_encrypt_key(key, key_size * 8, &ekey); AES_cts_encrypt(ciphertext, plaintext, ctext_size, &ekey, iv, AES_ENCRYPT); } #endif static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; { unsigned char tkey[MAX_KEYS_PER_CRYPT][32]; unsigned char base_key[32]; unsigned char Ke[32]; unsigned char plaintext[44]; int key_size, i; int len[MAX_KEYS_PER_CRYPT]; #ifdef SIMD_COEF_32 unsigned char *pin[MAX_KEYS_PER_CRYPT], *pout[MAX_KEYS_PER_CRYPT]; for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) { len[i] = strlen(saved_key[i + index]); pin[i] = (unsigned char *)saved_key[i + index]; pout[i] = tkey[i]; } pbkdf2_sha1_sse((const unsigned char **)pin, len, cur_salt->salt, strlen((char *)cur_salt->salt), 4096, pout, 32, 0); #else for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) { len[i] = strlen(saved_key[index + i]); } pbkdf2_sha1((const unsigned char *)saved_key[index], len[0], cur_salt->salt, strlen((char *)cur_salt->salt), 4096, tkey[0], 32, 0); #endif for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) { //generate 128 bits from 40 bits of "kerberos" string // This is precomputed in init() // nfold(8 * 8, (unsigned char *)"kerberos", 128, constant); if (cur_salt->etype == 17) key_size = 16; else key_size = 32; dk(base_key, tkey[i], key_size, constant, 32); /* * The "well-known constant" used for the DK function is the key * usage number, expressed as four octets in big-endian order, * followed by one octet indicated below. 
Kc = DK(base-key, usage * | 0x99); Ke = DK(base-key, usage | 0xAA); Ki = DK(base-key, * usage | 0x55); */ //derive Ke for decryption /encryption // This is precomputed in init() // memset(usage, 0, sizeof(usage)); //usage[3] = 0x01; //key number in big - endian format // usage[4] = 0xAA; //used to derive Ke // nfold(sizeof(usage) * 8, usage, sizeof(ke_input) * 8, ke_input); dk(Ke, base_key, key_size, ke_input, 32); //decrypt the AS - REQ timestamp encrypted with 256 - bit AES // here is enough to check the string, further computation below is required // to fully verify the checksum krb_decrypt(cur_salt->ct, 44, plaintext, Ke, key_size); //Check a couple bytes from known plain(YYYYMMDDHHMMSSZ) and // bail out if we are out of luck. if (plaintext[22] == '2' && plaintext[23] == '0' && plaintext[36] == 'Z') { unsigned char Ki[32]; unsigned char checksum[20]; //derive Ki used in HMAC - SHA - 1 checksum // This is precomputed in init() // memset(usage, 0, sizeof(usage)); //usage[3] = 0x01; //key number in big - endian format // usage[4] = 0x55; //used to derive Ki // nfold(sizeof(usage) * 8, usage, sizeof(ki_input) * 8, ki_input); dk(Ki, base_key, key_size, ki_input, 32); //derive checksum of plaintext hmac_sha1(Ki, key_size, plaintext, 44, checksum, 20); memcpy(crypt_out[index + i], checksum, BINARY_SIZE); } else { memset(crypt_out[index + i], 0, BINARY_SIZE); } } } return count; } static int cmp_all(void *binary, int count) { int index = 0; for (; index < count; index++) if (!memcmp(binary, crypt_out[index], ARCH_SIZE)) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_out[index], BINARY_SIZE); } static int cmp_exact(char *source, int index) { return 1; } struct fmt_main fmt_krb5pa = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_OMP, {NULL}, {FORMAT_TAG}, tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, split, get_binary, get_salt, {NULL}, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, fmt_default_salt_hash, NULL, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
#if FMT_EXTERNS_H extern struct fmt_main fmt_krb5pa; #elif FMT_REGISTERS_H john_register_one(&fmt_krb5pa); #else #include <errno.h> #include <string.h> #include <stdlib.h> #include <ctype.h> #ifdef _OPENMP static int omp_t = 1; #include <omp.h> #ifndef OMP_SCALE #define OMP_SCALE 64 #endif #endif #include "arch.h" #include "misc.h" #include "formats.h" #include "options.h" #include "common.h" #include "unicode.h" #include "johnswap.h" #include "aes.h" #include "hmac_sha.h" #include "pbkdf2_hmac_sha1.h" #include "loader.h" #include "memdbg.h" #define FORMAT_LABEL "krb5pa-sha1" #define FORMAT_NAME "Kerberos 5 AS-REQ Pre-Auth etype 17/18" /* aes-cts-hmac-sha1-96 */ #define FORMAT_TAG "$krb5pa$" #define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1) #ifdef SIMD_COEF_32 #define ALGORITHM_NAME "PBKDF2-SHA1 " SHA1_ALGORITHM_NAME #else #define ALGORITHM_NAME "PBKDF2-SHA1 32/" ARCH_BITS_STR #endif #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define BINARY_SIZE 12 #define BINARY_ALIGN 4 #define PLAINTEXT_LENGTH 125 #define SALT_SIZE sizeof(struct custom_salt) #define SALT_ALIGN 4 #ifdef SIMD_COEF_32 #define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1 #define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1 #else #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #endif #define MAX_SALTLEN 128 #define MAX_REALMLEN 64 #define MAX_USERLEN 64 #define TIMESTAMP_SIZE 44 #define CHECKSUM_SIZE BINARY_SIZE #define TOTAL_LENGTH (14 + 2 * (CHECKSUM_SIZE + TIMESTAMP_SIZE) + MAX_REALMLEN + MAX_USERLEN + MAX_SALTLEN) static struct fmt_tests tests[] = { {"$krb5pa$18$user1$EXAMPLE.COM$$2a0e68168d1eac344da458599c3a2b33ff326a061449fcbc242b212504e484d45903c6a16e2d593912f56c93883bf697b325193d62a8be9c", "openwall"}, {"$krb5pa$18$user1$EXAMPLE.COM$$a3918bd0381107feedec8db0022bdf3ac56e534ed54d13c62a7013a47713cfc31ef4e7e572f912fa4164f76b335e588bf29c2d17b11c5caa", "openwall"}, {"$krb5pa$18$l33t$EXAMPLE.COM$$98f732b309a1d7ef2355a974842a32894d911e97150f5d57f248e1c2632fbd3735c5f156532ccae0341e6a2d779ca83a06021fe57dafa464", "openwall"}, {"$krb5pa$18$aduser$AD.EXAMPLE.COM$$64dfeee04be2b2e0423814e0df4d0f960885aca4efffe6cb5694c4d34690406071c4968abd2c153ee42d258c5e09a41269bbcd7799f478d3", "password@123"}, {"$krb5pa$18$aduser$AD.EXAMPLE.COM$$f94f755a8b4493d925094a4eb1cec630ac40411a14c9733a853516fe426637d9daefdedc0567e2bb5a83d4f89a0ad1a4b178662b6106c0ff", "password@12345678"}, {"$krb5pa$18$aduser$AD.EXAMPLE.COM$AD.EXAMPLE.COMaduser$f94f755a8b4493d925094a4eb1cec630ac40411a14c9733a853516fe426637d9daefdedc0567e2bb5a83d4f89a0ad1a4b178662b6106c0ff", "password@12345678"}, /* etype 17 hash obtained using MiTM etype downgrade attack */ {"$krb5pa$17$user1$EXAMPLE.COM$$c5461873dc13665771b98ba80be53939e906d90ae1ba79cf2e21f0395e50ee56379fbef4d0298cfccfd6cf8f907329120048fd05e8ae5df4", "openwall"}, {NULL}, }; static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static uint32_t(*crypt_out)[BINARY_SIZE / sizeof(uint32_t)]; static struct custom_salt { int etype; unsigned char realm[64]; unsigned char user[64]; unsigned char salt[128];/* realm + user */ unsigned char ct[44]; } *cur_salt; static unsigned char constant[16]; static unsigned char ke_input[16]; static unsigned char ki_input[16]; /* * n-fold(k-bits): l = lcm(n,k) r = l/k s = k-bits | k-bits rot 13 | k-bits * rot 13*2 | ... | k-bits rot 13*(r-1) compute the 1's complement sum: * n-fold = s[0..n-1]+s[n..2n-1]+s[2n..3n-1]+..+s[(k-1)*n..k*n-1] */ /* * representation: msb first, assume n and k are multiples of 8, and that * k>=16. this is the case of all the cryptosystems which are likely to be * used. 
this function can be replaced if that assumption ever fails. */ /* input length is in bits */ static void nfold(unsigned int inbits, const unsigned char *in, unsigned int outbits, unsigned char *out) { int a, b, c, lcm; int byte, i, msbit; /* * the code below is more readable if I make these bytes instead of bits */ inbits >>= 3; outbits >>= 3; /* first compute lcm(n,k) */ a = outbits; b = inbits; while (b != 0) { c = b; b = a % b; a = c; } lcm = outbits * inbits / a; /* now do the real work */ memset(out, 0, outbits); byte = 0; /* * this will end up cycling through k lcm(k,n)/k times, which is correct */ for (i = lcm - 1; i >= 0; i--) { /* compute the msbit in k which gets added into this byte */ msbit = ( /* first, start with the msbit in the first, * unrotated byte */ ((inbits << 3) - 1) /* * then, for each byte, shift to the right for each repetition */ + (((inbits << 3) + 13) * (i / inbits)) /* * last, pick out the correct byte within that shifted repetition */ + ((inbits - (i % inbits)) << 3) ) % (inbits << 3); /* pull out the byte value itself */ byte += (((in[((inbits - 1) - (msbit >> 3)) % inbits] << 8) | (in[((inbits) - (msbit >> 3)) % inbits])) >> ((msbit & 7) + 1)) & 0xff; /* do the addition */ byte += out[i % outbits]; out[i % outbits] = byte & 0xff; /* keep around the carry bit, if any */ byte >>= 8; } /* if there's a carry bit left over, add it back in */ if (byte) { for (i = outbits - 1; i >= 0; i--) { /* do the addition */ byte += out[i]; out[i] = byte & 0xff; /* keep around the carry bit, if any */ byte >>= 8; \ } } } static void init(struct fmt_main *self) { unsigned char usage[5]; #ifdef _OPENMP omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt); crypt_out = mem_calloc(sizeof(*crypt_out), self->params.max_keys_per_crypt); //generate 128 bits from 40 bits of "kerberos" string nfold(8 * 8, (unsigned char *)"kerberos", 128, constant); memset(usage, 0, sizeof(usage)); usage[3] = 0x01; //key number in big - endian format usage[4] = 0xAA; //used to derive Ke nfold(sizeof(usage) * 8, usage, sizeof(ke_input) * 8, ke_input); memset(usage, 0, sizeof(usage)); usage[3] = 0x01; //key number in big - endian format usage[4] = 0x55; //used to derive Ki nfold(sizeof(usage) * 8, usage, sizeof(ki_input) * 8, ki_input); } static void done(void) { MEM_FREE(crypt_out); MEM_FREE(saved_key); } static int valid(char *ciphertext, struct fmt_main *self) { char *p, *data = ciphertext; int type, saltlen = 0; //tag is mandatory if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0) return 0; data += FORMAT_TAG_LEN; //etype field, 17 or 18 p = strchr(data, '$'); if (!p || p - data != 2) return 0; type = atoi(data); if (type < 17 || type > 18) return 0; data = p + 1; //user field p = strchr(data, '$'); if (!p || p - data > MAX_USERLEN) return 0; saltlen += p - data; data = p + 1; //realm field p = strchr(data, '$'); if (!p || p - data > MAX_REALMLEN) return 0; saltlen += p - data; data = p + 1; //salt field p = strchr(data, '$'); if (!p) return 0; //if salt is empty, realm.user is used instead if (p - data) saltlen = p - data; data = p + 1; //We support a max.total salt length of 52. // We could opt to emit a warning if rejected here. 
if (saltlen > MAX_SALTLEN) { static int warned = 0; if (!ldr_in_pot) if (!warned++) fprintf(stderr, "%s: One or more hashes rejected due to salt length limitation\n", FORMAT_LABEL); return 0; } //56 bytes(112 hex chars) encrypted timestamp + checksum if (strlen(data) != 2 * (TIMESTAMP_SIZE + CHECKSUM_SIZE) || strspn(data, HEXCHARS_all) != strlen(data)) return 0; return 1; } static void * get_salt(char *ciphertext) { char *ctcopy = strdup(ciphertext); char *keeptr = ctcopy; char *p; int i; static struct custom_salt cs; memset(&cs, 0, sizeof(cs)); ctcopy += FORMAT_TAG_LEN; p = strtokm(ctcopy, "$"); cs.etype = atoi(p); p = strtokm(NULL, "$"); if (p[-1] == '$') cs.user[0] = 0; else { strcpy((char *)cs.user, p); p = strtokm(NULL, "$"); } if (p[-1] == '$') cs.realm[0] = 0; else { strcpy((char *)cs.realm, p); p = strtokm(NULL, "$"); } if (p[-1] == '$') { strcpy((char *)cs.salt, (char *)cs.realm); strcat((char *)cs.salt, (char *)cs.user); } else { strcpy((char *)cs.salt, p); p = strtokm(NULL, "$"); } for (i = 0; i < TIMESTAMP_SIZE; i++) cs.ct[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; MEM_FREE(keeptr); return (void *)&cs; } static void set_key(char *key, int index) { int saved_len = strlen(key); if (saved_len > PLAINTEXT_LENGTH) saved_len = PLAINTEXT_LENGTH; memcpy(saved_key[index], key, saved_len); saved_key[index][saved_len] = 0; } static char * split(char *ciphertext, int index, struct fmt_main *pFmt) { static char out[TOTAL_LENGTH + 1]; char in[TOTAL_LENGTH + 1]; char salt[MAX_SALTLEN + 1]; char *data; char *e, *u, *r, *s, *tc; strnzcpy(in, ciphertext, sizeof(in)); tc = strrchr(in, '$'); *tc++ = 0; s = strrchr(in, '$'); *s++ = 0; r = strrchr(in, '$'); *r++ = 0; u = strrchr(in, '$'); *u++ = 0; e = in + 8; /* Default salt is user.realm */ if (!*s) { snprintf(salt, sizeof(salt), "%s%s", r, u); s = salt; } snprintf(out, sizeof(out), "%s%s$%s$%s$%s$%s", FORMAT_TAG, e, u, r, s, tc); data = out + strlen(out) - 2 * (CHECKSUM_SIZE + TIMESTAMP_SIZE) - 1; strlwr(data); return out; } static void * get_binary(char *ciphertext) { static union { unsigned char c[BINARY_SIZE]; ARCH_WORD dummy; } buf; unsigned char *out = buf.c; char *p; int i; p = strrchr(ciphertext, '$') + 1 + TIMESTAMP_SIZE * 2; /* skip to checksum * field */ for (i = 0; i < BINARY_SIZE; i++) { out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } return out; } static char * get_key(int index) { return saved_key[index]; } static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; } static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; } static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; } static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; } static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; } static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; } static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; } static void set_salt(void *salt) { cur_salt = (struct custom_salt *)salt; } static void AES_cts_encrypt(const unsigned char *in, unsigned char *out, size_t len, const AES_KEY * key, unsigned char *ivec, const int encryptp) { unsigned char tmp[AES_BLOCK_SIZE]; unsigned int i; if (encryptp) { while (len > AES_BLOCK_SIZE) { for (i = 0; i < AES_BLOCK_SIZE; i++) tmp[i] = in[i] ^ ivec[i]; AES_encrypt(tmp, out, key); memcpy(ivec, out, AES_BLOCK_SIZE); len -= AES_BLOCK_SIZE; in += AES_BLOCK_SIZE; out += AES_BLOCK_SIZE; } for (i = 0; i < len; i++) 
tmp[i] = in[i] ^ ivec[i]; for (; i < AES_BLOCK_SIZE; i++) tmp[i] = 0 ^ ivec[i]; AES_encrypt(tmp, out - AES_BLOCK_SIZE, key); memcpy(out, ivec, len); memcpy(ivec, out - AES_BLOCK_SIZE, AES_BLOCK_SIZE); } else { unsigned char tmp2[AES_BLOCK_SIZE]; unsigned char tmp3[AES_BLOCK_SIZE]; while (len > AES_BLOCK_SIZE * 2) { memcpy(tmp, in, AES_BLOCK_SIZE); AES_decrypt(in, out, key); for (i = 0; i < AES_BLOCK_SIZE; i++) out[i] ^= ivec[i]; memcpy(ivec, tmp, AES_BLOCK_SIZE); len -= AES_BLOCK_SIZE; in += AES_BLOCK_SIZE; out += AES_BLOCK_SIZE; } len -= AES_BLOCK_SIZE; memcpy(tmp, in, AES_BLOCK_SIZE); /* save last iv */ AES_decrypt(in, tmp2, key); memcpy(tmp3, in + AES_BLOCK_SIZE, len); memcpy(tmp3 + len, tmp2 + len, AES_BLOCK_SIZE - len); /* xor 0 */ for (i = 0; i < len; i++) out[i + AES_BLOCK_SIZE] = tmp2[i] ^ tmp3[i]; AES_decrypt(tmp3, out, key); for (i = 0; i < AES_BLOCK_SIZE; i++) out[i] ^= ivec[i]; memcpy(ivec, tmp, AES_BLOCK_SIZE); } } //keysize = 32 for 256 bits, 16 for 128 bits static void dk(unsigned char key_out[], unsigned char key_in[], size_t key_size, unsigned char ptext[], size_t ptext_size) { unsigned char iv[32]; unsigned char plaintext[32]; AES_KEY ekey; memset(iv, 0, sizeof(iv)); memset(plaintext, 0, sizeof(plaintext)); memcpy(plaintext, ptext, 16); AES_set_encrypt_key(key_in, key_size * 8, &ekey); AES_cbc_encrypt(plaintext, key_out, key_size, &ekey, iv, AES_ENCRYPT); } static void krb_decrypt(const unsigned char ciphertext[], size_t ctext_size, unsigned char plaintext[], const unsigned char key[], size_t key_size) { unsigned char iv[32]; AES_KEY ekey; memset(iv, 0, sizeof(iv)); AES_set_decrypt_key(key, key_size * 8, &ekey); AES_cts_encrypt(ciphertext, plaintext, ctext_size, &ekey, iv, AES_DECRYPT); } #if 0 /* This is not used */ static void krb_encrypt(const unsigned char ciphertext[], size_t ctext_size, unsigned char plaintext[], const unsigned char key[], size_t key_size) { unsigned char iv[32]; AES_KEY ekey; memset(iv, 0, sizeof(iv)); AES_set_encrypt_key(key, key_size * 8, &ekey); AES_cts_encrypt(ciphertext, plaintext, ctext_size, &ekey, iv, AES_ENCRYPT); } #endif static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; #ifdef _OPENMP #pragma omp parallel for for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT) #endif { unsigned char tkey[MAX_KEYS_PER_CRYPT][32]; unsigned char base_key[32]; unsigned char Ke[32]; unsigned char plaintext[44]; int key_size, i; int len[MAX_KEYS_PER_CRYPT]; #ifdef SIMD_COEF_32 unsigned char *pin[MAX_KEYS_PER_CRYPT], *pout[MAX_KEYS_PER_CRYPT]; for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) { len[i] = strlen(saved_key[i + index]); pin[i] = (unsigned char *)saved_key[i + index]; pout[i] = tkey[i]; } pbkdf2_sha1_sse((const unsigned char **)pin, len, cur_salt->salt, strlen((char *)cur_salt->salt), 4096, pout, 32, 0); #else for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) { len[i] = strlen(saved_key[index + i]); } pbkdf2_sha1((const unsigned char *)saved_key[index], len[0], cur_salt->salt, strlen((char *)cur_salt->salt), 4096, tkey[0], 32, 0); #endif for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) { //generate 128 bits from 40 bits of "kerberos" string // This is precomputed in init() // nfold(8 * 8, (unsigned char *)"kerberos", 128, constant); if (cur_salt->etype == 17) key_size = 16; else key_size = 32; dk(base_key, tkey[i], key_size, constant, 32); /* * The "well-known constant" used for the DK function is the key * usage number, expressed as four octets in big-endian order, * followed by one octet indicated below. 
Kc = DK(base-key, usage * | 0x99); Ke = DK(base-key, usage | 0xAA); Ki = DK(base-key, * usage | 0x55); */ //derive Ke for decryption /encryption // This is precomputed in init() // memset(usage, 0, sizeof(usage)); //usage[3] = 0x01; //key number in big - endian format // usage[4] = 0xAA; //used to derive Ke // nfold(sizeof(usage) * 8, usage, sizeof(ke_input) * 8, ke_input); dk(Ke, base_key, key_size, ke_input, 32); //decrypt the AS - REQ timestamp encrypted with 256 - bit AES // here is enough to check the string, further computation below is required // to fully verify the checksum krb_decrypt(cur_salt->ct, 44, plaintext, Ke, key_size); //Check a couple bytes from known plain(YYYYMMDDHHMMSSZ) and // bail out if we are out of luck. if (plaintext[22] == '2' && plaintext[23] == '0' && plaintext[36] == 'Z') { unsigned char Ki[32]; unsigned char checksum[20]; //derive Ki used in HMAC - SHA - 1 checksum // This is precomputed in init() // memset(usage, 0, sizeof(usage)); //usage[3] = 0x01; //key number in big - endian format // usage[4] = 0x55; //used to derive Ki // nfold(sizeof(usage) * 8, usage, sizeof(ki_input) * 8, ki_input); dk(Ki, base_key, key_size, ki_input, 32); //derive checksum of plaintext hmac_sha1(Ki, key_size, plaintext, 44, checksum, 20); memcpy(crypt_out[index + i], checksum, BINARY_SIZE); } else { memset(crypt_out[index + i], 0, BINARY_SIZE); } } } return count; } static int cmp_all(void *binary, int count) { int index = 0; for (; index < count; index++) if (!memcmp(binary, crypt_out[index], ARCH_SIZE)) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_out[index], BINARY_SIZE); } static int cmp_exact(char *source, int index) { return 1; } struct fmt_main fmt_krb5pa = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_OMP, {NULL}, {FORMAT_TAG}, tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, split, get_binary, get_salt, {NULL}, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, fmt_default_salt_hash, NULL, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
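The OpenMP-specific parts of this build are confined to init() and crypt_all(): with _OPENMP defined, init() scales the keys-per-crypt parameters by the thread count and OMP_SCALE, and the `#pragma omp parallel for` in crypt_all() splits the candidate range into blocks of MAX_KEYS_PER_CRYPT. Every scratch buffer (tkey, base_key, Ke, Ki, plaintext, len) is declared inside the loop body, so each thread works on private copies, and each iteration writes only its own crypt_out[index + i] slots. A minimal self-contained sketch of that pattern, with hypothetical names (BLOCK, work_on), follows; compile with -fopenmp.

/* Minimal sketch of the work-splitting pattern used by crypt_all() above:
 * threads take whole blocks of candidates, scratch buffers are per-call
 * stack data (hence private), and each iteration writes only its own
 * output slots, so no locking is needed. */
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#endif

#define BLOCK 4                 /* stands in for MAX_KEYS_PER_CRYPT */

static void work_on(int idx, unsigned out[])
{
    unsigned scratch[8];        /* per-call stack buffer: each thread has its own */
    for (int k = 0; k < 8; k++)
        scratch[k] = (unsigned)(idx * 8 + k);
    out[idx] = scratch[0] ^ scratch[7];
}

int main(void)
{
    enum { COUNT = 16 };
    unsigned out[COUNT];
    int index;
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (index = 0; index < COUNT; index += BLOCK) {
        for (int i = 0; i < BLOCK; i++)
            work_on(index + i, out);   /* disjoint out[] slots per iteration */
    }
    for (index = 0; index < COUNT; index++)
        printf("%u ", out[index]);
    printf("\n");
    return 0;
}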
3d7pt.c
/* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 8; tile_size[3] = 64; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k]) + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] + A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; 
i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
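The loop nest between `#pragma scop` and `#pragma endscop` is a first-order 7-point Jacobi-style update over a double-buffered grid: the read buffer is selected by t%2, the write buffer by (t+1)%2, and the arrays carry one ghost layer on each side (hence the +2 on Nx/Ny/Nz). Written out with the constants from the code, the update at each interior point is

A^{t+1}_{i,j,k} = \alpha\, A^{t}_{i,j,k}
  + \beta \left( A^{t}_{i-1,j,k} + A^{t}_{i+1,j,k}
               + A^{t}_{i,j-1,k} + A^{t}_{i,j+1,k}
               + A^{t}_{i,j,k-1} + A^{t}_{i,j,k+1} \right),
\qquad \alpha = 0.0876,\ \beta = 0.0765,

applied for 1 \le i < N_z-1, 1 \le j < N_y-1, 1 \le k < N_x-1.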
/* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 8; tile_size[3] = 64; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; LIKWID_MARKER_THREADINIT; LIKWID_MARKER_START("calc"); #endif int num_threads = 1; for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k]) + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] + A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON LIKWID_MARKER_STOP("calc"); LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
/* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 8; tile_size[3] = 64; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k]) + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] + A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; 
i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
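Note that in both formatted variants the compute loop itself stays serial: the OpenMP pragmas only surround the LIKWID marker calls, and the `#pragma scop`/`#pragma endscop` markers plus the tile_size list (16, 16, 8, 64) are hooks for a source-to-source tiler such as PLUTO. The fragment below is a hedged sketch, not what the benchmark ships, showing one straightforward way the spatial loops could be threaded using only the variables already declared in main().

/* Sketch only: the original loop with a single pragma added.  Writes go to
 * A[(t+1)%2] while reads come from A[t%2], so iterations of the i loop are
 * independent within one time step; j and k must be listed as private
 * because they are declared at function scope in main(). */
for (t = 0; t < Nt - 1; t++) {
#pragma omp parallel for private(j, k) schedule(static)
    for (i = 1; i < Nz - 1; i++) {
        for (j = 1; j < Ny - 1; j++) {
            for (k = 1; k < Nx - 1; k++) {
                A[(t + 1) % 2][i][j][k] = alpha * A[t % 2][i][j][k]
                    + beta * (A[t % 2][i - 1][j][k] + A[t % 2][i][j - 1][k]
                            + A[t % 2][i][j][k - 1] + A[t % 2][i + 1][j][k]
                            + A[t % 2][i][j + 1][k] + A[t % 2][i][j][k + 1]);
            }
        }
    }
}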
DenseMatrix.h
/* * DenseMatrix.h * * Created on: Nov 25, 2015 * Author: Michael Wegner (michael.wegner@student.kit.edu) */ #ifndef NETWORKIT_CPP_ALGEBRAIC_DENSEMATRIX_H_ #define NETWORKIT_CPP_ALGEBRAIC_DENSEMATRIX_H_ #include "../Globals.h" #include "Vector.h" #include <cassert> #include <vector> namespace NetworKit { /** * Represents a dense matrix. Use this matrix to run LU decompositions and LU solves. * Note that most matrices are rather sparse s.t. CSRMatrix might be a better representation. */ class DenseMatrix { private: count nRows; count nCols; std::vector<double> entries; public: /** Default constructor */ DenseMatrix(); /** * Constructs an instance of DenseMatrix given the number of rows (@a nRows) and the number of columns (@a nCols) and its * values (@a entries). * @param nRows Number of rows. * @param nCols Number of columns. * @param entries Entries of the matrix. * @note The size of the @a entries vector should be equal to @a nRows * @a nCols. */ DenseMatrix(const count nRows, const count nCols, const std::vector<double> &entries); /** Default destructor */ virtual ~DenseMatrix() = default; /** Default copy constructor */ DenseMatrix (const DenseMatrix &other) = default; /** Default move constructor */ DenseMatrix (DenseMatrix &&other) = default; /** Default copy assignment operator */ DenseMatrix& operator=(DenseMatrix &&other) = default; /** Default move assignment operator */ DenseMatrix& operator=(const DenseMatrix &other) = default; /** * @return Number of rows. */ inline count numberOfRows() const { return nRows; } /** * @return Number of columns. */ inline count numberOfColumns() const { return nCols; } /** * @return Value at matrix position (i,j). */ double operator()(const index i, const index j) const; /** * Set the matrix at position (@a i, @a j) to @a value. */ void setValue(const index i, const index j, const double value); /** * @return Row @a i of this matrix as vector. */ Vector row(const index i) const; /** * @return Column @a j of this matrix as vector. */ Vector column(const index j) const; /** * @return The main diagonal of this matrix. */ Vector diagonal() const; /** * Adds this matrix to @a other and returns the result. * @return The sum of this matrix and @a other. */ DenseMatrix operator+(const DenseMatrix &other) const; /** * Adds @a other to this matrix. * @return Reference to this matrix. */ DenseMatrix& operator+=(const DenseMatrix &other); /** * Subtracts @a other from this matrix and returns the result. * @return The difference of this matrix and @a other. * */ DenseMatrix operator-(const DenseMatrix &other) const; /** * Subtracts @a other from this matrix. * @return Reference to this matrix. */ DenseMatrix& operator-=(const DenseMatrix &other); /** * Multiplies this matrix with a scalar specified in @a scalar and returns the result. * @return The result of multiplying this matrix with @a scalar. */ DenseMatrix operator*(const double &scalar) const; /** * Multiplies this matrix with a scalar specified in @a scalar. * @return Reference to this matrix. */ DenseMatrix& operator*=(const double &scalar); /** * Multiplies this matrix with @a vector and returns the result. * @return The result of multiplying this matrix with @a vector. */ Vector operator*(const Vector &vector) const; /** * Multiplies this matrix with @a other and returns the result in a new matrix. * @return The result of multiplying this matrix with @a other. 
*/ DenseMatrix operator*(const DenseMatrix &other) const; /** * Divides this matrix by a divisor specified in @a divisor and returns the result in a new matrix. * @return The result of dividing this matrix by @a divisor. */ DenseMatrix operator/(const double &divisor) const; /** * Divides this matrix by a divisor specified in @a divisor. * @return Reference to this matrix. */ DenseMatrix& operator/=(const double &divisor); /** * Decomposes the given @a matrix into lower L and upper U matrix (in-place). * @param matrix The matrix to decompose into LU. */ static void LUDecomposition(DenseMatrix &matrix); /** * Computes the solution vector x to the system @a LU * x = @a b where @a LU is a matrix decomposed into L and U. * @param LU Matrix decomposed into lower L and upper U matrix. * @param b Right-hand side. * @return Solution vector x to the linear equation system LU * x = b. */ static Vector LUSolve(const DenseMatrix &LU, const Vector &b); /** * Computes @a A @a binaryOp @a B on the elements of matrix @a A and matrix @a B. * @param A * @param B * @param binaryOp Function handling (double, double) -> double * @return @a A @a binaryOp @a B. * @note @a A and @a B must have the same dimensions. */ template<typename L> static DenseMatrix binaryOperator(const DenseMatrix &A, const DenseMatrix &B, L binaryOp); /** * Iterate over all non-zero elements of row @a row in the matrix and call handler(index column, double value) */ template<typename L> void forElementsInRow(index i, L handle) const; /** * Iterate in parallel over all non-zero elements of row @a row in the matrix and call handler(index column, double value) */ template<typename L> void parallelForElementsInRow(index i, L handle) const; /** * Iterate over all non-zero elements of the matrix in row order and call handler (lambda closure). */ template<typename L> void forElementsInRowOrder(L handle) const; /** * Iterate in parallel over all rows and call handler (lambda closure) on non-zero elements of the matrix. */ template<typename L> void parallelForElementsInRowOrder(L handle) const; /** * Iterate in parallel over all rows and call handler (lambda closure) on non-zero elements of the matrix. 
*/ template<typename L> void parallelForElementsInRowOrder(L handle); }; template<typename L> inline DenseMatrix NetworKit::DenseMatrix::binaryOperator(const DenseMatrix &A, const DenseMatrix &B, L binaryOp) { assert(A.nRows == B.nRows && A.nCols == B.nCols); std::vector<double> resultEntries(A.numberOfRows() * A.numberOfColumns()); #pragma omp parallel for for (index i = 0; i < A.numberOfRows(); ++i) { index offset = i * A.numberOfColumns(); for (index j = offset; j < offset + A.numberOfColumns(); ++j) { resultEntries[j] = binaryOp(A.entries[j], B.entries[j]); } } return DenseMatrix(A.numberOfRows(), A.numberOfColumns(), resultEntries); } template<typename L> inline void NetworKit::DenseMatrix::forElementsInRow(index i, L handle) const { index offset = i * numberOfColumns(); for (index k = offset, j = 0; k < offset + numberOfColumns(); ++k, ++j) { handle(j, entries[k]); } } template<typename L> inline void NetworKit::DenseMatrix::parallelForElementsInRow(index i, L handle) const { index offset = i * numberOfColumns(); #pragma omp parallel for for (index j = 0; j < numberOfColumns(); ++j) { handle(j, entries[offset + j]); } } template<typename L> inline void NetworKit::DenseMatrix::forElementsInRowOrder(L handle) const { for (index i = 0; i < nRows; ++i) { index offset = i * numberOfColumns(); for (index k = offset, j = 0; k < offset + numberOfColumns(); ++k, ++j) { handle(i, j, entries[k]); } } } template<typename L> inline void NetworKit::DenseMatrix::parallelForElementsInRowOrder(L handle) const { #pragma omp parallel for for (index i = 0; i < nRows; ++i) { index offset = i * numberOfColumns(); for (index k = offset, j = 0; k < offset + numberOfColumns(); ++k, ++j) { handle(i, j, entries[k]); } } } template<typename L> inline void NetworKit::DenseMatrix::parallelForElementsInRowOrder(L handle) { #pragma omp parallel for for (index i = 0; i < nRows; ++i) { index offset = i * numberOfColumns(); for (index k = offset, j = 0; k < offset + numberOfColumns(); ++k, ++j) { handle(i, j, entries[k]); } } } } /* namespace NetworKit */ #endif /* NETWORKIT_CPP_ALGEBRAIC_DENSEMATRIX_H_ */
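A short usage sketch for the interface declared above; it assumes the out-of-line member definitions from NetworKit's DenseMatrix.cpp are linked in and the index/count aliases come from Globals.h. Entries are stored row-major, binaryOperator combines two equally sized matrices element-wise through a callable, and forElementsInRowOrder visits every entry as (row, column, value).

// Hedged usage sketch for the DenseMatrix interface declared above.
#include "DenseMatrix.h"
#include <iostream>

int main() {
    using NetworKit::DenseMatrix;
    // 2x2 matrices, entries given in row-major order as the constructor expects
    DenseMatrix A(2, 2, {1.0, 2.0, 3.0, 4.0});
    DenseMatrix B(2, 2, {5.0, 6.0, 7.0, 8.0});

    // element-wise combination via the templated binaryOperator
    DenseMatrix C = DenseMatrix::binaryOperator(A, B,
        [](double a, double b) { return a + b; });

    // row-order traversal; the handle receives (row, column, value)
    C.forElementsInRowOrder([](NetworKit::index i, NetworKit::index j, double v) {
        std::cout << i << "," << j << " = " << v << "\n";
    });
    return 0;
}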
/* * DenseMatrix.h * * Created on: Nov 25, 2015 Author: Michael Wegner * (michael.wegner@student.kit.edu) */ #ifndef NETWORKIT_CPP_ALGEBRAIC_DENSEMATRIX_H_ #define NETWORKIT_CPP_ALGEBRAIC_DENSEMATRIX_H_ #include "../Globals.h" #include "Vector.h" #include <cassert> #include <vector> namespace NetworKit { /** * Represents a dense matrix. Use this matrix to run LU decompositions and LU solves. * Note that most matrices are rather sparse s.t. CSRMatrix might be a better representation. */ class DenseMatrix { private: count nRows; count nCols; std: : vector < double >entries; public: /** Default constructor */ DenseMatrix(); /** * Constructs an instance of DenseMatrix given the number of rows (@a nRows) and the number of columns (@a nCols) and its * values (@a entries). * @param nRows Number of rows. * @param nCols Number of columns. * @param entries Entries of the matrix. * @note The size of the @a entries vector should be equal to @a nRows * @a nCols. */ DenseMatrix(const count nRows, const count nCols, const std: : vector < double >&entries); /** Default destructor */ virtual ~ DenseMatrix() = default; /** Default copy constructor */ DenseMatrix(const DenseMatrix & other)= default; /** Default move constructor */ DenseMatrix(DenseMatrix && other) = default; /** Default copy assignment operator */ DenseMatrix & operator = (DenseMatrix && other) = default; /** Default move assignment operator */ DenseMatrix & operator = (const DenseMatrix & other)= default; /** * @return Number of rows. */ inline count numberOfRows() const { return nRows; } /** * @return Number of columns. */ inline count numberOfColumns() const { return nCols; } /** * @return Value at matrix position (i,j). */ double operator() (const index i, const index j)const; /** * Set the matrix at position (@a i, @a j) to @a value. */ void setValue(const index i, const index j, const double value); /** * @return Row @a i of this matrix as vector. */ Vector row(const index i)const; /** * @return Column @a j of this matrix as vector. */ Vector column(const index j)const; /** * @return The main diagonal of this matrix. */ Vector diagonal() const; /** * Adds this matrix to @a other and returns the result. * @return The sum of this matrix and @a other. */ DenseMatrix operator + (const DenseMatrix & other)const; /** * Adds @a other to this matrix. * @return Reference to this matrix. */ DenseMatrix & operator += (const DenseMatrix & other); /** * Subtracts @a other from this matrix and returns the result. * @return The difference of this matrix and @a other. * */ DenseMatrix operator - (const DenseMatrix & other)const; /** * Subtracts @a other from this matrix. * @return Reference to this matrix. */ DenseMatrix & operator -= (const DenseMatrix & other); /** * Multiplies this matrix with a scalar specified in @a scalar and returns the result. * @return The result of multiplying this matrix with @a scalar. */ DenseMatrix operator *(const double &scalar)const; /** * Multiplies this matrix with a scalar specified in @a scalar. * @return Reference to this matrix. */ DenseMatrix & operator *= (const double &scalar); /** * Multiplies this matrix with @a vector and returns the result. * @return The result of multiplying this matrix with @a vector. */ Vector operator *(const Vector & vector)const; /** * Multiplies this matrix with @a other and returns the result in a new matrix. * @return The result of multiplying this matrix with @a other. 
*/ DenseMatrix operator *(const DenseMatrix & other)const; /** * Divides this matrix by a divisor specified in @a divisor and returns the result in a new matrix. * @return The result of dividing this matrix by @a divisor. */ DenseMatrix operator / (const double &divisor)const; /** * Divides this matrix by a divisor specified in @a divisor. * @return Reference to this matrix. */ DenseMatrix & operator /= (const double &divisor); /** * Decomposes the given @a matrix into lower L and upper U matrix (in-place). * @param matrix The matrix to decompose into LU. */ static void LUDecomposition(DenseMatrix & matrix); /** * Computes the solution vector x to the system @a LU * x = @a b where @a LU is a matrix decomposed into L and U. * @param LU Matrix decomposed into lower L and upper U matrix. * @param b Right-hand side. * @return Solution vector x to the linear equation system LU * x = b. */ static Vector LUSolve(const DenseMatrix & LU, const Vector & b); /** * Computes @a A @a binaryOp @a B on the elements of matrix @a A and matrix @a B. * @param A * @param B * @param binaryOp Function handling (double, double) -> double * @return @a A @a binaryOp @a B. * @note @a A and @a B must have the same dimensions. */ template < typename L > static DenseMatrix binaryOperator(const DenseMatrix & A, const DenseMatrix & B, L binaryOp); /** * Iterate over all non-zero elements of row @a row in the matrix and call handler(index column, double value) */ template < typename L > void forElementsInRow(index i, L handle) const; /** * Iterate in parallel over all non-zero elements of row @a row in the matrix and call handler(index column, double value) */ template < typename L > void parallelForElementsInRow(index i, L handle) const; /** * Iterate over all non-zero elements of the matrix in row order and call handler (lambda closure). */ template < typename L > void forElementsInRowOrder(L handle) const; /** * Iterate in parallel over all rows and call handler (lambda closure) on non-zero elements of the matrix. */ template < typename L > void parallelForElementsInRowOrder(L handle) const; /** * Iterate in parallel over all rows and call handler (lambda closure) on non-zero elements of the matrix. 
*/ template < typename L > void parallelForElementsInRowOrder(L handle); }; template < typename L > inline DenseMatrix NetworKit: : DenseMatrix: : binaryOperator(const DenseMatrix & A, const DenseMatrix & B, L binaryOp) { assert(A.nRows == B.nRows && A.nCols == B.nCols); std: : vector < double >resultEntries(A.numberOfRows() * A.numberOfColumns()); for (index i = 0; i < A.numberOfRows(); ++i) { index offset = i * A.numberOfColumns(); for (index j = offset; j < offset + A.numberOfColumns(); ++j) { resultEntries[j] = binaryOp(A.entries[j], B.entries[j]); } } return DenseMatrix(A.numberOfRows(), A.numberOfColumns(), resultEntries); } template < typename L > inline void NetworKit::DenseMatrix::forElementsInRow(index i, L handle) const { index offset = i * numberOfColumns(); for (index k = offset, j = 0; k < offset + numberOfColumns(); ++k, ++j) { handle(j, entries[k]); } } template < typename L > inline void NetworKit::DenseMatrix::parallelForElementsInRow(index i, L handle) const { index offset = i * numberOfColumns(); for (index j = 0; j < numberOfColumns(); ++j) { handle(j, entries[offset + j]); } } template < typename L > inline void NetworKit::DenseMatrix::forElementsInRowOrder(L handle) const { for (index i = 0; i < nRows; ++i) { index offset = i * numberOfColumns(); for (index k = offset, j = 0; k < offset + numberOfColumns(); ++k, ++j) { handle(i, j, entries[k]); } } } template < typename L > inline void NetworKit::DenseMatrix::parallelForElementsInRowOrder(L handle) const { for (index i = 0; i < nRows; ++i) { index offset = i * numberOfColumns(); for (index k = offset, j = 0; k < offset + numberOfColumns(); ++k, ++j) { handle(i, j, entries[k]); } } } template < typename L > inline void NetworKit::DenseMatrix::parallelForElementsInRowOrder(L handle) { for (index i = 0; i < nRows; ++i) { index offset = i * numberOfColumns(); for (index k = offset, j = 0; k < offset + numberOfColumns(); ++k, ++j) { handle(i, j, entries[k]); } } } } /* namespace NetworKit */ #endif /* NETWORKIT_CPP_ALGEBRAIC_DENSEMATRIX_H_ */
/* * DenseMatrix.h * * Created on: Nov 25, 2015 Author: Michael Wegner * (michael.wegner@student.kit.edu) */ #ifndef NETWORKIT_CPP_ALGEBRAIC_DENSEMATRIX_H_ #define NETWORKIT_CPP_ALGEBRAIC_DENSEMATRIX_H_ #include "../Globals.h" #include "Vector.h" #include <cassert> #include <vector> namespace NetworKit { /** * Represents a dense matrix. Use this matrix to run LU decompositions and LU solves. * Note that most matrices are rather sparse s.t. CSRMatrix might be a better representation. */ class DenseMatrix { private: count nRows; count nCols; std: : vector < double >entries; public: /** Default constructor */ DenseMatrix(); /** * Constructs an instance of DenseMatrix given the number of rows (@a nRows) and the number of columns (@a nCols) and its * values (@a entries). * @param nRows Number of rows. * @param nCols Number of columns. * @param entries Entries of the matrix. * @note The size of the @a entries vector should be equal to @a nRows * @a nCols. */ DenseMatrix(const count nRows, const count nCols, const std: : vector < double >&entries); /** Default destructor */ virtual ~ DenseMatrix() = default; /** Default copy constructor */ DenseMatrix(const DenseMatrix & other)= default; /** Default move constructor */ DenseMatrix(DenseMatrix && other) = default; /** Default copy assignment operator */ DenseMatrix & operator = (DenseMatrix && other) = default; /** Default move assignment operator */ DenseMatrix & operator = (const DenseMatrix & other)= default; /** * @return Number of rows. */ inline count numberOfRows() const { return nRows; } /** * @return Number of columns. */ inline count numberOfColumns() const { return nCols; } /** * @return Value at matrix position (i,j). */ double operator() (const index i, const index j)const; /** * Set the matrix at position (@a i, @a j) to @a value. */ void setValue(const index i, const index j, const double value); /** * @return Row @a i of this matrix as vector. */ Vector row(const index i)const; /** * @return Column @a j of this matrix as vector. */ Vector column(const index j)const; /** * @return The main diagonal of this matrix. */ Vector diagonal() const; /** * Adds this matrix to @a other and returns the result. * @return The sum of this matrix and @a other. */ DenseMatrix operator + (const DenseMatrix & other)const; /** * Adds @a other to this matrix. * @return Reference to this matrix. */ DenseMatrix & operator += (const DenseMatrix & other); /** * Subtracts @a other from this matrix and returns the result. * @return The difference of this matrix and @a other. * */ DenseMatrix operator - (const DenseMatrix & other)const; /** * Subtracts @a other from this matrix. * @return Reference to this matrix. */ DenseMatrix & operator -= (const DenseMatrix & other); /** * Multiplies this matrix with a scalar specified in @a scalar and returns the result. * @return The result of multiplying this matrix with @a scalar. */ DenseMatrix operator *(const double &scalar)const; /** * Multiplies this matrix with a scalar specified in @a scalar. * @return Reference to this matrix. */ DenseMatrix & operator *= (const double &scalar); /** * Multiplies this matrix with @a vector and returns the result. * @return The result of multiplying this matrix with @a vector. */ Vector operator *(const Vector & vector)const; /** * Multiplies this matrix with @a other and returns the result in a new matrix. * @return The result of multiplying this matrix with @a other. 
*/ DenseMatrix operator *(const DenseMatrix & other)const; /** * Divides this matrix by a divisor specified in @a divisor and returns the result in a new matrix. * @return The result of dividing this matrix by @a divisor. */ DenseMatrix operator / (const double &divisor)const; /** * Divides this matrix by a divisor specified in @a divisor. * @return Reference to this matrix. */ DenseMatrix & operator /= (const double &divisor); /** * Decomposes the given @a matrix into lower L and upper U matrix (in-place). * @param matrix The matrix to decompose into LU. */ static void LUDecomposition(DenseMatrix & matrix); /** * Computes the solution vector x to the system @a LU * x = @a b where @a LU is a matrix decomposed into L and U. * @param LU Matrix decomposed into lower L and upper U matrix. * @param b Right-hand side. * @return Solution vector x to the linear equation system LU * x = b. */ static Vector LUSolve(const DenseMatrix & LU, const Vector & b); /** * Computes @a A @a binaryOp @a B on the elements of matrix @a A and matrix @a B. * @param A * @param B * @param binaryOp Function handling (double, double) -> double * @return @a A @a binaryOp @a B. * @note @a A and @a B must have the same dimensions. */ template < typename L > static DenseMatrix binaryOperator(const DenseMatrix & A, const DenseMatrix & B, L binaryOp); /** * Iterate over all non-zero elements of row @a row in the matrix and call handler(index column, double value) */ template < typename L > void forElementsInRow(index i, L handle) const; /** * Iterate in parallel over all non-zero elements of row @a row in the matrix and call handler(index column, double value) */ template < typename L > void parallelForElementsInRow(index i, L handle) const; /** * Iterate over all non-zero elements of the matrix in row order and call handler (lambda closure). */ template < typename L > void forElementsInRowOrder(L handle) const; /** * Iterate in parallel over all rows and call handler (lambda closure) on non-zero elements of the matrix. */ template < typename L > void parallelForElementsInRowOrder(L handle) const; /** * Iterate in parallel over all rows and call handler (lambda closure) on non-zero elements of the matrix. 
*/ template < typename L > void parallelForElementsInRowOrder(L handle); }; template < typename L > inline DenseMatrix NetworKit: : DenseMatrix: : binaryOperator(const DenseMatrix & A, const DenseMatrix & B, L binaryOp) { assert(A.nRows == B.nRows && A.nCols == B.nCols); std: : vector < double >resultEntries(A.numberOfRows() * A.numberOfColumns()); #pragma omp parallel for for (index i = 0; i < A.numberOfRows(); ++i) { index offset = i * A.numberOfColumns(); for (index j = offset; j < offset + A.numberOfColumns(); ++j) { resultEntries[j] = binaryOp(A.entries[j], B.entries[j]); } } return DenseMatrix(A.numberOfRows(), A.numberOfColumns(), resultEntries); } template < typename L > inline void NetworKit::DenseMatrix::forElementsInRow(index i, L handle) const { index offset = i * numberOfColumns(); for (index k = offset, j = 0; k < offset + numberOfColumns(); ++k, ++j) { handle(j, entries[k]); } } template < typename L > inline void NetworKit::DenseMatrix::parallelForElementsInRow(index i, L handle) const { index offset = i * numberOfColumns(); #pragma omp parallel for for (index j = 0; j < numberOfColumns(); ++j) { handle(j, entries[offset + j]); } } template < typename L > inline void NetworKit::DenseMatrix::forElementsInRowOrder(L handle) const { for (index i = 0; i < nRows; ++i) { index offset = i * numberOfColumns(); for (index k = offset, j = 0; k < offset + numberOfColumns(); ++k, ++j) { handle(i, j, entries[k]); } } } template < typename L > inline void NetworKit::DenseMatrix::parallelForElementsInRowOrder(L handle) const { #pragma omp parallel for for (index i = 0; i < nRows; ++i) { index offset = i * numberOfColumns(); for (index k = offset, j = 0; k < offset + numberOfColumns(); ++k, ++j) { handle(i, j, entries[k]); } } } template < typename L > inline void NetworKit::DenseMatrix::parallelForElementsInRowOrder(L handle) { #pragma omp parallel for for (index i = 0; i < nRows; ++i) { index offset = i * numberOfColumns(); for (index k = offset, j = 0; k < offset + numberOfColumns(); ++k, ++j) { handle(i, j, entries[k]); } } } } /* namespace NetworKit */ #endif /* NETWORKIT_CPP_ALGEBRAIC_DENSEMATRIX_H_ */
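One point worth noting about the parallel iterators declared above: in the OpenMP build, parallelForElementsInRowOrder runs the row loop under `#pragma omp parallel for`, so the handle may execute concurrently for different rows while staying sequential within a row. The hedged sketch below, assuming the same NetworKit headers, accumulates into one slot per row, so no two threads ever touch the same element.

// Hedged sketch: per-row sums computed with parallelForElementsInRowOrder.
// Each handle invocation touches only sums[i]; since a given row is handled
// by a single thread, the += needs no atomics.  Anything shared across rows
// would need its own synchronization.
#include "DenseMatrix.h"
#include <vector>

std::vector<double> rowSums(const NetworKit::DenseMatrix& M) {
    std::vector<double> sums(M.numberOfRows(), 0.0);
    M.parallelForElementsInRowOrder(
        [&sums](NetworKit::index i, NetworKit::index j, double value) {
            (void)j;            // column index unused here
            sums[i] += value;   // row-local slot: safe under the row-parallel loop
        });
    return sums;
}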
comms.h
/* //@HEADER // ***************************************************************************** // // HPCGraph: Graph Computation on High Performance Computing Systems // Copyright (2016) Sandia Corporation // // Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation, // the U.S. Government retains certain rights in this software. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // 1. Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // // 3. Neither the name of the Corporation nor the names of the // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Questions? Contact George M. 
Slota (gmslota@sandia.gov) // Siva Rajamanickam (srajama@sandia.gov) // Kamesh Madduri (madduri@cse.psu.edu) // // ***************************************************************************** //@HEADER */ #ifndef _COMMS_H_ #define _COMMS_H_ #include <mpi.h> #include <omp.h> #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <assert.h> #include "dist_graph.h" #include "util.h" extern int procid, nprocs; extern bool verbose, debug, debug2, verify, output; #define MAX_SEND_SIZE 268435456 #define THREAD_QUEUE_SIZE 3072 struct mpi_data_t { int32_t* sendcounts; int32_t* recvcounts; int32_t* sdispls; int32_t* rdispls; int32_t* sdispls_cpy; uint64_t* recvcounts_temp; uint64_t* sendcounts_temp; uint64_t* sdispls_temp; uint64_t* rdispls_temp; uint64_t* sdispls_cpy_temp; uint64_t* sendbuf_vert; uint64_t* sendbuf_data; double* sendbuf_data_flt; uint64_t* recvbuf_vert; uint64_t* recvbuf_data; double* recvbuf_data_flt; uint64_t total_recv; uint64_t total_send; uint64_t global_queue_size; } ; struct queue_data_t { uint64_t* queue; uint64_t* queue_next; uint64_t* queue_send; uint64_t queue_size; uint64_t next_size; uint64_t send_size; } ; struct thread_queue_t { int32_t tid; uint64_t* thread_queue; uint64_t* thread_send; uint64_t thread_queue_size; uint64_t thread_send_size; } ; struct thread_comm_t { int32_t tid; bool* v_to_rank; uint64_t* sendcounts_thread; uint64_t* sendbuf_vert_thread; uint64_t* sendbuf_data_thread; double* sendbuf_data_thread_flt; int32_t* sendbuf_rank_thread; uint64_t* thread_starts; uint64_t thread_queue_size; } ; void init_queue_data(dist_graph_t* g, queue_data_t* q); void clear_queue_data(queue_data_t* q); void init_comm_data(mpi_data_t* comm); void clear_comm_data(mpi_data_t* comm); void init_thread_queue(thread_queue_t* tq); void clear_thread_queue(thread_queue_t* tq); void init_thread_comm(thread_comm_t* tc); void clear_thread_comm(thread_comm_t* tc); void init_thread_comm_flt(thread_comm_t* tc); void clear_thread_commflt(thread_comm_t* tc); void init_sendbuf_vid_data(mpi_data_t* comm); void init_recvbuf_vid_data(mpi_data_t* comm); void init_sendbuf_vid_data_flt(mpi_data_t* comm); void init_recvbuf_vid_data_flt(mpi_data_t* comm); void clear_recvbuf_vid_data(mpi_data_t* comm); void clear_allbuf_vid_data(mpi_data_t* comm); inline void exchange_verts(dist_graph_t* g, mpi_data_t* comm, queue_data_t* q); inline void exchange_verts_bicc(dist_graph_t* g, mpi_data_t* comm, queue_data_t* q); inline void exchange_verts(mpi_data_t* comm); inline void exchange_data(mpi_data_t* comm); inline void exchange_data_flt(mpi_data_t* comm); inline void exchange_vert_data(dist_graph_t* g, mpi_data_t* comm, queue_data_t* q); inline void exchange_vert_data(dist_graph_t* g, mpi_data_t* comm); inline void update_sendcounts_thread(dist_graph_t* g, thread_comm_t* tc, uint64_t vert_index); inline void update_sendcounts_thread(dist_graph_t* g, thread_comm_t* tc, uint64_t vert_index, uint64_t count_data); inline void update_vid_data_queues(dist_graph_t* g, thread_comm_t* tc, mpi_data_t* comm, uint64_t vert_index, uint64_t data); inline void update_vid_data_queues(dist_graph_t* g, thread_comm_t* tc, mpi_data_t* comm, uint64_t vert_index, uint64_t data1, uint64_t data2, uint64_t data3); inline void add_vid_to_queue(thread_queue_t* tq, queue_data_t* q, uint64_t vertex_id); inline void add_vid_to_queue(thread_queue_t* tq, queue_data_t* q, uint64_t vertex_id1, uint64_t vertex_id2); inline void empty_queue(thread_queue_t* tq, queue_data_t* q); inline void add_vid_to_send(thread_queue_t* tq, 
queue_data_t* q, uint64_t vertex_id); inline void add_vid_to_send(thread_queue_t* tq, queue_data_t* q, uint64_t vertex_id1, uint64_t vertex_id2); inline void empty_send(thread_queue_t* tq, queue_data_t* q); inline void add_vid_data_to_send(thread_comm_t* tc, mpi_data_t* comm, uint64_t vertex_id, uint64_t data_val, int32_t send_rank); inline void add_vid_data_to_send_flt(thread_comm_t* tc, mpi_data_t* comm, uint64_t vertex_id, double data_val, int32_t send_rank); inline void empty_vid_data(thread_comm_t* tc, mpi_data_t* comm); inline void empty_vid_data_flt(thread_comm_t* tc, mpi_data_t* comm); inline void exchange_verts(dist_graph_t* g, mpi_data_t* comm, queue_data_t* q) { comm->global_queue_size = 0; uint64_t task_queue_size = q->next_size + q->send_size; MPI_Allreduce(&task_queue_size, &comm->global_queue_size, 1, MPI_UINT64_T, MPI_SUM, MPI_COMM_WORLD); uint64_t num_comms = comm->global_queue_size / (uint64_t)MAX_SEND_SIZE + 1; uint64_t sum_recv = 0; for (uint64_t c = 0; c < num_comms; ++c) { uint64_t send_begin = (q->send_size * c) / num_comms; uint64_t send_end = (q->send_size * (c + 1)) / num_comms; if (c == (num_comms-1)) send_end = q->send_size; for (int32_t i = 0; i < nprocs; ++i) { comm->sendcounts[i] = 0; comm->recvcounts[i] = 0; } for (uint64_t i = send_begin; i < send_end; ++i) { uint64_t ghost_index = q->queue_send[i] - g->n_local; uint64_t ghost_task = g->ghost_tasks[ghost_index]; ++comm->sendcounts[ghost_task]; } MPI_Alltoall(comm->sendcounts, 1, MPI_INT32_T, comm->recvcounts, 1, MPI_INT32_T, MPI_COMM_WORLD); comm->sdispls[0] = 0; comm->sdispls_cpy[0] = 0; comm->rdispls[0] = 0; for (int32_t i = 1; i < nprocs; ++i) { comm->sdispls[i] = comm->sdispls[i-1] + comm->sendcounts[i-1]; comm->rdispls[i] = comm->rdispls[i-1] + comm->recvcounts[i-1]; comm->sdispls_cpy[i] = comm->sdispls[i]; } int32_t cur_send = comm->sdispls[nprocs-1] + comm->sendcounts[nprocs-1]; int32_t cur_recv = comm->rdispls[nprocs-1] + comm->recvcounts[nprocs-1]; comm->sendbuf_vert = (uint64_t*)malloc((uint64_t)(cur_send+1)*sizeof(uint64_t)); if (comm->sendbuf_vert == NULL) throw_err("exchange_verts(), unable to allocate comm buffers", procid); for (uint64_t i = send_begin; i < send_end; ++i) { uint64_t ghost_index = q->queue_send[i] - g->n_local; uint64_t ghost_task = g->ghost_tasks[ghost_index]; uint64_t vert = g->ghost_unmap[ghost_index]; comm->sendbuf_vert[comm->sdispls_cpy[ghost_task]++] = vert; } MPI_Alltoallv(comm->sendbuf_vert, comm->sendcounts, comm->sdispls, MPI_UINT64_T, q->queue_next+q->next_size+sum_recv, comm->recvcounts, comm->rdispls, MPI_UINT64_T, MPI_COMM_WORLD); free(comm->sendbuf_vert); sum_recv += cur_recv; } q->queue_size = q->next_size + sum_recv; q->next_size = 0; q->send_size = 0; uint64_t* temp = q->queue; q->queue = q->queue_next; q->queue_next = temp; } inline void exchange_verts_bicc(dist_graph_t* g, mpi_data_t* comm, queue_data_t* q) { comm->global_queue_size = 0; uint64_t task_queue_size = q->next_size + q->send_size; MPI_Allreduce(&task_queue_size, &comm->global_queue_size, 1, MPI_UINT64_T, MPI_SUM, MPI_COMM_WORLD); uint64_t num_comms = comm->global_queue_size / (uint64_t)MAX_SEND_SIZE + 1; uint64_t sum_recv = 0; for (uint64_t c = 0; c < num_comms; ++c) { uint64_t send_begin = (q->send_size * c) / num_comms; uint64_t send_end = (q->send_size * (c + 1)) / num_comms; if (c == (num_comms-1)) send_end = q->send_size; if (send_begin % 2 != 0) send_begin++; if (send_end % 2 != 0) send_end++; for (int32_t i = 0; i < nprocs; ++i) { comm->sendcounts[i] = 0; comm->recvcounts[i] = 0; } for 
(uint64_t i = send_begin; i < send_end; i += 2) { uint64_t ghost_index = q->queue_send[i] - g->n_local; uint64_t ghost_task = g->ghost_tasks[ghost_index]; comm->sendcounts[ghost_task] += 2; } MPI_Alltoall(comm->sendcounts, 1, MPI_INT32_T, comm->recvcounts, 1, MPI_INT32_T, MPI_COMM_WORLD); comm->sdispls[0] = 0; comm->sdispls_cpy[0] = 0; comm->rdispls[0] = 0; for (int32_t i = 1; i < nprocs; ++i) { comm->sdispls[i] = comm->sdispls[i-1] + comm->sendcounts[i-1]; comm->rdispls[i] = comm->rdispls[i-1] + comm->recvcounts[i-1]; comm->sdispls_cpy[i] = comm->sdispls[i]; } int32_t cur_send = comm->sdispls[nprocs-1] + comm->sendcounts[nprocs-1]; int32_t cur_recv = comm->rdispls[nprocs-1] + comm->recvcounts[nprocs-1]; comm->sendbuf_vert = (uint64_t*)malloc((uint64_t)(cur_send+1)*sizeof(uint64_t)); if (comm->sendbuf_vert == NULL) throw_err("exchange_verts(), unable to allocate comm buffers", procid); for (uint64_t i = send_begin; i < send_end; i += 2) { uint64_t ghost_index = q->queue_send[i] - g->n_local; uint64_t ghost_task = g->ghost_tasks[ghost_index]; uint64_t vert = g->ghost_unmap[ghost_index]; uint64_t parent = q->queue_send[i+1]; comm->sendbuf_vert[comm->sdispls_cpy[ghost_task]++] = vert; comm->sendbuf_vert[comm->sdispls_cpy[ghost_task]++] = parent; } MPI_Alltoallv(comm->sendbuf_vert, comm->sendcounts, comm->sdispls, MPI_UINT64_T, q->queue_next+q->next_size+sum_recv, comm->recvcounts, comm->rdispls, MPI_UINT64_T, MPI_COMM_WORLD); free(comm->sendbuf_vert); sum_recv += cur_recv; } q->queue_size = q->next_size + sum_recv; q->next_size = 0; q->send_size = 0; uint64_t* temp = q->queue; q->queue = q->queue_next; q->queue_next = temp; } inline void exchange_vert_data(dist_graph_t* g, mpi_data_t* comm, queue_data_t* q) { for (int32_t i = 0; i < nprocs; ++i) comm->recvcounts_temp[i] = 0; MPI_Alltoall(comm->sendcounts_temp, 1, MPI_UINT64_T, comm->recvcounts_temp, 1, MPI_UINT64_T, MPI_COMM_WORLD); comm->total_recv = 0; for (int i = 0; i < nprocs; ++i) comm->total_recv += comm->recvcounts_temp[i]; comm->recvbuf_vert = (uint64_t*)malloc(comm->total_recv*sizeof(uint64_t)); comm->recvbuf_data = (uint64_t*)malloc(comm->total_recv*sizeof(uint64_t)); comm->recvbuf_data_flt = NULL; if (comm->recvbuf_vert == NULL || comm->recvbuf_data == NULL) throw_err("exchange_vert_data() unable to allocate comm buffers", procid); comm->global_queue_size = 0; uint64_t task_queue_size = comm->total_send; MPI_Allreduce(&task_queue_size, &comm->global_queue_size, 1, MPI_UINT64_T, MPI_SUM, MPI_COMM_WORLD); uint64_t num_comms = comm->global_queue_size / (uint64_t)MAX_SEND_SIZE + 1; uint64_t sum_recv = 0; uint64_t sum_send = 0; for (uint64_t c = 0; c < num_comms; ++c) { for (int32_t i = 0; i < nprocs; ++i) { uint64_t send_begin = (comm->sendcounts_temp[i] * c) / num_comms; uint64_t send_end = (comm->sendcounts_temp[i] * (c + 1)) / num_comms; if (c == (num_comms-1)) send_end = comm->sendcounts_temp[i]; comm->sendcounts[i] = (int32_t)(send_end - send_begin); assert(comm->sendcounts[i] >= 0); } MPI_Alltoall(comm->sendcounts, 1, MPI_INT32_T, comm->recvcounts, 1, MPI_INT32_T, MPI_COMM_WORLD); comm->sdispls[0] = 0; comm->sdispls_cpy[0] = 0; comm->rdispls[0] = 0; for (int32_t i = 1; i < nprocs; ++i) { comm->sdispls[i] = comm->sdispls[i-1] + comm->sendcounts[i-1]; comm->rdispls[i] = comm->rdispls[i-1] + comm->recvcounts[i-1]; comm->sdispls_cpy[i] = comm->sdispls[i]; } int32_t cur_send = comm->sdispls[nprocs-1] + comm->sendcounts[nprocs-1]; int32_t cur_recv = comm->rdispls[nprocs-1] + comm->recvcounts[nprocs-1]; uint64_t* buf_v = 
(uint64_t*)malloc((uint64_t)(cur_send)*sizeof(uint64_t)); uint64_t* buf_d = (uint64_t*)malloc((uint64_t)(cur_send)*sizeof(uint64_t)); if (buf_v == NULL || buf_d == NULL) throw_err("exchange_verts(), unable to allocate comm buffers", procid); for (int32_t i = 0; i < nprocs; ++i) { uint64_t send_begin = (comm->sendcounts_temp[i] * c) / num_comms; uint64_t send_end = (comm->sendcounts_temp[i] * (c + 1)) / num_comms; if (c == (num_comms-1)) send_end = comm->sendcounts_temp[i]; for (uint64_t j = send_begin; j < send_end; ++j) { uint64_t vert = comm->sendbuf_vert[comm->sdispls_temp[i]+j]; uint64_t data = comm->sendbuf_data[comm->sdispls_temp[i]+j]; buf_v[comm->sdispls_cpy[i]] = vert; buf_d[comm->sdispls_cpy[i]++] = data; } } MPI_Alltoallv(buf_v, comm->sendcounts, comm->sdispls, MPI_UINT64_T, comm->recvbuf_vert+sum_recv, comm->recvcounts, comm->rdispls, MPI_UINT64_T, MPI_COMM_WORLD); MPI_Alltoallv(buf_d, comm->sendcounts, comm->sdispls, MPI_UINT64_T, comm->recvbuf_data+sum_recv, comm->recvcounts, comm->rdispls, MPI_UINT64_T, MPI_COMM_WORLD); free(buf_v); free(buf_d); sum_recv += cur_recv; sum_send += cur_send; } assert(sum_recv == comm->total_recv); assert(sum_send == comm->total_send); comm->global_queue_size = 0; task_queue_size = comm->total_recv + q->next_size; MPI_Allreduce(&task_queue_size, &comm->global_queue_size, 1, MPI_UINT64_T, MPI_SUM, MPI_COMM_WORLD); q->send_size = 0; } inline void exchange_verts(mpi_data_t* comm) { if (debug) { printf("Task %d exchange_verts() start\n", procid); } uint64_t num_comms = comm->global_queue_size / (uint64_t)MAX_SEND_SIZE + 1; uint64_t sum_recv = 0; uint64_t sum_send = 0; for (uint64_t c = 0; c < num_comms; ++c) { for (int32_t i = 0; i < nprocs; ++i) { uint64_t send_begin = (comm->sendcounts_temp[i] * c) / num_comms; uint64_t send_end = (comm->sendcounts_temp[i] * (c + 1)) / num_comms; if (c == (num_comms-1)) send_end = comm->sendcounts_temp[i]; comm->sendcounts[i] = (int32_t)(send_end - send_begin); assert(comm->sendcounts[i] >= 0); } MPI_Alltoall(comm->sendcounts, 1, MPI_INT32_T, comm->recvcounts, 1, MPI_INT32_T, MPI_COMM_WORLD); comm->sdispls[0] = 0; comm->sdispls_cpy[0] = 0; comm->rdispls[0] = 0; for (int32_t i = 1; i < nprocs; ++i) { comm->sdispls[i] = comm->sdispls[i-1] + comm->sendcounts[i-1]; comm->rdispls[i] = comm->rdispls[i-1] + comm->recvcounts[i-1]; comm->sdispls_cpy[i] = comm->sdispls[i]; } int32_t cur_send = comm->sdispls[nprocs-1] + comm->sendcounts[nprocs-1]; int32_t cur_recv = comm->rdispls[nprocs-1] + comm->recvcounts[nprocs-1]; uint64_t* buf_v = (uint64_t*)malloc((uint64_t)(cur_send)*sizeof(uint64_t)); if (buf_v == NULL) throw_err("exchange_verts(), unable to allocate comm buffers", procid); for (int32_t i = 0; i < nprocs; ++i) { uint64_t send_begin = (comm->sendcounts_temp[i] * c) / num_comms; uint64_t send_end = (comm->sendcounts_temp[i] * (c + 1)) / num_comms; if (c == (num_comms-1)) send_end = comm->sendcounts_temp[i]; for (uint64_t j = send_begin; j < send_end; ++j) { uint64_t vert = comm->sendbuf_vert[comm->sdispls_temp[i]+j]; buf_v[comm->sdispls_cpy[i]++] = vert; } } MPI_Alltoallv(buf_v, comm->sendcounts, comm->sdispls, MPI_UINT64_T, comm->recvbuf_vert+sum_recv, comm->recvcounts, comm->rdispls, MPI_UINT64_T, MPI_COMM_WORLD); free(buf_v); sum_recv += cur_recv; sum_send += cur_send; } assert(sum_recv == comm->total_recv); assert(sum_send == comm->total_send); if (debug) { printf("Task %d exchange_verts() success\n", procid); } } inline void exchange_data(mpi_data_t* comm) { if (debug) { printf("Task %d exchange_data() start\n", 
procid); } uint64_t num_comms = comm->global_queue_size / (uint64_t)MAX_SEND_SIZE + 1; uint64_t sum_recv = 0; uint64_t sum_send = 0; for (uint64_t c = 0; c < num_comms; ++c) { for (int32_t i = 0; i < nprocs; ++i) { uint64_t send_begin = (comm->sendcounts_temp[i] * c) / num_comms; uint64_t send_end = (comm->sendcounts_temp[i] * (c + 1)) / num_comms; if (c == (num_comms-1)) send_end = comm->sendcounts_temp[i]; comm->sendcounts[i] = (int32_t)(send_end - send_begin); assert(comm->sendcounts[i] >= 0); } MPI_Alltoall(comm->sendcounts, 1, MPI_INT32_T, comm->recvcounts, 1, MPI_INT32_T, MPI_COMM_WORLD); comm->sdispls[0] = 0; comm->sdispls_cpy[0] = 0; comm->rdispls[0] = 0; for (int32_t i = 1; i < nprocs; ++i) { comm->sdispls[i] = comm->sdispls[i-1] + comm->sendcounts[i-1]; comm->rdispls[i] = comm->rdispls[i-1] + comm->recvcounts[i-1]; comm->sdispls_cpy[i] = comm->sdispls[i]; } int32_t cur_send = comm->sdispls[nprocs-1] + comm->sendcounts[nprocs-1]; int32_t cur_recv = comm->rdispls[nprocs-1] + comm->recvcounts[nprocs-1]; uint64_t* buf_d = (uint64_t*)malloc((uint64_t)(cur_send)*sizeof(uint64_t)); if (buf_d == NULL) throw_err("exchange_data(), unable to allocate comm buffers", procid); for (int32_t i = 0; i < nprocs; ++i) { uint64_t send_begin = (comm->sendcounts_temp[i] * c) / num_comms; uint64_t send_end = (comm->sendcounts_temp[i] * (c + 1)) / num_comms; if (c == (num_comms-1)) send_end = comm->sendcounts_temp[i]; for (uint64_t j = send_begin; j < send_end; ++j) { uint64_t data = comm->sendbuf_data[comm->sdispls_temp[i]+j]; buf_d[comm->sdispls_cpy[i]++] = data; } } MPI_Alltoallv(buf_d, comm->sendcounts, comm->sdispls, MPI_UINT64_T, comm->recvbuf_data+sum_recv, comm->recvcounts, comm->rdispls, MPI_UINT64_T, MPI_COMM_WORLD); free(buf_d); sum_recv += cur_recv; sum_send += cur_send; } assert(sum_recv == comm->total_recv); assert(sum_send == comm->total_send); if (debug) { printf("Task %d exchange_data() success\n", procid); } } inline void exchange_data_flt(mpi_data_t* comm) { if (debug) { printf("Task %d exchange_data_flt() start\n", procid); } uint64_t num_comms = comm->global_queue_size / (uint64_t)MAX_SEND_SIZE + 1; uint64_t sum_recv = 0; uint64_t sum_send = 0; for (uint64_t c = 0; c < num_comms; ++c) { for (int32_t i = 0; i < nprocs; ++i) { uint64_t send_begin = (comm->sendcounts_temp[i] * c) / num_comms; uint64_t send_end = (comm->sendcounts_temp[i] * (c + 1)) / num_comms; if (c == (num_comms-1)) send_end = comm->sendcounts_temp[i]; comm->sendcounts[i] = (int32_t)(send_end - send_begin); assert(comm->sendcounts[i] >= 0); } MPI_Alltoall(comm->sendcounts, 1, MPI_INT32_T, comm->recvcounts, 1, MPI_INT32_T, MPI_COMM_WORLD); comm->sdispls[0] = 0; comm->sdispls_cpy[0] = 0; comm->rdispls[0] = 0; for (int32_t i = 1; i < nprocs; ++i) { comm->sdispls[i] = comm->sdispls[i-1] + comm->sendcounts[i-1]; comm->rdispls[i] = comm->rdispls[i-1] + comm->recvcounts[i-1]; comm->sdispls_cpy[i] = comm->sdispls[i]; } int32_t cur_send = comm->sdispls[nprocs-1] + comm->sendcounts[nprocs-1]; int32_t cur_recv = comm->rdispls[nprocs-1] + comm->recvcounts[nprocs-1]; double* buf_d = (double*)malloc((double)(cur_send)*sizeof(double)); if (buf_d == NULL) throw_err("exchange_data_flt(), unable to allocate comm buffers", procid); for (int32_t i = 0; i < nprocs; ++i) { uint64_t send_begin = (comm->sendcounts_temp[i] * c) / num_comms; uint64_t send_end = (comm->sendcounts_temp[i] * (c + 1)) / num_comms; if (c == (num_comms-1)) send_end = comm->sendcounts_temp[i]; for (uint64_t j = send_begin; j < send_end; ++j) { double data = 
comm->sendbuf_data_flt[comm->sdispls_temp[i]+j]; buf_d[comm->sdispls_cpy[i]++] = data; } } MPI_Alltoallv(buf_d, comm->sendcounts, comm->sdispls, MPI_DOUBLE, comm->recvbuf_data_flt+sum_recv, comm->recvcounts, comm->rdispls, MPI_DOUBLE, MPI_COMM_WORLD); free(buf_d); sum_recv += cur_recv; sum_send += cur_send; } assert(sum_recv == comm->total_recv); assert(sum_send == comm->total_send); if (debug) { printf("Task %d exchange_data_flt() success\n", procid); } } inline void update_sendcounts_thread(dist_graph_t* g, thread_comm_t* tc, uint64_t vert_index) { for (int32_t i = 0; i < nprocs; ++i) tc->v_to_rank[i] = false; uint64_t out_degree = out_degree(g, vert_index); uint64_t* outs = out_vertices(g, vert_index); for (uint64_t j = 0; j < out_degree; ++j) { uint64_t out_index = outs[j]; if (out_index >= g->n_local) { int32_t out_rank = g->ghost_tasks[out_index-g->n_local]; if (!tc->v_to_rank[out_rank]) { tc->v_to_rank[out_rank] = true; ++tc->sendcounts_thread[out_rank]; } } } } inline void update_sendcounts_thread(dist_graph_t* g, thread_comm_t* tc, uint64_t vert_index, uint64_t count_data) { for (int32_t i = 0; i < nprocs; ++i) tc->v_to_rank[i] = false; uint64_t out_degree = out_degree(g, vert_index); uint64_t* outs = out_vertices(g, vert_index); for (uint64_t j = 0; j < out_degree; ++j) { uint64_t out_index = outs[j]; if (out_index >= g->n_local) { int32_t out_rank = g->ghost_tasks[out_index-g->n_local]; if (!tc->v_to_rank[out_rank]) { tc->v_to_rank[out_rank] = true; tc->sendcounts_thread[out_rank] += count_data; } } } } inline void update_vid_data_queues(dist_graph_t* g, thread_comm_t* tc, mpi_data_t* comm, uint64_t vert_index, uint64_t data) { for (int32_t i = 0; i < nprocs; ++i) tc->v_to_rank[i] = false; uint64_t out_degree = out_degree(g, vert_index); uint64_t* outs = out_vertices(g, vert_index); for (uint64_t j = 0; j < out_degree; ++j) { uint64_t out_index = outs[j]; if (out_index >= g->n_local) { int32_t out_rank = g->ghost_tasks[out_index - g->n_local]; if (!tc->v_to_rank[out_rank]) { tc->v_to_rank[out_rank] = true; add_vid_data_to_send(tc, comm, g->local_unmap[vert_index], data, out_rank); } } } } inline void update_vid_data_queues(dist_graph_t* g, thread_comm_t* tc, mpi_data_t* comm, uint64_t vert_index, uint64_t data1, uint64_t data2, uint64_t data3) { for (int32_t i = 0; i < nprocs; ++i) tc->v_to_rank[i] = false; uint64_t out_degree = out_degree(g, vert_index); uint64_t* outs = out_vertices(g, vert_index); for (uint64_t j = 0; j < out_degree; ++j) { uint64_t out_index = outs[j]; if (out_index >= g->n_local) { int32_t out_rank = g->ghost_tasks[out_index - g->n_local]; if (!tc->v_to_rank[out_rank]) { tc->v_to_rank[out_rank] = true; add_vid_data_to_send(tc, comm, g->local_unmap[vert_index], data1, out_rank); add_vid_data_to_send(tc, comm, g->local_unmap[vert_index], data2, out_rank); add_vid_data_to_send(tc, comm, g->local_unmap[vert_index], data3, out_rank); } } } } inline void add_vid_to_queue(thread_queue_t* tq, queue_data_t* q, uint64_t vertex_id) { tq->thread_queue[tq->thread_queue_size++] = vertex_id; if (tq->thread_queue_size == THREAD_QUEUE_SIZE) empty_queue(tq, q); } inline void add_vid_to_queue(thread_queue_t* tq, queue_data_t* q, uint64_t vertex_id1, uint64_t vertex_id2) { tq->thread_queue[tq->thread_queue_size++] = vertex_id1; tq->thread_queue[tq->thread_queue_size++] = vertex_id2; if (tq->thread_queue_size == THREAD_QUEUE_SIZE) empty_queue(tq, q); } inline void empty_queue(thread_queue_t* tq, queue_data_t* q) { uint64_t start_offset; #pragma omp atomic capture start_offset = 
q->next_size += tq->thread_queue_size; start_offset -= tq->thread_queue_size; for (uint64_t i = 0; i < tq->thread_queue_size; ++i) q->queue_next[start_offset + i] = tq->thread_queue[i]; tq->thread_queue_size = 0; } inline void add_vid_to_send(thread_queue_t* tq, queue_data_t* q, uint64_t vertex_id) { tq->thread_send[tq->thread_send_size++] = vertex_id; if (tq->thread_send_size == THREAD_QUEUE_SIZE) empty_send(tq, q); } inline void add_vid_to_send(thread_queue_t* tq, queue_data_t* q, uint64_t vertex_id1, uint64_t vertex_id2) { tq->thread_send[tq->thread_send_size++] = vertex_id1; tq->thread_send[tq->thread_send_size++] = vertex_id2; if (tq->thread_send_size == THREAD_QUEUE_SIZE) empty_send(tq, q); } inline void empty_send(thread_queue_t* tq, queue_data_t* q) { uint64_t start_offset; #pragma omp atomic capture start_offset = q->send_size += tq->thread_send_size; start_offset -= tq->thread_send_size; for (uint64_t i = 0; i < tq->thread_send_size; ++i) q->queue_send[start_offset + i] = tq->thread_send[i]; tq->thread_send_size = 0; } inline void add_vid_data_to_send(thread_comm_t* tc, mpi_data_t* comm, uint64_t vertex_id, uint64_t data_val, int32_t send_rank) { tc->sendbuf_vert_thread[tc->thread_queue_size] = vertex_id; tc->sendbuf_data_thread[tc->thread_queue_size] = data_val; tc->sendbuf_rank_thread[tc->thread_queue_size] = send_rank; ++tc->thread_queue_size; ++tc->sendcounts_thread[send_rank]; if (tc->thread_queue_size == THREAD_QUEUE_SIZE) empty_vid_data(tc, comm); } inline void add_vid_data_to_send_flt(thread_comm_t* tc, mpi_data_t* comm, uint64_t vertex_id, double data_val, int32_t send_rank) { tc->sendbuf_vert_thread[tc->thread_queue_size] = vertex_id; tc->sendbuf_data_thread_flt[tc->thread_queue_size] = data_val; tc->sendbuf_rank_thread[tc->thread_queue_size] = send_rank; ++tc->thread_queue_size; ++tc->sendcounts_thread[send_rank]; if (tc->thread_queue_size == THREAD_QUEUE_SIZE) empty_vid_data_flt(tc, comm); } inline void empty_vid_data(thread_comm_t* tc, mpi_data_t* comm) { for (int32_t i = 0; i < nprocs; ++i) { #pragma omp atomic capture tc->thread_starts[i] = comm->sdispls_cpy_temp[i] += tc->sendcounts_thread[i]; tc->thread_starts[i] -= tc->sendcounts_thread[i]; } for (uint64_t i = 0; i < tc->thread_queue_size; ++i) { int32_t cur_rank = tc->sendbuf_rank_thread[i]; comm->sendbuf_vert[tc->thread_starts[cur_rank]] = tc->sendbuf_vert_thread[i]; comm->sendbuf_data[tc->thread_starts[cur_rank]] = tc->sendbuf_data_thread[i]; ++tc->thread_starts[cur_rank]; } for (int32_t i = 0; i < nprocs; ++i) { tc->thread_starts[i] = 0; tc->sendcounts_thread[i] = 0; } tc->thread_queue_size = 0; } inline void empty_vid_data_flt(thread_comm_t* tc, mpi_data_t* comm) { for (int32_t i = 0; i < nprocs; ++i) { #pragma omp atomic capture tc->thread_starts[i] = comm->sdispls_cpy_temp[i] += tc->sendcounts_thread[i]; tc->thread_starts[i] -= tc->sendcounts_thread[i]; } for (uint64_t i = 0; i < tc->thread_queue_size; ++i) { int32_t cur_rank = tc->sendbuf_rank_thread[i]; comm->sendbuf_vert[tc->thread_starts[cur_rank]] = tc->sendbuf_vert_thread[i]; comm->sendbuf_data_flt[tc->thread_starts[cur_rank]] = tc->sendbuf_data_thread_flt[i]; ++tc->thread_starts[cur_rank]; } for (int32_t i = 0; i < nprocs; ++i) { tc->thread_starts[i] = 0; tc->sendcounts_thread[i] = 0; } tc->thread_queue_size = 0; } #endif
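Editor's note: every exchange routine in comms.h above follows the same communication pattern -- count items per destination rank, exchange the counts with MPI_Alltoall, build displacement arrays by prefix sum, pack a send buffer, and move the payload with MPI_Alltoallv (split into num_comms rounds when the global volume exceeds MAX_SEND_SIZE). The following standalone sketch reproduces that pattern for a single round; it is not part of comms.h, and the payload (each rank sending its own id) is purely illustrative.

/* Minimal sketch of the count / displacement / MPI_Alltoallv idiom used by
 * exchange_verts() and friends. Editor-added example, hypothetical payload. */
#include <mpi.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv) {
  MPI_Init(&argc, &argv);
  int procid, nprocs;
  MPI_Comm_rank(MPI_COMM_WORLD, &procid);
  MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

  int32_t *sendcounts = malloc(nprocs * sizeof(int32_t));
  int32_t *recvcounts = malloc(nprocs * sizeof(int32_t));
  int32_t *sdispls = malloc(nprocs * sizeof(int32_t));
  int32_t *rdispls = malloc(nprocs * sizeof(int32_t));

  /* Step 1: decide how much goes to each destination (here: dest+1 values). */
  for (int32_t i = 0; i < nprocs; ++i) sendcounts[i] = i + 1;

  /* Step 2: exchange counts so every rank knows how much it will receive. */
  MPI_Alltoall(sendcounts, 1, MPI_INT32_T, recvcounts, 1, MPI_INT32_T, MPI_COMM_WORLD);

  /* Step 3: displacements are prefix sums of the counts, as in comms.h. */
  sdispls[0] = 0;
  rdispls[0] = 0;
  for (int32_t i = 1; i < nprocs; ++i) {
    sdispls[i] = sdispls[i - 1] + sendcounts[i - 1];
    rdispls[i] = rdispls[i - 1] + recvcounts[i - 1];
  }
  int32_t cur_send = sdispls[nprocs - 1] + sendcounts[nprocs - 1];
  int32_t cur_recv = rdispls[nprocs - 1] + recvcounts[nprocs - 1];

  /* Step 4: pack the send buffer rank by rank, then do the variable-size exchange. */
  uint64_t *sendbuf = malloc((size_t)cur_send * sizeof(uint64_t));
  uint64_t *recvbuf = malloc((size_t)cur_recv * sizeof(uint64_t));
  for (int32_t i = 0; i < nprocs; ++i)
    for (int32_t j = 0; j < sendcounts[i]; ++j)
      sendbuf[sdispls[i] + j] = (uint64_t)procid; /* payload: sender's rank */
  MPI_Alltoallv(sendbuf, sendcounts, sdispls, MPI_UINT64_T,
                recvbuf, recvcounts, rdispls, MPI_UINT64_T, MPI_COMM_WORLD);

  printf("Task %d received %d values\n", procid, cur_recv);
  free(sendbuf); free(recvbuf);
  free(sendcounts); free(recvcounts); free(sdispls); free(rdispls);
  MPI_Finalize();
  return 0;
}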
#ifndef _COMMS_H_ #define _COMMS_H_ #include <mpi.h> #include <omp.h> #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <assert.h> #include "dist_graph.h" #include "util.h" extern int procid, nprocs; extern bool verbose, debug, debug2, verify, output; #define MAX_SEND_SIZE 268435456 #define THREAD_QUEUE_SIZE 3072 struct mpi_data_t { int32_t *sendcounts; int32_t *recvcounts; int32_t *sdispls; int32_t *rdispls; int32_t *sdispls_cpy; uint64_t *recvcounts_temp; uint64_t *sendcounts_temp; uint64_t *sdispls_temp; uint64_t *rdispls_temp; uint64_t *sdispls_cpy_temp; uint64_t *sendbuf_vert; uint64_t *sendbuf_data; double *sendbuf_data_flt; uint64_t *recvbuf_vert; uint64_t *recvbuf_data; double *recvbuf_data_flt; uint64_t total_recv; uint64_t total_send; uint64_t global_queue_size; }; struct queue_data_t { uint64_t *queue; uint64_t *queue_next; uint64_t *queue_send; uint64_t queue_size; uint64_t next_size; uint64_t send_size; }; struct thread_queue_t { int32_t tid; uint64_t *thread_queue; uint64_t *thread_send; uint64_t thread_queue_size; uint64_t thread_send_size; }; struct thread_comm_t { int32_t tid; bool *v_to_rank; uint64_t *sendcounts_thread; uint64_t *sendbuf_vert_thread; uint64_t *sendbuf_data_thread; double *sendbuf_data_thread_flt; int32_t *sendbuf_rank_thread; uint64_t *thread_starts; uint64_t thread_queue_size; }; void init_queue_data(dist_graph_t * g, queue_data_t * q); void clear_queue_data(queue_data_t * q); void init_comm_data(mpi_data_t * comm); void clear_comm_data(mpi_data_t * comm); void init_thread_queue(thread_queue_t * tq); void clear_thread_queue(thread_queue_t * tq); void init_thread_comm(thread_comm_t * tc); void clear_thread_comm(thread_comm_t * tc); void init_thread_comm_flt(thread_comm_t * tc); void clear_thread_commflt(thread_comm_t * tc); void init_sendbuf_vid_data(mpi_data_t * comm); void init_recvbuf_vid_data(mpi_data_t * comm); void init_sendbuf_vid_data_flt(mpi_data_t * comm); void init_recvbuf_vid_data_flt(mpi_data_t * comm); void clear_recvbuf_vid_data(mpi_data_t * comm); void clear_allbuf_vid_data(mpi_data_t * comm); inline void exchange_verts(dist_graph_t * g, mpi_data_t * comm, queue_data_t * q); inline void exchange_verts_bicc(dist_graph_t * g, mpi_data_t * comm, queue_data_t * q); inline void exchange_verts(mpi_data_t * comm); inline void exchange_data(mpi_data_t * comm); inline void exchange_data_flt(mpi_data_t * comm); inline void exchange_vert_data(dist_graph_t * g, mpi_data_t * comm, queue_data_t * q); inline void exchange_vert_data(dist_graph_t * g, mpi_data_t * comm); inline void update_sendcounts_thread(dist_graph_t * g, thread_comm_t * tc, uint64_t vert_index); inline void update_sendcounts_thread(dist_graph_t * g, thread_comm_t * tc, uint64_t vert_index, uint64_t count_data); inline void update_vid_data_queues(dist_graph_t * g, thread_comm_t * tc, mpi_data_t * comm, uint64_t vert_index, uint64_t data); inline void update_vid_data_queues(dist_graph_t * g, thread_comm_t * tc, mpi_data_t * comm, uint64_t vert_index, uint64_t data1, uint64_t data2, uint64_t data3); inline void add_vid_to_queue(thread_queue_t * tq, queue_data_t * q, uint64_t vertex_id); inline void add_vid_to_queue(thread_queue_t * tq, queue_data_t * q, uint64_t vertex_id1, uint64_t vertex_id2); inline void empty_queue(thread_queue_t * tq, queue_data_t * q); inline void add_vid_to_send(thread_queue_t * tq, queue_data_t * q, uint64_t vertex_id); inline void add_vid_to_send(thread_queue_t * tq, queue_data_t * q, uint64_t vertex_id1, uint64_t vertex_id2); inline void 
empty_send(thread_queue_t * tq, queue_data_t * q); inline void add_vid_data_to_send(thread_comm_t * tc, mpi_data_t * comm, uint64_t vertex_id, uint64_t data_val, int32_t send_rank); inline void add_vid_data_to_send_flt(thread_comm_t * tc, mpi_data_t * comm, uint64_t vertex_id, double data_val, int32_t send_rank); inline void empty_vid_data(thread_comm_t * tc, mpi_data_t * comm); inline void empty_vid_data_flt(thread_comm_t * tc, mpi_data_t * comm); inline void exchange_verts(dist_graph_t * g, mpi_data_t * comm, queue_data_t * q) { comm->global_queue_size = 0; uint64_t task_queue_size = q->next_size + q->send_size; MPI_Allreduce(&task_queue_size, &comm->global_queue_size, 1, MPI_UINT64_T, MPI_SUM, MPI_COMM_WORLD); uint64_t num_comms = comm->global_queue_size / (uint64_t) MAX_SEND_SIZE + 1; uint64_t sum_recv = 0; for (uint64_t c = 0; c < num_comms; ++c) { uint64_t send_begin = (q->send_size * c) / num_comms; uint64_t send_end = (q->send_size * (c + 1)) / num_comms; if (c == (num_comms - 1)) send_end = q->send_size; for (int32_t i = 0; i < nprocs; ++i) { comm->sendcounts[i] = 0; comm->recvcounts[i] = 0; } for (uint64_t i = send_begin; i < send_end; ++i) { uint64_t ghost_index = q->queue_send[i] - g->n_local; uint64_t ghost_task = g->ghost_tasks[ghost_index]; ++comm->sendcounts[ghost_task]; } MPI_Alltoall(comm->sendcounts, 1, MPI_INT32_T, comm->recvcounts, 1, MPI_INT32_T, MPI_COMM_WORLD); comm->sdispls[0] = 0; comm->sdispls_cpy[0] = 0; comm->rdispls[0] = 0; for (int32_t i = 1; i < nprocs; ++i) { comm->sdispls[i] = comm->sdispls[i - 1] + comm->sendcounts[i - 1]; comm->rdispls[i] = comm->rdispls[i - 1] + comm->recvcounts[i - 1]; comm->sdispls_cpy[i] = comm->sdispls[i]; } int32_t cur_send = comm->sdispls[nprocs - 1] + comm->sendcounts[nprocs - 1]; int32_t cur_recv = comm->rdispls[nprocs - 1] + comm->recvcounts[nprocs - 1]; comm->sendbuf_vert = (uint64_t *) malloc((uint64_t) (cur_send + 1) * sizeof(uint64_t)); if (comm->sendbuf_vert == NULL) throw_err("exchange_verts(), unable to allocate comm buffers", procid); for (uint64_t i = send_begin; i < send_end; ++i) { uint64_t ghost_index = q->queue_send[i] - g->n_local; uint64_t ghost_task = g->ghost_tasks[ghost_index]; uint64_t vert = g->ghost_unmap[ghost_index]; comm->sendbuf_vert[comm->sdispls_cpy[ghost_task]++] = vert; } MPI_Alltoallv(comm->sendbuf_vert, comm->sendcounts, comm->sdispls, MPI_UINT64_T, q->queue_next + q->next_size + sum_recv, comm->recvcounts, comm->rdispls, MPI_UINT64_T, MPI_COMM_WORLD); free(comm->sendbuf_vert); sum_recv += cur_recv; } q->queue_size = q->next_size + sum_recv; q->next_size = 0; q->send_size = 0; uint64_t *temp = q->queue; q->queue = q->queue_next; q->queue_next = temp; } inline void exchange_verts_bicc(dist_graph_t * g, mpi_data_t * comm, queue_data_t * q) { comm->global_queue_size = 0; uint64_t task_queue_size = q->next_size + q->send_size; MPI_Allreduce(&task_queue_size, &comm->global_queue_size, 1, MPI_UINT64_T, MPI_SUM, MPI_COMM_WORLD); uint64_t num_comms = comm->global_queue_size / (uint64_t) MAX_SEND_SIZE + 1; uint64_t sum_recv = 0; for (uint64_t c = 0; c < num_comms; ++c) { uint64_t send_begin = (q->send_size * c) / num_comms; uint64_t send_end = (q->send_size * (c + 1)) / num_comms; if (c == (num_comms - 1)) send_end = q->send_size; if (send_begin % 2 != 0) send_begin++; if (send_end % 2 != 0) send_end++; for (int32_t i = 0; i < nprocs; ++i) { comm->sendcounts[i] = 0; comm->recvcounts[i] = 0; } for (uint64_t i = send_begin; i < send_end; i += 2) { uint64_t ghost_index = q->queue_send[i] - g->n_local; 
uint64_t ghost_task = g->ghost_tasks[ghost_index]; comm->sendcounts[ghost_task] += 2; } MPI_Alltoall(comm->sendcounts, 1, MPI_INT32_T, comm->recvcounts, 1, MPI_INT32_T, MPI_COMM_WORLD); comm->sdispls[0] = 0; comm->sdispls_cpy[0] = 0; comm->rdispls[0] = 0; for (int32_t i = 1; i < nprocs; ++i) { comm->sdispls[i] = comm->sdispls[i - 1] + comm->sendcounts[i - 1]; comm->rdispls[i] = comm->rdispls[i - 1] + comm->recvcounts[i - 1]; comm->sdispls_cpy[i] = comm->sdispls[i]; } int32_t cur_send = comm->sdispls[nprocs - 1] + comm->sendcounts[nprocs - 1]; int32_t cur_recv = comm->rdispls[nprocs - 1] + comm->recvcounts[nprocs - 1]; comm->sendbuf_vert = (uint64_t *) malloc((uint64_t) (cur_send + 1) * sizeof(uint64_t)); if (comm->sendbuf_vert == NULL) throw_err("exchange_verts(), unable to allocate comm buffers", procid); for (uint64_t i = send_begin; i < send_end; i += 2) { uint64_t ghost_index = q->queue_send[i] - g->n_local; uint64_t ghost_task = g->ghost_tasks[ghost_index]; uint64_t vert = g->ghost_unmap[ghost_index]; uint64_t parent = q->queue_send[i + 1]; comm->sendbuf_vert[comm->sdispls_cpy[ghost_task]++] = vert; comm->sendbuf_vert[comm->sdispls_cpy[ghost_task]++] = parent; } MPI_Alltoallv(comm->sendbuf_vert, comm->sendcounts, comm->sdispls, MPI_UINT64_T, q->queue_next + q->next_size + sum_recv, comm->recvcounts, comm->rdispls, MPI_UINT64_T, MPI_COMM_WORLD); free(comm->sendbuf_vert); sum_recv += cur_recv; } q->queue_size = q->next_size + sum_recv; q->next_size = 0; q->send_size = 0; uint64_t *temp = q->queue; q->queue = q->queue_next; q->queue_next = temp; } inline void exchange_vert_data(dist_graph_t * g, mpi_data_t * comm, queue_data_t * q) { for (int32_t i = 0; i < nprocs; ++i) comm->recvcounts_temp[i] = 0; MPI_Alltoall(comm->sendcounts_temp, 1, MPI_UINT64_T, comm->recvcounts_temp, 1, MPI_UINT64_T, MPI_COMM_WORLD); comm->total_recv = 0; for (int i = 0; i < nprocs; ++i) comm->total_recv += comm->recvcounts_temp[i]; comm->recvbuf_vert = (uint64_t *) malloc(comm->total_recv * sizeof(uint64_t)); comm->recvbuf_data = (uint64_t *) malloc(comm->total_recv * sizeof(uint64_t)); comm->recvbuf_data_flt = NULL; if (comm->recvbuf_vert == NULL || comm->recvbuf_data == NULL) throw_err("exchange_vert_data() unable to allocate comm buffers", procid); comm->global_queue_size = 0; uint64_t task_queue_size = comm->total_send; MPI_Allreduce(&task_queue_size, &comm->global_queue_size, 1, MPI_UINT64_T, MPI_SUM, MPI_COMM_WORLD); uint64_t num_comms = comm->global_queue_size / (uint64_t) MAX_SEND_SIZE + 1; uint64_t sum_recv = 0; uint64_t sum_send = 0; for (uint64_t c = 0; c < num_comms; ++c) { for (int32_t i = 0; i < nprocs; ++i) { uint64_t send_begin = (comm->sendcounts_temp[i] * c) / num_comms; uint64_t send_end = (comm->sendcounts_temp[i] * (c + 1)) / num_comms; if (c == (num_comms - 1)) send_end = comm->sendcounts_temp[i]; comm->sendcounts[i] = (int32_t) (send_end - send_begin); assert(comm->sendcounts[i] >= 0); } MPI_Alltoall(comm->sendcounts, 1, MPI_INT32_T, comm->recvcounts, 1, MPI_INT32_T, MPI_COMM_WORLD); comm->sdispls[0] = 0; comm->sdispls_cpy[0] = 0; comm->rdispls[0] = 0; for (int32_t i = 1; i < nprocs; ++i) { comm->sdispls[i] = comm->sdispls[i - 1] + comm->sendcounts[i - 1]; comm->rdispls[i] = comm->rdispls[i - 1] + comm->recvcounts[i - 1]; comm->sdispls_cpy[i] = comm->sdispls[i]; } int32_t cur_send = comm->sdispls[nprocs - 1] + comm->sendcounts[nprocs - 1]; int32_t cur_recv = comm->rdispls[nprocs - 1] + comm->recvcounts[nprocs - 1]; uint64_t *buf_v = (uint64_t *) malloc((uint64_t) (cur_send) * 
sizeof(uint64_t)); uint64_t *buf_d = (uint64_t *) malloc((uint64_t) (cur_send) * sizeof(uint64_t)); if (buf_v == NULL || buf_d == NULL) throw_err("exchange_verts(), unable to allocate comm buffers", procid); for (int32_t i = 0; i < nprocs; ++i) { uint64_t send_begin = (comm->sendcounts_temp[i] * c) / num_comms; uint64_t send_end = (comm->sendcounts_temp[i] * (c + 1)) / num_comms; if (c == (num_comms - 1)) send_end = comm->sendcounts_temp[i]; for (uint64_t j = send_begin; j < send_end; ++j) { uint64_t vert = comm->sendbuf_vert[comm->sdispls_temp[i] + j]; uint64_t data = comm->sendbuf_data[comm->sdispls_temp[i] + j]; buf_v[comm->sdispls_cpy[i]] = vert; buf_d[comm->sdispls_cpy[i]++] = data; } } MPI_Alltoallv(buf_v, comm->sendcounts, comm->sdispls, MPI_UINT64_T, comm->recvbuf_vert + sum_recv, comm->recvcounts, comm->rdispls, MPI_UINT64_T, MPI_COMM_WORLD); MPI_Alltoallv(buf_d, comm->sendcounts, comm->sdispls, MPI_UINT64_T, comm->recvbuf_data + sum_recv, comm->recvcounts, comm->rdispls, MPI_UINT64_T, MPI_COMM_WORLD); free(buf_v); free(buf_d); sum_recv += cur_recv; sum_send += cur_send; } assert(sum_recv == comm->total_recv); assert(sum_send == comm->total_send); comm->global_queue_size = 0; task_queue_size = comm->total_recv + q->next_size; MPI_Allreduce(&task_queue_size, &comm->global_queue_size, 1, MPI_UINT64_T, MPI_SUM, MPI_COMM_WORLD); q->send_size = 0; } inline void exchange_verts(mpi_data_t * comm) { if (debug) { printf("Task %d exchange_verts() start\n", procid); } uint64_t num_comms = comm->global_queue_size / (uint64_t) MAX_SEND_SIZE + 1; uint64_t sum_recv = 0; uint64_t sum_send = 0; for (uint64_t c = 0; c < num_comms; ++c) { for (int32_t i = 0; i < nprocs; ++i) { uint64_t send_begin = (comm->sendcounts_temp[i] * c) / num_comms; uint64_t send_end = (comm->sendcounts_temp[i] * (c + 1)) / num_comms; if (c == (num_comms - 1)) send_end = comm->sendcounts_temp[i]; comm->sendcounts[i] = (int32_t) (send_end - send_begin); assert(comm->sendcounts[i] >= 0); } MPI_Alltoall(comm->sendcounts, 1, MPI_INT32_T, comm->recvcounts, 1, MPI_INT32_T, MPI_COMM_WORLD); comm->sdispls[0] = 0; comm->sdispls_cpy[0] = 0; comm->rdispls[0] = 0; for (int32_t i = 1; i < nprocs; ++i) { comm->sdispls[i] = comm->sdispls[i - 1] + comm->sendcounts[i - 1]; comm->rdispls[i] = comm->rdispls[i - 1] + comm->recvcounts[i - 1]; comm->sdispls_cpy[i] = comm->sdispls[i]; } int32_t cur_send = comm->sdispls[nprocs - 1] + comm->sendcounts[nprocs - 1]; int32_t cur_recv = comm->rdispls[nprocs - 1] + comm->recvcounts[nprocs - 1]; uint64_t *buf_v = (uint64_t *) malloc((uint64_t) (cur_send) * sizeof(uint64_t)); if (buf_v == NULL) throw_err("exchange_verts(), unable to allocate comm buffers", procid); for (int32_t i = 0; i < nprocs; ++i) { uint64_t send_begin = (comm->sendcounts_temp[i] * c) / num_comms; uint64_t send_end = (comm->sendcounts_temp[i] * (c + 1)) / num_comms; if (c == (num_comms - 1)) send_end = comm->sendcounts_temp[i]; for (uint64_t j = send_begin; j < send_end; ++j) { uint64_t vert = comm->sendbuf_vert[comm->sdispls_temp[i] + j]; buf_v[comm->sdispls_cpy[i]++] = vert; } } MPI_Alltoallv(buf_v, comm->sendcounts, comm->sdispls, MPI_UINT64_T, comm->recvbuf_vert + sum_recv, comm->recvcounts, comm->rdispls, MPI_UINT64_T, MPI_COMM_WORLD); free(buf_v); sum_recv += cur_recv; sum_send += cur_send; } assert(sum_recv == comm->total_recv); assert(sum_send == comm->total_send); if (debug) { printf("Task %d exchange_verts() success\n", procid); } } inline void exchange_data(mpi_data_t * comm) { if (debug) { printf("Task %d exchange_data() 
start\n", procid); } uint64_t num_comms = comm->global_queue_size / (uint64_t) MAX_SEND_SIZE + 1; uint64_t sum_recv = 0; uint64_t sum_send = 0; for (uint64_t c = 0; c < num_comms; ++c) { for (int32_t i = 0; i < nprocs; ++i) { uint64_t send_begin = (comm->sendcounts_temp[i] * c) / num_comms; uint64_t send_end = (comm->sendcounts_temp[i] * (c + 1)) / num_comms; if (c == (num_comms - 1)) send_end = comm->sendcounts_temp[i]; comm->sendcounts[i] = (int32_t) (send_end - send_begin); assert(comm->sendcounts[i] >= 0); } MPI_Alltoall(comm->sendcounts, 1, MPI_INT32_T, comm->recvcounts, 1, MPI_INT32_T, MPI_COMM_WORLD); comm->sdispls[0] = 0; comm->sdispls_cpy[0] = 0; comm->rdispls[0] = 0; for (int32_t i = 1; i < nprocs; ++i) { comm->sdispls[i] = comm->sdispls[i - 1] + comm->sendcounts[i - 1]; comm->rdispls[i] = comm->rdispls[i - 1] + comm->recvcounts[i - 1]; comm->sdispls_cpy[i] = comm->sdispls[i]; } int32_t cur_send = comm->sdispls[nprocs - 1] + comm->sendcounts[nprocs - 1]; int32_t cur_recv = comm->rdispls[nprocs - 1] + comm->recvcounts[nprocs - 1]; uint64_t *buf_d = (uint64_t *) malloc((uint64_t) (cur_send) * sizeof(uint64_t)); if (buf_d == NULL) throw_err("exchange_data(), unable to allocate comm buffers", procid); for (int32_t i = 0; i < nprocs; ++i) { uint64_t send_begin = (comm->sendcounts_temp[i] * c) / num_comms; uint64_t send_end = (comm->sendcounts_temp[i] * (c + 1)) / num_comms; if (c == (num_comms - 1)) send_end = comm->sendcounts_temp[i]; for (uint64_t j = send_begin; j < send_end; ++j) { uint64_t data = comm->sendbuf_data[comm->sdispls_temp[i] + j]; buf_d[comm->sdispls_cpy[i]++] = data; } } MPI_Alltoallv(buf_d, comm->sendcounts, comm->sdispls, MPI_UINT64_T, comm->recvbuf_data + sum_recv, comm->recvcounts, comm->rdispls, MPI_UINT64_T, MPI_COMM_WORLD); free(buf_d); sum_recv += cur_recv; sum_send += cur_send; } assert(sum_recv == comm->total_recv); assert(sum_send == comm->total_send); if (debug) { printf("Task %d exchange_data() success\n", procid); } } inline void exchange_data_flt(mpi_data_t * comm) { if (debug) { printf("Task %d exchange_data_flt() start\n", procid); } uint64_t num_comms = comm->global_queue_size / (uint64_t) MAX_SEND_SIZE + 1; uint64_t sum_recv = 0; uint64_t sum_send = 0; for (uint64_t c = 0; c < num_comms; ++c) { for (int32_t i = 0; i < nprocs; ++i) { uint64_t send_begin = (comm->sendcounts_temp[i] * c) / num_comms; uint64_t send_end = (comm->sendcounts_temp[i] * (c + 1)) / num_comms; if (c == (num_comms - 1)) send_end = comm->sendcounts_temp[i]; comm->sendcounts[i] = (int32_t) (send_end - send_begin); assert(comm->sendcounts[i] >= 0); } MPI_Alltoall(comm->sendcounts, 1, MPI_INT32_T, comm->recvcounts, 1, MPI_INT32_T, MPI_COMM_WORLD); comm->sdispls[0] = 0; comm->sdispls_cpy[0] = 0; comm->rdispls[0] = 0; for (int32_t i = 1; i < nprocs; ++i) { comm->sdispls[i] = comm->sdispls[i - 1] + comm->sendcounts[i - 1]; comm->rdispls[i] = comm->rdispls[i - 1] + comm->recvcounts[i - 1]; comm->sdispls_cpy[i] = comm->sdispls[i]; } int32_t cur_send = comm->sdispls[nprocs - 1] + comm->sendcounts[nprocs - 1]; int32_t cur_recv = comm->rdispls[nprocs - 1] + comm->recvcounts[nprocs - 1]; double *buf_d = (double *)malloc((double)(cur_send) * sizeof(double)); if (buf_d == NULL) throw_err("exchange_data_flt(), unable to allocate comm buffers", procid); for (int32_t i = 0; i < nprocs; ++i) { uint64_t send_begin = (comm->sendcounts_temp[i] * c) / num_comms; uint64_t send_end = (comm->sendcounts_temp[i] * (c + 1)) / num_comms; if (c == (num_comms - 1)) send_end = comm->sendcounts_temp[i]; for 
(uint64_t j = send_begin; j < send_end; ++j) { double data = comm->sendbuf_data_flt[comm->sdispls_temp[i] + j]; buf_d[comm->sdispls_cpy[i]++] = data; } } MPI_Alltoallv(buf_d, comm->sendcounts, comm->sdispls, MPI_DOUBLE, comm->recvbuf_data_flt + sum_recv, comm->recvcounts, comm->rdispls, MPI_DOUBLE, MPI_COMM_WORLD); free(buf_d); sum_recv += cur_recv; sum_send += cur_send; } assert(sum_recv == comm->total_recv); assert(sum_send == comm->total_send); if (debug) { printf("Task %d exchange_data_flt() success\n", procid); } } inline void update_sendcounts_thread(dist_graph_t * g, thread_comm_t * tc, uint64_t vert_index) { for (int32_t i = 0; i < nprocs; ++i) tc->v_to_rank[i] = false; uint64_t out_degree = out_degree(g, vert_index); uint64_t *outs = out_vertices(g, vert_index); for (uint64_t j = 0; j < out_degree; ++j) { uint64_t out_index = outs[j]; if (out_index >= g->n_local) { int32_t out_rank = g->ghost_tasks[out_index - g->n_local]; if (!tc->v_to_rank[out_rank]) { tc->v_to_rank[out_rank] = true; ++tc->sendcounts_thread[out_rank]; } } } } inline void update_sendcounts_thread(dist_graph_t * g, thread_comm_t * tc, uint64_t vert_index, uint64_t count_data) { for (int32_t i = 0; i < nprocs; ++i) tc->v_to_rank[i] = false; uint64_t out_degree = out_degree(g, vert_index); uint64_t *outs = out_vertices(g, vert_index); for (uint64_t j = 0; j < out_degree; ++j) { uint64_t out_index = outs[j]; if (out_index >= g->n_local) { int32_t out_rank = g->ghost_tasks[out_index - g->n_local]; if (!tc->v_to_rank[out_rank]) { tc->v_to_rank[out_rank] = true; tc->sendcounts_thread[out_rank] += count_data; } } } } inline void update_vid_data_queues(dist_graph_t * g, thread_comm_t * tc, mpi_data_t * comm, uint64_t vert_index, uint64_t data) { for (int32_t i = 0; i < nprocs; ++i) tc->v_to_rank[i] = false; uint64_t out_degree = out_degree(g, vert_index); uint64_t *outs = out_vertices(g, vert_index); for (uint64_t j = 0; j < out_degree; ++j) { uint64_t out_index = outs[j]; if (out_index >= g->n_local) { int32_t out_rank = g->ghost_tasks[out_index - g->n_local]; if (!tc->v_to_rank[out_rank]) { tc->v_to_rank[out_rank] = true; add_vid_data_to_send(tc, comm, g->local_unmap[vert_index], data, out_rank); } } } } inline void update_vid_data_queues(dist_graph_t * g, thread_comm_t * tc, mpi_data_t * comm, uint64_t vert_index, uint64_t data1, uint64_t data2, uint64_t data3) { for (int32_t i = 0; i < nprocs; ++i) tc->v_to_rank[i] = false; uint64_t out_degree = out_degree(g, vert_index); uint64_t *outs = out_vertices(g, vert_index); for (uint64_t j = 0; j < out_degree; ++j) { uint64_t out_index = outs[j]; if (out_index >= g->n_local) { int32_t out_rank = g->ghost_tasks[out_index - g->n_local]; if (!tc->v_to_rank[out_rank]) { tc->v_to_rank[out_rank] = true; add_vid_data_to_send(tc, comm, g->local_unmap[vert_index], data1, out_rank); add_vid_data_to_send(tc, comm, g->local_unmap[vert_index], data2, out_rank); add_vid_data_to_send(tc, comm, g->local_unmap[vert_index], data3, out_rank); } } } } inline void add_vid_to_queue(thread_queue_t * tq, queue_data_t * q, uint64_t vertex_id) { tq->thread_queue[tq->thread_queue_size++] = vertex_id; if (tq->thread_queue_size == THREAD_QUEUE_SIZE) empty_queue(tq, q); } inline void add_vid_to_queue(thread_queue_t * tq, queue_data_t * q, uint64_t vertex_id1, uint64_t vertex_id2) { tq->thread_queue[tq->thread_queue_size++] = vertex_id1; tq->thread_queue[tq->thread_queue_size++] = vertex_id2; if (tq->thread_queue_size == THREAD_QUEUE_SIZE) empty_queue(tq, q); } inline void empty_queue(thread_queue_t * tq, 
queue_data_t * q) { uint64_t start_offset; start_offset = q->next_size += tq->thread_queue_size; start_offset -= tq->thread_queue_size; for (uint64_t i = 0; i < tq->thread_queue_size; ++i) q->queue_next[start_offset + i] = tq->thread_queue[i]; tq->thread_queue_size = 0; } inline void add_vid_to_send(thread_queue_t * tq, queue_data_t * q, uint64_t vertex_id) { tq->thread_send[tq->thread_send_size++] = vertex_id; if (tq->thread_send_size == THREAD_QUEUE_SIZE) empty_send(tq, q); } inline void add_vid_to_send(thread_queue_t * tq, queue_data_t * q, uint64_t vertex_id1, uint64_t vertex_id2) { tq->thread_send[tq->thread_send_size++] = vertex_id1; tq->thread_send[tq->thread_send_size++] = vertex_id2; if (tq->thread_send_size == THREAD_QUEUE_SIZE) empty_send(tq, q); } inline void empty_send(thread_queue_t * tq, queue_data_t * q) { uint64_t start_offset; start_offset = q->send_size += tq->thread_send_size; start_offset -= tq->thread_send_size; for (uint64_t i = 0; i < tq->thread_send_size; ++i) q->queue_send[start_offset + i] = tq->thread_send[i]; tq->thread_send_size = 0; } inline void add_vid_data_to_send(thread_comm_t * tc, mpi_data_t * comm, uint64_t vertex_id, uint64_t data_val, int32_t send_rank) { tc->sendbuf_vert_thread[tc->thread_queue_size] = vertex_id; tc->sendbuf_data_thread[tc->thread_queue_size] = data_val; tc->sendbuf_rank_thread[tc->thread_queue_size] = send_rank; ++tc->thread_queue_size; ++tc->sendcounts_thread[send_rank]; if (tc->thread_queue_size == THREAD_QUEUE_SIZE) empty_vid_data(tc, comm); } inline void add_vid_data_to_send_flt(thread_comm_t * tc, mpi_data_t * comm, uint64_t vertex_id, double data_val, int32_t send_rank) { tc->sendbuf_vert_thread[tc->thread_queue_size] = vertex_id; tc->sendbuf_data_thread_flt[tc->thread_queue_size] = data_val; tc->sendbuf_rank_thread[tc->thread_queue_size] = send_rank; ++tc->thread_queue_size; ++tc->sendcounts_thread[send_rank]; if (tc->thread_queue_size == THREAD_QUEUE_SIZE) empty_vid_data_flt(tc, comm); } inline void empty_vid_data(thread_comm_t * tc, mpi_data_t * comm) { for (int32_t i = 0; i < nprocs; ++i) { tc->thread_starts[i] = comm->sdispls_cpy_temp[i] += tc->sendcounts_thread[i]; tc->thread_starts[i] -= tc->sendcounts_thread[i]; } for (uint64_t i = 0; i < tc->thread_queue_size; ++i) { int32_t cur_rank = tc->sendbuf_rank_thread[i]; comm->sendbuf_vert[tc->thread_starts[cur_rank]] = tc->sendbuf_vert_thread[i]; comm->sendbuf_data[tc->thread_starts[cur_rank]] = tc->sendbuf_data_thread[i]; ++tc->thread_starts[cur_rank]; } for (int32_t i = 0; i < nprocs; ++i) { tc->thread_starts[i] = 0; tc->sendcounts_thread[i] = 0; } tc->thread_queue_size = 0; } inline void empty_vid_data_flt(thread_comm_t * tc, mpi_data_t * comm) { for (int32_t i = 0; i < nprocs; ++i) { tc->thread_starts[i] = comm->sdispls_cpy_temp[i] += tc->sendcounts_thread[i]; tc->thread_starts[i] -= tc->sendcounts_thread[i]; } for (uint64_t i = 0; i < tc->thread_queue_size; ++i) { int32_t cur_rank = tc->sendbuf_rank_thread[i]; comm->sendbuf_vert[tc->thread_starts[cur_rank]] = tc->sendbuf_vert_thread[i]; comm->sendbuf_data_flt[tc->thread_starts[cur_rank]] = tc->sendbuf_data_thread_flt[i]; ++tc->thread_starts[cur_rank]; } for (int32_t i = 0; i < nprocs; ++i) { tc->thread_starts[i] = 0; tc->sendcounts_thread[i] = 0; } tc->thread_queue_size = 0; } #endif
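Editor's note: the thread-queue helpers (add_vid_to_queue / empty_queue and the send-side counterparts) buffer up to THREAD_QUEUE_SIZE entries per thread and then flush them into the shared queue in one step. In the OpenMP variant the flush reserves a contiguous slice by applying "#pragma omp atomic capture" to the shared size counter; the variant directly above simply drops the pragma for serial execution. The standalone sketch below shows the same reservation idiom with a plain counter and array; all names in it are illustrative and not part of comms.h.

/* Minimal sketch of the thread-local buffer + atomic-capture reservation used
 * by empty_queue()/empty_send(). Editor-added example with hypothetical names. */
#include <omp.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define LOCAL_BUF_SIZE 4

static void flush_local(uint64_t *shared, uint64_t *shared_size,
                        uint64_t *local, uint64_t *local_size) {
  uint64_t start_offset;
  /* Atomically grow the shared size and capture the updated value; subtracting
   * the flushed count afterwards gives the start of this thread's slice. */
#pragma omp atomic capture
  start_offset = *shared_size += *local_size;
  start_offset -= *local_size;
  for (uint64_t i = 0; i < *local_size; ++i)
    shared[start_offset + i] = local[i];
  *local_size = 0;
}

int main(void) {
  enum { N = 1000 };
  uint64_t *shared = malloc(N * sizeof(uint64_t));
  uint64_t shared_size = 0;

#pragma omp parallel
  {
    uint64_t local[LOCAL_BUF_SIZE];
    uint64_t local_size = 0;
    /* Each thread pushes its share of ids through its small local buffer. */
#pragma omp for
    for (int i = 0; i < N; ++i) {
      local[local_size++] = (uint64_t)i;
      if (local_size == LOCAL_BUF_SIZE)
        flush_local(shared, &shared_size, local, &local_size);
    }
    /* Flush any remainder left in the local buffer. */
    if (local_size > 0)
      flush_local(shared, &shared_size, local, &local_size);
  }

  printf("collected %llu ids\n", (unsigned long long)shared_size);
  free(shared);
  return 0;
}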
#ifndef _COMMS_H_ #define _COMMS_H_ #include <mpi.h> #include <omp.h> #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <assert.h> #include "dist_graph.h" #include "util.h" extern int procid, nprocs; extern bool verbose, debug, debug2, verify, output; #define MAX_SEND_SIZE 268435456 #define THREAD_QUEUE_SIZE 3072 struct mpi_data_t { int32_t *sendcounts; int32_t *recvcounts; int32_t *sdispls; int32_t *rdispls; int32_t *sdispls_cpy; uint64_t *recvcounts_temp; uint64_t *sendcounts_temp; uint64_t *sdispls_temp; uint64_t *rdispls_temp; uint64_t *sdispls_cpy_temp; uint64_t *sendbuf_vert; uint64_t *sendbuf_data; double *sendbuf_data_flt; uint64_t *recvbuf_vert; uint64_t *recvbuf_data; double *recvbuf_data_flt; uint64_t total_recv; uint64_t total_send; uint64_t global_queue_size; }; struct queue_data_t { uint64_t *queue; uint64_t *queue_next; uint64_t *queue_send; uint64_t queue_size; uint64_t next_size; uint64_t send_size; }; struct thread_queue_t { int32_t tid; uint64_t *thread_queue; uint64_t *thread_send; uint64_t thread_queue_size; uint64_t thread_send_size; }; struct thread_comm_t { int32_t tid; bool *v_to_rank; uint64_t *sendcounts_thread; uint64_t *sendbuf_vert_thread; uint64_t *sendbuf_data_thread; double *sendbuf_data_thread_flt; int32_t *sendbuf_rank_thread; uint64_t *thread_starts; uint64_t thread_queue_size; }; void init_queue_data(dist_graph_t * g, queue_data_t * q); void clear_queue_data(queue_data_t * q); void init_comm_data(mpi_data_t * comm); void clear_comm_data(mpi_data_t * comm); void init_thread_queue(thread_queue_t * tq); void clear_thread_queue(thread_queue_t * tq); void init_thread_comm(thread_comm_t * tc); void clear_thread_comm(thread_comm_t * tc); void init_thread_comm_flt(thread_comm_t * tc); void clear_thread_commflt(thread_comm_t * tc); void init_sendbuf_vid_data(mpi_data_t * comm); void init_recvbuf_vid_data(mpi_data_t * comm); void init_sendbuf_vid_data_flt(mpi_data_t * comm); void init_recvbuf_vid_data_flt(mpi_data_t * comm); void clear_recvbuf_vid_data(mpi_data_t * comm); void clear_allbuf_vid_data(mpi_data_t * comm); inline void exchange_verts(dist_graph_t * g, mpi_data_t * comm, queue_data_t * q); inline void exchange_verts_bicc(dist_graph_t * g, mpi_data_t * comm, queue_data_t * q); inline void exchange_verts(mpi_data_t * comm); inline void exchange_data(mpi_data_t * comm); inline void exchange_data_flt(mpi_data_t * comm); inline void exchange_vert_data(dist_graph_t * g, mpi_data_t * comm, queue_data_t * q); inline void exchange_vert_data(dist_graph_t * g, mpi_data_t * comm); inline void update_sendcounts_thread(dist_graph_t * g, thread_comm_t * tc, uint64_t vert_index); inline void update_sendcounts_thread(dist_graph_t * g, thread_comm_t * tc, uint64_t vert_index, uint64_t count_data); inline void update_vid_data_queues(dist_graph_t * g, thread_comm_t * tc, mpi_data_t * comm, uint64_t vert_index, uint64_t data); inline void update_vid_data_queues(dist_graph_t * g, thread_comm_t * tc, mpi_data_t * comm, uint64_t vert_index, uint64_t data1, uint64_t data2, uint64_t data3); inline void add_vid_to_queue(thread_queue_t * tq, queue_data_t * q, uint64_t vertex_id); inline void add_vid_to_queue(thread_queue_t * tq, queue_data_t * q, uint64_t vertex_id1, uint64_t vertex_id2); inline void empty_queue(thread_queue_t * tq, queue_data_t * q); inline void add_vid_to_send(thread_queue_t * tq, queue_data_t * q, uint64_t vertex_id); inline void add_vid_to_send(thread_queue_t * tq, queue_data_t * q, uint64_t vertex_id1, uint64_t vertex_id2); inline void 
empty_send(thread_queue_t * tq, queue_data_t * q); inline void add_vid_data_to_send(thread_comm_t * tc, mpi_data_t * comm, uint64_t vertex_id, uint64_t data_val, int32_t send_rank); inline void add_vid_data_to_send_flt(thread_comm_t * tc, mpi_data_t * comm, uint64_t vertex_id, double data_val, int32_t send_rank); inline void empty_vid_data(thread_comm_t * tc, mpi_data_t * comm); inline void empty_vid_data_flt(thread_comm_t * tc, mpi_data_t * comm); inline void exchange_verts(dist_graph_t * g, mpi_data_t * comm, queue_data_t * q) { comm->global_queue_size = 0; uint64_t task_queue_size = q->next_size + q->send_size; MPI_Allreduce(&task_queue_size, &comm->global_queue_size, 1, MPI_UINT64_T, MPI_SUM, MPI_COMM_WORLD); uint64_t num_comms = comm->global_queue_size / (uint64_t) MAX_SEND_SIZE + 1; uint64_t sum_recv = 0; for (uint64_t c = 0; c < num_comms; ++c) { uint64_t send_begin = (q->send_size * c) / num_comms; uint64_t send_end = (q->send_size * (c + 1)) / num_comms; if (c == (num_comms - 1)) send_end = q->send_size; for (int32_t i = 0; i < nprocs; ++i) { comm->sendcounts[i] = 0; comm->recvcounts[i] = 0; } for (uint64_t i = send_begin; i < send_end; ++i) { uint64_t ghost_index = q->queue_send[i] - g->n_local; uint64_t ghost_task = g->ghost_tasks[ghost_index]; ++comm->sendcounts[ghost_task]; } MPI_Alltoall(comm->sendcounts, 1, MPI_INT32_T, comm->recvcounts, 1, MPI_INT32_T, MPI_COMM_WORLD); comm->sdispls[0] = 0; comm->sdispls_cpy[0] = 0; comm->rdispls[0] = 0; for (int32_t i = 1; i < nprocs; ++i) { comm->sdispls[i] = comm->sdispls[i - 1] + comm->sendcounts[i - 1]; comm->rdispls[i] = comm->rdispls[i - 1] + comm->recvcounts[i - 1]; comm->sdispls_cpy[i] = comm->sdispls[i]; } int32_t cur_send = comm->sdispls[nprocs - 1] + comm->sendcounts[nprocs - 1]; int32_t cur_recv = comm->rdispls[nprocs - 1] + comm->recvcounts[nprocs - 1]; comm->sendbuf_vert = (uint64_t *) malloc((uint64_t) (cur_send + 1) * sizeof(uint64_t)); if (comm->sendbuf_vert == NULL) throw_err("exchange_verts(), unable to allocate comm buffers", procid); for (uint64_t i = send_begin; i < send_end; ++i) { uint64_t ghost_index = q->queue_send[i] - g->n_local; uint64_t ghost_task = g->ghost_tasks[ghost_index]; uint64_t vert = g->ghost_unmap[ghost_index]; comm->sendbuf_vert[comm->sdispls_cpy[ghost_task]++] = vert; } MPI_Alltoallv(comm->sendbuf_vert, comm->sendcounts, comm->sdispls, MPI_UINT64_T, q->queue_next + q->next_size + sum_recv, comm->recvcounts, comm->rdispls, MPI_UINT64_T, MPI_COMM_WORLD); free(comm->sendbuf_vert); sum_recv += cur_recv; } q->queue_size = q->next_size + sum_recv; q->next_size = 0; q->send_size = 0; uint64_t *temp = q->queue; q->queue = q->queue_next; q->queue_next = temp; } inline void exchange_verts_bicc(dist_graph_t * g, mpi_data_t * comm, queue_data_t * q) { comm->global_queue_size = 0; uint64_t task_queue_size = q->next_size + q->send_size; MPI_Allreduce(&task_queue_size, &comm->global_queue_size, 1, MPI_UINT64_T, MPI_SUM, MPI_COMM_WORLD); uint64_t num_comms = comm->global_queue_size / (uint64_t) MAX_SEND_SIZE + 1; uint64_t sum_recv = 0; for (uint64_t c = 0; c < num_comms; ++c) { uint64_t send_begin = (q->send_size * c) / num_comms; uint64_t send_end = (q->send_size * (c + 1)) / num_comms; if (c == (num_comms - 1)) send_end = q->send_size; if (send_begin % 2 != 0) send_begin++; if (send_end % 2 != 0) send_end++; for (int32_t i = 0; i < nprocs; ++i) { comm->sendcounts[i] = 0; comm->recvcounts[i] = 0; } for (uint64_t i = send_begin; i < send_end; i += 2) { uint64_t ghost_index = q->queue_send[i] - g->n_local; 
uint64_t ghost_task = g->ghost_tasks[ghost_index]; comm->sendcounts[ghost_task] += 2; } MPI_Alltoall(comm->sendcounts, 1, MPI_INT32_T, comm->recvcounts, 1, MPI_INT32_T, MPI_COMM_WORLD); comm->sdispls[0] = 0; comm->sdispls_cpy[0] = 0; comm->rdispls[0] = 0; for (int32_t i = 1; i < nprocs; ++i) { comm->sdispls[i] = comm->sdispls[i - 1] + comm->sendcounts[i - 1]; comm->rdispls[i] = comm->rdispls[i - 1] + comm->recvcounts[i - 1]; comm->sdispls_cpy[i] = comm->sdispls[i]; } int32_t cur_send = comm->sdispls[nprocs - 1] + comm->sendcounts[nprocs - 1]; int32_t cur_recv = comm->rdispls[nprocs - 1] + comm->recvcounts[nprocs - 1]; comm->sendbuf_vert = (uint64_t *) malloc((uint64_t) (cur_send + 1) * sizeof(uint64_t)); if (comm->sendbuf_vert == NULL) throw_err("exchange_verts(), unable to allocate comm buffers", procid); for (uint64_t i = send_begin; i < send_end; i += 2) { uint64_t ghost_index = q->queue_send[i] - g->n_local; uint64_t ghost_task = g->ghost_tasks[ghost_index]; uint64_t vert = g->ghost_unmap[ghost_index]; uint64_t parent = q->queue_send[i + 1]; comm->sendbuf_vert[comm->sdispls_cpy[ghost_task]++] = vert; comm->sendbuf_vert[comm->sdispls_cpy[ghost_task]++] = parent; } MPI_Alltoallv(comm->sendbuf_vert, comm->sendcounts, comm->sdispls, MPI_UINT64_T, q->queue_next + q->next_size + sum_recv, comm->recvcounts, comm->rdispls, MPI_UINT64_T, MPI_COMM_WORLD); free(comm->sendbuf_vert); sum_recv += cur_recv; } q->queue_size = q->next_size + sum_recv; q->next_size = 0; q->send_size = 0; uint64_t *temp = q->queue; q->queue = q->queue_next; q->queue_next = temp; } inline void exchange_vert_data(dist_graph_t * g, mpi_data_t * comm, queue_data_t * q) { for (int32_t i = 0; i < nprocs; ++i) comm->recvcounts_temp[i] = 0; MPI_Alltoall(comm->sendcounts_temp, 1, MPI_UINT64_T, comm->recvcounts_temp, 1, MPI_UINT64_T, MPI_COMM_WORLD); comm->total_recv = 0; for (int i = 0; i < nprocs; ++i) comm->total_recv += comm->recvcounts_temp[i]; comm->recvbuf_vert = (uint64_t *) malloc(comm->total_recv * sizeof(uint64_t)); comm->recvbuf_data = (uint64_t *) malloc(comm->total_recv * sizeof(uint64_t)); comm->recvbuf_data_flt = NULL; if (comm->recvbuf_vert == NULL || comm->recvbuf_data == NULL) throw_err("exchange_vert_data() unable to allocate comm buffers", procid); comm->global_queue_size = 0; uint64_t task_queue_size = comm->total_send; MPI_Allreduce(&task_queue_size, &comm->global_queue_size, 1, MPI_UINT64_T, MPI_SUM, MPI_COMM_WORLD); uint64_t num_comms = comm->global_queue_size / (uint64_t) MAX_SEND_SIZE + 1; uint64_t sum_recv = 0; uint64_t sum_send = 0; for (uint64_t c = 0; c < num_comms; ++c) { for (int32_t i = 0; i < nprocs; ++i) { uint64_t send_begin = (comm->sendcounts_temp[i] * c) / num_comms; uint64_t send_end = (comm->sendcounts_temp[i] * (c + 1)) / num_comms; if (c == (num_comms - 1)) send_end = comm->sendcounts_temp[i]; comm->sendcounts[i] = (int32_t) (send_end - send_begin); assert(comm->sendcounts[i] >= 0); } MPI_Alltoall(comm->sendcounts, 1, MPI_INT32_T, comm->recvcounts, 1, MPI_INT32_T, MPI_COMM_WORLD); comm->sdispls[0] = 0; comm->sdispls_cpy[0] = 0; comm->rdispls[0] = 0; for (int32_t i = 1; i < nprocs; ++i) { comm->sdispls[i] = comm->sdispls[i - 1] + comm->sendcounts[i - 1]; comm->rdispls[i] = comm->rdispls[i - 1] + comm->recvcounts[i - 1]; comm->sdispls_cpy[i] = comm->sdispls[i]; } int32_t cur_send = comm->sdispls[nprocs - 1] + comm->sendcounts[nprocs - 1]; int32_t cur_recv = comm->rdispls[nprocs - 1] + comm->recvcounts[nprocs - 1]; uint64_t *buf_v = (uint64_t *) malloc((uint64_t) (cur_send) * 
sizeof(uint64_t)); uint64_t *buf_d = (uint64_t *) malloc((uint64_t) (cur_send) * sizeof(uint64_t)); if (buf_v == NULL || buf_d == NULL) throw_err("exchange_verts(), unable to allocate comm buffers", procid); for (int32_t i = 0; i < nprocs; ++i) { uint64_t send_begin = (comm->sendcounts_temp[i] * c) / num_comms; uint64_t send_end = (comm->sendcounts_temp[i] * (c + 1)) / num_comms; if (c == (num_comms - 1)) send_end = comm->sendcounts_temp[i]; for (uint64_t j = send_begin; j < send_end; ++j) { uint64_t vert = comm->sendbuf_vert[comm->sdispls_temp[i] + j]; uint64_t data = comm->sendbuf_data[comm->sdispls_temp[i] + j]; buf_v[comm->sdispls_cpy[i]] = vert; buf_d[comm->sdispls_cpy[i]++] = data; } } MPI_Alltoallv(buf_v, comm->sendcounts, comm->sdispls, MPI_UINT64_T, comm->recvbuf_vert + sum_recv, comm->recvcounts, comm->rdispls, MPI_UINT64_T, MPI_COMM_WORLD); MPI_Alltoallv(buf_d, comm->sendcounts, comm->sdispls, MPI_UINT64_T, comm->recvbuf_data + sum_recv, comm->recvcounts, comm->rdispls, MPI_UINT64_T, MPI_COMM_WORLD); free(buf_v); free(buf_d); sum_recv += cur_recv; sum_send += cur_send; } assert(sum_recv == comm->total_recv); assert(sum_send == comm->total_send); comm->global_queue_size = 0; task_queue_size = comm->total_recv + q->next_size; MPI_Allreduce(&task_queue_size, &comm->global_queue_size, 1, MPI_UINT64_T, MPI_SUM, MPI_COMM_WORLD); q->send_size = 0; } inline void exchange_verts(mpi_data_t * comm) { if (debug) { printf("Task %d exchange_verts() start\n", procid); } uint64_t num_comms = comm->global_queue_size / (uint64_t) MAX_SEND_SIZE + 1; uint64_t sum_recv = 0; uint64_t sum_send = 0; for (uint64_t c = 0; c < num_comms; ++c) { for (int32_t i = 0; i < nprocs; ++i) { uint64_t send_begin = (comm->sendcounts_temp[i] * c) / num_comms; uint64_t send_end = (comm->sendcounts_temp[i] * (c + 1)) / num_comms; if (c == (num_comms - 1)) send_end = comm->sendcounts_temp[i]; comm->sendcounts[i] = (int32_t) (send_end - send_begin); assert(comm->sendcounts[i] >= 0); } MPI_Alltoall(comm->sendcounts, 1, MPI_INT32_T, comm->recvcounts, 1, MPI_INT32_T, MPI_COMM_WORLD); comm->sdispls[0] = 0; comm->sdispls_cpy[0] = 0; comm->rdispls[0] = 0; for (int32_t i = 1; i < nprocs; ++i) { comm->sdispls[i] = comm->sdispls[i - 1] + comm->sendcounts[i - 1]; comm->rdispls[i] = comm->rdispls[i - 1] + comm->recvcounts[i - 1]; comm->sdispls_cpy[i] = comm->sdispls[i]; } int32_t cur_send = comm->sdispls[nprocs - 1] + comm->sendcounts[nprocs - 1]; int32_t cur_recv = comm->rdispls[nprocs - 1] + comm->recvcounts[nprocs - 1]; uint64_t *buf_v = (uint64_t *) malloc((uint64_t) (cur_send) * sizeof(uint64_t)); if (buf_v == NULL) throw_err("exchange_verts(), unable to allocate comm buffers", procid); for (int32_t i = 0; i < nprocs; ++i) { uint64_t send_begin = (comm->sendcounts_temp[i] * c) / num_comms; uint64_t send_end = (comm->sendcounts_temp[i] * (c + 1)) / num_comms; if (c == (num_comms - 1)) send_end = comm->sendcounts_temp[i]; for (uint64_t j = send_begin; j < send_end; ++j) { uint64_t vert = comm->sendbuf_vert[comm->sdispls_temp[i] + j]; buf_v[comm->sdispls_cpy[i]++] = vert; } } MPI_Alltoallv(buf_v, comm->sendcounts, comm->sdispls, MPI_UINT64_T, comm->recvbuf_vert + sum_recv, comm->recvcounts, comm->rdispls, MPI_UINT64_T, MPI_COMM_WORLD); free(buf_v); sum_recv += cur_recv; sum_send += cur_send; } assert(sum_recv == comm->total_recv); assert(sum_send == comm->total_send); if (debug) { printf("Task %d exchange_verts() success\n", procid); } } inline void exchange_data(mpi_data_t * comm) { if (debug) { printf("Task %d exchange_data() 
start\n", procid); } uint64_t num_comms = comm->global_queue_size / (uint64_t) MAX_SEND_SIZE + 1; uint64_t sum_recv = 0; uint64_t sum_send = 0; for (uint64_t c = 0; c < num_comms; ++c) { for (int32_t i = 0; i < nprocs; ++i) { uint64_t send_begin = (comm->sendcounts_temp[i] * c) / num_comms; uint64_t send_end = (comm->sendcounts_temp[i] * (c + 1)) / num_comms; if (c == (num_comms - 1)) send_end = comm->sendcounts_temp[i]; comm->sendcounts[i] = (int32_t) (send_end - send_begin); assert(comm->sendcounts[i] >= 0); } MPI_Alltoall(comm->sendcounts, 1, MPI_INT32_T, comm->recvcounts, 1, MPI_INT32_T, MPI_COMM_WORLD); comm->sdispls[0] = 0; comm->sdispls_cpy[0] = 0; comm->rdispls[0] = 0; for (int32_t i = 1; i < nprocs; ++i) { comm->sdispls[i] = comm->sdispls[i - 1] + comm->sendcounts[i - 1]; comm->rdispls[i] = comm->rdispls[i - 1] + comm->recvcounts[i - 1]; comm->sdispls_cpy[i] = comm->sdispls[i]; } int32_t cur_send = comm->sdispls[nprocs - 1] + comm->sendcounts[nprocs - 1]; int32_t cur_recv = comm->rdispls[nprocs - 1] + comm->recvcounts[nprocs - 1]; uint64_t *buf_d = (uint64_t *) malloc((uint64_t) (cur_send) * sizeof(uint64_t)); if (buf_d == NULL) throw_err("exchange_data(), unable to allocate comm buffers", procid); for (int32_t i = 0; i < nprocs; ++i) { uint64_t send_begin = (comm->sendcounts_temp[i] * c) / num_comms; uint64_t send_end = (comm->sendcounts_temp[i] * (c + 1)) / num_comms; if (c == (num_comms - 1)) send_end = comm->sendcounts_temp[i]; for (uint64_t j = send_begin; j < send_end; ++j) { uint64_t data = comm->sendbuf_data[comm->sdispls_temp[i] + j]; buf_d[comm->sdispls_cpy[i]++] = data; } } MPI_Alltoallv(buf_d, comm->sendcounts, comm->sdispls, MPI_UINT64_T, comm->recvbuf_data + sum_recv, comm->recvcounts, comm->rdispls, MPI_UINT64_T, MPI_COMM_WORLD); free(buf_d); sum_recv += cur_recv; sum_send += cur_send; } assert(sum_recv == comm->total_recv); assert(sum_send == comm->total_send); if (debug) { printf("Task %d exchange_data() success\n", procid); } } inline void exchange_data_flt(mpi_data_t * comm) { if (debug) { printf("Task %d exchange_data_flt() start\n", procid); } uint64_t num_comms = comm->global_queue_size / (uint64_t) MAX_SEND_SIZE + 1; uint64_t sum_recv = 0; uint64_t sum_send = 0; for (uint64_t c = 0; c < num_comms; ++c) { for (int32_t i = 0; i < nprocs; ++i) { uint64_t send_begin = (comm->sendcounts_temp[i] * c) / num_comms; uint64_t send_end = (comm->sendcounts_temp[i] * (c + 1)) / num_comms; if (c == (num_comms - 1)) send_end = comm->sendcounts_temp[i]; comm->sendcounts[i] = (int32_t) (send_end - send_begin); assert(comm->sendcounts[i] >= 0); } MPI_Alltoall(comm->sendcounts, 1, MPI_INT32_T, comm->recvcounts, 1, MPI_INT32_T, MPI_COMM_WORLD); comm->sdispls[0] = 0; comm->sdispls_cpy[0] = 0; comm->rdispls[0] = 0; for (int32_t i = 1; i < nprocs; ++i) { comm->sdispls[i] = comm->sdispls[i - 1] + comm->sendcounts[i - 1]; comm->rdispls[i] = comm->rdispls[i - 1] + comm->recvcounts[i - 1]; comm->sdispls_cpy[i] = comm->sdispls[i]; } int32_t cur_send = comm->sdispls[nprocs - 1] + comm->sendcounts[nprocs - 1]; int32_t cur_recv = comm->rdispls[nprocs - 1] + comm->recvcounts[nprocs - 1]; double *buf_d = (double *)malloc((double)(cur_send) * sizeof(double)); if (buf_d == NULL) throw_err("exchange_data_flt(), unable to allocate comm buffers", procid); for (int32_t i = 0; i < nprocs; ++i) { uint64_t send_begin = (comm->sendcounts_temp[i] * c) / num_comms; uint64_t send_end = (comm->sendcounts_temp[i] * (c + 1)) / num_comms; if (c == (num_comms - 1)) send_end = comm->sendcounts_temp[i]; for 
(uint64_t j = send_begin; j < send_end; ++j) { double data = comm->sendbuf_data_flt[comm->sdispls_temp[i] + j]; buf_d[comm->sdispls_cpy[i]++] = data; } } MPI_Alltoallv(buf_d, comm->sendcounts, comm->sdispls, MPI_DOUBLE, comm->recvbuf_data_flt + sum_recv, comm->recvcounts, comm->rdispls, MPI_DOUBLE, MPI_COMM_WORLD); free(buf_d); sum_recv += cur_recv; sum_send += cur_send; } assert(sum_recv == comm->total_recv); assert(sum_send == comm->total_send); if (debug) { printf("Task %d exchange_data_flt() success\n", procid); } } inline void update_sendcounts_thread(dist_graph_t * g, thread_comm_t * tc, uint64_t vert_index) { for (int32_t i = 0; i < nprocs; ++i) tc->v_to_rank[i] = false; uint64_t out_degree = out_degree(g, vert_index); uint64_t *outs = out_vertices(g, vert_index); for (uint64_t j = 0; j < out_degree; ++j) { uint64_t out_index = outs[j]; if (out_index >= g->n_local) { int32_t out_rank = g->ghost_tasks[out_index - g->n_local]; if (!tc->v_to_rank[out_rank]) { tc->v_to_rank[out_rank] = true; ++tc->sendcounts_thread[out_rank]; } } } } inline void update_sendcounts_thread(dist_graph_t * g, thread_comm_t * tc, uint64_t vert_index, uint64_t count_data) { for (int32_t i = 0; i < nprocs; ++i) tc->v_to_rank[i] = false; uint64_t out_degree = out_degree(g, vert_index); uint64_t *outs = out_vertices(g, vert_index); for (uint64_t j = 0; j < out_degree; ++j) { uint64_t out_index = outs[j]; if (out_index >= g->n_local) { int32_t out_rank = g->ghost_tasks[out_index - g->n_local]; if (!tc->v_to_rank[out_rank]) { tc->v_to_rank[out_rank] = true; tc->sendcounts_thread[out_rank] += count_data; } } } } inline void update_vid_data_queues(dist_graph_t * g, thread_comm_t * tc, mpi_data_t * comm, uint64_t vert_index, uint64_t data) { for (int32_t i = 0; i < nprocs; ++i) tc->v_to_rank[i] = false; uint64_t out_degree = out_degree(g, vert_index); uint64_t *outs = out_vertices(g, vert_index); for (uint64_t j = 0; j < out_degree; ++j) { uint64_t out_index = outs[j]; if (out_index >= g->n_local) { int32_t out_rank = g->ghost_tasks[out_index - g->n_local]; if (!tc->v_to_rank[out_rank]) { tc->v_to_rank[out_rank] = true; add_vid_data_to_send(tc, comm, g->local_unmap[vert_index], data, out_rank); } } } } inline void update_vid_data_queues(dist_graph_t * g, thread_comm_t * tc, mpi_data_t * comm, uint64_t vert_index, uint64_t data1, uint64_t data2, uint64_t data3) { for (int32_t i = 0; i < nprocs; ++i) tc->v_to_rank[i] = false; uint64_t out_degree = out_degree(g, vert_index); uint64_t *outs = out_vertices(g, vert_index); for (uint64_t j = 0; j < out_degree; ++j) { uint64_t out_index = outs[j]; if (out_index >= g->n_local) { int32_t out_rank = g->ghost_tasks[out_index - g->n_local]; if (!tc->v_to_rank[out_rank]) { tc->v_to_rank[out_rank] = true; add_vid_data_to_send(tc, comm, g->local_unmap[vert_index], data1, out_rank); add_vid_data_to_send(tc, comm, g->local_unmap[vert_index], data2, out_rank); add_vid_data_to_send(tc, comm, g->local_unmap[vert_index], data3, out_rank); } } } } inline void add_vid_to_queue(thread_queue_t * tq, queue_data_t * q, uint64_t vertex_id) { tq->thread_queue[tq->thread_queue_size++] = vertex_id; if (tq->thread_queue_size == THREAD_QUEUE_SIZE) empty_queue(tq, q); } inline void add_vid_to_queue(thread_queue_t * tq, queue_data_t * q, uint64_t vertex_id1, uint64_t vertex_id2) { tq->thread_queue[tq->thread_queue_size++] = vertex_id1; tq->thread_queue[tq->thread_queue_size++] = vertex_id2; if (tq->thread_queue_size == THREAD_QUEUE_SIZE) empty_queue(tq, q); } inline void empty_queue(thread_queue_t * tq, 
queue_data_t * q) { uint64_t start_offset; #pragma omp atomic capture start_offset = q->next_size += tq->thread_queue_size; start_offset -= tq->thread_queue_size; for (uint64_t i = 0; i < tq->thread_queue_size; ++i) q->queue_next[start_offset + i] = tq->thread_queue[i]; tq->thread_queue_size = 0; } inline void add_vid_to_send(thread_queue_t * tq, queue_data_t * q, uint64_t vertex_id) { tq->thread_send[tq->thread_send_size++] = vertex_id; if (tq->thread_send_size == THREAD_QUEUE_SIZE) empty_send(tq, q); } inline void add_vid_to_send(thread_queue_t * tq, queue_data_t * q, uint64_t vertex_id1, uint64_t vertex_id2) { tq->thread_send[tq->thread_send_size++] = vertex_id1; tq->thread_send[tq->thread_send_size++] = vertex_id2; if (tq->thread_send_size == THREAD_QUEUE_SIZE) empty_send(tq, q); } inline void empty_send(thread_queue_t * tq, queue_data_t * q) { uint64_t start_offset; #pragma omp atomic capture start_offset = q->send_size += tq->thread_send_size; start_offset -= tq->thread_send_size; for (uint64_t i = 0; i < tq->thread_send_size; ++i) q->queue_send[start_offset + i] = tq->thread_send[i]; tq->thread_send_size = 0; } inline void add_vid_data_to_send(thread_comm_t * tc, mpi_data_t * comm, uint64_t vertex_id, uint64_t data_val, int32_t send_rank) { tc->sendbuf_vert_thread[tc->thread_queue_size] = vertex_id; tc->sendbuf_data_thread[tc->thread_queue_size] = data_val; tc->sendbuf_rank_thread[tc->thread_queue_size] = send_rank; ++tc->thread_queue_size; ++tc->sendcounts_thread[send_rank]; if (tc->thread_queue_size == THREAD_QUEUE_SIZE) empty_vid_data(tc, comm); } inline void add_vid_data_to_send_flt(thread_comm_t * tc, mpi_data_t * comm, uint64_t vertex_id, double data_val, int32_t send_rank) { tc->sendbuf_vert_thread[tc->thread_queue_size] = vertex_id; tc->sendbuf_data_thread_flt[tc->thread_queue_size] = data_val; tc->sendbuf_rank_thread[tc->thread_queue_size] = send_rank; ++tc->thread_queue_size; ++tc->sendcounts_thread[send_rank]; if (tc->thread_queue_size == THREAD_QUEUE_SIZE) empty_vid_data_flt(tc, comm); } inline void empty_vid_data(thread_comm_t * tc, mpi_data_t * comm) { for (int32_t i = 0; i < nprocs; ++i) { #pragma omp atomic capture tc->thread_starts[i] = comm->sdispls_cpy_temp[i] += tc->sendcounts_thread[i]; tc->thread_starts[i] -= tc->sendcounts_thread[i]; } for (uint64_t i = 0; i < tc->thread_queue_size; ++i) { int32_t cur_rank = tc->sendbuf_rank_thread[i]; comm->sendbuf_vert[tc->thread_starts[cur_rank]] = tc->sendbuf_vert_thread[i]; comm->sendbuf_data[tc->thread_starts[cur_rank]] = tc->sendbuf_data_thread[i]; ++tc->thread_starts[cur_rank]; } for (int32_t i = 0; i < nprocs; ++i) { tc->thread_starts[i] = 0; tc->sendcounts_thread[i] = 0; } tc->thread_queue_size = 0; } inline void empty_vid_data_flt(thread_comm_t * tc, mpi_data_t * comm) { for (int32_t i = 0; i < nprocs; ++i) { #pragma omp atomic capture tc->thread_starts[i] = comm->sdispls_cpy_temp[i] += tc->sendcounts_thread[i]; tc->thread_starts[i] -= tc->sendcounts_thread[i]; } for (uint64_t i = 0; i < tc->thread_queue_size; ++i) { int32_t cur_rank = tc->sendbuf_rank_thread[i]; comm->sendbuf_vert[tc->thread_starts[cur_rank]] = tc->sendbuf_vert_thread[i]; comm->sendbuf_data_flt[tc->thread_starts[cur_rank]] = tc->sendbuf_data_thread_flt[i]; ++tc->thread_starts[cur_rank]; } for (int32_t i = 0; i < nprocs; ++i) { tc->thread_starts[i] = 0; tc->sendcounts_thread[i] = 0; } tc->thread_queue_size = 0; } #endif
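Note: the empty_queue(), empty_send(), and empty_vid_data() routines above all rely on the same idiom: a thread fills a private buffer and then uses "#pragma omp atomic capture" to reserve a contiguous block of slots in the shared queue before copying the buffer in. A minimal self-contained sketch of that pattern follows; the capacities, names, and per-thread workload are illustrative assumptions, not part of the original code.

#include <stdint.h>
#include <stdio.h>

#define LOCAL_CAP   64      /* assumed thread-local buffer size */
#define SHARED_CAP  65536   /* assumed shared queue capacity    */

static uint64_t shared_queue[SHARED_CAP];
static uint64_t shared_size = 0;

/* Flush a thread-local buffer into the shared queue. The atomic
 * capture reserves [start, start + count) for this thread only,
 * so the copy below needs no further synchronization. */
static void flush_local(const uint64_t *local, uint64_t count)
{
  uint64_t start;
#pragma omp atomic capture
  { start = shared_size; shared_size += count; }
  for (uint64_t i = 0; i < count; ++i)
    shared_queue[start + i] = local[i];
}

int main(void)
{
#pragma omp parallel
  {
    uint64_t local[LOCAL_CAP];
    uint64_t n = 0;
    for (uint64_t v = 0; v < LOCAL_CAP * 3; ++v) {
      local[n++] = v;              /* buffer work locally         */
      if (n == LOCAL_CAP) {        /* flush when the buffer fills */
        flush_local(local, n);
        n = 0;
      }
    }
    if (n) flush_local(local, n);  /* flush the remainder         */
  }
  printf("queued %llu items\n", (unsigned long long)shared_size);
  return 0;
}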
Merge.c
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> #define SWAP(a,b) {tt=(a); (a)=(b); (b)=tt;} void Merge(int *a1,int n1, int *a2,int n2, int *r) { int i1,i2,i; for (i1=0, i2=0, i=0;i1<n1&&i2<n2;) if (a1[i1]<a2[i2])r[i++]=a1[i1++]; else r[i++]=a2[i2++]; while(i1<n1) r[i++]=a1[i1++]; while(i2<n2) r[i++]=a2[i2++]; } void MSort(int *m,int n,int *t) { int n1,n2,i; if(n<=1)return; n1=n/2; n2=n-n1; MSort (m,n1,t); MSort(m+n1,n2,t); Merge(m,n1,m+n1,n2,t); memcpy (m,t,n*sizeof(int)); } void MSort2(int *m,int n,int *t) { int n1,n2,n3,n4; if (n<=1)return; n1=n/2; n3=n-n1; n2=n1/2; n1=n1-n2; n4=n3/2; n3=n3-n4; #pragma omp parallel sections { #pragma omp section {MSort(m,n1,t);} #pragma omp section {MSort (m+n1,n2,t+n1);} #pragma omp section {MSort (m+n1+n2,n3,t+n1+n2);} #pragma omp section {MSort (m+n1+n2+n3,n4,t+n1+n2+n3);} } #pragma omp parallel sections { #pragma omp section {Merge(m,n1, m+n1,n2, t); memcpy(m,t, (n1+n2)*sizeof(int));} #pragma omp section {Merge(m+n1+n2,n3,m+n1+n2+n3,n4,t+n1+n2); memcpy(m+n1+n2, t+n1+n2, (n3+n4)*sizeof(int));} } Merge(m,n1+n2, m+n1+n2,n3+n4,t); memcpy(m,t,n*sizeof(int)); } void MSort3(int *m,int n,int *t) { int k,k2,i,tt; //for (k=1;k<n;k*=2) k=1; { k2=k; #pragma omp parallel for private(tt) for(i=0;i<n-1;i+=2) if(m[i]>m[i+1]) SWAP(m[i],m[i+1]); } for(k=2;k<n;k*=2) { k2=k; #pragma omp parallel for private(k2) for(i=0;i<n-k;i+=2*k) {k2=k; if(n-i-k<k2)k2=n-i-k; Merge(m+i,k, m+i+k,k2,t+i); memcpy(m+i,t+i,(k+k2)*sizeof(int)); } } } int main(void) { int i,*a,*a0,*t,*b,n=100000; time_t t0,t1; a0=(int*)malloc(n*sizeof(int)); a=(int*)malloc(n*sizeof(int)); t=(int*)malloc(n*sizeof(int)); b=(int*)malloc(n*sizeof(int)); for(i=0;i<n;i++)a0[i]=rand()%n; // memcpy(b,a0,n*sizeof(int)); time(&t0); MSort(b,n,t); time(&t1); printf("MSort:%d\n", (int)(t1-t0)); for(i=1;i<n;i++) if(b[i]<b[i-1]) printf ("Err1:i=%d\n",i); // memcpy(a,a0,n*sizeof(int)); time(&t0); MSort3(a,n,t); time(&t1); printf("MSort3:%d\n", (int)(t1-t0)); for(i=0;i<n;i++) if(a[i]!=b[i]) printf ("Err2:i=%d\n",i); // memcpy(a,a0,n*sizeof(int)); time(&t0); MSort2(a,n,t); time(&t1); printf("MSort2:%d\n", (int)(t1-t0)); for(i=0;i<n;i++) if(a[i]!=b[i]) printf ("Err3:i=%d\n",i); // free(a0) ;a0=NULL; free(a); a=NULL; free(t); t=NULL; free(b); b=NULL; printf("done\n"); return 0; }
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> #define SWAP(a,b) {tt=(a); (a)=(b); (b)=tt;} void Merge(int *a1, int n1, int *a2, int n2, int *r) { int i1, i2, i; for (i1 = 0, i2 = 0, i = 0; i1 < n1 && i2 < n2;) if (a1[i1] < a2[i2]) r[i++] = a1[i1++]; else r[i++] = a2[i2++]; while (i1 < n1) r[i++] = a1[i1++]; while (i2 < n2) r[i++] = a2[i2++]; } void MSort(int *m, int n, int *t) { int n1, n2, i; if (n <= 1) return; n1 = n / 2; n2 = n - n1; MSort(m, n1, t); MSort(m + n1, n2, t); Merge(m, n1, m + n1, n2, t); memcpy(m, t, n * sizeof(int)); } void MSort2(int *m, int n, int *t) { int n1, n2, n3, n4; if (n <= 1) return; n1 = n / 2; n3 = n - n1; n2 = n1 / 2; n1 = n1 - n2; n4 = n3 / 2; n3 = n3 - n4; #pragma omp section { MSort(m, n1, t); } MSort(m + n1, n2, t + n1); MSort(m + n1 + n2, n3, t + n1 + n2); MSort(m + n1 + n2 + n3, n4, t + n1 + n2 + n3); #pragma omp section { Merge(m, n1, m + n1, n2, t); memcpy(m, t, (n1 + n2) * sizeof(int)); } Merge(m + n1 + n2, n3, m + n1 + n2 + n3, n4, t + n1 + n2); memcpy(m + n1 + n2, t + n1 + n2, (n3 + n4) * sizeof(int)); Merge(m, n1 + n2, m + n1 + n2, n3 + n4, t); memcpy(m, t, n * sizeof(int)); } void MSort3(int *m, int n, int *t) { int k, k2, i, tt; //for (k = 1; k < n; k *= 2) k = 1; { k2 = k; for (i = 0; i < n - 1; i += 2) if (m[i] > m[i + 1]) SWAP(m[i], m[i + 1]); } for (k = 2; k < n; k *= 2) { k2 = k; for (i = 0; i < n - k; i += 2 * k) { k2 = k; if (n - i - k < k2) k2 = n - i - k; Merge(m + i, k, m + i + k, k2, t + i); memcpy(m + i, t + i, (k + k2) * sizeof(int)); } } } int main(void) { int i, *a, *a0, *t, *b, n = 100000; time_t t0, t1; a0 = (int *)malloc(n * sizeof(int)); a = (int *)malloc(n * sizeof(int)); t = (int *)malloc(n * sizeof(int)); b = (int *)malloc(n * sizeof(int)); for (i = 0; i < n; i++) a0[i] = rand() % n; // memcpy(b, a0, n * sizeof(int)); time(&t0); MSort(b, n, t); time(&t1); printf("MSort:%d\n", (int)(t1 - t0)); for (i = 1; i < n; i++) if (b[i] < b[i - 1]) printf("Err1:i=%d\n", i); // memcpy(a, a0, n * sizeof(int)); time(&t0); MSort3(a, n, t); time(&t1); printf("MSort3:%d\n", (int)(t1 - t0)); for (i = 0; i < n; i++) if (a[i] != b[i]) printf("Err2:i=%d\n", i); // memcpy(a, a0, n * sizeof(int)); time(&t0); MSort2(a, n, t); time(&t1); printf("MSort2:%d\n", (int)(t1 - t0)); for (i = 0; i < n; i++) if (a[i] != b[i]) printf("Err3:i=%d\n", i); // free(a0); a0 = NULL; free(a); a = NULL; free(t); t = NULL; free(b); b = NULL; printf("done\n"); return 0; }
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> #define SWAP(a,b) {tt=(a); (a)=(b); (b)=tt;} void Merge(int *a1, int n1, int *a2, int n2, int *r) { int i1, i2, i; for (i1 = 0, i2 = 0, i = 0; i1 < n1 && i2 < n2;) if (a1[i1] < a2[i2]) r[i++] = a1[i1++]; else r[i++] = a2[i2++]; while (i1 < n1) r[i++] = a1[i1++]; while (i2 < n2) r[i++] = a2[i2++]; } void MSort(int *m, int n, int *t) { int n1, n2, i; if (n <= 1) return; n1 = n / 2; n2 = n - n1; MSort(m, n1, t); MSort(m + n1, n2, t); Merge(m, n1, m + n1, n2, t); memcpy(m, t, n * sizeof(int)); } void MSort2(int *m, int n, int *t) { int n1, n2, n3, n4; if (n <= 1) return; n1 = n / 2; n3 = n - n1; n2 = n1 / 2; n1 = n1 - n2; n4 = n3 / 2; n3 = n3 - n4; #pragma omp parallel sections { #pragma omp section { MSort(m, n1, t); } #pragma omp section { MSort(m + n1, n2, t + n1); } #pragma omp section { MSort(m + n1 + n2, n3, t + n1 + n2); } #pragma omp section { MSort(m + n1 + n2 + n3, n4, t + n1 + n2 + n3); } } #pragma omp parallel sections { #pragma omp section { Merge(m, n1, m + n1, n2, t); memcpy(m, t, (n1 + n2) * sizeof(int)); } #pragma omp section { Merge(m + n1 + n2, n3, m + n1 + n2 + n3, n4, t + n1 + n2); memcpy(m + n1 + n2, t + n1 + n2, (n3 + n4) * sizeof(int)); } } Merge(m, n1 + n2, m + n1 + n2, n3 + n4, t); memcpy(m, t, n * sizeof(int)); } void MSort3(int *m, int n, int *t) { int k, k2, i, tt; //for (k = 1; k < n; k *= 2) k = 1; { k2 = k; #pragma omp parallel for private(tt) for (i = 0; i < n - 1; i += 2) if (m[i] > m[i + 1]) SWAP(m[i], m[i + 1]); } for (k = 2; k < n; k *= 2) { k2 = k; #pragma omp parallel for private(k2) for (i = 0; i < n - k; i += 2 * k) { k2 = k; if (n - i - k < k2) k2 = n - i - k; Merge(m + i, k, m + i + k, k2, t + i); memcpy(m + i, t + i, (k + k2) * sizeof(int)); } } } int main(void) { int i, *a, *a0, *t, *b, n = 100000; time_t t0, t1; a0 = (int *)malloc(n * sizeof(int)); a = (int *)malloc(n * sizeof(int)); t = (int *)malloc(n * sizeof(int)); b = (int *)malloc(n * sizeof(int)); for (i = 0; i < n; i++) a0[i] = rand() % n; // memcpy(b, a0, n * sizeof(int)); time(&t0); MSort(b, n, t); time(&t1); printf("MSort:%d\n", (int)(t1 - t0)); for (i = 1; i < n; i++) if (b[i] < b[i - 1]) printf("Err1:i=%d\n", i); // memcpy(a, a0, n * sizeof(int)); time(&t0); MSort3(a, n, t); time(&t1); printf("MSort3:%d\n", (int)(t1 - t0)); for (i = 0; i < n; i++) if (a[i] != b[i]) printf("Err2:i=%d\n", i); // memcpy(a, a0, n * sizeof(int)); time(&t0); MSort2(a, n, t); time(&t1); printf("MSort2:%d\n", (int)(t1 - t0)); for (i = 0; i < n; i++) if (a[i] != b[i]) printf("Err3:i=%d\n", i); // free(a0); a0 = NULL; free(a); a = NULL; free(t); t = NULL; free(b); b = NULL; printf("done\n"); return 0; }
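MSort2 above parallelizes the merge sort with a fixed four-way "#pragma omp parallel sections" split, which caps the parallelism of the sort phase at four threads and requires the manual n1..n4 bookkeeping. As an illustrative alternative only (a sketch, not code taken from the file above), the same recursion can be expressed with OpenMP tasks; the serial cutoff is an assumption and Merge() is the routine defined in Merge.c.

#include <string.h>

void Merge(int *a1, int n1, int *a2, int n2, int *r);   /* from Merge.c above */

#define TASK_CUTOFF 1024   /* assumed threshold below which recursion stays serial */

void MSortTask(int *m, int n, int *t)
{
  if (n <= 1) return;
  int n1 = n / 2, n2 = n - n1;
  if (n < TASK_CUTOFF) {                /* small ranges: plain recursion          */
    MSortTask(m, n1, t);
    MSortTask(m + n1, n2, t + n1);
  } else {
#pragma omp task
    MSortTask(m, n1, t);                /* sorts m[0..n1) with scratch t[0..n1)   */
#pragma omp task
    MSortTask(m + n1, n2, t + n1);      /* sorts m[n1..n) with scratch t[n1..n)   */
#pragma omp taskwait                    /* both halves must finish before merging */
  }
  Merge(m, n1, m + n1, n2, t);
  memcpy(m, t, (size_t)n * sizeof(int));
}

void MSortParallel(int *m, int n, int *t)
{
#pragma omp parallel
#pragma omp single
  MSortTask(m, n, t);                   /* one thread seeds the task tree */
}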
3d7pt.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 8; tile_size[3] = 128; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. */ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,12);t1++) { lbp=max(ceild(t1,2),ceild(24*t1-Nt+3,24)); ubp=min(floord(Nt+Nz-4,24),floord(12*t1+Nz+9,24)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(3*t1-1,2)),ceild(24*t2-Nz-4,8));t3<=min(min(min(floord(Nt+Ny-4,8),floord(12*t1+Ny+21,8)),floord(24*t2+Ny+20,8)),floord(24*t1-24*t2+Nz+Ny+19,8));t3++) { for (t4=max(max(max(0,ceild(3*t1-31,32)),ceild(24*t2-Nz-124,128)),ceild(8*t3-Ny-124,128));t4<=min(min(min(min(floord(Nt+Nx-4,128),floord(12*t1+Nx+21,128)),floord(24*t2+Nx+20,128)),floord(8*t3+Nx+4,128)),floord(24*t1-24*t2+Nz+Nx+19,128));t4++) { for (t5=max(max(max(max(max(0,12*t1),24*t1-24*t2+1),24*t2-Nz+2),8*t3-Ny+2),128*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,12*t1+23),24*t2+22),8*t3+6),128*t4+126),24*t1-24*t2+Nz+21);t5++) { for (t6=max(max(24*t2,t5+1),-24*t1+24*t2+2*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(8*t3,t5+1);t7<=min(8*t3+7,t5+Ny-2);t7++) { lbv=max(128*t4,t5+1); ubv=min(128*t4+127,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 8; tile_size[3] = 128; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; LIKWID_MARKER_THREADINIT; LIKWID_MARKER_START("calc"); #endif int num_threads = 1; for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. 
You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. */ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,12);t1++) { lbp=max(ceild(t1,2),ceild(24*t1-Nt+3,24)); ubp=min(floord(Nt+Nz-4,24),floord(12*t1+Nz+9,24)); for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(3*t1-1,2)),ceild(24*t2-Nz-4,8));t3<=min(min(min(floord(Nt+Ny-4,8),floord(12*t1+Ny+21,8)),floord(24*t2+Ny+20,8)),floord(24*t1-24*t2+Nz+Ny+19,8));t3++) { for (t4=max(max(max(0,ceild(3*t1-31,32)),ceild(24*t2-Nz-124,128)),ceild(8*t3-Ny-124,128));t4<=min(min(min(min(floord(Nt+Nx-4,128),floord(12*t1+Nx+21,128)),floord(24*t2+Nx+20,128)),floord(8*t3+Nx+4,128)),floord(24*t1-24*t2+Nz+Nx+19,128));t4++) { for (t5=max(max(max(max(max(0,12*t1),24*t1-24*t2+1),24*t2-Nz+2),8*t3-Ny+2),128*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,12*t1+23),24*t2+22),8*t3+6),128*t4+126),24*t1-24*t2+Nz+21);t5++) { for (t6=max(max(24*t2,t5+1),-24*t1+24*t2+2*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(8*t3,t5+1);t7<=min(8*t3+7,t5+Ny-2);t7++) { lbv=max(128*t4,t5+1); ubv=min(128*t4+127,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON LIKWID_MARKER_STOP("calc"); LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 8; tile_size[3] = 128; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. */ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,12);t1++) { lbp=max(ceild(t1,2),ceild(24*t1-Nt+3,24)); ubp=min(floord(Nt+Nz-4,24),floord(12*t1+Nz+9,24)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(3*t1-1,2)),ceild(24*t2-Nz-4,8));t3<=min(min(min(floord(Nt+Ny-4,8),floord(12*t1+Ny+21,8)),floord(24*t2+Ny+20,8)),floord(24*t1-24*t2+Nz+Ny+19,8));t3++) { for (t4=max(max(max(0,ceild(3*t1-31,32)),ceild(24*t2-Nz-124,128)),ceild(8*t3-Ny-124,128));t4<=min(min(min(min(floord(Nt+Nx-4,128),floord(12*t1+Nx+21,128)),floord(24*t2+Nx+20,128)),floord(8*t3+Nx+4,128)),floord(24*t1-24*t2+Nz+Nx+19,128));t4++) { for (t5=max(max(max(max(max(0,12*t1),24*t1-24*t2+1),24*t2-Nz+2),8*t3-Ny+2),128*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,12*t1+23),24*t2+22),8*t3+6),128*t4+126),24*t1-24*t2+Nz+21);t5++) { for (t6=max(max(24*t2,t5+1),-24*t1+24*t2+2*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(8*t3,t5+1);t7<=min(8*t3+7,t5+Ny-2);t7++) { lbv=max(128*t4,t5+1); ubv=min(128*t4+127,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
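The tiled CLooG loop nest above is correct but hard to follow in isolation. For orientation only, here is an untiled version of the same order-1 7-point update with a plain "parallel for"; it computes the same values as the t5..t8 nest but without the 24x24x8x128 time tiling, so it is a readability aid rather than part of the benchmark, and the function name is an assumption.

/* Untiled reference sweep: A[2][Nz][Ny][Nx] double-buffers between
 * time steps t % 2 and (t + 1) % 2, exactly as in the tiled code. */
void stencil_reference(int Nt, int Nz, int Ny, int Nx,
                       double ****A, double alpha, double beta)
{
  for (int t = 0; t < Nt - 1; ++t) {
#pragma omp parallel for collapse(2)
    for (int i = 1; i < Nz - 1; ++i)
      for (int j = 1; j < Ny - 1; ++j)
        for (int k = 1; k < Nx - 1; ++k)
          A[(t + 1) % 2][i][j][k] =
              alpha * A[t % 2][i][j][k] +
              beta * (A[t % 2][i - 1][j][k] + A[t % 2][i + 1][j][k] +
                      A[t % 2][i][j - 1][k] + A[t % 2][i][j + 1][k] +
                      A[t % 2][i][j][k - 1] + A[t % 2][i][j][k + 1]);
  }
}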
struct_innerprod.c
/*BHEADER********************************************************************** * Copyright (c) 2008, Lawrence Livermore National Security, LLC. * Produced at the Lawrence Livermore National Laboratory. * This file is part of HYPRE. See file COPYRIGHT for details. * * HYPRE is free software; you can redistribute it and/or modify it under the * terms of the GNU Lesser General Public License (as published by the Free * Software Foundation) version 2.1 dated February 1999. * * $Revision$ ***********************************************************************EHEADER*/ /****************************************************************************** * * Structured inner product routine * *****************************************************************************/ #include "_hypre_struct_mv.h" /*-------------------------------------------------------------------------- * hypre_StructInnerProd *--------------------------------------------------------------------------*/ HYPRE_Real hypre_StructInnerProd( hypre_StructVector *x, hypre_StructVector *y ) { HYPRE_Real final_innerprod_result; HYPRE_Real local_result; HYPRE_Real process_result; hypre_Box *x_data_box; hypre_Box *y_data_box; HYPRE_Int xi; HYPRE_Int yi; HYPRE_Complex *xp; HYPRE_Complex *yp; hypre_BoxArray *boxes; hypre_Box *box; hypre_Index loop_size; hypre_IndexRef start; hypre_Index unit_stride; HYPRE_Int i; local_result = 0.0; process_result = 0.0; hypre_SetIndex(unit_stride, 1); boxes = hypre_StructGridBoxes(hypre_StructVectorGrid(y)); hypre_ForBoxI(i, boxes) { box = hypre_BoxArrayBox(boxes, i); start = hypre_BoxIMin(box); x_data_box = hypre_BoxArrayBox(hypre_StructVectorDataSpace(x), i); y_data_box = hypre_BoxArrayBox(hypre_StructVectorDataSpace(y), i); xp = hypre_StructVectorBoxData(x, i); yp = hypre_StructVectorBoxData(y, i); hypre_BoxGetSize(box, loop_size); hypre_BoxLoop2Begin(hypre_StructVectorNDim(x), loop_size, x_data_box, start, unit_stride, xi, y_data_box, start, unit_stride, yi); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,xi,yi) reduction(+:local_result) HYPRE_SMP_SCHEDULE #endif hypre_BoxLoop2For(xi, yi) { local_result += xp[xi] * hypre_conj(yp[yi]); } hypre_BoxLoop2End(xi, yi); } process_result = local_result; hypre_MPI_Allreduce(&process_result, &final_innerprod_result, 1, HYPRE_MPI_REAL, hypre_MPI_SUM, hypre_StructVectorComm(x)); hypre_IncFLOPCount(2*hypre_StructVectorGlobalSize(x)); return final_innerprod_result; }
/****************************************************************************** * * Structured inner product routine * *****************************************************************************/ #include "_hypre_struct_mv.h" /*-------------------------------------------------------------------------- * hypre_StructInnerProd *--------------------------------------------------------------------------*/ HYPRE_Real hypre_StructInnerProd(hypre_StructVector * x, hypre_StructVector * y) { HYPRE_Real final_innerprod_result; HYPRE_Real local_result; HYPRE_Real process_result; hypre_Box *x_data_box; hypre_Box *y_data_box; HYPRE_Int xi; HYPRE_Int yi; HYPRE_Complex *xp; HYPRE_Complex *yp; hypre_BoxArray *boxes; hypre_Box *box; hypre_Index loop_size; hypre_IndexRef start; hypre_Index unit_stride; HYPRE_Int i; local_result = 0.0; process_result = 0.0; hypre_SetIndex(unit_stride, 1); boxes = hypre_StructGridBoxes(hypre_StructVectorGrid(y)); hypre_ForBoxI(i, boxes) { box = hypre_BoxArrayBox(boxes, i); start = hypre_BoxIMin(box); x_data_box = hypre_BoxArrayBox(hypre_StructVectorDataSpace(x), i); y_data_box = hypre_BoxArrayBox(hypre_StructVectorDataSpace(y), i); xp = hypre_StructVectorBoxData(x, i); yp = hypre_StructVectorBoxData(y, i); hypre_BoxGetSize(box, loop_size); hypre_BoxLoop2Begin(hypre_StructVectorNDim(x), loop_size, x_data_box, start, unit_stride, xi, y_data_box, start, unit_stride, yi); hypre_BoxLoop2For(xi, yi) { local_result += xp[xi] * hypre_conj(yp[yi]); } hypre_BoxLoop2End(xi, yi); } process_result = local_result; hypre_MPI_Allreduce(&process_result, &final_innerprod_result, 1, HYPRE_MPI_REAL, hypre_MPI_SUM, hypre_StructVectorComm(x)); hypre_IncFLOPCount(2 * hypre_StructVectorGlobalSize(x)); return final_innerprod_result; }
/****************************************************************************** * * Structured inner product routine * *****************************************************************************/ #include "_hypre_struct_mv.h" /*-------------------------------------------------------------------------- * hypre_StructInnerProd *--------------------------------------------------------------------------*/ HYPRE_Real hypre_StructInnerProd(hypre_StructVector * x, hypre_StructVector * y) { HYPRE_Real final_innerprod_result; HYPRE_Real local_result; HYPRE_Real process_result; hypre_Box *x_data_box; hypre_Box *y_data_box; HYPRE_Int xi; HYPRE_Int yi; HYPRE_Complex *xp; HYPRE_Complex *yp; hypre_BoxArray *boxes; hypre_Box *box; hypre_Index loop_size; hypre_IndexRef start; hypre_Index unit_stride; HYPRE_Int i; local_result = 0.0; process_result = 0.0; hypre_SetIndex(unit_stride, 1); boxes = hypre_StructGridBoxes(hypre_StructVectorGrid(y)); hypre_ForBoxI(i, boxes) { box = hypre_BoxArrayBox(boxes, i); start = hypre_BoxIMin(box); x_data_box = hypre_BoxArrayBox(hypre_StructVectorDataSpace(x), i); y_data_box = hypre_BoxArrayBox(hypre_StructVectorDataSpace(y), i); xp = hypre_StructVectorBoxData(x, i); yp = hypre_StructVectorBoxData(y, i); hypre_BoxGetSize(box, loop_size); hypre_BoxLoop2Begin(hypre_StructVectorNDim(x), loop_size, x_data_box, start, unit_stride, xi, y_data_box, start, unit_stride, yi); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,xi,yi) reduction(+:local_result) HYPRE_SMP_SCHEDULE #endif hypre_BoxLoop2For(xi, yi) { local_result += xp[xi] * hypre_conj(yp[yi]); } hypre_BoxLoop2End(xi, yi); } process_result = local_result; hypre_MPI_Allreduce(&process_result, &final_innerprod_result, 1, HYPRE_MPI_REAL, hypre_MPI_SUM, hypre_StructVectorComm(x)); hypre_IncFLOPCount(2 * hypre_StructVectorGlobalSize(x)); return final_innerprod_result; }
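The reduction(+:local_result) clause above is what makes the box loop safe to parallelize: each thread accumulates a private partial sum that OpenMP combines when the loop ends. A plain-array sketch of the same idea, without the hypre BoxLoop macros or the hypre_MPI_Allreduce across ranks, looks like this (function name and types are illustrative):

#include <stddef.h>

double dot(const double *x, const double *y, size_t n)
{
  double result = 0.0;
#pragma omp parallel for reduction(+ : result)
  for (ptrdiff_t i = 0; i < (ptrdiff_t)n; ++i)
    result += x[i] * y[i];      /* each thread sums into its private copy */
  return result;                /* OpenMP has already combined the copies */
}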
irbuilder_unroll_partial_factor_for.c
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs // RUN: %clang_cc1 -fopenmp-enable-irbuilder -verify -fopenmp -fopenmp-version=51 -x c -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s // expected-no-diagnostics #ifndef HEADER #define HEADER // CHECK-LABEL: define {{.*}}@unroll_partial_heuristic_for( // CHECK-NEXT: [[ENTRY:.*]]: // CHECK-NEXT: %[[N_ADDR:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[A_ADDR:.+]] = alloca float*, align 8 // CHECK-NEXT: %[[B_ADDR:.+]] = alloca float*, align 8 // CHECK-NEXT: %[[C_ADDR:.+]] = alloca float*, align 8 // CHECK-NEXT: %[[D_ADDR:.+]] = alloca float*, align 8 // CHECK-NEXT: %[[I:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[AGG_CAPTURED:.+]] = alloca %struct.anon, align 8 // CHECK-NEXT: %[[AGG_CAPTURED1:.+]] = alloca %struct.anon.0, align 4 // CHECK-NEXT: %[[DOTCOUNT_ADDR:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[P_LASTITER:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[P_LOWERBOUND:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[P_UPPERBOUND:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[P_STRIDE:.+]] = alloca i32, align 4 // CHECK-NEXT: store i32 %[[N:.+]], i32* %[[N_ADDR]], align 4 // CHECK-NEXT: store float* %[[A:.+]], float** %[[A_ADDR]], align 8 // CHECK-NEXT: store float* %[[B:.+]], float** %[[B_ADDR]], align 8 // CHECK-NEXT: store float* %[[C:.+]], float** %[[C_ADDR]], align 8 // CHECK-NEXT: store float* %[[D:.+]], float** %[[D_ADDR]], align 8 // CHECK-NEXT: store i32 0, i32* %[[I]], align 4 // CHECK-NEXT: %[[TMP0:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[AGG_CAPTURED]], i32 0, i32 0 // CHECK-NEXT: store i32* %[[I]], i32** %[[TMP0]], align 8 // CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[AGG_CAPTURED]], i32 0, i32 1 // CHECK-NEXT: store i32* %[[N_ADDR]], i32** %[[TMP1]], align 8 // CHECK-NEXT: %[[TMP2:.+]] = getelementptr inbounds %struct.anon.0, %struct.anon.0* %[[AGG_CAPTURED1]], i32 0, i32 0 // CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: store i32 %[[TMP3]], i32* %[[TMP2]], align 4 // CHECK-NEXT: call void @__captured_stmt(i32* %[[DOTCOUNT_ADDR]], %struct.anon* %[[AGG_CAPTURED]]) // CHECK-NEXT: %[[DOTCOUNT:.+]] = load i32, i32* %[[DOTCOUNT_ADDR]], align 4 // CHECK-NEXT: br label %[[OMP_LOOP_PREHEADER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_LOOP_PREHEADER]]: // CHECK-NEXT: %[[TMP4:.+]] = udiv i32 %[[DOTCOUNT]], 13 // CHECK-NEXT: %[[TMP5:.+]] = urem i32 %[[DOTCOUNT]], 13 // CHECK-NEXT: %[[TMP6:.+]] = icmp ne i32 %[[TMP5]], 0 // CHECK-NEXT: %[[TMP7:.+]] = zext i1 %[[TMP6]] to i32 // CHECK-NEXT: %[[OMP_FLOOR0_TRIPCOUNT:.+]] = add nuw i32 %[[TMP4]], %[[TMP7]] // CHECK-NEXT: br label %[[OMP_FLOOR0_PREHEADER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_FLOOR0_PREHEADER]]: // CHECK-NEXT: store i32 0, i32* %[[P_LOWERBOUND]], align 4 // CHECK-NEXT: %[[TMP8:.+]] = sub i32 %[[OMP_FLOOR0_TRIPCOUNT]], 1 // CHECK-NEXT: store i32 %[[TMP8]], i32* %[[P_UPPERBOUND]], align 4 // CHECK-NEXT: store i32 1, i32* %[[P_STRIDE]], align 4 // CHECK-NEXT: %[[OMP_GLOBAL_THREAD_NUM:.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @1) // CHECK-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* @1, i32 %[[OMP_GLOBAL_THREAD_NUM]], i32 34, i32* %[[P_LASTITER]], i32* %[[P_LOWERBOUND]], i32* %[[P_UPPERBOUND]], i32* %[[P_STRIDE]], i32 1, i32 0) // CHECK-NEXT: %[[TMP9:.+]] = load i32, i32* %[[P_LOWERBOUND]], align 4 // CHECK-NEXT: %[[TMP10:.+]] = load i32, i32* %[[P_UPPERBOUND]], align 
4 // CHECK-NEXT: %[[TMP11:.+]] = sub i32 %[[TMP10]], %[[TMP9]] // CHECK-NEXT: %[[TMP12:.+]] = add i32 %[[TMP11]], 1 // CHECK-NEXT: br label %[[OMP_FLOOR0_HEADER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_FLOOR0_HEADER]]: // CHECK-NEXT: %[[OMP_FLOOR0_IV:.+]] = phi i32 [ 0, %[[OMP_FLOOR0_PREHEADER]] ], [ %[[OMP_FLOOR0_NEXT:.+]], %[[OMP_FLOOR0_INC:.+]] ] // CHECK-NEXT: br label %[[OMP_FLOOR0_COND:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_FLOOR0_COND]]: // CHECK-NEXT: %[[OMP_FLOOR0_CMP:.+]] = icmp ult i32 %[[OMP_FLOOR0_IV]], %[[TMP12]] // CHECK-NEXT: br i1 %[[OMP_FLOOR0_CMP]], label %[[OMP_FLOOR0_BODY:.+]], label %[[OMP_FLOOR0_EXIT:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_FLOOR0_BODY]]: // CHECK-NEXT: %[[TMP13:.+]] = add i32 %[[OMP_FLOOR0_IV]], %[[TMP9]] // CHECK-NEXT: %[[TMP14:.+]] = icmp eq i32 %[[TMP13]], %[[OMP_FLOOR0_TRIPCOUNT]] // CHECK-NEXT: %[[TMP15:.+]] = select i1 %[[TMP14]], i32 %[[TMP5]], i32 13 // CHECK-NEXT: br label %[[OMP_TILE0_PREHEADER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_TILE0_PREHEADER]]: // CHECK-NEXT: br label %[[OMP_TILE0_HEADER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_TILE0_HEADER]]: // CHECK-NEXT: %[[OMP_TILE0_IV:.+]] = phi i32 [ 0, %[[OMP_TILE0_PREHEADER]] ], [ %[[OMP_TILE0_NEXT:.+]], %[[OMP_TILE0_INC:.+]] ] // CHECK-NEXT: br label %[[OMP_TILE0_COND:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_TILE0_COND]]: // CHECK-NEXT: %[[OMP_TILE0_CMP:.+]] = icmp ult i32 %[[OMP_TILE0_IV]], %[[TMP15]] // CHECK-NEXT: br i1 %[[OMP_TILE0_CMP]], label %[[OMP_TILE0_BODY:.+]], label %[[OMP_TILE0_EXIT:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_TILE0_BODY]]: // CHECK-NEXT: %[[TMP16:.+]] = mul nuw i32 13, %[[TMP13]] // CHECK-NEXT: %[[TMP17:.+]] = add nuw i32 %[[TMP16]], %[[OMP_TILE0_IV]] // CHECK-NEXT: br label %[[OMP_LOOP_BODY:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_LOOP_BODY]]: // CHECK-NEXT: call void @__captured_stmt.1(i32* %[[I]], i32 %[[TMP17]], %struct.anon.0* %[[AGG_CAPTURED1]]) // CHECK-NEXT: %[[TMP18:.+]] = load float*, float** %[[B_ADDR]], align 8 // CHECK-NEXT: %[[TMP19:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: %[[IDXPROM:.+]] = sext i32 %[[TMP19]] to i64 // CHECK-NEXT: %[[ARRAYIDX:.+]] = getelementptr inbounds float, float* %[[TMP18]], i64 %[[IDXPROM]] // CHECK-NEXT: %[[TMP20:.+]] = load float, float* %[[ARRAYIDX]], align 4 // CHECK-NEXT: %[[TMP21:.+]] = load float*, float** %[[C_ADDR]], align 8 // CHECK-NEXT: %[[TMP22:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: %[[IDXPROM2:.+]] = sext i32 %[[TMP22]] to i64 // CHECK-NEXT: %[[ARRAYIDX3:.+]] = getelementptr inbounds float, float* %[[TMP21]], i64 %[[IDXPROM2]] // CHECK-NEXT: %[[TMP23:.+]] = load float, float* %[[ARRAYIDX3]], align 4 // CHECK-NEXT: %[[MUL:.+]] = fmul float %[[TMP20]], %[[TMP23]] // CHECK-NEXT: %[[TMP24:.+]] = load float*, float** %[[D_ADDR]], align 8 // CHECK-NEXT: %[[TMP25:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: %[[IDXPROM4:.+]] = sext i32 %[[TMP25]] to i64 // CHECK-NEXT: %[[ARRAYIDX5:.+]] = getelementptr inbounds float, float* %[[TMP24]], i64 %[[IDXPROM4]] // CHECK-NEXT: %[[TMP26:.+]] = load float, float* %[[ARRAYIDX5]], align 4 // CHECK-NEXT: %[[MUL6:.+]] = fmul float %[[MUL]], %[[TMP26]] // CHECK-NEXT: %[[TMP27:.+]] = load float*, float** %[[A_ADDR]], align 8 // CHECK-NEXT: %[[TMP28:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: %[[IDXPROM7:.+]] = sext i32 %[[TMP28]] to i64 // CHECK-NEXT: %[[ARRAYIDX8:.+]] = getelementptr inbounds float, float* %[[TMP27]], i64 %[[IDXPROM7]] // CHECK-NEXT: store float %[[MUL6]], float* %[[ARRAYIDX8]], align 4 // CHECK-NEXT: br label 
%[[OMP_TILE0_INC]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_TILE0_INC]]: // CHECK-NEXT: %[[OMP_TILE0_NEXT]] = add nuw i32 %[[OMP_TILE0_IV]], 1 // CHECK-NEXT: br label %[[OMP_TILE0_HEADER]], !llvm.loop ![[LOOP3:[0-9]+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_TILE0_EXIT]]: // CHECK-NEXT: br label %[[OMP_TILE0_AFTER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_TILE0_AFTER]]: // CHECK-NEXT: br label %[[OMP_FLOOR0_INC]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_FLOOR0_INC]]: // CHECK-NEXT: %[[OMP_FLOOR0_NEXT]] = add nuw i32 %[[OMP_FLOOR0_IV]], 1 // CHECK-NEXT: br label %[[OMP_FLOOR0_HEADER]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_FLOOR0_EXIT]]: // CHECK-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @1, i32 %[[OMP_GLOBAL_THREAD_NUM]]) // CHECK-NEXT: %[[OMP_GLOBAL_THREAD_NUM9:.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @1) // CHECK-NEXT: call void @__kmpc_barrier(%struct.ident_t* @2, i32 %[[OMP_GLOBAL_THREAD_NUM9]]) // CHECK-NEXT: br label %[[OMP_FLOOR0_AFTER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_FLOOR0_AFTER]]: // CHECK-NEXT: br label %[[OMP_LOOP_AFTER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_LOOP_AFTER]]: // CHECK-NEXT: ret void // CHECK-NEXT: } void unroll_partial_heuristic_for(int n, float *a, float *b, float *c, float *d) { #pragma omp for #pragma omp unroll partial(13) for (int i = 0; i < n; i++) { a[i] = b[i] * c[i] * d[i]; } } #endif // HEADER // CHECK-LABEL: define {{.*}}@__captured_stmt( // CHECK-NEXT: [[ENTRY:.*]]: // CHECK-NEXT: %[[DISTANCE_ADDR:.+]] = alloca i32*, align 8 // CHECK-NEXT: %[[__CONTEXT_ADDR:.+]] = alloca %struct.anon*, align 8 // CHECK-NEXT: %[[DOTSTART:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[DOTSTOP:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[DOTSTEP:.+]] = alloca i32, align 4 // CHECK-NEXT: store i32* %[[DISTANCE:.+]], i32** %[[DISTANCE_ADDR]], align 8 // CHECK-NEXT: store %struct.anon* %[[__CONTEXT:.+]], %struct.anon** %[[__CONTEXT_ADDR]], align 8 // CHECK-NEXT: %[[TMP0:.+]] = load %struct.anon*, %struct.anon** %[[__CONTEXT_ADDR]], align 8 // CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[TMP0]], i32 0, i32 0 // CHECK-NEXT: %[[TMP2:.+]] = load i32*, i32** %[[TMP1]], align 8 // CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[TMP2]], align 4 // CHECK-NEXT: store i32 %[[TMP3]], i32* %[[DOTSTART]], align 4 // CHECK-NEXT: %[[TMP4:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[TMP0]], i32 0, i32 1 // CHECK-NEXT: %[[TMP5:.+]] = load i32*, i32** %[[TMP4]], align 8 // CHECK-NEXT: %[[TMP6:.+]] = load i32, i32* %[[TMP5]], align 4 // CHECK-NEXT: store i32 %[[TMP6]], i32* %[[DOTSTOP]], align 4 // CHECK-NEXT: store i32 1, i32* %[[DOTSTEP]], align 4 // CHECK-NEXT: %[[TMP7:.+]] = load i32, i32* %[[DOTSTART]], align 4 // CHECK-NEXT: %[[TMP8:.+]] = load i32, i32* %[[DOTSTOP]], align 4 // CHECK-NEXT: %[[CMP:.+]] = icmp slt i32 %[[TMP7]], %[[TMP8]] // CHECK-NEXT: br i1 %[[CMP]], label %[[COND_TRUE:.+]], label %[[COND_FALSE:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[COND_TRUE]]: // CHECK-NEXT: %[[TMP9:.+]] = load i32, i32* %[[DOTSTOP]], align 4 // CHECK-NEXT: %[[TMP10:.+]] = load i32, i32* %[[DOTSTART]], align 4 // CHECK-NEXT: %[[SUB:.+]] = sub nsw i32 %[[TMP9]], %[[TMP10]] // CHECK-NEXT: %[[TMP11:.+]] = load i32, i32* %[[DOTSTEP]], align 4 // CHECK-NEXT: %[[SUB1:.+]] = sub i32 %[[TMP11]], 1 // CHECK-NEXT: %[[ADD:.+]] = add i32 %[[SUB]], %[[SUB1]] // CHECK-NEXT: %[[TMP12:.+]] = load i32, i32* %[[DOTSTEP]], align 4 // CHECK-NEXT: %[[DIV:.+]] = udiv i32 %[[ADD]], %[[TMP12]] // CHECK-NEXT: br label %[[COND_END:.+]] // CHECK-EMPTY: 
// CHECK-NEXT: [[COND_FALSE]]: // CHECK-NEXT: br label %[[COND_END]] // CHECK-EMPTY: // CHECK-NEXT: [[COND_END]]: // CHECK-NEXT: %[[COND:.+]] = phi i32 [ %[[DIV]], %[[COND_TRUE]] ], [ 0, %[[COND_FALSE]] ] // CHECK-NEXT: %[[TMP13:.+]] = load i32*, i32** %[[DISTANCE_ADDR]], align 8 // CHECK-NEXT: store i32 %[[COND]], i32* %[[TMP13]], align 4 // CHECK-NEXT: ret void // CHECK-NEXT: } // CHECK-LABEL: define {{.*}}@__captured_stmt.1( // CHECK-NEXT: [[ENTRY:.*]]: // CHECK-NEXT: %[[LOOPVAR_ADDR:.+]] = alloca i32*, align 8 // CHECK-NEXT: %[[LOGICAL_ADDR:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[__CONTEXT_ADDR:.+]] = alloca %struct.anon.0*, align 8 // CHECK-NEXT: store i32* %[[LOOPVAR:.+]], i32** %[[LOOPVAR_ADDR]], align 8 // CHECK-NEXT: store i32 %[[LOGICAL:.+]], i32* %[[LOGICAL_ADDR]], align 4 // CHECK-NEXT: store %struct.anon.0* %[[__CONTEXT:.+]], %struct.anon.0** %[[__CONTEXT_ADDR]], align 8 // CHECK-NEXT: %[[TMP0:.+]] = load %struct.anon.0*, %struct.anon.0** %[[__CONTEXT_ADDR]], align 8 // CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon.0, %struct.anon.0* %[[TMP0]], i32 0, i32 0 // CHECK-NEXT: %[[TMP2:.+]] = load i32, i32* %[[TMP1]], align 4 // CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[LOGICAL_ADDR]], align 4 // CHECK-NEXT: %[[MUL:.+]] = mul i32 1, %[[TMP3]] // CHECK-NEXT: %[[ADD:.+]] = add i32 %[[TMP2]], %[[MUL]] // CHECK-NEXT: %[[TMP4:.+]] = load i32*, i32** %[[LOOPVAR_ADDR]], align 8 // CHECK-NEXT: store i32 %[[ADD]], i32* %[[TMP4]], align 4 // CHECK-NEXT: ret void // CHECK-NEXT: } // CHECK: ![[META0:[0-9]+]] = !{i32 1, !"wchar_size", i32 4} // CHECK: ![[META1:[0-9]+]] = !{i32 7, !"openmp", i32 51} // CHECK: ![[META2:[0-9]+]] = // CHECK: ![[LOOP3]] = distinct !{![[LOOP3]], ![[LOOPPROP4:[0-9]+]], ![[LOOPPROP5:[0-9]+]]} // CHECK: ![[LOOPPROP4]] = !{!"llvm.loop.unroll.enable"} // CHECK: ![[LOOPPROP5]] = !{!"llvm.loop.unroll.count", i32 13}
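The test above exercises "#pragma omp unroll partial(13)" nested under "#pragma omp for" (OpenMP 5.1): the generated IR splits the trip count into a floor loop over chunks of 13 plus an inner tile loop carrying llvm.loop.unroll.enable and llvm.loop.unroll.count metadata. A minimal usage sketch of the same directive pair follows; it assumes an OpenMP 5.1 compiler such as clang with -fopenmp -fopenmp-version=51, and the partial(4) factor and function name are illustrative rather than taken from the test.

void saxpy_unrolled(int n, float a, const float *x, float *y)
{
#pragma omp parallel
  {
#pragma omp for
#pragma omp unroll partial(4)
    for (int i = 0; i < n; ++i)
      y[i] = a * x[i] + y[i];   /* compiler partially unrolls the body by 4 */
  }
}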
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs // RUN: %clang_cc1 -fopenmp-enable-irbuilder -verify -fopenmp -fopenmp-version=51 -x c -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s // expected-no-diagnostics #ifndef HEADER #define HEADER // CHECK-LABEL: define {{.*}}@unroll_partial_heuristic_for( // CHECK-NEXT: [[ENTRY:.*]]: // CHECK-NEXT: %[[N_ADDR:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[A_ADDR:.+]] = alloca float*, align 8 // CHECK-NEXT: %[[B_ADDR:.+]] = alloca float*, align 8 // CHECK-NEXT: %[[C_ADDR:.+]] = alloca float*, align 8 // CHECK-NEXT: %[[D_ADDR:.+]] = alloca float*, align 8 // CHECK-NEXT: %[[I:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[AGG_CAPTURED:.+]] = alloca %struct.anon, align 8 // CHECK-NEXT: %[[AGG_CAPTURED1:.+]] = alloca %struct.anon.0, align 4 // CHECK-NEXT: %[[DOTCOUNT_ADDR:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[P_LASTITER:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[P_LOWERBOUND:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[P_UPPERBOUND:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[P_STRIDE:.+]] = alloca i32, align 4 // CHECK-NEXT: store i32 %[[N:.+]], i32* %[[N_ADDR]], align 4 // CHECK-NEXT: store float* %[[A:.+]], float** %[[A_ADDR]], align 8 // CHECK-NEXT: store float* %[[B:.+]], float** %[[B_ADDR]], align 8 // CHECK-NEXT: store float* %[[C:.+]], float** %[[C_ADDR]], align 8 // CHECK-NEXT: store float* %[[D:.+]], float** %[[D_ADDR]], align 8 // CHECK-NEXT: store i32 0, i32* %[[I]], align 4 // CHECK-NEXT: %[[TMP0:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[AGG_CAPTURED]], i32 0, i32 0 // CHECK-NEXT: store i32* %[[I]], i32** %[[TMP0]], align 8 // CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[AGG_CAPTURED]], i32 0, i32 1 // CHECK-NEXT: store i32* %[[N_ADDR]], i32** %[[TMP1]], align 8 // CHECK-NEXT: %[[TMP2:.+]] = getelementptr inbounds %struct.anon.0, %struct.anon.0* %[[AGG_CAPTURED1]], i32 0, i32 0 // CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: store i32 %[[TMP3]], i32* %[[TMP2]], align 4 // CHECK-NEXT: call void @__captured_stmt(i32* %[[DOTCOUNT_ADDR]], %struct.anon* %[[AGG_CAPTURED]]) // CHECK-NEXT: %[[DOTCOUNT:.+]] = load i32, i32* %[[DOTCOUNT_ADDR]], align 4 // CHECK-NEXT: br label %[[OMP_LOOP_PREHEADER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_LOOP_PREHEADER]]: // CHECK-NEXT: %[[TMP4:.+]] = udiv i32 %[[DOTCOUNT]], 13 // CHECK-NEXT: %[[TMP5:.+]] = urem i32 %[[DOTCOUNT]], 13 // CHECK-NEXT: %[[TMP6:.+]] = icmp ne i32 %[[TMP5]], 0 // CHECK-NEXT: %[[TMP7:.+]] = zext i1 %[[TMP6]] to i32 // CHECK-NEXT: %[[OMP_FLOOR0_TRIPCOUNT:.+]] = add nuw i32 %[[TMP4]], %[[TMP7]] // CHECK-NEXT: br label %[[OMP_FLOOR0_PREHEADER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_FLOOR0_PREHEADER]]: // CHECK-NEXT: store i32 0, i32* %[[P_LOWERBOUND]], align 4 // CHECK-NEXT: %[[TMP8:.+]] = sub i32 %[[OMP_FLOOR0_TRIPCOUNT]], 1 // CHECK-NEXT: store i32 %[[TMP8]], i32* %[[P_UPPERBOUND]], align 4 // CHECK-NEXT: store i32 1, i32* %[[P_STRIDE]], align 4 // CHECK-NEXT: %[[OMP_GLOBAL_THREAD_NUM:.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @1) // CHECK-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* @1, i32 %[[OMP_GLOBAL_THREAD_NUM]], i32 34, i32* %[[P_LASTITER]], i32* %[[P_LOWERBOUND]], i32* %[[P_UPPERBOUND]], i32* %[[P_STRIDE]], i32 1, i32 0) // CHECK-NEXT: %[[TMP9:.+]] = load i32, i32* %[[P_LOWERBOUND]], align 4 // CHECK-NEXT: %[[TMP10:.+]] = load i32, i32* %[[P_UPPERBOUND]], align 
4 // CHECK-NEXT: %[[TMP11:.+]] = sub i32 %[[TMP10]], %[[TMP9]] // CHECK-NEXT: %[[TMP12:.+]] = add i32 %[[TMP11]], 1 // CHECK-NEXT: br label %[[OMP_FLOOR0_HEADER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_FLOOR0_HEADER]]: // CHECK-NEXT: %[[OMP_FLOOR0_IV:.+]] = phi i32 [ 0, %[[OMP_FLOOR0_PREHEADER]] ], [ %[[OMP_FLOOR0_NEXT:.+]], %[[OMP_FLOOR0_INC:.+]] ] // CHECK-NEXT: br label %[[OMP_FLOOR0_COND:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_FLOOR0_COND]]: // CHECK-NEXT: %[[OMP_FLOOR0_CMP:.+]] = icmp ult i32 %[[OMP_FLOOR0_IV]], %[[TMP12]] // CHECK-NEXT: br i1 %[[OMP_FLOOR0_CMP]], label %[[OMP_FLOOR0_BODY:.+]], label %[[OMP_FLOOR0_EXIT:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_FLOOR0_BODY]]: // CHECK-NEXT: %[[TMP13:.+]] = add i32 %[[OMP_FLOOR0_IV]], %[[TMP9]] // CHECK-NEXT: %[[TMP14:.+]] = icmp eq i32 %[[TMP13]], %[[OMP_FLOOR0_TRIPCOUNT]] // CHECK-NEXT: %[[TMP15:.+]] = select i1 %[[TMP14]], i32 %[[TMP5]], i32 13 // CHECK-NEXT: br label %[[OMP_TILE0_PREHEADER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_TILE0_PREHEADER]]: // CHECK-NEXT: br label %[[OMP_TILE0_HEADER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_TILE0_HEADER]]: // CHECK-NEXT: %[[OMP_TILE0_IV:.+]] = phi i32 [ 0, %[[OMP_TILE0_PREHEADER]] ], [ %[[OMP_TILE0_NEXT:.+]], %[[OMP_TILE0_INC:.+]] ] // CHECK-NEXT: br label %[[OMP_TILE0_COND:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_TILE0_COND]]: // CHECK-NEXT: %[[OMP_TILE0_CMP:.+]] = icmp ult i32 %[[OMP_TILE0_IV]], %[[TMP15]] // CHECK-NEXT: br i1 %[[OMP_TILE0_CMP]], label %[[OMP_TILE0_BODY:.+]], label %[[OMP_TILE0_EXIT:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_TILE0_BODY]]: // CHECK-NEXT: %[[TMP16:.+]] = mul nuw i32 13, %[[TMP13]] // CHECK-NEXT: %[[TMP17:.+]] = add nuw i32 %[[TMP16]], %[[OMP_TILE0_IV]] // CHECK-NEXT: br label %[[OMP_LOOP_BODY:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_LOOP_BODY]]: // CHECK-NEXT: call void @__captured_stmt.1(i32* %[[I]], i32 %[[TMP17]], %struct.anon.0* %[[AGG_CAPTURED1]]) // CHECK-NEXT: %[[TMP18:.+]] = load float*, float** %[[B_ADDR]], align 8 // CHECK-NEXT: %[[TMP19:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: %[[IDXPROM:.+]] = sext i32 %[[TMP19]] to i64 // CHECK-NEXT: %[[ARRAYIDX:.+]] = getelementptr inbounds float, float* %[[TMP18]], i64 %[[IDXPROM]] // CHECK-NEXT: %[[TMP20:.+]] = load float, float* %[[ARRAYIDX]], align 4 // CHECK-NEXT: %[[TMP21:.+]] = load float*, float** %[[C_ADDR]], align 8 // CHECK-NEXT: %[[TMP22:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: %[[IDXPROM2:.+]] = sext i32 %[[TMP22]] to i64 // CHECK-NEXT: %[[ARRAYIDX3:.+]] = getelementptr inbounds float, float* %[[TMP21]], i64 %[[IDXPROM2]] // CHECK-NEXT: %[[TMP23:.+]] = load float, float* %[[ARRAYIDX3]], align 4 // CHECK-NEXT: %[[MUL:.+]] = fmul float %[[TMP20]], %[[TMP23]] // CHECK-NEXT: %[[TMP24:.+]] = load float*, float** %[[D_ADDR]], align 8 // CHECK-NEXT: %[[TMP25:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: %[[IDXPROM4:.+]] = sext i32 %[[TMP25]] to i64 // CHECK-NEXT: %[[ARRAYIDX5:.+]] = getelementptr inbounds float, float* %[[TMP24]], i64 %[[IDXPROM4]] // CHECK-NEXT: %[[TMP26:.+]] = load float, float* %[[ARRAYIDX5]], align 4 // CHECK-NEXT: %[[MUL6:.+]] = fmul float %[[MUL]], %[[TMP26]] // CHECK-NEXT: %[[TMP27:.+]] = load float*, float** %[[A_ADDR]], align 8 // CHECK-NEXT: %[[TMP28:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: %[[IDXPROM7:.+]] = sext i32 %[[TMP28]] to i64 // CHECK-NEXT: %[[ARRAYIDX8:.+]] = getelementptr inbounds float, float* %[[TMP27]], i64 %[[IDXPROM7]] // CHECK-NEXT: store float %[[MUL6]], float* %[[ARRAYIDX8]], align 4 // CHECK-NEXT: br label 
%[[OMP_TILE0_INC]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_TILE0_INC]]: // CHECK-NEXT: %[[OMP_TILE0_NEXT]] = add nuw i32 %[[OMP_TILE0_IV]], 1 // CHECK-NEXT: br label %[[OMP_TILE0_HEADER]], !llvm.loop ![[LOOP3:[0-9]+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_TILE0_EXIT]]: // CHECK-NEXT: br label %[[OMP_TILE0_AFTER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_TILE0_AFTER]]: // CHECK-NEXT: br label %[[OMP_FLOOR0_INC]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_FLOOR0_INC]]: // CHECK-NEXT: %[[OMP_FLOOR0_NEXT]] = add nuw i32 %[[OMP_FLOOR0_IV]], 1 // CHECK-NEXT: br label %[[OMP_FLOOR0_HEADER]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_FLOOR0_EXIT]]: // CHECK-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @1, i32 %[[OMP_GLOBAL_THREAD_NUM]]) // CHECK-NEXT: %[[OMP_GLOBAL_THREAD_NUM9:.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @1) // CHECK-NEXT: call void @__kmpc_barrier(%struct.ident_t* @2, i32 %[[OMP_GLOBAL_THREAD_NUM9]]) // CHECK-NEXT: br label %[[OMP_FLOOR0_AFTER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_FLOOR0_AFTER]]: // CHECK-NEXT: br label %[[OMP_LOOP_AFTER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_LOOP_AFTER]]: // CHECK-NEXT: ret void // CHECK-NEXT: } void unroll_partial_heuristic_for(int n, float *a, float *b, float *c, float *d) { for (int i = 0; i < n; i++) { a[i] = b[i] * c[i] * d[i]; } } #endif // HEADER // CHECK-LABEL: define {{.*}}@__captured_stmt( // CHECK-NEXT: [[ENTRY:.*]]: // CHECK-NEXT: %[[DISTANCE_ADDR:.+]] = alloca i32*, align 8 // CHECK-NEXT: %[[__CONTEXT_ADDR:.+]] = alloca %struct.anon*, align 8 // CHECK-NEXT: %[[DOTSTART:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[DOTSTOP:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[DOTSTEP:.+]] = alloca i32, align 4 // CHECK-NEXT: store i32* %[[DISTANCE:.+]], i32** %[[DISTANCE_ADDR]], align 8 // CHECK-NEXT: store %struct.anon* %[[__CONTEXT:.+]], %struct.anon** %[[__CONTEXT_ADDR]], align 8 // CHECK-NEXT: %[[TMP0:.+]] = load %struct.anon*, %struct.anon** %[[__CONTEXT_ADDR]], align 8 // CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[TMP0]], i32 0, i32 0 // CHECK-NEXT: %[[TMP2:.+]] = load i32*, i32** %[[TMP1]], align 8 // CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[TMP2]], align 4 // CHECK-NEXT: store i32 %[[TMP3]], i32* %[[DOTSTART]], align 4 // CHECK-NEXT: %[[TMP4:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[TMP0]], i32 0, i32 1 // CHECK-NEXT: %[[TMP5:.+]] = load i32*, i32** %[[TMP4]], align 8 // CHECK-NEXT: %[[TMP6:.+]] = load i32, i32* %[[TMP5]], align 4 // CHECK-NEXT: store i32 %[[TMP6]], i32* %[[DOTSTOP]], align 4 // CHECK-NEXT: store i32 1, i32* %[[DOTSTEP]], align 4 // CHECK-NEXT: %[[TMP7:.+]] = load i32, i32* %[[DOTSTART]], align 4 // CHECK-NEXT: %[[TMP8:.+]] = load i32, i32* %[[DOTSTOP]], align 4 // CHECK-NEXT: %[[CMP:.+]] = icmp slt i32 %[[TMP7]], %[[TMP8]] // CHECK-NEXT: br i1 %[[CMP]], label %[[COND_TRUE:.+]], label %[[COND_FALSE:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[COND_TRUE]]: // CHECK-NEXT: %[[TMP9:.+]] = load i32, i32* %[[DOTSTOP]], align 4 // CHECK-NEXT: %[[TMP10:.+]] = load i32, i32* %[[DOTSTART]], align 4 // CHECK-NEXT: %[[SUB:.+]] = sub nsw i32 %[[TMP9]], %[[TMP10]] // CHECK-NEXT: %[[TMP11:.+]] = load i32, i32* %[[DOTSTEP]], align 4 // CHECK-NEXT: %[[SUB1:.+]] = sub i32 %[[TMP11]], 1 // CHECK-NEXT: %[[ADD:.+]] = add i32 %[[SUB]], %[[SUB1]] // CHECK-NEXT: %[[TMP12:.+]] = load i32, i32* %[[DOTSTEP]], align 4 // CHECK-NEXT: %[[DIV:.+]] = udiv i32 %[[ADD]], %[[TMP12]] // CHECK-NEXT: br label %[[COND_END:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[COND_FALSE]]: // CHECK-NEXT: 
br label %[[COND_END]] // CHECK-EMPTY: // CHECK-NEXT: [[COND_END]]: // CHECK-NEXT: %[[COND:.+]] = phi i32 [ %[[DIV]], %[[COND_TRUE]] ], [ 0, %[[COND_FALSE]] ] // CHECK-NEXT: %[[TMP13:.+]] = load i32*, i32** %[[DISTANCE_ADDR]], align 8 // CHECK-NEXT: store i32 %[[COND]], i32* %[[TMP13]], align 4 // CHECK-NEXT: ret void // CHECK-NEXT: } // CHECK-LABEL: define {{.*}}@__captured_stmt.1( // CHECK-NEXT: [[ENTRY:.*]]: // CHECK-NEXT: %[[LOOPVAR_ADDR:.+]] = alloca i32*, align 8 // CHECK-NEXT: %[[LOGICAL_ADDR:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[__CONTEXT_ADDR:.+]] = alloca %struct.anon.0*, align 8 // CHECK-NEXT: store i32* %[[LOOPVAR:.+]], i32** %[[LOOPVAR_ADDR]], align 8 // CHECK-NEXT: store i32 %[[LOGICAL:.+]], i32* %[[LOGICAL_ADDR]], align 4 // CHECK-NEXT: store %struct.anon.0* %[[__CONTEXT:.+]], %struct.anon.0** %[[__CONTEXT_ADDR]], align 8 // CHECK-NEXT: %[[TMP0:.+]] = load %struct.anon.0*, %struct.anon.0** %[[__CONTEXT_ADDR]], align 8 // CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon.0, %struct.anon.0* %[[TMP0]], i32 0, i32 0 // CHECK-NEXT: %[[TMP2:.+]] = load i32, i32* %[[TMP1]], align 4 // CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[LOGICAL_ADDR]], align 4 // CHECK-NEXT: %[[MUL:.+]] = mul i32 1, %[[TMP3]] // CHECK-NEXT: %[[ADD:.+]] = add i32 %[[TMP2]], %[[MUL]] // CHECK-NEXT: %[[TMP4:.+]] = load i32*, i32** %[[LOOPVAR_ADDR]], align 8 // CHECK-NEXT: store i32 %[[ADD]], i32* %[[TMP4]], align 4 // CHECK-NEXT: ret void // CHECK-NEXT: } // CHECK: ![[META0:[0-9]+]] = !{i32 1, !"wchar_size", i32 4} // CHECK: ![[META1:[0-9]+]] = !{i32 7, !"openmp", i32 51} // CHECK: ![[META2:[0-9]+]] = // CHECK: ![[LOOP3]] = distinct !{![[LOOP3]], ![[LOOPPROP4:[0-9]+]], ![[LOOPPROP5:[0-9]+]]} // CHECK: ![[LOOPPROP4]] = !{!"llvm.loop.unroll.enable"} // CHECK: ![[LOOPPROP5]] = !{!"llvm.loop.unroll.count", i32 13}
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs // RUN: %clang_cc1 -fopenmp-enable-irbuilder -verify -fopenmp -fopenmp-version=51 -x c -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s // expected-no-diagnostics #ifndef HEADER #define HEADER // CHECK-LABEL: define {{.*}}@unroll_partial_heuristic_for( // CHECK-NEXT: [[ENTRY:.*]]: // CHECK-NEXT: %[[N_ADDR:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[A_ADDR:.+]] = alloca float*, align 8 // CHECK-NEXT: %[[B_ADDR:.+]] = alloca float*, align 8 // CHECK-NEXT: %[[C_ADDR:.+]] = alloca float*, align 8 // CHECK-NEXT: %[[D_ADDR:.+]] = alloca float*, align 8 // CHECK-NEXT: %[[I:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[AGG_CAPTURED:.+]] = alloca %struct.anon, align 8 // CHECK-NEXT: %[[AGG_CAPTURED1:.+]] = alloca %struct.anon.0, align 4 // CHECK-NEXT: %[[DOTCOUNT_ADDR:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[P_LASTITER:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[P_LOWERBOUND:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[P_UPPERBOUND:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[P_STRIDE:.+]] = alloca i32, align 4 // CHECK-NEXT: store i32 %[[N:.+]], i32* %[[N_ADDR]], align 4 // CHECK-NEXT: store float* %[[A:.+]], float** %[[A_ADDR]], align 8 // CHECK-NEXT: store float* %[[B:.+]], float** %[[B_ADDR]], align 8 // CHECK-NEXT: store float* %[[C:.+]], float** %[[C_ADDR]], align 8 // CHECK-NEXT: store float* %[[D:.+]], float** %[[D_ADDR]], align 8 // CHECK-NEXT: store i32 0, i32* %[[I]], align 4 // CHECK-NEXT: %[[TMP0:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[AGG_CAPTURED]], i32 0, i32 0 // CHECK-NEXT: store i32* %[[I]], i32** %[[TMP0]], align 8 // CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[AGG_CAPTURED]], i32 0, i32 1 // CHECK-NEXT: store i32* %[[N_ADDR]], i32** %[[TMP1]], align 8 // CHECK-NEXT: %[[TMP2:.+]] = getelementptr inbounds %struct.anon.0, %struct.anon.0* %[[AGG_CAPTURED1]], i32 0, i32 0 // CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: store i32 %[[TMP3]], i32* %[[TMP2]], align 4 // CHECK-NEXT: call void @__captured_stmt(i32* %[[DOTCOUNT_ADDR]], %struct.anon* %[[AGG_CAPTURED]]) // CHECK-NEXT: %[[DOTCOUNT:.+]] = load i32, i32* %[[DOTCOUNT_ADDR]], align 4 // CHECK-NEXT: br label %[[OMP_LOOP_PREHEADER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_LOOP_PREHEADER]]: // CHECK-NEXT: %[[TMP4:.+]] = udiv i32 %[[DOTCOUNT]], 13 // CHECK-NEXT: %[[TMP5:.+]] = urem i32 %[[DOTCOUNT]], 13 // CHECK-NEXT: %[[TMP6:.+]] = icmp ne i32 %[[TMP5]], 0 // CHECK-NEXT: %[[TMP7:.+]] = zext i1 %[[TMP6]] to i32 // CHECK-NEXT: %[[OMP_FLOOR0_TRIPCOUNT:.+]] = add nuw i32 %[[TMP4]], %[[TMP7]] // CHECK-NEXT: br label %[[OMP_FLOOR0_PREHEADER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_FLOOR0_PREHEADER]]: // CHECK-NEXT: store i32 0, i32* %[[P_LOWERBOUND]], align 4 // CHECK-NEXT: %[[TMP8:.+]] = sub i32 %[[OMP_FLOOR0_TRIPCOUNT]], 1 // CHECK-NEXT: store i32 %[[TMP8]], i32* %[[P_UPPERBOUND]], align 4 // CHECK-NEXT: store i32 1, i32* %[[P_STRIDE]], align 4 // CHECK-NEXT: %[[OMP_GLOBAL_THREAD_NUM:.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @1) // CHECK-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* @1, i32 %[[OMP_GLOBAL_THREAD_NUM]], i32 34, i32* %[[P_LASTITER]], i32* %[[P_LOWERBOUND]], i32* %[[P_UPPERBOUND]], i32* %[[P_STRIDE]], i32 1, i32 0) // CHECK-NEXT: %[[TMP9:.+]] = load i32, i32* %[[P_LOWERBOUND]], align 4 // CHECK-NEXT: %[[TMP10:.+]] = load i32, i32* %[[P_UPPERBOUND]], align 
4 // CHECK-NEXT: %[[TMP11:.+]] = sub i32 %[[TMP10]], %[[TMP9]] // CHECK-NEXT: %[[TMP12:.+]] = add i32 %[[TMP11]], 1 // CHECK-NEXT: br label %[[OMP_FLOOR0_HEADER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_FLOOR0_HEADER]]: // CHECK-NEXT: %[[OMP_FLOOR0_IV:.+]] = phi i32 [ 0, %[[OMP_FLOOR0_PREHEADER]] ], [ %[[OMP_FLOOR0_NEXT:.+]], %[[OMP_FLOOR0_INC:.+]] ] // CHECK-NEXT: br label %[[OMP_FLOOR0_COND:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_FLOOR0_COND]]: // CHECK-NEXT: %[[OMP_FLOOR0_CMP:.+]] = icmp ult i32 %[[OMP_FLOOR0_IV]], %[[TMP12]] // CHECK-NEXT: br i1 %[[OMP_FLOOR0_CMP]], label %[[OMP_FLOOR0_BODY:.+]], label %[[OMP_FLOOR0_EXIT:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_FLOOR0_BODY]]: // CHECK-NEXT: %[[TMP13:.+]] = add i32 %[[OMP_FLOOR0_IV]], %[[TMP9]] // CHECK-NEXT: %[[TMP14:.+]] = icmp eq i32 %[[TMP13]], %[[OMP_FLOOR0_TRIPCOUNT]] // CHECK-NEXT: %[[TMP15:.+]] = select i1 %[[TMP14]], i32 %[[TMP5]], i32 13 // CHECK-NEXT: br label %[[OMP_TILE0_PREHEADER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_TILE0_PREHEADER]]: // CHECK-NEXT: br label %[[OMP_TILE0_HEADER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_TILE0_HEADER]]: // CHECK-NEXT: %[[OMP_TILE0_IV:.+]] = phi i32 [ 0, %[[OMP_TILE0_PREHEADER]] ], [ %[[OMP_TILE0_NEXT:.+]], %[[OMP_TILE0_INC:.+]] ] // CHECK-NEXT: br label %[[OMP_TILE0_COND:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_TILE0_COND]]: // CHECK-NEXT: %[[OMP_TILE0_CMP:.+]] = icmp ult i32 %[[OMP_TILE0_IV]], %[[TMP15]] // CHECK-NEXT: br i1 %[[OMP_TILE0_CMP]], label %[[OMP_TILE0_BODY:.+]], label %[[OMP_TILE0_EXIT:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_TILE0_BODY]]: // CHECK-NEXT: %[[TMP16:.+]] = mul nuw i32 13, %[[TMP13]] // CHECK-NEXT: %[[TMP17:.+]] = add nuw i32 %[[TMP16]], %[[OMP_TILE0_IV]] // CHECK-NEXT: br label %[[OMP_LOOP_BODY:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_LOOP_BODY]]: // CHECK-NEXT: call void @__captured_stmt.1(i32* %[[I]], i32 %[[TMP17]], %struct.anon.0* %[[AGG_CAPTURED1]]) // CHECK-NEXT: %[[TMP18:.+]] = load float*, float** %[[B_ADDR]], align 8 // CHECK-NEXT: %[[TMP19:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: %[[IDXPROM:.+]] = sext i32 %[[TMP19]] to i64 // CHECK-NEXT: %[[ARRAYIDX:.+]] = getelementptr inbounds float, float* %[[TMP18]], i64 %[[IDXPROM]] // CHECK-NEXT: %[[TMP20:.+]] = load float, float* %[[ARRAYIDX]], align 4 // CHECK-NEXT: %[[TMP21:.+]] = load float*, float** %[[C_ADDR]], align 8 // CHECK-NEXT: %[[TMP22:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: %[[IDXPROM2:.+]] = sext i32 %[[TMP22]] to i64 // CHECK-NEXT: %[[ARRAYIDX3:.+]] = getelementptr inbounds float, float* %[[TMP21]], i64 %[[IDXPROM2]] // CHECK-NEXT: %[[TMP23:.+]] = load float, float* %[[ARRAYIDX3]], align 4 // CHECK-NEXT: %[[MUL:.+]] = fmul float %[[TMP20]], %[[TMP23]] // CHECK-NEXT: %[[TMP24:.+]] = load float*, float** %[[D_ADDR]], align 8 // CHECK-NEXT: %[[TMP25:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: %[[IDXPROM4:.+]] = sext i32 %[[TMP25]] to i64 // CHECK-NEXT: %[[ARRAYIDX5:.+]] = getelementptr inbounds float, float* %[[TMP24]], i64 %[[IDXPROM4]] // CHECK-NEXT: %[[TMP26:.+]] = load float, float* %[[ARRAYIDX5]], align 4 // CHECK-NEXT: %[[MUL6:.+]] = fmul float %[[MUL]], %[[TMP26]] // CHECK-NEXT: %[[TMP27:.+]] = load float*, float** %[[A_ADDR]], align 8 // CHECK-NEXT: %[[TMP28:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: %[[IDXPROM7:.+]] = sext i32 %[[TMP28]] to i64 // CHECK-NEXT: %[[ARRAYIDX8:.+]] = getelementptr inbounds float, float* %[[TMP27]], i64 %[[IDXPROM7]] // CHECK-NEXT: store float %[[MUL6]], float* %[[ARRAYIDX8]], align 4 // CHECK-NEXT: br label 
%[[OMP_TILE0_INC]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_TILE0_INC]]: // CHECK-NEXT: %[[OMP_TILE0_NEXT]] = add nuw i32 %[[OMP_TILE0_IV]], 1 // CHECK-NEXT: br label %[[OMP_TILE0_HEADER]], !llvm.loop ![[LOOP3:[0-9]+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_TILE0_EXIT]]: // CHECK-NEXT: br label %[[OMP_TILE0_AFTER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_TILE0_AFTER]]: // CHECK-NEXT: br label %[[OMP_FLOOR0_INC]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_FLOOR0_INC]]: // CHECK-NEXT: %[[OMP_FLOOR0_NEXT]] = add nuw i32 %[[OMP_FLOOR0_IV]], 1 // CHECK-NEXT: br label %[[OMP_FLOOR0_HEADER]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_FLOOR0_EXIT]]: // CHECK-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @1, i32 %[[OMP_GLOBAL_THREAD_NUM]]) // CHECK-NEXT: %[[OMP_GLOBAL_THREAD_NUM9:.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @1) // CHECK-NEXT: call void @__kmpc_barrier(%struct.ident_t* @2, i32 %[[OMP_GLOBAL_THREAD_NUM9]]) // CHECK-NEXT: br label %[[OMP_FLOOR0_AFTER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_FLOOR0_AFTER]]: // CHECK-NEXT: br label %[[OMP_LOOP_AFTER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_LOOP_AFTER]]: // CHECK-NEXT: ret void // CHECK-NEXT: } void unroll_partial_heuristic_for(int n, float *a, float *b, float *c, float *d) { #pragma omp for #pragma omp unroll partial(13) for (int i = 0; i < n; i++) { a[i] = b[i] * c[i] * d[i]; } } #endif // HEADER // CHECK-LABEL: define {{.*}}@__captured_stmt( // CHECK-NEXT: [[ENTRY:.*]]: // CHECK-NEXT: %[[DISTANCE_ADDR:.+]] = alloca i32*, align 8 // CHECK-NEXT: %[[__CONTEXT_ADDR:.+]] = alloca %struct.anon*, align 8 // CHECK-NEXT: %[[DOTSTART:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[DOTSTOP:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[DOTSTEP:.+]] = alloca i32, align 4 // CHECK-NEXT: store i32* %[[DISTANCE:.+]], i32** %[[DISTANCE_ADDR]], align 8 // CHECK-NEXT: store %struct.anon* %[[__CONTEXT:.+]], %struct.anon** %[[__CONTEXT_ADDR]], align 8 // CHECK-NEXT: %[[TMP0:.+]] = load %struct.anon*, %struct.anon** %[[__CONTEXT_ADDR]], align 8 // CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[TMP0]], i32 0, i32 0 // CHECK-NEXT: %[[TMP2:.+]] = load i32*, i32** %[[TMP1]], align 8 // CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[TMP2]], align 4 // CHECK-NEXT: store i32 %[[TMP3]], i32* %[[DOTSTART]], align 4 // CHECK-NEXT: %[[TMP4:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[TMP0]], i32 0, i32 1 // CHECK-NEXT: %[[TMP5:.+]] = load i32*, i32** %[[TMP4]], align 8 // CHECK-NEXT: %[[TMP6:.+]] = load i32, i32* %[[TMP5]], align 4 // CHECK-NEXT: store i32 %[[TMP6]], i32* %[[DOTSTOP]], align 4 // CHECK-NEXT: store i32 1, i32* %[[DOTSTEP]], align 4 // CHECK-NEXT: %[[TMP7:.+]] = load i32, i32* %[[DOTSTART]], align 4 // CHECK-NEXT: %[[TMP8:.+]] = load i32, i32* %[[DOTSTOP]], align 4 // CHECK-NEXT: %[[CMP:.+]] = icmp slt i32 %[[TMP7]], %[[TMP8]] // CHECK-NEXT: br i1 %[[CMP]], label %[[COND_TRUE:.+]], label %[[COND_FALSE:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[COND_TRUE]]: // CHECK-NEXT: %[[TMP9:.+]] = load i32, i32* %[[DOTSTOP]], align 4 // CHECK-NEXT: %[[TMP10:.+]] = load i32, i32* %[[DOTSTART]], align 4 // CHECK-NEXT: %[[SUB:.+]] = sub nsw i32 %[[TMP9]], %[[TMP10]] // CHECK-NEXT: %[[TMP11:.+]] = load i32, i32* %[[DOTSTEP]], align 4 // CHECK-NEXT: %[[SUB1:.+]] = sub i32 %[[TMP11]], 1 // CHECK-NEXT: %[[ADD:.+]] = add i32 %[[SUB]], %[[SUB1]] // CHECK-NEXT: %[[TMP12:.+]] = load i32, i32* %[[DOTSTEP]], align 4 // CHECK-NEXT: %[[DIV:.+]] = udiv i32 %[[ADD]], %[[TMP12]] // CHECK-NEXT: br label %[[COND_END:.+]] // CHECK-EMPTY: 
// CHECK-NEXT: [[COND_FALSE]]: // CHECK-NEXT: br label %[[COND_END]] // CHECK-EMPTY: // CHECK-NEXT: [[COND_END]]: // CHECK-NEXT: %[[COND:.+]] = phi i32 [ %[[DIV]], %[[COND_TRUE]] ], [ 0, %[[COND_FALSE]] ] // CHECK-NEXT: %[[TMP13:.+]] = load i32*, i32** %[[DISTANCE_ADDR]], align 8 // CHECK-NEXT: store i32 %[[COND]], i32* %[[TMP13]], align 4 // CHECK-NEXT: ret void // CHECK-NEXT: } // CHECK-LABEL: define {{.*}}@__captured_stmt.1( // CHECK-NEXT: [[ENTRY:.*]]: // CHECK-NEXT: %[[LOOPVAR_ADDR:.+]] = alloca i32*, align 8 // CHECK-NEXT: %[[LOGICAL_ADDR:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[__CONTEXT_ADDR:.+]] = alloca %struct.anon.0*, align 8 // CHECK-NEXT: store i32* %[[LOOPVAR:.+]], i32** %[[LOOPVAR_ADDR]], align 8 // CHECK-NEXT: store i32 %[[LOGICAL:.+]], i32* %[[LOGICAL_ADDR]], align 4 // CHECK-NEXT: store %struct.anon.0* %[[__CONTEXT:.+]], %struct.anon.0** %[[__CONTEXT_ADDR]], align 8 // CHECK-NEXT: %[[TMP0:.+]] = load %struct.anon.0*, %struct.anon.0** %[[__CONTEXT_ADDR]], align 8 // CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon.0, %struct.anon.0* %[[TMP0]], i32 0, i32 0 // CHECK-NEXT: %[[TMP2:.+]] = load i32, i32* %[[TMP1]], align 4 // CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[LOGICAL_ADDR]], align 4 // CHECK-NEXT: %[[MUL:.+]] = mul i32 1, %[[TMP3]] // CHECK-NEXT: %[[ADD:.+]] = add i32 %[[TMP2]], %[[MUL]] // CHECK-NEXT: %[[TMP4:.+]] = load i32*, i32** %[[LOOPVAR_ADDR]], align 8 // CHECK-NEXT: store i32 %[[ADD]], i32* %[[TMP4]], align 4 // CHECK-NEXT: ret void // CHECK-NEXT: } // CHECK: ![[META0:[0-9]+]] = !{i32 1, !"wchar_size", i32 4} // CHECK: ![[META1:[0-9]+]] = !{i32 7, !"openmp", i32 51} // CHECK: ![[META2:[0-9]+]] = // CHECK: ![[LOOP3]] = distinct !{![[LOOP3]], ![[LOOPPROP4:[0-9]+]], ![[LOOPPROP5:[0-9]+]]} // CHECK: ![[LOOPPROP4]] = !{!"llvm.loop.unroll.enable"} // CHECK: ![[LOOPPROP5]] = !{!"llvm.loop.unroll.count", i32 13}
hermv_c_coo_u_lo.c
#include <string.h> #include "alphasparse/opt.h" #ifdef _OPENMP #include <omp.h> #endif #include "alphasparse/kernel.h" #include "alphasparse/util.h" #include <stdio.h> alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_COO *A, const ALPHA_Number *x, const ALPHA_Number beta, ALPHA_Number *y) { const ALPHA_INT m = A->rows; const ALPHA_INT n = A->cols; const ALPHA_INT nnz = A->nnz; const ALPHA_INT thread_num = alpha_get_thread_num(); ALPHA_Number **tmp = (ALPHA_Number **)malloc(sizeof(ALPHA_Number *) * thread_num); #ifdef _OPENMP #pragma omp parallel for num_threads(thread_num) #endif for (int i = 0; i < thread_num; ++i) { tmp[i] = malloc(sizeof(ALPHA_Number) * m); memset(tmp[i], 0, sizeof(ALPHA_Number) * m); } #ifdef _OPENMP #pragma omp parallel for num_threads(thread_num) #endif for (ALPHA_INT i = 0; i < nnz; ++i) { const ALPHA_INT tid = alpha_get_thread_id(); const ALPHA_INT r = A->row_indx[i]; const ALPHA_INT c = A->col_indx[i]; const ALPHA_Number origin_val = A->values[i]; ALPHA_Number conj_val; alpha_conj(conj_val, origin_val); if (r <= c) { continue; } ALPHA_Number v, v_c; alpha_mul(v, origin_val, alpha); alpha_mul(v_c, conj_val, alpha); { alpha_madde(tmp[tid][r], v, x[c]); alpha_madde(tmp[tid][c], v_c, x[r]); } } #ifdef _OPENMP #pragma omp parallel for num_threads(thread_num) #endif for (ALPHA_INT i = 0; i < m; ++i) { alpha_mul(y[i], beta, y[i]); alpha_madde(y[i], alpha, x[i]); for (ALPHA_INT j = 0; j < thread_num; ++j) { alpha_add(y[i], y[i], tmp[j][i]); } } return ALPHA_SPARSE_STATUS_SUCCESS; }
#include <string.h> #include "alphasparse/opt.h" #include "alphasparse/kernel.h" #include "alphasparse/util.h" #include <stdio.h> alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_COO *A, const ALPHA_Number *x, const ALPHA_Number beta, ALPHA_Number *y) { const ALPHA_INT m = A->rows; const ALPHA_INT n = A->cols; const ALPHA_INT nnz = A->nnz; const ALPHA_INT thread_num = alpha_get_thread_num(); ALPHA_Number **tmp = (ALPHA_Number **)malloc(sizeof(ALPHA_Number *) * thread_num); for (int i = 0; i < thread_num; ++i) { tmp[i] = malloc(sizeof(ALPHA_Number) * m); memset(tmp[i], 0, sizeof(ALPHA_Number) * m); } for (ALPHA_INT i = 0; i < nnz; ++i) { const ALPHA_INT tid = alpha_get_thread_id(); const ALPHA_INT r = A->row_indx[i]; const ALPHA_INT c = A->col_indx[i]; const ALPHA_Number origin_val = A->values[i]; ALPHA_Number conj_val; alpha_conj(conj_val, origin_val); if (r <= c) { continue; } ALPHA_Number v, v_c; alpha_mul(v, origin_val, alpha); alpha_mul(v_c, conj_val, alpha); { alpha_madde(tmp[tid][r], v, x[c]); alpha_madde(tmp[tid][c], v_c, x[r]); } } for (ALPHA_INT i = 0; i < m; ++i) { alpha_mul(y[i], beta, y[i]); alpha_madde(y[i], alpha, x[i]); for (ALPHA_INT j = 0; j < thread_num; ++j) { alpha_add(y[i], y[i], tmp[j][i]); } } return ALPHA_SPARSE_STATUS_SUCCESS; }
#include <string.h> #include "alphasparse/opt.h" #ifdef _OPENMP #include <omp.h> #endif #include "alphasparse/kernel.h" #include "alphasparse/util.h" #include <stdio.h> alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_COO *A, const ALPHA_Number *x, const ALPHA_Number beta, ALPHA_Number *y) { const ALPHA_INT m = A->rows; const ALPHA_INT n = A->cols; const ALPHA_INT nnz = A->nnz; const ALPHA_INT thread_num = alpha_get_thread_num(); ALPHA_Number **tmp = (ALPHA_Number **)malloc(sizeof(ALPHA_Number *) * thread_num); #ifdef _OPENMP #pragma omp parallel for num_threads(thread_num) #endif for (int i = 0; i < thread_num; ++i) { tmp[i] = malloc(sizeof(ALPHA_Number) * m); memset(tmp[i], 0, sizeof(ALPHA_Number) * m); } #ifdef _OPENMP #pragma omp parallel for num_threads(thread_num) #endif for (ALPHA_INT i = 0; i < nnz; ++i) { const ALPHA_INT tid = alpha_get_thread_id(); const ALPHA_INT r = A->row_indx[i]; const ALPHA_INT c = A->col_indx[i]; const ALPHA_Number origin_val = A->values[i]; ALPHA_Number conj_val; alpha_conj(conj_val, origin_val); if (r <= c) { continue; } ALPHA_Number v, v_c; alpha_mul(v, origin_val, alpha); alpha_mul(v_c, conj_val, alpha); { alpha_madde(tmp[tid][r], v, x[c]); alpha_madde(tmp[tid][c], v_c, x[r]); } } #ifdef _OPENMP #pragma omp parallel for num_threads(thread_num) #endif for (ALPHA_INT i = 0; i < m; ++i) { alpha_mul(y[i], beta, y[i]); alpha_madde(y[i], alpha, x[i]); for (ALPHA_INT j = 0; j < thread_num; ++j) { alpha_add(y[i], y[i], tmp[j][i]); } } return ALPHA_SPARSE_STATUS_SUCCESS; }
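The hermv_c_coo_u_lo.c kernel above avoids write conflicts in its parallel loop over the nonzeros by giving every thread a private length-m scratch vector (tmp[tid]) and folding those vectors into y in a second parallel pass. A minimal sketch of the alternative trade-off, updating y directly with atomics instead of per-thread buffers, is shown below for a plain real-valued COO matrix, where the conjugate term collapses to the value itself; the coo_d struct and the hermv_lower_atomic_sketch name are illustrative assumptions, not part of the alphasparse API:

#include <omp.h>

/* Sketch of the same unit-diagonal, lower-triangle update pattern for a
 * real-valued COO matrix, trading per-thread scratch buffers for atomics.
 * Struct and field names are illustrative, not the library's types. */
typedef struct {
  int rows, nnz;
  const int *row_indx, *col_indx;
  const double *values;
} coo_d;

void hermv_lower_atomic_sketch(double alpha, const coo_d *A, const double *x,
                               double beta, double *y) {
  #pragma omp parallel for
  for (int i = 0; i < A->rows; ++i)
    y[i] = beta * y[i] + alpha * x[i];   /* unit-diagonal contribution */

  #pragma omp parallel for
  for (int k = 0; k < A->nnz; ++k) {
    int r = A->row_indx[k], c = A->col_indx[k];
    if (r <= c) continue;                /* strictly lower triangle only */
    double v = alpha * A->values[k];
    #pragma omp atomic
    y[r] += v * x[c];
    #pragma omp atomic
    y[c] += v * x[r];                    /* mirrored (conjugate) term */
  }
}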
Example_simple_lock.1.c
/* * @@name: simple_lock.1c * @@type: C * @@compilable: yes * @@linkable: yes * @@expect: success */ #include <stdio.h> #include <omp.h> void skip(int i) {} void work(int i) {} int main() { omp_lock_t lck; int id; omp_init_lock(&lck); #pragma omp parallel shared(lck) private(id) { id = omp_get_thread_num(); omp_set_lock(&lck); /* only one thread at a time can execute this printf */ printf("My thread id is %d.\n", id); omp_unset_lock(&lck); while (! omp_test_lock(&lck)) { skip(id); /* we do not yet have the lock, so we must do something else */ } work(id); /* we now have the lock and can do the work */ omp_unset_lock(&lck); } omp_destroy_lock(&lck); return 0; }
/* * @@name: simple_lock.1c @@type: C @@compilable: yes @@linkable: yes * @@expect: success */ #include <stdio.h> #include <omp.h> void skip(int i) { } void work(int i) { } int main() { omp_lock_t lck; int id; omp_init_lock(&lck); id = omp_get_thread_num(); omp_set_lock(&lck); /* only one thread at a time can execute this printf */ printf("My thread id is %d.\n", id); omp_unset_lock(&lck); while (!omp_test_lock(&lck)) { skip(id); /* we do not yet have the lock, so we must do * something else */ } work(id); /* we now have the lock and can do the work */ omp_unset_lock(&lck); omp_destroy_lock(&lck); return 0; }
/* * @@name: simple_lock.1c @@type: C @@compilable: yes @@linkable: yes * @@expect: success */ #include <stdio.h> #include <omp.h> void skip(int i) { } void work(int i) { } int main() { omp_lock_t lck; int id; omp_init_lock(&lck); #pragma omp parallel shared(lck) private(id) { id = omp_get_thread_num(); omp_set_lock(&lck); /* only one thread at a time can execute this printf */ printf("My thread id is %d.\n", id); omp_unset_lock(&lck); while (!omp_test_lock(&lck)) { skip(id); /* we do not yet have the lock, so we must do * something else */ } work(id); /* we now have the lock and can do the work */ omp_unset_lock(&lck); } omp_destroy_lock(&lck); return 0; }
pi-omp1.c
/* * BSD 2-Clause License * * Copyright (c) 2020, Alessandro Capotondi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /** * @file exercise7.c * @author Alessandro Capotondi * @date 27 Mar 2020 * @brief Exercise 8 * * Pi calculation * @see https://dolly.fim.unimore.it/2019/course/view.php?id=152 */ #include <stdio.h> #include <omp.h> #include "utils.h" /** * @brief EX 8- Pi Calculation * * This program computes pi as * \pi = 4 arctan(1) * = 4 \int _0 ^1 \frac{1} {1 + x^2} dx * * @return void */ #include <stdio.h> #include <math.h> #include <omp.h> #include "utils.h" #if !defined(ITERS) #define ITERS (4) #endif #define NSTEPS 134217728 void exercise(){ long i; double dx = 1.0 / NSTEPS; double pi = 0.0; double start_time = omp_get_wtime(); #pragma omp parallel for (i = 0; i < NSTEPS; i++) { double x = (i + 0.5) * dx; #pragma omp critical pi += 1.0 / (1.0 + x * x); } pi *= 4.0 * dx; double run_time = omp_get_wtime() - start_time; double ref_pi = 4.0 * atan(1.0); printf("pi with %d steps is %.10f in %.6f seconds (error=%e)\n", NSTEPS, pi, run_time, fabs(ref_pi - pi)); } int main(int argc, char** argv) { for(int i=0; i<ITERS; i++){ printf("\n\n"); printf("============================\n"); printf("Test - Iteration %d...\n", i); printf("============================\n"); start_stats(); exercise(); collect_stats(); } printf("\n\n"); printf("============================\n"); printf("Statistics\n"); printf("============================\n"); print_stats(); return 0; }
/** * @file exercise7.c * @author Alessandro Capotondi * @date 27 Mar 2020 * @brief Exercise 8 * * Pi calculation * @see https://dolly.fim.unimore.it/2019/course/view.php?id=152 */ #include <stdio.h> #include <omp.h> #include "utils.h" /** * @brief EX 8- Pi Calculation * * This program computes pi as * \pi = 4 arctan(1) * = 4 \int _0 ^1 \frac{1} {1 + x^2} dx * * @return void */ #include <stdio.h> #include <math.h> #include <omp.h> #include "utils.h" #if !defined(ITERS) #define ITERS (4) #endif #define NSTEPS 134217728 void exercise() { long i; double dx = 1.0 / NSTEPS; double pi = 0.0; double start_time = omp_get_wtime(); for (i = 0; i < NSTEPS; i++) { double x = (i + 0.5) * dx; pi += 1.0 / (1.0 + x * x); } pi *= 4.0 * dx; double run_time = omp_get_wtime() - start_time; double ref_pi = 4.0 * atan(1.0); printf("pi with %d steps is %.10f in %.6f seconds (error=%e)\n", NSTEPS, pi, run_time, fabs(ref_pi - pi)); } int main(int argc, char **argv) { for (int i = 0; i < ITERS; i++) { printf("\n\n"); printf("============================\n"); printf("Test - Iteration %d...\n", i); printf("============================\n"); start_stats(); exercise(); collect_stats(); } printf("\n\n"); printf("============================\n"); printf("Statistics\n"); printf("============================\n"); print_stats(); return 0; }
/** * @file exercise7.c * @author Alessandro Capotondi * @date 27 Mar 2020 * @brief Exercise 8 * * Pi calculation * @see https://dolly.fim.unimore.it/2019/course/view.php?id=152 */ #include <stdio.h> #include <omp.h> #include "utils.h" /** * @brief EX 8- Pi Calculation * * This program computes pi as * \pi = 4 arctan(1) * = 4 \int _0 ^1 \frac{1} {1 + x^2} dx * * @return void */ #include <stdio.h> #include <math.h> #include <omp.h> #include "utils.h" #if !defined(ITERS) #define ITERS (4) #endif #define NSTEPS 134217728 void exercise() { long i; double dx = 1.0 / NSTEPS; double pi = 0.0; double start_time = omp_get_wtime(); #pragma omp parallel for (i = 0; i < NSTEPS; i++) { double x = (i + 0.5) * dx; #pragma omp critical pi += 1.0 / (1.0 + x * x); } pi *= 4.0 * dx; double run_time = omp_get_wtime() - start_time; double ref_pi = 4.0 * atan(1.0); printf("pi with %d steps is %.10f in %.6f seconds (error=%e)\n", NSTEPS, pi, run_time, fabs(ref_pi - pi)); } int main(int argc, char **argv) { for (int i = 0; i < ITERS; i++) { printf("\n\n"); printf("============================\n"); printf("Test - Iteration %d...\n", i); printf("============================\n"); start_stats(); exercise(); collect_stats(); } printf("\n\n"); printf("============================\n"); printf("Statistics\n"); printf("============================\n"); print_stats(); return 0; }
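In the pi-omp1.c row, the accumulation loop sits inside a bare #pragma omp parallel region rather than a worksharing construct, so every thread replays all NSTEPS iterations over the shared induction variable i and adds into pi under the critical section; the result is racy on i and inflated by roughly the thread count, which is presumably what the exercise is meant to expose. For contrast, a minimal sketch of the conventional reduction-based version of the same midpoint-rule integral (reusing the row's NSTEPS constant; the pi_reduction name is illustrative) is:

#include <math.h>
#include <stdio.h>
#include <omp.h>

#define NSTEPS 134217728

/* Sketch of the usual reduction-based variant: the loop is worksharing-
 * distributed and each thread accumulates a private partial sum that
 * OpenMP combines at the end, so no critical section is needed. */
double pi_reduction(void) {
  const double dx = 1.0 / NSTEPS;
  double pi = 0.0;
  #pragma omp parallel for reduction(+ : pi)
  for (long i = 0; i < NSTEPS; i++) {
    double x = (i + 0.5) * dx;
    pi += 1.0 / (1.0 + x * x);
  }
  return pi * 4.0 * dx;
}

int main(void) {
  double t0 = omp_get_wtime();
  double pi = pi_reduction();
  printf("pi = %.10f (error=%e) in %.6f s\n", pi,
         fabs(4.0 * atan(1.0) - pi), omp_get_wtime() - t0);
  return 0;
}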
GB_binop__lt_uint16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__lt_uint16) // A.*B function (eWiseMult): GB (_AemultB_08__lt_uint16) // A.*B function (eWiseMult): GB (_AemultB_02__lt_uint16) // A.*B function (eWiseMult): GB (_AemultB_04__lt_uint16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__lt_uint16) // A*D function (colscale): GB (_AxD__lt_uint16) // D*A function (rowscale): GB (_DxB__lt_uint16) // C+=B function (dense accum): GB (_Cdense_accumB__lt_uint16) // C+=b function (dense accum): GB (_Cdense_accumb__lt_uint16) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lt_uint16) // C=scalar+B GB (_bind1st__lt_uint16) // C=scalar+B' GB (_bind1st_tran__lt_uint16) // C=A+scalar GB (_bind2nd__lt_uint16) // C=A'+scalar GB (_bind2nd_tran__lt_uint16) // C type: bool // A type: uint16_t // A pattern? 0 // B type: uint16_t // B pattern? 0 // BinaryOp: cij = (aij < bij) #define GB_ATYPE \ uint16_t #define GB_BTYPE \ uint16_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint16_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint16_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x < y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LT || GxB_NO_UINT16 || GxB_NO_LT_UINT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__lt_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__lt_uint16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__lt_uint16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type uint16_t uint16_t bwork = (*((uint16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__lt_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__lt_uint16) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__lt_uint16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint16_t alpha_scalar ; uint16_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint16_t *) alpha_scalar_in)) 
; beta_scalar = (*((uint16_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__lt_uint16) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__lt_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__lt_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__lt_uint16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__lt_uint16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; uint16_t x = (*((uint16_t *) x_input)) ; uint16_t *Bx = (uint16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint16_t bij = GBX (Bx, p, false) ; Cx [p] = (x < bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__lt_uint16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; uint16_t *Ax = (uint16_t *) Ax_input ; uint16_t y = (*((uint16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint16_t aij = GBX (Ax, p, false) ; Cx [p] = (aij < y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x < aij) ; \ } GrB_Info GB (_bind1st_tran__lt_uint16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { 
// GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t x = (*((const uint16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij < y) ; \ } GrB_Info GB (_bind2nd_tran__lt_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t y = (*((const uint16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__lt_uint16) // A.*B function (eWiseMult): GB (_AemultB_08__lt_uint16) // A.*B function (eWiseMult): GB (_AemultB_02__lt_uint16) // A.*B function (eWiseMult): GB (_AemultB_04__lt_uint16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__lt_uint16) // A*D function (colscale): GB (_AxD__lt_uint16) // D*A function (rowscale): GB (_DxB__lt_uint16) // C+=B function (dense accum): GB (_Cdense_accumB__lt_uint16) // C+=b function (dense accum): GB (_Cdense_accumb__lt_uint16) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lt_uint16) // C=scalar+B GB (_bind1st__lt_uint16) // C=scalar+B' GB (_bind1st_tran__lt_uint16) // C=A+scalar GB (_bind2nd__lt_uint16) // C=A'+scalar GB (_bind2nd_tran__lt_uint16) // C type: bool // A type: uint16_t // A pattern? 0 // B type: uint16_t // B pattern? 0 // BinaryOp: cij = (aij < bij) #define GB_ATYPE \ uint16_t #define GB_BTYPE \ uint16_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint16_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint16_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x < y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LT || GxB_NO_UINT16 || GxB_NO_LT_UINT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__lt_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__lt_uint16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__lt_uint16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type uint16_t uint16_t bwork = (*((uint16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__lt_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__lt_uint16) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__lt_uint16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint16_t alpha_scalar ; uint16_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint16_t *) alpha_scalar_in)) 
; beta_scalar = (*((uint16_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__lt_uint16) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__lt_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__lt_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__lt_uint16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__lt_uint16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; uint16_t x = (*((uint16_t *) x_input)) ; uint16_t *Bx = (uint16_t *) Bx_input ; int64_t p ; for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint16_t bij = GBX (Bx, p, false) ; Cx [p] = (x < bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__lt_uint16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; uint16_t *Ax = (uint16_t *) Ax_input ; uint16_t y = (*((uint16_t *) y_input)) ; for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint16_t aij = GBX (Ax, p, false) ; Cx [p] = (aij < y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x < aij) ; \ } GrB_Info GB (_bind1st_tran__lt_uint16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t x = (*((const uint16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij < y) ; \ } GrB_Info GB (_bind2nd_tran__lt_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t y = (*((const uint16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
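A minimal standalone sketch of what the _bind1st__lt_uint16 kernel above computes, with the GBB/GBX access macros reduced to a plain bitmap test and a direct array read (non-iso case). The function name and the NULL-bitmap convention below are illustrative assumptions, not the SuiteSparse API.

#include <stdbool.h>
#include <stdint.h>

static void bind1st_lt_uint16_sketch(bool *Cx, uint16_t x,
                                     const uint16_t *Bx, const int8_t *Bb,
                                     int64_t bnz)
{
    for (int64_t p = 0 ; p < bnz ; p++)
    {
        /* Bb == NULL: B is full (every entry present); otherwise Bb[p] marks entries */
        if (Bb != NULL && !Bb [p]) continue ;
        Cx [p] = (x < Bx [p]) ;     /* the LT_UINT16 operator: z = (x < y) */
    }
}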
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__lt_uint16) // A.*B function (eWiseMult): GB (_AemultB_08__lt_uint16) // A.*B function (eWiseMult): GB (_AemultB_02__lt_uint16) // A.*B function (eWiseMult): GB (_AemultB_04__lt_uint16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__lt_uint16) // A*D function (colscale): GB (_AxD__lt_uint16) // D*A function (rowscale): GB (_DxB__lt_uint16) // C+=B function (dense accum): GB (_Cdense_accumB__lt_uint16) // C+=b function (dense accum): GB (_Cdense_accumb__lt_uint16) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lt_uint16) // C=scalar+B GB (_bind1st__lt_uint16) // C=scalar+B' GB (_bind1st_tran__lt_uint16) // C=A+scalar GB (_bind2nd__lt_uint16) // C=A'+scalar GB (_bind2nd_tran__lt_uint16) // C type: bool // A type: uint16_t // A pattern? 0 // B type: uint16_t // B pattern? 0 // BinaryOp: cij = (aij < bij) #define GB_ATYPE \ uint16_t #define GB_BTYPE \ uint16_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint16_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint16_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x < y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LT || GxB_NO_UINT16 || GxB_NO_LT_UINT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__lt_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__lt_uint16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__lt_uint16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type uint16_t uint16_t bwork = (*((uint16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__lt_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__lt_uint16) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__lt_uint16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint16_t alpha_scalar ; uint16_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint16_t *) alpha_scalar_in)) 
; beta_scalar = (*((uint16_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__lt_uint16) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__lt_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__lt_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__lt_uint16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__lt_uint16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; uint16_t x = (*((uint16_t *) x_input)) ; uint16_t *Bx = (uint16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint16_t bij = GBX (Bx, p, false) ; Cx [p] = (x < bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__lt_uint16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; uint16_t *Ax = (uint16_t *) Ax_input ; uint16_t y = (*((uint16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint16_t aij = GBX (Ax, p, false) ; Cx [p] = (aij < y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x < aij) ; \ } GrB_Info GB (_bind1st_tran__lt_uint16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { 
// GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t x = (*((const uint16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij < y) ; \ } GrB_Info GB (_bind2nd_tran__lt_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t y = (*((const uint16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
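The only difference between the two generated variants of this file is the "#pragma omp parallel for num_threads(nthreads) schedule(static)" placed on the bind1st and bind2nd loops. A minimal sketch of the parallelized bind2nd loop, again with the access macros simplified and hypothetical names; each iteration writes a distinct Cx[p] and reads only shared read-only data, so the loop needs no synchronization.

#include <stdbool.h>
#include <stdint.h>

static void bind2nd_lt_uint16_omp_sketch(bool *Cx, const uint16_t *Ax,
                                         uint16_t y, const int8_t *Ab,
                                         int64_t anz, int nthreads)
{
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (Ab != NULL && !Ab [p]) continue ;
        Cx [p] = (Ax [p] < y) ;     /* same operator, scalar bound on the right */
    }
}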
hybrid-hello.c
#include <stdio.h> #include <mpi.h> #include <omp.h> int main(int argc, char *argv[]) { int my_id, omp_rank; int provided, required=MPI_THREAD_FUNNELED; MPI_Init_thread(&argc, &argv, required, &provided); MPI_Comm_rank(MPI_COMM_WORLD, &my_id); #pragma omp parallel private(omp_rank) { omp_rank = omp_get_thread_num(); printf("I'm thread %d in process %d\n", omp_rank, my_id); } if (my_id == 0) { printf("\nProvided thread support level: %d\n", provided); printf(" %d - MPI_THREAD_SINGLE\n", MPI_THREAD_SINGLE); printf(" %d - MPI_THREAD_FUNNELED\n", MPI_THREAD_FUNNELED); printf(" %d - MPI_THREAD_SERIALIZED\n", MPI_THREAD_SERIALIZED); printf(" %d - MPI_THREAD_MULTIPLE\n", MPI_THREAD_MULTIPLE); } MPI_Finalize(); return 0; }
#include <stdio.h> #include <mpi.h> #include <omp.h> int main(int argc, char *argv[]) { int my_id, omp_rank; int provided, required = MPI_THREAD_FUNNELED; MPI_Init_thread(&argc, &argv, required, &provided); MPI_Comm_rank(MPI_COMM_WORLD, &my_id); omp_rank = omp_get_thread_num(); printf("I'm thread %d in process %d\n", omp_rank, my_id); if (my_id == 0) { printf("\nProvided thread support level: %d\n", provided); printf(" %d - MPI_THREAD_SINGLE\n", MPI_THREAD_SINGLE); printf(" %d - MPI_THREAD_FUNNELED\n", MPI_THREAD_FUNNELED); printf(" %d - MPI_THREAD_SERIALIZED\n", MPI_THREAD_SERIALIZED); printf(" %d - MPI_THREAD_MULTIPLE\n", MPI_THREAD_MULTIPLE); } MPI_Finalize(); return 0; }
#include <stdio.h> #include <mpi.h> #include <omp.h> int main(int argc, char *argv[]) { int my_id, omp_rank; int provided, required = MPI_THREAD_FUNNELED; MPI_Init_thread(&argc, &argv, required, &provided); MPI_Comm_rank(MPI_COMM_WORLD, &my_id); #pragma omp parallel private(omp_rank) { omp_rank = omp_get_thread_num(); printf("I'm thread %d in process %d\n", omp_rank, my_id); } if (my_id == 0) { printf("\nProvided thread support level: %d\n", provided); printf(" %d - MPI_THREAD_SINGLE\n", MPI_THREAD_SINGLE); printf(" %d - MPI_THREAD_FUNNELED\n", MPI_THREAD_FUNNELED); printf(" %d - MPI_THREAD_SERIALIZED\n", MPI_THREAD_SERIALIZED); printf(" %d - MPI_THREAD_MULTIPLE\n", MPI_THREAD_MULTIPLE); } MPI_Finalize(); return 0; }
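hybrid-hello.c requests MPI_THREAD_FUNNELED, but MPI_Init_thread reports the level actually granted through 'provided', which may be lower than what was asked for. A small hedged extension of the example that checks the granted level before entering any OpenMP region:

#include <stdio.h>
#include <mpi.h>

int main(int argc, char *argv[])
{
    int provided;
    MPI_Init_thread(&argc, &argv, MPI_THREAD_FUNNELED, &provided);
    if (provided < MPI_THREAD_FUNNELED) {
        /* the MPI library could not grant the requested thread support level */
        fprintf(stderr, "Insufficient MPI thread support: %d\n", provided);
        MPI_Abort(MPI_COMM_WORLD, 1);
    }
    /* ... OpenMP parallel region as in the example above ... */
    MPI_Finalize();
    return 0;
}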
stream.c
/*-----------------------------------------------------------------------*/ /* Program: Stream */ /* Revision: $Id: stream.c,v 5.9 2009/04/11 16:35:00 mccalpin Exp $ */ /* Original code developed by John D. McCalpin */ /* Programmers: John D. McCalpin */ /* Joe R. Zagar */ /* */ /* This program measures memory transfer rates in MB/s for simple */ /* computational kernels coded in C. */ /*-----------------------------------------------------------------------*/ /* Copyright 1991-2005: John D. McCalpin */ /*-----------------------------------------------------------------------*/ /* License: */ /* 1. You are free to use this program and/or to redistribute */ /* this program. */ /* 2. You are free to modify this program for your own use, */ /* including commercial use, subject to the publication */ /* restrictions in item 3. */ /* 3. You are free to publish results obtained from running this */ /* program, or from works that you derive from this program, */ /* with the following limitations: */ /* 3a. In order to be referred to as "STREAM benchmark results", */ /* published results must be in conformance to the STREAM */ /* Run Rules, (briefly reviewed below) published at */ /* http://www.cs.virginia.edu/stream/ref.html */ /* and incorporated herein by reference. */ /* As the copyright holder, John McCalpin retains the */ /* right to determine conformity with the Run Rules. */ /* 3b. Results based on modified source code or on runs not in */ /* accordance with the STREAM Run Rules must be clearly */ /* labelled whenever they are published. Examples of */ /* proper labelling include: */ /* "tuned STREAM benchmark results" */ /* "based on a variant of the STREAM benchmark code" */ /* Other comparable, clear and reasonable labelling is */ /* acceptable. */ /* 3c. Submission of results to the STREAM benchmark web site */ /* is encouraged, but not required. */ /* 4. Use of this program or creation of derived works based on this */ /* program constitutes acceptance of these licensing restrictions. */ /* 5. Absolutely no warranty is expressed or implied. */ /*-----------------------------------------------------------------------*/ # include <stdio.h> # include <math.h> # include <float.h> # include <limits.h> # include <sys/time.h> /* INSTRUCTIONS: * * 1) Stream requires a good bit of memory to run. Adjust the * value of 'N' (below) to give a 'timing calibration' of * at least 20 clock-ticks. This will provide rate estimates * that should be good to about 5% precision. */ #ifndef N # define N 2000000 #endif #ifndef NTIMES # define NTIMES 10 #endif #ifndef OFFSET # define OFFSET 0 #endif /* * 3) Compile the code with full optimization. Many compilers * generate unreasonably bad code before the optimizer tightens * things up. If the results are unreasonably good, on the * other hand, the optimizer might be too smart for me! * * Try compiling with: * cc -O stream_omp.c -o stream_omp * * This is known to work on Cray, SGI, IBM, and Sun machines. * * * 4) Mail the results to mccalpin@cs.virginia.edu * Be sure to include: * a) computer hardware model number and software revision * b) the compiler flags * c) all of the output from the test case. * Thanks! 
* */ # define HLINE "-------------------------------------------------------------\n" # ifndef MIN # define MIN(x,y) ((x)<(y)?(x):(y)) # endif # ifndef MAX # define MAX(x,y) ((x)>(y)?(x):(y)) # endif static double a[N+OFFSET], b[N+OFFSET], c[N+OFFSET]; static double avgtime[4] = {0}, maxtime[4] = {0}, mintime[4] = {FLT_MAX,FLT_MAX,FLT_MAX,FLT_MAX}; static char *label[4] = {"Copy: ", "Scale: ", "Add: ", "Triad: "}; static double bytes[4] = { 2 * sizeof(double) * N, 2 * sizeof(double) * N, 3 * sizeof(double) * N, 3 * sizeof(double) * N }; extern double mysecond(); extern void checkSTREAMresults(); #ifdef TUNED extern void tuned_STREAM_Copy(); extern void tuned_STREAM_Scale(double scalar); extern void tuned_STREAM_Add(); extern void tuned_STREAM_Triad(double scalar); #endif #ifdef _OPENMP extern int omp_get_num_threads(); #endif int main() { int quantum, checktick(); int BytesPerWord; register int j, k; double scalar, t, times[4][NTIMES]; /* --- SETUP --- determine precision and check timing --- */ printf(HLINE); printf("STREAM version $Revision: 5.9 $\n"); printf(HLINE); BytesPerWord = sizeof(double); printf("This system uses %d bytes per DOUBLE PRECISION word.\n", BytesPerWord); printf(HLINE); #ifdef NO_LONG_LONG printf("Array size = %d, Offset = %d\n" , N, OFFSET); #else printf("Array size = %llu, Offset = %d\n", (unsigned long long) N, OFFSET); #endif printf("Total memory required = %.1f MB.\n", (3.0 * BytesPerWord) * ( (double) N / 1048576.0)); printf("Each test is run %d times, but only\n", NTIMES); printf("the *best* time for each is used.\n"); #ifdef _OPENMP printf(HLINE); #pragma omp parallel { #pragma omp master { k = omp_get_num_threads(); printf ("Number of Threads requested = %i\n",k); } } #endif printf(HLINE); #pragma omp parallel { printf ("Printing one line per active thread....\n"); } /* Get initial value for system clock. 
*/ #pragma omp parallel for for (j=0; j<N; j++) { a[j] = 1.0; b[j] = 2.0; c[j] = 0.0; } printf(HLINE); if ( (quantum = checktick()) >= 1) printf("Your clock granularity/precision appears to be " "%d microseconds.\n", quantum); else { printf("Your clock granularity appears to be " "less than one microsecond.\n"); quantum = 1; } t = mysecond(); #pragma omp parallel for for (j = 0; j < N; j++) a[j] = 2.0E0 * a[j]; t = 1.0E6 * (mysecond() - t); printf("Each test below will take on the order" " of %d microseconds.\n", (int) t ); printf(" (= %d clock ticks)\n", (int) (t/quantum) ); printf("Increase the size of the arrays if this shows that\n"); printf("you are not getting at least 20 clock ticks per test.\n"); printf(HLINE); printf("WARNING -- The above is only a rough guideline.\n"); printf("For best results, please be sure you know the\n"); printf("precision of your system timer.\n"); printf(HLINE); /* --- MAIN LOOP --- repeat test cases NTIMES times --- */ scalar = 3.0; for (k=0; k<NTIMES; k++) { times[0][k] = mysecond(); #ifdef TUNED tuned_STREAM_Copy(); #else #pragma omp parallel for for (j=0; j<N; j++) c[j] = a[j]; #endif times[0][k] = mysecond() - times[0][k]; times[1][k] = mysecond(); #ifdef TUNED tuned_STREAM_Scale(scalar); #else #pragma omp parallel for for (j=0; j<N; j++) b[j] = scalar*c[j]; #endif times[1][k] = mysecond() - times[1][k]; times[2][k] = mysecond(); #ifdef TUNED tuned_STREAM_Add(); #else #pragma omp parallel for for (j=0; j<N; j++) c[j] = a[j]+b[j]; #endif times[2][k] = mysecond() - times[2][k]; times[3][k] = mysecond(); #ifdef TUNED tuned_STREAM_Triad(scalar); #else #pragma omp parallel for for (j=0; j<N; j++) a[j] = b[j]+scalar*c[j]; #endif times[3][k] = mysecond() - times[3][k]; } /* --- SUMMARY --- */ for (k=1; k<NTIMES; k++) /* note -- skip first iteration */ { for (j=0; j<4; j++) { avgtime[j] = avgtime[j] + times[j][k]; mintime[j] = MIN(mintime[j], times[j][k]); maxtime[j] = MAX(maxtime[j], times[j][k]); } } printf("Function Rate (MB/s) Avg time Min time Max time\n"); for (j=0; j<4; j++) { avgtime[j] = avgtime[j]/(double)(NTIMES-1); printf("%s%11.4f %11.4f %11.4f %11.4f\n", label[j], 1.0E-06 * bytes[j]/mintime[j], avgtime[j], mintime[j], maxtime[j]); } printf(HLINE); /* --- Check Results --- */ checkSTREAMresults(); printf(HLINE); return 0; } # define M 20 int checktick() { int i, minDelta, Delta; double t1, t2, timesfound[M]; /* Collect a sequence of M unique time values from the system. */ for (i = 0; i < M; i++) { t1 = mysecond(); while( ((t2=mysecond()) - t1) < 1.0E-6 ) ; timesfound[i] = t1 = t2; } /* * Determine the minimum difference between these M values. * This result will be our estimate (in microseconds) for the * clock granularity. */ minDelta = 1000000; for (i = 1; i < M; i++) { Delta = (int)( 1.0E6 * (timesfound[i]-timesfound[i-1])); minDelta = MIN(minDelta, MAX(Delta,0)); } return(minDelta); } /* A gettimeofday routine to give access to the wall clock timer on most UNIX-like systems. 
*/ #include <sys/time.h> double mysecond() { struct timeval tp; struct timezone tzp; int i; i = gettimeofday(&tp,&tzp); return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6 ); } void checkSTREAMresults () { double aj,bj,cj,scalar; double asum,bsum,csum; double epsilon; int j,k; /* reproduce initialization */ aj = 1.0; bj = 2.0; cj = 0.0; /* a[] is modified during timing check */ aj = 2.0E0 * aj; /* now execute timing loop */ scalar = 3.0; for (k=0; k<NTIMES; k++) { cj = aj; bj = scalar*cj; cj = aj+bj; aj = bj+scalar*cj; } aj = aj * (double) (N); bj = bj * (double) (N); cj = cj * (double) (N); asum = 0.0; bsum = 0.0; csum = 0.0; for (j=0; j<N; j++) { asum += a[j]; bsum += b[j]; csum += c[j]; } #ifdef VERBOSE printf ("Results Comparison: \n"); printf (" Expected : %f %f %f \n",aj,bj,cj); printf (" Observed : %f %f %f \n",asum,bsum,csum); #endif #ifndef abs #define abs(a) ((a) >= 0 ? (a) : -(a)) #endif epsilon = 1.e-8; if (abs(aj-asum)/asum > epsilon) { printf ("Failed Validation on array a[]\n"); printf (" Expected : %f \n",aj); printf (" Observed : %f \n",asum); } else if (abs(bj-bsum)/bsum > epsilon) { printf ("Failed Validation on array b[]\n"); printf (" Expected : %f \n",bj); printf (" Observed : %f \n",bsum); } else if (abs(cj-csum)/csum > epsilon) { printf ("Failed Validation on array c[]\n"); printf (" Expected : %f \n",cj); printf (" Observed : %f \n",csum); } else { printf ("Solution Validates\n"); } } void tuned_STREAM_Copy() { int j; #pragma omp parallel for for (j=0; j<N; j++) c[j] = a[j]; } void tuned_STREAM_Scale(double scalar) { int j; #pragma omp parallel for for (j=0; j<N; j++) b[j] = scalar*c[j]; } void tuned_STREAM_Add() { int j; #pragma omp parallel for for (j=0; j<N; j++) c[j] = a[j]+b[j]; } void tuned_STREAM_Triad(double scalar) { int j; #pragma omp parallel for for (j=0; j<N; j++) a[j] = b[j]+scalar*c[j]; }
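In the summary printed by stream.c, the rate column is 1.0E-06 * bytes[j] / mintime[j], where bytes[j] counts the arrays touched per kernel (two for Copy and Scale, three for Add and Triad, at 8 bytes per double). With the default N = 2000000 the three arrays occupy 3 * 8 * 2000000 / 1048576, roughly 45.8 MB, which is the "Total memory required" line the program prints. A tiny sketch of the same arithmetic, using a hypothetical best-of-NTIMES Triad time:

#include <stdio.h>

int main(void)
{
    const double N = 2000000.0;                 /* default array length */
    const double triad_bytes = 3.0 * 8.0 * N;   /* a[], b[], c[] each touched once */
    const double min_time = 0.004;              /* hypothetical best time, in seconds */
    printf("Triad: %11.4f MB/s\n", 1.0E-06 * triad_bytes / min_time);
    return 0;
}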
/*-----------------------------------------------------------------------*/ /* License: */ /* 1. You are free to use this program and/or to redistribute */ /* this program. */ /* 2. You are free to modify this program for your own use, */ /* including commercial use, subject to the publication */ /* restrictions in item 3. */ /* 3. You are free to publish results obtained from running this */ /* program, or from works that you derive from this program, */ /* with the following limitations: */ /* 3a. In order to be referred to as "STREAM benchmark results", */ /* published results must be in conformance to the STREAM */ /* Run Rules, (briefly reviewed below) published at */ /* http://www.cs.virginia.edu/stream/ref.html */ /* and incorporated herein by reference. */ /* As the copyright holder, John McCalpin retains the */ /* right to determine conformity with the Run Rules. */ /* 3b. Results based on modified source code or on runs not in */ /* accordance with the STREAM Run Rules must be clearly */ /* labelled whenever they are published. Examples of */ /* proper labelling include: */ /* "tuned STREAM benchmark results" */ /* "based on a variant of the STREAM benchmark code" */ /* Other comparable, clear and reasonable labelling is */ /* acceptable. */ /* 3c. Submission of results to the STREAM benchmark web site */ /* is encouraged, but not required. */ /* 4. Use of this program or creation of derived works based on this */ /* program constitutes acceptance of these licensing restrictions. */ /* 5. Absolutely no warranty is expressed or implied. */ /*-----------------------------------------------------------------------*/ #include <stdio.h> #include <math.h> #include <float.h> #include <limits.h> #include <sys/time.h> /* * INSTRUCTIONS: * * 1) Stream requires a good bit of memory to run. Adjust the value of 'N' * (below) to give a 'timing calibration' of at least 20 clock-ticks. This * will provide rate estimates that should be good to about 5% precision. */ #ifndef N #define N 2000000 #endif #ifndef NTIMES #define NTIMES 10 #endif #ifndef OFFSET #define OFFSET 0 #endif /* * 3) Compile the code with full optimization. Many compilers generate * unreasonably bad code before the optimizer tightens things up. If the * results are unreasonably good, on the other hand, the optimizer might be * too smart for me! * * Try compiling with: cc -O stream_omp.c -o stream_omp * * This is known to work on Cray, SGI, IBM, and Sun machines. * * * 4) Mail the results to mccalpin@cs.virginia.edu Be sure to include: a) * computer hardware model number and software revision b) the compiler flags * c) all of the output from the test case. Thanks! 
* */ #define HLINE "-------------------------------------------------------------\n" #ifndef MIN #define MIN(x,y) ((x)<(y)?(x):(y)) #endif #ifndef MAX #define MAX(x,y) ((x)>(y)?(x):(y)) #endif static double a[N + OFFSET], b[N + OFFSET], c[N + OFFSET]; static double avgtime[4] = {0}, maxtime[4] = {0}, mintime[4] = {FLT_MAX, FLT_MAX, FLT_MAX, FLT_MAX}; static char *label[4] = {"Copy: ", "Scale: ", "Add: ", "Triad: "}; static double bytes[4] = { 2 * sizeof(double) * N, 2 * sizeof(double) * N, 3 * sizeof(double) * N, 3 * sizeof(double) * N }; extern double mysecond(); extern void checkSTREAMresults(); #ifdef TUNED extern void tuned_STREAM_Copy(); extern void tuned_STREAM_Scale(double scalar); extern void tuned_STREAM_Add(); extern void tuned_STREAM_Triad(double scalar); #endif int main() { int quantum, checktick(); int BytesPerWord; register int j, k; double scalar, t, times[4][NTIMES]; /* --- SETUP --- determine precision and check timing --- */ printf(HLINE); printf("STREAM version $Revision: 5.9 $\n"); printf(HLINE); BytesPerWord = sizeof(double); printf("This system uses %d bytes per DOUBLE PRECISION word.\n", BytesPerWord); printf(HLINE); #ifdef NO_LONG_LONG printf("Array size = %d, Offset = %d\n", N, OFFSET); #else printf("Array size = %llu, Offset = %d\n", (unsigned long long)N, OFFSET); #endif printf("Total memory required = %.1f MB.\n", (3.0 * BytesPerWord) * ((double)N / 1048576.0)); printf("Each test is run %d times, but only\n", NTIMES); printf("the *best* time for each is used.\n"); printf(HLINE); printf("Printing one line per active thread....\n"); /* Get initial value for system clock. */ for (j = 0; j < N; j++) { a[j] = 1.0; b[j] = 2.0; c[j] = 0.0; } printf(HLINE); if ((quantum = checktick()) >= 1) printf("Your clock granularity/precision appears to be " "%d microseconds.\n", quantum); else { printf("Your clock granularity appears to be " "less than one microsecond.\n"); quantum = 1; } t = mysecond(); for (j = 0; j < N; j++) a[j] = 2.0E0 * a[j]; t = 1.0E6 * (mysecond() - t); printf("Each test below will take on the order" " of %d microseconds.\n", (int)t); printf(" (= %d clock ticks)\n", (int)(t / quantum)); printf("Increase the size of the arrays if this shows that\n"); printf("you are not getting at least 20 clock ticks per test.\n"); printf(HLINE); printf("WARNING -- The above is only a rough guideline.\n"); printf("For best results, please be sure you know the\n"); printf("precision of your system timer.\n"); printf(HLINE); /* --- MAIN LOOP --- repeat test cases NTIMES times --- */ scalar = 3.0; for (k = 0; k < NTIMES; k++) { times[0][k] = mysecond(); #ifdef TUNED tuned_STREAM_Copy(); #else for (j = 0; j < N; j++) c[j] = a[j]; #endif times[0][k] = mysecond() - times[0][k]; times[1][k] = mysecond(); #ifdef TUNED tuned_STREAM_Scale(scalar); #else for (j = 0; j < N; j++) b[j] = scalar * c[j]; #endif times[1][k] = mysecond() - times[1][k]; times[2][k] = mysecond(); #ifdef TUNED tuned_STREAM_Add(); #else for (j = 0; j < N; j++) c[j] = a[j] + b[j]; #endif times[2][k] = mysecond() - times[2][k]; times[3][k] = mysecond(); #ifdef TUNED tuned_STREAM_Triad(scalar); #else for (j = 0; j < N; j++) a[j] = b[j] + scalar * c[j]; #endif times[3][k] = mysecond() - times[3][k]; } /* --- SUMMARY --- */ for (k = 1; k < NTIMES; k++)/* note -- skip first iteration */ { for (j = 0; j < 4; j++) { avgtime[j] = avgtime[j] + times[j][k]; mintime[j] = MIN(mintime[j], times[j][k]); maxtime[j] = MAX(maxtime[j], times[j][k]); } } printf("Function Rate (MB/s) Avg time Min time Max time\n"); for (j = 0; j < 
4; j++) { avgtime[j] = avgtime[j] / (double)(NTIMES - 1); printf("%s%11.4f %11.4f %11.4f %11.4f\n", label[j], 1.0E-06 * bytes[j] / mintime[j], avgtime[j], mintime[j], maxtime[j]); } printf(HLINE); /* --- Check Results --- */ checkSTREAMresults(); printf(HLINE); return 0; } #define M 20 int checktick() { int i, minDelta, Delta; double t1, t2, timesfound[M]; /* Collect a sequence of M unique time values from the system. */ for (i = 0; i < M; i++) { t1 = mysecond(); while (((t2 = mysecond()) - t1) < 1.0E-6) ; timesfound[i] = t1 = t2; } /* * Determine the minimum difference between these M values. This result * will be our estimate (in microseconds) for the clock granularity. */ minDelta = 1000000; for (i = 1; i < M; i++) { Delta = (int)(1.0E6 * (timesfound[i] - timesfound[i - 1])); minDelta = MIN(minDelta, MAX(Delta, 0)); } return (minDelta); } /* * A gettimeofday routine to give access to the wall clock timer on most * UNIX-like systems. */ #include <sys/time.h> double mysecond() { struct timeval tp; struct timezone tzp; int i; i = gettimeofday(&tp, &tzp); return ((double)tp.tv_sec + (double)tp.tv_usec * 1.e-6); } void checkSTREAMresults() { double aj, bj, cj, scalar; double asum, bsum, csum; double epsilon; int j, k; /* reproduce initialization */ aj = 1.0; bj = 2.0; cj = 0.0; /* a[] is modified during timing check */ aj = 2.0E0 * aj; /* now execute timing loop */ scalar = 3.0; for (k = 0; k < NTIMES; k++) { cj = aj; bj = scalar * cj; cj = aj + bj; aj = bj + scalar * cj; } aj = aj * (double)(N); bj = bj * (double)(N); cj = cj * (double)(N); asum = 0.0; bsum = 0.0; csum = 0.0; for (j = 0; j < N; j++) { asum += a[j]; bsum += b[j]; csum += c[j]; } #ifdef VERBOSE printf("Results Comparison: \n"); printf(" Expected : %f %f %f \n", aj, bj, cj); printf(" Observed : %f %f %f \n", asum, bsum, csum); #endif #ifndef abs #define abs(a) ((a) >= 0 ? (a) : -(a)) #endif epsilon = 1.e-8; if (abs(aj - asum) / asum > epsilon) { printf("Failed Validation on array a[]\n"); printf(" Expected : %f \n", aj); printf(" Observed : %f \n", asum); } else if (abs(bj - bsum) / bsum > epsilon) { printf("Failed Validation on array b[]\n"); printf(" Expected : %f \n", bj); printf(" Observed : %f \n", bsum); } else if (abs(cj - csum) / csum > epsilon) { printf("Failed Validation on array c[]\n"); printf(" Expected : %f \n", cj); printf(" Observed : %f \n", csum); } else { printf("Solution Validates\n"); } } void tuned_STREAM_Copy() { int j; for (j = 0; j < N; j++) c[j] = a[j]; } void tuned_STREAM_Scale(double scalar) { int j; for (j = 0; j < N; j++) b[j] = scalar * c[j]; } void tuned_STREAM_Add() { int j; for (j = 0; j < N; j++) c[j] = a[j] + b[j]; } void tuned_STREAM_Triad(double scalar) { int j; for (j = 0; j < N; j++) a[j] = b[j] + scalar * c[j]; }
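The timer behind both variants is mysecond(), a gettimeofday() wrapper with at best microsecond resolution; checktick() then estimates the real granularity by collecting M distinct timestamps. On POSIX systems a monotonic clock is a common alternative when wall-clock adjustments are a concern; a minimal sketch, not part of the benchmark:

#include <time.h>

static double wall_seconds(void)
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);    /* unaffected by wall-clock adjustments */
    return (double) ts.tv_sec + 1.0E-9 * (double) ts.tv_nsec;
}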
/*-----------------------------------------------------------------------*/ /* License: */ /* 1. You are free to use this program and/or to redistribute */ /* this program. */ /* 2. You are free to modify this program for your own use, */ /* including commercial use, subject to the publication */ /* restrictions in item 3. */ /* 3. You are free to publish results obtained from running this */ /* program, or from works that you derive from this program, */ /* with the following limitations: */ /* 3a. In order to be referred to as "STREAM benchmark results", */ /* published results must be in conformance to the STREAM */ /* Run Rules, (briefly reviewed below) published at */ /* http://www.cs.virginia.edu/stream/ref.html */ /* and incorporated herein by reference. */ /* As the copyright holder, John McCalpin retains the */ /* right to determine conformity with the Run Rules. */ /* 3b. Results based on modified source code or on runs not in */ /* accordance with the STREAM Run Rules must be clearly */ /* labelled whenever they are published. Examples of */ /* proper labelling include: */ /* "tuned STREAM benchmark results" */ /* "based on a variant of the STREAM benchmark code" */ /* Other comparable, clear and reasonable labelling is */ /* acceptable. */ /* 3c. Submission of results to the STREAM benchmark web site */ /* is encouraged, but not required. */ /* 4. Use of this program or creation of derived works based on this */ /* program constitutes acceptance of these licensing restrictions. */ /* 5. Absolutely no warranty is expressed or implied. */ /*-----------------------------------------------------------------------*/ #include <stdio.h> #include <math.h> #include <float.h> #include <limits.h> #include <sys/time.h> /* * INSTRUCTIONS: * * 1) Stream requires a good bit of memory to run. Adjust the value of 'N' * (below) to give a 'timing calibration' of at least 20 clock-ticks. This * will provide rate estimates that should be good to about 5% precision. */ #ifndef N #define N 2000000 #endif #ifndef NTIMES #define NTIMES 10 #endif #ifndef OFFSET #define OFFSET 0 #endif /* * 3) Compile the code with full optimization. Many compilers generate * unreasonably bad code before the optimizer tightens things up. If the * results are unreasonably good, on the other hand, the optimizer might be * too smart for me! * * Try compiling with: cc -O stream_omp.c -o stream_omp * * This is known to work on Cray, SGI, IBM, and Sun machines. * * * 4) Mail the results to mccalpin@cs.virginia.edu Be sure to include: a) * computer hardware model number and software revision b) the compiler flags * c) all of the output from the test case. Thanks! 
* */ #define HLINE "-------------------------------------------------------------\n" #ifndef MIN #define MIN(x,y) ((x)<(y)?(x):(y)) #endif #ifndef MAX #define MAX(x,y) ((x)>(y)?(x):(y)) #endif static double a[N + OFFSET], b[N + OFFSET], c[N + OFFSET]; static double avgtime[4] = {0}, maxtime[4] = {0}, mintime[4] = {FLT_MAX, FLT_MAX, FLT_MAX, FLT_MAX}; static char *label[4] = {"Copy: ", "Scale: ", "Add: ", "Triad: "}; static double bytes[4] = { 2 * sizeof(double) * N, 2 * sizeof(double) * N, 3 * sizeof(double) * N, 3 * sizeof(double) * N }; extern double mysecond(); extern void checkSTREAMresults(); #ifdef TUNED extern void tuned_STREAM_Copy(); extern void tuned_STREAM_Scale(double scalar); extern void tuned_STREAM_Add(); extern void tuned_STREAM_Triad(double scalar); #endif #ifdef _OPENMP extern int omp_get_num_threads(); #endif int main() { int quantum, checktick(); int BytesPerWord; register int j, k; double scalar, t, times[4][NTIMES]; /* --- SETUP --- determine precision and check timing --- */ printf(HLINE); printf("STREAM version $Revision: 5.9 $\n"); printf(HLINE); BytesPerWord = sizeof(double); printf("This system uses %d bytes per DOUBLE PRECISION word.\n", BytesPerWord); printf(HLINE); #ifdef NO_LONG_LONG printf("Array size = %d, Offset = %d\n", N, OFFSET); #else printf("Array size = %llu, Offset = %d\n", (unsigned long long)N, OFFSET); #endif printf("Total memory required = %.1f MB.\n", (3.0 * BytesPerWord) * ((double)N / 1048576.0)); printf("Each test is run %d times, but only\n", NTIMES); printf("the *best* time for each is used.\n"); #ifdef _OPENMP printf(HLINE); #pragma omp parallel { #pragma omp master { k = omp_get_num_threads(); printf("Number of Threads requested = %i\n", k); } } #endif printf(HLINE); #pragma omp parallel { printf("Printing one line per active thread....\n"); } /* Get initial value for system clock. 
*/ #pragma omp parallel for for (j = 0; j < N; j++) { a[j] = 1.0; b[j] = 2.0; c[j] = 0.0; } printf(HLINE); if ((quantum = checktick()) >= 1) printf("Your clock granularity/precision appears to be " "%d microseconds.\n", quantum); else { printf("Your clock granularity appears to be " "less than one microsecond.\n"); quantum = 1; } t = mysecond(); #pragma omp parallel for for (j = 0; j < N; j++) a[j] = 2.0E0 * a[j]; t = 1.0E6 * (mysecond() - t); printf("Each test below will take on the order" " of %d microseconds.\n", (int)t); printf(" (= %d clock ticks)\n", (int)(t / quantum)); printf("Increase the size of the arrays if this shows that\n"); printf("you are not getting at least 20 clock ticks per test.\n"); printf(HLINE); printf("WARNING -- The above is only a rough guideline.\n"); printf("For best results, please be sure you know the\n"); printf("precision of your system timer.\n"); printf(HLINE); /* --- MAIN LOOP --- repeat test cases NTIMES times --- */ scalar = 3.0; for (k = 0; k < NTIMES; k++) { times[0][k] = mysecond(); #ifdef TUNED tuned_STREAM_Copy(); #else #pragma omp parallel for for (j = 0; j < N; j++) c[j] = a[j]; #endif times[0][k] = mysecond() - times[0][k]; times[1][k] = mysecond(); #ifdef TUNED tuned_STREAM_Scale(scalar); #else #pragma omp parallel for for (j = 0; j < N; j++) b[j] = scalar * c[j]; #endif times[1][k] = mysecond() - times[1][k]; times[2][k] = mysecond(); #ifdef TUNED tuned_STREAM_Add(); #else #pragma omp parallel for for (j = 0; j < N; j++) c[j] = a[j] + b[j]; #endif times[2][k] = mysecond() - times[2][k]; times[3][k] = mysecond(); #ifdef TUNED tuned_STREAM_Triad(scalar); #else #pragma omp parallel for for (j = 0; j < N; j++) a[j] = b[j] + scalar * c[j]; #endif times[3][k] = mysecond() - times[3][k]; } /* --- SUMMARY --- */ for (k = 1; k < NTIMES; k++)/* note -- skip first iteration */ { for (j = 0; j < 4; j++) { avgtime[j] = avgtime[j] + times[j][k]; mintime[j] = MIN(mintime[j], times[j][k]); maxtime[j] = MAX(maxtime[j], times[j][k]); } } printf("Function Rate (MB/s) Avg time Min time Max time\n"); for (j = 0; j < 4; j++) { avgtime[j] = avgtime[j] / (double)(NTIMES - 1); printf("%s%11.4f %11.4f %11.4f %11.4f\n", label[j], 1.0E-06 * bytes[j] / mintime[j], avgtime[j], mintime[j], maxtime[j]); } printf(HLINE); /* --- Check Results --- */ checkSTREAMresults(); printf(HLINE); return 0; } #define M 20 int checktick() { int i, minDelta, Delta; double t1, t2, timesfound[M]; /* Collect a sequence of M unique time values from the system. */ for (i = 0; i < M; i++) { t1 = mysecond(); while (((t2 = mysecond()) - t1) < 1.0E-6) ; timesfound[i] = t1 = t2; } /* * Determine the minimum difference between these M values. This result * will be our estimate (in microseconds) for the clock granularity. */ minDelta = 1000000; for (i = 1; i < M; i++) { Delta = (int)(1.0E6 * (timesfound[i] - timesfound[i - 1])); minDelta = MIN(minDelta, MAX(Delta, 0)); } return (minDelta); } /* * A gettimeofday routine to give access to the wall clock timer on most * UNIX-like systems. 
*/ #include <sys/time.h> double mysecond() { struct timeval tp; struct timezone tzp; int i; i = gettimeofday(&tp, &tzp); return ((double)tp.tv_sec + (double)tp.tv_usec * 1.e-6); } void checkSTREAMresults() { double aj, bj, cj, scalar; double asum, bsum, csum; double epsilon; int j, k; /* reproduce initialization */ aj = 1.0; bj = 2.0; cj = 0.0; /* a[] is modified during timing check */ aj = 2.0E0 * aj; /* now execute timing loop */ scalar = 3.0; for (k = 0; k < NTIMES; k++) { cj = aj; bj = scalar * cj; cj = aj + bj; aj = bj + scalar * cj; } aj = aj * (double)(N); bj = bj * (double)(N); cj = cj * (double)(N); asum = 0.0; bsum = 0.0; csum = 0.0; for (j = 0; j < N; j++) { asum += a[j]; bsum += b[j]; csum += c[j]; } #ifdef VERBOSE printf("Results Comparison: \n"); printf(" Expected : %f %f %f \n", aj, bj, cj); printf(" Observed : %f %f %f \n", asum, bsum, csum); #endif #ifndef abs #define abs(a) ((a) >= 0 ? (a) : -(a)) #endif epsilon = 1.e-8; if (abs(aj - asum) / asum > epsilon) { printf("Failed Validation on array a[]\n"); printf(" Expected : %f \n", aj); printf(" Observed : %f \n", asum); } else if (abs(bj - bsum) / bsum > epsilon) { printf("Failed Validation on array b[]\n"); printf(" Expected : %f \n", bj); printf(" Observed : %f \n", bsum); } else if (abs(cj - csum) / csum > epsilon) { printf("Failed Validation on array c[]\n"); printf(" Expected : %f \n", cj); printf(" Observed : %f \n", csum); } else { printf("Solution Validates\n"); } } void tuned_STREAM_Copy() { int j; #pragma omp parallel for for (j = 0; j < N; j++) c[j] = a[j]; } void tuned_STREAM_Scale(double scalar) { int j; #pragma omp parallel for for (j = 0; j < N; j++) b[j] = scalar * c[j]; } void tuned_STREAM_Add() { int j; #pragma omp parallel for for (j = 0; j < N; j++) c[j] = a[j] + b[j]; } void tuned_STREAM_Triad(double scalar) { int j; #pragma omp parallel for for (j = 0; j < N; j++) a[j] = b[j] + scalar * c[j]; }
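The OpenMP variant reports its team size with the parallel/master pattern because omp_get_num_threads() returns 1 when called outside a parallel region. A compact, self-contained version of that query, assuming an OpenMP-enabled build (for example, gcc -fopenmp):

#include <stdio.h>
#include <omp.h>

int main(void)
{
    int k = 1;
    #pragma omp parallel
    {
        #pragma omp master
        k = omp_get_num_threads();      /* team size, valid only inside the region */
    }
    printf("Number of Threads requested = %d\n", k);
    return 0;
}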
ft.c
/*-------------------------------------------------------------------- NAS Parallel Benchmarks 2.3 OpenMP C versions - FT This benchmark is an OpenMP C version of the NPB FT code. The OpenMP C versions are developed by RWCP and derived from the serial Fortran versions in "NPB 2.3-serial" developed by NAS. Permission to use, copy, distribute and modify this software for any purpose with or without fee is hereby granted. This software is provided "as is" without express or implied warranty. Send comments on the OpenMP C versions to pdp-openmp@rwcp.or.jp Information on OpenMP activities at RWCP is available at: http://pdplab.trc.rwcp.or.jp/pdperf/Omni/ Information on NAS Parallel Benchmarks 2.3 is available at: http://www.nas.nasa.gov/NAS/NPB/ --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- Authors: D. Bailey W. Saphir OpenMP C version: S. Satoh --------------------------------------------------------------------*/ #include "npb-C.h" /* global variables */ #include "global.h" /* function declarations */ static void compute_initial_conditions (double u0_r[NTOTAL], double u0_i[NTOTAL]); static void ipow46 (double a, int exponent, double *result); static void setup (void); static void print_timers (void); static void fft (int dir, double x1_r[NTOTAL], double x1_i[NTOTAL], double x2_r[NTOTAL], double x2_i[NTOTAL]); static void fft_init (int n); static void cfftz (int is, int m, int n, double x_r[NTOTAL], double x_i[NTOTAL], double y_r[NTOTAL], double y_i[NTOTAL], int di1, int di2); static void fftz2 (int is, int l, int m, int n, double u_r[NX], double u_i[NX], double x_r[NTOTAL], double x_i[NTOTAL], double y_r[NTOTAL], double y_i[NTOTAL], int di1, int di2); static int ilog2 (int n); static void verify (int d1, int d2, int d3, int nt, boolean * verified, char *classT); /*-------------------------------------------------------------------- c FT benchmark c-------------------------------------------------------------------*/ int main (int argc, char **argv) { /*c------------------------------------------------------------------- c-------------------------------------------------------------------*/ int i_main, ierr; /*------------------------------------------------------------------ c u0, u1, u2 are the main arrays in the problem. c Depending on the decomposition, these arrays will have different c dimensions. To accomodate all possibilities, we allocate them as c one-dimensional arrays and pass them to subroutines for different c views c - u0 contains the initial (transformed) initial condition c - u1 and u2 are working arrays c - indexmap maps i,j,k of u0 to the correct i^2+j^2+k^2 for the c time evolution operator. c-----------------------------------------------------------------*/ /*-------------------------------------------------------------------- c Large arrays are in common so that they are allocated on the c heap rather than the stack. This common block is not c referenced directly anywhere else. Padding is to avoid accidental c cache problems, since all array sizes are powers of two. 
c-------------------------------------------------------------------*/ static double u0_r[NTOTAL]; //u0_r[NZ][NY][NX]; static double u0_i[NTOTAL]; //u0_i[NZ][NY][NX]; static double u1_r[NTOTAL]; //u1_r[NZ][NY][NX]; static double u1_i[NTOTAL]; //u1_i[NZ][NY][NX]; static double u2_r[NTOTAL]; //u2_r[NZ][NY][NX]; static double u2_i[NTOTAL]; //u2_i[NZ][NY][NX]; static int indexmap[NTOTAL]; //indexmap[NZ][NY][NX]; int iter; int nthreads = 1; double total_time, mflops; boolean verified; char classT; ////////////////////////////////// // Used for compute_indexmap(). // ////////////////////////////////// int i, j, k, ii, ii2, jj, ij2, kk; int m; double ap; //////////////////////// // Used for evolve(). // //////////////////////// //int i, j, k; ////////////////////////// // Used for checksum(). // ////////////////////////// //int m, j, int q, r, s; double chk_r, chk_i; ///////////////////// // Used for fft(). // ///////////////////// int dir; static double y0_r[NTOTAL]; static double y0_i[NTOTAL]; static double y1_r[NTOTAL]; static double y1_i[NTOTAL]; int logNX, logNY, logNZ; /////////////////////// //Used for cffts1(). // /////////////////////// //int i, j, k, jj, m; int id; int is; /////////////////////// // Used for cfftz(). // /////////////////////// int l; /////////////////////// // Used for fftz2(). // /////////////////////// int k_fftz2, n1, li, lj, lk, ku, i_fftz2, i11, i12, i21, i22; double u1_rf, x11_r, x21_r; double u1_if, x11_i, x21_i; int idx, p, nn; double x11real, x11imag, x21real, x21imag; /*-------------------------------------------------------------------- c Run the entire problem once to make sure all data is touched. c This reduces variable startup costs, which is important for such a c short benchmark. The other NPB 2 implementations are similar. c-------------------------------------------------------------------*/ for (i_main = 0; i_main < T_MAX; i_main++) { timer_clear (i_main); } setup (); { //compute_indexmap (indexmap); /*-------------------------------------------------------------------- c compute function from local (i,j,k) to ibar^2+jbar^2+kbar^2 c for time evolution exponent. c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c basically we want to convert the fortran indices c 1 2 3 4 5 6 7 8 c to c 0 1 2 3 -4 -3 -2 -1 c The following magic formula does the trick: c mod(i-1+n/2, n) - n/2 c-------------------------------------------------------------------*/ #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; ii = (i + NX / 2) % NX - NX / 2; ii2 = ii * ii; jj = (j + NY / 2) % NY - NY / 2; ij2 = jj * jj + ii2; kk = (k + NZ / 2) % NZ - NZ / 2; indexmap[m] = kk * kk + ij2; } /*-------------------------------------------------------------------- c compute array of exponentials for time evolution. c-------------------------------------------------------------------*/ { ap = -4.0 * ALPHA * PI * PI; ex[0] = 1.0; ex[1] = exp (ap); for (i = 2; i <= EXPMAX; i++) { ex[i] = ex[i - 1] * ex[1]; } } /* end single */ { compute_initial_conditions (u1_r, u1_i); fft_init (dims[0][0]); } fft (1, u1_r, u1_i, u0_r, u0_i); } /* end parallel */ /*-------------------------------------------------------------------- c Start over from the beginning. Note that all operations must c be timed, in contrast to other benchmarks. 
c-------------------------------------------------------------------*/ for (i_main = 0; i_main < T_MAX; i_main++) { timer_clear (i_main); } timer_start (T_TOTAL); if (TIMERS_ENABLED == TRUE) timer_start (T_SETUP); //#pragma omp parallel private(iter) firstprivate(niter) #pragma acc data \ create(ex[0:EXPMAX+1]) \ create(indexmap[0:NTOTAL]) \ create(u_r[0:NX], u_i[0:NX]) \ create(u1_r[0:NTOTAL], u1_i[0:NTOTAL]) \ create(u0_r[0:NTOTAL], u0_i[0:NTOTAL]) \ create(u2_r[0:NTOTAL], u2_i[0:NTOTAL]) \ create(y0_r[0:NTOTAL], y0_i[0:NTOTAL]) \ create(y1_r[0:NTOTAL], y1_i[0:NTOTAL]) { //compute_indexmap (indexmap); /*-------------------------------------------------------------------- c compute function from local (i,j,k) to ibar^2+jbar^2+kbar^2 c for time evolution exponent. c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c basically we want to convert the fortran indices c 1 2 3 4 5 6 7 8 c to c 0 1 2 3 -4 -3 -2 -1 c The following magic formula does the trick: c mod(i-1+n/2, n) - n/2 c-------------------------------------------------------------------*/ #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; ii = (i + NX / 2) % NX - NX / 2; ii2 = ii * ii; jj = (j + NY / 2) % NY - NY / 2; ij2 = jj * jj + ii2; kk = (k + NZ / 2) % NZ - NZ / 2; indexmap[m] = kk * kk + ij2; } /*-------------------------------------------------------------------- c compute array of exponentials for time evolution. c-------------------------------------------------------------------*/ { ap = -4.0 * ALPHA * PI * PI; ex[0] = 1.0; ex[1] = exp (ap); for (i = 2; i <= EXPMAX; i++) { ex[i] = ex[i - 1] * ex[1]; } } /* end single */ #pragma acc update device(ex[0:EXPMAX+1]) { compute_initial_conditions (u1_r, u1_i); fft_init (dims[0][0]); } #pragma acc update device(u_r[0:NX], u_i[0:NX], \ u1_r[0:NTOTAL], u1_i[0:NTOTAL]) if (TIMERS_ENABLED == TRUE) { timer_stop (T_SETUP); } if (TIMERS_ENABLED == TRUE) { timer_start (T_FFT); } //fft (1, u1_r, u1_i, u0_r, u0_i); //START_FFT// dir = 1; logNX = ilog2(NX); logNY = ilog2(NY); logNZ = ilog2(NZ); /*-------------------------------------------------------------------- c note: args u1, x2 must be different arrays c note: args for cfftsx are (direction, layout, xin, xout, scratch) c xin/xout may be the same and it can be somewhat faster c if they are c-------------------------------------------------------------------*/ { if (dir == 1) { //cffts1 (1, logNX, u1_r, u1_i, u1_r, u1_i, y0_r, y0_i, y1_r, y1_i); /* u1 -> u1 */ is = 1; #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = i * NZ * NY + j * NZ + k; y0_r[id] = u1_r[m]; y0_i[id] = u1_i[m]; } //cfftz (is, logNX, NX, y0_r, y0_i, y1_r, y1_i, NZ, NY); /*-------------------------------------------------------------------- c Perform one variant of the Stockham FFT. c-------------------------------------------------------------------*/ for (l = 1; l <= logNX; l += 2) { //fftz2 (is, l, logNX, NX, u_r, u_i, y0_r, y0_i, y1_r, y1_i, NZ, NY); /*-------------------------------------------------------------------- c Set initial parameters. 
c-------------------------------------------------------------------*/ nn = NZ * NY; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NX; idx++) { n1 = NX / 2; if (l - 1 == 0) { lk = 1; } else { lk = 2 << ((l - 1) - 1); } if (logNX - l == 0) { li = 1; } else { li = 2 << ((logNX - l) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y0_r[i11 + p]; x11imag = y0_i[i11 + p]; x21real = y0_r[i12 + p]; x21imag = y0_i[i12 + p]; y1_r[i21 + p] = x11real + x21real; y1_i[i21 + p] = x11imag + x21imag; y1_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y1_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } if (l == logNX) break; //fftz2 (is, l + 1, logNX, NX, u_r, u_i, y1_r, y1_i, y0_r, y0_i, NZ, NY); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NZ * NY; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NX; idx++) { n1 = NX / 2; if (l == 0) { lk = 1; } else { lk = 2 << ((l) - 1); } if (logNX - (l+1) == 0) { li = 1; } else { li = 2 << ((logNX - (l+1)) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y1_r[i11 + p]; x11imag = y1_i[i11 + p]; x21real = y1_r[i12 + p]; x21imag = y1_i[i12 + p]; y0_r[i21 + p] = x11real + x21real; y0_i[i21 + p] = x11imag + x21imag; y0_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y0_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } } if (logNX % 2 == 1) { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = i * NZ * NY + j * NZ + k; u1_r[m] = y1_r[id]; u1_i[m] = y1_i[id]; } } else { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = i * NZ * NY + j * NZ + k; u1_r[m] = y0_r[id]; u1_i[m] = y0_i[id]; } } //cffts2 (1, logNY, u1_r, u1_i, u1_r, u1_i, y0_r, y0_i, y1_r, y1_i); /* x1 -> x1 */ is = 1; #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = j * NX * NZ + k * NX + i; y0_r[id] = u1_r[m]; y0_i[id] = u1_i[m]; } //cfftz (is, logNY, NY, y0_r, y0_i, y1_r, y1_i, NX, NZ); /*-------------------------------------------------------------------- c Perform one variant of the Stockham FFT. 
c-------------------------------------------------------------------*/ for (l = 1; l <= logNY; l += 2) { //fftz2 (is, l, logNY, NY, u_r, u_i, y0_r, y0_i, y1_r, y1_i, NX, NZ); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NX * NZ; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NY; idx++) { n1 = NY / 2; if (l - 1 == 0) { lk = 1; } else { lk = 2 << ((l - 1) - 1); } if (logNY - l == 0) { li = 1; } else { li = 2 << ((logNY - l) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y0_r[i11 + p]; x11imag = y0_i[i11 + p]; x21real = y0_r[i12 + p]; x21imag = y0_i[i12 + p]; y1_r[i21 + p] = x11real + x21real; y1_i[i21 + p] = x11imag + x21imag; y1_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y1_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } if (l == logNY) break; //fftz2 (is, l + 1, logNY, NY, u_r, u_i, y1_r, y1_i, y0_r, y0_i, NX, NZ); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NX * NZ; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NY; idx++) { n1 = NY / 2; if (l == 0) { lk = 1; } else { lk = 2 << ((l) - 1); } if (logNY - (l+1) == 0) { li = 1; } else { li = 2 << ((logNY - (l+1)) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. 
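c
c The radix-2 butterfly evaluated below is, in complex form,
c   out[i21] = in[i11] + in[i12]
c   out[i22] = u1 * (in[i11] - in[i12])
c where u1 is the twiddle factor selected above (conjugated when is = -1) and
c the real/imaginary parts are carried in the separate _r/_i arrays.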
c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y1_r[i11 + p]; x11imag = y1_i[i11 + p]; x21real = y1_r[i12 + p]; x21imag = y1_i[i12 + p]; y0_r[i21 + p] = x11real + x21real; y0_i[i21 + p] = x11imag + x21imag; y0_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y0_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } } if (logNY % 2 == 1) { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = j * NX * NZ + k * NX + i; u1_r[m] = y1_r[id]; u1_i[m] = y1_i[id]; } } else { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = j * NX * NZ + k * NX + i; u1_r[m] = y0_r[id]; u1_i[m] = y0_i[id]; } } //cffts3 (1, logNZ, u1_r, u1_i, u0_r, u0_i, y0_r, y0_i, y1_r, y1_i); /* x1 -> x2 */ is = 1; //cfftz (is, logNZ, NZ, u1_r, u1_i, y1_r, y1_i, NX, NY); /*-------------------------------------------------------------------- c Perform one variant of the Stockham FFT. c-------------------------------------------------------------------*/ for (l = 1; l <= logNZ; l += 2) { //fftz2 (is, l, logNZ, NZ, u_r, u_i, u1_r, u1_i, y1_r, y1_i, NX, NY); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NX * NY; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NZ; idx++) { n1 = NZ / 2; if (l - 1 == 0) { lk = 1; } else { lk = 2 << ((l - 1) - 1); } if (logNZ - l == 0) { li = 1; } else { li = 2 << ((logNZ - l) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = u1_r[i11 + p]; x11imag = u1_i[i11 + p]; x21real = u1_r[i12 + p]; x21imag = u1_i[i12 + p]; y1_r[i21 + p] = x11real + x21real; y1_i[i21 + p] = x11imag + x21imag; y1_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y1_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } if (l == logNZ) break; //fftz2 (is, l + 1, logNZ, NZ, u_r, u_i, y1_r, y1_i, u1_r, u1_i, NX, NY); /*-------------------------------------------------------------------- c Set initial parameters. 
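c
c Note that cffts3 needs no index-reordering copy: with the layout
c m = i + NX*j + NX*NY*k the z direction already has stride NX*NY (= nn),
c so this second half-stage writes its result straight back into u1.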
c-------------------------------------------------------------------*/ nn = NX * NY; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NZ; idx++) { n1 = NZ / 2; if (l == 0) { lk = 1; } else { lk = 2 << ((l) - 1); } if (logNZ - (l+1) == 0) { li = 1; } else { li = 2 << ((logNZ - (l+1)) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y1_r[i11 + p]; x11imag = y1_i[i11 + p]; x21real = y1_r[i12 + p]; x21imag = y1_i[i12 + p]; u1_r[i21 + p] = x11real + x21real; u1_i[i21 + p] = x11imag + x21imag; u1_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); u1_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } } if (logNZ % 2 == 1) { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { u0_r[m] = y1_r[m]; u0_i[m] = y1_i[m]; } } else { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { u0_r[m] = u1_r[m]; u0_i[m] = u1_i[m]; } } } else { //cffts3 (-1, logNZ, u1_r, u1_i, u1_r, u1_i, y0_r, y0_i, y1_r, y1_i); /* x1 -> x1 */ is = -1; //cfftz (is, logNZ, NZ, u1_r, u1_i, y1_r, y1_i, NX, NY); /*-------------------------------------------------------------------- c Perform one variant of the Stockham FFT. c-------------------------------------------------------------------*/ for (l = 1; l <= logNZ; l += 2) { //fftz2 (is, l, logNZ, NZ, u_r, u_i, u1_r, u1_i, y1_r, y1_i, NX, NY); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NX * NY; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NZ; idx++) { n1 = NZ / 2; if (l - 1 == 0) { lk = 1; } else { lk = 2 << ((l - 1) - 1); } if (logNZ - l == 0) { li = 1; } else { li = 2 << ((logNZ - l) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = u1_r[i11 + p]; x11imag = u1_i[i11 + p]; x21real = u1_r[i12 + p]; x21imag = u1_i[i12 + p]; y1_r[i21 + p] = x11real + x21real; y1_i[i21 + p] = x11imag + x21imag; y1_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y1_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } if (l == logNZ) break; //fftz2 (is, l + 1, logNZ, NZ, u_r, u_i, y1_r, y1_i, u1_r, u1_i, NX, NY); /*-------------------------------------------------------------------- c Set initial parameters. 
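c
c Reminder: the setup transform sets dir = 1 above, so this dir == -1 branch
c is not executed during setup; it is the inverse path of the inlined fft()
c routine.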
c-------------------------------------------------------------------*/ nn = NX * NY; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NZ; idx++) { n1 = NZ / 2; if (l == 0) { lk = 1; } else { lk = 2 << ((l) - 1); } if (logNZ - (l+1) == 0) { li = 1; } else { li = 2 << ((logNZ - (l+1)) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y1_r[i11 + p]; x11imag = y1_i[i11 + p]; x21real = y1_r[i12 + p]; x21imag = y1_i[i12 + p]; u1_r[i21 + p] = x11real + x21real; u1_i[i21 + p] = x11imag + x21imag; u1_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); u1_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } } if (logNZ % 2 == 1) { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { u1_r[m] = y1_r[m]; u1_i[m] = y1_i[m]; } } else { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { u1_r[m] = u1_r[m]; u1_i[m] = u1_i[m]; } } //cffts2 (-1, logNY, u1_r, u1_i, u1_r, u1_i, y0_r, y0_i, y1_r, y1_i); /* x1 -> x1 */ is = -1; #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = j * NX * NZ + k * NX + i; y0_r[id] = u1_r[m]; y0_i[id] = u1_i[m]; } //cfftz (is, logNY, NY, y0_r, y0_i, y1_r, y1_i, NX, NZ); /*-------------------------------------------------------------------- c Perform one variant of the Stockham FFT. c-------------------------------------------------------------------*/ for (l = 1; l <= logNY; l += 2) { //fftz2 (is, l, logNY, NY, u_r, u_i, y0_r, y0_i, y1_r, y1_i, NX, NZ); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NX * NZ; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NY; idx++) { n1 = NY / 2; if (l - 1 == 0) { lk = 1; } else { lk = 2 << ((l - 1) - 1); } if (logNY - l == 0) { li = 1; } else { li = 2 << ((logNY - l) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. 
c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y0_r[i11 + p]; x11imag = y0_i[i11 + p]; x21real = y0_r[i12 + p]; x21imag = y0_i[i12 + p]; y1_r[i21 + p] = x11real + x21real; y1_i[i21 + p] = x11imag + x21imag; y1_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y1_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } if (l == logNY) break; //fftz2 (is, l + 1, logNY, NY, u_r, u_i, y1_r, y1_i, y0_r, y0_i, NX, NZ); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NX * NZ; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NY; idx++) { n1 = NY / 2; if (l == 0) { lk = 1; } else { lk = 2 << ((l) - 1); } if (logNY - (l+1) == 0) { li = 1; } else { li = 2 << ((logNY - (l+1)) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y1_r[i11 + p]; x11imag = y1_i[i11 + p]; x21real = y1_r[i12 + p]; x21imag = y1_i[i12 + p]; y0_r[i21 + p] = x11real + x21real; y0_i[i21 + p] = x11imag + x21imag; y0_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y0_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } } if (logNY % 2 == 1) { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = j * NX * NZ + k * NX + i; u1_r[m] = y1_r[id]; u1_i[m] = y1_i[id]; } } else { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = j * NX * NZ + k * NX + i; u1_r[m] = y0_r[id]; u1_i[m] = y0_i[id]; } } //cffts1 (-1, logNX, u1_r, u1_i, u0_r, u0_i, y0_r, y0_i, y1_r, y1_i); /* x1 -> x2 */ is = -1; #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = i * NZ * NY + j * NZ + k; y0_r[id] = u1_r[m]; y0_i[id] = u1_i[m]; } //cfftz (is, logNX, NX, y0_r, y0_i, y1_r, y1_i, NZ, NY); /*-------------------------------------------------------------------- c Perform one variant of the Stockham FFT. c-------------------------------------------------------------------*/ for (l = 1; l <= logNX; l += 2) { //fftz2 (is, l, logNX, NX, u_r, u_i, y0_r, y0_i, y1_r, y1_i, NZ, NY); /*-------------------------------------------------------------------- c Set initial parameters. 
c-------------------------------------------------------------------*/ nn = NZ * NY; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NX; idx++) { n1 = NX / 2; if (l - 1 == 0) { lk = 1; } else { lk = 2 << ((l - 1) - 1); } if (logNX - l == 0) { li = 1; } else { li = 2 << ((logNX - l) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y0_r[i11 + p]; x11imag = y0_i[i11 + p]; x21real = y0_r[i12 + p]; x21imag = y0_i[i12 + p]; y1_r[i21 + p] = x11real + x21real; y1_i[i21 + p] = x11imag + x21imag; y1_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y1_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } if (l == logNX) break; //fftz2 (is, l + 1, logNX, NX, u_r, u_i, y1_r, y1_i, y0_r, y0_i, NZ, NY); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NZ * NY; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NX; idx++) { n1 = NX / 2; if (l == 0) { lk = 1; } else { lk = 2 << ((l) - 1); } if (logNX - (l+1) == 0) { li = 1; } else { li = 2 << ((logNX - (l+1)) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. 
c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y1_r[i11 + p]; x11imag = y1_i[i11 + p]; x21real = y1_r[i12 + p]; x21imag = y1_i[i12 + p]; y0_r[i21 + p] = x11real + x21real; y0_i[i21 + p] = x11imag + x21imag; y0_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y0_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } } if (logNX % 2 == 1) { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = i * NZ * NY + j * NZ + k; u0_r[m] = y1_r[id]; u0_i[m] = y1_i[id]; } } else { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = i * NZ * NY + j * NZ + k; u0_r[m] = y0_r[id]; u0_i[m] = y0_i[id]; } } } } //END_FFT// if (TIMERS_ENABLED == TRUE) { timer_stop (T_FFT); } for (iter = 1; iter <= niter; iter++) { if (TIMERS_ENABLED == TRUE) { timer_start (T_EVOLVE); } //evolve (u0_r, u0_i, u1_r, u1_i, iter, indexmap); /*-------------------------------------------------------------------- c evolve u0 -> u1 (iter time steps) in fourier space c-------------------------------------------------------------------*/ #pragma acc kernels loop gang worker independent for (i = 0; i < NTOTAL; i++) { u1_r[i] = u0_r[i] * ex[iter * indexmap[i]]; u1_i[i] = u0_i[i] * ex[iter * indexmap[i]]; } if (TIMERS_ENABLED == TRUE) { timer_stop (T_EVOLVE); } if (TIMERS_ENABLED == TRUE) { timer_start (T_FFT); } //fft (-1, u1_r, u1_i, u2_r, u2_i); //START_FFT// dir = -1; logNX = ilog2(NX); logNY = ilog2(NY); logNZ = ilog2(NZ); /*-------------------------------------------------------------------- c note: args x1, x2 must be different arrays c note: args for cfftsx are (direction, layout, xin, xout, scratch) c xin/xout may be the same and it can be somewhat faster c if they are c-------------------------------------------------------------------*/ { if (dir == 1) { //cffts1 (1, logNX, u1_r, u1_i, u1_r, u1_i, y0_r, y0_i, y1_r, y1_i); /* x1 -> x1 */ is = 1; #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = i * NZ * NY + j * NZ + k; y0_r[id] = u1_r[m]; y0_i[id] = u1_i[m]; } //cfftz (is, logNX, NX, y0_r, y0_i, y1_r, y1_i, NZ, NY); /*-------------------------------------------------------------------- c Perform one variant of the Stockham FFT. c-------------------------------------------------------------------*/ for (l = 1; l <= logNX; l += 2) { //fftz2 (is, l, logNX, NX, u_r, u_i, y0_r, y0_i, y1_r, y1_i, NZ, NY); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NZ * NY; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NX; idx++) { n1 = NX / 2; if (l - 1 == 0) { lk = 1; } else { lk = 2 << ((l - 1) - 1); } if (logNX - l == 0) { li = 1; } else { li = 2 << ((logNX - l) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. 
c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y0_r[i11 + p]; x11imag = y0_i[i11 + p]; x21real = y0_r[i12 + p]; x21imag = y0_i[i12 + p]; y1_r[i21 + p] = x11real + x21real; y1_i[i21 + p] = x11imag + x21imag; y1_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y1_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } if (l == logNX) break; //fftz2 (is, l + 1, logNX, NX, u_r, u_i, y1_r, y1_i, y0_r, y0_i, NZ, NY); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NZ * NY; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NX; idx++) { n1 = NX / 2; if (l == 0) { lk = 1; } else { lk = 2 << ((l) - 1); } if (logNX - (l+1) == 0) { li = 1; } else { li = 2 << ((logNX - (l+1)) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y1_r[i11 + p]; x11imag = y1_i[i11 + p]; x21real = y1_r[i12 + p]; x21imag = y1_i[i12 + p]; y0_r[i21 + p] = x11real + x21real; y0_i[i21 + p] = x11imag + x21imag; y0_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y0_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } } if (logNX % 2 == 1) { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = i * NZ * NY + j * NZ + k; u1_r[m] = y1_r[id]; u1_i[m] = y1_i[id]; } } else { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = i * NZ * NY + j * NZ + k; u1_r[m] = y0_r[id]; u1_i[m] = y0_i[id]; } } //cffts2 (1, logNY, u1_r, u1_i, u1_r, u1_i, y0_r, y0_i, y1_r, y1_i); /* x1 -> x1 */ is = 1; #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = j * NX * NZ + k * NX + i; y0_r[id] = u1_r[m]; y0_i[id] = u1_i[m]; } //cfftz (is, logNY, NY, y0_r, y0_i, y1_r, y1_i, NX, NZ); /*-------------------------------------------------------------------- c Perform one variant of the Stockham FFT. c-------------------------------------------------------------------*/ for (l = 1; l <= logNY; l += 2) { //fftz2 (is, l, logNY, NY, u_r, u_i, y0_r, y0_i, y1_r, y1_i, NX, NZ); /*-------------------------------------------------------------------- c Set initial parameters. 
c-------------------------------------------------------------------*/ nn = NX * NZ; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NY; idx++) { n1 = NY / 2; if (l - 1 == 0) { lk = 1; } else { lk = 2 << ((l - 1) - 1); } if (logNY - l == 0) { li = 1; } else { li = 2 << ((logNY - l) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y0_r[i11 + p]; x11imag = y0_i[i11 + p]; x21real = y0_r[i12 + p]; x21imag = y0_i[i12 + p]; y1_r[i21 + p] = x11real + x21real; y1_i[i21 + p] = x11imag + x21imag; y1_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y1_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } if (l == logNY) break; //fftz2 (is, l + 1, logNY, NY, u_r, u_i, y1_r, y1_i, y0_r, y0_i, NX, NZ); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NX * NZ; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NY; idx++) { n1 = NY / 2; if (l == 0) { lk = 1; } else { lk = 2 << ((l) - 1); } if (logNY - (l+1) == 0) { li = 1; } else { li = 2 << ((logNY - (l+1)) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y1_r[i11 + p]; x11imag = y1_i[i11 + p]; x21real = y1_r[i12 + p]; x21imag = y1_i[i12 + p]; y0_r[i21 + p] = x11real + x21real; y0_i[i21 + p] = x11imag + x21imag; y0_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y0_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } } if (logNY % 2 == 1) { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = j * NX * NZ + k * NX + i; u1_r[m] = y1_r[id]; u1_i[m] = y1_i[id]; } } else { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = j * NX * NZ + k * NX + i; u1_r[m] = y0_r[id]; u1_i[m] = y0_i[id]; } } //cffts3 (1, logNZ, u1_r, u1_i, u2_r, u2_i, y0_r, y0_i, y1_r, y1_i); /* x1 -> x2 */ is = 1; //cfftz (is, logNZ, NZ, u1_r, u1_i, y1_r, y1_i, NX, NY); /*-------------------------------------------------------------------- c Perform one variant of the Stockham FFT. 
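c
c This is the same inlined Stockham driver as in the setup transform; the
c only difference is that the z-direction result now goes to u2 (used by the
c checksum below) instead of u0.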
c-------------------------------------------------------------------*/ for (l = 1; l <= logNZ; l += 2) { //fftz2 (is, l, logNZ, NZ, u_r, u_i, u1_r, u1_i, y1_r, y1_i, NX, NY); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NX * NY; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NZ; idx++) { n1 = NZ / 2; if (l - 1 == 0) { lk = 1; } else { lk = 2 << ((l - 1) - 1); } if (logNZ - l == 0) { li = 1; } else { li = 2 << ((logNZ - l) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = u1_r[i11 + p]; x11imag = u1_i[i11 + p]; x21real = u1_r[i12 + p]; x21imag = u1_i[i12 + p]; y1_r[i21 + p] = x11real + x21real; y1_i[i21 + p] = x11imag + x21imag; y1_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y1_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } if (l == logNZ) break; //fftz2 (is, l + 1, logNZ, NZ, u_r, u_i, y1_r, y1_i, u1_r, u1_i, NX, NY); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NX * NY; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NZ; idx++) { n1 = NZ / 2; if (l == 0) { lk = 1; } else { lk = 2 << ((l) - 1); } if (logNZ - (l+1) == 0) { li = 1; } else { li = 2 << ((logNZ - (l+1)) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y1_r[i11 + p]; x11imag = y1_i[i11 + p]; x21real = y1_r[i12 + p]; x21imag = y1_i[i12 + p]; u1_r[i21 + p] = x11real + x21real; u1_i[i21 + p] = x11imag + x21imag; u1_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); u1_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } } if (logNZ % 2 == 1) { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { u2_r[m] = y1_r[m]; u2_i[m] = y1_i[m]; } } else { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { u2_r[m] = u1_r[m]; u2_i[m] = u1_i[m]; } } } else { //cffts3 (-1, logNZ, u1_r, u1_i, u1_r, u1_i, y0_r, y0_i, y1_r, y1_i); /* x1 -> x1 */ is = -1; //cfftz (is, logNZ, NZ, u1_r, u1_i, y1_r, y1_i, NX, NY); /*-------------------------------------------------------------------- c Perform one variant of the Stockham FFT. 
c-------------------------------------------------------------------*/ for (l = 1; l <= logNZ; l += 2) { //fftz2 (is, l, logNZ, NZ, u_r, u_i, u1_r, u1_i, y1_r, y1_i, NX, NY); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NX * NY; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NZ; idx++) { n1 = NZ / 2; if (l - 1 == 0) { lk = 1; } else { lk = 2 << ((l - 1) - 1); } if (logNZ - l == 0) { li = 1; } else { li = 2 << ((logNZ - l) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = u1_r[i11 + p]; x11imag = u1_i[i11 + p]; x21real = u1_r[i12 + p]; x21imag = u1_i[i12 + p]; y1_r[i21 + p] = x11real + x21real; y1_i[i21 + p] = x11imag + x21imag; y1_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y1_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } if (l == logNZ) break; //fftz2 (is, l + 1, logNZ, NZ, u_r, u_i, y1_r, y1_i, u1_r, u1_i, NX, NY); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NX * NY; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NZ; idx++) { n1 = NZ / 2; if (l == 0) { lk = 1; } else { lk = 2 << ((l) - 1); } if (logNZ - (l+1) == 0) { li = 1; } else { li = 2 << ((logNZ - (l+1)) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. 
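c
c With is = -1 the butterfly below uses the conjugated twiddle factor
c (u1_if was negated above).  No 1/N scaling is applied in the inverse pass;
c the checksum code further down divides its accumulated sums by NTOTAL.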
c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y1_r[i11 + p]; x11imag = y1_i[i11 + p]; x21real = y1_r[i12 + p]; x21imag = y1_i[i12 + p]; u1_r[i21 + p] = x11real + x21real; u1_i[i21 + p] = x11imag + x21imag; u1_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); u1_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } } if (logNZ % 2 == 1) { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { u1_r[m] = y1_r[m]; u1_i[m] = y1_i[m]; } } else { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { u1_r[m] = u1_r[m]; u1_i[m] = u1_i[m]; } } //cffts2 (-1, logNY, u1_r, u1_i, u1_r, u1_i, y0_r, y0_i, y1_r, y1_i); /* x1 -> x1 */ is = -1; #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = j * NX * NZ + k * NX + i; y0_r[id] = u1_r[m]; y0_i[id] = u1_i[m]; } //cfftz (is, logNY, NY, y0_r, y0_i, y1_r, y1_i, NX, NZ); /*-------------------------------------------------------------------- c Perform one variant of the Stockham FFT. c-------------------------------------------------------------------*/ for (l = 1; l <= logNY; l += 2) { //fftz2 (is, l, logNY, NY, u_r, u_i, y0_r, y0_i, y1_r, y1_i, NX, NZ); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NX * NZ; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NY; idx++) { n1 = NY / 2; if (l - 1 == 0) { lk = 1; } else { lk = 2 << ((l - 1) - 1); } if (logNY - l == 0) { li = 1; } else { li = 2 << ((logNY - l) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y0_r[i11 + p]; x11imag = y0_i[i11 + p]; x21real = y0_r[i12 + p]; x21imag = y0_i[i12 + p]; y1_r[i21 + p] = x11real + x21real; y1_i[i21 + p] = x11imag + x21imag; y1_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y1_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } if (l == logNY) break; //fftz2 (is, l + 1, logNY, NY, u_r, u_i, y1_r, y1_i, y0_r, y0_i, NX, NZ); /*-------------------------------------------------------------------- c Set initial parameters. 
c-------------------------------------------------------------------*/ nn = NX * NZ; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NY; idx++) { n1 = NY / 2; if (l == 0) { lk = 1; } else { lk = 2 << ((l) - 1); } if (logNY - (l+1) == 0) { li = 1; } else { li = 2 << ((logNY - (l+1)) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y1_r[i11 + p]; x11imag = y1_i[i11 + p]; x21real = y1_r[i12 + p]; x21imag = y1_i[i12 + p]; y0_r[i21 + p] = x11real + x21real; y0_i[i21 + p] = x11imag + x21imag; y0_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y0_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } } if (logNY % 2 == 1) { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = j * NX * NZ + k * NX + i; u1_r[m] = y1_r[id]; u1_i[m] = y1_i[id]; } } else { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = j * NX * NZ + k * NX + i; u1_r[m] = y0_r[id]; u1_i[m] = y0_i[id]; } } //cffts1 (-1, logNX, u1_r, u1_i, u2_r, u2_i, y0_r, y0_i, y1_r, y1_i); /* x1 -> x2 */ is = -1; #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = i * NZ * NY + j * NZ + k; y0_r[id] = u1_r[m]; y0_i[id] = u1_i[m]; } //cfftz (is, logNX, NX, y0_r, y0_i, y1_r, y1_i, NZ, NY); /*-------------------------------------------------------------------- c Perform one variant of the Stockham FFT. c-------------------------------------------------------------------*/ for (l = 1; l <= logNX; l += 2) { //fftz2 (is, l, logNX, NX, u_r, u_i, y0_r, y0_i, y1_r, y1_i, NZ, NY); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NZ * NY; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NX; idx++) { n1 = NX / 2; if (l - 1 == 0) { lk = 1; } else { lk = 2 << ((l - 1) - 1); } if (logNX - l == 0) { li = 1; } else { li = 2 << ((logNX - l) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. 
c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y0_r[i11 + p]; x11imag = y0_i[i11 + p]; x21real = y0_r[i12 + p]; x21imag = y0_i[i12 + p]; y1_r[i21 + p] = x11real + x21real; y1_i[i21 + p] = x11imag + x21imag; y1_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y1_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } if (l == logNX) break; //fftz2 (is, l + 1, logNX, NX, u_r, u_i, y1_r, y1_i, y0_r, y0_i, NZ, NY); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NZ * NY; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NX; idx++) { n1 = NX / 2; if (l == 0) { lk = 1; } else { lk = 2 << ((l) - 1); } if (logNX - (l+1) == 0) { li = 1; } else { li = 2 << ((logNX - (l+1)) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y1_r[i11 + p]; x11imag = y1_i[i11 + p]; x21real = y1_r[i12 + p]; x21imag = y1_i[i12 + p]; y0_r[i21 + p] = x11real + x21real; y0_i[i21 + p] = x11imag + x21imag; y0_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y0_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } } if (logNX % 2 == 1) { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = i * NZ * NY + j * NZ + k; u2_r[m] = y1_r[id]; u2_i[m] = y1_i[id]; } } else { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = i * NZ * NY + j * NZ + k; u2_r[m] = y0_r[id]; u2_i[m] = y0_i[id]; } } } } //END_FFT// if (TIMERS_ENABLED == TRUE) { timer_stop (T_FFT); } if (TIMERS_ENABLED == TRUE) { timer_start (T_CHECKSUM); } //checksum (iter, u2_r, u2_i, dims[0]); chk_r = 0.0; chk_i = 0.0; #pragma acc kernels loop gang worker independent for(m = 0; m < 1024; m++){ j = 1+m; q = j%NX; if (q >= 0 && q < NX) { r = (3*j)%NY; if (r >= 0 && r < NY) { s = (5*j)%NZ; if (s >= 0 && s < NZ) { chk_r = chk_r + u2_r[s*NY*NX + r*NX + q]; chk_i = chk_i + u2_i[s*NY*NX + r*NX + q]; } } } } //printf("chk_r = %22.12e, chk_i =%22.12e\n", chk_r, chk_i); { sums_r[iter] += chk_r; sums_i[iter] += chk_i; } { sums_r[iter] = sums_r[iter] / (double) (NTOTAL); sums_i[iter] = sums_i[iter] / (double) (NTOTAL); printf ("T = %5d Checksum = %22.12e %22.12e\n", iter, sums_r[iter], sums_i[iter]); } if (TIMERS_ENABLED == TRUE) { timer_stop (T_CHECKSUM); } } verify (NX, NY, NZ, niter, &verified, &classT); #if defined(_OPENMP) nthreads = omp_get_num_threads (); #endif /* _OPENMP */ } /* end parallel */ timer_stop (T_TOTAL); total_time = timer_read (T_TOTAL); if (total_time != 0.0) { mflops = 1.0e-6 * (double) (NTOTAL) * (14.8157 + 7.19641 * log ((double) (NTOTAL)) + (5.23518 + 7.21113 * log ((double) (NTOTAL))) * niter) / total_time; } else { mflops = 0.0; } c_print_results 
("FT", classT, NX, NY, NZ, niter, nthreads, total_time, mflops, " floating point", verified, NPBVERSION, COMPILETIME, CS1, CS2, CS3, CS4, CS5, CS6, CS7); if (TIMERS_ENABLED == TRUE) print_timers (); return 0; } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void compute_initial_conditions (double u0_r[NTOTAL], double u0_i[NTOTAL]) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c Fill in array u0 with initial conditions from c random number generator c-------------------------------------------------------------------*/ int k; double x0, start, an, dummy; static double tmp[NX * 2 * MAXDIM + 1]; int i, j, t; start = SEED; /*-------------------------------------------------------------------- c Jump to the starting element for our first plane. c-------------------------------------------------------------------*/ ipow46 (A, (zstart[0] - 1) * 2 * NX * NY + (ystart[0] - 1) * 2 * NX, &an); dummy = randlc (&start, an); ipow46 (A, 2 * NX * NY, &an); /*-------------------------------------------------------------------- c Go through by z planes filling in one square at a time. c-------------------------------------------------------------------*/ for (k = 0; k < NZ; k++) { x0 = start; vranlc (2 * NX * NY, &x0, A, tmp); t = 1; for (j = 0; j < NY; j++) for (i = 0; i < NX; i++) { u0_r[k * NY * NX + j * NX + i] = tmp[t++]; u0_i[k * NY * NX + j * NX + i] = tmp[t++]; } if (k != NZ) dummy = randlc (&start, an); } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void ipow46 (double a, int exponent, double *result) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c compute a^exponent mod 2^46 c-------------------------------------------------------------------*/ double dummy, q, r; int n, n2; /*-------------------------------------------------------------------- c Use c a^n = a^(n/2)*a^(n/2) if n even else c a^n = a*a^(n-1) if n odd c-------------------------------------------------------------------*/ *result = 1; if (exponent == 0) return; q = a; r = 1; n = exponent; while (n > 1) { n2 = n / 2; if (n2 * 2 == n) { dummy = randlc (&q, q); n = n2; } else { dummy = randlc (&r, q); n = n - 1; } } dummy = randlc (&r, q); *result = r; } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void setup (void) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ int ierr, i, j, fstatus; printf ("\n\n NAS Parallel Benchmarks 2.3 OpenMP C version" " - FT Benchmark\n\n"); niter = NITER_DEFAULT; printf (" Size : %3dx%3dx%3d\n", NX, NY, NZ); printf (" Iterations : %7d\n", niter); /* 1004 format(' Number of processes : ', i7) 1005 format(' Processor array : ', i3, 'x', i3) 1006 format(' WARNING: compiled for ', i5, ' processes. ', > ' Will not verify. 
')*/
  for (i = 0; i < 3; i++) {
    dims[i][0] = NX;
    dims[i][1] = NY;
    dims[i][2] = NZ;
  }
  for (i = 0; i < 3; i++) {
    xstart[i] = 1;
    xend[i] = NX;
    ystart[i] = 1;
    yend[i] = NY;
    zstart[i] = 1;
    zend[i] = NZ;
  }

/*--------------------------------------------------------------------
c Set up info for blocking of ffts and transposes.  This improves
c performance on cache-based systems.  Blocking involves
c working on a chunk of the problem at a time, taking chunks
c along the first, second, or third dimension.
c
c - In cffts1 blocking is on 2nd dimension (with fft on 1st dim)
c - In cffts2/3 blocking is on 1st dimension (with fft on 2nd and 3rd dims)
c Since 1st dim is always in processor, we'll assume it's long enough
c (default blocking factor is 16 so min size for 1st dim is 16)
c The only case we have to worry about is cffts1 in a 2d decomposition,
c so the blocking factor should not be larger than the 2nd dimension.
c-------------------------------------------------------------------*/
  fftblock = FFTBLOCK_DEFAULT;
  fftblockpad = FFTBLOCKPAD_DEFAULT;
  if (fftblock != FFTBLOCK_DEFAULT)
    fftblockpad = fftblock + 3;
}

/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void print_timers (void)
{
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
  int i;
  const char *tstrings[] = { " total ", " setup ", " fft ",
    " evolve ", " checksum ", " fftlow ", " fftcopy "
  };
  for (i = 0; i < T_MAX; i++) {
    if (timer_read (i) != 0.0) {
      printf ("timer %2d (%16s) :%10.6f\n", i, tstrings[i], timer_read (i));
    }
  }
}

/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void fft (int dir, double x1_r[NTOTAL], double x1_i[NTOTAL],
                 double x2_r[NTOTAL], double x2_i[NTOTAL])
{
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
  static double y0_r[NTOTAL];
  static double y0_i[NTOTAL];
  static double y1_r[NTOTAL];
  static double y1_i[NTOTAL];
  int logNX, logNY, logNZ;

  ///////////////////////
  // Used for cffts1(). //
  ///////////////////////
  int i, j, k, jj;
  int m, id;
  int is;

  ///////////////////////
  // Used for cfftz().  //
  ///////////////////////
  int l;

  ///////////////////////
  // Used for fftz2().
// /////////////////////// int k_fftz2, n1, li, lj, lk, ku, i_fftz2, i11, i12, i21, i22; double u1_rf, x11_r, x21_r; double u1_if, x11_i, x21_i; int idx, p, nn; double x11real, x11imag, x21real, x21imag; //START_FFT// logNX = ilog2(NX); logNY = ilog2(NY); logNZ = ilog2(NZ); /*-------------------------------------------------------------------- c note: args x1, x2 must be different arrays c note: args for cfftsx are (direction, layout, xin, xout, scratch) c xin/xout may be the same and it can be somewhat faster c if they are c-------------------------------------------------------------------*/ #pragma acc data \ copyin(u_r[0:NX], u_i[0:NX]) \ copy(x1_r[0:NTOTAL], x1_i[0:NTOTAL]) \ copyout(x2_r[0:NTOTAL], x2_i[0:NTOTAL]) \ create(y0_r[0:NTOTAL], y0_i[0:NTOTAL]) \ create(y1_r[0:NTOTAL], y1_i[0:NTOTAL]) { if (dir == 1) { //cffts1 (1, logNX, x1_r, x1_i, x1_r, x1_i, y0_r, y0_i, y1_r, y1_i); /* x1 -> x1 */ is = 1; #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = i * NZ * NY + j * NZ + k; y0_r[id] = x1_r[m]; y0_i[id] = x1_i[m]; } //cfftz (is, logNX, NX, y0_r, y0_i, y1_r, y1_i, NZ, NY); /*-------------------------------------------------------------------- c Perform one variant of the Stockham FFT. c-------------------------------------------------------------------*/ for (l = 1; l <= logNX; l += 2) { //fftz2 (is, l, logNX, NX, u_r, u_i, y0_r, y0_i, y1_r, y1_i, NZ, NY); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NZ * NY; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NX; idx++) { n1 = NX / 2; if (l - 1 == 0) { lk = 1; } else { lk = 2 << ((l - 1) - 1); } if (logNX - l == 0) { li = 1; } else { li = 2 << ((logNX - l) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y0_r[i11 + p]; x11imag = y0_i[i11 + p]; x21real = y0_r[i12 + p]; x21imag = y0_i[i12 + p]; y1_r[i21 + p] = x11real + x21real; y1_i[i21 + p] = x11imag + x21imag; y1_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y1_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } if (l == logNX) break; //fftz2 (is, l + 1, logNX, NX, u_r, u_i, y1_r, y1_i, y0_r, y0_i, NZ, NY); /*-------------------------------------------------------------------- c Set initial parameters. 
c-------------------------------------------------------------------*/ nn = NZ * NY; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NX; idx++) { n1 = NX / 2; if (l == 0) { lk = 1; } else { lk = 2 << ((l) - 1); } if (logNX - (l+1) == 0) { li = 1; } else { li = 2 << ((logNX - (l+1)) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y1_r[i11 + p]; x11imag = y1_i[i11 + p]; x21real = y1_r[i12 + p]; x21imag = y1_i[i12 + p]; y0_r[i21 + p] = x11real + x21real; y0_i[i21 + p] = x11imag + x21imag; y0_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y0_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } } if (logNX % 2 == 1) { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = i * NZ * NY + j * NZ + k; x1_r[m] = y1_r[id]; x1_i[m] = y1_i[id]; } } else { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = i * NZ * NY + j * NZ + k; x1_r[m] = y0_r[id]; x1_i[m] = y0_i[id]; } } //cffts2 (1, logNY, x1_r, x1_i, x1_r, x1_i, y0_r, y0_i, y1_r, y1_i); /* x1 -> x1 */ is = 1; #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = j * NX * NZ + k * NX + i; y0_r[id] = x1_r[m]; y0_i[id] = x1_i[m]; } //cfftz (is, logNY, NY, y0_r, y0_i, y1_r, y1_i, NX, NZ); /*-------------------------------------------------------------------- c Perform one variant of the Stockham FFT. c-------------------------------------------------------------------*/ for (l = 1; l <= logNY; l += 2) { //fftz2 (is, l, logNY, NY, u_r, u_i, y0_r, y0_i, y1_r, y1_i, NX, NZ); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NX * NZ; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NY; idx++) { n1 = NY / 2; if (l - 1 == 0) { lk = 1; } else { lk = 2 << ((l - 1) - 1); } if (logNY - l == 0) { li = 1; } else { li = 2 << ((logNY - l) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. 
c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y0_r[i11 + p]; x11imag = y0_i[i11 + p]; x21real = y0_r[i12 + p]; x21imag = y0_i[i12 + p]; y1_r[i21 + p] = x11real + x21real; y1_i[i21 + p] = x11imag + x21imag; y1_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y1_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } if (l == logNY) break; //fftz2 (is, l + 1, logNY, NY, u_r, u_i, y1_r, y1_i, y0_r, y0_i, NX, NZ); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NX * NZ; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NY; idx++) { n1 = NY / 2; if (l == 0) { lk = 1; } else { lk = 2 << ((l) - 1); } if (logNY - (l+1) == 0) { li = 1; } else { li = 2 << ((logNY - (l+1)) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y1_r[i11 + p]; x11imag = y1_i[i11 + p]; x21real = y1_r[i12 + p]; x21imag = y1_i[i12 + p]; y0_r[i21 + p] = x11real + x21real; y0_i[i21 + p] = x11imag + x21imag; y0_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y0_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } } if (logNY % 2 == 1) { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = j * NX * NZ + k * NX + i; x1_r[m] = y1_r[id]; x1_i[m] = y1_i[id]; } } else { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = j * NX * NZ + k * NX + i; x1_r[m] = y0_r[id]; x1_i[m] = y0_i[id]; } } //cffts3 (1, logNZ, x1_r, x1_i, x2_r, x2_i, y0_r, y0_i, y1_r, y1_i); /* x1 -> x2 */ is = 1; //cfftz (is, logNZ, NZ, x1_r, x1_i, y1_r, y1_i, NX, NY); /*-------------------------------------------------------------------- c Perform one variant of the Stockham FFT. c-------------------------------------------------------------------*/ for (l = 1; l <= logNZ; l += 2) { //fftz2 (is, l, logNZ, NZ, u_r, u_i, x1_r, x1_i, y1_r, y1_i, NX, NY); /*-------------------------------------------------------------------- c Set initial parameters. 
c-------------------------------------------------------------------*/ nn = NX * NY; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NZ; idx++) { n1 = NZ / 2; if (l - 1 == 0) { lk = 1; } else { lk = 2 << ((l - 1) - 1); } if (logNZ - l == 0) { li = 1; } else { li = 2 << ((logNZ - l) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = x1_r[i11 + p]; x11imag = x1_i[i11 + p]; x21real = x1_r[i12 + p]; x21imag = x1_i[i12 + p]; y1_r[i21 + p] = x11real + x21real; y1_i[i21 + p] = x11imag + x21imag; y1_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y1_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } if (l == logNZ) break; //fftz2 (is, l + 1, logNZ, NZ, u_r, u_i, y1_r, y1_i, x1_r, x1_i, NX, NY); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NX * NY; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NZ; idx++) { n1 = NZ / 2; if (l == 0) { lk = 1; } else { lk = 2 << ((l) - 1); } if (logNZ - (l+1) == 0) { li = 1; } else { li = 2 << ((logNZ - (l+1)) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y1_r[i11 + p]; x11imag = y1_i[i11 + p]; x21real = y1_r[i12 + p]; x21imag = y1_i[i12 + p]; x1_r[i21 + p] = x11real + x21real; x1_i[i21 + p] = x11imag + x21imag; x1_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); x1_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } } if (logNZ % 2 == 1) { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { x2_r[m] = y1_r[m]; x2_i[m] = y1_i[m]; } } else { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { x2_r[m] = x1_r[m]; x2_i[m] = x1_i[m]; } } } else { //cffts3 (-1, logNZ, x1_r, x1_i, x1_r, x1_i, y0_r, y0_i, y1_r, y1_i); /* x1 -> x1 */ is = -1; //cfftz (is, logNZ, NZ, x1_r, x1_i, y1_r, y1_i, NX, NY); /*-------------------------------------------------------------------- c Perform one variant of the Stockham FFT. c-------------------------------------------------------------------*/ for (l = 1; l <= logNZ; l += 2) { //fftz2 (is, l, logNZ, NZ, u_r, u_i, x1_r, x1_i, y1_r, y1_i, NX, NY); /*-------------------------------------------------------------------- c Set initial parameters. 
c-------------------------------------------------------------------*/ nn = NX * NY; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NZ; idx++) { n1 = NZ / 2; if (l - 1 == 0) { lk = 1; } else { lk = 2 << ((l - 1) - 1); } if (logNZ - l == 0) { li = 1; } else { li = 2 << ((logNZ - l) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = x1_r[i11 + p]; x11imag = x1_i[i11 + p]; x21real = x1_r[i12 + p]; x21imag = x1_i[i12 + p]; y1_r[i21 + p] = x11real + x21real; y1_i[i21 + p] = x11imag + x21imag; y1_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y1_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } if (l == logNZ) break; //fftz2 (is, l + 1, logNZ, NZ, u_r, u_i, y1_r, y1_i, x1_r, x1_i, NX, NY); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NX * NY; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NZ; idx++) { n1 = NZ / 2; if (l == 0) { lk = 1; } else { lk = 2 << ((l) - 1); } if (logNZ - (l+1) == 0) { li = 1; } else { li = 2 << ((logNZ - (l+1)) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y1_r[i11 + p]; x11imag = y1_i[i11 + p]; x21real = y1_r[i12 + p]; x21imag = y1_i[i12 + p]; x1_r[i21 + p] = x11real + x21real; x1_i[i21 + p] = x11imag + x21imag; x1_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); x1_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } } if (logNZ % 2 == 1) { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { x1_r[m] = y1_r[m]; x1_i[m] = y1_i[m]; } } else { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { x1_r[m] = x1_r[m]; x1_i[m] = x1_i[m]; } } //cffts2 (-1, logNY, x1_r, x1_i, x1_r, x1_i, y0_r, y0_i, y1_r, y1_i); /* x1 -> x1 */ is = -1; #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = j * NX * NZ + k * NX + i; y0_r[id] = x1_r[m]; y0_i[id] = x1_i[m]; } //cfftz (is, logNY, NY, y0_r, y0_i, y1_r, y1_i, NX, NZ); /*-------------------------------------------------------------------- c Perform one variant of the Stockham FFT. 
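c The two fftz2 stages per loop iteration ping-pong the data between the
c two scratch arrays, which is what makes this a Stockham (autosort) FFT:
c the reordering happens as a side effect of each stage, so no separate
c bit-reversal pass is needed.  The parity test on the stage count after
c the loop simply selects whichever array holds the final result.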
c-------------------------------------------------------------------*/ for (l = 1; l <= logNY; l += 2) { //fftz2 (is, l, logNY, NY, u_r, u_i, y0_r, y0_i, y1_r, y1_i, NX, NZ); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NX * NZ; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NY; idx++) { n1 = NY / 2; if (l - 1 == 0) { lk = 1; } else { lk = 2 << ((l - 1) - 1); } if (logNY - l == 0) { li = 1; } else { li = 2 << ((logNY - l) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y0_r[i11 + p]; x11imag = y0_i[i11 + p]; x21real = y0_r[i12 + p]; x21imag = y0_i[i12 + p]; y1_r[i21 + p] = x11real + x21real; y1_i[i21 + p] = x11imag + x21imag; y1_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y1_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } if (l == logNY) break; //fftz2 (is, l + 1, logNY, NY, u_r, u_i, y1_r, y1_i, y0_r, y0_i, NX, NZ); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NX * NZ; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NY; idx++) { n1 = NY / 2; if (l == 0) { lk = 1; } else { lk = 2 << ((l) - 1); } if (logNY - (l+1) == 0) { li = 1; } else { li = 2 << ((logNY - (l+1)) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. 
c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y1_r[i11 + p]; x11imag = y1_i[i11 + p]; x21real = y1_r[i12 + p]; x21imag = y1_i[i12 + p]; y0_r[i21 + p] = x11real + x21real; y0_i[i21 + p] = x11imag + x21imag; y0_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y0_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } } if (logNY % 2 == 1) { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = j * NX * NZ + k * NX + i; x1_r[m] = y1_r[id]; x1_i[m] = y1_i[id]; } } else { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = j * NX * NZ + k * NX + i; x1_r[m] = y0_r[id]; x1_i[m] = y0_i[id]; } } //cffts1 (-1, logNX, x1_r, x1_i, x2_r, x2_i, y0_r, y0_i, y1_r, y1_i); /* x1 -> x2 */ is = -1; #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = i * NZ * NY + j * NZ + k; y0_r[id] = x1_r[m]; y0_i[id] = x1_i[m]; } //cfftz (is, logNX, NX, y0_r, y0_i, y1_r, y1_i, NZ, NY); /*-------------------------------------------------------------------- c Perform one variant of the Stockham FFT. c-------------------------------------------------------------------*/ for (l = 1; l <= logNX; l += 2) { //fftz2 (is, l, logNX, NX, u_r, u_i, y0_r, y0_i, y1_r, y1_i, NZ, NY); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NZ * NY; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NX; idx++) { n1 = NX / 2; if (l - 1 == 0) { lk = 1; } else { lk = 2 << ((l - 1) - 1); } if (logNX - l == 0) { li = 1; } else { li = 2 << ((logNX - l) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y0_r[i11 + p]; x11imag = y0_i[i11 + p]; x21real = y0_r[i12 + p]; x21imag = y0_i[i12 + p]; y1_r[i21 + p] = x11real + x21real; y1_i[i21 + p] = x11imag + x21imag; y1_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y1_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } if (l == logNX) break; //fftz2 (is, l + 1, logNX, NX, u_r, u_i, y1_r, y1_i, y0_r, y0_i, NZ, NY); /*-------------------------------------------------------------------- c Set initial parameters. 
c-------------------------------------------------------------------*/ nn = NZ * NY; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NX; idx++) { n1 = NX / 2; if (l == 0) { lk = 1; } else { lk = 2 << ((l) - 1); } if (logNX - (l+1) == 0) { li = 1; } else { li = 2 << ((logNX - (l+1)) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y1_r[i11 + p]; x11imag = y1_i[i11 + p]; x21real = y1_r[i12 + p]; x21imag = y1_i[i12 + p]; y0_r[i21 + p] = x11real + x21real; y0_i[i21 + p] = x11imag + x21imag; y0_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y0_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } } if (logNX % 2 == 1) { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = i * NZ * NY + j * NZ + k; x2_r[m] = y1_r[id]; x2_i[m] = y1_i[id]; } } else { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = i * NZ * NY + j * NZ + k; x2_r[m] = y0_r[id]; x2_i[m] = y0_i[id]; } } } } //END_FFT// } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void fft_init (int n) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c compute the roots-of-unity array that will be used for subsequent FFTs. c-------------------------------------------------------------------*/ int m, nu, ku, i, j, ln; double t, ti; /*-------------------------------------------------------------------- c Initialize the U array with sines and cosines in a manner that permits c stride one access at each FFT iteration. 
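c The table is laid out for unit-stride access per stage: u_r[0] stores
c log2(n); for stage j = 1..m the ln = 2^(j-1) twiddle factors
c cos(i*pi/ln), sin(i*pi/ln), i = 0..ln-1, occupy entries ku..ku+ln-1,
c with ku starting at 1 and advancing by ln after each stage.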
c-------------------------------------------------------------------*/ nu = n; m = ilog2 (n); u_r[0] = (double) m; u_i[0] = 0.0; ku = 1; ln = 1; for (j = 1; j <= m; j++) { t = PI / ln; for (i = 0; i <= ln - 1; i++) { ti = i * t; u_r[i + ku] = cos (ti); u_i[i + ku] = sin (ti); } ku = ku + ln; ln = 2 * ln; } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static int ilog2 (int n) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ int nn, lg; if (n == 1) { return 0; } lg = 1; nn = 2; while (nn < n) { nn = nn << 1; lg++; } return lg; } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void verify (int d1, int d2, int d3, int nt, boolean * verified, char *classT) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ int ierr, size, i; double err, epsilon; /*-------------------------------------------------------------------- c Sample size reference checksums c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c Class S size reference checksums c-------------------------------------------------------------------*/ double vdata_real_s[6 + 1] = { 0.0, 5.546087004964e+02, 5.546385409189e+02, 5.546148406171e+02, 5.545423607415e+02, 5.544255039624e+02, 5.542683411902e+02 }; double vdata_imag_s[6 + 1] = { 0.0, 4.845363331978e+02, 4.865304269511e+02, 4.883910722336e+02, 4.901273169046e+02, 4.917475857993e+02, 4.932597244941e+02 }; /*-------------------------------------------------------------------- c Class W size reference checksums c-------------------------------------------------------------------*/ double vdata_real_w[6 + 1] = { 0.0, 5.673612178944e+02, 5.631436885271e+02, 5.594024089970e+02, 5.560698047020e+02, 5.530898991250e+02, 5.504159734538e+02 }; double vdata_imag_w[6 + 1] = { 0.0, 5.293246849175e+02, 5.282149986629e+02, 5.270996558037e+02, 5.260027904925e+02, 5.249400845633e+02, 5.239212247086e+02 }; /*-------------------------------------------------------------------- c Class A size reference checksums c-------------------------------------------------------------------*/ double vdata_real_a[6 + 1] = { 0.0, 5.046735008193e+02, 5.059412319734e+02, 5.069376896287e+02, 5.077892868474e+02, 5.085233095391e+02, 5.091487099959e+02 }; double vdata_imag_a[6 + 1] = { 0.0, 5.114047905510e+02, 5.098809666433e+02, 5.098144042213e+02, 5.101336130759e+02, 5.104914655194e+02, 5.107917842803e+02 }; /*-------------------------------------------------------------------- c Class B size reference checksums c-------------------------------------------------------------------*/ double vdata_real_b[20 + 1] = { 0.0, 5.177643571579e+02, 5.154521291263e+02, 5.146409228649e+02, 5.142378756213e+02, 5.139626667737e+02, 5.137423460082e+02, 5.135547056878e+02, 5.133910925466e+02, 5.132470705390e+02, 5.131197729984e+02, 5.130070319283e+02, 5.129070537032e+02, 5.128182883502e+02, 5.127393733383e+02, 5.126691062020e+02, 5.126064276004e+02, 5.125504076570e+02, 5.125002331720e+02, 5.124551951846e+02, 5.124146770029e+02 }; double vdata_imag_b[20 + 1] = { 0.0, 5.077803458597e+02, 5.088249431599e+02, 5.096208912659e+02, 5.101023387619e+02, 5.103976610617e+02, 
5.105948019802e+02, 5.107404165783e+02, 5.108576573661e+02, 5.109577278523e+02, 5.110460304483e+02, 5.111252433800e+02, 5.111968077718e+02, 5.112616233064e+02, 5.113203605551e+02, 5.113735928093e+02, 5.114218460548e+02, 5.114656139760e+02, 5.115053595966e+02, 5.115415130407e+02, 5.115744692211e+02 }; /*-------------------------------------------------------------------- c Class C size reference checksums c-------------------------------------------------------------------*/ double vdata_real_c[20 + 1] = { 0.0, 5.195078707457e+02, 5.155422171134e+02, 5.144678022222e+02, 5.140150594328e+02, 5.137550426810e+02, 5.135811056728e+02, 5.134569343165e+02, 5.133651975661e+02, 5.132955192805e+02, 5.132410471738e+02, 5.131971141679e+02, 5.131605205716e+02, 5.131290734194e+02, 5.131012720314e+02, 5.130760908195e+02, 5.130528295923e+02, 5.130310107773e+02, 5.130103090133e+02, 5.129905029333e+02, 5.129714421109e+02 }; double vdata_imag_c[20 + 1] = { 0.0, 5.149019699238e+02, 5.127578201997e+02, 5.122251847514e+02, 5.121090289018e+02, 5.121143685824e+02, 5.121496764568e+02, 5.121870921893e+02, 5.122193250322e+02, 5.122454735794e+02, 5.122663649603e+02, 5.122830879827e+02, 5.122965869718e+02, 5.123075927445e+02, 5.123166486553e+02, 5.123241541685e+02, 5.123304037599e+02, 5.123356167976e+02, 5.123399592211e+02, 5.123435588985e+02, 5.123465164008e+02 }; epsilon = 1.0e-12; *verified = TRUE; *classT = 'U'; if (d1 == 64 && d2 == 64 && d3 == 64 && nt == 6) { *classT = 'S'; for (i = 1; i <= nt; i++) { err = (sums_r[i] - vdata_real_s[i]) / vdata_real_s[i]; if (fabs (err) > epsilon) { *verified = FALSE; break; } err = (sums_i[i] - vdata_imag_s[i]) / vdata_imag_s[i]; if (fabs (err) > epsilon) { *verified = FALSE; break; } } } else if (d1 == 128 && d2 == 128 && d3 == 32 && nt == 6) { *classT = 'W'; for (i = 1; i <= nt; i++) { err = (sums_r[i] - vdata_real_w[i]) / vdata_real_w[i]; if (fabs (err) > epsilon) { *verified = FALSE; break; } err = (sums_i[i] - vdata_imag_w[i]) / vdata_imag_w[i]; if (fabs (err) > epsilon) { *verified = FALSE; break; } } } else if (d1 == 256 && d2 == 256 && d3 == 128 && nt == 6) { *classT = 'A'; for (i = 1; i <= nt; i++) { err = (sums_r[i] - vdata_real_a[i]) / vdata_real_a[i]; if (fabs (err) > epsilon) { *verified = FALSE; break; } err = (sums_i[i] - vdata_imag_a[i]) / vdata_imag_a[i]; if (fabs (err) > epsilon) { *verified = FALSE; break; } } } else if (d1 == 512 && d2 == 256 && d3 == 256 && nt == 20) { *classT = 'B'; for (i = 1; i <= nt; i++) { err = (sums_r[i] - vdata_real_b[i]) / vdata_real_b[i]; if (fabs (err) > epsilon) { *verified = FALSE; break; } err = (sums_i[i] - vdata_imag_b[i]) / vdata_imag_b[i]; if (fabs (err) > epsilon) { *verified = FALSE; break; } } } else if (d1 == 512 && d2 == 512 && d3 == 512 && nt == 20) { *classT = 'C'; for (i = 1; i <= nt; i++) { err = (sums_r[i] - vdata_real_c[i]) / vdata_real_c[i]; if (fabs (err) > epsilon) { *verified = FALSE; break; } err = (sums_i[i] - vdata_imag_c[i]) / vdata_imag_c[i]; if (fabs (err) > epsilon) { *verified = FALSE; break; } } } if (*classT != 'U') { printf ("Result verification successful\n"); } else { printf ("Result verification failed\n"); } printf ("class = %1c\n", *classT); }
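/*--------------------------------------------------------------------
c For reference: a minimal, self-contained sketch of the radix-2 butterfly
c stage (fftz2) that the OpenACC kernels above inline repeatedly.  It is
c illustrative only -- the name sketch_fftz2 and its flat argument list are
c not part of the benchmark, which keeps this logic fused into each
c "kernels loop" region.  nvec is the number of independent 1-D transforms,
c which is also the stride between consecutive elements of one transform.
c-------------------------------------------------------------------*/
static void sketch_fftz2(int is, int l, int m_log, int n,
                         const double *w_r, const double *w_i,
                         const double *x_r, const double *x_i,
                         double *y_r, double *y_i, int nvec)
{
    int lk, li, lj, ku, n1, idx, i0, k;
    int i11, i12, i21, i22, p;
    double wr, wi, ar, ai, br, bi;

    lk = (l == 1) ? 1 : (2 << (l - 2));             /* 2^(l-1): block length  */
    li = (m_log == l) ? 1 : (2 << (m_log - l - 1)); /* 2^(m-l): block count   */
    lj = 2 * lk;                                    /* output block spacing   */
    ku = li;                                        /* first twiddle index    */
    n1 = n / 2;

    for (idx = 0; idx < nvec; idx++) {              /* one 1-D FFT per idx    */
        for (i0 = 0; i0 < li; i0++) {
            i11 = idx + i0 * lk * nvec;
            i12 = i11 + n1 * nvec;
            i21 = idx + i0 * lj * nvec;
            i22 = i21 + lk * nvec;
            wr = w_r[ku + i0];
            wi = (is >= 1) ? w_i[ku + i0] : -w_i[ku + i0]; /* conjugate for inverse */
            for (k = 0; k < lk; k++) {
                p = k * nvec;
                ar = x_r[i11 + p]; ai = x_i[i11 + p];
                br = x_r[i12 + p]; bi = x_i[i12 + p];
                y_r[i21 + p] = ar + br;                         /* a + b      */
                y_i[i21 + p] = ai + bi;
                y_r[i22 + p] = wr * (ar - br) - wi * (ai - bi); /* w * (a-b)  */
                y_i[i22 + p] = wr * (ai - bi) + wi * (ar - br);
            }
        }
    }
}
/* Calling this once per stage, swapping the roles of x and y each time,
   reproduces cfftz up to the final copy that is done when m_log is odd. */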
/*-------------------------------------------------------------------- NAS Parallel Benchmarks 2.3 OpenMP C versions - FT This benchmark is an OpenMP C version of the NPB FT code. The OpenMP C versions are developed by RWCP and derived from the serial Fortran versions in "NPB 2.3-serial" developed by NAS. Permission to use, copy, distribute and modify this software for any purpose with or without fee is hereby granted. This software is provided "as is" without express or implied warranty. Send comments on the OpenMP C versions to pdp-openmp@rwcp.or.jp Information on OpenMP activities at RWCP is available at: http://pdplab.trc.rwcp.or.jp/pdperf/Omni/ Information on NAS Parallel Benchmarks 2.3 is available at: http://www.nas.nasa.gov/NAS/NPB/ --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- Authors: D. Bailey W. Saphir OpenMP C version: S. Satoh --------------------------------------------------------------------*/ #include "npb-C.h" /* global variables */ #include "global.h" /* function declarations */ static void compute_initial_conditions(double u0_r[NTOTAL], double u0_i[NTOTAL]); static void ipow46(double a, int exponent, double *result); static void setup(void); static void print_timers(void); static void fft(int dir, double x1_r[NTOTAL], double x1_i[NTOTAL], double x2_r[NTOTAL], double x2_i[NTOTAL]); static void fft_init(int n); static void cfftz(int is, int m, int n, double x_r[NTOTAL], double x_i[NTOTAL], double y_r[NTOTAL], double y_i[NTOTAL], int di1, int di2); static void fftz2(int is, int l, int m, int n, double u_r[NX], double u_i[NX], double x_r[NTOTAL], double x_i[NTOTAL], double y_r[NTOTAL], double y_i[NTOTAL], int di1, int di2); static int ilog2(int n); static void verify(int d1, int d2, int d3, int nt, boolean * verified, char *classT); /*-------------------------------------------------------------------- c FT benchmark c-------------------------------------------------------------------*/ int main(int argc, char **argv) { /* * c------------------------------------------------------------------- * c------------------------------------------------------------------- */ int i_main, ierr; /*------------------------------------------------------------------ c u0, u1, u2 are the main arrays in the problem. c Depending on the decomposition, these arrays will have different c dimensions. To accomodate all possibilities, we allocate them as c one-dimensional arrays and pass them to subroutines for different c views c - u0 contains the initial (transformed) initial condition c - u1 and u2 are working arrays c - indexmap maps i,j,k of u0 to the correct i^2+j^2+k^2 for the c time evolution operator. c-----------------------------------------------------------------*/ /*-------------------------------------------------------------------- c Large arrays are in common so that they are allocated on the c heap rather than the stack. This common block is not c referenced directly anywhere else. Padding is to avoid accidental c cache problems, since all array sizes are powers of two. 
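c In this version the 3-D fields are flattened to 1-D arrays of length
c NTOTAL = NX*NY*NZ; a linear index m decomposes as i = m % NX,
c j = (m / NX) % NY, k = m / (NX * NY).  The transposed work arrays use
c id = i*NZ*NY + j*NZ + k or id = j*NX*NZ + k*NX + i, depending on which
c dimension is being transformed.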
c-------------------------------------------------------------------*/ static double u0_r[NTOTAL]; //u0_r[NZ][NY][NX]; static double u0_i[NTOTAL]; //u0_i[NZ][NY][NX]; static double u1_r[NTOTAL]; //u1_r[NZ][NY][NX]; static double u1_i[NTOTAL]; //u1_i[NZ][NY][NX]; static double u2_r[NTOTAL]; //u2_r[NZ][NY][NX]; static double u2_i[NTOTAL]; //u2_i[NZ][NY][NX]; static int indexmap[NTOTAL]; //indexmap[NZ][NY][NX]; int iter; int nthreads = 1; double total_time, mflops; boolean verified; char classT; ////////////////////////////////// //Used for compute_indexmap ().// ////////////////////////////////// int i, j, k, ii, ii2, jj, ij2, kk; int m; double ap; //////////////////////// //Used for evolve ().// //////////////////////// //int i, j, k; ////////////////////////// //Used for checksum ().// ////////////////////////// //int m, j, int q, r, s; double chk_r, chk_i; ///////////////////// //Used for fft ().// ///////////////////// int dir; static double y0_r[NTOTAL]; static double y0_i[NTOTAL]; static double y1_r[NTOTAL]; static double y1_i[NTOTAL]; int logNX, logNY, logNZ; /////////////////////// //Used for cffts1 ().// /////////////////////// //int i, j, k, jj, m; int id; int is; /////////////////////// //Used for cfftz ().// /////////////////////// int l; /////////////////////// //Used for fftz2 ().// /////////////////////// int k_fftz2, n1, li, lj, lk, ku, i_fftz2, i11, i12, i21, i22; double u1_rf, x11_r, x21_r; double u1_if, x11_i, x21_i; int idx, p, nn; double x11real, x11imag, x21real, x21imag; /*-------------------------------------------------------------------- c Run the entire problem once to make sure all data is touched. c This reduces variable startup costs, which is important for such a c short benchmark. The other NPB 2 implementations are similar. c-------------------------------------------------------------------*/ for (i_main = 0; i_main < T_MAX; i_main++) { timer_clear(i_main); } setup(); { //compute_indexmap(indexmap); /*-------------------------------------------------------------------- c compute function from local (i,j,k) to ibar^2+jbar^2+kbar^2 c for time evolution exponent. c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c basically we want to convert the fortran indices c 1 2 3 4 5 6 7 8 c to c 0 1 2 3 -4 -3 -2 -1 c The following magic formula does the trick: c mod(i-1+n/2, n) - n/2 c-------------------------------------------------------------------*/ #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; ii = (i + NX / 2) % NX - NX / 2; ii2 = ii * ii; jj = (j + NY / 2) % NY - NY / 2; ij2 = jj * jj + ii2; kk = (k + NZ / 2) % NZ - NZ / 2; indexmap[m] = kk * kk + ij2; } /*-------------------------------------------------------------------- c compute array of exponentials for time evolution. c-------------------------------------------------------------------*/ { ap = -4.0 * ALPHA * PI * PI; ex[0] = 1.0; ex[1] = exp(ap); for (i = 2; i <= EXPMAX; i++) { ex[i] = ex[i - 1] * ex[1]; } } /* end single */ { compute_initial_conditions(u1_r, u1_i); fft_init(dims[0][0]); } fft(1, u1_r, u1_i, u0_r, u0_i); } /* end parallel */ /*-------------------------------------------------------------------- c Start over from the beginning. Note that all operations must c be timed, in contrast to other benchmarks. 
c-------------------------------------------------------------------*/ for (i_main = 0; i_main < T_MAX; i_main++) { timer_clear(i_main); } timer_start(T_TOTAL); if (TIMERS_ENABLED == TRUE) timer_start(T_SETUP); // #pragma acc data \ create(ex[0:EXPMAX+1]) \ create(indexmap[0:NTOTAL]) \ create(u_r[0:NX], u_i[0:NX]) \ create(u1_r[0:NTOTAL], u1_i[0:NTOTAL]) \ create(u0_r[0:NTOTAL], u0_i[0:NTOTAL]) \ create(u2_r[0:NTOTAL], u2_i[0:NTOTAL]) \ create(y0_r[0:NTOTAL], y0_i[0:NTOTAL]) \ create(y1_r[0:NTOTAL], y1_i[0:NTOTAL]) { //compute_indexmap(indexmap); /*-------------------------------------------------------------------- c compute function from local (i,j,k) to ibar^2+jbar^2+kbar^2 c for time evolution exponent. c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c basically we want to convert the fortran indices c 1 2 3 4 5 6 7 8 c to c 0 1 2 3 -4 -3 -2 -1 c The following magic formula does the trick: c mod(i-1+n/2, n) - n/2 c-------------------------------------------------------------------*/ #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; ii = (i + NX / 2) % NX - NX / 2; ii2 = ii * ii; jj = (j + NY / 2) % NY - NY / 2; ij2 = jj * jj + ii2; kk = (k + NZ / 2) % NZ - NZ / 2; indexmap[m] = kk * kk + ij2; } /*-------------------------------------------------------------------- c compute array of exponentials for time evolution. c-------------------------------------------------------------------*/ { ap = -4.0 * ALPHA * PI * PI; ex[0] = 1.0; ex[1] = exp(ap); for (i = 2; i <= EXPMAX; i++) { ex[i] = ex[i - 1] * ex[1]; } } /* end single */ #pragma acc update device(ex[0:EXPMAX+1]) { compute_initial_conditions(u1_r, u1_i); fft_init(dims[0][0]); } #pragma acc update device(u_r[0:NX], u_i[0:NX], \ u1_r[0:NTOTAL], u1_i[0:NTOTAL]) if (TIMERS_ENABLED == TRUE) { timer_stop(T_SETUP); } if (TIMERS_ENABLED == TRUE) { timer_start(T_FFT); } //fft(1, u1_r, u1_i, u0_r, u0_i); //START_FFT // dir = 1; logNX = ilog2(NX); logNY = ilog2(NY); logNZ = ilog2(NZ); /*-------------------------------------------------------------------- c note: args u1, x2 must be different arrays c note: args for cfftsx are (direction, layout, xin, xout, scratch) c xin/xout may be the same and it can be somewhat faster c if they are c-------------------------------------------------------------------*/ { if (dir == 1) { //cffts1(1, logNX, u1_r, u1_i, u1_r, u1_i, y0_r, y0_i, y1_r, y1_i); /* u1 -> u1 */ is = 1; #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = i * NZ * NY + j * NZ + k; y0_r[id] = u1_r[m]; y0_i[id] = u1_i[m]; } //cfftz(is, logNX, NX, y0_r, y0_i, y1_r, y1_i, NZ, NY); /*-------------------------------------------------------------------- c Perform one variant of the Stockham FFT. c-------------------------------------------------------------------*/ for (l = 1; l <= logNX; l += 2) { //fftz2(is, l, logNX, NX, u_r, u_i, y0_r, y0_i, y1_r, y1_i, NZ, NY); /*-------------------------------------------------------------------- c Set initial parameters. 
c-------------------------------------------------------------------*/ nn = NZ * NY; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NX; idx++) { n1 = NX / 2; if (l - 1 == 0) { lk = 1; } else { lk = 2 << ((l - 1) - 1); } if (logNX - l == 0) { li = 1; } else { li = 2 << ((logNX - l) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y0_r[i11 + p]; x11imag = y0_i[i11 + p]; x21real = y0_r[i12 + p]; x21imag = y0_i[i12 + p]; y1_r[i21 + p] = x11real + x21real; y1_i[i21 + p] = x11imag + x21imag; y1_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y1_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } if (l == logNX) break; //fftz2(is, l + 1, logNX, NX, u_r, u_i, y1_r, y1_i, y0_r, y0_i, NZ, NY); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NZ * NY; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NX; idx++) { n1 = NX / 2; if (l == 0) { lk = 1; } else { lk = 2 << ((l) - 1); } if (logNX - (l + 1) == 0) { li = 1; } else { li = 2 << ((logNX - (l + 1)) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y1_r[i11 + p]; x11imag = y1_i[i11 + p]; x21real = y1_r[i12 + p]; x21imag = y1_i[i12 + p]; y0_r[i21 + p] = x11real + x21real; y0_i[i21 + p] = x11imag + x21imag; y0_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y0_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } } if (logNX % 2 == 1) { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = i * NZ * NY + j * NZ + k; u1_r[m] = y1_r[id]; u1_i[m] = y1_i[id]; } } else { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = i * NZ * NY + j * NZ + k; u1_r[m] = y0_r[id]; u1_i[m] = y0_i[id]; } } //cffts2(1, logNY, u1_r, u1_i, u1_r, u1_i, y0_r, y0_i, y1_r, y1_i); /* x1 -> x1 */ is = 1; #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = j * NX * NZ + k * NX + i; y0_r[id] = u1_r[m]; y0_i[id] = u1_i[m]; } //cfftz(is, logNY, NY, y0_r, y0_i, y1_r, y1_i, NX, NZ); /*-------------------------------------------------------------------- c Perform one variant of the Stockham FFT. 
c-------------------------------------------------------------------*/ for (l = 1; l <= logNY; l += 2) { //fftz2(is, l, logNY, NY, u_r, u_i, y0_r, y0_i, y1_r, y1_i, NX, NZ); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NX * NZ; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NY; idx++) { n1 = NY / 2; if (l - 1 == 0) { lk = 1; } else { lk = 2 << ((l - 1) - 1); } if (logNY - l == 0) { li = 1; } else { li = 2 << ((logNY - l) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y0_r[i11 + p]; x11imag = y0_i[i11 + p]; x21real = y0_r[i12 + p]; x21imag = y0_i[i12 + p]; y1_r[i21 + p] = x11real + x21real; y1_i[i21 + p] = x11imag + x21imag; y1_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y1_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } if (l == logNY) break; //fftz2(is, l + 1, logNY, NY, u_r, u_i, y1_r, y1_i, y0_r, y0_i, NX, NZ); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NX * NZ; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NY; idx++) { n1 = NY / 2; if (l == 0) { lk = 1; } else { lk = 2 << ((l) - 1); } if (logNY - (l + 1) == 0) { li = 1; } else { li = 2 << ((logNY - (l + 1)) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. 
c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y1_r[i11 + p]; x11imag = y1_i[i11 + p]; x21real = y1_r[i12 + p]; x21imag = y1_i[i12 + p]; y0_r[i21 + p] = x11real + x21real; y0_i[i21 + p] = x11imag + x21imag; y0_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y0_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } } if (logNY % 2 == 1) { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = j * NX * NZ + k * NX + i; u1_r[m] = y1_r[id]; u1_i[m] = y1_i[id]; } } else { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = j * NX * NZ + k * NX + i; u1_r[m] = y0_r[id]; u1_i[m] = y0_i[id]; } } //cffts3(1, logNZ, u1_r, u1_i, u0_r, u0_i, y0_r, y0_i, y1_r, y1_i); /* x1 -> x2 */ is = 1; //cfftz(is, logNZ, NZ, u1_r, u1_i, y1_r, y1_i, NX, NY); /*-------------------------------------------------------------------- c Perform one variant of the Stockham FFT. c-------------------------------------------------------------------*/ for (l = 1; l <= logNZ; l += 2) { //fftz2(is, l, logNZ, NZ, u_r, u_i, u1_r, u1_i, y1_r, y1_i, NX, NY); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NX * NY; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NZ; idx++) { n1 = NZ / 2; if (l - 1 == 0) { lk = 1; } else { lk = 2 << ((l - 1) - 1); } if (logNZ - l == 0) { li = 1; } else { li = 2 << ((logNZ - l) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = u1_r[i11 + p]; x11imag = u1_i[i11 + p]; x21real = u1_r[i12 + p]; x21imag = u1_i[i12 + p]; y1_r[i21 + p] = x11real + x21real; y1_i[i21 + p] = x11imag + x21imag; y1_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y1_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } if (l == logNZ) break; //fftz2(is, l + 1, logNZ, NZ, u_r, u_i, y1_r, y1_i, u1_r, u1_i, NX, NY); /*-------------------------------------------------------------------- c Set initial parameters. 
c-------------------------------------------------------------------*/ nn = NX * NY; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NZ; idx++) { n1 = NZ / 2; if (l == 0) { lk = 1; } else { lk = 2 << ((l) - 1); } if (logNZ - (l + 1) == 0) { li = 1; } else { li = 2 << ((logNZ - (l + 1)) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y1_r[i11 + p]; x11imag = y1_i[i11 + p]; x21real = y1_r[i12 + p]; x21imag = y1_i[i12 + p]; u1_r[i21 + p] = x11real + x21real; u1_i[i21 + p] = x11imag + x21imag; u1_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); u1_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } } if (logNZ % 2 == 1) { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { u0_r[m] = y1_r[m]; u0_i[m] = y1_i[m]; } } else { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { u0_r[m] = u1_r[m]; u0_i[m] = u1_i[m]; } } } else { //cffts3(-1, logNZ, u1_r, u1_i, u1_r, u1_i, y0_r, y0_i, y1_r, y1_i); /* x1 -> x1 */ is = -1; //cfftz(is, logNZ, NZ, u1_r, u1_i, y1_r, y1_i, NX, NY); /*-------------------------------------------------------------------- c Perform one variant of the Stockham FFT. c-------------------------------------------------------------------*/ for (l = 1; l <= logNZ; l += 2) { //fftz2(is, l, logNZ, NZ, u_r, u_i, u1_r, u1_i, y1_r, y1_i, NX, NY); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NX * NY; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NZ; idx++) { n1 = NZ / 2; if (l - 1 == 0) { lk = 1; } else { lk = 2 << ((l - 1) - 1); } if (logNZ - l == 0) { li = 1; } else { li = 2 << ((logNZ - l) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = u1_r[i11 + p]; x11imag = u1_i[i11 + p]; x21real = u1_r[i12 + p]; x21imag = u1_i[i12 + p]; y1_r[i21 + p] = x11real + x21real; y1_i[i21 + p] = x11imag + x21imag; y1_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y1_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } if (l == logNZ) break; //fftz2(is, l + 1, logNZ, NZ, u_r, u_i, y1_r, y1_i, u1_r, u1_i, NX, NY); /*-------------------------------------------------------------------- c Set initial parameters. 
c-------------------------------------------------------------------*/ nn = NX * NY; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NZ; idx++) { n1 = NZ / 2; if (l == 0) { lk = 1; } else { lk = 2 << ((l) - 1); } if (logNZ - (l + 1) == 0) { li = 1; } else { li = 2 << ((logNZ - (l + 1)) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y1_r[i11 + p]; x11imag = y1_i[i11 + p]; x21real = y1_r[i12 + p]; x21imag = y1_i[i12 + p]; u1_r[i21 + p] = x11real + x21real; u1_i[i21 + p] = x11imag + x21imag; u1_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); u1_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } } if (logNZ % 2 == 1) { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { u1_r[m] = y1_r[m]; u1_i[m] = y1_i[m]; } } else { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { u1_r[m] = u1_r[m]; u1_i[m] = u1_i[m]; } } //cffts2(-1, logNY, u1_r, u1_i, u1_r, u1_i, y0_r, y0_i, y1_r, y1_i); /* x1 -> x1 */ is = -1; #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = j * NX * NZ + k * NX + i; y0_r[id] = u1_r[m]; y0_i[id] = u1_i[m]; } //cfftz(is, logNY, NY, y0_r, y0_i, y1_r, y1_i, NX, NZ); /*-------------------------------------------------------------------- c Perform one variant of the Stockham FFT. c-------------------------------------------------------------------*/ for (l = 1; l <= logNY; l += 2) { //fftz2(is, l, logNY, NY, u_r, u_i, y0_r, y0_i, y1_r, y1_i, NX, NZ); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NX * NZ; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NY; idx++) { n1 = NY / 2; if (l - 1 == 0) { lk = 1; } else { lk = 2 << ((l - 1) - 1); } if (logNY - l == 0) { li = 1; } else { li = 2 << ((logNY - l) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. 
c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y0_r[i11 + p]; x11imag = y0_i[i11 + p]; x21real = y0_r[i12 + p]; x21imag = y0_i[i12 + p]; y1_r[i21 + p] = x11real + x21real; y1_i[i21 + p] = x11imag + x21imag; y1_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y1_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } if (l == logNY) break; //fftz2(is, l + 1, logNY, NY, u_r, u_i, y1_r, y1_i, y0_r, y0_i, NX, NZ); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NX * NZ; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NY; idx++) { n1 = NY / 2; if (l == 0) { lk = 1; } else { lk = 2 << ((l) - 1); } if (logNY - (l + 1) == 0) { li = 1; } else { li = 2 << ((logNY - (l + 1)) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y1_r[i11 + p]; x11imag = y1_i[i11 + p]; x21real = y1_r[i12 + p]; x21imag = y1_i[i12 + p]; y0_r[i21 + p] = x11real + x21real; y0_i[i21 + p] = x11imag + x21imag; y0_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y0_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } } if (logNY % 2 == 1) { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = j * NX * NZ + k * NX + i; u1_r[m] = y1_r[id]; u1_i[m] = y1_i[id]; } } else { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = j * NX * NZ + k * NX + i; u1_r[m] = y0_r[id]; u1_i[m] = y0_i[id]; } } //cffts1(-1, logNX, u1_r, u1_i, u0_r, u0_i, y0_r, y0_i, y1_r, y1_i); /* x1 -> x2 */ is = -1; #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = i * NZ * NY + j * NZ + k; y0_r[id] = u1_r[m]; y0_i[id] = u1_i[m]; } //cfftz(is, logNX, NX, y0_r, y0_i, y1_r, y1_i, NZ, NY); /*-------------------------------------------------------------------- c Perform one variant of the Stockham FFT. c-------------------------------------------------------------------*/ for (l = 1; l <= logNX; l += 2) { //fftz2(is, l, logNX, NX, u_r, u_i, y0_r, y0_i, y1_r, y1_i, NZ, NY); /*-------------------------------------------------------------------- c Set initial parameters. 
c-------------------------------------------------------------------*/ nn = NZ * NY; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NX; idx++) { n1 = NX / 2; if (l - 1 == 0) { lk = 1; } else { lk = 2 << ((l - 1) - 1); } if (logNX - l == 0) { li = 1; } else { li = 2 << ((logNX - l) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y0_r[i11 + p]; x11imag = y0_i[i11 + p]; x21real = y0_r[i12 + p]; x21imag = y0_i[i12 + p]; y1_r[i21 + p] = x11real + x21real; y1_i[i21 + p] = x11imag + x21imag; y1_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y1_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } if (l == logNX) break; //fftz2(is, l + 1, logNX, NX, u_r, u_i, y1_r, y1_i, y0_r, y0_i, NZ, NY); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NZ * NY; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NX; idx++) { n1 = NX / 2; if (l == 0) { lk = 1; } else { lk = 2 << ((l) - 1); } if (logNX - (l + 1) == 0) { li = 1; } else { li = 2 << ((logNX - (l + 1)) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. 
c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y1_r[i11 + p]; x11imag = y1_i[i11 + p]; x21real = y1_r[i12 + p]; x21imag = y1_i[i12 + p]; y0_r[i21 + p] = x11real + x21real; y0_i[i21 + p] = x11imag + x21imag; y0_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y0_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } } if (logNX % 2 == 1) { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = i * NZ * NY + j * NZ + k; u0_r[m] = y1_r[id]; u0_i[m] = y1_i[id]; } } else { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = i * NZ * NY + j * NZ + k; u0_r[m] = y0_r[id]; u0_i[m] = y0_i[id]; } } } } //END_FFT // if (TIMERS_ENABLED == TRUE) { timer_stop(T_FFT); } for (iter = 1; iter <= niter; iter++) { if (TIMERS_ENABLED == TRUE) { timer_start(T_EVOLVE); } //evolve(u0_r, u0_i, u1_r, u1_i, iter, indexmap); /*-------------------------------------------------------------------- c evolve u0 -> u1 (iter time steps) in fourier space c-------------------------------------------------------------------*/ #pragma acc kernels loop gang worker independent for (i = 0; i < NTOTAL; i++) { u1_r[i] = u0_r[i] * ex[iter * indexmap[i]]; u1_i[i] = u0_i[i] * ex[iter * indexmap[i]]; } if (TIMERS_ENABLED == TRUE) { timer_stop(T_EVOLVE); } if (TIMERS_ENABLED == TRUE) { timer_start(T_FFT); } //fft(-1, u1_r, u1_i, u2_r, u2_i); //START_FFT // dir = -1; logNX = ilog2(NX); logNY = ilog2(NY); logNZ = ilog2(NZ); /*-------------------------------------------------------------------- c note: args x1, x2 must be different arrays c note: args for cfftsx are (direction, layout, xin, xout, scratch) c xin/xout may be the same and it can be somewhat faster c if they are c-------------------------------------------------------------------*/ { if (dir == 1) { //cffts1(1, logNX, u1_r, u1_i, u1_r, u1_i, y0_r, y0_i, y1_r, y1_i); /* x1 -> x1 */ is = 1; #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = i * NZ * NY + j * NZ + k; y0_r[id] = u1_r[m]; y0_i[id] = u1_i[m]; } //cfftz(is, logNX, NX, y0_r, y0_i, y1_r, y1_i, NZ, NY); /*-------------------------------------------------------------------- c Perform one variant of the Stockham FFT. c-------------------------------------------------------------------*/ for (l = 1; l <= logNX; l += 2) { //fftz2(is, l, logNX, NX, u_r, u_i, y0_r, y0_i, y1_r, y1_i, NZ, NY); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NZ * NY; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NX; idx++) { n1 = NX / 2; if (l - 1 == 0) { lk = 1; } else { lk = 2 << ((l - 1) - 1); } if (logNX - l == 0) { li = 1; } else { li = 2 << ((logNX - l) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. 
c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y0_r[i11 + p]; x11imag = y0_i[i11 + p]; x21real = y0_r[i12 + p]; x21imag = y0_i[i12 + p]; y1_r[i21 + p] = x11real + x21real; y1_i[i21 + p] = x11imag + x21imag; y1_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y1_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } if (l == logNX) break; //fftz2(is, l + 1, logNX, NX, u_r, u_i, y1_r, y1_i, y0_r, y0_i, NZ, NY); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NZ * NY; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NX; idx++) { n1 = NX / 2; if (l == 0) { lk = 1; } else { lk = 2 << ((l) - 1); } if (logNX - (l + 1) == 0) { li = 1; } else { li = 2 << ((logNX - (l + 1)) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y1_r[i11 + p]; x11imag = y1_i[i11 + p]; x21real = y1_r[i12 + p]; x21imag = y1_i[i12 + p]; y0_r[i21 + p] = x11real + x21real; y0_i[i21 + p] = x11imag + x21imag; y0_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y0_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } } if (logNX % 2 == 1) { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = i * NZ * NY + j * NZ + k; u1_r[m] = y1_r[id]; u1_i[m] = y1_i[id]; } } else { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = i * NZ * NY + j * NZ + k; u1_r[m] = y0_r[id]; u1_i[m] = y0_i[id]; } } //cffts2(1, logNY, u1_r, u1_i, u1_r, u1_i, y0_r, y0_i, y1_r, y1_i); /* x1 -> x1 */ is = 1; #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = j * NX * NZ + k * NX + i; y0_r[id] = u1_r[m]; y0_i[id] = u1_i[m]; } //cfftz(is, logNY, NY, y0_r, y0_i, y1_r, y1_i, NX, NZ); /*-------------------------------------------------------------------- c Perform one variant of the Stockham FFT. c-------------------------------------------------------------------*/ for (l = 1; l <= logNY; l += 2) { //fftz2(is, l, logNY, NY, u_r, u_i, y0_r, y0_i, y1_r, y1_i, NX, NZ); /*-------------------------------------------------------------------- c Set initial parameters. 
c-------------------------------------------------------------------*/ nn = NX * NZ; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NY; idx++) { n1 = NY / 2; if (l - 1 == 0) { lk = 1; } else { lk = 2 << ((l - 1) - 1); } if (logNY - l == 0) { li = 1; } else { li = 2 << ((logNY - l) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y0_r[i11 + p]; x11imag = y0_i[i11 + p]; x21real = y0_r[i12 + p]; x21imag = y0_i[i12 + p]; y1_r[i21 + p] = x11real + x21real; y1_i[i21 + p] = x11imag + x21imag; y1_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y1_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } if (l == logNY) break; //fftz2(is, l + 1, logNY, NY, u_r, u_i, y1_r, y1_i, y0_r, y0_i, NX, NZ); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NX * NZ; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NY; idx++) { n1 = NY / 2; if (l == 0) { lk = 1; } else { lk = 2 << ((l) - 1); } if (logNY - (l + 1) == 0) { li = 1; } else { li = 2 << ((logNY - (l + 1)) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y1_r[i11 + p]; x11imag = y1_i[i11 + p]; x21real = y1_r[i12 + p]; x21imag = y1_i[i12 + p]; y0_r[i21 + p] = x11real + x21real; y0_i[i21 + p] = x11imag + x21imag; y0_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y0_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } } if (logNY % 2 == 1) { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = j * NX * NZ + k * NX + i; u1_r[m] = y1_r[id]; u1_i[m] = y1_i[id]; } } else { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = j * NX * NZ + k * NX + i; u1_r[m] = y0_r[id]; u1_i[m] = y0_i[id]; } } //cffts3(1, logNZ, u1_r, u1_i, u2_r, u2_i, y0_r, y0_i, y1_r, y1_i); /* x1 -> x2 */ is = 1; //cfftz(is, logNZ, NZ, u1_r, u1_i, y1_r, y1_i, NX, NY); /*-------------------------------------------------------------------- c Perform one variant of the Stockham FFT. 
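c
c (Note: unlike the X and Y stages there is no transpose before this
c  Z-dimension cfftz -- in the natural layout m = k*NY*NX + j*NX + i the
c  z index already has stride NX*NY, so the passes below operate directly
c  on u1 with NX*NY pencils.)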
c-------------------------------------------------------------------*/ for (l = 1; l <= logNZ; l += 2) { //fftz2(is, l, logNZ, NZ, u_r, u_i, u1_r, u1_i, y1_r, y1_i, NX, NY); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NX * NY; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NZ; idx++) { n1 = NZ / 2; if (l - 1 == 0) { lk = 1; } else { lk = 2 << ((l - 1) - 1); } if (logNZ - l == 0) { li = 1; } else { li = 2 << ((logNZ - l) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = u1_r[i11 + p]; x11imag = u1_i[i11 + p]; x21real = u1_r[i12 + p]; x21imag = u1_i[i12 + p]; y1_r[i21 + p] = x11real + x21real; y1_i[i21 + p] = x11imag + x21imag; y1_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y1_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } if (l == logNZ) break; //fftz2(is, l + 1, logNZ, NZ, u_r, u_i, y1_r, y1_i, u1_r, u1_i, NX, NY); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NX * NY; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NZ; idx++) { n1 = NZ / 2; if (l == 0) { lk = 1; } else { lk = 2 << ((l) - 1); } if (logNZ - (l + 1) == 0) { li = 1; } else { li = 2 << ((logNZ - (l + 1)) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y1_r[i11 + p]; x11imag = y1_i[i11 + p]; x21real = y1_r[i12 + p]; x21imag = y1_i[i12 + p]; u1_r[i21 + p] = x11real + x21real; u1_i[i21 + p] = x11imag + x21imag; u1_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); u1_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } } if (logNZ % 2 == 1) { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { u2_r[m] = y1_r[m]; u2_i[m] = y1_i[m]; } } else { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { u2_r[m] = u1_r[m]; u2_i[m] = u1_i[m]; } } } else { //cffts3(-1, logNZ, u1_r, u1_i, u1_r, u1_i, y0_r, y0_i, y1_r, y1_i); /* x1 -> x1 */ is = -1; //cfftz(is, logNZ, NZ, u1_r, u1_i, y1_r, y1_i, NX, NY); /*-------------------------------------------------------------------- c Perform one variant of the Stockham FFT. 
c-------------------------------------------------------------------*/ for (l = 1; l <= logNZ; l += 2) { //fftz2(is, l, logNZ, NZ, u_r, u_i, u1_r, u1_i, y1_r, y1_i, NX, NY); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NX * NY; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NZ; idx++) { n1 = NZ / 2; if (l - 1 == 0) { lk = 1; } else { lk = 2 << ((l - 1) - 1); } if (logNZ - l == 0) { li = 1; } else { li = 2 << ((logNZ - l) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = u1_r[i11 + p]; x11imag = u1_i[i11 + p]; x21real = u1_r[i12 + p]; x21imag = u1_i[i12 + p]; y1_r[i21 + p] = x11real + x21real; y1_i[i21 + p] = x11imag + x21imag; y1_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y1_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } if (l == logNZ) break; //fftz2(is, l + 1, logNZ, NZ, u_r, u_i, y1_r, y1_i, u1_r, u1_i, NX, NY); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NX * NY; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NZ; idx++) { n1 = NZ / 2; if (l == 0) { lk = 1; } else { lk = 2 << ((l) - 1); } if (logNZ - (l + 1) == 0) { li = 1; } else { li = 2 << ((logNZ - (l + 1)) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. 
c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y1_r[i11 + p]; x11imag = y1_i[i11 + p]; x21real = y1_r[i12 + p]; x21imag = y1_i[i12 + p]; u1_r[i21 + p] = x11real + x21real; u1_i[i21 + p] = x11imag + x21imag; u1_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); u1_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } } if (logNZ % 2 == 1) { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { u1_r[m] = y1_r[m]; u1_i[m] = y1_i[m]; } } else { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { u1_r[m] = u1_r[m]; u1_i[m] = u1_i[m]; } } //cffts2(-1, logNY, u1_r, u1_i, u1_r, u1_i, y0_r, y0_i, y1_r, y1_i); /* x1 -> x1 */ is = -1; #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = j * NX * NZ + k * NX + i; y0_r[id] = u1_r[m]; y0_i[id] = u1_i[m]; } //cfftz(is, logNY, NY, y0_r, y0_i, y1_r, y1_i, NX, NZ); /*-------------------------------------------------------------------- c Perform one variant of the Stockham FFT. c-------------------------------------------------------------------*/ for (l = 1; l <= logNY; l += 2) { //fftz2(is, l, logNY, NY, u_r, u_i, y0_r, y0_i, y1_r, y1_i, NX, NZ); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NX * NZ; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NY; idx++) { n1 = NY / 2; if (l - 1 == 0) { lk = 1; } else { lk = 2 << ((l - 1) - 1); } if (logNY - l == 0) { li = 1; } else { li = 2 << ((logNY - l) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y0_r[i11 + p]; x11imag = y0_i[i11 + p]; x21real = y0_r[i12 + p]; x21imag = y0_i[i12 + p]; y1_r[i21 + p] = x11real + x21real; y1_i[i21 + p] = x11imag + x21imag; y1_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y1_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } if (l == logNY) break; //fftz2(is, l + 1, logNY, NY, u_r, u_i, y1_r, y1_i, y0_r, y0_i, NX, NZ); /*-------------------------------------------------------------------- c Set initial parameters. 
c-------------------------------------------------------------------*/ nn = NX * NZ; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NY; idx++) { n1 = NY / 2; if (l == 0) { lk = 1; } else { lk = 2 << ((l) - 1); } if (logNY - (l + 1) == 0) { li = 1; } else { li = 2 << ((logNY - (l + 1)) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y1_r[i11 + p]; x11imag = y1_i[i11 + p]; x21real = y1_r[i12 + p]; x21imag = y1_i[i12 + p]; y0_r[i21 + p] = x11real + x21real; y0_i[i21 + p] = x11imag + x21imag; y0_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y0_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } } if (logNY % 2 == 1) { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = j * NX * NZ + k * NX + i; u1_r[m] = y1_r[id]; u1_i[m] = y1_i[id]; } } else { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = j * NX * NZ + k * NX + i; u1_r[m] = y0_r[id]; u1_i[m] = y0_i[id]; } } //cffts1(-1, logNX, u1_r, u1_i, u2_r, u2_i, y0_r, y0_i, y1_r, y1_i); /* x1 -> x2 */ is = -1; #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = i * NZ * NY + j * NZ + k; y0_r[id] = u1_r[m]; y0_i[id] = u1_i[m]; } //cfftz(is, logNX, NX, y0_r, y0_i, y1_r, y1_i, NZ, NY); /*-------------------------------------------------------------------- c Perform one variant of the Stockham FFT. c-------------------------------------------------------------------*/ for (l = 1; l <= logNX; l += 2) { //fftz2(is, l, logNX, NX, u_r, u_i, y0_r, y0_i, y1_r, y1_i, NZ, NY); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NZ * NY; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NX; idx++) { n1 = NX / 2; if (l - 1 == 0) { lk = 1; } else { lk = 2 << ((l - 1) - 1); } if (logNX - l == 0) { li = 1; } else { li = 2 << ((logNX - l) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. 
c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y0_r[i11 + p]; x11imag = y0_i[i11 + p]; x21real = y0_r[i12 + p]; x21imag = y0_i[i12 + p]; y1_r[i21 + p] = x11real + x21real; y1_i[i21 + p] = x11imag + x21imag; y1_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y1_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } if (l == logNX) break; //fftz2(is, l + 1, logNX, NX, u_r, u_i, y1_r, y1_i, y0_r, y0_i, NZ, NY); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NZ * NY; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NX; idx++) { n1 = NX / 2; if (l == 0) { lk = 1; } else { lk = 2 << ((l) - 1); } if (logNX - (l + 1) == 0) { li = 1; } else { li = 2 << ((logNX - (l + 1)) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y1_r[i11 + p]; x11imag = y1_i[i11 + p]; x21real = y1_r[i12 + p]; x21imag = y1_i[i12 + p]; y0_r[i21 + p] = x11real + x21real; y0_i[i21 + p] = x11imag + x21imag; y0_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y0_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } } if (logNX % 2 == 1) { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = i * NZ * NY + j * NZ + k; u2_r[m] = y1_r[id]; u2_i[m] = y1_i[id]; } } else { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = i * NZ * NY + j * NZ + k; u2_r[m] = y0_r[id]; u2_i[m] = y0_i[id]; } } } } //END_FFT // if (TIMERS_ENABLED == TRUE) { timer_stop(T_FFT); } if (TIMERS_ENABLED == TRUE) { timer_start(T_CHECKSUM); } //checksum(iter, u2_r, u2_i, dims[0]); chk_r = 0.0; chk_i = 0.0; #pragma acc kernels loop gang worker independent for (m = 0; m < 1024; m++) { j = 1 + m; q = j % NX; if (q >= 0 && q < NX) { r = (3 * j) % NY; if (r >= 0 && r < NY) { s = (5 * j) % NZ; if (s >= 0 && s < NZ) { chk_r = chk_r + u2_r[s * NY * NX + r * NX + q]; chk_i = chk_i + u2_i[s * NY * NX + r * NX + q]; } } } } //printf("chk_r = %22.12e, chk_i =%22.12e\n", chk_r, chk_i); { sums_r[iter] += chk_r; sums_i[iter] += chk_i; } { sums_r[iter] = sums_r[iter] / (double)(NTOTAL); sums_i[iter] = sums_i[iter] / (double)(NTOTAL); printf("T = %5d Checksum = %22.12e %22.12e\n", iter, sums_r[iter], sums_i[iter]); } if (TIMERS_ENABLED == TRUE) { timer_stop(T_CHECKSUM); } } verify(NX, NY, NZ, niter, &verified, &classT); } /* end parallel */ timer_stop(T_TOTAL); total_time = timer_read(T_TOTAL); if (total_time != 0.0) { mflops = 1.0e-6 * (double)(NTOTAL) * (14.8157 + 7.19641 * log((double)(NTOTAL)) + (5.23518 + 7.21113 * log((double)(NTOTAL))) * niter) / total_time; } else { mflops = 0.0; } c_print_results("FT", classT, NX, NY, NZ, niter, nthreads, total_time, 
mflops, " floating point", verified, NPBVERSION, COMPILETIME, CS1, CS2, CS3, CS4, CS5, CS6, CS7); if (TIMERS_ENABLED == TRUE) print_timers(); return 0; } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void compute_initial_conditions(double u0_r[NTOTAL], double u0_i[NTOTAL]) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c Fill in array u0 with initial conditions from c random number generator c-------------------------------------------------------------------*/ int k; double x0, start, an, dummy; static double tmp[NX * 2 * MAXDIM + 1]; int i, j, t; start = SEED; /*-------------------------------------------------------------------- c Jump to the starting element for our first plane. c-------------------------------------------------------------------*/ ipow46(A, (zstart[0] - 1) * 2 * NX * NY + (ystart[0] - 1) * 2 * NX, &an); dummy = randlc(&start, an); ipow46(A, 2 * NX * NY, &an); /*-------------------------------------------------------------------- c Go through by z planes filling in one square at a time. c-------------------------------------------------------------------*/ for (k = 0; k < NZ; k++) { x0 = start; vranlc(2 * NX * NY, &x0, A, tmp); t = 1; for (j = 0; j < NY; j++) for (i = 0; i < NX; i++) { u0_r[k * NY * NX + j * NX + i] = tmp[t++]; u0_i[k * NY * NX + j * NX + i] = tmp[t++]; } if (k != NZ) dummy = randlc(&start, an); } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void ipow46(double a, int exponent, double *result) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c compute a^exponent mod 2^46 c-------------------------------------------------------------------*/ double dummy, q, r; int n, n2; /*-------------------------------------------------------------------- c Use c a^n = a^(n/2)*a^(n/2) if n even else c a^n = a*a^(n-1) if n odd c-------------------------------------------------------------------*/ *result = 1; if (exponent == 0) return; q = a; r = 1; n = exponent; while (n > 1) { n2 = n / 2; if (n2 * 2 == n) { dummy = randlc(&q, q); n = n2; } else { dummy = randlc(&r, q); n = n - 1; } } dummy = randlc(&r, q); *result = r; } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void setup(void) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ int ierr, i, j, fstatus; printf("\n\n NAS Parallel Benchmarks 2.3 OpenMP C version" " - FT Benchmark\n\n"); niter = NITER_DEFAULT; printf(" Size : %3dx%3dx%3d\n", NX, NY, NZ); printf(" Iterations : %7d\n", niter); /* * 1004 format(' Number of processes : ', i7) 1005 format(' Processor * array : ', i3, 'x', i3) 1006 format(' WARNING: compiled for ', * i5, ' processes. ', > ' Will not verify. 
') */ for (i = 0; i < 3; i++) { dims[i][0] = NX; dims[i][1] = NY; dims[i][2] = NZ; } for (i = 0; i < 3; i++) { xstart[i] = 1; xend[i] = NX; ystart[i] = 1; yend[i] = NY; zstart[i] = 1; zend[i] = NZ; } /*-------------------------------------------------------------------- c Set up info for blocking of ffts and transposes. This improves c performance on cache-based systems. Blocking involves c working on a chunk of the problem at a time, taking chunks c along the first, second, or third dimension. c c - In cffts1 blocking is on 2nd dimension (with fft on 1st dim) c - In cffts2/3 blocking is on 1st dimension (with fft on 2nd and 3rd dims) c Since 1st dim is always in processor, we'll assume it's long enough c (default blocking factor is 16 so min size for 1st dim is 16) c The only case we have to worry about is cffts1 in a 2d decomposition. c so the blocking factor should not be larger than the 2nd dimension. c-------------------------------------------------------------------*/ fftblock = FFTBLOCK_DEFAULT; fftblockpad = FFTBLOCKPAD_DEFAULT; if (fftblock != FFTBLOCK_DEFAULT) fftblockpad = fftblock + 3; } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void print_timers(void) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ int i; const char *tstrings[] = {" total ", " setup ", " fft ", " evolve ", " checksum ", " fftlow ", " fftcopy " }; for (i = 0; i < T_MAX; i++) { if (timer_read(i) != 0.0) { printf("timer %2d(%16s( :%10.6f\n", i, tstrings[i], timer_read(i)); } } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void fft(int dir, double x1_r[NTOTAL], double x1_i[NTOTAL], double x2_r[NTOTAL], double x2_i[NTOTAL]) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static double y0_r[NTOTAL]; static double y0_i[NTOTAL]; static double y1_r[NTOTAL]; static double y1_i[NTOTAL]; int logNX, logNY, logNZ; /////////////////////// //Used for cffts1 ().// /////////////////////// int i, j, k, jj; int m, id; int is; /////////////////////// //Used for cfftz ().// /////////////////////// int l; /////////////////////// //Used for fftz2 ().// /////////////////////// int k_fftz2, n1, li, lj, lk, ku, i_fftz2, i11, i12, i21, i22; double u1_rf, x11_r, x21_r; double u1_if, x11_i, x21_i; int idx, p, nn; double x11real, x11imag, x21real, x21imag; //START_FFT // logNX = ilog2(NX); logNY = ilog2(NY); logNZ = ilog2(NZ); /*-------------------------------------------------------------------- c note: args x1, x2 must be different arrays c note: args for cfftsx are (direction, layout, xin, xout, scratch) c xin/xout may be the same and it can be somewhat faster c if they are c-------------------------------------------------------------------*/ #pragma acc data \ copyin(u_r[0:NX], u_i[0:NX]) \ copy(x1_r[0:NTOTAL], x1_i[0:NTOTAL]) \ copyout(x2_r[0:NTOTAL], x2_i[0:NTOTAL]) \ create(y0_r[0:NTOTAL], y0_i[0:NTOTAL]) \ create(y1_r[0:NTOTAL], y1_i[0:NTOTAL]) { if (dir == 1) { //cffts1(1, logNX, x1_r, x1_i, x1_r, x1_i, y0_r, y0_i, y1_r, y1_i); /* x1 -> x1 */ is = 1; #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = i * NZ * NY + j * NZ + k; y0_r[id] = x1_r[m]; 
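/* id = i*NZ*NY + j*NZ + k: the (j,k) pencil index becomes the fast
   dimension, so element i of each pencil lies at idx + i*(NZ*NY) for the
   X-dimension fftz2 passes below, and neighbouring idx values touch
   adjacent memory. */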
y0_i[id] = x1_i[m]; } //cfftz(is, logNX, NX, y0_r, y0_i, y1_r, y1_i, NZ, NY); /*-------------------------------------------------------------------- c Perform one variant of the Stockham FFT. c-------------------------------------------------------------------*/ for (l = 1; l <= logNX; l += 2) { //fftz2(is, l, logNX, NX, u_r, u_i, y0_r, y0_i, y1_r, y1_i, NZ, NY); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NZ * NY; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NX; idx++) { n1 = NX / 2; if (l - 1 == 0) { lk = 1; } else { lk = 2 << ((l - 1) - 1); } if (logNX - l == 0) { li = 1; } else { li = 2 << ((logNX - l) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y0_r[i11 + p]; x11imag = y0_i[i11 + p]; x21real = y0_r[i12 + p]; x21imag = y0_i[i12 + p]; y1_r[i21 + p] = x11real + x21real; y1_i[i21 + p] = x11imag + x21imag; y1_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y1_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } if (l == logNX) break; //fftz2(is, l + 1, logNX, NX, u_r, u_i, y1_r, y1_i, y0_r, y0_i, NZ, NY); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NZ * NY; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NX; idx++) { n1 = NX / 2; if (l == 0) { lk = 1; } else { lk = 2 << ((l) - 1); } if (logNX - (l + 1) == 0) { li = 1; } else { li = 2 << ((logNX - (l + 1)) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. 
c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y1_r[i11 + p]; x11imag = y1_i[i11 + p]; x21real = y1_r[i12 + p]; x21imag = y1_i[i12 + p]; y0_r[i21 + p] = x11real + x21real; y0_i[i21 + p] = x11imag + x21imag; y0_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y0_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } } if (logNX % 2 == 1) { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = i * NZ * NY + j * NZ + k; x1_r[m] = y1_r[id]; x1_i[m] = y1_i[id]; } } else { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = i * NZ * NY + j * NZ + k; x1_r[m] = y0_r[id]; x1_i[m] = y0_i[id]; } } //cffts2(1, logNY, x1_r, x1_i, x1_r, x1_i, y0_r, y0_i, y1_r, y1_i); /* x1 -> x1 */ is = 1; #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = j * NX * NZ + k * NX + i; y0_r[id] = x1_r[m]; y0_i[id] = x1_i[m]; } //cfftz(is, logNY, NY, y0_r, y0_i, y1_r, y1_i, NX, NZ); /*-------------------------------------------------------------------- c Perform one variant of the Stockham FFT. c-------------------------------------------------------------------*/ for (l = 1; l <= logNY; l += 2) { //fftz2(is, l, logNY, NY, u_r, u_i, y0_r, y0_i, y1_r, y1_i, NX, NZ); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NX * NZ; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NY; idx++) { n1 = NY / 2; if (l - 1 == 0) { lk = 1; } else { lk = 2 << ((l - 1) - 1); } if (logNY - l == 0) { li = 1; } else { li = 2 << ((logNY - l) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y0_r[i11 + p]; x11imag = y0_i[i11 + p]; x21real = y0_r[i12 + p]; x21imag = y0_i[i12 + p]; y1_r[i21 + p] = x11real + x21real; y1_i[i21 + p] = x11imag + x21imag; y1_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y1_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } if (l == logNY) break; //fftz2(is, l + 1, logNY, NY, u_r, u_i, y1_r, y1_i, y0_r, y0_i, NX, NZ); /*-------------------------------------------------------------------- c Set initial parameters. 
c-------------------------------------------------------------------*/ nn = NX * NZ; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NY; idx++) { n1 = NY / 2; if (l == 0) { lk = 1; } else { lk = 2 << ((l) - 1); } if (logNY - (l + 1) == 0) { li = 1; } else { li = 2 << ((logNY - (l + 1)) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y1_r[i11 + p]; x11imag = y1_i[i11 + p]; x21real = y1_r[i12 + p]; x21imag = y1_i[i12 + p]; y0_r[i21 + p] = x11real + x21real; y0_i[i21 + p] = x11imag + x21imag; y0_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y0_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } } if (logNY % 2 == 1) { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = j * NX * NZ + k * NX + i; x1_r[m] = y1_r[id]; x1_i[m] = y1_i[id]; } } else { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = j * NX * NZ + k * NX + i; x1_r[m] = y0_r[id]; x1_i[m] = y0_i[id]; } } //cffts3(1, logNZ, x1_r, x1_i, x2_r, x2_i, y0_r, y0_i, y1_r, y1_i); /* x1 -> x2 */ is = 1; //cfftz(is, logNZ, NZ, x1_r, x1_i, y1_r, y1_i, NX, NY); /*-------------------------------------------------------------------- c Perform one variant of the Stockham FFT. c-------------------------------------------------------------------*/ for (l = 1; l <= logNZ; l += 2) { //fftz2(is, l, logNZ, NZ, u_r, u_i, x1_r, x1_i, y1_r, y1_i, NX, NY); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NX * NY; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NZ; idx++) { n1 = NZ / 2; if (l - 1 == 0) { lk = 1; } else { lk = 2 << ((l - 1) - 1); } if (logNZ - l == 0) { li = 1; } else { li = 2 << ((logNZ - l) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. 
c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = x1_r[i11 + p]; x11imag = x1_i[i11 + p]; x21real = x1_r[i12 + p]; x21imag = x1_i[i12 + p]; y1_r[i21 + p] = x11real + x21real; y1_i[i21 + p] = x11imag + x21imag; y1_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y1_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } if (l == logNZ) break; //fftz2(is, l + 1, logNZ, NZ, u_r, u_i, y1_r, y1_i, x1_r, x1_i, NX, NY); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NX * NY; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NZ; idx++) { n1 = NZ / 2; if (l == 0) { lk = 1; } else { lk = 2 << ((l) - 1); } if (logNZ - (l + 1) == 0) { li = 1; } else { li = 2 << ((logNZ - (l + 1)) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y1_r[i11 + p]; x11imag = y1_i[i11 + p]; x21real = y1_r[i12 + p]; x21imag = y1_i[i12 + p]; x1_r[i21 + p] = x11real + x21real; x1_i[i21 + p] = x11imag + x21imag; x1_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); x1_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } } if (logNZ % 2 == 1) { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { x2_r[m] = y1_r[m]; x2_i[m] = y1_i[m]; } } else { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { x2_r[m] = x1_r[m]; x2_i[m] = x1_i[m]; } } } else { //cffts3(-1, logNZ, x1_r, x1_i, x1_r, x1_i, y0_r, y0_i, y1_r, y1_i); /* x1 -> x1 */ is = -1; //cfftz(is, logNZ, NZ, x1_r, x1_i, y1_r, y1_i, NX, NY); /*-------------------------------------------------------------------- c Perform one variant of the Stockham FFT. c-------------------------------------------------------------------*/ for (l = 1; l <= logNZ; l += 2) { //fftz2(is, l, logNZ, NZ, u_r, u_i, x1_r, x1_i, y1_r, y1_i, NX, NY); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NX * NY; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NZ; idx++) { n1 = NZ / 2; if (l - 1 == 0) { lk = 1; } else { lk = 2 << ((l - 1) - 1); } if (logNZ - l == 0) { li = 1; } else { li = 2 << ((logNZ - l) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. 
c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = x1_r[i11 + p]; x11imag = x1_i[i11 + p]; x21real = x1_r[i12 + p]; x21imag = x1_i[i12 + p]; y1_r[i21 + p] = x11real + x21real; y1_i[i21 + p] = x11imag + x21imag; y1_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y1_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } if (l == logNZ) break; //fftz2(is, l + 1, logNZ, NZ, u_r, u_i, y1_r, y1_i, x1_r, x1_i, NX, NY); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NX * NY; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NZ; idx++) { n1 = NZ / 2; if (l == 0) { lk = 1; } else { lk = 2 << ((l) - 1); } if (logNZ - (l + 1) == 0) { li = 1; } else { li = 2 << ((logNZ - (l + 1)) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y1_r[i11 + p]; x11imag = y1_i[i11 + p]; x21real = y1_r[i12 + p]; x21imag = y1_i[i12 + p]; x1_r[i21 + p] = x11real + x21real; x1_i[i21 + p] = x11imag + x21imag; x1_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); x1_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } } if (logNZ % 2 == 1) { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { x1_r[m] = y1_r[m]; x1_i[m] = y1_i[m]; } } else { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { x1_r[m] = x1_r[m]; x1_i[m] = x1_i[m]; } } //cffts2(-1, logNY, x1_r, x1_i, x1_r, x1_i, y0_r, y0_i, y1_r, y1_i); /* x1 -> x1 */ is = -1; #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = j * NX * NZ + k * NX + i; y0_r[id] = x1_r[m]; y0_i[id] = x1_i[m]; } //cfftz(is, logNY, NY, y0_r, y0_i, y1_r, y1_i, NX, NZ); /*-------------------------------------------------------------------- c Perform one variant of the Stockham FFT. c-------------------------------------------------------------------*/ for (l = 1; l <= logNY; l += 2) { //fftz2(is, l, logNY, NY, u_r, u_i, y0_r, y0_i, y1_r, y1_i, NX, NZ); /*-------------------------------------------------------------------- c Set initial parameters. 
c-------------------------------------------------------------------*/ nn = NX * NZ; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NY; idx++) { n1 = NY / 2; if (l - 1 == 0) { lk = 1; } else { lk = 2 << ((l - 1) - 1); } if (logNY - l == 0) { li = 1; } else { li = 2 << ((logNY - l) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y0_r[i11 + p]; x11imag = y0_i[i11 + p]; x21real = y0_r[i12 + p]; x21imag = y0_i[i12 + p]; y1_r[i21 + p] = x11real + x21real; y1_i[i21 + p] = x11imag + x21imag; y1_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y1_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } if (l == logNY) break; //fftz2(is, l + 1, logNY, NY, u_r, u_i, y1_r, y1_i, y0_r, y0_i, NX, NZ); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NX * NZ; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NY; idx++) { n1 = NY / 2; if (l == 0) { lk = 1; } else { lk = 2 << ((l) - 1); } if (logNY - (l + 1) == 0) { li = 1; } else { li = 2 << ((logNY - (l + 1)) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y1_r[i11 + p]; x11imag = y1_i[i11 + p]; x21real = y1_r[i12 + p]; x21imag = y1_i[i12 + p]; y0_r[i21 + p] = x11real + x21real; y0_i[i21 + p] = x11imag + x21imag; y0_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y0_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } } if (logNY % 2 == 1) { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = j * NX * NZ + k * NX + i; x1_r[m] = y1_r[id]; x1_i[m] = y1_i[id]; } } else { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = j * NX * NZ + k * NX + i; x1_r[m] = y0_r[id]; x1_i[m] = y0_i[id]; } } //cffts1(-1, logNX, x1_r, x1_i, x2_r, x2_i, y0_r, y0_i, y1_r, y1_i); /* x1 -> x2 */ is = -1; #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = i * NZ * NY + j * NZ + k; y0_r[id] = x1_r[m]; y0_i[id] = x1_i[m]; } //cfftz(is, logNX, NX, y0_r, y0_i, y1_r, y1_i, NZ, NY); /*-------------------------------------------------------------------- c Perform one variant of the Stockham FFT. 
c-------------------------------------------------------------------*/ for (l = 1; l <= logNX; l += 2) { //fftz2(is, l, logNX, NX, u_r, u_i, y0_r, y0_i, y1_r, y1_i, NZ, NY); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NZ * NY; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NX; idx++) { n1 = NX / 2; if (l - 1 == 0) { lk = 1; } else { lk = 2 << ((l - 1) - 1); } if (logNX - l == 0) { li = 1; } else { li = 2 << ((logNX - l) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y0_r[i11 + p]; x11imag = y0_i[i11 + p]; x21real = y0_r[i12 + p]; x21imag = y0_i[i12 + p]; y1_r[i21 + p] = x11real + x21real; y1_i[i21 + p] = x11imag + x21imag; y1_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y1_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } if (l == logNX) break; //fftz2(is, l + 1, logNX, NX, u_r, u_i, y1_r, y1_i, y0_r, y0_i, NZ, NY); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NZ * NY; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NX; idx++) { n1 = NX / 2; if (l == 0) { lk = 1; } else { lk = 2 << ((l) - 1); } if (logNX - (l + 1) == 0) { li = 1; } else { li = 2 << ((logNX - (l + 1)) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. 
c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y1_r[i11 + p]; x11imag = y1_i[i11 + p]; x21real = y1_r[i12 + p]; x21imag = y1_i[i12 + p]; y0_r[i21 + p] = x11real + x21real; y0_i[i21 + p] = x11imag + x21imag; y0_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y0_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } } if (logNX % 2 == 1) { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = i * NZ * NY + j * NZ + k; x2_r[m] = y1_r[id]; x2_i[m] = y1_i[id]; } } else { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = i * NZ * NY + j * NZ + k; x2_r[m] = y0_r[id]; x2_i[m] = y0_i[id]; } } } } //END_FFT // } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void fft_init(int n) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c compute the roots-of-unity array that will be used for subsequent FFTs. c-------------------------------------------------------------------*/ int m, nu, ku, i, j, ln; double t, ti; /*-------------------------------------------------------------------- c Initialize the U array with sines and cosines in a manner that permits c stride one access at each FFT iteration. c-------------------------------------------------------------------*/ nu = n; m = ilog2(n); u_r[0] = (double)m; u_i[0] = 0.0; ku = 1; ln = 1; for (j = 1; j <= m; j++) { t = PI / ln; for (i = 0; i <= ln - 1; i++) { ti = i * t; u_r[i + ku] = cos(ti); u_i[i + ku] = sin(ti); } ku = ku + ln; ln = 2 * ln; } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static int ilog2(int n) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ int nn, lg; if (n == 1) { return 0; } lg = 1; nn = 2; while (nn < n) { nn = nn << 1; lg++; } return lg; } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void verify(int d1, int d2, int d3, int nt, boolean * verified, char *classT) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ int ierr, size, i; double err, epsilon; /*-------------------------------------------------------------------- c Sample size reference checksums c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c Class S size reference checksums c-------------------------------------------------------------------*/ double vdata_real_s[6 + 1] = {0.0, 5.546087004964e+02, 5.546385409189e+02, 5.546148406171e+02, 5.545423607415e+02, 5.544255039624e+02, 5.542683411902e+02 }; double vdata_imag_s[6 + 1] = {0.0, 4.845363331978e+02, 4.865304269511e+02, 4.883910722336e+02, 4.901273169046e+02, 4.917475857993e+02, 4.932597244941e+02 }; /*-------------------------------------------------------------------- c Class W 
size reference checksums c-------------------------------------------------------------------*/ double vdata_real_w[6 + 1] = {0.0, 5.673612178944e+02, 5.631436885271e+02, 5.594024089970e+02, 5.560698047020e+02, 5.530898991250e+02, 5.504159734538e+02 }; double vdata_imag_w[6 + 1] = {0.0, 5.293246849175e+02, 5.282149986629e+02, 5.270996558037e+02, 5.260027904925e+02, 5.249400845633e+02, 5.239212247086e+02 }; /*-------------------------------------------------------------------- c Class A size reference checksums c-------------------------------------------------------------------*/ double vdata_real_a[6 + 1] = {0.0, 5.046735008193e+02, 5.059412319734e+02, 5.069376896287e+02, 5.077892868474e+02, 5.085233095391e+02, 5.091487099959e+02 }; double vdata_imag_a[6 + 1] = {0.0, 5.114047905510e+02, 5.098809666433e+02, 5.098144042213e+02, 5.101336130759e+02, 5.104914655194e+02, 5.107917842803e+02 }; /*-------------------------------------------------------------------- c Class B size reference checksums c-------------------------------------------------------------------*/ double vdata_real_b[20 + 1] = {0.0, 5.177643571579e+02, 5.154521291263e+02, 5.146409228649e+02, 5.142378756213e+02, 5.139626667737e+02, 5.137423460082e+02, 5.135547056878e+02, 5.133910925466e+02, 5.132470705390e+02, 5.131197729984e+02, 5.130070319283e+02, 5.129070537032e+02, 5.128182883502e+02, 5.127393733383e+02, 5.126691062020e+02, 5.126064276004e+02, 5.125504076570e+02, 5.125002331720e+02, 5.124551951846e+02, 5.124146770029e+02 }; double vdata_imag_b[20 + 1] = {0.0, 5.077803458597e+02, 5.088249431599e+02, 5.096208912659e+02, 5.101023387619e+02, 5.103976610617e+02, 5.105948019802e+02, 5.107404165783e+02, 5.108576573661e+02, 5.109577278523e+02, 5.110460304483e+02, 5.111252433800e+02, 5.111968077718e+02, 5.112616233064e+02, 5.113203605551e+02, 5.113735928093e+02, 5.114218460548e+02, 5.114656139760e+02, 5.115053595966e+02, 5.115415130407e+02, 5.115744692211e+02 }; /*-------------------------------------------------------------------- c Class C size reference checksums c-------------------------------------------------------------------*/ double vdata_real_c[20 + 1] = {0.0, 5.195078707457e+02, 5.155422171134e+02, 5.144678022222e+02, 5.140150594328e+02, 5.137550426810e+02, 5.135811056728e+02, 5.134569343165e+02, 5.133651975661e+02, 5.132955192805e+02, 5.132410471738e+02, 5.131971141679e+02, 5.131605205716e+02, 5.131290734194e+02, 5.131012720314e+02, 5.130760908195e+02, 5.130528295923e+02, 5.130310107773e+02, 5.130103090133e+02, 5.129905029333e+02, 5.129714421109e+02 }; double vdata_imag_c[20 + 1] = {0.0, 5.149019699238e+02, 5.127578201997e+02, 5.122251847514e+02, 5.121090289018e+02, 5.121143685824e+02, 5.121496764568e+02, 5.121870921893e+02, 5.122193250322e+02, 5.122454735794e+02, 5.122663649603e+02, 5.122830879827e+02, 5.122965869718e+02, 5.123075927445e+02, 5.123166486553e+02, 5.123241541685e+02, 5.123304037599e+02, 5.123356167976e+02, 5.123399592211e+02, 5.123435588985e+02, 5.123465164008e+02 }; epsilon = 1.0e-12; *verified = TRUE; *classT = 'U'; if (d1 == 64 && d2 == 64 && d3 == 64 && nt == 6) { *classT = 'S'; for (i = 1; i <= nt; i++) { err = (sums_r[i] - vdata_real_s[i]) / vdata_real_s[i]; if (fabs(err) > epsilon) { *verified = FALSE; break; } err = (sums_i[i] - vdata_imag_s[i]) / vdata_imag_s[i]; if (fabs(err) > epsilon) { *verified = FALSE; break; } } } else if (d1 == 128 && d2 == 128 && d3 == 32 && nt == 6) { *classT = 'W'; for (i = 1; i <= nt; i++) { err = (sums_r[i] - vdata_real_w[i]) / vdata_real_w[i]; if (fabs(err) > 
epsilon) { *verified = FALSE; break; } err = (sums_i[i] - vdata_imag_w[i]) / vdata_imag_w[i]; if (fabs(err) > epsilon) { *verified = FALSE; break; } } } else if (d1 == 256 && d2 == 256 && d3 == 128 && nt == 6) { *classT = 'A'; for (i = 1; i <= nt; i++) { err = (sums_r[i] - vdata_real_a[i]) / vdata_real_a[i]; if (fabs(err) > epsilon) { *verified = FALSE; break; } err = (sums_i[i] - vdata_imag_a[i]) / vdata_imag_a[i]; if (fabs(err) > epsilon) { *verified = FALSE; break; } } } else if (d1 == 512 && d2 == 256 && d3 == 256 && nt == 20) { *classT = 'B'; for (i = 1; i <= nt; i++) { err = (sums_r[i] - vdata_real_b[i]) / vdata_real_b[i]; if (fabs(err) > epsilon) { *verified = FALSE; break; } err = (sums_i[i] - vdata_imag_b[i]) / vdata_imag_b[i]; if (fabs(err) > epsilon) { *verified = FALSE; break; } } } else if (d1 == 512 && d2 == 512 && d3 == 512 && nt == 20) { *classT = 'C'; for (i = 1; i <= nt; i++) { err = (sums_r[i] - vdata_real_c[i]) / vdata_real_c[i]; if (fabs(err) > epsilon) { *verified = FALSE; break; } err = (sums_i[i] - vdata_imag_c[i]) / vdata_imag_c[i]; if (fabs(err) > epsilon) { *verified = FALSE; break; } } } if (*classT != 'U') { printf("Result verification successful\n"); } else { printf("Result verification failed\n"); } printf("class = %1c\n", *classT); }
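/*--------------------------------------------------------------------
c Reference sketch (not part of the benchmark): a scalar, single-pencil
c version of the radix-2 Stockham butterfly that the inlined fftz2 loops
c above expand.  The helper name fftz2_pencil, its signature and the
c `stride` parameter are illustrative only; the real code applies the same
c update to nn pencils at once inside the OpenACC gang/worker loops, with
c stride = nn and the pencil offset idx added to every index.
c-------------------------------------------------------------------*/
static void fftz2_pencil(int is, int l, int m, int n,
                         const double *tu_r, const double *tu_i,
                         const double *xin_r, const double *xin_i,
                         double *xout_r, double *xout_i, int stride)
{
    int n1, lk, li, lj, ku, i, k, i11, i12, i21, i22;
    double u1r, u1i, x11r, x11i, x21r, x21i;

    n1 = n / 2;
    lk = (l == 1) ? 1 : 2 << (l - 2);      /* 2^(l-1) points per block   */
    li = (m == l) ? 1 : 2 << (m - l - 1);  /* 2^(m-l) butterfly blocks   */
    lj = 2 * lk;
    ku = li;
    for (i = 0; i < li; i++) {
        i11 = i * lk;
        i12 = i11 + n1;
        i21 = i * lj;
        i22 = i21 + lk;
        u1r = tu_r[ku + i];
        u1i = (is >= 1) ? tu_i[ku + i] : -tu_i[ku + i]; /* conjugate for inverse */
        for (k = 0; k < lk; k++) {
            x11r = xin_r[(i11 + k) * stride];
            x11i = xin_i[(i11 + k) * stride];
            x21r = xin_r[(i12 + k) * stride];
            x21i = xin_i[(i12 + k) * stride];
            xout_r[(i21 + k) * stride] = x11r + x21r;
            xout_i[(i21 + k) * stride] = x11i + x21i;
            xout_r[(i22 + k) * stride] = u1r * (x11r - x21r) - u1i * (x11i - x21i);
            xout_i[(i22 + k) * stride] = u1r * (x11i - x21i) + u1i * (x11r - x21r);
        }
    }
}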
/*-------------------------------------------------------------------- NAS Parallel Benchmarks 2.3 OpenMP C versions - FT This benchmark is an OpenMP C version of the NPB FT code. The OpenMP C versions are developed by RWCP and derived from the serial Fortran versions in "NPB 2.3-serial" developed by NAS. Permission to use, copy, distribute and modify this software for any purpose with or without fee is hereby granted. This software is provided "as is" without express or implied warranty. Send comments on the OpenMP C versions to pdp-openmp@rwcp.or.jp Information on OpenMP activities at RWCP is available at: http://pdplab.trc.rwcp.or.jp/pdperf/Omni/ Information on NAS Parallel Benchmarks 2.3 is available at: http://www.nas.nasa.gov/NAS/NPB/ --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- Authors: D. Bailey W. Saphir OpenMP C version: S. Satoh --------------------------------------------------------------------*/ #include "npb-C.h" /* global variables */ #include "global.h" /* function declarations */ static void compute_initial_conditions(double u0_r[NTOTAL], double u0_i[NTOTAL]); static void ipow46(double a, int exponent, double *result); static void setup(void); static void print_timers(void); static void fft(int dir, double x1_r[NTOTAL], double x1_i[NTOTAL], double x2_r[NTOTAL], double x2_i[NTOTAL]); static void fft_init(int n); static void cfftz(int is, int m, int n, double x_r[NTOTAL], double x_i[NTOTAL], double y_r[NTOTAL], double y_i[NTOTAL], int di1, int di2); static void fftz2(int is, int l, int m, int n, double u_r[NX], double u_i[NX], double x_r[NTOTAL], double x_i[NTOTAL], double y_r[NTOTAL], double y_i[NTOTAL], int di1, int di2); static int ilog2(int n); static void verify(int d1, int d2, int d3, int nt, boolean * verified, char *classT); /*-------------------------------------------------------------------- c FT benchmark c-------------------------------------------------------------------*/ int main(int argc, char **argv) { /* * c------------------------------------------------------------------- * c------------------------------------------------------------------- */ int i_main, ierr; /*------------------------------------------------------------------ c u0, u1, u2 are the main arrays in the problem. c Depending on the decomposition, these arrays will have different c dimensions. To accomodate all possibilities, we allocate them as c one-dimensional arrays and pass them to subroutines for different c views c - u0 contains the initial (transformed) initial condition c - u1 and u2 are working arrays c - indexmap maps i,j,k of u0 to the correct i^2+j^2+k^2 for the c time evolution operator. c-----------------------------------------------------------------*/ /*-------------------------------------------------------------------- c Large arrays are in common so that they are allocated on the c heap rather than the stack. This common block is not c referenced directly anywhere else. Padding is to avoid accidental c cache problems, since all array sizes are powers of two. 
c-------------------------------------------------------------------*/ static double u0_r[NTOTAL]; //u0_r[NZ][NY][NX]; static double u0_i[NTOTAL]; //u0_i[NZ][NY][NX]; static double u1_r[NTOTAL]; //u1_r[NZ][NY][NX]; static double u1_i[NTOTAL]; //u1_i[NZ][NY][NX]; static double u2_r[NTOTAL]; //u2_r[NZ][NY][NX]; static double u2_i[NTOTAL]; //u2_i[NZ][NY][NX]; static int indexmap[NTOTAL]; //indexmap[NZ][NY][NX]; int iter; int nthreads = 1; double total_time, mflops; boolean verified; char classT; ////////////////////////////////// //Used for compute_indexmap ().// ////////////////////////////////// int i, j, k, ii, ii2, jj, ij2, kk; int m; double ap; //////////////////////// //Used for evolve ().// //////////////////////// //int i, j, k; ////////////////////////// //Used for checksum ().// ////////////////////////// //int m, j, int q, r, s; double chk_r, chk_i; ///////////////////// //Used for fft ().// ///////////////////// int dir; static double y0_r[NTOTAL]; static double y0_i[NTOTAL]; static double y1_r[NTOTAL]; static double y1_i[NTOTAL]; int logNX, logNY, logNZ; /////////////////////// //Used for cffts1 ().// /////////////////////// //int i, j, k, jj, m; int id; int is; /////////////////////// //Used for cfftz ().// /////////////////////// int l; /////////////////////// //Used for fftz2 ().// /////////////////////// int k_fftz2, n1, li, lj, lk, ku, i_fftz2, i11, i12, i21, i22; double u1_rf, x11_r, x21_r; double u1_if, x11_i, x21_i; int idx, p, nn; double x11real, x11imag, x21real, x21imag; /*-------------------------------------------------------------------- c Run the entire problem once to make sure all data is touched. c This reduces variable startup costs, which is important for such a c short benchmark. The other NPB 2 implementations are similar. c-------------------------------------------------------------------*/ for (i_main = 0; i_main < T_MAX; i_main++) { timer_clear(i_main); } setup(); { //compute_indexmap(indexmap); /*-------------------------------------------------------------------- c compute function from local (i,j,k) to ibar^2+jbar^2+kbar^2 c for time evolution exponent. c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c basically we want to convert the fortran indices c 1 2 3 4 5 6 7 8 c to c 0 1 2 3 -4 -3 -2 -1 c The following magic formula does the trick: c mod(i-1+n/2, n) - n/2 c-------------------------------------------------------------------*/ #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; ii = (i + NX / 2) % NX - NX / 2; ii2 = ii * ii; jj = (j + NY / 2) % NY - NY / 2; ij2 = jj * jj + ii2; kk = (k + NZ / 2) % NZ - NZ / 2; indexmap[m] = kk * kk + ij2; } /*-------------------------------------------------------------------- c compute array of exponentials for time evolution. c-------------------------------------------------------------------*/ { ap = -4.0 * ALPHA * PI * PI; ex[0] = 1.0; ex[1] = exp(ap); for (i = 2; i <= EXPMAX; i++) { ex[i] = ex[i - 1] * ex[1]; } } /* end single */ { compute_initial_conditions(u1_r, u1_i); fft_init(dims[0][0]); } fft(1, u1_r, u1_i, u0_r, u0_i); } /* end parallel */ /*-------------------------------------------------------------------- c Start over from the beginning. Note that all operations must c be timed, in contrast to other benchmarks. 
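c The untimed pass above touched every array once as a warm-up; the
c timers are now cleared and the full computation, including rebuilding
c the index map and the exponent table ex[] inside the device data
c region, is repeated under the clock.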
c-------------------------------------------------------------------*/ for (i_main = 0; i_main < T_MAX; i_main++) { timer_clear(i_main); } timer_start(T_TOTAL); if (TIMERS_ENABLED == TRUE) timer_start(T_SETUP); // #pragma omp parallel private(iter) firstprivate(niter) #pragma acc data \ create(ex[0:EXPMAX+1]) \ create(indexmap[0:NTOTAL]) \ create(u_r[0:NX], u_i[0:NX]) \ create(u1_r[0:NTOTAL], u1_i[0:NTOTAL]) \ create(u0_r[0:NTOTAL], u0_i[0:NTOTAL]) \ create(u2_r[0:NTOTAL], u2_i[0:NTOTAL]) \ create(y0_r[0:NTOTAL], y0_i[0:NTOTAL]) \ create(y1_r[0:NTOTAL], y1_i[0:NTOTAL]) { //compute_indexmap(indexmap); /*-------------------------------------------------------------------- c compute function from local (i,j,k) to ibar^2+jbar^2+kbar^2 c for time evolution exponent. c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c basically we want to convert the fortran indices c 1 2 3 4 5 6 7 8 c to c 0 1 2 3 -4 -3 -2 -1 c The following magic formula does the trick: c mod(i-1+n/2, n) - n/2 c-------------------------------------------------------------------*/ #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; ii = (i + NX / 2) % NX - NX / 2; ii2 = ii * ii; jj = (j + NY / 2) % NY - NY / 2; ij2 = jj * jj + ii2; kk = (k + NZ / 2) % NZ - NZ / 2; indexmap[m] = kk * kk + ij2; } /*-------------------------------------------------------------------- c compute array of exponentials for time evolution. c-------------------------------------------------------------------*/ { ap = -4.0 * ALPHA * PI * PI; ex[0] = 1.0; ex[1] = exp(ap); for (i = 2; i <= EXPMAX; i++) { ex[i] = ex[i - 1] * ex[1]; } } /* end single */ #pragma acc update device(ex[0:EXPMAX+1]) { compute_initial_conditions(u1_r, u1_i); fft_init(dims[0][0]); } #pragma acc update device(u_r[0:NX], u_i[0:NX], \ u1_r[0:NTOTAL], u1_i[0:NTOTAL]) if (TIMERS_ENABLED == TRUE) { timer_stop(T_SETUP); } if (TIMERS_ENABLED == TRUE) { timer_start(T_FFT); } //fft(1, u1_r, u1_i, u0_r, u0_i); //START_FFT // dir = 1; logNX = ilog2(NX); logNY = ilog2(NY); logNZ = ilog2(NZ); /*-------------------------------------------------------------------- c note: args u1, x2 must be different arrays c note: args for cfftsx are (direction, layout, xin, xout, scratch) c xin/xout may be the same and it can be somewhat faster c if they are c-------------------------------------------------------------------*/ { if (dir == 1) { //cffts1(1, logNX, u1_r, u1_i, u1_r, u1_i, y0_r, y0_i, y1_r, y1_i); /* u1 -> u1 */ is = 1; #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = i * NZ * NY + j * NZ + k; y0_r[id] = u1_r[m]; y0_i[id] = u1_i[m]; } //cfftz(is, logNX, NX, y0_r, y0_i, y1_r, y1_i, NZ, NY); /*-------------------------------------------------------------------- c Perform one variant of the Stockham FFT. c-------------------------------------------------------------------*/ for (l = 1; l <= logNX; l += 2) { //fftz2(is, l, logNX, NX, u_r, u_i, y0_r, y0_i, y1_r, y1_i, NZ, NY); /*-------------------------------------------------------------------- c Set initial parameters. 
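c For butterfly stage l of the log2(n)-stage transform (here n = NX,
c log2(n) = logNX):
c   n1 = n/2          offset (in pencils of stride nn) between the two inputs
c   lk = 2^(l-1)      length of each contiguous block handled per group
c   li = 2^(logN-l)   number of butterfly groups
c   lj = 2*lk         spacing of the output blocks
c   ku = li           offset of this stage's twiddle factors in u_r/u_i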
c-------------------------------------------------------------------*/ nn = NZ * NY; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NX; idx++) { n1 = NX / 2; if (l - 1 == 0) { lk = 1; } else { lk = 2 << ((l - 1) - 1); } if (logNX - l == 0) { li = 1; } else { li = 2 << ((logNX - l) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y0_r[i11 + p]; x11imag = y0_i[i11 + p]; x21real = y0_r[i12 + p]; x21imag = y0_i[i12 + p]; y1_r[i21 + p] = x11real + x21real; y1_i[i21 + p] = x11imag + x21imag; y1_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y1_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } if (l == logNX) break; //fftz2(is, l + 1, logNX, NX, u_r, u_i, y1_r, y1_i, y0_r, y0_i, NZ, NY); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NZ * NY; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NX; idx++) { n1 = NX / 2; if (l == 0) { lk = 1; } else { lk = 2 << ((l) - 1); } if (logNX - (l + 1) == 0) { li = 1; } else { li = 2 << ((logNX - (l + 1)) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y1_r[i11 + p]; x11imag = y1_i[i11 + p]; x21real = y1_r[i12 + p]; x21imag = y1_i[i12 + p]; y0_r[i21 + p] = x11real + x21real; y0_i[i21 + p] = x11imag + x21imag; y0_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y0_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } } if (logNX % 2 == 1) { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = i * NZ * NY + j * NZ + k; u1_r[m] = y1_r[id]; u1_i[m] = y1_i[id]; } } else { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = i * NZ * NY + j * NZ + k; u1_r[m] = y0_r[id]; u1_i[m] = y0_i[id]; } } //cffts2(1, logNY, u1_r, u1_i, u1_r, u1_i, y0_r, y0_i, y1_r, y1_i); /* x1 -> x1 */ is = 1; #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = j * NX * NZ + k * NX + i; y0_r[id] = u1_r[m]; y0_i[id] = u1_i[m]; } //cfftz(is, logNY, NY, y0_r, y0_i, y1_r, y1_i, NX, NZ); /*-------------------------------------------------------------------- c Perform one variant of the Stockham FFT. 
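c The Stockham variant ping-pongs between the y0 and y1 buffers at each
c stage, so no bit-reversal pass is needed.  Two stages (l and l+1) are
c fused per loop iteration; when log2(n) is odd the loop exits after the
c first of the pair and the result is left in y1, otherwise in y0, which
c is why the copy-back after the loop tests logN % 2.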
c-------------------------------------------------------------------*/ for (l = 1; l <= logNY; l += 2) { //fftz2(is, l, logNY, NY, u_r, u_i, y0_r, y0_i, y1_r, y1_i, NX, NZ); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NX * NZ; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NY; idx++) { n1 = NY / 2; if (l - 1 == 0) { lk = 1; } else { lk = 2 << ((l - 1) - 1); } if (logNY - l == 0) { li = 1; } else { li = 2 << ((logNY - l) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y0_r[i11 + p]; x11imag = y0_i[i11 + p]; x21real = y0_r[i12 + p]; x21imag = y0_i[i12 + p]; y1_r[i21 + p] = x11real + x21real; y1_i[i21 + p] = x11imag + x21imag; y1_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y1_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } if (l == logNY) break; //fftz2(is, l + 1, logNY, NY, u_r, u_i, y1_r, y1_i, y0_r, y0_i, NX, NZ); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NX * NZ; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NY; idx++) { n1 = NY / 2; if (l == 0) { lk = 1; } else { lk = 2 << ((l) - 1); } if (logNY - (l + 1) == 0) { li = 1; } else { li = 2 << ((logNY - (l + 1)) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. 
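c Each iteration of the inner loop applies one radix-2 butterfly in split
c real/imaginary form: the sum x11 + x21 goes to the first output, and
c the twiddled difference u1 * (x11 - x21), with the complex multiply
c written out by hand, goes to the second, stepping through the pencil
c at stride nn.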
c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y1_r[i11 + p]; x11imag = y1_i[i11 + p]; x21real = y1_r[i12 + p]; x21imag = y1_i[i12 + p]; y0_r[i21 + p] = x11real + x21real; y0_i[i21 + p] = x11imag + x21imag; y0_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y0_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } } if (logNY % 2 == 1) { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = j * NX * NZ + k * NX + i; u1_r[m] = y1_r[id]; u1_i[m] = y1_i[id]; } } else { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = j * NX * NZ + k * NX + i; u1_r[m] = y0_r[id]; u1_i[m] = y0_i[id]; } } //cffts3(1, logNZ, u1_r, u1_i, u0_r, u0_i, y0_r, y0_i, y1_r, y1_i); /* x1 -> x2 */ is = 1; //cfftz(is, logNZ, NZ, u1_r, u1_i, y1_r, y1_i, NX, NY); /*-------------------------------------------------------------------- c Perform one variant of the Stockham FFT. c-------------------------------------------------------------------*/ for (l = 1; l <= logNZ; l += 2) { //fftz2(is, l, logNZ, NZ, u_r, u_i, u1_r, u1_i, y1_r, y1_i, NX, NY); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NX * NY; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NZ; idx++) { n1 = NZ / 2; if (l - 1 == 0) { lk = 1; } else { lk = 2 << ((l - 1) - 1); } if (logNZ - l == 0) { li = 1; } else { li = 2 << ((logNZ - l) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = u1_r[i11 + p]; x11imag = u1_i[i11 + p]; x21real = u1_r[i12 + p]; x21imag = u1_i[i12 + p]; y1_r[i21 + p] = x11real + x21real; y1_i[i21 + p] = x11imag + x21imag; y1_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y1_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } if (l == logNZ) break; //fftz2(is, l + 1, logNZ, NZ, u_r, u_i, y1_r, y1_i, u1_r, u1_i, NX, NY); /*-------------------------------------------------------------------- c Set initial parameters. 
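c Second half of the fused pair: this call implements stage l+1, so the
c block length doubles (lk = 2^l) and the buffer roles are swapped, the
c data flowing back from y1 into u1 here.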
c-------------------------------------------------------------------*/ nn = NX * NY; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NZ; idx++) { n1 = NZ / 2; if (l == 0) { lk = 1; } else { lk = 2 << ((l) - 1); } if (logNZ - (l + 1) == 0) { li = 1; } else { li = 2 << ((logNZ - (l + 1)) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y1_r[i11 + p]; x11imag = y1_i[i11 + p]; x21real = y1_r[i12 + p]; x21imag = y1_i[i12 + p]; u1_r[i21 + p] = x11real + x21real; u1_i[i21 + p] = x11imag + x21imag; u1_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); u1_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } } if (logNZ % 2 == 1) { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { u0_r[m] = y1_r[m]; u0_i[m] = y1_i[m]; } } else { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { u0_r[m] = u1_r[m]; u0_i[m] = u1_i[m]; } } } else { //cffts3(-1, logNZ, u1_r, u1_i, u1_r, u1_i, y0_r, y0_i, y1_r, y1_i); /* x1 -> x1 */ is = -1; //cfftz(is, logNZ, NZ, u1_r, u1_i, y1_r, y1_i, NX, NY); /*-------------------------------------------------------------------- c Perform one variant of the Stockham FFT. c-------------------------------------------------------------------*/ for (l = 1; l <= logNZ; l += 2) { //fftz2(is, l, logNZ, NZ, u_r, u_i, u1_r, u1_i, y1_r, y1_i, NX, NY); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NX * NY; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NZ; idx++) { n1 = NZ / 2; if (l - 1 == 0) { lk = 1; } else { lk = 2 << ((l - 1) - 1); } if (logNZ - l == 0) { li = 1; } else { li = 2 << ((logNZ - l) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = u1_r[i11 + p]; x11imag = u1_i[i11 + p]; x21real = u1_r[i12 + p]; x21imag = u1_i[i12 + p]; y1_r[i21 + p] = x11real + x21real; y1_i[i21 + p] = x11imag + x21imag; y1_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y1_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } if (l == logNZ) break; //fftz2(is, l + 1, logNZ, NZ, u_r, u_i, y1_r, y1_i, u1_r, u1_i, NX, NY); /*-------------------------------------------------------------------- c Set initial parameters. 
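c In the inverse branch (is = -1) the stage structure is identical to the
c forward transform; the only difference is the conjugated twiddle factor
c (u1_if = -u_i[ku + i_fftz2] in the selection above).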
c-------------------------------------------------------------------*/ nn = NX * NY; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NZ; idx++) { n1 = NZ / 2; if (l == 0) { lk = 1; } else { lk = 2 << ((l) - 1); } if (logNZ - (l + 1) == 0) { li = 1; } else { li = 2 << ((logNZ - (l + 1)) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y1_r[i11 + p]; x11imag = y1_i[i11 + p]; x21real = y1_r[i12 + p]; x21imag = y1_i[i12 + p]; u1_r[i21 + p] = x11real + x21real; u1_i[i21 + p] = x11imag + x21imag; u1_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); u1_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } } if (logNZ % 2 == 1) { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { u1_r[m] = y1_r[m]; u1_i[m] = y1_i[m]; } } else { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { u1_r[m] = u1_r[m]; u1_i[m] = u1_i[m]; } } //cffts2(-1, logNY, u1_r, u1_i, u1_r, u1_i, y0_r, y0_i, y1_r, y1_i); /* x1 -> x1 */ is = -1; #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = j * NX * NZ + k * NX + i; y0_r[id] = u1_r[m]; y0_i[id] = u1_i[m]; } //cfftz(is, logNY, NY, y0_r, y0_i, y1_r, y1_i, NX, NZ); /*-------------------------------------------------------------------- c Perform one variant of the Stockham FFT. c-------------------------------------------------------------------*/ for (l = 1; l <= logNY; l += 2) { //fftz2(is, l, logNY, NY, u_r, u_i, y0_r, y0_i, y1_r, y1_i, NX, NZ); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NX * NZ; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NY; idx++) { n1 = NY / 2; if (l - 1 == 0) { lk = 1; } else { lk = 2 << ((l - 1) - 1); } if (logNY - l == 0) { li = 1; } else { li = 2 << ((logNY - l) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. 
c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y0_r[i11 + p]; x11imag = y0_i[i11 + p]; x21real = y0_r[i12 + p]; x21imag = y0_i[i12 + p]; y1_r[i21 + p] = x11real + x21real; y1_i[i21 + p] = x11imag + x21imag; y1_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y1_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } if (l == logNY) break; //fftz2(is, l + 1, logNY, NY, u_r, u_i, y1_r, y1_i, y0_r, y0_i, NX, NZ); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NX * NZ; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NY; idx++) { n1 = NY / 2; if (l == 0) { lk = 1; } else { lk = 2 << ((l) - 1); } if (logNY - (l + 1) == 0) { li = 1; } else { li = 2 << ((logNY - (l + 1)) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y1_r[i11 + p]; x11imag = y1_i[i11 + p]; x21real = y1_r[i12 + p]; x21imag = y1_i[i12 + p]; y0_r[i21 + p] = x11real + x21real; y0_i[i21 + p] = x11imag + x21imag; y0_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y0_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } } if (logNY % 2 == 1) { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = j * NX * NZ + k * NX + i; u1_r[m] = y1_r[id]; u1_i[m] = y1_i[id]; } } else { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = j * NX * NZ + k * NX + i; u1_r[m] = y0_r[id]; u1_i[m] = y0_i[id]; } } //cffts1(-1, logNX, u1_r, u1_i, u0_r, u0_i, y0_r, y0_i, y1_r, y1_i); /* x1 -> x2 */ is = -1; #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = i * NZ * NY + j * NZ + k; y0_r[id] = u1_r[m]; y0_i[id] = u1_i[m]; } //cfftz(is, logNX, NX, y0_r, y0_i, y1_r, y1_i, NZ, NY); /*-------------------------------------------------------------------- c Perform one variant of the Stockham FFT. c-------------------------------------------------------------------*/ for (l = 1; l <= logNX; l += 2) { //fftz2(is, l, logNX, NX, u_r, u_i, y0_r, y0_i, y1_r, y1_i, NZ, NY); /*-------------------------------------------------------------------- c Set initial parameters. 
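c nn is the number of independent 1-D pencils (the product of the two
c dimensions not being transformed); the acc kernels loop below assigns
c one pencil per iteration, so all NTOTAL/NX transforms of this stage
c proceed in parallel.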
c-------------------------------------------------------------------*/ nn = NZ * NY; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NX; idx++) { n1 = NX / 2; if (l - 1 == 0) { lk = 1; } else { lk = 2 << ((l - 1) - 1); } if (logNX - l == 0) { li = 1; } else { li = 2 << ((logNX - l) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y0_r[i11 + p]; x11imag = y0_i[i11 + p]; x21real = y0_r[i12 + p]; x21imag = y0_i[i12 + p]; y1_r[i21 + p] = x11real + x21real; y1_i[i21 + p] = x11imag + x21imag; y1_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y1_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } if (l == logNX) break; //fftz2(is, l + 1, logNX, NX, u_r, u_i, y1_r, y1_i, y0_r, y0_i, NZ, NY); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NZ * NY; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NX; idx++) { n1 = NX / 2; if (l == 0) { lk = 1; } else { lk = 2 << ((l) - 1); } if (logNX - (l + 1) == 0) { li = 1; } else { li = 2 << ((logNX - (l + 1)) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. 
c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y1_r[i11 + p]; x11imag = y1_i[i11 + p]; x21real = y1_r[i12 + p]; x21imag = y1_i[i12 + p]; y0_r[i21 + p] = x11real + x21real; y0_i[i21 + p] = x11imag + x21imag; y0_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y0_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } } if (logNX % 2 == 1) { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = i * NZ * NY + j * NZ + k; u0_r[m] = y1_r[id]; u0_i[m] = y1_i[id]; } } else { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = i * NZ * NY + j * NZ + k; u0_r[m] = y0_r[id]; u0_i[m] = y0_i[id]; } } } } //END_FFT // if (TIMERS_ENABLED == TRUE) { timer_stop(T_FFT); } for (iter = 1; iter <= niter; iter++) { if (TIMERS_ENABLED == TRUE) { timer_start(T_EVOLVE); } //evolve(u0_r, u0_i, u1_r, u1_i, iter, indexmap); /*-------------------------------------------------------------------- c evolve u0 -> u1 (iter time steps) in fourier space c-------------------------------------------------------------------*/ #pragma acc kernels loop gang worker independent for (i = 0; i < NTOTAL; i++) { u1_r[i] = u0_r[i] * ex[iter * indexmap[i]]; u1_i[i] = u0_i[i] * ex[iter * indexmap[i]]; } if (TIMERS_ENABLED == TRUE) { timer_stop(T_EVOLVE); } if (TIMERS_ENABLED == TRUE) { timer_start(T_FFT); } //fft(-1, u1_r, u1_i, u2_r, u2_i); //START_FFT // dir = -1; logNX = ilog2(NX); logNY = ilog2(NY); logNZ = ilog2(NZ); /*-------------------------------------------------------------------- c note: args x1, x2 must be different arrays c note: args for cfftsx are (direction, layout, xin, xout, scratch) c xin/xout may be the same and it can be somewhat faster c if they are c-------------------------------------------------------------------*/ { if (dir == 1) { //cffts1(1, logNX, u1_r, u1_i, u1_r, u1_i, y0_r, y0_i, y1_r, y1_i); /* x1 -> x1 */ is = 1; #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = i * NZ * NY + j * NZ + k; y0_r[id] = u1_r[m]; y0_i[id] = u1_i[m]; } //cfftz(is, logNX, NX, y0_r, y0_i, y1_r, y1_i, NZ, NY); /*-------------------------------------------------------------------- c Perform one variant of the Stockham FFT. c-------------------------------------------------------------------*/ for (l = 1; l <= logNX; l += 2) { //fftz2(is, l, logNX, NX, u_r, u_i, y0_r, y0_i, y1_r, y1_i, NZ, NY); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NZ * NY; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NX; idx++) { n1 = NX / 2; if (l - 1 == 0) { lk = 1; } else { lk = 2 << ((l - 1) - 1); } if (logNX - l == 0) { li = 1; } else { li = 2 << ((logNX - l) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. 
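c (Reminder: inside the timestep loop dir was set to -1 above, so this
c dir == 1 branch of the inlined fft() is skipped at run time; the
c inverse path in the else branch further below is the one executed.)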
c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y0_r[i11 + p]; x11imag = y0_i[i11 + p]; x21real = y0_r[i12 + p]; x21imag = y0_i[i12 + p]; y1_r[i21 + p] = x11real + x21real; y1_i[i21 + p] = x11imag + x21imag; y1_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y1_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } if (l == logNX) break; //fftz2(is, l + 1, logNX, NX, u_r, u_i, y1_r, y1_i, y0_r, y0_i, NZ, NY); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NZ * NY; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NX; idx++) { n1 = NX / 2; if (l == 0) { lk = 1; } else { lk = 2 << ((l) - 1); } if (logNX - (l + 1) == 0) { li = 1; } else { li = 2 << ((logNX - (l + 1)) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y1_r[i11 + p]; x11imag = y1_i[i11 + p]; x21real = y1_r[i12 + p]; x21imag = y1_i[i12 + p]; y0_r[i21 + p] = x11real + x21real; y0_i[i21 + p] = x11imag + x21imag; y0_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y0_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } } if (logNX % 2 == 1) { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = i * NZ * NY + j * NZ + k; u1_r[m] = y1_r[id]; u1_i[m] = y1_i[id]; } } else { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = i * NZ * NY + j * NZ + k; u1_r[m] = y0_r[id]; u1_i[m] = y0_i[id]; } } //cffts2(1, logNY, u1_r, u1_i, u1_r, u1_i, y0_r, y0_i, y1_r, y1_i); /* x1 -> x1 */ is = 1; #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = j * NX * NZ + k * NX + i; y0_r[id] = u1_r[m]; y0_i[id] = u1_i[m]; } //cfftz(is, logNY, NY, y0_r, y0_i, y1_r, y1_i, NX, NZ); /*-------------------------------------------------------------------- c Perform one variant of the Stockham FFT. c-------------------------------------------------------------------*/ for (l = 1; l <= logNY; l += 2) { //fftz2(is, l, logNY, NY, u_r, u_i, y0_r, y0_i, y1_r, y1_i, NX, NZ); /*-------------------------------------------------------------------- c Set initial parameters. 
c-------------------------------------------------------------------*/ nn = NX * NZ; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NY; idx++) { n1 = NY / 2; if (l - 1 == 0) { lk = 1; } else { lk = 2 << ((l - 1) - 1); } if (logNY - l == 0) { li = 1; } else { li = 2 << ((logNY - l) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y0_r[i11 + p]; x11imag = y0_i[i11 + p]; x21real = y0_r[i12 + p]; x21imag = y0_i[i12 + p]; y1_r[i21 + p] = x11real + x21real; y1_i[i21 + p] = x11imag + x21imag; y1_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y1_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } if (l == logNY) break; //fftz2(is, l + 1, logNY, NY, u_r, u_i, y1_r, y1_i, y0_r, y0_i, NX, NZ); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NX * NZ; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NY; idx++) { n1 = NY / 2; if (l == 0) { lk = 1; } else { lk = 2 << ((l) - 1); } if (logNY - (l + 1) == 0) { li = 1; } else { li = 2 << ((logNY - (l + 1)) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y1_r[i11 + p]; x11imag = y1_i[i11 + p]; x21real = y1_r[i12 + p]; x21imag = y1_i[i12 + p]; y0_r[i21 + p] = x11real + x21real; y0_i[i21 + p] = x11imag + x21imag; y0_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y0_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } } if (logNY % 2 == 1) { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = j * NX * NZ + k * NX + i; u1_r[m] = y1_r[id]; u1_i[m] = y1_i[id]; } } else { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = j * NX * NZ + k * NX + i; u1_r[m] = y0_r[id]; u1_i[m] = y0_i[id]; } } //cffts3(1, logNZ, u1_r, u1_i, u2_r, u2_i, y0_r, y0_i, y1_r, y1_i); /* x1 -> x2 */ is = 1; //cfftz(is, logNZ, NZ, u1_r, u1_i, y1_r, y1_i, NX, NY); /*-------------------------------------------------------------------- c Perform one variant of the Stockham FFT. 
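c Unlike the X and Y passes, the Z pass needs no gather/transpose: with
c the flattened (k*NY + j)*NX + i layout the Z pencils are already
c addressed directly with stride NX*NY, so the butterflies run on u1 in
c place.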
c-------------------------------------------------------------------*/ for (l = 1; l <= logNZ; l += 2) { //fftz2(is, l, logNZ, NZ, u_r, u_i, u1_r, u1_i, y1_r, y1_i, NX, NY); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NX * NY; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NZ; idx++) { n1 = NZ / 2; if (l - 1 == 0) { lk = 1; } else { lk = 2 << ((l - 1) - 1); } if (logNZ - l == 0) { li = 1; } else { li = 2 << ((logNZ - l) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = u1_r[i11 + p]; x11imag = u1_i[i11 + p]; x21real = u1_r[i12 + p]; x21imag = u1_i[i12 + p]; y1_r[i21 + p] = x11real + x21real; y1_i[i21 + p] = x11imag + x21imag; y1_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y1_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } if (l == logNZ) break; //fftz2(is, l + 1, logNZ, NZ, u_r, u_i, y1_r, y1_i, u1_r, u1_i, NX, NY); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NX * NY; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NZ; idx++) { n1 = NZ / 2; if (l == 0) { lk = 1; } else { lk = 2 << ((l) - 1); } if (logNZ - (l + 1) == 0) { li = 1; } else { li = 2 << ((logNZ - (l + 1)) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y1_r[i11 + p]; x11imag = y1_i[i11 + p]; x21real = y1_r[i12 + p]; x21imag = y1_i[i12 + p]; u1_r[i21 + p] = x11real + x21real; u1_i[i21 + p] = x11imag + x21imag; u1_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); u1_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } } if (logNZ % 2 == 1) { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { u2_r[m] = y1_r[m]; u2_i[m] = y1_i[m]; } } else { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { u2_r[m] = u1_r[m]; u2_i[m] = u1_i[m]; } } } else { //cffts3(-1, logNZ, u1_r, u1_i, u1_r, u1_i, y0_r, y0_i, y1_r, y1_i); /* x1 -> x1 */ is = -1; //cfftz(is, logNZ, NZ, u1_r, u1_i, y1_r, y1_i, NX, NY); /*-------------------------------------------------------------------- c Perform one variant of the Stockham FFT. 
c-------------------------------------------------------------------*/ for (l = 1; l <= logNZ; l += 2) { //fftz2(is, l, logNZ, NZ, u_r, u_i, u1_r, u1_i, y1_r, y1_i, NX, NY); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NX * NY; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NZ; idx++) { n1 = NZ / 2; if (l - 1 == 0) { lk = 1; } else { lk = 2 << ((l - 1) - 1); } if (logNZ - l == 0) { li = 1; } else { li = 2 << ((logNZ - l) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = u1_r[i11 + p]; x11imag = u1_i[i11 + p]; x21real = u1_r[i12 + p]; x21imag = u1_i[i12 + p]; y1_r[i21 + p] = x11real + x21real; y1_i[i21 + p] = x11imag + x21imag; y1_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y1_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } if (l == logNZ) break; //fftz2(is, l + 1, logNZ, NZ, u_r, u_i, y1_r, y1_i, u1_r, u1_i, NX, NY); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NX * NY; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NZ; idx++) { n1 = NZ / 2; if (l == 0) { lk = 1; } else { lk = 2 << ((l) - 1); } if (logNZ - (l + 1) == 0) { li = 1; } else { li = 2 << ((logNZ - (l + 1)) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. 
c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y1_r[i11 + p]; x11imag = y1_i[i11 + p]; x21real = y1_r[i12 + p]; x21imag = y1_i[i12 + p]; u1_r[i21 + p] = x11real + x21real; u1_i[i21 + p] = x11imag + x21imag; u1_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); u1_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } } if (logNZ % 2 == 1) { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { u1_r[m] = y1_r[m]; u1_i[m] = y1_i[m]; } } else { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { u1_r[m] = u1_r[m]; u1_i[m] = u1_i[m]; } } //cffts2(-1, logNY, u1_r, u1_i, u1_r, u1_i, y0_r, y0_i, y1_r, y1_i); /* x1 -> x1 */ is = -1; #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = j * NX * NZ + k * NX + i; y0_r[id] = u1_r[m]; y0_i[id] = u1_i[m]; } //cfftz(is, logNY, NY, y0_r, y0_i, y1_r, y1_i, NX, NZ); /*-------------------------------------------------------------------- c Perform one variant of the Stockham FFT. c-------------------------------------------------------------------*/ for (l = 1; l <= logNY; l += 2) { //fftz2(is, l, logNY, NY, u_r, u_i, y0_r, y0_i, y1_r, y1_i, NX, NZ); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NX * NZ; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NY; idx++) { n1 = NY / 2; if (l - 1 == 0) { lk = 1; } else { lk = 2 << ((l - 1) - 1); } if (logNY - l == 0) { li = 1; } else { li = 2 << ((logNY - l) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y0_r[i11 + p]; x11imag = y0_i[i11 + p]; x21real = y0_r[i12 + p]; x21imag = y0_i[i12 + p]; y1_r[i21 + p] = x11real + x21real; y1_i[i21 + p] = x11imag + x21imag; y1_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y1_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } if (l == logNY) break; //fftz2(is, l + 1, logNY, NY, u_r, u_i, y1_r, y1_i, y0_r, y0_i, NX, NZ); /*-------------------------------------------------------------------- c Set initial parameters. 
c-------------------------------------------------------------------*/ nn = NX * NZ; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NY; idx++) { n1 = NY / 2; if (l == 0) { lk = 1; } else { lk = 2 << ((l) - 1); } if (logNY - (l + 1) == 0) { li = 1; } else { li = 2 << ((logNY - (l + 1)) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y1_r[i11 + p]; x11imag = y1_i[i11 + p]; x21real = y1_r[i12 + p]; x21imag = y1_i[i12 + p]; y0_r[i21 + p] = x11real + x21real; y0_i[i21 + p] = x11imag + x21imag; y0_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y0_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } } if (logNY % 2 == 1) { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = j * NX * NZ + k * NX + i; u1_r[m] = y1_r[id]; u1_i[m] = y1_i[id]; } } else { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = j * NX * NZ + k * NX + i; u1_r[m] = y0_r[id]; u1_i[m] = y0_i[id]; } } //cffts1(-1, logNX, u1_r, u1_i, u2_r, u2_i, y0_r, y0_i, y1_r, y1_i); /* x1 -> x2 */ is = -1; #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = i * NZ * NY + j * NZ + k; y0_r[id] = u1_r[m]; y0_i[id] = u1_i[m]; } //cfftz(is, logNX, NX, y0_r, y0_i, y1_r, y1_i, NZ, NY); /*-------------------------------------------------------------------- c Perform one variant of the Stockham FFT. c-------------------------------------------------------------------*/ for (l = 1; l <= logNX; l += 2) { //fftz2(is, l, logNX, NX, u_r, u_i, y0_r, y0_i, y1_r, y1_i, NZ, NY); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NZ * NY; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NX; idx++) { n1 = NX / 2; if (l - 1 == 0) { lk = 1; } else { lk = 2 << ((l - 1) - 1); } if (logNX - l == 0) { li = 1; } else { li = 2 << ((logNX - l) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. 
c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y0_r[i11 + p]; x11imag = y0_i[i11 + p]; x21real = y0_r[i12 + p]; x21imag = y0_i[i12 + p]; y1_r[i21 + p] = x11real + x21real; y1_i[i21 + p] = x11imag + x21imag; y1_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y1_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } if (l == logNX) break; //fftz2(is, l + 1, logNX, NX, u_r, u_i, y1_r, y1_i, y0_r, y0_i, NZ, NY); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NZ * NY; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NX; idx++) { n1 = NX / 2; if (l == 0) { lk = 1; } else { lk = 2 << ((l) - 1); } if (logNX - (l + 1) == 0) { li = 1; } else { li = 2 << ((logNX - (l + 1)) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y1_r[i11 + p]; x11imag = y1_i[i11 + p]; x21real = y1_r[i12 + p]; x21imag = y1_i[i12 + p]; y0_r[i21 + p] = x11real + x21real; y0_i[i21 + p] = x11imag + x21imag; y0_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y0_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } } if (logNX % 2 == 1) { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = i * NZ * NY + j * NZ + k; u2_r[m] = y1_r[id]; u2_i[m] = y1_i[id]; } } else { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = i * NZ * NY + j * NZ + k; u2_r[m] = y0_r[id]; u2_i[m] = y0_i[id]; } } } } //END_FFT // if (TIMERS_ENABLED == TRUE) { timer_stop(T_FFT); } if (TIMERS_ENABLED == TRUE) { timer_start(T_CHECKSUM); } //checksum(iter, u2_r, u2_i, dims[0]); chk_r = 0.0; chk_i = 0.0; #pragma acc kernels loop gang worker independent for (m = 0; m < 1024; m++) { j = 1 + m; q = j % NX; if (q >= 0 && q < NX) { r = (3 * j) % NY; if (r >= 0 && r < NY) { s = (5 * j) % NZ; if (s >= 0 && s < NZ) { chk_r = chk_r + u2_r[s * NY * NX + r * NX + q]; chk_i = chk_i + u2_i[s * NY * NX + r * NX + q]; } } } } //printf("chk_r = %22.12e, chk_i =%22.12e\n", chk_r, chk_i); { sums_r[iter] += chk_r; sums_i[iter] += chk_i; } { sums_r[iter] = sums_r[iter] / (double)(NTOTAL); sums_i[iter] = sums_i[iter] / (double)(NTOTAL); printf("T = %5d Checksum = %22.12e %22.12e\n", iter, sums_r[iter], sums_i[iter]); } if (TIMERS_ENABLED == TRUE) { timer_stop(T_CHECKSUM); } } verify(NX, NY, NZ, niter, &verified, &classT); #if defined(_OPENMP) nthreads = omp_get_num_threads(); #endif /* _OPENMP */ } /* end parallel */ timer_stop(T_TOTAL); total_time = timer_read(T_TOTAL); if (total_time != 0.0) { mflops = 1.0e-6 * (double)(NTOTAL) * (14.8157 + 7.19641 * log((double)(NTOTAL)) + (5.23518 + 7.21113 * log((double)(NTOTAL))) * niter) / total_time; } else { mflops = 0.0; } 
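/* Mop/s follow the operation-count model used by the code just above:
   ops ~= NTOTAL * (14.8157 + 7.19641*ln(NTOTAL)
                    + (5.23518 + 7.21113*ln(NTOTAL)) * niter),
   divided by the measured wall-clock time (with a guard against a zero
   total_time). */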
c_print_results("FT", classT, NX, NY, NZ, niter, nthreads, total_time, mflops, " floating point", verified, NPBVERSION, COMPILETIME, CS1, CS2, CS3, CS4, CS5, CS6, CS7); if (TIMERS_ENABLED == TRUE) print_timers(); return 0; } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void compute_initial_conditions(double u0_r[NTOTAL], double u0_i[NTOTAL]) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c Fill in array u0 with initial conditions from c random number generator c-------------------------------------------------------------------*/ int k; double x0, start, an, dummy; static double tmp[NX * 2 * MAXDIM + 1]; int i, j, t; start = SEED; /*-------------------------------------------------------------------- c Jump to the starting element for our first plane. c-------------------------------------------------------------------*/ ipow46(A, (zstart[0] - 1) * 2 * NX * NY + (ystart[0] - 1) * 2 * NX, &an); dummy = randlc(&start, an); ipow46(A, 2 * NX * NY, &an); /*-------------------------------------------------------------------- c Go through by z planes filling in one square at a time. c-------------------------------------------------------------------*/ for (k = 0; k < NZ; k++) { x0 = start; vranlc(2 * NX * NY, &x0, A, tmp); t = 1; for (j = 0; j < NY; j++) for (i = 0; i < NX; i++) { u0_r[k * NY * NX + j * NX + i] = tmp[t++]; u0_i[k * NY * NX + j * NX + i] = tmp[t++]; } if (k != NZ) dummy = randlc(&start, an); } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void ipow46(double a, int exponent, double *result) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c compute a^exponent mod 2^46 c-------------------------------------------------------------------*/ double dummy, q, r; int n, n2; /*-------------------------------------------------------------------- c Use c a^n = a^(n/2)*a^(n/2) if n even else c a^n = a*a^(n-1) if n odd c-------------------------------------------------------------------*/ *result = 1; if (exponent == 0) return; q = a; r = 1; n = exponent; while (n > 1) { n2 = n / 2; if (n2 * 2 == n) { dummy = randlc(&q, q); n = n2; } else { dummy = randlc(&r, q); n = n - 1; } } dummy = randlc(&r, q); *result = r; } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void setup(void) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ int ierr, i, j, fstatus; printf("\n\n NAS Parallel Benchmarks 2.3 OpenMP C version" " - FT Benchmark\n\n"); niter = NITER_DEFAULT; printf(" Size : %3dx%3dx%3d\n", NX, NY, NZ); printf(" Iterations : %7d\n", niter); /* * 1004 format(' Number of processes : ', i7) 1005 format(' Processor * array : ', i3, 'x', i3) 1006 format(' WARNING: compiled for ', * i5, ' processes. ', > ' Will not verify. 
') */ for (i = 0; i < 3; i++) { dims[i][0] = NX; dims[i][1] = NY; dims[i][2] = NZ; } for (i = 0; i < 3; i++) { xstart[i] = 1; xend[i] = NX; ystart[i] = 1; yend[i] = NY; zstart[i] = 1; zend[i] = NZ; } /*-------------------------------------------------------------------- c Set up info for blocking of ffts and transposes. This improves c performance on cache-based systems. Blocking involves c working on a chunk of the problem at a time, taking chunks c along the first, second, or third dimension. c c - In cffts1 blocking is on 2nd dimension (with fft on 1st dim) c - In cffts2/3 blocking is on 1st dimension (with fft on 2nd and 3rd dims) c Since 1st dim is always in processor, we'll assume it's long enough c (default blocking factor is 16 so min size for 1st dim is 16) c The only case we have to worry about is cffts1 in a 2d decomposition. c so the blocking factor should not be larger than the 2nd dimension. c-------------------------------------------------------------------*/ fftblock = FFTBLOCK_DEFAULT; fftblockpad = FFTBLOCKPAD_DEFAULT; if (fftblock != FFTBLOCK_DEFAULT) fftblockpad = fftblock + 3; } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void print_timers(void) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ int i; const char *tstrings[] = {" total ", " setup ", " fft ", " evolve ", " checksum ", " fftlow ", " fftcopy " }; for (i = 0; i < T_MAX; i++) { if (timer_read(i) != 0.0) { printf("timer %2d(%16s( :%10.6f\n", i, tstrings[i], timer_read(i)); } } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void fft(int dir, double x1_r[NTOTAL], double x1_i[NTOTAL], double x2_r[NTOTAL], double x2_i[NTOTAL]) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static double y0_r[NTOTAL]; static double y0_i[NTOTAL]; static double y1_r[NTOTAL]; static double y1_i[NTOTAL]; int logNX, logNY, logNZ; /////////////////////// //Used for cffts1 ().// /////////////////////// int i, j, k, jj; int m, id; int is; /////////////////////// //Used for cfftz ().// /////////////////////// int l; /////////////////////// //Used for fftz2 ().// /////////////////////// int k_fftz2, n1, li, lj, lk, ku, i_fftz2, i11, i12, i21, i22; double u1_rf, x11_r, x21_r; double u1_if, x11_i, x21_i; int idx, p, nn; double x11real, x11imag, x21real, x21imag; //START_FFT // logNX = ilog2(NX); logNY = ilog2(NY); logNZ = ilog2(NZ); /*-------------------------------------------------------------------- c note: args x1, x2 must be different arrays c note: args for cfftsx are (direction, layout, xin, xout, scratch) c xin/xout may be the same and it can be somewhat faster c if they are c-------------------------------------------------------------------*/ #pragma acc data \ copyin(u_r[0:NX], u_i[0:NX]) \ copy(x1_r[0:NTOTAL], x1_i[0:NTOTAL]) \ copyout(x2_r[0:NTOTAL], x2_i[0:NTOTAL]) \ create(y0_r[0:NTOTAL], y0_i[0:NTOTAL]) \ create(y1_r[0:NTOTAL], y1_i[0:NTOTAL]) { if (dir == 1) { //cffts1(1, logNX, x1_r, x1_i, x1_r, x1_i, y0_r, y0_i, y1_r, y1_i); /* x1 -> x1 */ is = 1; #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = i * NZ * NY + j * NZ + k; y0_r[id] = x1_r[m]; 
y0_i[id] = x1_i[m]; } //cfftz(is, logNX, NX, y0_r, y0_i, y1_r, y1_i, NZ, NY); /*-------------------------------------------------------------------- c Perform one variant of the Stockham FFT. c-------------------------------------------------------------------*/ for (l = 1; l <= logNX; l += 2) { //fftz2(is, l, logNX, NX, u_r, u_i, y0_r, y0_i, y1_r, y1_i, NZ, NY); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NZ * NY; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NX; idx++) { n1 = NX / 2; if (l - 1 == 0) { lk = 1; } else { lk = 2 << ((l - 1) - 1); } if (logNX - l == 0) { li = 1; } else { li = 2 << ((logNX - l) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y0_r[i11 + p]; x11imag = y0_i[i11 + p]; x21real = y0_r[i12 + p]; x21imag = y0_i[i12 + p]; y1_r[i21 + p] = x11real + x21real; y1_i[i21 + p] = x11imag + x21imag; y1_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y1_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } if (l == logNX) break; //fftz2(is, l + 1, logNX, NX, u_r, u_i, y1_r, y1_i, y0_r, y0_i, NZ, NY); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NZ * NY; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NX; idx++) { n1 = NX / 2; if (l == 0) { lk = 1; } else { lk = 2 << ((l) - 1); } if (logNX - (l + 1) == 0) { li = 1; } else { li = 2 << ((logNX - (l + 1)) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. 
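c Each iteration is one radix-2 butterfly: the lower output gets the sum,
c the upper output gets the difference rotated by the twiddle factor u1.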
c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y1_r[i11 + p]; x11imag = y1_i[i11 + p]; x21real = y1_r[i12 + p]; x21imag = y1_i[i12 + p]; y0_r[i21 + p] = x11real + x21real; y0_i[i21 + p] = x11imag + x21imag; y0_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y0_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } } if (logNX % 2 == 1) { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = i * NZ * NY + j * NZ + k; x1_r[m] = y1_r[id]; x1_i[m] = y1_i[id]; } } else { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = i * NZ * NY + j * NZ + k; x1_r[m] = y0_r[id]; x1_i[m] = y0_i[id]; } } //cffts2(1, logNY, x1_r, x1_i, x1_r, x1_i, y0_r, y0_i, y1_r, y1_i); /* x1 -> x1 */ is = 1; #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = j * NX * NZ + k * NX + i; y0_r[id] = x1_r[m]; y0_i[id] = x1_i[m]; } //cfftz(is, logNY, NY, y0_r, y0_i, y1_r, y1_i, NX, NZ); /*-------------------------------------------------------------------- c Perform one variant of the Stockham FFT. c-------------------------------------------------------------------*/ for (l = 1; l <= logNY; l += 2) { //fftz2(is, l, logNY, NY, u_r, u_i, y0_r, y0_i, y1_r, y1_i, NX, NZ); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NX * NZ; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NY; idx++) { n1 = NY / 2; if (l - 1 == 0) { lk = 1; } else { lk = 2 << ((l - 1) - 1); } if (logNY - l == 0) { li = 1; } else { li = 2 << ((logNY - l) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y0_r[i11 + p]; x11imag = y0_i[i11 + p]; x21real = y0_r[i12 + p]; x21imag = y0_i[i12 + p]; y1_r[i21 + p] = x11real + x21real; y1_i[i21 + p] = x11imag + x21imag; y1_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y1_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } if (l == logNY) break; //fftz2(is, l + 1, logNY, NY, u_r, u_i, y1_r, y1_i, y0_r, y0_i, NX, NZ); /*-------------------------------------------------------------------- c Set initial parameters. 
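c Companion stage l+1 of the y-direction pass: it reads y1 and writes y0,
c so successive stages ping-pong between the two scratch arrays.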
c-------------------------------------------------------------------*/ nn = NX * NZ; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NY; idx++) { n1 = NY / 2; if (l == 0) { lk = 1; } else { lk = 2 << ((l) - 1); } if (logNY - (l + 1) == 0) { li = 1; } else { li = 2 << ((logNY - (l + 1)) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y1_r[i11 + p]; x11imag = y1_i[i11 + p]; x21real = y1_r[i12 + p]; x21imag = y1_i[i12 + p]; y0_r[i21 + p] = x11real + x21real; y0_i[i21 + p] = x11imag + x21imag; y0_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y0_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } } if (logNY % 2 == 1) { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = j * NX * NZ + k * NX + i; x1_r[m] = y1_r[id]; x1_i[m] = y1_i[id]; } } else { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = j * NX * NZ + k * NX + i; x1_r[m] = y0_r[id]; x1_i[m] = y0_i[id]; } } //cffts3(1, logNZ, x1_r, x1_i, x2_r, x2_i, y0_r, y0_i, y1_r, y1_i); /* x1 -> x2 */ is = 1; //cfftz(is, logNZ, NZ, x1_r, x1_i, y1_r, y1_i, NX, NY); /*-------------------------------------------------------------------- c Perform one variant of the Stockham FFT. c-------------------------------------------------------------------*/ for (l = 1; l <= logNZ; l += 2) { //fftz2(is, l, logNZ, NZ, u_r, u_i, x1_r, x1_i, y1_r, y1_i, NX, NY); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NX * NY; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NZ; idx++) { n1 = NZ / 2; if (l - 1 == 0) { lk = 1; } else { lk = 2 << ((l - 1) - 1); } if (logNZ - l == 0) { li = 1; } else { li = 2 << ((logNZ - l) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. 
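c The z-direction FFT needs no transpose copy: k is already the slowest
c index, so this stage reads x1 directly with plane stride nn = NX*NY.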
c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = x1_r[i11 + p]; x11imag = x1_i[i11 + p]; x21real = x1_r[i12 + p]; x21imag = x1_i[i12 + p]; y1_r[i21 + p] = x11real + x21real; y1_i[i21 + p] = x11imag + x21imag; y1_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y1_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } if (l == logNZ) break; //fftz2(is, l + 1, logNZ, NZ, u_r, u_i, y1_r, y1_i, x1_r, x1_i, NX, NY); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NX * NY; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NZ; idx++) { n1 = NZ / 2; if (l == 0) { lk = 1; } else { lk = 2 << ((l) - 1); } if (logNZ - (l + 1) == 0) { li = 1; } else { li = 2 << ((logNZ - (l + 1)) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y1_r[i11 + p]; x11imag = y1_i[i11 + p]; x21real = y1_r[i12 + p]; x21imag = y1_i[i12 + p]; x1_r[i21 + p] = x11real + x21real; x1_i[i21 + p] = x11imag + x21imag; x1_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); x1_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } } if (logNZ % 2 == 1) { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { x2_r[m] = y1_r[m]; x2_i[m] = y1_i[m]; } } else { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { x2_r[m] = x1_r[m]; x2_i[m] = x1_i[m]; } } } else { //cffts3(-1, logNZ, x1_r, x1_i, x1_r, x1_i, y0_r, y0_i, y1_r, y1_i); /* x1 -> x1 */ is = -1; //cfftz(is, logNZ, NZ, x1_r, x1_i, y1_r, y1_i, NX, NY); /*-------------------------------------------------------------------- c Perform one variant of the Stockham FFT. c-------------------------------------------------------------------*/ for (l = 1; l <= logNZ; l += 2) { //fftz2(is, l, logNZ, NZ, u_r, u_i, x1_r, x1_i, y1_r, y1_i, NX, NY); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NX * NY; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NZ; idx++) { n1 = NZ / 2; if (l - 1 == 0) { lk = 1; } else { lk = 2 << ((l - 1) - 1); } if (logNZ - l == 0) { li = 1; } else { li = 2 << ((logNZ - l) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. 
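c Inverse transform (is = -1): the only difference from the forward pass is
c the conjugated twiddle factor, i.e. u1_if takes the opposite sign above.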
c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = x1_r[i11 + p]; x11imag = x1_i[i11 + p]; x21real = x1_r[i12 + p]; x21imag = x1_i[i12 + p]; y1_r[i21 + p] = x11real + x21real; y1_i[i21 + p] = x11imag + x21imag; y1_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y1_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } if (l == logNZ) break; //fftz2(is, l + 1, logNZ, NZ, u_r, u_i, y1_r, y1_i, x1_r, x1_i, NX, NY); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NX * NY; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NZ; idx++) { n1 = NZ / 2; if (l == 0) { lk = 1; } else { lk = 2 << ((l) - 1); } if (logNZ - (l + 1) == 0) { li = 1; } else { li = 2 << ((logNZ - (l + 1)) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y1_r[i11 + p]; x11imag = y1_i[i11 + p]; x21real = y1_r[i12 + p]; x21imag = y1_i[i12 + p]; x1_r[i21 + p] = x11real + x21real; x1_i[i21 + p] = x11imag + x21imag; x1_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); x1_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } } if (logNZ % 2 == 1) { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { x1_r[m] = y1_r[m]; x1_i[m] = y1_i[m]; } } else { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { x1_r[m] = x1_r[m]; x1_i[m] = x1_i[m]; } } //cffts2(-1, logNY, x1_r, x1_i, x1_r, x1_i, y0_r, y0_i, y1_r, y1_i); /* x1 -> x1 */ is = -1; #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = j * NX * NZ + k * NX + i; y0_r[id] = x1_r[m]; y0_i[id] = x1_i[m]; } //cfftz(is, logNY, NY, y0_r, y0_i, y1_r, y1_i, NX, NZ); /*-------------------------------------------------------------------- c Perform one variant of the Stockham FFT. c-------------------------------------------------------------------*/ for (l = 1; l <= logNY; l += 2) { //fftz2(is, l, logNY, NY, u_r, u_i, y0_r, y0_i, y1_r, y1_i, NX, NZ); /*-------------------------------------------------------------------- c Set initial parameters. 
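c Same staging as the forward y-direction pass, only with is = -1;
c nn = NX*NZ lines of length NY are transformed independently.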
c-------------------------------------------------------------------*/ nn = NX * NZ; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NY; idx++) { n1 = NY / 2; if (l - 1 == 0) { lk = 1; } else { lk = 2 << ((l - 1) - 1); } if (logNY - l == 0) { li = 1; } else { li = 2 << ((logNY - l) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y0_r[i11 + p]; x11imag = y0_i[i11 + p]; x21real = y0_r[i12 + p]; x21imag = y0_i[i12 + p]; y1_r[i21 + p] = x11real + x21real; y1_i[i21 + p] = x11imag + x21imag; y1_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y1_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } if (l == logNY) break; //fftz2(is, l + 1, logNY, NY, u_r, u_i, y1_r, y1_i, y0_r, y0_i, NX, NZ); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NX * NZ; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NY; idx++) { n1 = NY / 2; if (l == 0) { lk = 1; } else { lk = 2 << ((l) - 1); } if (logNY - (l + 1) == 0) { li = 1; } else { li = 2 << ((logNY - (l + 1)) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y1_r[i11 + p]; x11imag = y1_i[i11 + p]; x21real = y1_r[i12 + p]; x21imag = y1_i[i12 + p]; y0_r[i21 + p] = x11real + x21real; y0_i[i21 + p] = x11imag + x21imag; y0_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y0_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } } if (logNY % 2 == 1) { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = j * NX * NZ + k * NX + i; x1_r[m] = y1_r[id]; x1_i[m] = y1_i[id]; } } else { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = j * NX * NZ + k * NX + i; x1_r[m] = y0_r[id]; x1_i[m] = y0_i[id]; } } //cffts1(-1, logNX, x1_r, x1_i, x2_r, x2_i, y0_r, y0_i, y1_r, y1_i); /* x1 -> x2 */ is = -1; #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = i * NZ * NY + j * NZ + k; y0_r[id] = x1_r[m]; y0_i[id] = x1_i[m]; } //cfftz(is, logNX, NX, y0_r, y0_i, y1_r, y1_i, NZ, NY); /*-------------------------------------------------------------------- c Perform one variant of the Stockham FFT. 
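c The Stockham variant needs no separate bit-reversal pass: every stage
c writes its reordered output into the other scratch array (y0 <-> y1),
c and the final parity check on logNX picks the array holding the result.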
c-------------------------------------------------------------------*/ for (l = 1; l <= logNX; l += 2) { //fftz2(is, l, logNX, NX, u_r, u_i, y0_r, y0_i, y1_r, y1_i, NZ, NY); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NZ * NY; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NX; idx++) { n1 = NX / 2; if (l - 1 == 0) { lk = 1; } else { lk = 2 << ((l - 1) - 1); } if (logNX - l == 0) { li = 1; } else { li = 2 << ((logNX - l) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y0_r[i11 + p]; x11imag = y0_i[i11 + p]; x21real = y0_r[i12 + p]; x21imag = y0_i[i12 + p]; y1_r[i21 + p] = x11real + x21real; y1_i[i21 + p] = x11imag + x21imag; y1_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y1_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } if (l == logNX) break; //fftz2(is, l + 1, logNX, NX, u_r, u_i, y1_r, y1_i, y0_r, y0_i, NZ, NY); /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ nn = NZ * NY; //number of threads #pragma acc kernels loop gang worker independent for (idx = 0; idx < NTOTAL / NX; idx++) { n1 = NX / 2; if (l == 0) { lk = 1; } else { lk = 2 << ((l) - 1); } if (logNX - (l + 1) == 0) { li = 1; } else { li = 2 << ((logNX - (l + 1)) - 1); } lj = 2 * lk; ku = li; for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++) { i11 = idx + i_fftz2 * lk * nn; i12 = i11 + n1 * nn; i21 = idx + i_fftz2 * lj * nn; i22 = i21 + lk * nn; if (is >= 1) { u1_rf = u_r[ku + i_fftz2]; u1_if = u_i[ku + i_fftz2]; } else { u1_rf = u_r[ku + i_fftz2]; u1_if = -u_i[ku + i_fftz2]; } /*-------------------------------------------------------------------- c This loop is vectorizable. 
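c Butterfly for stage l+1 of the inverse x-direction FFT: it reads y1 and
c writes y0, again with the conjugated twiddle factor.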
c-------------------------------------------------------------------*/ for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++) { p = k_fftz2 * nn; x11real = y1_r[i11 + p]; x11imag = y1_i[i11 + p]; x21real = y1_r[i12 + p]; x21imag = y1_i[i12 + p]; y0_r[i21 + p] = x11real + x21real; y0_i[i21 + p] = x11imag + x21imag; y0_r[i22 + p] = u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag); y0_i[i22 + p] = u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real); } } } } if (logNX % 2 == 1) { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = i * NZ * NY + j * NZ + k; x2_r[m] = y1_r[id]; x2_i[m] = y1_i[id]; } } else { #pragma acc kernels loop gang worker independent for (m = 0; m < NTOTAL; m++) { i = m % NX; k = m / NX; j = k % NY; k = k / NY; id = i * NZ * NY + j * NZ + k; x2_r[m] = y0_r[id]; x2_i[m] = y0_i[id]; } } } } //END_FFT // } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void fft_init(int n) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c compute the roots-of-unity array that will be used for subsequent FFTs. c-------------------------------------------------------------------*/ int m, nu, ku, i, j, ln; double t, ti; /*-------------------------------------------------------------------- c Initialize the U array with sines and cosines in a manner that permits c stride one access at each FFT iteration. c-------------------------------------------------------------------*/ nu = n; m = ilog2(n); u_r[0] = (double)m; u_i[0] = 0.0; ku = 1; ln = 1; for (j = 1; j <= m; j++) { t = PI / ln; for (i = 0; i <= ln - 1; i++) { ti = i * t; u_r[i + ku] = cos(ti); u_i[i + ku] = sin(ti); } ku = ku + ln; ln = 2 * ln; } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static int ilog2(int n) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ int nn, lg; if (n == 1) { return 0; } lg = 1; nn = 2; while (nn < n) { nn = nn << 1; lg++; } return lg; } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void verify(int d1, int d2, int d3, int nt, boolean * verified, char *classT) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ int ierr, size, i; double err, epsilon; /*-------------------------------------------------------------------- c Sample size reference checksums c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c Class S size reference checksums c-------------------------------------------------------------------*/ double vdata_real_s[6 + 1] = {0.0, 5.546087004964e+02, 5.546385409189e+02, 5.546148406171e+02, 5.545423607415e+02, 5.544255039624e+02, 5.542683411902e+02 }; double vdata_imag_s[6 + 1] = {0.0, 4.845363331978e+02, 4.865304269511e+02, 4.883910722336e+02, 4.901273169046e+02, 4.917475857993e+02, 4.932597244941e+02 }; /*-------------------------------------------------------------------- c Class W 
size reference checksums c-------------------------------------------------------------------*/ double vdata_real_w[6 + 1] = {0.0, 5.673612178944e+02, 5.631436885271e+02, 5.594024089970e+02, 5.560698047020e+02, 5.530898991250e+02, 5.504159734538e+02 }; double vdata_imag_w[6 + 1] = {0.0, 5.293246849175e+02, 5.282149986629e+02, 5.270996558037e+02, 5.260027904925e+02, 5.249400845633e+02, 5.239212247086e+02 }; /*-------------------------------------------------------------------- c Class A size reference checksums c-------------------------------------------------------------------*/ double vdata_real_a[6 + 1] = {0.0, 5.046735008193e+02, 5.059412319734e+02, 5.069376896287e+02, 5.077892868474e+02, 5.085233095391e+02, 5.091487099959e+02 }; double vdata_imag_a[6 + 1] = {0.0, 5.114047905510e+02, 5.098809666433e+02, 5.098144042213e+02, 5.101336130759e+02, 5.104914655194e+02, 5.107917842803e+02 }; /*-------------------------------------------------------------------- c Class B size reference checksums c-------------------------------------------------------------------*/ double vdata_real_b[20 + 1] = {0.0, 5.177643571579e+02, 5.154521291263e+02, 5.146409228649e+02, 5.142378756213e+02, 5.139626667737e+02, 5.137423460082e+02, 5.135547056878e+02, 5.133910925466e+02, 5.132470705390e+02, 5.131197729984e+02, 5.130070319283e+02, 5.129070537032e+02, 5.128182883502e+02, 5.127393733383e+02, 5.126691062020e+02, 5.126064276004e+02, 5.125504076570e+02, 5.125002331720e+02, 5.124551951846e+02, 5.124146770029e+02 }; double vdata_imag_b[20 + 1] = {0.0, 5.077803458597e+02, 5.088249431599e+02, 5.096208912659e+02, 5.101023387619e+02, 5.103976610617e+02, 5.105948019802e+02, 5.107404165783e+02, 5.108576573661e+02, 5.109577278523e+02, 5.110460304483e+02, 5.111252433800e+02, 5.111968077718e+02, 5.112616233064e+02, 5.113203605551e+02, 5.113735928093e+02, 5.114218460548e+02, 5.114656139760e+02, 5.115053595966e+02, 5.115415130407e+02, 5.115744692211e+02 }; /*-------------------------------------------------------------------- c Class C size reference checksums c-------------------------------------------------------------------*/ double vdata_real_c[20 + 1] = {0.0, 5.195078707457e+02, 5.155422171134e+02, 5.144678022222e+02, 5.140150594328e+02, 5.137550426810e+02, 5.135811056728e+02, 5.134569343165e+02, 5.133651975661e+02, 5.132955192805e+02, 5.132410471738e+02, 5.131971141679e+02, 5.131605205716e+02, 5.131290734194e+02, 5.131012720314e+02, 5.130760908195e+02, 5.130528295923e+02, 5.130310107773e+02, 5.130103090133e+02, 5.129905029333e+02, 5.129714421109e+02 }; double vdata_imag_c[20 + 1] = {0.0, 5.149019699238e+02, 5.127578201997e+02, 5.122251847514e+02, 5.121090289018e+02, 5.121143685824e+02, 5.121496764568e+02, 5.121870921893e+02, 5.122193250322e+02, 5.122454735794e+02, 5.122663649603e+02, 5.122830879827e+02, 5.122965869718e+02, 5.123075927445e+02, 5.123166486553e+02, 5.123241541685e+02, 5.123304037599e+02, 5.123356167976e+02, 5.123399592211e+02, 5.123435588985e+02, 5.123465164008e+02 }; epsilon = 1.0e-12; *verified = TRUE; *classT = 'U'; if (d1 == 64 && d2 == 64 && d3 == 64 && nt == 6) { *classT = 'S'; for (i = 1; i <= nt; i++) { err = (sums_r[i] - vdata_real_s[i]) / vdata_real_s[i]; if (fabs(err) > epsilon) { *verified = FALSE; break; } err = (sums_i[i] - vdata_imag_s[i]) / vdata_imag_s[i]; if (fabs(err) > epsilon) { *verified = FALSE; break; } } } else if (d1 == 128 && d2 == 128 && d3 == 32 && nt == 6) { *classT = 'W'; for (i = 1; i <= nt; i++) { err = (sums_r[i] - vdata_real_w[i]) / vdata_real_w[i]; if (fabs(err) > 
epsilon) { *verified = FALSE; break; } err = (sums_i[i] - vdata_imag_w[i]) / vdata_imag_w[i]; if (fabs(err) > epsilon) { *verified = FALSE; break; } } } else if (d1 == 256 && d2 == 256 && d3 == 128 && nt == 6) { *classT = 'A'; for (i = 1; i <= nt; i++) { err = (sums_r[i] - vdata_real_a[i]) / vdata_real_a[i]; if (fabs(err) > epsilon) { *verified = FALSE; break; } err = (sums_i[i] - vdata_imag_a[i]) / vdata_imag_a[i]; if (fabs(err) > epsilon) { *verified = FALSE; break; } } } else if (d1 == 512 && d2 == 256 && d3 == 256 && nt == 20) { *classT = 'B'; for (i = 1; i <= nt; i++) { err = (sums_r[i] - vdata_real_b[i]) / vdata_real_b[i]; if (fabs(err) > epsilon) { *verified = FALSE; break; } err = (sums_i[i] - vdata_imag_b[i]) / vdata_imag_b[i]; if (fabs(err) > epsilon) { *verified = FALSE; break; } } } else if (d1 == 512 && d2 == 512 && d3 == 512 && nt == 20) { *classT = 'C'; for (i = 1; i <= nt; i++) { err = (sums_r[i] - vdata_real_c[i]) / vdata_real_c[i]; if (fabs(err) > epsilon) { *verified = FALSE; break; } err = (sums_i[i] - vdata_imag_c[i]) / vdata_imag_c[i]; if (fabs(err) > epsilon) { *verified = FALSE; break; } } } if (*classT != 'U') { printf("Result verification successful\n"); } else { printf("Result verification failed\n"); } printf("class = %1c\n", *classT); }
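/*--------------------------------------------------------------------
c A minimal standalone sketch (not part of the benchmark) of the idea behind
c ipow46() and the plane jumps in compute_initial_conditions(): advance the
c random stream by computing a^n mod 2^46 with square-and-multiply, which the
c randlc(&q,q)/randlc(&r,q) calls above perform on 46-bit values held in
c doubles. This version uses uint64_t instead; pow46_sketch and the example
c exponent are illustrative only.
c-------------------------------------------------------------------*/
#include <stdint.h>
#include <stdio.h>

#define MASK46 ((((uint64_t)1) << 46) - 1)

static uint64_t pow46_sketch(uint64_t a, uint64_t n)
{
    uint64_t q = a & MASK46;            /* running square     */
    uint64_t r = 1;                     /* accumulated result */
    while (n > 0) {
        if (n & 1)
            r = (r * q) & MASK46;       /* a^n = a * a^(n-1) when n is odd  */
        q = (q * q) & MASK46;           /* a^n = (a^(n/2))^2 when n is even */
        n >>= 1;
    }
    /* masking is exact because 2^46 divides 2^64, so the wrapped uint64_t
       product still carries the correct low 46 bits */
    return r;
}

int main(void)
{
    /* e.g. jump past one z-plane of 2*NX*NY values (256x256 plane) */
    printf("%llu\n", (unsigned long long)pow46_sketch(1220703125ULL, 2 * 256 * 256));
    return 0;
}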
pwsafe_fmt_plug.c
/* Password Safe and Password Gorilla cracker patch for JtR. Hacked together * during May of 2012 by Dhiru Kholia <dhiru.kholia at gmail.com>. * * Optimization patch during January of 2013 by Brian Wallace <brian.wallace9809 at gmail.com>. * * This software is Copyright (c) 2012-2013 * Dhiru Kholia <dhiru.kholia at gmail.com> and Brian Wallace <brian.wallace9809 at gmail.com> * and it is hereby released to the general public under the following terms: * Redistribution and use in source and binary forms, with or without modification, are permitted. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_pwsafe; #elif FMT_REGISTERS_H john_register_one(&fmt_pwsafe); #else #include <string.h> #include <assert.h> #include <errno.h> #include "arch.h" //#undef SIMD_COEF_32 #include "sha2.h" #include "misc.h" #include "common.h" #include "formats.h" #include "params.h" #include "options.h" #include "johnswap.h" #include "simd-intrinsics.h" #ifdef _OPENMP static int omp_t = 1; #include <omp.h> #ifndef OMP_SCALE #define OMP_SCALE 1 // tuned on core i7 #endif #endif #include "memdbg.h" #define FORMAT_LABEL "pwsafe" #define FORMAT_NAME "Password Safe" #define FORMAT_TAG "$pwsafe$*" #define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1) #define ALGORITHM_NAME "SHA256 " SHA256_ALGORITHM_NAME #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define PLAINTEXT_LENGTH 125 #define BINARY_SIZE 32 #define SALT_SIZE sizeof(struct custom_salt) #define BINARY_ALIGN sizeof(uint32_t) #define SALT_ALIGN sizeof(int) #ifdef SIMD_COEF_32 #if ARCH_LITTLE_ENDIAN==1 #define GETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + (3-((i)&3)) + (unsigned int)index/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32*4 ) #else #define GETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + ((i)&3) + (unsigned int)index/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32*4 ) #endif #define MIN_KEYS_PER_CRYPT (SIMD_COEF_32*SIMD_PARA_SHA256) #define MAX_KEYS_PER_CRYPT (SIMD_COEF_32*SIMD_PARA_SHA256) #else #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #endif static struct fmt_tests pwsafe_tests[] = { {"$pwsafe$*3*fefc1172093344c9d5577b25f5b4b6e5d2942c94f9fc24c21733e28ae6527521*2048*88cbaf7d8668c1a98263f5dce7cb39c3304c49a3e0d76a7ea475dc02ab2f97a7", "12345678"}, {"$pwsafe$*3*581cd1135b9b993ccb0f6b01c1fcfacd799c69960496c96286f94fe1400c1b25*2048*4ab3c2d3af251e94eb2f753fdf30fb9da074bec6bac0fa9d9d152b95fc5795c6", "openwall"}, {"$pwsafe$*3*34ba0066d0fc594c126b60b9db98b6024e1cf585901b81b5b005ce386f173d4c*2048*cc86f1a5d930ff19b3602770a86586b5d9dea7bb657012aca875aa2a7dc71dc0", "12345678901234567890123"}, {"$pwsafe$*3*a42431191707895fb8d1121a3a6e255e33892d8eecb50fc616adab6185b5affb*2048*0f71d12df2b7c5394ae90771f6475a7ad0437007a8eeb5d9b58e35d8fd57c827", "123456789012345678901234567"}, {"$pwsafe$*3*c380dee0dbb536f5454f78603b020be76b33e294e9c2a0e047f43b9c61669fc8*2048*e88ed54a85e419d555be219d200563ae3ba864e24442826f412867fc0403917d", "this is an 87 character password to test the max bound of pwsafe-opencl................"}, {NULL} }; static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static uint32_t (*crypt_out)[BINARY_SIZE / sizeof(uint32_t)]; static struct custom_salt { int version; unsigned int iterations; unsigned char salt[32]; } *cur_salt; static void init(struct fmt_main *self) { #ifdef _OPENMP omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt); 
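	/* both per-candidate arrays use the (possibly OpenMP-scaled)
	 * max_keys_per_crypt set just above, so every thread in crypt_all()
	 * gets its own slice of candidates to hash */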
crypt_out = mem_calloc(sizeof(*crypt_out), self->params.max_keys_per_crypt); } static void done(void) { MEM_FREE(crypt_out); MEM_FREE(saved_key); } static int valid(char *ciphertext, struct fmt_main *self) { // format $pwsafe$version*salt*iterations*hash char *p; char *ctcopy; char *keeptr; if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0) return 0; ctcopy = strdup(ciphertext); keeptr = ctcopy; ctcopy += FORMAT_TAG_LEN; /* skip over "$pwsafe$*" */ if ((p = strtokm(ctcopy, "*")) == NULL) /* version */ goto err; if (!isdec(p)) goto err; if (!atoi(p)) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* salt */ goto err; if (strlen(p) < 64) goto err; if (strspn(p, HEXCHARS_lc) != 64) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* iterations */ goto err; if (!isdec(p)) goto err; if (!atoi(p)) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* hash */ goto err; if (strlen(p) != 64) goto err; if (strspn(p, HEXCHARS_lc) != 64) goto err; MEM_FREE(keeptr); return 1; err: MEM_FREE(keeptr); return 0; } static void *get_salt(char *ciphertext) { char *ctcopy = strdup(ciphertext); char *keeptr = ctcopy; char *p; int i; static struct custom_salt cs; memset(&cs, 0, sizeof(cs)); ctcopy += FORMAT_TAG_LEN; /* skip over "$pwsafe$*" */ p = strtokm(ctcopy, "*"); cs.version = atoi(p); p = strtokm(NULL, "*"); for (i = 0; i < 32; i++) cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; p = strtokm(NULL, "*"); cs.iterations = (unsigned int)atoi(p); MEM_FREE(keeptr); return (void *)&cs; } static void *get_binary(char *ciphertext) { static union { unsigned char c[BINARY_SIZE]; ARCH_WORD dummy; } buf; unsigned char *out = buf.c; char *p; int i; p = strrchr(ciphertext, '*') + 1; for (i = 0; i < BINARY_SIZE; i++) { out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } return out; } #define COMMON_GET_HASH_VAR crypt_out #include "common-get-hash.h" static void set_salt(void *salt) { cur_salt = (struct custom_salt *)salt; } #ifndef SIMD_COEF_32 #define rotl(x,y) ( x<<y | x>>(32-y) ) #define rotr(x,y) ( x>>y | x<<(32-y) ) #define CHOICE(x,y,z) ( z ^ (x & ( y ^ z)) ) #define MAJORITY(x,y,z) ( (x & y) | (z & (x | y)) ) #define ROTXOR1(x) (rotr(x,2) ^ rotr(x,13) ^ rotr(x,22)) #define ROTXOR2(x) (rotr(x,6) ^ rotr(x,11) ^ rotr(x,25)) #define ROTXOR3(x) (rotr(x,7) ^ rotr(x,18) ^ (x>>3)) #define ROTXOR4(x) (rotr(x,17) ^ rotr(x,19) ^ (x>>10)) #if ARCH_LITTLE_ENDIAN #define bytereverse(x) ( ((x) << 24) | (((x) << 8) & 0x00ff0000) | (((x) >> 8) & 0x0000ff00) | ((x) >> 24) ) #else #define bytereverse(x) (x) #endif static void pwsafe_sha256_iterate(unsigned int * state, unsigned int iterations) { unsigned int word00,word01,word02,word03,word04,word05,word06,word07; unsigned int word08,word09,word10,word11,word12,word13,word14,word15; unsigned int temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7; iterations++; word00 = state[0]; word01 = state[1]; word02 = state[2]; word03 = state[3]; word04 = state[4]; word05 = state[5]; word06 = state[6]; word07 = state[7]; while(iterations) { iterations--; temp0 = 0x6a09e667UL; temp1 = 0xbb67ae85UL; temp2 = 0x3c6ef372UL; temp3 = 0xa54ff53aUL; temp4 = 0x510e527fUL; temp5 = 0x9b05688cUL; temp6 = 0x1f83d9abUL; temp7 = 0x5be0cd19UL; temp7 += ROTXOR2( temp4 ) + CHOICE( temp4, temp5, temp6 ) + 0x428a2f98 + (word00); temp3 += temp7; temp7 += ROTXOR1( temp0 ) + MAJORITY( temp0, temp1, temp2 ); temp6 += ROTXOR2( temp3 ) + CHOICE( temp3, temp4, temp5 ) + 0x71374491 + (word01); temp2 += temp6; temp6 += ROTXOR1( temp7 ) + MAJORITY( temp7, 
temp0, temp1 ); temp5 += ROTXOR2( temp2 ) + CHOICE( temp2, temp3, temp4 ) + 0xb5c0fbcf + (word02); temp1 += temp5; temp5 += ROTXOR1( temp6 ) + MAJORITY( temp6, temp7, temp0 ); temp4 += ROTXOR2( temp1 ) + CHOICE( temp1, temp2, temp3 ) + 0xe9b5dba5 + (word03); temp0 += temp4; temp4 += ROTXOR1( temp5 ) + MAJORITY( temp5, temp6, temp7 ); temp3 += ROTXOR2( temp0 ) + CHOICE( temp0, temp1, temp2 ) + 0x3956c25b + (word04); temp7 += temp3; temp3 += ROTXOR1( temp4 ) + MAJORITY( temp4, temp5, temp6 ); temp2 += ROTXOR2( temp7 ) + CHOICE( temp7, temp0, temp1 ) + 0x59f111f1 + (word05); temp6 += temp2; temp2 += ROTXOR1( temp3 ) + MAJORITY( temp3, temp4, temp5 ); temp1 += ROTXOR2( temp6 ) + CHOICE( temp6, temp7, temp0 ) + 0x923f82a4 + (word06); temp5 += temp1; temp1 += ROTXOR1( temp2 ) + MAJORITY( temp2, temp3, temp4 ); temp0 += ROTXOR2( temp5 ) + CHOICE( temp5, temp6, temp7 ) + 0xab1c5ed5 + (word07); temp4 += temp0; temp0 += ROTXOR1( temp1 ) + MAJORITY( temp1, temp2, temp3 ); temp7 += ROTXOR2( temp4 ) + CHOICE( temp4, temp5, temp6 ) + 0xd807aa98 + ( (word08 = 0x80000000U) ); temp3 += temp7; temp7 += ROTXOR1( temp0 ) + MAJORITY( temp0, temp1, temp2 ); temp6 += ROTXOR2( temp3 ) + CHOICE( temp3, temp4, temp5 ) + 0x12835b01 + ( (word09 = 0) ); temp2 += temp6; temp6 += ROTXOR1( temp7 ) + MAJORITY( temp7, temp0, temp1 ); temp5 += ROTXOR2( temp2 ) + CHOICE( temp2, temp3, temp4 ) + 0x243185be + ( (word10 = 0) ); temp1 += temp5; temp5 += ROTXOR1( temp6 ) + MAJORITY( temp6, temp7, temp0 ); temp4 += ROTXOR2( temp1 ) + CHOICE( temp1, temp2, temp3 ) + 0x550c7dc3 + ( (word11 = 0) ); temp0 += temp4; temp4 += ROTXOR1( temp5 ) + MAJORITY( temp5, temp6, temp7 ); temp3 += ROTXOR2( temp0 ) + CHOICE( temp0, temp1, temp2 ) + 0x72be5d74 + ( (word12 = 0) ); temp7 += temp3; temp3 += ROTXOR1( temp4 ) + MAJORITY( temp4, temp5, temp6 ); temp2 += ROTXOR2( temp7 ) + CHOICE( temp7, temp0, temp1 ) + 0x80deb1fe + ( (word13 = 0) ); temp6 += temp2; temp2 += ROTXOR1( temp3 ) + MAJORITY( temp3, temp4, temp5 ); temp1 += ROTXOR2( temp6 ) + CHOICE( temp6, temp7, temp0 ) + 0x9bdc06a7 + ( (word14 = 0) ); temp5 += temp1; temp1 += ROTXOR1( temp2 ) + MAJORITY( temp2, temp3, temp4 ); temp0 += ROTXOR2( temp5 ) + CHOICE( temp5, temp6, temp7 ) + 0xc19bf174 + ( (word15 = 256) ); temp4 += temp0; temp0 += ROTXOR1( temp1 ) + MAJORITY( temp1, temp2, temp3 ); temp7 += ROTXOR2( temp4 ) + CHOICE( temp4, temp5, temp6 ) + 0xe49b69c1 + ( (word00 += ROTXOR4( word14 ) + word09 + ROTXOR3( word01 ) ) ); temp3 += temp7; temp7 += ROTXOR1( temp0 ) + MAJORITY( temp0, temp1, temp2 ); temp6 += ROTXOR2( temp3 ) + CHOICE( temp3, temp4, temp5 ) + 0xefbe4786 + ( (word01 += ROTXOR4( word15 ) + word10 + ROTXOR3( word02 ) ) ); temp2 += temp6; temp6 += ROTXOR1( temp7 ) + MAJORITY( temp7, temp0, temp1 ); temp5 += ROTXOR2( temp2 ) + CHOICE( temp2, temp3, temp4 ) + 0x0fc19dc6 + ( (word02 += ROTXOR4( word00 ) + word11 + ROTXOR3( word03 ) ) ); temp1 += temp5; temp5 += ROTXOR1( temp6 ) + MAJORITY( temp6, temp7, temp0 ); temp4 += ROTXOR2( temp1 ) + CHOICE( temp1, temp2, temp3 ) + 0x240ca1cc + ( (word03 += ROTXOR4( word01 ) + word12 + ROTXOR3( word04 ) ) ); temp0 += temp4; temp4 += ROTXOR1( temp5 ) + MAJORITY( temp5, temp6, temp7 ); temp3 += ROTXOR2( temp0 ) + CHOICE( temp0, temp1, temp2 ) + 0x2de92c6f + ( (word04 += ROTXOR4( word02 ) + word13 + ROTXOR3( word05 ) ) ); temp7 += temp3; temp3 += ROTXOR1( temp4 ) + MAJORITY( temp4, temp5, temp6 ); temp2 += ROTXOR2( temp7 ) + CHOICE( temp7, temp0, temp1 ) + 0x4a7484aa + ( (word05 += ROTXOR4( word03 ) + word14 + ROTXOR3( word06 ) ) ); temp6 += 
temp2; temp2 += ROTXOR1( temp3 ) + MAJORITY( temp3, temp4, temp5 ); temp1 += ROTXOR2( temp6 ) + CHOICE( temp6, temp7, temp0 ) + 0x5cb0a9dc + ( (word06 += ROTXOR4( word04 ) + word15 + ROTXOR3( word07 ) ) ); temp5 += temp1; temp1 += ROTXOR1( temp2 ) + MAJORITY( temp2, temp3, temp4 ); temp0 += ROTXOR2( temp5 ) + CHOICE( temp5, temp6, temp7 ) + 0x76f988da + ( (word07 += ROTXOR4( word05 ) + word00 + ROTXOR3( word08 ) ) ); temp4 += temp0; temp0 += ROTXOR1( temp1 ) + MAJORITY( temp1, temp2, temp3 ); temp7 += ROTXOR2( temp4 ) + CHOICE( temp4, temp5, temp6 ) + 0x983e5152 + ( (word08 += ROTXOR4( word06 ) + word01 + ROTXOR3( word09 ) ) ); temp3 += temp7; temp7 += ROTXOR1( temp0 ) + MAJORITY( temp0, temp1, temp2 ); temp6 += ROTXOR2( temp3 ) + CHOICE( temp3, temp4, temp5 ) + 0xa831c66d + ( (word09 += ROTXOR4( word07 ) + word02 + ROTXOR3( word10 ) ) ); temp2 += temp6; temp6 += ROTXOR1( temp7 ) + MAJORITY( temp7, temp0, temp1 ); temp5 += ROTXOR2( temp2 ) + CHOICE( temp2, temp3, temp4 ) + 0xb00327c8 + ( (word10 += ROTXOR4( word08 ) + word03 + ROTXOR3( word11 ) ) ); temp1 += temp5; temp5 += ROTXOR1( temp6 ) + MAJORITY( temp6, temp7, temp0 ); temp4 += ROTXOR2( temp1 ) + CHOICE( temp1, temp2, temp3 ) + 0xbf597fc7 + ( (word11 += ROTXOR4( word09 ) + word04 + ROTXOR3( word12 ) ) ); temp0 += temp4; temp4 += ROTXOR1( temp5 ) + MAJORITY( temp5, temp6, temp7 ); temp3 += ROTXOR2( temp0 ) + CHOICE( temp0, temp1, temp2 ) + 0xc6e00bf3 + ( (word12 += ROTXOR4( word10 ) + word05 + ROTXOR3( word13 ) ) ); temp7 += temp3; temp3 += ROTXOR1( temp4 ) + MAJORITY( temp4, temp5, temp6 ); temp2 += ROTXOR2( temp7 ) + CHOICE( temp7, temp0, temp1 ) + 0xd5a79147 + ( (word13 += ROTXOR4( word11 ) + word06 + ROTXOR3( word14 ) ) ); temp6 += temp2; temp2 += ROTXOR1( temp3 ) + MAJORITY( temp3, temp4, temp5 ); temp1 += ROTXOR2( temp6 ) + CHOICE( temp6, temp7, temp0 ) + 0x06ca6351 + ( (word14 += ROTXOR4( word12 ) + word07 + ROTXOR3( word15 ) ) ); temp5 += temp1; temp1 += ROTXOR1( temp2 ) + MAJORITY( temp2, temp3, temp4 ); temp0 += ROTXOR2( temp5 ) + CHOICE( temp5, temp6, temp7 ) + 0x14292967 + ( (word15 += ROTXOR4( word13 ) + word08 + ROTXOR3( word00 ) ) ); temp4 += temp0; temp0 += ROTXOR1( temp1 ) + MAJORITY( temp1, temp2, temp3 ); temp7 += ROTXOR2( temp4 ) + CHOICE( temp4, temp5, temp6 ) + 0x27b70a85 + ( (word00 += ROTXOR4( word14 ) + word09 + ROTXOR3( word01 ) ) ); temp3 += temp7; temp7 += ROTXOR1( temp0 ) + MAJORITY( temp0, temp1, temp2 ); temp6 += ROTXOR2( temp3 ) + CHOICE( temp3, temp4, temp5 ) + 0x2e1b2138 + ( (word01 += ROTXOR4( word15 ) + word10 + ROTXOR3( word02 ) ) ); temp2 += temp6; temp6 += ROTXOR1( temp7 ) + MAJORITY( temp7, temp0, temp1 ); temp5 += ROTXOR2( temp2 ) + CHOICE( temp2, temp3, temp4 ) + 0x4d2c6dfc + ( (word02 += ROTXOR4( word00 ) + word11 + ROTXOR3( word03 ) ) ); temp1 += temp5; temp5 += ROTXOR1( temp6 ) + MAJORITY( temp6, temp7, temp0 ); temp4 += ROTXOR2( temp1 ) + CHOICE( temp1, temp2, temp3 ) + 0x53380d13 + ( (word03 += ROTXOR4( word01 ) + word12 + ROTXOR3( word04 ) ) ); temp0 += temp4; temp4 += ROTXOR1( temp5 ) + MAJORITY( temp5, temp6, temp7 ); temp3 += ROTXOR2( temp0 ) + CHOICE( temp0, temp1, temp2 ) + 0x650a7354 + ( (word04 += ROTXOR4( word02 ) + word13 + ROTXOR3( word05 ) ) ); temp7 += temp3; temp3 += ROTXOR1( temp4 ) + MAJORITY( temp4, temp5, temp6 ); temp2 += ROTXOR2( temp7 ) + CHOICE( temp7, temp0, temp1 ) + 0x766a0abb + ( (word05 += ROTXOR4( word03 ) + word14 + ROTXOR3( word06 ) ) ); temp6 += temp2; temp2 += ROTXOR1( temp3 ) + MAJORITY( temp3, temp4, temp5 ); temp1 += ROTXOR2( temp6 ) + CHOICE( temp6, 
temp7, temp0 ) + 0x81c2c92e + ( (word06 += ROTXOR4( word04 ) + word15 + ROTXOR3( word07 ) ) ); temp5 += temp1; temp1 += ROTXOR1( temp2 ) + MAJORITY( temp2, temp3, temp4 ); temp0 += ROTXOR2( temp5 ) + CHOICE( temp5, temp6, temp7 ) + 0x92722c85 + ( (word07 += ROTXOR4( word05 ) + word00 + ROTXOR3( word08 ) ) ); temp4 += temp0; temp0 += ROTXOR1( temp1 ) + MAJORITY( temp1, temp2, temp3 ); temp7 += ROTXOR2( temp4 ) + CHOICE( temp4, temp5, temp6 ) + 0xa2bfe8a1 + ( (word08 += ROTXOR4( word06 ) + word01 + ROTXOR3( word09 ) ) ); temp3 += temp7; temp7 += ROTXOR1( temp0 ) + MAJORITY( temp0, temp1, temp2 ); temp6 += ROTXOR2( temp3 ) + CHOICE( temp3, temp4, temp5 ) + 0xa81a664b + ( (word09 += ROTXOR4( word07 ) + word02 + ROTXOR3( word10 ) ) ); temp2 += temp6; temp6 += ROTXOR1( temp7 ) + MAJORITY( temp7, temp0, temp1 ); temp5 += ROTXOR2( temp2 ) + CHOICE( temp2, temp3, temp4 ) + 0xc24b8b70 + ( (word10 += ROTXOR4( word08 ) + word03 + ROTXOR3( word11 ) ) ); temp1 += temp5; temp5 += ROTXOR1( temp6 ) + MAJORITY( temp6, temp7, temp0 ); temp4 += ROTXOR2( temp1 ) + CHOICE( temp1, temp2, temp3 ) + 0xc76c51a3 + ( (word11 += ROTXOR4( word09 ) + word04 + ROTXOR3( word12 ) ) ); temp0 += temp4; temp4 += ROTXOR1( temp5 ) + MAJORITY( temp5, temp6, temp7 ); temp3 += ROTXOR2( temp0 ) + CHOICE( temp0, temp1, temp2 ) + 0xd192e819 + ( (word12 += ROTXOR4( word10 ) + word05 + ROTXOR3( word13 ) ) ); temp7 += temp3; temp3 += ROTXOR1( temp4 ) + MAJORITY( temp4, temp5, temp6 ); temp2 += ROTXOR2( temp7 ) + CHOICE( temp7, temp0, temp1 ) + 0xd6990624 + ( (word13 += ROTXOR4( word11 ) + word06 + ROTXOR3( word14 ) ) ); temp6 += temp2; temp2 += ROTXOR1( temp3 ) + MAJORITY( temp3, temp4, temp5 ); temp1 += ROTXOR2( temp6 ) + CHOICE( temp6, temp7, temp0 ) + 0xf40e3585 + ( (word14 += ROTXOR4( word12 ) + word07 + ROTXOR3( word15 ) ) ); temp5 += temp1; temp1 += ROTXOR1( temp2 ) + MAJORITY( temp2, temp3, temp4 ); temp0 += ROTXOR2( temp5 ) + CHOICE( temp5, temp6, temp7 ) + 0x106aa070 + ( (word15 += ROTXOR4( word13 ) + word08 + ROTXOR3( word00 ) ) ); temp4 += temp0; temp0 += ROTXOR1( temp1 ) + MAJORITY( temp1, temp2, temp3 ); temp7 += ROTXOR2( temp4 ) + CHOICE( temp4, temp5, temp6 ) + 0x19a4c116 + ( (word00 += ROTXOR4( word14 ) + word09 + ROTXOR3( word01 ) ) ); temp3 += temp7; temp7 += ROTXOR1( temp0 ) + MAJORITY( temp0, temp1, temp2 ); temp6 += ROTXOR2( temp3 ) + CHOICE( temp3, temp4, temp5 ) + 0x1e376c08 + ( (word01 += ROTXOR4( word15 ) + word10 + ROTXOR3( word02 ) ) ); temp2 += temp6; temp6 += ROTXOR1( temp7 ) + MAJORITY( temp7, temp0, temp1 ); temp5 += ROTXOR2( temp2 ) + CHOICE( temp2, temp3, temp4 ) + 0x2748774c + ( (word02 += ROTXOR4( word00 ) + word11 + ROTXOR3( word03 ) ) ); temp1 += temp5; temp5 += ROTXOR1( temp6 ) + MAJORITY( temp6, temp7, temp0 ); temp4 += ROTXOR2( temp1 ) + CHOICE( temp1, temp2, temp3 ) + 0x34b0bcb5 + ( (word03 += ROTXOR4( word01 ) + word12 + ROTXOR3( word04 ) ) ); temp0 += temp4; temp4 += ROTXOR1( temp5 ) + MAJORITY( temp5, temp6, temp7 ); temp3 += ROTXOR2( temp0 ) + CHOICE( temp0, temp1, temp2 ) + 0x391c0cb3 + ( (word04 += ROTXOR4( word02 ) + word13 + ROTXOR3( word05 ) ) ); temp7 += temp3; temp3 += ROTXOR1( temp4 ) + MAJORITY( temp4, temp5, temp6 ); temp2 += ROTXOR2( temp7 ) + CHOICE( temp7, temp0, temp1 ) + 0x4ed8aa4a + ( (word05 += ROTXOR4( word03 ) + word14 + ROTXOR3( word06 ) ) ); temp6 += temp2; temp2 += ROTXOR1( temp3 ) + MAJORITY( temp3, temp4, temp5 ); temp1 += ROTXOR2( temp6 ) + CHOICE( temp6, temp7, temp0 ) + 0x5b9cca4f + ( (word06 += ROTXOR4( word04 ) + word15 + ROTXOR3( word07 ) ) ); temp5 += temp1; 
temp1 += ROTXOR1( temp2 ) + MAJORITY( temp2, temp3, temp4 ); temp0 += ROTXOR2( temp5 ) + CHOICE( temp5, temp6, temp7 ) + 0x682e6ff3 + ( (word07 += ROTXOR4( word05 ) + word00 + ROTXOR3( word08 ) ) ); temp4 += temp0; temp0 += ROTXOR1( temp1 ) + MAJORITY( temp1, temp2, temp3 ); temp7 += ROTXOR2( temp4 ) + CHOICE( temp4, temp5, temp6 ) + 0x748f82ee + ( (word08 += ROTXOR4( word06 ) + word01 + ROTXOR3( word09 ) ) ); temp3 += temp7; temp7 += ROTXOR1( temp0 ) + MAJORITY( temp0, temp1, temp2 ); temp6 += ROTXOR2( temp3 ) + CHOICE( temp3, temp4, temp5 ) + 0x78a5636f + ( (word09 += ROTXOR4( word07 ) + word02 + ROTXOR3( word10 ) ) ); temp2 += temp6; temp6 += ROTXOR1( temp7 ) + MAJORITY( temp7, temp0, temp1 ); temp5 += ROTXOR2( temp2 ) + CHOICE( temp2, temp3, temp4 ) + 0x84c87814 + ( (word10 += ROTXOR4( word08 ) + word03 + ROTXOR3( word11 ) ) ); temp1 += temp5; temp5 += ROTXOR1( temp6 ) + MAJORITY( temp6, temp7, temp0 ); temp4 += ROTXOR2( temp1 ) + CHOICE( temp1, temp2, temp3 ) + 0x8cc70208 + ( (word11 += ROTXOR4( word09 ) + word04 + ROTXOR3( word12 ) ) ); temp0 += temp4; temp4 += ROTXOR1( temp5 ) + MAJORITY( temp5, temp6, temp7 ); temp3 += ROTXOR2( temp0 ) + CHOICE( temp0, temp1, temp2 ) + 0x90befffa + ( (word12 += ROTXOR4( word10 ) + word05 + ROTXOR3( word13 ) ) ); temp7 += temp3; temp3 += ROTXOR1( temp4 ) + MAJORITY( temp4, temp5, temp6 ); temp2 += ROTXOR2( temp7 ) + CHOICE( temp7, temp0, temp1 ) + 0xa4506ceb + ( (word13 += ROTXOR4( word11 ) + word06 + ROTXOR3( word14 ) ) ); temp6 += temp2; temp2 += ROTXOR1( temp3 ) + MAJORITY( temp3, temp4, temp5 ); temp1 += ROTXOR2( temp6 ) + CHOICE( temp6, temp7, temp0 ) + 0xbef9a3f7 + ( (word14 += ROTXOR4( word12 ) + word07 + ROTXOR3( word15 ) ) ); temp5 += temp1; temp1 += ROTXOR1( temp2 ) + MAJORITY( temp2, temp3, temp4 ); temp0 += ROTXOR2( temp5 ) + CHOICE( temp5, temp6, temp7 ) + 0xc67178f2 + ( (word15 += ROTXOR4( word13 ) + word08 + ROTXOR3( word00 ) ) ); temp4 += temp0; temp0 += ROTXOR1( temp1 ) + MAJORITY( temp1, temp2, temp3 ); word00 = 0x6a09e667UL + temp0; word01 = 0xbb67ae85UL + temp1; word02 = 0x3c6ef372UL + temp2; word03 = 0xa54ff53aUL + temp3; word04 = 0x510e527fUL + temp4; word05 = 0x9b05688cUL + temp5; word06 = 0x1f83d9abUL + temp6; word07 = 0x5be0cd19UL + temp7; } state[0] = bytereverse(word00); state[1] = bytereverse(word01); state[2] = bytereverse(word02); state[3] = bytereverse(word03); state[4] = bytereverse(word04); state[5] = bytereverse(word05); state[6] = bytereverse(word06); state[7] = bytereverse(word07); } #endif static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; #ifdef _OPENMP #pragma omp parallel for #endif for (index = 0; index < count; index+=MAX_KEYS_PER_CRYPT) { SHA256_CTX ctx; #ifdef SIMD_COEF_32 unsigned int i; unsigned char _IBuf[64*MAX_KEYS_PER_CRYPT+MEM_ALIGN_CACHE], *keys, tmpBuf[32]; uint32_t *keys32, j; keys = (unsigned char*)mem_align(_IBuf, MEM_ALIGN_CACHE); keys32 = (uint32_t*)keys; memset(keys, 0, 64*MAX_KEYS_PER_CRYPT); for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) { SHA256_Init(&ctx); SHA256_Update(&ctx, saved_key[index+i], strlen(saved_key[index+i])); SHA256_Update(&ctx, cur_salt->salt, 32); SHA256_Final(tmpBuf, &ctx); for (j = 0; j < 32; ++j) keys[GETPOS(j, i)] = tmpBuf[j]; keys[GETPOS(j, i)] = 0x80; // 32 bytes of crypt data (0x100 bits). 
keys[GETPOS(62, i)] = 0x01; } for (i = 0; i < cur_salt->iterations; i++) { SIMDSHA256body(keys, keys32, NULL, SSEi_MIXED_IN|SSEi_OUTPUT_AS_INP_FMT); } // Last one with FLAT_OUT SIMDSHA256body(keys, crypt_out[index], NULL, SSEi_MIXED_IN|SSEi_OUTPUT_AS_INP_FMT|SSEi_FLAT_OUT); #else SHA256_Init(&ctx); SHA256_Update(&ctx, saved_key[index], strlen(saved_key[index])); SHA256_Update(&ctx, cur_salt->salt, 32); SHA256_Final((unsigned char*)crypt_out[index], &ctx); #if 1 // This complex crap only boosted speed on my quad-HT from 5016 to 5285. // A ton of complex code for VERY little gain. The SIMD change gave us // a 4x improvement with very little change. This pwsafe_sha256_iterate // does get 5% gain, but 400% is so much better, lol. I put the other // code in to be able to dump data out easier, getting dump_stuff() // data in flat, to be able to help get the SIMD code working. #ifdef COMMON_DIGEST_FOR_OPENSSL pwsafe_sha256_iterate(ctx.hash, cur_salt->iterations); memcpy(crypt_out[index], ctx.hash, 32); #else pwsafe_sha256_iterate(ctx.h, cur_salt->iterations); memcpy(crypt_out[index], ctx.h, 32); #endif #else { int i; for (i = 0; i <= cur_salt->iterations; ++i) { SHA256_Init(&ctx); SHA256_Update(&ctx, (unsigned char*)crypt_out[index], 32); SHA256_Final((unsigned char*)crypt_out[index], &ctx); } } #endif #endif } return count; } static int cmp_all(void *binary, int count) { int index = 0; for (; index < count; index++) if (!memcmp(binary, crypt_out[index], ARCH_SIZE)) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_out[index], BINARY_SIZE); } static int cmp_exact(char *source, int index) { return 1; } static void pwsafe_set_key(char *key, int index) { strnzcpy(saved_key[index], key, sizeof(*saved_key)); } static char *get_key(int index) { return saved_key[index]; } static unsigned int iteration_count(void *salt) { struct custom_salt *my_salt; my_salt = salt; return (unsigned int) my_salt->iterations; } struct fmt_main fmt_pwsafe = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP, { "iteration count", }, { FORMAT_TAG }, pwsafe_tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, get_binary, get_salt, { iteration_count, }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, fmt_default_salt_hash, NULL, set_salt, pwsafe_set_key, get_key, fmt_default_clear_keys, crypt_all, { #define COMMON_GET_HASH_LINK #include "common-get-hash.h" }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
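/*
 * A minimal standalone sketch (not JtR code) of the stretching scheme the
 * format above attacks, following the plain fallback loop shown in the #else
 * branch of crypt_all(): X = SHA256(password || salt), then X = SHA256(X)
 * another iterations+1 times, and X is compared with the stored 32-byte check
 * value. It links against OpenSSL's one-shot SHA256(); pwsafe_check and
 * hex2bin are illustrative helper names, not part of the plugin.
 */
#include <stdio.h>
#include <string.h>
#include <openssl/sha.h>

static void hex2bin(const char *hex, unsigned char *out, size_t n)
{
	size_t i;
	for (i = 0; i < n; i++)
		sscanf(hex + 2 * i, "%2hhx", &out[i]);
}

static int pwsafe_check(const char *password, const unsigned char salt[32],
                        unsigned int iterations, const unsigned char expected[32])
{
	unsigned char buf[125 + 32], digest[SHA256_DIGEST_LENGTH];
	size_t len = strlen(password);
	unsigned int i;

	if (len > 125)
		return 0;
	memcpy(buf, password, len);
	memcpy(buf + len, salt, 32);
	SHA256(buf, len + 32, digest);      /* X = SHA256(password || salt)    */
	for (i = 0; i <= iterations; i++)   /* iterations + 1 re-hashes, as in */
		SHA256(digest, 32, digest); /* the scalar #else branch above   */
	return !memcmp(digest, expected, 32);
}

int main(void)
{
	/* first entry of pwsafe_tests[] above: version 3, 2048 iterations */
	unsigned char salt[32], expected[32];
	hex2bin("fefc1172093344c9d5577b25f5b4b6e5d2942c94f9fc24c21733e28ae6527521", salt, 32);
	hex2bin("88cbaf7d8668c1a98263f5dce7cb39c3304c49a3e0d76a7ea475dc02ab2f97a7", expected, 32);
	printf("match: %d\n", pwsafe_check("12345678", salt, 2048, expected));
	return 0;
}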
#if FMT_EXTERNS_H extern struct fmt_main fmt_pwsafe; #elif FMT_REGISTERS_H john_register_one(&fmt_pwsafe); #else #include <string.h> #include <assert.h> #include <errno.h> #include "arch.h" // #undef SIMD_COEF_32 #include "sha2.h" #include "misc.h" #include "common.h" #include "formats.h" #include "params.h" #include "options.h" #include "johnswap.h" #include "simd-intrinsics.h" #include "memdbg.h" #define FORMAT_LABEL "pwsafe" #define FORMAT_NAME "Password Safe" #define FORMAT_TAG "$pwsafe$*" #define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1) #define ALGORITHM_NAME "SHA256 " SHA256_ALGORITHM_NAME #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define PLAINTEXT_LENGTH 125 #define BINARY_SIZE 32 #define SALT_SIZE sizeof(struct custom_salt) #define BINARY_ALIGN sizeof(uint32_t) #define SALT_ALIGN sizeof(int) #ifdef SIMD_COEF_32 #if ARCH_LITTLE_ENDIAN==1 #define GETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + (3-((i)&3)) + (unsigned int)index/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32*4 ) #else #define GETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + ((i)&3) + (unsigned int)index/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32*4 ) #endif #define MIN_KEYS_PER_CRYPT (SIMD_COEF_32*SIMD_PARA_SHA256) #define MAX_KEYS_PER_CRYPT (SIMD_COEF_32*SIMD_PARA_SHA256) #else #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #endif static struct fmt_tests pwsafe_tests[] = { {"$pwsafe$*3*fefc1172093344c9d5577b25f5b4b6e5d2942c94f9fc24c21733e28ae6527521*2048*88cbaf7d8668c1a98263f5dce7cb39c3304c49a3e0d76a7ea475dc02ab2f97a7", "12345678"}, {"$pwsafe$*3*581cd1135b9b993ccb0f6b01c1fcfacd799c69960496c96286f94fe1400c1b25*2048*4ab3c2d3af251e94eb2f753fdf30fb9da074bec6bac0fa9d9d152b95fc5795c6", "openwall"}, {"$pwsafe$*3*34ba0066d0fc594c126b60b9db98b6024e1cf585901b81b5b005ce386f173d4c*2048*cc86f1a5d930ff19b3602770a86586b5d9dea7bb657012aca875aa2a7dc71dc0", "12345678901234567890123"}, {"$pwsafe$*3*a42431191707895fb8d1121a3a6e255e33892d8eecb50fc616adab6185b5affb*2048*0f71d12df2b7c5394ae90771f6475a7ad0437007a8eeb5d9b58e35d8fd57c827", "123456789012345678901234567"}, {"$pwsafe$*3*c380dee0dbb536f5454f78603b020be76b33e294e9c2a0e047f43b9c61669fc8*2048*e88ed54a85e419d555be219d200563ae3ba864e24442826f412867fc0403917d", "this is an 87 character password to test the max bound of pwsafe-opencl................"}, {NULL} }; static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static uint32_t(*crypt_out)[BINARY_SIZE / sizeof(uint32_t)]; static struct custom_salt { int version; unsigned int iterations; unsigned char salt[32]; } *cur_salt; static void init(struct fmt_main *self) { saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt); crypt_out = mem_calloc(sizeof(*crypt_out), self->params.max_keys_per_crypt); } static void done(void) { MEM_FREE(crypt_out); MEM_FREE(saved_key); } static int valid(char *ciphertext, struct fmt_main *self) { //format $pwsafe$version * salt * iterations * hash char *p; char *ctcopy; char *keeptr; if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0) return 0; ctcopy = strdup(ciphertext); keeptr = ctcopy; ctcopy += FORMAT_TAG_LEN; /* skip over "$pwsafe$*" */ if ((p = strtokm(ctcopy, "*")) == NULL) /* version */ goto err; if (!isdec(p)) goto err; if (!atoi(p)) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* salt */ goto err; if (strlen(p) < 64) goto err; if (strspn(p, HEXCHARS_lc) != 64) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* iterations */ goto err; if (!isdec(p)) goto err; if (!atoi(p)) goto err; if ((p = 
strtokm(NULL, "*")) == NULL) /* hash */ goto err; if (strlen(p) != 64) goto err; if (strspn(p, HEXCHARS_lc) != 64) goto err; MEM_FREE(keeptr); return 1; err: MEM_FREE(keeptr); return 0; } static void * get_salt(char *ciphertext) { char *ctcopy = strdup(ciphertext); char *keeptr = ctcopy; char *p; int i; static struct custom_salt cs; memset(&cs, 0, sizeof(cs)); ctcopy += FORMAT_TAG_LEN; /* skip over "$pwsafe$*" */ p = strtokm(ctcopy, "*"); cs.version = atoi(p); p = strtokm(NULL, "*"); for (i = 0; i < 32; i++) cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; p = strtokm(NULL, "*"); cs.iterations = (unsigned int)atoi(p); MEM_FREE(keeptr); return (void *)&cs; } static void * get_binary(char *ciphertext) { static union { unsigned char c[BINARY_SIZE]; ARCH_WORD dummy; } buf; unsigned char *out = buf.c; char *p; int i; p = strrchr(ciphertext, '*') + 1; for (i = 0; i < BINARY_SIZE; i++) { out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } return out; } #define COMMON_GET_HASH_VAR crypt_out #include "common-get-hash.h" static void set_salt(void *salt) { cur_salt = (struct custom_salt *)salt; } #ifndef SIMD_COEF_32 #define rotl(x,y) ( x<<y | x>>(32-y) ) #define rotr(x,y) ( x>>y | x<<(32-y) ) #define CHOICE(x,y,z) ( z ^ (x & ( y ^ z)) ) #define MAJORITY(x,y,z) ( (x & y) | (z & (x | y)) ) #define ROTXOR1(x) (rotr(x,2) ^ rotr(x,13) ^ rotr(x,22)) #define ROTXOR2(x) (rotr(x,6) ^ rotr(x,11) ^ rotr(x,25)) #define ROTXOR3(x) (rotr(x,7) ^ rotr(x,18) ^ (x>>3)) #define ROTXOR4(x) (rotr(x,17) ^ rotr(x,19) ^ (x>>10)) #if ARCH_LITTLE_ENDIAN #define bytereverse(x) ( ((x) << 24) | (((x) << 8) & 0x00ff0000) | (((x) >> 8) & 0x0000ff00) | ((x) >> 24) ) #else #define bytereverse(x) (x) #endif static void pwsafe_sha256_iterate(unsigned int *state, unsigned int iterations) { unsigned int word00, word01, word02, word03, word04, word05, word06, word07; unsigned int word08, word09, word10, word11, word12, word13, word14, word15; unsigned int temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7; iterations++; word00 = state[0]; word01 = state[1]; word02 = state[2]; word03 = state[3]; word04 = state[4]; word05 = state[5]; word06 = state[6]; word07 = state[7]; while (iterations) { iterations--; temp0 = 0x6a09e667UL; temp1 = 0xbb67ae85UL; temp2 = 0x3c6ef372UL; temp3 = 0xa54ff53aUL; temp4 = 0x510e527fUL; temp5 = 0x9b05688cUL; temp6 = 0x1f83d9abUL; temp7 = 0x5be0cd19UL; temp7 += ROTXOR2(temp4) + CHOICE(temp4, temp5, temp6) + 0x428a2f98 + (word00); temp3 += temp7; temp7 += ROTXOR1(temp0) + MAJORITY(temp0, temp1, temp2); temp6 += ROTXOR2(temp3) + CHOICE(temp3, temp4, temp5) + 0x71374491 + (word01); temp2 += temp6; temp6 += ROTXOR1(temp7) + MAJORITY(temp7, temp0, temp1); temp5 += ROTXOR2(temp2) + CHOICE(temp2, temp3, temp4) + 0xb5c0fbcf + (word02); temp1 += temp5; temp5 += ROTXOR1(temp6) + MAJORITY(temp6, temp7, temp0); temp4 += ROTXOR2(temp1) + CHOICE(temp1, temp2, temp3) + 0xe9b5dba5 + (word03); temp0 += temp4; temp4 += ROTXOR1(temp5) + MAJORITY(temp5, temp6, temp7); temp3 += ROTXOR2(temp0) + CHOICE(temp0, temp1, temp2) + 0x3956c25b + (word04); temp7 += temp3; temp3 += ROTXOR1(temp4) + MAJORITY(temp4, temp5, temp6); temp2 += ROTXOR2(temp7) + CHOICE(temp7, temp0, temp1) + 0x59f111f1 + (word05); temp6 += temp2; temp2 += ROTXOR1(temp3) + MAJORITY(temp3, temp4, temp5); temp1 += ROTXOR2(temp6) + CHOICE(temp6, temp7, temp0) + 0x923f82a4 + (word06); temp5 += temp1; temp1 += ROTXOR1(temp2) + MAJORITY(temp2, temp3, temp4); temp0 += ROTXOR2(temp5) + CHOICE(temp5, temp6, temp7) + 
0xab1c5ed5 + (word07); temp4 += temp0; temp0 += ROTXOR1(temp1) + MAJORITY(temp1, temp2, temp3); temp7 += ROTXOR2(temp4) + CHOICE(temp4, temp5, temp6) + 0xd807aa98 + ((word08 = 0x80000000U)); temp3 += temp7; temp7 += ROTXOR1(temp0) + MAJORITY(temp0, temp1, temp2); temp6 += ROTXOR2(temp3) + CHOICE(temp3, temp4, temp5) + 0x12835b01 + ((word09 = 0)); temp2 += temp6; temp6 += ROTXOR1(temp7) + MAJORITY(temp7, temp0, temp1); temp5 += ROTXOR2(temp2) + CHOICE(temp2, temp3, temp4) + 0x243185be + ((word10 = 0)); temp1 += temp5; temp5 += ROTXOR1(temp6) + MAJORITY(temp6, temp7, temp0); temp4 += ROTXOR2(temp1) + CHOICE(temp1, temp2, temp3) + 0x550c7dc3 + ((word11 = 0)); temp0 += temp4; temp4 += ROTXOR1(temp5) + MAJORITY(temp5, temp6, temp7); temp3 += ROTXOR2(temp0) + CHOICE(temp0, temp1, temp2) + 0x72be5d74 + ((word12 = 0)); temp7 += temp3; temp3 += ROTXOR1(temp4) + MAJORITY(temp4, temp5, temp6); temp2 += ROTXOR2(temp7) + CHOICE(temp7, temp0, temp1) + 0x80deb1fe + ((word13 = 0)); temp6 += temp2; temp2 += ROTXOR1(temp3) + MAJORITY(temp3, temp4, temp5); temp1 += ROTXOR2(temp6) + CHOICE(temp6, temp7, temp0) + 0x9bdc06a7 + ((word14 = 0)); temp5 += temp1; temp1 += ROTXOR1(temp2) + MAJORITY(temp2, temp3, temp4); temp0 += ROTXOR2(temp5) + CHOICE(temp5, temp6, temp7) + 0xc19bf174 + ((word15 = 256)); temp4 += temp0; temp0 += ROTXOR1(temp1) + MAJORITY(temp1, temp2, temp3); temp7 += ROTXOR2(temp4) + CHOICE(temp4, temp5, temp6) + 0xe49b69c1 + ((word00 += ROTXOR4(word14) + word09 + ROTXOR3(word01))); temp3 += temp7; temp7 += ROTXOR1(temp0) + MAJORITY(temp0, temp1, temp2); temp6 += ROTXOR2(temp3) + CHOICE(temp3, temp4, temp5) + 0xefbe4786 + ((word01 += ROTXOR4(word15) + word10 + ROTXOR3(word02))); temp2 += temp6; temp6 += ROTXOR1(temp7) + MAJORITY(temp7, temp0, temp1); temp5 += ROTXOR2(temp2) + CHOICE(temp2, temp3, temp4) + 0x0fc19dc6 + ((word02 += ROTXOR4(word00) + word11 + ROTXOR3(word03))); temp1 += temp5; temp5 += ROTXOR1(temp6) + MAJORITY(temp6, temp7, temp0); temp4 += ROTXOR2(temp1) + CHOICE(temp1, temp2, temp3) + 0x240ca1cc + ((word03 += ROTXOR4(word01) + word12 + ROTXOR3(word04))); temp0 += temp4; temp4 += ROTXOR1(temp5) + MAJORITY(temp5, temp6, temp7); temp3 += ROTXOR2(temp0) + CHOICE(temp0, temp1, temp2) + 0x2de92c6f + ((word04 += ROTXOR4(word02) + word13 + ROTXOR3(word05))); temp7 += temp3; temp3 += ROTXOR1(temp4) + MAJORITY(temp4, temp5, temp6); temp2 += ROTXOR2(temp7) + CHOICE(temp7, temp0, temp1) + 0x4a7484aa + ((word05 += ROTXOR4(word03) + word14 + ROTXOR3(word06))); temp6 += temp2; temp2 += ROTXOR1(temp3) + MAJORITY(temp3, temp4, temp5); temp1 += ROTXOR2(temp6) + CHOICE(temp6, temp7, temp0) + 0x5cb0a9dc + ((word06 += ROTXOR4(word04) + word15 + ROTXOR3(word07))); temp5 += temp1; temp1 += ROTXOR1(temp2) + MAJORITY(temp2, temp3, temp4); temp0 += ROTXOR2(temp5) + CHOICE(temp5, temp6, temp7) + 0x76f988da + ((word07 += ROTXOR4(word05) + word00 + ROTXOR3(word08))); temp4 += temp0; temp0 += ROTXOR1(temp1) + MAJORITY(temp1, temp2, temp3); temp7 += ROTXOR2(temp4) + CHOICE(temp4, temp5, temp6) + 0x983e5152 + ((word08 += ROTXOR4(word06) + word01 + ROTXOR3(word09))); temp3 += temp7; temp7 += ROTXOR1(temp0) + MAJORITY(temp0, temp1, temp2); temp6 += ROTXOR2(temp3) + CHOICE(temp3, temp4, temp5) + 0xa831c66d + ((word09 += ROTXOR4(word07) + word02 + ROTXOR3(word10))); temp2 += temp6; temp6 += ROTXOR1(temp7) + MAJORITY(temp7, temp0, temp1); temp5 += ROTXOR2(temp2) + CHOICE(temp2, temp3, temp4) + 0xb00327c8 + ((word10 += ROTXOR4(word08) + word03 + ROTXOR3(word11))); temp1 += temp5; temp5 += ROTXOR1(temp6) + 
MAJORITY(temp6, temp7, temp0); temp4 += ROTXOR2(temp1) + CHOICE(temp1, temp2, temp3) + 0xbf597fc7 + ((word11 += ROTXOR4(word09) + word04 + ROTXOR3(word12))); temp0 += temp4; temp4 += ROTXOR1(temp5) + MAJORITY(temp5, temp6, temp7); temp3 += ROTXOR2(temp0) + CHOICE(temp0, temp1, temp2) + 0xc6e00bf3 + ((word12 += ROTXOR4(word10) + word05 + ROTXOR3(word13))); temp7 += temp3; temp3 += ROTXOR1(temp4) + MAJORITY(temp4, temp5, temp6); temp2 += ROTXOR2(temp7) + CHOICE(temp7, temp0, temp1) + 0xd5a79147 + ((word13 += ROTXOR4(word11) + word06 + ROTXOR3(word14))); temp6 += temp2; temp2 += ROTXOR1(temp3) + MAJORITY(temp3, temp4, temp5); temp1 += ROTXOR2(temp6) + CHOICE(temp6, temp7, temp0) + 0x06ca6351 + ((word14 += ROTXOR4(word12) + word07 + ROTXOR3(word15))); temp5 += temp1; temp1 += ROTXOR1(temp2) + MAJORITY(temp2, temp3, temp4); temp0 += ROTXOR2(temp5) + CHOICE(temp5, temp6, temp7) + 0x14292967 + ((word15 += ROTXOR4(word13) + word08 + ROTXOR3(word00))); temp4 += temp0; temp0 += ROTXOR1(temp1) + MAJORITY(temp1, temp2, temp3); temp7 += ROTXOR2(temp4) + CHOICE(temp4, temp5, temp6) + 0x27b70a85 + ((word00 += ROTXOR4(word14) + word09 + ROTXOR3(word01))); temp3 += temp7; temp7 += ROTXOR1(temp0) + MAJORITY(temp0, temp1, temp2); temp6 += ROTXOR2(temp3) + CHOICE(temp3, temp4, temp5) + 0x2e1b2138 + ((word01 += ROTXOR4(word15) + word10 + ROTXOR3(word02))); temp2 += temp6; temp6 += ROTXOR1(temp7) + MAJORITY(temp7, temp0, temp1); temp5 += ROTXOR2(temp2) + CHOICE(temp2, temp3, temp4) + 0x4d2c6dfc + ((word02 += ROTXOR4(word00) + word11 + ROTXOR3(word03))); temp1 += temp5; temp5 += ROTXOR1(temp6) + MAJORITY(temp6, temp7, temp0); temp4 += ROTXOR2(temp1) + CHOICE(temp1, temp2, temp3) + 0x53380d13 + ((word03 += ROTXOR4(word01) + word12 + ROTXOR3(word04))); temp0 += temp4; temp4 += ROTXOR1(temp5) + MAJORITY(temp5, temp6, temp7); temp3 += ROTXOR2(temp0) + CHOICE(temp0, temp1, temp2) + 0x650a7354 + ((word04 += ROTXOR4(word02) + word13 + ROTXOR3(word05))); temp7 += temp3; temp3 += ROTXOR1(temp4) + MAJORITY(temp4, temp5, temp6); temp2 += ROTXOR2(temp7) + CHOICE(temp7, temp0, temp1) + 0x766a0abb + ((word05 += ROTXOR4(word03) + word14 + ROTXOR3(word06))); temp6 += temp2; temp2 += ROTXOR1(temp3) + MAJORITY(temp3, temp4, temp5); temp1 += ROTXOR2(temp6) + CHOICE(temp6, temp7, temp0) + 0x81c2c92e + ((word06 += ROTXOR4(word04) + word15 + ROTXOR3(word07))); temp5 += temp1; temp1 += ROTXOR1(temp2) + MAJORITY(temp2, temp3, temp4); temp0 += ROTXOR2(temp5) + CHOICE(temp5, temp6, temp7) + 0x92722c85 + ((word07 += ROTXOR4(word05) + word00 + ROTXOR3(word08))); temp4 += temp0; temp0 += ROTXOR1(temp1) + MAJORITY(temp1, temp2, temp3); temp7 += ROTXOR2(temp4) + CHOICE(temp4, temp5, temp6) + 0xa2bfe8a1 + ((word08 += ROTXOR4(word06) + word01 + ROTXOR3(word09))); temp3 += temp7; temp7 += ROTXOR1(temp0) + MAJORITY(temp0, temp1, temp2); temp6 += ROTXOR2(temp3) + CHOICE(temp3, temp4, temp5) + 0xa81a664b + ((word09 += ROTXOR4(word07) + word02 + ROTXOR3(word10))); temp2 += temp6; temp6 += ROTXOR1(temp7) + MAJORITY(temp7, temp0, temp1); temp5 += ROTXOR2(temp2) + CHOICE(temp2, temp3, temp4) + 0xc24b8b70 + ((word10 += ROTXOR4(word08) + word03 + ROTXOR3(word11))); temp1 += temp5; temp5 += ROTXOR1(temp6) + MAJORITY(temp6, temp7, temp0); temp4 += ROTXOR2(temp1) + CHOICE(temp1, temp2, temp3) + 0xc76c51a3 + ((word11 += ROTXOR4(word09) + word04 + ROTXOR3(word12))); temp0 += temp4; temp4 += ROTXOR1(temp5) + MAJORITY(temp5, temp6, temp7); temp3 += ROTXOR2(temp0) + CHOICE(temp0, temp1, temp2) + 0xd192e819 + ((word12 += ROTXOR4(word10) + word05 + 
ROTXOR3(word13))); temp7 += temp3; temp3 += ROTXOR1(temp4) + MAJORITY(temp4, temp5, temp6); temp2 += ROTXOR2(temp7) + CHOICE(temp7, temp0, temp1) + 0xd6990624 + ((word13 += ROTXOR4(word11) + word06 + ROTXOR3(word14))); temp6 += temp2; temp2 += ROTXOR1(temp3) + MAJORITY(temp3, temp4, temp5); temp1 += ROTXOR2(temp6) + CHOICE(temp6, temp7, temp0) + 0xf40e3585 + ((word14 += ROTXOR4(word12) + word07 + ROTXOR3(word15))); temp5 += temp1; temp1 += ROTXOR1(temp2) + MAJORITY(temp2, temp3, temp4); temp0 += ROTXOR2(temp5) + CHOICE(temp5, temp6, temp7) + 0x106aa070 + ((word15 += ROTXOR4(word13) + word08 + ROTXOR3(word00))); temp4 += temp0; temp0 += ROTXOR1(temp1) + MAJORITY(temp1, temp2, temp3); temp7 += ROTXOR2(temp4) + CHOICE(temp4, temp5, temp6) + 0x19a4c116 + ((word00 += ROTXOR4(word14) + word09 + ROTXOR3(word01))); temp3 += temp7; temp7 += ROTXOR1(temp0) + MAJORITY(temp0, temp1, temp2); temp6 += ROTXOR2(temp3) + CHOICE(temp3, temp4, temp5) + 0x1e376c08 + ((word01 += ROTXOR4(word15) + word10 + ROTXOR3(word02))); temp2 += temp6; temp6 += ROTXOR1(temp7) + MAJORITY(temp7, temp0, temp1); temp5 += ROTXOR2(temp2) + CHOICE(temp2, temp3, temp4) + 0x2748774c + ((word02 += ROTXOR4(word00) + word11 + ROTXOR3(word03))); temp1 += temp5; temp5 += ROTXOR1(temp6) + MAJORITY(temp6, temp7, temp0); temp4 += ROTXOR2(temp1) + CHOICE(temp1, temp2, temp3) + 0x34b0bcb5 + ((word03 += ROTXOR4(word01) + word12 + ROTXOR3(word04))); temp0 += temp4; temp4 += ROTXOR1(temp5) + MAJORITY(temp5, temp6, temp7); temp3 += ROTXOR2(temp0) + CHOICE(temp0, temp1, temp2) + 0x391c0cb3 + ((word04 += ROTXOR4(word02) + word13 + ROTXOR3(word05))); temp7 += temp3; temp3 += ROTXOR1(temp4) + MAJORITY(temp4, temp5, temp6); temp2 += ROTXOR2(temp7) + CHOICE(temp7, temp0, temp1) + 0x4ed8aa4a + ((word05 += ROTXOR4(word03) + word14 + ROTXOR3(word06))); temp6 += temp2; temp2 += ROTXOR1(temp3) + MAJORITY(temp3, temp4, temp5); temp1 += ROTXOR2(temp6) + CHOICE(temp6, temp7, temp0) + 0x5b9cca4f + ((word06 += ROTXOR4(word04) + word15 + ROTXOR3(word07))); temp5 += temp1; temp1 += ROTXOR1(temp2) + MAJORITY(temp2, temp3, temp4); temp0 += ROTXOR2(temp5) + CHOICE(temp5, temp6, temp7) + 0x682e6ff3 + ((word07 += ROTXOR4(word05) + word00 + ROTXOR3(word08))); temp4 += temp0; temp0 += ROTXOR1(temp1) + MAJORITY(temp1, temp2, temp3); temp7 += ROTXOR2(temp4) + CHOICE(temp4, temp5, temp6) + 0x748f82ee + ((word08 += ROTXOR4(word06) + word01 + ROTXOR3(word09))); temp3 += temp7; temp7 += ROTXOR1(temp0) + MAJORITY(temp0, temp1, temp2); temp6 += ROTXOR2(temp3) + CHOICE(temp3, temp4, temp5) + 0x78a5636f + ((word09 += ROTXOR4(word07) + word02 + ROTXOR3(word10))); temp2 += temp6; temp6 += ROTXOR1(temp7) + MAJORITY(temp7, temp0, temp1); temp5 += ROTXOR2(temp2) + CHOICE(temp2, temp3, temp4) + 0x84c87814 + ((word10 += ROTXOR4(word08) + word03 + ROTXOR3(word11))); temp1 += temp5; temp5 += ROTXOR1(temp6) + MAJORITY(temp6, temp7, temp0); temp4 += ROTXOR2(temp1) + CHOICE(temp1, temp2, temp3) + 0x8cc70208 + ((word11 += ROTXOR4(word09) + word04 + ROTXOR3(word12))); temp0 += temp4; temp4 += ROTXOR1(temp5) + MAJORITY(temp5, temp6, temp7); temp3 += ROTXOR2(temp0) + CHOICE(temp0, temp1, temp2) + 0x90befffa + ((word12 += ROTXOR4(word10) + word05 + ROTXOR3(word13))); temp7 += temp3; temp3 += ROTXOR1(temp4) + MAJORITY(temp4, temp5, temp6); temp2 += ROTXOR2(temp7) + CHOICE(temp7, temp0, temp1) + 0xa4506ceb + ((word13 += ROTXOR4(word11) + word06 + ROTXOR3(word14))); temp6 += temp2; temp2 += ROTXOR1(temp3) + MAJORITY(temp3, temp4, temp5); temp1 += ROTXOR2(temp6) + CHOICE(temp6, temp7, temp0) + 
0xbef9a3f7 + ((word14 += ROTXOR4(word12) + word07 + ROTXOR3(word15))); temp5 += temp1; temp1 += ROTXOR1(temp2) + MAJORITY(temp2, temp3, temp4); temp0 += ROTXOR2(temp5) + CHOICE(temp5, temp6, temp7) + 0xc67178f2 + ((word15 += ROTXOR4(word13) + word08 + ROTXOR3(word00))); temp4 += temp0; temp0 += ROTXOR1(temp1) + MAJORITY(temp1, temp2, temp3); word00 = 0x6a09e667UL + temp0; word01 = 0xbb67ae85UL + temp1; word02 = 0x3c6ef372UL + temp2; word03 = 0xa54ff53aUL + temp3; word04 = 0x510e527fUL + temp4; word05 = 0x9b05688cUL + temp5; word06 = 0x1f83d9abUL + temp6; word07 = 0x5be0cd19UL + temp7; } state[0] = bytereverse(word00); state[1] = bytereverse(word01); state[2] = bytereverse(word02); state[3] = bytereverse(word03); state[4] = bytereverse(word04); state[5] = bytereverse(word05); state[6] = bytereverse(word06); state[7] = bytereverse(word07); } #endif static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT) { SHA256_CTX ctx; #ifdef SIMD_COEF_32 unsigned int i; unsigned char _IBuf[64 * MAX_KEYS_PER_CRYPT + MEM_ALIGN_CACHE], *keys, tmpBuf[32]; uint32_t *keys32, j; keys = (unsigned char *)mem_align(_IBuf, MEM_ALIGN_CACHE); keys32 = (uint32_t *) keys; memset(keys, 0, 64 * MAX_KEYS_PER_CRYPT); for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) { SHA256_Init(&ctx); SHA256_Update(&ctx, saved_key[index + i], strlen(saved_key[index + i])); SHA256_Update(&ctx, cur_salt->salt, 32); SHA256_Final(tmpBuf, &ctx); for (j = 0; j < 32; ++j) keys[GETPOS(j, i)] = tmpBuf[j]; keys[GETPOS(j, i)] = 0x80; //32 bytes of crypt data(0x100 bits). keys[GETPOS(62, i)] = 0x01; } for (i = 0; i < cur_salt->iterations; i++) { SIMDSHA256body(keys, keys32, NULL, SSEi_MIXED_IN | SSEi_OUTPUT_AS_INP_FMT); } //Last one with FLAT_OUT SIMDSHA256body(keys, crypt_out[index], NULL, SSEi_MIXED_IN | SSEi_OUTPUT_AS_INP_FMT | SSEi_FLAT_OUT); #else SHA256_Init(&ctx); SHA256_Update(&ctx, saved_key[index], strlen(saved_key[index])); SHA256_Update(&ctx, cur_salt->salt, 32); SHA256_Final((unsigned char *)crypt_out[index], &ctx); #if 1 //This complex crap only boosted speed on my quad - HT from 5016 to 5285. // A ton of complex code for VERY little gain.The SIMD change gave us // a 4 x improvement with very little change.This pwsafe_sha256_iterate // does get 5 % gain, but 400 % is so much better, lol.I put the other // code in to be able to dump data out easier, getting dump_stuff() // data in flat, to be able to help get the SIMD code working. 
#ifdef COMMON_DIGEST_FOR_OPENSSL pwsafe_sha256_iterate(ctx.hash, cur_salt->iterations); memcpy(crypt_out[index], ctx.hash, 32); #else pwsafe_sha256_iterate(ctx.h, cur_salt->iterations); memcpy(crypt_out[index], ctx.h, 32); #endif #else { int i; for (i = 0; i <= cur_salt->iterations; ++i) { SHA256_Init(&ctx); SHA256_Update(&ctx, (unsigned char *)crypt_out[index], 32); SHA256_Final((unsigned char *)crypt_out[index], &ctx); } } #endif #endif } return count; } static int cmp_all(void *binary, int count) { int index = 0; for (; index < count; index++) if (!memcmp(binary, crypt_out[index], ARCH_SIZE)) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_out[index], BINARY_SIZE); } static int cmp_exact(char *source, int index) { return 1; } static void pwsafe_set_key(char *key, int index) { strnzcpy(saved_key[index], key, sizeof(*saved_key)); } static char * get_key(int index) { return saved_key[index]; } static unsigned int iteration_count(void *salt) { struct custom_salt *my_salt; my_salt = salt; return (unsigned int)my_salt->iterations; } struct fmt_main fmt_pwsafe = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP, { "iteration count", }, {FORMAT_TAG}, pwsafe_tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, get_binary, get_salt, { iteration_count, }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, fmt_default_salt_hash, NULL, set_salt, pwsafe_set_key, get_key, fmt_default_clear_keys, crypt_all, { #define COMMON_GET_HASH_LINK #include "common-get-hash.h" }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
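In the scalar (#else) branch of crypt_all() above, the per-candidate work reduces to one SHA-256 over password||salt followed by repeated re-hashing of the 32-byte state ("i <= iterations", so iterations + 1 extra hashes). The following standalone sketch shows just that stretching loop; it assumes OpenSSL's SHA256_* API and a hypothetical all-zero salt, and is an illustration of the scheme rather than part of the plugin.

/*
 * Minimal sketch of the pwsafe key-stretching done per candidate in the
 * scalar path of crypt_all(). Assumes OpenSSL (link with -lcrypto); the
 * all-zero salt and the iteration count are illustrative values only.
 */
#include <stdio.h>
#include <string.h>
#include <openssl/sha.h>

static void pwsafe_stretch(const char *password, const unsigned char salt[32],
                           unsigned int iterations, unsigned char out[32])
{
    SHA256_CTX ctx;
    unsigned int i;

    /* First block: SHA256(password || salt) */
    SHA256_Init(&ctx);
    SHA256_Update(&ctx, password, strlen(password));
    SHA256_Update(&ctx, salt, 32);
    SHA256_Final(out, &ctx);

    /* Re-hash the 32-byte state, mirroring the "i <= iterations" loop
     * in the fallback branch of crypt_all(). */
    for (i = 0; i <= iterations; ++i) {
        SHA256_Init(&ctx);
        SHA256_Update(&ctx, out, 32);
        SHA256_Final(out, &ctx);
    }
}

int main(void)
{
    unsigned char salt[32] = {0};   /* hypothetical salt */
    unsigned char digest[32];
    unsigned int i;

    pwsafe_stretch("openwall", salt, 2048, digest);
    for (i = 0; i < 32; ++i)
        printf("%02x", digest[i]);
    printf("\n");
    return 0;
}

The real plugin replaces this inner loop with either the unrolled pwsafe_sha256_iterate() or, when SIMD_COEF_32 is defined, batched SIMDSHA256body() calls over interleaved key buffers.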
#if FMT_EXTERNS_H extern struct fmt_main fmt_pwsafe; #elif FMT_REGISTERS_H john_register_one(&fmt_pwsafe); #else #include <string.h> #include <assert.h> #include <errno.h> #include "arch.h" // #undef SIMD_COEF_32 #include "sha2.h" #include "misc.h" #include "common.h" #include "formats.h" #include "params.h" #include "options.h" #include "johnswap.h" #include "simd-intrinsics.h" #ifdef _OPENMP static int omp_t = 1; #include <omp.h> #ifndef OMP_SCALE #define OMP_SCALE 1 // tuned on core i7 #endif #endif #include "memdbg.h" #define FORMAT_LABEL "pwsafe" #define FORMAT_NAME "Password Safe" #define FORMAT_TAG "$pwsafe$*" #define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1) #define ALGORITHM_NAME "SHA256 " SHA256_ALGORITHM_NAME #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define PLAINTEXT_LENGTH 125 #define BINARY_SIZE 32 #define SALT_SIZE sizeof(struct custom_salt) #define BINARY_ALIGN sizeof(uint32_t) #define SALT_ALIGN sizeof(int) #ifdef SIMD_COEF_32 #if ARCH_LITTLE_ENDIAN==1 #define GETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + (3-((i)&3)) + (unsigned int)index/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32*4 ) #else #define GETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + ((i)&3) + (unsigned int)index/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32*4 ) #endif #define MIN_KEYS_PER_CRYPT (SIMD_COEF_32*SIMD_PARA_SHA256) #define MAX_KEYS_PER_CRYPT (SIMD_COEF_32*SIMD_PARA_SHA256) #else #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #endif static struct fmt_tests pwsafe_tests[] = { {"$pwsafe$*3*fefc1172093344c9d5577b25f5b4b6e5d2942c94f9fc24c21733e28ae6527521*2048*88cbaf7d8668c1a98263f5dce7cb39c3304c49a3e0d76a7ea475dc02ab2f97a7", "12345678"}, {"$pwsafe$*3*581cd1135b9b993ccb0f6b01c1fcfacd799c69960496c96286f94fe1400c1b25*2048*4ab3c2d3af251e94eb2f753fdf30fb9da074bec6bac0fa9d9d152b95fc5795c6", "openwall"}, {"$pwsafe$*3*34ba0066d0fc594c126b60b9db98b6024e1cf585901b81b5b005ce386f173d4c*2048*cc86f1a5d930ff19b3602770a86586b5d9dea7bb657012aca875aa2a7dc71dc0", "12345678901234567890123"}, {"$pwsafe$*3*a42431191707895fb8d1121a3a6e255e33892d8eecb50fc616adab6185b5affb*2048*0f71d12df2b7c5394ae90771f6475a7ad0437007a8eeb5d9b58e35d8fd57c827", "123456789012345678901234567"}, {"$pwsafe$*3*c380dee0dbb536f5454f78603b020be76b33e294e9c2a0e047f43b9c61669fc8*2048*e88ed54a85e419d555be219d200563ae3ba864e24442826f412867fc0403917d", "this is an 87 character password to test the max bound of pwsafe-opencl................"}, {NULL} }; static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static uint32_t(*crypt_out)[BINARY_SIZE / sizeof(uint32_t)]; static struct custom_salt { int version; unsigned int iterations; unsigned char salt[32]; } *cur_salt; static void init(struct fmt_main *self) { #ifdef _OPENMP omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt); crypt_out = mem_calloc(sizeof(*crypt_out), self->params.max_keys_per_crypt); } static void done(void) { MEM_FREE(crypt_out); MEM_FREE(saved_key); } static int valid(char *ciphertext, struct fmt_main *self) { //format $pwsafe$version * salt * iterations * hash char *p; char *ctcopy; char *keeptr; if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0) return 0; ctcopy = strdup(ciphertext); keeptr = ctcopy; ctcopy += FORMAT_TAG_LEN; /* skip over "$pwsafe$*" */ if ((p = strtokm(ctcopy, "*")) == NULL) /* version */ goto err; if (!isdec(p)) goto err; if 
(!atoi(p)) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* salt */ goto err; if (strlen(p) < 64) goto err; if (strspn(p, HEXCHARS_lc) != 64) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* iterations */ goto err; if (!isdec(p)) goto err; if (!atoi(p)) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* hash */ goto err; if (strlen(p) != 64) goto err; if (strspn(p, HEXCHARS_lc) != 64) goto err; MEM_FREE(keeptr); return 1; err: MEM_FREE(keeptr); return 0; } static void * get_salt(char *ciphertext) { char *ctcopy = strdup(ciphertext); char *keeptr = ctcopy; char *p; int i; static struct custom_salt cs; memset(&cs, 0, sizeof(cs)); ctcopy += FORMAT_TAG_LEN; /* skip over "$pwsafe$*" */ p = strtokm(ctcopy, "*"); cs.version = atoi(p); p = strtokm(NULL, "*"); for (i = 0; i < 32; i++) cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; p = strtokm(NULL, "*"); cs.iterations = (unsigned int)atoi(p); MEM_FREE(keeptr); return (void *)&cs; } static void * get_binary(char *ciphertext) { static union { unsigned char c[BINARY_SIZE]; ARCH_WORD dummy; } buf; unsigned char *out = buf.c; char *p; int i; p = strrchr(ciphertext, '*') + 1; for (i = 0; i < BINARY_SIZE; i++) { out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } return out; } #define COMMON_GET_HASH_VAR crypt_out #include "common-get-hash.h" static void set_salt(void *salt) { cur_salt = (struct custom_salt *)salt; } #ifndef SIMD_COEF_32 #define rotl(x,y) ( x<<y | x>>(32-y) ) #define rotr(x,y) ( x>>y | x<<(32-y) ) #define CHOICE(x,y,z) ( z ^ (x & ( y ^ z)) ) #define MAJORITY(x,y,z) ( (x & y) | (z & (x | y)) ) #define ROTXOR1(x) (rotr(x,2) ^ rotr(x,13) ^ rotr(x,22)) #define ROTXOR2(x) (rotr(x,6) ^ rotr(x,11) ^ rotr(x,25)) #define ROTXOR3(x) (rotr(x,7) ^ rotr(x,18) ^ (x>>3)) #define ROTXOR4(x) (rotr(x,17) ^ rotr(x,19) ^ (x>>10)) #if ARCH_LITTLE_ENDIAN #define bytereverse(x) ( ((x) << 24) | (((x) << 8) & 0x00ff0000) | (((x) >> 8) & 0x0000ff00) | ((x) >> 24) ) #else #define bytereverse(x) (x) #endif static void pwsafe_sha256_iterate(unsigned int *state, unsigned int iterations) { unsigned int word00, word01, word02, word03, word04, word05, word06, word07; unsigned int word08, word09, word10, word11, word12, word13, word14, word15; unsigned int temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7; iterations++; word00 = state[0]; word01 = state[1]; word02 = state[2]; word03 = state[3]; word04 = state[4]; word05 = state[5]; word06 = state[6]; word07 = state[7]; while (iterations) { iterations--; temp0 = 0x6a09e667UL; temp1 = 0xbb67ae85UL; temp2 = 0x3c6ef372UL; temp3 = 0xa54ff53aUL; temp4 = 0x510e527fUL; temp5 = 0x9b05688cUL; temp6 = 0x1f83d9abUL; temp7 = 0x5be0cd19UL; temp7 += ROTXOR2(temp4) + CHOICE(temp4, temp5, temp6) + 0x428a2f98 + (word00); temp3 += temp7; temp7 += ROTXOR1(temp0) + MAJORITY(temp0, temp1, temp2); temp6 += ROTXOR2(temp3) + CHOICE(temp3, temp4, temp5) + 0x71374491 + (word01); temp2 += temp6; temp6 += ROTXOR1(temp7) + MAJORITY(temp7, temp0, temp1); temp5 += ROTXOR2(temp2) + CHOICE(temp2, temp3, temp4) + 0xb5c0fbcf + (word02); temp1 += temp5; temp5 += ROTXOR1(temp6) + MAJORITY(temp6, temp7, temp0); temp4 += ROTXOR2(temp1) + CHOICE(temp1, temp2, temp3) + 0xe9b5dba5 + (word03); temp0 += temp4; temp4 += ROTXOR1(temp5) + MAJORITY(temp5, temp6, temp7); temp3 += ROTXOR2(temp0) + CHOICE(temp0, temp1, temp2) + 0x3956c25b + (word04); temp7 += temp3; temp3 += ROTXOR1(temp4) + MAJORITY(temp4, temp5, temp6); temp2 += ROTXOR2(temp7) + CHOICE(temp7, temp0, temp1) + 0x59f111f1 + (word05); 
temp6 += temp2; temp2 += ROTXOR1(temp3) + MAJORITY(temp3, temp4, temp5); temp1 += ROTXOR2(temp6) + CHOICE(temp6, temp7, temp0) + 0x923f82a4 + (word06); temp5 += temp1; temp1 += ROTXOR1(temp2) + MAJORITY(temp2, temp3, temp4); temp0 += ROTXOR2(temp5) + CHOICE(temp5, temp6, temp7) + 0xab1c5ed5 + (word07); temp4 += temp0; temp0 += ROTXOR1(temp1) + MAJORITY(temp1, temp2, temp3); temp7 += ROTXOR2(temp4) + CHOICE(temp4, temp5, temp6) + 0xd807aa98 + ((word08 = 0x80000000U)); temp3 += temp7; temp7 += ROTXOR1(temp0) + MAJORITY(temp0, temp1, temp2); temp6 += ROTXOR2(temp3) + CHOICE(temp3, temp4, temp5) + 0x12835b01 + ((word09 = 0)); temp2 += temp6; temp6 += ROTXOR1(temp7) + MAJORITY(temp7, temp0, temp1); temp5 += ROTXOR2(temp2) + CHOICE(temp2, temp3, temp4) + 0x243185be + ((word10 = 0)); temp1 += temp5; temp5 += ROTXOR1(temp6) + MAJORITY(temp6, temp7, temp0); temp4 += ROTXOR2(temp1) + CHOICE(temp1, temp2, temp3) + 0x550c7dc3 + ((word11 = 0)); temp0 += temp4; temp4 += ROTXOR1(temp5) + MAJORITY(temp5, temp6, temp7); temp3 += ROTXOR2(temp0) + CHOICE(temp0, temp1, temp2) + 0x72be5d74 + ((word12 = 0)); temp7 += temp3; temp3 += ROTXOR1(temp4) + MAJORITY(temp4, temp5, temp6); temp2 += ROTXOR2(temp7) + CHOICE(temp7, temp0, temp1) + 0x80deb1fe + ((word13 = 0)); temp6 += temp2; temp2 += ROTXOR1(temp3) + MAJORITY(temp3, temp4, temp5); temp1 += ROTXOR2(temp6) + CHOICE(temp6, temp7, temp0) + 0x9bdc06a7 + ((word14 = 0)); temp5 += temp1; temp1 += ROTXOR1(temp2) + MAJORITY(temp2, temp3, temp4); temp0 += ROTXOR2(temp5) + CHOICE(temp5, temp6, temp7) + 0xc19bf174 + ((word15 = 256)); temp4 += temp0; temp0 += ROTXOR1(temp1) + MAJORITY(temp1, temp2, temp3); temp7 += ROTXOR2(temp4) + CHOICE(temp4, temp5, temp6) + 0xe49b69c1 + ((word00 += ROTXOR4(word14) + word09 + ROTXOR3(word01))); temp3 += temp7; temp7 += ROTXOR1(temp0) + MAJORITY(temp0, temp1, temp2); temp6 += ROTXOR2(temp3) + CHOICE(temp3, temp4, temp5) + 0xefbe4786 + ((word01 += ROTXOR4(word15) + word10 + ROTXOR3(word02))); temp2 += temp6; temp6 += ROTXOR1(temp7) + MAJORITY(temp7, temp0, temp1); temp5 += ROTXOR2(temp2) + CHOICE(temp2, temp3, temp4) + 0x0fc19dc6 + ((word02 += ROTXOR4(word00) + word11 + ROTXOR3(word03))); temp1 += temp5; temp5 += ROTXOR1(temp6) + MAJORITY(temp6, temp7, temp0); temp4 += ROTXOR2(temp1) + CHOICE(temp1, temp2, temp3) + 0x240ca1cc + ((word03 += ROTXOR4(word01) + word12 + ROTXOR3(word04))); temp0 += temp4; temp4 += ROTXOR1(temp5) + MAJORITY(temp5, temp6, temp7); temp3 += ROTXOR2(temp0) + CHOICE(temp0, temp1, temp2) + 0x2de92c6f + ((word04 += ROTXOR4(word02) + word13 + ROTXOR3(word05))); temp7 += temp3; temp3 += ROTXOR1(temp4) + MAJORITY(temp4, temp5, temp6); temp2 += ROTXOR2(temp7) + CHOICE(temp7, temp0, temp1) + 0x4a7484aa + ((word05 += ROTXOR4(word03) + word14 + ROTXOR3(word06))); temp6 += temp2; temp2 += ROTXOR1(temp3) + MAJORITY(temp3, temp4, temp5); temp1 += ROTXOR2(temp6) + CHOICE(temp6, temp7, temp0) + 0x5cb0a9dc + ((word06 += ROTXOR4(word04) + word15 + ROTXOR3(word07))); temp5 += temp1; temp1 += ROTXOR1(temp2) + MAJORITY(temp2, temp3, temp4); temp0 += ROTXOR2(temp5) + CHOICE(temp5, temp6, temp7) + 0x76f988da + ((word07 += ROTXOR4(word05) + word00 + ROTXOR3(word08))); temp4 += temp0; temp0 += ROTXOR1(temp1) + MAJORITY(temp1, temp2, temp3); temp7 += ROTXOR2(temp4) + CHOICE(temp4, temp5, temp6) + 0x983e5152 + ((word08 += ROTXOR4(word06) + word01 + ROTXOR3(word09))); temp3 += temp7; temp7 += ROTXOR1(temp0) + MAJORITY(temp0, temp1, temp2); temp6 += ROTXOR2(temp3) + CHOICE(temp3, temp4, temp5) + 0xa831c66d + ((word09 += ROTXOR4(word07) + 
word02 + ROTXOR3(word10))); temp2 += temp6; temp6 += ROTXOR1(temp7) + MAJORITY(temp7, temp0, temp1); temp5 += ROTXOR2(temp2) + CHOICE(temp2, temp3, temp4) + 0xb00327c8 + ((word10 += ROTXOR4(word08) + word03 + ROTXOR3(word11))); temp1 += temp5; temp5 += ROTXOR1(temp6) + MAJORITY(temp6, temp7, temp0); temp4 += ROTXOR2(temp1) + CHOICE(temp1, temp2, temp3) + 0xbf597fc7 + ((word11 += ROTXOR4(word09) + word04 + ROTXOR3(word12))); temp0 += temp4; temp4 += ROTXOR1(temp5) + MAJORITY(temp5, temp6, temp7); temp3 += ROTXOR2(temp0) + CHOICE(temp0, temp1, temp2) + 0xc6e00bf3 + ((word12 += ROTXOR4(word10) + word05 + ROTXOR3(word13))); temp7 += temp3; temp3 += ROTXOR1(temp4) + MAJORITY(temp4, temp5, temp6); temp2 += ROTXOR2(temp7) + CHOICE(temp7, temp0, temp1) + 0xd5a79147 + ((word13 += ROTXOR4(word11) + word06 + ROTXOR3(word14))); temp6 += temp2; temp2 += ROTXOR1(temp3) + MAJORITY(temp3, temp4, temp5); temp1 += ROTXOR2(temp6) + CHOICE(temp6, temp7, temp0) + 0x06ca6351 + ((word14 += ROTXOR4(word12) + word07 + ROTXOR3(word15))); temp5 += temp1; temp1 += ROTXOR1(temp2) + MAJORITY(temp2, temp3, temp4); temp0 += ROTXOR2(temp5) + CHOICE(temp5, temp6, temp7) + 0x14292967 + ((word15 += ROTXOR4(word13) + word08 + ROTXOR3(word00))); temp4 += temp0; temp0 += ROTXOR1(temp1) + MAJORITY(temp1, temp2, temp3); temp7 += ROTXOR2(temp4) + CHOICE(temp4, temp5, temp6) + 0x27b70a85 + ((word00 += ROTXOR4(word14) + word09 + ROTXOR3(word01))); temp3 += temp7; temp7 += ROTXOR1(temp0) + MAJORITY(temp0, temp1, temp2); temp6 += ROTXOR2(temp3) + CHOICE(temp3, temp4, temp5) + 0x2e1b2138 + ((word01 += ROTXOR4(word15) + word10 + ROTXOR3(word02))); temp2 += temp6; temp6 += ROTXOR1(temp7) + MAJORITY(temp7, temp0, temp1); temp5 += ROTXOR2(temp2) + CHOICE(temp2, temp3, temp4) + 0x4d2c6dfc + ((word02 += ROTXOR4(word00) + word11 + ROTXOR3(word03))); temp1 += temp5; temp5 += ROTXOR1(temp6) + MAJORITY(temp6, temp7, temp0); temp4 += ROTXOR2(temp1) + CHOICE(temp1, temp2, temp3) + 0x53380d13 + ((word03 += ROTXOR4(word01) + word12 + ROTXOR3(word04))); temp0 += temp4; temp4 += ROTXOR1(temp5) + MAJORITY(temp5, temp6, temp7); temp3 += ROTXOR2(temp0) + CHOICE(temp0, temp1, temp2) + 0x650a7354 + ((word04 += ROTXOR4(word02) + word13 + ROTXOR3(word05))); temp7 += temp3; temp3 += ROTXOR1(temp4) + MAJORITY(temp4, temp5, temp6); temp2 += ROTXOR2(temp7) + CHOICE(temp7, temp0, temp1) + 0x766a0abb + ((word05 += ROTXOR4(word03) + word14 + ROTXOR3(word06))); temp6 += temp2; temp2 += ROTXOR1(temp3) + MAJORITY(temp3, temp4, temp5); temp1 += ROTXOR2(temp6) + CHOICE(temp6, temp7, temp0) + 0x81c2c92e + ((word06 += ROTXOR4(word04) + word15 + ROTXOR3(word07))); temp5 += temp1; temp1 += ROTXOR1(temp2) + MAJORITY(temp2, temp3, temp4); temp0 += ROTXOR2(temp5) + CHOICE(temp5, temp6, temp7) + 0x92722c85 + ((word07 += ROTXOR4(word05) + word00 + ROTXOR3(word08))); temp4 += temp0; temp0 += ROTXOR1(temp1) + MAJORITY(temp1, temp2, temp3); temp7 += ROTXOR2(temp4) + CHOICE(temp4, temp5, temp6) + 0xa2bfe8a1 + ((word08 += ROTXOR4(word06) + word01 + ROTXOR3(word09))); temp3 += temp7; temp7 += ROTXOR1(temp0) + MAJORITY(temp0, temp1, temp2); temp6 += ROTXOR2(temp3) + CHOICE(temp3, temp4, temp5) + 0xa81a664b + ((word09 += ROTXOR4(word07) + word02 + ROTXOR3(word10))); temp2 += temp6; temp6 += ROTXOR1(temp7) + MAJORITY(temp7, temp0, temp1); temp5 += ROTXOR2(temp2) + CHOICE(temp2, temp3, temp4) + 0xc24b8b70 + ((word10 += ROTXOR4(word08) + word03 + ROTXOR3(word11))); temp1 += temp5; temp5 += ROTXOR1(temp6) + MAJORITY(temp6, temp7, temp0); temp4 += ROTXOR2(temp1) + CHOICE(temp1, temp2, temp3) 
+ 0xc76c51a3 + ((word11 += ROTXOR4(word09) + word04 + ROTXOR3(word12))); temp0 += temp4; temp4 += ROTXOR1(temp5) + MAJORITY(temp5, temp6, temp7); temp3 += ROTXOR2(temp0) + CHOICE(temp0, temp1, temp2) + 0xd192e819 + ((word12 += ROTXOR4(word10) + word05 + ROTXOR3(word13))); temp7 += temp3; temp3 += ROTXOR1(temp4) + MAJORITY(temp4, temp5, temp6); temp2 += ROTXOR2(temp7) + CHOICE(temp7, temp0, temp1) + 0xd6990624 + ((word13 += ROTXOR4(word11) + word06 + ROTXOR3(word14))); temp6 += temp2; temp2 += ROTXOR1(temp3) + MAJORITY(temp3, temp4, temp5); temp1 += ROTXOR2(temp6) + CHOICE(temp6, temp7, temp0) + 0xf40e3585 + ((word14 += ROTXOR4(word12) + word07 + ROTXOR3(word15))); temp5 += temp1; temp1 += ROTXOR1(temp2) + MAJORITY(temp2, temp3, temp4); temp0 += ROTXOR2(temp5) + CHOICE(temp5, temp6, temp7) + 0x106aa070 + ((word15 += ROTXOR4(word13) + word08 + ROTXOR3(word00))); temp4 += temp0; temp0 += ROTXOR1(temp1) + MAJORITY(temp1, temp2, temp3); temp7 += ROTXOR2(temp4) + CHOICE(temp4, temp5, temp6) + 0x19a4c116 + ((word00 += ROTXOR4(word14) + word09 + ROTXOR3(word01))); temp3 += temp7; temp7 += ROTXOR1(temp0) + MAJORITY(temp0, temp1, temp2); temp6 += ROTXOR2(temp3) + CHOICE(temp3, temp4, temp5) + 0x1e376c08 + ((word01 += ROTXOR4(word15) + word10 + ROTXOR3(word02))); temp2 += temp6; temp6 += ROTXOR1(temp7) + MAJORITY(temp7, temp0, temp1); temp5 += ROTXOR2(temp2) + CHOICE(temp2, temp3, temp4) + 0x2748774c + ((word02 += ROTXOR4(word00) + word11 + ROTXOR3(word03))); temp1 += temp5; temp5 += ROTXOR1(temp6) + MAJORITY(temp6, temp7, temp0); temp4 += ROTXOR2(temp1) + CHOICE(temp1, temp2, temp3) + 0x34b0bcb5 + ((word03 += ROTXOR4(word01) + word12 + ROTXOR3(word04))); temp0 += temp4; temp4 += ROTXOR1(temp5) + MAJORITY(temp5, temp6, temp7); temp3 += ROTXOR2(temp0) + CHOICE(temp0, temp1, temp2) + 0x391c0cb3 + ((word04 += ROTXOR4(word02) + word13 + ROTXOR3(word05))); temp7 += temp3; temp3 += ROTXOR1(temp4) + MAJORITY(temp4, temp5, temp6); temp2 += ROTXOR2(temp7) + CHOICE(temp7, temp0, temp1) + 0x4ed8aa4a + ((word05 += ROTXOR4(word03) + word14 + ROTXOR3(word06))); temp6 += temp2; temp2 += ROTXOR1(temp3) + MAJORITY(temp3, temp4, temp5); temp1 += ROTXOR2(temp6) + CHOICE(temp6, temp7, temp0) + 0x5b9cca4f + ((word06 += ROTXOR4(word04) + word15 + ROTXOR3(word07))); temp5 += temp1; temp1 += ROTXOR1(temp2) + MAJORITY(temp2, temp3, temp4); temp0 += ROTXOR2(temp5) + CHOICE(temp5, temp6, temp7) + 0x682e6ff3 + ((word07 += ROTXOR4(word05) + word00 + ROTXOR3(word08))); temp4 += temp0; temp0 += ROTXOR1(temp1) + MAJORITY(temp1, temp2, temp3); temp7 += ROTXOR2(temp4) + CHOICE(temp4, temp5, temp6) + 0x748f82ee + ((word08 += ROTXOR4(word06) + word01 + ROTXOR3(word09))); temp3 += temp7; temp7 += ROTXOR1(temp0) + MAJORITY(temp0, temp1, temp2); temp6 += ROTXOR2(temp3) + CHOICE(temp3, temp4, temp5) + 0x78a5636f + ((word09 += ROTXOR4(word07) + word02 + ROTXOR3(word10))); temp2 += temp6; temp6 += ROTXOR1(temp7) + MAJORITY(temp7, temp0, temp1); temp5 += ROTXOR2(temp2) + CHOICE(temp2, temp3, temp4) + 0x84c87814 + ((word10 += ROTXOR4(word08) + word03 + ROTXOR3(word11))); temp1 += temp5; temp5 += ROTXOR1(temp6) + MAJORITY(temp6, temp7, temp0); temp4 += ROTXOR2(temp1) + CHOICE(temp1, temp2, temp3) + 0x8cc70208 + ((word11 += ROTXOR4(word09) + word04 + ROTXOR3(word12))); temp0 += temp4; temp4 += ROTXOR1(temp5) + MAJORITY(temp5, temp6, temp7); temp3 += ROTXOR2(temp0) + CHOICE(temp0, temp1, temp2) + 0x90befffa + ((word12 += ROTXOR4(word10) + word05 + ROTXOR3(word13))); temp7 += temp3; temp3 += ROTXOR1(temp4) + MAJORITY(temp4, temp5, temp6); temp2 += 
ROTXOR2(temp7) + CHOICE(temp7, temp0, temp1) + 0xa4506ceb + ((word13 += ROTXOR4(word11) + word06 + ROTXOR3(word14))); temp6 += temp2; temp2 += ROTXOR1(temp3) + MAJORITY(temp3, temp4, temp5); temp1 += ROTXOR2(temp6) + CHOICE(temp6, temp7, temp0) + 0xbef9a3f7 + ((word14 += ROTXOR4(word12) + word07 + ROTXOR3(word15))); temp5 += temp1; temp1 += ROTXOR1(temp2) + MAJORITY(temp2, temp3, temp4); temp0 += ROTXOR2(temp5) + CHOICE(temp5, temp6, temp7) + 0xc67178f2 + ((word15 += ROTXOR4(word13) + word08 + ROTXOR3(word00))); temp4 += temp0; temp0 += ROTXOR1(temp1) + MAJORITY(temp1, temp2, temp3); word00 = 0x6a09e667UL + temp0; word01 = 0xbb67ae85UL + temp1; word02 = 0x3c6ef372UL + temp2; word03 = 0xa54ff53aUL + temp3; word04 = 0x510e527fUL + temp4; word05 = 0x9b05688cUL + temp5; word06 = 0x1f83d9abUL + temp6; word07 = 0x5be0cd19UL + temp7; } state[0] = bytereverse(word00); state[1] = bytereverse(word01); state[2] = bytereverse(word02); state[3] = bytereverse(word03); state[4] = bytereverse(word04); state[5] = bytereverse(word05); state[6] = bytereverse(word06); state[7] = bytereverse(word07); } #endif static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; #ifdef _OPENMP #pragma omp parallel for #endif for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT) { SHA256_CTX ctx; #ifdef SIMD_COEF_32 unsigned int i; unsigned char _IBuf[64 * MAX_KEYS_PER_CRYPT + MEM_ALIGN_CACHE], *keys, tmpBuf[32]; uint32_t *keys32, j; keys = (unsigned char *)mem_align(_IBuf, MEM_ALIGN_CACHE); keys32 = (uint32_t *) keys; memset(keys, 0, 64 * MAX_KEYS_PER_CRYPT); for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) { SHA256_Init(&ctx); SHA256_Update(&ctx, saved_key[index + i], strlen(saved_key[index + i])); SHA256_Update(&ctx, cur_salt->salt, 32); SHA256_Final(tmpBuf, &ctx); for (j = 0; j < 32; ++j) keys[GETPOS(j, i)] = tmpBuf[j]; keys[GETPOS(j, i)] = 0x80; //32 bytes of crypt data(0x100 bits). keys[GETPOS(62, i)] = 0x01; } for (i = 0; i < cur_salt->iterations; i++) { SIMDSHA256body(keys, keys32, NULL, SSEi_MIXED_IN | SSEi_OUTPUT_AS_INP_FMT); } //Last one with FLAT_OUT SIMDSHA256body(keys, crypt_out[index], NULL, SSEi_MIXED_IN | SSEi_OUTPUT_AS_INP_FMT | SSEi_FLAT_OUT); #else SHA256_Init(&ctx); SHA256_Update(&ctx, saved_key[index], strlen(saved_key[index])); SHA256_Update(&ctx, cur_salt->salt, 32); SHA256_Final((unsigned char *)crypt_out[index], &ctx); #if 1 //This complex crap only boosted speed on my quad - HT from 5016 to 5285. // A ton of complex code for VERY little gain.The SIMD change gave us // a 4 x improvement with very little change.This pwsafe_sha256_iterate // does get 5 % gain, but 400 % is so much better, lol.I put the other // code in to be able to dump data out easier, getting dump_stuff() // data in flat, to be able to help get the SIMD code working. 
#ifdef COMMON_DIGEST_FOR_OPENSSL pwsafe_sha256_iterate(ctx.hash, cur_salt->iterations); memcpy(crypt_out[index], ctx.hash, 32); #else pwsafe_sha256_iterate(ctx.h, cur_salt->iterations); memcpy(crypt_out[index], ctx.h, 32); #endif #else { int i; for (i = 0; i <= cur_salt->iterations; ++i) { SHA256_Init(&ctx); SHA256_Update(&ctx, (unsigned char *)crypt_out[index], 32); SHA256_Final((unsigned char *)crypt_out[index], &ctx); } } #endif #endif } return count; } static int cmp_all(void *binary, int count) { int index = 0; for (; index < count; index++) if (!memcmp(binary, crypt_out[index], ARCH_SIZE)) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_out[index], BINARY_SIZE); } static int cmp_exact(char *source, int index) { return 1; } static void pwsafe_set_key(char *key, int index) { strnzcpy(saved_key[index], key, sizeof(*saved_key)); } static char * get_key(int index) { return saved_key[index]; } static unsigned int iteration_count(void *salt) { struct custom_salt *my_salt; my_salt = salt; return (unsigned int)my_salt->iterations; } struct fmt_main fmt_pwsafe = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP, { "iteration count", }, {FORMAT_TAG}, pwsafe_tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, get_binary, get_salt, { iteration_count, }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, fmt_default_salt_hash, NULL, set_salt, pwsafe_set_key, get_key, fmt_default_clear_keys, crypt_all, { #define COMMON_GET_HASH_LINK #include "common-get-hash.h" }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
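The functional difference between the two pwsafe listings is the OpenMP treatment of crypt_all(): init() scales min/max_keys_per_crypt by omp_get_max_threads() (times OMP_SCALE), and the index loop gets a plain "#pragma omp parallel for", which is safe because every per-candidate buffer (the SHA256_CTX, the SIMD key block) is declared inside the loop body and is therefore private to each thread. Below is a minimal sketch of that pattern; work() and NKEYS are illustrative stand-ins, not names from the plugin.

/*
 * Sketch of the OpenMP pattern used by crypt_all(): a parallel for over
 * independent key indices, with all per-candidate state loop-local.
 */
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#endif

#define NKEYS 1024

static unsigned int work(int index)
{
    /* stand-in for the per-candidate hashing */
    unsigned int h = (unsigned int)index;
    int i;
    for (i = 0; i < 2048; ++i)
        h = h * 2654435761u + 1;
    return h;
}

int main(void)
{
    static unsigned int out[NKEYS];
    int index;

#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (index = 0; index < NKEYS; index++) {
        /* loop-local variables are thread-private, like SHA256_CTX ctx above */
        out[index] = work(index);
    }

    printf("out[0]=%u out[%d]=%u\n", out[0], NKEYS - 1, out[NKEYS - 1]);
    return 0;
}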
calcium_sparks_old.c
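calcium_sparks_old.c is an MPI + HDF5 reaction-diffusion solver for calcium sparks; its diffusion kernel laplace3D() applies a 7-point stencil on a grid with a one-cell ghost layer, with alpha = dt*D/(h*h). The serial sketch below shows that stencil update in isolation; the write-back C1 = C0 + alpha*laplacian(C0) is the explicit-Euler step implied by that alpha and is an assumption here, as are the grid size and point-source initial condition.

/*
 * Minimal serial sketch of the 7-point diffusion stencil used by
 * laplace3D(): index = i*ny0*nz0 + j*nz0 + k, ghost layer of width 1.
 * The explicit-Euler write-back to C1 is assumed, not copied.
 */
#include <stdio.h>
#include <stdlib.h>

static void laplace3D_sketch(int nx0, int ny0, int nz0,
                             const double *C0, double *C1, double alpha)
{
    int i, j, k;
    for (i = 1; i < nx0 - 1; i++)
        for (j = 1; j < ny0 - 1; j++)
            for (k = 1; k < nz0 - 1; k++) {
                int c = i * ny0 * nz0 + j * nz0 + k;
                double lap = -6.0 * C0[c]
                           + C0[c - ny0 * nz0] + C0[c + ny0 * nz0]
                           + C0[c - nz0]       + C0[c + nz0]
                           + C0[c - 1]         + C0[c + 1];
                C1[c] = C0[c] + alpha * lap;   /* assumed explicit-Euler step */
            }
}

int main(void)
{
    int n = 8, len = n * n * n;
    double *C0 = calloc(len, sizeof(double));
    double *C1 = calloc(len, sizeof(double));
    if (!C0 || !C1)
        return 1;
    C0[(n / 2) * n * n + (n / 2) * n + n / 2] = 1.0;  /* point source */
    laplace3D_sketch(n, n, n, C0, C1, 1.0 / 6.0);
    printf("center after one step: %g\n", C1[(n / 2) * n * n + (n / 2) * n + n / 2]);
    free(C0);
    free(C1);
    return 0;
}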
#include <math.h> #include <assert.h> #include <stdio.h> #include <stdlib.h> #include <time.h> #include <string.h> #include "mpi.h" #include "hdf5.h" #include <sys/stat.h> //#define DEBUG_TEST #define DB_PF 0 #define MAX_LINE_LENGTH 80 //#define __PAPI__ #ifdef __PAPI__ #include <papi.h> #endif typedef int(*CONDCF)(int a, int b); #define H5T_DATA_TYPE H5T_NATIVE_SHORT typedef short int hdf5_data_type; #define H5_DATA_LIMIT_0 -32768 // Data type specific #define H5_DATA_LIMIT_1 32767 // Data type specific #define H5_DATA_SIZE H5_DATA_LIMIT_1 - H5_DATA_LIMIT_0 // Data type specific double timing(); void *mpi_malloc ( int id, int bytes); /* IN - Bytes to allocate */ inline double my_random(); double my_min(double* ar, int len); double my_max(double* ar, int len); void stern(double t, double* y0, double* y1, double Ca); void stern_discrete(double dt, int* y0, int* y1, double Ca); void laplace3D (int nx0, int ny0, int nz0, double* C0, int nx1, int ny1, int nz1, double* C1, double alpha);//, int num_threads) void reaction3D (int nx0, int ny0, int nz0, double* Ca, int nx1, int ny1, int nz1, double* buff, double B_tot, double k_on, double k_off, double dt);//, int num_threads) void serca3D (int nx0, int ny0, int nz0, double* Ca_i, int nx1, int ny1, int nz1, double* Ca_SR, double dt, double gamma, double fudge);//, int num_threads) void update_ryr(int h_scale,int nx0, int ny0, int nz0, double* Ca_i, double* Ca_SR, double* Ca_CSQN, double* C10, double* C12, double* C13, double* C14, double k_on_CSQN, double k_off_CSQN, double CSQN_tot, double gamma, double K, double dt, int ryr_len, int* i0_ryr, int* i1_ryr, int* i2_ryr, int csqn_len, int* i0_csqn, int* i1_csqn, int* i2_csqn, int cleft_len, int* i0_cleft, int* i1_cleft, int* i2_cleft,int* cleft_nb, int* states0, int* states1); void store2Dmatrixfile_double_1D(char* outfile, double* ar, int rows, int cols, int x_strid); void store2Dmatrixfile_double_bin(char* outfile, double* ar, int rows, int cols, int x_strid); void transfer_hdf5_data(hdf5_data_type* h5_data, double* ar0, double* ar1, double scale_value, hsize_t* chunk_dims); void store2Dmatrixfile_int_1D(char* outfile, int* ar, int rows, int cols); //int less(int a, int b); //int giant(int a, int b); //int* loadRyRindexfile_int(char* infile, CONDFN cf, int cond); int* loadRyRindexfile_int(char* infile, int* count); int idxinrank(int nx, int ny, int nz, int i0, int i1, int i2, int rank, MPI_Comm comm3d); int idxbl2rank(int nx, int ny, int nz, int i0, int i1, int i2, int* coords, MPI_Comm comm3d); int load_indices_serial(int nx, int ny, int nz, int h, int** i0_ryr, int** i1_ryr, int** i2_ryr, int* ryr_len, int** i0_csqn, int** i1_csqn, int** i2_csqn, int* csqn_len, int** i0_cleft, int** i1_cleft, int** i2_cleft, int** cleft_nb, int* cleft_len, int x_slice_mid,int x_slice_width, int x_slice_num, int use_failing); int IsTrueCleft(int coord_y, int coord_z, int size_y, int size_z, int *i1_csqn, int *i2_csqn, int* y_index, int csqn_len); void BinarySort_two(int* pData, int* vData, int Count); void dichotomy_two(int* pData,int* vData, int left,int right); int distr_ryr_csqn_state(int h, int size_x, int size_y, int size_z, int nx, int ny, int nz, int** i0_ryr, int** i1_ryr, int** i2_ryr, int* ryr_len, int** i0_csqn, int** i1_csqn, int** i2_csqn, int* csqn_len, int** i0_cleft, int** i1_cleft, int** i2_cleft,int** cleft_nb, int* cleft_len, int** states0, int** states1, int x_slice_mid,int x_slice_width, int x_slice_num, MPI_Comm comm3d, MPI_Comm, int use_failing); void readparam(int* iconf, double* conf); 
void updateBound(double* C00, double* C01, double* C02, double* C03, double* C04, int C_flag, int nx0, int ny0, int nz0, double* yz_sbuf0,double* yz_rbuf0, double* xz_sbuf0,double* xz_rbuf0, double* xy_sbuf0,double* xy_rbuf0, double* yz_sbuf1,double* yz_rbuf1, double* xz_sbuf1,double* xz_rbuf1, double* xy_sbuf1,double* xy_rbuf1, int* neighbor, MPI_Status* ar_status, MPI_Request* ar_send_req, MPI_Request* ar_recv_req, MPI_Comm comm,MPI_Comm comm3d); void putin_sendbuffer_yz(int base_addr,int nx0,int ny0, int nz0, double* arr, int nx, int ny, int nz, double* sbuf, int sbuf_len); void putin_sendbuffer_xz(int base_addr,int nx0,int ny0, int nz0, double* arr, int nx, int ny, int nz, double* sbuf, int sbuf_len); void putin_sendbuffer_xy(int base_addr,int nx0,int ny0, int nz0, double* arr, int nx, int ny, int nz, double* sbuf, int sbuf_len); void getout_recvbuffer_yz(int base_addr,int nx0,int ny0, int nz0, double* arr, int nx, int ny, int nz, double* sbuf, int sbuf_len); void getout_recvbuffer_xz(int base_addr,int nx0,int ny0, int nz0, double* arr, int nx, int ny, int nz, double* sbuf, int sbuf_len); void getout_recvbuffer_xy(int base_addr,int nx0,int ny0, int nz0, double* arr, int nx, int ny, int nz, double* sbuf, int sbuf_len); void compute_pde_ode(int nx0, int ny0, int nz0, double dt,double gamma, double fudge, double* alpha, double* B_tot, double* k_on, double* k_off, double** C0, double** C1, int div_y); #define NUM_SAVE_SPECIES 5 int save_species[NUM_SAVE_SPECIES] = {0,1,4,5,6}; char* species_names[7] = {"Cai", "CaSR", "CaCMDN", "CaATP", "CaFluo", "CaTRPN", "CaCSQN"}; int main(int argc, char **argv) { int i,j,k; #ifdef __PAPI__ // int Events[] = { PAPI_L1_DCA, PAPI_L1_DCM }; // int Events[] = {PAPI_L3_TCM, PAPI_L3_TCA, PAPI_L2_TCM,PAPI_L2_TCA}; int Events[] = {PAPI_DP_OPS,PAPI_L3_TCM}; int NUM_EVENTS = sizeof(Events)/sizeof(Events[0]); long long res_papi[NUM_EVENTS]; char EventName[128]; int num_hwcntrs = 0; int EventSet = PAPI_NULL; int retval; retval = PAPI_library_init( PAPI_VER_CURRENT ); retval = PAPI_create_eventset( &EventSet ); if (PAPI_add_events( EventSet, Events, NUM_EVENTS) != PAPI_OK){ printf("PAPI_add_events failed\n"); } for (i=0; i<NUM_EVENTS; i++){ res_papi[i] = 0; } #endif double time_main=0.0; double time_comm=0.0; double time_conc=0.0; double time_ryr=0.0; double time_io=0.0; int save_data=0; int use_rand_seed=1; int use_failing=0; int idx; int h_scale=1; int h=30; int div_y=1; int save_binary_file=0; int save_hdf5=0; double T=1.0; double DT=0.05; // plotting time step int TimeStep=2; int size_x, size_y, size_z, my_id, x_domains, y_domains, z_domains; int iconf[12]; double conf[2]; /* MPI variables */ int nproc, ndims; MPI_Comm comm, comm3d; int dims[3]; int periods[3]; int reorganisation = 0; MPI_Datatype matrix_type_oyz, matrix_type_oxz, matrix_type_oxy; int ZN=0, ZP=1, YN=2, YP=3, XN=4, XP=5; int NeighBor[6]; hid_t h5_file_id; hdf5_data_type* h5_data; MPI_Init(&argc, &argv); comm = MPI_COMM_WORLD; MPI_Comm_size(comm, &nproc); MPI_Comm_rank(comm, &my_id); MPI_Info info = MPI_INFO_NULL; if (my_id==0) { readparam(iconf, conf); } MPI_Bcast(iconf, 12, MPI_INT, 0, comm); MPI_Bcast(conf, 2, MPI_DOUBLE, 0, comm); h = iconf[0]; size_x = iconf[1]; size_y = iconf[2]; size_z = iconf[3]; x_domains = iconf[4]; y_domains = iconf[5]; z_domains = iconf[6]; save_data = iconf[7]; use_failing = iconf[8]; save_binary_file = iconf[9]; // Save Ca in binary file instead of ascii file save_hdf5 = iconf[10]; // Save data in hdf5 file format div_y = iconf[11]; // Block size on y direction for 
cache T = conf[0]; DT = conf[1]; h_scale=30/h; if(use_rand_seed) srand(my_id); char hdf5_dataset_name[200]; char hdf5_group_name[200]; char h5_basename[200]; char outdirname[200]; if(save_hdf5) { sprintf(h5_basename, "output_%d_%d_%d_%d_%d", h, size_x, size_y, size_z, use_failing); } else if(save_binary_file) { sprintf(outdirname, "output_%d_%d_%d_%d_%d_bin", h, size_x, size_y, size_z, use_failing); } else { sprintf(outdirname, "output_%d_%d_%d_%d_%d", h, size_x, size_y, size_z, use_failing); } if(!my_id) { if(save_data && !save_hdf5){ if(access(outdirname,0)) { if (mkdir(outdirname, 0755)==-1) { printf("make directory failed\n"); } else { printf("make directory: %s\n", outdirname); } } else { printf("directory %s existed\n",outdirname); } } } MPI_Barrier(comm); if((my_id==0) && (nproc!=(x_domains*y_domains*z_domains))) { printf("Number of processes not equal to Number of subdomains\n"); MPI_Abort(MPI_COMM_WORLD, 1); } if((my_id==0)&&(size_x%x_domains!=0)) { printf("Number of x_domains is not divisible in scale\n"); MPI_Abort(MPI_COMM_WORLD, 1); } if((my_id==0)&&(size_y%y_domains!=0)) { printf("Number of y_domains is not divisible in scale\n"); MPI_Abort(MPI_COMM_WORLD, 1); } if((my_id==0)&&(size_z%z_domains!=0)) { printf("Number of z_domains is not divisible in scale\n"); MPI_Abort(MPI_COMM_WORLD, 1); } if(((size_y/y_domains)%div_y)!=0){ div_y=1; if(my_id==0){ printf("Warning: div_y is not divisible on each node, so set div_y=1 for default \n"); } } /* Create 3D cartesian grid */ periods[0] = 0; periods[1] = 0; periods[2] = 0; ndims = 3; dims[0]=z_domains; dims[1]=y_domains; dims[2]=x_domains; MPI_Cart_create(comm, ndims, dims, periods, reorganisation, &comm3d); /* MPI variables */ MPI_Status ar_status[6]; MPI_Request ar_send_req[6]; MPI_Request ar_recv_req[6]; int coord[3]; int dim[3]; int period[3]; int mid_coord_x=0; int in_midx_slice=0; int x_slice_num; int x_slice_width; int x_slice_mid; MPI_Cart_get(comm3d, 3, dim, period, coord); x_slice_num=(int)(ceil((double)(size_x*h)/2100.0)); if((size_x%x_slice_num)!=0) { printf("x dimension can not be divided by %d\n", x_slice_num); MPI_Abort(comm,5); } x_slice_width=size_x/x_slice_num; x_slice_mid=(x_slice_width+1)/2; for(i=0;i<x_slice_num;i++) { if(((x_slice_width*i+x_slice_mid)>=(coord[2]*size_x/x_domains))&& ((x_slice_width*i+x_slice_mid)<((coord[2]+1)*size_x/x_domains))){ if(in_midx_slice==1){ printf("dont put two x_slice in a x partition\n"); MPI_Abort(comm,5); } in_midx_slice=1; mid_coord_x=(x_slice_width*i+x_slice_mid)-(coord[2]*size_x/x_domains)+1;//+1 for ghost bound //check x partition thickness, so far, for simplify, dont cut a csqn and no-flux into two x-partitions if((mid_coord_x)<(h_scale+3)||(size_x/x_domains-mid_coord_x)<(h_scale+3)){ printf("x partition is too thine for CSQN and cleft extend \n"); MPI_Abort(comm,5); } } } //printf("Rank: %d, coord: [%d, %d, %d]\n", my_id, coord[0], coord[1], coord[2]); /* Identify process neighbors */ NeighBor[0] = MPI_PROC_NULL; NeighBor[1] = MPI_PROC_NULL; NeighBor[2] = MPI_PROC_NULL; NeighBor[3] = MPI_PROC_NULL; NeighBor[4] = MPI_PROC_NULL; NeighBor[5] = MPI_PROC_NULL; /* Left/West and right/Est neigbors Z direction*/ MPI_Cart_shift(comm3d,0,1,&NeighBor[ZN],&NeighBor[ZP]); /* Bottom/South and Upper/North neigbors Y direction*/ MPI_Cart_shift(comm3d,1,1,&NeighBor[YN],&NeighBor[YP]); /* Zdown/South and Zup/North neigbors X direction*/ MPI_Cart_shift(comm3d,2,1,&NeighBor[XN],&NeighBor[XP]); //-------------------------------------------------------------------- int nx=(size_x/x_domains); int 
ny=(size_y/y_domains); int nz=(size_z/z_domains); int nx0, ny0, nz0; int nx1, ny1, nz1; nx0=nx+2; ny0=ny+2; nz0=nz+2; nx1=nx+2; ny1=ny+2; nz1=nz+2; int len; len=nx0*ny0*nz0; /* Create matrix data types to communicate */ MPI_Type_vector(ny, nz, nz0, MPI_DOUBLE, &matrix_type_oyz); MPI_Type_commit(&matrix_type_oyz); /* Create matrix data type to communicate on vertical Oxz plan */ MPI_Type_vector(nx, nz, ny0*nz0, MPI_DOUBLE, &matrix_type_oxz); MPI_Type_commit(&matrix_type_oxz); /* Create matrix data type to communicate on vertical Oxy plan */ MPI_Datatype matrix_type_liney; MPI_Type_vector(ny, 1, nz0, MPI_DOUBLE, &matrix_type_liney); MPI_Type_commit(&matrix_type_liney); // MPI_Type_vector(nx*ny, 1, nz0, MPI_DOUBLE, &matrix_type_oxy); MPI_Type_hvector(nx, 1, ny0*nz0*sizeof(double), matrix_type_liney, &matrix_type_oxy); MPI_Type_commit(&matrix_type_oxy); if(!my_id) printf("Simulation Begin!\n"); //Define where the RyRs are: int* i0_ryr; int* i1_ryr; int* i2_ryr; int* i0_csqn; int* i1_csqn; int* i2_csqn; int* i0_cleft; int* i1_cleft; int* i2_cleft; int* cleft_nb; int ryr_len; int csqn_len; int cleft_len; int* states0; int* states1; h_scale=distr_ryr_csqn_state( h, size_x, size_y, size_z, nx, ny, nz, &i0_ryr, &i1_ryr, &i2_ryr, &ryr_len, &i0_csqn, &i1_csqn, &i2_csqn, &csqn_len, &i0_cleft, &i1_cleft, &i2_cleft, &cleft_nb,&cleft_len, &states0, &states1, x_slice_mid,x_slice_width, x_slice_num, comm3d, comm, use_failing); // store2Dmatrixfile_int_1D("i0.txt",i0,n_ryr,1); // store2Dmatrixfile_int_1D("i1.txt",i1,n_ryr,1); // store2Dmatrixfile_int_1D("i2.txt",i2,n_ryr,1); double Vfraction; //first set the numbers of RyR in a CaRU; //All CaRU placed mid-sarcomere Vfraction=(30.0/h)*(30.0/h)*(30.0/h); // scaling of RyR when changing dx // Set constants and dt based on these: double D_i=250e3; // 220e3 double D_SR=73e3; // 73.3e3; double D_ATP=140e3; double D_CMDN=22e3; double D_Fluo=42e3; double dt=(1./6)*h*h/D_i; double alpha_i = dt*D_i/(h*h); double Ca0 = 140e-3; double CaSR0 = 1.3e3; double* Ca_i; Ca_i=(double*)malloc(len*sizeof(double)); for ( i = 0; i < len; i += 1 ) { Ca_i[i]=Ca0; } double alpha_SR = dt*D_SR/(h*h); double* Ca_SR; Ca_SR=(double*)malloc(len*sizeof(double)); for ( i = 0; i < len; i += 1 ) { Ca_SR[i]=CaSR0; } double k_on_CMDN = 34e-3; double k_off_CMDN = 238e-3; double CMDN_tot = 24; double alpha_CMDN = dt*D_CMDN/(h*h); double k_on_ATP = 255e-3; double k_off_ATP = 45; double ATP_tot = 455; double alpha_ATP = dt*D_ATP/(h*h); double k_on_Fluo = 110e-3; double k_off_Fluo = 110e-3; double Fluo_tot = 25; // 25; double alpha_Fluo = dt*D_Fluo/(h*h); double k_on_TRPN = 32.7e-3; double k_off_TRPN = 19.6e-3; // 26.16e-3; double TRPN_tot = 70; // 50; double k_on_CSQN = 102e-3; double k_off_CSQN = 65; double CSQN_tot = 30e3; double alpha[7]; double k_on[7]; double k_off[7]; double B_tot[7]; alpha[0]=alpha_i; alpha[1]=alpha_SR; alpha[2]=alpha_CMDN; alpha[3]=alpha_ATP; alpha[4]=alpha_Fluo; alpha[5]=0; alpha[6]=0; k_on[0]=0 ; k_on[1]= 0; k_on[2]= k_on_CMDN; k_on[3]=k_on_ATP ; k_on[4]=k_on_Fluo ; k_on[5]=k_on_TRPN; k_on[6]=k_on_CSQN; k_off[0]=0 ; k_off[1]= 0; k_off[2]=k_off_CMDN; k_off[3]=k_off_ATP; k_off[4]=k_off_Fluo; k_off[5]=k_off_TRPN; k_off[6]=k_off_CSQN; B_tot[0]=0 ; B_tot[1]= 0; B_tot[2]=CMDN_tot ; B_tot[3]=ATP_tot ; B_tot[4]=Fluo_tot ; B_tot[5]=TRPN_tot; B_tot[6]=CSQN_tot; // Calculate steady state IC for the buffers based on Ca_i ... 
double Ca_CMDN0=B_tot[2]*Ca0/(Ca0+k_off[2]/k_on[2]); double Ca_ATP0 =B_tot[3]*Ca0/(Ca0+k_off[3]/k_on[3]); double Ca_Fluo0=B_tot[4]*Ca0/(Ca0+k_off[4]/k_on[4]); double Ca_TRPN0=B_tot[5]*Ca0/(Ca0+k_off[5]/k_on[5]); // and Ca_SR: double Ca_CSQN0 = CSQN_tot*Ca_SR[0]/(Ca_SR[0] + k_off_CSQN/k_on_CSQN); double init_values[7] = {Ca0, CaSR0, Ca_CMDN0, Ca_ATP0, Ca_Fluo0, Ca_TRPN0, Ca_CSQN0}; //printf("%f %f %f %f %f \n ", Ca_ATP0, Ca_CMDN0, Ca_Fluo0, Ca_TRPN0, Ca_CSQN0); if(my_id==0) printf("cubiod_c: h:%d size_x:%d size_y:%d size_z:%d dt:%f, T:%f, TimeStep:%d, DT:%f outfilenum:%d, x_slice_num:%d, use_failing:%d, div_y:%d, save_binary:%d \n", h, size_x, size_y, size_z,dt,T, (int)(T/dt),DT,(int)(T/DT)*save_data,x_slice_num,use_failing, div_y,save_binary_file); // Allocate the data structure for the solution double *Ca_ATP ; double *Ca_CMDN ; double *Ca_Fluo ; double *Ca_TRPN ; double *Ca_CSQN ; Ca_ATP =(double*)malloc(len*sizeof(double)); Ca_CMDN=(double*)malloc(len*sizeof(double)); Ca_Fluo=(double*)malloc(len*sizeof(double)); Ca_TRPN=(double*)malloc(len*sizeof(double)); Ca_CSQN=(double*)malloc(len*sizeof(double)); for ( i = 0; i < len; i += 1 ) { Ca_ATP[i] = Ca_ATP0; Ca_CMDN[i] = Ca_CMDN0; Ca_Fluo[i] = Ca_Fluo0; Ca_TRPN[i] = Ca_TRPN0; Ca_CSQN[i] = Ca_CSQN0; } double* C0[7]; double* C1[7]; double* C_temp; C0[0]=(double*)malloc(len*sizeof(double)); C1[0]=Ca_i; memcpy(C0[0],C1[0],len*sizeof(double)); C0[1]=(double*)malloc(len*sizeof(double)); C1[1]=Ca_SR; memcpy(C0[1],C1[1],len*sizeof(double)); C0[2]=(double*)malloc(len*sizeof(double)); C1[2]=Ca_CMDN; memcpy(C0[2],C1[2],len*sizeof(double)); C0[3]=(double*)malloc(len*sizeof(double)); C1[3]=Ca_ATP; memcpy(C0[3],C1[3],len*sizeof(double)); C0[4]=(double*)malloc(len*sizeof(double)); C1[4]=Ca_Fluo; memcpy(C0[4],C1[4],len*sizeof(double)); C0[5]=(double*)malloc(len*sizeof(double)); C1[5]=Ca_TRPN; memcpy(C0[5],C1[5],len*sizeof(double)); C0[6]=(double*)malloc(len*sizeof(double)); C1[6]=Ca_CSQN; memcpy(C0[6],C1[6],len*sizeof(double)); //Ca = [[Ca_i.copy(), Ca_i ], // [Ca_SR.copy(), Ca_SR ], // [Ca_CMDN.copy(), Ca_CMDN], // [Ca_ATP.copy(), Ca_ATP ], // [Ca_Fluo.copy(), Ca_Fluo], // [Ca_TRPN, Ca_TRPN], // [Ca_CSQN, Ca_CSQN]] double gamma = 0.02; // SR volume fraction int cai=0; int sri=1; // int cmdni=2; // int atpi=3; // int fluoi=4; // int trpni=5; int csqni=6; double fraction[7]={1,1,1,1,1,1,1}; fraction[1]=gamma; fraction[6]=gamma; // Ryr conductance: double k_s = (Vfraction)*150/2; // 1/ms, based on 0.5pA of Ca2+ into (30nm)^3. double K = exp(-k_s*dt*(1+1/gamma)); // factor need in the integration below if(my_id==0){ printf("dt = dt: %e\n", dt); printf("k_s = (Vfraction)*150/2: %e\n", k_s); printf("K = exp(-k_s*dt*(1+1/gamma)): %e\n", K); } double t=0; int counter=0; // int mean[7]; time_main-=timing(); FILE *fpdata; char meanfile[200]; if (save_hdf5) sprintf(meanfile,"%s_mean.txt", h5_basename); else sprintf(meanfile,"%s/mean.txt", outdirname); if(!my_id){ if(save_data){ if ((fpdata=fopen(meanfile, "w"))==NULL) { printf("failed open output file "); printf("%s", meanfile); printf(" ! \n "); exit(0); } } } // H5 Setup if (save_hdf5) { char h5_data_file[200]; // Set up file access property list with parallel I/O access // property list identifier hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS); H5Pset_fapl_mpio(plist_id, comm, info); sprintf(h5_data_file, "%s.h5", h5_basename); // Create a new file collectively and release property list identifier. 
h5_file_id = H5Fcreate(h5_data_file, H5F_ACC_TRUNC, H5P_DEFAULT, plist_id); H5Pclose(plist_id); const int data_rank = 2; hsize_t dimsf[2] = {size_y, size_z}; /* dataset dimensions */ hsize_t chunk_dims[2] = {ny, nz}; /* chunk dimensions */ // Offset into dataset based on the MPI coord from MPI_Cart_get hsize_t h5_offset[2] = {coord[1]*nz, coord[0]*ny}; hsize_t h5_count[2] = {1, 1}; hsize_t data_size=ny*nz; h5_data = (hdf5_data_type*)malloc(data_size*sizeof(hdf5_data_type)); if (!my_id) { printf("Total data size per species: %zu, %zu\n", dimsf[0], dimsf[1]); printf("Total data size per chunk per species: %zu, %zu\n", chunk_dims[0], chunk_dims[1]); } printf("rank %d | h5 offset [%zu, %zu]\n", my_id, h5_offset[0], h5_offset[1]); // Create data space for the datatype limits hsize_t dims = 1; hid_t attr_space = H5Screate_simple(1, &dims, NULL); // Create a time attribute hid_t limit_id = H5Acreate(h5_file_id, "data_type_size", H5T_NATIVE_DOUBLE, attr_space, H5P_DEFAULT, H5P_DEFAULT); // Write the attribute data double data_type_size = (double)H5_DATA_SIZE; herr_t status = H5Awrite(limit_id, H5T_NATIVE_DOUBLE, &data_type_size); // Cleanup H5Aclose(limit_id); H5Sclose(attr_space); // Save hard coded data ranges for (i=0; i<NUM_SAVE_SPECIES; i++) { // Get species int species = save_species[i]; // Create data scale attribute sprintf(hdf5_dataset_name, "%s_scale", species_names[species]); // Create data space for the species scale attribute hsize_t dims = 1; hid_t attr_space = H5Screate_simple(1, &dims, NULL); // Create a time attribute hid_t scale_id = H5Acreate(h5_file_id, hdf5_dataset_name, H5T_NATIVE_DOUBLE, attr_space, H5P_DEFAULT, H5P_DEFAULT); // Write the attribute data herr_t status = H5Awrite(scale_id, H5T_NATIVE_DOUBLE, &init_values[species]); // Cleanup H5Aclose(scale_id); H5Sclose(attr_space); // Create init value attribute sprintf(hdf5_dataset_name, "%s_init", species_names[species]); // Create data space for the species init attribute dims = 1; attr_space = H5Screate_simple(1, &dims, NULL); // Create a time attribute hid_t init_id = H5Acreate(h5_file_id, hdf5_dataset_name, H5T_NATIVE_DOUBLE, attr_space, H5P_DEFAULT, H5P_DEFAULT); // Write the attribute data status = H5Awrite(init_id, H5T_NATIVE_DOUBLE, &init_values[species]); // Cleanup H5Aclose(init_id); H5Sclose(attr_space); } } double* yz_sbuf0; double* yz_rbuf0; double* xz_sbuf0; double* xz_rbuf0; double* xy_sbuf0; double* xy_rbuf0; double* yz_sbuf1; double* yz_rbuf1; double* xz_sbuf1; double* xz_rbuf1; double* xy_sbuf1; double* xy_rbuf1; yz_sbuf0=(double*)mpi_malloc(my_id,5*ny*nz*sizeof(double)); xz_sbuf0=(double*)mpi_malloc(my_id,5*nx*nz*sizeof(double)); xy_sbuf0=(double*)mpi_malloc(my_id,5*nx*ny*sizeof(double)); yz_sbuf1=(double*)mpi_malloc(my_id,5*ny*nz*sizeof(double)); xz_sbuf1=(double*)mpi_malloc(my_id,5*nx*nz*sizeof(double)); xy_sbuf1=(double*)mpi_malloc(my_id,5*nx*ny*sizeof(double)); yz_rbuf0=(double*)mpi_malloc(my_id,5*ny*nz*sizeof(double)); xz_rbuf0=(double*)mpi_malloc(my_id,5*nx*nz*sizeof(double)); xy_rbuf0=(double*)mpi_malloc(my_id,5*nx*ny*sizeof(double)); yz_rbuf1=(double*)mpi_malloc(my_id,5*ny*nz*sizeof(double)); xz_rbuf1=(double*)mpi_malloc(my_id,5*nx*nz*sizeof(double)); xy_rbuf1=(double*)mpi_malloc(my_id,5*nx*ny*sizeof(double)); #ifdef __PAPI__ if ( PAPI_start( EventSet ) != PAPI_OK){ printf("PAPI_read_counters failed\n"); } #endif //settime //T=1000*dt; //for ( T = 0; T < TimeStep; T += 1 ) int t_counter=0; while(t<T) //while(0) { t+=dt; t_counter++; time_comm-=timing(); updateBound(C0[0], C0[1], C0[2], C0[3], 
C0[4], t_counter, nx0, ny0, nz0, yz_sbuf0,yz_rbuf0, xz_sbuf0,xz_rbuf0, xy_sbuf0,xy_rbuf0, yz_sbuf1,yz_rbuf1, xz_sbuf1,xz_rbuf1, xy_sbuf1,xy_rbuf1, NeighBor, ar_status,ar_send_req,ar_recv_req, comm, comm3d); time_comm+=timing(); // Diffusion update time_conc-=timing(); // Change to use a faster computing function compute_pde_ode(nx0, ny0, nz0, dt, gamma, 1e-4, alpha, B_tot, k_on, k_off, C0, C1, div_y); // for ( i = 0; i < 5; i += 1 ) { // laplace3D(nx0,ny0,nz0,C0[i],nx1,ny1,nz1,C1[i],alpha[i]); // } // for ( i = 2; i < 6; i += 1 ) { // reaction3D(nx1,ny1,nz1,C1[cai],nx1,ny1,nz1,C1[i],B_tot[i],k_on[i],k_off[i],dt); // } // serca3D(nx1,ny1,nz1, C1[cai],nx1,ny1,nz1, C1[sri], dt, gamma, 1.0); time_conc+=timing(); // Update at RyRs, one at the time time_ryr-=timing(); update_ryr(h_scale, nx0, ny0, nz0, C1[cai], C1[sri], C1[csqni], C1[0],C1[2],C1[3],C1[4], k_on_CSQN, k_off_CSQN,CSQN_tot, gamma, K, dt, ryr_len, i0_ryr, i1_ryr, i2_ryr, csqn_len, i0_csqn, i1_csqn, i2_csqn, cleft_len, i0_cleft, i1_cleft, i2_cleft,cleft_nb, states0, states1); time_ryr+=timing(); double sum_c_i_root[7]; double sum_c_i[7]; double cai_min; double cai_min_root=0.0; double cai_max; double cai_max_root=1.0; double sm; double ca[8]; char caoutfile[100]; if ((fmod(t,DT)<dt)||(t==dt)){ time_io-=timing(); for(idx=0; idx<7; idx++){ sum_c_i[idx]=0.0; for ( i = 1; i <= nx; i += 1 ) for ( j = 1; j <= ny; j += 1 ) for ( k = 1; k <= nz; k += 1 ) sum_c_i[idx]+=C1[idx][i*ny0*nz0+j*nz0+k]; } cai_min=my_min(C1[cai],len); cai_max=my_max(C1[cai],len); /* reduce operation comm*/ MPI_Reduce(&sum_c_i[0], &sum_c_i_root[0], 7, MPI_DOUBLE, MPI_SUM, 0, comm); MPI_Reduce(&cai_min, &cai_min_root, 1, MPI_DOUBLE, MPI_MIN, 0, comm); MPI_Reduce(&cai_max, &cai_max_root, 1, MPI_DOUBLE, MPI_MAX, 0, comm); if(!my_id){ sm = 0; ca[0] = t; if(save_data) fprintf(fpdata,"%f ", ca[0]); for(idx=0; idx<7; idx++){ sm += fraction[idx]*sum_c_i_root[idx]; ca[idx+1] = sum_c_i_root[idx]/((double)nx*x_domains*(double)ny*y_domains*(double)nz*z_domains); if(DB_PF){ printf("ca[%d]: %f , sum : %f, nx ny nz: %d %d %d \n",idx+1, ca[idx+1], sum_c_i_root[idx],nx*x_domains,ny*y_domains,nz*z_domains); } if(save_data) fprintf(fpdata,"%f ", ca[idx+1]); } if(save_data) fprintf(fpdata,"\n "); printf("%3d, %.3f, %3.2f, %7.2f, %3.2f, %4.2f, %.2f \n", counter, t, ca[1], ca[2], cai_min_root, cai_max_root, sm); } if(save_data && in_midx_slice) { // If saving in hdf5 if (save_hdf5) { hsize_t dimsf[2] = {size_y, size_z}; /* dataset dimensions */ hsize_t chunk_dims[2] = {ny, nz}; /* chunk dimensions */ hsize_t h5_offset[2] = {coord[1]*nz, coord[0]*ny}; hsize_t h5_count[2] = {1, 1}; // Create group name sprintf(hdf5_group_name, "/data_%d", counter); hid_t group_id = H5Gcreate(h5_file_id, hdf5_group_name, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); // Create data space for the time attribute hsize_t dims = 1; hid_t attr_space = H5Screate_simple(1, &dims, NULL); // Create a time attribute hid_t time_id = H5Acreate(group_id, "time", H5T_NATIVE_DOUBLE, attr_space, H5P_DEFAULT, H5P_DEFAULT); // Write the attribute data double time_data = counter*DT; herr_t status = H5Awrite(time_id, H5T_NATIVE_DOUBLE, &time_data); // Cleanup H5Aclose(time_id); H5Sclose(attr_space); for (i=0; i<NUM_SAVE_SPECIES; i++) { // Get species int species = save_species[i]; sprintf(hdf5_dataset_name, "%s/%s", hdf5_group_name, species_names[species]); // file and dataset identifiers hid_t filespace = H5Screate_simple(2, dimsf, NULL); hid_t memspace = H5Screate_simple(2, chunk_dims, NULL); // Create chunked dataset. 
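/*
 * Added comment: the dataset is laid out with one chunk per MPI rank. dimsf
 * spans the full (size_y, size_z) slice, chunk_dims is the local (ny, nz)
 * tile, and the hyperslab offset below comes from the rank's Cartesian
 * coordinates, so the collective H5Dwrite() further down stores each rank's
 * tile of the mid-x slice without overlap.
 */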
hid_t plist_id = H5Pcreate(H5P_DATASET_CREATE); H5Pset_chunk(plist_id, 2, chunk_dims); // Create compression filter (Not supported in parallel yet...) //unsigned int gzip_level = 9; //herr_t status = H5Pset_filter(plist_id, H5Z_FILTER_DEFLATE, // H5Z_FLAG_OPTIONAL, 1, &gzip_level); hid_t dset_id = H5Dcreate(h5_file_id, hdf5_dataset_name, H5T_DATA_TYPE, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); H5Pclose(plist_id); H5Sclose(filespace); // Select hyperslab in the file. filespace = H5Dget_space(dset_id); status = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, h5_offset, NULL, h5_count, chunk_dims); // Copy data to h5_data transfer_hdf5_data(h5_data, &(C0[species][ny0*nz0*mid_coord_x]), &(C1[species][ny0*nz0*mid_coord_x]), init_values[species], chunk_dims); // Create property list for collective dataset write. plist_id = H5Pcreate(H5P_DATASET_XFER); H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE); status = H5Dwrite(dset_id, H5T_DATA_TYPE, memspace, filespace, plist_id, h5_data); // Close/release resources. H5Dclose(dset_id); H5Sclose(filespace); H5Sclose(memspace); H5Pclose(plist_id); } H5Gclose(group_id); } // No HDF5 else { // Get species int species = save_species[i]; for (i=0; i<NUM_SAVE_SPECIES; i++) { sprintf(caoutfile, "%s/Ca%d_T%d_rank%d_%d_%d.np", outdirname, species, counter, coord[2], coord[1], coord[0]); if(save_binary_file) store2Dmatrixfile_double_bin(caoutfile, C1[species], ny0, nz0, mid_coord_x); else store2Dmatrixfile_double_1D(caoutfile, C1[species], ny0, nz0, mid_coord_x); } } } counter += 1; } // # Update Ca for(i=0;i<7;i++){ C_temp=C0[i]; C0[i]=C1[i]; C1[i]=C_temp; } MPI_Waitall(6, ar_send_req, ar_status); } time_main+=timing(); if(my_id==0){ if(save_data) fclose(fpdata); printf("cubiod_c: h:%d size_x:%d size_y:%d size_z:%d dt:%f, T:%f, TimeStep:%d, DT:%f, x_slice_num:%d\n", h, size_x, size_y, size_z,dt,T,(int)(T/dt),DT,x_slice_num); printf("nx0:%d ny0:%d nz0:%d size/array:%7.3f MB total size:%7.3f MB\n", nx0,ny0,nz0,nx0*ny0*nz0*8*1e-6,nx0*ny0*nz0*8*1e-6*12); #ifdef __PAPI__ if ( PAPI_stop( EventSet, res_papi ) != PAPI_OK){ printf("PAPI_accum_counters failed\n"); } for (i = 0; i<NUM_EVENTS; i++){ PAPI_event_code_to_name(Events[i], EventName); printf("PAPI Event name: %s, value: %lld\n", EventName, res_papi[i]); } #endif printf("computing time: %7.3f \n", time_conc); printf("updateryr time: %7.3f \n", time_ryr); printf("communica time: %7.3f \n", time_comm); printf("main time: %7.3f \n", time_main); #ifdef __PAPI__ printf("PAPI Performanc/core: %7.3f GFLOPS\n", res_papi[0]/1e9/time_conc); #endif } if (save_hdf5) { H5Fclose(h5_file_id); free(h5_data); } for(i=0;i<5;i++){ free(C0[i]); free(C1[i]); } free(C0[6]); free(C0[5]); free(i0_ryr); free(i1_ryr); free(i2_ryr); free(i0_csqn); free(i1_csqn); free(i2_csqn); free(i0_cleft); free(i1_cleft); free(i2_cleft); free(cleft_nb); MPI_Finalize(); return 0; } void laplace3D (int nx0, int ny0, int nz0, double* C0, int nx1, int ny1, int nz1, double* C1, double alpha)//, int num_threads) { // Set num threads // omp_set_num_threads(num_threads); // Local variables int i, j, k; double C0_tmp; // Main kernel loop // #pragma omp parallel for private(i, j, k, C0_tmp) //collapse(3) for (i=1; i<nx0-1; i++) { for (j=1; j<ny0-1; j++) { for (k=1; k<nz0-1; k++) { // Main kernel C0_tmp = -6*C0[i*nz0*ny0+j*nz0+k] + C0[(i-1)*nz0*ny0+j*nz0+k] + C0[(i+1)*nz0*ny0+j*nz0+k] + C0[i*nz0*ny0+(j-1)*nz0+k] + C0[i*nz0*ny0+(j+1)*nz0+k] + C0[i*nz0*ny0+j*nz0+k-1] + C0[i*nz0*ny0+j*nz0+k+1]; // Put value back into return array with offset to indices 
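/* Added comment: C0_tmp holds the 7-point discrete Laplacian of C0 at (i,j,k);
   the statement below is one explicit Euler diffusion step,
   C1 = C0 + alpha * lap(C0), where alpha is presumably the scaled diffusion
   coefficient D*dt/h^2 supplied by the caller (not shown here). */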
C1[i*nz1*ny1+j*nz1+k] = C0[i*nz1*ny1+j*nz1+k] + C0_tmp*alpha; } } } } void reaction3D (int nx0, int ny0, int nz0, double* Ca, int nx1, int ny1, int nz1, double* buff, double B_tot, double k_on, double k_off, double dt)//, int num_threads) { // Set num threads // omp_set_num_threads(num_threads); // Local variables int i, j, k; double J; // Use pointers reducing indexing into memory to once double* Ca_ijk; double* buff_ijk; // Main kernel loop // #pragma omp parallel for private(i, j, k, J, Ca_ijk, buff_ijk) //collapse(3) for (i=1; i<nx0-1; i++) { for (j=1; j<ny0-1; j++) { for (k=1; k<nz0-1; k++) { // Main kernel Ca_ijk = &Ca[i*nz0*ny0+j*nz0+k]; buff_ijk = &buff[i*nz0*ny0+j*nz0+k]; J = k_on*(B_tot - *buff_ijk)*(*Ca_ijk) - k_off*(*buff_ijk); *Ca_ijk -= dt*J; *buff_ijk += dt*J; } } } } void serca3D (int nx0, int ny0, int nz0, double* Ca_i, int nx1, int ny1, int nz1, double* Ca_SR, double dt, double gamma, double fudge)//, int num_threads) { // Set num threads // omp_set_num_threads(num_threads); // Local variables int i, j, k; double J; // Use pointers reducing indexing into memory to once double Ca_i2_ijk; double Ca_SR2_ijk; // Main kernel loop // #pragma omp parallel for private(i, j, k, J, Ca_i2_ijk, Ca_SR2_ijk) //collapse(3) for (i=1; i<nx0-1; i++) { for (j=1; j<ny0-1; j++) { for (k=1; k<nz0-1; k++) { // Main kernel Ca_i2_ijk = Ca_i[i*nz0*ny0+j*nz0+k]; Ca_SR2_ijk = Ca_SR[i*nz0*ny0+j*nz0+k]; Ca_i2_ijk *= Ca_i2_ijk; Ca_SR2_ijk *= Ca_SR2_ijk; J = fudge*(570997802.885875*Ca_i2_ijk - 0.0425239333622699*Ca_SR2_ijk)/(106720651.206402*Ca_i2_ijk + 182.498197548666*Ca_SR2_ijk + 5.35062954944879); Ca_i[i*nz0*ny0+j*nz0+k] -= dt*J; Ca_SR[i*nz0*ny0+j*nz0+k] += dt*J/gamma; } } } } void update_ryr(int h_scale,int nx0, int ny0, int nz0, double* Ca_i, double* Ca_SR, double* Ca_CSQN, double* C10, double* C12, double* C13, double* C14, double k_on_CSQN, double k_off_CSQN, double CSQN_tot, double gamma, double K, double dt, int ryr_len, int* i0_ryr, int* i1_ryr, int* i2_ryr, int csqn_len, int* i0_csqn, int* i1_csqn, int* i2_csqn, int cleft_len, int* i0_cleft, int* i1_cleft, int* i2_cleft,int* cleft_nb, int* states0, int* states1) { int i,j; int x_copy_from; int x,y,z; int nb_y,nb_z; int idx,idx_cleft,idx_csqn; double J; int open; double c0,c1; //extend csqn on x direction // for(j=(1-h_scale);j<h_scale;j++){ //extend cdqn on x+ direction for 30nm for(j=0;j<h_scale;j++){ for(i=0;i<csqn_len;i+=1){ x=i0_csqn[i]+j; #ifdef DEBUG_TEST if((x<0)||x>(nx0-1)) { printf("wrong csqn x index\n"); exit(0); } #endif y=i1_csqn[i]; z=i2_csqn[i]; idx=x*ny0*nz0+y*nz0+z; //CSQN step: J = k_on_CSQN*(CSQN_tot - Ca_CSQN[idx])*Ca_SR[idx] - k_off_CSQN*Ca_CSQN[idx]; Ca_SR[idx] -= dt*J; Ca_CSQN[idx] += dt*J; } } //add no_flux boundary by copy the neighbour's value on no_flux voxel //add x+ front no-flux plane on ryr with +1 offset, and copy from -1 x-plane(where ryr is on) j=1; x_copy_from=-1; for(i=0;i<csqn_len;i+=1){ x=i0_csqn[i]+j; #ifdef DEBUG_TEST if((x<0)||x>(nx0-1)) { printf("wrong csqn x index\n"); exit(0); } #endif y=i1_csqn[i]; z=i2_csqn[i]; idx_cleft=x*ny0*nz0+y*nz0+z; idx_csqn =(x+x_copy_from)*ny0*nz0+y*nz0+z; C10[idx_cleft]=C10[idx_csqn]; C12[idx_cleft]=C12[idx_csqn]; C13[idx_cleft]=C13[idx_csqn]; C14[idx_cleft]=C14[idx_csqn]; } //add x+ back no-flux plane on ryr with h_scale offset, and copy from +1 x-plane(outside of csqn) if(h_scale==2)//15 nm j=h_scale+1;//guarantee that there is at least one voxel inner the no-flux boundary else//5nm 3mn 1nm, j=h_scale; x_copy_from=+1; for(i=0;i<csqn_len;i+=1){ x=i0_csqn[i]+j; #ifdef 
DEBUG_TEST if((x<0)||x>(nx0-1)) { printf("wrong csqn x index\n"); exit(0); } #endif y=i1_csqn[i]; z=i2_csqn[i]; idx_cleft=x*ny0*nz0+y*nz0+z; idx_csqn =(x+x_copy_from)*ny0*nz0+y*nz0+z; C10[idx_cleft]=C10[idx_csqn]; C12[idx_cleft]=C12[idx_csqn]; C13[idx_cleft]=C13[idx_csqn]; C14[idx_cleft]=C14[idx_csqn]; } //extend y-z plane no_flux boundary along x+ direction with +1 offset and copy value from outside of CSQN by cleft_nb index int k; if(h_scale==2)//15 nm k=1;//guarantee that there is at least one voxel inner the no-flux boundary else//5nm 3mn 1nm, k=0; for(j=2;j<h_scale+k;j++){ for(i=0;i<cleft_len;i+=1){ x=i0_cleft[i]+j; #ifdef DEBUG_TEST if((x<0)||x>(nx0-1)) { printf("wrong csqn x index\n"); exit(0); } #endif y=i1_cleft[i]; z=i2_cleft[i]; nb_y=cleft_nb[i]/8-1; nb_z=cleft_nb[i]%8-1; idx_cleft=x*ny0*nz0+y*nz0+z; idx_csqn =x*ny0*nz0+(y+nb_y)*nz0+z+nb_z; C10[idx_cleft]=C10[idx_csqn]; C12[idx_cleft]=C12[idx_csqn]; C13[idx_cleft]=C13[idx_csqn]; C14[idx_cleft]=C14[idx_csqn]; } } //add x- front no-flux plane on ryr with -h_scale/2(15nm) offset, and copy from +1 x-plane(t-tubule) j=0-h_scale/2; x_copy_from=1; for(i=0;i<csqn_len;i+=1){ x=i0_csqn[i]+j; #ifdef DEBUG_TEST if((x<0)||x>(nx0-1)) { printf("wrong csqn x index\n"); exit(0); } #endif y=i1_csqn[i]; z=i2_csqn[i]; idx_cleft=x*ny0*nz0+y*nz0+z; idx_csqn =(x+x_copy_from)*ny0*nz0+y*nz0+z; C10[idx_cleft]=C10[idx_csqn]; C12[idx_cleft]=C12[idx_csqn]; C13[idx_cleft]=C13[idx_csqn]; C14[idx_cleft]=C14[idx_csqn]; } //add x- back no-flux plane on ryr with -h_scale/2+1 offset, and copy from -1 x-plane(t-tubule) /* if(h_scale=2) j=0-h_scale/2-h_scale; else j=0-h_scale/2-h_scale+1; */ /* how thick should t-tubule be? now, just set it 2 lines on x- direction */ // j=0-h_scale/2-h_scale-1; j=0-h_scale/2-1; x_copy_from=-1; for(i=0;i<csqn_len;i+=1){ x=i0_csqn[i]+j; #ifdef DEBUG_TEST if((x<0)||x>(nx0-1)) { printf("wrong csqn x index\n"); exit(0); } #endif y=i1_csqn[i]; z=i2_csqn[i]; idx_cleft=x*ny0*nz0+y*nz0+z; idx_csqn =(x+x_copy_from)*ny0*nz0+y*nz0+z; C10[idx_cleft]=C10[idx_csqn]; C12[idx_cleft]=C12[idx_csqn]; C13[idx_cleft]=C13[idx_csqn]; C14[idx_cleft]=C14[idx_csqn]; } /* how thick should t-tubule be? 
*/ /* //extend y-z plane no_flux boundary along x- direction with +1 offset and copy value from outside of CSQN by cleft_nb index int k; if(h_scale==2)//15 nm k=1;//guarantee that there is at least one voxel inner the no-flux boundary else//5nm 3mn 1nm, k=0; for(j=0-h_scale/2-1;j>0-h_scale/2-h_scale+1-k;j--){ for(i=0;i<cleft_len;i+=1){ x=i0_cleft[i]+j; #ifdef DEBUG_TEST if((x<0)||x>(nx0-1)) { printf("wrong csqn x index\n"); exit(0); } #endif y=i1_cleft[i]; z=i2_cleft[i]; nb_y=cleft_nb[i]/8-1; nb_z=cleft_nb[i]%8-1; idx_cleft=x*ny0*nz0+y*nz0+z; idx_csqn =x*ny0*nz0+(y+nb_y)*nz0+z+nb_z; C10[idx_cleft]=C10[idx_csqn]; C12[idx_cleft]=C12[idx_csqn]; C13[idx_cleft]=C13[idx_csqn]; C14[idx_cleft]=C14[idx_csqn]; } } */ for ( i = 0; i < ryr_len; i += 1 ) { x=i0_ryr[i]; y=i1_ryr[i]; z=i2_ryr[i]; idx=x*ny0*nz0+y*nz0+z; // #Continous formulation // #states[:,i] += dt*stern(t, states[:,i], Ca_i[idx]) stern_discrete(dt, &states0[i],&states1[i], Ca_i[idx]); open = states0[i]*(1-states1[i]); // #Exp Euler: // #J_RyR = k*open*(Ca_SR[idx]-Ca_i[idx]) // #Ca_i[idx] += dt*J_RyR // #Ca_SR[idx] -= dt*J_RyR/gamma; // #Analytical update: // K = exp(-k_s*dt*(1+1/gamma)) if (open){ if(DB_PF) printf("open [%d] ryr[%d,%d,%d] \n", i, x, y,z); c0 = (Ca_i[idx] + gamma*Ca_SR[idx])/(1+gamma); c1 = (Ca_i[idx] - Ca_SR[idx])/(1+1/gamma); Ca_i[idx] = c0 + c1*K; Ca_SR[idx] = c0 - c1*K/gamma; } } } void stern(double t, double* y0, double* y1, double Ca){ double m = *y0; double h = *y1; double kim = 0.005; double kom = 0.06; double K_i = 0.01*10; double K_o = 0.01*41.4; double ki = kim/K_i; double ko = kom/(K_o*K_o); double dm = ko*Ca*Ca*(1-m)-kom*m; double dh = ki*Ca*(1-h)-kim*h; *y0=dm; *y1=dh; } void stern_discrete(double dt, int* y0, int* y1, double Ca){ double kim = 0.002; // 1/ms double kom = 1.5; // 0.5 1/ms double kd_i = 20.0; // 20.0 um*ms double kd_o = 0.9; // um*ms^N 0.7, 0.8, 0.9, 1.0 double Ca_ki = Ca/kd_i; double Ca_ko = Ca/kd_o; double ki = Ca_ki*Ca_ki; // (Ca/kd_i)^2 double ko = Ca_ko*Ca_ko*Ca_ko*Ca_ko; // ko = (Ca/kd_o)^4 //double kim = 0.005; // Original: 0.005 //double kom = 0.04; // Original: 0.06 //double ki = Ca*1.5*1e-3; // Original: Ca*0.5*1e-3 //double ko = 1e-6*Ca*Ca*3500; // Original: 1e-6*Ca*Ca*{35,1200,2000,3500} double r; int m, h; m = *y0; if(m==1){ r = my_random(); m = 1 - (r<(dt*kom)); } else { r=my_random(); m = 1*(r<(dt*ko)); } h = *y1; if(h==1){ r = my_random(); h = 1 - (r<(dt*kim)); } else{ r = my_random(); h = 1*(r<(dt*ki)); } *y0=m; *y1=h; } inline double my_random() { double r; double x; // r=(double)(rand()%100000000); // x=(r*1e-8); x=((double)rand())/(double)RAND_MAX; return x; } void store2Dmatrixfile_double_1D(char* outfile, double* ar, int rows, int cols, int x_strid){ FILE *fpdata; int i,j; if ((fpdata=fopen(outfile, "w"))==NULL) { printf("fialed open output file "); printf("%s",outfile); printf(" ! \n "); exit(0); } // printf("----Generating list output to "); // printf("%s",outfile); // printf(" file----\n"); for(i=0;i<rows;i++) { for(j=0;j<cols;j++) { fprintf(fpdata,"%.9e ", ar[x_strid*rows*cols+i*cols+j]); } fprintf(fpdata,"\n"); } fclose(fpdata); return; } void store2Dmatrixfile_double_bin(char* outfile, double* ar, int rows, int cols, int x_strid) { FILE *fpdata; int i,j; if ((fpdata=fopen(outfile, "wb"))==NULL) { printf("failed open output file "); printf("%s",outfile); printf(" ! 
\n "); exit(0); } fwrite(&ar[x_strid*rows*cols],sizeof(double),rows*cols,fpdata); fclose(fpdata); return; } void transfer_hdf5_data(hdf5_data_type* h5_data, double* ar0, double* ar1, double scale_value, hsize_t* chunk_dims) { int i,j; int rows=chunk_dims[0]; int cols=chunk_dims[1]; // Transfer data from padded ar to stripped data for(i=0;i<rows;i++) { for(j=0;j<cols;j++) { double rel_data_diff = (ar1[i*(cols+2)+j+1]-ar0[i*(cols+2)+j+1])/scale_value; h5_data[i*cols+j] = (hdf5_data_type)round(rel_data_diff*H5_DATA_LIMIT_1); } } } void store2Dmatrixfile_int_1D(char* outfile, int* ar, int rows, int cols){ FILE *fpdata; int i,j; if ((fpdata=fopen(outfile, "w"))==NULL) { printf("failed open output file "); printf("%s",outfile); printf(" ! \n "); exit(0); } printf("----Generating list output to "); printf("%s",outfile); printf(" file----\n"); for(i=0;i<rows;i++) { for(j=0;j<cols;j++) fprintf(fpdata,"%d ",ar[i*cols+j]); fprintf(fpdata,"\n"); } fclose(fpdata); return; } double my_min(double* ar, int len) { double min=ar[0]; int i; for ( i = 0; i < len; i += 1 ) { if(ar[i]<min) min=ar[i]; } return min; } double my_max(double* ar, int len) { double max=ar[0]; int i; for ( i = 0; i < len; i += 1 ) { if(ar[i]>max) max=ar[i]; } return max; } double timing(){ double time; struct timeval timmer; gettimeofday(&timmer,NULL); time = 1000000*timmer.tv_sec + timmer.tv_usec; time /= 1000000; return time; } int load_indices_serial(int nx, int ny, int nz, int h, int** i0_ryr, int** i1_ryr, int** i2_ryr, int* ryr_len, int** i0_csqn, int** i1_csqn, int** i2_csqn, int* csqn_len, int** i0_cleft, int** i1_cleft, int** i2_cleft, int** cleft_nb, int* cleft_len, int x_slice_mid, int x_slice_width, int x_slice_num, int use_failing) { int i,j,k; int nx_old; int ny_old; int nz_old; nx_old=nx; ny_old=ny; nz_old=nz; // Scale nx, xy, nz in terms of RyR if(30%h!=0){ printf("30 must be divisible by h!"); exit(1); } int h_scale; h_scale = 30/h; nx = nx/h_scale; ny = ny/h_scale; nz = nz/h_scale; // All CaRU placed mid-sarcomere // int mid_x = (nx+1)/2; // load RyR indices from file int* i1; int* i2; int i1_len; int i2_len; char i_RyR_indices_name[200]; char j_RyR_indices_name[200]; sprintf(i_RyR_indices_name, "i_RyR_indices%s.dat", use_failing ? "_failing" : ""); sprintf(j_RyR_indices_name, "j_RyR_indices%s.dat", use_failing ? 
"_failing" : ""); if (use_failing) printf("Load failing indices"); else printf("Load normal indices"); i1=loadRyRindexfile_int(i_RyR_indices_name, &i1_len); i2=loadRyRindexfile_int(j_RyR_indices_name, &i2_len); // # Only use the subset which are inside the geometry if(i1_len==i2_len) printf("num RyR before reduction: %d\n", i1_len); else printf("num RyR is wrong: i1_len!=i2_len\n"); int* i1_temp; int* i2_temp; int i1_temp_len=0; for ( i = 0; i < i1_len; i += 1 ) { if(i1[i]<ny) i1_temp_len++; } i1_temp=malloc(i1_temp_len*sizeof(int)); i2_temp=malloc(i1_temp_len*sizeof(int)); j=0; for ( i = 0; i < i1_len; i += 1 ) { if(i1[i]<ny){ i1_temp[j]=i1[i]; i2_temp[j]=i2[i]; j++; } } free(i1); free(i2); int i1_ryr_len=0; for ( i = 0; i < i1_temp_len; i += 1 ) { if(i2_temp[i]<nz) i1_ryr_len++; } *i0_ryr=malloc(x_slice_num*i1_ryr_len*sizeof(int)); *i1_ryr=malloc(x_slice_num*i1_ryr_len*sizeof(int)); *i2_ryr=malloc(x_slice_num*i1_ryr_len*sizeof(int)); j=0; for ( i = 0; i < i1_temp_len; i += 1 ) { if(i2_temp[i]<nz){ for(k=0; k < x_slice_num; k++){ (*i1_ryr)[k*i1_ryr_len+j]=i1_temp[i]; (*i2_ryr)[k*i1_ryr_len+j]=i2_temp[i]; } j++; } } free(i1_temp); free(i2_temp); // Scale indices and move to center of macro voxel for ( i = 0; i < i1_ryr_len; i += 1 ) { for(k=0; k < x_slice_num; k++){ (*i0_ryr)[k*i1_ryr_len+i] = k*x_slice_width+x_slice_mid; //for those ryr just on 0 boundary, avoid to subtracting their coords to negative if((*i1_ryr)[k*i1_ryr_len+i]>0) (*i1_ryr)[k*i1_ryr_len+i] = (*i1_ryr)[k*i1_ryr_len+i]*h_scale - floor((double)h_scale/2); else (*i1_ryr)[k*i1_ryr_len+i] = (*i1_ryr)[k*i1_ryr_len+i]*h_scale; if((*i2_ryr)[k*i1_ryr_len+i]>0) (*i2_ryr)[k*i1_ryr_len+i] = (*i2_ryr)[k*i1_ryr_len+i]*h_scale - floor((double)h_scale/2); else (*i2_ryr)[k*i1_ryr_len+i] = (*i2_ryr)[k*i1_ryr_len+i]*h_scale; } } *ryr_len=i1_ryr_len*x_slice_num; // load CSQN indices from file char i_csqn_indices_name[200]; char j_csqn_indices_name[200]; sprintf(i_csqn_indices_name, "i_csqn_indices%s.dat", use_failing ? "_failing" : ""); sprintf(j_csqn_indices_name, "j_csqn_indices%s.dat", use_failing ? 
"_failing" : ""); i1 = loadRyRindexfile_int(i_csqn_indices_name, &i1_len); i2 = loadRyRindexfile_int(j_csqn_indices_name, &i2_len); if(i1_len==i2_len) printf("num CSQN before reduction: %d\n", i1_len); else printf("num CSQN is wrong: i1_len!=i2_len\n"); //# Only use the subset which are inside the geometry // i1_csqn = i1[i2<nz]*h_scale // i2_csqn = i2[i2<nz]*h_scale // i0_csqn = np.ones(len(i1_csqn), dtype=int)*mid_x*h_scale i1_temp_len=0; for ( i = 0; i < i1_len; i += 1 ) { if(i1[i]<ny) i1_temp_len++; } i1_temp=malloc(i1_temp_len*sizeof(int)); i2_temp=malloc(i1_temp_len*sizeof(int)); j=0; for ( i = 0; i < i1_len; i += 1 ) { if(i1[i]<ny){ i1_temp[j]=i1[i]; i2_temp[j]=i2[i]; j++; } } free(i1); free(i2); int i1_csqn_len=0; for ( i = 0; i < i1_temp_len; i += 1 ) { if(i2_temp[i]<nz) i1_csqn_len++; } *i0_csqn=malloc(x_slice_num*i1_csqn_len*sizeof(int)); *i1_csqn=malloc(x_slice_num*i1_csqn_len*sizeof(int)); *i2_csqn=malloc(x_slice_num*i1_csqn_len*sizeof(int)); j=0; for ( i = 0; i < i1_temp_len; i += 1 ) { if(i2_temp[i]<nz){ for(k=0; k < x_slice_num; k++){ (*i1_csqn)[k*i1_csqn_len+j]=i1_temp[i]; (*i2_csqn)[k*i1_csqn_len+j]=i2_temp[i]; } j++; } } free(i1_temp); free(i2_temp); // Scale indices and move to center of macro voxel for(k=0; k < x_slice_num; k++){ for ( i = 0; i < i1_csqn_len; i += 1 ) { (*i0_csqn)[k*i1_csqn_len+i] = k*x_slice_width+x_slice_mid; (*i1_csqn)[k*i1_csqn_len+i] = (*i1_csqn)[k*i1_csqn_len+i]*h_scale; (*i2_csqn)[k*i1_csqn_len+i] = (*i2_csqn)[k*i1_csqn_len+i]*h_scale; } } int* i0_csqn_list; int* i1_csqn_list; int* i2_csqn_list; int m; int csqn_count; *csqn_len=x_slice_num*i1_csqn_len*h_scale*h_scale; *cleft_len=0;//x_slice_num*i1_csqn_len*4*h_scale; // # Add CSQN to all voxels covered by the original CSQN array if (h_scale > 1){ i0_csqn_list=malloc(x_slice_num*i1_csqn_len*h_scale*h_scale*sizeof(int)); i1_csqn_list=malloc(x_slice_num*i1_csqn_len*h_scale*h_scale*sizeof(int)); i2_csqn_list=malloc(x_slice_num*i1_csqn_len*h_scale*h_scale*sizeof(int)); csqn_count=0; // # Add offsetted versions of the csqn for ( m = 0; m < x_slice_num; m += 1 ) { for ( i = 0; i < h_scale; i += 1 ) { for ( j = 0; j < h_scale; j += 1 ) { for ( k = 0; k < i1_csqn_len; k += 1 ) { i0_csqn_list[csqn_count]=(*i0_csqn)[m*i1_csqn_len+k]; i1_csqn_list[csqn_count]=(*i1_csqn)[m*i1_csqn_len+k]+i; i2_csqn_list[csqn_count]=(*i2_csqn)[m*i1_csqn_len+k]+j; csqn_count++; } } } } if(csqn_count!=(*csqn_len)) { printf("csqn_count wrong\n"); exit(0); } } else { i0_csqn_list=(*i0_csqn); i1_csqn_list=(*i1_csqn); i2_csqn_list=(*i2_csqn); } int a_slice_csqn_len=i1_csqn_len*h_scale*h_scale; BinarySort_two(&i1_csqn_list[0],&i2_csqn_list[0],a_slice_csqn_len); int* y_index; y_index=malloc(ny_old*sizeof(int)); for ( i = 0; i < ny_old; i += 1 ) { y_index[i]=-1; } for ( i = a_slice_csqn_len-1; i >= 0; i -= 1 ) { y_index[i1_csqn_list[i]]=i; } //generate cleft index on Y-Z plane,just wrapping the outside of a group of CSQN, //If cleft is in the outside of the mesh or is already indexed by a CSQN, then it is not a true cleft. //Also generate the relative coordinates for th neighbour of each cleft from which to copy the value. 
//the relative coordinate of y is cleft_nb%8-1, and that of z is cleft_nb/8-1 int coord_y,coord_z; *i1_cleft=(int*)malloc(i1_csqn_len*4*h_scale*sizeof(int)); *i2_cleft=(int*)malloc(i1_csqn_len*4*h_scale*sizeof(int)); *cleft_nb=(int*)malloc(i1_csqn_len*4*h_scale*sizeof(int)); *cleft_len=0; for ( k = 0; k < i1_csqn_len; k += 1 ) { for ( j = 0; j < h_scale; j += 1 ) { //z bottom line coord_y=(*i1_csqn)[k]-1; coord_z=(*i2_csqn)[k]+j; if(IsTrueCleft(coord_y, coord_z, ny_old, nz_old, i1_csqn_list, i2_csqn_list, y_index, a_slice_csqn_len)) { (*i1_cleft)[(*cleft_len)]=coord_y; (*i2_cleft)[(*cleft_len)]=coord_z; //copy from outside // (*cleft_nb)[(*cleft_len)]=0+1; // copy from inside (*cleft_nb)[(*cleft_len)]=16+1; (*cleft_len)++; } //y left line coord_y=(*i1_csqn)[k]+j; coord_z=(*i2_csqn)[k]-1; if(IsTrueCleft(coord_y, coord_z, ny_old, nz_old, i1_csqn_list, i2_csqn_list, y_index, a_slice_csqn_len)) { (*i1_cleft)[(*cleft_len)]=coord_y; (*i2_cleft)[(*cleft_len)]=coord_z; //copy from inside // (*cleft_nb)[(*cleft_len)]=8+0; //copy from inside (*cleft_nb)[(*cleft_len)]=8+2; (*cleft_len)++; } //z top line coord_y=(*i1_csqn)[k]+h_scale; coord_z=(*i2_csqn)[k]+j; if(IsTrueCleft(coord_y, coord_z, ny_old, nz_old, i1_csqn_list, i2_csqn_list, y_index, a_slice_csqn_len)) { (*i1_cleft)[(*cleft_len)]=coord_y; (*i2_cleft)[(*cleft_len)]=coord_z; //copy from outside // (*cleft_nb)[(*cleft_len)]=16+1; // copy from inside (*cleft_nb)[(*cleft_len)]=0+1; (*cleft_len)++; } //y right line coord_y=(*i1_csqn)[k]+j; coord_z=(*i2_csqn)[k]+h_scale; if(IsTrueCleft(coord_y, coord_z, ny_old, nz_old, i1_csqn_list, i2_csqn_list, y_index, a_slice_csqn_len)) { (*i1_cleft)[(*cleft_len)]=coord_y; (*i2_cleft)[(*cleft_len)]=coord_z; //copy from outside // (*cleft_nb)[(*cleft_len)]=8+2; // copy from inside (*cleft_nb)[(*cleft_len)]=8+0; (*cleft_len)++; } } } if((*cleft_len)>i1_csqn_len*4*h_scale){ printf("wrong cleft_len found\n"); exit(0); } //add cleft for multiple 2um x-slices int* i0_cleft_list; int* i1_cleft_list; int* i2_cleft_list; int* cleft_nb_list; i0_cleft_list=malloc(x_slice_num*(*cleft_len)*sizeof(int)); i1_cleft_list=malloc(x_slice_num*(*cleft_len)*sizeof(int)); i2_cleft_list=malloc(x_slice_num*(*cleft_len)*sizeof(int)); cleft_nb_list=malloc(x_slice_num*(*cleft_len)*sizeof(int)); for(k=0; k < x_slice_num; k++){ for ( i = 0; i < (*cleft_len); i += 1 ) { i0_cleft_list[k*(*cleft_len)+i] = k*x_slice_width+x_slice_mid; i1_cleft_list[k*(*cleft_len)+i] = (*i1_cleft)[i]; i2_cleft_list[k*(*cleft_len)+i] = (*i2_cleft)[i]; cleft_nb_list[k*(*cleft_len)+i] = (*cleft_nb)[i]; } } free(*i1_cleft); free(*i2_cleft); free(*cleft_nb); *i0_cleft=i0_cleft_list; *i1_cleft=i1_cleft_list; *i2_cleft=i2_cleft_list; *cleft_nb=cleft_nb_list; *cleft_len=x_slice_num*(*cleft_len); if (h_scale > 1){ free(*i0_csqn); free(*i1_csqn); free(*i2_csqn); *i0_csqn=i0_csqn_list; *i1_csqn=i1_csqn_list; *i2_csqn=i2_csqn_list; } return h_scale; } int IsTrueCleft(int coord_y, int coord_z, int size_y, int size_z, int *i1_csqn, int *i2_csqn, int* y_index, int csqn_len) { int i; //in outside of the mesh if((coord_y<0)||(coord_y>=size_y)||(coord_z<0)||(coord_z>=size_z)) return 0; i=y_index[coord_y]; //not in CSQN if(i<0) return 1; while(i1_csqn[i]==coord_y){ //in CSQN if(i2_csqn[i]==coord_z) return 0; i++; //not in CSQN if(i>=csqn_len) return 1; } return 1; } int idxinrank(int nx, int ny, int nz, int i0, int i1, int i2, int rank, MPI_Comm comm3d) { int coords[3]; MPI_Cart_coords(comm3d,rank,3,coords); if( (i0>=coords[2]*nx)&&((i0<coords[2]+1)*nx)&& 
(i1>=coords[1]*ny)&&((i1<coords[1]+1)*ny)&& (i2>=coords[0]*nz)&&((i2<coords[0]+1)*nz)) { return 1; } else return 0; } int idxbl2rank(int nx, int ny, int nz, int i0, int i1, int i2, int* coords, MPI_Comm comm3d) { int rank=0; coords[2]=i0/nx; coords[1]=i1/ny; coords[0]=i2/nz; MPI_Cart_rank(comm3d,coords,&rank); return rank; } int distr_ryr_csqn_state(int h, int size_x, int size_y, int size_z, int nx, int ny, int nz, int** i0_ryr, int** i1_ryr, int** i2_ryr, int* ryr_len, int** i0_csqn, int** i1_csqn, int** i2_csqn, int* csqn_len, int** i0_cleft, int** i1_cleft, int** i2_cleft,int** cleft_nb, int* cleft_len, int** states0, int** states1, int x_slice_mid,int x_slice_width, int x_slice_num, MPI_Comm comm3d, MPI_Comm comm, int use_failing) { int i,j; int h_scale; int* global_i0_ryr; int* global_i1_ryr; int* global_i2_ryr; int* global_i0_ryr_reorder; int* global_i1_ryr_reorder; int* global_i2_ryr_reorder; int* global_i0_csqn; int* global_i1_csqn; int* global_i2_csqn; int* global_i0_csqn_reorder; int* global_i1_csqn_reorder; int* global_i2_csqn_reorder; int* global_i0_cleft; int* global_i1_cleft; int* global_i2_cleft; int* global_cleft_nb; int* global_i0_cleft_reorder; int* global_i1_cleft_reorder; int* global_i2_cleft_reorder; int* global_cleft_nb_reorder; int global_ryr_len; int global_csqn_len; int global_cleft_len; int* global_states0; int* global_states0_reorder; int* ryr_rec_count; int* ryr_rec_disp; int* ryr_rec_offset; int* csqn_rec_count; int* csqn_rec_disp; int* csqn_rec_offset; int* cleft_rec_count; int* cleft_rec_disp; int* cleft_rec_offset; int my_id; int nproc; int coords[3]; MPI_Comm_rank(comm,&my_id); MPI_Comm_size(comm,&nproc); if(my_id==0){ h_scale=load_indices_serial(size_x, size_y, size_z, h, &global_i0_ryr, &global_i1_ryr, &global_i2_ryr, &global_ryr_len, &global_i0_csqn, &global_i1_csqn,&global_i2_csqn,&global_csqn_len, &global_i0_cleft, &global_i1_cleft, &global_i2_cleft, &global_cleft_nb, &global_cleft_len, x_slice_mid,x_slice_width,x_slice_num, use_failing); printf("load indices from file: h:%d, h_scale:%d, nx:%d, ny:%d, nz:%d, ryr_len:%d, csqn_len:%d cleft_len:%d\n", h, h_scale, nx, ny, nz, global_ryr_len, global_csqn_len, global_cleft_len); if(global_ryr_len>0) global_states0=malloc(global_ryr_len*sizeof(int)); else global_states0=malloc(1*sizeof(int)); for ( i = 0; i < global_ryr_len; i++) global_states0[i]=0; if(global_ryr_len>=23){ for ( i = 1; i < 23; i =i+3 ) global_states0[i]=1; } else { for ( i = 1; i < global_ryr_len ; i =i+10 ) global_states0[i]=1; } if(DB_PF){ for(i=0;i<global_ryr_len;i++){ if(global_states0[i]==1) printf("ryr[%d]:%d,%d,%d \n",i,global_i0_ryr[i],global_i1_ryr[i],global_i2_ryr[i]); } } ryr_rec_count=malloc(nproc*sizeof(int)); csqn_rec_count=malloc(nproc*sizeof(int)); cleft_rec_count=malloc(nproc*sizeof(int)); for (i = 0; i < nproc; i++) { ryr_rec_count[i]=0; csqn_rec_count[i]=0; cleft_rec_count[i]=0; } for(i=0;i<global_ryr_len;i++) { j=idxbl2rank(nx,ny,nz,global_i0_ryr[i],global_i1_ryr[i],global_i2_ryr[i],coords,comm3d); ryr_rec_count[j]++; } for(i=0;i<global_csqn_len;i++) { j=idxbl2rank(nx,ny,nz,global_i0_csqn[i],global_i1_csqn[i],global_i2_csqn[i],coords,comm3d); csqn_rec_count[j]++; } for(i=0;i<global_cleft_len;i++) { j=idxbl2rank(nx,ny,nz,global_i0_cleft[i],global_i1_cleft[i],global_i2_cleft[i],coords,comm3d); cleft_rec_count[j]++; } for (i = 0; i < nproc; i++) { if(DB_PF) printf("ryr_rec_count[%d]: %d\n",i, ryr_rec_count[i]); if(DB_PF) printf("csqn_rec_count[%d]: %d\n",i, csqn_rec_count[i]); if(DB_PF) printf("cleft_rec_count[%d]: %d\n",i, 
cleft_rec_count[i]); } ryr_rec_disp = malloc(nproc*sizeof(int)); csqn_rec_disp = malloc(nproc*sizeof(int)); cleft_rec_disp = malloc(nproc*sizeof(int)); ryr_rec_disp[0] = 0; csqn_rec_disp[0] = 0; cleft_rec_disp[0] = 0; for (i = 1; i < nproc; i++) { ryr_rec_disp[i] = ryr_rec_disp[i-1] + ryr_rec_count[i-1]; csqn_rec_disp[i] = csqn_rec_disp[i-1] + csqn_rec_count[i-1]; cleft_rec_disp[i] = cleft_rec_disp[i-1] + cleft_rec_count[i-1]; } if(global_ryr_len!=ryr_rec_disp[nproc-1]+ryr_rec_count[nproc-1]) { printf("Global ryr Count mismatch %d\n", ryr_rec_disp[nproc-1]+ryr_rec_count[nproc-1]); } if(global_csqn_len!=csqn_rec_disp[nproc-1]+csqn_rec_count[nproc-1]) { printf("Global csqn Count mismatch %d\n", csqn_rec_disp[nproc-1]+csqn_rec_count[nproc-1]); } if(global_cleft_len!=cleft_rec_disp[nproc-1]+cleft_rec_count[nproc-1]) { printf("Global cleft Count mismatch %d\n", cleft_rec_disp[nproc-1]+cleft_rec_count[nproc-1]); } ryr_rec_offset = malloc(nproc*sizeof(int)); csqn_rec_offset = malloc(nproc*sizeof(int)); cleft_rec_offset = malloc(nproc*sizeof(int)); for (i = 0; i < nproc; i++) { ryr_rec_offset[i]=0; csqn_rec_offset[i]=0; cleft_rec_offset[i]=0; } global_i0_ryr_reorder=malloc(global_ryr_len*sizeof(int)); global_i1_ryr_reorder=malloc(global_ryr_len*sizeof(int)); global_i2_ryr_reorder=malloc(global_ryr_len*sizeof(int)); global_states0_reorder=malloc(global_ryr_len*sizeof(int)); for(i=0;i<global_ryr_len;i++) { j=idxbl2rank(nx,ny,nz,global_i0_ryr[i],global_i1_ryr[i],global_i2_ryr[i],coords,comm3d); global_i0_ryr_reorder[ryr_rec_disp[j]+ryr_rec_offset[j]]=global_i0_ryr[i]-coords[2]*nx+1; global_i1_ryr_reorder[ryr_rec_disp[j]+ryr_rec_offset[j]]=global_i1_ryr[i]-coords[1]*ny+1; global_i2_ryr_reorder[ryr_rec_disp[j]+ryr_rec_offset[j]]=global_i2_ryr[i]-coords[0]*nz+1; global_states0_reorder[ryr_rec_disp[j]+ryr_rec_offset[j]]=global_states0[i]; ryr_rec_offset[j]++; } for (i = 0; i < nproc; i++) { if(ryr_rec_offset[i]!=ryr_rec_count[i]) printf("ryr reorder count error on proc %d \n",i); } free(global_i0_ryr); free(global_i1_ryr); free(global_i2_ryr); free(global_states0); free(ryr_rec_offset); //distribute cleft to there own MPI process global_i0_csqn_reorder=malloc(global_csqn_len*sizeof(int)); global_i1_csqn_reorder=malloc(global_csqn_len*sizeof(int)); global_i2_csqn_reorder=malloc(global_csqn_len*sizeof(int)); for(i=0;i<global_csqn_len;i++) { j=idxbl2rank(nx,ny,nz,global_i0_csqn[i],global_i1_csqn[i],global_i2_csqn[i],coords,comm3d); global_i0_csqn_reorder[csqn_rec_disp[j]+csqn_rec_offset[j]]=global_i0_csqn[i]-coords[2]*nx+1; global_i1_csqn_reorder[csqn_rec_disp[j]+csqn_rec_offset[j]]=global_i1_csqn[i]-coords[1]*ny+1; global_i2_csqn_reorder[csqn_rec_disp[j]+csqn_rec_offset[j]]=global_i2_csqn[i]-coords[0]*nz+1; csqn_rec_offset[j]++; } for (i = 0; i < nproc; i++) { if(csqn_rec_offset[i]!=csqn_rec_count[i]) printf("csqn reorder count error on proc %d \n",i); } free(global_i0_csqn); free(global_i1_csqn); free(global_i2_csqn); free(csqn_rec_offset); global_i0_cleft_reorder=malloc(global_cleft_len*sizeof(int)); global_i1_cleft_reorder=malloc(global_cleft_len*sizeof(int)); global_i2_cleft_reorder=malloc(global_cleft_len*sizeof(int)); global_cleft_nb_reorder=malloc(global_cleft_len*sizeof(int)); for(i=0;i<global_cleft_len;i++) { j=idxbl2rank(nx,ny,nz,global_i0_cleft[i],global_i1_cleft[i],global_i2_cleft[i],coords,comm3d); global_i0_cleft_reorder[cleft_rec_disp[j]+cleft_rec_offset[j]]=global_i0_cleft[i]-coords[2]*nx+1; global_i1_cleft_reorder[cleft_rec_disp[j]+cleft_rec_offset[j]]=global_i1_cleft[i]-coords[1]*ny+1; 
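/* Added comment: as with the RyR and CSQN indices above, each global coordinate
   is shifted into the owning rank's local frame (the +1 accounts for the ghost
   layer) and packed contiguously per destination rank, so a single MPI_Scatterv
   per array can later hand every rank exactly its own portion. */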
global_i2_cleft_reorder[cleft_rec_disp[j]+cleft_rec_offset[j]]=global_i2_cleft[i]-coords[0]*nz+1; global_cleft_nb_reorder[cleft_rec_disp[j]+cleft_rec_offset[j]]=global_cleft_nb[i]; cleft_rec_offset[j]++; } for (i = 0; i < nproc; i++) { if(cleft_rec_offset[i]!=cleft_rec_count[i]) printf("cleft reorder count error on proc %d \n",i); } free(global_i0_cleft); free(global_i1_cleft); free(global_i2_cleft); free(global_cleft_nb); free(cleft_rec_offset); } //MPI_Gather(&n_ryr,1,MPI_INT,&states_rec_count[0],1,MPI_INT,0,comm); MPI_Scatter(&ryr_rec_count[0],1,MPI_INT,ryr_len,1, MPI_INT,0,comm); MPI_Scatter(&csqn_rec_count[0],1,MPI_INT,csqn_len,1, MPI_INT,0,comm); MPI_Scatter(&cleft_rec_count[0],1,MPI_INT,cleft_len,1, MPI_INT,0,comm); if(*ryr_len>0){ *i0_ryr=(int*)mpi_malloc(my_id,*ryr_len*sizeof(int)); *i1_ryr=(int*)mpi_malloc(my_id,*ryr_len*sizeof(int)); *i2_ryr=(int*)mpi_malloc(my_id,*ryr_len*sizeof(int)); } else { *i0_ryr=(int*)mpi_malloc(my_id,1*sizeof(int)); *i1_ryr=(int*)mpi_malloc(my_id,1*sizeof(int)); *i2_ryr=(int*)mpi_malloc(my_id,1*sizeof(int)); } if(*csqn_len>0) { *i0_csqn=(int*)mpi_malloc(my_id,*csqn_len*sizeof(int)); *i1_csqn=(int*)mpi_malloc(my_id,*csqn_len*sizeof(int)); *i2_csqn=(int*)mpi_malloc(my_id,*csqn_len*sizeof(int)); } else { *i0_csqn=(int*)mpi_malloc(my_id,1*sizeof(int)); *i1_csqn=(int*)mpi_malloc(my_id,1*sizeof(int)); *i2_csqn=(int*)mpi_malloc(my_id,1*sizeof(int)); } if(*cleft_len>0) { *i0_cleft=(int*)mpi_malloc(my_id,*cleft_len*sizeof(int)); *i1_cleft=(int*)mpi_malloc(my_id,*cleft_len*sizeof(int)); *i2_cleft=(int*)mpi_malloc(my_id,*cleft_len*sizeof(int)); *cleft_nb=(int*)mpi_malloc(my_id,*cleft_len*sizeof(int)); } else { *i0_cleft=(int*)mpi_malloc(my_id,1*sizeof(int)); *i1_cleft=(int*)mpi_malloc(my_id,1*sizeof(int)); *i2_cleft=(int*)mpi_malloc(my_id,1*sizeof(int)); *cleft_nb=(int*)mpi_malloc(my_id,1*sizeof(int)); } if(*ryr_len>0){ *states0=(int*)mpi_malloc(my_id,*ryr_len*sizeof(int)); *states1=(int*)mpi_malloc(my_id,*ryr_len*sizeof(int)); for ( i = 0; i < *ryr_len; i += 1 ) { (*states0)[i]=0; (*states1)[i]=0; } } else { *states0=(int*)mpi_malloc(my_id,1*sizeof(int)); *states1=(int*)mpi_malloc(my_id,1*sizeof(int)); (*states0)[0]=0; (*states1)[0]=0; } MPI_Scatterv(global_i0_ryr_reorder, ryr_rec_count,ryr_rec_disp, MPI_INT, *i0_ryr, *ryr_len, MPI_INT, 0, comm); MPI_Scatterv(global_i1_ryr_reorder, ryr_rec_count,ryr_rec_disp, MPI_INT, *i1_ryr, *ryr_len, MPI_INT, 0, comm); MPI_Scatterv(global_i2_ryr_reorder, ryr_rec_count,ryr_rec_disp, MPI_INT, *i2_ryr, *ryr_len, MPI_INT, 0, comm); MPI_Scatterv(global_i0_csqn_reorder, csqn_rec_count,csqn_rec_disp, MPI_INT, *i0_csqn, *csqn_len, MPI_INT, 0, comm); MPI_Scatterv(global_i1_csqn_reorder, csqn_rec_count,csqn_rec_disp, MPI_INT, *i1_csqn, *csqn_len, MPI_INT, 0, comm); MPI_Scatterv(global_i2_csqn_reorder, csqn_rec_count,csqn_rec_disp, MPI_INT, *i2_csqn, *csqn_len, MPI_INT, 0, comm); MPI_Scatterv(global_i0_cleft_reorder, cleft_rec_count,cleft_rec_disp, MPI_INT, *i0_cleft, *cleft_len, MPI_INT, 0, comm); MPI_Scatterv(global_i1_cleft_reorder, cleft_rec_count,cleft_rec_disp, MPI_INT, *i1_cleft, *cleft_len, MPI_INT, 0, comm); MPI_Scatterv(global_i2_cleft_reorder, cleft_rec_count,cleft_rec_disp, MPI_INT, *i2_cleft, *cleft_len, MPI_INT, 0, comm); MPI_Scatterv(global_cleft_nb_reorder, cleft_rec_count,cleft_rec_disp, MPI_INT, *cleft_nb, *cleft_len, MPI_INT, 0, comm); MPI_Scatterv(global_states0_reorder, ryr_rec_count, ryr_rec_disp, MPI_INT, *states0, *ryr_len, MPI_INT, 0, comm); //MPI_Bcast(&global_ryr_num,1,MPI_INT,0,comm); if(DB_PF) 
printf("Thread%d: ryr_len=%d\n",my_id, *ryr_len); // sprintf(caoutfile,"%s/Ca%d_T%d_rank%d_%d_%d_s0.np",outdirname,i,counter,coord[2],coord[1],coord[0]); // store2Dmatrixfile_double_1D(caoutfile,C1[i],ny0,nz0,30); //MPI_Gatherv(states0, n_ryr, MPI_INT, global_states0, states_rec_count, states_rec_disp, MPI_INT, 0, comm); // if(my_id==2) { // for(i=0;i<*ryr_len;i++) printf("Thread2 states[%d]: %d\n",i,(*states0)[i]); // } if(DB_PF){ for(i=0;i<*ryr_len;i++){ if((*states0)[i]==1){ printf("Proc%d,ryr_len=%d,ryr[%d]:%d,%d,%d \n",my_id, *ryr_len,i,(*i0_ryr)[i],(*i1_ryr)[i],(*i2_ryr)[i]); } } } if(my_id==0){ free(ryr_rec_count); free(ryr_rec_disp); free(csqn_rec_count); free(csqn_rec_disp); free(cleft_rec_count); free(cleft_rec_disp); free(global_i0_ryr_reorder); free(global_i1_ryr_reorder); free(global_i2_ryr_reorder); free(global_i0_csqn_reorder); free(global_i1_csqn_reorder); free(global_i2_csqn_reorder); free(global_i0_cleft_reorder); free(global_i1_cleft_reorder); free(global_i2_cleft_reorder); free(global_cleft_nb_reorder); free(global_states0_reorder); } return 30/h; } //int* loadRyRindexfile_int(char* infile, CONDFN cf, int cond) int* loadRyRindexfile_int(char* infile, int* count) { FILE *fpdata; int* arreturn; int i; int temp_d; *count=0; if(DB_PF) printf("Load file name: %s\n", infile); fpdata = fopen(infile, "r"); if(fpdata==NULL) { printf("\nFailure to open input file.\n"); exit(0); } while(fscanf(fpdata, "%d", &temp_d)!=EOF){ // if(cf(temp_d,cond)) count++; (*count)++; // printf("%d,",temp_d); } if(DB_PF) printf("There are %d indices satisfy the condition\n",*count); arreturn = malloc((*count)*sizeof(int)); if (arreturn == NULL) { printf("\nFailure trying to allocate room for array.\n"); exit(0); } rewind(fpdata); i=0; while(fscanf(fpdata, "%d", &temp_d)!=EOF){ // if(cf(temp_d,cond)) { arreturn[i]=temp_d; i++; // } } fclose(fpdata); if (*count != i) { printf("Wrong indices number\n"); exit(0); } if(DB_PF) printf("load file %s over \n", infile); return arreturn; } void readparam(int* iconf, double* conf) { FILE* file2; char Data[MAX_LINE_LENGTH]; if((file2=fopen("param","r")) == NULL) { printf("Error opening param file\n"); return; } // h fgets(Data,MAX_LINE_LENGTH,file2); fscanf(file2,"%d\n",&iconf[0]); // size_x fgets(Data,MAX_LINE_LENGTH,file2); fscanf(file2,"%d\n",&iconf[1]); // size_y fgets(Data,MAX_LINE_LENGTH,file2); fscanf(file2,"%d\n",&iconf[2]); // size_z fgets(Data,MAX_LINE_LENGTH,file2); fscanf(file2,"%d\n",&iconf[3]); // x_domains fgets(Data,MAX_LINE_LENGTH,file2); fscanf(file2,"%d\n",&iconf[4]); // y_domains fgets(Data,MAX_LINE_LENGTH,file2); fscanf(file2,"%d\n",&iconf[5]); // z_domains fgets(Data,MAX_LINE_LENGTH,file2); fscanf(file2,"%d\n",&iconf[6]); // save_data fgets(Data,MAX_LINE_LENGTH,file2); fscanf(file2,"%d\n",&iconf[7]); // use_failing fgets(Data,MAX_LINE_LENGTH,file2); fscanf(file2,"%d\n",&iconf[8]); // T fgets(Data,MAX_LINE_LENGTH,file2); fscanf(file2,"%le\n",&conf[0]); // DT fgets(Data,MAX_LINE_LENGTH,file2); fscanf(file2,"%le\n",&conf[1]); // save data in binary file fgets(Data,MAX_LINE_LENGTH,file2); fscanf(file2,"%d\n",&iconf[9]); // save data in hdf5 format fgets(Data,MAX_LINE_LENGTH,file2); fscanf(file2,"%d\n",&iconf[10]); // blocking_y_for_cache fgets(Data,MAX_LINE_LENGTH,file2); fscanf(file2,"%d",&iconf[11]); fclose(file2); } void updateBound(double* C00, double* C01, double* C02, double* C03, double* C04, int C_flag, int nx0, int ny0, int nz0, double* yz_sbuf0,double* yz_rbuf0, double* xz_sbuf0,double* xz_rbuf0, double* xy_sbuf0,double* xy_rbuf0, 
double* yz_sbuf1,double* yz_rbuf1, double* xz_sbuf1,double* xz_rbuf1, double* xy_sbuf1,double* xy_rbuf1, int* neighbor, MPI_Status* ar_status, MPI_Request* ar_send_req, MPI_Request* ar_recv_req, MPI_Comm comm, MPI_Comm comm3d) { int i,j,k; int nx=nx0-2; int ny=ny0-2; int nz=nz0-2; int dims[3]; int periods[3]; int coords[3]; int ZN=0, ZP=1, YN=2, YP=3, XN=4, XP=5; MPI_Cart_get(comm3d, 3, dims, periods, coords); // Ghost X end sheet if(coords[2]==0){ i=0; for (j=1; j<ny0-1; j++) for (k=1; k<nz0-1; k++){ C00[i*nz0*ny0+j*nz0+k] = C00[(i+1)*nz0*ny0+j*nz0+k]; C01[i*nz0*ny0+j*nz0+k] = C01[(i+1)*nz0*ny0+j*nz0+k]; C02[i*nz0*ny0+j*nz0+k] = C02[(i+1)*nz0*ny0+j*nz0+k]; C03[i*nz0*ny0+j*nz0+k] = C03[(i+1)*nz0*ny0+j*nz0+k]; C04[i*nz0*ny0+j*nz0+k] = C04[(i+1)*nz0*ny0+j*nz0+k]; } } else { putin_sendbuffer_yz(1*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C00, nx, ny, nz, &yz_sbuf0[0*ny*nz],ny*nz); putin_sendbuffer_yz(1*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C01, nx, ny, nz, &yz_sbuf0[1*ny*nz],ny*nz); putin_sendbuffer_yz(1*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C02, nx, ny, nz, &yz_sbuf0[2*ny*nz],ny*nz); putin_sendbuffer_yz(1*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C03, nx, ny, nz, &yz_sbuf0[3*ny*nz],ny*nz); putin_sendbuffer_yz(1*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C04, nx, ny, nz, &yz_sbuf0[4*ny*nz],ny*nz); } MPI_Isend(yz_sbuf0,5*ny*nz,MPI_DOUBLE,neighbor[XN],C_flag+1000, comm, &ar_send_req[0]); MPI_Irecv(yz_rbuf0,5*ny*nz,MPI_DOUBLE,neighbor[XN],C_flag+1000, comm, &ar_recv_req[0]); // MPI_Sendrecv(yz_sbuf0,5*ny*nz,MPI_DOUBLE,neighbor[XN],C_flag+1000, // yz_rbuf0,5*ny*nz,MPI_DOUBLE,neighbor[XN],C_flag+1000,comm,&status); if(coords[2]==(dims[2]-1)) { i=nx0-1; for (j=1; j<ny0-1; j++) for (k=1; k<nz0-1; k++){ C00[i*nz0*ny0+j*nz0+k] = C00[(i-1)*nz0*ny0+j*nz0+k]; C01[i*nz0*ny0+j*nz0+k] = C01[(i-1)*nz0*ny0+j*nz0+k]; C02[i*nz0*ny0+j*nz0+k] = C02[(i-1)*nz0*ny0+j*nz0+k]; C03[i*nz0*ny0+j*nz0+k] = C03[(i-1)*nz0*ny0+j*nz0+k]; C04[i*nz0*ny0+j*nz0+k] = C04[(i-1)*nz0*ny0+j*nz0+k]; } } else { putin_sendbuffer_yz( (nx0-2)*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C00, nx, ny, nz, &yz_sbuf1[0*ny*nz],ny*nz); putin_sendbuffer_yz( (nx0-2)*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C01, nx, ny, nz, &yz_sbuf1[1*ny*nz],ny*nz); putin_sendbuffer_yz( (nx0-2)*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C02, nx, ny, nz, &yz_sbuf1[2*ny*nz],ny*nz); putin_sendbuffer_yz( (nx0-2)*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C03, nx, ny, nz, &yz_sbuf1[3*ny*nz],ny*nz); putin_sendbuffer_yz( (nx0-2)*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C04, nx, ny, nz, &yz_sbuf1[4*ny*nz],ny*nz); } MPI_Isend(yz_sbuf1,5*ny*nz,MPI_DOUBLE,neighbor[XP],C_flag+1000, comm, &ar_send_req[1]); MPI_Irecv(yz_rbuf1,5*ny*nz,MPI_DOUBLE,neighbor[XP],C_flag+1000, comm, &ar_recv_req[1]); // MPI_Sendrecv(yz_sbuf1,5*ny*nz,MPI_DOUBLE,neighbor[XP],C_flag+1000, // yz_rbuf1,5*ny*nz,MPI_DOUBLE,neighbor[XP],C_flag+1000,comm,&status); // printf("exchange X end sheet ok! 
coords[%d,%d,%d]\n",coords[0],coords[1],coords[2]); // Ghost Y end sheet if(coords[1]==0){ j=0; for (i=1; i<nx0-1; i++) for (k=1; k<nz0-1; k++){ C00[i*nz0*ny0+j*nz0+k] = C00[i*nz0*ny0+(j+1)*nz0+k]; C01[i*nz0*ny0+j*nz0+k] = C01[i*nz0*ny0+(j+1)*nz0+k]; C02[i*nz0*ny0+j*nz0+k] = C02[i*nz0*ny0+(j+1)*nz0+k]; C03[i*nz0*ny0+j*nz0+k] = C03[i*nz0*ny0+(j+1)*nz0+k]; C04[i*nz0*ny0+j*nz0+k] = C04[i*nz0*ny0+(j+1)*nz0+k]; } } else { putin_sendbuffer_xz( 1*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C00, nx, ny, nz, &xz_sbuf0[0*nx*nz],nx*nz); putin_sendbuffer_xz( 1*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C01, nx, ny, nz, &xz_sbuf0[1*nx*nz],nx*nz); putin_sendbuffer_xz( 1*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C02, nx, ny, nz, &xz_sbuf0[2*nx*nz],nx*nz); putin_sendbuffer_xz( 1*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C03, nx, ny, nz, &xz_sbuf0[3*nx*nz],nx*nz); putin_sendbuffer_xz( 1*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C04, nx, ny, nz, &xz_sbuf0[4*nx*nz],nx*nz); } MPI_Isend(xz_sbuf0,5*nx*nz,MPI_DOUBLE,neighbor[YN],C_flag+2000, comm, &ar_send_req[2]); MPI_Irecv(xz_rbuf0,5*nx*nz,MPI_DOUBLE,neighbor[YN],C_flag+2000, comm, &ar_recv_req[2]); // MPI_Sendrecv(xz_sbuf0,5*nx*nz,MPI_DOUBLE,neighbor[YN],C_flag+2000, // xz_rbuf0,5*nx*nz,MPI_DOUBLE,neighbor[YN],C_flag+2000,comm,&status); if(coords[1]==(dims[1]-1)) { j=ny0-1; for (i=1; i<nx0-1; i++) for (k=1; k<nz0-1; k++){ C00[i*nz0*ny0+j*nz0+k] = C00[i*nz0*ny0+(j-1)*nz0+k]; C01[i*nz0*ny0+j*nz0+k] = C01[i*nz0*ny0+(j-1)*nz0+k]; C02[i*nz0*ny0+j*nz0+k] = C02[i*nz0*ny0+(j-1)*nz0+k]; C03[i*nz0*ny0+j*nz0+k] = C03[i*nz0*ny0+(j-1)*nz0+k]; C04[i*nz0*ny0+j*nz0+k] = C04[i*nz0*ny0+(j-1)*nz0+k]; } } else { putin_sendbuffer_xz( 1*nz0*ny0+(ny0-2)*nz0+1,nx0,ny0, nz0, C00, nx, ny, nz, &xz_sbuf1[0*nx*nz],nx*nz); putin_sendbuffer_xz( 1*nz0*ny0+(ny0-2)*nz0+1,nx0,ny0, nz0, C01, nx, ny, nz, &xz_sbuf1[1*nx*nz],nx*nz); putin_sendbuffer_xz( 1*nz0*ny0+(ny0-2)*nz0+1,nx0,ny0, nz0, C02, nx, ny, nz, &xz_sbuf1[2*nx*nz],nx*nz); putin_sendbuffer_xz( 1*nz0*ny0+(ny0-2)*nz0+1,nx0,ny0, nz0, C03, nx, ny, nz, &xz_sbuf1[3*nx*nz],nx*nz); putin_sendbuffer_xz( 1*nz0*ny0+(ny0-2)*nz0+1,nx0,ny0, nz0, C04, nx, ny, nz, &xz_sbuf1[4*nx*nz],nx*nz); } MPI_Isend(xz_sbuf1,5*nx*nz,MPI_DOUBLE,neighbor[YP],C_flag+2000, comm, &ar_send_req[3]); MPI_Irecv(xz_rbuf1,5*nx*nz,MPI_DOUBLE,neighbor[YP],C_flag+2000, comm, &ar_recv_req[3]); // MPI_Sendrecv(xz_sbuf1,5*nx*nz,MPI_DOUBLE,neighbor[YP],C_flag+2000, // xz_rbuf1,5*nx*nz,MPI_DOUBLE,neighbor[YP],C_flag+2000,comm,&status); // printf("exchange Y end sheet ok! 
coords[%d,%d,%d]\n",coords[0],coords[1],coords[2]); // Ghost Z end sheet if(coords[0]==0){ k=0; for (i=1; i<nx0-1; i++) for (j=1; j<ny0-1; j++){ C00[i*nz0*ny0+j*nz0+k] = C00[i*nz0*ny0+j*nz0+k+1]; C01[i*nz0*ny0+j*nz0+k] = C01[i*nz0*ny0+j*nz0+k+1]; C02[i*nz0*ny0+j*nz0+k] = C02[i*nz0*ny0+j*nz0+k+1]; C03[i*nz0*ny0+j*nz0+k] = C03[i*nz0*ny0+j*nz0+k+1]; C04[i*nz0*ny0+j*nz0+k] = C04[i*nz0*ny0+j*nz0+k+1]; } } else { putin_sendbuffer_xy(1*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C00, nx, ny, nz, &xy_sbuf0[0*nx*ny],nx*ny); putin_sendbuffer_xy(1*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C01, nx, ny, nz, &xy_sbuf0[1*nx*ny],nx*ny); putin_sendbuffer_xy(1*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C02, nx, ny, nz, &xy_sbuf0[2*nx*ny],nx*ny); putin_sendbuffer_xy(1*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C03, nx, ny, nz, &xy_sbuf0[3*nx*ny],nx*ny); putin_sendbuffer_xy(1*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C04, nx, ny, nz, &xy_sbuf0[4*nx*ny],nx*ny); } MPI_Isend(xy_sbuf0,5*nx*ny,MPI_DOUBLE,neighbor[ZN],C_flag+3000, comm, &ar_send_req[4]); MPI_Irecv(xy_rbuf0,5*nx*ny,MPI_DOUBLE,neighbor[ZN],C_flag+3000, comm, &ar_recv_req[4]); // MPI_Sendrecv(xy_sbuf0,5*nx*ny,MPI_DOUBLE,neighbor[ZN],C_flag+3000, // xy_rbuf0,5*nx*ny,MPI_DOUBLE,neighbor[ZN],C_flag+3000,comm,&status); if(coords[0]==(dims[0]-1)) { k=nz0-1; for (i=1; i<nx0-1; i++) for (j=1; j<ny0-1; j++){ C00[i*nz0*ny0+j*nz0+k] = C00[i*nz0*ny0+j*nz0+k-1]; C01[i*nz0*ny0+j*nz0+k] = C01[i*nz0*ny0+j*nz0+k-1]; C02[i*nz0*ny0+j*nz0+k] = C02[i*nz0*ny0+j*nz0+k-1]; C03[i*nz0*ny0+j*nz0+k] = C03[i*nz0*ny0+j*nz0+k-1]; C04[i*nz0*ny0+j*nz0+k] = C04[i*nz0*ny0+j*nz0+k-1]; } } else { putin_sendbuffer_xy( 1*nz0*ny0+nz0+nz0-2,nx0,ny0, nz0, C00, nx, ny, nz, &xy_sbuf1[0*nx*ny],nx*ny); putin_sendbuffer_xy( 1*nz0*ny0+nz0+nz0-2,nx0,ny0, nz0, C01, nx, ny, nz, &xy_sbuf1[1*nx*ny],nx*ny); putin_sendbuffer_xy( 1*nz0*ny0+nz0+nz0-2,nx0,ny0, nz0, C02, nx, ny, nz, &xy_sbuf1[2*nx*ny],nx*ny); putin_sendbuffer_xy( 1*nz0*ny0+nz0+nz0-2,nx0,ny0, nz0, C03, nx, ny, nz, &xy_sbuf1[3*nx*ny],nx*ny); putin_sendbuffer_xy( 1*nz0*ny0+nz0+nz0-2,nx0,ny0, nz0, C04, nx, ny, nz, &xy_sbuf1[4*nx*ny],nx*ny); } MPI_Isend(xy_sbuf1,5*nx*ny,MPI_DOUBLE,neighbor[ZP],C_flag+3000, comm, &ar_send_req[5]); MPI_Irecv(xy_rbuf1,5*nx*ny,MPI_DOUBLE,neighbor[ZP],C_flag+3000, comm, &ar_recv_req[5]); // MPI_Sendrecv(xy_sbuf1,5*nx*ny,MPI_DOUBLE,neighbor[ZP],C_flag+3000, // xy_rbuf1,5*nx*ny,MPI_DOUBLE,neighbor[ZP],C_flag+3000,comm,&status); MPI_Waitall(6, ar_recv_req, ar_status); if(coords[2]!=0){ getout_recvbuffer_yz( 1*nz0+1,nx0,ny0, nz0, C00, nx, ny, nz, &yz_rbuf0[0*ny*nz],ny*nz); getout_recvbuffer_yz( 1*nz0+1,nx0,ny0, nz0, C01, nx, ny, nz, &yz_rbuf0[1*ny*nz],ny*nz); getout_recvbuffer_yz( 1*nz0+1,nx0,ny0, nz0, C02, nx, ny, nz, &yz_rbuf0[2*ny*nz],ny*nz); getout_recvbuffer_yz( 1*nz0+1,nx0,ny0, nz0, C03, nx, ny, nz, &yz_rbuf0[3*ny*nz],ny*nz); getout_recvbuffer_yz( 1*nz0+1,nx0,ny0, nz0, C04, nx, ny, nz, &yz_rbuf0[4*ny*nz],ny*nz); } if(coords[2]!=(dims[2]-1)){ getout_recvbuffer_yz((nx0-1)*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C00, nx, ny, nz, &yz_rbuf1[0*ny*nz],ny*nz); getout_recvbuffer_yz((nx0-1)*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C01, nx, ny, nz, &yz_rbuf1[1*ny*nz],ny*nz); getout_recvbuffer_yz((nx0-1)*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C02, nx, ny, nz, &yz_rbuf1[2*ny*nz],ny*nz); getout_recvbuffer_yz((nx0-1)*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C03, nx, ny, nz, &yz_rbuf1[3*ny*nz],ny*nz); getout_recvbuffer_yz((nx0-1)*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C04, nx, ny, nz, &yz_rbuf1[4*ny*nz],ny*nz); } if(coords[1]!=0){ getout_recvbuffer_xz( 1*nz0*ny0+1,nx0,ny0, nz0, C00, nx, ny, nz, &xz_rbuf0[0*nx*nz],nx*nz); getout_recvbuffer_xz( 
1*nz0*ny0+1,nx0,ny0, nz0, C01, nx, ny, nz, &xz_rbuf0[1*nx*nz],nx*nz); getout_recvbuffer_xz( 1*nz0*ny0+1,nx0,ny0, nz0, C02, nx, ny, nz, &xz_rbuf0[2*nx*nz],nx*nz); getout_recvbuffer_xz( 1*nz0*ny0+1,nx0,ny0, nz0, C03, nx, ny, nz, &xz_rbuf0[3*nx*nz],nx*nz); getout_recvbuffer_xz( 1*nz0*ny0+1,nx0,ny0, nz0, C04, nx, ny, nz, &xz_rbuf0[4*nx*nz],nx*nz); } if(coords[1]!=(dims[1]-1)){ getout_recvbuffer_xz(1*nz0*ny0+(ny0-1)*nz0+1,nx0,ny0, nz0, C00, nx, ny, nz, &xz_rbuf1[0*nx*nz],nx*nz); getout_recvbuffer_xz(1*nz0*ny0+(ny0-1)*nz0+1,nx0,ny0, nz0, C01, nx, ny, nz, &xz_rbuf1[1*nx*nz],nx*nz); getout_recvbuffer_xz(1*nz0*ny0+(ny0-1)*nz0+1,nx0,ny0, nz0, C02, nx, ny, nz, &xz_rbuf1[2*nx*nz],nx*nz); getout_recvbuffer_xz(1*nz0*ny0+(ny0-1)*nz0+1,nx0,ny0, nz0, C03, nx, ny, nz, &xz_rbuf1[3*nx*nz],nx*nz); getout_recvbuffer_xz(1*nz0*ny0+(ny0-1)*nz0+1,nx0,ny0, nz0, C04, nx, ny, nz, &xz_rbuf1[4*nx*nz],nx*nz); } if(coords[0]!=0){ getout_recvbuffer_xy( 1*nz0*ny0+1*nz0, nx0,ny0, nz0, C00, nx, ny, nz, &xy_rbuf0[0*nx*ny],nx*ny); getout_recvbuffer_xy( 1*nz0*ny0+1*nz0, nx0,ny0, nz0, C01, nx, ny, nz, &xy_rbuf0[1*nx*ny],nx*ny); getout_recvbuffer_xy( 1*nz0*ny0+1*nz0, nx0,ny0, nz0, C02, nx, ny, nz, &xy_rbuf0[2*nx*ny],nx*ny); getout_recvbuffer_xy( 1*nz0*ny0+1*nz0, nx0,ny0, nz0, C03, nx, ny, nz, &xy_rbuf0[3*nx*ny],nx*ny); getout_recvbuffer_xy( 1*nz0*ny0+1*nz0, nx0,ny0, nz0, C04, nx, ny, nz, &xy_rbuf0[4*nx*ny],nx*ny); } if(coords[0]!=(dims[0]-1)){ getout_recvbuffer_xy( 1*nz0*ny0+nz0+nz0-1,nx0,ny0, nz0, C00, nx, ny, nz, &xy_rbuf1[0*nx*ny],nx*ny); getout_recvbuffer_xy( 1*nz0*ny0+nz0+nz0-1,nx0,ny0, nz0, C01, nx, ny, nz, &xy_rbuf1[1*nx*ny],nx*ny); getout_recvbuffer_xy( 1*nz0*ny0+nz0+nz0-1,nx0,ny0, nz0, C02, nx, ny, nz, &xy_rbuf1[2*nx*ny],nx*ny); getout_recvbuffer_xy( 1*nz0*ny0+nz0+nz0-1,nx0,ny0, nz0, C03, nx, ny, nz, &xy_rbuf1[3*nx*ny],nx*ny); getout_recvbuffer_xy( 1*nz0*ny0+nz0+nz0-1,nx0,ny0, nz0, C04, nx, ny, nz, &xy_rbuf1[4*nx*ny],nx*ny); } // printf("exchange Z end sheet ok! 
coords[%d,%d,%d]\n",coords[0],coords[1],coords[2]); } void putin_sendbuffer_yz(int base_addr,int nx0,int ny0, int nz0, double* arr, int nx, int ny, int nz, double* sbuf, int sbuf_len) { int i; if(sbuf_len!=ny*nz) { printf("yz sbuf_len error!\n"); exit(0); } for ( i = 0; i < ny; i += 1 ) { memcpy(&sbuf[i*nz],&arr[base_addr+i*nz0],nz*sizeof(double)); } } void putin_sendbuffer_xz(int base_addr,int nx0,int ny0, int nz0, double* arr, int nx, int ny, int nz, double* sbuf, int sbuf_len) { int i; if(sbuf_len!=nx*nz) { printf("xz sbuf_len error!\n"); exit(0); } for ( i = 0; i < nx; i += 1 ) { memcpy(&sbuf[i*nz],&arr[base_addr+i*ny0*nz0],nz*sizeof(double)); } } void putin_sendbuffer_xy(int base_addr,int nx0,int ny0, int nz0, double* arr, int nx, int ny, int nz, double* sbuf, int sbuf_len) { int i, j; if(sbuf_len!=nx*ny) { printf("xy sbuf_len error!\n"); exit(0); } for ( i = 0; i < nx; i += 1 ) { for ( j = 0; j < ny; j += 1 ) { sbuf[i*ny+j]=arr[base_addr+i*ny0*nz0+j*nz0]; } } } void getout_recvbuffer_yz(int base_addr,int nx0,int ny0, int nz0, double* arr, int nx, int ny, int nz, double* sbuf, int sbuf_len) { int i; if(sbuf_len!=ny*nz) { printf("yz rbuf_len error!\n"); exit(0); } for ( i = 0; i < ny; i += 1 ) { memcpy(&arr[base_addr+i*nz0],&sbuf[i*nz],nz*sizeof(double)); } } void getout_recvbuffer_xz(int base_addr,int nx0,int ny0, int nz0, double* arr, int nx, int ny, int nz, double* sbuf, int sbuf_len) { int i; if(sbuf_len!=nx*nz) { printf("xz rbuf_len error!\n"); exit(0); } for ( i = 0; i < nx; i += 1 ) { memcpy(&arr[base_addr+i*ny0*nz0],&sbuf[i*nz],nz*sizeof(double)); } } void getout_recvbuffer_xy(int base_addr,int nx0,int ny0, int nz0, double* arr, int nx, int ny, int nz, double* sbuf, int sbuf_len) { int i, j; if(sbuf_len!=nx*ny) { printf("xy rbuf_len error!\n"); exit(0); } for ( i = 0; i < nx; i += 1 ) { for ( j = 0; j < ny; j += 1 ) { arr[base_addr+i*ny0*nz0+j*nz0]=sbuf[i*ny+j]; } } } void BinarySort_two(int* pData, int* vData, int Count) { dichotomy_two(pData,vData,0,Count-1); } void dichotomy_two(int* pData,int* vData, int left,int right) { int i,j; int middle,iTemp; i = left; j = right; middle = pData[(left+right)/2]; do{ while((pData[i]<middle) && (i<right)) i++; while((pData[j]>middle) && (j>left)) j--; if(i<=j) { iTemp = pData[i]; pData[i] = pData[j]; pData[j] = iTemp; iTemp =vData[i]; vData[i]=vData[j]; vData[j]=iTemp; i++; j--; } }while(i<=j); if(left<j) dichotomy_two(pData,vData,left,j); if(right>i) dichotomy_two(pData,vData,i,right); } void *mpi_malloc ( int id, /* IN - Process rank */ int bytes) /* IN - Bytes to allocate */ { void *buffer; if ((buffer = malloc ((size_t) bytes)) == NULL) { printf ("Error: Malloc failed for process %d\n", id); fflush (stdout); MPI_Abort (MPI_COMM_WORLD, 4); } return buffer; } void compute_pde_ode(int nx0, int ny0, int nz0, double dt,double gamma, double fudge, double* alpha, double* B_tot, double* k_on, double* k_off, double** C0, double** C1, int div_y) { // Main kernel int i,j,k,jj,idx; int ny; double J; double Ca_ijk; double buff_ijk; double Ca_i2_ijk; double Ca_SR2_ijk; ny=ny0-2; for (i=1; i<nx0-1; i++) { for (jj=0; jj<ny/div_y; jj++) { //blocking for cache size on y line for (j=jj*div_y+1; j<(jj+1)*div_y+1; j++) { //Laplace diffusion process five array together for(idx=0;idx<5;idx++) { #pragma ivdep #pragma prefetch for (k=1; k<nz0-1; k++) { C1[idx][i*nz0*ny0+j*nz0+k] =alpha[idx]*( C0[idx][i*nz0*ny0+j*nz0+k]*(-6)+ C0[idx][(i-1)*nz0*ny0+j*nz0+k] + C0[idx][(i+1)*nz0*ny0+j*nz0+k] + C0[idx][i*nz0*ny0+(j-1)*nz0+k] + C0[idx][i*nz0*ny0+(j+1)*nz0+k] + 
C0[idx][i*nz0*ny0+j*nz0+k-1] + C0[idx][i*nz0*ny0+j*nz0+k+1]) + C0[idx][i*nz0*ny0+j*nz0+k]; } } //Reaction for(idx=2;idx<6;idx++) { #pragma ivdep #pragma prefetch for (k=1; k<nz0-1; k++) { Ca_ijk = C1[0][i*nz0*ny0+j*nz0+k]; buff_ijk = C1[idx][i*nz0*ny0+j*nz0+k]; J = k_on[idx]*(B_tot[idx] - buff_ijk)*Ca_ijk - k_off[idx]*buff_ijk; C1[0][i*nz0*ny0+j*nz0+k] -= dt*J; C1[idx][i*nz0*ny0+j*nz0+k] += dt*J; } } // serca3D #pragma ivdep #pragma prefetch for (k=1; k<nz0-1; k++) { // Main kernel Ca_i2_ijk = C1[0][i*nz0*ny0+j*nz0+k]; Ca_SR2_ijk = C1[1][i*nz0*ny0+j*nz0+k]; Ca_i2_ijk *= Ca_i2_ijk; Ca_SR2_ijk *= Ca_SR2_ijk; J = fudge*(570997802.885875*Ca_i2_ijk - 0.0425239333622699*Ca_SR2_ijk)/(106720651.206402*Ca_i2_ijk + 182.498197548666*Ca_SR2_ijk + 5.35062954944879); C1[0][i*nz0*ny0+j*nz0+k] -= dt*J; C1[1][i*nz0*ny0+j*nz0+k] += dt*J/gamma; } } } } }
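/*
 * Sketch (added for illustration; not part of the original program): the
 * stencil kernels above -- laplace3D, reaction3D, serca3D and the fused
 * compute_pde_ode -- iterate over independent (i,j,k) voxels, which is why the
 * original functions carry commented-out "#pragma omp parallel for" hints.
 * Assuming the same row-major layout and that alpha already contains the
 * scaled diffusion coefficient, a minimal hybrid MPI+OpenMP variant could keep
 * the existing domain decomposition and simply thread the outer loop within
 * each rank, as in the function below. The name laplace3D_omp_sketch and the
 * SKETCH_OMP guard are illustrative only.
 */
#ifdef SKETCH_OMP
#include <omp.h>
static void laplace3D_omp_sketch(int nx0, int ny0, int nz0,
                                 const double* C0, double* C1, double alpha)
{
  int i, j, k;
  /* Every iteration reads only C0 and writes a distinct element of C1, so the
     outer loop can be divided among threads without synchronisation. */
#pragma omp parallel for private(j, k) schedule(static)
  for (i = 1; i < nx0 - 1; i++) {
    for (j = 1; j < ny0 - 1; j++) {
      for (k = 1; k < nz0 - 1; k++) {
        double lap = -6.0*C0[i*nz0*ny0 + j*nz0 + k]
                   + C0[(i-1)*nz0*ny0 + j*nz0 + k] + C0[(i+1)*nz0*ny0 + j*nz0 + k]
                   + C0[i*nz0*ny0 + (j-1)*nz0 + k] + C0[i*nz0*ny0 + (j+1)*nz0 + k]
                   + C0[i*nz0*ny0 + j*nz0 + (k-1)] + C0[i*nz0*ny0 + j*nz0 + (k+1)];
        C1[i*nz0*ny0 + j*nz0 + k] = C0[i*nz0*ny0 + j*nz0 + k] + alpha*lap;
      }
    }
  }
}
#endif /* SKETCH_OMP */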
#include <math.h> #include <assert.h> #include <stdio.h> #include <stdlib.h> #include <time.h> #include <string.h> #include "mpi.h" #include "hdf5.h" #include <sys/stat.h> //#define DEBUG_TEST #define DB_PF 0 #define MAX_LINE_LENGTH 80 //#define __PAPI__ #ifdef __PAPI__ #include <papi.h> #endif typedef int(*CONDCF)(int a, int b); #define H5T_DATA_TYPE H5T_NATIVE_SHORT typedef short int hdf5_data_type; #define H5_DATA_LIMIT_0 -32768 // Data type specific #define H5_DATA_LIMIT_1 32767 // Data type specific #define H5_DATA_SIZE H5_DATA_LIMIT_1 - H5_DATA_LIMIT_0 // Data type specific double timing(); void *mpi_malloc ( int id, int bytes); /* IN - Bytes to allocate */ inline double my_random(); double my_min(double* ar, int len); double my_max(double* ar, int len); void stern(double t, double* y0, double* y1, double Ca); void stern_discrete(double dt, int* y0, int* y1, double Ca); void laplace3D (int nx0, int ny0, int nz0, double* C0, int nx1, int ny1, int nz1, double* C1, double alpha);//, int num_threads) void reaction3D (int nx0, int ny0, int nz0, double* Ca, int nx1, int ny1, int nz1, double* buff, double B_tot, double k_on, double k_off, double dt);//, int num_threads) void serca3D (int nx0, int ny0, int nz0, double* Ca_i, int nx1, int ny1, int nz1, double* Ca_SR, double dt, double gamma, double fudge);//, int num_threads) void update_ryr(int h_scale,int nx0, int ny0, int nz0, double* Ca_i, double* Ca_SR, double* Ca_CSQN, double* C10, double* C12, double* C13, double* C14, double k_on_CSQN, double k_off_CSQN, double CSQN_tot, double gamma, double K, double dt, int ryr_len, int* i0_ryr, int* i1_ryr, int* i2_ryr, int csqn_len, int* i0_csqn, int* i1_csqn, int* i2_csqn, int cleft_len, int* i0_cleft, int* i1_cleft, int* i2_cleft,int* cleft_nb, int* states0, int* states1); void store2Dmatrixfile_double_1D(char* outfile, double* ar, int rows, int cols, int x_strid); void store2Dmatrixfile_double_bin(char* outfile, double* ar, int rows, int cols, int x_strid); void transfer_hdf5_data(hdf5_data_type* h5_data, double* ar0, double* ar1, double scale_value, hsize_t* chunk_dims); void store2Dmatrixfile_int_1D(char* outfile, int* ar, int rows, int cols); //int less(int a, int b); //int giant(int a, int b); //int* loadRyRindexfile_int(char* infile, CONDFN cf, int cond); int* loadRyRindexfile_int(char* infile, int* count); int idxinrank(int nx, int ny, int nz, int i0, int i1, int i2, int rank, MPI_Comm comm3d); int idxbl2rank(int nx, int ny, int nz, int i0, int i1, int i2, int* coords, MPI_Comm comm3d); int load_indices_serial(int nx, int ny, int nz, int h, int** i0_ryr, int** i1_ryr, int** i2_ryr, int* ryr_len, int** i0_csqn, int** i1_csqn, int** i2_csqn, int* csqn_len, int** i0_cleft, int** i1_cleft, int** i2_cleft, int** cleft_nb, int* cleft_len, int x_slice_mid,int x_slice_width, int x_slice_num, int use_failing); int IsTrueCleft(int coord_y, int coord_z, int size_y, int size_z, int *i1_csqn, int *i2_csqn, int* y_index, int csqn_len); void BinarySort_two(int* pData, int* vData, int Count); void dichotomy_two(int* pData,int* vData, int left,int right); int distr_ryr_csqn_state(int h, int size_x, int size_y, int size_z, int nx, int ny, int nz, int** i0_ryr, int** i1_ryr, int** i2_ryr, int* ryr_len, int** i0_csqn, int** i1_csqn, int** i2_csqn, int* csqn_len, int** i0_cleft, int** i1_cleft, int** i2_cleft,int** cleft_nb, int* cleft_len, int** states0, int** states1, int x_slice_mid,int x_slice_width, int x_slice_num, MPI_Comm comm3d, MPI_Comm, int use_failing); void readparam(int* iconf, double* conf); 
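/* Added comment: updateBound() performs the per-step halo exchange. On physical
   domain faces the one-voxel ghost layer of the five diffusing fields is
   mirrored from the first interior plane (zero-flux boundary); between
   neighbouring ranks the faces are packed into send buffers and exchanged with
   non-blocking MPI_Isend/MPI_Irecv before the received faces are unpacked. */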
void updateBound(double* C00, double* C01, double* C02, double* C03, double* C04, int C_flag, int nx0, int ny0, int nz0, double* yz_sbuf0,double* yz_rbuf0, double* xz_sbuf0,double* xz_rbuf0, double* xy_sbuf0,double* xy_rbuf0, double* yz_sbuf1,double* yz_rbuf1, double* xz_sbuf1,double* xz_rbuf1, double* xy_sbuf1,double* xy_rbuf1, int* neighbor, MPI_Status* ar_status, MPI_Request* ar_send_req, MPI_Request* ar_recv_req, MPI_Comm comm,MPI_Comm comm3d); void putin_sendbuffer_yz(int base_addr,int nx0,int ny0, int nz0, double* arr, int nx, int ny, int nz, double* sbuf, int sbuf_len); void putin_sendbuffer_xz(int base_addr,int nx0,int ny0, int nz0, double* arr, int nx, int ny, int nz, double* sbuf, int sbuf_len); void putin_sendbuffer_xy(int base_addr,int nx0,int ny0, int nz0, double* arr, int nx, int ny, int nz, double* sbuf, int sbuf_len); void getout_recvbuffer_yz(int base_addr,int nx0,int ny0, int nz0, double* arr, int nx, int ny, int nz, double* sbuf, int sbuf_len); void getout_recvbuffer_xz(int base_addr,int nx0,int ny0, int nz0, double* arr, int nx, int ny, int nz, double* sbuf, int sbuf_len); void getout_recvbuffer_xy(int base_addr,int nx0,int ny0, int nz0, double* arr, int nx, int ny, int nz, double* sbuf, int sbuf_len); void compute_pde_ode(int nx0, int ny0, int nz0, double dt,double gamma, double fudge, double* alpha, double* B_tot, double* k_on, double* k_off, double** C0, double** C1, int div_y); #define NUM_SAVE_SPECIES 5 int save_species[NUM_SAVE_SPECIES] = {0,1,4,5,6}; char* species_names[7] = {"Cai", "CaSR", "CaCMDN", "CaATP", "CaFluo", "CaTRPN", "CaCSQN"}; int main(int argc, char **argv) { int i,j,k; #ifdef __PAPI__ // int Events[] = { PAPI_L1_DCA, PAPI_L1_DCM }; // int Events[] = {PAPI_L3_TCM, PAPI_L3_TCA, PAPI_L2_TCM,PAPI_L2_TCA}; int Events[] = {PAPI_DP_OPS,PAPI_L3_TCM}; int NUM_EVENTS = sizeof(Events)/sizeof(Events[0]); long long res_papi[NUM_EVENTS]; char EventName[128]; int num_hwcntrs = 0; int EventSet = PAPI_NULL; int retval; retval = PAPI_library_init( PAPI_VER_CURRENT ); retval = PAPI_create_eventset( &EventSet ); if (PAPI_add_events( EventSet, Events, NUM_EVENTS) != PAPI_OK){ printf("PAPI_add_events failed\n"); } for (i=0; i<NUM_EVENTS; i++){ res_papi[i] = 0; } #endif double time_main=0.0; double time_comm=0.0; double time_conc=0.0; double time_ryr=0.0; double time_io=0.0; int save_data=0; int use_rand_seed=1; int use_failing=0; int idx; int h_scale=1; int h=30; int div_y=1; int save_binary_file=0; int save_hdf5=0; double T=1.0; double DT=0.05; // plotting time step int TimeStep=2; int size_x, size_y, size_z, my_id, x_domains, y_domains, z_domains; int iconf[12]; double conf[2]; /* MPI variables */ int nproc, ndims; MPI_Comm comm, comm3d; int dims[3]; int periods[3]; int reorganisation = 0; MPI_Datatype matrix_type_oyz, matrix_type_oxz, matrix_type_oxy; int ZN=0, ZP=1, YN=2, YP=3, XN=4, XP=5; int NeighBor[6]; hid_t h5_file_id; hdf5_data_type* h5_data; MPI_Init(&argc, &argv); comm = MPI_COMM_WORLD; MPI_Comm_size(comm, &nproc); MPI_Comm_rank(comm, &my_id); MPI_Info info = MPI_INFO_NULL; if (my_id==0) { readparam(iconf, conf); } MPI_Bcast(iconf, 12, MPI_INT, 0, comm); MPI_Bcast(conf, 2, MPI_DOUBLE, 0, comm); h = iconf[0]; size_x = iconf[1]; size_y = iconf[2]; size_z = iconf[3]; x_domains = iconf[4]; y_domains = iconf[5]; z_domains = iconf[6]; save_data = iconf[7]; use_failing = iconf[8]; save_binary_file = iconf[9]; // Save Ca in binary file instead of ascii file save_hdf5 = iconf[10]; // Save data in hdf5 file format div_y = iconf[11]; // Block size on y direction for 
cache T = conf[0]; DT = conf[1]; h_scale=30/h; if(use_rand_seed) srand(my_id); char hdf5_dataset_name[200]; char hdf5_group_name[200]; char h5_basename[200]; char outdirname[200]; if(save_hdf5) { sprintf(h5_basename, "output_%d_%d_%d_%d_%d", h, size_x, size_y, size_z, use_failing); } else if(save_binary_file) { sprintf(outdirname, "output_%d_%d_%d_%d_%d_bin", h, size_x, size_y, size_z, use_failing); } else { sprintf(outdirname, "output_%d_%d_%d_%d_%d", h, size_x, size_y, size_z, use_failing); } if(!my_id) { if(save_data && !save_hdf5){ if(access(outdirname,0)) { if (mkdir(outdirname, 0755)==-1) { printf("make directory failed\n"); } else { printf("make directory: %s\n", outdirname); } } else { printf("directory %s existed\n",outdirname); } } } MPI_Barrier(comm); if((my_id==0) && (nproc!=(x_domains*y_domains*z_domains))) { printf("Number of processes not equal to Number of subdomains\n"); MPI_Abort(MPI_COMM_WORLD, 1); } if((my_id==0)&&(size_x%x_domains!=0)) { printf("Number of x_domains is not divisible in scale\n"); MPI_Abort(MPI_COMM_WORLD, 1); } if((my_id==0)&&(size_y%y_domains!=0)) { printf("Number of y_domains is not divisible in scale\n"); MPI_Abort(MPI_COMM_WORLD, 1); } if((my_id==0)&&(size_z%z_domains!=0)) { printf("Number of z_domains is not divisible in scale\n"); MPI_Abort(MPI_COMM_WORLD, 1); } if(((size_y/y_domains)%div_y)!=0){ div_y=1; if(my_id==0){ printf("Warning: div_y is not divisible on each node, so set div_y=1 for default \n"); } } /* Create 3D cartesian grid */ periods[0] = 0; periods[1] = 0; periods[2] = 0; ndims = 3; dims[0]=z_domains; dims[1]=y_domains; dims[2]=x_domains; MPI_Cart_create(comm, ndims, dims, periods, reorganisation, &comm3d); /* MPI variables */ MPI_Status ar_status[6]; MPI_Request ar_send_req[6]; MPI_Request ar_recv_req[6]; int coord[3]; int dim[3]; int period[3]; int mid_coord_x=0; int in_midx_slice=0; int x_slice_num; int x_slice_width; int x_slice_mid; MPI_Cart_get(comm3d, 3, dim, period, coord); x_slice_num=(int)(ceil((double)(size_x*h)/2100.0)); if((size_x%x_slice_num)!=0) { printf("x dimension can not be divided by %d\n", x_slice_num); MPI_Abort(comm,5); } x_slice_width=size_x/x_slice_num; x_slice_mid=(x_slice_width+1)/2; for(i=0;i<x_slice_num;i++) { if(((x_slice_width*i+x_slice_mid)>=(coord[2]*size_x/x_domains))&& ((x_slice_width*i+x_slice_mid)<((coord[2]+1)*size_x/x_domains))){ if(in_midx_slice==1){ printf("dont put two x_slice in a x partition\n"); MPI_Abort(comm,5); } in_midx_slice=1; mid_coord_x=(x_slice_width*i+x_slice_mid)-(coord[2]*size_x/x_domains)+1;//+1 for ghost bound //check x partition thickness, so far, for simplify, dont cut a csqn and no-flux into two x-partitions if((mid_coord_x)<(h_scale+3)||(size_x/x_domains-mid_coord_x)<(h_scale+3)){ printf("x partition is too thine for CSQN and cleft extend \n"); MPI_Abort(comm,5); } } } //printf("Rank: %d, coord: [%d, %d, %d]\n", my_id, coord[0], coord[1], coord[2]); /* Identify process neighbors */ NeighBor[0] = MPI_PROC_NULL; NeighBor[1] = MPI_PROC_NULL; NeighBor[2] = MPI_PROC_NULL; NeighBor[3] = MPI_PROC_NULL; NeighBor[4] = MPI_PROC_NULL; NeighBor[5] = MPI_PROC_NULL; /* Left/West and right/Est neigbors Z direction*/ MPI_Cart_shift(comm3d,0,1,&NeighBor[ZN],&NeighBor[ZP]); /* Bottom/South and Upper/North neigbors Y direction*/ MPI_Cart_shift(comm3d,1,1,&NeighBor[YN],&NeighBor[YP]); /* Zdown/South and Zup/North neigbors X direction*/ MPI_Cart_shift(comm3d,2,1,&NeighBor[XN],&NeighBor[XP]); //-------------------------------------------------------------------- int nx=(size_x/x_domains); int 
ny=(size_y/y_domains); int nz=(size_z/z_domains); int nx0, ny0, nz0; int nx1, ny1, nz1; nx0=nx+2; ny0=ny+2; nz0=nz+2; nx1=nx+2; ny1=ny+2; nz1=nz+2; int len; len=nx0*ny0*nz0; /* Create matrix data types to communicate */ MPI_Type_vector(ny, nz, nz0, MPI_DOUBLE, &matrix_type_oyz); MPI_Type_commit(&matrix_type_oyz); /* Create matrix data type to communicate on vertical Oxz plan */ MPI_Type_vector(nx, nz, ny0*nz0, MPI_DOUBLE, &matrix_type_oxz); MPI_Type_commit(&matrix_type_oxz); /* Create matrix data type to communicate on vertical Oxy plan */ MPI_Datatype matrix_type_liney; MPI_Type_vector(ny, 1, nz0, MPI_DOUBLE, &matrix_type_liney); MPI_Type_commit(&matrix_type_liney); // MPI_Type_vector(nx*ny, 1, nz0, MPI_DOUBLE, &matrix_type_oxy); MPI_Type_hvector(nx, 1, ny0*nz0*sizeof(double), matrix_type_liney, &matrix_type_oxy); MPI_Type_commit(&matrix_type_oxy); if(!my_id) printf("Simulation Begin!\n"); //Define where the RyRs are: int* i0_ryr; int* i1_ryr; int* i2_ryr; int* i0_csqn; int* i1_csqn; int* i2_csqn; int* i0_cleft; int* i1_cleft; int* i2_cleft; int* cleft_nb; int ryr_len; int csqn_len; int cleft_len; int* states0; int* states1; h_scale=distr_ryr_csqn_state( h, size_x, size_y, size_z, nx, ny, nz, &i0_ryr, &i1_ryr, &i2_ryr, &ryr_len, &i0_csqn, &i1_csqn, &i2_csqn, &csqn_len, &i0_cleft, &i1_cleft, &i2_cleft, &cleft_nb,&cleft_len, &states0, &states1, x_slice_mid,x_slice_width, x_slice_num, comm3d, comm, use_failing); // store2Dmatrixfile_int_1D("i0.txt",i0,n_ryr,1); // store2Dmatrixfile_int_1D("i1.txt",i1,n_ryr,1); // store2Dmatrixfile_int_1D("i2.txt",i2,n_ryr,1); double Vfraction; //first set the numbers of RyR in a CaRU; //All CaRU placed mid-sarcomere Vfraction=(30.0/h)*(30.0/h)*(30.0/h); // scaling of RyR when changing dx // Set constants and dt based on these: double D_i=250e3; // 220e3 double D_SR=73e3; // 73.3e3; double D_ATP=140e3; double D_CMDN=22e3; double D_Fluo=42e3; double dt=(1./6)*h*h/D_i; double alpha_i = dt*D_i/(h*h); double Ca0 = 140e-3; double CaSR0 = 1.3e3; double* Ca_i; Ca_i=(double*)malloc(len*sizeof(double)); for ( i = 0; i < len; i += 1 ) { Ca_i[i]=Ca0; } double alpha_SR = dt*D_SR/(h*h); double* Ca_SR; Ca_SR=(double*)malloc(len*sizeof(double)); for ( i = 0; i < len; i += 1 ) { Ca_SR[i]=CaSR0; } double k_on_CMDN = 34e-3; double k_off_CMDN = 238e-3; double CMDN_tot = 24; double alpha_CMDN = dt*D_CMDN/(h*h); double k_on_ATP = 255e-3; double k_off_ATP = 45; double ATP_tot = 455; double alpha_ATP = dt*D_ATP/(h*h); double k_on_Fluo = 110e-3; double k_off_Fluo = 110e-3; double Fluo_tot = 25; // 25; double alpha_Fluo = dt*D_Fluo/(h*h); double k_on_TRPN = 32.7e-3; double k_off_TRPN = 19.6e-3; // 26.16e-3; double TRPN_tot = 70; // 50; double k_on_CSQN = 102e-3; double k_off_CSQN = 65; double CSQN_tot = 30e3; double alpha[7]; double k_on[7]; double k_off[7]; double B_tot[7]; alpha[0]=alpha_i; alpha[1]=alpha_SR; alpha[2]=alpha_CMDN; alpha[3]=alpha_ATP; alpha[4]=alpha_Fluo; alpha[5]=0; alpha[6]=0; k_on[0]=0 ; k_on[1]= 0; k_on[2]= k_on_CMDN; k_on[3]=k_on_ATP ; k_on[4]=k_on_Fluo ; k_on[5]=k_on_TRPN; k_on[6]=k_on_CSQN; k_off[0]=0 ; k_off[1]= 0; k_off[2]=k_off_CMDN; k_off[3]=k_off_ATP; k_off[4]=k_off_Fluo; k_off[5]=k_off_TRPN; k_off[6]=k_off_CSQN; B_tot[0]=0 ; B_tot[1]= 0; B_tot[2]=CMDN_tot ; B_tot[3]=ATP_tot ; B_tot[4]=Fluo_tot ; B_tot[5]=TRPN_tot; B_tot[6]=CSQN_tot; // Calculate steady state IC for the buffers based on Ca_i ... 
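/*
 * Added descriptive comment: the initial buffer concentrations below follow from
 * setting the binding flux to zero, i.e. k_on*(B_tot - CaB)*Ca = k_off*CaB, which
 * rearranges to
 *
 *     CaB0 = B_tot * Ca0 / (Ca0 + k_off/k_on)     (with Kd = k_off/k_on)
 *
 * evaluated at the resting concentrations Ca0 for the cytosolic buffers and CaSR0
 * (via Ca_SR[0]) for CSQN.
 */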
double Ca_CMDN0=B_tot[2]*Ca0/(Ca0+k_off[2]/k_on[2]); double Ca_ATP0 =B_tot[3]*Ca0/(Ca0+k_off[3]/k_on[3]); double Ca_Fluo0=B_tot[4]*Ca0/(Ca0+k_off[4]/k_on[4]); double Ca_TRPN0=B_tot[5]*Ca0/(Ca0+k_off[5]/k_on[5]); // and Ca_SR: double Ca_CSQN0 = CSQN_tot*Ca_SR[0]/(Ca_SR[0] + k_off_CSQN/k_on_CSQN); double init_values[7] = {Ca0, CaSR0, Ca_CMDN0, Ca_ATP0, Ca_Fluo0, Ca_TRPN0, Ca_CSQN0}; //printf("%f %f %f %f %f \n ", Ca_ATP0, Ca_CMDN0, Ca_Fluo0, Ca_TRPN0, Ca_CSQN0); if(my_id==0) printf("cubiod_c: h:%d size_x:%d size_y:%d size_z:%d dt:%f, T:%f, TimeStep:%d, DT:%f outfilenum:%d, x_slice_num:%d, use_failing:%d, div_y:%d, save_binary:%d \n", h, size_x, size_y, size_z,dt,T, (int)(T/dt),DT,(int)(T/DT)*save_data,x_slice_num,use_failing, div_y,save_binary_file); // Allocate the data structure for the solution double *Ca_ATP ; double *Ca_CMDN ; double *Ca_Fluo ; double *Ca_TRPN ; double *Ca_CSQN ; Ca_ATP =(double*)malloc(len*sizeof(double)); Ca_CMDN=(double*)malloc(len*sizeof(double)); Ca_Fluo=(double*)malloc(len*sizeof(double)); Ca_TRPN=(double*)malloc(len*sizeof(double)); Ca_CSQN=(double*)malloc(len*sizeof(double)); for ( i = 0; i < len; i += 1 ) { Ca_ATP[i] = Ca_ATP0; Ca_CMDN[i] = Ca_CMDN0; Ca_Fluo[i] = Ca_Fluo0; Ca_TRPN[i] = Ca_TRPN0; Ca_CSQN[i] = Ca_CSQN0; } double* C0[7]; double* C1[7]; double* C_temp; C0[0]=(double*)malloc(len*sizeof(double)); C1[0]=Ca_i; memcpy(C0[0],C1[0],len*sizeof(double)); C0[1]=(double*)malloc(len*sizeof(double)); C1[1]=Ca_SR; memcpy(C0[1],C1[1],len*sizeof(double)); C0[2]=(double*)malloc(len*sizeof(double)); C1[2]=Ca_CMDN; memcpy(C0[2],C1[2],len*sizeof(double)); C0[3]=(double*)malloc(len*sizeof(double)); C1[3]=Ca_ATP; memcpy(C0[3],C1[3],len*sizeof(double)); C0[4]=(double*)malloc(len*sizeof(double)); C1[4]=Ca_Fluo; memcpy(C0[4],C1[4],len*sizeof(double)); C0[5]=(double*)malloc(len*sizeof(double)); C1[5]=Ca_TRPN; memcpy(C0[5],C1[5],len*sizeof(double)); C0[6]=(double*)malloc(len*sizeof(double)); C1[6]=Ca_CSQN; memcpy(C0[6],C1[6],len*sizeof(double)); //Ca = [[Ca_i.copy(), Ca_i ], // [Ca_SR.copy(), Ca_SR ], // [Ca_CMDN.copy(), Ca_CMDN], // [Ca_ATP.copy(), Ca_ATP ], // [Ca_Fluo.copy(), Ca_Fluo], // [Ca_TRPN, Ca_TRPN], // [Ca_CSQN, Ca_CSQN]] double gamma = 0.02; // SR volume fraction int cai=0; int sri=1; // int cmdni=2; // int atpi=3; // int fluoi=4; // int trpni=5; int csqni=6; double fraction[7]={1,1,1,1,1,1,1}; fraction[1]=gamma; fraction[6]=gamma; // Ryr conductance: double k_s = (Vfraction)*150/2; // 1/ms, based on 0.5pA of Ca2+ into (30nm)^3. double K = exp(-k_s*dt*(1+1/gamma)); // factor need in the integration below if(my_id==0){ printf("dt = dt: %e\n", dt); printf("k_s = (Vfraction)*150/2: %e\n", k_s); printf("K = exp(-k_s*dt*(1+1/gamma)): %e\n", K); } double t=0; int counter=0; // int mean[7]; time_main-=timing(); FILE *fpdata; char meanfile[200]; if (save_hdf5) sprintf(meanfile,"%s_mean.txt", h5_basename); else sprintf(meanfile,"%s/mean.txt", outdirname); if(!my_id){ if(save_data){ if ((fpdata=fopen(meanfile, "w"))==NULL) { printf("failed open output file "); printf("%s", meanfile); printf(" ! \n "); exit(0); } } } // H5 Setup if (save_hdf5) { char h5_data_file[200]; // Set up file access property list with parallel I/O access // property list identifier hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS); H5Pset_fapl_mpio(plist_id, comm, info); sprintf(h5_data_file, "%s.h5", h5_basename); // Create a new file collectively and release property list identifier. 
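/*
 * Added descriptive comment: this is the standard parallel-HDF5 pattern - the file
 * access property list was bound to the MPI communicator with H5Pset_fapl_mpio()
 * above, and H5Fcreate() below must then be called collectively by every rank with
 * the same file name, so all ranks share one file handle for the collective dataset
 * writes issued later in the time loop.
 */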
h5_file_id = H5Fcreate(h5_data_file, H5F_ACC_TRUNC, H5P_DEFAULT, plist_id); H5Pclose(plist_id); const int data_rank = 2; hsize_t dimsf[2] = {size_y, size_z}; /* dataset dimensions */ hsize_t chunk_dims[2] = {ny, nz}; /* chunk dimensions */ // Offset into dataset based on the MPI coord from MPI_Cart_get hsize_t h5_offset[2] = {coord[1]*nz, coord[0]*ny}; hsize_t h5_count[2] = {1, 1}; hsize_t data_size=ny*nz; h5_data = (hdf5_data_type*)malloc(data_size*sizeof(hdf5_data_type)); if (!my_id) { printf("Total data size per species: %zu, %zu\n", dimsf[0], dimsf[1]); printf("Total data size per chunk per species: %zu, %zu\n", chunk_dims[0], chunk_dims[1]); } printf("rank %d | h5 offset [%zu, %zu]\n", my_id, h5_offset[0], h5_offset[1]); // Create data space for the datatype limits hsize_t dims = 1; hid_t attr_space = H5Screate_simple(1, &dims, NULL); // Create a time attribute hid_t limit_id = H5Acreate(h5_file_id, "data_type_size", H5T_NATIVE_DOUBLE, attr_space, H5P_DEFAULT, H5P_DEFAULT); // Write the attribute data double data_type_size = (double)H5_DATA_SIZE; herr_t status = H5Awrite(limit_id, H5T_NATIVE_DOUBLE, &data_type_size); // Cleanup H5Aclose(limit_id); H5Sclose(attr_space); // Save hard coded data ranges for (i=0; i<NUM_SAVE_SPECIES; i++) { // Get species int species = save_species[i]; // Create data scale attribute sprintf(hdf5_dataset_name, "%s_scale", species_names[species]); // Create data space for the species scale attribute hsize_t dims = 1; hid_t attr_space = H5Screate_simple(1, &dims, NULL); // Create a time attribute hid_t scale_id = H5Acreate(h5_file_id, hdf5_dataset_name, H5T_NATIVE_DOUBLE, attr_space, H5P_DEFAULT, H5P_DEFAULT); // Write the attribute data herr_t status = H5Awrite(scale_id, H5T_NATIVE_DOUBLE, &init_values[species]); // Cleanup H5Aclose(scale_id); H5Sclose(attr_space); // Create init value attribute sprintf(hdf5_dataset_name, "%s_init", species_names[species]); // Create data space for the species init attribute dims = 1; attr_space = H5Screate_simple(1, &dims, NULL); // Create a time attribute hid_t init_id = H5Acreate(h5_file_id, hdf5_dataset_name, H5T_NATIVE_DOUBLE, attr_space, H5P_DEFAULT, H5P_DEFAULT); // Write the attribute data status = H5Awrite(init_id, H5T_NATIVE_DOUBLE, &init_values[species]); // Cleanup H5Aclose(init_id); H5Sclose(attr_space); } } double* yz_sbuf0; double* yz_rbuf0; double* xz_sbuf0; double* xz_rbuf0; double* xy_sbuf0; double* xy_rbuf0; double* yz_sbuf1; double* yz_rbuf1; double* xz_sbuf1; double* xz_rbuf1; double* xy_sbuf1; double* xy_rbuf1; yz_sbuf0=(double*)mpi_malloc(my_id,5*ny*nz*sizeof(double)); xz_sbuf0=(double*)mpi_malloc(my_id,5*nx*nz*sizeof(double)); xy_sbuf0=(double*)mpi_malloc(my_id,5*nx*ny*sizeof(double)); yz_sbuf1=(double*)mpi_malloc(my_id,5*ny*nz*sizeof(double)); xz_sbuf1=(double*)mpi_malloc(my_id,5*nx*nz*sizeof(double)); xy_sbuf1=(double*)mpi_malloc(my_id,5*nx*ny*sizeof(double)); yz_rbuf0=(double*)mpi_malloc(my_id,5*ny*nz*sizeof(double)); xz_rbuf0=(double*)mpi_malloc(my_id,5*nx*nz*sizeof(double)); xy_rbuf0=(double*)mpi_malloc(my_id,5*nx*ny*sizeof(double)); yz_rbuf1=(double*)mpi_malloc(my_id,5*ny*nz*sizeof(double)); xz_rbuf1=(double*)mpi_malloc(my_id,5*nx*nz*sizeof(double)); xy_rbuf1=(double*)mpi_malloc(my_id,5*nx*ny*sizeof(double)); #ifdef __PAPI__ if ( PAPI_start( EventSet ) != PAPI_OK){ printf("PAPI_read_counters failed\n"); } #endif //settime //T=1000*dt; //for ( T = 0; T < TimeStep; T += 1 ) int t_counter=0; while(t<T) //while(0) { t+=dt; t_counter++; time_comm-=timing(); updateBound(C0[0], C0[1], C0[2], C0[3], 
C0[4], t_counter, nx0, ny0, nz0, yz_sbuf0,yz_rbuf0, xz_sbuf0,xz_rbuf0, xy_sbuf0,xy_rbuf0, yz_sbuf1,yz_rbuf1, xz_sbuf1,xz_rbuf1, xy_sbuf1,xy_rbuf1, NeighBor, ar_status,ar_send_req,ar_recv_req, comm, comm3d); time_comm+=timing(); // Diffusion update time_conc-=timing(); // Change to use a faster computing function compute_pde_ode(nx0, ny0, nz0, dt, gamma, 1e-4, alpha, B_tot, k_on, k_off, C0, C1, div_y); // for ( i = 0; i < 5; i += 1 ) { // laplace3D(nx0,ny0,nz0,C0[i],nx1,ny1,nz1,C1[i],alpha[i]); // } // for ( i = 2; i < 6; i += 1 ) { // reaction3D(nx1,ny1,nz1,C1[cai],nx1,ny1,nz1,C1[i],B_tot[i],k_on[i],k_off[i],dt); // } // serca3D(nx1,ny1,nz1, C1[cai],nx1,ny1,nz1, C1[sri], dt, gamma, 1.0); time_conc+=timing(); // Update at RyRs, one at the time time_ryr-=timing(); update_ryr(h_scale, nx0, ny0, nz0, C1[cai], C1[sri], C1[csqni], C1[0],C1[2],C1[3],C1[4], k_on_CSQN, k_off_CSQN,CSQN_tot, gamma, K, dt, ryr_len, i0_ryr, i1_ryr, i2_ryr, csqn_len, i0_csqn, i1_csqn, i2_csqn, cleft_len, i0_cleft, i1_cleft, i2_cleft,cleft_nb, states0, states1); time_ryr+=timing(); double sum_c_i_root[7]; double sum_c_i[7]; double cai_min; double cai_min_root=0.0; double cai_max; double cai_max_root=1.0; double sm; double ca[8]; char caoutfile[100]; if ((fmod(t,DT)<dt)||(t==dt)){ time_io-=timing(); for(idx=0; idx<7; idx++){ sum_c_i[idx]=0.0; for ( i = 1; i <= nx; i += 1 ) for ( j = 1; j <= ny; j += 1 ) for ( k = 1; k <= nz; k += 1 ) sum_c_i[idx]+=C1[idx][i*ny0*nz0+j*nz0+k]; } cai_min=my_min(C1[cai],len); cai_max=my_max(C1[cai],len); /* reduce operation comm*/ MPI_Reduce(&sum_c_i[0], &sum_c_i_root[0], 7, MPI_DOUBLE, MPI_SUM, 0, comm); MPI_Reduce(&cai_min, &cai_min_root, 1, MPI_DOUBLE, MPI_MIN, 0, comm); MPI_Reduce(&cai_max, &cai_max_root, 1, MPI_DOUBLE, MPI_MAX, 0, comm); if(!my_id){ sm = 0; ca[0] = t; if(save_data) fprintf(fpdata,"%f ", ca[0]); for(idx=0; idx<7; idx++){ sm += fraction[idx]*sum_c_i_root[idx]; ca[idx+1] = sum_c_i_root[idx]/((double)nx*x_domains*(double)ny*y_domains*(double)nz*z_domains); if(DB_PF){ printf("ca[%d]: %f , sum : %f, nx ny nz: %d %d %d \n",idx+1, ca[idx+1], sum_c_i_root[idx],nx*x_domains,ny*y_domains,nz*z_domains); } if(save_data) fprintf(fpdata,"%f ", ca[idx+1]); } if(save_data) fprintf(fpdata,"\n "); printf("%3d, %.3f, %3.2f, %7.2f, %3.2f, %4.2f, %.2f \n", counter, t, ca[1], ca[2], cai_min_root, cai_max_root, sm); } if(save_data && in_midx_slice) { // If saving in hdf5 if (save_hdf5) { hsize_t dimsf[2] = {size_y, size_z}; /* dataset dimensions */ hsize_t chunk_dims[2] = {ny, nz}; /* chunk dimensions */ hsize_t h5_offset[2] = {coord[1]*nz, coord[0]*ny}; hsize_t h5_count[2] = {1, 1}; // Create group name sprintf(hdf5_group_name, "/data_%d", counter); hid_t group_id = H5Gcreate(h5_file_id, hdf5_group_name, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); // Create data space for the time attribute hsize_t dims = 1; hid_t attr_space = H5Screate_simple(1, &dims, NULL); // Create a time attribute hid_t time_id = H5Acreate(group_id, "time", H5T_NATIVE_DOUBLE, attr_space, H5P_DEFAULT, H5P_DEFAULT); // Write the attribute data double time_data = counter*DT; herr_t status = H5Awrite(time_id, H5T_NATIVE_DOUBLE, &time_data); // Cleanup H5Aclose(time_id); H5Sclose(attr_space); for (i=0; i<NUM_SAVE_SPECIES; i++) { // Get species int species = save_species[i]; sprintf(hdf5_dataset_name, "%s/%s", hdf5_group_name, species_names[species]); // file and dataset identifiers hid_t filespace = H5Screate_simple(2, dimsf, NULL); hid_t memspace = H5Screate_simple(2, chunk_dims, NULL); // Create chunked dataset. 
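/*
 * Added descriptive comment: for every saved species one dataset covering the full
 * size_y x size_z plane is created, chunked by the local ny x nz block. Each rank
 * then selects its own hyperslab at h5_offset in the filespace, converts its slice
 * to the scaled integer type with transfer_hdf5_data(), and writes it using an
 * H5FD_MPIO_COLLECTIVE transfer property, so the single H5Dwrite() per species is a
 * collective operation across all ranks.
 */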
hid_t plist_id = H5Pcreate(H5P_DATASET_CREATE); H5Pset_chunk(plist_id, 2, chunk_dims); // Create compression filter (Not supported in parallel yet...) //unsigned int gzip_level = 9; //herr_t status = H5Pset_filter(plist_id, H5Z_FILTER_DEFLATE, // H5Z_FLAG_OPTIONAL, 1, &gzip_level); hid_t dset_id = H5Dcreate(h5_file_id, hdf5_dataset_name, H5T_DATA_TYPE, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); H5Pclose(plist_id); H5Sclose(filespace); // Select hyperslab in the file. filespace = H5Dget_space(dset_id); status = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, h5_offset, NULL, h5_count, chunk_dims); // Copy data to h5_data transfer_hdf5_data(h5_data, &(C0[species][ny0*nz0*mid_coord_x]), &(C1[species][ny0*nz0*mid_coord_x]), init_values[species], chunk_dims); // Create property list for collective dataset write. plist_id = H5Pcreate(H5P_DATASET_XFER); H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE); status = H5Dwrite(dset_id, H5T_DATA_TYPE, memspace, filespace, plist_id, h5_data); // Close/release resources. H5Dclose(dset_id); H5Sclose(filespace); H5Sclose(memspace); H5Pclose(plist_id); } H5Gclose(group_id); } // No HDF5 else { // Get species int species = save_species[i]; for (i=0; i<NUM_SAVE_SPECIES; i++) { sprintf(caoutfile, "%s/Ca%d_T%d_rank%d_%d_%d.np", outdirname, species, counter, coord[2], coord[1], coord[0]); if(save_binary_file) store2Dmatrixfile_double_bin(caoutfile, C1[species], ny0, nz0, mid_coord_x); else store2Dmatrixfile_double_1D(caoutfile, C1[species], ny0, nz0, mid_coord_x); } } } counter += 1; } // # Update Ca for(i=0;i<7;i++){ C_temp=C0[i]; C0[i]=C1[i]; C1[i]=C_temp; } MPI_Waitall(6, ar_send_req, ar_status); } time_main+=timing(); if(my_id==0){ if(save_data) fclose(fpdata); printf("cubiod_c: h:%d size_x:%d size_y:%d size_z:%d dt:%f, T:%f, TimeStep:%d, DT:%f, x_slice_num:%d\n", h, size_x, size_y, size_z,dt,T,(int)(T/dt),DT,x_slice_num); printf("nx0:%d ny0:%d nz0:%d size/array:%7.3f MB total size:%7.3f MB\n", nx0,ny0,nz0,nx0*ny0*nz0*8*1e-6,nx0*ny0*nz0*8*1e-6*12); #ifdef __PAPI__ if ( PAPI_stop( EventSet, res_papi ) != PAPI_OK){ printf("PAPI_accum_counters failed\n"); } for (i = 0; i<NUM_EVENTS; i++){ PAPI_event_code_to_name(Events[i], EventName); printf("PAPI Event name: %s, value: %lld\n", EventName, res_papi[i]); } #endif printf("computing time: %7.3f \n", time_conc); printf("updateryr time: %7.3f \n", time_ryr); printf("communica time: %7.3f \n", time_comm); printf("main time: %7.3f \n", time_main); #ifdef __PAPI__ printf("PAPI Performanc/core: %7.3f GFLOPS\n", res_papi[0]/1e9/time_conc); #endif } if (save_hdf5) { H5Fclose(h5_file_id); free(h5_data); } for(i=0;i<5;i++){ free(C0[i]); free(C1[i]); } free(C0[6]); free(C0[5]); free(i0_ryr); free(i1_ryr); free(i2_ryr); free(i0_csqn); free(i1_csqn); free(i2_csqn); free(i0_cleft); free(i1_cleft); free(i2_cleft); free(cleft_nb); MPI_Finalize(); return 0; } void laplace3D (int nx0, int ny0, int nz0, double* C0, int nx1, int ny1, int nz1, double* C1, double alpha)//, int num_threads) { // Set num threads // omp_set_num_threads(num_threads); // Local variables int i, j, k; double C0_tmp; // Main kernel loop // for (i=1; i<nx0-1; i++) { for (j=1; j<ny0-1; j++) { for (k=1; k<nz0-1; k++) { // Main kernel C0_tmp = -6*C0[i*nz0*ny0+j*nz0+k] + C0[(i-1)*nz0*ny0+j*nz0+k] + C0[(i+1)*nz0*ny0+j*nz0+k] + C0[i*nz0*ny0+(j-1)*nz0+k] + C0[i*nz0*ny0+(j+1)*nz0+k] + C0[i*nz0*ny0+j*nz0+k-1] + C0[i*nz0*ny0+j*nz0+k+1]; // Put value back into return array with offset to indices C1[i*nz1*ny1+j*nz1+k] = C0[i*nz1*ny1+j*nz1+k] + C0_tmp*alpha; } } } } void 
reaction3D (int nx0, int ny0, int nz0, double* Ca, int nx1, int ny1, int nz1, double* buff, double B_tot, double k_on, double k_off, double dt)//, int num_threads) { // Set num threads // omp_set_num_threads(num_threads); // Local variables int i, j, k; double J; // Use pointers reducing indexing into memory to once double* Ca_ijk; double* buff_ijk; // Main kernel loop // for (i=1; i<nx0-1; i++) { for (j=1; j<ny0-1; j++) { for (k=1; k<nz0-1; k++) { // Main kernel Ca_ijk = &Ca[i*nz0*ny0+j*nz0+k]; buff_ijk = &buff[i*nz0*ny0+j*nz0+k]; J = k_on*(B_tot - *buff_ijk)*(*Ca_ijk) - k_off*(*buff_ijk); *Ca_ijk -= dt*J; *buff_ijk += dt*J; } } } } void serca3D (int nx0, int ny0, int nz0, double* Ca_i, int nx1, int ny1, int nz1, double* Ca_SR, double dt, double gamma, double fudge)//, int num_threads) { // Set num threads // omp_set_num_threads(num_threads); // Local variables int i, j, k; double J; // Use pointers reducing indexing into memory to once double Ca_i2_ijk; double Ca_SR2_ijk; // Main kernel loop // for (i=1; i<nx0-1; i++) { for (j=1; j<ny0-1; j++) { for (k=1; k<nz0-1; k++) { // Main kernel Ca_i2_ijk = Ca_i[i*nz0*ny0+j*nz0+k]; Ca_SR2_ijk = Ca_SR[i*nz0*ny0+j*nz0+k]; Ca_i2_ijk *= Ca_i2_ijk; Ca_SR2_ijk *= Ca_SR2_ijk; J = fudge*(570997802.885875*Ca_i2_ijk - 0.0425239333622699*Ca_SR2_ijk)/(106720651.206402*Ca_i2_ijk + 182.498197548666*Ca_SR2_ijk + 5.35062954944879); Ca_i[i*nz0*ny0+j*nz0+k] -= dt*J; Ca_SR[i*nz0*ny0+j*nz0+k] += dt*J/gamma; } } } } void update_ryr(int h_scale,int nx0, int ny0, int nz0, double* Ca_i, double* Ca_SR, double* Ca_CSQN, double* C10, double* C12, double* C13, double* C14, double k_on_CSQN, double k_off_CSQN, double CSQN_tot, double gamma, double K, double dt, int ryr_len, int* i0_ryr, int* i1_ryr, int* i2_ryr, int csqn_len, int* i0_csqn, int* i1_csqn, int* i2_csqn, int cleft_len, int* i0_cleft, int* i1_cleft, int* i2_cleft,int* cleft_nb, int* states0, int* states1) { int i,j; int x_copy_from; int x,y,z; int nb_y,nb_z; int idx,idx_cleft,idx_csqn; double J; int open; double c0,c1; //extend csqn on x direction // for(j=(1-h_scale);j<h_scale;j++){ //extend cdqn on x+ direction for 30nm for(j=0;j<h_scale;j++){ for(i=0;i<csqn_len;i+=1){ x=i0_csqn[i]+j; #ifdef DEBUG_TEST if((x<0)||x>(nx0-1)) { printf("wrong csqn x index\n"); exit(0); } #endif y=i1_csqn[i]; z=i2_csqn[i]; idx=x*ny0*nz0+y*nz0+z; //CSQN step: J = k_on_CSQN*(CSQN_tot - Ca_CSQN[idx])*Ca_SR[idx] - k_off_CSQN*Ca_CSQN[idx]; Ca_SR[idx] -= dt*J; Ca_CSQN[idx] += dt*J; } } //add no_flux boundary by copy the neighbour's value on no_flux voxel //add x+ front no-flux plane on ryr with +1 offset, and copy from -1 x-plane(where ryr is on) j=1; x_copy_from=-1; for(i=0;i<csqn_len;i+=1){ x=i0_csqn[i]+j; #ifdef DEBUG_TEST if((x<0)||x>(nx0-1)) { printf("wrong csqn x index\n"); exit(0); } #endif y=i1_csqn[i]; z=i2_csqn[i]; idx_cleft=x*ny0*nz0+y*nz0+z; idx_csqn =(x+x_copy_from)*ny0*nz0+y*nz0+z; C10[idx_cleft]=C10[idx_csqn]; C12[idx_cleft]=C12[idx_csqn]; C13[idx_cleft]=C13[idx_csqn]; C14[idx_cleft]=C14[idx_csqn]; } //add x+ back no-flux plane on ryr with h_scale offset, and copy from +1 x-plane(outside of csqn) if(h_scale==2)//15 nm j=h_scale+1;//guarantee that there is at least one voxel inner the no-flux boundary else//5nm 3mn 1nm, j=h_scale; x_copy_from=+1; for(i=0;i<csqn_len;i+=1){ x=i0_csqn[i]+j; #ifdef DEBUG_TEST if((x<0)||x>(nx0-1)) { printf("wrong csqn x index\n"); exit(0); } #endif y=i1_csqn[i]; z=i2_csqn[i]; idx_cleft=x*ny0*nz0+y*nz0+z; idx_csqn =(x+x_copy_from)*ny0*nz0+y*nz0+z; C10[idx_cleft]=C10[idx_csqn]; 
C12[idx_cleft]=C12[idx_csqn]; C13[idx_cleft]=C13[idx_csqn]; C14[idx_cleft]=C14[idx_csqn]; } //extend y-z plane no_flux boundary along x+ direction with +1 offset and copy value from outside of CSQN by cleft_nb index int k; if(h_scale==2)//15 nm k=1;//guarantee that there is at least one voxel inner the no-flux boundary else//5nm 3mn 1nm, k=0; for(j=2;j<h_scale+k;j++){ for(i=0;i<cleft_len;i+=1){ x=i0_cleft[i]+j; #ifdef DEBUG_TEST if((x<0)||x>(nx0-1)) { printf("wrong csqn x index\n"); exit(0); } #endif y=i1_cleft[i]; z=i2_cleft[i]; nb_y=cleft_nb[i]/8-1; nb_z=cleft_nb[i]%8-1; idx_cleft=x*ny0*nz0+y*nz0+z; idx_csqn =x*ny0*nz0+(y+nb_y)*nz0+z+nb_z; C10[idx_cleft]=C10[idx_csqn]; C12[idx_cleft]=C12[idx_csqn]; C13[idx_cleft]=C13[idx_csqn]; C14[idx_cleft]=C14[idx_csqn]; } } //add x- front no-flux plane on ryr with -h_scale/2(15nm) offset, and copy from +1 x-plane(t-tubule) j=0-h_scale/2; x_copy_from=1; for(i=0;i<csqn_len;i+=1){ x=i0_csqn[i]+j; #ifdef DEBUG_TEST if((x<0)||x>(nx0-1)) { printf("wrong csqn x index\n"); exit(0); } #endif y=i1_csqn[i]; z=i2_csqn[i]; idx_cleft=x*ny0*nz0+y*nz0+z; idx_csqn =(x+x_copy_from)*ny0*nz0+y*nz0+z; C10[idx_cleft]=C10[idx_csqn]; C12[idx_cleft]=C12[idx_csqn]; C13[idx_cleft]=C13[idx_csqn]; C14[idx_cleft]=C14[idx_csqn]; } //add x- back no-flux plane on ryr with -h_scale/2+1 offset, and copy from -1 x-plane(t-tubule) /* if(h_scale=2) j=0-h_scale/2-h_scale; else j=0-h_scale/2-h_scale+1; */ /* how thick should t-tubule be? now, just set it 2 lines on x- direction */ // j=0-h_scale/2-h_scale-1; j=0-h_scale/2-1; x_copy_from=-1; for(i=0;i<csqn_len;i+=1){ x=i0_csqn[i]+j; #ifdef DEBUG_TEST if((x<0)||x>(nx0-1)) { printf("wrong csqn x index\n"); exit(0); } #endif y=i1_csqn[i]; z=i2_csqn[i]; idx_cleft=x*ny0*nz0+y*nz0+z; idx_csqn =(x+x_copy_from)*ny0*nz0+y*nz0+z; C10[idx_cleft]=C10[idx_csqn]; C12[idx_cleft]=C12[idx_csqn]; C13[idx_cleft]=C13[idx_csqn]; C14[idx_cleft]=C14[idx_csqn]; } /* how thick should t-tubule be? 
*/ /* //extend y-z plane no_flux boundary along x- direction with +1 offset and copy value from outside of CSQN by cleft_nb index int k; if(h_scale==2)//15 nm k=1;//guarantee that there is at least one voxel inner the no-flux boundary else//5nm 3mn 1nm, k=0; for(j=0-h_scale/2-1;j>0-h_scale/2-h_scale+1-k;j--){ for(i=0;i<cleft_len;i+=1){ x=i0_cleft[i]+j; #ifdef DEBUG_TEST if((x<0)||x>(nx0-1)) { printf("wrong csqn x index\n"); exit(0); } #endif y=i1_cleft[i]; z=i2_cleft[i]; nb_y=cleft_nb[i]/8-1; nb_z=cleft_nb[i]%8-1; idx_cleft=x*ny0*nz0+y*nz0+z; idx_csqn =x*ny0*nz0+(y+nb_y)*nz0+z+nb_z; C10[idx_cleft]=C10[idx_csqn]; C12[idx_cleft]=C12[idx_csqn]; C13[idx_cleft]=C13[idx_csqn]; C14[idx_cleft]=C14[idx_csqn]; } } */ for ( i = 0; i < ryr_len; i += 1 ) { x=i0_ryr[i]; y=i1_ryr[i]; z=i2_ryr[i]; idx=x*ny0*nz0+y*nz0+z; // #Continous formulation // #states[:,i] += dt*stern(t, states[:,i], Ca_i[idx]) stern_discrete(dt, &states0[i],&states1[i], Ca_i[idx]); open = states0[i]*(1-states1[i]); // #Exp Euler: // #J_RyR = k*open*(Ca_SR[idx]-Ca_i[idx]) // #Ca_i[idx] += dt*J_RyR // #Ca_SR[idx] -= dt*J_RyR/gamma; // #Analytical update: // K = exp(-k_s*dt*(1+1/gamma)) if (open){ if(DB_PF) printf("open [%d] ryr[%d,%d,%d] \n", i, x, y,z); c0 = (Ca_i[idx] + gamma*Ca_SR[idx])/(1+gamma); c1 = (Ca_i[idx] - Ca_SR[idx])/(1+1/gamma); Ca_i[idx] = c0 + c1*K; Ca_SR[idx] = c0 - c1*K/gamma; } } } void stern(double t, double* y0, double* y1, double Ca){ double m = *y0; double h = *y1; double kim = 0.005; double kom = 0.06; double K_i = 0.01*10; double K_o = 0.01*41.4; double ki = kim/K_i; double ko = kom/(K_o*K_o); double dm = ko*Ca*Ca*(1-m)-kom*m; double dh = ki*Ca*(1-h)-kim*h; *y0=dm; *y1=dh; } void stern_discrete(double dt, int* y0, int* y1, double Ca){ double kim = 0.002; // 1/ms double kom = 1.5; // 0.5 1/ms double kd_i = 20.0; // 20.0 um*ms double kd_o = 0.9; // um*ms^N 0.7, 0.8, 0.9, 1.0 double Ca_ki = Ca/kd_i; double Ca_ko = Ca/kd_o; double ki = Ca_ki*Ca_ki; // (Ca/kd_i)^2 double ko = Ca_ko*Ca_ko*Ca_ko*Ca_ko; // ko = (Ca/kd_o)^4 //double kim = 0.005; // Original: 0.005 //double kom = 0.04; // Original: 0.06 //double ki = Ca*1.5*1e-3; // Original: Ca*0.5*1e-3 //double ko = 1e-6*Ca*Ca*3500; // Original: 1e-6*Ca*Ca*{35,1200,2000,3500} double r; int m, h; m = *y0; if(m==1){ r = my_random(); m = 1 - (r<(dt*kom)); } else { r=my_random(); m = 1*(r<(dt*ko)); } h = *y1; if(h==1){ r = my_random(); h = 1 - (r<(dt*kim)); } else{ r = my_random(); h = 1*(r<(dt*ki)); } *y0=m; *y1=h; } inline double my_random() { double r; double x; // r=(double)(rand()%100000000); // x=(r*1e-8); x=((double)rand())/(double)RAND_MAX; return x; } void store2Dmatrixfile_double_1D(char* outfile, double* ar, int rows, int cols, int x_strid){ FILE *fpdata; int i,j; if ((fpdata=fopen(outfile, "w"))==NULL) { printf("fialed open output file "); printf("%s",outfile); printf(" ! \n "); exit(0); } // printf("----Generating list output to "); // printf("%s",outfile); // printf(" file----\n"); for(i=0;i<rows;i++) { for(j=0;j<cols;j++) { fprintf(fpdata,"%.9e ", ar[x_strid*rows*cols+i*cols+j]); } fprintf(fpdata,"\n"); } fclose(fpdata); return; } void store2Dmatrixfile_double_bin(char* outfile, double* ar, int rows, int cols, int x_strid) { FILE *fpdata; int i,j; if ((fpdata=fopen(outfile, "wb"))==NULL) { printf("failed open output file "); printf("%s",outfile); printf(" ! 
\n "); exit(0); } fwrite(&ar[x_strid*rows*cols],sizeof(double),rows*cols,fpdata); fclose(fpdata); return; } void transfer_hdf5_data(hdf5_data_type* h5_data, double* ar0, double* ar1, double scale_value, hsize_t* chunk_dims) { int i,j; int rows=chunk_dims[0]; int cols=chunk_dims[1]; // Transfer data from padded ar to stripped data for(i=0;i<rows;i++) { for(j=0;j<cols;j++) { double rel_data_diff = (ar1[i*(cols+2)+j+1]-ar0[i*(cols+2)+j+1])/scale_value; h5_data[i*cols+j] = (hdf5_data_type)round(rel_data_diff*H5_DATA_LIMIT_1); } } } void store2Dmatrixfile_int_1D(char* outfile, int* ar, int rows, int cols){ FILE *fpdata; int i,j; if ((fpdata=fopen(outfile, "w"))==NULL) { printf("failed open output file "); printf("%s",outfile); printf(" ! \n "); exit(0); } printf("----Generating list output to "); printf("%s",outfile); printf(" file----\n"); for(i=0;i<rows;i++) { for(j=0;j<cols;j++) fprintf(fpdata,"%d ",ar[i*cols+j]); fprintf(fpdata,"\n"); } fclose(fpdata); return; } double my_min(double* ar, int len) { double min=ar[0]; int i; for ( i = 0; i < len; i += 1 ) { if(ar[i]<min) min=ar[i]; } return min; } double my_max(double* ar, int len) { double max=ar[0]; int i; for ( i = 0; i < len; i += 1 ) { if(ar[i]>max) max=ar[i]; } return max; } double timing(){ double time; struct timeval timmer; gettimeofday(&timmer,NULL); time = 1000000*timmer.tv_sec + timmer.tv_usec; time /= 1000000; return time; } int load_indices_serial(int nx, int ny, int nz, int h, int** i0_ryr, int** i1_ryr, int** i2_ryr, int* ryr_len, int** i0_csqn, int** i1_csqn, int** i2_csqn, int* csqn_len, int** i0_cleft, int** i1_cleft, int** i2_cleft, int** cleft_nb, int* cleft_len, int x_slice_mid, int x_slice_width, int x_slice_num, int use_failing) { int i,j,k; int nx_old; int ny_old; int nz_old; nx_old=nx; ny_old=ny; nz_old=nz; // Scale nx, xy, nz in terms of RyR if(30%h!=0){ printf("30 must be divisible by h!"); exit(1); } int h_scale; h_scale = 30/h; nx = nx/h_scale; ny = ny/h_scale; nz = nz/h_scale; // All CaRU placed mid-sarcomere // int mid_x = (nx+1)/2; // load RyR indices from file int* i1; int* i2; int i1_len; int i2_len; char i_RyR_indices_name[200]; char j_RyR_indices_name[200]; sprintf(i_RyR_indices_name, "i_RyR_indices%s.dat", use_failing ? "_failing" : ""); sprintf(j_RyR_indices_name, "j_RyR_indices%s.dat", use_failing ? 
"_failing" : ""); if (use_failing) printf("Load failing indices"); else printf("Load normal indices"); i1=loadRyRindexfile_int(i_RyR_indices_name, &i1_len); i2=loadRyRindexfile_int(j_RyR_indices_name, &i2_len); // # Only use the subset which are inside the geometry if(i1_len==i2_len) printf("num RyR before reduction: %d\n", i1_len); else printf("num RyR is wrong: i1_len!=i2_len\n"); int* i1_temp; int* i2_temp; int i1_temp_len=0; for ( i = 0; i < i1_len; i += 1 ) { if(i1[i]<ny) i1_temp_len++; } i1_temp=malloc(i1_temp_len*sizeof(int)); i2_temp=malloc(i1_temp_len*sizeof(int)); j=0; for ( i = 0; i < i1_len; i += 1 ) { if(i1[i]<ny){ i1_temp[j]=i1[i]; i2_temp[j]=i2[i]; j++; } } free(i1); free(i2); int i1_ryr_len=0; for ( i = 0; i < i1_temp_len; i += 1 ) { if(i2_temp[i]<nz) i1_ryr_len++; } *i0_ryr=malloc(x_slice_num*i1_ryr_len*sizeof(int)); *i1_ryr=malloc(x_slice_num*i1_ryr_len*sizeof(int)); *i2_ryr=malloc(x_slice_num*i1_ryr_len*sizeof(int)); j=0; for ( i = 0; i < i1_temp_len; i += 1 ) { if(i2_temp[i]<nz){ for(k=0; k < x_slice_num; k++){ (*i1_ryr)[k*i1_ryr_len+j]=i1_temp[i]; (*i2_ryr)[k*i1_ryr_len+j]=i2_temp[i]; } j++; } } free(i1_temp); free(i2_temp); // Scale indices and move to center of macro voxel for ( i = 0; i < i1_ryr_len; i += 1 ) { for(k=0; k < x_slice_num; k++){ (*i0_ryr)[k*i1_ryr_len+i] = k*x_slice_width+x_slice_mid; //for those ryr just on 0 boundary, avoid to subtracting their coords to negative if((*i1_ryr)[k*i1_ryr_len+i]>0) (*i1_ryr)[k*i1_ryr_len+i] = (*i1_ryr)[k*i1_ryr_len+i]*h_scale - floor((double)h_scale/2); else (*i1_ryr)[k*i1_ryr_len+i] = (*i1_ryr)[k*i1_ryr_len+i]*h_scale; if((*i2_ryr)[k*i1_ryr_len+i]>0) (*i2_ryr)[k*i1_ryr_len+i] = (*i2_ryr)[k*i1_ryr_len+i]*h_scale - floor((double)h_scale/2); else (*i2_ryr)[k*i1_ryr_len+i] = (*i2_ryr)[k*i1_ryr_len+i]*h_scale; } } *ryr_len=i1_ryr_len*x_slice_num; // load CSQN indices from file char i_csqn_indices_name[200]; char j_csqn_indices_name[200]; sprintf(i_csqn_indices_name, "i_csqn_indices%s.dat", use_failing ? "_failing" : ""); sprintf(j_csqn_indices_name, "j_csqn_indices%s.dat", use_failing ? 
"_failing" : ""); i1 = loadRyRindexfile_int(i_csqn_indices_name, &i1_len); i2 = loadRyRindexfile_int(j_csqn_indices_name, &i2_len); if(i1_len==i2_len) printf("num CSQN before reduction: %d\n", i1_len); else printf("num CSQN is wrong: i1_len!=i2_len\n"); //# Only use the subset which are inside the geometry // i1_csqn = i1[i2<nz]*h_scale // i2_csqn = i2[i2<nz]*h_scale // i0_csqn = np.ones(len(i1_csqn), dtype=int)*mid_x*h_scale i1_temp_len=0; for ( i = 0; i < i1_len; i += 1 ) { if(i1[i]<ny) i1_temp_len++; } i1_temp=malloc(i1_temp_len*sizeof(int)); i2_temp=malloc(i1_temp_len*sizeof(int)); j=0; for ( i = 0; i < i1_len; i += 1 ) { if(i1[i]<ny){ i1_temp[j]=i1[i]; i2_temp[j]=i2[i]; j++; } } free(i1); free(i2); int i1_csqn_len=0; for ( i = 0; i < i1_temp_len; i += 1 ) { if(i2_temp[i]<nz) i1_csqn_len++; } *i0_csqn=malloc(x_slice_num*i1_csqn_len*sizeof(int)); *i1_csqn=malloc(x_slice_num*i1_csqn_len*sizeof(int)); *i2_csqn=malloc(x_slice_num*i1_csqn_len*sizeof(int)); j=0; for ( i = 0; i < i1_temp_len; i += 1 ) { if(i2_temp[i]<nz){ for(k=0; k < x_slice_num; k++){ (*i1_csqn)[k*i1_csqn_len+j]=i1_temp[i]; (*i2_csqn)[k*i1_csqn_len+j]=i2_temp[i]; } j++; } } free(i1_temp); free(i2_temp); // Scale indices and move to center of macro voxel for(k=0; k < x_slice_num; k++){ for ( i = 0; i < i1_csqn_len; i += 1 ) { (*i0_csqn)[k*i1_csqn_len+i] = k*x_slice_width+x_slice_mid; (*i1_csqn)[k*i1_csqn_len+i] = (*i1_csqn)[k*i1_csqn_len+i]*h_scale; (*i2_csqn)[k*i1_csqn_len+i] = (*i2_csqn)[k*i1_csqn_len+i]*h_scale; } } int* i0_csqn_list; int* i1_csqn_list; int* i2_csqn_list; int m; int csqn_count; *csqn_len=x_slice_num*i1_csqn_len*h_scale*h_scale; *cleft_len=0;//x_slice_num*i1_csqn_len*4*h_scale; // # Add CSQN to all voxels covered by the original CSQN array if (h_scale > 1){ i0_csqn_list=malloc(x_slice_num*i1_csqn_len*h_scale*h_scale*sizeof(int)); i1_csqn_list=malloc(x_slice_num*i1_csqn_len*h_scale*h_scale*sizeof(int)); i2_csqn_list=malloc(x_slice_num*i1_csqn_len*h_scale*h_scale*sizeof(int)); csqn_count=0; // # Add offsetted versions of the csqn for ( m = 0; m < x_slice_num; m += 1 ) { for ( i = 0; i < h_scale; i += 1 ) { for ( j = 0; j < h_scale; j += 1 ) { for ( k = 0; k < i1_csqn_len; k += 1 ) { i0_csqn_list[csqn_count]=(*i0_csqn)[m*i1_csqn_len+k]; i1_csqn_list[csqn_count]=(*i1_csqn)[m*i1_csqn_len+k]+i; i2_csqn_list[csqn_count]=(*i2_csqn)[m*i1_csqn_len+k]+j; csqn_count++; } } } } if(csqn_count!=(*csqn_len)) { printf("csqn_count wrong\n"); exit(0); } } else { i0_csqn_list=(*i0_csqn); i1_csqn_list=(*i1_csqn); i2_csqn_list=(*i2_csqn); } int a_slice_csqn_len=i1_csqn_len*h_scale*h_scale; BinarySort_two(&i1_csqn_list[0],&i2_csqn_list[0],a_slice_csqn_len); int* y_index; y_index=malloc(ny_old*sizeof(int)); for ( i = 0; i < ny_old; i += 1 ) { y_index[i]=-1; } for ( i = a_slice_csqn_len-1; i >= 0; i -= 1 ) { y_index[i1_csqn_list[i]]=i; } //generate cleft index on Y-Z plane,just wrapping the outside of a group of CSQN, //If cleft is in the outside of the mesh or is already indexed by a CSQN, then it is not a true cleft. //Also generate the relative coordinates for th neighbour of each cleft from which to copy the value. 
//the relative coordinate of y is cleft_nb%8-1, and that of z is cleft_nb/8-1 int coord_y,coord_z; *i1_cleft=(int*)malloc(i1_csqn_len*4*h_scale*sizeof(int)); *i2_cleft=(int*)malloc(i1_csqn_len*4*h_scale*sizeof(int)); *cleft_nb=(int*)malloc(i1_csqn_len*4*h_scale*sizeof(int)); *cleft_len=0; for ( k = 0; k < i1_csqn_len; k += 1 ) { for ( j = 0; j < h_scale; j += 1 ) { //z bottom line coord_y=(*i1_csqn)[k]-1; coord_z=(*i2_csqn)[k]+j; if(IsTrueCleft(coord_y, coord_z, ny_old, nz_old, i1_csqn_list, i2_csqn_list, y_index, a_slice_csqn_len)) { (*i1_cleft)[(*cleft_len)]=coord_y; (*i2_cleft)[(*cleft_len)]=coord_z; //copy from outside // (*cleft_nb)[(*cleft_len)]=0+1; // copy from inside (*cleft_nb)[(*cleft_len)]=16+1; (*cleft_len)++; } //y left line coord_y=(*i1_csqn)[k]+j; coord_z=(*i2_csqn)[k]-1; if(IsTrueCleft(coord_y, coord_z, ny_old, nz_old, i1_csqn_list, i2_csqn_list, y_index, a_slice_csqn_len)) { (*i1_cleft)[(*cleft_len)]=coord_y; (*i2_cleft)[(*cleft_len)]=coord_z; //copy from inside // (*cleft_nb)[(*cleft_len)]=8+0; //copy from inside (*cleft_nb)[(*cleft_len)]=8+2; (*cleft_len)++; } //z top line coord_y=(*i1_csqn)[k]+h_scale; coord_z=(*i2_csqn)[k]+j; if(IsTrueCleft(coord_y, coord_z, ny_old, nz_old, i1_csqn_list, i2_csqn_list, y_index, a_slice_csqn_len)) { (*i1_cleft)[(*cleft_len)]=coord_y; (*i2_cleft)[(*cleft_len)]=coord_z; //copy from outside // (*cleft_nb)[(*cleft_len)]=16+1; // copy from inside (*cleft_nb)[(*cleft_len)]=0+1; (*cleft_len)++; } //y right line coord_y=(*i1_csqn)[k]+j; coord_z=(*i2_csqn)[k]+h_scale; if(IsTrueCleft(coord_y, coord_z, ny_old, nz_old, i1_csqn_list, i2_csqn_list, y_index, a_slice_csqn_len)) { (*i1_cleft)[(*cleft_len)]=coord_y; (*i2_cleft)[(*cleft_len)]=coord_z; //copy from outside // (*cleft_nb)[(*cleft_len)]=8+2; // copy from inside (*cleft_nb)[(*cleft_len)]=8+0; (*cleft_len)++; } } } if((*cleft_len)>i1_csqn_len*4*h_scale){ printf("wrong cleft_len found\n"); exit(0); } //add cleft for multiple 2um x-slices int* i0_cleft_list; int* i1_cleft_list; int* i2_cleft_list; int* cleft_nb_list; i0_cleft_list=malloc(x_slice_num*(*cleft_len)*sizeof(int)); i1_cleft_list=malloc(x_slice_num*(*cleft_len)*sizeof(int)); i2_cleft_list=malloc(x_slice_num*(*cleft_len)*sizeof(int)); cleft_nb_list=malloc(x_slice_num*(*cleft_len)*sizeof(int)); for(k=0; k < x_slice_num; k++){ for ( i = 0; i < (*cleft_len); i += 1 ) { i0_cleft_list[k*(*cleft_len)+i] = k*x_slice_width+x_slice_mid; i1_cleft_list[k*(*cleft_len)+i] = (*i1_cleft)[i]; i2_cleft_list[k*(*cleft_len)+i] = (*i2_cleft)[i]; cleft_nb_list[k*(*cleft_len)+i] = (*cleft_nb)[i]; } } free(*i1_cleft); free(*i2_cleft); free(*cleft_nb); *i0_cleft=i0_cleft_list; *i1_cleft=i1_cleft_list; *i2_cleft=i2_cleft_list; *cleft_nb=cleft_nb_list; *cleft_len=x_slice_num*(*cleft_len); if (h_scale > 1){ free(*i0_csqn); free(*i1_csqn); free(*i2_csqn); *i0_csqn=i0_csqn_list; *i1_csqn=i1_csqn_list; *i2_csqn=i2_csqn_list; } return h_scale; } int IsTrueCleft(int coord_y, int coord_z, int size_y, int size_z, int *i1_csqn, int *i2_csqn, int* y_index, int csqn_len) { int i; //in outside of the mesh if((coord_y<0)||(coord_y>=size_y)||(coord_z<0)||(coord_z>=size_z)) return 0; i=y_index[coord_y]; //not in CSQN if(i<0) return 1; while(i1_csqn[i]==coord_y){ //in CSQN if(i2_csqn[i]==coord_z) return 0; i++; //not in CSQN if(i>=csqn_len) return 1; } return 1; } int idxinrank(int nx, int ny, int nz, int i0, int i1, int i2, int rank, MPI_Comm comm3d) { int coords[3]; MPI_Cart_coords(comm3d,rank,3,coords); if( (i0>=coords[2]*nx)&&((i0<coords[2]+1)*nx)&& 
(i1>=coords[1]*ny)&&((i1<coords[1]+1)*ny)&& (i2>=coords[0]*nz)&&((i2<coords[0]+1)*nz)) { return 1; } else return 0; } int idxbl2rank(int nx, int ny, int nz, int i0, int i1, int i2, int* coords, MPI_Comm comm3d) { int rank=0; coords[2]=i0/nx; coords[1]=i1/ny; coords[0]=i2/nz; MPI_Cart_rank(comm3d,coords,&rank); return rank; } int distr_ryr_csqn_state(int h, int size_x, int size_y, int size_z, int nx, int ny, int nz, int** i0_ryr, int** i1_ryr, int** i2_ryr, int* ryr_len, int** i0_csqn, int** i1_csqn, int** i2_csqn, int* csqn_len, int** i0_cleft, int** i1_cleft, int** i2_cleft,int** cleft_nb, int* cleft_len, int** states0, int** states1, int x_slice_mid,int x_slice_width, int x_slice_num, MPI_Comm comm3d, MPI_Comm comm, int use_failing) { int i,j; int h_scale; int* global_i0_ryr; int* global_i1_ryr; int* global_i2_ryr; int* global_i0_ryr_reorder; int* global_i1_ryr_reorder; int* global_i2_ryr_reorder; int* global_i0_csqn; int* global_i1_csqn; int* global_i2_csqn; int* global_i0_csqn_reorder; int* global_i1_csqn_reorder; int* global_i2_csqn_reorder; int* global_i0_cleft; int* global_i1_cleft; int* global_i2_cleft; int* global_cleft_nb; int* global_i0_cleft_reorder; int* global_i1_cleft_reorder; int* global_i2_cleft_reorder; int* global_cleft_nb_reorder; int global_ryr_len; int global_csqn_len; int global_cleft_len; int* global_states0; int* global_states0_reorder; int* ryr_rec_count; int* ryr_rec_disp; int* ryr_rec_offset; int* csqn_rec_count; int* csqn_rec_disp; int* csqn_rec_offset; int* cleft_rec_count; int* cleft_rec_disp; int* cleft_rec_offset; int my_id; int nproc; int coords[3]; MPI_Comm_rank(comm,&my_id); MPI_Comm_size(comm,&nproc); if(my_id==0){ h_scale=load_indices_serial(size_x, size_y, size_z, h, &global_i0_ryr, &global_i1_ryr, &global_i2_ryr, &global_ryr_len, &global_i0_csqn, &global_i1_csqn,&global_i2_csqn,&global_csqn_len, &global_i0_cleft, &global_i1_cleft, &global_i2_cleft, &global_cleft_nb, &global_cleft_len, x_slice_mid,x_slice_width,x_slice_num, use_failing); printf("load indices from file: h:%d, h_scale:%d, nx:%d, ny:%d, nz:%d, ryr_len:%d, csqn_len:%d cleft_len:%d\n", h, h_scale, nx, ny, nz, global_ryr_len, global_csqn_len, global_cleft_len); if(global_ryr_len>0) global_states0=malloc(global_ryr_len*sizeof(int)); else global_states0=malloc(1*sizeof(int)); for ( i = 0; i < global_ryr_len; i++) global_states0[i]=0; if(global_ryr_len>=23){ for ( i = 1; i < 23; i =i+3 ) global_states0[i]=1; } else { for ( i = 1; i < global_ryr_len ; i =i+10 ) global_states0[i]=1; } if(DB_PF){ for(i=0;i<global_ryr_len;i++){ if(global_states0[i]==1) printf("ryr[%d]:%d,%d,%d \n",i,global_i0_ryr[i],global_i1_ryr[i],global_i2_ryr[i]); } } ryr_rec_count=malloc(nproc*sizeof(int)); csqn_rec_count=malloc(nproc*sizeof(int)); cleft_rec_count=malloc(nproc*sizeof(int)); for (i = 0; i < nproc; i++) { ryr_rec_count[i]=0; csqn_rec_count[i]=0; cleft_rec_count[i]=0; } for(i=0;i<global_ryr_len;i++) { j=idxbl2rank(nx,ny,nz,global_i0_ryr[i],global_i1_ryr[i],global_i2_ryr[i],coords,comm3d); ryr_rec_count[j]++; } for(i=0;i<global_csqn_len;i++) { j=idxbl2rank(nx,ny,nz,global_i0_csqn[i],global_i1_csqn[i],global_i2_csqn[i],coords,comm3d); csqn_rec_count[j]++; } for(i=0;i<global_cleft_len;i++) { j=idxbl2rank(nx,ny,nz,global_i0_cleft[i],global_i1_cleft[i],global_i2_cleft[i],coords,comm3d); cleft_rec_count[j]++; } for (i = 0; i < nproc; i++) { if(DB_PF) printf("ryr_rec_count[%d]: %d\n",i, ryr_rec_count[i]); if(DB_PF) printf("csqn_rec_count[%d]: %d\n",i, csqn_rec_count[i]); if(DB_PF) printf("cleft_rec_count[%d]: %d\n",i, 
cleft_rec_count[i]); } ryr_rec_disp = malloc(nproc*sizeof(int)); csqn_rec_disp = malloc(nproc*sizeof(int)); cleft_rec_disp = malloc(nproc*sizeof(int)); ryr_rec_disp[0] = 0; csqn_rec_disp[0] = 0; cleft_rec_disp[0] = 0; for (i = 1; i < nproc; i++) { ryr_rec_disp[i] = ryr_rec_disp[i-1] + ryr_rec_count[i-1]; csqn_rec_disp[i] = csqn_rec_disp[i-1] + csqn_rec_count[i-1]; cleft_rec_disp[i] = cleft_rec_disp[i-1] + cleft_rec_count[i-1]; } if(global_ryr_len!=ryr_rec_disp[nproc-1]+ryr_rec_count[nproc-1]) { printf("Global ryr Count mismatch %d\n", ryr_rec_disp[nproc-1]+ryr_rec_count[nproc-1]); } if(global_csqn_len!=csqn_rec_disp[nproc-1]+csqn_rec_count[nproc-1]) { printf("Global csqn Count mismatch %d\n", csqn_rec_disp[nproc-1]+csqn_rec_count[nproc-1]); } if(global_cleft_len!=cleft_rec_disp[nproc-1]+cleft_rec_count[nproc-1]) { printf("Global cleft Count mismatch %d\n", cleft_rec_disp[nproc-1]+cleft_rec_count[nproc-1]); } ryr_rec_offset = malloc(nproc*sizeof(int)); csqn_rec_offset = malloc(nproc*sizeof(int)); cleft_rec_offset = malloc(nproc*sizeof(int)); for (i = 0; i < nproc; i++) { ryr_rec_offset[i]=0; csqn_rec_offset[i]=0; cleft_rec_offset[i]=0; } global_i0_ryr_reorder=malloc(global_ryr_len*sizeof(int)); global_i1_ryr_reorder=malloc(global_ryr_len*sizeof(int)); global_i2_ryr_reorder=malloc(global_ryr_len*sizeof(int)); global_states0_reorder=malloc(global_ryr_len*sizeof(int)); for(i=0;i<global_ryr_len;i++) { j=idxbl2rank(nx,ny,nz,global_i0_ryr[i],global_i1_ryr[i],global_i2_ryr[i],coords,comm3d); global_i0_ryr_reorder[ryr_rec_disp[j]+ryr_rec_offset[j]]=global_i0_ryr[i]-coords[2]*nx+1; global_i1_ryr_reorder[ryr_rec_disp[j]+ryr_rec_offset[j]]=global_i1_ryr[i]-coords[1]*ny+1; global_i2_ryr_reorder[ryr_rec_disp[j]+ryr_rec_offset[j]]=global_i2_ryr[i]-coords[0]*nz+1; global_states0_reorder[ryr_rec_disp[j]+ryr_rec_offset[j]]=global_states0[i]; ryr_rec_offset[j]++; } for (i = 0; i < nproc; i++) { if(ryr_rec_offset[i]!=ryr_rec_count[i]) printf("ryr reorder count error on proc %d \n",i); } free(global_i0_ryr); free(global_i1_ryr); free(global_i2_ryr); free(global_states0); free(ryr_rec_offset); //distribute cleft to there own MPI process global_i0_csqn_reorder=malloc(global_csqn_len*sizeof(int)); global_i1_csqn_reorder=malloc(global_csqn_len*sizeof(int)); global_i2_csqn_reorder=malloc(global_csqn_len*sizeof(int)); for(i=0;i<global_csqn_len;i++) { j=idxbl2rank(nx,ny,nz,global_i0_csqn[i],global_i1_csqn[i],global_i2_csqn[i],coords,comm3d); global_i0_csqn_reorder[csqn_rec_disp[j]+csqn_rec_offset[j]]=global_i0_csqn[i]-coords[2]*nx+1; global_i1_csqn_reorder[csqn_rec_disp[j]+csqn_rec_offset[j]]=global_i1_csqn[i]-coords[1]*ny+1; global_i2_csqn_reorder[csqn_rec_disp[j]+csqn_rec_offset[j]]=global_i2_csqn[i]-coords[0]*nz+1; csqn_rec_offset[j]++; } for (i = 0; i < nproc; i++) { if(csqn_rec_offset[i]!=csqn_rec_count[i]) printf("csqn reorder count error on proc %d \n",i); } free(global_i0_csqn); free(global_i1_csqn); free(global_i2_csqn); free(csqn_rec_offset); global_i0_cleft_reorder=malloc(global_cleft_len*sizeof(int)); global_i1_cleft_reorder=malloc(global_cleft_len*sizeof(int)); global_i2_cleft_reorder=malloc(global_cleft_len*sizeof(int)); global_cleft_nb_reorder=malloc(global_cleft_len*sizeof(int)); for(i=0;i<global_cleft_len;i++) { j=idxbl2rank(nx,ny,nz,global_i0_cleft[i],global_i1_cleft[i],global_i2_cleft[i],coords,comm3d); global_i0_cleft_reorder[cleft_rec_disp[j]+cleft_rec_offset[j]]=global_i0_cleft[i]-coords[2]*nx+1; global_i1_cleft_reorder[cleft_rec_disp[j]+cleft_rec_offset[j]]=global_i1_cleft[i]-coords[1]*ny+1; 
global_i2_cleft_reorder[cleft_rec_disp[j]+cleft_rec_offset[j]]=global_i2_cleft[i]-coords[0]*nz+1; global_cleft_nb_reorder[cleft_rec_disp[j]+cleft_rec_offset[j]]=global_cleft_nb[i]; cleft_rec_offset[j]++; } for (i = 0; i < nproc; i++) { if(cleft_rec_offset[i]!=cleft_rec_count[i]) printf("cleft reorder count error on proc %d \n",i); } free(global_i0_cleft); free(global_i1_cleft); free(global_i2_cleft); free(global_cleft_nb); free(cleft_rec_offset); } //MPI_Gather(&n_ryr,1,MPI_INT,&states_rec_count[0],1,MPI_INT,0,comm); MPI_Scatter(&ryr_rec_count[0],1,MPI_INT,ryr_len,1, MPI_INT,0,comm); MPI_Scatter(&csqn_rec_count[0],1,MPI_INT,csqn_len,1, MPI_INT,0,comm); MPI_Scatter(&cleft_rec_count[0],1,MPI_INT,cleft_len,1, MPI_INT,0,comm); if(*ryr_len>0){ *i0_ryr=(int*)mpi_malloc(my_id,*ryr_len*sizeof(int)); *i1_ryr=(int*)mpi_malloc(my_id,*ryr_len*sizeof(int)); *i2_ryr=(int*)mpi_malloc(my_id,*ryr_len*sizeof(int)); } else { *i0_ryr=(int*)mpi_malloc(my_id,1*sizeof(int)); *i1_ryr=(int*)mpi_malloc(my_id,1*sizeof(int)); *i2_ryr=(int*)mpi_malloc(my_id,1*sizeof(int)); } if(*csqn_len>0) { *i0_csqn=(int*)mpi_malloc(my_id,*csqn_len*sizeof(int)); *i1_csqn=(int*)mpi_malloc(my_id,*csqn_len*sizeof(int)); *i2_csqn=(int*)mpi_malloc(my_id,*csqn_len*sizeof(int)); } else { *i0_csqn=(int*)mpi_malloc(my_id,1*sizeof(int)); *i1_csqn=(int*)mpi_malloc(my_id,1*sizeof(int)); *i2_csqn=(int*)mpi_malloc(my_id,1*sizeof(int)); } if(*cleft_len>0) { *i0_cleft=(int*)mpi_malloc(my_id,*cleft_len*sizeof(int)); *i1_cleft=(int*)mpi_malloc(my_id,*cleft_len*sizeof(int)); *i2_cleft=(int*)mpi_malloc(my_id,*cleft_len*sizeof(int)); *cleft_nb=(int*)mpi_malloc(my_id,*cleft_len*sizeof(int)); } else { *i0_cleft=(int*)mpi_malloc(my_id,1*sizeof(int)); *i1_cleft=(int*)mpi_malloc(my_id,1*sizeof(int)); *i2_cleft=(int*)mpi_malloc(my_id,1*sizeof(int)); *cleft_nb=(int*)mpi_malloc(my_id,1*sizeof(int)); } if(*ryr_len>0){ *states0=(int*)mpi_malloc(my_id,*ryr_len*sizeof(int)); *states1=(int*)mpi_malloc(my_id,*ryr_len*sizeof(int)); for ( i = 0; i < *ryr_len; i += 1 ) { (*states0)[i]=0; (*states1)[i]=0; } } else { *states0=(int*)mpi_malloc(my_id,1*sizeof(int)); *states1=(int*)mpi_malloc(my_id,1*sizeof(int)); (*states0)[0]=0; (*states1)[0]=0; } MPI_Scatterv(global_i0_ryr_reorder, ryr_rec_count,ryr_rec_disp, MPI_INT, *i0_ryr, *ryr_len, MPI_INT, 0, comm); MPI_Scatterv(global_i1_ryr_reorder, ryr_rec_count,ryr_rec_disp, MPI_INT, *i1_ryr, *ryr_len, MPI_INT, 0, comm); MPI_Scatterv(global_i2_ryr_reorder, ryr_rec_count,ryr_rec_disp, MPI_INT, *i2_ryr, *ryr_len, MPI_INT, 0, comm); MPI_Scatterv(global_i0_csqn_reorder, csqn_rec_count,csqn_rec_disp, MPI_INT, *i0_csqn, *csqn_len, MPI_INT, 0, comm); MPI_Scatterv(global_i1_csqn_reorder, csqn_rec_count,csqn_rec_disp, MPI_INT, *i1_csqn, *csqn_len, MPI_INT, 0, comm); MPI_Scatterv(global_i2_csqn_reorder, csqn_rec_count,csqn_rec_disp, MPI_INT, *i2_csqn, *csqn_len, MPI_INT, 0, comm); MPI_Scatterv(global_i0_cleft_reorder, cleft_rec_count,cleft_rec_disp, MPI_INT, *i0_cleft, *cleft_len, MPI_INT, 0, comm); MPI_Scatterv(global_i1_cleft_reorder, cleft_rec_count,cleft_rec_disp, MPI_INT, *i1_cleft, *cleft_len, MPI_INT, 0, comm); MPI_Scatterv(global_i2_cleft_reorder, cleft_rec_count,cleft_rec_disp, MPI_INT, *i2_cleft, *cleft_len, MPI_INT, 0, comm); MPI_Scatterv(global_cleft_nb_reorder, cleft_rec_count,cleft_rec_disp, MPI_INT, *cleft_nb, *cleft_len, MPI_INT, 0, comm); MPI_Scatterv(global_states0_reorder, ryr_rec_count, ryr_rec_disp, MPI_INT, *states0, *ryr_len, MPI_INT, 0, comm); //MPI_Bcast(&global_ryr_num,1,MPI_INT,0,comm); if(DB_PF) 
printf("Thread%d: ryr_len=%d\n",my_id, *ryr_len); // sprintf(caoutfile,"%s/Ca%d_T%d_rank%d_%d_%d_s0.np",outdirname,i,counter,coord[2],coord[1],coord[0]); // store2Dmatrixfile_double_1D(caoutfile,C1[i],ny0,nz0,30); //MPI_Gatherv(states0, n_ryr, MPI_INT, global_states0, states_rec_count, states_rec_disp, MPI_INT, 0, comm); // if(my_id==2) { // for(i=0;i<*ryr_len;i++) printf("Thread2 states[%d]: %d\n",i,(*states0)[i]); // } if(DB_PF){ for(i=0;i<*ryr_len;i++){ if((*states0)[i]==1){ printf("Proc%d,ryr_len=%d,ryr[%d]:%d,%d,%d \n",my_id, *ryr_len,i,(*i0_ryr)[i],(*i1_ryr)[i],(*i2_ryr)[i]); } } } if(my_id==0){ free(ryr_rec_count); free(ryr_rec_disp); free(csqn_rec_count); free(csqn_rec_disp); free(cleft_rec_count); free(cleft_rec_disp); free(global_i0_ryr_reorder); free(global_i1_ryr_reorder); free(global_i2_ryr_reorder); free(global_i0_csqn_reorder); free(global_i1_csqn_reorder); free(global_i2_csqn_reorder); free(global_i0_cleft_reorder); free(global_i1_cleft_reorder); free(global_i2_cleft_reorder); free(global_cleft_nb_reorder); free(global_states0_reorder); } return 30/h; } //int* loadRyRindexfile_int(char* infile, CONDFN cf, int cond) int* loadRyRindexfile_int(char* infile, int* count) { FILE *fpdata; int* arreturn; int i; int temp_d; *count=0; if(DB_PF) printf("Load file name: %s\n", infile); fpdata = fopen(infile, "r"); if(fpdata==NULL) { printf("\nFailure to open input file.\n"); exit(0); } while(fscanf(fpdata, "%d", &temp_d)!=EOF){ // if(cf(temp_d,cond)) count++; (*count)++; // printf("%d,",temp_d); } if(DB_PF) printf("There are %d indices satisfy the condition\n",*count); arreturn = malloc((*count)*sizeof(int)); if (arreturn == NULL) { printf("\nFailure trying to allocate room for array.\n"); exit(0); } rewind(fpdata); i=0; while(fscanf(fpdata, "%d", &temp_d)!=EOF){ // if(cf(temp_d,cond)) { arreturn[i]=temp_d; i++; // } } fclose(fpdata); if (*count != i) { printf("Wrong indices number\n"); exit(0); } if(DB_PF) printf("load file %s over \n", infile); return arreturn; } void readparam(int* iconf, double* conf) { FILE* file2; char Data[MAX_LINE_LENGTH]; if((file2=fopen("param","r")) == NULL) { printf("Error opening param file\n"); return; } // h fgets(Data,MAX_LINE_LENGTH,file2); fscanf(file2,"%d\n",&iconf[0]); // size_x fgets(Data,MAX_LINE_LENGTH,file2); fscanf(file2,"%d\n",&iconf[1]); // size_y fgets(Data,MAX_LINE_LENGTH,file2); fscanf(file2,"%d\n",&iconf[2]); // size_z fgets(Data,MAX_LINE_LENGTH,file2); fscanf(file2,"%d\n",&iconf[3]); // x_domains fgets(Data,MAX_LINE_LENGTH,file2); fscanf(file2,"%d\n",&iconf[4]); // y_domains fgets(Data,MAX_LINE_LENGTH,file2); fscanf(file2,"%d\n",&iconf[5]); // z_domains fgets(Data,MAX_LINE_LENGTH,file2); fscanf(file2,"%d\n",&iconf[6]); // save_data fgets(Data,MAX_LINE_LENGTH,file2); fscanf(file2,"%d\n",&iconf[7]); // use_failing fgets(Data,MAX_LINE_LENGTH,file2); fscanf(file2,"%d\n",&iconf[8]); // T fgets(Data,MAX_LINE_LENGTH,file2); fscanf(file2,"%le\n",&conf[0]); // DT fgets(Data,MAX_LINE_LENGTH,file2); fscanf(file2,"%le\n",&conf[1]); // save data in binary file fgets(Data,MAX_LINE_LENGTH,file2); fscanf(file2,"%d\n",&iconf[9]); // save data in hdf5 format fgets(Data,MAX_LINE_LENGTH,file2); fscanf(file2,"%d\n",&iconf[10]); // blocking_y_for_cache fgets(Data,MAX_LINE_LENGTH,file2); fscanf(file2,"%d",&iconf[11]); fclose(file2); } void updateBound(double* C00, double* C01, double* C02, double* C03, double* C04, int C_flag, int nx0, int ny0, int nz0, double* yz_sbuf0,double* yz_rbuf0, double* xz_sbuf0,double* xz_rbuf0, double* xy_sbuf0,double* xy_rbuf0, 
double* yz_sbuf1,double* yz_rbuf1, double* xz_sbuf1,double* xz_rbuf1, double* xy_sbuf1,double* xy_rbuf1, int* neighbor, MPI_Status* ar_status, MPI_Request* ar_send_req, MPI_Request* ar_recv_req, MPI_Comm comm, MPI_Comm comm3d) { int i,j,k; int nx=nx0-2; int ny=ny0-2; int nz=nz0-2; int dims[3]; int periods[3]; int coords[3]; int ZN=0, ZP=1, YN=2, YP=3, XN=4, XP=5; MPI_Cart_get(comm3d, 3, dims, periods, coords); // Ghost X end sheet if(coords[2]==0){ i=0; for (j=1; j<ny0-1; j++) for (k=1; k<nz0-1; k++){ C00[i*nz0*ny0+j*nz0+k] = C00[(i+1)*nz0*ny0+j*nz0+k]; C01[i*nz0*ny0+j*nz0+k] = C01[(i+1)*nz0*ny0+j*nz0+k]; C02[i*nz0*ny0+j*nz0+k] = C02[(i+1)*nz0*ny0+j*nz0+k]; C03[i*nz0*ny0+j*nz0+k] = C03[(i+1)*nz0*ny0+j*nz0+k]; C04[i*nz0*ny0+j*nz0+k] = C04[(i+1)*nz0*ny0+j*nz0+k]; } } else { putin_sendbuffer_yz(1*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C00, nx, ny, nz, &yz_sbuf0[0*ny*nz],ny*nz); putin_sendbuffer_yz(1*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C01, nx, ny, nz, &yz_sbuf0[1*ny*nz],ny*nz); putin_sendbuffer_yz(1*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C02, nx, ny, nz, &yz_sbuf0[2*ny*nz],ny*nz); putin_sendbuffer_yz(1*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C03, nx, ny, nz, &yz_sbuf0[3*ny*nz],ny*nz); putin_sendbuffer_yz(1*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C04, nx, ny, nz, &yz_sbuf0[4*ny*nz],ny*nz); } MPI_Isend(yz_sbuf0,5*ny*nz,MPI_DOUBLE,neighbor[XN],C_flag+1000, comm, &ar_send_req[0]); MPI_Irecv(yz_rbuf0,5*ny*nz,MPI_DOUBLE,neighbor[XN],C_flag+1000, comm, &ar_recv_req[0]); // MPI_Sendrecv(yz_sbuf0,5*ny*nz,MPI_DOUBLE,neighbor[XN],C_flag+1000, // yz_rbuf0,5*ny*nz,MPI_DOUBLE,neighbor[XN],C_flag+1000,comm,&status); if(coords[2]==(dims[2]-1)) { i=nx0-1; for (j=1; j<ny0-1; j++) for (k=1; k<nz0-1; k++){ C00[i*nz0*ny0+j*nz0+k] = C00[(i-1)*nz0*ny0+j*nz0+k]; C01[i*nz0*ny0+j*nz0+k] = C01[(i-1)*nz0*ny0+j*nz0+k]; C02[i*nz0*ny0+j*nz0+k] = C02[(i-1)*nz0*ny0+j*nz0+k]; C03[i*nz0*ny0+j*nz0+k] = C03[(i-1)*nz0*ny0+j*nz0+k]; C04[i*nz0*ny0+j*nz0+k] = C04[(i-1)*nz0*ny0+j*nz0+k]; } } else { putin_sendbuffer_yz( (nx0-2)*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C00, nx, ny, nz, &yz_sbuf1[0*ny*nz],ny*nz); putin_sendbuffer_yz( (nx0-2)*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C01, nx, ny, nz, &yz_sbuf1[1*ny*nz],ny*nz); putin_sendbuffer_yz( (nx0-2)*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C02, nx, ny, nz, &yz_sbuf1[2*ny*nz],ny*nz); putin_sendbuffer_yz( (nx0-2)*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C03, nx, ny, nz, &yz_sbuf1[3*ny*nz],ny*nz); putin_sendbuffer_yz( (nx0-2)*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C04, nx, ny, nz, &yz_sbuf1[4*ny*nz],ny*nz); } MPI_Isend(yz_sbuf1,5*ny*nz,MPI_DOUBLE,neighbor[XP],C_flag+1000, comm, &ar_send_req[1]); MPI_Irecv(yz_rbuf1,5*ny*nz,MPI_DOUBLE,neighbor[XP],C_flag+1000, comm, &ar_recv_req[1]); // MPI_Sendrecv(yz_sbuf1,5*ny*nz,MPI_DOUBLE,neighbor[XP],C_flag+1000, // yz_rbuf1,5*ny*nz,MPI_DOUBLE,neighbor[XP],C_flag+1000,comm,&status); // printf("exchange X end sheet ok! 
coords[%d,%d,%d]\n",coords[0],coords[1],coords[2]); // Ghost Y end sheet if(coords[1]==0){ j=0; for (i=1; i<nx0-1; i++) for (k=1; k<nz0-1; k++){ C00[i*nz0*ny0+j*nz0+k] = C00[i*nz0*ny0+(j+1)*nz0+k]; C01[i*nz0*ny0+j*nz0+k] = C01[i*nz0*ny0+(j+1)*nz0+k]; C02[i*nz0*ny0+j*nz0+k] = C02[i*nz0*ny0+(j+1)*nz0+k]; C03[i*nz0*ny0+j*nz0+k] = C03[i*nz0*ny0+(j+1)*nz0+k]; C04[i*nz0*ny0+j*nz0+k] = C04[i*nz0*ny0+(j+1)*nz0+k]; } } else { putin_sendbuffer_xz( 1*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C00, nx, ny, nz, &xz_sbuf0[0*nx*nz],nx*nz); putin_sendbuffer_xz( 1*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C01, nx, ny, nz, &xz_sbuf0[1*nx*nz],nx*nz); putin_sendbuffer_xz( 1*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C02, nx, ny, nz, &xz_sbuf0[2*nx*nz],nx*nz); putin_sendbuffer_xz( 1*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C03, nx, ny, nz, &xz_sbuf0[3*nx*nz],nx*nz); putin_sendbuffer_xz( 1*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C04, nx, ny, nz, &xz_sbuf0[4*nx*nz],nx*nz); } MPI_Isend(xz_sbuf0,5*nx*nz,MPI_DOUBLE,neighbor[YN],C_flag+2000, comm, &ar_send_req[2]); MPI_Irecv(xz_rbuf0,5*nx*nz,MPI_DOUBLE,neighbor[YN],C_flag+2000, comm, &ar_recv_req[2]); // MPI_Sendrecv(xz_sbuf0,5*nx*nz,MPI_DOUBLE,neighbor[YN],C_flag+2000, // xz_rbuf0,5*nx*nz,MPI_DOUBLE,neighbor[YN],C_flag+2000,comm,&status); if(coords[1]==(dims[1]-1)) { j=ny0-1; for (i=1; i<nx0-1; i++) for (k=1; k<nz0-1; k++){ C00[i*nz0*ny0+j*nz0+k] = C00[i*nz0*ny0+(j-1)*nz0+k]; C01[i*nz0*ny0+j*nz0+k] = C01[i*nz0*ny0+(j-1)*nz0+k]; C02[i*nz0*ny0+j*nz0+k] = C02[i*nz0*ny0+(j-1)*nz0+k]; C03[i*nz0*ny0+j*nz0+k] = C03[i*nz0*ny0+(j-1)*nz0+k]; C04[i*nz0*ny0+j*nz0+k] = C04[i*nz0*ny0+(j-1)*nz0+k]; } } else { putin_sendbuffer_xz( 1*nz0*ny0+(ny0-2)*nz0+1,nx0,ny0, nz0, C00, nx, ny, nz, &xz_sbuf1[0*nx*nz],nx*nz); putin_sendbuffer_xz( 1*nz0*ny0+(ny0-2)*nz0+1,nx0,ny0, nz0, C01, nx, ny, nz, &xz_sbuf1[1*nx*nz],nx*nz); putin_sendbuffer_xz( 1*nz0*ny0+(ny0-2)*nz0+1,nx0,ny0, nz0, C02, nx, ny, nz, &xz_sbuf1[2*nx*nz],nx*nz); putin_sendbuffer_xz( 1*nz0*ny0+(ny0-2)*nz0+1,nx0,ny0, nz0, C03, nx, ny, nz, &xz_sbuf1[3*nx*nz],nx*nz); putin_sendbuffer_xz( 1*nz0*ny0+(ny0-2)*nz0+1,nx0,ny0, nz0, C04, nx, ny, nz, &xz_sbuf1[4*nx*nz],nx*nz); } MPI_Isend(xz_sbuf1,5*nx*nz,MPI_DOUBLE,neighbor[YP],C_flag+2000, comm, &ar_send_req[3]); MPI_Irecv(xz_rbuf1,5*nx*nz,MPI_DOUBLE,neighbor[YP],C_flag+2000, comm, &ar_recv_req[3]); // MPI_Sendrecv(xz_sbuf1,5*nx*nz,MPI_DOUBLE,neighbor[YP],C_flag+2000, // xz_rbuf1,5*nx*nz,MPI_DOUBLE,neighbor[YP],C_flag+2000,comm,&status); // printf("exchange Y end sheet ok! 
coords[%d,%d,%d]\n",coords[0],coords[1],coords[2]); // Ghost Z end sheet if(coords[0]==0){ k=0; for (i=1; i<nx0-1; i++) for (j=1; j<ny0-1; j++){ C00[i*nz0*ny0+j*nz0+k] = C00[i*nz0*ny0+j*nz0+k+1]; C01[i*nz0*ny0+j*nz0+k] = C01[i*nz0*ny0+j*nz0+k+1]; C02[i*nz0*ny0+j*nz0+k] = C02[i*nz0*ny0+j*nz0+k+1]; C03[i*nz0*ny0+j*nz0+k] = C03[i*nz0*ny0+j*nz0+k+1]; C04[i*nz0*ny0+j*nz0+k] = C04[i*nz0*ny0+j*nz0+k+1]; } } else { putin_sendbuffer_xy(1*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C00, nx, ny, nz, &xy_sbuf0[0*nx*ny],nx*ny); putin_sendbuffer_xy(1*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C01, nx, ny, nz, &xy_sbuf0[1*nx*ny],nx*ny); putin_sendbuffer_xy(1*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C02, nx, ny, nz, &xy_sbuf0[2*nx*ny],nx*ny); putin_sendbuffer_xy(1*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C03, nx, ny, nz, &xy_sbuf0[3*nx*ny],nx*ny); putin_sendbuffer_xy(1*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C04, nx, ny, nz, &xy_sbuf0[4*nx*ny],nx*ny); } MPI_Isend(xy_sbuf0,5*nx*ny,MPI_DOUBLE,neighbor[ZN],C_flag+3000, comm, &ar_send_req[4]); MPI_Irecv(xy_rbuf0,5*nx*ny,MPI_DOUBLE,neighbor[ZN],C_flag+3000, comm, &ar_recv_req[4]); // MPI_Sendrecv(xy_sbuf0,5*nx*ny,MPI_DOUBLE,neighbor[ZN],C_flag+3000, // xy_rbuf0,5*nx*ny,MPI_DOUBLE,neighbor[ZN],C_flag+3000,comm,&status); if(coords[0]==(dims[0]-1)) { k=nz0-1; for (i=1; i<nx0-1; i++) for (j=1; j<ny0-1; j++){ C00[i*nz0*ny0+j*nz0+k] = C00[i*nz0*ny0+j*nz0+k-1]; C01[i*nz0*ny0+j*nz0+k] = C01[i*nz0*ny0+j*nz0+k-1]; C02[i*nz0*ny0+j*nz0+k] = C02[i*nz0*ny0+j*nz0+k-1]; C03[i*nz0*ny0+j*nz0+k] = C03[i*nz0*ny0+j*nz0+k-1]; C04[i*nz0*ny0+j*nz0+k] = C04[i*nz0*ny0+j*nz0+k-1]; } } else { putin_sendbuffer_xy( 1*nz0*ny0+nz0+nz0-2,nx0,ny0, nz0, C00, nx, ny, nz, &xy_sbuf1[0*nx*ny],nx*ny); putin_sendbuffer_xy( 1*nz0*ny0+nz0+nz0-2,nx0,ny0, nz0, C01, nx, ny, nz, &xy_sbuf1[1*nx*ny],nx*ny); putin_sendbuffer_xy( 1*nz0*ny0+nz0+nz0-2,nx0,ny0, nz0, C02, nx, ny, nz, &xy_sbuf1[2*nx*ny],nx*ny); putin_sendbuffer_xy( 1*nz0*ny0+nz0+nz0-2,nx0,ny0, nz0, C03, nx, ny, nz, &xy_sbuf1[3*nx*ny],nx*ny); putin_sendbuffer_xy( 1*nz0*ny0+nz0+nz0-2,nx0,ny0, nz0, C04, nx, ny, nz, &xy_sbuf1[4*nx*ny],nx*ny); } MPI_Isend(xy_sbuf1,5*nx*ny,MPI_DOUBLE,neighbor[ZP],C_flag+3000, comm, &ar_send_req[5]); MPI_Irecv(xy_rbuf1,5*nx*ny,MPI_DOUBLE,neighbor[ZP],C_flag+3000, comm, &ar_recv_req[5]); // MPI_Sendrecv(xy_sbuf1,5*nx*ny,MPI_DOUBLE,neighbor[ZP],C_flag+3000, // xy_rbuf1,5*nx*ny,MPI_DOUBLE,neighbor[ZP],C_flag+3000,comm,&status); MPI_Waitall(6, ar_recv_req, ar_status); if(coords[2]!=0){ getout_recvbuffer_yz( 1*nz0+1,nx0,ny0, nz0, C00, nx, ny, nz, &yz_rbuf0[0*ny*nz],ny*nz); getout_recvbuffer_yz( 1*nz0+1,nx0,ny0, nz0, C01, nx, ny, nz, &yz_rbuf0[1*ny*nz],ny*nz); getout_recvbuffer_yz( 1*nz0+1,nx0,ny0, nz0, C02, nx, ny, nz, &yz_rbuf0[2*ny*nz],ny*nz); getout_recvbuffer_yz( 1*nz0+1,nx0,ny0, nz0, C03, nx, ny, nz, &yz_rbuf0[3*ny*nz],ny*nz); getout_recvbuffer_yz( 1*nz0+1,nx0,ny0, nz0, C04, nx, ny, nz, &yz_rbuf0[4*ny*nz],ny*nz); } if(coords[2]!=(dims[2]-1)){ getout_recvbuffer_yz((nx0-1)*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C00, nx, ny, nz, &yz_rbuf1[0*ny*nz],ny*nz); getout_recvbuffer_yz((nx0-1)*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C01, nx, ny, nz, &yz_rbuf1[1*ny*nz],ny*nz); getout_recvbuffer_yz((nx0-1)*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C02, nx, ny, nz, &yz_rbuf1[2*ny*nz],ny*nz); getout_recvbuffer_yz((nx0-1)*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C03, nx, ny, nz, &yz_rbuf1[3*ny*nz],ny*nz); getout_recvbuffer_yz((nx0-1)*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C04, nx, ny, nz, &yz_rbuf1[4*ny*nz],ny*nz); } if(coords[1]!=0){ getout_recvbuffer_xz( 1*nz0*ny0+1,nx0,ny0, nz0, C00, nx, ny, nz, &xz_rbuf0[0*nx*nz],nx*nz); getout_recvbuffer_xz( 
1*nz0*ny0+1,nx0,ny0, nz0, C01, nx, ny, nz, &xz_rbuf0[1*nx*nz],nx*nz); getout_recvbuffer_xz( 1*nz0*ny0+1,nx0,ny0, nz0, C02, nx, ny, nz, &xz_rbuf0[2*nx*nz],nx*nz); getout_recvbuffer_xz( 1*nz0*ny0+1,nx0,ny0, nz0, C03, nx, ny, nz, &xz_rbuf0[3*nx*nz],nx*nz); getout_recvbuffer_xz( 1*nz0*ny0+1,nx0,ny0, nz0, C04, nx, ny, nz, &xz_rbuf0[4*nx*nz],nx*nz); } if(coords[1]!=(dims[1]-1)){ getout_recvbuffer_xz(1*nz0*ny0+(ny0-1)*nz0+1,nx0,ny0, nz0, C00, nx, ny, nz, &xz_rbuf1[0*nx*nz],nx*nz); getout_recvbuffer_xz(1*nz0*ny0+(ny0-1)*nz0+1,nx0,ny0, nz0, C01, nx, ny, nz, &xz_rbuf1[1*nx*nz],nx*nz); getout_recvbuffer_xz(1*nz0*ny0+(ny0-1)*nz0+1,nx0,ny0, nz0, C02, nx, ny, nz, &xz_rbuf1[2*nx*nz],nx*nz); getout_recvbuffer_xz(1*nz0*ny0+(ny0-1)*nz0+1,nx0,ny0, nz0, C03, nx, ny, nz, &xz_rbuf1[3*nx*nz],nx*nz); getout_recvbuffer_xz(1*nz0*ny0+(ny0-1)*nz0+1,nx0,ny0, nz0, C04, nx, ny, nz, &xz_rbuf1[4*nx*nz],nx*nz); } if(coords[0]!=0){ getout_recvbuffer_xy( 1*nz0*ny0+1*nz0, nx0,ny0, nz0, C00, nx, ny, nz, &xy_rbuf0[0*nx*ny],nx*ny); getout_recvbuffer_xy( 1*nz0*ny0+1*nz0, nx0,ny0, nz0, C01, nx, ny, nz, &xy_rbuf0[1*nx*ny],nx*ny); getout_recvbuffer_xy( 1*nz0*ny0+1*nz0, nx0,ny0, nz0, C02, nx, ny, nz, &xy_rbuf0[2*nx*ny],nx*ny); getout_recvbuffer_xy( 1*nz0*ny0+1*nz0, nx0,ny0, nz0, C03, nx, ny, nz, &xy_rbuf0[3*nx*ny],nx*ny); getout_recvbuffer_xy( 1*nz0*ny0+1*nz0, nx0,ny0, nz0, C04, nx, ny, nz, &xy_rbuf0[4*nx*ny],nx*ny); } if(coords[0]!=(dims[0]-1)){ getout_recvbuffer_xy( 1*nz0*ny0+nz0+nz0-1,nx0,ny0, nz0, C00, nx, ny, nz, &xy_rbuf1[0*nx*ny],nx*ny); getout_recvbuffer_xy( 1*nz0*ny0+nz0+nz0-1,nx0,ny0, nz0, C01, nx, ny, nz, &xy_rbuf1[1*nx*ny],nx*ny); getout_recvbuffer_xy( 1*nz0*ny0+nz0+nz0-1,nx0,ny0, nz0, C02, nx, ny, nz, &xy_rbuf1[2*nx*ny],nx*ny); getout_recvbuffer_xy( 1*nz0*ny0+nz0+nz0-1,nx0,ny0, nz0, C03, nx, ny, nz, &xy_rbuf1[3*nx*ny],nx*ny); getout_recvbuffer_xy( 1*nz0*ny0+nz0+nz0-1,nx0,ny0, nz0, C04, nx, ny, nz, &xy_rbuf1[4*nx*ny],nx*ny); } // printf("exchange Z end sheet ok! 
coords[%d,%d,%d]\n",coords[0],coords[1],coords[2]); } void putin_sendbuffer_yz(int base_addr,int nx0,int ny0, int nz0, double* arr, int nx, int ny, int nz, double* sbuf, int sbuf_len) { int i; if(sbuf_len!=ny*nz) { printf("yz sbuf_len error!\n"); exit(0); } for ( i = 0; i < ny; i += 1 ) { memcpy(&sbuf[i*nz],&arr[base_addr+i*nz0],nz*sizeof(double)); } } void putin_sendbuffer_xz(int base_addr,int nx0,int ny0, int nz0, double* arr, int nx, int ny, int nz, double* sbuf, int sbuf_len) { int i; if(sbuf_len!=nx*nz) { printf("xz sbuf_len error!\n"); exit(0); } for ( i = 0; i < nx; i += 1 ) { memcpy(&sbuf[i*nz],&arr[base_addr+i*ny0*nz0],nz*sizeof(double)); } } void putin_sendbuffer_xy(int base_addr,int nx0,int ny0, int nz0, double* arr, int nx, int ny, int nz, double* sbuf, int sbuf_len) { int i, j; if(sbuf_len!=nx*ny) { printf("xy sbuf_len error!\n"); exit(0); } for ( i = 0; i < nx; i += 1 ) { for ( j = 0; j < ny; j += 1 ) { sbuf[i*ny+j]=arr[base_addr+i*ny0*nz0+j*nz0]; } } } void getout_recvbuffer_yz(int base_addr,int nx0,int ny0, int nz0, double* arr, int nx, int ny, int nz, double* sbuf, int sbuf_len) { int i; if(sbuf_len!=ny*nz) { printf("yz rbuf_len error!\n"); exit(0); } for ( i = 0; i < ny; i += 1 ) { memcpy(&arr[base_addr+i*nz0],&sbuf[i*nz],nz*sizeof(double)); } } void getout_recvbuffer_xz(int base_addr,int nx0,int ny0, int nz0, double* arr, int nx, int ny, int nz, double* sbuf, int sbuf_len) { int i; if(sbuf_len!=nx*nz) { printf("xz rbuf_len error!\n"); exit(0); } for ( i = 0; i < nx; i += 1 ) { memcpy(&arr[base_addr+i*ny0*nz0],&sbuf[i*nz],nz*sizeof(double)); } } void getout_recvbuffer_xy(int base_addr,int nx0,int ny0, int nz0, double* arr, int nx, int ny, int nz, double* sbuf, int sbuf_len) { int i, j; if(sbuf_len!=nx*ny) { printf("xy rbuf_len error!\n"); exit(0); } for ( i = 0; i < nx; i += 1 ) { for ( j = 0; j < ny; j += 1 ) { arr[base_addr+i*ny0*nz0+j*nz0]=sbuf[i*ny+j]; } } } void BinarySort_two(int* pData, int* vData, int Count) { dichotomy_two(pData,vData,0,Count-1); } void dichotomy_two(int* pData,int* vData, int left,int right) { int i,j; int middle,iTemp; i = left; j = right; middle = pData[(left+right)/2]; do{ while((pData[i]<middle) && (i<right)) i++; while((pData[j]>middle) && (j>left)) j--; if(i<=j) { iTemp = pData[i]; pData[i] = pData[j]; pData[j] = iTemp; iTemp =vData[i]; vData[i]=vData[j]; vData[j]=iTemp; i++; j--; } }while(i<=j); if(left<j) dichotomy_two(pData,vData,left,j); if(right>i) dichotomy_two(pData,vData,i,right); } void *mpi_malloc ( int id, /* IN - Process rank */ int bytes) /* IN - Bytes to allocate */ { void *buffer; if ((buffer = malloc ((size_t) bytes)) == NULL) { printf ("Error: Malloc failed for process %d\n", id); fflush (stdout); MPI_Abort (MPI_COMM_WORLD, 4); } return buffer; } void compute_pde_ode(int nx0, int ny0, int nz0, double dt,double gamma, double fudge, double* alpha, double* B_tot, double* k_on, double* k_off, double** C0, double** C1, int div_y) { // Main kernel int i,j,k,jj,idx; int ny; double J; double Ca_ijk; double buff_ijk; double Ca_i2_ijk; double Ca_SR2_ijk; ny=ny0-2; for (i=1; i<nx0-1; i++) { for (jj=0; jj<ny/div_y; jj++) { //blocking for cache size on y line for (j=jj*div_y+1; j<(jj+1)*div_y+1; j++) { //Laplace diffusion process five array together for(idx=0;idx<5;idx++) { #pragma ivdep #pragma prefetch for (k=1; k<nz0-1; k++) { C1[idx][i*nz0*ny0+j*nz0+k] =alpha[idx]*( C0[idx][i*nz0*ny0+j*nz0+k]*(-6)+ C0[idx][(i-1)*nz0*ny0+j*nz0+k] + C0[idx][(i+1)*nz0*ny0+j*nz0+k] + C0[idx][i*nz0*ny0+(j-1)*nz0+k] + C0[idx][i*nz0*ny0+(j+1)*nz0+k] + 
C0[idx][i*nz0*ny0+j*nz0+k-1] + C0[idx][i*nz0*ny0+j*nz0+k+1]) + C0[idx][i*nz0*ny0+j*nz0+k]; } } //Reaction for(idx=2;idx<6;idx++) { #pragma ivdep #pragma prefetch for (k=1; k<nz0-1; k++) { Ca_ijk = C1[0][i*nz0*ny0+j*nz0+k]; buff_ijk = C1[idx][i*nz0*ny0+j*nz0+k]; J = k_on[idx]*(B_tot[idx] - buff_ijk)*Ca_ijk - k_off[idx]*buff_ijk; C1[0][i*nz0*ny0+j*nz0+k] -= dt*J; C1[idx][i*nz0*ny0+j*nz0+k] += dt*J; } } // serca3D #pragma ivdep #pragma prefetch for (k=1; k<nz0-1; k++) { // Main kernel Ca_i2_ijk = C1[0][i*nz0*ny0+j*nz0+k]; Ca_SR2_ijk = C1[1][i*nz0*ny0+j*nz0+k]; Ca_i2_ijk *= Ca_i2_ijk; Ca_SR2_ijk *= Ca_SR2_ijk; J = fudge*(570997802.885875*Ca_i2_ijk - 0.0425239333622699*Ca_SR2_ijk)/(106720651.206402*Ca_i2_ijk + 182.498197548666*Ca_SR2_ijk + 5.35062954944879); C1[0][i*nz0*ny0+j*nz0+k] -= dt*J; C1[1][i*nz0*ny0+j*nz0+k] += dt*J/gamma; } } } } }
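// Note on the fused kernel above: each (i, jj) block keeps the same C0/C1 voxels in cache
// while the diffusion, buffer-reaction and SERCA updates run back to back. Iteration i of the
// outer loop writes only C1[...][i*ny0*nz0 + ...] and reads C0 at i-1, i and i+1, so the x-loop
// carries no dependence between iterations and could be threaded. A minimal sketch, assuming
// OpenMP is available (the pragma below is an illustration, not part of the original build):
//
//   #pragma omp parallel for private(j, jj, k, idx, J, Ca_ijk, buff_ijk, Ca_i2_ijk, Ca_SR2_ijk)
//   for (i = 1; i < nx0 - 1; i++) {
//       /* body unchanged */
//   }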
#include <math.h> #include <assert.h> #include <stdio.h> #include <stdlib.h> #include <time.h> #include <string.h> #include "mpi.h" #include "hdf5.h" #include <sys/stat.h> //#define DEBUG_TEST #define DB_PF 0 #define MAX_LINE_LENGTH 80 //#define __PAPI__ #ifdef __PAPI__ #include <papi.h> #endif typedef int(*CONDCF)(int a, int b); #define H5T_DATA_TYPE H5T_NATIVE_SHORT typedef short int hdf5_data_type; #define H5_DATA_LIMIT_0 -32768 // Data type specific #define H5_DATA_LIMIT_1 32767 // Data type specific #define H5_DATA_SIZE H5_DATA_LIMIT_1 - H5_DATA_LIMIT_0 // Data type specific double timing(); void *mpi_malloc ( int id, int bytes); /* IN - Bytes to allocate */ inline double my_random(); double my_min(double* ar, int len); double my_max(double* ar, int len); void stern(double t, double* y0, double* y1, double Ca); void stern_discrete(double dt, int* y0, int* y1, double Ca); void laplace3D (int nx0, int ny0, int nz0, double* C0, int nx1, int ny1, int nz1, double* C1, double alpha);//, int num_threads) void reaction3D (int nx0, int ny0, int nz0, double* Ca, int nx1, int ny1, int nz1, double* buff, double B_tot, double k_on, double k_off, double dt);//, int num_threads) void serca3D (int nx0, int ny0, int nz0, double* Ca_i, int nx1, int ny1, int nz1, double* Ca_SR, double dt, double gamma, double fudge);//, int num_threads) void update_ryr(int h_scale,int nx0, int ny0, int nz0, double* Ca_i, double* Ca_SR, double* Ca_CSQN, double* C10, double* C12, double* C13, double* C14, double k_on_CSQN, double k_off_CSQN, double CSQN_tot, double gamma, double K, double dt, int ryr_len, int* i0_ryr, int* i1_ryr, int* i2_ryr, int csqn_len, int* i0_csqn, int* i1_csqn, int* i2_csqn, int cleft_len, int* i0_cleft, int* i1_cleft, int* i2_cleft,int* cleft_nb, int* states0, int* states1); void store2Dmatrixfile_double_1D(char* outfile, double* ar, int rows, int cols, int x_strid); void store2Dmatrixfile_double_bin(char* outfile, double* ar, int rows, int cols, int x_strid); void transfer_hdf5_data(hdf5_data_type* h5_data, double* ar0, double* ar1, double scale_value, hsize_t* chunk_dims); void store2Dmatrixfile_int_1D(char* outfile, int* ar, int rows, int cols); //int less(int a, int b); //int giant(int a, int b); //int* loadRyRindexfile_int(char* infile, CONDFN cf, int cond); int* loadRyRindexfile_int(char* infile, int* count); int idxinrank(int nx, int ny, int nz, int i0, int i1, int i2, int rank, MPI_Comm comm3d); int idxbl2rank(int nx, int ny, int nz, int i0, int i1, int i2, int* coords, MPI_Comm comm3d); int load_indices_serial(int nx, int ny, int nz, int h, int** i0_ryr, int** i1_ryr, int** i2_ryr, int* ryr_len, int** i0_csqn, int** i1_csqn, int** i2_csqn, int* csqn_len, int** i0_cleft, int** i1_cleft, int** i2_cleft, int** cleft_nb, int* cleft_len, int x_slice_mid,int x_slice_width, int x_slice_num, int use_failing); int IsTrueCleft(int coord_y, int coord_z, int size_y, int size_z, int *i1_csqn, int *i2_csqn, int* y_index, int csqn_len); void BinarySort_two(int* pData, int* vData, int Count); void dichotomy_two(int* pData,int* vData, int left,int right); int distr_ryr_csqn_state(int h, int size_x, int size_y, int size_z, int nx, int ny, int nz, int** i0_ryr, int** i1_ryr, int** i2_ryr, int* ryr_len, int** i0_csqn, int** i1_csqn, int** i2_csqn, int* csqn_len, int** i0_cleft, int** i1_cleft, int** i2_cleft,int** cleft_nb, int* cleft_len, int** states0, int** states1, int x_slice_mid,int x_slice_width, int x_slice_num, MPI_Comm comm3d, MPI_Comm, int use_failing); void readparam(int* iconf, double* conf); 
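// Layout of the plain-text "param" file as read by readparam(): each value is preceded by one
// description line, in this order:
//   iconf[0] h, iconf[1] size_x, iconf[2] size_y, iconf[3] size_z,
//   iconf[4] x_domains, iconf[5] y_domains, iconf[6] z_domains,
//   iconf[7] save_data, iconf[8] use_failing,
//   conf[0] T, conf[1] DT,
//   iconf[9] save binary files, iconf[10] save hdf5, iconf[11] blocking_y_for_cache (div_y).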
void updateBound(double* C00, double* C01, double* C02, double* C03, double* C04, int C_flag, int nx0, int ny0, int nz0, double* yz_sbuf0,double* yz_rbuf0, double* xz_sbuf0,double* xz_rbuf0, double* xy_sbuf0,double* xy_rbuf0, double* yz_sbuf1,double* yz_rbuf1, double* xz_sbuf1,double* xz_rbuf1, double* xy_sbuf1,double* xy_rbuf1, int* neighbor, MPI_Status* ar_status, MPI_Request* ar_send_req, MPI_Request* ar_recv_req, MPI_Comm comm,MPI_Comm comm3d); void putin_sendbuffer_yz(int base_addr,int nx0,int ny0, int nz0, double* arr, int nx, int ny, int nz, double* sbuf, int sbuf_len); void putin_sendbuffer_xz(int base_addr,int nx0,int ny0, int nz0, double* arr, int nx, int ny, int nz, double* sbuf, int sbuf_len); void putin_sendbuffer_xy(int base_addr,int nx0,int ny0, int nz0, double* arr, int nx, int ny, int nz, double* sbuf, int sbuf_len); void getout_recvbuffer_yz(int base_addr,int nx0,int ny0, int nz0, double* arr, int nx, int ny, int nz, double* sbuf, int sbuf_len); void getout_recvbuffer_xz(int base_addr,int nx0,int ny0, int nz0, double* arr, int nx, int ny, int nz, double* sbuf, int sbuf_len); void getout_recvbuffer_xy(int base_addr,int nx0,int ny0, int nz0, double* arr, int nx, int ny, int nz, double* sbuf, int sbuf_len); void compute_pde_ode(int nx0, int ny0, int nz0, double dt,double gamma, double fudge, double* alpha, double* B_tot, double* k_on, double* k_off, double** C0, double** C1, int div_y); #define NUM_SAVE_SPECIES 5 int save_species[NUM_SAVE_SPECIES] = {0,1,4,5,6}; char* species_names[7] = {"Cai", "CaSR", "CaCMDN", "CaATP", "CaFluo", "CaTRPN", "CaCSQN"}; int main(int argc, char **argv) { int i,j,k; #ifdef __PAPI__ // int Events[] = { PAPI_L1_DCA, PAPI_L1_DCM }; // int Events[] = {PAPI_L3_TCM, PAPI_L3_TCA, PAPI_L2_TCM,PAPI_L2_TCA}; int Events[] = {PAPI_DP_OPS,PAPI_L3_TCM}; int NUM_EVENTS = sizeof(Events)/sizeof(Events[0]); long long res_papi[NUM_EVENTS]; char EventName[128]; int num_hwcntrs = 0; int EventSet = PAPI_NULL; int retval; retval = PAPI_library_init( PAPI_VER_CURRENT ); retval = PAPI_create_eventset( &EventSet ); if (PAPI_add_events( EventSet, Events, NUM_EVENTS) != PAPI_OK){ printf("PAPI_add_events failed\n"); } for (i=0; i<NUM_EVENTS; i++){ res_papi[i] = 0; } #endif double time_main=0.0; double time_comm=0.0; double time_conc=0.0; double time_ryr=0.0; double time_io=0.0; int save_data=0; int use_rand_seed=1; int use_failing=0; int idx; int h_scale=1; int h=30; int div_y=1; int save_binary_file=0; int save_hdf5=0; double T=1.0; double DT=0.05; // plotting time step int TimeStep=2; int size_x, size_y, size_z, my_id, x_domains, y_domains, z_domains; int iconf[12]; double conf[2]; /* MPI variables */ int nproc, ndims; MPI_Comm comm, comm3d; int dims[3]; int periods[3]; int reorganisation = 0; MPI_Datatype matrix_type_oyz, matrix_type_oxz, matrix_type_oxy; int ZN=0, ZP=1, YN=2, YP=3, XN=4, XP=5; int NeighBor[6]; hid_t h5_file_id; hdf5_data_type* h5_data; MPI_Init(&argc, &argv); comm = MPI_COMM_WORLD; MPI_Comm_size(comm, &nproc); MPI_Comm_rank(comm, &my_id); MPI_Info info = MPI_INFO_NULL; if (my_id==0) { readparam(iconf, conf); } MPI_Bcast(iconf, 12, MPI_INT, 0, comm); MPI_Bcast(conf, 2, MPI_DOUBLE, 0, comm); h = iconf[0]; size_x = iconf[1]; size_y = iconf[2]; size_z = iconf[3]; x_domains = iconf[4]; y_domains = iconf[5]; z_domains = iconf[6]; save_data = iconf[7]; use_failing = iconf[8]; save_binary_file = iconf[9]; // Save Ca in binary file instead of ascii file save_hdf5 = iconf[10]; // Save data in hdf5 file format div_y = iconf[11]; // Block size on y direction for 
cache T = conf[0]; DT = conf[1]; h_scale=30/h; if(use_rand_seed) srand(my_id); char hdf5_dataset_name[200]; char hdf5_group_name[200]; char h5_basename[200]; char outdirname[200]; if(save_hdf5) { sprintf(h5_basename, "output_%d_%d_%d_%d_%d", h, size_x, size_y, size_z, use_failing); } else if(save_binary_file) { sprintf(outdirname, "output_%d_%d_%d_%d_%d_bin", h, size_x, size_y, size_z, use_failing); } else { sprintf(outdirname, "output_%d_%d_%d_%d_%d", h, size_x, size_y, size_z, use_failing); } if(!my_id) { if(save_data && !save_hdf5){ if(access(outdirname,0)) { if (mkdir(outdirname, 0755)==-1) { printf("make directory failed\n"); } else { printf("make directory: %s\n", outdirname); } } else { printf("directory %s existed\n",outdirname); } } } MPI_Barrier(comm); if((my_id==0) && (nproc!=(x_domains*y_domains*z_domains))) { printf("Number of processes not equal to Number of subdomains\n"); MPI_Abort(MPI_COMM_WORLD, 1); } if((my_id==0)&&(size_x%x_domains!=0)) { printf("Number of x_domains is not divisible in scale\n"); MPI_Abort(MPI_COMM_WORLD, 1); } if((my_id==0)&&(size_y%y_domains!=0)) { printf("Number of y_domains is not divisible in scale\n"); MPI_Abort(MPI_COMM_WORLD, 1); } if((my_id==0)&&(size_z%z_domains!=0)) { printf("Number of z_domains is not divisible in scale\n"); MPI_Abort(MPI_COMM_WORLD, 1); } if(((size_y/y_domains)%div_y)!=0){ div_y=1; if(my_id==0){ printf("Warning: div_y is not divisible on each node, so set div_y=1 for default \n"); } } /* Create 3D cartesian grid */ periods[0] = 0; periods[1] = 0; periods[2] = 0; ndims = 3; dims[0]=z_domains; dims[1]=y_domains; dims[2]=x_domains; MPI_Cart_create(comm, ndims, dims, periods, reorganisation, &comm3d); /* MPI variables */ MPI_Status ar_status[6]; MPI_Request ar_send_req[6]; MPI_Request ar_recv_req[6]; int coord[3]; int dim[3]; int period[3]; int mid_coord_x=0; int in_midx_slice=0; int x_slice_num; int x_slice_width; int x_slice_mid; MPI_Cart_get(comm3d, 3, dim, period, coord); x_slice_num=(int)(ceil((double)(size_x*h)/2100.0)); if((size_x%x_slice_num)!=0) { printf("x dimension can not be divided by %d\n", x_slice_num); MPI_Abort(comm,5); } x_slice_width=size_x/x_slice_num; x_slice_mid=(x_slice_width+1)/2; for(i=0;i<x_slice_num;i++) { if(((x_slice_width*i+x_slice_mid)>=(coord[2]*size_x/x_domains))&& ((x_slice_width*i+x_slice_mid)<((coord[2]+1)*size_x/x_domains))){ if(in_midx_slice==1){ printf("dont put two x_slice in a x partition\n"); MPI_Abort(comm,5); } in_midx_slice=1; mid_coord_x=(x_slice_width*i+x_slice_mid)-(coord[2]*size_x/x_domains)+1;//+1 for ghost bound //check x partition thickness, so far, for simplify, dont cut a csqn and no-flux into two x-partitions if((mid_coord_x)<(h_scale+3)||(size_x/x_domains-mid_coord_x)<(h_scale+3)){ printf("x partition is too thine for CSQN and cleft extend \n"); MPI_Abort(comm,5); } } } //printf("Rank: %d, coord: [%d, %d, %d]\n", my_id, coord[0], coord[1], coord[2]); /* Identify process neighbors */ NeighBor[0] = MPI_PROC_NULL; NeighBor[1] = MPI_PROC_NULL; NeighBor[2] = MPI_PROC_NULL; NeighBor[3] = MPI_PROC_NULL; NeighBor[4] = MPI_PROC_NULL; NeighBor[5] = MPI_PROC_NULL; /* Left/West and right/Est neigbors Z direction*/ MPI_Cart_shift(comm3d,0,1,&NeighBor[ZN],&NeighBor[ZP]); /* Bottom/South and Upper/North neigbors Y direction*/ MPI_Cart_shift(comm3d,1,1,&NeighBor[YN],&NeighBor[YP]); /* Zdown/South and Zup/North neigbors X direction*/ MPI_Cart_shift(comm3d,2,1,&NeighBor[XN],&NeighBor[XP]); //-------------------------------------------------------------------- int nx=(size_x/x_domains); int 
ny=(size_y/y_domains); int nz=(size_z/z_domains); int nx0, ny0, nz0; int nx1, ny1, nz1; nx0=nx+2; ny0=ny+2; nz0=nz+2; nx1=nx+2; ny1=ny+2; nz1=nz+2; int len; len=nx0*ny0*nz0; /* Create matrix data types to communicate */ MPI_Type_vector(ny, nz, nz0, MPI_DOUBLE, &matrix_type_oyz); MPI_Type_commit(&matrix_type_oyz); /* Create matrix data type to communicate on vertical Oxz plan */ MPI_Type_vector(nx, nz, ny0*nz0, MPI_DOUBLE, &matrix_type_oxz); MPI_Type_commit(&matrix_type_oxz); /* Create matrix data type to communicate on vertical Oxy plan */ MPI_Datatype matrix_type_liney; MPI_Type_vector(ny, 1, nz0, MPI_DOUBLE, &matrix_type_liney); MPI_Type_commit(&matrix_type_liney); // MPI_Type_vector(nx*ny, 1, nz0, MPI_DOUBLE, &matrix_type_oxy); MPI_Type_hvector(nx, 1, ny0*nz0*sizeof(double), matrix_type_liney, &matrix_type_oxy); MPI_Type_commit(&matrix_type_oxy); if(!my_id) printf("Simulation Begin!\n"); //Define where the RyRs are: int* i0_ryr; int* i1_ryr; int* i2_ryr; int* i0_csqn; int* i1_csqn; int* i2_csqn; int* i0_cleft; int* i1_cleft; int* i2_cleft; int* cleft_nb; int ryr_len; int csqn_len; int cleft_len; int* states0; int* states1; h_scale=distr_ryr_csqn_state( h, size_x, size_y, size_z, nx, ny, nz, &i0_ryr, &i1_ryr, &i2_ryr, &ryr_len, &i0_csqn, &i1_csqn, &i2_csqn, &csqn_len, &i0_cleft, &i1_cleft, &i2_cleft, &cleft_nb,&cleft_len, &states0, &states1, x_slice_mid,x_slice_width, x_slice_num, comm3d, comm, use_failing); // store2Dmatrixfile_int_1D("i0.txt",i0,n_ryr,1); // store2Dmatrixfile_int_1D("i1.txt",i1,n_ryr,1); // store2Dmatrixfile_int_1D("i2.txt",i2,n_ryr,1); double Vfraction; //first set the numbers of RyR in a CaRU; //All CaRU placed mid-sarcomere Vfraction=(30.0/h)*(30.0/h)*(30.0/h); // scaling of RyR when changing dx // Set constants and dt based on these: double D_i=250e3; // 220e3 double D_SR=73e3; // 73.3e3; double D_ATP=140e3; double D_CMDN=22e3; double D_Fluo=42e3; double dt=(1./6)*h*h/D_i; double alpha_i = dt*D_i/(h*h); double Ca0 = 140e-3; double CaSR0 = 1.3e3; double* Ca_i; Ca_i=(double*)malloc(len*sizeof(double)); for ( i = 0; i < len; i += 1 ) { Ca_i[i]=Ca0; } double alpha_SR = dt*D_SR/(h*h); double* Ca_SR; Ca_SR=(double*)malloc(len*sizeof(double)); for ( i = 0; i < len; i += 1 ) { Ca_SR[i]=CaSR0; } double k_on_CMDN = 34e-3; double k_off_CMDN = 238e-3; double CMDN_tot = 24; double alpha_CMDN = dt*D_CMDN/(h*h); double k_on_ATP = 255e-3; double k_off_ATP = 45; double ATP_tot = 455; double alpha_ATP = dt*D_ATP/(h*h); double k_on_Fluo = 110e-3; double k_off_Fluo = 110e-3; double Fluo_tot = 25; // 25; double alpha_Fluo = dt*D_Fluo/(h*h); double k_on_TRPN = 32.7e-3; double k_off_TRPN = 19.6e-3; // 26.16e-3; double TRPN_tot = 70; // 50; double k_on_CSQN = 102e-3; double k_off_CSQN = 65; double CSQN_tot = 30e3; double alpha[7]; double k_on[7]; double k_off[7]; double B_tot[7]; alpha[0]=alpha_i; alpha[1]=alpha_SR; alpha[2]=alpha_CMDN; alpha[3]=alpha_ATP; alpha[4]=alpha_Fluo; alpha[5]=0; alpha[6]=0; k_on[0]=0 ; k_on[1]= 0; k_on[2]= k_on_CMDN; k_on[3]=k_on_ATP ; k_on[4]=k_on_Fluo ; k_on[5]=k_on_TRPN; k_on[6]=k_on_CSQN; k_off[0]=0 ; k_off[1]= 0; k_off[2]=k_off_CMDN; k_off[3]=k_off_ATP; k_off[4]=k_off_Fluo; k_off[5]=k_off_TRPN; k_off[6]=k_off_CSQN; B_tot[0]=0 ; B_tot[1]= 0; B_tot[2]=CMDN_tot ; B_tot[3]=ATP_tot ; B_tot[4]=Fluo_tot ; B_tot[5]=TRPN_tot; B_tot[6]=CSQN_tot; // Calculate steady state IC for the buffers based on Ca_i ... 
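// A buffer B at equilibrium satisfies k_on*(B_tot - CaB)*Ca = k_off*CaB, which gives
// CaB = B_tot*Ca/(Ca + Kd) with Kd = k_off/k_on. The initial values below apply this with
// Ca = Ca0 for the cytosolic buffers and Ca = Ca_SR[0] (CaSR0) for CSQN.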
double Ca_CMDN0=B_tot[2]*Ca0/(Ca0+k_off[2]/k_on[2]); double Ca_ATP0 =B_tot[3]*Ca0/(Ca0+k_off[3]/k_on[3]); double Ca_Fluo0=B_tot[4]*Ca0/(Ca0+k_off[4]/k_on[4]); double Ca_TRPN0=B_tot[5]*Ca0/(Ca0+k_off[5]/k_on[5]); // and Ca_SR: double Ca_CSQN0 = CSQN_tot*Ca_SR[0]/(Ca_SR[0] + k_off_CSQN/k_on_CSQN); double init_values[7] = {Ca0, CaSR0, Ca_CMDN0, Ca_ATP0, Ca_Fluo0, Ca_TRPN0, Ca_CSQN0}; //printf("%f %f %f %f %f \n ", Ca_ATP0, Ca_CMDN0, Ca_Fluo0, Ca_TRPN0, Ca_CSQN0); if(my_id==0) printf("cubiod_c: h:%d size_x:%d size_y:%d size_z:%d dt:%f, T:%f, TimeStep:%d, DT:%f outfilenum:%d, x_slice_num:%d, use_failing:%d, div_y:%d, save_binary:%d \n", h, size_x, size_y, size_z,dt,T, (int)(T/dt),DT,(int)(T/DT)*save_data,x_slice_num,use_failing, div_y,save_binary_file); // Allocate the data structure for the solution double *Ca_ATP ; double *Ca_CMDN ; double *Ca_Fluo ; double *Ca_TRPN ; double *Ca_CSQN ; Ca_ATP =(double*)malloc(len*sizeof(double)); Ca_CMDN=(double*)malloc(len*sizeof(double)); Ca_Fluo=(double*)malloc(len*sizeof(double)); Ca_TRPN=(double*)malloc(len*sizeof(double)); Ca_CSQN=(double*)malloc(len*sizeof(double)); for ( i = 0; i < len; i += 1 ) { Ca_ATP[i] = Ca_ATP0; Ca_CMDN[i] = Ca_CMDN0; Ca_Fluo[i] = Ca_Fluo0; Ca_TRPN[i] = Ca_TRPN0; Ca_CSQN[i] = Ca_CSQN0; } double* C0[7]; double* C1[7]; double* C_temp; C0[0]=(double*)malloc(len*sizeof(double)); C1[0]=Ca_i; memcpy(C0[0],C1[0],len*sizeof(double)); C0[1]=(double*)malloc(len*sizeof(double)); C1[1]=Ca_SR; memcpy(C0[1],C1[1],len*sizeof(double)); C0[2]=(double*)malloc(len*sizeof(double)); C1[2]=Ca_CMDN; memcpy(C0[2],C1[2],len*sizeof(double)); C0[3]=(double*)malloc(len*sizeof(double)); C1[3]=Ca_ATP; memcpy(C0[3],C1[3],len*sizeof(double)); C0[4]=(double*)malloc(len*sizeof(double)); C1[4]=Ca_Fluo; memcpy(C0[4],C1[4],len*sizeof(double)); C0[5]=(double*)malloc(len*sizeof(double)); C1[5]=Ca_TRPN; memcpy(C0[5],C1[5],len*sizeof(double)); C0[6]=(double*)malloc(len*sizeof(double)); C1[6]=Ca_CSQN; memcpy(C0[6],C1[6],len*sizeof(double)); //Ca = [[Ca_i.copy(), Ca_i ], // [Ca_SR.copy(), Ca_SR ], // [Ca_CMDN.copy(), Ca_CMDN], // [Ca_ATP.copy(), Ca_ATP ], // [Ca_Fluo.copy(), Ca_Fluo], // [Ca_TRPN, Ca_TRPN], // [Ca_CSQN, Ca_CSQN]] double gamma = 0.02; // SR volume fraction int cai=0; int sri=1; // int cmdni=2; // int atpi=3; // int fluoi=4; // int trpni=5; int csqni=6; double fraction[7]={1,1,1,1,1,1,1}; fraction[1]=gamma; fraction[6]=gamma; // Ryr conductance: double k_s = (Vfraction)*150/2; // 1/ms, based on 0.5pA of Ca2+ into (30nm)^3. double K = exp(-k_s*dt*(1+1/gamma)); // factor need in the integration below if(my_id==0){ printf("dt = dt: %e\n", dt); printf("k_s = (Vfraction)*150/2: %e\n", k_s); printf("K = exp(-k_s*dt*(1+1/gamma)): %e\n", K); } double t=0; int counter=0; // int mean[7]; time_main-=timing(); FILE *fpdata; char meanfile[200]; if (save_hdf5) sprintf(meanfile,"%s_mean.txt", h5_basename); else sprintf(meanfile,"%s/mean.txt", outdirname); if(!my_id){ if(save_data){ if ((fpdata=fopen(meanfile, "w"))==NULL) { printf("failed open output file "); printf("%s", meanfile); printf(" ! \n "); exit(0); } } } // H5 Setup if (save_hdf5) { char h5_data_file[200]; // Set up file access property list with parallel I/O access // property list identifier hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS); H5Pset_fapl_mpio(plist_id, comm, info); sprintf(h5_data_file, "%s.h5", h5_basename); // Create a new file collectively and release property list identifier. 
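// The MPI-IO file access property list set up above (H5Pset_fapl_mpio) is what makes the
// creation collective: every rank passes the same file name and plist_id to H5Fcreate.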
h5_file_id = H5Fcreate(h5_data_file, H5F_ACC_TRUNC, H5P_DEFAULT, plist_id); H5Pclose(plist_id); const int data_rank = 2; hsize_t dimsf[2] = {size_y, size_z}; /* dataset dimensions */ hsize_t chunk_dims[2] = {ny, nz}; /* chunk dimensions */ // Offset into dataset based on the MPI coord from MPI_Cart_get hsize_t h5_offset[2] = {coord[1]*nz, coord[0]*ny}; hsize_t h5_count[2] = {1, 1}; hsize_t data_size=ny*nz; h5_data = (hdf5_data_type*)malloc(data_size*sizeof(hdf5_data_type)); if (!my_id) { printf("Total data size per species: %zu, %zu\n", dimsf[0], dimsf[1]); printf("Total data size per chunk per species: %zu, %zu\n", chunk_dims[0], chunk_dims[1]); } printf("rank %d | h5 offset [%zu, %zu]\n", my_id, h5_offset[0], h5_offset[1]); // Create data space for the datatype limits hsize_t dims = 1; hid_t attr_space = H5Screate_simple(1, &dims, NULL); // Create a time attribute hid_t limit_id = H5Acreate(h5_file_id, "data_type_size", H5T_NATIVE_DOUBLE, attr_space, H5P_DEFAULT, H5P_DEFAULT); // Write the attribute data double data_type_size = (double)H5_DATA_SIZE; herr_t status = H5Awrite(limit_id, H5T_NATIVE_DOUBLE, &data_type_size); // Cleanup H5Aclose(limit_id); H5Sclose(attr_space); // Save hard coded data ranges for (i=0; i<NUM_SAVE_SPECIES; i++) { // Get species int species = save_species[i]; // Create data scale attribute sprintf(hdf5_dataset_name, "%s_scale", species_names[species]); // Create data space for the species scale attribute hsize_t dims = 1; hid_t attr_space = H5Screate_simple(1, &dims, NULL); // Create a time attribute hid_t scale_id = H5Acreate(h5_file_id, hdf5_dataset_name, H5T_NATIVE_DOUBLE, attr_space, H5P_DEFAULT, H5P_DEFAULT); // Write the attribute data herr_t status = H5Awrite(scale_id, H5T_NATIVE_DOUBLE, &init_values[species]); // Cleanup H5Aclose(scale_id); H5Sclose(attr_space); // Create init value attribute sprintf(hdf5_dataset_name, "%s_init", species_names[species]); // Create data space for the species init attribute dims = 1; attr_space = H5Screate_simple(1, &dims, NULL); // Create a time attribute hid_t init_id = H5Acreate(h5_file_id, hdf5_dataset_name, H5T_NATIVE_DOUBLE, attr_space, H5P_DEFAULT, H5P_DEFAULT); // Write the attribute data status = H5Awrite(init_id, H5T_NATIVE_DOUBLE, &init_values[species]); // Cleanup H5Aclose(init_id); H5Sclose(attr_space); } } double* yz_sbuf0; double* yz_rbuf0; double* xz_sbuf0; double* xz_rbuf0; double* xy_sbuf0; double* xy_rbuf0; double* yz_sbuf1; double* yz_rbuf1; double* xz_sbuf1; double* xz_rbuf1; double* xy_sbuf1; double* xy_rbuf1; yz_sbuf0=(double*)mpi_malloc(my_id,5*ny*nz*sizeof(double)); xz_sbuf0=(double*)mpi_malloc(my_id,5*nx*nz*sizeof(double)); xy_sbuf0=(double*)mpi_malloc(my_id,5*nx*ny*sizeof(double)); yz_sbuf1=(double*)mpi_malloc(my_id,5*ny*nz*sizeof(double)); xz_sbuf1=(double*)mpi_malloc(my_id,5*nx*nz*sizeof(double)); xy_sbuf1=(double*)mpi_malloc(my_id,5*nx*ny*sizeof(double)); yz_rbuf0=(double*)mpi_malloc(my_id,5*ny*nz*sizeof(double)); xz_rbuf0=(double*)mpi_malloc(my_id,5*nx*nz*sizeof(double)); xy_rbuf0=(double*)mpi_malloc(my_id,5*nx*ny*sizeof(double)); yz_rbuf1=(double*)mpi_malloc(my_id,5*ny*nz*sizeof(double)); xz_rbuf1=(double*)mpi_malloc(my_id,5*nx*nz*sizeof(double)); xy_rbuf1=(double*)mpi_malloc(my_id,5*nx*ny*sizeof(double)); #ifdef __PAPI__ if ( PAPI_start( EventSet ) != PAPI_OK){ printf("PAPI_read_counters failed\n"); } #endif //settime //T=1000*dt; //for ( T = 0; T < TimeStep; T += 1 ) int t_counter=0; while(t<T) //while(0) { t+=dt; t_counter++; time_comm-=timing(); updateBound(C0[0], C0[1], C0[2], C0[3], 
C0[4], t_counter, nx0, ny0, nz0, yz_sbuf0,yz_rbuf0, xz_sbuf0,xz_rbuf0, xy_sbuf0,xy_rbuf0, yz_sbuf1,yz_rbuf1, xz_sbuf1,xz_rbuf1, xy_sbuf1,xy_rbuf1, NeighBor, ar_status,ar_send_req,ar_recv_req, comm, comm3d); time_comm+=timing(); // Diffusion update time_conc-=timing(); // Change to use a faster computing function compute_pde_ode(nx0, ny0, nz0, dt, gamma, 1e-4, alpha, B_tot, k_on, k_off, C0, C1, div_y); // for ( i = 0; i < 5; i += 1 ) { // laplace3D(nx0,ny0,nz0,C0[i],nx1,ny1,nz1,C1[i],alpha[i]); // } // for ( i = 2; i < 6; i += 1 ) { // reaction3D(nx1,ny1,nz1,C1[cai],nx1,ny1,nz1,C1[i],B_tot[i],k_on[i],k_off[i],dt); // } // serca3D(nx1,ny1,nz1, C1[cai],nx1,ny1,nz1, C1[sri], dt, gamma, 1.0); time_conc+=timing(); // Update at RyRs, one at the time time_ryr-=timing(); update_ryr(h_scale, nx0, ny0, nz0, C1[cai], C1[sri], C1[csqni], C1[0],C1[2],C1[3],C1[4], k_on_CSQN, k_off_CSQN,CSQN_tot, gamma, K, dt, ryr_len, i0_ryr, i1_ryr, i2_ryr, csqn_len, i0_csqn, i1_csqn, i2_csqn, cleft_len, i0_cleft, i1_cleft, i2_cleft,cleft_nb, states0, states1); time_ryr+=timing(); double sum_c_i_root[7]; double sum_c_i[7]; double cai_min; double cai_min_root=0.0; double cai_max; double cai_max_root=1.0; double sm; double ca[8]; char caoutfile[100]; if ((fmod(t,DT)<dt)||(t==dt)){ time_io-=timing(); for(idx=0; idx<7; idx++){ sum_c_i[idx]=0.0; for ( i = 1; i <= nx; i += 1 ) for ( j = 1; j <= ny; j += 1 ) for ( k = 1; k <= nz; k += 1 ) sum_c_i[idx]+=C1[idx][i*ny0*nz0+j*nz0+k]; } cai_min=my_min(C1[cai],len); cai_max=my_max(C1[cai],len); /* reduce operation comm*/ MPI_Reduce(&sum_c_i[0], &sum_c_i_root[0], 7, MPI_DOUBLE, MPI_SUM, 0, comm); MPI_Reduce(&cai_min, &cai_min_root, 1, MPI_DOUBLE, MPI_MIN, 0, comm); MPI_Reduce(&cai_max, &cai_max_root, 1, MPI_DOUBLE, MPI_MAX, 0, comm); if(!my_id){ sm = 0; ca[0] = t; if(save_data) fprintf(fpdata,"%f ", ca[0]); for(idx=0; idx<7; idx++){ sm += fraction[idx]*sum_c_i_root[idx]; ca[idx+1] = sum_c_i_root[idx]/((double)nx*x_domains*(double)ny*y_domains*(double)nz*z_domains); if(DB_PF){ printf("ca[%d]: %f , sum : %f, nx ny nz: %d %d %d \n",idx+1, ca[idx+1], sum_c_i_root[idx],nx*x_domains,ny*y_domains,nz*z_domains); } if(save_data) fprintf(fpdata,"%f ", ca[idx+1]); } if(save_data) fprintf(fpdata,"\n "); printf("%3d, %.3f, %3.2f, %7.2f, %3.2f, %4.2f, %.2f \n", counter, t, ca[1], ca[2], cai_min_root, cai_max_root, sm); } if(save_data && in_midx_slice) { // If saving in hdf5 if (save_hdf5) { hsize_t dimsf[2] = {size_y, size_z}; /* dataset dimensions */ hsize_t chunk_dims[2] = {ny, nz}; /* chunk dimensions */ hsize_t h5_offset[2] = {coord[1]*nz, coord[0]*ny}; hsize_t h5_count[2] = {1, 1}; // Create group name sprintf(hdf5_group_name, "/data_%d", counter); hid_t group_id = H5Gcreate(h5_file_id, hdf5_group_name, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); // Create data space for the time attribute hsize_t dims = 1; hid_t attr_space = H5Screate_simple(1, &dims, NULL); // Create a time attribute hid_t time_id = H5Acreate(group_id, "time", H5T_NATIVE_DOUBLE, attr_space, H5P_DEFAULT, H5P_DEFAULT); // Write the attribute data double time_data = counter*DT; herr_t status = H5Awrite(time_id, H5T_NATIVE_DOUBLE, &time_data); // Cleanup H5Aclose(time_id); H5Sclose(attr_space); for (i=0; i<NUM_SAVE_SPECIES; i++) { // Get species int species = save_species[i]; sprintf(hdf5_dataset_name, "%s/%s", hdf5_group_name, species_names[species]); // file and dataset identifiers hid_t filespace = H5Screate_simple(2, dimsf, NULL); hid_t memspace = H5Screate_simple(2, chunk_dims, NULL); // Create chunked dataset. 
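// Per-species collective write pattern used here: a filespace covering the global y-z sheet
// (dimsf), a memspace for this rank's ny-by-nz chunk, a chunked dataset creation property
// list, a hyperslab selection at this rank's offset, and an H5Dwrite with
// H5FD_MPIO_COLLECTIVE transfer.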
hid_t plist_id = H5Pcreate(H5P_DATASET_CREATE); H5Pset_chunk(plist_id, 2, chunk_dims); // Create compression filter (Not supported in parallel yet...) //unsigned int gzip_level = 9; //herr_t status = H5Pset_filter(plist_id, H5Z_FILTER_DEFLATE, // H5Z_FLAG_OPTIONAL, 1, &gzip_level); hid_t dset_id = H5Dcreate(h5_file_id, hdf5_dataset_name, H5T_DATA_TYPE, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); H5Pclose(plist_id); H5Sclose(filespace); // Select hyperslab in the file. filespace = H5Dget_space(dset_id); status = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, h5_offset, NULL, h5_count, chunk_dims); // Copy data to h5_data transfer_hdf5_data(h5_data, &(C0[species][ny0*nz0*mid_coord_x]), &(C1[species][ny0*nz0*mid_coord_x]), init_values[species], chunk_dims); // Create property list for collective dataset write. plist_id = H5Pcreate(H5P_DATASET_XFER); H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE); status = H5Dwrite(dset_id, H5T_DATA_TYPE, memspace, filespace, plist_id, h5_data); // Close/release resources. H5Dclose(dset_id); H5Sclose(filespace); H5Sclose(memspace); H5Pclose(plist_id); } H5Gclose(group_id); } // No HDF5 else { // Get species int species = save_species[i]; for (i=0; i<NUM_SAVE_SPECIES; i++) { sprintf(caoutfile, "%s/Ca%d_T%d_rank%d_%d_%d.np", outdirname, species, counter, coord[2], coord[1], coord[0]); if(save_binary_file) store2Dmatrixfile_double_bin(caoutfile, C1[species], ny0, nz0, mid_coord_x); else store2Dmatrixfile_double_1D(caoutfile, C1[species], ny0, nz0, mid_coord_x); } } } counter += 1; } // # Update Ca for(i=0;i<7;i++){ C_temp=C0[i]; C0[i]=C1[i]; C1[i]=C_temp; } MPI_Waitall(6, ar_send_req, ar_status); } time_main+=timing(); if(my_id==0){ if(save_data) fclose(fpdata); printf("cubiod_c: h:%d size_x:%d size_y:%d size_z:%d dt:%f, T:%f, TimeStep:%d, DT:%f, x_slice_num:%d\n", h, size_x, size_y, size_z,dt,T,(int)(T/dt),DT,x_slice_num); printf("nx0:%d ny0:%d nz0:%d size/array:%7.3f MB total size:%7.3f MB\n", nx0,ny0,nz0,nx0*ny0*nz0*8*1e-6,nx0*ny0*nz0*8*1e-6*12); #ifdef __PAPI__ if ( PAPI_stop( EventSet, res_papi ) != PAPI_OK){ printf("PAPI_accum_counters failed\n"); } for (i = 0; i<NUM_EVENTS; i++){ PAPI_event_code_to_name(Events[i], EventName); printf("PAPI Event name: %s, value: %lld\n", EventName, res_papi[i]); } #endif printf("computing time: %7.3f \n", time_conc); printf("updateryr time: %7.3f \n", time_ryr); printf("communica time: %7.3f \n", time_comm); printf("main time: %7.3f \n", time_main); #ifdef __PAPI__ printf("PAPI Performanc/core: %7.3f GFLOPS\n", res_papi[0]/1e9/time_conc); #endif } if (save_hdf5) { H5Fclose(h5_file_id); free(h5_data); } for(i=0;i<5;i++){ free(C0[i]); free(C1[i]); } free(C0[6]); free(C0[5]); free(i0_ryr); free(i1_ryr); free(i2_ryr); free(i0_csqn); free(i1_csqn); free(i2_csqn); free(i0_cleft); free(i1_cleft); free(i2_cleft); free(cleft_nb); MPI_Finalize(); return 0; } void laplace3D (int nx0, int ny0, int nz0, double* C0, int nx1, int ny1, int nz1, double* C1, double alpha)//, int num_threads) { // Set num threads // omp_set_num_threads(num_threads); // Local variables int i, j, k; double C0_tmp; // Main kernel loop // #pragma omp parallel for private(i, j, k, C0_tmp) //collapse(3) for (i=1; i<nx0-1; i++) { for (j=1; j<ny0-1; j++) { for (k=1; k<nz0-1; k++) { // Main kernel C0_tmp = -6*C0[i*nz0*ny0+j*nz0+k] + C0[(i-1)*nz0*ny0+j*nz0+k] + C0[(i+1)*nz0*ny0+j*nz0+k] + C0[i*nz0*ny0+(j-1)*nz0+k] + C0[i*nz0*ny0+(j+1)*nz0+k] + C0[i*nz0*ny0+j*nz0+k-1] + C0[i*nz0*ny0+j*nz0+k+1]; // Put value back into return array with offset to indices 
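// This is the explicit 7-point stencil update C1 = C0 + alpha*(sum of the 6 face neighbours - 6*C0),
// where alpha = dt*D/(h*h) is precomputed by the caller (alpha_i, alpha_SR, ... in main()).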
C1[i*nz1*ny1+j*nz1+k] = C0[i*nz1*ny1+j*nz1+k] + C0_tmp*alpha; } } } } void reaction3D (int nx0, int ny0, int nz0, double* Ca, int nx1, int ny1, int nz1, double* buff, double B_tot, double k_on, double k_off, double dt)//, int num_threads) { // Set num threads // omp_set_num_threads(num_threads); // Local variables int i, j, k; double J; // Use pointers reducing indexing into memory to once double* Ca_ijk; double* buff_ijk; // Main kernel loop // #pragma omp parallel for private(i, j, k, J, Ca_ijk, buff_ijk) //collapse(3) for (i=1; i<nx0-1; i++) { for (j=1; j<ny0-1; j++) { for (k=1; k<nz0-1; k++) { // Main kernel Ca_ijk = &Ca[i*nz0*ny0+j*nz0+k]; buff_ijk = &buff[i*nz0*ny0+j*nz0+k]; J = k_on*(B_tot - *buff_ijk)*(*Ca_ijk) - k_off*(*buff_ijk); *Ca_ijk -= dt*J; *buff_ijk += dt*J; } } } } void serca3D (int nx0, int ny0, int nz0, double* Ca_i, int nx1, int ny1, int nz1, double* Ca_SR, double dt, double gamma, double fudge)//, int num_threads) { // Set num threads // omp_set_num_threads(num_threads); // Local variables int i, j, k; double J; // Use pointers reducing indexing into memory to once double Ca_i2_ijk; double Ca_SR2_ijk; // Main kernel loop // #pragma omp parallel for private(i, j, k, J, Ca_i2_ijk, Ca_SR2_ijk) //collapse(3) for (i=1; i<nx0-1; i++) { for (j=1; j<ny0-1; j++) { for (k=1; k<nz0-1; k++) { // Main kernel Ca_i2_ijk = Ca_i[i*nz0*ny0+j*nz0+k]; Ca_SR2_ijk = Ca_SR[i*nz0*ny0+j*nz0+k]; Ca_i2_ijk *= Ca_i2_ijk; Ca_SR2_ijk *= Ca_SR2_ijk; J = fudge*(570997802.885875*Ca_i2_ijk - 0.0425239333622699*Ca_SR2_ijk)/(106720651.206402*Ca_i2_ijk + 182.498197548666*Ca_SR2_ijk + 5.35062954944879); Ca_i[i*nz0*ny0+j*nz0+k] -= dt*J; Ca_SR[i*nz0*ny0+j*nz0+k] += dt*J/gamma; } } } } void update_ryr(int h_scale,int nx0, int ny0, int nz0, double* Ca_i, double* Ca_SR, double* Ca_CSQN, double* C10, double* C12, double* C13, double* C14, double k_on_CSQN, double k_off_CSQN, double CSQN_tot, double gamma, double K, double dt, int ryr_len, int* i0_ryr, int* i1_ryr, int* i2_ryr, int csqn_len, int* i0_csqn, int* i1_csqn, int* i2_csqn, int cleft_len, int* i0_cleft, int* i1_cleft, int* i2_cleft,int* cleft_nb, int* states0, int* states1) { int i,j; int x_copy_from; int x,y,z; int nb_y,nb_z; int idx,idx_cleft,idx_csqn; double J; int open; double c0,c1; //extend csqn on x direction // for(j=(1-h_scale);j<h_scale;j++){ //extend cdqn on x+ direction for 30nm for(j=0;j<h_scale;j++){ for(i=0;i<csqn_len;i+=1){ x=i0_csqn[i]+j; #ifdef DEBUG_TEST if((x<0)||x>(nx0-1)) { printf("wrong csqn x index\n"); exit(0); } #endif y=i1_csqn[i]; z=i2_csqn[i]; idx=x*ny0*nz0+y*nz0+z; //CSQN step: J = k_on_CSQN*(CSQN_tot - Ca_CSQN[idx])*Ca_SR[idx] - k_off_CSQN*Ca_CSQN[idx]; Ca_SR[idx] -= dt*J; Ca_CSQN[idx] += dt*J; } } //add no_flux boundary by copy the neighbour's value on no_flux voxel //add x+ front no-flux plane on ryr with +1 offset, and copy from -1 x-plane(where ryr is on) j=1; x_copy_from=-1; for(i=0;i<csqn_len;i+=1){ x=i0_csqn[i]+j; #ifdef DEBUG_TEST if((x<0)||x>(nx0-1)) { printf("wrong csqn x index\n"); exit(0); } #endif y=i1_csqn[i]; z=i2_csqn[i]; idx_cleft=x*ny0*nz0+y*nz0+z; idx_csqn =(x+x_copy_from)*ny0*nz0+y*nz0+z; C10[idx_cleft]=C10[idx_csqn]; C12[idx_cleft]=C12[idx_csqn]; C13[idx_cleft]=C13[idx_csqn]; C14[idx_cleft]=C14[idx_csqn]; } //add x+ back no-flux plane on ryr with h_scale offset, and copy from +1 x-plane(outside of csqn) if(h_scale==2)//15 nm j=h_scale+1;//guarantee that there is at least one voxel inner the no-flux boundary else//5nm 3mn 1nm, j=h_scale; x_copy_from=+1; for(i=0;i<csqn_len;i+=1){ x=i0_csqn[i]+j; #ifdef 
DEBUG_TEST if((x<0)||x>(nx0-1)) { printf("wrong csqn x index\n"); exit(0); } #endif y=i1_csqn[i]; z=i2_csqn[i]; idx_cleft=x*ny0*nz0+y*nz0+z; idx_csqn =(x+x_copy_from)*ny0*nz0+y*nz0+z; C10[idx_cleft]=C10[idx_csqn]; C12[idx_cleft]=C12[idx_csqn]; C13[idx_cleft]=C13[idx_csqn]; C14[idx_cleft]=C14[idx_csqn]; } //extend y-z plane no_flux boundary along x+ direction with +1 offset and copy value from outside of CSQN by cleft_nb index int k; if(h_scale==2)//15 nm k=1;//guarantee that there is at least one voxel inner the no-flux boundary else//5nm 3mn 1nm, k=0; for(j=2;j<h_scale+k;j++){ for(i=0;i<cleft_len;i+=1){ x=i0_cleft[i]+j; #ifdef DEBUG_TEST if((x<0)||x>(nx0-1)) { printf("wrong csqn x index\n"); exit(0); } #endif y=i1_cleft[i]; z=i2_cleft[i]; nb_y=cleft_nb[i]/8-1; nb_z=cleft_nb[i]%8-1; idx_cleft=x*ny0*nz0+y*nz0+z; idx_csqn =x*ny0*nz0+(y+nb_y)*nz0+z+nb_z; C10[idx_cleft]=C10[idx_csqn]; C12[idx_cleft]=C12[idx_csqn]; C13[idx_cleft]=C13[idx_csqn]; C14[idx_cleft]=C14[idx_csqn]; } } //add x- front no-flux plane on ryr with -h_scale/2(15nm) offset, and copy from +1 x-plane(t-tubule) j=0-h_scale/2; x_copy_from=1; for(i=0;i<csqn_len;i+=1){ x=i0_csqn[i]+j; #ifdef DEBUG_TEST if((x<0)||x>(nx0-1)) { printf("wrong csqn x index\n"); exit(0); } #endif y=i1_csqn[i]; z=i2_csqn[i]; idx_cleft=x*ny0*nz0+y*nz0+z; idx_csqn =(x+x_copy_from)*ny0*nz0+y*nz0+z; C10[idx_cleft]=C10[idx_csqn]; C12[idx_cleft]=C12[idx_csqn]; C13[idx_cleft]=C13[idx_csqn]; C14[idx_cleft]=C14[idx_csqn]; } //add x- back no-flux plane on ryr with -h_scale/2+1 offset, and copy from -1 x-plane(t-tubule) /* if(h_scale=2) j=0-h_scale/2-h_scale; else j=0-h_scale/2-h_scale+1; */ /* how thick should t-tubule be? now, just set it 2 lines on x- direction */ // j=0-h_scale/2-h_scale-1; j=0-h_scale/2-1; x_copy_from=-1; for(i=0;i<csqn_len;i+=1){ x=i0_csqn[i]+j; #ifdef DEBUG_TEST if((x<0)||x>(nx0-1)) { printf("wrong csqn x index\n"); exit(0); } #endif y=i1_csqn[i]; z=i2_csqn[i]; idx_cleft=x*ny0*nz0+y*nz0+z; idx_csqn =(x+x_copy_from)*ny0*nz0+y*nz0+z; C10[idx_cleft]=C10[idx_csqn]; C12[idx_cleft]=C12[idx_csqn]; C13[idx_cleft]=C13[idx_csqn]; C14[idx_cleft]=C14[idx_csqn]; } /* how thick should t-tubule be? 
*/ /* //extend y-z plane no_flux boundary along x- direction with +1 offset and copy value from outside of CSQN by cleft_nb index int k; if(h_scale==2)//15 nm k=1;//guarantee that there is at least one voxel inner the no-flux boundary else//5nm 3mn 1nm, k=0; for(j=0-h_scale/2-1;j>0-h_scale/2-h_scale+1-k;j--){ for(i=0;i<cleft_len;i+=1){ x=i0_cleft[i]+j; #ifdef DEBUG_TEST if((x<0)||x>(nx0-1)) { printf("wrong csqn x index\n"); exit(0); } #endif y=i1_cleft[i]; z=i2_cleft[i]; nb_y=cleft_nb[i]/8-1; nb_z=cleft_nb[i]%8-1; idx_cleft=x*ny0*nz0+y*nz0+z; idx_csqn =x*ny0*nz0+(y+nb_y)*nz0+z+nb_z; C10[idx_cleft]=C10[idx_csqn]; C12[idx_cleft]=C12[idx_csqn]; C13[idx_cleft]=C13[idx_csqn]; C14[idx_cleft]=C14[idx_csqn]; } } */ for ( i = 0; i < ryr_len; i += 1 ) { x=i0_ryr[i]; y=i1_ryr[i]; z=i2_ryr[i]; idx=x*ny0*nz0+y*nz0+z; // #Continous formulation // #states[:,i] += dt*stern(t, states[:,i], Ca_i[idx]) stern_discrete(dt, &states0[i],&states1[i], Ca_i[idx]); open = states0[i]*(1-states1[i]); // #Exp Euler: // #J_RyR = k*open*(Ca_SR[idx]-Ca_i[idx]) // #Ca_i[idx] += dt*J_RyR // #Ca_SR[idx] -= dt*J_RyR/gamma; // #Analytical update: // K = exp(-k_s*dt*(1+1/gamma)) if (open){ if(DB_PF) printf("open [%d] ryr[%d,%d,%d] \n", i, x, y,z); c0 = (Ca_i[idx] + gamma*Ca_SR[idx])/(1+gamma); c1 = (Ca_i[idx] - Ca_SR[idx])/(1+1/gamma); Ca_i[idx] = c0 + c1*K; Ca_SR[idx] = c0 - c1*K/gamma; } } } void stern(double t, double* y0, double* y1, double Ca){ double m = *y0; double h = *y1; double kim = 0.005; double kom = 0.06; double K_i = 0.01*10; double K_o = 0.01*41.4; double ki = kim/K_i; double ko = kom/(K_o*K_o); double dm = ko*Ca*Ca*(1-m)-kom*m; double dh = ki*Ca*(1-h)-kim*h; *y0=dm; *y1=dh; } void stern_discrete(double dt, int* y0, int* y1, double Ca){ double kim = 0.002; // 1/ms double kom = 1.5; // 0.5 1/ms double kd_i = 20.0; // 20.0 um*ms double kd_o = 0.9; // um*ms^N 0.7, 0.8, 0.9, 1.0 double Ca_ki = Ca/kd_i; double Ca_ko = Ca/kd_o; double ki = Ca_ki*Ca_ki; // (Ca/kd_i)^2 double ko = Ca_ko*Ca_ko*Ca_ko*Ca_ko; // ko = (Ca/kd_o)^4 //double kim = 0.005; // Original: 0.005 //double kom = 0.04; // Original: 0.06 //double ki = Ca*1.5*1e-3; // Original: Ca*0.5*1e-3 //double ko = 1e-6*Ca*Ca*3500; // Original: 1e-6*Ca*Ca*{35,1200,2000,3500} double r; int m, h; m = *y0; if(m==1){ r = my_random(); m = 1 - (r<(dt*kom)); } else { r=my_random(); m = 1*(r<(dt*ko)); } h = *y1; if(h==1){ r = my_random(); h = 1 - (r<(dt*kim)); } else{ r = my_random(); h = 1*(r<(dt*ki)); } *y0=m; *y1=h; } inline double my_random() { double r; double x; // r=(double)(rand()%100000000); // x=(r*1e-8); x=((double)rand())/(double)RAND_MAX; return x; } void store2Dmatrixfile_double_1D(char* outfile, double* ar, int rows, int cols, int x_strid){ FILE *fpdata; int i,j; if ((fpdata=fopen(outfile, "w"))==NULL) { printf("fialed open output file "); printf("%s",outfile); printf(" ! \n "); exit(0); } // printf("----Generating list output to "); // printf("%s",outfile); // printf(" file----\n"); for(i=0;i<rows;i++) { for(j=0;j<cols;j++) { fprintf(fpdata,"%.9e ", ar[x_strid*rows*cols+i*cols+j]); } fprintf(fpdata,"\n"); } fclose(fpdata); return; } void store2Dmatrixfile_double_bin(char* outfile, double* ar, int rows, int cols, int x_strid) { FILE *fpdata; int i,j; if ((fpdata=fopen(outfile, "wb"))==NULL) { printf("failed open output file "); printf("%s",outfile); printf(" ! 
\n "); exit(0); } fwrite(&ar[x_strid*rows*cols],sizeof(double),rows*cols,fpdata); fclose(fpdata); return; } void transfer_hdf5_data(hdf5_data_type* h5_data, double* ar0, double* ar1, double scale_value, hsize_t* chunk_dims) { int i,j; int rows=chunk_dims[0]; int cols=chunk_dims[1]; // Transfer data from padded ar to stripped data for(i=0;i<rows;i++) { for(j=0;j<cols;j++) { double rel_data_diff = (ar1[i*(cols+2)+j+1]-ar0[i*(cols+2)+j+1])/scale_value; h5_data[i*cols+j] = (hdf5_data_type)round(rel_data_diff*H5_DATA_LIMIT_1); } } } void store2Dmatrixfile_int_1D(char* outfile, int* ar, int rows, int cols){ FILE *fpdata; int i,j; if ((fpdata=fopen(outfile, "w"))==NULL) { printf("failed open output file "); printf("%s",outfile); printf(" ! \n "); exit(0); } printf("----Generating list output to "); printf("%s",outfile); printf(" file----\n"); for(i=0;i<rows;i++) { for(j=0;j<cols;j++) fprintf(fpdata,"%d ",ar[i*cols+j]); fprintf(fpdata,"\n"); } fclose(fpdata); return; } double my_min(double* ar, int len) { double min=ar[0]; int i; for ( i = 0; i < len; i += 1 ) { if(ar[i]<min) min=ar[i]; } return min; } double my_max(double* ar, int len) { double max=ar[0]; int i; for ( i = 0; i < len; i += 1 ) { if(ar[i]>max) max=ar[i]; } return max; } double timing(){ double time; struct timeval timmer; gettimeofday(&timmer,NULL); time = 1000000*timmer.tv_sec + timmer.tv_usec; time /= 1000000; return time; } int load_indices_serial(int nx, int ny, int nz, int h, int** i0_ryr, int** i1_ryr, int** i2_ryr, int* ryr_len, int** i0_csqn, int** i1_csqn, int** i2_csqn, int* csqn_len, int** i0_cleft, int** i1_cleft, int** i2_cleft, int** cleft_nb, int* cleft_len, int x_slice_mid, int x_slice_width, int x_slice_num, int use_failing) { int i,j,k; int nx_old; int ny_old; int nz_old; nx_old=nx; ny_old=ny; nz_old=nz; // Scale nx, xy, nz in terms of RyR if(30%h!=0){ printf("30 must be divisible by h!"); exit(1); } int h_scale; h_scale = 30/h; nx = nx/h_scale; ny = ny/h_scale; nz = nz/h_scale; // All CaRU placed mid-sarcomere // int mid_x = (nx+1)/2; // load RyR indices from file int* i1; int* i2; int i1_len; int i2_len; char i_RyR_indices_name[200]; char j_RyR_indices_name[200]; sprintf(i_RyR_indices_name, "i_RyR_indices%s.dat", use_failing ? "_failing" : ""); sprintf(j_RyR_indices_name, "j_RyR_indices%s.dat", use_failing ? 
"_failing" : ""); if (use_failing) printf("Load failing indices"); else printf("Load normal indices"); i1=loadRyRindexfile_int(i_RyR_indices_name, &i1_len); i2=loadRyRindexfile_int(j_RyR_indices_name, &i2_len); // # Only use the subset which are inside the geometry if(i1_len==i2_len) printf("num RyR before reduction: %d\n", i1_len); else printf("num RyR is wrong: i1_len!=i2_len\n"); int* i1_temp; int* i2_temp; int i1_temp_len=0; for ( i = 0; i < i1_len; i += 1 ) { if(i1[i]<ny) i1_temp_len++; } i1_temp=malloc(i1_temp_len*sizeof(int)); i2_temp=malloc(i1_temp_len*sizeof(int)); j=0; for ( i = 0; i < i1_len; i += 1 ) { if(i1[i]<ny){ i1_temp[j]=i1[i]; i2_temp[j]=i2[i]; j++; } } free(i1); free(i2); int i1_ryr_len=0; for ( i = 0; i < i1_temp_len; i += 1 ) { if(i2_temp[i]<nz) i1_ryr_len++; } *i0_ryr=malloc(x_slice_num*i1_ryr_len*sizeof(int)); *i1_ryr=malloc(x_slice_num*i1_ryr_len*sizeof(int)); *i2_ryr=malloc(x_slice_num*i1_ryr_len*sizeof(int)); j=0; for ( i = 0; i < i1_temp_len; i += 1 ) { if(i2_temp[i]<nz){ for(k=0; k < x_slice_num; k++){ (*i1_ryr)[k*i1_ryr_len+j]=i1_temp[i]; (*i2_ryr)[k*i1_ryr_len+j]=i2_temp[i]; } j++; } } free(i1_temp); free(i2_temp); // Scale indices and move to center of macro voxel for ( i = 0; i < i1_ryr_len; i += 1 ) { for(k=0; k < x_slice_num; k++){ (*i0_ryr)[k*i1_ryr_len+i] = k*x_slice_width+x_slice_mid; //for those ryr just on 0 boundary, avoid to subtracting their coords to negative if((*i1_ryr)[k*i1_ryr_len+i]>0) (*i1_ryr)[k*i1_ryr_len+i] = (*i1_ryr)[k*i1_ryr_len+i]*h_scale - floor((double)h_scale/2); else (*i1_ryr)[k*i1_ryr_len+i] = (*i1_ryr)[k*i1_ryr_len+i]*h_scale; if((*i2_ryr)[k*i1_ryr_len+i]>0) (*i2_ryr)[k*i1_ryr_len+i] = (*i2_ryr)[k*i1_ryr_len+i]*h_scale - floor((double)h_scale/2); else (*i2_ryr)[k*i1_ryr_len+i] = (*i2_ryr)[k*i1_ryr_len+i]*h_scale; } } *ryr_len=i1_ryr_len*x_slice_num; // load CSQN indices from file char i_csqn_indices_name[200]; char j_csqn_indices_name[200]; sprintf(i_csqn_indices_name, "i_csqn_indices%s.dat", use_failing ? "_failing" : ""); sprintf(j_csqn_indices_name, "j_csqn_indices%s.dat", use_failing ? 
"_failing" : ""); i1 = loadRyRindexfile_int(i_csqn_indices_name, &i1_len); i2 = loadRyRindexfile_int(j_csqn_indices_name, &i2_len); if(i1_len==i2_len) printf("num CSQN before reduction: %d\n", i1_len); else printf("num CSQN is wrong: i1_len!=i2_len\n"); //# Only use the subset which are inside the geometry // i1_csqn = i1[i2<nz]*h_scale // i2_csqn = i2[i2<nz]*h_scale // i0_csqn = np.ones(len(i1_csqn), dtype=int)*mid_x*h_scale i1_temp_len=0; for ( i = 0; i < i1_len; i += 1 ) { if(i1[i]<ny) i1_temp_len++; } i1_temp=malloc(i1_temp_len*sizeof(int)); i2_temp=malloc(i1_temp_len*sizeof(int)); j=0; for ( i = 0; i < i1_len; i += 1 ) { if(i1[i]<ny){ i1_temp[j]=i1[i]; i2_temp[j]=i2[i]; j++; } } free(i1); free(i2); int i1_csqn_len=0; for ( i = 0; i < i1_temp_len; i += 1 ) { if(i2_temp[i]<nz) i1_csqn_len++; } *i0_csqn=malloc(x_slice_num*i1_csqn_len*sizeof(int)); *i1_csqn=malloc(x_slice_num*i1_csqn_len*sizeof(int)); *i2_csqn=malloc(x_slice_num*i1_csqn_len*sizeof(int)); j=0; for ( i = 0; i < i1_temp_len; i += 1 ) { if(i2_temp[i]<nz){ for(k=0; k < x_slice_num; k++){ (*i1_csqn)[k*i1_csqn_len+j]=i1_temp[i]; (*i2_csqn)[k*i1_csqn_len+j]=i2_temp[i]; } j++; } } free(i1_temp); free(i2_temp); // Scale indices and move to center of macro voxel for(k=0; k < x_slice_num; k++){ for ( i = 0; i < i1_csqn_len; i += 1 ) { (*i0_csqn)[k*i1_csqn_len+i] = k*x_slice_width+x_slice_mid; (*i1_csqn)[k*i1_csqn_len+i] = (*i1_csqn)[k*i1_csqn_len+i]*h_scale; (*i2_csqn)[k*i1_csqn_len+i] = (*i2_csqn)[k*i1_csqn_len+i]*h_scale; } } int* i0_csqn_list; int* i1_csqn_list; int* i2_csqn_list; int m; int csqn_count; *csqn_len=x_slice_num*i1_csqn_len*h_scale*h_scale; *cleft_len=0;//x_slice_num*i1_csqn_len*4*h_scale; // # Add CSQN to all voxels covered by the original CSQN array if (h_scale > 1){ i0_csqn_list=malloc(x_slice_num*i1_csqn_len*h_scale*h_scale*sizeof(int)); i1_csqn_list=malloc(x_slice_num*i1_csqn_len*h_scale*h_scale*sizeof(int)); i2_csqn_list=malloc(x_slice_num*i1_csqn_len*h_scale*h_scale*sizeof(int)); csqn_count=0; // # Add offsetted versions of the csqn for ( m = 0; m < x_slice_num; m += 1 ) { for ( i = 0; i < h_scale; i += 1 ) { for ( j = 0; j < h_scale; j += 1 ) { for ( k = 0; k < i1_csqn_len; k += 1 ) { i0_csqn_list[csqn_count]=(*i0_csqn)[m*i1_csqn_len+k]; i1_csqn_list[csqn_count]=(*i1_csqn)[m*i1_csqn_len+k]+i; i2_csqn_list[csqn_count]=(*i2_csqn)[m*i1_csqn_len+k]+j; csqn_count++; } } } } if(csqn_count!=(*csqn_len)) { printf("csqn_count wrong\n"); exit(0); } } else { i0_csqn_list=(*i0_csqn); i1_csqn_list=(*i1_csqn); i2_csqn_list=(*i2_csqn); } int a_slice_csqn_len=i1_csqn_len*h_scale*h_scale; BinarySort_two(&i1_csqn_list[0],&i2_csqn_list[0],a_slice_csqn_len); int* y_index; y_index=malloc(ny_old*sizeof(int)); for ( i = 0; i < ny_old; i += 1 ) { y_index[i]=-1; } for ( i = a_slice_csqn_len-1; i >= 0; i -= 1 ) { y_index[i1_csqn_list[i]]=i; } //generate cleft index on Y-Z plane,just wrapping the outside of a group of CSQN, //If cleft is in the outside of the mesh or is already indexed by a CSQN, then it is not a true cleft. //Also generate the relative coordinates for th neighbour of each cleft from which to copy the value. 
//the relative coordinate of y is cleft_nb%8-1, and that of z is cleft_nb/8-1 int coord_y,coord_z; *i1_cleft=(int*)malloc(i1_csqn_len*4*h_scale*sizeof(int)); *i2_cleft=(int*)malloc(i1_csqn_len*4*h_scale*sizeof(int)); *cleft_nb=(int*)malloc(i1_csqn_len*4*h_scale*sizeof(int)); *cleft_len=0; for ( k = 0; k < i1_csqn_len; k += 1 ) { for ( j = 0; j < h_scale; j += 1 ) { //z bottom line coord_y=(*i1_csqn)[k]-1; coord_z=(*i2_csqn)[k]+j; if(IsTrueCleft(coord_y, coord_z, ny_old, nz_old, i1_csqn_list, i2_csqn_list, y_index, a_slice_csqn_len)) { (*i1_cleft)[(*cleft_len)]=coord_y; (*i2_cleft)[(*cleft_len)]=coord_z; //copy from outside // (*cleft_nb)[(*cleft_len)]=0+1; // copy from inside (*cleft_nb)[(*cleft_len)]=16+1; (*cleft_len)++; } //y left line coord_y=(*i1_csqn)[k]+j; coord_z=(*i2_csqn)[k]-1; if(IsTrueCleft(coord_y, coord_z, ny_old, nz_old, i1_csqn_list, i2_csqn_list, y_index, a_slice_csqn_len)) { (*i1_cleft)[(*cleft_len)]=coord_y; (*i2_cleft)[(*cleft_len)]=coord_z; //copy from inside // (*cleft_nb)[(*cleft_len)]=8+0; //copy from inside (*cleft_nb)[(*cleft_len)]=8+2; (*cleft_len)++; } //z top line coord_y=(*i1_csqn)[k]+h_scale; coord_z=(*i2_csqn)[k]+j; if(IsTrueCleft(coord_y, coord_z, ny_old, nz_old, i1_csqn_list, i2_csqn_list, y_index, a_slice_csqn_len)) { (*i1_cleft)[(*cleft_len)]=coord_y; (*i2_cleft)[(*cleft_len)]=coord_z; //copy from outside // (*cleft_nb)[(*cleft_len)]=16+1; // copy from inside (*cleft_nb)[(*cleft_len)]=0+1; (*cleft_len)++; } //y right line coord_y=(*i1_csqn)[k]+j; coord_z=(*i2_csqn)[k]+h_scale; if(IsTrueCleft(coord_y, coord_z, ny_old, nz_old, i1_csqn_list, i2_csqn_list, y_index, a_slice_csqn_len)) { (*i1_cleft)[(*cleft_len)]=coord_y; (*i2_cleft)[(*cleft_len)]=coord_z; //copy from outside // (*cleft_nb)[(*cleft_len)]=8+2; // copy from inside (*cleft_nb)[(*cleft_len)]=8+0; (*cleft_len)++; } } } if((*cleft_len)>i1_csqn_len*4*h_scale){ printf("wrong cleft_len found\n"); exit(0); } //add cleft for multiple 2um x-slices int* i0_cleft_list; int* i1_cleft_list; int* i2_cleft_list; int* cleft_nb_list; i0_cleft_list=malloc(x_slice_num*(*cleft_len)*sizeof(int)); i1_cleft_list=malloc(x_slice_num*(*cleft_len)*sizeof(int)); i2_cleft_list=malloc(x_slice_num*(*cleft_len)*sizeof(int)); cleft_nb_list=malloc(x_slice_num*(*cleft_len)*sizeof(int)); for(k=0; k < x_slice_num; k++){ for ( i = 0; i < (*cleft_len); i += 1 ) { i0_cleft_list[k*(*cleft_len)+i] = k*x_slice_width+x_slice_mid; i1_cleft_list[k*(*cleft_len)+i] = (*i1_cleft)[i]; i2_cleft_list[k*(*cleft_len)+i] = (*i2_cleft)[i]; cleft_nb_list[k*(*cleft_len)+i] = (*cleft_nb)[i]; } } free(*i1_cleft); free(*i2_cleft); free(*cleft_nb); *i0_cleft=i0_cleft_list; *i1_cleft=i1_cleft_list; *i2_cleft=i2_cleft_list; *cleft_nb=cleft_nb_list; *cleft_len=x_slice_num*(*cleft_len); if (h_scale > 1){ free(*i0_csqn); free(*i1_csqn); free(*i2_csqn); *i0_csqn=i0_csqn_list; *i1_csqn=i1_csqn_list; *i2_csqn=i2_csqn_list; } return h_scale; } int IsTrueCleft(int coord_y, int coord_z, int size_y, int size_z, int *i1_csqn, int *i2_csqn, int* y_index, int csqn_len) { int i; //in outside of the mesh if((coord_y<0)||(coord_y>=size_y)||(coord_z<0)||(coord_z>=size_z)) return 0; i=y_index[coord_y]; //not in CSQN if(i<0) return 1; while(i1_csqn[i]==coord_y){ //in CSQN if(i2_csqn[i]==coord_z) return 0; i++; //not in CSQN if(i>=csqn_len) return 1; } return 1; } int idxinrank(int nx, int ny, int nz, int i0, int i1, int i2, int rank, MPI_Comm comm3d) { int coords[3]; MPI_Cart_coords(comm3d,rank,3,coords); if( (i0>=coords[2]*nx)&&((i0<coords[2]+1)*nx)&& 
(i1>=coords[1]*ny)&&((i1<coords[1]+1)*ny)&& (i2>=coords[0]*nz)&&((i2<coords[0]+1)*nz)) { return 1; } else return 0; } int idxbl2rank(int nx, int ny, int nz, int i0, int i1, int i2, int* coords, MPI_Comm comm3d) { int rank=0; coords[2]=i0/nx; coords[1]=i1/ny; coords[0]=i2/nz; MPI_Cart_rank(comm3d,coords,&rank); return rank; } int distr_ryr_csqn_state(int h, int size_x, int size_y, int size_z, int nx, int ny, int nz, int** i0_ryr, int** i1_ryr, int** i2_ryr, int* ryr_len, int** i0_csqn, int** i1_csqn, int** i2_csqn, int* csqn_len, int** i0_cleft, int** i1_cleft, int** i2_cleft,int** cleft_nb, int* cleft_len, int** states0, int** states1, int x_slice_mid,int x_slice_width, int x_slice_num, MPI_Comm comm3d, MPI_Comm comm, int use_failing) { int i,j; int h_scale; int* global_i0_ryr; int* global_i1_ryr; int* global_i2_ryr; int* global_i0_ryr_reorder; int* global_i1_ryr_reorder; int* global_i2_ryr_reorder; int* global_i0_csqn; int* global_i1_csqn; int* global_i2_csqn; int* global_i0_csqn_reorder; int* global_i1_csqn_reorder; int* global_i2_csqn_reorder; int* global_i0_cleft; int* global_i1_cleft; int* global_i2_cleft; int* global_cleft_nb; int* global_i0_cleft_reorder; int* global_i1_cleft_reorder; int* global_i2_cleft_reorder; int* global_cleft_nb_reorder; int global_ryr_len; int global_csqn_len; int global_cleft_len; int* global_states0; int* global_states0_reorder; int* ryr_rec_count; int* ryr_rec_disp; int* ryr_rec_offset; int* csqn_rec_count; int* csqn_rec_disp; int* csqn_rec_offset; int* cleft_rec_count; int* cleft_rec_disp; int* cleft_rec_offset; int my_id; int nproc; int coords[3]; MPI_Comm_rank(comm,&my_id); MPI_Comm_size(comm,&nproc); if(my_id==0){ h_scale=load_indices_serial(size_x, size_y, size_z, h, &global_i0_ryr, &global_i1_ryr, &global_i2_ryr, &global_ryr_len, &global_i0_csqn, &global_i1_csqn,&global_i2_csqn,&global_csqn_len, &global_i0_cleft, &global_i1_cleft, &global_i2_cleft, &global_cleft_nb, &global_cleft_len, x_slice_mid,x_slice_width,x_slice_num, use_failing); printf("load indices from file: h:%d, h_scale:%d, nx:%d, ny:%d, nz:%d, ryr_len:%d, csqn_len:%d cleft_len:%d\n", h, h_scale, nx, ny, nz, global_ryr_len, global_csqn_len, global_cleft_len); if(global_ryr_len>0) global_states0=malloc(global_ryr_len*sizeof(int)); else global_states0=malloc(1*sizeof(int)); for ( i = 0; i < global_ryr_len; i++) global_states0[i]=0; if(global_ryr_len>=23){ for ( i = 1; i < 23; i =i+3 ) global_states0[i]=1; } else { for ( i = 1; i < global_ryr_len ; i =i+10 ) global_states0[i]=1; } if(DB_PF){ for(i=0;i<global_ryr_len;i++){ if(global_states0[i]==1) printf("ryr[%d]:%d,%d,%d \n",i,global_i0_ryr[i],global_i1_ryr[i],global_i2_ryr[i]); } } ryr_rec_count=malloc(nproc*sizeof(int)); csqn_rec_count=malloc(nproc*sizeof(int)); cleft_rec_count=malloc(nproc*sizeof(int)); for (i = 0; i < nproc; i++) { ryr_rec_count[i]=0; csqn_rec_count[i]=0; cleft_rec_count[i]=0; } for(i=0;i<global_ryr_len;i++) { j=idxbl2rank(nx,ny,nz,global_i0_ryr[i],global_i1_ryr[i],global_i2_ryr[i],coords,comm3d); ryr_rec_count[j]++; } for(i=0;i<global_csqn_len;i++) { j=idxbl2rank(nx,ny,nz,global_i0_csqn[i],global_i1_csqn[i],global_i2_csqn[i],coords,comm3d); csqn_rec_count[j]++; } for(i=0;i<global_cleft_len;i++) { j=idxbl2rank(nx,ny,nz,global_i0_cleft[i],global_i1_cleft[i],global_i2_cleft[i],coords,comm3d); cleft_rec_count[j]++; } for (i = 0; i < nproc; i++) { if(DB_PF) printf("ryr_rec_count[%d]: %d\n",i, ryr_rec_count[i]); if(DB_PF) printf("csqn_rec_count[%d]: %d\n",i, csqn_rec_count[i]); if(DB_PF) printf("cleft_rec_count[%d]: %d\n",i, 
cleft_rec_count[i]); } ryr_rec_disp = malloc(nproc*sizeof(int)); csqn_rec_disp = malloc(nproc*sizeof(int)); cleft_rec_disp = malloc(nproc*sizeof(int)); ryr_rec_disp[0] = 0; csqn_rec_disp[0] = 0; cleft_rec_disp[0] = 0; for (i = 1; i < nproc; i++) { ryr_rec_disp[i] = ryr_rec_disp[i-1] + ryr_rec_count[i-1]; csqn_rec_disp[i] = csqn_rec_disp[i-1] + csqn_rec_count[i-1]; cleft_rec_disp[i] = cleft_rec_disp[i-1] + cleft_rec_count[i-1]; } if(global_ryr_len!=ryr_rec_disp[nproc-1]+ryr_rec_count[nproc-1]) { printf("Global ryr Count mismatch %d\n", ryr_rec_disp[nproc-1]+ryr_rec_count[nproc-1]); } if(global_csqn_len!=csqn_rec_disp[nproc-1]+csqn_rec_count[nproc-1]) { printf("Global csqn Count mismatch %d\n", csqn_rec_disp[nproc-1]+csqn_rec_count[nproc-1]); } if(global_cleft_len!=cleft_rec_disp[nproc-1]+cleft_rec_count[nproc-1]) { printf("Global cleft Count mismatch %d\n", cleft_rec_disp[nproc-1]+cleft_rec_count[nproc-1]); } ryr_rec_offset = malloc(nproc*sizeof(int)); csqn_rec_offset = malloc(nproc*sizeof(int)); cleft_rec_offset = malloc(nproc*sizeof(int)); for (i = 0; i < nproc; i++) { ryr_rec_offset[i]=0; csqn_rec_offset[i]=0; cleft_rec_offset[i]=0; } global_i0_ryr_reorder=malloc(global_ryr_len*sizeof(int)); global_i1_ryr_reorder=malloc(global_ryr_len*sizeof(int)); global_i2_ryr_reorder=malloc(global_ryr_len*sizeof(int)); global_states0_reorder=malloc(global_ryr_len*sizeof(int)); for(i=0;i<global_ryr_len;i++) { j=idxbl2rank(nx,ny,nz,global_i0_ryr[i],global_i1_ryr[i],global_i2_ryr[i],coords,comm3d); global_i0_ryr_reorder[ryr_rec_disp[j]+ryr_rec_offset[j]]=global_i0_ryr[i]-coords[2]*nx+1; global_i1_ryr_reorder[ryr_rec_disp[j]+ryr_rec_offset[j]]=global_i1_ryr[i]-coords[1]*ny+1; global_i2_ryr_reorder[ryr_rec_disp[j]+ryr_rec_offset[j]]=global_i2_ryr[i]-coords[0]*nz+1; global_states0_reorder[ryr_rec_disp[j]+ryr_rec_offset[j]]=global_states0[i]; ryr_rec_offset[j]++; } for (i = 0; i < nproc; i++) { if(ryr_rec_offset[i]!=ryr_rec_count[i]) printf("ryr reorder count error on proc %d \n",i); } free(global_i0_ryr); free(global_i1_ryr); free(global_i2_ryr); free(global_states0); free(ryr_rec_offset); //distribute cleft to there own MPI process global_i0_csqn_reorder=malloc(global_csqn_len*sizeof(int)); global_i1_csqn_reorder=malloc(global_csqn_len*sizeof(int)); global_i2_csqn_reorder=malloc(global_csqn_len*sizeof(int)); for(i=0;i<global_csqn_len;i++) { j=idxbl2rank(nx,ny,nz,global_i0_csqn[i],global_i1_csqn[i],global_i2_csqn[i],coords,comm3d); global_i0_csqn_reorder[csqn_rec_disp[j]+csqn_rec_offset[j]]=global_i0_csqn[i]-coords[2]*nx+1; global_i1_csqn_reorder[csqn_rec_disp[j]+csqn_rec_offset[j]]=global_i1_csqn[i]-coords[1]*ny+1; global_i2_csqn_reorder[csqn_rec_disp[j]+csqn_rec_offset[j]]=global_i2_csqn[i]-coords[0]*nz+1; csqn_rec_offset[j]++; } for (i = 0; i < nproc; i++) { if(csqn_rec_offset[i]!=csqn_rec_count[i]) printf("csqn reorder count error on proc %d \n",i); } free(global_i0_csqn); free(global_i1_csqn); free(global_i2_csqn); free(csqn_rec_offset); global_i0_cleft_reorder=malloc(global_cleft_len*sizeof(int)); global_i1_cleft_reorder=malloc(global_cleft_len*sizeof(int)); global_i2_cleft_reorder=malloc(global_cleft_len*sizeof(int)); global_cleft_nb_reorder=malloc(global_cleft_len*sizeof(int)); for(i=0;i<global_cleft_len;i++) { j=idxbl2rank(nx,ny,nz,global_i0_cleft[i],global_i1_cleft[i],global_i2_cleft[i],coords,comm3d); global_i0_cleft_reorder[cleft_rec_disp[j]+cleft_rec_offset[j]]=global_i0_cleft[i]-coords[2]*nx+1; global_i1_cleft_reorder[cleft_rec_disp[j]+cleft_rec_offset[j]]=global_i1_cleft[i]-coords[1]*ny+1; 
global_i2_cleft_reorder[cleft_rec_disp[j]+cleft_rec_offset[j]]=global_i2_cleft[i]-coords[0]*nz+1; global_cleft_nb_reorder[cleft_rec_disp[j]+cleft_rec_offset[j]]=global_cleft_nb[i]; cleft_rec_offset[j]++; } for (i = 0; i < nproc; i++) { if(cleft_rec_offset[i]!=cleft_rec_count[i]) printf("cleft reorder count error on proc %d \n",i); } free(global_i0_cleft); free(global_i1_cleft); free(global_i2_cleft); free(global_cleft_nb); free(cleft_rec_offset); } //MPI_Gather(&n_ryr,1,MPI_INT,&states_rec_count[0],1,MPI_INT,0,comm); MPI_Scatter(&ryr_rec_count[0],1,MPI_INT,ryr_len,1, MPI_INT,0,comm); MPI_Scatter(&csqn_rec_count[0],1,MPI_INT,csqn_len,1, MPI_INT,0,comm); MPI_Scatter(&cleft_rec_count[0],1,MPI_INT,cleft_len,1, MPI_INT,0,comm); if(*ryr_len>0){ *i0_ryr=(int*)mpi_malloc(my_id,*ryr_len*sizeof(int)); *i1_ryr=(int*)mpi_malloc(my_id,*ryr_len*sizeof(int)); *i2_ryr=(int*)mpi_malloc(my_id,*ryr_len*sizeof(int)); } else { *i0_ryr=(int*)mpi_malloc(my_id,1*sizeof(int)); *i1_ryr=(int*)mpi_malloc(my_id,1*sizeof(int)); *i2_ryr=(int*)mpi_malloc(my_id,1*sizeof(int)); } if(*csqn_len>0) { *i0_csqn=(int*)mpi_malloc(my_id,*csqn_len*sizeof(int)); *i1_csqn=(int*)mpi_malloc(my_id,*csqn_len*sizeof(int)); *i2_csqn=(int*)mpi_malloc(my_id,*csqn_len*sizeof(int)); } else { *i0_csqn=(int*)mpi_malloc(my_id,1*sizeof(int)); *i1_csqn=(int*)mpi_malloc(my_id,1*sizeof(int)); *i2_csqn=(int*)mpi_malloc(my_id,1*sizeof(int)); } if(*cleft_len>0) { *i0_cleft=(int*)mpi_malloc(my_id,*cleft_len*sizeof(int)); *i1_cleft=(int*)mpi_malloc(my_id,*cleft_len*sizeof(int)); *i2_cleft=(int*)mpi_malloc(my_id,*cleft_len*sizeof(int)); *cleft_nb=(int*)mpi_malloc(my_id,*cleft_len*sizeof(int)); } else { *i0_cleft=(int*)mpi_malloc(my_id,1*sizeof(int)); *i1_cleft=(int*)mpi_malloc(my_id,1*sizeof(int)); *i2_cleft=(int*)mpi_malloc(my_id,1*sizeof(int)); *cleft_nb=(int*)mpi_malloc(my_id,1*sizeof(int)); } if(*ryr_len>0){ *states0=(int*)mpi_malloc(my_id,*ryr_len*sizeof(int)); *states1=(int*)mpi_malloc(my_id,*ryr_len*sizeof(int)); for ( i = 0; i < *ryr_len; i += 1 ) { (*states0)[i]=0; (*states1)[i]=0; } } else { *states0=(int*)mpi_malloc(my_id,1*sizeof(int)); *states1=(int*)mpi_malloc(my_id,1*sizeof(int)); (*states0)[0]=0; (*states1)[0]=0; } MPI_Scatterv(global_i0_ryr_reorder, ryr_rec_count,ryr_rec_disp, MPI_INT, *i0_ryr, *ryr_len, MPI_INT, 0, comm); MPI_Scatterv(global_i1_ryr_reorder, ryr_rec_count,ryr_rec_disp, MPI_INT, *i1_ryr, *ryr_len, MPI_INT, 0, comm); MPI_Scatterv(global_i2_ryr_reorder, ryr_rec_count,ryr_rec_disp, MPI_INT, *i2_ryr, *ryr_len, MPI_INT, 0, comm); MPI_Scatterv(global_i0_csqn_reorder, csqn_rec_count,csqn_rec_disp, MPI_INT, *i0_csqn, *csqn_len, MPI_INT, 0, comm); MPI_Scatterv(global_i1_csqn_reorder, csqn_rec_count,csqn_rec_disp, MPI_INT, *i1_csqn, *csqn_len, MPI_INT, 0, comm); MPI_Scatterv(global_i2_csqn_reorder, csqn_rec_count,csqn_rec_disp, MPI_INT, *i2_csqn, *csqn_len, MPI_INT, 0, comm); MPI_Scatterv(global_i0_cleft_reorder, cleft_rec_count,cleft_rec_disp, MPI_INT, *i0_cleft, *cleft_len, MPI_INT, 0, comm); MPI_Scatterv(global_i1_cleft_reorder, cleft_rec_count,cleft_rec_disp, MPI_INT, *i1_cleft, *cleft_len, MPI_INT, 0, comm); MPI_Scatterv(global_i2_cleft_reorder, cleft_rec_count,cleft_rec_disp, MPI_INT, *i2_cleft, *cleft_len, MPI_INT, 0, comm); MPI_Scatterv(global_cleft_nb_reorder, cleft_rec_count,cleft_rec_disp, MPI_INT, *cleft_nb, *cleft_len, MPI_INT, 0, comm); MPI_Scatterv(global_states0_reorder, ryr_rec_count, ryr_rec_disp, MPI_INT, *states0, *ryr_len, MPI_INT, 0, comm); //MPI_Bcast(&global_ryr_num,1,MPI_INT,0,comm); if(DB_PF) 
printf("Thread%d: ryr_len=%d\n",my_id, *ryr_len); // sprintf(caoutfile,"%s/Ca%d_T%d_rank%d_%d_%d_s0.np",outdirname,i,counter,coord[2],coord[1],coord[0]); // store2Dmatrixfile_double_1D(caoutfile,C1[i],ny0,nz0,30); //MPI_Gatherv(states0, n_ryr, MPI_INT, global_states0, states_rec_count, states_rec_disp, MPI_INT, 0, comm); // if(my_id==2) { // for(i=0;i<*ryr_len;i++) printf("Thread2 states[%d]: %d\n",i,(*states0)[i]); // } if(DB_PF){ for(i=0;i<*ryr_len;i++){ if((*states0)[i]==1){ printf("Proc%d,ryr_len=%d,ryr[%d]:%d,%d,%d \n",my_id, *ryr_len,i,(*i0_ryr)[i],(*i1_ryr)[i],(*i2_ryr)[i]); } } } if(my_id==0){ free(ryr_rec_count); free(ryr_rec_disp); free(csqn_rec_count); free(csqn_rec_disp); free(cleft_rec_count); free(cleft_rec_disp); free(global_i0_ryr_reorder); free(global_i1_ryr_reorder); free(global_i2_ryr_reorder); free(global_i0_csqn_reorder); free(global_i1_csqn_reorder); free(global_i2_csqn_reorder); free(global_i0_cleft_reorder); free(global_i1_cleft_reorder); free(global_i2_cleft_reorder); free(global_cleft_nb_reorder); free(global_states0_reorder); } return 30/h; } //int* loadRyRindexfile_int(char* infile, CONDFN cf, int cond) int* loadRyRindexfile_int(char* infile, int* count) { FILE *fpdata; int* arreturn; int i; int temp_d; *count=0; if(DB_PF) printf("Load file name: %s\n", infile); fpdata = fopen(infile, "r"); if(fpdata==NULL) { printf("\nFailure to open input file.\n"); exit(0); } while(fscanf(fpdata, "%d", &temp_d)!=EOF){ // if(cf(temp_d,cond)) count++; (*count)++; // printf("%d,",temp_d); } if(DB_PF) printf("There are %d indices satisfy the condition\n",*count); arreturn = malloc((*count)*sizeof(int)); if (arreturn == NULL) { printf("\nFailure trying to allocate room for array.\n"); exit(0); } rewind(fpdata); i=0; while(fscanf(fpdata, "%d", &temp_d)!=EOF){ // if(cf(temp_d,cond)) { arreturn[i]=temp_d; i++; // } } fclose(fpdata); if (*count != i) { printf("Wrong indices number\n"); exit(0); } if(DB_PF) printf("load file %s over \n", infile); return arreturn; } void readparam(int* iconf, double* conf) { FILE* file2; char Data[MAX_LINE_LENGTH]; if((file2=fopen("param","r")) == NULL) { printf("Error opening param file\n"); return; } // h fgets(Data,MAX_LINE_LENGTH,file2); fscanf(file2,"%d\n",&iconf[0]); // size_x fgets(Data,MAX_LINE_LENGTH,file2); fscanf(file2,"%d\n",&iconf[1]); // size_y fgets(Data,MAX_LINE_LENGTH,file2); fscanf(file2,"%d\n",&iconf[2]); // size_z fgets(Data,MAX_LINE_LENGTH,file2); fscanf(file2,"%d\n",&iconf[3]); // x_domains fgets(Data,MAX_LINE_LENGTH,file2); fscanf(file2,"%d\n",&iconf[4]); // y_domains fgets(Data,MAX_LINE_LENGTH,file2); fscanf(file2,"%d\n",&iconf[5]); // z_domains fgets(Data,MAX_LINE_LENGTH,file2); fscanf(file2,"%d\n",&iconf[6]); // save_data fgets(Data,MAX_LINE_LENGTH,file2); fscanf(file2,"%d\n",&iconf[7]); // use_failing fgets(Data,MAX_LINE_LENGTH,file2); fscanf(file2,"%d\n",&iconf[8]); // T fgets(Data,MAX_LINE_LENGTH,file2); fscanf(file2,"%le\n",&conf[0]); // DT fgets(Data,MAX_LINE_LENGTH,file2); fscanf(file2,"%le\n",&conf[1]); // save data in binary file fgets(Data,MAX_LINE_LENGTH,file2); fscanf(file2,"%d\n",&iconf[9]); // save data in hdf5 format fgets(Data,MAX_LINE_LENGTH,file2); fscanf(file2,"%d\n",&iconf[10]); // blocking_y_for_cache fgets(Data,MAX_LINE_LENGTH,file2); fscanf(file2,"%d",&iconf[11]); fclose(file2); } void updateBound(double* C00, double* C01, double* C02, double* C03, double* C04, int C_flag, int nx0, int ny0, int nz0, double* yz_sbuf0,double* yz_rbuf0, double* xz_sbuf0,double* xz_rbuf0, double* xy_sbuf0,double* xy_rbuf0, 
double* yz_sbuf1,double* yz_rbuf1, double* xz_sbuf1,double* xz_rbuf1, double* xy_sbuf1,double* xy_rbuf1, int* neighbor, MPI_Status* ar_status, MPI_Request* ar_send_req, MPI_Request* ar_recv_req, MPI_Comm comm, MPI_Comm comm3d) { int i,j,k; int nx=nx0-2; int ny=ny0-2; int nz=nz0-2; int dims[3]; int periods[3]; int coords[3]; int ZN=0, ZP=1, YN=2, YP=3, XN=4, XP=5; MPI_Cart_get(comm3d, 3, dims, periods, coords); // Ghost X end sheet if(coords[2]==0){ i=0; for (j=1; j<ny0-1; j++) for (k=1; k<nz0-1; k++){ C00[i*nz0*ny0+j*nz0+k] = C00[(i+1)*nz0*ny0+j*nz0+k]; C01[i*nz0*ny0+j*nz0+k] = C01[(i+1)*nz0*ny0+j*nz0+k]; C02[i*nz0*ny0+j*nz0+k] = C02[(i+1)*nz0*ny0+j*nz0+k]; C03[i*nz0*ny0+j*nz0+k] = C03[(i+1)*nz0*ny0+j*nz0+k]; C04[i*nz0*ny0+j*nz0+k] = C04[(i+1)*nz0*ny0+j*nz0+k]; } } else { putin_sendbuffer_yz(1*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C00, nx, ny, nz, &yz_sbuf0[0*ny*nz],ny*nz); putin_sendbuffer_yz(1*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C01, nx, ny, nz, &yz_sbuf0[1*ny*nz],ny*nz); putin_sendbuffer_yz(1*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C02, nx, ny, nz, &yz_sbuf0[2*ny*nz],ny*nz); putin_sendbuffer_yz(1*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C03, nx, ny, nz, &yz_sbuf0[3*ny*nz],ny*nz); putin_sendbuffer_yz(1*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C04, nx, ny, nz, &yz_sbuf0[4*ny*nz],ny*nz); } MPI_Isend(yz_sbuf0,5*ny*nz,MPI_DOUBLE,neighbor[XN],C_flag+1000, comm, &ar_send_req[0]); MPI_Irecv(yz_rbuf0,5*ny*nz,MPI_DOUBLE,neighbor[XN],C_flag+1000, comm, &ar_recv_req[0]); // MPI_Sendrecv(yz_sbuf0,5*ny*nz,MPI_DOUBLE,neighbor[XN],C_flag+1000, // yz_rbuf0,5*ny*nz,MPI_DOUBLE,neighbor[XN],C_flag+1000,comm,&status); if(coords[2]==(dims[2]-1)) { i=nx0-1; for (j=1; j<ny0-1; j++) for (k=1; k<nz0-1; k++){ C00[i*nz0*ny0+j*nz0+k] = C00[(i-1)*nz0*ny0+j*nz0+k]; C01[i*nz0*ny0+j*nz0+k] = C01[(i-1)*nz0*ny0+j*nz0+k]; C02[i*nz0*ny0+j*nz0+k] = C02[(i-1)*nz0*ny0+j*nz0+k]; C03[i*nz0*ny0+j*nz0+k] = C03[(i-1)*nz0*ny0+j*nz0+k]; C04[i*nz0*ny0+j*nz0+k] = C04[(i-1)*nz0*ny0+j*nz0+k]; } } else { putin_sendbuffer_yz( (nx0-2)*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C00, nx, ny, nz, &yz_sbuf1[0*ny*nz],ny*nz); putin_sendbuffer_yz( (nx0-2)*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C01, nx, ny, nz, &yz_sbuf1[1*ny*nz],ny*nz); putin_sendbuffer_yz( (nx0-2)*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C02, nx, ny, nz, &yz_sbuf1[2*ny*nz],ny*nz); putin_sendbuffer_yz( (nx0-2)*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C03, nx, ny, nz, &yz_sbuf1[3*ny*nz],ny*nz); putin_sendbuffer_yz( (nx0-2)*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C04, nx, ny, nz, &yz_sbuf1[4*ny*nz],ny*nz); } MPI_Isend(yz_sbuf1,5*ny*nz,MPI_DOUBLE,neighbor[XP],C_flag+1000, comm, &ar_send_req[1]); MPI_Irecv(yz_rbuf1,5*ny*nz,MPI_DOUBLE,neighbor[XP],C_flag+1000, comm, &ar_recv_req[1]); // MPI_Sendrecv(yz_sbuf1,5*ny*nz,MPI_DOUBLE,neighbor[XP],C_flag+1000, // yz_rbuf1,5*ny*nz,MPI_DOUBLE,neighbor[XP],C_flag+1000,comm,&status); // printf("exchange X end sheet ok! 
coords[%d,%d,%d]\n",coords[0],coords[1],coords[2]); // Ghost Y end sheet if(coords[1]==0){ j=0; for (i=1; i<nx0-1; i++) for (k=1; k<nz0-1; k++){ C00[i*nz0*ny0+j*nz0+k] = C00[i*nz0*ny0+(j+1)*nz0+k]; C01[i*nz0*ny0+j*nz0+k] = C01[i*nz0*ny0+(j+1)*nz0+k]; C02[i*nz0*ny0+j*nz0+k] = C02[i*nz0*ny0+(j+1)*nz0+k]; C03[i*nz0*ny0+j*nz0+k] = C03[i*nz0*ny0+(j+1)*nz0+k]; C04[i*nz0*ny0+j*nz0+k] = C04[i*nz0*ny0+(j+1)*nz0+k]; } } else { putin_sendbuffer_xz( 1*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C00, nx, ny, nz, &xz_sbuf0[0*nx*nz],nx*nz); putin_sendbuffer_xz( 1*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C01, nx, ny, nz, &xz_sbuf0[1*nx*nz],nx*nz); putin_sendbuffer_xz( 1*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C02, nx, ny, nz, &xz_sbuf0[2*nx*nz],nx*nz); putin_sendbuffer_xz( 1*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C03, nx, ny, nz, &xz_sbuf0[3*nx*nz],nx*nz); putin_sendbuffer_xz( 1*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C04, nx, ny, nz, &xz_sbuf0[4*nx*nz],nx*nz); } MPI_Isend(xz_sbuf0,5*nx*nz,MPI_DOUBLE,neighbor[YN],C_flag+2000, comm, &ar_send_req[2]); MPI_Irecv(xz_rbuf0,5*nx*nz,MPI_DOUBLE,neighbor[YN],C_flag+2000, comm, &ar_recv_req[2]); // MPI_Sendrecv(xz_sbuf0,5*nx*nz,MPI_DOUBLE,neighbor[YN],C_flag+2000, // xz_rbuf0,5*nx*nz,MPI_DOUBLE,neighbor[YN],C_flag+2000,comm,&status); if(coords[1]==(dims[1]-1)) { j=ny0-1; for (i=1; i<nx0-1; i++) for (k=1; k<nz0-1; k++){ C00[i*nz0*ny0+j*nz0+k] = C00[i*nz0*ny0+(j-1)*nz0+k]; C01[i*nz0*ny0+j*nz0+k] = C01[i*nz0*ny0+(j-1)*nz0+k]; C02[i*nz0*ny0+j*nz0+k] = C02[i*nz0*ny0+(j-1)*nz0+k]; C03[i*nz0*ny0+j*nz0+k] = C03[i*nz0*ny0+(j-1)*nz0+k]; C04[i*nz0*ny0+j*nz0+k] = C04[i*nz0*ny0+(j-1)*nz0+k]; } } else { putin_sendbuffer_xz( 1*nz0*ny0+(ny0-2)*nz0+1,nx0,ny0, nz0, C00, nx, ny, nz, &xz_sbuf1[0*nx*nz],nx*nz); putin_sendbuffer_xz( 1*nz0*ny0+(ny0-2)*nz0+1,nx0,ny0, nz0, C01, nx, ny, nz, &xz_sbuf1[1*nx*nz],nx*nz); putin_sendbuffer_xz( 1*nz0*ny0+(ny0-2)*nz0+1,nx0,ny0, nz0, C02, nx, ny, nz, &xz_sbuf1[2*nx*nz],nx*nz); putin_sendbuffer_xz( 1*nz0*ny0+(ny0-2)*nz0+1,nx0,ny0, nz0, C03, nx, ny, nz, &xz_sbuf1[3*nx*nz],nx*nz); putin_sendbuffer_xz( 1*nz0*ny0+(ny0-2)*nz0+1,nx0,ny0, nz0, C04, nx, ny, nz, &xz_sbuf1[4*nx*nz],nx*nz); } MPI_Isend(xz_sbuf1,5*nx*nz,MPI_DOUBLE,neighbor[YP],C_flag+2000, comm, &ar_send_req[3]); MPI_Irecv(xz_rbuf1,5*nx*nz,MPI_DOUBLE,neighbor[YP],C_flag+2000, comm, &ar_recv_req[3]); // MPI_Sendrecv(xz_sbuf1,5*nx*nz,MPI_DOUBLE,neighbor[YP],C_flag+2000, // xz_rbuf1,5*nx*nz,MPI_DOUBLE,neighbor[YP],C_flag+2000,comm,&status); // printf("exchange Y end sheet ok! 
coords[%d,%d,%d]\n",coords[0],coords[1],coords[2]); // Ghost Z end sheet if(coords[0]==0){ k=0; for (i=1; i<nx0-1; i++) for (j=1; j<ny0-1; j++){ C00[i*nz0*ny0+j*nz0+k] = C00[i*nz0*ny0+j*nz0+k+1]; C01[i*nz0*ny0+j*nz0+k] = C01[i*nz0*ny0+j*nz0+k+1]; C02[i*nz0*ny0+j*nz0+k] = C02[i*nz0*ny0+j*nz0+k+1]; C03[i*nz0*ny0+j*nz0+k] = C03[i*nz0*ny0+j*nz0+k+1]; C04[i*nz0*ny0+j*nz0+k] = C04[i*nz0*ny0+j*nz0+k+1]; } } else { putin_sendbuffer_xy(1*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C00, nx, ny, nz, &xy_sbuf0[0*nx*ny],nx*ny); putin_sendbuffer_xy(1*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C01, nx, ny, nz, &xy_sbuf0[1*nx*ny],nx*ny); putin_sendbuffer_xy(1*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C02, nx, ny, nz, &xy_sbuf0[2*nx*ny],nx*ny); putin_sendbuffer_xy(1*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C03, nx, ny, nz, &xy_sbuf0[3*nx*ny],nx*ny); putin_sendbuffer_xy(1*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C04, nx, ny, nz, &xy_sbuf0[4*nx*ny],nx*ny); } MPI_Isend(xy_sbuf0,5*nx*ny,MPI_DOUBLE,neighbor[ZN],C_flag+3000, comm, &ar_send_req[4]); MPI_Irecv(xy_rbuf0,5*nx*ny,MPI_DOUBLE,neighbor[ZN],C_flag+3000, comm, &ar_recv_req[4]); // MPI_Sendrecv(xy_sbuf0,5*nx*ny,MPI_DOUBLE,neighbor[ZN],C_flag+3000, // xy_rbuf0,5*nx*ny,MPI_DOUBLE,neighbor[ZN],C_flag+3000,comm,&status); if(coords[0]==(dims[0]-1)) { k=nz0-1; for (i=1; i<nx0-1; i++) for (j=1; j<ny0-1; j++){ C00[i*nz0*ny0+j*nz0+k] = C00[i*nz0*ny0+j*nz0+k-1]; C01[i*nz0*ny0+j*nz0+k] = C01[i*nz0*ny0+j*nz0+k-1]; C02[i*nz0*ny0+j*nz0+k] = C02[i*nz0*ny0+j*nz0+k-1]; C03[i*nz0*ny0+j*nz0+k] = C03[i*nz0*ny0+j*nz0+k-1]; C04[i*nz0*ny0+j*nz0+k] = C04[i*nz0*ny0+j*nz0+k-1]; } } else { putin_sendbuffer_xy( 1*nz0*ny0+nz0+nz0-2,nx0,ny0, nz0, C00, nx, ny, nz, &xy_sbuf1[0*nx*ny],nx*ny); putin_sendbuffer_xy( 1*nz0*ny0+nz0+nz0-2,nx0,ny0, nz0, C01, nx, ny, nz, &xy_sbuf1[1*nx*ny],nx*ny); putin_sendbuffer_xy( 1*nz0*ny0+nz0+nz0-2,nx0,ny0, nz0, C02, nx, ny, nz, &xy_sbuf1[2*nx*ny],nx*ny); putin_sendbuffer_xy( 1*nz0*ny0+nz0+nz0-2,nx0,ny0, nz0, C03, nx, ny, nz, &xy_sbuf1[3*nx*ny],nx*ny); putin_sendbuffer_xy( 1*nz0*ny0+nz0+nz0-2,nx0,ny0, nz0, C04, nx, ny, nz, &xy_sbuf1[4*nx*ny],nx*ny); } MPI_Isend(xy_sbuf1,5*nx*ny,MPI_DOUBLE,neighbor[ZP],C_flag+3000, comm, &ar_send_req[5]); MPI_Irecv(xy_rbuf1,5*nx*ny,MPI_DOUBLE,neighbor[ZP],C_flag+3000, comm, &ar_recv_req[5]); // MPI_Sendrecv(xy_sbuf1,5*nx*ny,MPI_DOUBLE,neighbor[ZP],C_flag+3000, // xy_rbuf1,5*nx*ny,MPI_DOUBLE,neighbor[ZP],C_flag+3000,comm,&status); MPI_Waitall(6, ar_recv_req, ar_status); if(coords[2]!=0){ getout_recvbuffer_yz( 1*nz0+1,nx0,ny0, nz0, C00, nx, ny, nz, &yz_rbuf0[0*ny*nz],ny*nz); getout_recvbuffer_yz( 1*nz0+1,nx0,ny0, nz0, C01, nx, ny, nz, &yz_rbuf0[1*ny*nz],ny*nz); getout_recvbuffer_yz( 1*nz0+1,nx0,ny0, nz0, C02, nx, ny, nz, &yz_rbuf0[2*ny*nz],ny*nz); getout_recvbuffer_yz( 1*nz0+1,nx0,ny0, nz0, C03, nx, ny, nz, &yz_rbuf0[3*ny*nz],ny*nz); getout_recvbuffer_yz( 1*nz0+1,nx0,ny0, nz0, C04, nx, ny, nz, &yz_rbuf0[4*ny*nz],ny*nz); } if(coords[2]!=(dims[2]-1)){ getout_recvbuffer_yz((nx0-1)*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C00, nx, ny, nz, &yz_rbuf1[0*ny*nz],ny*nz); getout_recvbuffer_yz((nx0-1)*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C01, nx, ny, nz, &yz_rbuf1[1*ny*nz],ny*nz); getout_recvbuffer_yz((nx0-1)*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C02, nx, ny, nz, &yz_rbuf1[2*ny*nz],ny*nz); getout_recvbuffer_yz((nx0-1)*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C03, nx, ny, nz, &yz_rbuf1[3*ny*nz],ny*nz); getout_recvbuffer_yz((nx0-1)*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C04, nx, ny, nz, &yz_rbuf1[4*ny*nz],ny*nz); } if(coords[1]!=0){ getout_recvbuffer_xz( 1*nz0*ny0+1,nx0,ny0, nz0, C00, nx, ny, nz, &xz_rbuf0[0*nx*nz],nx*nz); getout_recvbuffer_xz( 
1*nz0*ny0+1,nx0,ny0, nz0, C01, nx, ny, nz, &xz_rbuf0[1*nx*nz],nx*nz); getout_recvbuffer_xz( 1*nz0*ny0+1,nx0,ny0, nz0, C02, nx, ny, nz, &xz_rbuf0[2*nx*nz],nx*nz); getout_recvbuffer_xz( 1*nz0*ny0+1,nx0,ny0, nz0, C03, nx, ny, nz, &xz_rbuf0[3*nx*nz],nx*nz); getout_recvbuffer_xz( 1*nz0*ny0+1,nx0,ny0, nz0, C04, nx, ny, nz, &xz_rbuf0[4*nx*nz],nx*nz); } if(coords[1]!=(dims[1]-1)){ getout_recvbuffer_xz(1*nz0*ny0+(ny0-1)*nz0+1,nx0,ny0, nz0, C00, nx, ny, nz, &xz_rbuf1[0*nx*nz],nx*nz); getout_recvbuffer_xz(1*nz0*ny0+(ny0-1)*nz0+1,nx0,ny0, nz0, C01, nx, ny, nz, &xz_rbuf1[1*nx*nz],nx*nz); getout_recvbuffer_xz(1*nz0*ny0+(ny0-1)*nz0+1,nx0,ny0, nz0, C02, nx, ny, nz, &xz_rbuf1[2*nx*nz],nx*nz); getout_recvbuffer_xz(1*nz0*ny0+(ny0-1)*nz0+1,nx0,ny0, nz0, C03, nx, ny, nz, &xz_rbuf1[3*nx*nz],nx*nz); getout_recvbuffer_xz(1*nz0*ny0+(ny0-1)*nz0+1,nx0,ny0, nz0, C04, nx, ny, nz, &xz_rbuf1[4*nx*nz],nx*nz); } if(coords[0]!=0){ getout_recvbuffer_xy( 1*nz0*ny0+1*nz0, nx0,ny0, nz0, C00, nx, ny, nz, &xy_rbuf0[0*nx*ny],nx*ny); getout_recvbuffer_xy( 1*nz0*ny0+1*nz0, nx0,ny0, nz0, C01, nx, ny, nz, &xy_rbuf0[1*nx*ny],nx*ny); getout_recvbuffer_xy( 1*nz0*ny0+1*nz0, nx0,ny0, nz0, C02, nx, ny, nz, &xy_rbuf0[2*nx*ny],nx*ny); getout_recvbuffer_xy( 1*nz0*ny0+1*nz0, nx0,ny0, nz0, C03, nx, ny, nz, &xy_rbuf0[3*nx*ny],nx*ny); getout_recvbuffer_xy( 1*nz0*ny0+1*nz0, nx0,ny0, nz0, C04, nx, ny, nz, &xy_rbuf0[4*nx*ny],nx*ny); } if(coords[0]!=(dims[0]-1)){ getout_recvbuffer_xy( 1*nz0*ny0+nz0+nz0-1,nx0,ny0, nz0, C00, nx, ny, nz, &xy_rbuf1[0*nx*ny],nx*ny); getout_recvbuffer_xy( 1*nz0*ny0+nz0+nz0-1,nx0,ny0, nz0, C01, nx, ny, nz, &xy_rbuf1[1*nx*ny],nx*ny); getout_recvbuffer_xy( 1*nz0*ny0+nz0+nz0-1,nx0,ny0, nz0, C02, nx, ny, nz, &xy_rbuf1[2*nx*ny],nx*ny); getout_recvbuffer_xy( 1*nz0*ny0+nz0+nz0-1,nx0,ny0, nz0, C03, nx, ny, nz, &xy_rbuf1[3*nx*ny],nx*ny); getout_recvbuffer_xy( 1*nz0*ny0+nz0+nz0-1,nx0,ny0, nz0, C04, nx, ny, nz, &xy_rbuf1[4*nx*ny],nx*ny); } // printf("exchange Z end sheet ok! 
coords[%d,%d,%d]\n",coords[0],coords[1],coords[2]); } void putin_sendbuffer_yz(int base_addr,int nx0,int ny0, int nz0, double* arr, int nx, int ny, int nz, double* sbuf, int sbuf_len) { int i; if(sbuf_len!=ny*nz) { printf("yz sbuf_len error!\n"); exit(0); } for ( i = 0; i < ny; i += 1 ) { memcpy(&sbuf[i*nz],&arr[base_addr+i*nz0],nz*sizeof(double)); } } void putin_sendbuffer_xz(int base_addr,int nx0,int ny0, int nz0, double* arr, int nx, int ny, int nz, double* sbuf, int sbuf_len) { int i; if(sbuf_len!=nx*nz) { printf("xz sbuf_len error!\n"); exit(0); } for ( i = 0; i < nx; i += 1 ) { memcpy(&sbuf[i*nz],&arr[base_addr+i*ny0*nz0],nz*sizeof(double)); } } void putin_sendbuffer_xy(int base_addr,int nx0,int ny0, int nz0, double* arr, int nx, int ny, int nz, double* sbuf, int sbuf_len) { int i, j; if(sbuf_len!=nx*ny) { printf("xy sbuf_len error!\n"); exit(0); } for ( i = 0; i < nx; i += 1 ) { for ( j = 0; j < ny; j += 1 ) { sbuf[i*ny+j]=arr[base_addr+i*ny0*nz0+j*nz0]; } } } void getout_recvbuffer_yz(int base_addr,int nx0,int ny0, int nz0, double* arr, int nx, int ny, int nz, double* sbuf, int sbuf_len) { int i; if(sbuf_len!=ny*nz) { printf("yz rbuf_len error!\n"); exit(0); } for ( i = 0; i < ny; i += 1 ) { memcpy(&arr[base_addr+i*nz0],&sbuf[i*nz],nz*sizeof(double)); } } void getout_recvbuffer_xz(int base_addr,int nx0,int ny0, int nz0, double* arr, int nx, int ny, int nz, double* sbuf, int sbuf_len) { int i; if(sbuf_len!=nx*nz) { printf("xz rbuf_len error!\n"); exit(0); } for ( i = 0; i < nx; i += 1 ) { memcpy(&arr[base_addr+i*ny0*nz0],&sbuf[i*nz],nz*sizeof(double)); } } void getout_recvbuffer_xy(int base_addr,int nx0,int ny0, int nz0, double* arr, int nx, int ny, int nz, double* sbuf, int sbuf_len) { int i, j; if(sbuf_len!=nx*ny) { printf("xy rbuf_len error!\n"); exit(0); } for ( i = 0; i < nx; i += 1 ) { for ( j = 0; j < ny; j += 1 ) { arr[base_addr+i*ny0*nz0+j*nz0]=sbuf[i*ny+j]; } } } void BinarySort_two(int* pData, int* vData, int Count) { dichotomy_two(pData,vData,0,Count-1); } void dichotomy_two(int* pData,int* vData, int left,int right) { int i,j; int middle,iTemp; i = left; j = right; middle = pData[(left+right)/2]; do{ while((pData[i]<middle) && (i<right)) i++; while((pData[j]>middle) && (j>left)) j--; if(i<=j) { iTemp = pData[i]; pData[i] = pData[j]; pData[j] = iTemp; iTemp =vData[i]; vData[i]=vData[j]; vData[j]=iTemp; i++; j--; } }while(i<=j); if(left<j) dichotomy_two(pData,vData,left,j); if(right>i) dichotomy_two(pData,vData,i,right); } void *mpi_malloc ( int id, /* IN - Process rank */ int bytes) /* IN - Bytes to allocate */ { void *buffer; if ((buffer = malloc ((size_t) bytes)) == NULL) { printf ("Error: Malloc failed for process %d\n", id); fflush (stdout); MPI_Abort (MPI_COMM_WORLD, 4); } return buffer; } void compute_pde_ode(int nx0, int ny0, int nz0, double dt,double gamma, double fudge, double* alpha, double* B_tot, double* k_on, double* k_off, double** C0, double** C1, int div_y) { // Main kernel int i,j,k,jj,idx; int ny; double J; double Ca_ijk; double buff_ijk; double Ca_i2_ijk; double Ca_SR2_ijk; ny=ny0-2; for (i=1; i<nx0-1; i++) { for (jj=0; jj<ny/div_y; jj++) { //blocking for cache size on y line for (j=jj*div_y+1; j<(jj+1)*div_y+1; j++) { //Laplace diffusion process five array together for(idx=0;idx<5;idx++) { #pragma ivdep #pragma prefetch for (k=1; k<nz0-1; k++) { C1[idx][i*nz0*ny0+j*nz0+k] =alpha[idx]*( C0[idx][i*nz0*ny0+j*nz0+k]*(-6)+ C0[idx][(i-1)*nz0*ny0+j*nz0+k] + C0[idx][(i+1)*nz0*ny0+j*nz0+k] + C0[idx][i*nz0*ny0+(j-1)*nz0+k] + C0[idx][i*nz0*ny0+(j+1)*nz0+k] + 
C0[idx][i*nz0*ny0+j*nz0+k-1] + C0[idx][i*nz0*ny0+j*nz0+k+1]) + C0[idx][i*nz0*ny0+j*nz0+k]; } } //Reaction for(idx=2;idx<6;idx++) { #pragma ivdep #pragma prefetch for (k=1; k<nz0-1; k++) { Ca_ijk = C1[0][i*nz0*ny0+j*nz0+k]; buff_ijk = C1[idx][i*nz0*ny0+j*nz0+k]; J = k_on[idx]*(B_tot[idx] - buff_ijk)*Ca_ijk - k_off[idx]*buff_ijk; C1[0][i*nz0*ny0+j*nz0+k] -= dt*J; C1[idx][i*nz0*ny0+j*nz0+k] += dt*J; } } // serca3D #pragma ivdep #pragma prefetch for (k=1; k<nz0-1; k++) { // Main kernel Ca_i2_ijk = C1[0][i*nz0*ny0+j*nz0+k]; Ca_SR2_ijk = C1[1][i*nz0*ny0+j*nz0+k]; Ca_i2_ijk *= Ca_i2_ijk; Ca_SR2_ijk *= Ca_SR2_ijk; J = fudge*(570997802.885875*Ca_i2_ijk - 0.0425239333622699*Ca_SR2_ijk)/(106720651.206402*Ca_i2_ijk + 182.498197548666*Ca_SR2_ijk + 5.35062954944879); C1[0][i*nz0*ny0+j*nz0+k] -= dt*J; C1[1][i*nz0*ny0+j*nz0+k] += dt*J/gamma; } } } } }
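The index-distribution step in distr_ryr_csqn_state above builds per-rank counts and displacements on rank 0 and then hands every rank its slice with MPI_Scatter followed by MPI_Scatterv. A minimal, self-contained sketch of that pattern follows (illustrative names only — counts, displs, my_idx are not part of the simulation code):

#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
    int rank, nproc;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nproc);

    int *counts = NULL, *displs = NULL, *global_idx = NULL;
    if (rank == 0) {
        int total = 12;                    /* pretend there are 12 global indices */
        global_idx = malloc(total*sizeof(int));
        counts = malloc(nproc*sizeof(int));
        displs = malloc(nproc*sizeof(int));
        for (int i = 0; i < total; i++) global_idx[i] = i;
        for (int r = 0; r < nproc; r++)    /* block distribution, remainder spread over low ranks */
            counts[r] = total/nproc + (r < total%nproc ? 1 : 0);
        displs[0] = 0;
        for (int r = 1; r < nproc; r++) displs[r] = displs[r-1] + counts[r-1];
    }

    /* each rank first learns its own count, then receives its block */
    int my_len = 0;
    MPI_Scatter(counts, 1, MPI_INT, &my_len, 1, MPI_INT, 0, MPI_COMM_WORLD);
    int *my_idx = malloc((my_len > 0 ? my_len : 1)*sizeof(int));
    MPI_Scatterv(global_idx, counts, displs, MPI_INT,
                 my_idx, my_len, MPI_INT, 0, MPI_COMM_WORLD);
    printf("rank %d received %d indices\n", rank, my_len);

    free(my_idx);
    if (rank == 0) { free(global_idx); free(counts); free(displs); }
    MPI_Finalize();
    return 0;
}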
serial_teams.c
// RUN: %libomp-compile-and-run | %sort-threads | FileCheck %s // REQUIRES: ompt // UNSUPPORTED: gcc #include "callback.h" int main() { #pragma omp target teams num_teams(2) thread_limit(1) #pragma omp parallel num_threads(1) { printf("In teams parallel\n"); } return 0; } // CHECK: 0: NULL_POINTER=[[NULL:.*$]] // CHECK-NOT: 0: parallel_data initially not null // CHECK-NOT: 0: task_data initially not null // CHECK-NOT: 0: thread_data initially not null // CHECK: {{^}}[[MASTER_0:[0-9]+]]: ompt_event_initial_task_begin: // CHECK-SAME: task_id=[[INIT_TASK:[0-9]+]], {{.*}}, index=1 // CHECK: {{^}}[[MASTER_0]]: ompt_event_teams_begin: // CHECK-SAME: parent_task_id=[[INIT_TASK]] // CHECK-SAME: {{.*}} requested_num_teams=2 // CHECK-SAME: {{.*}} invoker=[[TEAMS_FLAGS:[0-9]+]] // // team 0 // // initial task in the teams construct // CHECK: {{^}}[[MASTER_0]]: ompt_event_initial_task_begin: // CHECK-SAME: task_id=[[INIT_TASK_0:[0-9]+]], actual_parallelism=2, index=0 // parallel region forked by runtime // CHECK: {{^}}[[MASTER_0]]: ompt_event_parallel_begin: // CHECK-SAME: {{.*}} parent_task_id=[[INIT_TASK_0]] // CHECK-SAME: {{.*}} parallel_id=[[PAR_0:[0-9]+]] // CHECK: {{^}}[[MASTER_0]]: ompt_event_implicit_task_begin: // CHECK-SAME: {{.*}} parallel_id=[[PAR_0]], task_id=[[IMPL_TASK_0:[0-9]+]] // user parallel region // CHECK: {{^}}[[MASTER_0]]: ompt_event_parallel_begin: // CHECK-SAME: {{.*}} parent_task_id=[[IMPL_TASK_0]] // CHECK-SAME: {{.*}} parallel_id=[[PAR_00:[0-9]+]] // CHECK: {{^}}[[MASTER_0]]: ompt_event_parallel_end: // CHECK-SAME: {{.*}} parallel_id=[[PAR_00]], task_id=[[IMPL_TASK_0]] // CHECK: {{^}}[[MASTER_0]]: ompt_event_implicit_task_end: // CHECK-SAME: {{.*}} parallel_id={{[0-9]+}}, task_id=[[IMPL_TASK_0]] // CHECK: {{^}}[[MASTER_0]]: ompt_event_parallel_end: // CHECK-SAME: {{.*}} parallel_id=[[PAR_0]], task_id=[[INIT_TASK_0]] // CHECK: {{^}}[[MASTER_0]]: ompt_event_initial_task_end: // CHECK-SAME: task_id=[[INIT_TASK_0]], actual_parallelism=0, index=0 // CHECK: {{^}}[[MASTER_0]]: ompt_event_teams_end: // CHECK-SAME: {{.*}} task_id=[[INIT_TASK]], invoker=[[TEAMS_FLAGS]] // CHECK: {{^}}[[MASTER_0]]: ompt_event_initial_task_end: // CHECK-SAME: task_id=[[INIT_TASK]], {{.*}}, index=1 // // team 1 // // initial task in the teams construct // CHECK: {{^}}[[MASTER_1:[0-9]+]]: ompt_event_initial_task_begin: // CHECK-SAME: task_id=[[INIT_TASK_1:[0-9]+]], actual_parallelism=2, index=1 // parallel region forked by runtime // CHECK: {{^}}[[MASTER_1]]: ompt_event_parallel_begin: // CHECK-SAME: {{.*}} parent_task_id=[[INIT_TASK_1]] // CHECK-SAME: {{.*}} parallel_id=[[PAR_ID_1:[0-9]+]] // CHECK: {{^}}[[MASTER_1]]: ompt_event_implicit_task_begin: // CHECK-SAME: {{.*}} parallel_id=[[PAR_ID_1]], task_id=[[IMPL_TASK_1:[0-9]+]] // user parallel region // CHECK: {{^}}[[MASTER_1]]: ompt_event_parallel_begin: // CHECK-SAME: {{.*}} parent_task_id=[[IMPL_TASK_1]] // CHECK-SAME: {{.*}} parallel_id=[[PAR_ID_11:[0-9]+]] // CHECK: {{^}}[[MASTER_1]]: ompt_event_parallel_end: // CHECK-SAME: {{.*}} parallel_id=[[PAR_ID_11]], task_id=[[IMPL_TASK_1]] // CHECK: {{^}}[[MASTER_1]]: ompt_event_implicit_task_end: // CHECK-SAME: {{.*}} parallel_id={{[0-9]+}}, task_id=[[IMPL_TASK_1]] // CHECK: {{^}}[[MASTER_1]]: ompt_event_parallel_end: // CHECK-SAME: {{.*}} parallel_id=[[PAR_ID_1]], task_id=[[INIT_TASK_1]] // CHECK: {{^}}[[MASTER_1]]: ompt_event_initial_task_end: // CHECK-SAME: task_id=[[INIT_TASK_1]], actual_parallelism=0, index=1
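serial_teams.c above is an OMPT lit test; the construct it traces can also be exercised directly. A minimal sketch, not part of the test suite, assuming an OpenMP compiler (e.g. clang -fopenmp) with a runtime that supports host teams:

#include <omp.h>
#include <stdio.h>

int main(void)
{
    /* two teams of one thread each, mirroring num_teams(2) thread_limit(1) above */
    #pragma omp teams num_teams(2) thread_limit(1)
    {
        #pragma omp parallel num_threads(1)
        printf("team %d of %d\n", omp_get_team_num(), omp_get_num_teams());
    }
    return 0;
}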
// RUN:%libomp - compile - and - run | %sort - threads | FileCheck % s // REQUIRES:ompt // UNSUPPORTED:gcc #include "callback.h" int main() { printf("In teams parallel\n"); return 0; } //CHECK: 0: NULL_POINTER =[[NULL:.* $]] // CHECK - NOT: 0:parallel_data initially not null // CHECK - NOT: 0:task_data initially not null // CHECK - NOT: 0:thread_data initially not null // CHECK:{ { ^ } }[[MASTER_0: [0 - 9] +]]: ompt_event_initial_task_begin: //CHECK - SAME: task_id =[[INIT_TASK:[0 - 9] +]], { { .* } }, index = 1 // CHECK:{ { ^ } }[[MASTER_0]]: ompt_event_teams_begin: //CHECK - SAME:parent_task_id =[[INIT_TASK]] // CHECK - SAME:{ { .* } } requested_num_teams = 2 // CHECK - SAME:{ { .* } } invoker =[[TEAMS_FLAGS:[0 - 9] +]] // //team 0 // //initial task in the teams construct // CHECK:{ { ^ } }[[MASTER_0]]: ompt_event_initial_task_begin: //CHECK - SAME: task_id =[[INIT_TASK_0:[0 - 9] +]], actual_parallelism = 2, index = 0 // parallel region forked by runtime // CHECK:{ { ^ } }[[MASTER_0]]: ompt_event_parallel_begin: //CHECK - SAME:{ { .* } } parent_task_id =[[INIT_TASK_0]] // CHECK - SAME:{ { .* } } parallel_id =[[PAR_0:[0 - 9] +]] // CHECK:{ { ^ } }[[MASTER_0]]: ompt_event_implicit_task_begin: //CHECK - SAME:{ { .* } } parallel_id =[[PAR_0]], task_id =[[IMPL_TASK_0:[0 - 9] +]] // user parallel region // CHECK:{ { ^ } }[[MASTER_0]]: ompt_event_parallel_begin: //CHECK - SAME:{ { .* } } parent_task_id =[[IMPL_TASK_0]] // CHECK - SAME:{ { .* } } parallel_id =[[PAR_00:[0 - 9] +]] // CHECK:{ { ^ } }[[MASTER_0]]: ompt_event_parallel_end: //CHECK - SAME:{ { .* } } parallel_id =[[PAR_00]], task_id =[[IMPL_TASK_0]] // CHECK:{ { ^ } }[[MASTER_0]]: ompt_event_implicit_task_end: //CHECK - SAME:{ { .* } } parallel_id = { { [0 - 9] + } }, task_id =[[IMPL_TASK_0]] // CHECK:{ { ^ } }[[MASTER_0]]: ompt_event_parallel_end: //CHECK - SAME:{ { .* } } parallel_id =[[PAR_0]], task_id =[[INIT_TASK_0]] // CHECK:{ { ^ } }[[MASTER_0]]: ompt_event_initial_task_end: //CHECK - SAME:task_id =[[INIT_TASK_0]], actual_parallelism = 0, index = 0 // CHECK:{ { ^ } }[[MASTER_0]]: ompt_event_teams_end: //CHECK - SAME:{ { .* } } task_id =[[INIT_TASK]], invoker =[[TEAMS_FLAGS]] // CHECK:{ { ^ } }[[MASTER_0]]: ompt_event_initial_task_end: //CHECK - SAME:task_id =[[INIT_TASK]], { { .* } }, index = 1 // //team 1 // //initial task in the teams construct // CHECK:{ { ^ } }[[MASTER_1: [0 - 9] +]]: ompt_event_initial_task_begin: //CHECK - SAME: task_id =[[INIT_TASK_1:[0 - 9] +]], actual_parallelism = 2, index = 1 // parallel region forked by runtime // CHECK:{ { ^ } }[[MASTER_1]]: ompt_event_parallel_begin: //CHECK - SAME:{ { .* } } parent_task_id =[[INIT_TASK_1]] // CHECK - SAME:{ { .* } } parallel_id =[[PAR_ID_1:[0 - 9] +]] // CHECK:{ { ^ } }[[MASTER_1]]: ompt_event_implicit_task_begin: //CHECK - SAME:{ { .* } } parallel_id =[[PAR_ID_1]], task_id =[[IMPL_TASK_1:[0 - 9] +]] // user parallel region // CHECK:{ { ^ } }[[MASTER_1]]: ompt_event_parallel_begin: //CHECK - SAME:{ { .* } } parent_task_id =[[IMPL_TASK_1]] // CHECK - SAME:{ { .* } } parallel_id =[[PAR_ID_11:[0 - 9] +]] // CHECK:{ { ^ } }[[MASTER_1]]: ompt_event_parallel_end: //CHECK - SAME:{ { .* } } parallel_id =[[PAR_ID_11]], task_id =[[IMPL_TASK_1]] // CHECK:{ { ^ } }[[MASTER_1]]: ompt_event_implicit_task_end: //CHECK - SAME:{ { .* } } parallel_id = { { [0 - 9] + } }, task_id =[[IMPL_TASK_1]] // CHECK:{ { ^ } }[[MASTER_1]]: ompt_event_parallel_end: //CHECK - SAME:{ { .* } } parallel_id =[[PAR_ID_1]], task_id =[[INIT_TASK_1]] // CHECK:{ { ^ } }[[MASTER_1]]: 
ompt_event_initial_task_end: //CHECK - SAME:task_id =[[INIT_TASK_1]], actual_parallelism = 0, index = 1
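The CHECK lines in all three variants lean on a handful of FileCheck idioms: [[NAME:regex]] captures a value, [[NAME]] re-matches the captured value, {{regex}} matches without capturing, CHECK-SAME continues matching on the same output line, and CHECK-NOT asserts absence between neighbouring matches. A tiny illustrative sketch of those idioms (hypothetical file, not part of the test suite):

// RUN: ./a.out | FileCheck %s   (hypothetical RUN line for illustration)
#include <stdio.h>

int main(void)
{
    int id = 42;
    printf("task_id=%d begin\n", id);
    printf("task_id=%d end\n", id);
    return 0;
}
// CHECK: task_id=[[ID:[0-9]+]] begin
// CHECK-NOT: error
// CHECK: task_id=[[ID]] end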
// RUN:%libomp - compile - and - run | %sort - threads | FileCheck % s // REQUIRES:ompt // UNSUPPORTED:gcc #include "callback.h" int main() { #pragma omp target teams num_teams(2) thread_limit(1) #pragma omp parallel num_threads(1) { printf("In teams parallel\n"); } return 0; } //CHECK: 0: NULL_POINTER =[[NULL:.* $]] // CHECK - NOT: 0:parallel_data initially not null // CHECK - NOT: 0:task_data initially not null // CHECK - NOT: 0:thread_data initially not null // CHECK:{ { ^ } }[[MASTER_0: [0 - 9] +]]: ompt_event_initial_task_begin: //CHECK - SAME: task_id =[[INIT_TASK:[0 - 9] +]], { { .* } }, index = 1 // CHECK:{ { ^ } }[[MASTER_0]]: ompt_event_teams_begin: //CHECK - SAME:parent_task_id =[[INIT_TASK]] // CHECK - SAME:{ { .* } } requested_num_teams = 2 // CHECK - SAME:{ { .* } } invoker =[[TEAMS_FLAGS:[0 - 9] +]] // //team 0 // //initial task in the teams construct // CHECK:{ { ^ } }[[MASTER_0]]: ompt_event_initial_task_begin: //CHECK - SAME: task_id =[[INIT_TASK_0:[0 - 9] +]], actual_parallelism = 2, index = 0 // parallel region forked by runtime // CHECK:{ { ^ } }[[MASTER_0]]: ompt_event_parallel_begin: //CHECK - SAME:{ { .* } } parent_task_id =[[INIT_TASK_0]] // CHECK - SAME:{ { .* } } parallel_id =[[PAR_0:[0 - 9] +]] // CHECK:{ { ^ } }[[MASTER_0]]: ompt_event_implicit_task_begin: //CHECK - SAME:{ { .* } } parallel_id =[[PAR_0]], task_id =[[IMPL_TASK_0:[0 - 9] +]] // user parallel region // CHECK:{ { ^ } }[[MASTER_0]]: ompt_event_parallel_begin: //CHECK - SAME:{ { .* } } parent_task_id =[[IMPL_TASK_0]] // CHECK - SAME:{ { .* } } parallel_id =[[PAR_00:[0 - 9] +]] // CHECK:{ { ^ } }[[MASTER_0]]: ompt_event_parallel_end: //CHECK - SAME:{ { .* } } parallel_id =[[PAR_00]], task_id =[[IMPL_TASK_0]] // CHECK:{ { ^ } }[[MASTER_0]]: ompt_event_implicit_task_end: //CHECK - SAME:{ { .* } } parallel_id = { { [0 - 9] + } }, task_id =[[IMPL_TASK_0]] // CHECK:{ { ^ } }[[MASTER_0]]: ompt_event_parallel_end: //CHECK - SAME:{ { .* } } parallel_id =[[PAR_0]], task_id =[[INIT_TASK_0]] // CHECK:{ { ^ } }[[MASTER_0]]: ompt_event_initial_task_end: //CHECK - SAME:task_id =[[INIT_TASK_0]], actual_parallelism = 0, index = 0 // CHECK:{ { ^ } }[[MASTER_0]]: ompt_event_teams_end: //CHECK - SAME:{ { .* } } task_id =[[INIT_TASK]], invoker =[[TEAMS_FLAGS]] // CHECK:{ { ^ } }[[MASTER_0]]: ompt_event_initial_task_end: //CHECK - SAME:task_id =[[INIT_TASK]], { { .* } }, index = 1 // //team 1 // //initial task in the teams construct // CHECK:{ { ^ } }[[MASTER_1: [0 - 9] +]]: ompt_event_initial_task_begin: //CHECK - SAME: task_id =[[INIT_TASK_1:[0 - 9] +]], actual_parallelism = 2, index = 1 // parallel region forked by runtime // CHECK:{ { ^ } }[[MASTER_1]]: ompt_event_parallel_begin: //CHECK - SAME:{ { .* } } parent_task_id =[[INIT_TASK_1]] // CHECK - SAME:{ { .* } } parallel_id =[[PAR_ID_1:[0 - 9] +]] // CHECK:{ { ^ } }[[MASTER_1]]: ompt_event_implicit_task_begin: //CHECK - SAME:{ { .* } } parallel_id =[[PAR_ID_1]], task_id =[[IMPL_TASK_1:[0 - 9] +]] // user parallel region // CHECK:{ { ^ } }[[MASTER_1]]: ompt_event_parallel_begin: //CHECK - SAME:{ { .* } } parent_task_id =[[IMPL_TASK_1]] // CHECK - SAME:{ { .* } } parallel_id =[[PAR_ID_11:[0 - 9] +]] // CHECK:{ { ^ } }[[MASTER_1]]: ompt_event_parallel_end: //CHECK - SAME:{ { .* } } parallel_id =[[PAR_ID_11]], task_id =[[IMPL_TASK_1]] // CHECK:{ { ^ } }[[MASTER_1]]: ompt_event_implicit_task_end: //CHECK - SAME:{ { .* } } parallel_id = { { [0 - 9] + } }, task_id =[[IMPL_TASK_1]] // CHECK:{ { ^ } }[[MASTER_1]]: ompt_event_parallel_end: //CHECK - SAME:{ { .* } } parallel_id 
=[[PAR_ID_1]], task_id =[[INIT_TASK_1]] // CHECK:{ { ^ } }[[MASTER_1]]: ompt_event_initial_task_end: //CHECK - SAME:task_id =[[INIT_TASK_1]], actual_parallelism = 0, index = 1
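The events the test checks (ompt_event_initial_task_begin, ompt_event_parallel_begin, ...) are printed by an OMPT tool that callback.h registers with the runtime. A minimal sketch of such a tool, assuming an OMPT-capable runtime that ships <omp-tools.h> (e.g. LLVM's libomp); it only traces parallel-begin and is not the test's actual callback.h:

#include <omp-tools.h>
#include <stdio.h>

static void on_parallel_begin(ompt_data_t *encountering_task_data,
                              const ompt_frame_t *encountering_task_frame,
                              ompt_data_t *parallel_data,
                              unsigned int requested_parallelism,
                              int flags, const void *codeptr_ra)
{
    printf("ompt_event_parallel_begin: requested_parallelism=%u\n",
           requested_parallelism);
}

static int tool_initialize(ompt_function_lookup_t lookup, int initial_device_num,
                           ompt_data_t *tool_data)
{
    ompt_set_callback_t set_callback =
        (ompt_set_callback_t)lookup("ompt_set_callback");
    set_callback(ompt_callback_parallel_begin, (ompt_callback_t)on_parallel_begin);
    return 1; /* non-zero keeps the tool active */
}

static void tool_finalize(ompt_data_t *tool_data) {}

/* the runtime looks this symbol up at startup */
ompt_start_tool_result_t *ompt_start_tool(unsigned int omp_version,
                                          const char *runtime_version)
{
    static ompt_start_tool_result_t result = {&tool_initialize, &tool_finalize, {0}};
    return &result;
}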
Sema.h
//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Sema class, which performs semantic analysis and // builds ASTs. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_SEMA_SEMA_H #define LLVM_CLANG_SEMA_SEMA_H #include "clang/AST/ASTConcept.h" #include "clang/AST/ASTFwd.h" #include "clang/AST/Attr.h" #include "clang/AST/Availability.h" #include "clang/AST/ComparisonCategories.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprConcepts.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/ExprOpenMP.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/LocInfoType.h" #include "clang/AST/MangleNumberingContext.h" #include "clang/AST/NSAPI.h" #include "clang/AST/PrettyPrinter.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/TypeLoc.h" #include "clang/AST/TypeOrdering.h" #include "clang/Basic/BitmaskEnum.h" #include "clang/Basic/ExpressionTraits.h" #include "clang/Basic/Module.h" #include "clang/Basic/OpenCLOptions.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/PragmaKinds.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TemplateKinds.h" #include "clang/Basic/TypeTraits.h" #include "clang/Sema/AnalysisBasedWarnings.h" #include "clang/Sema/CleanupInfo.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/ExternalSemaSource.h" #include "clang/Sema/IdentifierResolver.h" #include "clang/Sema/ObjCMethodList.h" #include "clang/Sema/Ownership.h" #include "clang/Sema/Scope.h" #include "clang/Sema/SemaConcept.h" #include "clang/Sema/TypoCorrection.h" #include "clang/Sema/Weak.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallBitVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/TinyPtrVector.h" #include "llvm/Frontend/OpenMP/OMPConstants.h" #include <deque> #include <memory> #include <string> #include <tuple> #include <vector> namespace llvm { class APSInt; template <typename ValueT> struct DenseMapInfo; template <typename ValueT, typename ValueInfoT> class DenseSet; class SmallBitVector; struct InlineAsmIdentifierInfo; } namespace clang { class ADLResult; class ASTConsumer; class ASTContext; class ASTMutationListener; class ASTReader; class ASTWriter; class ArrayType; class ParsedAttr; class BindingDecl; class BlockDecl; class CapturedDecl; class CXXBasePath; class CXXBasePaths; class CXXBindTemporaryExpr; typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath; class CXXConstructorDecl; class CXXConversionDecl; class CXXDeleteExpr; class CXXDestructorDecl; class CXXFieldCollector; class CXXMemberCallExpr; class CXXMethodDecl; class CXXScopeSpec; class CXXTemporary; class CXXTryStmt; class CallExpr; class ClassTemplateDecl; class ClassTemplatePartialSpecializationDecl; class ClassTemplateSpecializationDecl; class VarTemplatePartialSpecializationDecl; class CodeCompleteConsumer; class CodeCompletionAllocator; class CodeCompletionTUInfo; class CodeCompletionResult; class CoroutineBodyStmt; class Decl; class 
DeclAccessPair; class DeclContext; class DeclRefExpr; class DeclaratorDecl; class DeducedTemplateArgument; class DependentDiagnostic; class DesignatedInitExpr; class Designation; class EnableIfAttr; class EnumConstantDecl; class Expr; class ExtVectorType; class FormatAttr; class FriendDecl; class FunctionDecl; class FunctionProtoType; class FunctionTemplateDecl; class ImplicitConversionSequence; typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList; class InitListExpr; class InitializationKind; class InitializationSequence; class InitializedEntity; class IntegerLiteral; class LabelStmt; class LambdaExpr; class LangOptions; class LocalInstantiationScope; class LookupResult; class MacroInfo; typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath; class ModuleLoader; class MultiLevelTemplateArgumentList; class NamedDecl; class ObjCCategoryDecl; class ObjCCategoryImplDecl; class ObjCCompatibleAliasDecl; class ObjCContainerDecl; class ObjCImplDecl; class ObjCImplementationDecl; class ObjCInterfaceDecl; class ObjCIvarDecl; template <class T> class ObjCList; class ObjCMessageExpr; class ObjCMethodDecl; class ObjCPropertyDecl; class ObjCProtocolDecl; class OMPThreadPrivateDecl; class OMPRequiresDecl; class OMPDeclareReductionDecl; class OMPDeclareSimdDecl; class OMPClause; struct OMPVarListLocTy; struct OverloadCandidate; enum class OverloadCandidateParamOrder : char; enum OverloadCandidateRewriteKind : unsigned; class OverloadCandidateSet; class OverloadExpr; class ParenListExpr; class ParmVarDecl; class Preprocessor; class PseudoDestructorTypeStorage; class PseudoObjectExpr; class QualType; class StandardConversionSequence; class Stmt; class StringLiteral; class SwitchStmt; class TemplateArgument; class TemplateArgumentList; class TemplateArgumentLoc; class TemplateDecl; class TemplateInstantiationCallback; class TemplateParameterList; class TemplatePartialOrderingContext; class TemplateTemplateParmDecl; class Token; class TypeAliasDecl; class TypedefDecl; class TypedefNameDecl; class TypeLoc; class TypoCorrectionConsumer; class UnqualifiedId; class UnresolvedLookupExpr; class UnresolvedMemberExpr; class UnresolvedSetImpl; class UnresolvedSetIterator; class UsingDecl; class UsingShadowDecl; class ValueDecl; class VarDecl; class VarTemplateSpecializationDecl; class VisibilityAttr; class VisibleDeclConsumer; class IndirectFieldDecl; struct DeductionFailureInfo; class TemplateSpecCandidateSet; namespace sema { class AccessedEntity; class BlockScopeInfo; class Capture; class CapturedRegionScopeInfo; class CapturingScopeInfo; class CompoundScopeInfo; class DelayedDiagnostic; class DelayedDiagnosticPool; class FunctionScopeInfo; class LambdaScopeInfo; class PossiblyUnreachableDiag; class SemaPPCallbacks; class TemplateDeductionInfo; } namespace threadSafety { class BeforeSet; void threadSafetyCleanup(BeforeSet* Cache); } // FIXME: No way to easily map from TemplateTypeParmTypes to // TemplateTypeParmDecls, so we have this horrible PointerUnion. typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>, SourceLocation> UnexpandedParameterPack; /// Describes whether we've seen any nullability information for the given /// file. struct FileNullability { /// The first pointer declarator (of any pointer kind) in the file that does /// not have a corresponding nullability annotation. SourceLocation PointerLoc; /// The end location for the first pointer declarator in the file. Used for /// placing fix-its. 
SourceLocation PointerEndLoc; /// Which kind of pointer declarator we saw. uint8_t PointerKind; /// Whether we saw any type nullability annotations in the given file. bool SawTypeNullability = false; }; /// A mapping from file IDs to a record of whether we've seen nullability /// information in that file. class FileNullabilityMap { /// A mapping from file IDs to the nullability information for each file ID. llvm::DenseMap<FileID, FileNullability> Map; /// A single-element cache based on the file ID. struct { FileID File; FileNullability Nullability; } Cache; public: FileNullability &operator[](FileID file) { // Check the single-element cache. if (file == Cache.File) return Cache.Nullability; // It's not in the single-element cache; flush the cache if we have one. if (!Cache.File.isInvalid()) { Map[Cache.File] = Cache.Nullability; } // Pull this entry into the cache. Cache.File = file; Cache.Nullability = Map[file]; return Cache.Nullability; } }; /// Tracks expected type during expression parsing, for use in code completion. /// The type is tied to a particular token, all functions that update or consume /// the type take a start location of the token they are looking at as a /// parameter. This avoids updating the type on hot paths in the parser. class PreferredTypeBuilder { public: PreferredTypeBuilder(bool Enabled) : Enabled(Enabled) {} void enterCondition(Sema &S, SourceLocation Tok); void enterReturn(Sema &S, SourceLocation Tok); void enterVariableInit(SourceLocation Tok, Decl *D); /// Handles e.g. BaseType{ .D = Tok... void enterDesignatedInitializer(SourceLocation Tok, QualType BaseType, const Designation &D); /// Computing a type for the function argument may require running /// overloading, so we postpone its computation until it is actually needed. /// /// Clients should be very careful when using this funciton, as it stores a /// function_ref, clients should make sure all calls to get() with the same /// location happen while function_ref is alive. /// /// The callback should also emit signature help as a side-effect, but only /// if the completion point has been reached. void enterFunctionArgument(SourceLocation Tok, llvm::function_ref<QualType()> ComputeType); void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc); void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind, SourceLocation OpLoc); void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op); void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base); void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS); /// Handles all type casts, including C-style cast, C++ casts, etc. void enterTypeCast(SourceLocation Tok, QualType CastType); /// Get the expected type associated with this location, if any. /// /// If the location is a function argument, determining the expected type /// involves considering all function overloads and the arguments so far. /// In this case, signature help for these function overloads will be reported /// as a side-effect (only if the completion point has been reached). QualType get(SourceLocation Tok) const { if (!Enabled || Tok != ExpectedLoc) return QualType(); if (!Type.isNull()) return Type; if (ComputeType) return ComputeType(); return QualType(); } private: bool Enabled; /// Start position of a token for which we store expected type. SourceLocation ExpectedLoc; /// Expected type for a token starting at ExpectedLoc. QualType Type; /// A function to compute expected type at ExpectedLoc. It is only considered /// if Type is null. 
llvm::function_ref<QualType()> ComputeType; }; /// Sema - This implements semantic analysis and AST building for C. class Sema final { Sema(const Sema &) = delete; void operator=(const Sema &) = delete; /// A key method to reduce duplicate debug info from Sema. virtual void anchor(); ///Source of additional semantic information. ExternalSemaSource *ExternalSource; ///Whether Sema has generated a multiplexer and has to delete it. bool isMultiplexExternalSource; static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD); bool isVisibleSlow(const NamedDecl *D); /// Determine whether two declarations should be linked together, given that /// the old declaration might not be visible and the new declaration might /// not have external linkage. bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old, const NamedDecl *New) { if (isVisible(Old)) return true; // See comment in below overload for why it's safe to compute the linkage // of the new declaration here. if (New->isExternallyDeclarable()) { assert(Old->isExternallyDeclarable() && "should not have found a non-externally-declarable previous decl"); return true; } return false; } bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New); void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem, QualType ResultTy, ArrayRef<QualType> Args); public: /// The maximum alignment, same as in llvm::Value. We duplicate them here /// because that allows us not to duplicate the constants in clang code, /// which we must to since we can't directly use the llvm constants. /// The value is verified against llvm here: lib/CodeGen/CGDecl.cpp /// /// This is the greatest alignment value supported by load, store, and alloca /// instructions, and global values. static const unsigned MaxAlignmentExponent = 29; static const unsigned MaximumAlignment = 1u << MaxAlignmentExponent; typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef OpaquePtr<QualType> TypeTy; OpenCLOptions OpenCLFeatures; FPOptions CurFPFeatures; const LangOptions &LangOpts; Preprocessor &PP; ASTContext &Context; ASTConsumer &Consumer; DiagnosticsEngine &Diags; SourceManager &SourceMgr; /// Flag indicating whether or not to collect detailed statistics. bool CollectStats; /// Code-completion consumer. CodeCompleteConsumer *CodeCompleter; /// CurContext - This is the current declaration context of parsing. DeclContext *CurContext; /// Generally null except when we temporarily switch decl contexts, /// like in \see ActOnObjCTemporaryExitContainerContext. DeclContext *OriginalLexicalContext; /// VAListTagName - The declaration name corresponding to __va_list_tag. /// This is used as part of a hack to omit that class from ADL results. DeclarationName VAListTagName; bool MSStructPragmaOn; // True when \#pragma ms_struct on /// Controls member pointer representation format under the MS ABI. LangOptions::PragmaMSPointersToMembersKind MSPointerToMemberRepresentationMethod; /// Stack of active SEH __finally scopes. Can be empty. SmallVector<Scope*, 2> CurrentSEHFinally; /// Source location for newly created implicit MSInheritanceAttrs SourceLocation ImplicitMSInheritanceAttrLoc; /// Holds TypoExprs that are created from `createDelayedTypo`. This is used by /// `TransformTypos` in order to keep track of any TypoExprs that are created /// recursively during typo correction and wipe them away if the correction /// fails. 
llvm::SmallVector<TypoExpr *, 2> TypoExprs; /// pragma clang section kind enum PragmaClangSectionKind { PCSK_Invalid = 0, PCSK_BSS = 1, PCSK_Data = 2, PCSK_Rodata = 3, PCSK_Text = 4, PCSK_Relro = 5 }; enum PragmaClangSectionAction { PCSA_Set = 0, PCSA_Clear = 1 }; struct PragmaClangSection { std::string SectionName; bool Valid = false; SourceLocation PragmaLocation; }; PragmaClangSection PragmaClangBSSSection; PragmaClangSection PragmaClangDataSection; PragmaClangSection PragmaClangRodataSection; PragmaClangSection PragmaClangRelroSection; PragmaClangSection PragmaClangTextSection; enum PragmaMsStackAction { PSK_Reset = 0x0, // #pragma () PSK_Set = 0x1, // #pragma (value) PSK_Push = 0x2, // #pragma (push[, id]) PSK_Pop = 0x4, // #pragma (pop[, id]) PSK_Show = 0x8, // #pragma (show) -- only for "pack"! PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value) PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value) }; // #pragma pack and align. class AlignPackInfo { public: // `Native` represents default align mode, which may vary based on the // platform. enum Mode : unsigned char { Native, Natural, Packed, Mac68k }; // #pragma pack info constructor AlignPackInfo(AlignPackInfo::Mode M, unsigned Num, bool IsXL) : PackAttr(true), AlignMode(M), PackNumber(Num), XLStack(IsXL) { assert(Num == PackNumber && "The pack number has been truncated."); } // #pragma align info constructor AlignPackInfo(AlignPackInfo::Mode M, bool IsXL) : PackAttr(false), AlignMode(M), PackNumber(M == Packed ? 1 : UninitPackVal), XLStack(IsXL) {} explicit AlignPackInfo(bool IsXL) : AlignPackInfo(Native, IsXL) {} AlignPackInfo() : AlignPackInfo(Native, false) {} // When a AlignPackInfo itself cannot be used, this returns an 32-bit // integer encoding for it. This should only be passed to // AlignPackInfo::getFromRawEncoding, it should not be inspected directly. static uint32_t getRawEncoding(const AlignPackInfo &Info) { std::uint32_t Encoding{}; if (Info.IsXLStack()) Encoding |= IsXLMask; Encoding |= static_cast<uint32_t>(Info.getAlignMode()) << 1; if (Info.IsPackAttr()) Encoding |= PackAttrMask; Encoding |= static_cast<uint32_t>(Info.getPackNumber()) << 4; return Encoding; } static AlignPackInfo getFromRawEncoding(unsigned Encoding) { bool IsXL = static_cast<bool>(Encoding & IsXLMask); AlignPackInfo::Mode M = static_cast<AlignPackInfo::Mode>((Encoding & AlignModeMask) >> 1); int PackNumber = (Encoding & PackNumMask) >> 4; if (Encoding & PackAttrMask) return AlignPackInfo(M, PackNumber, IsXL); return AlignPackInfo(M, IsXL); } bool IsPackAttr() const { return PackAttr; } bool IsAlignAttr() const { return !PackAttr; } Mode getAlignMode() const { return AlignMode; } unsigned getPackNumber() const { return PackNumber; } bool IsPackSet() const { // #pragma align, #pragma pack(), and #pragma pack(0) do not set the pack // attriute on a decl. return PackNumber != UninitPackVal && PackNumber != 0; } bool IsXLStack() const { return XLStack; } bool operator==(const AlignPackInfo &Info) const { return std::tie(AlignMode, PackNumber, PackAttr, XLStack) == std::tie(Info.AlignMode, Info.PackNumber, Info.PackAttr, Info.XLStack); } bool operator!=(const AlignPackInfo &Info) const { return !(*this == Info); } private: /// \brief True if this is a pragma pack attribute, /// not a pragma align attribute. bool PackAttr; /// \brief The alignment mode that is in effect. Mode AlignMode; /// \brief The pack number of the stack. unsigned char PackNumber; /// \brief True if it is a XL #pragma align/pack stack. 
bool XLStack; /// \brief Uninitialized pack value. static constexpr unsigned char UninitPackVal = -1; // Masks to encode and decode an AlignPackInfo. static constexpr uint32_t IsXLMask{0x0000'0001}; static constexpr uint32_t AlignModeMask{0x0000'0006}; static constexpr uint32_t PackAttrMask{0x00000'0008}; static constexpr uint32_t PackNumMask{0x0000'01F0}; }; template<typename ValueType> struct PragmaStack { struct Slot { llvm::StringRef StackSlotLabel; ValueType Value; SourceLocation PragmaLocation; SourceLocation PragmaPushLocation; Slot(llvm::StringRef StackSlotLabel, ValueType Value, SourceLocation PragmaLocation, SourceLocation PragmaPushLocation) : StackSlotLabel(StackSlotLabel), Value(Value), PragmaLocation(PragmaLocation), PragmaPushLocation(PragmaPushLocation) {} }; void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, ValueType Value) { if (Action == PSK_Reset) { CurrentValue = DefaultValue; CurrentPragmaLocation = PragmaLocation; return; } if (Action & PSK_Push) Stack.emplace_back(StackSlotLabel, CurrentValue, CurrentPragmaLocation, PragmaLocation); else if (Action & PSK_Pop) { if (!StackSlotLabel.empty()) { // If we've got a label, try to find it and jump there. auto I = llvm::find_if(llvm::reverse(Stack), [&](const Slot &x) { return x.StackSlotLabel == StackSlotLabel; }); // If we found the label so pop from there. if (I != Stack.rend()) { CurrentValue = I->Value; CurrentPragmaLocation = I->PragmaLocation; Stack.erase(std::prev(I.base()), Stack.end()); } } else if (!Stack.empty()) { // We do not have a label, just pop the last entry. CurrentValue = Stack.back().Value; CurrentPragmaLocation = Stack.back().PragmaLocation; Stack.pop_back(); } } if (Action & PSK_Set) { CurrentValue = Value; CurrentPragmaLocation = PragmaLocation; } } // MSVC seems to add artificial slots to #pragma stacks on entering a C++ // method body to restore the stacks on exit, so it works like this: // // struct S { // #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>) // void Method {} // #pragma <name>(pop, InternalPragmaSlot) // }; // // It works even with #pragma vtordisp, although MSVC doesn't support // #pragma vtordisp(push [, id], n) // syntax. // // Push / pop a named sentinel slot. void SentinelAction(PragmaMsStackAction Action, StringRef Label) { assert((Action == PSK_Push || Action == PSK_Pop) && "Can only push / pop #pragma stack sentinels!"); Act(CurrentPragmaLocation, Action, Label, CurrentValue); } // Constructors. explicit PragmaStack(const ValueType &Default) : DefaultValue(Default), CurrentValue(Default) {} bool hasValue() const { return CurrentValue != DefaultValue; } SmallVector<Slot, 2> Stack; ValueType DefaultValue; // Value used for PSK_Reset action. ValueType CurrentValue; SourceLocation CurrentPragmaLocation; }; // FIXME: We should serialize / deserialize these if they occur in a PCH (but // we shouldn't do so if they're in a module). /// Whether to insert vtordisps prior to virtual bases in the Microsoft /// C++ ABI. Possible values are 0, 1, and 2, which mean: /// /// 0: Suppress all vtordisps /// 1: Insert vtordisps in the presence of vbase overrides and non-trivial /// structors /// 2: Always insert vtordisps to support RTTI on partially constructed /// objects PragmaStack<MSVtorDispMode> VtorDispStack; PragmaStack<AlignPackInfo> AlignPackStack; // The current #pragma align/pack values and locations at each #include. 
struct AlignPackIncludeState { AlignPackInfo CurrentValue; SourceLocation CurrentPragmaLocation; bool HasNonDefaultValue, ShouldWarnOnInclude; }; SmallVector<AlignPackIncludeState, 8> AlignPackIncludeStack; // Segment #pragmas. PragmaStack<StringLiteral *> DataSegStack; PragmaStack<StringLiteral *> BSSSegStack; PragmaStack<StringLiteral *> ConstSegStack; PragmaStack<StringLiteral *> CodeSegStack; // This stack tracks the current state of Sema.CurFPFeatures. PragmaStack<FPOptionsOverride> FpPragmaStack; FPOptionsOverride CurFPFeatureOverrides() { FPOptionsOverride result; if (!FpPragmaStack.hasValue()) { result = FPOptionsOverride(); } else { result = FpPragmaStack.CurrentValue; } return result; } // RAII object to push / pop sentinel slots for all MS #pragma stacks. // Actions should be performed only if we enter / exit a C++ method body. class PragmaStackSentinelRAII { public: PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct); ~PragmaStackSentinelRAII(); private: Sema &S; StringRef SlotLabel; bool ShouldAct; }; /// A mapping that describes the nullability we've seen in each header file. FileNullabilityMap NullabilityMap; /// Last section used with #pragma init_seg. StringLiteral *CurInitSeg; SourceLocation CurInitSegLoc; /// VisContext - Manages the stack for \#pragma GCC visibility. void *VisContext; // Really a "PragmaVisStack*" /// This an attribute introduced by \#pragma clang attribute. struct PragmaAttributeEntry { SourceLocation Loc; ParsedAttr *Attribute; SmallVector<attr::SubjectMatchRule, 4> MatchRules; bool IsUsed; }; /// A push'd group of PragmaAttributeEntries. struct PragmaAttributeGroup { /// The location of the push attribute. SourceLocation Loc; /// The namespace of this push group. const IdentifierInfo *Namespace; SmallVector<PragmaAttributeEntry, 2> Entries; }; SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack; /// The declaration that is currently receiving an attribute from the /// #pragma attribute stack. const Decl *PragmaAttributeCurrentTargetDecl; /// This represents the last location of a "#pragma clang optimize off" /// directive if such a directive has not been closed by an "on" yet. If /// optimizations are currently "on", this is set to an invalid location. SourceLocation OptimizeOffPragmaLocation; /// Flag indicating if Sema is building a recovery call expression. /// /// This flag is used to avoid building recovery call expressions /// if Sema is already doing so, which would cause infinite recursions. bool IsBuildingRecoveryCallExpr; /// Used to control the generation of ExprWithCleanups. CleanupInfo Cleanup; /// ExprCleanupObjects - This is the stack of objects requiring /// cleanup that are created by the current full expression. SmallVector<ExprWithCleanups::CleanupObject, 8> ExprCleanupObjects; /// Store a set of either DeclRefExprs or MemberExprs that contain a reference /// to a variable (constant) that may or may not be odr-used in this Expr, and /// we won't know until all lvalue-to-rvalue and discarded value conversions /// have been applied to all subexpressions of the enclosing full expression. /// This is cleared at the end of each full expression. using MaybeODRUseExprSet = llvm::SetVector<Expr *, SmallVector<Expr *, 4>, llvm::SmallPtrSet<Expr *, 4>>; MaybeODRUseExprSet MaybeODRUseExprs; std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope; /// Stack containing information about each of the nested /// function, block, and method scopes that are currently active. 
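  /// A rough picture of how the stack nests while parsing (shapes assumed for
  /// illustration, not actual dump output):
  /// \code
  ///   void f() {          // FunctionScopes: [ FunctionScopeInfo(f) ]
  ///     auto L = [&] {    // FunctionScopes: [ FunctionScopeInfo(f),
  ///                       //                   LambdaScopeInfo(L) ]
  ///     };
  ///   }
  /// \endcode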
SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes; /// The index of the first FunctionScope that corresponds to the current /// context. unsigned FunctionScopesStart = 0; ArrayRef<sema::FunctionScopeInfo*> getFunctionScopes() const { return llvm::makeArrayRef(FunctionScopes.begin() + FunctionScopesStart, FunctionScopes.end()); } /// Stack containing information needed when in C++2a an 'auto' is encountered /// in a function declaration parameter type specifier in order to invent a /// corresponding template parameter in the enclosing abbreviated function /// template. This information is also present in LambdaScopeInfo, stored in /// the FunctionScopes stack. SmallVector<InventedTemplateParameterInfo, 4> InventedParameterInfos; /// The index of the first InventedParameterInfo that refers to the current /// context. unsigned InventedParameterInfosStart = 0; ArrayRef<InventedTemplateParameterInfo> getInventedParameterInfos() const { return llvm::makeArrayRef(InventedParameterInfos.begin() + InventedParameterInfosStart, InventedParameterInfos.end()); } typedef LazyVector<TypedefNameDecl *, ExternalSemaSource, &ExternalSemaSource::ReadExtVectorDecls, 2, 2> ExtVectorDeclsType; /// ExtVectorDecls - This is a list all the extended vector types. This allows /// us to associate a raw vector type with one of the ext_vector type names. /// This is only necessary for issuing pretty diagnostics. ExtVectorDeclsType ExtVectorDecls; /// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes. std::unique_ptr<CXXFieldCollector> FieldCollector; typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType; /// Set containing all declared private fields that are not used. NamedDeclSetType UnusedPrivateFields; /// Set containing all typedefs that are likely unused. llvm::SmallSetVector<const TypedefNameDecl *, 4> UnusedLocalTypedefNameCandidates; /// Delete-expressions to be analyzed at the end of translation unit /// /// This list contains class members, and locations of delete-expressions /// that could not be proven as to whether they mismatch with new-expression /// used in initializer of the field. typedef std::pair<SourceLocation, bool> DeleteExprLoc; typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs; llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs; typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy; /// PureVirtualClassDiagSet - a set of class declarations which we have /// emitted a list of pure virtual functions. Used to prevent emitting the /// same list more than once. std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet; /// ParsingInitForAutoVars - a set of declarations with auto types for which /// we are currently parsing the initializer. llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars; /// Look for a locally scoped extern "C" declaration by the given name. NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name); typedef LazyVector<VarDecl *, ExternalSemaSource, &ExternalSemaSource::ReadTentativeDefinitions, 2, 2> TentativeDefinitionsType; /// All the tentative definitions encountered in the TU. TentativeDefinitionsType TentativeDefinitions; /// All the external declarations encoutered and used in the TU. SmallVector<VarDecl *, 4> ExternalDeclarations; typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2> UnusedFileScopedDeclsType; /// The set of file scoped decls seen so far that have not been used /// and must warn if not used. Only contains the first declaration. 
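  /// For example (sketch), a file-scope entity such as
  /// \code
  ///   static int helper(int x) { return x; } // never referenced
  /// \endcode
  /// lands in this set and is diagnosed at the end of the TU if it is still
  /// unused.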
UnusedFileScopedDeclsType UnusedFileScopedDecls; typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadDelegatingConstructors, 2, 2> DelegatingCtorDeclsType; /// All the delegating constructors seen so far in the file, used for /// cycle detection at the end of the TU. DelegatingCtorDeclsType DelegatingCtorDecls; /// All the overriding functions seen during a class definition /// that had their exception spec checks delayed, plus the overridden /// function. SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2> DelayedOverridingExceptionSpecChecks; /// All the function redeclarations seen during a class definition that had /// their exception spec checks delayed, plus the prior declaration they /// should be checked against. Except during error recovery, the new decl /// should always be a friend declaration, as that's the only valid way to /// redeclare a special member before its class is complete. SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2> DelayedEquivalentExceptionSpecChecks; typedef llvm::MapVector<const FunctionDecl *, std::unique_ptr<LateParsedTemplate>> LateParsedTemplateMapT; LateParsedTemplateMapT LateParsedTemplateMap; /// Callback to the parser to parse templated functions when needed. typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT); typedef void LateTemplateParserCleanupCB(void *P); LateTemplateParserCB *LateTemplateParser; LateTemplateParserCleanupCB *LateTemplateParserCleanup; void *OpaqueParser; void SetLateTemplateParser(LateTemplateParserCB *LTP, LateTemplateParserCleanupCB *LTPCleanup, void *P) { LateTemplateParser = LTP; LateTemplateParserCleanup = LTPCleanup; OpaqueParser = P; } class DelayedDiagnostics; class DelayedDiagnosticsState { sema::DelayedDiagnosticPool *SavedPool; friend class Sema::DelayedDiagnostics; }; typedef DelayedDiagnosticsState ParsingDeclState; typedef DelayedDiagnosticsState ProcessingContextState; /// A class which encapsulates the logic for delaying diagnostics /// during parsing and other processing. class DelayedDiagnostics { /// The current pool of diagnostics into which delayed /// diagnostics should go. sema::DelayedDiagnosticPool *CurPool; public: DelayedDiagnostics() : CurPool(nullptr) {} /// Adds a delayed diagnostic. void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h /// Determines whether diagnostics should be delayed. bool shouldDelayDiagnostics() { return CurPool != nullptr; } /// Returns the current delayed-diagnostics pool. sema::DelayedDiagnosticPool *getCurrentPool() const { return CurPool; } /// Enter a new scope. Access and deprecation diagnostics will be /// collected in this pool. DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = &pool; return state; } /// Leave a delayed-diagnostic state that was previously pushed. /// Do not emit any of the diagnostics. This is performed as part /// of the bookkeeping of popping a pool "properly". void popWithoutEmitting(DelayedDiagnosticsState state) { CurPool = state.SavedPool; } /// Enter a new scope where access and deprecation diagnostics are /// not delayed. DelayedDiagnosticsState pushUndelayed() { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = nullptr; return state; } /// Undo a previous pushUndelayed(). 
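    /// A small pairing sketch (illustrative only):
    /// \code
    ///   auto State = S.DelayedDiagnostics.pushUndelayed();
    ///   // ... diagnostics issued here are emitted immediately, not delayed ...
    ///   S.DelayedDiagnostics.popUndelayed(State);
    /// \endcode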
void popUndelayed(DelayedDiagnosticsState state) { assert(CurPool == nullptr); CurPool = state.SavedPool; } } DelayedDiagnostics; /// A RAII object to temporarily push a declaration context. class ContextRAII { private: Sema &S; DeclContext *SavedContext; ProcessingContextState SavedContextState; QualType SavedCXXThisTypeOverride; unsigned SavedFunctionScopesStart; unsigned SavedInventedParameterInfosStart; public: ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true) : S(S), SavedContext(S.CurContext), SavedContextState(S.DelayedDiagnostics.pushUndelayed()), SavedCXXThisTypeOverride(S.CXXThisTypeOverride), SavedFunctionScopesStart(S.FunctionScopesStart), SavedInventedParameterInfosStart(S.InventedParameterInfosStart) { assert(ContextToPush && "pushing null context"); S.CurContext = ContextToPush; if (NewThisContext) S.CXXThisTypeOverride = QualType(); // Any saved FunctionScopes do not refer to this context. S.FunctionScopesStart = S.FunctionScopes.size(); S.InventedParameterInfosStart = S.InventedParameterInfos.size(); } void pop() { if (!SavedContext) return; S.CurContext = SavedContext; S.DelayedDiagnostics.popUndelayed(SavedContextState); S.CXXThisTypeOverride = SavedCXXThisTypeOverride; S.FunctionScopesStart = SavedFunctionScopesStart; S.InventedParameterInfosStart = SavedInventedParameterInfosStart; SavedContext = nullptr; } ~ContextRAII() { pop(); } }; /// Whether the AST is currently being rebuilt to correct immediate /// invocations. Immediate invocation candidates and references to consteval /// functions aren't tracked when this is set. bool RebuildingImmediateInvocation = false; /// Used to change context to isConstantEvaluated without pushing a heavy /// ExpressionEvaluationContextRecord object. bool isConstantEvaluatedOverride; bool isConstantEvaluated() { return ExprEvalContexts.back().isConstantEvaluated() || isConstantEvaluatedOverride; } /// RAII object to handle the state changes required to synthesize /// a function body. class SynthesizedFunctionScope { Sema &S; Sema::ContextRAII SavedContext; bool PushedCodeSynthesisContext = false; public: SynthesizedFunctionScope(Sema &S, DeclContext *DC) : S(S), SavedContext(S, DC) { S.PushFunctionScope(); S.PushExpressionEvaluationContext( Sema::ExpressionEvaluationContext::PotentiallyEvaluated); if (auto *FD = dyn_cast<FunctionDecl>(DC)) FD->setWillHaveBody(true); else assert(isa<ObjCMethodDecl>(DC)); } void addContextNote(SourceLocation UseLoc) { assert(!PushedCodeSynthesisContext); Sema::CodeSynthesisContext Ctx; Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction; Ctx.PointOfInstantiation = UseLoc; Ctx.Entity = cast<Decl>(S.CurContext); S.pushCodeSynthesisContext(Ctx); PushedCodeSynthesisContext = true; } ~SynthesizedFunctionScope() { if (PushedCodeSynthesisContext) S.popCodeSynthesisContext(); if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext)) FD->setWillHaveBody(false); S.PopExpressionEvaluationContext(); S.PopFunctionScopeInfo(); } }; /// WeakUndeclaredIdentifiers - Identifiers contained in /// \#pragma weak before declared. rare. may alias another /// identifier, declared or undeclared llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers; /// ExtnameUndeclaredIdentifiers - Identifiers contained in /// \#pragma redefine_extname before declared. Used in Solaris system headers /// to define functions that occur in multiple standards to call the version /// in the currently selected standard. 
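  /// The pattern this map supports looks roughly like (sketch):
  /// \code
  ///   #pragma redefine_extname open open64  // seen before 'open' is declared
  ///   int open(const char *, int, ...);     // later picks up asm label "open64"
  /// \endcode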
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers; /// Load weak undeclared identifiers from the external source. void LoadExternalWeakUndeclaredIdentifiers(); /// WeakTopLevelDecl - Translation-unit scoped declarations generated by /// \#pragma weak during processing of other Decls. /// I couldn't figure out a clean way to generate these in-line, so /// we store them here and handle separately -- which is a hack. /// It would be best to refactor this. SmallVector<Decl*,2> WeakTopLevelDecl; IdentifierResolver IdResolver; /// Translation Unit Scope - useful to Objective-C actions that need /// to lookup file scope declarations in the "ordinary" C decl namespace. /// For example, user-defined classes, built-in "id" type, etc. Scope *TUScope; /// The C++ "std" namespace, where the standard library resides. LazyDeclPtr StdNamespace; /// The C++ "std::bad_alloc" class, which is defined by the C++ /// standard library. LazyDeclPtr StdBadAlloc; /// The C++ "std::align_val_t" enum class, which is defined by the C++ /// standard library. LazyDeclPtr StdAlignValT; /// The C++ "std::experimental" namespace, where the experimental parts /// of the standard library resides. NamespaceDecl *StdExperimentalNamespaceCache; /// The C++ "std::initializer_list" template, which is defined in /// \<initializer_list>. ClassTemplateDecl *StdInitializerList; /// The C++ "std::coroutine_traits" template, which is defined in /// \<coroutine_traits> ClassTemplateDecl *StdCoroutineTraitsCache; /// The C++ "type_info" declaration, which is defined in \<typeinfo>. RecordDecl *CXXTypeInfoDecl; /// The MSVC "_GUID" struct, which is defined in MSVC header files. RecordDecl *MSVCGuidDecl; /// Caches identifiers/selectors for NSFoundation APIs. std::unique_ptr<NSAPI> NSAPIObj; /// The declaration of the Objective-C NSNumber class. ObjCInterfaceDecl *NSNumberDecl; /// The declaration of the Objective-C NSValue class. ObjCInterfaceDecl *NSValueDecl; /// Pointer to NSNumber type (NSNumber *). QualType NSNumberPointer; /// Pointer to NSValue type (NSValue *). QualType NSValuePointer; /// The Objective-C NSNumber methods used to create NSNumber literals. ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods]; /// The declaration of the Objective-C NSString class. ObjCInterfaceDecl *NSStringDecl; /// Pointer to NSString type (NSString *). QualType NSStringPointer; /// The declaration of the stringWithUTF8String: method. ObjCMethodDecl *StringWithUTF8StringMethod; /// The declaration of the valueWithBytes:objCType: method. ObjCMethodDecl *ValueWithBytesObjCTypeMethod; /// The declaration of the Objective-C NSArray class. ObjCInterfaceDecl *NSArrayDecl; /// The declaration of the arrayWithObjects:count: method. ObjCMethodDecl *ArrayWithObjectsMethod; /// The declaration of the Objective-C NSDictionary class. ObjCInterfaceDecl *NSDictionaryDecl; /// The declaration of the dictionaryWithObjects:forKeys:count: method. ObjCMethodDecl *DictionaryWithObjectsMethod; /// id<NSCopying> type. QualType QIDNSCopying; /// will hold 'respondsToSelector:' Selector RespondsToSelectorSel; /// A flag to remember whether the implicit forms of operator new and delete /// have been declared. bool GlobalNewDeleteDeclared; /// Describes how the expressions currently being parsed are /// evaluated at run-time, if at all. 
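  /// A few illustrative cases (assuming a visible 'constexpr int g(int);'):
  /// \code
  ///   sizeof(g(42));            // unevaluated operand      -> Unevaluated
  ///   static_assert(g(42) > 0); // compile-time evaluation  -> ConstantEvaluated
  ///   int n = g(42);            // code may be generated    -> PotentiallyEvaluated
  /// \endcode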
enum class ExpressionEvaluationContext { /// The current expression and its subexpressions occur within an /// unevaluated operand (C++11 [expr]p7), such as the subexpression of /// \c sizeof, where the type of the expression may be significant but /// no code will be generated to evaluate the value of the expression at /// run time. Unevaluated, /// The current expression occurs within a braced-init-list within /// an unevaluated operand. This is mostly like a regular unevaluated /// context, except that we still instantiate constexpr functions that are /// referenced here so that we can perform narrowing checks correctly. UnevaluatedList, /// The current expression occurs within a discarded statement. /// This behaves largely similarly to an unevaluated operand in preventing /// definitions from being required, but not in other ways. DiscardedStatement, /// The current expression occurs within an unevaluated /// operand that unconditionally permits abstract references to /// fields, such as a SIZE operator in MS-style inline assembly. UnevaluatedAbstract, /// The current context is "potentially evaluated" in C++11 terms, /// but the expression is evaluated at compile-time (like the values of /// cases in a switch statement). ConstantEvaluated, /// The current expression is potentially evaluated at run time, /// which means that code may be generated to evaluate the value of the /// expression at run time. PotentiallyEvaluated, /// The current expression is potentially evaluated, but any /// declarations referenced inside that expression are only used if /// in fact the current expression is used. /// /// This value is used when parsing default function arguments, for which /// we would like to provide diagnostics (e.g., passing non-POD arguments /// through varargs) but do not want to mark declarations as "referenced" /// until the default argument is used. PotentiallyEvaluatedIfUsed }; using ImmediateInvocationCandidate = llvm::PointerIntPair<ConstantExpr *, 1>; /// Data structure used to record current or nested /// expression evaluation contexts. struct ExpressionEvaluationContextRecord { /// The expression evaluation context. ExpressionEvaluationContext Context; /// Whether the enclosing context needed a cleanup. CleanupInfo ParentCleanup; /// The number of active cleanup objects when we entered /// this expression evaluation context. unsigned NumCleanupObjects; /// The number of typos encountered during this expression evaluation /// context (i.e. the number of TypoExprs created). unsigned NumTypos; MaybeODRUseExprSet SavedMaybeODRUseExprs; /// The lambdas that are present within this context, if it /// is indeed an unevaluated context. SmallVector<LambdaExpr *, 2> Lambdas; /// The declaration that provides context for lambda expressions /// and block literals if the normal declaration context does not /// suffice, e.g., in a default function argument. Decl *ManglingContextDecl; /// If we are processing a decltype type, a set of call expressions /// for which we have deferred checking the completeness of the return type. SmallVector<CallExpr *, 8> DelayedDecltypeCalls; /// If we are processing a decltype type, a set of temporary binding /// expressions for which we have deferred checking the destructor. SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds; llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs; /// Expressions appearing as the LHS of a volatile assignment in this /// context. 
We produce a warning for these when popping the context if /// they are not discarded-value expressions nor unevaluated operands. SmallVector<Expr*, 2> VolatileAssignmentLHSs; /// Set of candidates for starting an immediate invocation. llvm::SmallVector<ImmediateInvocationCandidate, 4> ImmediateInvocationCandidates; /// Set of DeclRefExprs referencing a consteval function when used in a /// context not already known to be immediately invoked. llvm::SmallPtrSet<DeclRefExpr *, 4> ReferenceToConsteval; /// \brief Describes whether we are in an expression constext which we have /// to handle differently. enum ExpressionKind { EK_Decltype, EK_TemplateArgument, EK_Other } ExprContext; ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context, unsigned NumCleanupObjects, CleanupInfo ParentCleanup, Decl *ManglingContextDecl, ExpressionKind ExprContext) : Context(Context), ParentCleanup(ParentCleanup), NumCleanupObjects(NumCleanupObjects), NumTypos(0), ManglingContextDecl(ManglingContextDecl), ExprContext(ExprContext) {} bool isUnevaluated() const { return Context == ExpressionEvaluationContext::Unevaluated || Context == ExpressionEvaluationContext::UnevaluatedAbstract || Context == ExpressionEvaluationContext::UnevaluatedList; } bool isConstantEvaluated() const { return Context == ExpressionEvaluationContext::ConstantEvaluated; } }; /// A stack of expression evaluation contexts. SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts; /// Emit a warning for all pending noderef expressions that we recorded. void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec); /// Compute the mangling number context for a lambda expression or /// block literal. Also return the extra mangling decl if any. /// /// \param DC - The DeclContext containing the lambda expression or /// block literal. std::tuple<MangleNumberingContext *, Decl *> getCurrentMangleNumberContext(const DeclContext *DC); /// SpecialMemberOverloadResult - The overloading result for a special member /// function. /// /// This is basically a wrapper around PointerIntPair. The lowest bits of the /// integer are used to determine whether overload resolution succeeded. class SpecialMemberOverloadResult { public: enum Kind { NoMemberOrDeleted, Ambiguous, Success }; private: llvm::PointerIntPair<CXXMethodDecl*, 2> Pair; public: SpecialMemberOverloadResult() : Pair() {} SpecialMemberOverloadResult(CXXMethodDecl *MD) : Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {} CXXMethodDecl *getMethod() const { return Pair.getPointer(); } void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); } Kind getKind() const { return static_cast<Kind>(Pair.getInt()); } void setKind(Kind K) { Pair.setInt(K); } }; class SpecialMemberOverloadResultEntry : public llvm::FastFoldingSetNode, public SpecialMemberOverloadResult { public: SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID) : FastFoldingSetNode(ID) {} }; /// A cache of special member function overload resolution results /// for C++ records. llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache; /// A cache of the flags available in enumerations with the flag_bits /// attribute. mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache; /// The kind of translation unit we are processing. /// /// When we're processing a complete translation unit, Sema will perform /// end-of-translation-unit semantic tasks (such as creating /// initializers for tentative definitions in C) once parsing has /// completed. 
Modules and precompiled headers perform different kinds of /// checks. TranslationUnitKind TUKind; llvm::BumpPtrAllocator BumpAlloc; /// The number of SFINAE diagnostics that have been trapped. unsigned NumSFINAEErrors; typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>> UnparsedDefaultArgInstantiationsMap; /// A mapping from parameters with unparsed default arguments to the /// set of instantiations of each parameter. /// /// This mapping is a temporary data structure used when parsing /// nested class templates or nested classes of class templates, /// where we might end up instantiating an inner class before the /// default arguments of its methods have been parsed. UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations; // Contains the locations of the beginning of unparsed default // argument locations. llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs; /// UndefinedInternals - all the used, undefined objects which require a /// definition in this translation unit. llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed; /// Determine if VD, which must be a variable or function, is an external /// symbol that nonetheless can't be referenced from outside this translation /// unit because its type has no linkage and it's not extern "C". bool isExternalWithNoLinkageType(ValueDecl *VD); /// Obtain a sorted list of functions that are undefined but ODR-used. void getUndefinedButUsed( SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined); /// Retrieves list of suspicious delete-expressions that will be checked at /// the end of translation unit. const llvm::MapVector<FieldDecl *, DeleteLocs> & getMismatchingDeleteExpressions() const; typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods; typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool; /// Method Pool - allows efficient lookup when typechecking messages to "id". /// We need to maintain a list, since selectors can have differing signatures /// across classes. In Cocoa, this happens to be extremely uncommon (only 1% /// of selectors are "overloaded"). /// At the head of the list it is recorded whether there were 0, 1, or >= 2 /// methods inside categories with a particular selector. GlobalMethodPool MethodPool; /// Method selectors used in a \@selector expression. Used for implementation /// of -Wselector. llvm::MapVector<Selector, SourceLocation> ReferencedSelectors; /// List of SourceLocations where 'self' is implicitly retained inside a /// block. llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1> ImplicitlyRetainedSelfLocs; /// Kinds of C++ special members. enum CXXSpecialMember { CXXDefaultConstructor, CXXCopyConstructor, CXXMoveConstructor, CXXCopyAssignment, CXXMoveAssignment, CXXDestructor, CXXInvalid }; typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember> SpecialMemberDecl; /// The C++ special members which we are currently in the process of /// declaring. If this process recursively triggers the declaration of the /// same special member, we should act as if it is not yet declared. llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared; /// Kinds of defaulted comparison operator functions. enum class DefaultedComparisonKind : unsigned char { /// This is not a defaultable comparison operator. None, /// This is an operator== that should be implemented as a series of /// subobject comparisons. 
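    /// For example (sketch), the defaulted comparison in
    /// \code
    ///   struct P {
    ///     int x, y;
    ///     bool operator==(const P &) const = default; // compares x, then y
    ///   };
    /// \endcode
    /// falls into this category.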
Equal, /// This is an operator<=> that should be implemented as a series of /// subobject comparisons. ThreeWay, /// This is an operator!= that should be implemented as a rewrite in terms /// of a == comparison. NotEqual, /// This is an <, <=, >, or >= that should be implemented as a rewrite in /// terms of a <=> comparison. Relational, }; /// The function definitions which were renamed as part of typo-correction /// to match their respective declarations. We want to keep track of them /// to ensure that we don't emit a "redefinition" error if we encounter a /// correctly named definition after the renamed definition. llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions; /// Stack of types that correspond to the parameter entities that are /// currently being copy-initialized. Can be empty. llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes; void ReadMethodPool(Selector Sel); void updateOutOfDateSelector(Selector Sel); /// Private Helper predicate to check for 'self'. bool isSelfExpr(Expr *RExpr); bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method); /// Cause the active diagnostic on the DiagosticsEngine to be /// emitted. This is closely coupled to the SemaDiagnosticBuilder class and /// should not be used elsewhere. void EmitCurrentDiagnostic(unsigned DiagID); /// Records and restores the CurFPFeatures state on entry/exit of compound /// statements. class FPFeaturesStateRAII { public: FPFeaturesStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.CurFPFeatures) { OldOverrides = S.FpPragmaStack.CurrentValue; } ~FPFeaturesStateRAII() { S.CurFPFeatures = OldFPFeaturesState; S.FpPragmaStack.CurrentValue = OldOverrides; } FPOptionsOverride getOverrides() { return OldOverrides; } private: Sema& S; FPOptions OldFPFeaturesState; FPOptionsOverride OldOverrides; }; void addImplicitTypedef(StringRef Name, QualType T); bool WarnedStackExhausted = false; public: Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer, TranslationUnitKind TUKind = TU_Complete, CodeCompleteConsumer *CompletionConsumer = nullptr); ~Sema(); /// Perform initialization that occurs after the parser has been /// initialized but before it parses anything. void Initialize(); const LangOptions &getLangOpts() const { return LangOpts; } OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; } FPOptions &getCurFPFeatures() { return CurFPFeatures; } DiagnosticsEngine &getDiagnostics() const { return Diags; } SourceManager &getSourceManager() const { return SourceMgr; } Preprocessor &getPreprocessor() const { return PP; } ASTContext &getASTContext() const { return Context; } ASTConsumer &getASTConsumer() const { return Consumer; } ASTMutationListener *getASTMutationListener() const; ExternalSemaSource* getExternalSource() const { return ExternalSource; } ///Registers an external source. If an external source already exists, /// creates a multiplex external source and appends to it. /// ///\param[in] E - A non-null external sema source. /// void addExternalSource(ExternalSemaSource *E); void PrintStats() const; /// Warn that the stack is nearly exhausted. void warnStackExhausted(SourceLocation Loc); /// Run some code with "sufficient" stack space. (Currently, at least 256K is /// guaranteed). Produces a warning if we're low on stack space and allocates /// more in that case. Use this in code that may recurse deeply (for example, /// in template instantiation) to avoid stack overflow. 
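  /// A minimal usage sketch (the callee is hypothetical):
  /// \code
  ///   runWithSufficientStackSpace(Loc, [&] {
  ///     instantiateDeeplyNestedTemplate(); // work that may recurse deeply
  ///   });
  /// \endcode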
void runWithSufficientStackSpace(SourceLocation Loc, llvm::function_ref<void()> Fn); /// Helper class that creates diagnostics with optional /// template instantiation stacks. /// /// This class provides a wrapper around the basic DiagnosticBuilder /// class that emits diagnostics. ImmediateDiagBuilder is /// responsible for emitting the diagnostic (as DiagnosticBuilder /// does) and, if the diagnostic comes from inside a template /// instantiation, printing the template instantiation stack as /// well. class ImmediateDiagBuilder : public DiagnosticBuilder { Sema &SemaRef; unsigned DiagID; public: ImmediateDiagBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID) : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) {} ImmediateDiagBuilder(DiagnosticBuilder &&DB, Sema &SemaRef, unsigned DiagID) : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) {} // This is a cunning lie. DiagnosticBuilder actually performs move // construction in its copy constructor (but due to varied uses, it's not // possible to conveniently express this as actual move construction). So // the default copy ctor here is fine, because the base class disables the // source anyway, so the user-defined ~ImmediateDiagBuilder is a safe no-op // in that case anwyay. ImmediateDiagBuilder(const ImmediateDiagBuilder &) = default; ~ImmediateDiagBuilder() { // If we aren't active, there is nothing to do. if (!isActive()) return; // Otherwise, we need to emit the diagnostic. First clear the diagnostic // builder itself so it won't emit the diagnostic in its own destructor. // // This seems wasteful, in that as written the DiagnosticBuilder dtor will // do its own needless checks to see if the diagnostic needs to be // emitted. However, because we take care to ensure that the builder // objects never escape, a sufficiently smart compiler will be able to // eliminate that code. Clear(); // Dispatch to Sema to emit the diagnostic. SemaRef.EmitCurrentDiagnostic(DiagID); } /// Teach operator<< to produce an object of the correct type. template <typename T> friend const ImmediateDiagBuilder & operator<<(const ImmediateDiagBuilder &Diag, const T &Value) { const DiagnosticBuilder &BaseDiag = Diag; BaseDiag << Value; return Diag; } // It is necessary to limit this to rvalue reference to avoid calling this // function with a bitfield lvalue argument since non-const reference to // bitfield is not allowed. template <typename T, typename = typename std::enable_if< !std::is_lvalue_reference<T>::value>::type> const ImmediateDiagBuilder &operator<<(T &&V) const { const DiagnosticBuilder &BaseDiag = *this; BaseDiag << std::move(V); return *this; } }; /// A generic diagnostic builder for errors which may or may not be deferred. /// /// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch) /// which are not allowed to appear inside __device__ functions and are /// allowed to appear in __host__ __device__ functions only if the host+device /// function is never codegen'ed. /// /// To handle this, we use the notion of "deferred diagnostics", where we /// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed. /// /// This class lets you emit either a regular diagnostic, a deferred /// diagnostic, or no diagnostic at all, according to an argument you pass to /// its constructor, thus simplifying the process of creating these "maybe /// deferred" diagnostics. class SemaDiagnosticBuilder { public: enum Kind { /// Emit no diagnostics. 
K_Nop, /// Emit the diagnostic immediately (i.e., behave like Sema::Diag()). K_Immediate, /// Emit the diagnostic immediately, and, if it's a warning or error, also /// emit a call stack showing how this function can be reached by an a /// priori known-emitted function. K_ImmediateWithCallStack, /// Create a deferred diagnostic, which is emitted only if the function /// it's attached to is codegen'ed. Also emit a call stack as with /// K_ImmediateWithCallStack. K_Deferred }; SemaDiagnosticBuilder(Kind K, SourceLocation Loc, unsigned DiagID, FunctionDecl *Fn, Sema &S); SemaDiagnosticBuilder(SemaDiagnosticBuilder &&D); SemaDiagnosticBuilder(const SemaDiagnosticBuilder &) = default; ~SemaDiagnosticBuilder(); bool isImmediate() const { return ImmediateDiag.hasValue(); } /// Convertible to bool: True if we immediately emitted an error, false if /// we didn't emit an error or we created a deferred error. /// /// Example usage: /// /// if (SemaDiagnosticBuilder(...) << foo << bar) /// return ExprError(); /// /// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably /// want to use these instead of creating a SemaDiagnosticBuilder yourself. operator bool() const { return isImmediate(); } template <typename T> friend const SemaDiagnosticBuilder & operator<<(const SemaDiagnosticBuilder &Diag, const T &Value) { if (Diag.ImmediateDiag.hasValue()) *Diag.ImmediateDiag << Value; else if (Diag.PartialDiagId.hasValue()) Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second << Value; return Diag; } // It is necessary to limit this to rvalue reference to avoid calling this // function with a bitfield lvalue argument since non-const reference to // bitfield is not allowed. template <typename T, typename = typename std::enable_if< !std::is_lvalue_reference<T>::value>::type> const SemaDiagnosticBuilder &operator<<(T &&V) const { if (ImmediateDiag.hasValue()) *ImmediateDiag << std::move(V); else if (PartialDiagId.hasValue()) S.DeviceDeferredDiags[Fn][*PartialDiagId].second << std::move(V); return *this; } friend const SemaDiagnosticBuilder & operator<<(const SemaDiagnosticBuilder &Diag, const PartialDiagnostic &PD) { if (Diag.ImmediateDiag.hasValue()) PD.Emit(*Diag.ImmediateDiag); else if (Diag.PartialDiagId.hasValue()) Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second = PD; return Diag; } void AddFixItHint(const FixItHint &Hint) const { if (ImmediateDiag.hasValue()) ImmediateDiag->AddFixItHint(Hint); else if (PartialDiagId.hasValue()) S.DeviceDeferredDiags[Fn][*PartialDiagId].second.AddFixItHint(Hint); } friend ExprResult ExprError(const SemaDiagnosticBuilder &) { return ExprError(); } friend StmtResult StmtError(const SemaDiagnosticBuilder &) { return StmtError(); } operator ExprResult() const { return ExprError(); } operator StmtResult() const { return StmtError(); } operator TypeResult() const { return TypeError(); } operator DeclResult() const { return DeclResult(true); } operator MemInitResult() const { return MemInitResult(true); } private: Sema &S; SourceLocation Loc; unsigned DiagID; FunctionDecl *Fn; bool ShowCallStack; // Invariant: At most one of these Optionals has a value. // FIXME: Switch these to a Variant once that exists. llvm::Optional<ImmediateDiagBuilder> ImmediateDiag; llvm::Optional<unsigned> PartialDiagId; }; /// Is the last error level diagnostic immediate. This is used to determined /// whether the next info diagnostic should be immediate. bool IsLastErrorImmediate = true; /// Emit a diagnostic. 
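  /// Typical streaming use (the diagnostic ID shown is a placeholder):
  /// \code
  ///   Diag(D->getLocation(), diag::err_some_diag) << D->getDeclName();
  /// \endcode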
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID, bool DeferHint = false); /// Emit a partial diagnostic. SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic &PD, bool DeferHint = false); /// Build a partial diagnostic. PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h /// Whether uncompilable error has occurred. This includes error happens /// in deferred diagnostics. bool hasUncompilableErrorOccurred() const; bool findMacroSpelling(SourceLocation &loc, StringRef name); /// Get a string to suggest for zero-initialization of a type. std::string getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const; std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const; /// Calls \c Lexer::getLocForEndOfToken() SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0); /// Retrieve the module loader associated with the preprocessor. ModuleLoader &getModuleLoader() const; /// Invent a new identifier for parameters of abbreviated templates. IdentifierInfo * InventAbbreviatedTemplateParameterTypeName(IdentifierInfo *ParamName, unsigned Index); void emitAndClearUnusedLocalTypedefWarnings(); private: /// Function or variable declarations to be checked for whether the deferred /// diagnostics should be emitted. SmallVector<Decl *, 4> DeclsToCheckForDeferredDiags; public: // Emit all deferred diagnostics. void emitDeferredDiags(); enum TUFragmentKind { /// The global module fragment, between 'module;' and a module-declaration. Global, /// A normal translation unit fragment. For a non-module unit, this is the /// entire translation unit. Otherwise, it runs from the module-declaration /// to the private-module-fragment (if any) or the end of the TU (if not). Normal, /// The private module fragment, between 'module :private;' and the end of /// the translation unit. Private }; void ActOnStartOfTranslationUnit(); void ActOnEndOfTranslationUnit(); void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind); void CheckDelegatingCtorCycles(); Scope *getScopeForContext(DeclContext *Ctx); void PushFunctionScope(); void PushBlockScope(Scope *BlockScope, BlockDecl *Block); sema::LambdaScopeInfo *PushLambdaScope(); /// This is used to inform Sema what the current TemplateParameterDepth /// is during Parsing. Currently it is used to pass on the depth /// when parsing generic lambda 'auto' parameters. void RecordParsingTemplateParameterDepth(unsigned Depth); void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD, RecordDecl *RD, CapturedRegionKind K, unsigned OpenMPCaptureLevel = 0); /// Custom deleter to allow FunctionScopeInfos to be kept alive for a short /// time after they've been popped. class PoppedFunctionScopeDeleter { Sema *Self; public: explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {} void operator()(sema::FunctionScopeInfo *Scope) const; }; using PoppedFunctionScopePtr = std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>; PoppedFunctionScopePtr PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr, const Decl *D = nullptr, QualType BlockType = QualType()); sema::FunctionScopeInfo *getCurFunction() const { return FunctionScopes.empty() ? 
nullptr : FunctionScopes.back(); } sema::FunctionScopeInfo *getEnclosingFunction() const; void setFunctionHasBranchIntoScope(); void setFunctionHasBranchProtectedScope(); void setFunctionHasIndirectGoto(); void PushCompoundScope(bool IsStmtExpr); void PopCompoundScope(); sema::CompoundScopeInfo &getCurCompoundScope() const; bool hasAnyUnrecoverableErrorsInThisFunction() const; /// Retrieve the current block, if any. sema::BlockScopeInfo *getCurBlock(); /// Get the innermost lambda enclosing the current location, if any. This /// looks through intervening non-lambda scopes such as local functions and /// blocks. sema::LambdaScopeInfo *getEnclosingLambda() const; /// Retrieve the current lambda scope info, if any. /// \param IgnoreNonLambdaCapturingScope true if should find the top-most /// lambda scope info ignoring all inner capturing scopes that are not /// lambda scopes. sema::LambdaScopeInfo * getCurLambda(bool IgnoreNonLambdaCapturingScope = false); /// Retrieve the current generic lambda info, if any. sema::LambdaScopeInfo *getCurGenericLambda(); /// Retrieve the current captured region, if any. sema::CapturedRegionScopeInfo *getCurCapturedRegion(); /// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; } /// Called before parsing a function declarator belonging to a function /// declaration. void ActOnStartFunctionDeclarationDeclarator(Declarator &D, unsigned TemplateParameterDepth); /// Called after parsing a function declarator belonging to a function /// declaration. void ActOnFinishFunctionDeclarationDeclarator(Declarator &D); void ActOnComment(SourceRange Comment); //===--------------------------------------------------------------------===// // Type Analysis / Processing: SemaType.cpp. // QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs, const DeclSpec *DS = nullptr); QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA, const DeclSpec *DS = nullptr); QualType BuildPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildReferenceType(QualType T, bool LValueRef, SourceLocation Loc, DeclarationName Entity); QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM, Expr *ArraySize, unsigned Quals, SourceRange Brackets, DeclarationName Entity); QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc); QualType BuildExtVectorType(QualType T, Expr *ArraySize, SourceLocation AttrLoc); QualType BuildMatrixType(QualType T, Expr *NumRows, Expr *NumColumns, SourceLocation AttrLoc); QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace, SourceLocation AttrLoc); /// Same as above, but constructs the AddressSpace index if not provided. QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace, SourceLocation AttrLoc); bool CheckQualifiedFunctionForTypeId(QualType T, SourceLocation Loc); bool CheckFunctionReturnType(QualType T, SourceLocation Loc); /// Build a function type. /// /// This routine checks the function type according to C++ rules and /// under the assumption that the result type and parameter types have /// just been instantiated from a template. It therefore duplicates /// some of the behavior of GetTypeForDeclarator, but in a much /// simpler form that is only suitable for this narrow use case. /// /// \param T The return type of the function. /// /// \param ParamTypes The parameter types of the function. 
This array /// will be modified to account for adjustments to the types of the /// function parameters. /// /// \param Loc The location of the entity whose type involves this /// function type or, if there is no such entity, the location of the /// type that will have function type. /// /// \param Entity The name of the entity that involves the function /// type, if known. /// /// \param EPI Extra information about the function type. Usually this will /// be taken from an existing function with the same prototype. /// /// \returns A suitable function type, if there are no errors. The /// unqualified type will always be a FunctionProtoType. /// Otherwise, returns a NULL type. QualType BuildFunctionType(QualType T, MutableArrayRef<QualType> ParamTypes, SourceLocation Loc, DeclarationName Entity, const FunctionProtoType::ExtProtoInfo &EPI); QualType BuildMemberPointerType(QualType T, QualType Class, SourceLocation Loc, DeclarationName Entity); QualType BuildBlockPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildParenType(QualType T); QualType BuildAtomicType(QualType T, SourceLocation Loc); QualType BuildReadPipeType(QualType T, SourceLocation Loc); QualType BuildWritePipeType(QualType T, SourceLocation Loc); QualType BuildExtIntType(bool IsUnsigned, Expr *BitWidth, SourceLocation Loc); TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S); TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy); /// Package the given type and TSI into a ParsedType. ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo); DeclarationNameInfo GetNameForDeclarator(Declarator &D); DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name); static QualType GetTypeFromParser(ParsedType Ty, TypeSourceInfo **TInfo = nullptr); CanThrowResult canThrow(const Stmt *E); /// Determine whether the callee of a particular function call can throw. /// E, D and Loc are all optional. static CanThrowResult canCalleeThrow(Sema &S, const Expr *E, const Decl *D, SourceLocation Loc = SourceLocation()); const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc, const FunctionProtoType *FPT); void UpdateExceptionSpec(FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI); bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range); bool CheckDistantExceptionSpec(QualType T); bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New); bool CheckEquivalentExceptionSpec( const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool CheckEquivalentExceptionSpec( const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID, const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool handlerCanCatch(QualType HandlerType, QualType ExceptionType); bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID, const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const PartialDiagnostic &NoThrowDiagID, const FunctionProtoType *Superset, SourceLocation SuperLoc, const FunctionProtoType *Subset, SourceLocation SubLoc); bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const FunctionProtoType *Target, SourceLocation TargetLoc, const FunctionProtoType *Source, SourceLocation SourceLoc); TypeResult ActOnTypeName(Scope *S, Declarator &D); /// The parser has parsed the context-sensitive type 'instancetype' /// in an Objective-C message declaration. 
Return the appropriate type. ParsedType ActOnObjCInstanceType(SourceLocation Loc); /// Abstract class used to diagnose incomplete types. struct TypeDiagnoser { TypeDiagnoser() {} virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0; virtual ~TypeDiagnoser() {} }; static int getPrintable(int I) { return I; } static unsigned getPrintable(unsigned I) { return I; } static bool getPrintable(bool B) { return B; } static const char * getPrintable(const char *S) { return S; } static StringRef getPrintable(StringRef S) { return S; } static const std::string &getPrintable(const std::string &S) { return S; } static const IdentifierInfo *getPrintable(const IdentifierInfo *II) { return II; } static DeclarationName getPrintable(DeclarationName N) { return N; } static QualType getPrintable(QualType T) { return T; } static SourceRange getPrintable(SourceRange R) { return R; } static SourceRange getPrintable(SourceLocation L) { return L; } static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); } static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();} template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser { protected: unsigned DiagID; std::tuple<const Ts &...> Args; template <std::size_t... Is> void emit(const SemaDiagnosticBuilder &DB, std::index_sequence<Is...>) const { // Apply all tuple elements to the builder in order. bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...}; (void)Dummy; } public: BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args) : TypeDiagnoser(), DiagID(DiagID), Args(Args...) { assert(DiagID != 0 && "no diagnostic for type diagnoser"); } void diagnose(Sema &S, SourceLocation Loc, QualType T) override { const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID); emit(DB, std::index_sequence_for<Ts...>()); DB << T; } }; /// Do a check to make sure \p Name looks like a legal argument for the /// swift_name attribute applied to decl \p D. Raise a diagnostic if the name /// is invalid for the given declaration. /// /// \p AL is used to provide caret diagnostics in case of a malformed name. /// /// \returns true if the name is a valid swift name for \p D, false otherwise. bool DiagnoseSwiftName(Decl *D, StringRef Name, SourceLocation Loc, const ParsedAttr &AL, bool IsAsync); /// A derivative of BoundTypeDiagnoser for which the diagnostic's type /// parameter is preceded by a 0/1 enum that is 1 if the type is sizeless. /// For example, a diagnostic with no other parameters would generally have /// the form "...%select{incomplete|sizeless}0 type %1...". template <typename... Ts> class SizelessTypeDiagnoser : public BoundTypeDiagnoser<Ts...> { public: SizelessTypeDiagnoser(unsigned DiagID, const Ts &... Args) : BoundTypeDiagnoser<Ts...>(DiagID, Args...) {} void diagnose(Sema &S, SourceLocation Loc, QualType T) override { const SemaDiagnosticBuilder &DB = S.Diag(Loc, this->DiagID); this->emit(DB, std::index_sequence_for<Ts...>()); DB << T->isSizelessType() << T; } }; enum class CompleteTypeKind { /// Apply the normal rules for complete types. In particular, /// treat all sizeless types as incomplete. Normal, /// Relax the normal rules for complete types so that they include /// sizeless built-in types. AcceptSizeless, // FIXME: Eventually we should flip the default to Normal and opt in // to AcceptSizeless rather than opt out of it. Default = AcceptSizeless }; private: /// Methods for marking which expressions involve dereferencing a pointer /// marked with the 'noderef' attribute. 
Expressions are checked bottom up as /// they are parsed, meaning that a noderef pointer may not be accessed. For /// example, in `&*p` where `p` is a noderef pointer, we will first parse the /// `*p`, but need to check that `address of` is called on it. This requires /// keeping a container of all pending expressions and checking whether their /// addresses are eventually taken. void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E); void CheckAddressOfNoDeref(const Expr *E); void CheckMemberAccessOfNoDeref(const MemberExpr *E); bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T, CompleteTypeKind Kind, TypeDiagnoser *Diagnoser); struct ModuleScope { SourceLocation BeginLoc; clang::Module *Module = nullptr; bool ModuleInterface = false; bool ImplicitGlobalModuleFragment = false; VisibleModuleSet OuterVisibleModules; }; /// The modules we're currently parsing. llvm::SmallVector<ModuleScope, 16> ModuleScopes; /// Namespace definitions that we will export when they finish. llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces; /// Get the module whose scope we are currently within. Module *getCurrentModule() const { return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module; } VisibleModuleSet VisibleModules; public: /// Get the module owning an entity. Module *getOwningModule(const Decl *Entity) { return Entity->getOwningModule(); } /// Make a merged definition of an existing hidden definition \p ND /// visible at the specified location. void makeMergedDefinitionVisible(NamedDecl *ND); bool isModuleVisible(const Module *M, bool ModulePrivate = false); // When loading a non-modular PCH file, this is used to restore module // visibility. void makeModuleVisible(Module *Mod, SourceLocation ImportLoc) { VisibleModules.setVisible(Mod, ImportLoc); } /// Determine whether a declaration is visible to name lookup. bool isVisible(const NamedDecl *D) { return D->isUnconditionallyVisible() || isVisibleSlow(D); } /// Determine whether any declaration of an entity is visible. bool hasVisibleDeclaration(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr) { return isVisible(D) || hasVisibleDeclarationSlow(D, Modules); } bool hasVisibleDeclarationSlow(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules); bool hasVisibleMergedDefinition(NamedDecl *Def); bool hasMergedDefinitionInCurrentModule(NamedDecl *Def); /// Determine if \p D and \p Suggested have a structurally compatible /// layout as described in C11 6.2.7/1. bool hasStructuralCompatLayout(Decl *D, Decl *Suggested); /// Determine if \p D has a visible definition. If not, suggest a declaration /// that should be made visible to expose the definition. bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested, bool OnlyNeedComplete = false); bool hasVisibleDefinition(const NamedDecl *D) { NamedDecl *Hidden; return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden); } /// Determine if the template parameter \p D has a visible default argument. bool hasVisibleDefaultArgument(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if there is a visible declaration of \p D that is an explicit /// specialization declaration for a specialization of a template. (For a /// member specialization, use hasVisibleMemberSpecialization.)
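// Illustrative sketch (not part of Sema's interface) of the source patterns
// the 'noderef' checks declared above are concerned with; the variable names
// are made up:
//
//   int __attribute__((noderef)) *P;
//   int *Q = &*P;  // OK: '&*P' never actually dereferences P.
//   int  X = *P;   // Warns: dereferencing a pointer marked 'noderef'.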
bool hasVisibleExplicitSpecialization( const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if there is a visible declaration of \p D that is a member /// specialization declaration (as opposed to an instantiated declaration). bool hasVisibleMemberSpecialization( const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if \p A and \p B are equivalent internal linkage declarations /// from different modules, and thus an ambiguity error can be downgraded to /// an extension warning. bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A, const NamedDecl *B); void diagnoseEquivalentInternalLinkageDeclarations( SourceLocation Loc, const NamedDecl *D, ArrayRef<const NamedDecl *> Equiv); bool isUsualDeallocationFunction(const CXXMethodDecl *FD); bool isCompleteType(SourceLocation Loc, QualType T, CompleteTypeKind Kind = CompleteTypeKind::Default) { return !RequireCompleteTypeImpl(Loc, T, Kind, nullptr); } bool RequireCompleteType(SourceLocation Loc, QualType T, CompleteTypeKind Kind, TypeDiagnoser &Diagnoser); bool RequireCompleteType(SourceLocation Loc, QualType T, CompleteTypeKind Kind, unsigned DiagID); bool RequireCompleteType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser) { return RequireCompleteType(Loc, T, CompleteTypeKind::Default, Diagnoser); } bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID) { return RequireCompleteType(Loc, T, CompleteTypeKind::Default, DiagID); } template <typename... Ts> bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteType(Loc, T, Diagnoser); } template <typename... Ts> bool RequireCompleteSizedType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &... Args) { SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteType(Loc, T, CompleteTypeKind::Normal, Diagnoser); } /// Get the type of expression E, triggering instantiation to complete the /// type if necessary -- that is, if the expression refers to a templated /// static data member of incomplete array type. /// /// May still return an incomplete type if instantiation was not possible or /// if the type is incomplete for a different reason. Use /// RequireCompleteExprType instead if a diagnostic is expected for an /// incomplete expression type. QualType getCompletedType(Expr *E); void completeExprArrayBound(Expr *E); bool RequireCompleteExprType(Expr *E, CompleteTypeKind Kind, TypeDiagnoser &Diagnoser); bool RequireCompleteExprType(Expr *E, unsigned DiagID); template <typename... Ts> bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteExprType(E, CompleteTypeKind::Default, Diagnoser); } template <typename... Ts> bool RequireCompleteSizedExprType(Expr *E, unsigned DiagID, const Ts &... Args) { SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteExprType(E, CompleteTypeKind::Normal, Diagnoser); } bool RequireLiteralType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID); template <typename... 
Ts> bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireLiteralType(Loc, T, Diagnoser); } QualType getElaboratedType(ElaboratedTypeKeyword Keyword, const CXXScopeSpec &SS, QualType T, TagDecl *OwnedTagDecl = nullptr); QualType BuildTypeofExprType(Expr *E, SourceLocation Loc); /// If AsUnevaluated is false, E is treated as though it were an evaluated /// context, such as when building a type for decltype(auto). QualType BuildDecltypeType(Expr *E, SourceLocation Loc, bool AsUnevaluated = true); QualType BuildUnaryTransformType(QualType BaseType, UnaryTransformType::UTTKind UKind, SourceLocation Loc); //===--------------------------------------------------------------------===// // Symbol table / Decl tracking callbacks: SemaDecl.cpp. // struct SkipBodyInfo { SkipBodyInfo() : ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr), New(nullptr) {} bool ShouldSkip; bool CheckSameAsPrevious; NamedDecl *Previous; NamedDecl *New; }; DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr); void DiagnoseUseOfUnimplementedSelectors(); bool isSimpleTypeSpecifier(tok::TokenKind Kind) const; ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec *SS = nullptr, bool isClassName = false, bool HasTrailingDot = false, ParsedType ObjectType = nullptr, bool IsCtorOrDtorName = false, bool WantNontrivialTypeSourceInfo = false, bool IsClassTemplateDeductionContext = true, IdentifierInfo **CorrectedII = nullptr); TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S); bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S); void DiagnoseUnknownTypeName(IdentifierInfo *&II, SourceLocation IILoc, Scope *S, CXXScopeSpec *SS, ParsedType &SuggestedType, bool IsTemplateName = false); /// Attempt to behave like MSVC in situations where lookup of an unqualified /// type name has failed in a dependent context. In these situations, we /// automatically form a DependentTypeName that will retry lookup in a related /// scope during instantiation. ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II, SourceLocation NameLoc, bool IsTemplateTypeArg); /// Describes the result of the name lookup and resolution performed /// by \c ClassifyName(). enum NameClassificationKind { /// This name is not a type or template in this context, but might be /// something else. NC_Unknown, /// Classification failed; an error has been produced. NC_Error, /// The name has been typo-corrected to a keyword. NC_Keyword, /// The name was classified as a type. NC_Type, /// The name was classified as a specific non-type, non-template /// declaration. ActOnNameClassifiedAsNonType should be called to /// convert the declaration to an expression. NC_NonType, /// The name was classified as an ADL-only function name. /// ActOnNameClassifiedAsUndeclaredNonType should be called to convert the /// result to an expression. NC_UndeclaredNonType, /// The name denotes a member of a dependent type that could not be /// resolved. ActOnNameClassifiedAsDependentNonType should be called to /// convert the result to an expression. NC_DependentNonType, /// The name was classified as an overload set, and an expression /// representing that overload set has been formed. /// ActOnNameClassifiedAsOverloadSet should be called to form a suitable /// expression referencing the overload set. NC_OverloadSet, /// The name was classified as a template whose specializations are types. 
NC_TypeTemplate, /// The name was classified as a variable template name. NC_VarTemplate, /// The name was classified as a function template name. NC_FunctionTemplate, /// The name was classified as an ADL-only function template name. NC_UndeclaredTemplate, /// The name was classified as a concept name. NC_Concept, }; class NameClassification { NameClassificationKind Kind; union { ExprResult Expr; NamedDecl *NonTypeDecl; TemplateName Template; ParsedType Type; }; explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {} public: NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {} NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {} static NameClassification Error() { return NameClassification(NC_Error); } static NameClassification Unknown() { return NameClassification(NC_Unknown); } static NameClassification OverloadSet(ExprResult E) { NameClassification Result(NC_OverloadSet); Result.Expr = E; return Result; } static NameClassification NonType(NamedDecl *D) { NameClassification Result(NC_NonType); Result.NonTypeDecl = D; return Result; } static NameClassification UndeclaredNonType() { return NameClassification(NC_UndeclaredNonType); } static NameClassification DependentNonType() { return NameClassification(NC_DependentNonType); } static NameClassification TypeTemplate(TemplateName Name) { NameClassification Result(NC_TypeTemplate); Result.Template = Name; return Result; } static NameClassification VarTemplate(TemplateName Name) { NameClassification Result(NC_VarTemplate); Result.Template = Name; return Result; } static NameClassification FunctionTemplate(TemplateName Name) { NameClassification Result(NC_FunctionTemplate); Result.Template = Name; return Result; } static NameClassification Concept(TemplateName Name) { NameClassification Result(NC_Concept); Result.Template = Name; return Result; } static NameClassification UndeclaredTemplate(TemplateName Name) { NameClassification Result(NC_UndeclaredTemplate); Result.Template = Name; return Result; } NameClassificationKind getKind() const { return Kind; } ExprResult getExpression() const { assert(Kind == NC_OverloadSet); return Expr; } ParsedType getType() const { assert(Kind == NC_Type); return Type; } NamedDecl *getNonTypeDecl() const { assert(Kind == NC_NonType); return NonTypeDecl; } TemplateName getTemplateName() const { assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate || Kind == NC_VarTemplate || Kind == NC_Concept || Kind == NC_UndeclaredTemplate); return Template; } TemplateNameKind getTemplateNameKind() const { switch (Kind) { case NC_TypeTemplate: return TNK_Type_template; case NC_FunctionTemplate: return TNK_Function_template; case NC_VarTemplate: return TNK_Var_template; case NC_Concept: return TNK_Concept_template; case NC_UndeclaredTemplate: return TNK_Undeclared_template; default: llvm_unreachable("unsupported name classification."); } } }; /// Perform name lookup on the given name, classifying it based on /// the results of name lookup and the following token. /// /// This routine is used by the parser to resolve identifiers and help direct /// parsing. When the identifier cannot be found, this routine will attempt /// to correct the typo and classify based on the resulting name. /// /// \param S The scope in which we're performing name lookup. /// /// \param SS The nested-name-specifier that precedes the name. /// /// \param Name The identifier. If typo correction finds an alternative name, /// this pointer parameter will be updated accordingly. 
/// /// \param NameLoc The location of the identifier. /// /// \param NextToken The token following the identifier. Used to help /// disambiguate the name. /// /// \param CCC The correction callback, if typo correction is desired. NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name, SourceLocation NameLoc, const Token &NextToken, CorrectionCandidateCallback *CCC = nullptr); /// Act on the result of classifying a name as an undeclared (ADL-only) /// non-type declaration. ExprResult ActOnNameClassifiedAsUndeclaredNonType(IdentifierInfo *Name, SourceLocation NameLoc); /// Act on the result of classifying a name as an undeclared member of a /// dependent base class. ExprResult ActOnNameClassifiedAsDependentNonType(const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, bool IsAddressOfOperand); /// Act on the result of classifying a name as a specific non-type /// declaration. ExprResult ActOnNameClassifiedAsNonType(Scope *S, const CXXScopeSpec &SS, NamedDecl *Found, SourceLocation NameLoc, const Token &NextToken); /// Act on the result of classifying a name as an overload set. ExprResult ActOnNameClassifiedAsOverloadSet(Scope *S, Expr *OverloadSet); /// Describes the detailed kind of a template name. Used in diagnostics. enum class TemplateNameKindForDiagnostics { ClassTemplate, FunctionTemplate, VarTemplate, AliasTemplate, TemplateTemplateParam, Concept, DependentTemplate }; TemplateNameKindForDiagnostics getTemplateNameKindForDiagnostics(TemplateName Name); /// Determine whether it's plausible that E was intended to be a /// template-name. bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) { if (!getLangOpts().CPlusPlus || E.isInvalid()) return false; Dependent = false; if (auto *DRE = dyn_cast<DeclRefExpr>(E.get())) return !DRE->hasExplicitTemplateArgs(); if (auto *ME = dyn_cast<MemberExpr>(E.get())) return !ME->hasExplicitTemplateArgs(); Dependent = true; if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get())) return !DSDRE->hasExplicitTemplateArgs(); if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get())) return !DSME->hasExplicitTemplateArgs(); // Any additional cases recognized here should also be handled by // diagnoseExprIntendedAsTemplateName. 
return false; } void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName, SourceLocation Less, SourceLocation Greater); Decl *ActOnDeclarator(Scope *S, Declarator &D); NamedDecl *HandleDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists); void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S); bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info); bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC, DeclarationName Name, SourceLocation Loc, bool IsTemplateId); void diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals, SourceLocation FallbackLoc, SourceLocation ConstQualLoc = SourceLocation(), SourceLocation VolatileQualLoc = SourceLocation(), SourceLocation RestrictQualLoc = SourceLocation(), SourceLocation AtomicQualLoc = SourceLocation(), SourceLocation UnalignedQualLoc = SourceLocation()); static bool adjustContextForLocalExternDecl(DeclContext *&DC); void DiagnoseFunctionSpecifiers(const DeclSpec &DS); NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D, const LookupResult &R); NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R); NamedDecl *getShadowedDeclaration(const BindingDecl *D, const LookupResult &R); void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl, const LookupResult &R); void CheckShadow(Scope *S, VarDecl *D); /// Warn if 'E', which is an expression that is about to be modified, refers /// to a shadowing declaration. void CheckShadowingDeclModification(Expr *E, SourceLocation Loc); void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI); private: /// Map of current shadowing declarations to shadowed declarations. Warn if /// it looks like the user is trying to modify the shadowing declaration. llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls; public: void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange); void handleTagNumbering(const TagDecl *Tag, Scope *TagScope); void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec, TypedefNameDecl *NewTD); void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D); NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous); NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D, LookupResult &Previous, bool &Redeclaration); NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope, ArrayRef<BindingDecl *> Bindings = None); NamedDecl * ActOnDecompositionDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists); // Returns true if the variable declaration is a redeclaration bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous); void CheckVariableDeclarationType(VarDecl *NewVD); bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit, Expr *Init); void CheckCompleteVariableDeclaration(VarDecl *VD); void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD); void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D); NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope); bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD); enum class CheckConstexprKind { /// Diagnose issues that are non-constant or that are extensions. 
Diagnose, /// Identify whether this function satisfies the formal rules for constexpr /// functions in the current language mode (with no extensions). CheckValid }; bool CheckConstexprFunctionDefinition(const FunctionDecl *FD, CheckConstexprKind Kind); void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD); void FindHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); void NoteHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); // Returns true if the function declaration is a redeclaration bool CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD, LookupResult &Previous, bool IsMemberSpecialization); bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl); bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD, QualType NewT, QualType OldT); void CheckMain(FunctionDecl *FD, const DeclSpec &D); void CheckMSVCRTEntryPoint(FunctionDecl *FD); Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD, bool IsDefinition); void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D); Decl *ActOnParamDeclarator(Scope *S, Declarator &D); ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC, SourceLocation Loc, QualType T); ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc, SourceLocation NameLoc, IdentifierInfo *Name, QualType T, TypeSourceInfo *TSInfo, StorageClass SC); void ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc, Expr *defarg); void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc, SourceLocation ArgLoc); void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc); ExprResult ConvertParamDefaultArgument(const ParmVarDecl *Param, Expr *DefaultArg, SourceLocation EqualLoc); void SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg, SourceLocation EqualLoc); // Contexts where using non-trivial C union types can be disallowed. This is // passed to err_non_trivial_c_union_in_invalid_context. enum NonTrivialCUnionContext { // Function parameter. NTCUC_FunctionParam, // Function return. NTCUC_FunctionReturn, // Default-initialized object. NTCUC_DefaultInitializedObject, // Variable with automatic storage duration. NTCUC_AutoVar, // Initializer expression that might copy from another object. NTCUC_CopyInit, // Assignment. NTCUC_Assignment, // Compound literal. NTCUC_CompoundLiteral, // Block capture. NTCUC_BlockCapture, // lvalue-to-rvalue conversion of volatile type. NTCUC_LValueToRValueVolatile, }; /// Emit diagnostics if the initializer or any of its explicit or /// implicitly-generated subexpressions require copying or /// default-initializing a type that is or contains a C union type that is /// non-trivial to copy or default-initialize. void checkNonTrivialCUnionInInitializer(const Expr *Init, SourceLocation Loc); // These flags are passed to checkNonTrivialCUnion. enum NonTrivialCUnionKind { NTCUK_Init = 0x1, NTCUK_Destruct = 0x2, NTCUK_Copy = 0x4, }; /// Emit diagnostics if a non-trivial C union type or a struct that contains /// a non-trivial C union is used in an invalid context.
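/// For example (an illustrative sketch, not an exhaustive definition): under
/// ARC, a C union such as
/// \code
///   union U { __strong id Obj; int I; };
/// \endcode
/// is non-trivial to default-initialize, copy, and destruct, so uses of 'U' in
/// the NonTrivialCUnionContext positions above are diagnosed in C.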
void checkNonTrivialCUnion(QualType QT, SourceLocation Loc, NonTrivialCUnionContext UseContext, unsigned NonTrivialKind); void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit); void ActOnUninitializedDecl(Decl *dcl); void ActOnInitializerError(Decl *Dcl); void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc); void ActOnCXXForRangeDecl(Decl *D); StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc, IdentifierInfo *Ident, ParsedAttributes &Attrs, SourceLocation AttrEnd); void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc); void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc); void CheckStaticLocalForDllExport(VarDecl *VD); void FinalizeDeclaration(Decl *D); DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS, ArrayRef<Decl *> Group); DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group); /// Should be called on all declarations that might have attached /// documentation comments. void ActOnDocumentableDecl(Decl *D); void ActOnDocumentableDecls(ArrayRef<Decl *> Group); void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D, SourceLocation LocAfterDecls); void CheckForFunctionRedefinition( FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D, SkipBodyInfo *SkipBody = nullptr); void ActOnStartTrailingRequiresClause(Scope *S, Declarator &D); ExprResult ActOnFinishTrailingRequiresClause(ExprResult ConstraintExpr); ExprResult ActOnRequiresClause(ExprResult ConstraintExpr); void ActOnStartOfObjCMethodDef(Scope *S, Decl *D); bool isObjCMethodDecl(Decl *D) { return D && isa<ObjCMethodDecl>(D); } /// Determine whether we can delay parsing the body of a function or /// function template until it is used, assuming we don't care about emitting /// code for that function. /// /// This will be \c false if we may need the body of the function in the /// middle of parsing an expression (where it's impractical to switch to /// parsing a different function), for instance, if it's constexpr in C++11 /// or has an 'auto' return type in C++14. These cases are essentially bugs. bool canDelayFunctionBody(const Declarator &D); /// Determine whether we can skip parsing the body of a function /// definition, assuming we don't care about analyzing its body or emitting /// code for that function. /// /// This will be \c false only if we may need the body of the function in /// order to parse the rest of the program (for instance, if it is /// \c constexpr in C++11 or has an 'auto' return type in C++14). bool canSkipFunctionBody(Decl *D); void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation); Decl *ActOnSkippedFunctionBody(Decl *Decl); void ActOnFinishInlineFunctionDef(FunctionDecl *D); /// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an /// attribute for which parsing is delayed. void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs); /// Diagnose any unused parameters in the given sequence of /// ParmVarDecl pointers. 
void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters); /// Diagnose whether the size of parameters or return value of a /// function or obj-c method definition is pass-by-value and larger than a /// specified threshold. void DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters, QualType ReturnTy, NamedDecl *D); void DiagnoseInvalidJumps(Stmt *Body); Decl *ActOnFileScopeAsmDecl(Expr *expr, SourceLocation AsmLoc, SourceLocation RParenLoc); /// Handle a C++11 empty-declaration and attribute-declaration. Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList, SourceLocation SemiLoc); enum class ModuleDeclKind { Interface, ///< 'export module X;' Implementation, ///< 'module X;' }; /// The parser has processed a module-declaration that begins the definition /// of a module interface or implementation. DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc, SourceLocation ModuleLoc, ModuleDeclKind MDK, ModuleIdPath Path, bool IsFirstDecl); /// The parser has processed a global-module-fragment declaration that begins /// the definition of the global module fragment of the current module unit. /// \param ModuleLoc The location of the 'module' keyword. DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc); /// The parser has processed a private-module-fragment declaration that begins /// the definition of the private module fragment of the current module unit. /// \param ModuleLoc The location of the 'module' keyword. /// \param PrivateLoc The location of the 'private' keyword. DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc, SourceLocation PrivateLoc); /// The parser has processed a module import declaration. /// /// \param StartLoc The location of the first token in the declaration. This /// could be the location of an '@', 'export', or 'import'. /// \param ExportLoc The location of the 'export' keyword, if any. /// \param ImportLoc The location of the 'import' keyword. /// \param Path The module access path. DeclResult ActOnModuleImport(SourceLocation StartLoc, SourceLocation ExportLoc, SourceLocation ImportLoc, ModuleIdPath Path); DeclResult ActOnModuleImport(SourceLocation StartLoc, SourceLocation ExportLoc, SourceLocation ImportLoc, Module *M, ModuleIdPath Path = {}); /// The parser has processed a module import translated from a /// #include or similar preprocessing directive. void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod); void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod); /// The parser has entered a submodule. void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod); /// The parser has left a submodule. void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod); /// Create an implicit import of the given module at the given /// source location, for error recovery, if possible. /// /// This routine is typically used when an entity found by name lookup /// is actually hidden within a module that we know about but the user /// has forgotten to import. void createImplicitModuleImportForErrorRecovery(SourceLocation Loc, Module *Mod); /// Kinds of missing import. Note, the values of these enumerators correspond /// to %select values in diagnostics. enum class MissingImportKind { Declaration, Definition, DefaultArgument, ExplicitSpecialization, PartialSpecialization }; /// Diagnose that the specified declaration needs to be visible but /// isn't, and suggest a module import that would resolve the problem.
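/// A typical call site looks roughly like this (an illustrative sketch; the
/// surrounding lookup code and 'UseLoc' are assumed, not part of this API):
/// \code
///   if (!isVisible(D))
///     diagnoseMissingImport(UseLoc, D, MissingImportKind::Declaration);
/// \endcode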
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, MissingImportKind MIK, bool Recover = true); void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, SourceLocation DeclLoc, ArrayRef<Module *> Modules, MissingImportKind MIK, bool Recover); Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc, SourceLocation LBraceLoc); Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl, SourceLocation RBraceLoc); /// We've found a use of a templated declaration that would trigger an /// implicit instantiation. Check that any relevant explicit specializations /// and partial specializations are visible, and diagnose if not. void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec); /// Retrieve a suitable printing policy for diagnostics. PrintingPolicy getPrintingPolicy() const { return getPrintingPolicy(Context, PP); } /// Retrieve a suitable printing policy for diagnostics. static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx, const Preprocessor &PP); /// Scope actions. void ActOnPopScope(SourceLocation Loc, Scope *S); void ActOnTranslationUnitScope(Scope *S); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, RecordDecl *&AnonRecord); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, MultiTemplateParamsArg TemplateParams, bool IsExplicitInstantiation, RecordDecl *&AnonRecord); Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS, AccessSpecifier AS, RecordDecl *Record, const PrintingPolicy &Policy); Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS, RecordDecl *Record); /// Common ways to introduce type names without a tag for use in diagnostics. /// Keep in sync with err_tag_reference_non_tag. enum NonTagKind { NTK_NonStruct, NTK_NonClass, NTK_NonUnion, NTK_NonEnum, NTK_Typedef, NTK_TypeAlias, NTK_Template, NTK_TypeAliasTemplate, NTK_TemplateTemplateArgument, }; /// Given a non-tag type declaration, returns an enum useful for indicating /// what kind of non-tag type this is. 
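/// For example (illustrative), a TypedefDecl maps to NTK_Typedef and a
/// TypeAliasDecl to NTK_TypeAlias, selecting the corresponding wording in
/// err_tag_reference_non_tag.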
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK); bool isAcceptableTagRedeclaration(const TagDecl *Previous, TagTypeKind NewTag, bool isDefinition, SourceLocation NewTagLoc, const IdentifierInfo *Name); enum TagUseKind { TUK_Reference, // Reference to a tag: 'struct foo *X;' TUK_Declaration, // Fwd decl of a tag: 'struct foo;' TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;' TUK_Friend // Friend declaration: 'friend struct foo;' }; Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, AccessSpecifier AS, SourceLocation ModulePrivateLoc, MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl, bool &IsDependent, SourceLocation ScopedEnumKWLoc, bool ScopedEnumUsesClassTag, TypeResult UnderlyingType, bool IsTypeSpecifier, bool IsTemplateParamOrArg, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc, unsigned TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, MultiTemplateParamsArg TempParamLists); TypeResult ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK, const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation TagLoc, SourceLocation NameLoc); void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart, IdentifierInfo *ClassName, SmallVectorImpl<Decl *> &Decls); Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth); FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS); MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS, const ParsedAttr &MSPropertyAttr); FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T, TypeSourceInfo *TInfo, RecordDecl *Record, SourceLocation Loc, bool Mutable, Expr *BitfieldWidth, InClassInitStyle InitStyle, SourceLocation TSSL, AccessSpecifier AS, NamedDecl *PrevDecl, Declarator *D = nullptr); bool CheckNontrivialField(FieldDecl *FD); void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM); enum TrivialABIHandling { /// The triviality of a method unaffected by "trivial_abi". TAH_IgnoreTrivialABI, /// The triviality of a method affected by "trivial_abi". TAH_ConsiderTrivialABI }; bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM, TrivialABIHandling TAH = TAH_IgnoreTrivialABI, bool Diagnose = false); /// For a defaulted function, the kind of defaulted function that it is. 
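/// Illustrative sketch of the intended query pattern (the handle* callees are
/// hypothetical, not part of Sema):
/// \code
///   if (DefaultedFunctionKind DFK = getDefaultedFunctionKind(FD)) {
///     if (DFK.isComparison())
///       handleDefaultedComparison(FD, DFK.asComparison());
///     else
///       handleSpecialMember(FD, DFK.asSpecialMember());
///   }
/// \endcode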
class DefaultedFunctionKind { CXXSpecialMember SpecialMember : 8; DefaultedComparisonKind Comparison : 8; public: DefaultedFunctionKind() : SpecialMember(CXXInvalid), Comparison(DefaultedComparisonKind::None) { } DefaultedFunctionKind(CXXSpecialMember CSM) : SpecialMember(CSM), Comparison(DefaultedComparisonKind::None) {} DefaultedFunctionKind(DefaultedComparisonKind Comp) : SpecialMember(CXXInvalid), Comparison(Comp) {} bool isSpecialMember() const { return SpecialMember != CXXInvalid; } bool isComparison() const { return Comparison != DefaultedComparisonKind::None; } explicit operator bool() const { return isSpecialMember() || isComparison(); } CXXSpecialMember asSpecialMember() const { return SpecialMember; } DefaultedComparisonKind asComparison() const { return Comparison; } /// Get the index of this function kind for use in diagnostics. unsigned getDiagnosticIndex() const { static_assert(CXXInvalid > CXXDestructor, "invalid should have highest index"); static_assert((unsigned)DefaultedComparisonKind::None == 0, "none should be equal to zero"); return SpecialMember + (unsigned)Comparison; } }; DefaultedFunctionKind getDefaultedFunctionKind(const FunctionDecl *FD); CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD) { return getDefaultedFunctionKind(MD).asSpecialMember(); } DefaultedComparisonKind getDefaultedComparisonKind(const FunctionDecl *FD) { return getDefaultedFunctionKind(FD).asComparison(); } void ActOnLastBitfield(SourceLocation DeclStart, SmallVectorImpl<Decl *> &AllIvarDecls); Decl *ActOnIvar(Scope *S, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, tok::ObjCKeywordKind visibility); // This is used for both record definitions and ObjC interface declarations. void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl, ArrayRef<Decl *> Fields, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); /// ActOnTagStartDefinition - Invoked when we have entered the /// scope of a tag's definition (e.g., for an enumeration, class, /// struct, or union). void ActOnTagStartDefinition(Scope *S, Decl *TagDecl); /// Perform ODR-like check for C/ObjC when merging tag types from modules. /// Differently from C++, actually parse the body and reject / error out /// in case of a structural mismatch. bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev, SkipBodyInfo &SkipBody); typedef void *SkippedDefinitionContext; /// Invoked when we enter a tag definition that we're skipping. SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD); Decl *ActOnObjCContainerStartDefinition(Decl *IDecl); /// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a /// C++ record definition's base-specifiers clause and are starting its /// member declarations. void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl, SourceLocation FinalLoc, bool IsFinalSpelledSealed, SourceLocation LBraceLoc); /// ActOnTagFinishDefinition - Invoked once we have finished parsing /// the definition of a tag (enumeration, class, struct, or union). void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl, SourceRange BraceRange); void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context); void ActOnObjCContainerFinishDefinition(); /// Invoked when we must temporarily exit the objective-c container /// scope for parsing/looking-up C constructs. 
/// /// Must be followed by a call to \see ActOnObjCReenterContainerContext void ActOnObjCTemporaryExitContainerContext(DeclContext *DC); void ActOnObjCReenterContainerContext(DeclContext *DC); /// ActOnTagDefinitionError - Invoked when there was an unrecoverable /// error parsing the definition of a tag. void ActOnTagDefinitionError(Scope *S, Decl *TagDecl); EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum, EnumConstantDecl *LastEnumConst, SourceLocation IdLoc, IdentifierInfo *Id, Expr *val); bool CheckEnumUnderlyingType(TypeSourceInfo *TI); bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped, QualType EnumUnderlyingTy, bool IsFixed, const EnumDecl *Prev); /// Determine whether the body of an anonymous enumeration should be skipped. /// \param II The name of the first enumerator. SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II, SourceLocation IILoc); Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant, SourceLocation IdLoc, IdentifierInfo *Id, const ParsedAttributesView &Attrs, SourceLocation EqualLoc, Expr *Val); void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange, Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S, const ParsedAttributesView &Attr); /// Set the current declaration context until it gets popped. void PushDeclContext(Scope *S, DeclContext *DC); void PopDeclContext(); /// EnterDeclaratorContext - Used when we must lookup names in the context /// of a declarator's nested name specifier. void EnterDeclaratorContext(Scope *S, DeclContext *DC); void ExitDeclaratorContext(Scope *S); /// Enter a template parameter scope, after it's been associated with a particular /// DeclContext. Causes lookup within the scope to chain through enclosing contexts /// in the correct order. void EnterTemplatedContext(Scope *S, DeclContext *DC); /// Push the parameters of D, which must be a function, into scope. void ActOnReenterFunctionContext(Scope* S, Decl* D); void ActOnExitFunctionContext(); DeclContext *getFunctionLevelDeclContext(); /// getCurFunctionDecl - If inside of a function body, this returns a pointer /// to the function decl for the function being parsed. If we're currently /// in a 'block', this returns the containing context. FunctionDecl *getCurFunctionDecl(); /// getCurMethodDecl - If inside of a method body, this returns a pointer to /// the method decl for the method being parsed. If we're currently /// in a 'block', this returns the containing context. ObjCMethodDecl *getCurMethodDecl(); /// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method /// or C function we're in, otherwise return null. If we're currently /// in a 'block', this returns the containing context. NamedDecl *getCurFunctionOrMethodDecl(); /// Add this decl to the scope shadowed decl chains. void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true); /// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true /// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns /// true if 'D' belongs to the given declaration context. /// /// \param AllowInlineNamespace If \c true, allow the declaration to be in the /// enclosing namespace set of the context, rather than contained /// directly within it. bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr, bool AllowInlineNamespace = false); /// Finds the scope corresponding to the given decl context, if it /// happens to be an enclosing scope. Otherwise return NULL. 
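// Illustrative sketch of the usual pattern when introducing a declaration
// (NewD and Prev are hypothetical locals; CurContext is the usual Sema
// member): run redeclaration lookup, filter with isDeclInScope, then publish
// the declaration with PushOnScopeChains.
//
//   if (Prev && isDeclInScope(Prev, CurContext, S))
//     ; // merge with or diagnose against Prev
//   PushOnScopeChains(NewD, S);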
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC); /// Subroutines of ActOnDeclarator(). TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T, TypeSourceInfo *TInfo); bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New); /// Describes the kind of merge to perform for availability /// attributes (including "deprecated", "unavailable", and "availability"). enum AvailabilityMergeKind { /// Don't merge availability attributes at all. AMK_None, /// Merge availability attributes for a redeclaration, which requires /// an exact match. AMK_Redeclaration, /// Merge availability attributes for an override, which requires /// an exact match or a weakening of constraints. AMK_Override, /// Merge availability attributes for an implementation of /// a protocol requirement. AMK_ProtocolImplementation, }; /// Describes the kind of priority given to an availability attribute. /// /// The sum of priorities determines the final priority of the attribute. /// The final priority determines how the attribute will be merged. /// An attribute with a lower priority will always remove higher priority /// attributes for the specified platform when it is being applied. An /// attribute with a higher priority will not be applied if the declaration /// already has an availability attribute with a lower priority for the /// specified platform. The final priority values are not expected to match /// the values in this enumeration, but instead should be treated as a plain /// integer value. This enumeration just names the priority weights that are /// used to calculate that final value. enum AvailabilityPriority : int { /// The availability attribute was specified explicitly next to the /// declaration. AP_Explicit = 0, /// The availability attribute was applied using '#pragma clang attribute'. AP_PragmaClangAttribute = 1, /// The availability attribute for a specific platform was inferred from /// an availability attribute for another platform. AP_InferredFromOtherPlatform = 2 }; /// Attribute merging methods. Return the merged attribute, or null if no new /// attribute was added.
AvailabilityAttr * mergeAvailabilityAttr(NamedDecl *D, const AttributeCommonInfo &CI, IdentifierInfo *Platform, bool Implicit, VersionTuple Introduced, VersionTuple Deprecated, VersionTuple Obsoleted, bool IsUnavailable, StringRef Message, bool IsStrict, StringRef Replacement, AvailabilityMergeKind AMK, int Priority); TypeVisibilityAttr * mergeTypeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI, TypeVisibilityAttr::VisibilityType Vis); VisibilityAttr *mergeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI, VisibilityAttr::VisibilityType Vis); UuidAttr *mergeUuidAttr(Decl *D, const AttributeCommonInfo &CI, StringRef UuidAsWritten, MSGuidDecl *GuidDecl); DLLImportAttr *mergeDLLImportAttr(Decl *D, const AttributeCommonInfo &CI); DLLExportAttr *mergeDLLExportAttr(Decl *D, const AttributeCommonInfo &CI); MSInheritanceAttr *mergeMSInheritanceAttr(Decl *D, const AttributeCommonInfo &CI, bool BestCase, MSInheritanceModel Model); FormatAttr *mergeFormatAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Format, int FormatIdx, int FirstArg); SectionAttr *mergeSectionAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Name); CodeSegAttr *mergeCodeSegAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Name); AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, const AttributeCommonInfo &CI, const IdentifierInfo *Ident); MinSizeAttr *mergeMinSizeAttr(Decl *D, const AttributeCommonInfo &CI); NoSpeculativeLoadHardeningAttr * mergeNoSpeculativeLoadHardeningAttr(Decl *D, const NoSpeculativeLoadHardeningAttr &AL); SpeculativeLoadHardeningAttr * mergeSpeculativeLoadHardeningAttr(Decl *D, const SpeculativeLoadHardeningAttr &AL); SwiftNameAttr *mergeSwiftNameAttr(Decl *D, const SwiftNameAttr &SNA, StringRef Name); OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, const AttributeCommonInfo &CI); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const InternalLinkageAttr &AL); CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL); CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL); WebAssemblyImportNameAttr *mergeImportNameAttr( Decl *D, const WebAssemblyImportNameAttr &AL); WebAssemblyImportModuleAttr *mergeImportModuleAttr( Decl *D, const WebAssemblyImportModuleAttr &AL); EnforceTCBAttr *mergeEnforceTCBAttr(Decl *D, const EnforceTCBAttr &AL); EnforceTCBLeafAttr *mergeEnforceTCBLeafAttr(Decl *D, const EnforceTCBLeafAttr &AL); void mergeDeclAttributes(NamedDecl *New, Decl *Old, AvailabilityMergeKind AMK = AMK_Redeclaration); void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New, LookupResult &OldDecls); bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S, bool MergeTypeWithOld); bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old, Scope *S, bool MergeTypeWithOld); void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old); void MergeVarDecl(VarDecl *New, LookupResult &Previous); void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld); void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old); bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn); void notePreviousDefinition(const NamedDecl *Old, SourceLocation New); bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S); // AssignmentAction - This is used by all the assignment diagnostic functions // to represent what is actually causing the operation enum AssignmentAction { AA_Assigning, AA_Passing, AA_Returning, AA_Converting, AA_Initializing, 
AA_Sending, AA_Casting, AA_Passing_CFAudited }; /// C++ Overloading. enum OverloadKind { /// This is a legitimate overload: the existing declarations are /// functions or function templates with different signatures. Ovl_Overload, /// This is not an overload because the signature exactly matches /// an existing declaration. Ovl_Match, /// This is not an overload because the lookup results contain a /// non-function. Ovl_NonFunction }; OverloadKind CheckOverload(Scope *S, FunctionDecl *New, const LookupResult &OldDecls, NamedDecl *&OldDecl, bool IsForUsingDecl); bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl, bool ConsiderCudaAttrs = true, bool ConsiderRequiresClauses = true); enum class AllowedExplicit { /// Allow no explicit functions to be used. None, /// Allow explicit conversion functions but not explicit constructors. Conversions, /// Allow both explicit conversion functions and explicit constructors. All }; ImplicitConversionSequence TryImplicitConversion(Expr *From, QualType ToType, bool SuppressUserConversions, AllowedExplicit AllowExplicit, bool InOverloadResolution, bool CStyle, bool AllowObjCWritebackConversion); bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType); bool IsFloatingPointPromotion(QualType FromType, QualType ToType); bool IsComplexPromotion(QualType FromType, QualType ToType); bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCWritebackConversion(QualType FromType, QualType ToType, QualType &ConvertedType); bool IsBlockPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType); bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType, const FunctionProtoType *NewType, unsigned *ArgPos = nullptr); void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag, QualType FromType, QualType ToType); void maybeExtendBlockObject(ExprResult &E); CastKind PrepareCastToObjCObjectPointer(ExprResult &E); bool CheckPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath& BasePath, bool IgnoreBaseAccess, bool Diagnose = true); bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType &ConvertedType); bool CheckMemberPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath &BasePath, bool IgnoreBaseAccess); bool IsQualificationConversion(QualType FromType, QualType ToType, bool CStyle, bool &ObjCLifetimeConversion); bool IsFunctionConversion(QualType FromType, QualType ToType, QualType &ResultTy); bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType); bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg); ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity, const VarDecl *NRVOCandidate, QualType ResultType, Expr *Value, bool AllowNRVO = true); bool CanPerformAggregateInitializationForOverloadResolution( const InitializedEntity &Entity, InitListExpr *From); bool IsStringInit(Expr *Init, const ArrayType *AT); bool CanPerformCopyInitialization(const InitializedEntity &Entity, ExprResult Init); ExprResult PerformCopyInitialization(const InitializedEntity &Entity, SourceLocation EqualLoc, ExprResult Init, bool TopLevelOfInitList = false, bool AllowExplicit = false); ExprResult PerformObjectArgumentInitialization(Expr *From, NestedNameSpecifier 
*Qualifier, NamedDecl *FoundDecl, CXXMethodDecl *Method); /// Check that the lifetime of the initializer (and its subobjects) is /// sufficient for initializing the entity, and perform lifetime extension /// (when permitted) if not. void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init); ExprResult PerformContextuallyConvertToBool(Expr *From); ExprResult PerformContextuallyConvertToObjCPointer(Expr *From); /// Contexts in which a converted constant expression is required. enum CCEKind { CCEK_CaseValue, ///< Expression in a case label. CCEK_Enumerator, ///< Enumerator value with fixed underlying type. CCEK_TemplateArg, ///< Value of a non-type template parameter. CCEK_ArrayBound, ///< Array bound in array declarator or new-expression. CCEK_ConstexprIf, ///< Condition in a constexpr if statement. CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier. }; ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, llvm::APSInt &Value, CCEKind CCE); ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, APValue &Value, CCEKind CCE, NamedDecl *Dest = nullptr); /// Abstract base class used to perform a contextual implicit /// conversion from an expression to any type passing a filter. class ContextualImplicitConverter { public: bool Suppress; bool SuppressConversion; ContextualImplicitConverter(bool Suppress = false, bool SuppressConversion = false) : Suppress(Suppress), SuppressConversion(SuppressConversion) {} /// Determine whether the specified type is a valid destination type /// for this conversion. virtual bool match(QualType T) = 0; /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the expression has incomplete class type. virtual SemaDiagnosticBuilder diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the only matching conversion function /// is explicit. virtual SemaDiagnosticBuilder diagnoseExplicitConv( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; /// Emits a note for the explicit conversion function. virtual SemaDiagnosticBuilder noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when there are multiple possible conversion /// functions. virtual SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a note for one of the candidate conversions. virtual SemaDiagnosticBuilder noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when we picked a conversion function /// (for cases when we are not allowed to pick a conversion function). virtual SemaDiagnosticBuilder diagnoseConversion( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; virtual ~ContextualImplicitConverter() {} }; class ICEConvertDiagnoser : public ContextualImplicitConverter { bool AllowScopedEnumerations; public: ICEConvertDiagnoser(bool AllowScopedEnumerations, bool Suppress, bool SuppressConversion) : ContextualImplicitConverter(Suppress, SuppressConversion), AllowScopedEnumerations(AllowScopedEnumerations) {} /// Match an integral or (possibly scoped) enumeration type. 
bool match(QualType T) override; SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override { return diagnoseNotInt(S, Loc, T); } /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0; }; /// Perform a contextual implicit conversion. ExprResult PerformContextualImplicitConversion( SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter); enum ObjCSubscriptKind { OS_Array, OS_Dictionary, OS_Error }; ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE); // Note that LK_String is intentionally after the other literals, as // this is used for diagnostics logic. enum ObjCLiteralKind { LK_Array, LK_Dictionary, LK_Numeric, LK_Boxed, LK_String, LK_Block, LK_None }; ObjCLiteralKind CheckLiteralKind(Expr *FromE); ExprResult PerformObjectMemberConversion(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, NamedDecl *Member); // Members have to be NamespaceDecl* or TranslationUnitDecl*. // TODO: make this is a typesafe union. typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet; typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet; using ADLCallKind = CallExpr::ADLCallKind; void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = true, bool AllowExplicitConversion = false, ADLCallKind IsADLCandidate = ADLCallKind::NotADL, ConversionSequenceList EarlyConversions = None, OverloadCandidateParamOrder PO = {}); void AddFunctionCandidates(const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, bool SuppressUserConversions = false, bool PartialOverloading = false, bool FirstArgumentIsBase = false); void AddMethodCandidate(DeclAccessPair FoundDecl, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversion = false, OverloadCandidateParamOrder PO = {}); void AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, ConversionSequenceList EarlyConversions = None, OverloadCandidateParamOrder PO = {}); void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, OverloadCandidateParamOrder PO = {}); void AddTemplateOverloadCandidate( FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = true, ADLCallKind IsADLCandidate = ADLCallKind::NotADL, OverloadCandidateParamOrder PO = {}); bool CheckNonDependentConversions( FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes, ArrayRef<Expr *> 
Args, OverloadCandidateSet &CandidateSet, ConversionSequenceList &Conversions, bool SuppressUserConversions, CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(), Expr::Classification ObjectClassification = {}, OverloadCandidateParamOrder PO = {}); void AddConversionCandidate( CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddTemplateConversionCandidate( FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddSurrogateCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, const FunctionProtoType *Proto, Expr *Object, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddNonMemberOperatorCandidates( const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); void AddMemberOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, OverloadCandidateParamOrder PO = {}); void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool IsAssignmentOperator = false, unsigned NumContextualBoolArguments = 0); void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddArgumentDependentLookupCandidates(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, TemplateArgumentListInfo *ExplicitTemplateArgs, OverloadCandidateSet& CandidateSet, bool PartialOverloading = false); // Emit as a 'note' the specific overload candidate void NoteOverloadCandidate( NamedDecl *Found, FunctionDecl *Fn, OverloadCandidateRewriteKind RewriteKind = OverloadCandidateRewriteKind(), QualType DestType = QualType(), bool TakingAddress = false); // Emit as a series of 'note's all template and non-templates identified by // the expression Expr void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(), bool TakingAddress = false); /// Check the enable_if expressions on the given function. Returns the first /// failing attribute, or NULL if they were all successful. EnableIfAttr *CheckEnableIf(FunctionDecl *Function, SourceLocation CallLoc, ArrayRef<Expr *> Args, bool MissingImplicitThis = false); /// Find the failed Boolean condition within a given Boolean /// constant expression, and describe it with a string. std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// non-ArgDependent DiagnoseIfAttrs. /// /// Argument-dependent diagnose_if attributes should be checked each time a /// function is used as a direct callee of a function call. /// /// Returns true if any errors were emitted. bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function, const Expr *ThisArg, ArrayRef<const Expr *> Args, SourceLocation Loc); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// ArgDependent DiagnoseIfAttrs. /// /// Argument-independent diagnose_if attributes should be checked on every use /// of a function. 
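// Example (illustrative sketch, not part of the interface above): callers
// typically drive these candidate entry points by filling an
// OverloadCandidateSet and asking for the best viable function. Here `S` is
// the Sema instance, and `Loc`, `Args`, and `Found` (e.g. a LookupResult of
// candidate declarations) are assumed to be supplied by the caller.
//
//   OverloadCandidateSet Candidates(Loc, OverloadCandidateSet::CSK_Normal);
//   for (NamedDecl *D : Found)
//     if (auto *FD = dyn_cast<FunctionDecl>(D))
//       S.AddOverloadCandidate(FD, DeclAccessPair::make(FD, FD->getAccess()),
//                              Args, Candidates);
//   OverloadCandidateSet::iterator Best;
//   if (Candidates.BestViableFunction(S, Loc, Best) == OR_Success) {
//     FunctionDecl *Chosen = Best->Function;  // the selected overload
//     (void)Chosen;
//   }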
/// /// Returns true if any errors were emitted. bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND, SourceLocation Loc); /// Returns whether the given function's address can be taken or not, /// optionally emitting a diagnostic if the address can't be taken. /// /// Returns false if taking the address of the function is illegal. bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function, bool Complain = false, SourceLocation Loc = SourceLocation()); // [PossiblyAFunctionType] --> [Return] // NonFunctionType --> NonFunctionType // R (A) --> R(A) // R (*)(A) --> R (A) // R (&)(A) --> R (A) // R (S::*)(A) --> R (A) QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType); FunctionDecl * ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr, QualType TargetType, bool Complain, DeclAccessPair &Found, bool *pHadMultipleCandidates = nullptr); FunctionDecl * resolveAddressOfSingleOverloadCandidate(Expr *E, DeclAccessPair &FoundResult); bool resolveAndFixAddressOfSingleOverloadCandidate( ExprResult &SrcExpr, bool DoFunctionPointerConversion = false); FunctionDecl * ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl, bool Complain = false, DeclAccessPair *Found = nullptr); bool ResolveAndFixSingleFunctionTemplateSpecialization( ExprResult &SrcExpr, bool DoFunctionPointerConverion = false, bool Complain = false, SourceRange OpRangeForComplaining = SourceRange(), QualType DestTypeForComplaining = QualType(), unsigned DiagIDForComplaining = 0); Expr *FixOverloadedFunctionReference(Expr *E, DeclAccessPair FoundDecl, FunctionDecl *Fn); ExprResult FixOverloadedFunctionReference(ExprResult, DeclAccessPair FoundDecl, FunctionDecl *Fn); void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool PartialOverloading = false); void AddOverloadedCallCandidates( LookupResult &R, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet); // An enum used to represent the different possible results of building a // range-based for loop. 
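// Example (illustrative sketch): resolving `&f` against a known function
// pointer type with the declarations above. `S`, `AddrOf` (the '&f'
// expression), and `TargetTy` are assumed to come from the caller.
//
//   DeclAccessPair Found;
//   if (FunctionDecl *FD = S.ResolveAddressOfOverloadedFunction(
//           AddrOf, TargetTy, /*Complain=*/true, Found)) {
//     // Rewrite the overloaded reference so it refers to the chosen decl.
//     Expr *Fixed = S.FixOverloadedFunctionReference(AddrOf, Found, FD);
//     (void)Fixed;
//   }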
enum ForRangeStatus { FRS_Success, FRS_NoViableFunction, FRS_DiagnosticIssued }; ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc, SourceLocation RangeLoc, const DeclarationNameInfo &NameInfo, LookupResult &MemberLookup, OverloadCandidateSet *CandidateSet, Expr *Range, ExprResult *CallExpr); ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc, Expr *ExecConfig, bool AllowTypoCorrection=true, bool CalleesAddressIsTaken=false); bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, MultiExprArg Args, SourceLocation RParenLoc, OverloadCandidateSet *CandidateSet, ExprResult *Result); ExprResult CreateUnresolvedLookupExpr(CXXRecordDecl *NamingClass, NestedNameSpecifierLoc NNSLoc, DeclarationNameInfo DNI, const UnresolvedSetImpl &Fns, bool PerformADL = true); ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *input, bool RequiresADL = true); void LookupOverloadedBinOp(OverloadCandidateSet &CandidateSet, OverloadedOperatorKind Op, const UnresolvedSetImpl &Fns, ArrayRef<Expr *> Args, bool RequiresADL = true); ExprResult CreateOverloadedBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS, bool RequiresADL = true, bool AllowRewrittenCandidates = true, FunctionDecl *DefaultedFn = nullptr); ExprResult BuildSynthesizedThreeWayComparison(SourceLocation OpLoc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS, FunctionDecl *DefaultedFn); ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc, SourceLocation RLoc, Expr *Base,Expr *Idx); ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc, bool AllowRecovery = false); ExprResult BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc, bool *NoArrowOperatorFound = nullptr); /// CheckCallReturnType - Checks that a call expression's return type is /// complete. Returns true on failure. The location passed in is the location /// that best represents the call. bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc, CallExpr *CE, FunctionDecl *FD); /// Helpers for dealing with blocks and functions. bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters, bool CheckParameterNames); void CheckCXXDefaultArguments(FunctionDecl *FD); void CheckExtraCXXDefaultArguments(Declarator &D); Scope *getNonFieldDeclScope(Scope *S); /// \name Name lookup /// /// These routines provide name lookup that is used during semantic /// analysis to resolve the various kinds of names (identifiers, /// overloaded operator names, constructor names, etc.) into zero or /// more declarations within a particular scope. The major entry /// points are LookupName, which performs unqualified name lookup, /// and LookupQualifiedName, which performs qualified name lookup. /// /// All name lookup is performed based on some specific criteria, /// which specify what names will be visible to name lookup and how /// far name lookup should work. These criteria are important both /// for capturing language semantics (certain lookups will ignore /// certain names, for example) and for performance, since name /// lookup is often a bottleneck in the compilation of C++. 
Name /// lookup criteria is specified via the LookupCriteria enumeration. /// /// The results of name lookup can vary based on the kind of name /// lookup performed, the current language, and the translation /// unit. In C, for example, name lookup will either return nothing /// (no entity found) or a single declaration. In C++, name lookup /// can additionally refer to a set of overloaded functions or /// result in an ambiguity. All of the possible results of name /// lookup are captured by the LookupResult class, which provides /// the ability to distinguish among them. //@{ /// Describes the kind of name lookup to perform. enum LookupNameKind { /// Ordinary name lookup, which finds ordinary names (functions, /// variables, typedefs, etc.) in C and most kinds of names /// (functions, variables, members, types, etc.) in C++. LookupOrdinaryName = 0, /// Tag name lookup, which finds the names of enums, classes, /// structs, and unions. LookupTagName, /// Label name lookup. LookupLabel, /// Member name lookup, which finds the names of /// class/struct/union members. LookupMemberName, /// Look up of an operator name (e.g., operator+) for use with /// operator overloading. This lookup is similar to ordinary name /// lookup, but will ignore any declarations that are class members. LookupOperatorName, /// Look up a name following ~ in a destructor name. This is an ordinary /// lookup, but prefers tags to typedefs. LookupDestructorName, /// Look up of a name that precedes the '::' scope resolution /// operator in C++. This lookup completely ignores operator, object, /// function, and enumerator names (C++ [basic.lookup.qual]p1). LookupNestedNameSpecifierName, /// Look up a namespace name within a C++ using directive or /// namespace alias definition, ignoring non-namespace names (C++ /// [basic.lookup.udir]p1). LookupNamespaceName, /// Look up all declarations in a scope with the given name, /// including resolved using declarations. This is appropriate /// for checking redeclarations for a using declaration. LookupUsingDeclName, /// Look up an ordinary name that is going to be redeclared as a /// name with linkage. This lookup ignores any declarations that /// are outside of the current scope unless they have linkage. See /// C99 6.2.2p4-5 and C++ [basic.link]p6. LookupRedeclarationWithLinkage, /// Look up a friend of a local class. This lookup does not look /// outside the innermost non-class scope. See C++11 [class.friend]p11. LookupLocalFriendName, /// Look up the name of an Objective-C protocol. LookupObjCProtocolName, /// Look up implicit 'self' parameter of an objective-c method. LookupObjCImplicitSelfParam, /// Look up the name of an OpenMP user-defined reduction operation. LookupOMPReductionName, /// Look up the name of an OpenMP user-defined mapper. LookupOMPMapperName, /// Look up any declaration with any name. LookupAnyName }; /// Specifies whether (or how) name lookup is being performed for a /// redeclaration (vs. a reference). enum RedeclarationKind { /// The lookup is a reference to this name that is not for the /// purpose of redeclaring the name. NotForRedeclaration = 0, /// The lookup results will be used for redeclaration of a name, /// if an entity by that name already exists and is visible. ForVisibleRedeclaration, /// The lookup results will be used for redeclaration of a name /// with external linkage; non-visible lookup results with external linkage /// may also be found. 
ForExternalRedeclaration }; RedeclarationKind forRedeclarationInCurContext() { // A declaration with an owning module for linkage can never link against // anything that is not visible. We don't need to check linkage here; if // the context has internal linkage, redeclaration lookup won't find things // from other TUs, and we can't safely compute linkage yet in general. if (cast<Decl>(CurContext) ->getOwningModuleForLinkage(/*IgnoreLinkage*/true)) return ForVisibleRedeclaration; return ForExternalRedeclaration; } /// The possible outcomes of name lookup for a literal operator. enum LiteralOperatorLookupResult { /// The lookup resulted in an error. LOLR_Error, /// The lookup found no match but no diagnostic was issued. LOLR_ErrorNoDiagnostic, /// The lookup found a single 'cooked' literal operator, which /// expects a normal literal to be built and passed to it. LOLR_Cooked, /// The lookup found a single 'raw' literal operator, which expects /// a string literal containing the spelling of the literal token. LOLR_Raw, /// The lookup found an overload set of literal operator templates, /// which expect the characters of the spelling of the literal token to be /// passed as a non-type template argument pack. LOLR_Template, /// The lookup found an overload set of literal operator templates, /// which expect the character type and characters of the spelling of the /// string literal token to be passed as template arguments. LOLR_StringTemplatePack, }; SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D, CXXSpecialMember SM, bool ConstArg, bool VolatileArg, bool RValueThis, bool ConstThis, bool VolatileThis); typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator; typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)> TypoRecoveryCallback; private: bool CppLookupName(LookupResult &R, Scope *S); struct TypoExprState { std::unique_ptr<TypoCorrectionConsumer> Consumer; TypoDiagnosticGenerator DiagHandler; TypoRecoveryCallback RecoveryHandler; TypoExprState(); TypoExprState(TypoExprState &&other) noexcept; TypoExprState &operator=(TypoExprState &&other) noexcept; }; /// The set of unhandled TypoExprs and their associated state. llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos; /// Creates a new TypoExpr AST node. TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC, SourceLocation TypoLoc); // The set of known/encountered (unique, canonicalized) NamespaceDecls. // // The boolean value will be true to indicate that the namespace was loaded // from an AST/PCH file, or false otherwise. llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces; /// Whether we have already loaded known namespaces from an extenal /// source. bool LoadedExternalKnownNamespaces; /// Helper for CorrectTypo and CorrectTypoDelayed used to create and /// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction /// should be skipped entirely. std::unique_ptr<TypoCorrectionConsumer> makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, DeclContext *MemberContext, bool EnteringContext, const ObjCObjectPointerType *OPT, bool ErrorRecovery); public: const TypoExprState &getTypoExprState(TypoExpr *TE) const; /// Clears the state of the given TypoExpr. void clearDelayedTypo(TypoExpr *TE); /// Look up a name, looking for a single declaration. 
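// Example (illustrative sketch): a typical unqualified lookup using the
// lookup and redeclaration kinds above. `S` (the Sema instance), `CurScope`,
// `Name` (a DeclarationName), and `Loc` are assumed to be provided by the
// caller.
//
//   LookupResult R(S, Name, Loc, Sema::LookupOrdinaryName,
//                  S.forRedeclarationInCurContext());
//   if (S.LookupName(R, CurScope) && !R.isAmbiguous())
//     if (auto *VD = R.getAsSingle<VarDecl>())
//       (void)VD;  // a single, unambiguous result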
Return /// null if the results were absent, ambiguous, or overloaded. /// /// It is preferable to use the elaborated form and explicitly handle /// ambiguity and overloaded. NamedDecl *LookupSingleName(Scope *S, DeclarationName Name, SourceLocation Loc, LookupNameKind NameKind, RedeclarationKind Redecl = NotForRedeclaration); bool LookupBuiltin(LookupResult &R); void LookupNecessaryTypesForBuiltin(Scope *S, unsigned ID); bool LookupName(LookupResult &R, Scope *S, bool AllowBuiltinCreation = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, bool InUnqualifiedLookup = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, CXXScopeSpec &SS); bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS, bool AllowBuiltinCreation = false, bool EnteringContext = false); ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc, RedeclarationKind Redecl = NotForRedeclaration); bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class); void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S, UnresolvedSetImpl &Functions); LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc, SourceLocation GnuLabelLoc = SourceLocation()); DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class); CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class); CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class); bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id); LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R, ArrayRef<QualType> ArgTys, bool AllowRaw, bool AllowTemplate, bool AllowStringTemplate, bool DiagnoseMissing, StringLiteral *StringLit = nullptr); bool isKnownName(StringRef name); /// Status of the function emission on the CUDA/HIP/OpenMP host/device attrs. enum class FunctionEmissionStatus { Emitted, CUDADiscarded, // Discarded due to CUDA/HIP hostness OMPDiscarded, // Discarded due to OpenMP hostness TemplateDiscarded, // Discarded due to uninstantiated templates Unknown, }; FunctionEmissionStatus getEmissionStatus(FunctionDecl *Decl, bool Final = false); // Whether the callee should be ignored in CUDA/HIP/OpenMP host/device check. bool shouldIgnoreInHostDeviceCheck(FunctionDecl *Callee); void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, ADLResult &Functions); void LookupVisibleDecls(Scope *S, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool LoadExternal = true); void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool IncludeDependentBases = false, bool LoadExternal = true); enum CorrectTypoKind { CTK_NonError, // CorrectTypo used in a non error recovery situation. CTK_ErrorRecovery // CorrectTypo used in normal error recovery. 
}; TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr, bool RecordFailure = true); TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr); /// Process any TypoExprs in the given Expr and its children, /// generating diagnostics as appropriate and returning a new Expr if there /// were typos that were all successfully corrected and ExprError if one or /// more typos could not be corrected. /// /// \param E The Expr to check for TypoExprs. /// /// \param InitDecl A VarDecl to avoid because the Expr being corrected is its /// initializer. /// /// \param RecoverUncorrectedTypos If true, when typo correction fails, it /// will rebuild the given Expr with all TypoExprs degraded to RecoveryExprs. /// /// \param Filter A function applied to a newly rebuilt Expr to determine if /// it is an acceptable/usable result from a single combination of typo /// corrections. As long as the filter returns ExprError, different /// combinations of corrections will be tried until all are exhausted. ExprResult CorrectDelayedTyposInExpr( Expr *E, VarDecl *InitDecl = nullptr, bool RecoverUncorrectedTypos = false, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }); ExprResult CorrectDelayedTyposInExpr( ExprResult ER, VarDecl *InitDecl = nullptr, bool RecoverUncorrectedTypos = false, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }) { return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), InitDecl, RecoverUncorrectedTypos, Filter); } void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, bool ErrorRecovery = true); void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, const PartialDiagnostic &PrevNote, bool ErrorRecovery = true); void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F); void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc, ArrayRef<Expr *> Args, AssociatedNamespaceSet &AssociatedNamespaces, AssociatedClassSet &AssociatedClasses); void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S, bool ConsiderLinkage, bool AllowInlineNamespace); bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old); void DiagnoseAmbiguousLookup(LookupResult &Result); //@} /// Attempts to produce a RecoveryExpr after some AST node cannot be created. 
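// Example (illustrative sketch): the usual error-recovery pattern built on
// CorrectTypo/diagnoseTypo above. `S`, `NameInfo`, `CurScope`, a
// caller-supplied CorrectionCandidateCallback `CCC`, and the particular
// diagnostic ID are assumptions chosen for illustration.
//
//   if (TypoCorrection Corrected =
//           S.CorrectTypo(NameInfo, Sema::LookupOrdinaryName, CurScope,
//                         /*SS=*/nullptr, CCC, Sema::CTK_ErrorRecovery)) {
//     S.diagnoseTypo(Corrected,
//                    S.PDiag(diag::err_undeclared_var_use_suggest)
//                        << NameInfo.getName());
//   }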
ExprResult CreateRecoveryExpr(SourceLocation Begin, SourceLocation End, ArrayRef<Expr *> SubExprs, QualType T = QualType()); ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id, SourceLocation IdLoc, bool TypoCorrection = false); FunctionDecl *CreateBuiltin(IdentifierInfo *II, QualType Type, unsigned ID, SourceLocation Loc); NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID, Scope *S, bool ForRedeclaration, SourceLocation Loc); NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II, Scope *S); void AddKnownFunctionAttributesForReplaceableGlobalAllocationFunction( FunctionDecl *FD); void AddKnownFunctionAttributes(FunctionDecl *FD); // More parsing and symbol table subroutines. void ProcessPragmaWeak(Scope *S, Decl *D); // Decl attributes - this routine is the top level dispatcher. void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD); // Helper for delayed processing of attributes. void ProcessDeclAttributeDelayed(Decl *D, const ParsedAttributesView &AttrList); void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL, bool IncludeCXX11Attributes = true); bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl, const ParsedAttributesView &AttrList); void checkUnusedDeclAttributes(Declarator &D); /// Determine if type T is a valid subject for a nonnull and similar /// attributes. By default, we look through references (the behavior used by /// nonnull), but if the second parameter is true, then we treat a reference /// type as valid. bool isValidPointerAttrType(QualType T, bool RefOkay = false); bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value); bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC, const FunctionDecl *FD = nullptr); bool CheckAttrTarget(const ParsedAttr &CurrAttr); bool CheckAttrNoArgs(const ParsedAttr &CurrAttr); bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum, StringRef &Str, SourceLocation *ArgLocation = nullptr); bool checkSectionName(SourceLocation LiteralLoc, StringRef Str); bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str); bool checkMSInheritanceAttrOnDefinition( CXXRecordDecl *RD, SourceRange Range, bool BestCase, MSInheritanceModel SemanticSpelling); void CheckAlignasUnderalignment(Decl *D); /// Adjust the calling convention of a method to be the ABI default if it /// wasn't specified explicitly. This handles method types formed from /// function type typedefs and typename template arguments. void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor, SourceLocation Loc); // Check if there is an explicit attribute, but only look through parens. // The intent is to look for an attribute on the current declarator, but not // one that came from a typedef. bool hasExplicitCallingConv(QualType T); /// Get the outermost AttributedType node that sets a calling convention. /// Valid types should not have multiple attributes with different CCs. const AttributedType *getCallingConvAttributedType(QualType T) const; /// Stmt attributes - this routine is the top level dispatcher. 
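// Example (illustrative sketch): how an attribute handler commonly pulls a
// string argument out of a parsed attribute with the helpers above. `S`,
// `AL` (a ParsedAttr), and the enclosing void handler are assumptions; the
// checkSectionName call stands in for whatever semantic check the attribute
// needs.
//
//   StringRef Str;
//   SourceLocation LiteralLoc;
//   if (!S.checkStringLiteralArgumentAttr(AL, 0, Str, &LiteralLoc))
//     return;  // diagnostic already emitted
//   if (!S.checkSectionName(LiteralLoc, Str))
//     return;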
StmtResult ProcessStmtAttributes(Stmt *Stmt, const ParsedAttributesView &Attrs, SourceRange Range); void WarnConflictingTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); void CheckConflictingOverridingMethod(ObjCMethodDecl *Method, ObjCMethodDecl *Overridden, bool IsProtocolMethodDecl); /// WarnExactTypedMethods - This routine issues a warning if method /// implementation declaration matches exactly that of its declaration. void WarnExactTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); typedef llvm::SmallPtrSet<Selector, 8> SelectorSet; /// CheckImplementationIvars - This routine checks if the instance variables /// listed in the implelementation match those listed in the interface. void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl, ObjCIvarDecl **Fields, unsigned nIvars, SourceLocation Loc); /// ImplMethodsVsClassMethods - This is main routine to warn if any method /// remains unimplemented in the class or category \@implementation. void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool IncompleteImpl = false); /// DiagnoseUnimplementedProperties - This routine warns on those properties /// which must be implemented by this implementation. void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl *CDecl, bool SynthesizeProperties); /// Diagnose any null-resettable synthesized setters. void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl); /// DefaultSynthesizeProperties - This routine default synthesizes all /// properties which must be synthesized in the class's \@implementation. void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl, ObjCInterfaceDecl *IDecl, SourceLocation AtEnd); void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd); /// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is /// an ivar synthesized for 'Method' and 'Method' is a property accessor /// declared in class 'IFace'. bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace, ObjCMethodDecl *Method, ObjCIvarDecl *IV); /// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if ivar which /// backs the property is not used in the property's accessor. void DiagnoseUnusedBackingIvarInAccessor(Scope *S, const ObjCImplementationDecl *ImplD); /// GetIvarBackingPropertyAccessor - If method is a property setter/getter and /// it property has a backing ivar, returns this ivar; otherwise, returns NULL. /// It also returns ivar's property on success. ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method, const ObjCPropertyDecl *&PDecl) const; /// Called by ActOnProperty to handle \@property declarations in /// class extensions. ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, SourceLocation GetterNameLoc, Selector SetterSel, SourceLocation SetterNameLoc, const bool isReadWrite, unsigned &Attributes, const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind); /// Called by ActOnProperty and HandlePropertyInClassExtension to /// handle creating the ObjcPropertyDecl for a category or \@interface. 
ObjCPropertyDecl *CreatePropertyDecl(Scope *S, ObjCContainerDecl *CDecl, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, SourceLocation GetterNameLoc, Selector SetterSel, SourceLocation SetterNameLoc, const bool isReadWrite, const unsigned Attributes, const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); /// AtomicPropertySetterGetterRules - This routine enforces the rule (via /// warning) when atomic property has one but not the other user-declared /// setter or getter. void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl, ObjCInterfaceDecl* IDecl); void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D); void DiagnoseMissingDesignatedInitOverrides( const ObjCImplementationDecl *ImplD, const ObjCInterfaceDecl *IFD); void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID); enum MethodMatchStrategy { MMS_loose, MMS_strict }; /// MatchTwoMethodDeclarations - Checks if two methods' type match and returns /// true, or false, accordingly. bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method, const ObjCMethodDecl *PrevMethod, MethodMatchStrategy strategy = MMS_strict); /// MatchAllMethodDeclarations - Check methods declaraed in interface or /// or protocol against those declared in their implementations. void MatchAllMethodDeclarations(const SelectorSet &InsMap, const SelectorSet &ClsMap, SelectorSet &InsMapSeen, SelectorSet &ClsMapSeen, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool &IncompleteImpl, bool ImmediateClass, bool WarnCategoryMethodImpl=false); /// CheckCategoryVsClassMethodMatches - Checks that methods implemented in /// category matches with those implemented in its primary class and /// warns each time an exact match is found. void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP); /// Add the given method to the list of globally-known methods. void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method); /// Returns default addr space for method qualifiers. LangAS getDefaultCXXMethodAddrSpace() const; private: /// AddMethodToGlobalPool - Add an instance or factory method to the global /// pool. See descriptoin of AddInstanceMethodToGlobalPool. void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance); /// LookupMethodInGlobalPool - Returns the instance or factory method and /// optionally warns if there are multiple signatures. ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass, bool instance); public: /// - Returns instance or factory methods in global method pool for /// given selector. It checks the desired kind first, if none is found, and /// parameter checkTheOther is set, it then checks the other kind. If no such /// method or only one method is found, function returns false; otherwise, it /// returns true. 
bool CollectMultipleMethodsInGlobalPool(Selector Sel, SmallVectorImpl<ObjCMethodDecl*>& Methods, bool InstanceFirst, bool CheckTheOther, const ObjCObjectType *TypeBound = nullptr); bool AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod, SourceRange R, bool receiverIdOrClass, SmallVectorImpl<ObjCMethodDecl*>& Methods); void DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods, Selector Sel, SourceRange R, bool receiverIdOrClass); private: /// - Returns a selector which best matches given argument list or /// nullptr if none could be found ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args, bool IsInstance, SmallVectorImpl<ObjCMethodDecl*>& Methods); /// Record the typo correction failure and return an empty correction. TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc, bool RecordFailure = true) { if (RecordFailure) TypoCorrectionFailures[Typo].insert(TypoLoc); return TypoCorrection(); } public: /// AddInstanceMethodToGlobalPool - All instance methods in a translation /// unit are added to a global pool. This allows us to efficiently associate /// a selector with a method declaraation for purposes of typechecking /// messages sent to "id" (where the class of the object is unknown). void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) { AddMethodToGlobalPool(Method, impl, /*instance*/true); } /// AddFactoryMethodToGlobalPool - Same as above, but for factory methods. void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) { AddMethodToGlobalPool(Method, impl, /*instance*/false); } /// AddAnyMethodToGlobalPool - Add any method, instance or factory to global /// pool. void AddAnyMethodToGlobalPool(Decl *D); /// LookupInstanceMethodInGlobalPool - Returns the method and warns if /// there are multiple signatures. ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass=false) { return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, /*instance*/true); } /// LookupFactoryMethodInGlobalPool - Returns the method and warns if /// there are multiple signatures. ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass=false) { return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, /*instance*/false); } const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel, QualType ObjectType=QualType()); /// LookupImplementedMethodInGlobalPool - Returns the method which has an /// implementation. ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel); /// CollectIvarsToConstructOrDestruct - Collect those ivars which require /// initialization. void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI, SmallVectorImpl<ObjCIvarDecl*> &Ivars); //===--------------------------------------------------------------------===// // Statement Parsing Callbacks: SemaStmt.cpp. public: class FullExprArg { public: FullExprArg() : E(nullptr) { } FullExprArg(Sema &actions) : E(nullptr) { } ExprResult release() { return E; } Expr *get() const { return E; } Expr *operator->() { return E; } private: // FIXME: No need to make the entire Sema class a friend when it's just // Sema::MakeFullExpr that needs access to the constructor below. friend class Sema; explicit FullExprArg(Expr *expr) : E(expr) {} Expr *E; }; FullExprArg MakeFullExpr(Expr *Arg) { return MakeFullExpr(Arg, Arg ? 
Arg->getExprLoc() : SourceLocation()); } FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) { return FullExprArg( ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get()); } FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) { ExprResult FE = ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(), /*DiscardedValue*/ true); return FullExprArg(FE.get()); } StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true); StmtResult ActOnExprStmtError(); StmtResult ActOnNullStmt(SourceLocation SemiLoc, bool HasLeadingEmptyMacro = false); void ActOnStartOfCompoundStmt(bool IsStmtExpr); void ActOnAfterCompoundStatementLeadingPragmas(); void ActOnFinishOfCompoundStmt(); StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R, ArrayRef<Stmt *> Elts, bool isStmtExpr); /// A RAII object to enter scope of a compound statement. class CompoundScopeRAII { public: CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) { S.ActOnStartOfCompoundStmt(IsStmtExpr); } ~CompoundScopeRAII() { S.ActOnFinishOfCompoundStmt(); } private: Sema &S; }; /// An RAII helper that pops function a function scope on exit. struct FunctionScopeRAII { Sema &S; bool Active; FunctionScopeRAII(Sema &S) : S(S), Active(true) {} ~FunctionScopeRAII() { if (Active) S.PopFunctionScopeInfo(); } void disable() { Active = false; } }; StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl, SourceLocation StartLoc, SourceLocation EndLoc); void ActOnForEachDeclStmt(DeclGroupPtrTy Decl); StmtResult ActOnForEachLValueExpr(Expr *E); ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val); StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS, SourceLocation DotDotDotLoc, ExprResult RHS, SourceLocation ColonLoc); void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt); StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc, SourceLocation ColonLoc, Stmt *SubStmt, Scope *CurScope); StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl, SourceLocation ColonLoc, Stmt *SubStmt); StmtResult ActOnAttributedStmt(SourceLocation AttrLoc, ArrayRef<const Attr*> Attrs, Stmt *SubStmt); class ConditionResult; StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr, SourceLocation LParenLoc, Stmt *InitStmt, ConditionResult Cond, SourceLocation RParenLoc, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr, SourceLocation LParenLoc, Stmt *InitStmt, ConditionResult Cond, SourceLocation RParenLoc, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc, SourceLocation LParenLoc, Stmt *InitStmt, ConditionResult Cond, SourceLocation RParenLoc); StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch, Stmt *Body); StmtResult ActOnWhileStmt(SourceLocation WhileLoc, SourceLocation LParenLoc, ConditionResult Cond, SourceLocation RParenLoc, Stmt *Body); StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body, SourceLocation WhileLoc, SourceLocation CondLParen, Expr *Cond, SourceLocation CondRParen); StmtResult ActOnForStmt(SourceLocation ForLoc, SourceLocation LParenLoc, Stmt *First, ConditionResult Second, FullExprArg Third, SourceLocation RParenLoc, Stmt *Body); ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc, Expr *collection); StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc, Stmt *First, Expr *collection, SourceLocation RParenLoc); StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body); enum BuildForRangeKind { /// 
Initial building of a for-range statement. BFRK_Build, /// Instantiation or recovery rebuild of a for-range statement. Don't /// attempt any typo-correction. BFRK_Rebuild, /// Determining whether a for-range statement could be built. Avoid any /// unnecessary or irreversible actions. BFRK_Check }; StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, Stmt *LoopVar, SourceLocation ColonLoc, Expr *Collection, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, SourceLocation ColonLoc, Stmt *RangeDecl, Stmt *Begin, Stmt *End, Expr *Cond, Expr *Inc, Stmt *LoopVarDecl, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body); StmtResult ActOnGotoStmt(SourceLocation GotoLoc, SourceLocation LabelLoc, LabelDecl *TheDecl); StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc, Expr *DestExp); StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope); StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope); void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, unsigned NumParams); typedef std::pair<StringRef, QualType> CapturedParamNameType; void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, ArrayRef<CapturedParamNameType> Params, unsigned OpenMPCaptureLevel = 0); StmtResult ActOnCapturedRegionEnd(Stmt *S); void ActOnCapturedRegionError(); RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD, SourceLocation Loc, unsigned NumParams); enum CopyElisionSemanticsKind { CES_Strict = 0, CES_AllowParameters = 1, CES_AllowDifferentTypes = 2, CES_AllowExceptionVariables = 4, CES_AllowRValueReferenceType = 8, CES_ImplicitlyMovableCXX11CXX14CXX17 = (CES_AllowParameters | CES_AllowDifferentTypes), CES_ImplicitlyMovableCXX20 = (CES_AllowParameters | CES_AllowDifferentTypes | CES_AllowExceptionVariables | CES_AllowRValueReferenceType), }; VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E, CopyElisionSemanticsKind CESK); bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD, CopyElisionSemanticsKind CESK); StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, Scope *CurScope); StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple, bool IsVolatile, unsigned NumOutputs, unsigned NumInputs, IdentifierInfo **Names, MultiExprArg Constraints, MultiExprArg Exprs, Expr *AsmString, MultiExprArg Clobbers, unsigned NumLabels, SourceLocation RParenLoc); void FillInlineAsmIdentifierInfo(Expr *Res, llvm::InlineAsmIdentifierInfo &Info); ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool IsUnevaluatedContext); bool LookupInlineAsmField(StringRef Base, StringRef Member, unsigned &Offset, SourceLocation AsmLoc); ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member, SourceLocation AsmLoc); StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc, ArrayRef<Token> AsmToks, StringRef AsmString, unsigned NumOutputs, unsigned NumInputs, ArrayRef<StringRef> Constraints, ArrayRef<StringRef> Clobbers, ArrayRef<Expr*> Exprs, SourceLocation EndLoc); LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName, 
SourceLocation Location, bool AlwaysCreate); VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id, bool Invalid = false); Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D); StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen, Decl *Parm, Stmt *Body); StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body); StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try, MultiStmtArg Catch, Stmt *Finally); StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw); StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw, Scope *CurScope); ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc, Expr *operand); StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc, Expr *SynchExpr, Stmt *SynchBody); StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body); VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id); Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D); StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc, Decl *ExDecl, Stmt *HandlerBlock); StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock, ArrayRef<Stmt *> Handlers); StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ? SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); StmtResult ActOnSEHExceptBlock(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); void ActOnStartSEHFinallyBlock(); void ActOnAbortSEHFinallyBlock(); StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block); StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope); void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock); bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const; /// If it's a file scoped decl that must warn if not used, keep track /// of it. void MarkUnusedFileScopedDecl(const DeclaratorDecl *D); /// DiagnoseUnusedExprResult - If the statement passed in is an expression /// whose result is unused, warn. void DiagnoseUnusedExprResult(const Stmt *S); void DiagnoseUnusedNestedTypedefs(const RecordDecl *D); void DiagnoseUnusedDecl(const NamedDecl *ND); /// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null /// statement as a \p Body, and it is located on the same line. /// /// This helps prevent bugs due to typos, such as: /// if (condition); /// do_stuff(); void DiagnoseEmptyStmtBody(SourceLocation StmtLoc, const Stmt *Body, unsigned DiagID); /// Warn if a for/while loop statement \p S, which is followed by /// \p PossibleBody, has a suspicious null statement as a body. void DiagnoseEmptyLoopBody(const Stmt *S, const Stmt *PossibleBody); /// Warn if a value is moved to itself. void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, SourceLocation OpLoc); /// Warn if we're implicitly casting from a _Nullable pointer type to a /// _Nonnull one. void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType, SourceLocation Loc); /// Warn when implicitly casting 0 to nullptr. 
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E); ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) { return DelayedDiagnostics.push(pool); } void PopParsingDeclaration(ParsingDeclState state, Decl *decl); typedef ProcessingContextState ParsingClassState; ParsingClassState PushParsingClass() { ParsingClassDepth++; return DelayedDiagnostics.pushUndelayed(); } void PopParsingClass(ParsingClassState state) { ParsingClassDepth--; DelayedDiagnostics.popUndelayed(state); } void redelayDiagnostics(sema::DelayedDiagnosticPool &pool); void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs, const ObjCInterfaceDecl *UnknownObjCClass, bool ObjCPropertyAccess, bool AvoidPartialAvailabilityChecks = false, ObjCInterfaceDecl *ClassReceiver = nullptr); bool makeUnavailableInSystemHeader(SourceLocation loc, UnavailableAttr::ImplicitReason reason); /// Issue any -Wunguarded-availability warnings in \c FD void DiagnoseUnguardedAvailabilityViolations(Decl *FD); void handleDelayedAvailabilityCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); //===--------------------------------------------------------------------===// // Expression Parsing Callbacks: SemaExpr.cpp. bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid); bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs, const ObjCInterfaceDecl *UnknownObjCClass = nullptr, bool ObjCPropertyAccess = false, bool AvoidPartialAvailabilityChecks = false, ObjCInterfaceDecl *ClassReciever = nullptr); void NoteDeletedFunction(FunctionDecl *FD); void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD); bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD, ObjCMethodDecl *Getter, SourceLocation Loc); void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc, ArrayRef<Expr *> Args); void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl }; void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); void PopExpressionEvaluationContext(); void DiscardCleanupsInEvaluationContext(); ExprResult TransformToPotentiallyEvaluated(Expr *E); ExprResult HandleExprEvaluationContextForTypeof(Expr *E); ExprResult CheckUnevaluatedOperand(Expr *E); void CheckUnusedVolatileAssignment(Expr *E); ExprResult ActOnConstantExpression(ExprResult Res); // Functions for marking a declaration referenced. These functions also // contain the relevant logic for marking if a reference to a function or // variable is an odr-use (in the C++11 sense). There are separate variants // for expressions referring to a decl; these exist because odr-use marking // needs to be delayed for some constant variables when we build one of the // named expressions. // // MightBeOdrUse indicates whether the use could possibly be an odr-use, and // should usually be true. This only needs to be set to false if the lack of // odr-use cannot be determined from the current context (for instance, // because the name denotes a virtual function and was written without an // explicit nested-name-specifier). 
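// Example (illustrative sketch): marking a callee referenced per the notes
// above. `S`, `Loc`, and `Callee` are assumed; the second call shows the rare
// case where the use is known not to be an odr-use (e.g. a virtual call
// written without an explicit nested-name-specifier).
//
//   S.MarkFunctionReferenced(Loc, Callee);                        // may odr-use
//   S.MarkFunctionReferenced(Loc, Callee, /*MightBeOdrUse=*/false);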
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse); void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func, bool MightBeOdrUse = true); void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var); void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr); void MarkMemberReferenced(MemberExpr *E); void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E); void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc, unsigned CapturingScopeIndex); ExprResult CheckLValueToRValueConversionOperand(Expr *E); void CleanupVarDeclMarking(); enum TryCaptureKind { TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef }; /// Try to capture the given variable. /// /// \param Var The variable to capture. /// /// \param Loc The location at which the capture occurs. /// /// \param Kind The kind of capture, which may be implicit (for either a /// block or a lambda), or explicit by-value or by-reference (for a lambda). /// /// \param EllipsisLoc The location of the ellipsis, if one is provided in /// an explicit lambda capture. /// /// \param BuildAndDiagnose Whether we are actually supposed to add the /// captures or diagnose errors. If false, this routine merely check whether /// the capture can occur without performing the capture itself or complaining /// if the variable cannot be captured. /// /// \param CaptureType Will be set to the type of the field used to capture /// this variable in the innermost block or lambda. Only valid when the /// variable can be captured. /// /// \param DeclRefType Will be set to the type of a reference to the capture /// from within the current scope. Only valid when the variable can be /// captured. /// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// variables that may or may not be used in certain specializations of /// a nested generic lambda. /// /// \returns true if an error occurred (i.e., the variable cannot be /// captured) and false if the capture succeeded. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind, SourceLocation EllipsisLoc, bool BuildAndDiagnose, QualType &CaptureType, QualType &DeclRefType, const unsigned *const FunctionScopeIndexToStopAt); /// Try to capture the given variable. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind = TryCapture_Implicit, SourceLocation EllipsisLoc = SourceLocation()); /// Checks if the variable must be captured. bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc); /// Given a variable, determine the type that a reference to that /// variable will have in the given scope. QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc); /// Mark all of the declarations referenced within a particular AST node as /// referenced. Used when template instantiation instantiates a non-dependent /// type -- entities referenced by the type are now referenced. void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T); void MarkDeclarationsReferencedInExpr(Expr *E, bool SkipLocalVariables = false); /// Try to recover by turning the given expression into a /// call. Returns true if recovery was attempted or an error was /// emitted; this may also leave the ExprResult invalid. 
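// Example (illustrative sketch): probing whether a variable can be captured
// without actually recording the capture, per the BuildAndDiagnose note
// above. `S`, `Var`, and `Loc` are assumed to be supplied by the caller.
//
//   QualType CaptureType, DeclRefType;
//   bool CannotCapture = S.tryCaptureVariable(
//       Var, Loc, Sema::TryCapture_Implicit, /*EllipsisLoc=*/SourceLocation(),
//       /*BuildAndDiagnose=*/false, CaptureType, DeclRefType,
//       /*FunctionScopeIndexToStopAt=*/nullptr);
//   if (!CannotCapture)
//     (void)DeclRefType;  // the type a reference to `Var` would have here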
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD, bool ForceComplain = false, bool (*IsPlausibleResult)(QualType) = nullptr); /// Figure out if an expression could be turned into a call. bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy, UnresolvedSetImpl &NonTemplateOverloads); /// Try to convert an expression \p E to type \p Ty. Returns the result of the /// conversion. ExprResult tryConvertExprToType(Expr *E, QualType Ty); /// Conditionally issue a diagnostic based on the current /// evaluation context. /// /// \param Statement If Statement is non-null, delay reporting the /// diagnostic until the function body is parsed, and then do a basic /// reachability analysis to determine if the statement is reachable. /// If it is unreachable, the diagnostic will not be emitted. bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement, const PartialDiagnostic &PD); /// Similar, but diagnostic is only produced if all the specified statements /// are reachable. bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts, const PartialDiagnostic &PD); // Primary Expressions. SourceRange getExprRange(Expr *E) const; ExprResult ActOnIdExpression( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand, CorrectionCandidateCallback *CCC = nullptr, bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr); void DecomposeUnqualifiedId(const UnqualifiedId &Id, TemplateArgumentListInfo &Buffer, DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *&TemplateArgs); bool DiagnoseDependentMemberLookup(LookupResult &R); bool DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R, CorrectionCandidateCallback &CCC, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr); DeclResult LookupIvarInObjCMethod(LookupResult &Lookup, Scope *S, IdentifierInfo *II); ExprResult BuildIvarRefExpr(Scope *S, SourceLocation Loc, ObjCIvarDecl *IV); ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S, IdentifierInfo *II, bool AllowBuiltinCreation=false); ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, bool isAddressOfOperand, const TemplateArgumentListInfo *TemplateArgs); /// If \p D cannot be odr-used in the current expression evaluation context, /// return a reason explaining why. Otherwise, return NOUR_None. 
NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D); DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, SourceLocation Loc, const CXXScopeSpec *SS = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, const CXXScopeSpec *SS = nullptr, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, NestedNameSpecifierLoc NNS, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); ExprResult BuildAnonymousStructUnionMemberReference( const CXXScopeSpec &SS, SourceLocation nameLoc, IndirectFieldDecl *indirectField, DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none), Expr *baseObjectExpr = nullptr, SourceLocation opLoc = SourceLocation()); ExprResult BuildPossibleImplicitMemberExpr( const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, UnresolvedLookupExpr *AsULE = nullptr); ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, bool IsDefiniteInstance, const Scope *S); bool UseArgumentDependentLookup(const CXXScopeSpec &SS, const LookupResult &R, bool HasTrailingLParen); ExprResult BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, bool IsAddressOfOperand, const Scope *S, TypeSourceInfo **RecoveryTSI = nullptr); ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R, bool NeedsADL, bool AcceptInvalidDecl = false); ExprResult BuildDeclarationNameExpr( const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr, bool AcceptInvalidDecl = false); ExprResult BuildLiteralOperatorCall(LookupResult &R, DeclarationNameInfo &SuffixInfo, ArrayRef<Expr *> Args, SourceLocation LitEndLoc, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); ExprResult BuildPredefinedExpr(SourceLocation Loc, PredefinedExpr::IdentKind IK); ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind); ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val); bool CheckLoopHintExpr(Expr *E, SourceLocation Loc); ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnCharacterConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E); ExprResult ActOnParenListExpr(SourceLocation L, SourceLocation R, MultiExprArg Val); /// ActOnStringLiteral - The specified tokens were lexed as pasted string /// fragments (e.g. "foo" "bar" L"baz"). 
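// Example (illustrative sketch): synthesizing simple expressions with the
// helpers above, e.g. when a transformation needs an integer literal or a
// reference to a known variable. `S`, `Loc`, and `VD` (a VarDecl*) are
// assumptions.
//
//   ExprResult Lit = S.ActOnIntegerConstant(Loc, /*Val=*/42);
//   DeclRefExpr *Ref = S.BuildDeclRefExpr(
//       VD, VD->getType().getNonReferenceType(), VK_LValue, Loc);
//   if (!Lit.isInvalid() && Ref) {
//     // use Lit.get() and Ref
//   }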
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope = nullptr); ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<ParsedType> ArgTypes, ArrayRef<Expr *> ArgExprs); ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<TypeSourceInfo *> Types, ArrayRef<Expr *> Exprs); // Binary/Unary Operators. 'Tok' is the token for the operator. ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *InputExpr); ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *Input); ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Op, Expr *Input); bool isQualifiedMemberAccess(Expr *E); QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc); ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, SourceRange R); ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, bool IsType, void *TyOrEx, SourceRange ArgRange); ExprResult CheckPlaceholderExpr(Expr *E); bool CheckVecStepExpr(Expr *E); bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind); bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc, SourceRange ExprRange, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnSizeofParameterPackExpr(Scope *S, SourceLocation OpLoc, IdentifierInfo &Name, SourceLocation NameLoc, SourceLocation RParenLoc); ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Kind, Expr *Input); ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult CreateBuiltinMatrixSubscriptExpr(Expr *Base, Expr *RowIdx, Expr *ColumnIdx, SourceLocation RBLoc); ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc, Expr *LowerBound, SourceLocation ColonLocFirst, SourceLocation ColonLocSecond, Expr *Length, Expr *Stride, SourceLocation RBLoc); ExprResult ActOnOMPArrayShapingExpr(Expr *Base, SourceLocation LParenLoc, SourceLocation RParenLoc, ArrayRef<Expr *> Dims, ArrayRef<SourceRange> Brackets); /// Data structure for iterator expression. struct OMPIteratorData { IdentifierInfo *DeclIdent = nullptr; SourceLocation DeclIdentLoc; ParsedType Type; OMPIteratorExpr::IteratorRange Range; SourceLocation AssignLoc; SourceLocation ColonLoc; SourceLocation SecColonLoc; }; ExprResult ActOnOMPIteratorExpr(Scope *S, SourceLocation IteratorKwLoc, SourceLocation LLoc, SourceLocation RLoc, ArrayRef<OMPIteratorData> Data); // This struct is for use by ActOnMemberAccess to allow // BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after // changing the access operator from a '.' to a '->' (to see if that is the // change needed to fix an error about an unknown member, e.g. when the class // defines a custom operator->). 
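// For illustration only (invented names), the kind of class this recovery is
// aimed at: member access through '->' dispatches via the user-declared
// operator->, so "h.x" can be retried as "h->x".
//
//   struct Payload { int x; };
//   struct Handle {
//     Payload p;
//     Payload *operator->() { return &p; }
//   };
//   int read(Handle h) { return h->x; }   // goes through Handle::operator->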
struct ActOnMemberAccessExtraArgs { Scope *S; UnqualifiedId &Id; Decl *ObjCImpDecl; }; ExprResult BuildMemberReferenceExpr( Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, bool SuppressQualifierCheck = false, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, FieldDecl *Field, DeclAccessPair FoundDecl, const DeclarationNameInfo &MemberNameInfo); ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow); bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType, const CXXScopeSpec &SS, const LookupResult &R); ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Member, Decl *ObjCImpDecl); MemberExpr * BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec *SS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); MemberExpr * BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); void ActOnDefaultCtorInitializers(Decl *CDtorDecl); bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn, FunctionDecl *FDecl, const FunctionProtoType *Proto, ArrayRef<Expr *> Args, SourceLocation RParenLoc, bool ExecConfig = false); void CheckStaticArrayArgument(SourceLocation CallLoc, ParmVarDecl *Param, const Expr *ArgExpr); /// ActOnCallExpr - Handle a call to Fn with the specified array of arguments. /// This provides the location of the left/right parens and a list of comma /// locations. 
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig = nullptr); ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig = nullptr, bool IsExecConfig = false, bool AllowRecovery = false); enum class AtomicArgumentOrder { API, AST }; ExprResult BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange, SourceLocation RParenLoc, MultiExprArg Args, AtomicExpr::AtomicOp Op, AtomicArgumentOrder ArgOrder = AtomicArgumentOrder::API); ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc, ArrayRef<Expr *> Arg, SourceLocation RParenLoc, Expr *Config = nullptr, bool IsExecConfig = false, ADLCallKind UsesADL = ADLCallKind::NotADL); ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc, MultiExprArg ExecConfig, SourceLocation GGGLoc); ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc, Declarator &D, ParsedType &Ty, SourceLocation RParenLoc, Expr *CastExpr); ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc, TypeSourceInfo *Ty, SourceLocation RParenLoc, Expr *Op); CastKind PrepareScalarCast(ExprResult &src, QualType destType); /// Build an altivec or OpenCL literal. ExprResult BuildVectorLiteral(SourceLocation LParenLoc, SourceLocation RParenLoc, Expr *E, TypeSourceInfo *TInfo); ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME); ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc, Expr *InitExpr); ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo, SourceLocation RParenLoc, Expr *LiteralExpr); ExprResult ActOnInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList, SourceLocation RBraceLoc); ExprResult BuildInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList, SourceLocation RBraceLoc); ExprResult ActOnDesignatedInitializer(Designation &Desig, SourceLocation EqualOrColonLoc, bool GNUSyntax, ExprResult Init); private: static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind); public: ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc, tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr); ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr); ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr); void LookupBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc, UnresolvedSetImpl &Functions); void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc); /// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null /// in the case of a the GNU conditional expr extension. ExprResult ActOnConditionalOp(SourceLocation QuestionLoc, SourceLocation ColonLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr); /// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo". ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc, LabelDecl *TheDecl); void ActOnStartStmtExpr(); ExprResult ActOnStmtExpr(Scope *S, SourceLocation LPLoc, Stmt *SubStmt, SourceLocation RPLoc); ExprResult BuildStmtExpr(SourceLocation LPLoc, Stmt *SubStmt, SourceLocation RPLoc, unsigned TemplateDepth); // Handle the final expression in a statement expression. 
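// For illustration only (an added example), the GNU statement-expression form
// whose final expression is handled here; the value of the whole expression is
// the value of that last statement.
//
//   int twice(int x) {
//     return ({ int t = x; t + t; });   // the statement expression yields t + t
//   }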
ExprResult ActOnStmtExprResult(ExprResult E); void ActOnStmtExprError(); // __builtin_offsetof(type, identifier(.identifier|[expr])*) struct OffsetOfComponent { SourceLocation LocStart, LocEnd; bool isBrackets; // true if [expr], false if .ident union { IdentifierInfo *IdentInfo; Expr *E; } U; }; /// __builtin_offsetof(type, a.b[123][456].c) ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc, TypeSourceInfo *TInfo, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); ExprResult ActOnBuiltinOffsetOf(Scope *S, SourceLocation BuiltinLoc, SourceLocation TypeLoc, ParsedType ParsedArgTy, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); // __builtin_choose_expr(constExpr, expr1, expr2) ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr, SourceLocation RPLoc); // __builtin_va_arg(expr, type) ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty, SourceLocation RPLoc); ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E, TypeSourceInfo *TInfo, SourceLocation RPLoc); // __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(), // __builtin_COLUMN() ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc); // Build a potentially resolved SourceLocExpr. ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc, DeclContext *ParentContext); // __null ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc); bool CheckCaseExpression(Expr *E); /// Describes the result of an "if-exists" condition check. enum IfExistsResult { /// The symbol exists. IER_Exists, /// The symbol does not exist. IER_DoesNotExist, /// The name is a dependent name, so the results will differ /// from one instantiation to the next. IER_Dependent, /// An error occurred. IER_Error }; IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS, const DeclarationNameInfo &TargetNameInfo); IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name); StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, NestedNameSpecifierLoc QualifierLoc, DeclarationNameInfo NameInfo, Stmt *Nested); StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name, Stmt *Nested); //===------------------------- "Block" Extension ------------------------===// /// ActOnBlockStart - This callback is invoked when a block literal is /// started. void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockArguments - This callback allows processing of block arguments. /// If there are no arguments, this is still invoked. void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo, Scope *CurScope); /// ActOnBlockError - If there is an error parsing a block, this callback /// is invoked to pop the information about the block from the action impl. void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockStmtExpr - This is called when the body of a block statement /// literal was successfully completed. ^(int x){...} ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body, Scope *CurScope); //===---------------------------- Clang Extensions ----------------------===// /// __builtin_convertvector(...) 
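/// For illustration only (an added sketch using Clang's ext_vector_type
/// extension), one accepted form: the source vector is converted element-wise
/// to the destination vector type.
/// \code
///   typedef float float4 __attribute__((ext_vector_type(4)));
///   typedef int   int4   __attribute__((ext_vector_type(4)));
///   int4 truncate(float4 v) { return __builtin_convertvector(v, int4); }
/// \endcode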
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); //===---------------------------- OpenCL Features -----------------------===// /// __builtin_astype(...) ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); //===---------------------------- C++ Features --------------------------===// // Act on C++ namespaces Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc, SourceLocation NamespaceLoc, SourceLocation IdentLoc, IdentifierInfo *Ident, SourceLocation LBrace, const ParsedAttributesView &AttrList, UsingDirectiveDecl *&UsingDecl); void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace); NamespaceDecl *getStdNamespace() const; NamespaceDecl *getOrCreateStdNamespace(); NamespaceDecl *lookupStdExperimentalNamespace(); CXXRecordDecl *getStdBadAlloc() const; EnumDecl *getStdAlignValT() const; private: // A cache representing if we've fully checked the various comparison category // types stored in ASTContext. The bit-index corresponds to the integer value // of a ComparisonCategoryType enumerator. llvm::SmallBitVector FullyCheckedComparisonCategories; ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl, CXXScopeSpec &SS, ParsedType TemplateTypeTy, IdentifierInfo *MemberOrBase); public: enum class ComparisonCategoryUsage { /// The '<=>' operator was used in an expression and a builtin operator /// was selected. OperatorInExpression, /// A defaulted 'operator<=>' needed the comparison category. This /// typically only applies to 'std::strong_ordering', due to the implicit /// fallback return value. DefaultedOperator, }; /// Lookup the specified comparison category types in the standard /// library, an check the VarDecls possibly returned by the operator<=> /// builtins for that type. /// /// \return The type of the comparison category type corresponding to the /// specified Kind, or a null type if an error occurs QualType CheckComparisonCategoryType(ComparisonCategoryType Kind, SourceLocation Loc, ComparisonCategoryUsage Usage); /// Tests whether Ty is an instance of std::initializer_list and, if /// it is and Element is not NULL, assigns the element type to Element. bool isStdInitializerList(QualType Ty, QualType *Element); /// Looks for the std::initializer_list template and instantiates it /// with Element, or emits an error if it's not found. /// /// \returns The instantiated template, or null on error. QualType BuildStdInitializerList(QualType Element, SourceLocation Loc); /// Determine whether Ctor is an initializer-list constructor, as /// defined in [dcl.init.list]p2. 
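/// For illustration only (invented names), the shape of such a constructor and
/// why list-initialization selects it.
/// \code
///   #include <initializer_list>
///   struct Buffer {
///     Buffer(std::initializer_list<int> xs) {}   // initializer-list constructor
///     Buffer(int n, int fill = 0) {}             // not one
///   };
///   Buffer b{1, 2, 3};   // list-initialization prefers the initializer-list constructor
/// \endcode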
bool isInitListConstructor(const FunctionDecl *Ctor); Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc, SourceLocation NamespcLoc, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *NamespcName, const ParsedAttributesView &AttrList); void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir); Decl *ActOnNamespaceAliasDef(Scope *CurScope, SourceLocation NamespaceLoc, SourceLocation AliasLoc, IdentifierInfo *Alias, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *Ident); void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow); bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target, const LookupResult &PreviousDecls, UsingShadowDecl *&PrevShadow); UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD, NamedDecl *Target, UsingShadowDecl *PrevDecl); bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc, bool HasTypenameKeyword, const CXXScopeSpec &SS, SourceLocation NameLoc, const LookupResult &Previous); bool CheckUsingDeclQualifier(SourceLocation UsingLoc, bool HasTypename, const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, SourceLocation NameLoc); NamedDecl *BuildUsingDeclaration( Scope *S, AccessSpecifier AS, SourceLocation UsingLoc, bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS, DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc, const ParsedAttributesView &AttrList, bool IsInstantiation); NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom, ArrayRef<NamedDecl *> Expansions); bool CheckInheritingConstructorUsingDecl(UsingDecl *UD); /// Given a derived-class using shadow declaration for a constructor and the /// correspnding base class constructor, find or create the implicit /// synthesized derived class constructor to use for this initialization. CXXConstructorDecl * findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor, ConstructorUsingShadowDecl *DerivedShadow); Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS, SourceLocation UsingLoc, SourceLocation TypenameLoc, CXXScopeSpec &SS, UnqualifiedId &Name, SourceLocation EllipsisLoc, const ParsedAttributesView &AttrList); Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS, MultiTemplateParamsArg TemplateParams, SourceLocation UsingLoc, UnqualifiedId &Name, const ParsedAttributesView &AttrList, TypeResult Type, Decl *DeclFromDeclSpec); /// BuildCXXConstructExpr - Creates a complete call to a constructor, /// including handling of its default argument expressions. /// /// \param ConstructKind - a CXXConstructExpr::ConstructionKind ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl, CXXConstructorDecl *Constructor, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); /// Build a CXXConstructExpr whose constructor has already been resolved if /// it denotes an inherited constructor. ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); // FIXME: Can we remove this and have the above BuildCXXConstructExpr check if // the constructor can be elidable? 
ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field); /// Instantiate or parse a C++ default argument expression as necessary. /// Return true on error. bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating /// the default expr if needed. ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// FinalizeVarWithDestructor - Prepare for calling destructor on the /// constructed variable. void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType); /// Helper class that collects exception specifications for /// implicitly-declared special member functions. class ImplicitExceptionSpecification { // Pointer to allow copying Sema *Self; // We order exception specifications thus: // noexcept is the most restrictive, but is only used in C++11. // throw() comes next. // Then a throw(collected exceptions) // Finally no specification, which is expressed as noexcept(false). // throw(...) is used instead if any called function uses it. ExceptionSpecificationType ComputedEST; llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen; SmallVector<QualType, 4> Exceptions; void ClearExceptions() { ExceptionsSeen.clear(); Exceptions.clear(); } public: explicit ImplicitExceptionSpecification(Sema &Self) : Self(&Self), ComputedEST(EST_BasicNoexcept) { if (!Self.getLangOpts().CPlusPlus11) ComputedEST = EST_DynamicNone; } /// Get the computed exception specification type. ExceptionSpecificationType getExceptionSpecType() const { assert(!isComputedNoexcept(ComputedEST) && "noexcept(expr) should not be a possible result"); return ComputedEST; } /// The number of exceptions in the exception specification. unsigned size() const { return Exceptions.size(); } /// The set of exceptions in the exception specification. const QualType *data() const { return Exceptions.data(); } /// Integrate another called method into the collected data. void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method); /// Integrate an invoked expression into the collected data. void CalledExpr(Expr *E) { CalledStmt(E); } /// Integrate an invoked statement into the collected data. void CalledStmt(Stmt *S); /// Overwrite an EPI's exception specification with this /// computed exception specification. FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const { FunctionProtoType::ExceptionSpecInfo ESI; ESI.Type = getExceptionSpecType(); if (ESI.Type == EST_Dynamic) { ESI.Exceptions = Exceptions; } else if (ESI.Type == EST_None) { /// C++11 [except.spec]p14: /// The exception-specification is noexcept(false) if the set of /// potential exceptions of the special member function contains "any" ESI.Type = EST_NoexceptFalse; ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(), tok::kw_false).get(); } return ESI; } }; /// Evaluate the implicit exception specification for a defaulted /// special member function. void EvaluateImplicitExceptionSpec(SourceLocation Loc, FunctionDecl *FD); /// Check the given noexcept-specifier, convert its expression, and compute /// the appropriate ExceptionSpecificationType. 
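/// For illustration only (invented names), noexcept-specifiers whose operand
/// expression must be converted to a boolean constant.
/// \code
///   void flush() noexcept(sizeof(void *) == 8);
///   template <typename T>
///   void relocate(T &dst, T &src) noexcept(noexcept(T(static_cast<T &&>(src))));
/// \endcode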
ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr, ExceptionSpecificationType &EST); /// Check the given exception-specification and update the /// exception specification information with the results. void checkExceptionSpecification(bool IsTopLevel, ExceptionSpecificationType EST, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr, SmallVectorImpl<QualType> &Exceptions, FunctionProtoType::ExceptionSpecInfo &ESI); /// Determine if we're in a case where we need to (incorrectly) eagerly /// parse an exception specification to work around a libstdc++ bug. bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D); /// Add an exception-specification to the given member function /// (or member function template). The exception-specification was parsed /// after the method itself was declared. void actOnDelayedExceptionSpecification(Decl *Method, ExceptionSpecificationType EST, SourceRange SpecificationRange, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr); class InheritedConstructorInfo; /// Determine if a special member function should have a deleted /// definition when it is defaulted. bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM, InheritedConstructorInfo *ICI = nullptr, bool Diagnose = false); /// Produce notes explaining why a defaulted function was defined as deleted. void DiagnoseDeletedDefaultedFunction(FunctionDecl *FD); /// Declare the implicit default constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// default constructor will be added. /// /// \returns The implicitly-declared default constructor. CXXConstructorDecl *DeclareImplicitDefaultConstructor( CXXRecordDecl *ClassDecl); /// DefineImplicitDefaultConstructor - Checks for feasibility of /// defining this constructor as the default constructor. void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit destructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// destructor will be added. /// /// \returns The implicitly-declared destructor. CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl); /// DefineImplicitDestructor - Checks for feasibility of /// defining this destructor as the default destructor. void DefineImplicitDestructor(SourceLocation CurrentLocation, CXXDestructorDecl *Destructor); /// Build an exception spec for destructors that don't have one. /// /// C++11 says that user-defined destructors with no exception spec get one /// that looks as if the destructor was implicitly declared. void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor); /// Define the specified inheriting constructor. void DefineInheritingConstructor(SourceLocation UseLoc, CXXConstructorDecl *Constructor); /// Declare the implicit copy constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy constructor will be added. /// /// \returns The implicitly-declared copy constructor. CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitCopyConstructor - Checks for feasibility of /// defining this constructor as the copy constructor. void DefineImplicitCopyConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit move constructor for the given class. 
/// /// \param ClassDecl The Class declaration into which the implicit /// move constructor will be added. /// /// \returns The implicitly-declared move constructor, or NULL if it wasn't /// declared. CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitMoveConstructor - Checks for feasibility of /// defining this constructor as the move constructor. void DefineImplicitMoveConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit copy assignment operator for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy assignment operator will be added. /// /// \returns The implicitly-declared copy assignment operator. CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared copy assignment operator. void DefineImplicitCopyAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Declare the implicit move assignment operator for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move assignment operator will be added. /// /// \returns The implicitly-declared move assignment operator, or NULL if it /// wasn't declared. CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared move assignment operator. void DefineImplicitMoveAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Force the declaration of any implicitly-declared members of this /// class. void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class); /// Check a completed declaration of an implicit special member. void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD); /// Determine whether the given function is an implicitly-deleted /// special member function. bool isImplicitlyDeleted(FunctionDecl *FD); /// Check whether 'this' shows up in the type of a static member /// function after the (naturally empty) cv-qualifier-seq would be. /// /// \returns true if an error occurred. bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method); /// Whether this' shows up in the exception specification of a static /// member function. bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method); /// Check whether 'this' shows up in the attributes of the given /// static member function. /// /// \returns true if an error occurred. bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method); /// MaybeBindToTemporary - If the passed in expression has a record type with /// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise /// it simply returns the passed in expression. ExprResult MaybeBindToTemporary(Expr *E); /// Wrap the expression in a ConstantExpr if it is a potential immediate /// invocation. 
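/// For illustration only (invented names), an immediate invocation: a call to
/// a consteval function is evaluated where it appears and folded to a constant.
/// \code
///   consteval int square(int n) { return n * n; }
///   constexpr int nine = square(3);   // immediate invocation
///   int buf[square(4)];               // usable wherever a constant is required
/// \endcode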
ExprResult CheckForImmediateInvocation(ExprResult E, FunctionDecl *Decl); bool CompleteConstructorCall(CXXConstructorDecl *Constructor, QualType DeclInitType, MultiExprArg ArgsPtr, SourceLocation Loc, SmallVectorImpl<Expr *> &ConvertedArgs, bool AllowExplicit = false, bool IsListInitialization = false); ParsedType getInheritingConstructorName(CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo &Name); ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, bool EnteringContext); ParsedType getDestructorName(SourceLocation TildeLoc, IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, ParsedType ObjectType, bool EnteringContext); ParsedType getDestructorTypeForDecltype(const DeclSpec &DS, ParsedType ObjectType); // Checks that reinterpret casts don't have undefined behavior. void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType, bool IsDereference, SourceRange Range); /// ActOnCXXNamedCast - Parse /// {dynamic,static,reinterpret,const,addrspace}_cast's. ExprResult ActOnCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, SourceLocation LAngleBracketLoc, Declarator &D, SourceLocation RAngleBracketLoc, SourceLocation LParenLoc, Expr *E, SourceLocation RParenLoc); ExprResult BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, TypeSourceInfo *Ty, Expr *E, SourceRange AngleBrackets, SourceRange Parens); ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl, ExprResult Operand, SourceLocation RParenLoc); ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI, Expr *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXTypeid - Parse typeid( something ). ExprResult ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXUuidof - Parse __uuidof( something ). ExprResult ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); /// Handle a C++1z fold-expression: ( expr op ... op expr ). ExprResult ActOnCXXFoldExpr(Scope *S, SourceLocation LParenLoc, Expr *LHS, tok::TokenKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc); ExprResult BuildCXXFoldExpr(UnresolvedLookupExpr *Callee, SourceLocation LParenLoc, Expr *LHS, BinaryOperatorKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc, Optional<unsigned> NumExpansions); ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc, BinaryOperatorKind Operator); //// ActOnCXXThis - Parse 'this' pointer. ExprResult ActOnCXXThis(SourceLocation loc); /// Build a CXXThisExpr and mark it referenced in the current context. Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit); void MarkThisReferenced(CXXThisExpr *This); /// Try to retrieve the type of the 'this' pointer. /// /// \returns The type of 'this', if possible. Otherwise, returns a NULL type. 
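/// For illustration only (invented names), how cv-qualifiers on the member
/// function show up in the type of 'this'.
/// \code
///   #include <type_traits>
///   struct Session {
///     void inspect() const { static_assert(std::is_same_v<decltype(this), const Session *>); }
///     void mutate()        { static_assert(std::is_same_v<decltype(this), Session *>); }
///   };
/// \endcode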
QualType getCurrentThisType(); /// When non-NULL, the C++ 'this' expression is allowed despite the /// current context not being a non-static member function. In such cases, /// this provides the type used for 'this'. QualType CXXThisTypeOverride; /// RAII object used to temporarily allow the C++ 'this' expression /// to be used, with the given qualifiers on the current class type. class CXXThisScopeRAII { Sema &S; QualType OldCXXThisTypeOverride; bool Enabled; public: /// Introduce a new scope where 'this' may be allowed (when enabled), /// using the given declaration (which is either a class template or a /// class) along with the given qualifiers. /// along with the qualifiers placed on '*this'. CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals, bool Enabled = true); ~CXXThisScopeRAII(); }; /// Make sure the value of 'this' is actually available in the current /// context, if it is a potentially evaluated context. /// /// \param Loc The location at which the capture of 'this' occurs. /// /// \param Explicit Whether 'this' is explicitly captured in a lambda /// capture list. /// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// 'this' that may or may not be used in certain specializations of /// a nested generic lambda (depending on whether the name resolves to /// a non-static member function or a static function). /// \return returns 'true' if failed, 'false' if success. bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false, bool BuildAndDiagnose = true, const unsigned *const FunctionScopeIndexToStopAt = nullptr, bool ByCopy = false); /// Determine whether the given type is the type of *this that is used /// outside of the body of a member function for a type that is currently /// being defined. bool isThisOutsideMemberFunctionBody(QualType BaseType); /// ActOnCXXBoolLiteral - Parse {true,false} literals. ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); /// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals. ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); ExprResult ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs, SourceLocation AtLoc, SourceLocation RParen); /// ActOnCXXNullPtrLiteral - Parse 'nullptr'. ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc); //// ActOnCXXThrow - Parse throw expressions. ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr); ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex, bool IsThrownVarInScope); bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E); /// ActOnCXXTypeConstructExpr - Parse construction of a specified type. /// Can be interpreted either as function-style casting ("int(x)") /// or class type construction ("ClassType(x,y,z)") /// or creation of a value-initialized type ("int()"). ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep, SourceLocation LParenOrBraceLoc, MultiExprArg Exprs, SourceLocation RParenOrBraceLoc, bool ListInitialization); ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc, bool ListInitialization); /// ActOnCXXNew - Parsed a C++ 'new' expression. 
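/// For illustration only (invented names), the forms of 'new' this covers,
/// including placement arguments.
/// \code
///   #include <new>
///   struct Widget { int v; };
///   Widget *make(void *slot) {
///     Widget *heap   = new Widget{1};          // ordinary new
///     Widget *placed = new (slot) Widget{2};   // placement form declared in <new>
///     delete heap;
///     return placed;                           // later destroyed via placed->~Widget()
///   }
/// \endcode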
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, Declarator &D, Expr *Initializer); ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, QualType AllocType, TypeSourceInfo *AllocTypeInfo, Optional<Expr *> ArraySize, SourceRange DirectInitRange, Expr *Initializer); /// Determine whether \p FD is an aligned allocation or deallocation /// function that is unavailable. bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const; /// Produce diagnostics if \p FD is an aligned allocation or deallocation /// function that is unavailable. void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD, SourceLocation Loc); bool CheckAllocatedType(QualType AllocType, SourceLocation Loc, SourceRange R); /// The scope in which to find allocation functions. enum AllocationFunctionScope { /// Only look for allocation functions in the global scope. AFS_Global, /// Only look for allocation functions in the scope of the /// allocated class. AFS_Class, /// Look for allocation functions in both the global scope /// and in the scope of the allocated class. AFS_Both }; /// Finds the overloads of operator new and delete that are appropriate /// for the allocation. bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range, AllocationFunctionScope NewScope, AllocationFunctionScope DeleteScope, QualType AllocType, bool IsArray, bool &PassAlignment, MultiExprArg PlaceArgs, FunctionDecl *&OperatorNew, FunctionDecl *&OperatorDelete, bool Diagnose = true); void DeclareGlobalNewDelete(); void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return, ArrayRef<QualType> Params); bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD, DeclarationName Name, FunctionDecl* &Operator, bool Diagnose = true); FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc, bool CanProvideSize, bool Overaligned, DeclarationName Name); FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc, CXXRecordDecl *RD); /// ActOnCXXDelete - Parsed a C++ 'delete' expression ExprResult ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal, bool ArrayForm, Expr *Operand); void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc, bool IsDelete, bool CallCanBeVirtual, bool WarnOnNonAbstractTypes, SourceLocation DtorLoc); ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen, Expr *Operand, SourceLocation RParen); ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand, SourceLocation RParen); /// Parsed one of the type trait support pseudo-functions. ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<ParsedType> Args, SourceLocation RParenLoc); ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<TypeSourceInfo *> Args, SourceLocation RParenLoc); /// ActOnArrayTypeTrait - Parsed one of the binary type trait support /// pseudo-functions. ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, ParsedType LhsTy, Expr *DimExpr, SourceLocation RParen); ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, TypeSourceInfo *TSInfo, Expr *DimExpr, SourceLocation RParen); /// ActOnExpressionTrait - Parsed one of the unary type trait support /// pseudo-functions. 
ExprResult ActOnExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult BuildExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, ParsedType &ObjectType, bool &MayBePseudoDestructor); ExprResult BuildPseudoDestructorExpr(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, const CXXScopeSpec &SS, TypeSourceInfo *ScopeType, SourceLocation CCLoc, SourceLocation TildeLoc, PseudoDestructorTypeStorage DestroyedType); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, UnqualifiedId &FirstTypeName, SourceLocation CCLoc, SourceLocation TildeLoc, UnqualifiedId &SecondTypeName); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, SourceLocation TildeLoc, const DeclSpec& DS); /// MaybeCreateExprWithCleanups - If the current full-expression /// requires any cleanups, surround it with a ExprWithCleanups node. /// Otherwise, just returns the passed-in expression. Expr *MaybeCreateExprWithCleanups(Expr *SubExpr); Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt); ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr); MaterializeTemporaryExpr * CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary, bool BoundToLvalueReference); ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) { return ActOnFinishFullExpr( Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue); } ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC, bool DiscardedValue, bool IsConstexpr = false); StmtResult ActOnFinishFullStmt(Stmt *Stmt); // Marks SS invalid if it represents an incomplete type. bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC); DeclContext *computeDeclContext(QualType T); DeclContext *computeDeclContext(const CXXScopeSpec &SS, bool EnteringContext = false); bool isDependentScopeSpecifier(const CXXScopeSpec &SS); CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS); /// The parser has parsed a global nested-name-specifier '::'. /// /// \param CCLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS); /// The parser has parsed a '__super' nested-name-specifier. /// /// \param SuperLoc The location of the '__super' keyword. /// /// \param ColonColonLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc, SourceLocation ColonColonLoc, CXXScopeSpec &SS); bool isAcceptableNestedNameSpecifier(const NamedDecl *SD, bool *CanCorrect = nullptr); NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS); /// Keeps information about an identifier in a nested-name-spec. /// struct NestedNameSpecInfo { /// The type of the object, if we're parsing nested-name-specifier in /// a member access expression. ParsedType ObjectType; /// The identifier preceding the '::'. IdentifierInfo *Identifier; /// The location of the identifier. 
SourceLocation IdentifierLoc; /// The location of the '::'. SourceLocation CCLoc; /// Creates info object for the most typical case. NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType()) : ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, QualType ObjectType) : ObjectType(ParsedType::make(ObjectType)), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } }; bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo); bool BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, NamedDecl *ScopeLookupResult, bool ErrorRecoveryLookup, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); /// The parser has parsed a nested-name-specifier 'identifier::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param IdInfo Parser information about an identifier in the /// nested-name-spec. /// /// \param EnteringContext Whether we're entering the context nominated by /// this nested-name-specifier. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param ErrorRecoveryLookup If true, then this method is called to improve /// error recovery. In this case do not emit error message. /// /// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':' /// are allowed. The bool value pointed by this parameter is set to 'true' /// if the identifier is treated as if it was followed by ':', not '::'. /// /// \param OnlyNamespace If true, only considers namespaces in lookup. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, bool ErrorRecoveryLookup = false, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); ExprResult ActOnDecltypeExpression(Expr *E); bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS, const DeclSpec &DS, SourceLocation ColonColonLoc); bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo, bool EnteringContext); /// The parser has parsed a nested-name-specifier /// 'template[opt] template-name < template-args >::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param TemplateKWLoc the location of the 'template' keyword, if any. /// \param TemplateName the template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). /// \param CCLoc The location of the '::'. /// /// \param EnteringContext Whether we're entering the context of the /// nested-name-specifier. /// /// /// \returns true if an error occurred, false otherwise. 
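/// For illustration only (invented names), a nested-name-specifier that ends
/// in a template-id.
/// \code
///   template <typename T> struct Box { using value_type = T; };
///   Box<int>::value_type v = 0;   // 'Box<int>::' is the nested-name-specifier
/// \endcode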
bool ActOnCXXNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, SourceLocation CCLoc, bool EnteringContext); /// Given a C++ nested-name-specifier, produce an annotation value /// that the parser can use later to reconstruct the given /// nested-name-specifier. /// /// \param SS A nested-name-specifier. /// /// \returns A pointer containing all of the information in the /// nested-name-specifier \p SS. void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS); /// Given an annotation pointer for a nested-name-specifier, restore /// the nested-name-specifier structure. /// /// \param Annotation The annotation pointer, produced by /// \c SaveNestedNameSpecifierAnnotation(). /// /// \param AnnotationRange The source range corresponding to the annotation. /// /// \param SS The nested-name-specifier that will be updated with the contents /// of the annotation pointer. void RestoreNestedNameSpecifierAnnotation(void *Annotation, SourceRange AnnotationRange, CXXScopeSpec &SS); bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global /// scope or nested-name-specifier) is parsed, part of a declarator-id. /// After this method is called, according to [C++ 3.4.3p3], names should be /// looked up in the declarator-id's scope, until the declarator is parsed and /// ActOnCXXExitDeclaratorScope is called. /// The 'SS' should be a non-empty valid CXXScopeSpec. bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS); /// ActOnCXXExitDeclaratorScope - Called when a declarator that previously /// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same /// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well. /// Used to indicate that names should revert to being looked up in the /// defining scope. void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an /// initializer for the declaration 'Dcl'. /// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a /// static data member of class X, names should be looked up in the scope of /// class X. void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl); /// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an /// initializer for the declaration 'Dcl'. void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl); /// Create a new lambda closure type. CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange, TypeSourceInfo *Info, bool KnownDependent, LambdaCaptureDefault CaptureDefault); /// Start the definition of a lambda expression. CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange, TypeSourceInfo *MethodType, SourceLocation EndLoc, ArrayRef<ParmVarDecl *> Params, ConstexprSpecKind ConstexprKind, Expr *TrailingRequiresClause); /// Number lambda for linkage purposes if necessary. void handleLambdaNumbering( CXXRecordDecl *Class, CXXMethodDecl *Method, Optional<std::tuple<bool, unsigned, unsigned, Decl *>> Mangling = None); /// Endow the lambda scope info with the relevant properties. 
void buildLambdaScope(sema::LambdaScopeInfo *LSI, CXXMethodDecl *CallOperator, SourceRange IntroducerRange, LambdaCaptureDefault CaptureDefault, SourceLocation CaptureDefaultLoc, bool ExplicitParams, bool ExplicitResultType, bool Mutable); /// Perform initialization analysis of the init-capture and perform /// any implicit conversions such as an lvalue-to-rvalue conversion if /// not being used to initialize a reference. ParsedType actOnLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc, IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) { return ParsedType::make(buildLambdaInitCaptureInitialization( Loc, ByRef, EllipsisLoc, None, Id, InitKind != LambdaCaptureInitKind::CopyInit, Init)); } QualType buildLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit, Expr *&Init); /// Create a dummy variable within the declcontext of the lambda's /// call operator, for name lookup purposes for a lambda init capture. /// /// CodeGen handles emission of lambda captures, ignoring these dummy /// variables appropriately. VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc, QualType InitCaptureType, SourceLocation EllipsisLoc, IdentifierInfo *Id, unsigned InitStyle, Expr *Init); /// Add an init-capture to a lambda scope. void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var); /// Note that we have finished the explicit captures for the /// given lambda. void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI); /// \brief This is called after parsing the explicit template parameter list /// on a lambda (if it exists) in C++2a. void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc, ArrayRef<NamedDecl *> TParams, SourceLocation RAngleLoc, ExprResult RequiresClause); /// Introduce the lambda parameters into scope. void addLambdaParameters( ArrayRef<LambdaIntroducer::LambdaCapture> Captures, CXXMethodDecl *CallOperator, Scope *CurScope); /// Deduce a block or lambda's return type based on the return /// statements present in the body. void deduceClosureReturnType(sema::CapturingScopeInfo &CSI); /// ActOnStartOfLambdaDefinition - This is called just before we start /// parsing the body of a lambda; it analyzes the explicit captures and /// arguments, and sets up various data-structures for the body of the /// lambda. void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, Declarator &ParamInfo, Scope *CurScope); /// ActOnLambdaError - If there is an error parsing a lambda, this callback /// is invoked to pop the information about the lambda. void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope, bool IsInstantiation = false); /// ActOnLambdaExpr - This is called when the body of a lambda expression /// was successfully completed. ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, Scope *CurScope); /// Does copying/destroying the captured variable have side effects? bool CaptureHasSideEffects(const sema::Capture &From); /// Diagnose if an explicit lambda capture is unused. Returns true if a /// diagnostic is emitted. bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange, const sema::Capture &From); /// Build a FieldDecl suitable to hold the given capture. FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture); /// Initialize the given capture with a suitable expression. 
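/// For illustration only (invented names), captures whose fields are
/// initialized from expressions: a plain copy capture and an init-capture.
/// \code
///   int bias(int x) {
///     int base = 10;
///     auto add = [base, offset = base + 5](int y) { return y + base + offset; };
///     return add(x);
///   }
/// \endcode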
ExprResult BuildCaptureInit(const sema::Capture &Capture, SourceLocation ImplicitCaptureLoc, bool IsOpenMPMapping = false); /// Complete a lambda-expression having processed and attached the /// lambda body. ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc, sema::LambdaScopeInfo *LSI); /// Get the return type to use for a lambda's conversion function(s) to /// function pointer type, given the type of the call operator. QualType getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType, CallingConv CC); /// Define the "body" of the conversion from a lambda object to a /// function pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToFunctionPointerConversion( SourceLocation CurrentLoc, CXXConversionDecl *Conv); /// Define the "body" of the conversion from a lambda object to a /// block pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc, CXXConversionDecl *Conv); ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation, SourceLocation ConvLocation, CXXConversionDecl *Conv, Expr *Src); /// Check whether the given expression is a valid constraint expression. /// A diagnostic is emitted if it is not, false is returned, and /// PossibleNonPrimary will be set to true if the failure might be due to a /// non-primary expression being used as an atomic constraint. bool CheckConstraintExpression(const Expr *CE, Token NextToken = Token(), bool *PossibleNonPrimary = nullptr, bool IsTrailingRequiresClause = false); private: /// Caches pairs of template-like decls whose associated constraints were /// checked for subsumption and whether or not the first's constraints did in /// fact subsume the second's. llvm::DenseMap<std::pair<NamedDecl *, NamedDecl *>, bool> SubsumptionCache; /// Caches the normalized associated constraints of declarations (concepts or /// constrained declarations). If an error occurred while normalizing the /// associated constraints of the template or concept, nullptr will be cached /// here. llvm::DenseMap<NamedDecl *, NormalizedConstraint *> NormalizationCache; llvm::ContextualFoldingSet<ConstraintSatisfaction, const ASTContext &> SatisfactionCache; public: const NormalizedConstraint * getNormalizedAssociatedConstraints( NamedDecl *ConstrainedDecl, ArrayRef<const Expr *> AssociatedConstraints); /// \brief Check whether the given declaration's associated constraints are /// at least as constrained than another declaration's according to the /// partial ordering of constraints. /// /// \param Result If no error occurred, receives the result of true if D1 is /// at least constrained than D2, and false otherwise. /// /// \returns true if an error occurred, false otherwise. bool IsAtLeastAsConstrained(NamedDecl *D1, ArrayRef<const Expr *> AC1, NamedDecl *D2, ArrayRef<const Expr *> AC2, bool &Result); /// If D1 was not at least as constrained as D2, but would've been if a pair /// of atomic constraints involved had been declared in a concept and not /// repeated in two separate places in code. 
/// \returns true if such a diagnostic was emitted, false otherwise. bool MaybeEmitAmbiguousAtomicConstraintsDiagnostic(NamedDecl *D1, ArrayRef<const Expr *> AC1, NamedDecl *D2, ArrayRef<const Expr *> AC2); /// \brief Check whether the given list of constraint expressions are /// satisfied (as if in a 'conjunction') given template arguments. /// \param Template the template-like entity that triggered the constraints /// check (either a concept or a constrained entity). /// \param ConstraintExprs a list of constraint expressions, treated as if /// they were 'AND'ed together. /// \param TemplateArgs the list of template arguments to substitute into the /// constraint expression. /// \param TemplateIDRange The source range of the template id that /// caused the constraints check. /// \param Satisfaction if true is returned, will contain details of the /// satisfaction, with enough information to diagnose an unsatisfied /// expression. /// \returns true if an error occurred and satisfaction could not be checked, /// false otherwise. bool CheckConstraintSatisfaction( const NamedDecl *Template, ArrayRef<const Expr *> ConstraintExprs, ArrayRef<TemplateArgument> TemplateArgs, SourceRange TemplateIDRange, ConstraintSatisfaction &Satisfaction); /// \brief Check whether the given non-dependent constraint expression is /// satisfied. Returns false and updates Satisfaction with the satisfaction /// verdict if successful, emits a diagnostic and returns true if an error /// occurred and satisfaction could not be determined. /// /// \returns true if an error occurred, false otherwise. bool CheckConstraintSatisfaction(const Expr *ConstraintExpr, ConstraintSatisfaction &Satisfaction); /// Check whether the given function decl's trailing requires clause is /// satisfied, if any. Returns false and updates Satisfaction with the /// satisfaction verdict if successful, emits a diagnostic and returns true if /// an error occurred and satisfaction could not be determined. /// /// \returns true if an error occurred, false otherwise. bool CheckFunctionConstraints(const FunctionDecl *FD, ConstraintSatisfaction &Satisfaction, SourceLocation UsageLoc = SourceLocation()); /// \brief Ensure that the given template arguments satisfy the constraints /// associated with the given template, emitting a diagnostic if they do not. /// /// \param Template The template to which the template arguments are being /// provided. /// /// \param TemplateArgs The converted, canonicalized template arguments. /// /// \param TemplateIDRange The source range of the template id that /// caused the constraints check. /// /// \returns true if the constraints are not satisfied or could not be checked /// for satisfaction, false if the constraints are satisfied. bool EnsureTemplateArgumentListConstraints(TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange TemplateIDRange); /// \brief Emit diagnostics explaining why a constraint expression was deemed /// unsatisfied. /// \param First whether this is the first time an unsatisfied constraint is /// diagnosed for this error. void DiagnoseUnsatisfiedConstraint(const ConstraintSatisfaction &Satisfaction, bool First = true); /// \brief Emit diagnostics explaining why a constraint expression was deemed /// unsatisfied. void DiagnoseUnsatisfiedConstraint(const ASTConstraintSatisfaction &Satisfaction, bool First = true); // ParseObjCStringLiteral - Parse Objective-C string literals.
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs, ArrayRef<Expr *> Strings); ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S); /// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the /// numeric literal expression. Type of the expression will be "NSNumber *" /// or "id" if NSNumber is unavailable. ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number); ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc, bool Value); ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements); /// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the /// '@' prefixed parenthesized expression. The type of the expression will /// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type /// of ValueType, which is allowed to be a built-in numeric type, "char *", /// "const char *" or C structure with attribute 'objc_boxable'. ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr); ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr, Expr *IndexExpr, ObjCMethodDecl *getterMethod, ObjCMethodDecl *setterMethod); ExprResult BuildObjCDictionaryLiteral(SourceRange SR, MutableArrayRef<ObjCDictionaryElement> Elements); ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc, TypeSourceInfo *EncodedTypeInfo, SourceLocation RParenLoc); ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl, CXXConversionDecl *Method, bool HadMultipleCandidates); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc, SourceLocation EncodeLoc, SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc); /// ParseObjCSelectorExpression - Build selector expression for \@selector ExprResult ParseObjCSelectorExpression(Selector Sel, SourceLocation AtLoc, SourceLocation SelLoc, SourceLocation LParenLoc, SourceLocation RParenLoc, bool WarnMultipleSelectors); /// ParseObjCProtocolExpression - Build protocol expression for \@protocol ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName, SourceLocation AtLoc, SourceLocation ProtoLoc, SourceLocation LParenLoc, SourceLocation ProtoIdLoc, SourceLocation RParenLoc); //===--------------------------------------------------------------------===// // C++ Declarations // Decl *ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc, Expr *LangStr, SourceLocation LBraceLoc); Decl *ActOnFinishLinkageSpecification(Scope *S, Decl *LinkageSpec, SourceLocation RBraceLoc); //===--------------------------------------------------------------------===// // C++ Classes // CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS); bool isCurrentClassName(const IdentifierInfo &II, Scope *S, const CXXScopeSpec *SS = nullptr); bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS); bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc, SourceLocation ColonLoc, const ParsedAttributesView &Attrs); NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, Expr *BitfieldWidth, const VirtSpecifiers &VS, InClassInitStyle InitStyle); void ActOnStartCXXInClassMemberInitializer(); void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl, SourceLocation EqualLoc, Expr *Init); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, SourceLocation LParenLoc, ArrayRef<Expr *> Args, SourceLocation 
RParenLoc, SourceLocation EllipsisLoc); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *InitList, SourceLocation EllipsisLoc); MemInitResult BuildMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *Init, SourceLocation EllipsisLoc); MemInitResult BuildMemberInitializer(ValueDecl *Member, Expr *Init, SourceLocation IdLoc); MemInitResult BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo, Expr *Init, CXXRecordDecl *ClassDecl, SourceLocation EllipsisLoc); MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init, CXXRecordDecl *ClassDecl); bool SetDelegatingInitializer(CXXConstructorDecl *Constructor, CXXCtorInitializer *Initializer); bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors, ArrayRef<CXXCtorInitializer *> Initializers = None); void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation); /// MarkBaseAndMemberDestructorsReferenced - Given a record decl, /// mark all the non-trivial destructors of its members and bases as /// referenced. void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc, CXXRecordDecl *Record); /// Mark destructors of virtual bases of this class referenced. In the Itanium /// C++ ABI, this is done when emitting a destructor for any non-abstract /// class. In the Microsoft C++ ABI, this is done any time a class's /// destructor is referenced. void MarkVirtualBaseDestructorsReferenced( SourceLocation Location, CXXRecordDecl *ClassDecl, llvm::SmallPtrSetImpl<const RecordType *> *DirectVirtualBases = nullptr); /// Do semantic checks to allow the complete destructor variant to be emitted /// when the destructor is defined in another translation unit. In the Itanium /// C++ ABI, destructor variants are emitted together. In the MS C++ ABI, they /// can be emitted in separate TUs. To emit the complete variant, run a subset /// of the checks performed when emitting a regular destructor. void CheckCompleteDestructorVariant(SourceLocation CurrentLocation, CXXDestructorDecl *Dtor); /// The list of classes whose vtables have been used within /// this translation unit, and the source locations at which the /// first use occurred. typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse; /// The list of vtables that are required but have not yet been /// materialized. SmallVector<VTableUse, 16> VTableUses; /// The set of classes whose vtables have been used within /// this translation unit, and a bit that will be true if the vtable is /// required to be emitted (otherwise, it should be emitted only if needed /// by code generation). llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed; /// Load any externally-stored vtable uses. void LoadExternalVTableUses(); /// Note that the vtable for the given class was used at the /// given location. void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class, bool DefinitionRequired = false); /// Mark the exception specifications of all virtual member functions /// in the given class as needed. void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc, const CXXRecordDecl *RD); /// MarkVirtualMembersReferenced - Will mark all members of the given /// CXXRecordDecl referenced. 
  void MarkVirtualMembersReferenced(SourceLocation Loc,
                                    const CXXRecordDecl *RD,
                                    bool ConstexprOnly = false);

  /// Define all of the vtables that have been used in this
  /// translation unit and reference any virtual members used by those
  /// vtables.
  ///
  /// \returns true if any work was done, false otherwise.
  bool DefineUsedVTables();

  void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);

  void ActOnMemInitializers(Decl *ConstructorDecl, SourceLocation ColonLoc,
                            ArrayRef<CXXCtorInitializer*> MemInits,
                            bool AnyErrors);

  /// Check class-level dllimport/dllexport attribute. The caller must
  /// ensure that referenceDLLExportedClassMethods is called some point later
  /// when all outer classes of Class are complete.
  void checkClassLevelDLLAttribute(CXXRecordDecl *Class);
  void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class);

  void referenceDLLExportedClassMethods();

  void propagateDLLAttrToBaseClassTemplate(
      CXXRecordDecl *Class, Attr *ClassAttr,
      ClassTemplateSpecializationDecl *BaseTemplateSpec,
      SourceLocation BaseLoc);

  /// Add gsl::Pointer attribute to std::container::iterator
  /// \param ND The declaration that introduces the name
  /// std::container::iterator. \param UnderlyingRecord The record named by ND.
  void inferGslPointerAttribute(NamedDecl *ND, CXXRecordDecl *UnderlyingRecord);

  /// Add [[gsl::Owner]] and [[gsl::Pointer]] attributes for std:: types.
  void inferGslOwnerPointerAttribute(CXXRecordDecl *Record);

  /// Add [[gsl::Pointer]] attributes for std:: types.
  void inferGslPointerAttribute(TypedefNameDecl *TD);

  void CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record);

  /// Check that the C++ class annotated with "trivial_abi" satisfies all the
  /// conditions that are needed for the attribute to have an effect.
  void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD);

  void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc,
                                         Decl *TagDecl, SourceLocation LBrac,
                                         SourceLocation RBrac,
                                         const ParsedAttributesView &AttrList);
  void ActOnFinishCXXMemberDecls();
  void ActOnFinishCXXNonNestedClass();

  void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param);
  unsigned ActOnReenterTemplateScope(Decl *Template,
                                     llvm::function_ref<Scope *()> EnterScope);
  void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record);
  void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
  void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param);
  void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record);
  void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
  void ActOnFinishDelayedMemberInitializers(Decl *Record);
  void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
                                CachedTokens &Toks);
  void UnmarkAsLateParsedTemplate(FunctionDecl *FD);
  bool IsInsideALocalClassWithinATemplateFunction();

  Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
                                     Expr *AssertExpr, Expr *AssertMessageExpr,
                                     SourceLocation RParenLoc);
  Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
                                     Expr *AssertExpr,
                                     StringLiteral *AssertMessageExpr,
                                     SourceLocation RParenLoc, bool Failed);

  FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
                                  SourceLocation FriendLoc,
                                  TypeSourceInfo *TSInfo);
  Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
                            MultiTemplateParamsArg TemplateParams);
  NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D,
                                     MultiTemplateParamsArg TemplateParams);

  QualType CheckConstructorDeclarator(Declarator &D, QualType R,
                                      StorageClass& SC);
  void CheckConstructor(CXXConstructorDecl *Constructor);
  QualType
CheckDestructorDeclarator(Declarator &D, QualType R, StorageClass& SC); bool CheckDestructor(CXXDestructorDecl *Destructor); void CheckConversionDeclarator(Declarator &D, QualType &R, StorageClass& SC); Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion); void CheckDeductionGuideDeclarator(Declarator &D, QualType &R, StorageClass &SC); void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD); void CheckExplicitlyDefaultedFunction(Scope *S, FunctionDecl *MD); bool CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM); void CheckDelayedMemberExceptionSpecs(); bool CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *MD, DefaultedComparisonKind DCK); void DeclareImplicitEqualityComparison(CXXRecordDecl *RD, FunctionDecl *Spaceship); void DefineDefaultedComparison(SourceLocation Loc, FunctionDecl *FD, DefaultedComparisonKind DCK); //===--------------------------------------------------------------------===// // C++ Derived Classes // /// ActOnBaseSpecifier - Parsed a base specifier CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class, SourceRange SpecifierRange, bool Virtual, AccessSpecifier Access, TypeSourceInfo *TInfo, SourceLocation EllipsisLoc); BaseResult ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange, ParsedAttributes &Attrs, bool Virtual, AccessSpecifier Access, ParsedType basetype, SourceLocation BaseLoc, SourceLocation EllipsisLoc); bool AttachBaseSpecifiers(CXXRecordDecl *Class, MutableArrayRef<CXXBaseSpecifier *> Bases); void ActOnBaseSpecifiers(Decl *ClassDecl, MutableArrayRef<CXXBaseSpecifier *> Bases); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base, CXXBasePaths &Paths); // FIXME: I don't like this name. void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, SourceLocation Loc, SourceRange Range, CXXCastPath *BasePath = nullptr, bool IgnoreAccess = false); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, unsigned InaccessibleBaseID, unsigned AmbiguousBaseConvID, SourceLocation Loc, SourceRange Range, DeclarationName Name, CXXCastPath *BasePath, bool IgnoreAccess = false); std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths); bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionReturnType - Checks whether the return types are /// covariant, according to C++ [class.virtual]p5. bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionExceptionSpec - Checks whether the exception /// spec is a subset of base spec. bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New, const CXXMethodDecl *Old); bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange); /// CheckOverrideControl - Check C++11 override control semantics. void CheckOverrideControl(NamedDecl *D); /// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was /// not used in the declaration of an overriding method. void DiagnoseAbsenceOfOverrideControl(NamedDecl *D, bool Inconsistent); /// CheckForFunctionMarkedFinal - Checks whether a virtual member function /// overrides a virtual member function marked 'final', according to /// C++11 [class.virtual]p4. 
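  ///
  /// For example (illustrative sketch, hypothetical types):
  /// \code
  ///   struct B { virtual void f() final; };
  ///   struct D : B { void f() override; }; // error: overrides a 'final' function
  /// \endcode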
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New, const CXXMethodDecl *Old); //===--------------------------------------------------------------------===// // C++ Access Control // enum AccessResult { AR_accessible, AR_inaccessible, AR_dependent, AR_delayed }; bool SetMemberAccessSpecifier(NamedDecl *MemberDecl, NamedDecl *PrevMemberDecl, AccessSpecifier LexicalAS); AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E, DeclAccessPair FoundDecl); AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E, DeclAccessPair FoundDecl); AccessResult CheckAllocationAccess(SourceLocation OperatorLoc, SourceRange PlacementRange, CXXRecordDecl *NamingClass, DeclAccessPair FoundDecl, bool Diagnose = true); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, bool IsCopyBindingRefToTemp = false); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, const PartialDiagnostic &PDiag); AccessResult CheckDestructorAccess(SourceLocation Loc, CXXDestructorDecl *Dtor, const PartialDiagnostic &PDiag, QualType objectType = QualType()); AccessResult CheckFriendAccess(NamedDecl *D); AccessResult CheckMemberAccess(SourceLocation UseLoc, CXXRecordDecl *NamingClass, DeclAccessPair Found); AccessResult CheckStructuredBindingMemberAccess(SourceLocation UseLoc, CXXRecordDecl *DecomposedClass, DeclAccessPair Field); AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr, Expr *ArgExpr, DeclAccessPair FoundDecl); AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr, DeclAccessPair FoundDecl); AccessResult CheckBaseClassAccess(SourceLocation AccessLoc, QualType Base, QualType Derived, const CXXBasePath &Path, unsigned DiagID, bool ForceCheck = false, bool ForceUnprivileged = false); void CheckLookupAccess(const LookupResult &R); bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass, QualType BaseType); bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass, DeclAccessPair Found, QualType ObjectType, SourceLocation Loc, const PartialDiagnostic &Diag); bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass, DeclAccessPair Found, QualType ObjectType) { return isMemberAccessibleForDeletion(NamingClass, Found, ObjectType, SourceLocation(), PDiag()); } void HandleDependentAccessCheck(const DependentDiagnostic &DD, const MultiLevelTemplateArgumentList &TemplateArgs); void PerformDependentDiagnostics(const DeclContext *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); /// When true, access checking violations are treated as SFINAE /// failures rather than hard errors. bool AccessCheckingSFINAE; enum AbstractDiagSelID { AbstractNone = -1, AbstractReturnType, AbstractParamType, AbstractVariableType, AbstractFieldType, AbstractIvarType, AbstractSynthesizedIvarType, AbstractArrayType }; bool isAbstractType(SourceLocation Loc, QualType T); bool RequireNonAbstractType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); template <typename... 
Ts> bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireNonAbstractType(Loc, T, Diagnoser); } void DiagnoseAbstractType(const CXXRecordDecl *RD); //===--------------------------------------------------------------------===// // C++ Overloaded Operators [C++ 13.5] // bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl); bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl); //===--------------------------------------------------------------------===// // C++ Templates [C++ 14] // void FilterAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true); bool hasAnyAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true, bool AllowNonTemplateFunctions = false); /// Try to interpret the lookup result D as a template-name. /// /// \param D A declaration found by name lookup. /// \param AllowFunctionTemplates Whether function templates should be /// considered valid results. /// \param AllowDependent Whether unresolved using declarations (that might /// name templates) should be considered valid results. static NamedDecl *getAsTemplateNameDecl(NamedDecl *D, bool AllowFunctionTemplates = true, bool AllowDependent = true); enum TemplateNameIsRequiredTag { TemplateNameIsRequired }; /// Whether and why a template name is required in this lookup. class RequiredTemplateKind { public: /// Template name is required if TemplateKWLoc is valid. RequiredTemplateKind(SourceLocation TemplateKWLoc = SourceLocation()) : TemplateKW(TemplateKWLoc) {} /// Template name is unconditionally required. RequiredTemplateKind(TemplateNameIsRequiredTag) : TemplateKW() {} SourceLocation getTemplateKeywordLoc() const { return TemplateKW.getValueOr(SourceLocation()); } bool hasTemplateKeyword() const { return getTemplateKeywordLoc().isValid(); } bool isRequired() const { return TemplateKW != SourceLocation(); } explicit operator bool() const { return isRequired(); } private: llvm::Optional<SourceLocation> TemplateKW; }; enum class AssumedTemplateKind { /// This is not assumed to be a template name. None, /// This is assumed to be a template name because lookup found nothing. FoundNothing, /// This is assumed to be a template name because lookup found one or more /// functions (but no function templates). FoundFunctions, }; bool LookupTemplateName( LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType, bool EnteringContext, bool &MemberOfUnknownSpecialization, RequiredTemplateKind RequiredTemplate = SourceLocation(), AssumedTemplateKind *ATK = nullptr, bool AllowTypoCorrection = true); TemplateNameKind isTemplateName(Scope *S, CXXScopeSpec &SS, bool hasTemplateKeyword, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool &MemberOfUnknownSpecialization, bool Disambiguation = false); /// Try to resolve an undeclared template name as a type template. /// /// Sets II to the identifier corresponding to the template name, and updates /// Name to a corresponding (typo-corrected) type template name and TNK to /// the corresponding kind, if possible. 
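  ///
  /// For example (illustrative, assuming a typo for a visible type template),
  /// \c vectr<int> may be corrected to name the type template \c std::vector:
  /// \code
  ///   vectr<int> v; // recovered as std::vector<int>
  /// \endcode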
void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name, TemplateNameKind &TNK, SourceLocation NameLoc, IdentifierInfo *&II); bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name, SourceLocation NameLoc, bool Diagnose = true); /// Determine whether a particular identifier might be the name in a C++1z /// deduction-guide declaration. bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name, SourceLocation NameLoc, ParsedTemplateTy *Template = nullptr); bool DiagnoseUnknownTemplateName(const IdentifierInfo &II, SourceLocation IILoc, Scope *S, const CXXScopeSpec *SS, TemplateTy &SuggestedTemplate, TemplateNameKind &SuggestedKind); bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation, NamedDecl *Instantiation, bool InstantiatedFromMember, const NamedDecl *Pattern, const NamedDecl *PatternDef, TemplateSpecializationKind TSK, bool Complain = true); void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl); TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl); NamedDecl *ActOnTypeParameter(Scope *S, bool Typename, SourceLocation EllipsisLoc, SourceLocation KeyLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedType DefaultArg, bool HasTypeConstraint); bool ActOnTypeConstraint(const CXXScopeSpec &SS, TemplateIdAnnotation *TypeConstraint, TemplateTypeParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc); bool AttachTypeConstraint(NestedNameSpecifierLoc NS, DeclarationNameInfo NameInfo, ConceptDecl *NamedConcept, const TemplateArgumentListInfo *TemplateArgs, TemplateTypeParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc); bool AttachTypeConstraint(AutoTypeLoc TL, NonTypeTemplateParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc); bool RequireStructuralType(QualType T, SourceLocation Loc); QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI, SourceLocation Loc); QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc); NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, unsigned Depth, unsigned Position, SourceLocation EqualLoc, Expr *DefaultArg); NamedDecl *ActOnTemplateTemplateParameter(Scope *S, SourceLocation TmpLoc, TemplateParameterList *Params, SourceLocation EllipsisLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedTemplateArgument DefaultArg); TemplateParameterList * ActOnTemplateParameterList(unsigned Depth, SourceLocation ExportLoc, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ArrayRef<NamedDecl *> Params, SourceLocation RAngleLoc, Expr *RequiresClause); /// The context in which we are checking a template parameter list. 
enum TemplateParamListContext { TPC_ClassTemplate, TPC_VarTemplate, TPC_FunctionTemplate, TPC_ClassTemplateMember, TPC_FriendClassTemplate, TPC_FriendFunctionTemplate, TPC_FriendFunctionTemplateDefinition, TPC_TypeAliasTemplate }; bool CheckTemplateParameterList(TemplateParameterList *NewParams, TemplateParameterList *OldParams, TemplateParamListContext TPC, SkipBodyInfo *SkipBody = nullptr); TemplateParameterList *MatchTemplateParametersToScopeSpecifier( SourceLocation DeclStartLoc, SourceLocation DeclLoc, const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId, ArrayRef<TemplateParameterList *> ParamLists, bool IsFriend, bool &IsMemberSpecialization, bool &Invalid, bool SuppressDiagnostic = false); DeclResult CheckClassTemplate( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams, AccessSpecifier AS, SourceLocation ModulePrivateLoc, SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists, TemplateParameterList **OuterTemplateParamLists, SkipBodyInfo *SkipBody = nullptr); TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg, QualType NTTPType, SourceLocation Loc); /// Get a template argument mapping the given template parameter to itself, /// e.g. for X in \c template<int X>, this would return an expression template /// argument referencing X. TemplateArgumentLoc getIdentityTemplateArgumentLoc(NamedDecl *Param, SourceLocation Location); void translateTemplateArguments(const ASTTemplateArgsPtr &In, TemplateArgumentListInfo &Out); ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType); void NoteAllFoundTemplates(TemplateName Name); QualType CheckTemplateIdType(TemplateName Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs); TypeResult ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy Template, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, bool IsCtorOrDtorName = false, bool IsClassName = false); /// Parsed an elaborated-type-specifier that refers to a template-id, /// such as \c class T::template apply<U>. TypeResult ActOnTagTemplateIdType(TagUseKind TUK, TypeSpecifierType TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateD, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc); DeclResult ActOnVarTemplateSpecialization( Scope *S, Declarator &D, TypeSourceInfo *DI, SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams, StorageClass SC, bool IsPartialSpecialization); /// Get the specialization of the given variable template corresponding to /// the specified argument list, or a null-but-valid result if the arguments /// are dependent. DeclResult CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation TemplateNameLoc, const TemplateArgumentListInfo &TemplateArgs); /// Form a reference to the specialization of the given variable template /// corresponding to the specified argument list, or a null-but-valid result /// if the arguments are dependent. 
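  ///
  /// For example (illustrative sketch, hypothetical variable template),
  /// naming \c is_small_v<int> forms such a reference:
  /// \code
  ///   template<typename T> constexpr bool is_small_v = sizeof(T) <= 4;
  ///   constexpr bool b = is_small_v<int>;
  /// \endcode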
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, VarTemplateDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); ExprResult CheckConceptTemplateId(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &ConceptNameInfo, NamedDecl *FoundDecl, ConceptDecl *NamedConcept, const TemplateArgumentListInfo *TemplateArgs); void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc); ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, bool RequiresADL, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); TemplateNameKind ActOnTemplateName( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool AllowInjectedClassName = false); DeclResult ActOnClassTemplateSpecialization( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, SourceLocation ModulePrivateLoc, CXXScopeSpec &SS, TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr, MultiTemplateParamsArg TemplateParameterLists, SkipBodyInfo *SkipBody = nullptr); bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc, TemplateDecl *PrimaryTemplate, unsigned NumExplicitArgs, ArrayRef<TemplateArgument> Args); void CheckTemplatePartialSpecialization( ClassTemplatePartialSpecializationDecl *Partial); void CheckTemplatePartialSpecialization( VarTemplatePartialSpecializationDecl *Partial); Decl *ActOnTemplateDeclarator(Scope *S, MultiTemplateParamsArg TemplateParameterLists, Declarator &D); bool CheckSpecializationInstantiationRedecl(SourceLocation NewLoc, TemplateSpecializationKind NewTSK, NamedDecl *PrevDecl, TemplateSpecializationKind PrevTSK, SourceLocation PrevPtOfInstantiation, bool &SuppressNew); bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD, const TemplateArgumentListInfo &ExplicitTemplateArgs, LookupResult &Previous); bool CheckFunctionTemplateSpecialization( FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs, LookupResult &Previous, bool QualifiedFriend = false); bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous); void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous); DeclResult ActOnExplicitInstantiation( Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS, TemplateTy Template, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, Declarator &D); TemplateArgumentLoc SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, Decl *Param, SmallVectorImpl<TemplateArgument> &Converted, bool &HasDefaultArg); /// Specifies the context in which a particular template /// argument is being checked. 
  enum CheckTemplateArgumentKind {
    /// The template argument was specified in the code or was
    /// instantiated with some deduced template arguments.
    CTAK_Specified,

    /// The template argument was deduced via template argument
    /// deduction.
    CTAK_Deduced,

    /// The template argument was deduced from an array bound
    /// via template argument deduction.
    CTAK_DeducedFromArrayBound
  };

  bool CheckTemplateArgument(NamedDecl *Param, TemplateArgumentLoc &Arg,
                             NamedDecl *Template, SourceLocation TemplateLoc,
                             SourceLocation RAngleLoc,
                             unsigned ArgumentPackIndex,
                             SmallVectorImpl<TemplateArgument> &Converted,
                             CheckTemplateArgumentKind CTAK = CTAK_Specified);

  /// Check that the given template arguments can be provided to
  /// the given template, converting the arguments along the way.
  ///
  /// \param Template The template to which the template arguments are being
  /// provided.
  ///
  /// \param TemplateLoc The location of the template name in the source.
  ///
  /// \param TemplateArgs The list of template arguments. If the template is
  /// a template template parameter, this function may extend the set of
  /// template arguments to also include substituted, defaulted template
  /// arguments.
  ///
  /// \param PartialTemplateArgs True if the list of template arguments is
  /// intentionally partial, e.g., because we're checking just the initial
  /// set of template arguments.
  ///
  /// \param Converted Will receive the converted, canonicalized template
  /// arguments.
  ///
  /// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to
  /// contain the converted forms of the template arguments as written.
  /// Otherwise, \p TemplateArgs will not be modified.
  ///
  /// \param ConstraintsNotSatisfied If provided, and an error occurred, will
  /// receive true if the cause for the error is the associated constraints of
  /// the template not being satisfied by the template arguments.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool CheckTemplateArgumentList(TemplateDecl *Template,
                                 SourceLocation TemplateLoc,
                                 TemplateArgumentListInfo &TemplateArgs,
                                 bool PartialTemplateArgs,
                                 SmallVectorImpl<TemplateArgument> &Converted,
                                 bool UpdateArgsWithConversions = true,
                                 bool *ConstraintsNotSatisfied = nullptr);

  bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
                                 TemplateArgumentLoc &Arg,
                                 SmallVectorImpl<TemplateArgument> &Converted);

  bool CheckTemplateArgument(TemplateTypeParmDecl *Param,
                             TypeSourceInfo *Arg);
  ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
                                   QualType InstantiatedParamType, Expr *Arg,
                                   TemplateArgument &Converted,
                                   CheckTemplateArgumentKind CTAK = CTAK_Specified);
  bool CheckTemplateTemplateArgument(TemplateTemplateParmDecl *Param,
                                     TemplateParameterList *Params,
                                     TemplateArgumentLoc &Arg);

  ExprResult
  BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
                                          QualType ParamType,
                                          SourceLocation Loc);
  ExprResult
  BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
                                              SourceLocation Loc);

  /// Enumeration describing how template parameter lists are compared
  /// for equality.
  enum TemplateParameterListEqualKind {
    /// We are matching the template parameter lists of two templates
    /// that might be redeclarations.
    ///
    /// \code
    /// template<typename T> struct X;
    /// template<typename T> struct X;
    /// \endcode
    TPL_TemplateMatch,

    /// We are matching the template parameter lists of two template
    /// template parameters as part of matching the template parameter lists
    /// of two templates that might be redeclarations.
/// /// \code /// template<template<int I> class TT> struct X; /// template<template<int Value> class Other> struct X; /// \endcode TPL_TemplateTemplateParmMatch, /// We are matching the template parameter lists of a template /// template argument against the template parameter lists of a template /// template parameter. /// /// \code /// template<template<int Value> class Metafun> struct X; /// template<int Value> struct integer_c; /// X<integer_c> xic; /// \endcode TPL_TemplateTemplateArgumentMatch }; bool TemplateParameterListsAreEqual(TemplateParameterList *New, TemplateParameterList *Old, bool Complain, TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc = SourceLocation()); bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams); /// Called when the parser has parsed a C++ typename /// specifier, e.g., "typename T::type". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param II the identifier we're retrieving (e.g., 'type' in the example). /// \param IdLoc the location of the identifier. TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, const IdentifierInfo &II, SourceLocation IdLoc); /// Called when the parser has parsed a C++ typename /// specifier that ends in a template-id, e.g., /// "typename MetaFun::template apply<T1, T2>". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param TemplateLoc the location of the 'template' keyword, if any. /// \param TemplateName The template name. /// \param TemplateII The identifier used to name the template. /// \param TemplateIILoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). 
TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, SourceLocation TemplateLoc, TemplateTy TemplateName, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc, TypeSourceInfo **TSI, bool DeducedTSTContext); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc, bool DeducedTSTContext = true); TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T, SourceLocation Loc, DeclarationName Name); bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS); ExprResult RebuildExprInCurrentInstantiation(Expr *E); bool RebuildTemplateParamsInCurrentInstantiation( TemplateParameterList *Params); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgumentList &Args); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgument *Args, unsigned NumArgs); //===--------------------------------------------------------------------===// // C++ Concepts //===--------------------------------------------------------------------===// Decl *ActOnConceptDefinition( Scope *S, MultiTemplateParamsArg TemplateParameterLists, IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr); RequiresExprBodyDecl * ActOnStartRequiresExpr(SourceLocation RequiresKWLoc, ArrayRef<ParmVarDecl *> LocalParameters, Scope *BodyScope); void ActOnFinishRequiresExpr(); concepts::Requirement *ActOnSimpleRequirement(Expr *E); concepts::Requirement *ActOnTypeRequirement( SourceLocation TypenameKWLoc, CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo *TypeName, TemplateIdAnnotation *TemplateId); concepts::Requirement *ActOnCompoundRequirement(Expr *E, SourceLocation NoexceptLoc); concepts::Requirement * ActOnCompoundRequirement( Expr *E, SourceLocation NoexceptLoc, CXXScopeSpec &SS, TemplateIdAnnotation *TypeConstraint, unsigned Depth); concepts::Requirement *ActOnNestedRequirement(Expr *Constraint); concepts::ExprRequirement * BuildExprRequirement( Expr *E, bool IsSatisfied, SourceLocation NoexceptLoc, concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement); concepts::ExprRequirement * BuildExprRequirement( concepts::Requirement::SubstitutionDiagnostic *ExprSubstDiag, bool IsSatisfied, SourceLocation NoexceptLoc, concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement); concepts::TypeRequirement *BuildTypeRequirement(TypeSourceInfo *Type); concepts::TypeRequirement * BuildTypeRequirement( concepts::Requirement::SubstitutionDiagnostic *SubstDiag); concepts::NestedRequirement *BuildNestedRequirement(Expr *E); concepts::NestedRequirement * BuildNestedRequirement( concepts::Requirement::SubstitutionDiagnostic *SubstDiag); ExprResult ActOnRequiresExpr(SourceLocation RequiresKWLoc, RequiresExprBodyDecl *Body, ArrayRef<ParmVarDecl *> LocalParameters, ArrayRef<concepts::Requirement *> Requirements, SourceLocation ClosingBraceLoc); //===--------------------------------------------------------------------===// // C++ Variadic Templates (C++0x [temp.variadic]) //===--------------------------------------------------------------------===// /// Determine whether an unexpanded parameter pack 
  might be permitted in this /// location. Useful for error recovery.
  bool isUnexpandedParameterPackPermitted();

  /// The context in which an unexpanded parameter pack is
  /// being diagnosed.
  ///
  /// Note that the values of this enumeration line up with the first
  /// argument to the \c err_unexpanded_parameter_pack diagnostic.
  enum UnexpandedParameterPackContext {
    /// An arbitrary expression.
    UPPC_Expression = 0,
    /// The base type of a class type.
    UPPC_BaseType,
    /// The type of an arbitrary declaration.
    UPPC_DeclarationType,
    /// The type of a data member.
    UPPC_DataMemberType,
    /// The size of a bit-field.
    UPPC_BitFieldWidth,
    /// The expression in a static assertion.
    UPPC_StaticAssertExpression,
    /// The fixed underlying type of an enumeration.
    UPPC_FixedUnderlyingType,
    /// The enumerator value.
    UPPC_EnumeratorValue,
    /// A using declaration.
    UPPC_UsingDeclaration,
    /// A friend declaration.
    UPPC_FriendDeclaration,
    /// A declaration qualifier.
    UPPC_DeclarationQualifier,
    /// An initializer.
    UPPC_Initializer,
    /// A default argument.
    UPPC_DefaultArgument,
    /// The type of a non-type template parameter.
    UPPC_NonTypeTemplateParameterType,
    /// The type of an exception.
    UPPC_ExceptionType,
    /// Partial specialization.
    UPPC_PartialSpecialization,
    /// Microsoft __if_exists.
    UPPC_IfExists,
    /// Microsoft __if_not_exists.
    UPPC_IfNotExists,
    /// Lambda expression.
    UPPC_Lambda,
    /// Block expression.
    UPPC_Block,
    /// A type constraint.
    UPPC_TypeConstraint,
    // A requirement in a requires-expression.
    UPPC_Requirement,
    // A requires-clause.
    UPPC_RequiresClause,
  };

  /// Diagnose unexpanded parameter packs.
  ///
  /// \param Loc The location at which we should emit the diagnostic.
  ///
  /// \param UPPC The context in which we are diagnosing unexpanded
  /// parameter packs.
  ///
  /// \param Unexpanded the set of unexpanded parameter packs.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool DiagnoseUnexpandedParameterPacks(
      SourceLocation Loc, UnexpandedParameterPackContext UPPC,
      ArrayRef<UnexpandedParameterPack> Unexpanded);

  /// If the given type contains an unexpanded parameter pack,
  /// diagnose the error.
  ///
  /// \param Loc The source location where a diagnostic should be emitted.
  ///
  /// \param T The type that is being checked for unexpanded parameter
  /// packs.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
                                       UnexpandedParameterPackContext UPPC);

  /// If the given expression contains an unexpanded parameter
  /// pack, diagnose the error.
  ///
  /// \param E The expression that is being checked for unexpanded
  /// parameter packs.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool DiagnoseUnexpandedParameterPack(
      Expr *E, UnexpandedParameterPackContext UPPC = UPPC_Expression);

  /// If the given requires-expression contains an unexpanded reference to one
  /// of its own parameter packs, diagnose the error.
  ///
  /// \param RE The requires-expression that is being checked for unexpanded
  /// parameter packs.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool DiagnoseUnexpandedParameterPackInRequiresExpr(RequiresExpr *RE);

  /// If the given nested-name-specifier contains an unexpanded
  /// parameter pack, diagnose the error.
  ///
  /// \param SS The nested-name-specifier that is being checked for
  /// unexpanded parameter packs.
  ///
  /// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS, UnexpandedParameterPackContext UPPC); /// If the given name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param NameInfo The name (with source location information) that /// is being checked for unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo, UnexpandedParameterPackContext UPPC); /// If the given template name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The location of the template name. /// /// \param Template The template name that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TemplateName Template, UnexpandedParameterPackContext UPPC); /// If the given template argument contains an unexpanded parameter /// pack, diagnose the error. /// /// \param Arg The template argument that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg, UnexpandedParameterPackContext UPPC); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgument Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param T The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(QualType T, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param TL The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TypeLoc TL, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// nested-name-specifier. /// /// \param NNS The nested-name-specifier that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// name. /// /// \param NameInfo The name that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Invoked when parsing a template argument followed by an /// ellipsis, which creates a pack expansion. /// /// \param Arg The template argument preceding the ellipsis, which /// may already be invalid. /// /// \param EllipsisLoc The location of the ellipsis. ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg, SourceLocation EllipsisLoc); /// Invoked when parsing a type followed by an ellipsis, which /// creates a pack expansion. 
/// /// \param Type The type preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc); /// Construct a pack expansion type from the pattern of the pack /// expansion. TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Construct a pack expansion type from the pattern of the pack /// expansion. QualType CheckPackExpansion(QualType Pattern, SourceRange PatternRange, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Determine whether we could expand a pack expansion with the /// given set of parameter packs into separate arguments by repeatedly /// transforming the pattern. /// /// \param EllipsisLoc The location of the ellipsis that identifies the /// pack expansion. /// /// \param PatternRange The source range that covers the entire pattern of /// the pack expansion. /// /// \param Unexpanded The set of unexpanded parameter packs within the /// pattern. /// /// \param ShouldExpand Will be set to \c true if the transformer should /// expand the corresponding pack expansions into separate arguments. When /// set, \c NumExpansions must also be set. /// /// \param RetainExpansion Whether the caller should add an unexpanded /// pack expansion after all of the expanded arguments. This is used /// when extending explicitly-specified template argument packs per /// C++0x [temp.arg.explicit]p9. /// /// \param NumExpansions The number of separate arguments that will be in /// the expanded form of the corresponding pack expansion. This is both an /// input and an output parameter, which can be set by the caller if the /// number of expansions is known a priori (e.g., due to a prior substitution) /// and will be set by the callee when the number of expansions is known. /// The callee must set this value when \c ShouldExpand is \c true; it may /// set this value in other cases. /// /// \returns true if an error occurred (e.g., because the parameter packs /// are to be instantiated with arguments of different lengths), false /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions) /// must be set. bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc, SourceRange PatternRange, ArrayRef<UnexpandedParameterPack> Unexpanded, const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand, bool &RetainExpansion, Optional<unsigned> &NumExpansions); /// Determine the number of arguments in the given pack expansion /// type. /// /// This routine assumes that the number of arguments in the expansion is /// consistent across all of the unexpanded parameter packs in its pattern. 
/// /// Returns an empty Optional if the type can't be expanded. Optional<unsigned> getNumArgumentsInExpansion(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs); /// Determine whether the given declarator contains any unexpanded /// parameter packs. /// /// This routine is used by the parser to disambiguate function declarators /// with an ellipsis prior to the ')', e.g., /// /// \code /// void f(T...); /// \endcode /// /// To determine whether we have an (unnamed) function parameter pack or /// a variadic function. /// /// \returns true if the declarator contains any unexpanded parameter packs, /// false otherwise. bool containsUnexpandedParameterPacks(Declarator &D); /// Returns the pattern of the pack expansion for a template argument. /// /// \param OrigLoc The template argument to expand. /// /// \param Ellipsis Will be set to the location of the ellipsis. /// /// \param NumExpansions Will be set to the number of expansions that will /// be generated from this pack expansion, if known a priori. TemplateArgumentLoc getTemplateArgumentPackExpansionPattern( TemplateArgumentLoc OrigLoc, SourceLocation &Ellipsis, Optional<unsigned> &NumExpansions) const; /// Given a template argument that contains an unexpanded parameter pack, but /// which has already been substituted, attempt to determine the number of /// elements that will be produced once this argument is fully-expanded. /// /// This is intended for use when transforming 'sizeof...(Arg)' in order to /// avoid actually expanding the pack where possible. Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg); //===--------------------------------------------------------------------===// // C++ Template Argument Deduction (C++ [temp.deduct]) //===--------------------------------------------------------------------===// /// Adjust the type \p ArgFunctionType to match the calling convention, /// noreturn, and optionally the exception specification of \p FunctionType. /// Deduction often wants to ignore these properties when matching function /// types. QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType, bool AdjustExceptionSpec = false); /// Describes the result of template argument deduction. /// /// The TemplateDeductionResult enumeration describes the result of /// template argument deduction, as returned from /// DeduceTemplateArguments(). The separate TemplateDeductionInfo /// structure provides additional information about the results of /// template argument deduction, e.g., the deduced template argument /// list (if successful) or the specific template parameters or /// deduced arguments that were involved in the failure. enum TemplateDeductionResult { /// Template argument deduction was successful. TDK_Success = 0, /// The declaration was invalid; do nothing. TDK_Invalid, /// Template argument deduction exceeded the maximum template /// instantiation depth (which has already been diagnosed). TDK_InstantiationDepth, /// Template argument deduction did not deduce a value /// for every template parameter. TDK_Incomplete, /// Template argument deduction did not deduce a value for every /// expansion of an expanded template parameter pack. TDK_IncompletePack, /// Template argument deduction produced inconsistent /// deduced values for the given template parameter. 
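    /// For example (illustrative sketch):
    /// \code
    ///   template<typename T> void g(T, T);
    ///   // g(0, 0.5) deduces T as both 'int' and 'double'.
    /// \endcode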
    TDK_Inconsistent,

    /// Template argument deduction failed due to inconsistent
    /// cv-qualifiers on a template parameter type that would
    /// otherwise be deduced, e.g., we tried to deduce T in "const T"
    /// but were given a non-const "X".
    TDK_Underqualified,

    /// Substitution of the deduced template argument values
    /// resulted in an error.
    TDK_SubstitutionFailure,

    /// After substituting deduced template arguments, a dependent
    /// parameter type did not match the corresponding argument.
    TDK_DeducedMismatch,

    /// After substituting deduced template arguments, an element of
    /// a dependent parameter type did not match the corresponding element
    /// of the corresponding argument (when deducing from an initializer list).
    TDK_DeducedMismatchNested,

    /// A non-dependent component of the parameter did not match the
    /// corresponding component of the argument.
    TDK_NonDeducedMismatch,

    /// When performing template argument deduction for a function
    /// template, there were too many call arguments.
    TDK_TooManyArguments,

    /// When performing template argument deduction for a function
    /// template, there were too few call arguments.
    TDK_TooFewArguments,

    /// The explicitly-specified template arguments were not valid
    /// template arguments for the given template.
    TDK_InvalidExplicitArguments,

    /// Checking non-dependent argument conversions failed.
    TDK_NonDependentConversionFailure,

    /// The deduced arguments did not satisfy the constraints associated
    /// with the template.
    TDK_ConstraintsNotSatisfied,

    /// Deduction failed; that's all we know.
    TDK_MiscellaneousDeductionFailure,

    /// CUDA Target attributes do not match.
    TDK_CUDATargetMismatch
  };

  TemplateDeductionResult
  DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
                          const TemplateArgumentList &TemplateArgs,
                          sema::TemplateDeductionInfo &Info);

  TemplateDeductionResult
  DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
                          const TemplateArgumentList &TemplateArgs,
                          sema::TemplateDeductionInfo &Info);

  TemplateDeductionResult SubstituteExplicitTemplateArguments(
      FunctionTemplateDecl *FunctionTemplate,
      TemplateArgumentListInfo &ExplicitTemplateArgs,
      SmallVectorImpl<DeducedTemplateArgument> &Deduced,
      SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
      sema::TemplateDeductionInfo &Info);

  /// \brief A function argument from which we performed template argument
  /// deduction for a call.
struct OriginalCallArg { OriginalCallArg(QualType OriginalParamType, bool DecomposedParam, unsigned ArgIdx, QualType OriginalArgType) : OriginalParamType(OriginalParamType), DecomposedParam(DecomposedParam), ArgIdx(ArgIdx), OriginalArgType(OriginalArgType) {} QualType OriginalParamType; bool DecomposedParam; unsigned ArgIdx; QualType OriginalArgType; }; TemplateDeductionResult FinishTemplateArgumentDeduction( FunctionTemplateDecl *FunctionTemplate, SmallVectorImpl<DeducedTemplateArgument> &Deduced, unsigned NumExplicitlySpecified, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr, bool PartialOverloading = false, llvm::function_ref<bool()> CheckNonDependent = []{ return false; }); TemplateDeductionResult DeduceTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool PartialOverloading, llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ArgFunctionType, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, QualType ToType, CXXConversionDecl *&Specialization, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); /// Substitute Replacement for \p auto in \p TypeWithAuto QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement); /// Substitute Replacement for auto in TypeWithAuto TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto, QualType Replacement); /// Completely replace the \c auto in \p TypeWithAuto by /// \p Replacement. This does not retain any \c auto type sugar. QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement); TypeSourceInfo *ReplaceAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto, QualType Replacement); /// Result type of DeduceAutoType. enum DeduceAutoResult { DAR_Succeeded, DAR_Failed, DAR_FailedAlreadyDiagnosed }; DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None, bool IgnoreConstraints = false); DeduceAutoResult DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None, bool IgnoreConstraints = false); void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init); bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc, bool Diagnose = true); /// Declare implicit deduction guides for a class template if we've /// not already done so. 
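  /// For example (illustrative sketch, hypothetical class template), class
  /// template argument deduction relies on these guides:
  /// \code
  ///   template<typename T> struct Wrapper { Wrapper(T); };
  ///   Wrapper w(42); // deduced as Wrapper<int>
  /// \endcode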
void DeclareImplicitDeductionGuides(TemplateDecl *Template, SourceLocation Loc); QualType DeduceTemplateSpecializationFromInitializer( TypeSourceInfo *TInfo, const InitializedEntity &Entity, const InitializationKind &Kind, MultiExprArg Init); QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name, QualType Type, TypeSourceInfo *TSI, SourceRange Range, bool DirectInit, Expr *Init); TypeLoc getReturnTypeLoc(FunctionDecl *FD) const; bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD, SourceLocation ReturnLoc, Expr *&RetExpr, AutoType *AT); FunctionTemplateDecl *getMoreSpecializedTemplate( FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc, TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1, unsigned NumCallArguments2, bool Reversed = false); UnresolvedSetIterator getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd, TemplateSpecCandidateSet &FailedCandidates, SourceLocation Loc, const PartialDiagnostic &NoneDiag, const PartialDiagnostic &AmbigDiag, const PartialDiagnostic &CandidateDiag, bool Complain = true, QualType TargetType = QualType()); ClassTemplatePartialSpecializationDecl * getMoreSpecializedPartialSpecialization( ClassTemplatePartialSpecializationDecl *PS1, ClassTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization( VarTemplatePartialSpecializationDecl *PS1, VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); bool isTemplateTemplateParameterAtLeastAsSpecializedAs( TemplateParameterList *PParam, TemplateDecl *AArg, SourceLocation Loc); void MarkUsedTemplateParameters(const Expr *E, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkDeducedTemplateParameters( const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced) { return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced); } static void MarkDeducedTemplateParameters(ASTContext &Ctx, const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced); //===--------------------------------------------------------------------===// // C++ Template Instantiation // MultiLevelTemplateArgumentList getTemplateInstantiationArgs(NamedDecl *D, const TemplateArgumentList *Innermost = nullptr, bool RelativeToPrimary = false, const FunctionDecl *Pattern = nullptr); /// A context in which code is being synthesized (where a source location /// alone is not sufficient to identify the context). This covers template /// instantiation and various forms of implicitly-generated functions. struct CodeSynthesisContext { /// The kind of template instantiation we are performing enum SynthesisKind { /// We are instantiating a template declaration. The entity is /// the declaration we're instantiating (e.g., a CXXRecordDecl). TemplateInstantiation, /// We are instantiating a default argument for a template /// parameter. The Entity is the template parameter whose argument is /// being instantiated, the Template is the template, and the /// TemplateArgs/NumTemplateArguments provide the template arguments as /// specified. 
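// Editor's illustrative sketch (not part of this header): class template
// argument deduction. The implicit guides that DeclareImplicitDeductionGuides
// synthesizes come from Box's constructors; the user-declared guide below
// additionally maps string literals to Box<std::string>. 'Box' is an invented
// example type.
#include <string>
#include <type_traits>

template <typename T> struct Box {
  T value;
  Box(T v) : value(v) {}
};
Box(const char *) -> Box<std::string>;     // user-declared deduction guide

void ctadExamples() {
  Box b1{42};      // implicit guide: Box<int>
  Box b2{"hi"};    // user guide:     Box<std::string>
  static_assert(std::is_same<decltype(b1), Box<int>>::value, "");
  static_assert(std::is_same<decltype(b2), Box<std::string>>::value, "");
}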
DefaultTemplateArgumentInstantiation, /// We are instantiating a default argument for a function. /// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs /// provides the template arguments as specified. DefaultFunctionArgumentInstantiation, /// We are substituting explicit template arguments provided for /// a function template. The entity is a FunctionTemplateDecl. ExplicitTemplateArgumentSubstitution, /// We are substituting template argument determined as part of /// template argument deduction for either a class template /// partial specialization or a function template. The /// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or /// a TemplateDecl. DeducedTemplateArgumentSubstitution, /// We are substituting prior template arguments into a new /// template parameter. The template parameter itself is either a /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl. PriorTemplateArgumentSubstitution, /// We are checking the validity of a default template argument that /// has been used when naming a template-id. DefaultTemplateArgumentChecking, /// We are computing the exception specification for a defaulted special /// member function. ExceptionSpecEvaluation, /// We are instantiating the exception specification for a function /// template which was deferred until it was needed. ExceptionSpecInstantiation, /// We are instantiating a requirement of a requires expression. RequirementInstantiation, /// We are checking the satisfaction of a nested requirement of a requires /// expression. NestedRequirementConstraintsCheck, /// We are declaring an implicit special member function. DeclaringSpecialMember, /// We are declaring an implicit 'operator==' for a defaulted /// 'operator<=>'. DeclaringImplicitEqualityComparison, /// We are defining a synthesized function (such as a defaulted special /// member). DefiningSynthesizedFunction, // We are checking the constraints associated with a constrained entity or // the constraint expression of a concept. This includes the checks that // atomic constraints have the type 'bool' and that they can be constant // evaluated. ConstraintsCheck, // We are substituting template arguments into a constraint expression. ConstraintSubstitution, // We are normalizing a constraint expression. ConstraintNormalization, // We are substituting into the parameter mapping of an atomic constraint // during normalization. ParameterMappingSubstitution, /// We are rewriting a comparison operator in terms of an operator<=>. RewritingOperatorAsSpaceship, /// We are initializing a structured binding. InitializingStructuredBinding, /// We are marking a class as __dllexport. MarkingClassDllexported, /// Added for Template instantiation observation. /// Memoization means we are _not_ instantiating a template because /// it is already instantiated (but we entered a context where we /// would have had to if it was not already instantiated). Memoization } Kind; /// Was the enclosing context a non-instantiation SFINAE context? bool SavedInNonInstantiationSFINAEContext; /// The point of instantiation or synthesis within the source code. SourceLocation PointOfInstantiation; /// The entity that is being synthesized. Decl *Entity; /// The template (or partial specialization) in which we are /// performing the instantiation, for substitutions of prior template /// arguments. NamedDecl *Template; /// The list of template arguments we are substituting, if they /// are not part of the entity. 
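// Editor's illustrative sketch (not part of this header): a default template
// argument is itself instantiated on demand, which pushes a
// DefaultTemplateArgumentInstantiation context while the default is formed.
// 'Container' is an invented example type.
#include <memory>
#include <type_traits>

template <typename T, typename Alloc = std::allocator<T>>
struct Container { Alloc alloc; };

// Naming Container<float> instantiates the default argument std::allocator<float>.
static_assert(std::is_same<Container<float>,
                           Container<float, std::allocator<float>>>::value, "");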
const TemplateArgument *TemplateArgs; // FIXME: Wrap this union around more members, or perhaps store the // kind-specific members in the RAII object owning the context. union { /// The number of template arguments in TemplateArgs. unsigned NumTemplateArgs; /// The special member being declared or defined. CXXSpecialMember SpecialMember; }; ArrayRef<TemplateArgument> template_arguments() const { assert(Kind != DeclaringSpecialMember); return {TemplateArgs, NumTemplateArgs}; } /// The template deduction info object associated with the /// substitution or checking of explicit or deduced template arguments. sema::TemplateDeductionInfo *DeductionInfo; /// The source range that covers the construct that cause /// the instantiation, e.g., the template-id that causes a class /// template instantiation. SourceRange InstantiationRange; CodeSynthesisContext() : Kind(TemplateInstantiation), SavedInNonInstantiationSFINAEContext(false), Entity(nullptr), Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0), DeductionInfo(nullptr) {} /// Determines whether this template is an actual instantiation /// that should be counted toward the maximum instantiation depth. bool isInstantiationRecord() const; }; /// List of active code synthesis contexts. /// /// This vector is treated as a stack. As synthesis of one entity requires /// synthesis of another, additional contexts are pushed onto the stack. SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts; /// Specializations whose definitions are currently being instantiated. llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations; /// Non-dependent types used in templates that have already been instantiated /// by some template instantiation. llvm::DenseSet<QualType> InstantiatedNonDependentTypes; /// Extra modules inspected when performing a lookup during a template /// instantiation. Computed lazily. SmallVector<Module*, 16> CodeSynthesisContextLookupModules; /// Cache of additional modules that should be used for name lookup /// within the current template instantiation. Computed lazily; use /// getLookupModules() to get a complete set. llvm::DenseSet<Module*> LookupModulesCache; /// Get the set of additional modules that should be checked during /// name lookup. A module and its imports become visible when instanting a /// template defined within it. llvm::DenseSet<Module*> &getLookupModules(); /// Map from the most recent declaration of a namespace to the most /// recent visible declaration of that namespace. llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache; /// Whether we are in a SFINAE context that is not associated with /// template instantiation. /// /// This is used when setting up a SFINAE trap (\c see SFINAETrap) outside /// of a template instantiation or template argument deduction. bool InNonInstantiationSFINAEContext; /// The number of \p CodeSynthesisContexts that are not template /// instantiations and, therefore, should not be counted as part of the /// instantiation depth. /// /// When the instantiation depth reaches the user-configurable limit /// \p LangOptions::InstantiationDepth we will abort instantiation. // FIXME: Should we have a similar limit for other forms of synthesis? unsigned NonInstantiationEntries; /// The depth of the context stack at the point when the most recent /// error or warning was produced. /// /// This value is used to suppress printing of redundant context stacks /// when there are multiple errors or warnings in the same instantiation. // FIXME: Does this belong in Sema? 
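// Editor's illustrative sketch (not part of this header): each level of the
// recursion below pushes another CodeSynthesisContext that counts toward the
// instantiation depth (LangOptions::InstantiationDepth, -ftemplate-depth=N in
// clang). 'Fib' is an invented example.
template <unsigned N> struct Fib {
  static constexpr unsigned long long value = Fib<N - 1>::value + Fib<N - 2>::value;
};
template <> struct Fib<1> { static constexpr unsigned long long value = 1; };
template <> struct Fib<0> { static constexpr unsigned long long value = 0; };

static_assert(Fib<20>::value == 6765ULL, "");
// A much larger argument would eventually abort with "recursive template
// instantiation exceeded maximum depth" once the configured limit is reached.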
It's tough to implement it anywhere else. unsigned LastEmittedCodeSynthesisContextDepth = 0; /// The template instantiation callbacks to trace or track /// instantiations (objects can be chained). /// /// This callbacks is used to print, trace or track template /// instantiations as they are being constructed. std::vector<std::unique_ptr<TemplateInstantiationCallback>> TemplateInstCallbacks; /// The current index into pack expansion arguments that will be /// used for substitution of parameter packs. /// /// The pack expansion index will be -1 to indicate that parameter packs /// should be instantiated as themselves. Otherwise, the index specifies /// which argument within the parameter pack will be used for substitution. int ArgumentPackSubstitutionIndex; /// RAII object used to change the argument pack substitution index /// within a \c Sema object. /// /// See \c ArgumentPackSubstitutionIndex for more information. class ArgumentPackSubstitutionIndexRAII { Sema &Self; int OldSubstitutionIndex; public: ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex) : Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) { Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex; } ~ArgumentPackSubstitutionIndexRAII() { Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex; } }; friend class ArgumentPackSubstitutionRAII; /// For each declaration that involved template argument deduction, the /// set of diagnostics that were suppressed during that template argument /// deduction. /// /// FIXME: Serialize this structure to the AST file. typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> > SuppressedDiagnosticsMap; SuppressedDiagnosticsMap SuppressedDiagnostics; /// A stack object to be created when performing template /// instantiation. /// /// Construction of an object of type \c InstantiatingTemplate /// pushes the current instantiation onto the stack of active /// instantiations. If the size of this stack exceeds the maximum /// number of recursive template instantiations, construction /// produces an error and evaluates true. /// /// Destruction of this object will pop the named instantiation off /// the stack. struct InstantiatingTemplate { /// Note that we are instantiating a class template, /// function template, variable template, alias template, /// or a member thereof. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, Decl *Entity, SourceRange InstantiationRange = SourceRange()); struct ExceptionSpecification {}; /// Note that we are instantiating an exception specification /// of a function template. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionDecl *Entity, ExceptionSpecification, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument in a /// template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateParameter Param, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting either explicitly-specified or /// deduced template arguments during function template argument deduction. 
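// Editor's illustrative sketch (not part of this header): instantiating the
// fold-expression below substitutes the pattern once per pack element,
// conceptually stepping ArgumentPackSubstitutionIndex through 0, 1, 2, ...
// The helper names are invented for the example.
#include <cstdio>

inline void printOne(int v)         { std::printf("int: %d\n", v); }
inline void printOne(const char *s) { std::printf("str: %s\n", s); }

template <typename... Ts>
void printAll(Ts... args) {
  (printOne(args), ...);   // pattern instantiated once per element of 'args'
}

inline void packExample() { printAll(1, "two", 3); }   // three substitutions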
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionTemplateDecl *FunctionTemplate, ArrayRef<TemplateArgument> TemplateArgs, CodeSynthesisContext::SynthesisKind Kind, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template declaration. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ClassTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a variable template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, VarTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument for a function /// parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ParmVarDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting prior template arguments into a /// non-type parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, NonTypeTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are substituting prior template arguments into a /// template template parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, TemplateTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are checking the default template argument /// against the template parameter for a given template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, NamedDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); struct ConstraintsCheck {}; /// \brief Note that we are checking the constraints associated with some /// constrained entity (a concept declaration or a template with associated /// constraints). InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ConstraintsCheck, NamedDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); struct ConstraintSubstitution {}; /// \brief Note that we are checking a constraint expression associated /// with a template declaration or as part of the satisfaction check of a /// concept. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ConstraintSubstitution, NamedDecl *Template, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange); struct ConstraintNormalization {}; /// \brief Note that we are normalizing a constraint expression. 
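// Editor's illustrative sketch (not part of this header): selecting a class
// template partial specialization first deduces its template arguments from
// the specialization's argument list and then applies partial ordering, which
// is what the DeduceTemplateArguments overloads above implement. 'PtrTraits'
// is an invented example.
#include <type_traits>

template <typename T> struct PtrTraits            { static constexpr int rank = 0; };
template <typename T> struct PtrTraits<T *>       { static constexpr int rank = 1; };
template <typename T> struct PtrTraits<const T *> { static constexpr int rank = 2; };

static_assert(PtrTraits<int>::rank == 0, "");
static_assert(PtrTraits<int *>::rank == 1, "");
static_assert(PtrTraits<const int *>::rank == 2, "");  // most specialized wins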
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ConstraintNormalization, NamedDecl *Template, SourceRange InstantiationRange); struct ParameterMappingSubstitution {}; /// \brief Note that we are subtituting into the parameter mapping of an /// atomic constraint during constraint normalization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ParameterMappingSubstitution, NamedDecl *Template, SourceRange InstantiationRange); /// \brief Note that we are substituting template arguments into a part of /// a requirement of a requires expression. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, concepts::Requirement *Req, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are checking the satisfaction of the constraint /// expression inside of a nested requirement. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, concepts::NestedRequirement *Req, ConstraintsCheck, SourceRange InstantiationRange = SourceRange()); /// Note that we have finished instantiating this template. void Clear(); ~InstantiatingTemplate() { Clear(); } /// Determines whether we have exceeded the maximum /// recursive template instantiations. bool isInvalid() const { return Invalid; } /// Determine whether we are already instantiating this /// specialization in some surrounding active instantiation. bool isAlreadyInstantiating() const { return AlreadyInstantiating; } private: Sema &SemaRef; bool Invalid; bool AlreadyInstantiating; bool CheckInstantiationDepth(SourceLocation PointOfInstantiation, SourceRange InstantiationRange); InstantiatingTemplate( Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind, SourceLocation PointOfInstantiation, SourceRange InstantiationRange, Decl *Entity, NamedDecl *Template = nullptr, ArrayRef<TemplateArgument> TemplateArgs = None, sema::TemplateDeductionInfo *DeductionInfo = nullptr); InstantiatingTemplate(const InstantiatingTemplate&) = delete; InstantiatingTemplate& operator=(const InstantiatingTemplate&) = delete; }; void pushCodeSynthesisContext(CodeSynthesisContext Ctx); void popCodeSynthesisContext(); /// Determine whether we are currently performing template instantiation. bool inTemplateInstantiation() const { return CodeSynthesisContexts.size() > NonInstantiationEntries; } void PrintContextStack() { if (!CodeSynthesisContexts.empty() && CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) { PrintInstantiationStack(); LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size(); } if (PragmaAttributeCurrentTargetDecl) PrintPragmaAttributeInstantiationPoint(); } void PrintInstantiationStack(); void PrintPragmaAttributeInstantiationPoint(); /// Determines whether we are currently in a context where /// template argument substitution failures are not considered /// errors. /// /// \returns An empty \c Optional if we're not in a SFINAE context. /// Otherwise, contains a pointer that, if non-NULL, contains the nearest /// template-deduction context object, which can be used to capture /// diagnostics that will be suppressed. Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const; /// Determines whether we are currently in a context that /// is not evaluated as per C++ [expr] p5. 
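// Editor's illustrative sketch (not part of this header, requires C++20):
// checking a concept instantiates each requirement of its requires
// expression; an unsatisfiable requirement makes the constraint false rather
// than producing a hard error. 'Addable', 'sum', and 'NoPlus' are invented.
#include <concepts>

template <typename T>
concept Addable = requires(T a, T b) {
  { a + b } -> std::convertible_to<T>;
};

template <Addable T> T sum(T a, T b) { return a + b; }

struct NoPlus {};
static_assert(Addable<int>, "");
static_assert(!Addable<NoPlus>, "");   // unsatisfied, not an error
// sum(NoPlus{}, NoPlus{});            // would fail with "constraints not
//                                     // satisfied" (TDK_ConstraintsNotSatisfied)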
bool isUnevaluatedContext() const { assert(!ExprEvalContexts.empty() && "Must be in an expression evaluation context"); return ExprEvalContexts.back().isUnevaluated(); } /// RAII class used to determine whether SFINAE has /// trapped any errors that occur during template argument /// deduction. class SFINAETrap { Sema &SemaRef; unsigned PrevSFINAEErrors; bool PrevInNonInstantiationSFINAEContext; bool PrevAccessCheckingSFINAE; bool PrevLastDiagnosticIgnored; public: explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false) : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors), PrevInNonInstantiationSFINAEContext( SemaRef.InNonInstantiationSFINAEContext), PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE), PrevLastDiagnosticIgnored( SemaRef.getDiagnostics().isLastDiagnosticIgnored()) { if (!SemaRef.isSFINAEContext()) SemaRef.InNonInstantiationSFINAEContext = true; SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE; } ~SFINAETrap() { SemaRef.NumSFINAEErrors = PrevSFINAEErrors; SemaRef.InNonInstantiationSFINAEContext = PrevInNonInstantiationSFINAEContext; SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE; SemaRef.getDiagnostics().setLastDiagnosticIgnored( PrevLastDiagnosticIgnored); } /// Determine whether any SFINAE errors have been trapped. bool hasErrorOccurred() const { return SemaRef.NumSFINAEErrors > PrevSFINAEErrors; } }; /// RAII class used to indicate that we are performing provisional /// semantic analysis to determine the validity of a construct, so /// typo-correction and diagnostics in the immediate context (not within /// implicitly-instantiated templates) should be suppressed. class TentativeAnalysisScope { Sema &SemaRef; // FIXME: Using a SFINAETrap for this is a hack. SFINAETrap Trap; bool PrevDisableTypoCorrection; public: explicit TentativeAnalysisScope(Sema &SemaRef) : SemaRef(SemaRef), Trap(SemaRef, true), PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) { SemaRef.DisableTypoCorrection = true; } ~TentativeAnalysisScope() { SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection; } }; /// The current instantiation scope used to store local /// variables. LocalInstantiationScope *CurrentInstantiationScope; /// Tracks whether we are in a context where typo correction is /// disabled. bool DisableTypoCorrection; /// The number of typos corrected by CorrectTypo. unsigned TyposCorrected; typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet; typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations; /// A cache containing identifiers for which typo correction failed and /// their locations, so that repeated attempts to correct an identifier in a /// given location are ignored if typo correction already failed for it. IdentifierSourceLocations TypoCorrectionFailures; /// Worker object for performing CFG-based warnings. sema::AnalysisBasedWarnings AnalysisWarnings; threadSafety::BeforeSet *ThreadSafetyDeclCache; /// An entity for which implicit template instantiation is required. /// /// The source location associated with the declaration is the first place in /// the source code where the declaration was "used". It is not necessarily /// the point of instantiation (which will be either before or after the /// namespace-scope declaration that triggered this implicit instantiation), /// However, it is the location that diagnostics should generally refer to, /// because users will need to know what code triggered the instantiation. 
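// Editor's illustrative sketch (not part of this header): classic SFINAE. The
// substitution failure in the first overload's return type is trapped during
// deduction (its diagnostics suppressed, which is what the SFINAETrap above
// arranges) and overload resolution quietly falls back to the second
// overload. 'hasSize' and 'WithSize' are invented names.
#include <type_traits>
#include <utility>

template <typename T>
auto hasSize(int) -> decltype(std::declval<T>().size(), std::true_type{});
template <typename T>
auto hasSize(...) -> std::false_type;

struct WithSize { unsigned size() const { return 0; } };

static_assert(decltype(hasSize<WithSize>(0))::value, "");
static_assert(!decltype(hasSize<int>(0))::value, "");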
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation; /// The queue of implicit template instantiations that are required /// but have not yet been performed. std::deque<PendingImplicitInstantiation> PendingInstantiations; /// Queue of implicit template instantiations that cannot be performed /// eagerly. SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations; class GlobalEagerInstantiationScope { public: GlobalEagerInstantiationScope(Sema &S, bool Enabled) : S(S), Enabled(Enabled) { if (!Enabled) return; SavedPendingInstantiations.swap(S.PendingInstantiations); SavedVTableUses.swap(S.VTableUses); } void perform() { if (Enabled) { S.DefineUsedVTables(); S.PerformPendingInstantiations(); } } ~GlobalEagerInstantiationScope() { if (!Enabled) return; // Restore the set of pending vtables. assert(S.VTableUses.empty() && "VTableUses should be empty before it is discarded."); S.VTableUses.swap(SavedVTableUses); // Restore the set of pending implicit instantiations. if (S.TUKind != TU_Prefix || !S.LangOpts.PCHInstantiateTemplates) { assert(S.PendingInstantiations.empty() && "PendingInstantiations should be empty before it is discarded."); S.PendingInstantiations.swap(SavedPendingInstantiations); } else { // Template instantiations in the PCH may be delayed until the TU. S.PendingInstantiations.swap(SavedPendingInstantiations); S.PendingInstantiations.insert(S.PendingInstantiations.end(), SavedPendingInstantiations.begin(), SavedPendingInstantiations.end()); } } private: Sema &S; SmallVector<VTableUse, 16> SavedVTableUses; std::deque<PendingImplicitInstantiation> SavedPendingInstantiations; bool Enabled; }; /// The queue of implicit template instantiations that are required /// and must be performed within the current local scope. /// /// This queue is only used for member functions of local classes in /// templates, which must be instantiated in the same scope as their /// enclosing function, so that they can reference function-local /// types, static variables, enumerators, etc. std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations; class LocalEagerInstantiationScope { public: LocalEagerInstantiationScope(Sema &S) : S(S) { SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); } ~LocalEagerInstantiationScope() { assert(S.PendingLocalImplicitInstantiations.empty() && "there shouldn't be any pending local implicit instantiations"); SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } private: Sema &S; std::deque<PendingImplicitInstantiation> SavedPendingLocalImplicitInstantiations; }; /// A helper class for building up ExtParameterInfos. class ExtParameterInfoBuilder { SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos; bool HasInteresting = false; public: /// Set the ExtParameterInfo for the parameter at the given index, /// void set(unsigned index, FunctionProtoType::ExtParameterInfo info) { assert(Infos.size() <= index); Infos.resize(index); Infos.push_back(info); if (!HasInteresting) HasInteresting = (info != FunctionProtoType::ExtParameterInfo()); } /// Return a pointer (suitable for setting in an ExtProtoInfo) to the /// ExtParameterInfo array we've built up. 
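// Editor's sketch (assumption: a simplified, generic analog, not the actual
// Sema machinery): the save-swap / perform / restore pattern used by
// GlobalEagerInstantiationScope and LocalEagerInstantiationScope, applied
// here to a hypothetical pending-work queue.
#include <deque>
#include <functional>

class PendingWorkScope {
public:
  explicit PendingWorkScope(std::deque<std::function<void()>> &queue)
      : Queue(queue) {
    Saved.swap(Queue);                 // start the scope with an empty queue
  }
  void perform() {                     // drain work enqueued inside the scope
    while (!Queue.empty()) {
      Queue.front()();
      Queue.pop_front();
    }
  }
  ~PendingWorkScope() { Queue.swap(Saved); }   // restore the outer queue

private:
  std::deque<std::function<void()>> &Queue;
  std::deque<std::function<void()>> Saved;
};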
const FunctionProtoType::ExtParameterInfo * getPointerOrNull(unsigned numParams) { if (!HasInteresting) return nullptr; Infos.resize(numParams); return Infos.data(); } }; void PerformPendingInstantiations(bool LocalOnly = false); TypeSourceInfo *SubstType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, bool AllowDeducedTST = false); QualType SubstType(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstType(TypeLoc TL, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, CXXRecordDecl *ThisContext, Qualifiers ThisTypeQuals); void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto, const MultiLevelTemplateArgumentList &Args); bool SubstExceptionSpec(SourceLocation Loc, FunctionProtoType::ExceptionSpecInfo &ESI, SmallVectorImpl<QualType> &ExceptionStorage, const MultiLevelTemplateArgumentList &Args); ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, int indexAdjustment, Optional<unsigned> NumExpansions, bool ExpectParameterPack); bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params, const FunctionProtoType::ExtParameterInfo *ExtParamInfos, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<QualType> &ParamTypes, SmallVectorImpl<ParmVarDecl *> *OutParams, ExtParameterInfoBuilder &ParamInfos); ExprResult SubstExpr(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs); /// Substitute the given template arguments into a list of /// expressions, expanding pack expansions if required. /// /// \param Exprs The list of expressions to substitute into. /// /// \param IsCall Whether this is some form of call, in which case /// default arguments will be dropped. /// /// \param TemplateArgs The set of template arguments to substitute. /// /// \param Outputs Will receive all of the substituted arguments. /// /// \returns true if an error occurred, false otherwise. bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<Expr *> &Outputs); StmtResult SubstStmt(Stmt *S, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateParameterList * SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); bool SubstTemplateArguments(ArrayRef<TemplateArgumentLoc> Args, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateArgumentListInfo &Outputs); Decl *SubstDecl(Decl *D, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); /// Substitute the name and return type of a defaulted 'operator<=>' to form /// an implicit 'operator=='. 
FunctionDecl *SubstSpaceshipAsEqualEqual(CXXRecordDecl *RD, FunctionDecl *Spaceship); ExprResult SubstInitializer(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs, bool CXXDirectInit); bool SubstBaseSpecifiers(CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateClass(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK, bool Complain = true); bool InstantiateEnum(SourceLocation PointOfInstantiation, EnumDecl *Instantiation, EnumDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); bool InstantiateInClassInitializer( SourceLocation PointOfInstantiation, FieldDecl *Instantiation, FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); struct LateInstantiatedAttribute { const Attr *TmplAttr; LocalInstantiationScope *Scope; Decl *NewDecl; LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S, Decl *D) : TmplAttr(A), Scope(S), NewDecl(D) { } }; typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec; void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); void InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); void InstantiateDefaultCtorDefaultArgs(CXXConstructorDecl *Ctor); bool usesPartialOrExplicitSpecialization( SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec); bool InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK, bool Complain = true); void InstantiateClassMembers(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); void InstantiateClassTemplateSpecializationMembers( SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK); NestedNameSpecifierLoc SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS, const MultiLevelTemplateArgumentList &TemplateArgs); DeclarationNameInfo SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateName SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name, SourceLocation Loc, const MultiLevelTemplateArgumentList &TemplateArgs); bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs, TemplateArgumentListInfo &Result, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateDefaultArgument(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); void InstantiateExceptionSpec(SourceLocation PointOfInstantiation, FunctionDecl *Function); bool CheckInstantiatedFunctionTemplateConstraints( SourceLocation PointOfInstantiation, FunctionDecl *Decl, ArrayRef<TemplateArgument> TemplateArgs, ConstraintSatisfaction &Satisfaction); FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD, const TemplateArgumentList *Args, SourceLocation Loc); void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation, FunctionDecl *Function, bool Recursive = false, bool 
DefinitionRequired = false, bool AtEndOfTU = false); VarTemplateSpecializationDecl *BuildVarTemplateInstantiation( VarTemplateDecl *VarTemplate, VarDecl *FromVar, const TemplateArgumentList &TemplateArgList, const TemplateArgumentListInfo &TemplateArgsInfo, SmallVectorImpl<TemplateArgument> &Converted, SourceLocation PointOfInstantiation, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *StartingScope = nullptr); VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl( VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl, const MultiLevelTemplateArgumentList &TemplateArgs); void BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs, LateInstantiatedAttrVec *LateAttrs, DeclContext *Owner, LocalInstantiationScope *StartingScope, bool InstantiatingVarTemplate = false, VarTemplateSpecializationDecl *PrevVTSD = nullptr); void InstantiateVariableInitializer( VarDecl *Var, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateVariableDefinition(SourceLocation PointOfInstantiation, VarDecl *Var, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); void InstantiateMemInitializers(CXXConstructorDecl *New, const CXXConstructorDecl *Tmpl, const MultiLevelTemplateArgumentList &TemplateArgs); NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, bool FindingInstantiatedContext = false); DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC, const MultiLevelTemplateArgumentList &TemplateArgs); // Objective-C declarations. enum ObjCContainerKind { OCK_None = -1, OCK_Interface = 0, OCK_Protocol, OCK_Category, OCK_ClassExtension, OCK_Implementation, OCK_CategoryImplementation }; ObjCContainerKind getObjCContainerKind() const; DeclResult actOnObjCTypeParam(Scope *S, ObjCTypeParamVariance variance, SourceLocation varianceLoc, unsigned index, IdentifierInfo *paramName, SourceLocation paramLoc, SourceLocation colonLoc, ParsedType typeBound); ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc, ArrayRef<Decl *> typeParams, SourceLocation rAngleLoc); void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList); Decl *ActOnStartClassInterface( Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); void ActOnSuperClassOfClassInterface(Scope *S, SourceLocation AtInterfaceLoc, ObjCInterfaceDecl *IDecl, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange); void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs, SmallVectorImpl<SourceLocation> &ProtocolLocs, IdentifierInfo *SuperName, SourceLocation SuperLoc); Decl *ActOnCompatibilityAlias( SourceLocation AtCompatibilityAliasLoc, IdentifierInfo *AliasName, SourceLocation AliasLocation, IdentifierInfo *ClassName, SourceLocation ClassLocation); bool CheckForwardProtocolDeclarationForCircularDependency( IdentifierInfo *PName, SourceLocation &PLoc, SourceLocation PrevLoc, const ObjCList<ObjCProtocolDecl> &PList); Decl 
*ActOnStartProtocolInterface( SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName, SourceLocation ProtocolLoc, Decl *const *ProtoRefNames, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartCategoryInterface( SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *CategoryName, SourceLocation CategoryLoc, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperClassname, SourceLocation SuperClassLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *CatName, SourceLocation CatLoc, const ParsedAttributesView &AttrList); DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl, ArrayRef<Decl *> Decls); DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc, IdentifierInfo **IdentList, SourceLocation *IdentLocs, ArrayRef<ObjCTypeParamList *> TypeParamLists, unsigned NumElts); DeclGroupPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc, ArrayRef<IdentifierLocPair> IdentList, const ParsedAttributesView &attrList); void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer, ArrayRef<IdentifierLocPair> ProtocolId, SmallVectorImpl<Decl *> &Protocols); void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId, SourceLocation ProtocolLoc, IdentifierInfo *TypeArgId, SourceLocation TypeArgLoc, bool SelectProtocolFirst = false); /// Given a list of identifiers (and their locations), resolve the /// names to either Objective-C protocol qualifiers or type /// arguments, as appropriate. void actOnObjCTypeArgsOrProtocolQualifiers( Scope *S, ParsedType baseType, SourceLocation lAngleLoc, ArrayRef<IdentifierInfo *> identifiers, ArrayRef<SourceLocation> identifierLocs, SourceLocation rAngleLoc, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SourceLocation &protocolRAngleLoc, bool warnOnIncompleteProtocols); /// Build a an Objective-C protocol-qualified 'id' type where no /// base type was specified. TypeResult actOnObjCProtocolQualifierType( SourceLocation lAngleLoc, ArrayRef<Decl *> protocols, ArrayRef<SourceLocation> protocolLocs, SourceLocation rAngleLoc); /// Build a specialized and/or protocol-qualified Objective-C type. TypeResult actOnObjCTypeArgsAndProtocolQualifiers( Scope *S, SourceLocation Loc, ParsedType BaseType, SourceLocation TypeArgsLAngleLoc, ArrayRef<ParsedType> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<Decl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc); /// Build an Objective-C type parameter type. QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Build an Objective-C object pointer type. 
QualType BuildObjCObjectType(QualType BaseType, SourceLocation Loc, SourceLocation TypeArgsLAngleLoc, ArrayRef<TypeSourceInfo *> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Ensure attributes are consistent with type. /// \param [in, out] Attributes The attributes to check; they will /// be modified to be consistent with \p PropertyTy. void CheckObjCPropertyAttributes(Decl *PropertyPtrTy, SourceLocation Loc, unsigned &Attributes, bool propertyInPrimaryClass); /// Process the specified property declaration and create decls for the /// setters and getters as needed. /// \param property The property declaration being processed void ProcessPropertyDecl(ObjCPropertyDecl *property); void DiagnosePropertyMismatch(ObjCPropertyDecl *Property, ObjCPropertyDecl *SuperProperty, const IdentifierInfo *Name, bool OverridingProtocolProperty); void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT, ObjCInterfaceDecl *ID); Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods = None, ArrayRef<DeclGroupPtrTy> allTUVars = None); Decl *ActOnProperty(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, ObjCDeclSpec &ODS, Selector GetterSel, Selector SetterSel, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); Decl *ActOnPropertyImplDecl(Scope *S, SourceLocation AtLoc, SourceLocation PropertyLoc, bool ImplKind, IdentifierInfo *PropertyId, IdentifierInfo *PropertyIvar, SourceLocation PropertyIvarLoc, ObjCPropertyQueryKind QueryKind); enum ObjCSpecialMethodKind { OSMK_None, OSMK_Alloc, OSMK_New, OSMK_Copy, OSMK_RetainingInit, OSMK_NonRetainingInit }; struct ObjCArgInfo { IdentifierInfo *Name; SourceLocation NameLoc; // The Type is null if no type was specified, and the DeclSpec is invalid // in this case. ParsedType Type; ObjCDeclSpec DeclSpec; /// ArgAttrs - Attribute list for this argument. ParsedAttributesView ArgAttrs; }; Decl *ActOnMethodDeclaration( Scope *S, SourceLocation BeginLoc, // location of the + or -. SourceLocation EndLoc, // location of the ; or {. tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType, ArrayRef<SourceLocation> SelectorLocs, Selector Sel, // optional arguments. The number of types/arguments is obtained // from the Sel.getNumArgs(). ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind, bool isVariadic, bool MethodDefinition); ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel, const ObjCObjectPointerType *OPT, bool IsInstance); ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty, bool IsInstance); bool CheckARCMethodDecl(ObjCMethodDecl *method); bool inferObjCARCLifetime(ValueDecl *decl); void deduceOpenCLAddressSpace(ValueDecl *decl); ExprResult HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT, Expr *BaseExpr, SourceLocation OpLoc, DeclarationName MemberName, SourceLocation MemberLoc, SourceLocation SuperLoc, QualType SuperType, bool Super); ExprResult ActOnClassPropertyRefExpr(IdentifierInfo &receiverName, IdentifierInfo &propertyName, SourceLocation receiverNameLoc, SourceLocation propertyNameLoc); ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc); /// Describes the kind of message expression indicated by a message /// send that starts with an identifier. 
enum ObjCMessageKind { /// The message is sent to 'super'. ObjCSuperMessage, /// The message is an instance message. ObjCInstanceMessage, /// The message is a class message, and the identifier is a type /// name. ObjCClassMessage }; ObjCMessageKind getObjCMessageKind(Scope *S, IdentifierInfo *Name, SourceLocation NameLoc, bool IsSuper, bool HasTrailingDot, ParsedType &ReceiverType); ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildClassMessageImplicit(QualType ReceiverType, bool isSuperReceiver, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnClassMessage(Scope *S, ParsedType Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildInstanceMessage(Expr *Receiver, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildInstanceMessageImplicit(Expr *Receiver, QualType ReceiverType, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnInstanceMessage(Scope *S, Expr *Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, TypeSourceInfo *TSInfo, Expr *SubExpr); ExprResult ActOnObjCBridgedCast(Scope *S, SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, ParsedType Type, SourceLocation RParenLoc, Expr *SubExpr); void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr); void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr); bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr, CastKind &Kind); bool checkObjCBridgeRelatedComponents(SourceLocation Loc, QualType DestType, QualType SrcType, ObjCInterfaceDecl *&RelatedClass, ObjCMethodDecl *&ClassMethod, ObjCMethodDecl *&InstanceMethod, TypedefNameDecl *&TDNDecl, bool CfToNs, bool Diagnose = true); bool CheckObjCBridgeRelatedConversions(SourceLocation Loc, QualType DestType, QualType SrcType, Expr *&SrcExpr, bool Diagnose = true); bool CheckConversionToObjCLiteral(QualType DstType, Expr *&SrcExpr, bool Diagnose = true); bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall); /// Check whether the given new method is a valid override of the /// given overridden method, and set any properties that should be inherited. void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod, const ObjCMethodDecl *Overridden); /// Describes the compatibility of a result type with its method. 
enum ResultTypeCompatibilityKind { RTC_Compatible, RTC_Incompatible, RTC_Unknown }; void CheckObjCMethodDirectOverrides(ObjCMethodDecl *method, ObjCMethodDecl *overridden); void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod, ObjCInterfaceDecl *CurrentClass, ResultTypeCompatibilityKind RTC); enum PragmaOptionsAlignKind { POAK_Native, // #pragma options align=native POAK_Natural, // #pragma options align=natural POAK_Packed, // #pragma options align=packed POAK_Power, // #pragma options align=power POAK_Mac68k, // #pragma options align=mac68k POAK_Reset // #pragma options align=reset }; /// ActOnPragmaClangSection - Called on well formed \#pragma clang section void ActOnPragmaClangSection(SourceLocation PragmaLoc, PragmaClangSectionAction Action, PragmaClangSectionKind SecKind, StringRef SecName); /// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align. void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind, SourceLocation PragmaLoc); /// ActOnPragmaPack - Called on well formed \#pragma pack(...). void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action, StringRef SlotLabel, Expr *Alignment); enum class PragmaAlignPackDiagnoseKind { NonDefaultStateAtInclude, ChangedStateAtExit }; void DiagnoseNonDefaultPragmaAlignPack(PragmaAlignPackDiagnoseKind Kind, SourceLocation IncludeLoc); void DiagnoseUnterminatedPragmaAlignPack(); /// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off]. void ActOnPragmaMSStruct(PragmaMSStructKind Kind); /// ActOnPragmaMSComment - Called on well formed /// \#pragma comment(kind, "arg"). void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind, StringRef Arg); /// ActOnPragmaMSPointersToMembers - called on well formed \#pragma /// pointers_to_members(representation method[, general purpose /// representation]). void ActOnPragmaMSPointersToMembers( LangOptions::PragmaMSPointersToMembersKind Kind, SourceLocation PragmaLoc); /// Called on well formed \#pragma vtordisp(). void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action, SourceLocation PragmaLoc, MSVtorDispMode Value); enum PragmaSectionKind { PSK_DataSeg, PSK_BSSSeg, PSK_ConstSeg, PSK_CodeSeg, }; bool UnifySection(StringRef SectionName, int SectionFlags, NamedDecl *TheDecl); bool UnifySection(StringRef SectionName, int SectionFlags, SourceLocation PragmaSectionLocation); /// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg. void ActOnPragmaMSSeg(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, StringLiteral *SegmentName, llvm::StringRef PragmaName); /// Called on well formed \#pragma section(). void ActOnPragmaMSSection(SourceLocation PragmaLocation, int SectionFlags, StringLiteral *SegmentName); /// Called on well-formed \#pragma init_seg(). void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation, StringLiteral *SegmentName); /// Called on #pragma clang __debug dump II void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II); /// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name, StringRef Value); /// Are precise floating point semantics currently enabled? 
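// Editor's illustrative sketch (not part of this header): the layout effect
// that ActOnPragmaPack records on the pragma stack. The struct names are
// invented; exact sizes assume a typical target where uint32_t is 4-byte
// aligned.
#include <cstdint>

struct Unpacked { std::uint8_t a; std::uint32_t b; };   // usually padded to 8 bytes

#pragma pack(push, 1)
struct Packed   { std::uint8_t a; std::uint32_t b; };   // 5 bytes under pack(1)
#pragma pack(pop)

static_assert(sizeof(Packed) == 5, "pack(1) removes the padding");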
bool isPreciseFPEnabled() { return !CurFPFeatures.getAllowFPReassociate() && !CurFPFeatures.getNoSignedZero() && !CurFPFeatures.getAllowReciprocal() && !CurFPFeatures.getAllowApproxFunc(); } /// ActOnPragmaFloatControl - Call on well-formed \#pragma float_control void ActOnPragmaFloatControl(SourceLocation Loc, PragmaMsStackAction Action, PragmaFloatControlKind Value); /// ActOnPragmaUnused - Called on well-formed '\#pragma unused'. void ActOnPragmaUnused(const Token &Identifier, Scope *curScope, SourceLocation PragmaLoc); /// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... . void ActOnPragmaVisibility(const IdentifierInfo* VisType, SourceLocation PragmaLoc); NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II, SourceLocation Loc); void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W); /// ActOnPragmaWeakID - Called on well formed \#pragma weak ident. void ActOnPragmaWeakID(IdentifierInfo* WeakName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc); /// ActOnPragmaRedefineExtname - Called on well formed /// \#pragma redefine_extname oldname newname. void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident. void ActOnPragmaWeakAlias(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaFPContract - Called on well formed /// \#pragma {STDC,OPENCL} FP_CONTRACT and /// \#pragma clang fp contract void ActOnPragmaFPContract(SourceLocation Loc, LangOptions::FPModeKind FPC); /// Called on well formed /// \#pragma clang fp reassociate void ActOnPragmaFPReassociate(SourceLocation Loc, bool IsEnabled); /// ActOnPragmaFenvAccess - Called on well formed /// \#pragma STDC FENV_ACCESS void ActOnPragmaFEnvAccess(SourceLocation Loc, bool IsEnabled); /// Called on well formed '\#pragma clang fp' that has option 'exceptions'. void ActOnPragmaFPExceptions(SourceLocation Loc, LangOptions::FPExceptionModeKind); /// Called to set constant rounding mode for floating point operations. void setRoundingMode(SourceLocation Loc, llvm::RoundingMode); /// Called to set exception behavior for floating point operations. void setExceptionMode(SourceLocation Loc, LangOptions::FPExceptionModeKind); /// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to /// a the record decl, to handle '\#pragma pack' and '\#pragma options align'. void AddAlignmentAttributesForRecord(RecordDecl *RD); /// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record. void AddMsStructLayoutForRecord(RecordDecl *RD); /// PushNamespaceVisibilityAttr - Note that we've entered a /// namespace with a visibility attribute. void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr, SourceLocation Loc); /// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used, /// add an appropriate visibility attribute. void AddPushedVisibilityAttribute(Decl *RD); /// PopPragmaVisibility - Pop the top element of the visibility stack; used /// for '\#pragma GCC visibility' and visibility attributes on namespaces. void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc); /// FreeVisContext - Deallocate and null out VisContext. 
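// Editor's illustrative sketch (not part of this header): the source-level
// pragmas handled by ActOnPragmaFPContract and ActOnPragmaFEnvAccess.
// Compiler support for these pragmas in C++ varies by version, so treat this
// as an illustration rather than a portability guarantee.
#include <cfenv>

double fused(double a, double b, double c) {
#pragma STDC FP_CONTRACT ON            // allow contracting a*b+c into an FMA
  return a * b + c;
}

int readRounding() {
#pragma STDC FENV_ACCESS ON            // the FP environment may be inspected
  return std::fegetround();
}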
void FreeVisContext(); /// AddCFAuditedAttribute - Check whether we're currently within /// '\#pragma clang arc_cf_code_audited' and, if so, consider adding /// the appropriate attribute. void AddCFAuditedAttribute(Decl *D); void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute, SourceLocation PragmaLoc, attr::ParsedSubjectMatchRuleSet Rules); void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc, const IdentifierInfo *Namespace); /// Called on well-formed '\#pragma clang attribute pop'. void ActOnPragmaAttributePop(SourceLocation PragmaLoc, const IdentifierInfo *Namespace); /// Adds the attributes that have been specified using the /// '\#pragma clang attribute push' directives to the given declaration. void AddPragmaAttributes(Scope *S, Decl *D); void DiagnoseUnterminatedPragmaAttribute(); /// Called on well formed \#pragma clang optimize. void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc); /// Get the location for the currently active "\#pragma clang optimize /// off". If this location is invalid, then the state of the pragma is "on". SourceLocation getOptimizeOffPragmaLocation() const { return OptimizeOffPragmaLocation; } /// Only called on function definitions; if there is a pragma in scope /// with the effect of a range-based optnone, consider marking the function /// with attribute optnone. void AddRangeBasedOptnone(FunctionDecl *FD); /// Adds the 'optnone' attribute to the function declaration if there /// are no conflicts; Loc represents the location causing the 'optnone' /// attribute to be added (usually because of a pragma). void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc); /// AddAlignedAttr - Adds an aligned attribute to a particular declaration. void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E, bool IsPackExpansion); void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, TypeSourceInfo *T, bool IsPackExpansion); /// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular /// declaration. void AddAssumeAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E, Expr *OE); /// AddAllocAlignAttr - Adds an alloc_align attribute to a particular /// declaration. void AddAllocAlignAttr(Decl *D, const AttributeCommonInfo &CI, Expr *ParamExpr); /// AddAlignValueAttr - Adds an align_value attribute to a particular /// declaration. void AddAlignValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E); /// AddAnnotationAttr - Adds an annotation Annot with Args arguments to D. void AddAnnotationAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Annot, MutableArrayRef<Expr *> Args); /// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular /// declaration. void AddLaunchBoundsAttr(Decl *D, const AttributeCommonInfo &CI, Expr *MaxThreads, Expr *MinBlocks); /// AddModeAttr - Adds a mode attribute to a particular declaration. void AddModeAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Name, bool InInstantiation = false); void AddParameterABIAttr(Decl *D, const AttributeCommonInfo &CI, ParameterABI ABI); enum class RetainOwnershipKind {NS, CF, OS}; void AddXConsumedAttr(Decl *D, const AttributeCommonInfo &CI, RetainOwnershipKind K, bool IsTemplateInstantiation); /// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size /// attribute to a particular declaration. void addAMDGPUFlatWorkGroupSizeAttr(Decl *D, const AttributeCommonInfo &CI, Expr *Min, Expr *Max); /// addAMDGPUWavePersEUAttr - Adds an amdgpu_waves_per_eu attribute to a /// particular declaration. 
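// Editor's illustrative sketch (not part of this header): the pragma handled
// by ActOnPragmaAttributeAttribute / ActOnPragmaAttributePop, which applies
// an attribute to every matching declaration until the matching pop. The
// declarations and the "audited" string are invented for the example.
#pragma clang attribute push(__attribute__((annotate("audited"))), apply_to = function)

void getsTheAnnotation();      // receives annotate("audited")
int alsoGetsIt(int x);         // receives annotate("audited")

#pragma clang attribute pop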
void addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI, Expr *Min, Expr *Max); bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type); //===--------------------------------------------------------------------===// // C++ Coroutines TS // bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc, StringRef Keyword); ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E); StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, UnresolvedLookupExpr* Lookup); ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E); StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs); bool buildCoroutineParameterMoves(SourceLocation Loc); VarDecl *buildCoroutinePromise(SourceLocation Loc); void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body); ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc, SourceLocation FuncLoc); /// Check that the expression co_await promise.final_suspend() shall not be /// potentially-throwing. bool checkFinalSuspendNoThrow(const Stmt *FinalSuspend); //===--------------------------------------------------------------------===// // OpenCL extensions. // private: std::string CurrOpenCLExtension; /// Extensions required by an OpenCL type. llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap; /// Extensions required by an OpenCL declaration. llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap; public: llvm::StringRef getCurrentOpenCLExtension() const { return CurrOpenCLExtension; } /// Check if a function declaration \p FD associates with any /// extensions present in OpenCLDeclExtMap and if so return the /// extension(s) name(s). std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD); /// Check if a function type \p FT associates with any /// extensions present in OpenCLTypeExtMap and if so return the /// extension(s) name(s). std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT); /// Find an extension in an appropriate extension map and return its name template<typename T, typename MapT> std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map); void setCurrentOpenCLExtension(llvm::StringRef Ext) { CurrOpenCLExtension = std::string(Ext); } /// Set OpenCL extensions for a type which can only be used when these /// OpenCL extensions are enabled. If \p Exts is empty, do nothing. /// \param Exts A space separated list of OpenCL extensions. void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts); /// Set OpenCL extensions for a declaration which can only be /// used when these OpenCL extensions are enabled. If \p Exts is empty, do /// nothing. /// \param Exts A space separated list of OpenCL extensions. void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts); /// Set current OpenCL extensions for a type which can only be used /// when these OpenCL extensions are enabled. If current OpenCL extension is /// empty, do nothing. void setCurrentOpenCLExtensionForType(QualType T); /// Set current OpenCL extensions for a declaration which /// can only be used when these OpenCL extensions are enabled. If current /// OpenCL extension is empty, do nothing. 
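/// Illustrative OpenCL source (hypothetical) that depends on an extension
/// being enabled before the declaration is usable:
/// \code
///   #pragma OPENCL EXTENSION cl_khr_fp64 : enable
///   double scale(double x);   // 'double' requires cl_khr_fp64
/// \endcode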
void setCurrentOpenCLExtensionForDecl(Decl *FD); bool isOpenCLDisabledDecl(Decl *FD); /// Check if type \p T corresponding to declaration specifier \p DS /// is disabled due to required OpenCL extensions being disabled. If so, /// emit diagnostics. /// \return true if type is disabled. bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T); /// Check if declaration \p D used by expression \p E /// is disabled due to required OpenCL extensions being disabled. If so, /// emit diagnostics. /// \return true if type is disabled. bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E); //===--------------------------------------------------------------------===// // OpenMP directives and clauses. // private: void *VarDataSharingAttributesStack; /// Number of nested '#pragma omp declare target' directives. SmallVector<SourceLocation, 4> DeclareTargetNesting; /// Initialization of data-sharing attributes stack. void InitDataSharingAttributesStack(); void DestroyDataSharingAttributesStack(); ExprResult VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind, bool StrictlyPositive = true); /// Returns OpenMP nesting level for current directive. unsigned getOpenMPNestingLevel() const; /// Adjusts the function scopes index for the target-based regions. void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex, unsigned Level) const; /// Returns the number of scopes associated with the construct on the given /// OpenMP level. int getNumberOfConstructScopes(unsigned Level) const; /// Push new OpenMP function region for non-capturing function. void pushOpenMPFunctionRegion(); /// Pop OpenMP function region for non-capturing function. void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI); /// Checks if a type or a declaration is disabled due to the owning extension /// being disabled, and emits diagnostic messages if it is disabled. /// \param D type or declaration to be checked. /// \param DiagLoc source location for the diagnostic message. /// \param DiagInfo information to be emitted for the diagnostic message. /// \param SrcRange source range of the declaration. /// \param Map maps type or declaration to the extensions. /// \param Selector selects diagnostic message: 0 for type and 1 for /// declaration. /// \return true if the type or declaration is disabled. template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT> bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo, MapT &Map, unsigned Selector = 0, SourceRange SrcRange = SourceRange()); /// Helper to keep information about the current `omp begin/end declare /// variant` nesting. struct OMPDeclareVariantScope { /// The associated OpenMP context selector. OMPTraitInfo *TI; /// The associated OpenMP context selector mangling. std::string NameSuffix; OMPDeclareVariantScope(OMPTraitInfo &TI); }; /// Return the OMPTraitInfo for the surrounding scope, if any. OMPTraitInfo *getOMPTraitInfoForSurroundingScope() { return OMPDeclareVariantScopes.empty() ? nullptr : OMPDeclareVariantScopes.back().TI; } /// The current `omp begin/end declare variant` scopes. SmallVector<OMPDeclareVariantScope, 4> OMPDeclareVariantScopes; /// The current `omp begin/end assumes` scopes. SmallVector<AssumptionAttr *, 4> OMPAssumeScoped; /// All `omp assumes` we encountered so far. SmallVector<AssumptionAttr *, 4> OMPAssumeGlobal; public: /// The declarator \p D defines a function in the scope \p S which is nested /// in an `omp begin/end declare variant` scope. 
In this method we create a /// declaration for \p D and rename \p D according to the OpenMP context /// selector of the surrounding scope. Return all base functions in \p Bases. void ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope( Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, SmallVectorImpl<FunctionDecl *> &Bases); /// Register \p D as specialization of all base functions in \p Bases in the /// current `omp begin/end declare variant` scope. void ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope( Decl *D, SmallVectorImpl<FunctionDecl *> &Bases); /// Act on \p D, a function definition inside of an `omp [begin/end] assumes`. void ActOnFinishedFunctionDefinitionInOpenMPAssumeScope(Decl *D); /// Can we exit an OpenMP declare variant scope at the moment. bool isInOpenMPDeclareVariantScope() const { return !OMPDeclareVariantScopes.empty(); } /// Given the potential call expression \p Call, determine if there is a /// specialization via the OpenMP declare variant mechanism available. If /// there is, return the specialized call expression, otherwise return the /// original \p Call. ExprResult ActOnOpenMPCall(ExprResult Call, Scope *Scope, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig); /// Handle a `omp begin declare variant`. void ActOnOpenMPBeginDeclareVariant(SourceLocation Loc, OMPTraitInfo &TI); /// Handle a `omp end declare variant`. void ActOnOpenMPEndDeclareVariant(); /// Checks if the variant/multiversion functions are compatible. bool areMultiversionVariantFunctionsCompatible( const FunctionDecl *OldFD, const FunctionDecl *NewFD, const PartialDiagnostic &NoProtoDiagID, const PartialDiagnosticAt &NoteCausedDiagIDAt, const PartialDiagnosticAt &NoSupportDiagIDAt, const PartialDiagnosticAt &DiffDiagIDAt, bool TemplatesSupported, bool ConstexprSupported, bool CLinkageMayDiffer); /// Function tries to capture lambda's captured variables in the OpenMP region /// before the original lambda is captured. void tryCaptureOpenMPLambdas(ValueDecl *V); /// Return true if the provided declaration \a VD should be captured by /// reference. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. /// \param OpenMPCaptureLevel Capture level within an OpenMP construct. bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level, unsigned OpenMPCaptureLevel) const; /// Check if the specified variable is used in one of the private /// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP /// constructs. VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false, unsigned StopAt = 0); ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK, ExprObjectKind OK, SourceLocation Loc); /// If the current region is a loop-based region, mark the start of the loop /// construct. void startOpenMPLoop(); /// If the current region is a range loop-based region, mark the start of the /// loop construct. void startOpenMPCXXRangeFor(); /// Check if the specified variable is used in 'private' clause. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. OpenMPClauseKind isOpenMPPrivateDecl(ValueDecl *D, unsigned Level, unsigned CapLevel) const; /// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.) /// for \p FD based on DSA for the provided corresponding captured declaration /// \p D. 
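/// For instance (hypothetical snippet), the data-sharing attributes below
/// lead to different capture kinds for \c x and \c y in the captured region:
/// \code
///   int x = 0, y = 0;
///   #pragma omp parallel firstprivate(x) shared(y)
///   { y += x; }
/// \endcode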
void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level); /// Check if the specified variable is captured by 'target' directive. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level, unsigned CaptureLevel) const; /// Check if the specified global variable must be captured by outer capture /// regions. /// \param Level Relative level of nested OpenMP construct for that /// the check is performed. bool isOpenMPGlobalCapturedDecl(ValueDecl *D, unsigned Level, unsigned CaptureLevel) const; ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc, Expr *Op); /// Called on start of new data sharing attribute block. void StartOpenMPDSABlock(OpenMPDirectiveKind K, const DeclarationNameInfo &DirName, Scope *CurScope, SourceLocation Loc); /// Start analysis of clauses. void StartOpenMPClause(OpenMPClauseKind K); /// End analysis of clauses. void EndOpenMPClause(); /// Called on end of data sharing attribute block. void EndOpenMPDSABlock(Stmt *CurDirective); /// Check if the current region is an OpenMP loop region and if it is, /// mark loop control variable, used in \p Init for loop initialization, as /// private by default. /// \param Init First part of the for loop. void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init); // OpenMP directives and clauses. /// Called on correct id-expression from the '#pragma omp /// threadprivate'. ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id, OpenMPDirectiveKind Kind); /// Called on well-formed '#pragma omp threadprivate'. DeclGroupPtrTy ActOnOpenMPThreadprivateDirective( SourceLocation Loc, ArrayRef<Expr *> VarList); /// Builds a new OpenMPThreadPrivateDecl and checks its correctness. OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef<Expr *> VarList); /// Called on well-formed '#pragma omp allocate'. DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc, ArrayRef<Expr *> VarList, ArrayRef<OMPClause *> Clauses, DeclContext *Owner = nullptr); /// Called on well-formed '#pragma omp [begin] assume[s]'. void ActOnOpenMPAssumesDirective(SourceLocation Loc, OpenMPDirectiveKind DKind, ArrayRef<StringRef> Assumptions, bool SkippedClauses); /// Check if there is an active global `omp begin assumes` directive. bool isInOpenMPAssumeScope() const { return !OMPAssumeScoped.empty(); } /// Check if there is an active global `omp assumes` directive. bool hasGlobalOpenMPAssumes() const { return !OMPAssumeGlobal.empty(); } /// Called on well-formed '#pragma omp end assumes'. void ActOnOpenMPEndAssumesDirective(); /// Called on well-formed '#pragma omp requires'. DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc, ArrayRef<OMPClause *> ClauseList); /// Check restrictions on Requires directive OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc, ArrayRef<OMPClause *> Clauses); /// Check if the specified type is allowed to be used in 'omp declare /// reduction' construct. QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc, TypeResult ParsedType); /// Called on start of '#pragma omp declare reduction'. DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart( Scope *S, DeclContext *DC, DeclarationName Name, ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes, AccessSpecifier AS, Decl *PrevDeclInScope = nullptr); /// Initialize declare reduction construct initializer. 
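/// A directive that exercises the combiner and initializer callbacks below
/// (illustrative; \c Vec is a placeholder type with a suitable operator+):
/// \code
///   #pragma omp declare reduction(vsum : Vec : omp_out = omp_out + omp_in) initializer(omp_priv = Vec())
/// \endcode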
void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D); /// Finish current declare reduction construct initializer. void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner); /// Initialize declare reduction construct initializer. /// \return omp_priv variable. VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D); /// Finish current declare reduction construct initializer. void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer, VarDecl *OmpPrivParm); /// Called at the end of '#pragma omp declare reduction'. DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd( Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid); /// Check variable declaration in 'omp declare mapper' construct. TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D); /// Check if the specified type is allowed to be used in 'omp declare /// mapper' construct. QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc, TypeResult ParsedType); /// Called on start of '#pragma omp declare mapper'. DeclGroupPtrTy ActOnOpenMPDeclareMapperDirective( Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType, SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS, Expr *MapperVarRef, ArrayRef<OMPClause *> Clauses, Decl *PrevDeclInScope = nullptr); /// Build the mapper variable of '#pragma omp declare mapper'. ExprResult ActOnOpenMPDeclareMapperDirectiveVarDecl(Scope *S, QualType MapperType, SourceLocation StartLoc, DeclarationName VN); bool isOpenMPDeclareMapperVarDeclAllowed(const VarDecl *VD) const; const ValueDecl *getOpenMPDeclareMapperVarName() const; /// Called on the start of target region i.e. '#pragma omp declare target'. bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc); /// Called at the end of target region i.e. '#pragma omp end declare target'. void ActOnFinishOpenMPDeclareTargetDirective(); /// Searches for the provided declaration name for OpenMP declare target /// directive. NamedDecl * lookupOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id, NamedDeclSetType &SameDirectiveDecls); /// Called on correct id-expression from the '#pragma omp declare target'. void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc, OMPDeclareTargetDeclAttr::MapTypeTy MT, OMPDeclareTargetDeclAttr::DevTypeTy DT); /// Check declaration inside target region. void checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D, SourceLocation IdLoc = SourceLocation()); /// Finishes analysis of the deferred function calls that may be declared as /// host/nohost during device/host compilation. void finalizeOpenMPDelayedAnalysis(const FunctionDecl *Caller, const FunctionDecl *Callee, SourceLocation Loc); /// Return true inside OpenMP declare target region. bool isInOpenMPDeclareTargetContext() const { return !DeclareTargetNesting.empty(); } /// Return true inside OpenMP target region. bool isInOpenMPTargetExecutionDirective() const; /// Return the number of captured regions created for an OpenMP directive. static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind); /// Initialization of captured region for OpenMP region. void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope); /// Called for syntactical loops (ForStmt or CXXForRangeStmt) associated with /// an OpenMP loop directive. StmtResult ActOnOpenMPCanonicalLoop(Stmt *AStmt); /// End of OpenMP region. /// /// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region. /// /// \returns Statement for finished OpenMP region. StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses); StmtResult ActOnOpenMPExecutableDirective( OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName, OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); using VarsWithInheritedDSAType = llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>; /// Called on well-formed '\#pragma omp simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '#pragma omp tile' after parsing of its clauses and /// the associated statement. StmtResult ActOnOpenMPTileDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp for' after parsing /// of the associated statement. StmtResult ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp for simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp sections' after parsing /// of the associated statement. StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp section' after parsing of the /// associated statement. StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp single' after parsing of the /// associated statement. StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp master' after parsing of the /// associated statement. StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp critical' after parsing of the /// associated statement. StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel for' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel for simd' after /// parsing of the associated statement. 
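/// For example (illustrative; \c N, \c a, \c x and \c y are placeholders):
/// \code
///   #pragma omp parallel for simd
///   for (int i = 0; i < N; ++i)
///     y[i] = a * x[i] + y[i];
/// \endcode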
StmtResult ActOnOpenMPParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel master' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelMasterDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel sections' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp task' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskyield'. StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp barrier'. StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskwait'. StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskgroup'. StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp flush'. StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp depobj'. StmtResult ActOnOpenMPDepobjDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp scan'. StmtResult ActOnOpenMPScanDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp ordered' after parsing of the /// associated statement. StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp atomic' after parsing of the /// associated statement. StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target data' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target enter data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target exit data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target parallel' after /// parsing of the associated statement. 
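/// For example (illustrative; \c work is a placeholder function):
/// \code
///   #pragma omp target parallel num_threads(128)
///   work();
/// \endcode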
StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target parallel for' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp cancellation point'. StmtResult ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp cancel'. StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp taskloop' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp taskloop simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp master taskloop' after parsing of the /// associated statement. StmtResult ActOnOpenMPMasterTaskLoopDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp master taskloop simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPMasterTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel master taskloop' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelMasterTaskLoopDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel master taskloop simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelMasterTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target update'. StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp distribute parallel for' after /// parsing of the associated statement. 
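/// For example (illustrative), nested inside a target teams region:
/// \code
///   #pragma omp target teams
///   #pragma omp distribute parallel for
///   for (int i = 0; i < N; ++i)
///     a[i] += b[i];
/// \endcode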
StmtResult ActOnOpenMPDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute' after parsing of /// the associated statement. StmtResult ActOnOpenMPTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target teams distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for' /// after parsing of the associated statement. 
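/// For example (illustrative; the array sections are placeholders):
/// \code
///   #pragma omp target teams distribute parallel for map(to : x[0:N]) map(tofrom : y[0:N])
///   for (int i = 0; i < N; ++i)
///     y[i] += a * x[i];
/// \endcode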
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for /// simd' after parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp interop'. StmtResult ActOnOpenMPInteropDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Checks correctness of linear modifiers. bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind, SourceLocation LinLoc); /// Checks that the specified declaration matches requirements for the linear /// decls. bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc, OpenMPLinearClauseKind LinKind, QualType Type, bool IsDeclareSimd = false); /// Called on well-formed '\#pragma omp declare simd' after parsing of /// the associated method/function. DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective( DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS, Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds, ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears, ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR); /// Checks '\#pragma omp declare variant' variant function and original /// functions after parsing of the associated method/function. /// \param DG Function declaration to which declare variant directive is /// applied to. /// \param VariantRef Expression that references the variant function, which /// must be used instead of the original one, specified in \p DG. /// \param TI The trait info object representing the match clause. /// \returns None, if the function/variant function are not compatible with /// the pragma, pair of original function/variant ref expression otherwise. Optional<std::pair<FunctionDecl *, Expr *>> checkOpenMPDeclareVariantFunction(DeclGroupPtrTy DG, Expr *VariantRef, OMPTraitInfo &TI, SourceRange SR); /// Called on well-formed '\#pragma omp declare variant' after parsing of /// the associated method/function. /// \param FD Function declaration to which declare variant directive is /// applied to. /// \param VariantRef Expression that references the variant function, which /// must be used instead of the original one, specified in \p DG. /// \param TI The context traits associated with the function variant. void ActOnOpenMPDeclareVariantDirective(FunctionDecl *FD, Expr *VariantRef, OMPTraitInfo &TI, SourceRange SR); OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'allocator' clause. OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'if' clause. 
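/// For example (illustrative), including a directive-name modifier:
/// \code
///   #pragma omp parallel if(parallel : n > 1024)
///   work(n);
/// \endcode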
OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier, Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation NameModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'final' clause. OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_threads' clause. OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'safelen' clause. OMPClause *ActOnOpenMPSafelenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'simdlen' clause. OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-form 'sizes' clause. OMPClause *ActOnOpenMPSizesClause(ArrayRef<Expr *> SizeExprs, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'collapse' clause. OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'ordered' clause. OMPClause * ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc, SourceLocation LParenLoc = SourceLocation(), Expr *NumForLoops = nullptr); /// Called on well-formed 'grainsize' clause. OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_tasks' clause. OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'hint' clause. OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'detach' clause. OMPClause *ActOnOpenMPDetachClause(Expr *Evt, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind, unsigned Argument, SourceLocation ArgumentLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'default' clause. OMPClause *ActOnOpenMPDefaultClause(llvm::omp::DefaultKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'proc_bind' clause. OMPClause *ActOnOpenMPProcBindClause(llvm::omp::ProcBindKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'order' clause. OMPClause *ActOnOpenMPOrderClause(OpenMPOrderClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'update' clause. OMPClause *ActOnOpenMPUpdateClause(OpenMPDependClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSingleExprWithArgClause( OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc, SourceLocation EndLoc); /// Called on well-formed 'schedule' clause. 
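/// For example (illustrative), with a modifier and a chunk size:
/// \code
///   #pragma omp parallel for schedule(monotonic : dynamic, 16)
///   for (int i = 0; i < N; ++i)
///     process(i);
/// \endcode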
OMPClause *ActOnOpenMPScheduleClause( OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2, OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'nowait' clause. OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'untied' clause. OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'mergeable' clause. OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'read' clause. OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'write' clause. OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'update' clause. OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'capture' clause. OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'seq_cst' clause. OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'acq_rel' clause. OMPClause *ActOnOpenMPAcqRelClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'acquire' clause. OMPClause *ActOnOpenMPAcquireClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'release' clause. OMPClause *ActOnOpenMPReleaseClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'relaxed' clause. OMPClause *ActOnOpenMPRelaxedClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'init' clause. OMPClause *ActOnOpenMPInitClause(Expr *InteropVar, ArrayRef<Expr *> PrefExprs, bool IsTarget, bool IsTargetSync, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation VarLoc, SourceLocation EndLoc); /// Called on well-formed 'use' clause. OMPClause *ActOnOpenMPUseClause(Expr *InteropVar, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation VarLoc, SourceLocation EndLoc); /// Called on well-formed 'destroy' clause. OMPClause *ActOnOpenMPDestroyClause(Expr *InteropVar, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation VarLoc, SourceLocation EndLoc); /// Called on well-formed 'threads' clause. OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'simd' clause. OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'nogroup' clause. OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'unified_address' clause. OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'unified_shared_memory' clause. OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'reverse_offload' clause. OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'dynamic_allocators' clause.
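/// For example (illustrative), a requires directive carrying such clauses:
/// \code
///   #pragma omp requires unified_shared_memory dynamic_allocators
/// \endcode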
OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'atomic_default_mem_order' clause. OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause( OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPVarListClause( OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *DepModOrTailExpr, const OMPVarListLocTy &Locs, SourceLocation ColonLoc, CXXScopeSpec &ReductionOrMapperIdScopeSpec, DeclarationNameInfo &ReductionOrMapperId, int ExtraModifier, ArrayRef<OpenMPMapModifierKind> MapTypeModifiers, ArrayRef<SourceLocation> MapTypeModifiersLoc, bool IsMapTypeImplicit, SourceLocation ExtraModifierLoc, ArrayRef<OpenMPMotionModifierKind> MotionModifiers, ArrayRef<SourceLocation> MotionModifiersLoc); /// Called on well-formed 'inclusive' clause. OMPClause *ActOnOpenMPInclusiveClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'exclusive' clause. OMPClause *ActOnOpenMPExclusiveClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'allocate' clause. OMPClause * ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation ColonLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'private' clause. OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'firstprivate' clause. OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'lastprivate' clause. OMPClause *ActOnOpenMPLastprivateClause( ArrayRef<Expr *> VarList, OpenMPLastprivateModifier LPKind, SourceLocation LPKindLoc, SourceLocation ColonLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'shared' clause. OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'reduction' clause. OMPClause *ActOnOpenMPReductionClause( ArrayRef<Expr *> VarList, OpenMPReductionClauseModifier Modifier, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'task_reduction' clause. OMPClause *ActOnOpenMPTaskReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'in_reduction' clause. OMPClause *ActOnOpenMPInReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'linear' clause. 
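/// For example (illustrative), the usual linear-induction pattern:
/// \code
///   int j = 0;
///   #pragma omp simd linear(j : 1)
///   for (int i = 0; i < N; ++i) {
///     c[j] = a[i] + b[i];
///     ++j;
///   }
/// \endcode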
OMPClause * ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step, SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind LinKind, SourceLocation LinLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'aligned' clause. OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList, Expr *Alignment, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'copyin' clause. OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'copyprivate' clause. OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'flush' pseudo clause. OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'depobj' pseudo clause. OMPClause *ActOnOpenMPDepobjClause(Expr *Depobj, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'depend' clause. OMPClause * ActOnOpenMPDependClause(Expr *DepModifier, OpenMPDependClauseKind DepKind, SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'device' clause. OMPClause *ActOnOpenMPDeviceClause(OpenMPDeviceClauseModifier Modifier, Expr *Device, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ModifierLoc, SourceLocation EndLoc); /// Called on well-formed 'map' clause. OMPClause * ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers, ArrayRef<SourceLocation> MapTypeModifiersLoc, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, OpenMPMapClauseKind MapType, bool IsMapTypeImplicit, SourceLocation MapLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'num_teams' clause. OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'thread_limit' clause. OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'priority' clause. OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'dist_schedule' clause. OMPClause *ActOnOpenMPDistScheduleClause( OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); /// Called on well-formed 'defaultmap' clause. OMPClause *ActOnOpenMPDefaultmapClause( OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc, SourceLocation KindLoc, SourceLocation EndLoc); /// Called on well-formed 'to' clause. 
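/// For example (illustrative), motion clauses on a target update directive:
/// \code
///   #pragma omp target update to(x[0:N]) from(y[0:N])
/// \endcode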
OMPClause * ActOnOpenMPToClause(ArrayRef<OpenMPMotionModifierKind> MotionModifiers, ArrayRef<SourceLocation> MotionModifiersLoc, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'from' clause. OMPClause * ActOnOpenMPFromClause(ArrayRef<OpenMPMotionModifierKind> MotionModifiers, ArrayRef<SourceLocation> MotionModifiersLoc, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'use_device_ptr' clause. OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// Called on well-formed 'use_device_addr' clause. OMPClause *ActOnOpenMPUseDeviceAddrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// Called on well-formed 'is_device_ptr' clause. OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// Called on well-formed 'nontemporal' clause. OMPClause *ActOnOpenMPNontemporalClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Data for list of allocators. struct UsesAllocatorsData { /// Allocator. Expr *Allocator = nullptr; /// Allocator traits. Expr *AllocatorTraits = nullptr; /// Locations of '(' and ')' symbols. SourceLocation LParenLoc, RParenLoc; }; /// Called on well-formed 'uses_allocators' clause. OMPClause *ActOnOpenMPUsesAllocatorClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<UsesAllocatorsData> Data); /// Called on well-formed 'affinity' clause. OMPClause *ActOnOpenMPAffinityClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, Expr *Modifier, ArrayRef<Expr *> Locators); /// The kind of conversion being performed. enum CheckedConversionKind { /// An implicit conversion. CCK_ImplicitConversion, /// A C-style cast. CCK_CStyleCast, /// A functional-style cast. CCK_FunctionalCast, /// A cast other than a C-style cast. CCK_OtherCast, /// A conversion for an operand of a builtin overloaded operator. CCK_ForBuiltinOverloadedOp }; static bool isCast(CheckedConversionKind CCK) { return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast || CCK == CCK_OtherCast; } /// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit /// cast. If there is already an implicit cast, merge into the existing one. /// If isLvalue, the result of the cast is an lvalue. ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK, ExprValueKind VK = VK_RValue, const CXXCastPath *BasePath = nullptr, CheckedConversionKind CCK = CCK_ImplicitConversion); /// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding /// to the conversion from scalar type ScalarTy to the Boolean type. static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy); /// IgnoredValueConversions - Given that an expression's result is /// syntactically ignored, perform any conversions that are /// required. ExprResult IgnoredValueConversions(Expr *E); // UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts // functions and arrays to their respective pointers (C99 6.3.2.1). 
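// For instance (hypothetical snippet), both effects are visible in:
//   short s = 1;
//   int n = s + 2;        // 's' undergoes integer promotion to 'int'
//   int (*fp)(void) = f;  // the function designator 'f' decays to a pointer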
ExprResult UsualUnaryConversions(Expr *E); /// CallExprUnaryConversions - a special case of a unary conversion /// performed on a function designator of a call expression. ExprResult CallExprUnaryConversions(Expr *E); // DefaultFunctionArrayConversion - converts functions and arrays // to their respective pointers (C99 6.3.2.1). ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true); // DefaultFunctionArrayLvalueConversion - converts functions and // arrays to their respective pointers and performs the // lvalue-to-rvalue conversion. ExprResult DefaultFunctionArrayLvalueConversion(Expr *E, bool Diagnose = true); // DefaultLvalueConversion - performs lvalue-to-rvalue conversion on // the operand. This function is a no-op if the operand has a function type // or an array type. ExprResult DefaultLvalueConversion(Expr *E); // DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that // do not have a prototype. Integer promotions are performed on each // argument, and arguments that have type float are promoted to double. ExprResult DefaultArgumentPromotion(Expr *E); /// If \p E is a prvalue denoting an unmaterialized temporary, materialize /// it as an xvalue. In C++98, the result will still be a prvalue, because /// we don't have xvalues there. ExprResult TemporaryMaterializationConversion(Expr *E); // Used for emitting the right warning by DefaultVariadicArgumentPromotion enum VariadicCallType { VariadicFunction, VariadicBlock, VariadicMethod, VariadicConstructor, VariadicDoesNotApply }; VariadicCallType getVariadicCallType(FunctionDecl *FDecl, const FunctionProtoType *Proto, Expr *Fn); // Used for determining in which context a type is allowed to be passed to a // vararg function. enum VarArgKind { VAK_Valid, VAK_ValidInCXX11, VAK_Undefined, VAK_MSVCUndefined, VAK_Invalid }; // Determines which VarArgKind fits an expression. VarArgKind isValidVarArgType(const QualType &Ty); /// Check to see if the given expression is a valid argument to a variadic /// function, issuing a diagnostic if not. void checkVariadicArgument(const Expr *E, VariadicCallType CT); /// Check to see if a given expression could have '.c_str()' called on it. bool hasCStrMethod(const Expr *E); /// GatherArgumentsForCall - Collects argument expressions for various /// forms of call prototypes. bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl, const FunctionProtoType *Proto, unsigned FirstParam, ArrayRef<Expr *> Args, SmallVectorImpl<Expr *> &AllArgs, VariadicCallType CallType = VariadicDoesNotApply, bool AllowExplicit = false, bool IsListInitialization = false); // DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but // will create a runtime trap if the resulting type is not a POD type. ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT, FunctionDecl *FDecl); /// Context in which we're performing a usual arithmetic conversion. enum ArithConvKind { /// An arithmetic operation. ACK_Arithmetic, /// A bitwise operation. ACK_BitwiseOp, /// A comparison. ACK_Comparison, /// A conditional (?:) operator. ACK_Conditional, /// A compound assignment expression. ACK_CompAssign, }; // UsualArithmeticConversions - performs the UsualUnaryConversions on its // operands and then handles various conversions that are common to binary // operators (C99 6.3.1.8). If both operands aren't arithmetic, this // routine returns the first non-arithmetic type found. The client is // responsible for emitting appropriate error diagnostics.
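// For instance (hypothetical snippet):
//   int i = 3; double d = 0.25;
//   auto r = i + d;   // 'i' is converted to 'double'; the result has type 'double'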
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, ArithConvKind ACK); /// AssignConvertType - All of the 'assignment' semantic checks return this /// enum to indicate whether the assignment was allowed. These checks are /// done for simple assignments, as well as initialization, return from /// function, argument passing, etc. The query is phrased in terms of a /// source and destination type. enum AssignConvertType { /// Compatible - the types are compatible according to the standard. Compatible, /// PointerToInt - The assignment converts a pointer to an int, which we /// accept as an extension. PointerToInt, /// IntToPointer - The assignment converts an int to a pointer, which we /// accept as an extension. IntToPointer, /// FunctionVoidPointer - The assignment is between a function pointer and /// void*, which the standard doesn't allow, but we accept as an extension. FunctionVoidPointer, /// IncompatiblePointer - The assignment is between two pointers types that /// are not compatible, but we accept them as an extension. IncompatiblePointer, /// IncompatibleFunctionPointer - The assignment is between two function /// pointers types that are not compatible, but we accept them as an /// extension. IncompatibleFunctionPointer, /// IncompatiblePointerSign - The assignment is between two pointers types /// which point to integers which have a different sign, but are otherwise /// identical. This is a subset of the above, but broken out because it's by /// far the most common case of incompatible pointers. IncompatiblePointerSign, /// CompatiblePointerDiscardsQualifiers - The assignment discards /// c/v/r qualifiers, which we accept as an extension. CompatiblePointerDiscardsQualifiers, /// IncompatiblePointerDiscardsQualifiers - The assignment /// discards qualifiers that we don't permit to be discarded, /// like address spaces. IncompatiblePointerDiscardsQualifiers, /// IncompatibleNestedPointerAddressSpaceMismatch - The assignment /// changes address spaces in nested pointer types which is not allowed. /// For instance, converting __private int ** to __generic int ** is /// illegal even though __private could be converted to __generic. IncompatibleNestedPointerAddressSpaceMismatch, /// IncompatibleNestedPointerQualifiers - The assignment is between two /// nested pointer types, and the qualifiers other than the first two /// levels differ e.g. char ** -> const char **, but we accept them as an /// extension. IncompatibleNestedPointerQualifiers, /// IncompatibleVectors - The assignment is between two vector types that /// have the same size, which we accept as an extension. IncompatibleVectors, /// IntToBlockPointer - The assignment converts an int to a block /// pointer. We disallow this. IntToBlockPointer, /// IncompatibleBlockPointer - The assignment is between two block /// pointers types that are not compatible. IncompatibleBlockPointer, /// IncompatibleObjCQualifiedId - The assignment is between a qualified /// id type and something else (that is incompatible with it). For example, /// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol. IncompatibleObjCQualifiedId, /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an /// object with __weak qualifier. IncompatibleObjCWeakRef, /// Incompatible - We reject this conversion outright, it is invalid to /// represent it in the AST. 
Incompatible }; /// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the /// assignment conversion type specified by ConvTy. This returns true if the /// conversion was invalid or false if the conversion was accepted. bool DiagnoseAssignmentResult(AssignConvertType ConvTy, SourceLocation Loc, QualType DstType, QualType SrcType, Expr *SrcExpr, AssignmentAction Action, bool *Complained = nullptr); /// IsValueInFlagEnum - Determine if a value is allowed as part of a flag /// enum. If AllowMask is true, then we also allow the complement of a valid /// value, to be used as a mask. bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val, bool AllowMask) const; /// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant /// integer not in the range of enum values. void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType, Expr *SrcExpr); /// CheckAssignmentConstraints - Perform type checking for assignment, /// argument passing, variable initialization, and function return values. /// C99 6.5.16. AssignConvertType CheckAssignmentConstraints(SourceLocation Loc, QualType LHSType, QualType RHSType); /// Check assignment constraints and optionally prepare for a conversion of /// the RHS to the LHS type. The conversion is prepared for if ConvertRHS /// is true. AssignConvertType CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS, CastKind &Kind, bool ConvertRHS = true); /// Check assignment constraints for an assignment of RHS to LHSType. /// /// \param LHSType The destination type for the assignment. /// \param RHS The source expression for the assignment. /// \param Diagnose If \c true, diagnostics may be produced when checking /// for assignability. If a diagnostic is produced, \p RHS will be /// set to ExprError(). Note that this function may still return /// without producing a diagnostic, even for an invalid assignment. /// \param DiagnoseCFAudited If \c true, the target is a function parameter /// in an audited Core Foundation API and does not need to be checked /// for ARC retain issues. /// \param ConvertRHS If \c true, \p RHS will be updated to model the /// conversions necessary to perform the assignment. If \c false, /// \p Diagnose must also be \c false. AssignConvertType CheckSingleAssignmentConstraints( QualType LHSType, ExprResult &RHS, bool Diagnose = true, bool DiagnoseCFAudited = false, bool ConvertRHS = true); // If the lhs type is a transparent union, check whether we // can initialize the transparent union with the given expression. AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType, ExprResult &RHS); bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType); bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit = false); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const ImplicitConversionSequence& ICS, AssignmentAction Action, CheckedConversionKind CCK = CCK_ImplicitConversion); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const StandardConversionSequence& SCS, AssignmentAction Action, CheckedConversionKind CCK); ExprResult PerformQualificationConversion( Expr *E, QualType Ty, ExprValueKind VK = VK_RValue, CheckedConversionKind CCK = CCK_ImplicitConversion); /// the following "Check" methods will return a valid/converted QualType /// or a null QualType (indicating an error diagnostic was issued). 
/// type checking binary operators (subroutines of CreateBuiltinBinOp). QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType CheckPointerToMemberOperands( // C++ 5.5 ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, SourceLocation OpLoc, bool isIndirect); QualType CheckMultiplyDivideOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool IsDivide); QualType CheckRemainderOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign = false); QualType CheckAdditionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr); QualType CheckSubtractionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, QualType* CompLHSTy = nullptr); QualType CheckShiftOperands( // C99 6.5.7 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, bool IsCompAssign = false); void CheckPtrComparisonWithNullChar(ExprResult &E, ExprResult &NullE); QualType CheckCompareOperands( // C99 6.5.8/9 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckBitwiseOperands( // C99 6.5.[10...12] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckLogicalOperands( // C99 6.5.[13,14] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); // CheckAssignmentOperands is used for both simple and compound assignment. // For simple assignment, pass both expressions and a null converted type. // For compound assignment, pass both expressions and the converted type. QualType CheckAssignmentOperands( // C99 6.5.16.[1,2] Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType); ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opcode, Expr *Op); ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opcode, Expr *LHS, Expr *RHS); ExprResult checkPseudoObjectRValue(Expr *E); Expr *recreateSyntacticForm(PseudoObjectExpr *E); QualType CheckConditionalOperands( // C99 6.5.15 ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc); QualType CXXCheckConditionalOperands( // C++ 5.16 ExprResult &cond, ExprResult &lhs, ExprResult &rhs, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc); QualType CheckVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2, bool ConvertArgs = true); QualType FindCompositePointerType(SourceLocation Loc, ExprResult &E1, ExprResult &E2, bool ConvertArgs = true) { Expr *E1Tmp = E1.get(), *E2Tmp = E2.get(); QualType Composite = FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs); E1 = E1Tmp; E2 = E2Tmp; return Composite; } QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr, SourceLocation QuestionLoc); void DiagnoseAlwaysNonNullPointer(Expr *E, Expr::NullPointerConstantKind NullType, bool IsEqual, SourceRange Range); /// type checking for vector binary operators. 
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool AllowBothBool, bool AllowBoolConversion); QualType GetSignedVectorType(QualType V); QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc); /// Type checking for matrix binary operators. QualType CheckMatrixElementwiseOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign); QualType CheckMatrixMultiplyOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign); bool isValidSveBitcast(QualType srcType, QualType destType); bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType); bool isLaxVectorConversion(QualType srcType, QualType destType); /// type checking declaration initializers (C99 6.7.8) bool CheckForConstantInitializer(Expr *e, QualType t); // type checking C++ declaration initializers (C++ [dcl.init]). /// ReferenceCompareResult - Expresses the result of comparing two /// types (cv1 T1 and cv2 T2) to determine their compatibility for the /// purposes of initialization by reference (C++ [dcl.init.ref]p4). enum ReferenceCompareResult { /// Ref_Incompatible - The two types are incompatible, so direct /// reference binding is not possible. Ref_Incompatible = 0, /// Ref_Related - The two types are reference-related, which means /// that their unqualified forms (T1 and T2) are either the same /// or T1 is a base class of T2. Ref_Related, /// Ref_Compatible - The two types are reference-compatible. Ref_Compatible }; // Fake up a scoped enumeration that still contextually converts to bool. struct ReferenceConversionsScope { /// The conversions that would be performed on an lvalue of type T2 when /// binding a reference of type T1 to it, as determined when evaluating /// whether T1 is reference-compatible with T2. enum ReferenceConversions { Qualification = 0x1, NestedQualification = 0x2, Function = 0x4, DerivedToBase = 0x8, ObjC = 0x10, ObjCLifetime = 0x20, LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/ObjCLifetime) }; }; using ReferenceConversions = ReferenceConversionsScope::ReferenceConversions; ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2, ReferenceConversions *Conv = nullptr); ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType, Expr *CastExpr, CastKind &CastKind, ExprValueKind &VK, CXXCastPath &Path); /// Force an expression with unknown-type to an expression of the /// given type. ExprResult forceUnknownAnyToType(Expr *E, QualType ToType); /// Type-check an expression that's being passed to an /// __unknown_anytype parameter. ExprResult checkUnknownAnyArg(SourceLocation callLoc, Expr *result, QualType &paramType); // CheckVectorCast - check type constraints for vectors. // Since vectors are an extension, there are no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size. // returns true if the cast is invalid bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty, CastKind &Kind); /// Prepare `SplattedExpr` for a vector splat operation, adding /// implicit casts if necessary. ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr); // CheckExtVectorCast - check type constraints for extended vectors. // Since vectors are an extension, there are no C standard reference for this. 
// We allow casting between vectors and integer datatypes of the same size, // or vectors and the element type of that vector. // returns the cast expr ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr, CastKind &Kind); ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type, SourceLocation LParenLoc, Expr *CastExpr, SourceLocation RParenLoc); enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error }; /// Checks for invalid conversions and casts between /// retainable pointers and other pointer kinds for ARC and Weak. ARCConversionResult CheckObjCConversion(SourceRange castRange, QualType castType, Expr *&op, CheckedConversionKind CCK, bool Diagnose = true, bool DiagnoseCFAudited = false, BinaryOperatorKind Opc = BO_PtrMemD ); Expr *stripARCUnbridgedCast(Expr *e); void diagnoseARCUnbridgedCast(Expr *e); bool CheckObjCARCUnavailableWeakConversion(QualType castType, QualType ExprType); /// checkRetainCycles - Check whether an Objective-C message send /// might create an obvious retain cycle. void checkRetainCycles(ObjCMessageExpr *msg); void checkRetainCycles(Expr *receiver, Expr *argument); void checkRetainCycles(VarDecl *Var, Expr *Init); /// checkUnsafeAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained type. bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS); /// checkUnsafeExprAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained expression. void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS); /// CheckMessageArgumentTypes - Check types in an Obj-C message send. /// \param Method - May be null. /// \param [out] ReturnType - The return type of the send. /// \return true iff there were any incompatible types. bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType, MultiExprArg Args, Selector Sel, ArrayRef<SourceLocation> SelectorLocs, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage, SourceLocation lbrac, SourceLocation rbrac, SourceRange RecRange, QualType &ReturnType, ExprValueKind &VK); /// Determine the result of a message send expression based on /// the type of the receiver, the method expected to receive the message, /// and the form of the message send. QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage); /// If the given expression involves a message send to a method /// with a related result type, emit a note describing what happened. void EmitRelatedResultTypeNote(const Expr *E); /// Given that we had incompatible pointer types in a return /// statement, check whether we're in a method with a related result /// type, and if so, emit a note describing what happened. 
void EmitRelatedResultTypeNoteForReturn(QualType destType); class ConditionResult { Decl *ConditionVar; FullExprArg Condition; bool Invalid; bool HasKnownValue; bool KnownValue; friend class Sema; ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition, bool IsConstexpr) : ConditionVar(ConditionVar), Condition(Condition), Invalid(false), HasKnownValue(IsConstexpr && Condition.get() && !Condition.get()->isValueDependent()), KnownValue(HasKnownValue && !!Condition.get()->EvaluateKnownConstInt(S.Context)) {} explicit ConditionResult(bool Invalid) : ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid), HasKnownValue(false), KnownValue(false) {} public: ConditionResult() : ConditionResult(false) {} bool isInvalid() const { return Invalid; } std::pair<VarDecl *, Expr *> get() const { return std::make_pair(cast_or_null<VarDecl>(ConditionVar), Condition.get()); } llvm::Optional<bool> getKnownValue() const { if (!HasKnownValue) return None; return KnownValue; } }; static ConditionResult ConditionError() { return ConditionResult(true); } enum class ConditionKind { Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'. ConstexprIf, ///< A constant boolean condition from 'if constexpr'. Switch ///< An integral condition for a 'switch' statement. }; ConditionResult ActOnCondition(Scope *S, SourceLocation Loc, Expr *SubExpr, ConditionKind CK); ConditionResult ActOnConditionVariable(Decl *ConditionVar, SourceLocation StmtLoc, ConditionKind CK); DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D); ExprResult CheckConditionVariable(VarDecl *ConditionVar, SourceLocation StmtLoc, ConditionKind CK); ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond); /// CheckBooleanCondition - Diagnose problems involving the use of /// the given expression as a boolean condition (e.g. in an if /// statement). Also performs the standard function and array /// decays, possibly changing the input variable. /// /// \param Loc - A location associated with the condition, e.g. the /// 'if' keyword. /// \return true iff there were any errors ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E, bool IsConstexpr = false); /// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression /// found in an explicit(bool) specifier. ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E); /// tryResolveExplicitSpecifier - Attempt to resolve the explict specifier. /// Returns true if the explicit specifier is now resolved. bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec); /// DiagnoseAssignmentAsCondition - Given that an expression is /// being used as a boolean condition, warn if it's an assignment. void DiagnoseAssignmentAsCondition(Expr *E); /// Redundant parentheses over an equality comparison can indicate /// that the user intended an assignment used as condition. void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE); /// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid. ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false); /// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have /// the specified width and sign. If an overflow occurs, detect it and emit /// the specified diagnostic. void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal, unsigned NewWidth, bool NewSign, SourceLocation Loc, unsigned DiagID); /// Checks that the Objective-C declaration is declared in the global scope. 
/// Emits an error and marks the declaration as invalid if it's not declared /// in the global scope. bool CheckObjCDeclScope(Decl *D); /// Abstract base class used for diagnosing integer constant /// expression violations. class VerifyICEDiagnoser { public: bool Suppress; VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { } virtual SemaDiagnosticBuilder diagnoseNotICEType(Sema &S, SourceLocation Loc, QualType T); virtual SemaDiagnosticBuilder diagnoseNotICE(Sema &S, SourceLocation Loc) = 0; virtual SemaDiagnosticBuilder diagnoseFold(Sema &S, SourceLocation Loc); virtual ~VerifyICEDiagnoser() {} }; enum AllowFoldKind { NoFold, AllowFold, }; /// VerifyIntegerConstantExpression - Verifies that an expression is an ICE, /// and reports the appropriate diagnostics. Returns false on success. /// Can optionally return the value of the expression. ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, VerifyICEDiagnoser &Diagnoser, AllowFoldKind CanFold = NoFold); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, unsigned DiagID, AllowFoldKind CanFold = NoFold); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result = nullptr, AllowFoldKind CanFold = NoFold); ExprResult VerifyIntegerConstantExpression(Expr *E, AllowFoldKind CanFold = NoFold) { return VerifyIntegerConstantExpression(E, nullptr, CanFold); } /// VerifyBitField - verifies that a bit field expression is an ICE and has /// the correct width, and that the field type is valid. /// Returns false on success. /// Can optionally return whether the bit-field is of width 0 ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName, QualType FieldTy, bool IsMsStruct, Expr *BitWidth, bool *ZeroWidth = nullptr); private: unsigned ForceCUDAHostDeviceDepth = 0; public: /// Increments our count of the number of times we've seen a pragma forcing /// functions to be __host__ __device__. So long as this count is greater /// than zero, all functions encountered will be __host__ __device__. void PushForceCUDAHostDevice(); /// Decrements our count of the number of times we've seen a pragma forcing /// functions to be __host__ __device__. Returns false if the count is 0 /// before incrementing, so you can emit an error. bool PopForceCUDAHostDevice(); /// Diagnostics that are emitted only if we discover that the given function /// must be codegen'ed. Because handling these correctly adds overhead to /// compilation, this is currently only enabled for CUDA compilations. llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>, std::vector<PartialDiagnosticAt>> DeviceDeferredDiags; /// A pair of a canonical FunctionDecl and a SourceLocation. When used as the /// key in a hashtable, both the FD and location are hashed. struct FunctionDeclAndLoc { CanonicalDeclPtr<FunctionDecl> FD; SourceLocation Loc; }; /// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a /// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the /// same deferred diag twice. llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags; /// An inverse call graph, mapping known-emitted functions to one of their /// known-emitted callers (plus the location of the call). /// /// Functions that we can tell a priori must be emitted aren't added to this /// map. 
llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>, /* Caller = */ FunctionDeclAndLoc> DeviceKnownEmittedFns; /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current /// context is "used as device code". /// /// - If CurContext is a __host__ function, does not emit any diagnostics /// unless \p EmitOnBothSides is true. /// - If CurContext is a __device__ or __global__ function, emits the /// diagnostics immediately. /// - If CurContext is a __host__ __device__ function and we are compiling for /// the device, creates a diagnostic which is emitted if and when we realize /// that the function will be codegen'ed. /// /// Example usage: /// /// // Variable-length arrays are not allowed in CUDA device code. /// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget()) /// return ExprError(); /// // Otherwise, continue parsing as normal. SemaDiagnosticBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID); /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current /// context is "used as host code". /// /// Same as CUDADiagIfDeviceCode, with "host" and "device" switched. SemaDiagnosticBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID); /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current /// context is "used as device code". /// /// - If CurContext is a `declare target` function or it is known that the /// function is emitted for the device, emits the diagnostics immediately. /// - If CurContext is a non-`declare target` function and we are compiling /// for the device, creates a diagnostic which is emitted if and when we /// realize that the function will be codegen'ed. /// /// Example usage: /// /// // Variable-length arrays are not allowed in NVPTX device code. /// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported)) /// return ExprError(); /// // Otherwise, continue parsing as normal. SemaDiagnosticBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID, FunctionDecl *FD); /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current /// context is "used as host code". /// /// - If CurContext is a `declare target` function or it is known that the /// function is emitted for the host, emits the diagnostics immediately. /// - If CurContext is a non-host function, just ignore it. /// /// Example usage: /// /// // Variable-length arrays are not allowed in NVPTX device code. /// if (diagIfOpenMPHostode(Loc, diag::err_vla_unsupported)) /// return ExprError(); /// // Otherwise, continue parsing as normal. SemaDiagnosticBuilder diagIfOpenMPHostCode(SourceLocation Loc, unsigned DiagID, FunctionDecl *FD); SemaDiagnosticBuilder targetDiag(SourceLocation Loc, unsigned DiagID, FunctionDecl *FD = nullptr); SemaDiagnosticBuilder targetDiag(SourceLocation Loc, const PartialDiagnostic &PD, FunctionDecl *FD = nullptr) { return targetDiag(Loc, PD.getDiagID(), FD) << PD; } /// Check if the expression is allowed to be used in expressions for the /// offloading devices. void checkDeviceDecl(ValueDecl *D, SourceLocation Loc); enum CUDAFunctionTarget { CFT_Device, CFT_Global, CFT_Host, CFT_HostDevice, CFT_InvalidTarget }; /// Determines whether the given function is a CUDA device/host/kernel/etc. /// function. /// /// Use this rather than examining the function's attributes yourself -- you /// will get it wrong. Returns CFT_Host if D is null. 
CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D, bool IgnoreImplicitHDAttr = false); CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs); /// Gets the CUDA target for the current context. CUDAFunctionTarget CurrentCUDATarget() { return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext)); } static bool isCUDAImplicitHostDeviceFunction(const FunctionDecl *D); // CUDA function call preference. Must be ordered numerically from // worst to best. enum CUDAFunctionPreference { CFP_Never, // Invalid caller/callee combination. CFP_WrongSide, // Calls from host-device to host or device // function that do not match current compilation // mode. CFP_HostDevice, // Any calls to host/device functions. CFP_SameSide, // Calls from host-device to host or device // function matching current compilation mode. CFP_Native, // host-to-host or device-to-device calls. }; /// Identifies relative preference of a given Caller/Callee /// combination, based on their host/device attributes. /// \param Caller function which needs address of \p Callee. /// nullptr in case of global context. /// \param Callee target function /// /// \returns preference value for particular Caller/Callee combination. CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller, const FunctionDecl *Callee); /// Determines whether Caller may invoke Callee, based on their CUDA /// host/device attributes. Returns false if the call is not allowed. /// /// Note: Will return true for CFP_WrongSide calls. These may appear in /// semantically correct CUDA programs, but only if they're never codegen'ed. bool IsAllowedCUDACall(const FunctionDecl *Caller, const FunctionDecl *Callee) { return IdentifyCUDAPreference(Caller, Callee) != CFP_Never; } /// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD, /// depending on FD and the current compilation settings. void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD, const LookupResult &Previous); /// May add implicit CUDAConstantAttr attribute to VD, depending on VD /// and current compilation settings. void MaybeAddCUDAConstantAttr(VarDecl *VD); public: /// Check whether we're allowed to call Callee from the current context. /// /// - If the call is never allowed in a semantically-correct program /// (CFP_Never), emits an error and returns false. /// /// - If the call is allowed in semantically-correct programs, but only if /// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to /// be emitted if and when the caller is codegen'ed, and returns true. /// /// Will only create deferred diagnostics for a given SourceLocation once, /// so you can safely call this multiple times without generating duplicate /// deferred errors. /// /// - Otherwise, returns true without emitting any diagnostics. bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee); void CUDACheckLambdaCapture(CXXMethodDecl *D, const sema::Capture &Capture); /// Set __device__ or __host__ __device__ attributes on the given lambda /// operator() method. /// /// CUDA lambdas by default is host device function unless it has explicit /// host or device attribute. void CUDASetLambdaAttrs(CXXMethodDecl *Method); /// Finds a function in \p Matches with highest calling priority /// from \p Caller context and erases all functions with lower /// calling priority. 
void EraseUnwantedCUDAMatches( const FunctionDecl *Caller, SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches); /// Given a implicit special member, infer its CUDA target from the /// calls it needs to make to underlying base/field special members. /// \param ClassDecl the class for which the member is being created. /// \param CSM the kind of special member. /// \param MemberDecl the special member itself. /// \param ConstRHS true if this is a copy operation with a const object on /// its RHS. /// \param Diagnose true if this call should emit diagnostics. /// \return true if there was an error inferring. /// The result of this call is implicit CUDA target attribute(s) attached to /// the member declaration. bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl, CXXSpecialMember CSM, CXXMethodDecl *MemberDecl, bool ConstRHS, bool Diagnose); /// \return true if \p CD can be considered empty according to CUDA /// (E.2.3.1 in CUDA 7.5 Programming guide). bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD); bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD); // \brief Checks that initializers of \p Var satisfy CUDA restrictions. In // case of error emits appropriate diagnostic and invalidates \p Var. // // \details CUDA allows only empty constructors as initializers for global // variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all // __shared__ variables whether they are local or not (they all are implicitly // static in CUDA). One exception is that CUDA allows constant initializers // for __constant__ and __device__ variables. void checkAllowedCUDAInitializer(VarDecl *VD); /// Check whether NewFD is a valid overload for CUDA. Emits /// diagnostics and invalidates NewFD if not. void checkCUDATargetOverload(FunctionDecl *NewFD, const LookupResult &Previous); /// Copies target attributes from the template TD to the function FD. void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD); /// Returns the name of the launch configuration function. This is the name /// of the function that will be called to configure kernel call, with the /// parameters specified via <<<>>>. std::string getCudaConfigureFuncName() const; /// \name Code completion //@{ /// Describes the context in which code completion occurs. enum ParserCompletionContext { /// Code completion occurs at top-level or namespace context. PCC_Namespace, /// Code completion occurs within a class, struct, or union. PCC_Class, /// Code completion occurs within an Objective-C interface, protocol, /// or category. PCC_ObjCInterface, /// Code completion occurs within an Objective-C implementation or /// category implementation PCC_ObjCImplementation, /// Code completion occurs within the list of instance variables /// in an Objective-C interface, protocol, category, or implementation. PCC_ObjCInstanceVariableList, /// Code completion occurs following one or more template /// headers. PCC_Template, /// Code completion occurs following one or more template /// headers within a class. PCC_MemberTemplate, /// Code completion occurs within an expression. PCC_Expression, /// Code completion occurs within a statement, which may /// also be an expression or a declaration. PCC_Statement, /// Code completion occurs at the beginning of the /// initialization statement (or expression) in a for loop. PCC_ForInit, /// Code completion occurs within the condition of an if, /// while, switch, or for statement. 
PCC_Condition, /// Code completion occurs within the body of a function on a /// recovery path, where we do not have a specific handle on our position /// in the grammar. PCC_RecoveryInFunction, /// Code completion occurs where only a type is permitted. PCC_Type, /// Code completion occurs in a parenthesized expression, which /// might also be a type cast. PCC_ParenthesizedExpression, /// Code completion occurs within a sequence of declaration /// specifiers within a function, method, or block. PCC_LocalDeclarationSpecifiers }; void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path); void CodeCompleteOrdinaryName(Scope *S, ParserCompletionContext CompletionContext); void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS, bool AllowNonIdentifiers, bool AllowNestedNameSpecifiers); struct CodeCompleteExpressionData; void CodeCompleteExpression(Scope *S, const CodeCompleteExpressionData &Data); void CodeCompleteExpression(Scope *S, QualType PreferredType, bool IsParenthesized = false); void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase, SourceLocation OpLoc, bool IsArrow, bool IsBaseExprStatement, QualType PreferredType); void CodeCompletePostfixExpression(Scope *S, ExprResult LHS, QualType PreferredType); void CodeCompleteTag(Scope *S, unsigned TagSpec); void CodeCompleteTypeQualifiers(DeclSpec &DS); void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D, const VirtSpecifiers *VS = nullptr); void CodeCompleteBracketDeclarator(Scope *S); void CodeCompleteCase(Scope *S); /// Determines the preferred type of the current function argument, by /// examining the signatures of all possible overloads. /// Returns null if unknown or ambiguous, or if code completion is off. /// /// If the code completion point has been reached, also reports the function /// signatures that were considered. /// /// FIXME: rename to GuessCallArgumentType to reduce confusion. QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type, SourceLocation Loc, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl, CXXScopeSpec SS, ParsedType TemplateTypeTy, ArrayRef<Expr *> ArgExprs, IdentifierInfo *II, SourceLocation OpenParLoc); void CodeCompleteInitializer(Scope *S, Decl *D); /// Trigger code completion for a record of \p BaseType. \p InitExprs are /// expressions in the initializer list seen so far and \p D is the current /// Designation being parsed. 
void CodeCompleteDesignator(const QualType BaseType, llvm::ArrayRef<Expr *> InitExprs, const Designation &D); void CodeCompleteAfterIf(Scope *S, bool IsBracedThen); void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext, bool IsUsingDeclaration, QualType BaseType, QualType PreferredType); void CodeCompleteUsing(Scope *S); void CodeCompleteUsingDirective(Scope *S); void CodeCompleteNamespaceDecl(Scope *S); void CodeCompleteNamespaceAliasDecl(Scope *S); void CodeCompleteOperatorName(Scope *S); void CodeCompleteConstructorInitializer( Decl *Constructor, ArrayRef<CXXCtorInitializer *> Initializers); void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro, bool AfterAmpersand); void CodeCompleteAfterFunctionEquals(Declarator &D); void CodeCompleteObjCAtDirective(Scope *S); void CodeCompleteObjCAtVisibility(Scope *S); void CodeCompleteObjCAtStatement(Scope *S); void CodeCompleteObjCAtExpression(Scope *S); void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS); void CodeCompleteObjCPropertyGetter(Scope *S); void CodeCompleteObjCPropertySetter(Scope *S); void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS, bool IsParameter); void CodeCompleteObjCMessageReceiver(Scope *S); void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression); void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, bool IsSuper = false); void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, ObjCInterfaceDecl *Super = nullptr); void CodeCompleteObjCForCollection(Scope *S, DeclGroupPtrTy IterationVar); void CodeCompleteObjCSelector(Scope *S, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCProtocolReferences( ArrayRef<IdentifierLocPair> Protocols); void CodeCompleteObjCProtocolDecl(Scope *S); void CodeCompleteObjCInterfaceDecl(Scope *S); void CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationDecl(Scope *S); void CodeCompleteObjCInterfaceCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCPropertyDefinition(Scope *S); void CodeCompleteObjCPropertySynthesizeIvar(Scope *S, IdentifierInfo *PropertyName); void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod, ParsedType ReturnType); void CodeCompleteObjCMethodDeclSelector(Scope *S, bool IsInstanceMethod, bool AtParameterName, ParsedType ReturnType, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName, SourceLocation ClassNameLoc, bool IsBaseExprStatement); void CodeCompletePreprocessorDirective(bool InConditional); void CodeCompleteInPreprocessorConditionalExclusion(Scope *S); void CodeCompletePreprocessorMacroName(bool IsDefinition); void CodeCompletePreprocessorExpression(); void CodeCompletePreprocessorMacroArgument(Scope *S, IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned Argument); void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled); void CodeCompleteNaturalLanguage(); void CodeCompleteAvailabilityPlatformName(); void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo, SmallVectorImpl<CodeCompletionResult> &Results); //@} 
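The comment on ProduceCallSignatureHelp above describes guessing the preferred type of the argument currently being completed by examining the signatures of all viable overloads, and giving up when the answer is unknown or ambiguous. Below is a minimal, self-contained sketch of that idea only; it is not Clang's implementation, and the names preferredArgumentType and TypeName are made up for illustration.

#include <cassert>
#include <optional>
#include <string>
#include <vector>

using TypeName = std::string; // stand-in for clang::QualType

// Return the parameter type at ArgIndex if every overload that has such a
// parameter agrees on it; return an empty optional when no overload has one
// or when the overloads disagree (the "unknown or ambiguous" case above).
std::optional<TypeName>
preferredArgumentType(const std::vector<std::vector<TypeName>> &Overloads,
                      unsigned ArgIndex) {
  std::optional<TypeName> Preferred;
  for (const auto &Params : Overloads) {
    if (ArgIndex >= Params.size())
      continue; // this overload has no parameter at that position
    if (!Preferred)
      Preferred = Params[ArgIndex]; // first candidate sets the guess
    else if (*Preferred != Params[ArgIndex])
      return std::nullopt; // candidates disagree -> ambiguous
  }
  return Preferred; // still empty if nothing matched -> unknown
}

int main() {
  // void f(int, double); void f(long, double); -> second argument is 'double',
  // first argument is ambiguous between 'int' and 'long'.
  std::vector<std::vector<TypeName>> Overloads = {{"int", "double"},
                                                  {"long", "double"}};
  assert(preferredArgumentType(Overloads, 1).value() == "double");
  assert(!preferredArgumentType(Overloads, 0).has_value());
  return 0;
}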
//===--------------------------------------------------------------------===// // Extra semantic analysis beyond the C type system public: SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL, unsigned ByteNo) const; private: void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, const ArraySubscriptExpr *ASE=nullptr, bool AllowOnePastEnd=true, bool IndexNegated=false); void CheckArrayAccess(const Expr *E); // Used to grab the relevant information from a FormatAttr and a // FunctionDeclaration. struct FormatStringInfo { unsigned FormatIdx; unsigned FirstDataArg; bool HasVAListArg; }; static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, FormatStringInfo *FSI); bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc, ArrayRef<const Expr *> Args); bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto); void CheckConstructorCall(FunctionDecl *FDecl, QualType ThisType, ArrayRef<const Expr *> Args, const FunctionProtoType *Proto, SourceLocation Loc); void CheckArgAlignment(SourceLocation Loc, NamedDecl *FDecl, StringRef ParamName, QualType ArgTy, QualType ParamTy); void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, const Expr *ThisArg, ArrayRef<const Expr *> Args, bool IsMemberFunction, SourceLocation Loc, SourceRange Range, VariadicCallType CallType); bool CheckObjCString(Expr *Arg); ExprResult CheckOSLogFormatStringArg(Expr *Arg); ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, CallExpr *TheCall); bool CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall); bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, unsigned MaxWidth); bool CheckNeonBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckARMCoprocessorImmediate(const TargetInfo &TI, const Expr *CoprocArg, bool WantCDE); bool CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckAArch64BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckBPFBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall); bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall, ArrayRef<int> ArgNums); bool CheckX86BuiltinTileDuplicate(CallExpr *TheCall, ArrayRef<int> 
ArgNums); bool CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall, ArrayRef<int> ArgNums); bool CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckRISCVBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call); bool SemaBuiltinUnorderedCompare(CallExpr *TheCall); bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs); bool SemaBuiltinComplex(CallExpr *TheCall); bool SemaBuiltinVSX(CallExpr *TheCall); bool SemaBuiltinOSLogFormat(CallExpr *TheCall); public: // Used by C++ template instantiation. ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall); ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, SourceLocation BuiltinLoc, SourceLocation RParenLoc); private: bool SemaBuiltinPrefetch(CallExpr *TheCall); bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall); bool SemaBuiltinAssume(CallExpr *TheCall); bool SemaBuiltinAssumeAligned(CallExpr *TheCall); bool SemaBuiltinLongjmp(CallExpr *TheCall); bool SemaBuiltinSetjmp(CallExpr *TheCall); ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult); ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult); ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op); ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult, bool IsDelete); bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, llvm::APSInt &Result); bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low, int High, bool RangeIsError = true); bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum, unsigned Multiple); bool SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum); bool SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum, unsigned ArgBits); bool SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, int ArgNum, unsigned ArgBits); bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, int ArgNum, unsigned ExpectedFieldNum, bool AllowName); bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinPPCMMACall(CallExpr *TheCall, const char *TypeDesc); bool CheckPPCMMAType(QualType Type, SourceLocation TypeLoc); // Matrix builtin handling. 
ExprResult SemaBuiltinMatrixTranspose(CallExpr *TheCall, ExprResult CallResult); ExprResult SemaBuiltinMatrixColumnMajorLoad(CallExpr *TheCall, ExprResult CallResult); ExprResult SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall, ExprResult CallResult); public: enum FormatStringType { FST_Scanf, FST_Printf, FST_NSString, FST_Strftime, FST_Strfmon, FST_Kprintf, FST_FreeBSDKPrintf, FST_OSTrace, FST_OSLog, FST_Unknown }; static FormatStringType GetFormatStringType(const FormatAttr *Format); bool FormatStringHasSArg(const StringLiteral *FExpr); static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx); private: bool CheckFormatArguments(const FormatAttr *Format, ArrayRef<const Expr *> Args, bool IsCXXMember, VariadicCallType CallType, SourceLocation Loc, SourceRange Range, llvm::SmallBitVector &CheckedVarArgs); bool CheckFormatArguments(ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, VariadicCallType CallType, SourceLocation Loc, SourceRange range, llvm::SmallBitVector &CheckedVarArgs); void CheckAbsoluteValueFunction(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMemaccessArguments(const CallExpr *Call, unsigned BId, IdentifierInfo *FnName); void CheckStrlcpycatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckStrncatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckFreeArguments(const CallExpr *E); void CheckReturnValExpr(Expr *RetValExp, QualType lhsType, SourceLocation ReturnLoc, bool isObjCMethod = false, const AttrVec *Attrs = nullptr, const FunctionDecl *FD = nullptr); public: void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS); private: void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation()); void CheckBoolLikeConversion(Expr *E, SourceLocation CC); void CheckForIntOverflow(Expr *E); void CheckUnsequencedOperations(const Expr *E); /// Perform semantic checks on a completed expression. This will either /// be a full-expression or a default argument expression. void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(), bool IsConstexpr = false); void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field, Expr *Init); /// Check if there is a field shadowing. void CheckShadowInheritedFields(const SourceLocation &Loc, DeclarationName FieldName, const CXXRecordDecl *RD, bool DeclIsField = true); /// Check if the given expression contains 'break' or 'continue' /// statement that produces control flow different from GCC. void CheckBreakContinueBinding(Expr *E); /// Check whether receiver is mutable ObjC container which /// attempts to add itself into the container void CheckObjCCircularContainer(ObjCMessageExpr *Message); void CheckTCBEnforcement(const CallExpr *TheCall, const FunctionDecl *Callee); void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE); void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc, bool DeleteWasArrayForm); public: /// Register a magic integral constant to be used as a type tag. 
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind, uint64_t MagicValue, QualType Type, bool LayoutCompatible, bool MustBeNull); struct TypeTagData { TypeTagData() {} TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) : Type(Type), LayoutCompatible(LayoutCompatible), MustBeNull(MustBeNull) {} QualType Type; /// If true, \c Type should be compared with other expression's types for /// layout-compatibility. unsigned LayoutCompatible : 1; unsigned MustBeNull : 1; }; /// A pair of ArgumentKind identifier and magic value. This uniquely /// identifies the magic value. typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue; private: /// A map from magic value to type information. std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>> TypeTagForDatatypeMagicValues; /// Peform checks on a call of a function with argument_with_type_tag /// or pointer_with_type_tag attributes. void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr, const ArrayRef<const Expr *> ExprArgs, SourceLocation CallSiteLoc); /// Check if we are taking the address of a packed field /// as this may be a problem if the pointer value is dereferenced. void CheckAddressOfPackedMember(Expr *rhs); /// The parser's current scope. /// /// The parser maintains this state here. Scope *CurScope; mutable IdentifierInfo *Ident_super; mutable IdentifierInfo *Ident___float128; /// Nullability type specifiers. IdentifierInfo *Ident__Nonnull = nullptr; IdentifierInfo *Ident__Nullable = nullptr; IdentifierInfo *Ident__Nullable_result = nullptr; IdentifierInfo *Ident__Null_unspecified = nullptr; IdentifierInfo *Ident_NSError = nullptr; /// The handler for the FileChanged preprocessor events. /// /// Used for diagnostics that implement custom semantic analysis for #include /// directives, like -Wpragma-pack. sema::SemaPPCallbacks *SemaPPCallbackHandler; protected: friend class Parser; friend class InitializationSequence; friend class ASTReader; friend class ASTDeclReader; friend class ASTWriter; public: /// Retrieve the keyword associated IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability); /// The struct behind the CFErrorRef pointer. RecordDecl *CFError = nullptr; bool isCFError(RecordDecl *D); /// Retrieve the identifier "NSError". IdentifierInfo *getNSErrorIdent(); /// Retrieve the parser's current scope. /// /// This routine must only be used when it is certain that semantic analysis /// and the parser are in precisely the same context, which is not the case /// when, e.g., we are performing any kind of template instantiation. /// Therefore, the only safe places to use this scope are in the parser /// itself and in routines directly invoked from the parser and *never* from /// template substitution or instantiation. Scope *getCurScope() const { return CurScope; } void incrementMSManglingNumber() const { return CurScope->incrementMSManglingNumber(); } IdentifierInfo *getSuperIdentifier() const; IdentifierInfo *getFloat128Identifier() const; Decl *getObjCDeclContext() const; DeclContext *getCurLexicalContext() const { return OriginalLexicalContext ? OriginalLexicalContext : CurContext; } const DeclContext *getCurObjCLexicalContext() const { const DeclContext *DC = getCurLexicalContext(); // A category implicitly has the attribute of the interface. if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC)) DC = CatD->getClassInterface(); return DC; } /// Determine the number of levels of enclosing template parameters. This is /// only usable while parsing. 
Note that this does not include dependent /// contexts in which no template parameters have yet been declared, such as /// in a terse function template or generic lambda before the first 'auto' is /// encountered. unsigned getTemplateDepth(Scope *S) const; /// To be used for checking whether the arguments being passed to /// function exceeds the number of parameters expected for it. static bool TooManyArguments(size_t NumParams, size_t NumArgs, bool PartialOverloading = false) { // We check whether we're just after a comma in code-completion. if (NumArgs > 0 && PartialOverloading) return NumArgs + 1 > NumParams; // If so, we view as an extra argument. return NumArgs > NumParams; } // Emitting members of dllexported classes is delayed until the class // (including field initializers) is fully parsed. SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses; SmallVector<CXXMethodDecl*, 4> DelayedDllExportMemberFunctions; private: int ParsingClassDepth = 0; class SavePendingParsedClassStateRAII { public: SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); } ~SavePendingParsedClassStateRAII() { assert(S.DelayedOverridingExceptionSpecChecks.empty() && "there shouldn't be any pending delayed exception spec checks"); assert(S.DelayedEquivalentExceptionSpecChecks.empty() && "there shouldn't be any pending delayed exception spec checks"); swapSavedState(); } private: Sema &S; decltype(DelayedOverridingExceptionSpecChecks) SavedOverridingExceptionSpecChecks; decltype(DelayedEquivalentExceptionSpecChecks) SavedEquivalentExceptionSpecChecks; void swapSavedState() { SavedOverridingExceptionSpecChecks.swap( S.DelayedOverridingExceptionSpecChecks); SavedEquivalentExceptionSpecChecks.swap( S.DelayedEquivalentExceptionSpecChecks); } }; /// Helper class that collects misaligned member designations and /// their location info for delayed diagnostics. struct MisalignedMember { Expr *E; RecordDecl *RD; ValueDecl *MD; CharUnits Alignment; MisalignedMember() : E(), RD(), MD(), Alignment() {} MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment) : E(E), RD(RD), MD(MD), Alignment(Alignment) {} explicit MisalignedMember(Expr *E) : MisalignedMember(E, nullptr, nullptr, CharUnits()) {} bool operator==(const MisalignedMember &m) { return this->E == m.E; } }; /// Small set of gathered accesses to potentially misaligned members /// due to the packed attribute. SmallVector<MisalignedMember, 4> MisalignedMembers; /// Adds an expression to the set of gathered misaligned members. void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment); public: /// Diagnoses the current set of gathered accesses. This typically /// happens at full expression level. The set is cleared after emitting the /// diagnostics. void DiagnoseMisalignedMembers(); /// This function checks if the expression is in the sef of potentially /// misaligned members and it is converted to some pointer type T with lower /// or equal alignment requirements. If so it removes it. This is used when /// we do not want to diagnose such misaligned access (e.g. in conversions to /// void*). void DiscardMisalignedMemberAddress(const Type *T, Expr *E); /// This function calls Action when it determines that E designates a /// misaligned member due to the packed attribute. This is used to emit /// local diagnostics like in reference binding. 
void RefersToMemberWithReducedAlignment( Expr *E, llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)> Action); /// Describes the reason a calling convention specification was ignored, used /// for diagnostics. enum class CallingConventionIgnoredReason { ForThisTarget = 0, VariadicFunction, ConstructorDestructor, BuiltinFunction }; /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current /// context is "used as device code". /// /// - If CurLexicalContext is a kernel function or it is known that the /// function will be emitted for the device, emits the diagnostics /// immediately. /// - If CurLexicalContext is a function and we are compiling /// for the device, but we don't know that this function will be codegen'ed /// for devive yet, creates a diagnostic which is emitted if and when we /// realize that the function will be codegen'ed. /// /// Example usage: /// /// Diagnose __float128 type usage only from SYCL device code if the current /// target doesn't support it /// if (!S.Context.getTargetInfo().hasFloat128Type() && /// S.getLangOpts().SYCLIsDevice) /// SYCLDiagIfDeviceCode(Loc, diag::err_type_unsupported) << "__float128"; SemaDiagnosticBuilder SYCLDiagIfDeviceCode(SourceLocation Loc, unsigned DiagID); /// Check whether we're allowed to call Callee from the current context. /// /// - If the call is never allowed in a semantically-correct program /// emits an error and returns false. /// /// - If the call is allowed in semantically-correct programs, but only if /// it's never codegen'ed, creates a deferred diagnostic to be emitted if /// and when the caller is codegen'ed, and returns true. /// /// - Otherwise, returns true without emitting any diagnostics. /// /// Adds Callee to DeviceCallGraph if we don't know if its caller will be /// codegen'ed yet. bool checkSYCLDeviceFunction(SourceLocation Loc, FunctionDecl *Callee); }; /// RAII object that enters a new expression evaluation context. class EnterExpressionEvaluationContext { Sema &Actions; bool Entered = true; public: EnterExpressionEvaluationContext( Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext = Sema::ExpressionEvaluationContextRecord::EK_Other, bool ShouldEnter = true) : Actions(Actions), Entered(ShouldEnter) { if (Entered) Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl, ExprContext); } EnterExpressionEvaluationContext( Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Sema::ReuseLambdaContextDecl_t, Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext = Sema::ExpressionEvaluationContextRecord::EK_Other) : Actions(Actions) { Actions.PushExpressionEvaluationContext( NewContext, Sema::ReuseLambdaContextDecl, ExprContext); } enum InitListTag { InitList }; EnterExpressionEvaluationContext(Sema &Actions, InitListTag, bool ShouldEnter = true) : Actions(Actions), Entered(false) { // In C++11 onwards, narrowing checks are performed on the contents of // braced-init-lists, even when they occur within unevaluated operands. // Therefore we still need to instantiate constexpr functions used in such // a context. 
if (ShouldEnter && Actions.isUnevaluatedContext() && Actions.getLangOpts().CPlusPlus11) { Actions.PushExpressionEvaluationContext( Sema::ExpressionEvaluationContext::UnevaluatedList); Entered = true; } } ~EnterExpressionEvaluationContext() { if (Entered) Actions.PopExpressionEvaluationContext(); } }; DeductionFailureInfo MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK, sema::TemplateDeductionInfo &Info); /// Contains a late templated function. /// Will be parsed at the end of the translation unit, used by Sema & Parser. struct LateParsedTemplate { CachedTokens Toks; /// The template function declaration to be late parsed. Decl *D; }; template <> void Sema::PragmaStack<Sema::AlignPackInfo>::Act(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, AlignPackInfo Value); } // end namespace clang namespace llvm { // Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its // SourceLocation. template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> { using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc; using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>; static FunctionDeclAndLoc getEmptyKey() { return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()}; } static FunctionDeclAndLoc getTombstoneKey() { return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()}; } static unsigned getHashValue(const FunctionDeclAndLoc &FDL) { return hash_combine(FDBaseInfo::getHashValue(FDL.FD), FDL.Loc.getHashValue()); } static bool isEqual(const FunctionDeclAndLoc &LHS, const FunctionDeclAndLoc &RHS) { return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc; } }; } // namespace llvm #endif
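EnterExpressionEvaluationContext above is a pure RAII wrapper: it pushes an expression evaluation context on construction (unless ShouldEnter is false, or, for the InitListTag constructor, unless the narrowing-check condition holds) and pops it on destruction. The following is a minimal, self-contained analogue of that push/conditional-pop shape, for illustration only; ToyActions, EnterEvalContext and the enum values are made-up names, not Clang API.

#include <cassert>
#include <vector>

enum class EvalContext { PotentiallyEvaluated, Unevaluated, UnevaluatedList };

struct ToyActions {
  // Grossly simplified stand-in for Sema's context stack.
  std::vector<EvalContext> Stack{EvalContext::PotentiallyEvaluated};
  void push(EvalContext C) { Stack.push_back(C); }
  void pop() { Stack.pop_back(); }
  bool isUnevaluatedContext() const {
    return Stack.back() != EvalContext::PotentiallyEvaluated;
  }
};

class EnterEvalContext {
  ToyActions &Actions;
  bool Entered;

public:
  // Push on construction unless the caller opted out via ShouldEnter.
  EnterEvalContext(ToyActions &A, EvalContext C, bool ShouldEnter = true)
      : Actions(A), Entered(ShouldEnter) {
    if (Entered)
      Actions.push(C);
  }
  // Pop on destruction only if we actually pushed.
  ~EnterEvalContext() {
    if (Entered)
      Actions.pop();
  }
};

int main() {
  ToyActions A;
  {
    EnterEvalContext Guard(A, EvalContext::Unevaluated);
    assert(A.isUnevaluatedContext());
  } // popped here, mirroring ~EnterExpressionEvaluationContext()
  assert(!A.isUnevaluatedContext());
  return 0;
}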
//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Sema class, which performs semantic analysis and // builds ASTs. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_SEMA_SEMA_H #define LLVM_CLANG_SEMA_SEMA_H #include "clang/AST/ASTConcept.h" #include "clang/AST/ASTFwd.h" #include "clang/AST/Attr.h" #include "clang/AST/Availability.h" #include "clang/AST/ComparisonCategories.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprConcepts.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/ExprOpenMP.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/LocInfoType.h" #include "clang/AST/MangleNumberingContext.h" #include "clang/AST/NSAPI.h" #include "clang/AST/PrettyPrinter.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/TypeLoc.h" #include "clang/AST/TypeOrdering.h" #include "clang/Basic/BitmaskEnum.h" #include "clang/Basic/ExpressionTraits.h" #include "clang/Basic/Module.h" #include "clang/Basic/OpenCLOptions.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/PragmaKinds.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TemplateKinds.h" #include "clang/Basic/TypeTraits.h" #include "clang/Sema/AnalysisBasedWarnings.h" #include "clang/Sema/CleanupInfo.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/ExternalSemaSource.h" #include "clang/Sema/IdentifierResolver.h" #include "clang/Sema/ObjCMethodList.h" #include "clang/Sema/Ownership.h" #include "clang/Sema/Scope.h" #include "clang/Sema/SemaConcept.h" #include "clang/Sema/TypoCorrection.h" #include "clang/Sema/Weak.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallBitVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/TinyPtrVector.h" #include "llvm/Frontend/OpenMP/OMPConstants.h" #include <deque> #include <memory> #include <string> #include <tuple> #include <vector> namespace llvm { class APSInt; template <typename ValueT> struct DenseMapInfo; template <typename ValueT, typename ValueInfoT> class DenseSet; class SmallBitVector; struct InlineAsmIdentifierInfo; } namespace clang { class ADLResult; class ASTConsumer; class ASTContext; class ASTMutationListener; class ASTReader; class ASTWriter; class ArrayType; class ParsedAttr; class BindingDecl; class BlockDecl; class CapturedDecl; class CXXBasePath; class CXXBasePaths; class CXXBindTemporaryExpr; typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath; class CXXConstructorDecl; class CXXConversionDecl; class CXXDeleteExpr; class CXXDestructorDecl; class CXXFieldCollector; class CXXMemberCallExpr; class CXXMethodDecl; class CXXScopeSpec; class CXXTemporary; class CXXTryStmt; class CallExpr; class ClassTemplateDecl; class ClassTemplatePartialSpecializationDecl; class ClassTemplateSpecializationDecl; class VarTemplatePartialSpecializationDecl; class CodeCompleteConsumer; class CodeCompletionAllocator; class CodeCompletionTUInfo; class CodeCompletionResult; class CoroutineBodyStmt; class Decl; class 
DeclAccessPair; class DeclContext; class DeclRefExpr; class DeclaratorDecl; class DeducedTemplateArgument; class DependentDiagnostic; class DesignatedInitExpr; class Designation; class EnableIfAttr; class EnumConstantDecl; class Expr; class ExtVectorType; class FormatAttr; class FriendDecl; class FunctionDecl; class FunctionProtoType; class FunctionTemplateDecl; class ImplicitConversionSequence; typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList; class InitListExpr; class InitializationKind; class InitializationSequence; class InitializedEntity; class IntegerLiteral; class LabelStmt; class LambdaExpr; class LangOptions; class LocalInstantiationScope; class LookupResult; class MacroInfo; typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath; class ModuleLoader; class MultiLevelTemplateArgumentList; class NamedDecl; class ObjCCategoryDecl; class ObjCCategoryImplDecl; class ObjCCompatibleAliasDecl; class ObjCContainerDecl; class ObjCImplDecl; class ObjCImplementationDecl; class ObjCInterfaceDecl; class ObjCIvarDecl; template <class T> class ObjCList; class ObjCMessageExpr; class ObjCMethodDecl; class ObjCPropertyDecl; class ObjCProtocolDecl; class OMPThreadPrivateDecl; class OMPRequiresDecl; class OMPDeclareReductionDecl; class OMPDeclareSimdDecl; class OMPClause; struct OMPVarListLocTy; struct OverloadCandidate; enum class OverloadCandidateParamOrder : char; enum OverloadCandidateRewriteKind : unsigned; class OverloadCandidateSet; class OverloadExpr; class ParenListExpr; class ParmVarDecl; class Preprocessor; class PseudoDestructorTypeStorage; class PseudoObjectExpr; class QualType; class StandardConversionSequence; class Stmt; class StringLiteral; class SwitchStmt; class TemplateArgument; class TemplateArgumentList; class TemplateArgumentLoc; class TemplateDecl; class TemplateInstantiationCallback; class TemplateParameterList; class TemplatePartialOrderingContext; class TemplateTemplateParmDecl; class Token; class TypeAliasDecl; class TypedefDecl; class TypedefNameDecl; class TypeLoc; class TypoCorrectionConsumer; class UnqualifiedId; class UnresolvedLookupExpr; class UnresolvedMemberExpr; class UnresolvedSetImpl; class UnresolvedSetIterator; class UsingDecl; class UsingShadowDecl; class ValueDecl; class VarDecl; class VarTemplateSpecializationDecl; class VisibilityAttr; class VisibleDeclConsumer; class IndirectFieldDecl; struct DeductionFailureInfo; class TemplateSpecCandidateSet; namespace sema { class AccessedEntity; class BlockScopeInfo; class Capture; class CapturedRegionScopeInfo; class CapturingScopeInfo; class CompoundScopeInfo; class DelayedDiagnostic; class DelayedDiagnosticPool; class FunctionScopeInfo; class LambdaScopeInfo; class PossiblyUnreachableDiag; class SemaPPCallbacks; class TemplateDeductionInfo; } namespace threadSafety { class BeforeSet; void threadSafetyCleanup(BeforeSet* Cache); } // FIXME: No way to easily map from TemplateTypeParmTypes to // TemplateTypeParmDecls, so we have this horrible PointerUnion. typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>, SourceLocation> UnexpandedParameterPack; /// Describes whether we've seen any nullability information for the given /// file. struct FileNullability { /// The first pointer declarator (of any pointer kind) in the file that does /// not have a corresponding nullability annotation. SourceLocation PointerLoc; /// The end location for the first pointer declarator in the file. Used for /// placing fix-its. 
SourceLocation PointerEndLoc; /// Which kind of pointer declarator we saw. uint8_t PointerKind; /// Whether we saw any type nullability annotations in the given file. bool SawTypeNullability = false; }; /// A mapping from file IDs to a record of whether we've seen nullability /// information in that file. class FileNullabilityMap { /// A mapping from file IDs to the nullability information for each file ID. llvm::DenseMap<FileID, FileNullability> Map; /// A single-element cache based on the file ID. struct { FileID File; FileNullability Nullability; } Cache; public: FileNullability &operator[](FileID file) { // Check the single-element cache. if (file == Cache.File) return Cache.Nullability; // It's not in the single-element cache; flush the cache if we have one. if (!Cache.File.isInvalid()) { Map[Cache.File] = Cache.Nullability; } // Pull this entry into the cache. Cache.File = file; Cache.Nullability = Map[file]; return Cache.Nullability; } }; /// Tracks expected type during expression parsing, for use in code completion. /// The type is tied to a particular token, all functions that update or consume /// the type take a start location of the token they are looking at as a /// parameter. This avoids updating the type on hot paths in the parser. class PreferredTypeBuilder { public: PreferredTypeBuilder(bool Enabled) : Enabled(Enabled) {} void enterCondition(Sema &S, SourceLocation Tok); void enterReturn(Sema &S, SourceLocation Tok); void enterVariableInit(SourceLocation Tok, Decl *D); /// Handles e.g. BaseType{ .D = Tok... void enterDesignatedInitializer(SourceLocation Tok, QualType BaseType, const Designation &D); /// Computing a type for the function argument may require running /// overloading, so we postpone its computation until it is actually needed. /// /// Clients should be very careful when using this funciton, as it stores a /// function_ref, clients should make sure all calls to get() with the same /// location happen while function_ref is alive. /// /// The callback should also emit signature help as a side-effect, but only /// if the completion point has been reached. void enterFunctionArgument(SourceLocation Tok, llvm::function_ref<QualType()> ComputeType); void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc); void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind, SourceLocation OpLoc); void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op); void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base); void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS); /// Handles all type casts, including C-style cast, C++ casts, etc. void enterTypeCast(SourceLocation Tok, QualType CastType); /// Get the expected type associated with this location, if any. /// /// If the location is a function argument, determining the expected type /// involves considering all function overloads and the arguments so far. /// In this case, signature help for these function overloads will be reported /// as a side-effect (only if the completion point has been reached). QualType get(SourceLocation Tok) const { if (!Enabled || Tok != ExpectedLoc) return QualType(); if (!Type.isNull()) return Type; if (ComputeType) return ComputeType(); return QualType(); } private: bool Enabled; /// Start position of a token for which we store expected type. SourceLocation ExpectedLoc; /// Expected type for a token starting at ExpectedLoc. QualType Type; /// A function to compute expected type at ExpectedLoc. It is only considered /// if Type is null. 
llvm::function_ref<QualType()> ComputeType; }; /// Sema - This implements semantic analysis and AST building for C. class Sema final { Sema(const Sema &) = delete; void operator=(const Sema &) = delete; /// A key method to reduce duplicate debug info from Sema. virtual void anchor(); ///Source of additional semantic information. ExternalSemaSource *ExternalSource; ///Whether Sema has generated a multiplexer and has to delete it. bool isMultiplexExternalSource; static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD); bool isVisibleSlow(const NamedDecl *D); /// Determine whether two declarations should be linked together, given that /// the old declaration might not be visible and the new declaration might /// not have external linkage. bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old, const NamedDecl *New) { if (isVisible(Old)) return true; // See comment in below overload for why it's safe to compute the linkage // of the new declaration here. if (New->isExternallyDeclarable()) { assert(Old->isExternallyDeclarable() && "should not have found a non-externally-declarable previous decl"); return true; } return false; } bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New); void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem, QualType ResultTy, ArrayRef<QualType> Args); public: /// The maximum alignment, same as in llvm::Value. We duplicate them here /// because that allows us not to duplicate the constants in clang code, /// which we must to since we can't directly use the llvm constants. /// The value is verified against llvm here: lib/CodeGen/CGDecl.cpp /// /// This is the greatest alignment value supported by load, store, and alloca /// instructions, and global values. static const unsigned MaxAlignmentExponent = 29; static const unsigned MaximumAlignment = 1u << MaxAlignmentExponent; typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef OpaquePtr<QualType> TypeTy; OpenCLOptions OpenCLFeatures; FPOptions CurFPFeatures; const LangOptions &LangOpts; Preprocessor &PP; ASTContext &Context; ASTConsumer &Consumer; DiagnosticsEngine &Diags; SourceManager &SourceMgr; /// Flag indicating whether or not to collect detailed statistics. bool CollectStats; /// Code-completion consumer. CodeCompleteConsumer *CodeCompleter; /// CurContext - This is the current declaration context of parsing. DeclContext *CurContext; /// Generally null except when we temporarily switch decl contexts, /// like in \see ActOnObjCTemporaryExitContainerContext. DeclContext *OriginalLexicalContext; /// VAListTagName - The declaration name corresponding to __va_list_tag. /// This is used as part of a hack to omit that class from ADL results. DeclarationName VAListTagName; bool MSStructPragmaOn; // True when \#pragma ms_struct on /// Controls member pointer representation format under the MS ABI. LangOptions::PragmaMSPointersToMembersKind MSPointerToMemberRepresentationMethod; /// Stack of active SEH __finally scopes. Can be empty. SmallVector<Scope*, 2> CurrentSEHFinally; /// Source location for newly created implicit MSInheritanceAttrs SourceLocation ImplicitMSInheritanceAttrLoc; /// Holds TypoExprs that are created from `createDelayedTypo`. This is used by /// `TransformTypos` in order to keep track of any TypoExprs that are created /// recursively during typo correction and wipe them away if the correction /// fails. 
llvm::SmallVector<TypoExpr *, 2> TypoExprs; /// pragma clang section kind enum PragmaClangSectionKind { PCSK_Invalid = 0, PCSK_BSS = 1, PCSK_Data = 2, PCSK_Rodata = 3, PCSK_Text = 4, PCSK_Relro = 5 }; enum PragmaClangSectionAction { PCSA_Set = 0, PCSA_Clear = 1 }; struct PragmaClangSection { std::string SectionName; bool Valid = false; SourceLocation PragmaLocation; }; PragmaClangSection PragmaClangBSSSection; PragmaClangSection PragmaClangDataSection; PragmaClangSection PragmaClangRodataSection; PragmaClangSection PragmaClangRelroSection; PragmaClangSection PragmaClangTextSection; enum PragmaMsStackAction { PSK_Reset = 0x0, // #pragma () PSK_Set = 0x1, // #pragma (value) PSK_Push = 0x2, // #pragma (push[, id]) PSK_Pop = 0x4, // #pragma (pop[, id]) PSK_Show = 0x8, // #pragma (show) -- only for "pack"! PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value) PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value) }; // #pragma pack and align. class AlignPackInfo { public: // `Native` represents default align mode, which may vary based on the // platform. enum Mode : unsigned char { Native, Natural, Packed, Mac68k }; // #pragma pack info constructor AlignPackInfo(AlignPackInfo::Mode M, unsigned Num, bool IsXL) : PackAttr(true), AlignMode(M), PackNumber(Num), XLStack(IsXL) { assert(Num == PackNumber && "The pack number has been truncated."); } // #pragma align info constructor AlignPackInfo(AlignPackInfo::Mode M, bool IsXL) : PackAttr(false), AlignMode(M), PackNumber(M == Packed ? 1 : UninitPackVal), XLStack(IsXL) {} explicit AlignPackInfo(bool IsXL) : AlignPackInfo(Native, IsXL) {} AlignPackInfo() : AlignPackInfo(Native, false) {} // When an AlignPackInfo itself cannot be used, this returns a 32-bit // integer encoding for it. This should only be passed to // AlignPackInfo::getFromRawEncoding; it should not be inspected directly. static uint32_t getRawEncoding(const AlignPackInfo &Info) { std::uint32_t Encoding{}; if (Info.IsXLStack()) Encoding |= IsXLMask; Encoding |= static_cast<uint32_t>(Info.getAlignMode()) << 1; if (Info.IsPackAttr()) Encoding |= PackAttrMask; Encoding |= static_cast<uint32_t>(Info.getPackNumber()) << 4; return Encoding; } static AlignPackInfo getFromRawEncoding(unsigned Encoding) { bool IsXL = static_cast<bool>(Encoding & IsXLMask); AlignPackInfo::Mode M = static_cast<AlignPackInfo::Mode>((Encoding & AlignModeMask) >> 1); int PackNumber = (Encoding & PackNumMask) >> 4; if (Encoding & PackAttrMask) return AlignPackInfo(M, PackNumber, IsXL); return AlignPackInfo(M, IsXL); } bool IsPackAttr() const { return PackAttr; } bool IsAlignAttr() const { return !PackAttr; } Mode getAlignMode() const { return AlignMode; } unsigned getPackNumber() const { return PackNumber; } bool IsPackSet() const { // #pragma align, #pragma pack(), and #pragma pack(0) do not set the pack // attribute on a decl. return PackNumber != UninitPackVal && PackNumber != 0; } bool IsXLStack() const { return XLStack; } bool operator==(const AlignPackInfo &Info) const { return std::tie(AlignMode, PackNumber, PackAttr, XLStack) == std::tie(Info.AlignMode, Info.PackNumber, Info.PackAttr, Info.XLStack); } bool operator!=(const AlignPackInfo &Info) const { return !(*this == Info); } private: /// \brief True if this is a pragma pack attribute, /// not a pragma align attribute. bool PackAttr; /// \brief The alignment mode that is in effect. Mode AlignMode; /// \brief The pack number of the stack. unsigned char PackNumber; /// \brief True if it is an XL #pragma align/pack stack.
bool XLStack; /// \brief Uninitialized pack value. static constexpr unsigned char UninitPackVal = -1; // Masks to encode and decode an AlignPackInfo. static constexpr uint32_t IsXLMask{0x0000'0001}; static constexpr uint32_t AlignModeMask{0x0000'0006}; static constexpr uint32_t PackAttrMask{0x00000'0008}; static constexpr uint32_t PackNumMask{0x0000'01F0}; }; template<typename ValueType> struct PragmaStack { struct Slot { llvm::StringRef StackSlotLabel; ValueType Value; SourceLocation PragmaLocation; SourceLocation PragmaPushLocation; Slot(llvm::StringRef StackSlotLabel, ValueType Value, SourceLocation PragmaLocation, SourceLocation PragmaPushLocation) : StackSlotLabel(StackSlotLabel), Value(Value), PragmaLocation(PragmaLocation), PragmaPushLocation(PragmaPushLocation) {} }; void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, ValueType Value) { if (Action == PSK_Reset) { CurrentValue = DefaultValue; CurrentPragmaLocation = PragmaLocation; return; } if (Action & PSK_Push) Stack.emplace_back(StackSlotLabel, CurrentValue, CurrentPragmaLocation, PragmaLocation); else if (Action & PSK_Pop) { if (!StackSlotLabel.empty()) { // If we've got a label, try to find it and jump there. auto I = llvm::find_if(llvm::reverse(Stack), [&](const Slot &x) { return x.StackSlotLabel == StackSlotLabel; }); // If we found the label so pop from there. if (I != Stack.rend()) { CurrentValue = I->Value; CurrentPragmaLocation = I->PragmaLocation; Stack.erase(std::prev(I.base()), Stack.end()); } } else if (!Stack.empty()) { // We do not have a label, just pop the last entry. CurrentValue = Stack.back().Value; CurrentPragmaLocation = Stack.back().PragmaLocation; Stack.pop_back(); } } if (Action & PSK_Set) { CurrentValue = Value; CurrentPragmaLocation = PragmaLocation; } } // MSVC seems to add artificial slots to #pragma stacks on entering a C++ // method body to restore the stacks on exit, so it works like this: // // struct S { // #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>) // void Method {} // #pragma <name>(pop, InternalPragmaSlot) // }; // // It works even with #pragma vtordisp, although MSVC doesn't support // #pragma vtordisp(push [, id], n) // syntax. // // Push / pop a named sentinel slot. void SentinelAction(PragmaMsStackAction Action, StringRef Label) { assert((Action == PSK_Push || Action == PSK_Pop) && "Can only push / pop #pragma stack sentinels!"); Act(CurrentPragmaLocation, Action, Label, CurrentValue); } // Constructors. explicit PragmaStack(const ValueType &Default) : DefaultValue(Default), CurrentValue(Default) {} bool hasValue() const { return CurrentValue != DefaultValue; } SmallVector<Slot, 2> Stack; ValueType DefaultValue; // Value used for PSK_Reset action. ValueType CurrentValue; SourceLocation CurrentPragmaLocation; }; // FIXME: We should serialize / deserialize these if they occur in a PCH (but // we shouldn't do so if they're in a module). /// Whether to insert vtordisps prior to virtual bases in the Microsoft /// C++ ABI. Possible values are 0, 1, and 2, which mean: /// /// 0: Suppress all vtordisps /// 1: Insert vtordisps in the presence of vbase overrides and non-trivial /// structors /// 2: Always insert vtordisps to support RTTI on partially constructed /// objects PragmaStack<MSVtorDispMode> VtorDispStack; PragmaStack<AlignPackInfo> AlignPackStack; // The current #pragma align/pack values and locations at each #include. 
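// (A snapshot is taken when entering each included file so that, on leaving
// the include, Sema can diagnose headers that change the align/pack state
// without restoring it, e.g. via the -Wpragma-pack family of warnings.)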
struct AlignPackIncludeState { AlignPackInfo CurrentValue; SourceLocation CurrentPragmaLocation; bool HasNonDefaultValue, ShouldWarnOnInclude; }; SmallVector<AlignPackIncludeState, 8> AlignPackIncludeStack; // Segment #pragmas. PragmaStack<StringLiteral *> DataSegStack; PragmaStack<StringLiteral *> BSSSegStack; PragmaStack<StringLiteral *> ConstSegStack; PragmaStack<StringLiteral *> CodeSegStack; // This stack tracks the current state of Sema.CurFPFeatures. PragmaStack<FPOptionsOverride> FpPragmaStack; FPOptionsOverride CurFPFeatureOverrides() { FPOptionsOverride result; if (!FpPragmaStack.hasValue()) { result = FPOptionsOverride(); } else { result = FpPragmaStack.CurrentValue; } return result; } // RAII object to push / pop sentinel slots for all MS #pragma stacks. // Actions should be performed only if we enter / exit a C++ method body. class PragmaStackSentinelRAII { public: PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct); ~PragmaStackSentinelRAII(); private: Sema &S; StringRef SlotLabel; bool ShouldAct; }; /// A mapping that describes the nullability we've seen in each header file. FileNullabilityMap NullabilityMap; /// Last section used with #pragma init_seg. StringLiteral *CurInitSeg; SourceLocation CurInitSegLoc; /// VisContext - Manages the stack for \#pragma GCC visibility. void *VisContext; // Really a "PragmaVisStack*" /// This an attribute introduced by \#pragma clang attribute. struct PragmaAttributeEntry { SourceLocation Loc; ParsedAttr *Attribute; SmallVector<attr::SubjectMatchRule, 4> MatchRules; bool IsUsed; }; /// A push'd group of PragmaAttributeEntries. struct PragmaAttributeGroup { /// The location of the push attribute. SourceLocation Loc; /// The namespace of this push group. const IdentifierInfo *Namespace; SmallVector<PragmaAttributeEntry, 2> Entries; }; SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack; /// The declaration that is currently receiving an attribute from the /// #pragma attribute stack. const Decl *PragmaAttributeCurrentTargetDecl; /// This represents the last location of a "#pragma clang optimize off" /// directive if such a directive has not been closed by an "on" yet. If /// optimizations are currently "on", this is set to an invalid location. SourceLocation OptimizeOffPragmaLocation; /// Flag indicating if Sema is building a recovery call expression. /// /// This flag is used to avoid building recovery call expressions /// if Sema is already doing so, which would cause infinite recursions. bool IsBuildingRecoveryCallExpr; /// Used to control the generation of ExprWithCleanups. CleanupInfo Cleanup; /// ExprCleanupObjects - This is the stack of objects requiring /// cleanup that are created by the current full expression. SmallVector<ExprWithCleanups::CleanupObject, 8> ExprCleanupObjects; /// Store a set of either DeclRefExprs or MemberExprs that contain a reference /// to a variable (constant) that may or may not be odr-used in this Expr, and /// we won't know until all lvalue-to-rvalue and discarded value conversions /// have been applied to all subexpressions of the enclosing full expression. /// This is cleared at the end of each full expression. using MaybeODRUseExprSet = llvm::SetVector<Expr *, SmallVector<Expr *, 4>, llvm::SmallPtrSet<Expr *, 4>>; MaybeODRUseExprSet MaybeODRUseExprs; std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope; /// Stack containing information about each of the nested /// function, block, and method scopes that are currently active. 
SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes; /// The index of the first FunctionScope that corresponds to the current /// context. unsigned FunctionScopesStart = 0; ArrayRef<sema::FunctionScopeInfo*> getFunctionScopes() const { return llvm::makeArrayRef(FunctionScopes.begin() + FunctionScopesStart, FunctionScopes.end()); } /// Stack containing information needed when in C++2a an 'auto' is encountered /// in a function declaration parameter type specifier in order to invent a /// corresponding template parameter in the enclosing abbreviated function /// template. This information is also present in LambdaScopeInfo, stored in /// the FunctionScopes stack. SmallVector<InventedTemplateParameterInfo, 4> InventedParameterInfos; /// The index of the first InventedParameterInfo that refers to the current /// context. unsigned InventedParameterInfosStart = 0; ArrayRef<InventedTemplateParameterInfo> getInventedParameterInfos() const { return llvm::makeArrayRef(InventedParameterInfos.begin() + InventedParameterInfosStart, InventedParameterInfos.end()); } typedef LazyVector<TypedefNameDecl *, ExternalSemaSource, &ExternalSemaSource::ReadExtVectorDecls, 2, 2> ExtVectorDeclsType; /// ExtVectorDecls - This is a list of all the extended vector types. This allows /// us to associate a raw vector type with one of the ext_vector type names. /// This is only necessary for issuing pretty diagnostics. ExtVectorDeclsType ExtVectorDecls; /// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes. std::unique_ptr<CXXFieldCollector> FieldCollector; typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType; /// Set containing all declared private fields that are not used. NamedDeclSetType UnusedPrivateFields; /// Set containing all typedefs that are likely unused. llvm::SmallSetVector<const TypedefNameDecl *, 4> UnusedLocalTypedefNameCandidates; /// Delete-expressions to be analyzed at the end of the translation unit. /// /// This list maps class members to the locations of delete-expressions /// for which it could not be proven whether they mismatch the new-expression /// used in the initializer of the field. typedef std::pair<SourceLocation, bool> DeleteExprLoc; typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs; llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs; typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy; /// PureVirtualClassDiagSet - a set of class declarations for which we have /// emitted a list of pure virtual functions. Used to prevent emitting the /// same list more than once. std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet; /// ParsingInitForAutoVars - a set of declarations with auto types for which /// we are currently parsing the initializer. llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars; /// Look for a locally scoped extern "C" declaration by the given name. NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name); typedef LazyVector<VarDecl *, ExternalSemaSource, &ExternalSemaSource::ReadTentativeDefinitions, 2, 2> TentativeDefinitionsType; /// All the tentative definitions encountered in the TU. TentativeDefinitionsType TentativeDefinitions; /// All the external declarations encountered and used in the TU. SmallVector<VarDecl *, 4> ExternalDeclarations; typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2> UnusedFileScopedDeclsType; /// The set of file scoped decls seen so far that have not been used /// and must warn if not used. Only contains the first declaration.
UnusedFileScopedDeclsType UnusedFileScopedDecls; typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadDelegatingConstructors, 2, 2> DelegatingCtorDeclsType; /// All the delegating constructors seen so far in the file, used for /// cycle detection at the end of the TU. DelegatingCtorDeclsType DelegatingCtorDecls; /// All the overriding functions seen during a class definition /// that had their exception spec checks delayed, plus the overridden /// function. SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2> DelayedOverridingExceptionSpecChecks; /// All the function redeclarations seen during a class definition that had /// their exception spec checks delayed, plus the prior declaration they /// should be checked against. Except during error recovery, the new decl /// should always be a friend declaration, as that's the only valid way to /// redeclare a special member before its class is complete. SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2> DelayedEquivalentExceptionSpecChecks; typedef llvm::MapVector<const FunctionDecl *, std::unique_ptr<LateParsedTemplate>> LateParsedTemplateMapT; LateParsedTemplateMapT LateParsedTemplateMap; /// Callback to the parser to parse templated functions when needed. typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT); typedef void LateTemplateParserCleanupCB(void *P); LateTemplateParserCB *LateTemplateParser; LateTemplateParserCleanupCB *LateTemplateParserCleanup; void *OpaqueParser; void SetLateTemplateParser(LateTemplateParserCB *LTP, LateTemplateParserCleanupCB *LTPCleanup, void *P) { LateTemplateParser = LTP; LateTemplateParserCleanup = LTPCleanup; OpaqueParser = P; } class DelayedDiagnostics; class DelayedDiagnosticsState { sema::DelayedDiagnosticPool *SavedPool; friend class Sema::DelayedDiagnostics; }; typedef DelayedDiagnosticsState ParsingDeclState; typedef DelayedDiagnosticsState ProcessingContextState; /// A class which encapsulates the logic for delaying diagnostics /// during parsing and other processing. class DelayedDiagnostics { /// The current pool of diagnostics into which delayed /// diagnostics should go. sema::DelayedDiagnosticPool *CurPool; public: DelayedDiagnostics() : CurPool(nullptr) {} /// Adds a delayed diagnostic. void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h /// Determines whether diagnostics should be delayed. bool shouldDelayDiagnostics() { return CurPool != nullptr; } /// Returns the current delayed-diagnostics pool. sema::DelayedDiagnosticPool *getCurrentPool() const { return CurPool; } /// Enter a new scope. Access and deprecation diagnostics will be /// collected in this pool. DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = &pool; return state; } /// Leave a delayed-diagnostic state that was previously pushed. /// Do not emit any of the diagnostics. This is performed as part /// of the bookkeeping of popping a pool "properly". void popWithoutEmitting(DelayedDiagnosticsState state) { CurPool = state.SavedPool; } /// Enter a new scope where access and deprecation diagnostics are /// not delayed. DelayedDiagnosticsState pushUndelayed() { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = nullptr; return state; } /// Undo a previous pushUndelayed(). 
void popUndelayed(DelayedDiagnosticsState state) { assert(CurPool == nullptr); CurPool = state.SavedPool; } } DelayedDiagnostics; /// A RAII object to temporarily push a declaration context. class ContextRAII { private: Sema &S; DeclContext *SavedContext; ProcessingContextState SavedContextState; QualType SavedCXXThisTypeOverride; unsigned SavedFunctionScopesStart; unsigned SavedInventedParameterInfosStart; public: ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true) : S(S), SavedContext(S.CurContext), SavedContextState(S.DelayedDiagnostics.pushUndelayed()), SavedCXXThisTypeOverride(S.CXXThisTypeOverride), SavedFunctionScopesStart(S.FunctionScopesStart), SavedInventedParameterInfosStart(S.InventedParameterInfosStart) { assert(ContextToPush && "pushing null context"); S.CurContext = ContextToPush; if (NewThisContext) S.CXXThisTypeOverride = QualType(); // Any saved FunctionScopes do not refer to this context. S.FunctionScopesStart = S.FunctionScopes.size(); S.InventedParameterInfosStart = S.InventedParameterInfos.size(); } void pop() { if (!SavedContext) return; S.CurContext = SavedContext; S.DelayedDiagnostics.popUndelayed(SavedContextState); S.CXXThisTypeOverride = SavedCXXThisTypeOverride; S.FunctionScopesStart = SavedFunctionScopesStart; S.InventedParameterInfosStart = SavedInventedParameterInfosStart; SavedContext = nullptr; } ~ContextRAII() { pop(); } }; /// Whether the AST is currently being rebuilt to correct immediate /// invocations. Immediate invocation candidates and references to consteval /// functions aren't tracked when this is set. bool RebuildingImmediateInvocation = false; /// Used to change context to isConstantEvaluated without pushing a heavy /// ExpressionEvaluationContextRecord object. bool isConstantEvaluatedOverride; bool isConstantEvaluated() { return ExprEvalContexts.back().isConstantEvaluated() || isConstantEvaluatedOverride; } /// RAII object to handle the state changes required to synthesize /// a function body. class SynthesizedFunctionScope { Sema &S; Sema::ContextRAII SavedContext; bool PushedCodeSynthesisContext = false; public: SynthesizedFunctionScope(Sema &S, DeclContext *DC) : S(S), SavedContext(S, DC) { S.PushFunctionScope(); S.PushExpressionEvaluationContext( Sema::ExpressionEvaluationContext::PotentiallyEvaluated); if (auto *FD = dyn_cast<FunctionDecl>(DC)) FD->setWillHaveBody(true); else assert(isa<ObjCMethodDecl>(DC)); } void addContextNote(SourceLocation UseLoc) { assert(!PushedCodeSynthesisContext); Sema::CodeSynthesisContext Ctx; Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction; Ctx.PointOfInstantiation = UseLoc; Ctx.Entity = cast<Decl>(S.CurContext); S.pushCodeSynthesisContext(Ctx); PushedCodeSynthesisContext = true; } ~SynthesizedFunctionScope() { if (PushedCodeSynthesisContext) S.popCodeSynthesisContext(); if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext)) FD->setWillHaveBody(false); S.PopExpressionEvaluationContext(); S.PopFunctionScopeInfo(); } }; /// WeakUndeclaredIdentifiers - Identifiers contained in /// \#pragma weak before declared. rare. may alias another /// identifier, declared or undeclared llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers; /// ExtnameUndeclaredIdentifiers - Identifiers contained in /// \#pragma redefine_extname before declared. Used in Solaris system headers /// to define functions that occur in multiple standards to call the version /// in the currently selected standard. 
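/// For example (illustrative; the symbol names are made up):
///   #pragma redefine_extname old_symbol new_symbol
/// causes a later declaration of 'old_symbol' to be emitted under the
/// assembler name 'new_symbol'.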
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers; /// Load weak undeclared identifiers from the external source. void LoadExternalWeakUndeclaredIdentifiers(); /// WeakTopLevelDecl - Translation-unit scoped declarations generated by /// \#pragma weak during processing of other Decls. /// I couldn't figure out a clean way to generate these in-line, so /// we store them here and handle separately -- which is a hack. /// It would be best to refactor this. SmallVector<Decl*,2> WeakTopLevelDecl; IdentifierResolver IdResolver; /// Translation Unit Scope - useful to Objective-C actions that need /// to lookup file scope declarations in the "ordinary" C decl namespace. /// For example, user-defined classes, built-in "id" type, etc. Scope *TUScope; /// The C++ "std" namespace, where the standard library resides. LazyDeclPtr StdNamespace; /// The C++ "std::bad_alloc" class, which is defined by the C++ /// standard library. LazyDeclPtr StdBadAlloc; /// The C++ "std::align_val_t" enum class, which is defined by the C++ /// standard library. LazyDeclPtr StdAlignValT; /// The C++ "std::experimental" namespace, where the experimental parts /// of the standard library resides. NamespaceDecl *StdExperimentalNamespaceCache; /// The C++ "std::initializer_list" template, which is defined in /// \<initializer_list>. ClassTemplateDecl *StdInitializerList; /// The C++ "std::coroutine_traits" template, which is defined in /// \<coroutine_traits> ClassTemplateDecl *StdCoroutineTraitsCache; /// The C++ "type_info" declaration, which is defined in \<typeinfo>. RecordDecl *CXXTypeInfoDecl; /// The MSVC "_GUID" struct, which is defined in MSVC header files. RecordDecl *MSVCGuidDecl; /// Caches identifiers/selectors for NSFoundation APIs. std::unique_ptr<NSAPI> NSAPIObj; /// The declaration of the Objective-C NSNumber class. ObjCInterfaceDecl *NSNumberDecl; /// The declaration of the Objective-C NSValue class. ObjCInterfaceDecl *NSValueDecl; /// Pointer to NSNumber type (NSNumber *). QualType NSNumberPointer; /// Pointer to NSValue type (NSValue *). QualType NSValuePointer; /// The Objective-C NSNumber methods used to create NSNumber literals. ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods]; /// The declaration of the Objective-C NSString class. ObjCInterfaceDecl *NSStringDecl; /// Pointer to NSString type (NSString *). QualType NSStringPointer; /// The declaration of the stringWithUTF8String: method. ObjCMethodDecl *StringWithUTF8StringMethod; /// The declaration of the valueWithBytes:objCType: method. ObjCMethodDecl *ValueWithBytesObjCTypeMethod; /// The declaration of the Objective-C NSArray class. ObjCInterfaceDecl *NSArrayDecl; /// The declaration of the arrayWithObjects:count: method. ObjCMethodDecl *ArrayWithObjectsMethod; /// The declaration of the Objective-C NSDictionary class. ObjCInterfaceDecl *NSDictionaryDecl; /// The declaration of the dictionaryWithObjects:forKeys:count: method. ObjCMethodDecl *DictionaryWithObjectsMethod; /// id<NSCopying> type. QualType QIDNSCopying; /// will hold 'respondsToSelector:' Selector RespondsToSelectorSel; /// A flag to remember whether the implicit forms of operator new and delete /// have been declared. bool GlobalNewDeleteDeclared; /// Describes how the expressions currently being parsed are /// evaluated at run-time, if at all. 
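/// For instance, the operand of 'sizeof' is parsed in an Unevaluated context,
/// the value of a case label is ConstantEvaluated, and an ordinary function
/// argument is PotentiallyEvaluated (see the enumerators below).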
enum class ExpressionEvaluationContext { /// The current expression and its subexpressions occur within an /// unevaluated operand (C++11 [expr]p7), such as the subexpression of /// \c sizeof, where the type of the expression may be significant but /// no code will be generated to evaluate the value of the expression at /// run time. Unevaluated, /// The current expression occurs within a braced-init-list within /// an unevaluated operand. This is mostly like a regular unevaluated /// context, except that we still instantiate constexpr functions that are /// referenced here so that we can perform narrowing checks correctly. UnevaluatedList, /// The current expression occurs within a discarded statement. /// This behaves largely similarly to an unevaluated operand in preventing /// definitions from being required, but not in other ways. DiscardedStatement, /// The current expression occurs within an unevaluated /// operand that unconditionally permits abstract references to /// fields, such as a SIZE operator in MS-style inline assembly. UnevaluatedAbstract, /// The current context is "potentially evaluated" in C++11 terms, /// but the expression is evaluated at compile-time (like the values of /// cases in a switch statement). ConstantEvaluated, /// The current expression is potentially evaluated at run time, /// which means that code may be generated to evaluate the value of the /// expression at run time. PotentiallyEvaluated, /// The current expression is potentially evaluated, but any /// declarations referenced inside that expression are only used if /// in fact the current expression is used. /// /// This value is used when parsing default function arguments, for which /// we would like to provide diagnostics (e.g., passing non-POD arguments /// through varargs) but do not want to mark declarations as "referenced" /// until the default argument is used. PotentiallyEvaluatedIfUsed }; using ImmediateInvocationCandidate = llvm::PointerIntPair<ConstantExpr *, 1>; /// Data structure used to record current or nested /// expression evaluation contexts. struct ExpressionEvaluationContextRecord { /// The expression evaluation context. ExpressionEvaluationContext Context; /// Whether the enclosing context needed a cleanup. CleanupInfo ParentCleanup; /// The number of active cleanup objects when we entered /// this expression evaluation context. unsigned NumCleanupObjects; /// The number of typos encountered during this expression evaluation /// context (i.e. the number of TypoExprs created). unsigned NumTypos; MaybeODRUseExprSet SavedMaybeODRUseExprs; /// The lambdas that are present within this context, if it /// is indeed an unevaluated context. SmallVector<LambdaExpr *, 2> Lambdas; /// The declaration that provides context for lambda expressions /// and block literals if the normal declaration context does not /// suffice, e.g., in a default function argument. Decl *ManglingContextDecl; /// If we are processing a decltype type, a set of call expressions /// for which we have deferred checking the completeness of the return type. SmallVector<CallExpr *, 8> DelayedDecltypeCalls; /// If we are processing a decltype type, a set of temporary binding /// expressions for which we have deferred checking the destructor. SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds; llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs; /// Expressions appearing as the LHS of a volatile assignment in this /// context. 
We produce a warning for these when popping the context if /// they are not discarded-value expressions nor unevaluated operands. SmallVector<Expr*, 2> VolatileAssignmentLHSs; /// Set of candidates for starting an immediate invocation. llvm::SmallVector<ImmediateInvocationCandidate, 4> ImmediateInvocationCandidates; /// Set of DeclRefExprs referencing a consteval function when used in a /// context not already known to be immediately invoked. llvm::SmallPtrSet<DeclRefExpr *, 4> ReferenceToConsteval; /// \brief Describes whether we are in an expression constext which we have /// to handle differently. enum ExpressionKind { EK_Decltype, EK_TemplateArgument, EK_Other } ExprContext; ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context, unsigned NumCleanupObjects, CleanupInfo ParentCleanup, Decl *ManglingContextDecl, ExpressionKind ExprContext) : Context(Context), ParentCleanup(ParentCleanup), NumCleanupObjects(NumCleanupObjects), NumTypos(0), ManglingContextDecl(ManglingContextDecl), ExprContext(ExprContext) {} bool isUnevaluated() const { return Context == ExpressionEvaluationContext::Unevaluated || Context == ExpressionEvaluationContext::UnevaluatedAbstract || Context == ExpressionEvaluationContext::UnevaluatedList; } bool isConstantEvaluated() const { return Context == ExpressionEvaluationContext::ConstantEvaluated; } }; /// A stack of expression evaluation contexts. SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts; /// Emit a warning for all pending noderef expressions that we recorded. void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec); /// Compute the mangling number context for a lambda expression or /// block literal. Also return the extra mangling decl if any. /// /// \param DC - The DeclContext containing the lambda expression or /// block literal. std::tuple<MangleNumberingContext *, Decl *> getCurrentMangleNumberContext(const DeclContext *DC); /// SpecialMemberOverloadResult - The overloading result for a special member /// function. /// /// This is basically a wrapper around PointerIntPair. The lowest bits of the /// integer are used to determine whether overload resolution succeeded. class SpecialMemberOverloadResult { public: enum Kind { NoMemberOrDeleted, Ambiguous, Success }; private: llvm::PointerIntPair<CXXMethodDecl*, 2> Pair; public: SpecialMemberOverloadResult() : Pair() {} SpecialMemberOverloadResult(CXXMethodDecl *MD) : Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {} CXXMethodDecl *getMethod() const { return Pair.getPointer(); } void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); } Kind getKind() const { return static_cast<Kind>(Pair.getInt()); } void setKind(Kind K) { Pair.setInt(K); } }; class SpecialMemberOverloadResultEntry : public llvm::FastFoldingSetNode, public SpecialMemberOverloadResult { public: SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID) : FastFoldingSetNode(ID) {} }; /// A cache of special member function overload resolution results /// for C++ records. llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache; /// A cache of the flags available in enumerations with the flag_bits /// attribute. mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache; /// The kind of translation unit we are processing. /// /// When we're processing a complete translation unit, Sema will perform /// end-of-translation-unit semantic tasks (such as creating /// initializers for tentative definitions in C) once parsing has /// completed. 
Modules and precompiled headers perform different kinds of /// checks. TranslationUnitKind TUKind; llvm::BumpPtrAllocator BumpAlloc; /// The number of SFINAE diagnostics that have been trapped. unsigned NumSFINAEErrors; typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>> UnparsedDefaultArgInstantiationsMap; /// A mapping from parameters with unparsed default arguments to the /// set of instantiations of each parameter. /// /// This mapping is a temporary data structure used when parsing /// nested class templates or nested classes of class templates, /// where we might end up instantiating an inner class before the /// default arguments of its methods have been parsed. UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations; // Contains the locations of the beginning of unparsed default // argument locations. llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs; /// UndefinedInternals - all the used, undefined objects which require a /// definition in this translation unit. llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed; /// Determine if VD, which must be a variable or function, is an external /// symbol that nonetheless can't be referenced from outside this translation /// unit because its type has no linkage and it's not extern "C". bool isExternalWithNoLinkageType(ValueDecl *VD); /// Obtain a sorted list of functions that are undefined but ODR-used. void getUndefinedButUsed( SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined); /// Retrieves list of suspicious delete-expressions that will be checked at /// the end of translation unit. const llvm::MapVector<FieldDecl *, DeleteLocs> & getMismatchingDeleteExpressions() const; typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods; typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool; /// Method Pool - allows efficient lookup when typechecking messages to "id". /// We need to maintain a list, since selectors can have differing signatures /// across classes. In Cocoa, this happens to be extremely uncommon (only 1% /// of selectors are "overloaded"). /// At the head of the list it is recorded whether there were 0, 1, or >= 2 /// methods inside categories with a particular selector. GlobalMethodPool MethodPool; /// Method selectors used in a \@selector expression. Used for implementation /// of -Wselector. llvm::MapVector<Selector, SourceLocation> ReferencedSelectors; /// List of SourceLocations where 'self' is implicitly retained inside a /// block. llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1> ImplicitlyRetainedSelfLocs; /// Kinds of C++ special members. enum CXXSpecialMember { CXXDefaultConstructor, CXXCopyConstructor, CXXMoveConstructor, CXXCopyAssignment, CXXMoveAssignment, CXXDestructor, CXXInvalid }; typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember> SpecialMemberDecl; /// The C++ special members which we are currently in the process of /// declaring. If this process recursively triggers the declaration of the /// same special member, we should act as if it is not yet declared. llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared; /// Kinds of defaulted comparison operator functions. enum class DefaultedComparisonKind : unsigned char { /// This is not a defaultable comparison operator. None, /// This is an operator== that should be implemented as a series of /// subobject comparisons. 
Equal, /// This is an operator<=> that should be implemented as a series of /// subobject comparisons. ThreeWay, /// This is an operator!= that should be implemented as a rewrite in terms /// of a == comparison. NotEqual, /// This is an <, <=, >, or >= that should be implemented as a rewrite in /// terms of a <=> comparison. Relational, }; /// The function definitions which were renamed as part of typo-correction /// to match their respective declarations. We want to keep track of them /// to ensure that we don't emit a "redefinition" error if we encounter a /// correctly named definition after the renamed definition. llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions; /// Stack of types that correspond to the parameter entities that are /// currently being copy-initialized. Can be empty. llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes; void ReadMethodPool(Selector Sel); void updateOutOfDateSelector(Selector Sel); /// Private Helper predicate to check for 'self'. bool isSelfExpr(Expr *RExpr); bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method); /// Cause the active diagnostic on the DiagosticsEngine to be /// emitted. This is closely coupled to the SemaDiagnosticBuilder class and /// should not be used elsewhere. void EmitCurrentDiagnostic(unsigned DiagID); /// Records and restores the CurFPFeatures state on entry/exit of compound /// statements. class FPFeaturesStateRAII { public: FPFeaturesStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.CurFPFeatures) { OldOverrides = S.FpPragmaStack.CurrentValue; } ~FPFeaturesStateRAII() { S.CurFPFeatures = OldFPFeaturesState; S.FpPragmaStack.CurrentValue = OldOverrides; } FPOptionsOverride getOverrides() { return OldOverrides; } private: Sema& S; FPOptions OldFPFeaturesState; FPOptionsOverride OldOverrides; }; void addImplicitTypedef(StringRef Name, QualType T); bool WarnedStackExhausted = false; public: Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer, TranslationUnitKind TUKind = TU_Complete, CodeCompleteConsumer *CompletionConsumer = nullptr); ~Sema(); /// Perform initialization that occurs after the parser has been /// initialized but before it parses anything. void Initialize(); const LangOptions &getLangOpts() const { return LangOpts; } OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; } FPOptions &getCurFPFeatures() { return CurFPFeatures; } DiagnosticsEngine &getDiagnostics() const { return Diags; } SourceManager &getSourceManager() const { return SourceMgr; } Preprocessor &getPreprocessor() const { return PP; } ASTContext &getASTContext() const { return Context; } ASTConsumer &getASTConsumer() const { return Consumer; } ASTMutationListener *getASTMutationListener() const; ExternalSemaSource* getExternalSource() const { return ExternalSource; } ///Registers an external source. If an external source already exists, /// creates a multiplex external source and appends to it. /// ///\param[in] E - A non-null external sema source. /// void addExternalSource(ExternalSemaSource *E); void PrintStats() const; /// Warn that the stack is nearly exhausted. void warnStackExhausted(SourceLocation Loc); /// Run some code with "sufficient" stack space. (Currently, at least 256K is /// guaranteed). Produces a warning if we're low on stack space and allocates /// more in that case. Use this in code that may recurse deeply (for example, /// in template instantiation) to avoid stack overflow. 
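/// Example usage (illustrative; the lambda body is hypothetical):
///
/// runWithSufficientStackSpace(Loc, [&] {
///   // some deeply recursive work, e.g. one step of template instantiation
/// });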
void runWithSufficientStackSpace(SourceLocation Loc, llvm::function_ref<void()> Fn); /// Helper class that creates diagnostics with optional /// template instantiation stacks. /// /// This class provides a wrapper around the basic DiagnosticBuilder /// class that emits diagnostics. ImmediateDiagBuilder is /// responsible for emitting the diagnostic (as DiagnosticBuilder /// does) and, if the diagnostic comes from inside a template /// instantiation, printing the template instantiation stack as /// well. class ImmediateDiagBuilder : public DiagnosticBuilder { Sema &SemaRef; unsigned DiagID; public: ImmediateDiagBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID) : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) {} ImmediateDiagBuilder(DiagnosticBuilder &&DB, Sema &SemaRef, unsigned DiagID) : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) {} // This is a cunning lie. DiagnosticBuilder actually performs move // construction in its copy constructor (but due to varied uses, it's not // possible to conveniently express this as actual move construction). So // the default copy ctor here is fine, because the base class disables the // source anyway, so the user-defined ~ImmediateDiagBuilder is a safe no-op // in that case anwyay. ImmediateDiagBuilder(const ImmediateDiagBuilder &) = default; ~ImmediateDiagBuilder() { // If we aren't active, there is nothing to do. if (!isActive()) return; // Otherwise, we need to emit the diagnostic. First clear the diagnostic // builder itself so it won't emit the diagnostic in its own destructor. // // This seems wasteful, in that as written the DiagnosticBuilder dtor will // do its own needless checks to see if the diagnostic needs to be // emitted. However, because we take care to ensure that the builder // objects never escape, a sufficiently smart compiler will be able to // eliminate that code. Clear(); // Dispatch to Sema to emit the diagnostic. SemaRef.EmitCurrentDiagnostic(DiagID); } /// Teach operator<< to produce an object of the correct type. template <typename T> friend const ImmediateDiagBuilder & operator<<(const ImmediateDiagBuilder &Diag, const T &Value) { const DiagnosticBuilder &BaseDiag = Diag; BaseDiag << Value; return Diag; } // It is necessary to limit this to rvalue reference to avoid calling this // function with a bitfield lvalue argument since non-const reference to // bitfield is not allowed. template <typename T, typename = typename std::enable_if< !std::is_lvalue_reference<T>::value>::type> const ImmediateDiagBuilder &operator<<(T &&V) const { const DiagnosticBuilder &BaseDiag = *this; BaseDiag << std::move(V); return *this; } }; /// A generic diagnostic builder for errors which may or may not be deferred. /// /// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch) /// which are not allowed to appear inside __device__ functions and are /// allowed to appear in __host__ __device__ functions only if the host+device /// function is never codegen'ed. /// /// To handle this, we use the notion of "deferred diagnostics", where we /// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed. /// /// This class lets you emit either a regular diagnostic, a deferred /// diagnostic, or no diagnostic at all, according to an argument you pass to /// its constructor, thus simplifying the process of creating these "maybe /// deferred" diagnostics. class SemaDiagnosticBuilder { public: enum Kind { /// Emit no diagnostics. 
K_Nop, /// Emit the diagnostic immediately (i.e., behave like Sema::Diag()). K_Immediate, /// Emit the diagnostic immediately, and, if it's a warning or error, also /// emit a call stack showing how this function can be reached by an a /// priori known-emitted function. K_ImmediateWithCallStack, /// Create a deferred diagnostic, which is emitted only if the function /// it's attached to is codegen'ed. Also emit a call stack as with /// K_ImmediateWithCallStack. K_Deferred }; SemaDiagnosticBuilder(Kind K, SourceLocation Loc, unsigned DiagID, FunctionDecl *Fn, Sema &S); SemaDiagnosticBuilder(SemaDiagnosticBuilder &&D); SemaDiagnosticBuilder(const SemaDiagnosticBuilder &) = default; ~SemaDiagnosticBuilder(); bool isImmediate() const { return ImmediateDiag.hasValue(); } /// Convertible to bool: True if we immediately emitted an error, false if /// we didn't emit an error or we created a deferred error. /// /// Example usage: /// /// if (SemaDiagnosticBuilder(...) << foo << bar) /// return ExprError(); /// /// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably /// want to use these instead of creating a SemaDiagnosticBuilder yourself. operator bool() const { return isImmediate(); } template <typename T> friend const SemaDiagnosticBuilder & operator<<(const SemaDiagnosticBuilder &Diag, const T &Value) { if (Diag.ImmediateDiag.hasValue()) *Diag.ImmediateDiag << Value; else if (Diag.PartialDiagId.hasValue()) Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second << Value; return Diag; } // It is necessary to limit this to rvalue reference to avoid calling this // function with a bitfield lvalue argument since non-const reference to // bitfield is not allowed. template <typename T, typename = typename std::enable_if< !std::is_lvalue_reference<T>::value>::type> const SemaDiagnosticBuilder &operator<<(T &&V) const { if (ImmediateDiag.hasValue()) *ImmediateDiag << std::move(V); else if (PartialDiagId.hasValue()) S.DeviceDeferredDiags[Fn][*PartialDiagId].second << std::move(V); return *this; } friend const SemaDiagnosticBuilder & operator<<(const SemaDiagnosticBuilder &Diag, const PartialDiagnostic &PD) { if (Diag.ImmediateDiag.hasValue()) PD.Emit(*Diag.ImmediateDiag); else if (Diag.PartialDiagId.hasValue()) Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second = PD; return Diag; } void AddFixItHint(const FixItHint &Hint) const { if (ImmediateDiag.hasValue()) ImmediateDiag->AddFixItHint(Hint); else if (PartialDiagId.hasValue()) S.DeviceDeferredDiags[Fn][*PartialDiagId].second.AddFixItHint(Hint); } friend ExprResult ExprError(const SemaDiagnosticBuilder &) { return ExprError(); } friend StmtResult StmtError(const SemaDiagnosticBuilder &) { return StmtError(); } operator ExprResult() const { return ExprError(); } operator StmtResult() const { return StmtError(); } operator TypeResult() const { return TypeError(); } operator DeclResult() const { return DeclResult(true); } operator MemInitResult() const { return MemInitResult(true); } private: Sema &S; SourceLocation Loc; unsigned DiagID; FunctionDecl *Fn; bool ShowCallStack; // Invariant: At most one of these Optionals has a value. // FIXME: Switch these to a Variant once that exists. llvm::Optional<ImmediateDiagBuilder> ImmediateDiag; llvm::Optional<unsigned> PartialDiagId; }; /// Is the last error level diagnostic immediate. This is used to determined /// whether the next info diagnostic should be immediate. bool IsLastErrorImmediate = true; /// Emit a diagnostic. 
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID, bool DeferHint = false); /// Emit a partial diagnostic. SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic &PD, bool DeferHint = false); /// Build a partial diagnostic. PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h /// Whether uncompilable error has occurred. This includes errors that happen /// in deferred diagnostics. bool hasUncompilableErrorOccurred() const; bool findMacroSpelling(SourceLocation &loc, StringRef name); /// Get a string to suggest for zero-initialization of a type. std::string getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const; std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const; /// Calls \c Lexer::getLocForEndOfToken() SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0); /// Retrieve the module loader associated with the preprocessor. ModuleLoader &getModuleLoader() const; /// Invent a new identifier for parameters of abbreviated templates. IdentifierInfo * InventAbbreviatedTemplateParameterTypeName(IdentifierInfo *ParamName, unsigned Index); void emitAndClearUnusedLocalTypedefWarnings(); private: /// Function or variable declarations to be checked for whether the deferred /// diagnostics should be emitted. SmallVector<Decl *, 4> DeclsToCheckForDeferredDiags; public: // Emit all deferred diagnostics. void emitDeferredDiags(); enum TUFragmentKind { /// The global module fragment, between 'module;' and a module-declaration. Global, /// A normal translation unit fragment. For a non-module unit, this is the /// entire translation unit. Otherwise, it runs from the module-declaration /// to the private-module-fragment (if any) or the end of the TU (if not). Normal, /// The private module fragment, between 'module :private;' and the end of /// the translation unit. Private }; void ActOnStartOfTranslationUnit(); void ActOnEndOfTranslationUnit(); void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind); void CheckDelegatingCtorCycles(); Scope *getScopeForContext(DeclContext *Ctx); void PushFunctionScope(); void PushBlockScope(Scope *BlockScope, BlockDecl *Block); sema::LambdaScopeInfo *PushLambdaScope(); /// This is used to inform Sema what the current TemplateParameterDepth /// is during Parsing. Currently it is used to pass on the depth /// when parsing generic lambda 'auto' parameters. void RecordParsingTemplateParameterDepth(unsigned Depth); void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD, RecordDecl *RD, CapturedRegionKind K, unsigned OpenMPCaptureLevel = 0); /// Custom deleter to allow FunctionScopeInfos to be kept alive for a short /// time after they've been popped. class PoppedFunctionScopeDeleter { Sema *Self; public: explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {} void operator()(sema::FunctionScopeInfo *Scope) const; }; using PoppedFunctionScopePtr = std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>; PoppedFunctionScopePtr PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr, const Decl *D = nullptr, QualType BlockType = QualType()); sema::FunctionScopeInfo *getCurFunction() const { return FunctionScopes.empty() ? 
nullptr : FunctionScopes.back(); } sema::FunctionScopeInfo *getEnclosingFunction() const; void setFunctionHasBranchIntoScope(); void setFunctionHasBranchProtectedScope(); void setFunctionHasIndirectGoto(); void PushCompoundScope(bool IsStmtExpr); void PopCompoundScope(); sema::CompoundScopeInfo &getCurCompoundScope() const; bool hasAnyUnrecoverableErrorsInThisFunction() const; /// Retrieve the current block, if any. sema::BlockScopeInfo *getCurBlock(); /// Get the innermost lambda enclosing the current location, if any. This /// looks through intervening non-lambda scopes such as local functions and /// blocks. sema::LambdaScopeInfo *getEnclosingLambda() const; /// Retrieve the current lambda scope info, if any. /// \param IgnoreNonLambdaCapturingScope true if should find the top-most /// lambda scope info ignoring all inner capturing scopes that are not /// lambda scopes. sema::LambdaScopeInfo * getCurLambda(bool IgnoreNonLambdaCapturingScope = false); /// Retrieve the current generic lambda info, if any. sema::LambdaScopeInfo *getCurGenericLambda(); /// Retrieve the current captured region, if any. sema::CapturedRegionScopeInfo *getCurCapturedRegion(); /// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; } /// Called before parsing a function declarator belonging to a function /// declaration. void ActOnStartFunctionDeclarationDeclarator(Declarator &D, unsigned TemplateParameterDepth); /// Called after parsing a function declarator belonging to a function /// declaration. void ActOnFinishFunctionDeclarationDeclarator(Declarator &D); void ActOnComment(SourceRange Comment); //===--------------------------------------------------------------------===// // Type Analysis / Processing: SemaType.cpp. // QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs, const DeclSpec *DS = nullptr); QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA, const DeclSpec *DS = nullptr); QualType BuildPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildReferenceType(QualType T, bool LValueRef, SourceLocation Loc, DeclarationName Entity); QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM, Expr *ArraySize, unsigned Quals, SourceRange Brackets, DeclarationName Entity); QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc); QualType BuildExtVectorType(QualType T, Expr *ArraySize, SourceLocation AttrLoc); QualType BuildMatrixType(QualType T, Expr *NumRows, Expr *NumColumns, SourceLocation AttrLoc); QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace, SourceLocation AttrLoc); /// Same as above, but constructs the AddressSpace index if not provided. QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace, SourceLocation AttrLoc); bool CheckQualifiedFunctionForTypeId(QualType T, SourceLocation Loc); bool CheckFunctionReturnType(QualType T, SourceLocation Loc); /// Build a function type. /// /// This routine checks the function type according to C++ rules and /// under the assumption that the result type and parameter types have /// just been instantiated from a template. It therefore duplicates /// some of the behavior of GetTypeForDeclarator, but in a much /// simpler form that is only suitable for this narrow use case. /// /// \param T The return type of the function. /// /// \param ParamTypes The parameter types of the function. 
This array /// will be modified to account for adjustments to the types of the /// function parameters. /// /// \param Loc The location of the entity whose type involves this /// function type or, if there is no such entity, the location of the /// type that will have function type. /// /// \param Entity The name of the entity that involves the function /// type, if known. /// /// \param EPI Extra information about the function type. Usually this will /// be taken from an existing function with the same prototype. /// /// \returns A suitable function type, if there are no errors. The /// unqualified type will always be a FunctionProtoType. /// Otherwise, returns a NULL type. QualType BuildFunctionType(QualType T, MutableArrayRef<QualType> ParamTypes, SourceLocation Loc, DeclarationName Entity, const FunctionProtoType::ExtProtoInfo &EPI); QualType BuildMemberPointerType(QualType T, QualType Class, SourceLocation Loc, DeclarationName Entity); QualType BuildBlockPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildParenType(QualType T); QualType BuildAtomicType(QualType T, SourceLocation Loc); QualType BuildReadPipeType(QualType T, SourceLocation Loc); QualType BuildWritePipeType(QualType T, SourceLocation Loc); QualType BuildExtIntType(bool IsUnsigned, Expr *BitWidth, SourceLocation Loc); TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S); TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy); /// Package the given type and TSI into a ParsedType. ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo); DeclarationNameInfo GetNameForDeclarator(Declarator &D); DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name); static QualType GetTypeFromParser(ParsedType Ty, TypeSourceInfo **TInfo = nullptr); CanThrowResult canThrow(const Stmt *E); /// Determine whether the callee of a particular function call can throw. /// E, D and Loc are all optional. static CanThrowResult canCalleeThrow(Sema &S, const Expr *E, const Decl *D, SourceLocation Loc = SourceLocation()); const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc, const FunctionProtoType *FPT); void UpdateExceptionSpec(FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI); bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range); bool CheckDistantExceptionSpec(QualType T); bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New); bool CheckEquivalentExceptionSpec( const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool CheckEquivalentExceptionSpec( const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID, const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool handlerCanCatch(QualType HandlerType, QualType ExceptionType); bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID, const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const PartialDiagnostic &NoThrowDiagID, const FunctionProtoType *Superset, SourceLocation SuperLoc, const FunctionProtoType *Subset, SourceLocation SubLoc); bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const FunctionProtoType *Target, SourceLocation TargetLoc, const FunctionProtoType *Source, SourceLocation SourceLoc); TypeResult ActOnTypeName(Scope *S, Declarator &D); /// The parser has parsed the context-sensitive type 'instancetype' /// in an Objective-C message declaration. 
Return the appropriate type. ParsedType ActOnObjCInstanceType(SourceLocation Loc); /// Abstract class used to diagnose incomplete types. struct TypeDiagnoser { TypeDiagnoser() {} virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0; virtual ~TypeDiagnoser() {} }; static int getPrintable(int I) { return I; } static unsigned getPrintable(unsigned I) { return I; } static bool getPrintable(bool B) { return B; } static const char * getPrintable(const char *S) { return S; } static StringRef getPrintable(StringRef S) { return S; } static const std::string &getPrintable(const std::string &S) { return S; } static const IdentifierInfo *getPrintable(const IdentifierInfo *II) { return II; } static DeclarationName getPrintable(DeclarationName N) { return N; } static QualType getPrintable(QualType T) { return T; } static SourceRange getPrintable(SourceRange R) { return R; } static SourceRange getPrintable(SourceLocation L) { return L; } static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); } static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();} template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser { protected: unsigned DiagID; std::tuple<const Ts &...> Args; template <std::size_t... Is> void emit(const SemaDiagnosticBuilder &DB, std::index_sequence<Is...>) const { // Apply all tuple elements to the builder in order. bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...}; (void)Dummy; } public: BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args) : TypeDiagnoser(), DiagID(DiagID), Args(Args...) { assert(DiagID != 0 && "no diagnostic for type diagnoser"); } void diagnose(Sema &S, SourceLocation Loc, QualType T) override { const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID); emit(DB, std::index_sequence_for<Ts...>()); DB << T; } }; /// Do a check to make sure \p Name looks like a legal argument for the /// swift_name attribute applied to decl \p D. Raise a diagnostic if the name /// is invalid for the given declaration. /// /// \p AL is used to provide caret diagnostics in case of a malformed name. /// /// \returns true if the name is a valid swift name for \p D, false otherwise. bool DiagnoseSwiftName(Decl *D, StringRef Name, SourceLocation Loc, const ParsedAttr &AL, bool IsAsync); /// A derivative of BoundTypeDiagnoser for which the diagnostic's type /// parameter is preceded by a 0/1 enum that is 1 if the type is sizeless. /// For example, a diagnostic with no other parameters would generally have /// the form "...%select{incomplete|sizeless}0 type %1...". template <typename... Ts> class SizelessTypeDiagnoser : public BoundTypeDiagnoser<Ts...> { public: SizelessTypeDiagnoser(unsigned DiagID, const Ts &... Args) : BoundTypeDiagnoser<Ts...>(DiagID, Args...) {} void diagnose(Sema &S, SourceLocation Loc, QualType T) override { const SemaDiagnosticBuilder &DB = S.Diag(Loc, this->DiagID); this->emit(DB, std::index_sequence_for<Ts...>()); DB << T->isSizelessType() << T; } }; enum class CompleteTypeKind { /// Apply the normal rules for complete types. In particular, /// treat all sizeless types as incomplete. Normal, /// Relax the normal rules for complete types so that they include /// sizeless built-in types. AcceptSizeless, // FIXME: Eventually we should flip the default to Normal and opt in // to AcceptSizeless rather than opt out of it. Default = AcceptSizeless }; private: /// Methods for marking which expressions involve dereferencing a pointer /// marked with the 'noderef' attribute. 
Expressions are checked bottom up as /// they are parsed, meaning that a noderef pointer may not be accessed. For /// example, in `&*p` where `p` is a noderef pointer, we will first parse the /// `*p`, but need to check that `address of` is called on it. This requires /// keeping a container of all pending expressions and checking if the address /// of them are eventually taken. void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E); void CheckAddressOfNoDeref(const Expr *E); void CheckMemberAccessOfNoDeref(const MemberExpr *E); bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T, CompleteTypeKind Kind, TypeDiagnoser *Diagnoser); struct ModuleScope { SourceLocation BeginLoc; clang::Module *Module = nullptr; bool ModuleInterface = false; bool ImplicitGlobalModuleFragment = false; VisibleModuleSet OuterVisibleModules; }; /// The modules we're currently parsing. llvm::SmallVector<ModuleScope, 16> ModuleScopes; /// Namespace definitions that we will export when they finish. llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces; /// Get the module whose scope we are currently within. Module *getCurrentModule() const { return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module; } VisibleModuleSet VisibleModules; public: /// Get the module owning an entity. Module *getOwningModule(const Decl *Entity) { return Entity->getOwningModule(); } /// Make a merged definition of an existing hidden definition \p ND /// visible at the specified location. void makeMergedDefinitionVisible(NamedDecl *ND); bool isModuleVisible(const Module *M, bool ModulePrivate = false); // When loading a non-modular PCH files, this is used to restore module // visibility. void makeModuleVisible(Module *Mod, SourceLocation ImportLoc) { VisibleModules.setVisible(Mod, ImportLoc); } /// Determine whether a declaration is visible to name lookup. bool isVisible(const NamedDecl *D) { return D->isUnconditionallyVisible() || isVisibleSlow(D); } /// Determine whether any declaration of an entity is visible. bool hasVisibleDeclaration(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr) { return isVisible(D) || hasVisibleDeclarationSlow(D, Modules); } bool hasVisibleDeclarationSlow(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules); bool hasVisibleMergedDefinition(NamedDecl *Def); bool hasMergedDefinitionInCurrentModule(NamedDecl *Def); /// Determine if \p D and \p Suggested have a structurally compatible /// layout as described in C11 6.2.7/1. bool hasStructuralCompatLayout(Decl *D, Decl *Suggested); /// Determine if \p D has a visible definition. If not, suggest a declaration /// that should be made visible to expose the definition. bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested, bool OnlyNeedComplete = false); bool hasVisibleDefinition(const NamedDecl *D) { NamedDecl *Hidden; return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden); } /// Determine if the template parameter \p D has a visible default argument. bool hasVisibleDefaultArgument(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if there is a visible declaration of \p D that is an explicit /// specialization declaration for a specialization of a template. (For a /// member specialization, use hasVisibleMemberSpecialization.) 
bool hasVisibleExplicitSpecialization( const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if there is a visible declaration of \p D that is a member /// specialization declaration (as opposed to an instantiated declaration). bool hasVisibleMemberSpecialization( const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if \p A and \p B are equivalent internal linkage declarations /// from different modules, and thus an ambiguity error can be downgraded to /// an extension warning. bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A, const NamedDecl *B); void diagnoseEquivalentInternalLinkageDeclarations( SourceLocation Loc, const NamedDecl *D, ArrayRef<const NamedDecl *> Equiv); bool isUsualDeallocationFunction(const CXXMethodDecl *FD); bool isCompleteType(SourceLocation Loc, QualType T, CompleteTypeKind Kind = CompleteTypeKind::Default) { return !RequireCompleteTypeImpl(Loc, T, Kind, nullptr); } bool RequireCompleteType(SourceLocation Loc, QualType T, CompleteTypeKind Kind, TypeDiagnoser &Diagnoser); bool RequireCompleteType(SourceLocation Loc, QualType T, CompleteTypeKind Kind, unsigned DiagID); bool RequireCompleteType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser) { return RequireCompleteType(Loc, T, CompleteTypeKind::Default, Diagnoser); } bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID) { return RequireCompleteType(Loc, T, CompleteTypeKind::Default, DiagID); } template <typename... Ts> bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteType(Loc, T, Diagnoser); } template <typename... Ts> bool RequireCompleteSizedType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &... Args) { SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteType(Loc, T, CompleteTypeKind::Normal, Diagnoser); } /// Get the type of expression E, triggering instantiation to complete the /// type if necessary -- that is, if the expression refers to a templated /// static data member of incomplete array type. /// /// May still return an incomplete type if instantiation was not possible or /// if the type is incomplete for a different reason. Use /// RequireCompleteExprType instead if a diagnostic is expected for an /// incomplete expression type. QualType getCompletedType(Expr *E); void completeExprArrayBound(Expr *E); bool RequireCompleteExprType(Expr *E, CompleteTypeKind Kind, TypeDiagnoser &Diagnoser); bool RequireCompleteExprType(Expr *E, unsigned DiagID); template <typename... Ts> bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteExprType(E, CompleteTypeKind::Default, Diagnoser); } template <typename... Ts> bool RequireCompleteSizedExprType(Expr *E, unsigned DiagID, const Ts &... Args) { SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteExprType(E, CompleteTypeKind::Normal, Diagnoser); } bool RequireLiteralType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID); template <typename... 
Ts> bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireLiteralType(Loc, T, Diagnoser); } QualType getElaboratedType(ElaboratedTypeKeyword Keyword, const CXXScopeSpec &SS, QualType T, TagDecl *OwnedTagDecl = nullptr); QualType BuildTypeofExprType(Expr *E, SourceLocation Loc); /// If AsUnevaluated is false, E is treated as though it were an evaluated /// context, such as when building a type for decltype(auto). QualType BuildDecltypeType(Expr *E, SourceLocation Loc, bool AsUnevaluated = true); QualType BuildUnaryTransformType(QualType BaseType, UnaryTransformType::UTTKind UKind, SourceLocation Loc); //===--------------------------------------------------------------------===// // Symbol table / Decl tracking callbacks: SemaDecl.cpp. // struct SkipBodyInfo { SkipBodyInfo() : ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr), New(nullptr) {} bool ShouldSkip; bool CheckSameAsPrevious; NamedDecl *Previous; NamedDecl *New; }; DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr); void DiagnoseUseOfUnimplementedSelectors(); bool isSimpleTypeSpecifier(tok::TokenKind Kind) const; ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec *SS = nullptr, bool isClassName = false, bool HasTrailingDot = false, ParsedType ObjectType = nullptr, bool IsCtorOrDtorName = false, bool WantNontrivialTypeSourceInfo = false, bool IsClassTemplateDeductionContext = true, IdentifierInfo **CorrectedII = nullptr); TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S); bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S); void DiagnoseUnknownTypeName(IdentifierInfo *&II, SourceLocation IILoc, Scope *S, CXXScopeSpec *SS, ParsedType &SuggestedType, bool IsTemplateName = false); /// Attempt to behave like MSVC in situations where lookup of an unqualified /// type name has failed in a dependent context. In these situations, we /// automatically form a DependentTypeName that will retry lookup in a related /// scope during instantiation. ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II, SourceLocation NameLoc, bool IsTemplateTypeArg); /// Describes the result of the name lookup and resolution performed /// by \c ClassifyName(). enum NameClassificationKind { /// This name is not a type or template in this context, but might be /// something else. NC_Unknown, /// Classification failed; an error has been produced. NC_Error, /// The name has been typo-corrected to a keyword. NC_Keyword, /// The name was classified as a type. NC_Type, /// The name was classified as a specific non-type, non-template /// declaration. ActOnNameClassifiedAsNonType should be called to /// convert the declaration to an expression. NC_NonType, /// The name was classified as an ADL-only function name. /// ActOnNameClassifiedAsUndeclaredNonType should be called to convert the /// result to an expression. NC_UndeclaredNonType, /// The name denotes a member of a dependent type that could not be /// resolved. ActOnNameClassifiedAsDependentNonType should be called to /// convert the result to an expression. NC_DependentNonType, /// The name was classified as an overload set, and an expression /// representing that overload set has been formed. /// ActOnNameClassifiedAsOverloadSet should be called to form a suitable /// expression referencing the overload set. NC_OverloadSet, /// The name was classified as a template whose specializations are types. 
NC_TypeTemplate, /// The name was classified as a variable template name. NC_VarTemplate, /// The name was classified as a function template name. NC_FunctionTemplate, /// The name was classified as an ADL-only function template name. NC_UndeclaredTemplate, /// The name was classified as a concept name. NC_Concept, }; class NameClassification { NameClassificationKind Kind; union { ExprResult Expr; NamedDecl *NonTypeDecl; TemplateName Template; ParsedType Type; }; explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {} public: NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {} NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {} static NameClassification Error() { return NameClassification(NC_Error); } static NameClassification Unknown() { return NameClassification(NC_Unknown); } static NameClassification OverloadSet(ExprResult E) { NameClassification Result(NC_OverloadSet); Result.Expr = E; return Result; } static NameClassification NonType(NamedDecl *D) { NameClassification Result(NC_NonType); Result.NonTypeDecl = D; return Result; } static NameClassification UndeclaredNonType() { return NameClassification(NC_UndeclaredNonType); } static NameClassification DependentNonType() { return NameClassification(NC_DependentNonType); } static NameClassification TypeTemplate(TemplateName Name) { NameClassification Result(NC_TypeTemplate); Result.Template = Name; return Result; } static NameClassification VarTemplate(TemplateName Name) { NameClassification Result(NC_VarTemplate); Result.Template = Name; return Result; } static NameClassification FunctionTemplate(TemplateName Name) { NameClassification Result(NC_FunctionTemplate); Result.Template = Name; return Result; } static NameClassification Concept(TemplateName Name) { NameClassification Result(NC_Concept); Result.Template = Name; return Result; } static NameClassification UndeclaredTemplate(TemplateName Name) { NameClassification Result(NC_UndeclaredTemplate); Result.Template = Name; return Result; } NameClassificationKind getKind() const { return Kind; } ExprResult getExpression() const { assert(Kind == NC_OverloadSet); return Expr; } ParsedType getType() const { assert(Kind == NC_Type); return Type; } NamedDecl *getNonTypeDecl() const { assert(Kind == NC_NonType); return NonTypeDecl; } TemplateName getTemplateName() const { assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate || Kind == NC_VarTemplate || Kind == NC_Concept || Kind == NC_UndeclaredTemplate); return Template; } TemplateNameKind getTemplateNameKind() const { switch (Kind) { case NC_TypeTemplate: return TNK_Type_template; case NC_FunctionTemplate: return TNK_Function_template; case NC_VarTemplate: return TNK_Var_template; case NC_Concept: return TNK_Concept_template; case NC_UndeclaredTemplate: return TNK_Undeclared_template; default: llvm_unreachable("unsupported name classification."); } } }; /// Perform name lookup on the given name, classifying it based on /// the results of name lookup and the following token. /// /// This routine is used by the parser to resolve identifiers and help direct /// parsing. When the identifier cannot be found, this routine will attempt /// to correct the typo and classify based on the resulting name. /// /// \param S The scope in which we're performing name lookup. /// /// \param SS The nested-name-specifier that precedes the name. /// /// \param Name The identifier. If typo correction finds an alternative name, /// this pointer parameter will be updated accordingly. 
/// /// \param NameLoc The location of the identifier. /// /// \param NextToken The token following the identifier. Used to help /// disambiguate the name. /// /// \param CCC The correction callback, if typo correction is desired. NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name, SourceLocation NameLoc, const Token &NextToken, CorrectionCandidateCallback *CCC = nullptr); /// Act on the result of classifying a name as an undeclared (ADL-only) /// non-type declaration. ExprResult ActOnNameClassifiedAsUndeclaredNonType(IdentifierInfo *Name, SourceLocation NameLoc); /// Act on the result of classifying a name as an undeclared member of a /// dependent base class. ExprResult ActOnNameClassifiedAsDependentNonType(const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, bool IsAddressOfOperand); /// Act on the result of classifying a name as a specific non-type /// declaration. ExprResult ActOnNameClassifiedAsNonType(Scope *S, const CXXScopeSpec &SS, NamedDecl *Found, SourceLocation NameLoc, const Token &NextToken); /// Act on the result of classifying a name as an overload set. ExprResult ActOnNameClassifiedAsOverloadSet(Scope *S, Expr *OverloadSet); /// Describes the detailed kind of a template name. Used in diagnostics. enum class TemplateNameKindForDiagnostics { ClassTemplate, FunctionTemplate, VarTemplate, AliasTemplate, TemplateTemplateParam, Concept, DependentTemplate }; TemplateNameKindForDiagnostics getTemplateNameKindForDiagnostics(TemplateName Name); /// Determine whether it's plausible that E was intended to be a /// template-name. bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) { if (!getLangOpts().CPlusPlus || E.isInvalid()) return false; Dependent = false; if (auto *DRE = dyn_cast<DeclRefExpr>(E.get())) return !DRE->hasExplicitTemplateArgs(); if (auto *ME = dyn_cast<MemberExpr>(E.get())) return !ME->hasExplicitTemplateArgs(); Dependent = true; if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get())) return !DSDRE->hasExplicitTemplateArgs(); if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get())) return !DSME->hasExplicitTemplateArgs(); // Any additional cases recognized here should also be handled by // diagnoseExprIntendedAsTemplateName. 
return false; } void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName, SourceLocation Less, SourceLocation Greater); Decl *ActOnDeclarator(Scope *S, Declarator &D); NamedDecl *HandleDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists); void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S); bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info); bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC, DeclarationName Name, SourceLocation Loc, bool IsTemplateId); void diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals, SourceLocation FallbackLoc, SourceLocation ConstQualLoc = SourceLocation(), SourceLocation VolatileQualLoc = SourceLocation(), SourceLocation RestrictQualLoc = SourceLocation(), SourceLocation AtomicQualLoc = SourceLocation(), SourceLocation UnalignedQualLoc = SourceLocation()); static bool adjustContextForLocalExternDecl(DeclContext *&DC); void DiagnoseFunctionSpecifiers(const DeclSpec &DS); NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D, const LookupResult &R); NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R); NamedDecl *getShadowedDeclaration(const BindingDecl *D, const LookupResult &R); void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl, const LookupResult &R); void CheckShadow(Scope *S, VarDecl *D); /// Warn if 'E', which is an expression that is about to be modified, refers /// to a shadowing declaration. void CheckShadowingDeclModification(Expr *E, SourceLocation Loc); void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI); private: /// Map of current shadowing declarations to shadowed declarations. Warn if /// it looks like the user is trying to modify the shadowing declaration. llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls; public: void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange); void handleTagNumbering(const TagDecl *Tag, Scope *TagScope); void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec, TypedefNameDecl *NewTD); void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D); NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous); NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D, LookupResult &Previous, bool &Redeclaration); NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope, ArrayRef<BindingDecl *> Bindings = None); NamedDecl * ActOnDecompositionDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists); // Returns true if the variable declaration is a redeclaration bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous); void CheckVariableDeclarationType(VarDecl *NewVD); bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit, Expr *Init); void CheckCompleteVariableDeclaration(VarDecl *VD); void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD); void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D); NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope); bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD); enum class CheckConstexprKind { /// Diagnose issues that are non-constant or that are extensions. 
Diagnose, /// Identify whether this function satisfies the formal rules for constexpr /// functions in the current language mode (with no extensions). CheckValid }; bool CheckConstexprFunctionDefinition(const FunctionDecl *FD, CheckConstexprKind Kind); void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD); void FindHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); void NoteHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); // Returns true if the function declaration is a redeclaration bool CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD, LookupResult &Previous, bool IsMemberSpecialization); bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl); bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD, QualType NewT, QualType OldT); void CheckMain(FunctionDecl *FD, const DeclSpec &D); void CheckMSVCRTEntryPoint(FunctionDecl *FD); Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD, bool IsDefinition); void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D); Decl *ActOnParamDeclarator(Scope *S, Declarator &D); ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC, SourceLocation Loc, QualType T); ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc, SourceLocation NameLoc, IdentifierInfo *Name, QualType T, TypeSourceInfo *TSInfo, StorageClass SC); void ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc, Expr *defarg); void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc, SourceLocation ArgLoc); void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc); ExprResult ConvertParamDefaultArgument(const ParmVarDecl *Param, Expr *DefaultArg, SourceLocation EqualLoc); void SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg, SourceLocation EqualLoc); // Contexts where using non-trivial C union types can be disallowed. This is // passed to err_non_trivial_c_union_in_invalid_context. enum NonTrivialCUnionContext { // Function parameter. NTCUC_FunctionParam, // Function return. NTCUC_FunctionReturn, // Default-initialized object. NTCUC_DefaultInitializedObject, // Variable with automatic storage duration. NTCUC_AutoVar, // Initializer expression that might copy from another object. NTCUC_CopyInit, // Assignment. NTCUC_Assignment, // Compound literal. NTCUC_CompoundLiteral, // Block capture. NTCUC_BlockCapture, // lvalue-to-rvalue conversion of volatile type. NTCUC_LValueToRValueVolatile, }; /// Emit diagnostics if the initializer or any of its explicit or /// implicitly-generated subexpressions require copying or /// default-initializing a type that is or contains a C union type that is /// non-trivial to copy or default-initialize. void checkNonTrivialCUnionInInitializer(const Expr *Init, SourceLocation Loc); // These flags are passed to checkNonTrivialCUnion. enum NonTrivialCUnionKind { NTCUK_Init = 0x1, NTCUK_Destruct = 0x2, NTCUK_Copy = 0x4, }; /// Emit diagnostics if a non-trivial C union type or a struct that contains /// a non-trivial C union is used in an invalid context. 
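/// A hedged illustration, not code from the original header (VD is a
/// placeholder VarDecl): the NTCUK_* values are bit flags and are OR'd
/// together into the NonTrivialKind argument of the checker declared below:
/// \code
///   S.checkNonTrivialCUnion(VD->getType(), VD->getLocation(),
///                           Sema::NTCUC_AutoVar,
///                           Sema::NTCUK_Init | Sema::NTCUK_Destruct);
/// \endcode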
void checkNonTrivialCUnion(QualType QT, SourceLocation Loc, NonTrivialCUnionContext UseContext, unsigned NonTrivialKind); void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit); void ActOnUninitializedDecl(Decl *dcl); void ActOnInitializerError(Decl *Dcl); void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc); void ActOnCXXForRangeDecl(Decl *D); StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc, IdentifierInfo *Ident, ParsedAttributes &Attrs, SourceLocation AttrEnd); void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc); void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc); void CheckStaticLocalForDllExport(VarDecl *VD); void FinalizeDeclaration(Decl *D); DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS, ArrayRef<Decl *> Group); DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group); /// Should be called on all declarations that might have attached /// documentation comments. void ActOnDocumentableDecl(Decl *D); void ActOnDocumentableDecls(ArrayRef<Decl *> Group); void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D, SourceLocation LocAfterDecls); void CheckForFunctionRedefinition( FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D, SkipBodyInfo *SkipBody = nullptr); void ActOnStartTrailingRequiresClause(Scope *S, Declarator &D); ExprResult ActOnFinishTrailingRequiresClause(ExprResult ConstraintExpr); ExprResult ActOnRequiresClause(ExprResult ConstraintExpr); void ActOnStartOfObjCMethodDef(Scope *S, Decl *D); bool isObjCMethodDecl(Decl *D) { return D && isa<ObjCMethodDecl>(D); } /// Determine whether we can delay parsing the body of a function or /// function template until it is used, assuming we don't care about emitting /// code for that function. /// /// This will be \c false if we may need the body of the function in the /// middle of parsing an expression (where it's impractical to switch to /// parsing a different function), for instance, if it's constexpr in C++11 /// or has an 'auto' return type in C++14. These cases are essentially bugs. bool canDelayFunctionBody(const Declarator &D); /// Determine whether we can skip parsing the body of a function /// definition, assuming we don't care about analyzing its body or emitting /// code for that function. /// /// This will be \c false only if we may need the body of the function in /// order to parse the rest of the program (for instance, if it is /// \c constexpr in C++11 or has an 'auto' return type in C++14). bool canSkipFunctionBody(Decl *D); void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation); Decl *ActOnSkippedFunctionBody(Decl *Decl); void ActOnFinishInlineFunctionDef(FunctionDecl *D); /// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an /// attribute for which parsing is delayed. void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs); /// Diagnose any unused parameters in the given sequence of /// ParmVarDecl pointers. 
void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters); /// Diagnose whether the size of parameters or return value of a /// function or obj-c method definition is pass-by-value and larger than a /// specified threshold. void DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters, QualType ReturnTy, NamedDecl *D); void DiagnoseInvalidJumps(Stmt *Body); Decl *ActOnFileScopeAsmDecl(Expr *expr, SourceLocation AsmLoc, SourceLocation RParenLoc); /// Handle a C++11 empty-declaration and attribute-declaration. Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList, SourceLocation SemiLoc); enum class ModuleDeclKind { Interface, ///< 'export module X;' Implementation, ///< 'module X;' }; /// The parser has processed a module-declaration that begins the definition /// of a module interface or implementation. DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc, SourceLocation ModuleLoc, ModuleDeclKind MDK, ModuleIdPath Path, bool IsFirstDecl); /// The parser has processed a global-module-fragment declaration that begins /// the definition of the global module fragment of the current module unit. /// \param ModuleLoc The location of the 'module' keyword. DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc); /// The parser has processed a private-module-fragment declaration that begins /// the definition of the private module fragment of the current module unit. /// \param ModuleLoc The location of the 'module' keyword. /// \param PrivateLoc The location of the 'private' keyword. DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc, SourceLocation PrivateLoc); /// The parser has processed a module import declaration. /// /// \param StartLoc The location of the first token in the declaration. This /// could be the location of an '@', 'export', or 'import'. /// \param ExportLoc The location of the 'export' keyword, if any. /// \param ImportLoc The location of the 'import' keyword. /// \param Path The module access path. DeclResult ActOnModuleImport(SourceLocation StartLoc, SourceLocation ExportLoc, SourceLocation ImportLoc, ModuleIdPath Path); DeclResult ActOnModuleImport(SourceLocation StartLoc, SourceLocation ExportLoc, SourceLocation ImportLoc, Module *M, ModuleIdPath Path = {}); /// The parser has processed a module import translated from a /// #include or similar preprocessing directive. void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod); void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod); /// The parser has entered a submodule. void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod); /// The parser has left a submodule. void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod); /// Create an implicit import of the given module at the given /// source location, for error recovery, if possible. /// /// This routine is typically used when an entity found by name lookup /// is actually hidden within a module that we know about but the user /// has forgotten to import. void createImplicitModuleImportForErrorRecovery(SourceLocation Loc, Module *Mod); /// Kinds of missing import. Note, the values of these enumerators correspond /// to %select values in diagnostics. enum class MissingImportKind { Declaration, Definition, DefaultArgument, ExplicitSpecialization, PartialSpecialization }; /// Diagnose that the specified declaration needs to be visible but /// isn't, and suggest a module import that would resolve the problem. 
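/// A minimal sketch, not from the original header (UseLoc and Def are
/// placeholders), requesting the "definition is missing" flavour of the note
/// and allowing recovery by an implicit import:
/// \code
///   S.diagnoseMissingImport(UseLoc, Def, Sema::MissingImportKind::Definition,
///                           /*Recover=*/true);
/// \endcode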
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, MissingImportKind MIK, bool Recover = true); void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, SourceLocation DeclLoc, ArrayRef<Module *> Modules, MissingImportKind MIK, bool Recover); Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc, SourceLocation LBraceLoc); Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl, SourceLocation RBraceLoc); /// We've found a use of a templated declaration that would trigger an /// implicit instantiation. Check that any relevant explicit specializations /// and partial specializations are visible, and diagnose if not. void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec); /// Retrieve a suitable printing policy for diagnostics. PrintingPolicy getPrintingPolicy() const { return getPrintingPolicy(Context, PP); } /// Retrieve a suitable printing policy for diagnostics. static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx, const Preprocessor &PP); /// Scope actions. void ActOnPopScope(SourceLocation Loc, Scope *S); void ActOnTranslationUnitScope(Scope *S); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, RecordDecl *&AnonRecord); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, MultiTemplateParamsArg TemplateParams, bool IsExplicitInstantiation, RecordDecl *&AnonRecord); Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS, AccessSpecifier AS, RecordDecl *Record, const PrintingPolicy &Policy); Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS, RecordDecl *Record); /// Common ways to introduce type names without a tag for use in diagnostics. /// Keep in sync with err_tag_reference_non_tag. enum NonTagKind { NTK_NonStruct, NTK_NonClass, NTK_NonUnion, NTK_NonEnum, NTK_Typedef, NTK_TypeAlias, NTK_Template, NTK_TypeAliasTemplate, NTK_TemplateTemplateArgument, }; /// Given a non-tag type declaration, returns an enum useful for indicating /// what kind of non-tag type this is. 
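/// An illustrative call site, not taken verbatim from Clang (PrevDecl, TTK and
/// NameLoc are placeholders); the returned NonTagKind is streamed into the
/// diagnostic so it can drive the corresponding %select arm of
/// err_tag_reference_non_tag:
/// \code
///   Sema::NonTagKind NTK = S.getNonTagTypeDeclKind(PrevDecl, TTK);
///   S.Diag(NameLoc, diag::err_tag_reference_non_tag) << PrevDecl << NTK;
/// \endcode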
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK); bool isAcceptableTagRedeclaration(const TagDecl *Previous, TagTypeKind NewTag, bool isDefinition, SourceLocation NewTagLoc, const IdentifierInfo *Name); enum TagUseKind { TUK_Reference, // Reference to a tag: 'struct foo *X;' TUK_Declaration, // Fwd decl of a tag: 'struct foo;' TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;' TUK_Friend // Friend declaration: 'friend struct foo;' }; Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, AccessSpecifier AS, SourceLocation ModulePrivateLoc, MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl, bool &IsDependent, SourceLocation ScopedEnumKWLoc, bool ScopedEnumUsesClassTag, TypeResult UnderlyingType, bool IsTypeSpecifier, bool IsTemplateParamOrArg, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc, unsigned TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, MultiTemplateParamsArg TempParamLists); TypeResult ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK, const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation TagLoc, SourceLocation NameLoc); void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart, IdentifierInfo *ClassName, SmallVectorImpl<Decl *> &Decls); Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth); FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS); MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS, const ParsedAttr &MSPropertyAttr); FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T, TypeSourceInfo *TInfo, RecordDecl *Record, SourceLocation Loc, bool Mutable, Expr *BitfieldWidth, InClassInitStyle InitStyle, SourceLocation TSSL, AccessSpecifier AS, NamedDecl *PrevDecl, Declarator *D = nullptr); bool CheckNontrivialField(FieldDecl *FD); void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM); enum TrivialABIHandling { /// The triviality of a method unaffected by "trivial_abi". TAH_IgnoreTrivialABI, /// The triviality of a method affected by "trivial_abi". TAH_ConsiderTrivialABI }; bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM, TrivialABIHandling TAH = TAH_IgnoreTrivialABI, bool Diagnose = false); /// For a defaulted function, the kind of defaulted function that it is. 
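/// A hedged sketch of how the combined kind is typically queried (FD and the
/// handle* helpers are placeholders, not part of this header):
/// \code
///   Sema::DefaultedFunctionKind DFK = S.getDefaultedFunctionKind(FD);
///   if (DFK.isComparison())
///     handleComparison(DFK.asComparison());        // DefaultedComparisonKind
///   else if (DFK.isSpecialMember())
///     handleSpecialMember(DFK.asSpecialMember());  // CXXSpecialMember
///   unsigned SelectIdx = DFK.getDiagnosticIndex(); // single %select index
/// \endcode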
class DefaultedFunctionKind { CXXSpecialMember SpecialMember : 8; DefaultedComparisonKind Comparison : 8; public: DefaultedFunctionKind() : SpecialMember(CXXInvalid), Comparison(DefaultedComparisonKind::None) { } DefaultedFunctionKind(CXXSpecialMember CSM) : SpecialMember(CSM), Comparison(DefaultedComparisonKind::None) {} DefaultedFunctionKind(DefaultedComparisonKind Comp) : SpecialMember(CXXInvalid), Comparison(Comp) {} bool isSpecialMember() const { return SpecialMember != CXXInvalid; } bool isComparison() const { return Comparison != DefaultedComparisonKind::None; } explicit operator bool() const { return isSpecialMember() || isComparison(); } CXXSpecialMember asSpecialMember() const { return SpecialMember; } DefaultedComparisonKind asComparison() const { return Comparison; } /// Get the index of this function kind for use in diagnostics. unsigned getDiagnosticIndex() const { static_assert(CXXInvalid > CXXDestructor, "invalid should have highest index"); static_assert((unsigned)DefaultedComparisonKind::None == 0, "none should be equal to zero"); return SpecialMember + (unsigned)Comparison; } }; DefaultedFunctionKind getDefaultedFunctionKind(const FunctionDecl *FD); CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD) { return getDefaultedFunctionKind(MD).asSpecialMember(); } DefaultedComparisonKind getDefaultedComparisonKind(const FunctionDecl *FD) { return getDefaultedFunctionKind(FD).asComparison(); } void ActOnLastBitfield(SourceLocation DeclStart, SmallVectorImpl<Decl *> &AllIvarDecls); Decl *ActOnIvar(Scope *S, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, tok::ObjCKeywordKind visibility); // This is used for both record definitions and ObjC interface declarations. void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl, ArrayRef<Decl *> Fields, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); /// ActOnTagStartDefinition - Invoked when we have entered the /// scope of a tag's definition (e.g., for an enumeration, class, /// struct, or union). void ActOnTagStartDefinition(Scope *S, Decl *TagDecl); /// Perform ODR-like check for C/ObjC when merging tag types from modules. /// Differently from C++, actually parse the body and reject / error out /// in case of a structural mismatch. bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev, SkipBodyInfo &SkipBody); typedef void *SkippedDefinitionContext; /// Invoked when we enter a tag definition that we're skipping. SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD); Decl *ActOnObjCContainerStartDefinition(Decl *IDecl); /// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a /// C++ record definition's base-specifiers clause and are starting its /// member declarations. void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl, SourceLocation FinalLoc, bool IsFinalSpelledSealed, SourceLocation LBraceLoc); /// ActOnTagFinishDefinition - Invoked once we have finished parsing /// the definition of a tag (enumeration, class, struct, or union). void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl, SourceRange BraceRange); void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context); void ActOnObjCContainerFinishDefinition(); /// Invoked when we must temporarily exit the objective-c container /// scope for parsing/looking-up C constructs. 
/// /// Must be followed by a call to \see ActOnObjCReenterContainerContext void ActOnObjCTemporaryExitContainerContext(DeclContext *DC); void ActOnObjCReenterContainerContext(DeclContext *DC); /// ActOnTagDefinitionError - Invoked when there was an unrecoverable /// error parsing the definition of a tag. void ActOnTagDefinitionError(Scope *S, Decl *TagDecl); EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum, EnumConstantDecl *LastEnumConst, SourceLocation IdLoc, IdentifierInfo *Id, Expr *val); bool CheckEnumUnderlyingType(TypeSourceInfo *TI); bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped, QualType EnumUnderlyingTy, bool IsFixed, const EnumDecl *Prev); /// Determine whether the body of an anonymous enumeration should be skipped. /// \param II The name of the first enumerator. SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II, SourceLocation IILoc); Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant, SourceLocation IdLoc, IdentifierInfo *Id, const ParsedAttributesView &Attrs, SourceLocation EqualLoc, Expr *Val); void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange, Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S, const ParsedAttributesView &Attr); /// Set the current declaration context until it gets popped. void PushDeclContext(Scope *S, DeclContext *DC); void PopDeclContext(); /// EnterDeclaratorContext - Used when we must lookup names in the context /// of a declarator's nested name specifier. void EnterDeclaratorContext(Scope *S, DeclContext *DC); void ExitDeclaratorContext(Scope *S); /// Enter a template parameter scope, after it's been associated with a particular /// DeclContext. Causes lookup within the scope to chain through enclosing contexts /// in the correct order. void EnterTemplatedContext(Scope *S, DeclContext *DC); /// Push the parameters of D, which must be a function, into scope. void ActOnReenterFunctionContext(Scope* S, Decl* D); void ActOnExitFunctionContext(); DeclContext *getFunctionLevelDeclContext(); /// getCurFunctionDecl - If inside of a function body, this returns a pointer /// to the function decl for the function being parsed. If we're currently /// in a 'block', this returns the containing context. FunctionDecl *getCurFunctionDecl(); /// getCurMethodDecl - If inside of a method body, this returns a pointer to /// the method decl for the method being parsed. If we're currently /// in a 'block', this returns the containing context. ObjCMethodDecl *getCurMethodDecl(); /// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method /// or C function we're in, otherwise return null. If we're currently /// in a 'block', this returns the containing context. NamedDecl *getCurFunctionOrMethodDecl(); /// Add this decl to the scope shadowed decl chains. void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true); /// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true /// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns /// true if 'D' belongs to the given declaration context. /// /// \param AllowInlineNamespace If \c true, allow the declaration to be in the /// enclosing namespace set of the context, rather than contained /// directly within it. bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr, bool AllowInlineNamespace = false); /// Finds the scope corresponding to the given decl context, if it /// happens to be an enclosing scope. Otherwise return NULL. 
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC); /// Subroutines of ActOnDeclarator(). TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T, TypeSourceInfo *TInfo); bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New); /// Describes the kind of merge to perform for availability /// attributes (including "deprecated", "unavailable", and "availability"). enum AvailabilityMergeKind { /// Don't merge availability attributes at all. AMK_None, /// Merge availability attributes for a redeclaration, which requires /// an exact match. AMK_Redeclaration, /// Merge availability attributes for an override, which requires /// an exact match or a weakening of constraints. AMK_Override, /// Merge availability attributes for an implementation of /// a protocol requirement. AMK_ProtocolImplementation, }; /// Describes the kind of priority given to an availability attribute. /// /// The sum of priorities determines the final priority of the attribute. /// The final priority determines how the attribute will be merged. /// An attribute with a lower priority will always remove higher priority /// attributes for the specified platform when it is being applied. An /// attribute with a higher priority will not be applied if the declaration /// already has an availability attribute with a lower priority for the /// specified platform. The final priority values are not expected to match /// the values in this enumeration, but instead should be treated as a plain /// integer value. This enumeration just names the priority weights that are /// used to calculate that final value. enum AvailabilityPriority : int { /// The availability attribute was specified explicitly next to the /// declaration. AP_Explicit = 0, /// The availability attribute was applied using '#pragma clang attribute'. AP_PragmaClangAttribute = 1, /// The availability attribute for a specific platform was inferred from /// an availability attribute for another platform. AP_InferredFromOtherPlatform = 2 }; /// Attribute merging methods. Return true if a new attribute was added. 
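/// A worked example of the AvailabilityPriority arithmetic described above
/// (illustrative only, not code from this header): an attribute inferred for
/// another platform from a '#pragma clang attribute' carries
/// \code
///   int Priority = Sema::AP_PragmaClangAttribute +
///                  Sema::AP_InferredFromOtherPlatform; // 1 + 2 == 3
/// \endcode
/// so it will never displace an attribute written directly on the
/// declaration, whose priority is AP_Explicit (0).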
AvailabilityAttr * mergeAvailabilityAttr(NamedDecl *D, const AttributeCommonInfo &CI, IdentifierInfo *Platform, bool Implicit, VersionTuple Introduced, VersionTuple Deprecated, VersionTuple Obsoleted, bool IsUnavailable, StringRef Message, bool IsStrict, StringRef Replacement, AvailabilityMergeKind AMK, int Priority); TypeVisibilityAttr * mergeTypeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI, TypeVisibilityAttr::VisibilityType Vis); VisibilityAttr *mergeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI, VisibilityAttr::VisibilityType Vis); UuidAttr *mergeUuidAttr(Decl *D, const AttributeCommonInfo &CI, StringRef UuidAsWritten, MSGuidDecl *GuidDecl); DLLImportAttr *mergeDLLImportAttr(Decl *D, const AttributeCommonInfo &CI); DLLExportAttr *mergeDLLExportAttr(Decl *D, const AttributeCommonInfo &CI); MSInheritanceAttr *mergeMSInheritanceAttr(Decl *D, const AttributeCommonInfo &CI, bool BestCase, MSInheritanceModel Model); FormatAttr *mergeFormatAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Format, int FormatIdx, int FirstArg); SectionAttr *mergeSectionAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Name); CodeSegAttr *mergeCodeSegAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Name); AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, const AttributeCommonInfo &CI, const IdentifierInfo *Ident); MinSizeAttr *mergeMinSizeAttr(Decl *D, const AttributeCommonInfo &CI); NoSpeculativeLoadHardeningAttr * mergeNoSpeculativeLoadHardeningAttr(Decl *D, const NoSpeculativeLoadHardeningAttr &AL); SpeculativeLoadHardeningAttr * mergeSpeculativeLoadHardeningAttr(Decl *D, const SpeculativeLoadHardeningAttr &AL); SwiftNameAttr *mergeSwiftNameAttr(Decl *D, const SwiftNameAttr &SNA, StringRef Name); OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, const AttributeCommonInfo &CI); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const InternalLinkageAttr &AL); CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL); CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL); WebAssemblyImportNameAttr *mergeImportNameAttr( Decl *D, const WebAssemblyImportNameAttr &AL); WebAssemblyImportModuleAttr *mergeImportModuleAttr( Decl *D, const WebAssemblyImportModuleAttr &AL); EnforceTCBAttr *mergeEnforceTCBAttr(Decl *D, const EnforceTCBAttr &AL); EnforceTCBLeafAttr *mergeEnforceTCBLeafAttr(Decl *D, const EnforceTCBLeafAttr &AL); void mergeDeclAttributes(NamedDecl *New, Decl *Old, AvailabilityMergeKind AMK = AMK_Redeclaration); void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New, LookupResult &OldDecls); bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S, bool MergeTypeWithOld); bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old, Scope *S, bool MergeTypeWithOld); void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old); void MergeVarDecl(VarDecl *New, LookupResult &Previous); void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld); void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old); bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn); void notePreviousDefinition(const NamedDecl *Old, SourceLocation New); bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S); // AssignmentAction - This is used by all the assignment diagnostic functions // to represent what is actually causing the operation enum AssignmentAction { AA_Assigning, AA_Passing, AA_Returning, AA_Converting, AA_Initializing, 
AA_Sending, AA_Casting, AA_Passing_CFAudited }; /// C++ Overloading. enum OverloadKind { /// This is a legitimate overload: the existing declarations are /// functions or function templates with different signatures. Ovl_Overload, /// This is not an overload because the signature exactly matches /// an existing declaration. Ovl_Match, /// This is not an overload because the lookup results contain a /// non-function. Ovl_NonFunction }; OverloadKind CheckOverload(Scope *S, FunctionDecl *New, const LookupResult &OldDecls, NamedDecl *&OldDecl, bool IsForUsingDecl); bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl, bool ConsiderCudaAttrs = true, bool ConsiderRequiresClauses = true); enum class AllowedExplicit { /// Allow no explicit functions to be used. None, /// Allow explicit conversion functions but not explicit constructors. Conversions, /// Allow both explicit conversion functions and explicit constructors. All }; ImplicitConversionSequence TryImplicitConversion(Expr *From, QualType ToType, bool SuppressUserConversions, AllowedExplicit AllowExplicit, bool InOverloadResolution, bool CStyle, bool AllowObjCWritebackConversion); bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType); bool IsFloatingPointPromotion(QualType FromType, QualType ToType); bool IsComplexPromotion(QualType FromType, QualType ToType); bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCWritebackConversion(QualType FromType, QualType ToType, QualType &ConvertedType); bool IsBlockPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType); bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType, const FunctionProtoType *NewType, unsigned *ArgPos = nullptr); void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag, QualType FromType, QualType ToType); void maybeExtendBlockObject(ExprResult &E); CastKind PrepareCastToObjCObjectPointer(ExprResult &E); bool CheckPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath& BasePath, bool IgnoreBaseAccess, bool Diagnose = true); bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType &ConvertedType); bool CheckMemberPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath &BasePath, bool IgnoreBaseAccess); bool IsQualificationConversion(QualType FromType, QualType ToType, bool CStyle, bool &ObjCLifetimeConversion); bool IsFunctionConversion(QualType FromType, QualType ToType, QualType &ResultTy); bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType); bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg); ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity, const VarDecl *NRVOCandidate, QualType ResultType, Expr *Value, bool AllowNRVO = true); bool CanPerformAggregateInitializationForOverloadResolution( const InitializedEntity &Entity, InitListExpr *From); bool IsStringInit(Expr *Init, const ArrayType *AT); bool CanPerformCopyInitialization(const InitializedEntity &Entity, ExprResult Init); ExprResult PerformCopyInitialization(const InitializedEntity &Entity, SourceLocation EqualLoc, ExprResult Init, bool TopLevelOfInitList = false, bool AllowExplicit = false); ExprResult PerformObjectArgumentInitialization(Expr *From, NestedNameSpecifier 
*Qualifier, NamedDecl *FoundDecl, CXXMethodDecl *Method); /// Check that the lifetime of the initializer (and its subobjects) is /// sufficient for initializing the entity, and perform lifetime extension /// (when permitted) if not. void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init); ExprResult PerformContextuallyConvertToBool(Expr *From); ExprResult PerformContextuallyConvertToObjCPointer(Expr *From); /// Contexts in which a converted constant expression is required. enum CCEKind { CCEK_CaseValue, ///< Expression in a case label. CCEK_Enumerator, ///< Enumerator value with fixed underlying type. CCEK_TemplateArg, ///< Value of a non-type template parameter. CCEK_ArrayBound, ///< Array bound in array declarator or new-expression. CCEK_ConstexprIf, ///< Condition in a constexpr if statement. CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier. }; ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, llvm::APSInt &Value, CCEKind CCE); ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, APValue &Value, CCEKind CCE, NamedDecl *Dest = nullptr); /// Abstract base class used to perform a contextual implicit /// conversion from an expression to any type passing a filter. class ContextualImplicitConverter { public: bool Suppress; bool SuppressConversion; ContextualImplicitConverter(bool Suppress = false, bool SuppressConversion = false) : Suppress(Suppress), SuppressConversion(SuppressConversion) {} /// Determine whether the specified type is a valid destination type /// for this conversion. virtual bool match(QualType T) = 0; /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the expression has incomplete class type. virtual SemaDiagnosticBuilder diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the only matching conversion function /// is explicit. virtual SemaDiagnosticBuilder diagnoseExplicitConv( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; /// Emits a note for the explicit conversion function. virtual SemaDiagnosticBuilder noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when there are multiple possible conversion /// functions. virtual SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a note for one of the candidate conversions. virtual SemaDiagnosticBuilder noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when we picked a conversion function /// (for cases when we are not allowed to pick a conversion function). virtual SemaDiagnosticBuilder diagnoseConversion( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; virtual ~ContextualImplicitConverter() {} }; class ICEConvertDiagnoser : public ContextualImplicitConverter { bool AllowScopedEnumerations; public: ICEConvertDiagnoser(bool AllowScopedEnumerations, bool Suppress, bool SuppressConversion) : ContextualImplicitConverter(Suppress, SuppressConversion), AllowScopedEnumerations(AllowScopedEnumerations) {} /// Match an integral or (possibly scoped) enumeration type. 
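  // For illustration (hypothetical user code): the kind of contextual
  // implicit conversion this converter drives is what lets a class type with
  // a single viable conversion to an integral type appear, e.g., as a switch
  // condition or an array bound:
  //
  //   struct Index { constexpr operator int() const { return 2; } };
  //   int select(Index i) {
  //     switch (i) { case 2: return 1; default: return 0; }
  //   }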
bool match(QualType T) override; SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override { return diagnoseNotInt(S, Loc, T); } /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0; }; /// Perform a contextual implicit conversion. ExprResult PerformContextualImplicitConversion( SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter); enum ObjCSubscriptKind { OS_Array, OS_Dictionary, OS_Error }; ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE); // Note that LK_String is intentionally after the other literals, as // this is used for diagnostics logic. enum ObjCLiteralKind { LK_Array, LK_Dictionary, LK_Numeric, LK_Boxed, LK_String, LK_Block, LK_None }; ObjCLiteralKind CheckLiteralKind(Expr *FromE); ExprResult PerformObjectMemberConversion(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, NamedDecl *Member); // Members have to be NamespaceDecl* or TranslationUnitDecl*. // TODO: make this is a typesafe union. typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet; typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet; using ADLCallKind = CallExpr::ADLCallKind; void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = true, bool AllowExplicitConversion = false, ADLCallKind IsADLCandidate = ADLCallKind::NotADL, ConversionSequenceList EarlyConversions = None, OverloadCandidateParamOrder PO = {}); void AddFunctionCandidates(const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, bool SuppressUserConversions = false, bool PartialOverloading = false, bool FirstArgumentIsBase = false); void AddMethodCandidate(DeclAccessPair FoundDecl, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversion = false, OverloadCandidateParamOrder PO = {}); void AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, ConversionSequenceList EarlyConversions = None, OverloadCandidateParamOrder PO = {}); void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, OverloadCandidateParamOrder PO = {}); void AddTemplateOverloadCandidate( FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = true, ADLCallKind IsADLCandidate = ADLCallKind::NotADL, OverloadCandidateParamOrder PO = {}); bool CheckNonDependentConversions( FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes, ArrayRef<Expr *> 
Args, OverloadCandidateSet &CandidateSet, ConversionSequenceList &Conversions, bool SuppressUserConversions, CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(), Expr::Classification ObjectClassification = {}, OverloadCandidateParamOrder PO = {}); void AddConversionCandidate( CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddTemplateConversionCandidate( FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddSurrogateCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, const FunctionProtoType *Proto, Expr *Object, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddNonMemberOperatorCandidates( const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); void AddMemberOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, OverloadCandidateParamOrder PO = {}); void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool IsAssignmentOperator = false, unsigned NumContextualBoolArguments = 0); void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddArgumentDependentLookupCandidates(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, TemplateArgumentListInfo *ExplicitTemplateArgs, OverloadCandidateSet& CandidateSet, bool PartialOverloading = false); // Emit as a 'note' the specific overload candidate void NoteOverloadCandidate( NamedDecl *Found, FunctionDecl *Fn, OverloadCandidateRewriteKind RewriteKind = OverloadCandidateRewriteKind(), QualType DestType = QualType(), bool TakingAddress = false); // Emit as a series of 'note's all template and non-templates identified by // the expression Expr void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(), bool TakingAddress = false); /// Check the enable_if expressions on the given function. Returns the first /// failing attribute, or NULL if they were all successful. EnableIfAttr *CheckEnableIf(FunctionDecl *Function, SourceLocation CallLoc, ArrayRef<Expr *> Args, bool MissingImplicitThis = false); /// Find the failed Boolean condition within a given Boolean /// constant expression, and describe it with a string. std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// non-ArgDependent DiagnoseIfAttrs. /// /// Argument-dependent diagnose_if attributes should be checked each time a /// function is used as a direct callee of a function call. /// /// Returns true if any errors were emitted. bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function, const Expr *ThisArg, ArrayRef<const Expr *> Args, SourceLocation Loc); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// ArgDependent DiagnoseIfAttrs. /// /// Argument-independent diagnose_if attributes should be checked on every use /// of a function. 
/// /// Returns true if any errors were emitted. bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND, SourceLocation Loc); /// Returns whether the given function's address can be taken or not, /// optionally emitting a diagnostic if the address can't be taken. /// /// Returns false if taking the address of the function is illegal. bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function, bool Complain = false, SourceLocation Loc = SourceLocation()); // [PossiblyAFunctionType] --> [Return] // NonFunctionType --> NonFunctionType // R (A) --> R(A) // R (*)(A) --> R (A) // R (&)(A) --> R (A) // R (S::*)(A) --> R (A) QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType); FunctionDecl * ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr, QualType TargetType, bool Complain, DeclAccessPair &Found, bool *pHadMultipleCandidates = nullptr); FunctionDecl * resolveAddressOfSingleOverloadCandidate(Expr *E, DeclAccessPair &FoundResult); bool resolveAndFixAddressOfSingleOverloadCandidate( ExprResult &SrcExpr, bool DoFunctionPointerConversion = false); FunctionDecl * ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl, bool Complain = false, DeclAccessPair *Found = nullptr); bool ResolveAndFixSingleFunctionTemplateSpecialization( ExprResult &SrcExpr, bool DoFunctionPointerConverion = false, bool Complain = false, SourceRange OpRangeForComplaining = SourceRange(), QualType DestTypeForComplaining = QualType(), unsigned DiagIDForComplaining = 0); Expr *FixOverloadedFunctionReference(Expr *E, DeclAccessPair FoundDecl, FunctionDecl *Fn); ExprResult FixOverloadedFunctionReference(ExprResult, DeclAccessPair FoundDecl, FunctionDecl *Fn); void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool PartialOverloading = false); void AddOverloadedCallCandidates( LookupResult &R, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet); // An enum used to represent the different possible results of building a // range-based for loop. 
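  // For illustration (hypothetical user code): the entry points above resolve
  // a use of an overloaded function name against a target type, e.g.
  //
  //   void f(int);
  //   void f(double);
  //   void (*fp)(int) = &f;                         // target type selects f(int)
  //   auto g = static_cast<void (*)(double)>(f);    // selects f(double)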
enum ForRangeStatus { FRS_Success, FRS_NoViableFunction, FRS_DiagnosticIssued }; ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc, SourceLocation RangeLoc, const DeclarationNameInfo &NameInfo, LookupResult &MemberLookup, OverloadCandidateSet *CandidateSet, Expr *Range, ExprResult *CallExpr); ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc, Expr *ExecConfig, bool AllowTypoCorrection=true, bool CalleesAddressIsTaken=false); bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, MultiExprArg Args, SourceLocation RParenLoc, OverloadCandidateSet *CandidateSet, ExprResult *Result); ExprResult CreateUnresolvedLookupExpr(CXXRecordDecl *NamingClass, NestedNameSpecifierLoc NNSLoc, DeclarationNameInfo DNI, const UnresolvedSetImpl &Fns, bool PerformADL = true); ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *input, bool RequiresADL = true); void LookupOverloadedBinOp(OverloadCandidateSet &CandidateSet, OverloadedOperatorKind Op, const UnresolvedSetImpl &Fns, ArrayRef<Expr *> Args, bool RequiresADL = true); ExprResult CreateOverloadedBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS, bool RequiresADL = true, bool AllowRewrittenCandidates = true, FunctionDecl *DefaultedFn = nullptr); ExprResult BuildSynthesizedThreeWayComparison(SourceLocation OpLoc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS, FunctionDecl *DefaultedFn); ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc, SourceLocation RLoc, Expr *Base,Expr *Idx); ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc, bool AllowRecovery = false); ExprResult BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc, bool *NoArrowOperatorFound = nullptr); /// CheckCallReturnType - Checks that a call expression's return type is /// complete. Returns true on failure. The location passed in is the location /// that best represents the call. bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc, CallExpr *CE, FunctionDecl *FD); /// Helpers for dealing with blocks and functions. bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters, bool CheckParameterNames); void CheckCXXDefaultArguments(FunctionDecl *FD); void CheckExtraCXXDefaultArguments(Declarator &D); Scope *getNonFieldDeclScope(Scope *S); /// \name Name lookup /// /// These routines provide name lookup that is used during semantic /// analysis to resolve the various kinds of names (identifiers, /// overloaded operator names, constructor names, etc.) into zero or /// more declarations within a particular scope. The major entry /// points are LookupName, which performs unqualified name lookup, /// and LookupQualifiedName, which performs qualified name lookup. /// /// All name lookup is performed based on some specific criteria, /// which specify what names will be visible to name lookup and how /// far name lookup should work. These criteria are important both /// for capturing language semantics (certain lookups will ignore /// certain names, for example) and for performance, since name /// lookup is often a bottleneck in the compilation of C++. 
Name /// lookup criteria is specified via the LookupCriteria enumeration. /// /// The results of name lookup can vary based on the kind of name /// lookup performed, the current language, and the translation /// unit. In C, for example, name lookup will either return nothing /// (no entity found) or a single declaration. In C++, name lookup /// can additionally refer to a set of overloaded functions or /// result in an ambiguity. All of the possible results of name /// lookup are captured by the LookupResult class, which provides /// the ability to distinguish among them. //@{ /// Describes the kind of name lookup to perform. enum LookupNameKind { /// Ordinary name lookup, which finds ordinary names (functions, /// variables, typedefs, etc.) in C and most kinds of names /// (functions, variables, members, types, etc.) in C++. LookupOrdinaryName = 0, /// Tag name lookup, which finds the names of enums, classes, /// structs, and unions. LookupTagName, /// Label name lookup. LookupLabel, /// Member name lookup, which finds the names of /// class/struct/union members. LookupMemberName, /// Look up of an operator name (e.g., operator+) for use with /// operator overloading. This lookup is similar to ordinary name /// lookup, but will ignore any declarations that are class members. LookupOperatorName, /// Look up a name following ~ in a destructor name. This is an ordinary /// lookup, but prefers tags to typedefs. LookupDestructorName, /// Look up of a name that precedes the '::' scope resolution /// operator in C++. This lookup completely ignores operator, object, /// function, and enumerator names (C++ [basic.lookup.qual]p1). LookupNestedNameSpecifierName, /// Look up a namespace name within a C++ using directive or /// namespace alias definition, ignoring non-namespace names (C++ /// [basic.lookup.udir]p1). LookupNamespaceName, /// Look up all declarations in a scope with the given name, /// including resolved using declarations. This is appropriate /// for checking redeclarations for a using declaration. LookupUsingDeclName, /// Look up an ordinary name that is going to be redeclared as a /// name with linkage. This lookup ignores any declarations that /// are outside of the current scope unless they have linkage. See /// C99 6.2.2p4-5 and C++ [basic.link]p6. LookupRedeclarationWithLinkage, /// Look up a friend of a local class. This lookup does not look /// outside the innermost non-class scope. See C++11 [class.friend]p11. LookupLocalFriendName, /// Look up the name of an Objective-C protocol. LookupObjCProtocolName, /// Look up implicit 'self' parameter of an objective-c method. LookupObjCImplicitSelfParam, /// Look up the name of an OpenMP user-defined reduction operation. LookupOMPReductionName, /// Look up the name of an OpenMP user-defined mapper. LookupOMPMapperName, /// Look up any declaration with any name. LookupAnyName }; /// Specifies whether (or how) name lookup is being performed for a /// redeclaration (vs. a reference). enum RedeclarationKind { /// The lookup is a reference to this name that is not for the /// purpose of redeclaring the name. NotForRedeclaration = 0, /// The lookup results will be used for redeclaration of a name, /// if an entity by that name already exists and is visible. ForVisibleRedeclaration, /// The lookup results will be used for redeclaration of a name /// with external linkage; non-visible lookup results with external linkage /// may also be found. 
ForExternalRedeclaration }; RedeclarationKind forRedeclarationInCurContext() { // A declaration with an owning module for linkage can never link against // anything that is not visible. We don't need to check linkage here; if // the context has internal linkage, redeclaration lookup won't find things // from other TUs, and we can't safely compute linkage yet in general. if (cast<Decl>(CurContext) ->getOwningModuleForLinkage(/*IgnoreLinkage*/true)) return ForVisibleRedeclaration; return ForExternalRedeclaration; } /// The possible outcomes of name lookup for a literal operator. enum LiteralOperatorLookupResult { /// The lookup resulted in an error. LOLR_Error, /// The lookup found no match but no diagnostic was issued. LOLR_ErrorNoDiagnostic, /// The lookup found a single 'cooked' literal operator, which /// expects a normal literal to be built and passed to it. LOLR_Cooked, /// The lookup found a single 'raw' literal operator, which expects /// a string literal containing the spelling of the literal token. LOLR_Raw, /// The lookup found an overload set of literal operator templates, /// which expect the characters of the spelling of the literal token to be /// passed as a non-type template argument pack. LOLR_Template, /// The lookup found an overload set of literal operator templates, /// which expect the character type and characters of the spelling of the /// string literal token to be passed as template arguments. LOLR_StringTemplatePack, }; SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D, CXXSpecialMember SM, bool ConstArg, bool VolatileArg, bool RValueThis, bool ConstThis, bool VolatileThis); typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator; typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)> TypoRecoveryCallback; private: bool CppLookupName(LookupResult &R, Scope *S); struct TypoExprState { std::unique_ptr<TypoCorrectionConsumer> Consumer; TypoDiagnosticGenerator DiagHandler; TypoRecoveryCallback RecoveryHandler; TypoExprState(); TypoExprState(TypoExprState &&other) noexcept; TypoExprState &operator=(TypoExprState &&other) noexcept; }; /// The set of unhandled TypoExprs and their associated state. llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos; /// Creates a new TypoExpr AST node. TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC, SourceLocation TypoLoc); // The set of known/encountered (unique, canonicalized) NamespaceDecls. // // The boolean value will be true to indicate that the namespace was loaded // from an AST/PCH file, or false otherwise. llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces; /// Whether we have already loaded known namespaces from an extenal /// source. bool LoadedExternalKnownNamespaces; /// Helper for CorrectTypo and CorrectTypoDelayed used to create and /// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction /// should be skipped entirely. std::unique_ptr<TypoCorrectionConsumer> makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, DeclContext *MemberContext, bool EnteringContext, const ObjCObjectPointerType *OPT, bool ErrorRecovery); public: const TypoExprState &getTypoExprState(TypoExpr *TE) const; /// Clears the state of the given TypoExpr. void clearDelayedTypo(TypoExpr *TE); /// Look up a name, looking for a single declaration. 
Return /// null if the results were absent, ambiguous, or overloaded. /// /// It is preferable to use the elaborated form and explicitly handle /// ambiguity and overloaded. NamedDecl *LookupSingleName(Scope *S, DeclarationName Name, SourceLocation Loc, LookupNameKind NameKind, RedeclarationKind Redecl = NotForRedeclaration); bool LookupBuiltin(LookupResult &R); void LookupNecessaryTypesForBuiltin(Scope *S, unsigned ID); bool LookupName(LookupResult &R, Scope *S, bool AllowBuiltinCreation = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, bool InUnqualifiedLookup = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, CXXScopeSpec &SS); bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS, bool AllowBuiltinCreation = false, bool EnteringContext = false); ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc, RedeclarationKind Redecl = NotForRedeclaration); bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class); void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S, UnresolvedSetImpl &Functions); LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc, SourceLocation GnuLabelLoc = SourceLocation()); DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class); CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class); CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class); bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id); LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R, ArrayRef<QualType> ArgTys, bool AllowRaw, bool AllowTemplate, bool AllowStringTemplate, bool DiagnoseMissing, StringLiteral *StringLit = nullptr); bool isKnownName(StringRef name); /// Status of the function emission on the CUDA/HIP/OpenMP host/device attrs. enum class FunctionEmissionStatus { Emitted, CUDADiscarded, // Discarded due to CUDA/HIP hostness OMPDiscarded, // Discarded due to OpenMP hostness TemplateDiscarded, // Discarded due to uninstantiated templates Unknown, }; FunctionEmissionStatus getEmissionStatus(FunctionDecl *Decl, bool Final = false); // Whether the callee should be ignored in CUDA/HIP/OpenMP host/device check. bool shouldIgnoreInHostDeviceCheck(FunctionDecl *Callee); void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, ADLResult &Functions); void LookupVisibleDecls(Scope *S, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool LoadExternal = true); void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool IncludeDependentBases = false, bool LoadExternal = true); enum CorrectTypoKind { CTK_NonError, // CorrectTypo used in a non error recovery situation. CTK_ErrorRecovery // CorrectTypo used in normal error recovery. 
}; TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr, bool RecordFailure = true); TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr); /// Process any TypoExprs in the given Expr and its children, /// generating diagnostics as appropriate and returning a new Expr if there /// were typos that were all successfully corrected and ExprError if one or /// more typos could not be corrected. /// /// \param E The Expr to check for TypoExprs. /// /// \param InitDecl A VarDecl to avoid because the Expr being corrected is its /// initializer. /// /// \param RecoverUncorrectedTypos If true, when typo correction fails, it /// will rebuild the given Expr with all TypoExprs degraded to RecoveryExprs. /// /// \param Filter A function applied to a newly rebuilt Expr to determine if /// it is an acceptable/usable result from a single combination of typo /// corrections. As long as the filter returns ExprError, different /// combinations of corrections will be tried until all are exhausted. ExprResult CorrectDelayedTyposInExpr( Expr *E, VarDecl *InitDecl = nullptr, bool RecoverUncorrectedTypos = false, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }); ExprResult CorrectDelayedTyposInExpr( ExprResult ER, VarDecl *InitDecl = nullptr, bool RecoverUncorrectedTypos = false, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }) { return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), InitDecl, RecoverUncorrectedTypos, Filter); } void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, bool ErrorRecovery = true); void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, const PartialDiagnostic &PrevNote, bool ErrorRecovery = true); void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F); void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc, ArrayRef<Expr *> Args, AssociatedNamespaceSet &AssociatedNamespaces, AssociatedClassSet &AssociatedClasses); void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S, bool ConsiderLinkage, bool AllowInlineNamespace); bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old); void DiagnoseAmbiguousLookup(LookupResult &Result); //@} /// Attempts to produce a RecoveryExpr after some AST node cannot be created. 
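  // For illustration (hypothetical user code, approximate diagnostic text):
  // the typo-correction entry points above are what turn a reference to an
  // unknown name into a suggestion, e.g.
  //
  //   int count = 0;
  //   int next = cuont + 1;   // error: use of undeclared identifier 'cuont';
  //                           // did you mean 'count'?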
ExprResult CreateRecoveryExpr(SourceLocation Begin, SourceLocation End, ArrayRef<Expr *> SubExprs, QualType T = QualType()); ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id, SourceLocation IdLoc, bool TypoCorrection = false); FunctionDecl *CreateBuiltin(IdentifierInfo *II, QualType Type, unsigned ID, SourceLocation Loc); NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID, Scope *S, bool ForRedeclaration, SourceLocation Loc); NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II, Scope *S); void AddKnownFunctionAttributesForReplaceableGlobalAllocationFunction( FunctionDecl *FD); void AddKnownFunctionAttributes(FunctionDecl *FD); // More parsing and symbol table subroutines. void ProcessPragmaWeak(Scope *S, Decl *D); // Decl attributes - this routine is the top level dispatcher. void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD); // Helper for delayed processing of attributes. void ProcessDeclAttributeDelayed(Decl *D, const ParsedAttributesView &AttrList); void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL, bool IncludeCXX11Attributes = true); bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl, const ParsedAttributesView &AttrList); void checkUnusedDeclAttributes(Declarator &D); /// Determine if type T is a valid subject for a nonnull and similar /// attributes. By default, we look through references (the behavior used by /// nonnull), but if the second parameter is true, then we treat a reference /// type as valid. bool isValidPointerAttrType(QualType T, bool RefOkay = false); bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value); bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC, const FunctionDecl *FD = nullptr); bool CheckAttrTarget(const ParsedAttr &CurrAttr); bool CheckAttrNoArgs(const ParsedAttr &CurrAttr); bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum, StringRef &Str, SourceLocation *ArgLocation = nullptr); bool checkSectionName(SourceLocation LiteralLoc, StringRef Str); bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str); bool checkMSInheritanceAttrOnDefinition( CXXRecordDecl *RD, SourceRange Range, bool BestCase, MSInheritanceModel SemanticSpelling); void CheckAlignasUnderalignment(Decl *D); /// Adjust the calling convention of a method to be the ABI default if it /// wasn't specified explicitly. This handles method types formed from /// function type typedefs and typename template arguments. void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor, SourceLocation Loc); // Check if there is an explicit attribute, but only look through parens. // The intent is to look for an attribute on the current declarator, but not // one that came from a typedef. bool hasExplicitCallingConv(QualType T); /// Get the outermost AttributedType node that sets a calling convention. /// Valid types should not have multiple attributes with different CCs. const AttributedType *getCallingConvAttributedType(QualType T) const; /// Stmt attributes - this routine is the top level dispatcher. 
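  // For illustration (hypothetical user code): checkStringLiteralArgumentAttr
  // and checkSectionName above validate attributes whose argument must be a
  // string literal, e.g.
  //
  //   __attribute__((section(".fast_data"))) int counters[16];
  //   __attribute__((section(42))) int bad;   // rejected: argument is not a string literal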
  StmtResult ProcessStmtAttributes(Stmt *Stmt,
                                   const ParsedAttributesView &Attrs,
                                   SourceRange Range);

  void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
                                   ObjCMethodDecl *MethodDecl,
                                   bool IsProtocolMethodDecl);

  void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
                                        ObjCMethodDecl *Overridden,
                                        bool IsProtocolMethodDecl);

  /// WarnExactTypedMethods - This routine issues a warning if method
  /// implementation declaration matches exactly that of its declaration.
  void WarnExactTypedMethods(ObjCMethodDecl *Method,
                             ObjCMethodDecl *MethodDecl,
                             bool IsProtocolMethodDecl);

  typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;

  /// CheckImplementationIvars - This routine checks if the instance variables
  /// listed in the implementation match those listed in the interface.
  void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
                                ObjCIvarDecl **Fields, unsigned nIvars,
                                SourceLocation Loc);

  /// ImplMethodsVsClassMethods - This is the main routine to warn if any
  /// method remains unimplemented in the class or category \@implementation.
  void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
                                 ObjCContainerDecl* IDecl,
                                 bool IncompleteImpl = false);

  /// DiagnoseUnimplementedProperties - This routine warns on those properties
  /// which must be implemented by this implementation.
  void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
                                       ObjCContainerDecl *CDecl,
                                       bool SynthesizeProperties);

  /// Diagnose any null-resettable synthesized setters.
  void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);

  /// DefaultSynthesizeProperties - This routine default synthesizes all
  /// properties which must be synthesized in the class's \@implementation.
  void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
                                   ObjCInterfaceDecl *IDecl,
                                   SourceLocation AtEnd);
  void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd);

  /// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
  /// an ivar synthesized for 'Method' and 'Method' is a property accessor
  /// declared in class 'IFace'.
  bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
                                      ObjCMethodDecl *Method,
                                      ObjCIvarDecl *IV);

  /// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the
  /// ivar which backs the property is not used in the property's accessor.
  void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
                                           const ObjCImplementationDecl *ImplD);

  /// GetIvarBackingPropertyAccessor - If method is a property setter/getter
  /// and its property has a backing ivar, returns this ivar; otherwise,
  /// returns NULL. It also returns ivar's property on success.
  ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
                                               const ObjCPropertyDecl *&PDecl) const;

  /// Called by ActOnProperty to handle \@property declarations in
  /// class extensions.
  ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
                      SourceLocation AtLoc,
                      SourceLocation LParenLoc,
                      FieldDeclarator &FD,
                      Selector GetterSel,
                      SourceLocation GetterNameLoc,
                      Selector SetterSel,
                      SourceLocation SetterNameLoc,
                      const bool isReadWrite,
                      unsigned &Attributes,
                      const unsigned AttributesAsWritten,
                      QualType T,
                      TypeSourceInfo *TSI,
                      tok::ObjCKeywordKind MethodImplKind);

  /// Called by ActOnProperty and HandlePropertyInClassExtension to
  /// handle creating the ObjcPropertyDecl for a category or \@interface.
  ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
                                       ObjCContainerDecl *CDecl,
                                       SourceLocation AtLoc,
                                       SourceLocation LParenLoc,
                                       FieldDeclarator &FD,
                                       Selector GetterSel,
                                       SourceLocation GetterNameLoc,
                                       Selector SetterSel,
                                       SourceLocation SetterNameLoc,
                                       const bool isReadWrite,
                                       const unsigned Attributes,
                                       const unsigned AttributesAsWritten,
                                       QualType T,
                                       TypeSourceInfo *TSI,
                                       tok::ObjCKeywordKind MethodImplKind,
                                       DeclContext *lexicalDC = nullptr);

  /// AtomicPropertySetterGetterRules - This routine enforces the rule (via
  /// warning) when atomic property has one but not the other user-declared
  /// setter or getter.
  void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
                                       ObjCInterfaceDecl* IDecl);

  void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);

  void DiagnoseMissingDesignatedInitOverrides(
      const ObjCImplementationDecl *ImplD, const ObjCInterfaceDecl *IFD);

  void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);

  enum MethodMatchStrategy {
    MMS_loose,
    MMS_strict
  };

  /// MatchTwoMethodDeclarations - Checks if two methods' types match and
  /// returns true, or false, accordingly.
  bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
                                  const ObjCMethodDecl *PrevMethod,
                                  MethodMatchStrategy strategy = MMS_strict);

  /// MatchAllMethodDeclarations - Check methods declared in interface
  /// or protocol against those declared in their implementations.
  void MatchAllMethodDeclarations(const SelectorSet &InsMap,
                                  const SelectorSet &ClsMap,
                                  SelectorSet &InsMapSeen,
                                  SelectorSet &ClsMapSeen,
                                  ObjCImplDecl* IMPDecl,
                                  ObjCContainerDecl* IDecl,
                                  bool &IncompleteImpl,
                                  bool ImmediateClass,
                                  bool WarnCategoryMethodImpl=false);

  /// CheckCategoryVsClassMethodMatches - Checks that methods implemented in
  /// a category match those implemented in its primary class and
  /// warns each time an exact match is found.
  void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);

  /// Add the given method to the list of globally-known methods.
  void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);

  /// Returns default addr space for method qualifiers.
  LangAS getDefaultCXXMethodAddrSpace() const;

private:
  /// AddMethodToGlobalPool - Add an instance or factory method to the global
  /// pool. See description of AddInstanceMethodToGlobalPool.
  void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);

  /// LookupMethodInGlobalPool - Returns the instance or factory method and
  /// optionally warns if there are multiple signatures.
  ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
                                           bool receiverIdOrClass,
                                           bool instance);

public:
  /// - Returns instance or factory methods in global method pool for
  /// given selector. It checks the desired kind first, if none is found, and
  /// parameter checkTheOther is set, it then checks the other kind. If no such
  /// method or only one method is found, function returns false; otherwise, it
  /// returns true.
bool CollectMultipleMethodsInGlobalPool(Selector Sel, SmallVectorImpl<ObjCMethodDecl*>& Methods, bool InstanceFirst, bool CheckTheOther, const ObjCObjectType *TypeBound = nullptr); bool AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod, SourceRange R, bool receiverIdOrClass, SmallVectorImpl<ObjCMethodDecl*>& Methods); void DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods, Selector Sel, SourceRange R, bool receiverIdOrClass); private: /// - Returns a selector which best matches given argument list or /// nullptr if none could be found ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args, bool IsInstance, SmallVectorImpl<ObjCMethodDecl*>& Methods); /// Record the typo correction failure and return an empty correction. TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc, bool RecordFailure = true) { if (RecordFailure) TypoCorrectionFailures[Typo].insert(TypoLoc); return TypoCorrection(); } public: /// AddInstanceMethodToGlobalPool - All instance methods in a translation /// unit are added to a global pool. This allows us to efficiently associate /// a selector with a method declaraation for purposes of typechecking /// messages sent to "id" (where the class of the object is unknown). void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) { AddMethodToGlobalPool(Method, impl, /*instance*/true); } /// AddFactoryMethodToGlobalPool - Same as above, but for factory methods. void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) { AddMethodToGlobalPool(Method, impl, /*instance*/false); } /// AddAnyMethodToGlobalPool - Add any method, instance or factory to global /// pool. void AddAnyMethodToGlobalPool(Decl *D); /// LookupInstanceMethodInGlobalPool - Returns the method and warns if /// there are multiple signatures. ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass=false) { return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, /*instance*/true); } /// LookupFactoryMethodInGlobalPool - Returns the method and warns if /// there are multiple signatures. ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass=false) { return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, /*instance*/false); } const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel, QualType ObjectType=QualType()); /// LookupImplementedMethodInGlobalPool - Returns the method which has an /// implementation. ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel); /// CollectIvarsToConstructOrDestruct - Collect those ivars which require /// initialization. void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI, SmallVectorImpl<ObjCIvarDecl*> &Ivars); //===--------------------------------------------------------------------===// // Statement Parsing Callbacks: SemaStmt.cpp. public: class FullExprArg { public: FullExprArg() : E(nullptr) { } FullExprArg(Sema &actions) : E(nullptr) { } ExprResult release() { return E; } Expr *get() const { return E; } Expr *operator->() { return E; } private: // FIXME: No need to make the entire Sema class a friend when it's just // Sema::MakeFullExpr that needs access to the constructor below. friend class Sema; explicit FullExprArg(Expr *expr) : E(expr) {} Expr *E; }; FullExprArg MakeFullExpr(Expr *Arg) { return MakeFullExpr(Arg, Arg ? 
Arg->getExprLoc() : SourceLocation()); } FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) { return FullExprArg( ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get()); } FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) { ExprResult FE = ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(), /*DiscardedValue*/ true); return FullExprArg(FE.get()); } StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true); StmtResult ActOnExprStmtError(); StmtResult ActOnNullStmt(SourceLocation SemiLoc, bool HasLeadingEmptyMacro = false); void ActOnStartOfCompoundStmt(bool IsStmtExpr); void ActOnAfterCompoundStatementLeadingPragmas(); void ActOnFinishOfCompoundStmt(); StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R, ArrayRef<Stmt *> Elts, bool isStmtExpr); /// A RAII object to enter scope of a compound statement. class CompoundScopeRAII { public: CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) { S.ActOnStartOfCompoundStmt(IsStmtExpr); } ~CompoundScopeRAII() { S.ActOnFinishOfCompoundStmt(); } private: Sema &S; }; /// An RAII helper that pops function a function scope on exit. struct FunctionScopeRAII { Sema &S; bool Active; FunctionScopeRAII(Sema &S) : S(S), Active(true) {} ~FunctionScopeRAII() { if (Active) S.PopFunctionScopeInfo(); } void disable() { Active = false; } }; StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl, SourceLocation StartLoc, SourceLocation EndLoc); void ActOnForEachDeclStmt(DeclGroupPtrTy Decl); StmtResult ActOnForEachLValueExpr(Expr *E); ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val); StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS, SourceLocation DotDotDotLoc, ExprResult RHS, SourceLocation ColonLoc); void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt); StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc, SourceLocation ColonLoc, Stmt *SubStmt, Scope *CurScope); StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl, SourceLocation ColonLoc, Stmt *SubStmt); StmtResult ActOnAttributedStmt(SourceLocation AttrLoc, ArrayRef<const Attr*> Attrs, Stmt *SubStmt); class ConditionResult; StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr, SourceLocation LParenLoc, Stmt *InitStmt, ConditionResult Cond, SourceLocation RParenLoc, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr, SourceLocation LParenLoc, Stmt *InitStmt, ConditionResult Cond, SourceLocation RParenLoc, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc, SourceLocation LParenLoc, Stmt *InitStmt, ConditionResult Cond, SourceLocation RParenLoc); StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch, Stmt *Body); StmtResult ActOnWhileStmt(SourceLocation WhileLoc, SourceLocation LParenLoc, ConditionResult Cond, SourceLocation RParenLoc, Stmt *Body); StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body, SourceLocation WhileLoc, SourceLocation CondLParen, Expr *Cond, SourceLocation CondRParen); StmtResult ActOnForStmt(SourceLocation ForLoc, SourceLocation LParenLoc, Stmt *First, ConditionResult Second, FullExprArg Third, SourceLocation RParenLoc, Stmt *Body); ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc, Expr *collection); StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc, Stmt *First, Expr *collection, SourceLocation RParenLoc); StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body); enum BuildForRangeKind { /// 
Initial building of a for-range statement. BFRK_Build, /// Instantiation or recovery rebuild of a for-range statement. Don't /// attempt any typo-correction. BFRK_Rebuild, /// Determining whether a for-range statement could be built. Avoid any /// unnecessary or irreversible actions. BFRK_Check }; StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, Stmt *LoopVar, SourceLocation ColonLoc, Expr *Collection, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, SourceLocation ColonLoc, Stmt *RangeDecl, Stmt *Begin, Stmt *End, Expr *Cond, Expr *Inc, Stmt *LoopVarDecl, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body); StmtResult ActOnGotoStmt(SourceLocation GotoLoc, SourceLocation LabelLoc, LabelDecl *TheDecl); StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc, Expr *DestExp); StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope); StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope); void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, unsigned NumParams); typedef std::pair<StringRef, QualType> CapturedParamNameType; void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, ArrayRef<CapturedParamNameType> Params, unsigned OpenMPCaptureLevel = 0); StmtResult ActOnCapturedRegionEnd(Stmt *S); void ActOnCapturedRegionError(); RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD, SourceLocation Loc, unsigned NumParams); enum CopyElisionSemanticsKind { CES_Strict = 0, CES_AllowParameters = 1, CES_AllowDifferentTypes = 2, CES_AllowExceptionVariables = 4, CES_AllowRValueReferenceType = 8, CES_ImplicitlyMovableCXX11CXX14CXX17 = (CES_AllowParameters | CES_AllowDifferentTypes), CES_ImplicitlyMovableCXX20 = (CES_AllowParameters | CES_AllowDifferentTypes | CES_AllowExceptionVariables | CES_AllowRValueReferenceType), }; VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E, CopyElisionSemanticsKind CESK); bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD, CopyElisionSemanticsKind CESK); StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, Scope *CurScope); StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple, bool IsVolatile, unsigned NumOutputs, unsigned NumInputs, IdentifierInfo **Names, MultiExprArg Constraints, MultiExprArg Exprs, Expr *AsmString, MultiExprArg Clobbers, unsigned NumLabels, SourceLocation RParenLoc); void FillInlineAsmIdentifierInfo(Expr *Res, llvm::InlineAsmIdentifierInfo &Info); ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool IsUnevaluatedContext); bool LookupInlineAsmField(StringRef Base, StringRef Member, unsigned &Offset, SourceLocation AsmLoc); ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member, SourceLocation AsmLoc); StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc, ArrayRef<Token> AsmToks, StringRef AsmString, unsigned NumOutputs, unsigned NumInputs, ArrayRef<StringRef> Constraints, ArrayRef<StringRef> Clobbers, ArrayRef<Expr*> Exprs, SourceLocation EndLoc); LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName, 
SourceLocation Location, bool AlwaysCreate); VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id, bool Invalid = false); Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D); StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen, Decl *Parm, Stmt *Body); StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body); StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try, MultiStmtArg Catch, Stmt *Finally); StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw); StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw, Scope *CurScope); ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc, Expr *operand); StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc, Expr *SynchExpr, Stmt *SynchBody); StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body); VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id); Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D); StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc, Decl *ExDecl, Stmt *HandlerBlock); StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock, ArrayRef<Stmt *> Handlers); StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ? SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); StmtResult ActOnSEHExceptBlock(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); void ActOnStartSEHFinallyBlock(); void ActOnAbortSEHFinallyBlock(); StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block); StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope); void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock); bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const; /// If it's a file scoped decl that must warn if not used, keep track /// of it. void MarkUnusedFileScopedDecl(const DeclaratorDecl *D); /// DiagnoseUnusedExprResult - If the statement passed in is an expression /// whose result is unused, warn. void DiagnoseUnusedExprResult(const Stmt *S); void DiagnoseUnusedNestedTypedefs(const RecordDecl *D); void DiagnoseUnusedDecl(const NamedDecl *ND); /// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null /// statement as a \p Body, and it is located on the same line. /// /// This helps prevent bugs due to typos, such as: /// if (condition); /// do_stuff(); void DiagnoseEmptyStmtBody(SourceLocation StmtLoc, const Stmt *Body, unsigned DiagID); /// Warn if a for/while loop statement \p S, which is followed by /// \p PossibleBody, has a suspicious null statement as a body. void DiagnoseEmptyLoopBody(const Stmt *S, const Stmt *PossibleBody); /// Warn if a value is moved to itself. void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, SourceLocation OpLoc); /// Warn if we're implicitly casting from a _Nullable pointer type to a /// _Nonnull one. void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType, SourceLocation Loc); /// Warn when implicitly casting 0 to nullptr. 
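  // For illustration (hypothetical user code) of the empty-body and self-move
  // diagnostics declared above:
  //
  //   #include <utility>
  //   #include <vector>
  //   void reset(std::vector<int> &v) {
  //     if (v.empty());        // -Wempty-body: the stray ';' makes the branch empty
  //       v.push_back(0);
  //     v = std::move(v);      // -Wself-move: the value is moved to itself
  //   }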
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E); ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) { return DelayedDiagnostics.push(pool); } void PopParsingDeclaration(ParsingDeclState state, Decl *decl); typedef ProcessingContextState ParsingClassState; ParsingClassState PushParsingClass() { ParsingClassDepth++; return DelayedDiagnostics.pushUndelayed(); } void PopParsingClass(ParsingClassState state) { ParsingClassDepth--; DelayedDiagnostics.popUndelayed(state); } void redelayDiagnostics(sema::DelayedDiagnosticPool &pool); void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs, const ObjCInterfaceDecl *UnknownObjCClass, bool ObjCPropertyAccess, bool AvoidPartialAvailabilityChecks = false, ObjCInterfaceDecl *ClassReceiver = nullptr); bool makeUnavailableInSystemHeader(SourceLocation loc, UnavailableAttr::ImplicitReason reason); /// Issue any -Wunguarded-availability warnings in \c FD void DiagnoseUnguardedAvailabilityViolations(Decl *FD); void handleDelayedAvailabilityCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); //===--------------------------------------------------------------------===// // Expression Parsing Callbacks: SemaExpr.cpp. bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid); bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs, const ObjCInterfaceDecl *UnknownObjCClass = nullptr, bool ObjCPropertyAccess = false, bool AvoidPartialAvailabilityChecks = false, ObjCInterfaceDecl *ClassReciever = nullptr); void NoteDeletedFunction(FunctionDecl *FD); void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD); bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD, ObjCMethodDecl *Getter, SourceLocation Loc); void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc, ArrayRef<Expr *> Args); void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl }; void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); void PopExpressionEvaluationContext(); void DiscardCleanupsInEvaluationContext(); ExprResult TransformToPotentiallyEvaluated(Expr *E); ExprResult HandleExprEvaluationContextForTypeof(Expr *E); ExprResult CheckUnevaluatedOperand(Expr *E); void CheckUnusedVolatileAssignment(Expr *E); ExprResult ActOnConstantExpression(ExprResult Res); // Functions for marking a declaration referenced. These functions also // contain the relevant logic for marking if a reference to a function or // variable is an odr-use (in the C++11 sense). There are separate variants // for expressions referring to a decl; these exist because odr-use marking // needs to be delayed for some constant variables when we build one of the // named expressions. // // MightBeOdrUse indicates whether the use could possibly be an odr-use, and // should usually be true. This only needs to be set to false if the lack of // odr-use cannot be determined from the current context (for instance, // because the name denotes a virtual function and was written without an // explicit nested-name-specifier). 
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse); void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func, bool MightBeOdrUse = true); void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var); void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr); void MarkMemberReferenced(MemberExpr *E); void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E); void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc, unsigned CapturingScopeIndex); ExprResult CheckLValueToRValueConversionOperand(Expr *E); void CleanupVarDeclMarking(); enum TryCaptureKind { TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef }; /// Try to capture the given variable. /// /// \param Var The variable to capture. /// /// \param Loc The location at which the capture occurs. /// /// \param Kind The kind of capture, which may be implicit (for either a /// block or a lambda), or explicit by-value or by-reference (for a lambda). /// /// \param EllipsisLoc The location of the ellipsis, if one is provided in /// an explicit lambda capture. /// /// \param BuildAndDiagnose Whether we are actually supposed to add the /// captures or diagnose errors. If false, this routine merely check whether /// the capture can occur without performing the capture itself or complaining /// if the variable cannot be captured. /// /// \param CaptureType Will be set to the type of the field used to capture /// this variable in the innermost block or lambda. Only valid when the /// variable can be captured. /// /// \param DeclRefType Will be set to the type of a reference to the capture /// from within the current scope. Only valid when the variable can be /// captured. /// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// variables that may or may not be used in certain specializations of /// a nested generic lambda. /// /// \returns true if an error occurred (i.e., the variable cannot be /// captured) and false if the capture succeeded. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind, SourceLocation EllipsisLoc, bool BuildAndDiagnose, QualType &CaptureType, QualType &DeclRefType, const unsigned *const FunctionScopeIndexToStopAt); /// Try to capture the given variable. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind = TryCapture_Implicit, SourceLocation EllipsisLoc = SourceLocation()); /// Checks if the variable must be captured. bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc); /// Given a variable, determine the type that a reference to that /// variable will have in the given scope. QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc); /// Mark all of the declarations referenced within a particular AST node as /// referenced. Used when template instantiation instantiates a non-dependent /// type -- entities referenced by the type are now referenced. void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T); void MarkDeclarationsReferencedInExpr(Expr *E, bool SkipLocalVariables = false); /// Try to recover by turning the given expression into a /// call. Returns true if recovery was attempted or an error was /// emitted; this may also leave the ExprResult invalid. 
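  // A hedged sketch of the recovery described above (isEmpty is hypothetical):
  //   bool isEmpty();
  //   if (isEmpty) { /* ... */ }  // function name used as a condition; recovery
  //                               // may re-try the expression as the call isEmpty()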
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD, bool ForceComplain = false, bool (*IsPlausibleResult)(QualType) = nullptr); /// Figure out if an expression could be turned into a call. bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy, UnresolvedSetImpl &NonTemplateOverloads); /// Try to convert an expression \p E to type \p Ty. Returns the result of the /// conversion. ExprResult tryConvertExprToType(Expr *E, QualType Ty); /// Conditionally issue a diagnostic based on the current /// evaluation context. /// /// \param Statement If Statement is non-null, delay reporting the /// diagnostic until the function body is parsed, and then do a basic /// reachability analysis to determine if the statement is reachable. /// If it is unreachable, the diagnostic will not be emitted. bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement, const PartialDiagnostic &PD); /// Similar, but diagnostic is only produced if all the specified statements /// are reachable. bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts, const PartialDiagnostic &PD); // Primary Expressions. SourceRange getExprRange(Expr *E) const; ExprResult ActOnIdExpression( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand, CorrectionCandidateCallback *CCC = nullptr, bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr); void DecomposeUnqualifiedId(const UnqualifiedId &Id, TemplateArgumentListInfo &Buffer, DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *&TemplateArgs); bool DiagnoseDependentMemberLookup(LookupResult &R); bool DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R, CorrectionCandidateCallback &CCC, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr); DeclResult LookupIvarInObjCMethod(LookupResult &Lookup, Scope *S, IdentifierInfo *II); ExprResult BuildIvarRefExpr(Scope *S, SourceLocation Loc, ObjCIvarDecl *IV); ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S, IdentifierInfo *II, bool AllowBuiltinCreation=false); ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, bool isAddressOfOperand, const TemplateArgumentListInfo *TemplateArgs); /// If \p D cannot be odr-used in the current expression evaluation context, /// return a reason explaining why. Otherwise, return NOUR_None. 
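  // Illustrative non-odr-use cases under the usual [basic.def.odr] rules:
  //   constexpr int N = 4;
  //   int a[N];               // N's value is usable in a constant expression and
  //                           // the lvalue-to-rvalue conversion is applied
  //                           // immediately, so N is not odr-used here
  //   unsigned s = sizeof(a); // unevaluated operand, so 'a' is not odr-used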
NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D); DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, SourceLocation Loc, const CXXScopeSpec *SS = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, const CXXScopeSpec *SS = nullptr, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, NestedNameSpecifierLoc NNS, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); ExprResult BuildAnonymousStructUnionMemberReference( const CXXScopeSpec &SS, SourceLocation nameLoc, IndirectFieldDecl *indirectField, DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none), Expr *baseObjectExpr = nullptr, SourceLocation opLoc = SourceLocation()); ExprResult BuildPossibleImplicitMemberExpr( const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, UnresolvedLookupExpr *AsULE = nullptr); ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, bool IsDefiniteInstance, const Scope *S); bool UseArgumentDependentLookup(const CXXScopeSpec &SS, const LookupResult &R, bool HasTrailingLParen); ExprResult BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, bool IsAddressOfOperand, const Scope *S, TypeSourceInfo **RecoveryTSI = nullptr); ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R, bool NeedsADL, bool AcceptInvalidDecl = false); ExprResult BuildDeclarationNameExpr( const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr, bool AcceptInvalidDecl = false); ExprResult BuildLiteralOperatorCall(LookupResult &R, DeclarationNameInfo &SuffixInfo, ArrayRef<Expr *> Args, SourceLocation LitEndLoc, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); ExprResult BuildPredefinedExpr(SourceLocation Loc, PredefinedExpr::IdentKind IK); ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind); ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val); bool CheckLoopHintExpr(Expr *E, SourceLocation Loc); ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnCharacterConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E); ExprResult ActOnParenListExpr(SourceLocation L, SourceLocation R, MultiExprArg Val); /// ActOnStringLiteral - The specified tokens were lexed as pasted string /// fragments (e.g. "foo" "bar" L"baz"). 
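  // For example (standard translation-phase-6 concatenation):
  //   const char *s = "foo" "bar";       // two tokens, one literal "foobar"
  //   const wchar_t *w = L"foo" L"bar";  // wide fragments concatenate the same way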
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope = nullptr); ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<ParsedType> ArgTypes, ArrayRef<Expr *> ArgExprs); ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<TypeSourceInfo *> Types, ArrayRef<Expr *> Exprs); // Binary/Unary Operators. 'Tok' is the token for the operator. ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *InputExpr); ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *Input); ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Op, Expr *Input); bool isQualifiedMemberAccess(Expr *E); QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc); ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, SourceRange R); ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, bool IsType, void *TyOrEx, SourceRange ArgRange); ExprResult CheckPlaceholderExpr(Expr *E); bool CheckVecStepExpr(Expr *E); bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind); bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc, SourceRange ExprRange, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnSizeofParameterPackExpr(Scope *S, SourceLocation OpLoc, IdentifierInfo &Name, SourceLocation NameLoc, SourceLocation RParenLoc); ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Kind, Expr *Input); ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult CreateBuiltinMatrixSubscriptExpr(Expr *Base, Expr *RowIdx, Expr *ColumnIdx, SourceLocation RBLoc); ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc, Expr *LowerBound, SourceLocation ColonLocFirst, SourceLocation ColonLocSecond, Expr *Length, Expr *Stride, SourceLocation RBLoc); ExprResult ActOnOMPArrayShapingExpr(Expr *Base, SourceLocation LParenLoc, SourceLocation RParenLoc, ArrayRef<Expr *> Dims, ArrayRef<SourceRange> Brackets); /// Data structure for iterator expression. struct OMPIteratorData { IdentifierInfo *DeclIdent = nullptr; SourceLocation DeclIdentLoc; ParsedType Type; OMPIteratorExpr::IteratorRange Range; SourceLocation AssignLoc; SourceLocation ColonLoc; SourceLocation SecColonLoc; }; ExprResult ActOnOMPIteratorExpr(Scope *S, SourceLocation IteratorKwLoc, SourceLocation LLoc, SourceLocation RLoc, ArrayRef<OMPIteratorData> Data); // This struct is for use by ActOnMemberAccess to allow // BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after // changing the access operator from a '.' to a '->' (to see if that is the // change needed to fix an error about an unknown member, e.g. when the class // defines a custom operator->). 
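  // A minimal sketch of that situation (Handle and Widget are hypothetical):
  //   struct Handle { Widget *operator->(); };
  //   Handle h;
  //   h.draw();   // no member 'draw' in Handle; member access can be retried
  //               // as h->draw() through the user-defined operator->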
struct ActOnMemberAccessExtraArgs { Scope *S; UnqualifiedId &Id; Decl *ObjCImpDecl; }; ExprResult BuildMemberReferenceExpr( Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, bool SuppressQualifierCheck = false, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, FieldDecl *Field, DeclAccessPair FoundDecl, const DeclarationNameInfo &MemberNameInfo); ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow); bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType, const CXXScopeSpec &SS, const LookupResult &R); ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Member, Decl *ObjCImpDecl); MemberExpr * BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec *SS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); MemberExpr * BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); void ActOnDefaultCtorInitializers(Decl *CDtorDecl); bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn, FunctionDecl *FDecl, const FunctionProtoType *Proto, ArrayRef<Expr *> Args, SourceLocation RParenLoc, bool ExecConfig = false); void CheckStaticArrayArgument(SourceLocation CallLoc, ParmVarDecl *Param, const Expr *ArgExpr); /// ActOnCallExpr - Handle a call to Fn with the specified array of arguments. /// This provides the location of the left/right parens and a list of comma /// locations. 
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig = nullptr); ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig = nullptr, bool IsExecConfig = false, bool AllowRecovery = false); enum class AtomicArgumentOrder { API, AST }; ExprResult BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange, SourceLocation RParenLoc, MultiExprArg Args, AtomicExpr::AtomicOp Op, AtomicArgumentOrder ArgOrder = AtomicArgumentOrder::API); ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc, ArrayRef<Expr *> Arg, SourceLocation RParenLoc, Expr *Config = nullptr, bool IsExecConfig = false, ADLCallKind UsesADL = ADLCallKind::NotADL); ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc, MultiExprArg ExecConfig, SourceLocation GGGLoc); ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc, Declarator &D, ParsedType &Ty, SourceLocation RParenLoc, Expr *CastExpr); ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc, TypeSourceInfo *Ty, SourceLocation RParenLoc, Expr *Op); CastKind PrepareScalarCast(ExprResult &src, QualType destType); /// Build an altivec or OpenCL literal. ExprResult BuildVectorLiteral(SourceLocation LParenLoc, SourceLocation RParenLoc, Expr *E, TypeSourceInfo *TInfo); ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME); ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc, Expr *InitExpr); ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo, SourceLocation RParenLoc, Expr *LiteralExpr); ExprResult ActOnInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList, SourceLocation RBraceLoc); ExprResult BuildInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList, SourceLocation RBraceLoc); ExprResult ActOnDesignatedInitializer(Designation &Desig, SourceLocation EqualOrColonLoc, bool GNUSyntax, ExprResult Init); private: static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind); public: ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc, tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr); ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr); ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr); void LookupBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc, UnresolvedSetImpl &Functions); void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc); /// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null /// in the case of a the GNU conditional expr extension. ExprResult ActOnConditionalOp(SourceLocation QuestionLoc, SourceLocation ColonLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr); /// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo". ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc, LabelDecl *TheDecl); void ActOnStartStmtExpr(); ExprResult ActOnStmtExpr(Scope *S, SourceLocation LPLoc, Stmt *SubStmt, SourceLocation RPLoc); ExprResult BuildStmtExpr(SourceLocation LPLoc, Stmt *SubStmt, SourceLocation RPLoc, unsigned TemplateDepth); // Handle the final expression in a statement expression. 
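  // For illustration (GNU statement-expression extension; f is hypothetical):
  //   int x = ({ int t = f(); t + 1; });  // the value of the whole expression
  //                                       // is that of the final 't + 1'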
ExprResult ActOnStmtExprResult(ExprResult E); void ActOnStmtExprError(); // __builtin_offsetof(type, identifier(.identifier|[expr])*) struct OffsetOfComponent { SourceLocation LocStart, LocEnd; bool isBrackets; // true if [expr], false if .ident union { IdentifierInfo *IdentInfo; Expr *E; } U; }; /// __builtin_offsetof(type, a.b[123][456].c) ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc, TypeSourceInfo *TInfo, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); ExprResult ActOnBuiltinOffsetOf(Scope *S, SourceLocation BuiltinLoc, SourceLocation TypeLoc, ParsedType ParsedArgTy, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); // __builtin_choose_expr(constExpr, expr1, expr2) ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr, SourceLocation RPLoc); // __builtin_va_arg(expr, type) ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty, SourceLocation RPLoc); ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E, TypeSourceInfo *TInfo, SourceLocation RPLoc); // __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(), // __builtin_COLUMN() ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc); // Build a potentially resolved SourceLocExpr. ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc, DeclContext *ParentContext); // __null ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc); bool CheckCaseExpression(Expr *E); /// Describes the result of an "if-exists" condition check. enum IfExistsResult { /// The symbol exists. IER_Exists, /// The symbol does not exist. IER_DoesNotExist, /// The name is a dependent name, so the results will differ /// from one instantiation to the next. IER_Dependent, /// An error occurred. IER_Error }; IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS, const DeclarationNameInfo &TargetNameInfo); IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name); StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, NestedNameSpecifierLoc QualifierLoc, DeclarationNameInfo NameInfo, Stmt *Nested); StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name, Stmt *Nested); //===------------------------- "Block" Extension ------------------------===// /// ActOnBlockStart - This callback is invoked when a block literal is /// started. void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockArguments - This callback allows processing of block arguments. /// If there are no arguments, this is still invoked. void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo, Scope *CurScope); /// ActOnBlockError - If there is an error parsing a block, this callback /// is invoked to pop the information about the block from the action impl. void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockStmtExpr - This is called when the body of a block statement /// literal was successfully completed. ^(int x){...} ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body, Scope *CurScope); //===---------------------------- Clang Extensions ----------------------===// /// __builtin_convertvector(...) 
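  // A hedged usage sketch (the vector typedefs are illustrative only):
  //   typedef float float4 __attribute__((ext_vector_type(4)));
  //   typedef int   int4   __attribute__((ext_vector_type(4)));
  //   int4 toInts(float4 v) { return __builtin_convertvector(v, int4); }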
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); //===---------------------------- OpenCL Features -----------------------===// /// __builtin_astype(...) ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); //===---------------------------- C++ Features --------------------------===// // Act on C++ namespaces Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc, SourceLocation NamespaceLoc, SourceLocation IdentLoc, IdentifierInfo *Ident, SourceLocation LBrace, const ParsedAttributesView &AttrList, UsingDirectiveDecl *&UsingDecl); void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace); NamespaceDecl *getStdNamespace() const; NamespaceDecl *getOrCreateStdNamespace(); NamespaceDecl *lookupStdExperimentalNamespace(); CXXRecordDecl *getStdBadAlloc() const; EnumDecl *getStdAlignValT() const; private: // A cache representing if we've fully checked the various comparison category // types stored in ASTContext. The bit-index corresponds to the integer value // of a ComparisonCategoryType enumerator. llvm::SmallBitVector FullyCheckedComparisonCategories; ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl, CXXScopeSpec &SS, ParsedType TemplateTypeTy, IdentifierInfo *MemberOrBase); public: enum class ComparisonCategoryUsage { /// The '<=>' operator was used in an expression and a builtin operator /// was selected. OperatorInExpression, /// A defaulted 'operator<=>' needed the comparison category. This /// typically only applies to 'std::strong_ordering', due to the implicit /// fallback return value. DefaultedOperator, }; /// Lookup the specified comparison category types in the standard /// library, an check the VarDecls possibly returned by the operator<=> /// builtins for that type. /// /// \return The type of the comparison category type corresponding to the /// specified Kind, or a null type if an error occurs QualType CheckComparisonCategoryType(ComparisonCategoryType Kind, SourceLocation Loc, ComparisonCategoryUsage Usage); /// Tests whether Ty is an instance of std::initializer_list and, if /// it is and Element is not NULL, assigns the element type to Element. bool isStdInitializerList(QualType Ty, QualType *Element); /// Looks for the std::initializer_list template and instantiates it /// with Element, or emits an error if it's not found. /// /// \returns The instantiated template, or null on error. QualType BuildStdInitializerList(QualType Element, SourceLocation Loc); /// Determine whether Ctor is an initializer-list constructor, as /// defined in [dcl.init.list]p2. 
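  // For example (illustrative type, assuming <initializer_list> is available):
  //   struct IntSeq {
  //     IntSeq(std::initializer_list<int> il);  // an initializer-list constructor
  //     IntSeq(int n, int v = 0);               // not one: first parameter is int
  //   };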
bool isInitListConstructor(const FunctionDecl *Ctor); Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc, SourceLocation NamespcLoc, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *NamespcName, const ParsedAttributesView &AttrList); void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir); Decl *ActOnNamespaceAliasDef(Scope *CurScope, SourceLocation NamespaceLoc, SourceLocation AliasLoc, IdentifierInfo *Alias, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *Ident); void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow); bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target, const LookupResult &PreviousDecls, UsingShadowDecl *&PrevShadow); UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD, NamedDecl *Target, UsingShadowDecl *PrevDecl); bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc, bool HasTypenameKeyword, const CXXScopeSpec &SS, SourceLocation NameLoc, const LookupResult &Previous); bool CheckUsingDeclQualifier(SourceLocation UsingLoc, bool HasTypename, const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, SourceLocation NameLoc); NamedDecl *BuildUsingDeclaration( Scope *S, AccessSpecifier AS, SourceLocation UsingLoc, bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS, DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc, const ParsedAttributesView &AttrList, bool IsInstantiation); NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom, ArrayRef<NamedDecl *> Expansions); bool CheckInheritingConstructorUsingDecl(UsingDecl *UD); /// Given a derived-class using shadow declaration for a constructor and the /// correspnding base class constructor, find or create the implicit /// synthesized derived class constructor to use for this initialization. CXXConstructorDecl * findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor, ConstructorUsingShadowDecl *DerivedShadow); Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS, SourceLocation UsingLoc, SourceLocation TypenameLoc, CXXScopeSpec &SS, UnqualifiedId &Name, SourceLocation EllipsisLoc, const ParsedAttributesView &AttrList); Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS, MultiTemplateParamsArg TemplateParams, SourceLocation UsingLoc, UnqualifiedId &Name, const ParsedAttributesView &AttrList, TypeResult Type, Decl *DeclFromDeclSpec); /// BuildCXXConstructExpr - Creates a complete call to a constructor, /// including handling of its default argument expressions. /// /// \param ConstructKind - a CXXConstructExpr::ConstructionKind ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl, CXXConstructorDecl *Constructor, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); /// Build a CXXConstructExpr whose constructor has already been resolved if /// it denotes an inherited constructor. ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); // FIXME: Can we remove this and have the above BuildCXXConstructExpr check if // the constructor can be elidable? 
ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field); /// Instantiate or parse a C++ default argument expression as necessary. /// Return true on error. bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating /// the default expr if needed. ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// FinalizeVarWithDestructor - Prepare for calling destructor on the /// constructed variable. void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType); /// Helper class that collects exception specifications for /// implicitly-declared special member functions. class ImplicitExceptionSpecification { // Pointer to allow copying Sema *Self; // We order exception specifications thus: // noexcept is the most restrictive, but is only used in C++11. // throw() comes next. // Then a throw(collected exceptions) // Finally no specification, which is expressed as noexcept(false). // throw(...) is used instead if any called function uses it. ExceptionSpecificationType ComputedEST; llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen; SmallVector<QualType, 4> Exceptions; void ClearExceptions() { ExceptionsSeen.clear(); Exceptions.clear(); } public: explicit ImplicitExceptionSpecification(Sema &Self) : Self(&Self), ComputedEST(EST_BasicNoexcept) { if (!Self.getLangOpts().CPlusPlus11) ComputedEST = EST_DynamicNone; } /// Get the computed exception specification type. ExceptionSpecificationType getExceptionSpecType() const { assert(!isComputedNoexcept(ComputedEST) && "noexcept(expr) should not be a possible result"); return ComputedEST; } /// The number of exceptions in the exception specification. unsigned size() const { return Exceptions.size(); } /// The set of exceptions in the exception specification. const QualType *data() const { return Exceptions.data(); } /// Integrate another called method into the collected data. void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method); /// Integrate an invoked expression into the collected data. void CalledExpr(Expr *E) { CalledStmt(E); } /// Integrate an invoked statement into the collected data. void CalledStmt(Stmt *S); /// Overwrite an EPI's exception specification with this /// computed exception specification. FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const { FunctionProtoType::ExceptionSpecInfo ESI; ESI.Type = getExceptionSpecType(); if (ESI.Type == EST_Dynamic) { ESI.Exceptions = Exceptions; } else if (ESI.Type == EST_None) { /// C++11 [except.spec]p14: /// The exception-specification is noexcept(false) if the set of /// potential exceptions of the special member function contains "any" ESI.Type = EST_NoexceptFalse; ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(), tok::kw_false).get(); } return ESI; } }; /// Evaluate the implicit exception specification for a defaulted /// special member function. void EvaluateImplicitExceptionSpec(SourceLocation Loc, FunctionDecl *FD); /// Check the given noexcept-specifier, convert its expression, and compute /// the appropriate ExceptionSpecificationType. 
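  // The noexcept-specifier forms involved look like, for example:
  //   void f() noexcept;                     // equivalent to noexcept(true)
  //   void g() noexcept(sizeof(long) == 8);  // the converted constant decides
  //   void h() noexcept(false);              // the EST_NoexceptFalse case above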
ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr, ExceptionSpecificationType &EST); /// Check the given exception-specification and update the /// exception specification information with the results. void checkExceptionSpecification(bool IsTopLevel, ExceptionSpecificationType EST, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr, SmallVectorImpl<QualType> &Exceptions, FunctionProtoType::ExceptionSpecInfo &ESI); /// Determine if we're in a case where we need to (incorrectly) eagerly /// parse an exception specification to work around a libstdc++ bug. bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D); /// Add an exception-specification to the given member function /// (or member function template). The exception-specification was parsed /// after the method itself was declared. void actOnDelayedExceptionSpecification(Decl *Method, ExceptionSpecificationType EST, SourceRange SpecificationRange, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr); class InheritedConstructorInfo; /// Determine if a special member function should have a deleted /// definition when it is defaulted. bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM, InheritedConstructorInfo *ICI = nullptr, bool Diagnose = false); /// Produce notes explaining why a defaulted function was defined as deleted. void DiagnoseDeletedDefaultedFunction(FunctionDecl *FD); /// Declare the implicit default constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// default constructor will be added. /// /// \returns The implicitly-declared default constructor. CXXConstructorDecl *DeclareImplicitDefaultConstructor( CXXRecordDecl *ClassDecl); /// DefineImplicitDefaultConstructor - Checks for feasibility of /// defining this constructor as the default constructor. void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit destructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// destructor will be added. /// /// \returns The implicitly-declared destructor. CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl); /// DefineImplicitDestructor - Checks for feasibility of /// defining this destructor as the default destructor. void DefineImplicitDestructor(SourceLocation CurrentLocation, CXXDestructorDecl *Destructor); /// Build an exception spec for destructors that don't have one. /// /// C++11 says that user-defined destructors with no exception spec get one /// that looks as if the destructor was implicitly declared. void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor); /// Define the specified inheriting constructor. void DefineInheritingConstructor(SourceLocation UseLoc, CXXConstructorDecl *Constructor); /// Declare the implicit copy constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy constructor will be added. /// /// \returns The implicitly-declared copy constructor. CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitCopyConstructor - Checks for feasibility of /// defining this constructor as the copy constructor. void DefineImplicitCopyConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit move constructor for the given class. 
/// /// \param ClassDecl The Class declaration into which the implicit /// move constructor will be added. /// /// \returns The implicitly-declared move constructor, or NULL if it wasn't /// declared. CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitMoveConstructor - Checks for feasibility of /// defining this constructor as the move constructor. void DefineImplicitMoveConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit copy assignment operator for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy assignment operator will be added. /// /// \returns The implicitly-declared copy assignment operator. CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared copy assignment operator. void DefineImplicitCopyAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Declare the implicit move assignment operator for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move assignment operator will be added. /// /// \returns The implicitly-declared move assignment operator, or NULL if it /// wasn't declared. CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared move assignment operator. void DefineImplicitMoveAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Force the declaration of any implicitly-declared members of this /// class. void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class); /// Check a completed declaration of an implicit special member. void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD); /// Determine whether the given function is an implicitly-deleted /// special member function. bool isImplicitlyDeleted(FunctionDecl *FD); /// Check whether 'this' shows up in the type of a static member /// function after the (naturally empty) cv-qualifier-seq would be. /// /// \returns true if an error occurred. bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method); /// Whether this' shows up in the exception specification of a static /// member function. bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method); /// Check whether 'this' shows up in the attributes of the given /// static member function. /// /// \returns true if an error occurred. bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method); /// MaybeBindToTemporary - If the passed in expression has a record type with /// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise /// it simply returns the passed in expression. ExprResult MaybeBindToTemporary(Expr *E); /// Wrap the expression in a ConstantExpr if it is a potential immediate /// invocation. 
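  // For example (C++20; square is hypothetical):
  //   consteval int square(int n) { return n * n; }
  //   int k = square(4);  // an immediate invocation, so it must be a constant expression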
ExprResult CheckForImmediateInvocation(ExprResult E, FunctionDecl *Decl); bool CompleteConstructorCall(CXXConstructorDecl *Constructor, QualType DeclInitType, MultiExprArg ArgsPtr, SourceLocation Loc, SmallVectorImpl<Expr *> &ConvertedArgs, bool AllowExplicit = false, bool IsListInitialization = false); ParsedType getInheritingConstructorName(CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo &Name); ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, bool EnteringContext); ParsedType getDestructorName(SourceLocation TildeLoc, IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, ParsedType ObjectType, bool EnteringContext); ParsedType getDestructorTypeForDecltype(const DeclSpec &DS, ParsedType ObjectType); // Checks that reinterpret casts don't have undefined behavior. void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType, bool IsDereference, SourceRange Range); /// ActOnCXXNamedCast - Parse /// {dynamic,static,reinterpret,const,addrspace}_cast's. ExprResult ActOnCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, SourceLocation LAngleBracketLoc, Declarator &D, SourceLocation RAngleBracketLoc, SourceLocation LParenLoc, Expr *E, SourceLocation RParenLoc); ExprResult BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, TypeSourceInfo *Ty, Expr *E, SourceRange AngleBrackets, SourceRange Parens); ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl, ExprResult Operand, SourceLocation RParenLoc); ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI, Expr *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXTypeid - Parse typeid( something ). ExprResult ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXUuidof - Parse __uuidof( something ). ExprResult ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); /// Handle a C++1z fold-expression: ( expr op ... op expr ). ExprResult ActOnCXXFoldExpr(Scope *S, SourceLocation LParenLoc, Expr *LHS, tok::TokenKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc); ExprResult BuildCXXFoldExpr(UnresolvedLookupExpr *Callee, SourceLocation LParenLoc, Expr *LHS, BinaryOperatorKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc, Optional<unsigned> NumExpansions); ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc, BinaryOperatorKind Operator); //// ActOnCXXThis - Parse 'this' pointer. ExprResult ActOnCXXThis(SourceLocation loc); /// Build a CXXThisExpr and mark it referenced in the current context. Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit); void MarkThisReferenced(CXXThisExpr *This); /// Try to retrieve the type of the 'this' pointer. /// /// \returns The type of 'this', if possible. Otherwise, returns a NULL type. 
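  // For example, inside the following member function (illustrative type):
  //   struct X { void f() const; };  // within X::f, 'this' has type 'const X *'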
QualType getCurrentThisType(); /// When non-NULL, the C++ 'this' expression is allowed despite the /// current context not being a non-static member function. In such cases, /// this provides the type used for 'this'. QualType CXXThisTypeOverride; /// RAII object used to temporarily allow the C++ 'this' expression /// to be used, with the given qualifiers on the current class type. class CXXThisScopeRAII { Sema &S; QualType OldCXXThisTypeOverride; bool Enabled; public: /// Introduce a new scope where 'this' may be allowed (when enabled), /// using the given declaration (which is either a class template or a /// class) along with the given qualifiers. /// along with the qualifiers placed on '*this'. CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals, bool Enabled = true); ~CXXThisScopeRAII(); }; /// Make sure the value of 'this' is actually available in the current /// context, if it is a potentially evaluated context. /// /// \param Loc The location at which the capture of 'this' occurs. /// /// \param Explicit Whether 'this' is explicitly captured in a lambda /// capture list. /// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// 'this' that may or may not be used in certain specializations of /// a nested generic lambda (depending on whether the name resolves to /// a non-static member function or a static function). /// \return returns 'true' if failed, 'false' if success. bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false, bool BuildAndDiagnose = true, const unsigned *const FunctionScopeIndexToStopAt = nullptr, bool ByCopy = false); /// Determine whether the given type is the type of *this that is used /// outside of the body of a member function for a type that is currently /// being defined. bool isThisOutsideMemberFunctionBody(QualType BaseType); /// ActOnCXXBoolLiteral - Parse {true,false} literals. ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); /// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals. ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); ExprResult ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs, SourceLocation AtLoc, SourceLocation RParen); /// ActOnCXXNullPtrLiteral - Parse 'nullptr'. ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc); //// ActOnCXXThrow - Parse throw expressions. ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr); ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex, bool IsThrownVarInScope); bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E); /// ActOnCXXTypeConstructExpr - Parse construction of a specified type. /// Can be interpreted either as function-style casting ("int(x)") /// or class type construction ("ClassType(x,y,z)") /// or creation of a value-initialized type ("int()"). ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep, SourceLocation LParenOrBraceLoc, MultiExprArg Exprs, SourceLocation RParenOrBraceLoc, bool ListInitialization); ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc, bool ListInitialization); /// ActOnCXXNew - Parsed a C++ 'new' expression. 
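  // Forms of new-expression covered, for illustration (T, buf, n, a, b are hypothetical):
  //   new int(5);           // plain form; '(5)' is the Initializer
  //   new int[n];           // array form; 'n' becomes the ArraySize
  //   ::new (buf) T(a, b);  // UseGlobal '::' with PlacementArgs '(buf)'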
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, Declarator &D, Expr *Initializer); ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, QualType AllocType, TypeSourceInfo *AllocTypeInfo, Optional<Expr *> ArraySize, SourceRange DirectInitRange, Expr *Initializer); /// Determine whether \p FD is an aligned allocation or deallocation /// function that is unavailable. bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const; /// Produce diagnostics if \p FD is an aligned allocation or deallocation /// function that is unavailable. void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD, SourceLocation Loc); bool CheckAllocatedType(QualType AllocType, SourceLocation Loc, SourceRange R); /// The scope in which to find allocation functions. enum AllocationFunctionScope { /// Only look for allocation functions in the global scope. AFS_Global, /// Only look for allocation functions in the scope of the /// allocated class. AFS_Class, /// Look for allocation functions in both the global scope /// and in the scope of the allocated class. AFS_Both }; /// Finds the overloads of operator new and delete that are appropriate /// for the allocation. bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range, AllocationFunctionScope NewScope, AllocationFunctionScope DeleteScope, QualType AllocType, bool IsArray, bool &PassAlignment, MultiExprArg PlaceArgs, FunctionDecl *&OperatorNew, FunctionDecl *&OperatorDelete, bool Diagnose = true); void DeclareGlobalNewDelete(); void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return, ArrayRef<QualType> Params); bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD, DeclarationName Name, FunctionDecl* &Operator, bool Diagnose = true); FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc, bool CanProvideSize, bool Overaligned, DeclarationName Name); FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc, CXXRecordDecl *RD); /// ActOnCXXDelete - Parsed a C++ 'delete' expression ExprResult ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal, bool ArrayForm, Expr *Operand); void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc, bool IsDelete, bool CallCanBeVirtual, bool WarnOnNonAbstractTypes, SourceLocation DtorLoc); ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen, Expr *Operand, SourceLocation RParen); ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand, SourceLocation RParen); /// Parsed one of the type trait support pseudo-functions. ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<ParsedType> Args, SourceLocation RParenLoc); ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<TypeSourceInfo *> Args, SourceLocation RParenLoc); /// ActOnArrayTypeTrait - Parsed one of the binary type trait support /// pseudo-functions. ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, ParsedType LhsTy, Expr *DimExpr, SourceLocation RParen); ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, TypeSourceInfo *TSInfo, Expr *DimExpr, SourceLocation RParen); /// ActOnExpressionTrait - Parsed one of the unary type trait support /// pseudo-functions. 
ExprResult ActOnExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult BuildExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, ParsedType &ObjectType, bool &MayBePseudoDestructor); ExprResult BuildPseudoDestructorExpr(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, const CXXScopeSpec &SS, TypeSourceInfo *ScopeType, SourceLocation CCLoc, SourceLocation TildeLoc, PseudoDestructorTypeStorage DestroyedType); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, UnqualifiedId &FirstTypeName, SourceLocation CCLoc, SourceLocation TildeLoc, UnqualifiedId &SecondTypeName); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, SourceLocation TildeLoc, const DeclSpec& DS); /// MaybeCreateExprWithCleanups - If the current full-expression /// requires any cleanups, surround it with a ExprWithCleanups node. /// Otherwise, just returns the passed-in expression. Expr *MaybeCreateExprWithCleanups(Expr *SubExpr); Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt); ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr); MaterializeTemporaryExpr * CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary, bool BoundToLvalueReference); ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) { return ActOnFinishFullExpr( Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue); } ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC, bool DiscardedValue, bool IsConstexpr = false); StmtResult ActOnFinishFullStmt(Stmt *Stmt); // Marks SS invalid if it represents an incomplete type. bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC); DeclContext *computeDeclContext(QualType T); DeclContext *computeDeclContext(const CXXScopeSpec &SS, bool EnteringContext = false); bool isDependentScopeSpecifier(const CXXScopeSpec &SS); CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS); /// The parser has parsed a global nested-name-specifier '::'. /// /// \param CCLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS); /// The parser has parsed a '__super' nested-name-specifier. /// /// \param SuperLoc The location of the '__super' keyword. /// /// \param ColonColonLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc, SourceLocation ColonColonLoc, CXXScopeSpec &SS); bool isAcceptableNestedNameSpecifier(const NamedDecl *SD, bool *CanCorrect = nullptr); NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS); /// Keeps information about an identifier in a nested-name-spec. /// struct NestedNameSpecInfo { /// The type of the object, if we're parsing nested-name-specifier in /// a member access expression. ParsedType ObjectType; /// The identifier preceding the '::'. IdentifierInfo *Identifier; /// The location of the identifier. 
SourceLocation IdentifierLoc; /// The location of the '::'. SourceLocation CCLoc; /// Creates info object for the most typical case. NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType()) : ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, QualType ObjectType) : ObjectType(ParsedType::make(ObjectType)), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } }; bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo); bool BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, NamedDecl *ScopeLookupResult, bool ErrorRecoveryLookup, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); /// The parser has parsed a nested-name-specifier 'identifier::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param IdInfo Parser information about an identifier in the /// nested-name-spec. /// /// \param EnteringContext Whether we're entering the context nominated by /// this nested-name-specifier. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param ErrorRecoveryLookup If true, then this method is called to improve /// error recovery. In this case do not emit error message. /// /// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':' /// are allowed. The bool value pointed by this parameter is set to 'true' /// if the identifier is treated as if it was followed by ':', not '::'. /// /// \param OnlyNamespace If true, only considers namespaces in lookup. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, bool ErrorRecoveryLookup = false, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); ExprResult ActOnDecltypeExpression(Expr *E); bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS, const DeclSpec &DS, SourceLocation ColonColonLoc); bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo, bool EnteringContext); /// The parser has parsed a nested-name-specifier /// 'template[opt] template-name < template-args >::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param TemplateKWLoc the location of the 'template' keyword, if any. /// \param TemplateName the template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). /// \param CCLoc The location of the '::'. /// /// \param EnteringContext Whether we're entering the context of the /// nested-name-specifier. /// /// /// \returns true if an error occurred, false otherwise. 
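  // That nested-name-specifier form appears in code such as (illustrative):
  //   typename Outer<T>::template Inner<U>::type x;
  //   // 'Inner<U>::' is the 'template[opt] template-name < template-args > ::' piece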
bool ActOnCXXNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, SourceLocation CCLoc, bool EnteringContext); /// Given a C++ nested-name-specifier, produce an annotation value /// that the parser can use later to reconstruct the given /// nested-name-specifier. /// /// \param SS A nested-name-specifier. /// /// \returns A pointer containing all of the information in the /// nested-name-specifier \p SS. void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS); /// Given an annotation pointer for a nested-name-specifier, restore /// the nested-name-specifier structure. /// /// \param Annotation The annotation pointer, produced by /// \c SaveNestedNameSpecifierAnnotation(). /// /// \param AnnotationRange The source range corresponding to the annotation. /// /// \param SS The nested-name-specifier that will be updated with the contents /// of the annotation pointer. void RestoreNestedNameSpecifierAnnotation(void *Annotation, SourceRange AnnotationRange, CXXScopeSpec &SS); bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global /// scope or nested-name-specifier) is parsed, part of a declarator-id. /// After this method is called, according to [C++ 3.4.3p3], names should be /// looked up in the declarator-id's scope, until the declarator is parsed and /// ActOnCXXExitDeclaratorScope is called. /// The 'SS' should be a non-empty valid CXXScopeSpec. bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS); /// ActOnCXXExitDeclaratorScope - Called when a declarator that previously /// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same /// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well. /// Used to indicate that names should revert to being looked up in the /// defining scope. void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an /// initializer for the declaration 'Dcl'. /// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a /// static data member of class X, names should be looked up in the scope of /// class X. void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl); /// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an /// initializer for the declaration 'Dcl'. void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl); /// Create a new lambda closure type. CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange, TypeSourceInfo *Info, bool KnownDependent, LambdaCaptureDefault CaptureDefault); /// Start the definition of a lambda expression. CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange, TypeSourceInfo *MethodType, SourceLocation EndLoc, ArrayRef<ParmVarDecl *> Params, ConstexprSpecKind ConstexprKind, Expr *TrailingRequiresClause); /// Number lambda for linkage purposes if necessary. void handleLambdaNumbering( CXXRecordDecl *Class, CXXMethodDecl *Method, Optional<std::tuple<bool, unsigned, unsigned, Decl *>> Mangling = None); /// Endow the lambda scope info with the relevant properties. 
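/// Illustrative example (hypothetical identifiers) of a declarator whose scope
/// specifier enters a declarator scope: once the 'X<T>::' qualifier is parsed,
/// names in the declarator and body are looked up in X<T>'s scope.
/// \code
/// template <typename T> struct X {
///   int member;
///   int f();
/// };
/// template <typename T>
/// int X<T>::f() {      // nested-name-specifier 'X<T>::' on the declarator-id
///   return member;     // found via the entered declarator scope
/// }
/// \endcode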
void buildLambdaScope(sema::LambdaScopeInfo *LSI, CXXMethodDecl *CallOperator, SourceRange IntroducerRange, LambdaCaptureDefault CaptureDefault, SourceLocation CaptureDefaultLoc, bool ExplicitParams, bool ExplicitResultType, bool Mutable); /// Perform initialization analysis of the init-capture and perform /// any implicit conversions such as an lvalue-to-rvalue conversion if /// not being used to initialize a reference. ParsedType actOnLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc, IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) { return ParsedType::make(buildLambdaInitCaptureInitialization( Loc, ByRef, EllipsisLoc, None, Id, InitKind != LambdaCaptureInitKind::CopyInit, Init)); } QualType buildLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit, Expr *&Init); /// Create a dummy variable within the declcontext of the lambda's /// call operator, for name lookup purposes for a lambda init capture. /// /// CodeGen handles emission of lambda captures, ignoring these dummy /// variables appropriately. VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc, QualType InitCaptureType, SourceLocation EllipsisLoc, IdentifierInfo *Id, unsigned InitStyle, Expr *Init); /// Add an init-capture to a lambda scope. void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var); /// Note that we have finished the explicit captures for the /// given lambda. void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI); /// \brief This is called after parsing the explicit template parameter list /// on a lambda (if it exists) in C++2a. void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc, ArrayRef<NamedDecl *> TParams, SourceLocation RAngleLoc, ExprResult RequiresClause); /// Introduce the lambda parameters into scope. void addLambdaParameters( ArrayRef<LambdaIntroducer::LambdaCapture> Captures, CXXMethodDecl *CallOperator, Scope *CurScope); /// Deduce a block or lambda's return type based on the return /// statements present in the body. void deduceClosureReturnType(sema::CapturingScopeInfo &CSI); /// ActOnStartOfLambdaDefinition - This is called just before we start /// parsing the body of a lambda; it analyzes the explicit captures and /// arguments, and sets up various data-structures for the body of the /// lambda. void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, Declarator &ParamInfo, Scope *CurScope); /// ActOnLambdaError - If there is an error parsing a lambda, this callback /// is invoked to pop the information about the lambda. void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope, bool IsInstantiation = false); /// ActOnLambdaExpr - This is called when the body of a lambda expression /// was successfully completed. ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, Scope *CurScope); /// Does copying/destroying the captured variable have side effects? bool CaptureHasSideEffects(const sema::Capture &From); /// Diagnose if an explicit lambda capture is unused. Returns true if a /// diagnostic is emitted. bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange, const sema::Capture &From); /// Build a FieldDecl suitable to hold the given capture. FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture); /// Initialize the given capture with a suitable expression. 
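/// Illustrative example (hypothetical identifiers) of the lambda features handled
/// above: an init-capture and a C++20 explicit template parameter list.
/// \code
/// #include <memory>
/// #include <utility>
/// void demo() {
///   auto p = std::make_unique<int>(42);
///   auto take = [q = std::move(p)]<typename T>(T scale) {  // init-capture + <typename T>
///     return *q * scale;
///   };
///   (void)take(2);
/// }
/// \endcode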
ExprResult BuildCaptureInit(const sema::Capture &Capture, SourceLocation ImplicitCaptureLoc, bool IsOpenMPMapping = false); /// Complete a lambda-expression having processed and attached the /// lambda body. ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc, sema::LambdaScopeInfo *LSI); /// Get the return type to use for a lambda's conversion function(s) to /// function pointer type, given the type of the call operator. QualType getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType, CallingConv CC); /// Define the "body" of the conversion from a lambda object to a /// function pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToFunctionPointerConversion( SourceLocation CurrentLoc, CXXConversionDecl *Conv); /// Define the "body" of the conversion from a lambda object to a /// block pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc, CXXConversionDecl *Conv); ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation, SourceLocation ConvLocation, CXXConversionDecl *Conv, Expr *Src); /// Check whether the given expression is a valid constraint expression. /// A diagnostic is emitted if it is not, false is returned, and /// PossibleNonPrimary will be set to true if the failure might be due to a /// non-primary expression being used as an atomic constraint. bool CheckConstraintExpression(const Expr *CE, Token NextToken = Token(), bool *PossibleNonPrimary = nullptr, bool IsTrailingRequiresClause = false); private: /// Caches pairs of template-like decls whose associated constraints were /// checked for subsumption and whether or not the first's constraints did in /// fact subsume the second's. llvm::DenseMap<std::pair<NamedDecl *, NamedDecl *>, bool> SubsumptionCache; /// Caches the normalized associated constraints of declarations (concepts or /// constrained declarations). If an error occurred while normalizing the /// associated constraints of the template or concept, nullptr will be cached /// here. llvm::DenseMap<NamedDecl *, NormalizedConstraint *> NormalizationCache; llvm::ContextualFoldingSet<ConstraintSatisfaction, const ASTContext &> SatisfactionCache; public: const NormalizedConstraint * getNormalizedAssociatedConstraints( NamedDecl *ConstrainedDecl, ArrayRef<const Expr *> AssociatedConstraints); /// \brief Check whether the given declaration's associated constraints are /// at least as constrained as another declaration's according to the /// partial ordering of constraints. /// /// \param Result If no error occurred, receives the result of true if D1 is /// at least as constrained as D2, and false otherwise. /// /// \returns true if an error occurred, false otherwise. bool IsAtLeastAsConstrained(NamedDecl *D1, ArrayRef<const Expr *> AC1, NamedDecl *D2, ArrayRef<const Expr *> AC2, bool &Result); /// If D1 was not at least as constrained as D2, but would've been if a pair /// of atomic constraints involved had been declared in a concept and not /// repeated in two separate places in code. 
/// \returns true if such a diagnostic was emitted, false otherwise. bool MaybeEmitAmbiguousAtomicConstraintsDiagnostic(NamedDecl *D1, ArrayRef<const Expr *> AC1, NamedDecl *D2, ArrayRef<const Expr *> AC2); /// \brief Check whether the given list of constraint expressions are /// satisfied (as if in a 'conjunction') given template arguments. /// \param Template the template-like entity that triggered the constraints /// check (either a concept or a constrained entity). /// \param ConstraintExprs a list of constraint expressions, treated as if /// they were 'AND'ed together. /// \param TemplateArgs the list of template arguments to substitute into the /// constraint expression. /// \param TemplateIDRange The source range of the template id that /// caused the constraints check. /// \param Satisfaction if true is returned, will contain details of the /// satisfaction, with enough information to diagnose an unsatisfied /// expression. /// \returns true if an error occurred and satisfaction could not be checked, /// false otherwise. bool CheckConstraintSatisfaction( const NamedDecl *Template, ArrayRef<const Expr *> ConstraintExprs, ArrayRef<TemplateArgument> TemplateArgs, SourceRange TemplateIDRange, ConstraintSatisfaction &Satisfaction); /// \brief Check whether the given non-dependent constraint expression is /// satisfied. Returns false and updates Satisfaction with the satisfaction /// verdict if successful, emits a diagnostic and returns true if an error /// occured and satisfaction could not be determined. /// /// \returns true if an error occurred, false otherwise. bool CheckConstraintSatisfaction(const Expr *ConstraintExpr, ConstraintSatisfaction &Satisfaction); /// Check whether the given function decl's trailing requires clause is /// satisfied, if any. Returns false and updates Satisfaction with the /// satisfaction verdict if successful, emits a diagnostic and returns true if /// an error occured and satisfaction could not be determined. /// /// \returns true if an error occurred, false otherwise. bool CheckFunctionConstraints(const FunctionDecl *FD, ConstraintSatisfaction &Satisfaction, SourceLocation UsageLoc = SourceLocation()); /// \brief Ensure that the given template arguments satisfy the constraints /// associated with the given template, emitting a diagnostic if they do not. /// /// \param Template The template to which the template arguments are being /// provided. /// /// \param TemplateArgs The converted, canonicalized template arguments. /// /// \param TemplateIDRange The source range of the template id that /// caused the constraints check. /// /// \returns true if the constrains are not satisfied or could not be checked /// for satisfaction, false if the constraints are satisfied. bool EnsureTemplateArgumentListConstraints(TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange TemplateIDRange); /// \brief Emit diagnostics explaining why a constraint expression was deemed /// unsatisfied. /// \param First whether this is the first time an unsatisfied constraint is /// diagnosed for this error. void DiagnoseUnsatisfiedConstraint(const ConstraintSatisfaction &Satisfaction, bool First = true); /// \brief Emit diagnostics explaining why a constraint expression was deemed /// unsatisfied. void DiagnoseUnsatisfiedConstraint(const ASTConstraintSatisfaction &Satisfaction, bool First = true); // ParseObjCStringLiteral - Parse Objective-C string literals. 
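/// Illustrative example (hypothetical identifiers \c Integral, \c only_integral)
/// of the constraint machinery documented above: the associated constraint of
/// \c only_integral is checked for satisfaction against the converted template
/// arguments, and an unsatisfied use is diagnosed.
/// \code
/// #include <type_traits>
/// template <typename T>
/// concept Integral = std::is_integral_v<T>;
/// template <Integral T>          // associated constraint: Integral<T>
/// T only_integral(T v) { return v; }
/// int a = only_integral(1);      // constraints satisfied
/// // double b = only_integral(1.0);  // would be rejected: Integral<double> is false
/// \endcode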
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs, ArrayRef<Expr *> Strings); ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S); /// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the /// numeric literal expression. Type of the expression will be "NSNumber *" /// or "id" if NSNumber is unavailable. ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number); ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc, bool Value); ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements); /// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the /// '@' prefixed parenthesized expression. The type of the expression will /// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type /// of ValueType, which is allowed to be a built-in numeric type, "char *", /// "const char *" or C structure with attribute 'objc_boxable'. ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr); ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr, Expr *IndexExpr, ObjCMethodDecl *getterMethod, ObjCMethodDecl *setterMethod); ExprResult BuildObjCDictionaryLiteral(SourceRange SR, MutableArrayRef<ObjCDictionaryElement> Elements); ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc, TypeSourceInfo *EncodedTypeInfo, SourceLocation RParenLoc); ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl, CXXConversionDecl *Method, bool HadMultipleCandidates); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc, SourceLocation EncodeLoc, SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc); /// ParseObjCSelectorExpression - Build selector expression for \@selector ExprResult ParseObjCSelectorExpression(Selector Sel, SourceLocation AtLoc, SourceLocation SelLoc, SourceLocation LParenLoc, SourceLocation RParenLoc, bool WarnMultipleSelectors); /// ParseObjCProtocolExpression - Build protocol expression for \@protocol ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName, SourceLocation AtLoc, SourceLocation ProtoLoc, SourceLocation LParenLoc, SourceLocation ProtoIdLoc, SourceLocation RParenLoc); //===--------------------------------------------------------------------===// // C++ Declarations // Decl *ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc, Expr *LangStr, SourceLocation LBraceLoc); Decl *ActOnFinishLinkageSpecification(Scope *S, Decl *LinkageSpec, SourceLocation RBraceLoc); //===--------------------------------------------------------------------===// // C++ Classes // CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS); bool isCurrentClassName(const IdentifierInfo &II, Scope *S, const CXXScopeSpec *SS = nullptr); bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS); bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc, SourceLocation ColonLoc, const ParsedAttributesView &Attrs); NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, Expr *BitfieldWidth, const VirtSpecifiers &VS, InClassInitStyle InitStyle); void ActOnStartCXXInClassMemberInitializer(); void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl, SourceLocation EqualLoc, Expr *Init); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, SourceLocation LParenLoc, ArrayRef<Expr *> Args, SourceLocation 
RParenLoc, SourceLocation EllipsisLoc); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *InitList, SourceLocation EllipsisLoc); MemInitResult BuildMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *Init, SourceLocation EllipsisLoc); MemInitResult BuildMemberInitializer(ValueDecl *Member, Expr *Init, SourceLocation IdLoc); MemInitResult BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo, Expr *Init, CXXRecordDecl *ClassDecl, SourceLocation EllipsisLoc); MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init, CXXRecordDecl *ClassDecl); bool SetDelegatingInitializer(CXXConstructorDecl *Constructor, CXXCtorInitializer *Initializer); bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors, ArrayRef<CXXCtorInitializer *> Initializers = None); void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation); /// MarkBaseAndMemberDestructorsReferenced - Given a record decl, /// mark all the non-trivial destructors of its members and bases as /// referenced. void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc, CXXRecordDecl *Record); /// Mark destructors of virtual bases of this class referenced. In the Itanium /// C++ ABI, this is done when emitting a destructor for any non-abstract /// class. In the Microsoft C++ ABI, this is done any time a class's /// destructor is referenced. void MarkVirtualBaseDestructorsReferenced( SourceLocation Location, CXXRecordDecl *ClassDecl, llvm::SmallPtrSetImpl<const RecordType *> *DirectVirtualBases = nullptr); /// Do semantic checks to allow the complete destructor variant to be emitted /// when the destructor is defined in another translation unit. In the Itanium /// C++ ABI, destructor variants are emitted together. In the MS C++ ABI, they /// can be emitted in separate TUs. To emit the complete variant, run a subset /// of the checks performed when emitting a regular destructor. void CheckCompleteDestructorVariant(SourceLocation CurrentLocation, CXXDestructorDecl *Dtor); /// The list of classes whose vtables have been used within /// this translation unit, and the source locations at which the /// first use occurred. typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse; /// The list of vtables that are required but have not yet been /// materialized. SmallVector<VTableUse, 16> VTableUses; /// The set of classes whose vtables have been used within /// this translation unit, and a bit that will be true if the vtable is /// required to be emitted (otherwise, it should be emitted only if needed /// by code generation). llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed; /// Load any externally-stored vtable uses. void LoadExternalVTableUses(); /// Note that the vtable for the given class was used at the /// given location. void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class, bool DefinitionRequired = false); /// Mark the exception specifications of all virtual member functions /// in the given class as needed. void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc, const CXXRecordDecl *RD); /// MarkVirtualMembersReferenced - Will mark all members of the given /// CXXRecordDecl referenced. 
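/// Illustrative example (hypothetical identifiers) of the mem-initializer forms
/// built above: a base initializer, a member initializer, and a delegating
/// initializer.
/// \code
/// struct Base { Base(int) {} };
/// struct Derived : Base {
///   int n;
///   Derived(int v) : Base(v), n(v) {}   // base + member initializers
///   Derived() : Derived(0) {}           // delegating initializer
/// };
/// \endcode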
void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD, bool ConstexprOnly = false); /// Define all of the vtables that have been used in this /// translation unit and reference any virtual members used by those /// vtables. /// /// \returns true if any work was done, false otherwise. bool DefineUsedVTables(); void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl); void ActOnMemInitializers(Decl *ConstructorDecl, SourceLocation ColonLoc, ArrayRef<CXXCtorInitializer*> MemInits, bool AnyErrors); /// Check class-level dllimport/dllexport attribute. The caller must /// ensure that referenceDLLExportedClassMethods is called some point later /// when all outer classes of Class are complete. void checkClassLevelDLLAttribute(CXXRecordDecl *Class); void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class); void referenceDLLExportedClassMethods(); void propagateDLLAttrToBaseClassTemplate( CXXRecordDecl *Class, Attr *ClassAttr, ClassTemplateSpecializationDecl *BaseTemplateSpec, SourceLocation BaseLoc); /// Add gsl::Pointer attribute to std::container::iterator /// \param ND The declaration that introduces the name /// std::container::iterator. \param UnderlyingRecord The record named by ND. void inferGslPointerAttribute(NamedDecl *ND, CXXRecordDecl *UnderlyingRecord); /// Add [[gsl::Owner]] and [[gsl::Pointer]] attributes for std:: types. void inferGslOwnerPointerAttribute(CXXRecordDecl *Record); /// Add [[gsl::Pointer]] attributes for std:: types. void inferGslPointerAttribute(TypedefNameDecl *TD); void CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record); /// Check that the C++ class annoated with "trivial_abi" satisfies all the /// conditions that are needed for the attribute to have an effect. void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD); void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc, Decl *TagDecl, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); void ActOnFinishCXXMemberDecls(); void ActOnFinishCXXNonNestedClass(); void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param); unsigned ActOnReenterTemplateScope(Decl *Template, llvm::function_ref<Scope *()> EnterScope); void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param); void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnFinishDelayedMemberInitializers(Decl *Record); void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD, CachedTokens &Toks); void UnmarkAsLateParsedTemplate(FunctionDecl *FD); bool IsInsideALocalClassWithinATemplateFunction(); Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, Expr *AssertMessageExpr, SourceLocation RParenLoc); Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, StringLiteral *AssertMessageExpr, SourceLocation RParenLoc, bool Failed); FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart, SourceLocation FriendLoc, TypeSourceInfo *TSInfo); Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS, MultiTemplateParamsArg TemplateParams); NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParams); QualType CheckConstructorDeclarator(Declarator &D, QualType R, StorageClass& SC); void CheckConstructor(CXXConstructorDecl *Constructor); QualType 
CheckDestructorDeclarator(Declarator &D, QualType R, StorageClass& SC); bool CheckDestructor(CXXDestructorDecl *Destructor); void CheckConversionDeclarator(Declarator &D, QualType &R, StorageClass& SC); Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion); void CheckDeductionGuideDeclarator(Declarator &D, QualType &R, StorageClass &SC); void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD); void CheckExplicitlyDefaultedFunction(Scope *S, FunctionDecl *MD); bool CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM); void CheckDelayedMemberExceptionSpecs(); bool CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *MD, DefaultedComparisonKind DCK); void DeclareImplicitEqualityComparison(CXXRecordDecl *RD, FunctionDecl *Spaceship); void DefineDefaultedComparison(SourceLocation Loc, FunctionDecl *FD, DefaultedComparisonKind DCK); //===--------------------------------------------------------------------===// // C++ Derived Classes // /// ActOnBaseSpecifier - Parsed a base specifier CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class, SourceRange SpecifierRange, bool Virtual, AccessSpecifier Access, TypeSourceInfo *TInfo, SourceLocation EllipsisLoc); BaseResult ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange, ParsedAttributes &Attrs, bool Virtual, AccessSpecifier Access, ParsedType basetype, SourceLocation BaseLoc, SourceLocation EllipsisLoc); bool AttachBaseSpecifiers(CXXRecordDecl *Class, MutableArrayRef<CXXBaseSpecifier *> Bases); void ActOnBaseSpecifiers(Decl *ClassDecl, MutableArrayRef<CXXBaseSpecifier *> Bases); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base, CXXBasePaths &Paths); // FIXME: I don't like this name. void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, SourceLocation Loc, SourceRange Range, CXXCastPath *BasePath = nullptr, bool IgnoreAccess = false); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, unsigned InaccessibleBaseID, unsigned AmbiguousBaseConvID, SourceLocation Loc, SourceRange Range, DeclarationName Name, CXXCastPath *BasePath, bool IgnoreAccess = false); std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths); bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionReturnType - Checks whether the return types are /// covariant, according to C++ [class.virtual]p5. bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionExceptionSpec - Checks whether the exception /// spec is a subset of base spec. bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New, const CXXMethodDecl *Old); bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange); /// CheckOverrideControl - Check C++11 override control semantics. void CheckOverrideControl(NamedDecl *D); /// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was /// not used in the declaration of an overriding method. void DiagnoseAbsenceOfOverrideControl(NamedDecl *D, bool Inconsistent); /// CheckForFunctionMarkedFinal - Checks whether a virtual member function /// overrides a virtual member function marked 'final', according to /// C++11 [class.virtual]p4. 
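/// Illustrative example (hypothetical identifiers) of the checks above: an
/// explicitly-defaulted three-way comparison, an override with a covariant
/// return type, and C++11 override control on a \c final class.
/// \code
/// #include <compare>
/// struct Point {
///   int x, y;
///   auto operator<=>(const Point &) const = default;  // defaulted comparison
/// };
/// struct Shape { virtual Shape *clone() const; virtual ~Shape() = default; };
/// struct Circle final : Shape {
///   Circle *clone() const override;  // covariant return type, 'override' checked
/// };
/// \endcode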
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New, const CXXMethodDecl *Old); //===--------------------------------------------------------------------===// // C++ Access Control // enum AccessResult { AR_accessible, AR_inaccessible, AR_dependent, AR_delayed }; bool SetMemberAccessSpecifier(NamedDecl *MemberDecl, NamedDecl *PrevMemberDecl, AccessSpecifier LexicalAS); AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E, DeclAccessPair FoundDecl); AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E, DeclAccessPair FoundDecl); AccessResult CheckAllocationAccess(SourceLocation OperatorLoc, SourceRange PlacementRange, CXXRecordDecl *NamingClass, DeclAccessPair FoundDecl, bool Diagnose = true); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, bool IsCopyBindingRefToTemp = false); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, const PartialDiagnostic &PDiag); AccessResult CheckDestructorAccess(SourceLocation Loc, CXXDestructorDecl *Dtor, const PartialDiagnostic &PDiag, QualType objectType = QualType()); AccessResult CheckFriendAccess(NamedDecl *D); AccessResult CheckMemberAccess(SourceLocation UseLoc, CXXRecordDecl *NamingClass, DeclAccessPair Found); AccessResult CheckStructuredBindingMemberAccess(SourceLocation UseLoc, CXXRecordDecl *DecomposedClass, DeclAccessPair Field); AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr, Expr *ArgExpr, DeclAccessPair FoundDecl); AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr, DeclAccessPair FoundDecl); AccessResult CheckBaseClassAccess(SourceLocation AccessLoc, QualType Base, QualType Derived, const CXXBasePath &Path, unsigned DiagID, bool ForceCheck = false, bool ForceUnprivileged = false); void CheckLookupAccess(const LookupResult &R); bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass, QualType BaseType); bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass, DeclAccessPair Found, QualType ObjectType, SourceLocation Loc, const PartialDiagnostic &Diag); bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass, DeclAccessPair Found, QualType ObjectType) { return isMemberAccessibleForDeletion(NamingClass, Found, ObjectType, SourceLocation(), PDiag()); } void HandleDependentAccessCheck(const DependentDiagnostic &DD, const MultiLevelTemplateArgumentList &TemplateArgs); void PerformDependentDiagnostics(const DeclContext *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); /// When true, access checking violations are treated as SFINAE /// failures rather than hard errors. bool AccessCheckingSFINAE; enum AbstractDiagSelID { AbstractNone = -1, AbstractReturnType, AbstractParamType, AbstractVariableType, AbstractFieldType, AbstractIvarType, AbstractSynthesizedIvarType, AbstractArrayType }; bool isAbstractType(SourceLocation Loc, QualType T); bool RequireNonAbstractType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); template <typename... 
Ts> bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireNonAbstractType(Loc, T, Diagnoser); } void DiagnoseAbstractType(const CXXRecordDecl *RD); //===--------------------------------------------------------------------===// // C++ Overloaded Operators [C++ 13.5] // bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl); bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl); //===--------------------------------------------------------------------===// // C++ Templates [C++ 14] // void FilterAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true); bool hasAnyAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true, bool AllowNonTemplateFunctions = false); /// Try to interpret the lookup result D as a template-name. /// /// \param D A declaration found by name lookup. /// \param AllowFunctionTemplates Whether function templates should be /// considered valid results. /// \param AllowDependent Whether unresolved using declarations (that might /// name templates) should be considered valid results. static NamedDecl *getAsTemplateNameDecl(NamedDecl *D, bool AllowFunctionTemplates = true, bool AllowDependent = true); enum TemplateNameIsRequiredTag { TemplateNameIsRequired }; /// Whether and why a template name is required in this lookup. class RequiredTemplateKind { public: /// Template name is required if TemplateKWLoc is valid. RequiredTemplateKind(SourceLocation TemplateKWLoc = SourceLocation()) : TemplateKW(TemplateKWLoc) {} /// Template name is unconditionally required. RequiredTemplateKind(TemplateNameIsRequiredTag) : TemplateKW() {} SourceLocation getTemplateKeywordLoc() const { return TemplateKW.getValueOr(SourceLocation()); } bool hasTemplateKeyword() const { return getTemplateKeywordLoc().isValid(); } bool isRequired() const { return TemplateKW != SourceLocation(); } explicit operator bool() const { return isRequired(); } private: llvm::Optional<SourceLocation> TemplateKW; }; enum class AssumedTemplateKind { /// This is not assumed to be a template name. None, /// This is assumed to be a template name because lookup found nothing. FoundNothing, /// This is assumed to be a template name because lookup found one or more /// functions (but no function templates). FoundFunctions, }; bool LookupTemplateName( LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType, bool EnteringContext, bool &MemberOfUnknownSpecialization, RequiredTemplateKind RequiredTemplate = SourceLocation(), AssumedTemplateKind *ATK = nullptr, bool AllowTypoCorrection = true); TemplateNameKind isTemplateName(Scope *S, CXXScopeSpec &SS, bool hasTemplateKeyword, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool &MemberOfUnknownSpecialization, bool Disambiguation = false); /// Try to resolve an undeclared template name as a type template. /// /// Sets II to the identifier corresponding to the template name, and updates /// Name to a corresponding (typo-corrected) type template name and TNK to /// the corresponding kind, if possible. 
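/// Illustrative example (hypothetical identifiers) of a context where a template
/// name is syntactically required: inside a template, a dependent member template
/// must be disambiguated with the 'template' keyword.
/// \code
/// struct Box {
///   template <int I> int get() const { return I; }
/// };
/// template <typename T>
/// int first(const T &t) {
///   return t.template get<0>();  // 'template' marks 'get' as a template name
/// }
/// int use() { return first(Box{}); }
/// \endcode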
void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name, TemplateNameKind &TNK, SourceLocation NameLoc, IdentifierInfo *&II); bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name, SourceLocation NameLoc, bool Diagnose = true); /// Determine whether a particular identifier might be the name in a C++1z /// deduction-guide declaration. bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name, SourceLocation NameLoc, ParsedTemplateTy *Template = nullptr); bool DiagnoseUnknownTemplateName(const IdentifierInfo &II, SourceLocation IILoc, Scope *S, const CXXScopeSpec *SS, TemplateTy &SuggestedTemplate, TemplateNameKind &SuggestedKind); bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation, NamedDecl *Instantiation, bool InstantiatedFromMember, const NamedDecl *Pattern, const NamedDecl *PatternDef, TemplateSpecializationKind TSK, bool Complain = true); void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl); TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl); NamedDecl *ActOnTypeParameter(Scope *S, bool Typename, SourceLocation EllipsisLoc, SourceLocation KeyLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedType DefaultArg, bool HasTypeConstraint); bool ActOnTypeConstraint(const CXXScopeSpec &SS, TemplateIdAnnotation *TypeConstraint, TemplateTypeParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc); bool AttachTypeConstraint(NestedNameSpecifierLoc NS, DeclarationNameInfo NameInfo, ConceptDecl *NamedConcept, const TemplateArgumentListInfo *TemplateArgs, TemplateTypeParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc); bool AttachTypeConstraint(AutoTypeLoc TL, NonTypeTemplateParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc); bool RequireStructuralType(QualType T, SourceLocation Loc); QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI, SourceLocation Loc); QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc); NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, unsigned Depth, unsigned Position, SourceLocation EqualLoc, Expr *DefaultArg); NamedDecl *ActOnTemplateTemplateParameter(Scope *S, SourceLocation TmpLoc, TemplateParameterList *Params, SourceLocation EllipsisLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedTemplateArgument DefaultArg); TemplateParameterList * ActOnTemplateParameterList(unsigned Depth, SourceLocation ExportLoc, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ArrayRef<NamedDecl *> Params, SourceLocation RAngleLoc, Expr *RequiresClause); /// The context in which we are checking a template parameter list. 
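/// Illustrative example (hypothetical identifiers) of the three template
/// parameter kinds handled above, plus a type-constraint attached to a type
/// parameter.
/// \code
/// #include <concepts>
/// #include <vector>
/// template <std::integral T,                                        // constrained type parameter
///           int N = 4,                                              // non-type parameter with default
///           template <typename...> class Container = std::vector>  // template template parameter
/// struct Buffer {
///   Container<T> data = Container<T>(N);
/// };
/// Buffer<int> b;
/// \endcode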
enum TemplateParamListContext { TPC_ClassTemplate, TPC_VarTemplate, TPC_FunctionTemplate, TPC_ClassTemplateMember, TPC_FriendClassTemplate, TPC_FriendFunctionTemplate, TPC_FriendFunctionTemplateDefinition, TPC_TypeAliasTemplate }; bool CheckTemplateParameterList(TemplateParameterList *NewParams, TemplateParameterList *OldParams, TemplateParamListContext TPC, SkipBodyInfo *SkipBody = nullptr); TemplateParameterList *MatchTemplateParametersToScopeSpecifier( SourceLocation DeclStartLoc, SourceLocation DeclLoc, const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId, ArrayRef<TemplateParameterList *> ParamLists, bool IsFriend, bool &IsMemberSpecialization, bool &Invalid, bool SuppressDiagnostic = false); DeclResult CheckClassTemplate( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams, AccessSpecifier AS, SourceLocation ModulePrivateLoc, SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists, TemplateParameterList **OuterTemplateParamLists, SkipBodyInfo *SkipBody = nullptr); TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg, QualType NTTPType, SourceLocation Loc); /// Get a template argument mapping the given template parameter to itself, /// e.g. for X in \c template<int X>, this would return an expression template /// argument referencing X. TemplateArgumentLoc getIdentityTemplateArgumentLoc(NamedDecl *Param, SourceLocation Location); void translateTemplateArguments(const ASTTemplateArgsPtr &In, TemplateArgumentListInfo &Out); ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType); void NoteAllFoundTemplates(TemplateName Name); QualType CheckTemplateIdType(TemplateName Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs); TypeResult ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy Template, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, bool IsCtorOrDtorName = false, bool IsClassName = false); /// Parsed an elaborated-type-specifier that refers to a template-id, /// such as \c class T::template apply<U>. TypeResult ActOnTagTemplateIdType(TagUseKind TUK, TypeSpecifierType TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateD, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc); DeclResult ActOnVarTemplateSpecialization( Scope *S, Declarator &D, TypeSourceInfo *DI, SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams, StorageClass SC, bool IsPartialSpecialization); /// Get the specialization of the given variable template corresponding to /// the specified argument list, or a null-but-valid result if the arguments /// are dependent. DeclResult CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation TemplateNameLoc, const TemplateArgumentListInfo &TemplateArgs); /// Form a reference to the specialization of the given variable template /// corresponding to the specified argument list, or a null-but-valid result /// if the arguments are dependent. 
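/// Illustrative example (hypothetical identifier \c is_pointer_v) of a variable
/// template together with the partial and explicit specializations handled above.
/// \code
/// template <typename T> constexpr bool is_pointer_v = false;
/// template <typename T> constexpr bool is_pointer_v<T *> = true;    // partial specialization
/// template <>           constexpr bool is_pointer_v<void> = false;  // explicit specialization
/// static_assert(is_pointer_v<int *> && !is_pointer_v<int>);
/// \endcode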
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, VarTemplateDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); ExprResult CheckConceptTemplateId(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &ConceptNameInfo, NamedDecl *FoundDecl, ConceptDecl *NamedConcept, const TemplateArgumentListInfo *TemplateArgs); void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc); ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, bool RequiresADL, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); TemplateNameKind ActOnTemplateName( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool AllowInjectedClassName = false); DeclResult ActOnClassTemplateSpecialization( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, SourceLocation ModulePrivateLoc, CXXScopeSpec &SS, TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr, MultiTemplateParamsArg TemplateParameterLists, SkipBodyInfo *SkipBody = nullptr); bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc, TemplateDecl *PrimaryTemplate, unsigned NumExplicitArgs, ArrayRef<TemplateArgument> Args); void CheckTemplatePartialSpecialization( ClassTemplatePartialSpecializationDecl *Partial); void CheckTemplatePartialSpecialization( VarTemplatePartialSpecializationDecl *Partial); Decl *ActOnTemplateDeclarator(Scope *S, MultiTemplateParamsArg TemplateParameterLists, Declarator &D); bool CheckSpecializationInstantiationRedecl(SourceLocation NewLoc, TemplateSpecializationKind NewTSK, NamedDecl *PrevDecl, TemplateSpecializationKind PrevTSK, SourceLocation PrevPtOfInstantiation, bool &SuppressNew); bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD, const TemplateArgumentListInfo &ExplicitTemplateArgs, LookupResult &Previous); bool CheckFunctionTemplateSpecialization( FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs, LookupResult &Previous, bool QualifiedFriend = false); bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous); void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous); DeclResult ActOnExplicitInstantiation( Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS, TemplateTy Template, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, Declarator &D); TemplateArgumentLoc SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, Decl *Param, SmallVectorImpl<TemplateArgument> &Converted, bool &HasDefaultArg); /// Specifies the context in which a particular template /// argument is being checked. 
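/// Illustrative example (hypothetical identifiers) of the explicit instantiations
/// and function template specializations processed above.
/// \code
/// template <typename T> struct Holder { T value; };
/// template <typename T> T twice(T v) { return v + v; }
///
/// template struct Holder<int>;          // explicit instantiation definition
/// extern template struct Holder<long>;  // explicit instantiation declaration
/// template <> float twice<float>(float v) { return 2.0f * v; }  // explicit specialization
/// \endcode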
enum CheckTemplateArgumentKind { /// The template argument was specified in the code or was /// instantiated with some deduced template arguments. CTAK_Specified, /// The template argument was deduced via template argument /// deduction. CTAK_Deduced, /// The template argument was deduced from an array bound /// via template argument deduction. CTAK_DeducedFromArrayBound }; bool CheckTemplateArgument(NamedDecl *Param, TemplateArgumentLoc &Arg, NamedDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, unsigned ArgumentPackIndex, SmallVectorImpl<TemplateArgument> &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); /// Check that the given template arguments can be be provided to /// the given template, converting the arguments along the way. /// /// \param Template The template to which the template arguments are being /// provided. /// /// \param TemplateLoc The location of the template name in the source. /// /// \param TemplateArgs The list of template arguments. If the template is /// a template template parameter, this function may extend the set of /// template arguments to also include substituted, defaulted template /// arguments. /// /// \param PartialTemplateArgs True if the list of template arguments is /// intentionally partial, e.g., because we're checking just the initial /// set of template arguments. /// /// \param Converted Will receive the converted, canonicalized template /// arguments. /// /// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to /// contain the converted forms of the template arguments as written. /// Otherwise, \p TemplateArgs will not be modified. /// /// \param ConstraintsNotSatisfied If provided, and an error occured, will /// receive true if the cause for the error is the associated constraints of /// the template not being satisfied by the template arguments. /// /// \returns true if an error occurred, false otherwise. bool CheckTemplateArgumentList(TemplateDecl *Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs, bool PartialTemplateArgs, SmallVectorImpl<TemplateArgument> &Converted, bool UpdateArgsWithConversions = true, bool *ConstraintsNotSatisfied = nullptr); bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param, TemplateArgumentLoc &Arg, SmallVectorImpl<TemplateArgument> &Converted); bool CheckTemplateArgument(TemplateTypeParmDecl *Param, TypeSourceInfo *Arg); ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param, QualType InstantiatedParamType, Expr *Arg, TemplateArgument &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); bool CheckTemplateTemplateArgument(TemplateTemplateParmDecl *Param, TemplateParameterList *Params, TemplateArgumentLoc &Arg); ExprResult BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg, QualType ParamType, SourceLocation Loc); ExprResult BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg, SourceLocation Loc); /// Enumeration describing how template parameter lists are compared /// for equality. enum TemplateParameterListEqualKind { /// We are matching the template parameter lists of two templates /// that might be redeclarations. /// /// \code /// template<typename T> struct X; /// template<typename T> struct X; /// \endcode TPL_TemplateMatch, /// We are matching the template parameter lists of two template /// template parameters as part of matching the template parameter lists /// of two templates that might be redeclarations. 
/// /// \code /// template<template<int I> class TT> struct X; /// template<template<int Value> class Other> struct X; /// \endcode TPL_TemplateTemplateParmMatch, /// We are matching the template parameter lists of a template /// template argument against the template parameter lists of a template /// template parameter. /// /// \code /// template<template<int Value> class Metafun> struct X; /// template<int Value> struct integer_c; /// X<integer_c> xic; /// \endcode TPL_TemplateTemplateArgumentMatch }; bool TemplateParameterListsAreEqual(TemplateParameterList *New, TemplateParameterList *Old, bool Complain, TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc = SourceLocation()); bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams); /// Called when the parser has parsed a C++ typename /// specifier, e.g., "typename T::type". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param II the identifier we're retrieving (e.g., 'type' in the example). /// \param IdLoc the location of the identifier. TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, const IdentifierInfo &II, SourceLocation IdLoc); /// Called when the parser has parsed a C++ typename /// specifier that ends in a template-id, e.g., /// "typename MetaFun::template apply<T1, T2>". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param TemplateLoc the location of the 'template' keyword, if any. /// \param TemplateName The template name. /// \param TemplateII The identifier used to name the template. /// \param TemplateIILoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). 
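/// Illustrative example (hypothetical identifiers \c Plus, \c result_t) of the
/// dependent names the typename machinery above resolves: 'typename T::type' and
/// 'typename MetaFun::template apply<T1, T2>'.
/// \code
/// #include <type_traits>
/// struct Plus {
///   template <typename T1, typename T2>
///   struct apply { using type = decltype(T1{} + T2{}); };
/// };
/// template <typename MetaFun, typename T1, typename T2>
/// using result_t = typename MetaFun::template apply<T1, T2>::type;
/// static_assert(std::is_same_v<result_t<Plus, int, double>, double>);
/// \endcode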
TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, SourceLocation TemplateLoc, TemplateTy TemplateName, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc, TypeSourceInfo **TSI, bool DeducedTSTContext); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc, bool DeducedTSTContext = true); TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T, SourceLocation Loc, DeclarationName Name); bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS); ExprResult RebuildExprInCurrentInstantiation(Expr *E); bool RebuildTemplateParamsInCurrentInstantiation( TemplateParameterList *Params); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgumentList &Args); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgument *Args, unsigned NumArgs); //===--------------------------------------------------------------------===// // C++ Concepts //===--------------------------------------------------------------------===// Decl *ActOnConceptDefinition( Scope *S, MultiTemplateParamsArg TemplateParameterLists, IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr); RequiresExprBodyDecl * ActOnStartRequiresExpr(SourceLocation RequiresKWLoc, ArrayRef<ParmVarDecl *> LocalParameters, Scope *BodyScope); void ActOnFinishRequiresExpr(); concepts::Requirement *ActOnSimpleRequirement(Expr *E); concepts::Requirement *ActOnTypeRequirement( SourceLocation TypenameKWLoc, CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo *TypeName, TemplateIdAnnotation *TemplateId); concepts::Requirement *ActOnCompoundRequirement(Expr *E, SourceLocation NoexceptLoc); concepts::Requirement * ActOnCompoundRequirement( Expr *E, SourceLocation NoexceptLoc, CXXScopeSpec &SS, TemplateIdAnnotation *TypeConstraint, unsigned Depth); concepts::Requirement *ActOnNestedRequirement(Expr *Constraint); concepts::ExprRequirement * BuildExprRequirement( Expr *E, bool IsSatisfied, SourceLocation NoexceptLoc, concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement); concepts::ExprRequirement * BuildExprRequirement( concepts::Requirement::SubstitutionDiagnostic *ExprSubstDiag, bool IsSatisfied, SourceLocation NoexceptLoc, concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement); concepts::TypeRequirement *BuildTypeRequirement(TypeSourceInfo *Type); concepts::TypeRequirement * BuildTypeRequirement( concepts::Requirement::SubstitutionDiagnostic *SubstDiag); concepts::NestedRequirement *BuildNestedRequirement(Expr *E); concepts::NestedRequirement * BuildNestedRequirement( concepts::Requirement::SubstitutionDiagnostic *SubstDiag); ExprResult ActOnRequiresExpr(SourceLocation RequiresKWLoc, RequiresExprBodyDecl *Body, ArrayRef<ParmVarDecl *> LocalParameters, ArrayRef<concepts::Requirement *> Requirements, SourceLocation ClosingBraceLoc); //===--------------------------------------------------------------------===// // C++ Variadic Templates (C++0x [temp.variadic]) //===--------------------------------------------------------------------===// /// Determine whether an unexpanded parameter pack 
might be permitted in this /// location. Useful for error recovery. bool isUnexpandedParameterPackPermitted(); /// The context in which an unexpanded parameter pack is /// being diagnosed. /// /// Note that the values of this enumeration line up with the first /// argument to the \c err_unexpanded_parameter_pack diagnostic. enum UnexpandedParameterPackContext { /// An arbitrary expression. UPPC_Expression = 0, /// The base type of a class type. UPPC_BaseType, /// The type of an arbitrary declaration. UPPC_DeclarationType, /// The type of a data member. UPPC_DataMemberType, /// The size of a bit-field. UPPC_BitFieldWidth, /// The expression in a static assertion. UPPC_StaticAssertExpression, /// The fixed underlying type of an enumeration. UPPC_FixedUnderlyingType, /// The enumerator value. UPPC_EnumeratorValue, /// A using declaration. UPPC_UsingDeclaration, /// A friend declaration. UPPC_FriendDeclaration, /// A declaration qualifier. UPPC_DeclarationQualifier, /// An initializer. UPPC_Initializer, /// A default argument. UPPC_DefaultArgument, /// The type of a non-type template parameter. UPPC_NonTypeTemplateParameterType, /// The type of an exception. UPPC_ExceptionType, /// Partial specialization. UPPC_PartialSpecialization, /// Microsoft __if_exists. UPPC_IfExists, /// Microsoft __if_not_exists. UPPC_IfNotExists, /// Lambda expression. UPPC_Lambda, /// Block expression. UPPC_Block, /// A type constraint. UPPC_TypeConstraint, // A requirement in a requires-expression. UPPC_Requirement, // A requires-clause. UPPC_RequiresClause, }; /// Diagnose unexpanded parameter packs. /// /// \param Loc The location at which we should emit the diagnostic. /// /// \param UPPC The context in which we are diagnosing unexpanded /// parameter packs. /// /// \param Unexpanded the set of unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc, UnexpandedParameterPackContext UPPC, ArrayRef<UnexpandedParameterPack> Unexpanded); /// If the given type contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The source location where a diagnostc should be emitted. /// /// \param T The type that is being checked for unexpanded parameter /// packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T, UnexpandedParameterPackContext UPPC); /// If the given expression contains an unexpanded parameter /// pack, diagnose the error. /// /// \param E The expression that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(Expr *E, UnexpandedParameterPackContext UPPC = UPPC_Expression); /// If the given requirees-expression contains an unexpanded reference to one /// of its own parameter packs, diagnose the error. /// /// \param RE The requiress-expression that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPackInRequiresExpr(RequiresExpr *RE); /// If the given nested-name-specifier contains an unexpanded /// parameter pack, diagnose the error. /// /// \param SS The nested-name-specifier that is being checked for /// unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. 
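/// Illustrative example (hypothetical identifiers) of a parameter pack that is
/// correctly expanded; leaving it unexpanded in one of the contexts enumerated
/// above (e.g. the base-clause, UPPC_BaseType) is what these routines diagnose.
/// \code
/// template <typename... Ts>
/// struct Tuple : Ts... {  // pack expanded in the base-clause
///   static constexpr unsigned size = sizeof...(Ts);
/// };
/// // Writing 'struct Tuple : Ts {' (no '...') would leave the pack unexpanded.
/// struct A {}; struct B {};
/// static_assert(Tuple<A, B>::size == 2);
/// \endcode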
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS, UnexpandedParameterPackContext UPPC); /// If the given name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param NameInfo The name (with source location information) that /// is being checked for unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo, UnexpandedParameterPackContext UPPC); /// If the given template name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The location of the template name. /// /// \param Template The template name that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TemplateName Template, UnexpandedParameterPackContext UPPC); /// If the given template argument contains an unexpanded parameter /// pack, diagnose the error. /// /// \param Arg The template argument that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg, UnexpandedParameterPackContext UPPC); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgument Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param T The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(QualType T, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param TL The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TypeLoc TL, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// nested-name-specifier. /// /// \param NNS The nested-name-specifier that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// name. /// /// \param NameInfo The name that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Invoked when parsing a template argument followed by an /// ellipsis, which creates a pack expansion. /// /// \param Arg The template argument preceding the ellipsis, which /// may already be invalid. /// /// \param EllipsisLoc The location of the ellipsis. ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg, SourceLocation EllipsisLoc); /// Invoked when parsing a type followed by an ellipsis, which /// creates a pack expansion. 
/// /// \param Type The type preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc); /// Construct a pack expansion type from the pattern of the pack /// expansion. TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Construct a pack expansion type from the pattern of the pack /// expansion. QualType CheckPackExpansion(QualType Pattern, SourceRange PatternRange, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Determine whether we could expand a pack expansion with the /// given set of parameter packs into separate arguments by repeatedly /// transforming the pattern. /// /// \param EllipsisLoc The location of the ellipsis that identifies the /// pack expansion. /// /// \param PatternRange The source range that covers the entire pattern of /// the pack expansion. /// /// \param Unexpanded The set of unexpanded parameter packs within the /// pattern. /// /// \param ShouldExpand Will be set to \c true if the transformer should /// expand the corresponding pack expansions into separate arguments. When /// set, \c NumExpansions must also be set. /// /// \param RetainExpansion Whether the caller should add an unexpanded /// pack expansion after all of the expanded arguments. This is used /// when extending explicitly-specified template argument packs per /// C++0x [temp.arg.explicit]p9. /// /// \param NumExpansions The number of separate arguments that will be in /// the expanded form of the corresponding pack expansion. This is both an /// input and an output parameter, which can be set by the caller if the /// number of expansions is known a priori (e.g., due to a prior substitution) /// and will be set by the callee when the number of expansions is known. /// The callee must set this value when \c ShouldExpand is \c true; it may /// set this value in other cases. /// /// \returns true if an error occurred (e.g., because the parameter packs /// are to be instantiated with arguments of different lengths), false /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions) /// must be set. bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc, SourceRange PatternRange, ArrayRef<UnexpandedParameterPack> Unexpanded, const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand, bool &RetainExpansion, Optional<unsigned> &NumExpansions); /// Determine the number of arguments in the given pack expansion /// type. /// /// This routine assumes that the number of arguments in the expansion is /// consistent across all of the unexpanded parameter packs in its pattern. 
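  ///
  /// A hedged sketch of the typical call pattern (\c S, \c PackExpansionTy and
  /// \c TemplateArgs are assumed locals, and \c ProcessExpansion is a
  /// hypothetical helper, none of them part of this header):
  /// \code
  ///   if (Optional<unsigned> NumArgs =
  ///           S.getNumArgumentsInExpansion(PackExpansionTy, TemplateArgs))
  ///     ProcessExpansion(*NumArgs); // the expansion will produce *NumArgs elements
  /// \endcode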
/// /// Returns an empty Optional if the type can't be expanded. Optional<unsigned> getNumArgumentsInExpansion(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs); /// Determine whether the given declarator contains any unexpanded /// parameter packs. /// /// This routine is used by the parser to disambiguate function declarators /// with an ellipsis prior to the ')', e.g., /// /// \code /// void f(T...); /// \endcode /// /// To determine whether we have an (unnamed) function parameter pack or /// a variadic function. /// /// \returns true if the declarator contains any unexpanded parameter packs, /// false otherwise. bool containsUnexpandedParameterPacks(Declarator &D); /// Returns the pattern of the pack expansion for a template argument. /// /// \param OrigLoc The template argument to expand. /// /// \param Ellipsis Will be set to the location of the ellipsis. /// /// \param NumExpansions Will be set to the number of expansions that will /// be generated from this pack expansion, if known a priori. TemplateArgumentLoc getTemplateArgumentPackExpansionPattern( TemplateArgumentLoc OrigLoc, SourceLocation &Ellipsis, Optional<unsigned> &NumExpansions) const; /// Given a template argument that contains an unexpanded parameter pack, but /// which has already been substituted, attempt to determine the number of /// elements that will be produced once this argument is fully-expanded. /// /// This is intended for use when transforming 'sizeof...(Arg)' in order to /// avoid actually expanding the pack where possible. Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg); //===--------------------------------------------------------------------===// // C++ Template Argument Deduction (C++ [temp.deduct]) //===--------------------------------------------------------------------===// /// Adjust the type \p ArgFunctionType to match the calling convention, /// noreturn, and optionally the exception specification of \p FunctionType. /// Deduction often wants to ignore these properties when matching function /// types. QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType, bool AdjustExceptionSpec = false); /// Describes the result of template argument deduction. /// /// The TemplateDeductionResult enumeration describes the result of /// template argument deduction, as returned from /// DeduceTemplateArguments(). The separate TemplateDeductionInfo /// structure provides additional information about the results of /// template argument deduction, e.g., the deduced template argument /// list (if successful) or the specific template parameters or /// deduced arguments that were involved in the failure. enum TemplateDeductionResult { /// Template argument deduction was successful. TDK_Success = 0, /// The declaration was invalid; do nothing. TDK_Invalid, /// Template argument deduction exceeded the maximum template /// instantiation depth (which has already been diagnosed). TDK_InstantiationDepth, /// Template argument deduction did not deduce a value /// for every template parameter. TDK_Incomplete, /// Template argument deduction did not deduce a value for every /// expansion of an expanded template parameter pack. TDK_IncompletePack, /// Template argument deduction produced inconsistent /// deduced values for the given template parameter. 
  TDK_Inconsistent,
  /// Template argument deduction failed due to inconsistent
  /// cv-qualifiers on a template parameter type that would
  /// otherwise be deduced, e.g., we tried to deduce T in "const T"
  /// but were given a non-const "X".
  TDK_Underqualified,
  /// Substitution of the deduced template argument values
  /// resulted in an error.
  TDK_SubstitutionFailure,
  /// After substituting deduced template arguments, a dependent
  /// parameter type did not match the corresponding argument.
  TDK_DeducedMismatch,
  /// After substituting deduced template arguments, an element of
  /// a dependent parameter type did not match the corresponding element
  /// of the corresponding argument (when deducing from an initializer list).
  TDK_DeducedMismatchNested,
  /// A non-dependent component of the parameter did not match the
  /// corresponding component of the argument.
  TDK_NonDeducedMismatch,
  /// When performing template argument deduction for a function
  /// template, there were too many call arguments.
  TDK_TooManyArguments,
  /// When performing template argument deduction for a function
  /// template, there were too few call arguments.
  TDK_TooFewArguments,
  /// The explicitly-specified template arguments were not valid
  /// template arguments for the given template.
  TDK_InvalidExplicitArguments,
  /// Checking non-dependent argument conversions failed.
  TDK_NonDependentConversionFailure,
  /// The deduced arguments did not satisfy the constraints associated
  /// with the template.
  TDK_ConstraintsNotSatisfied,
  /// Deduction failed; that's all we know.
  TDK_MiscellaneousDeductionFailure,
  /// CUDA Target attributes do not match.
  TDK_CUDATargetMismatch
  };

  TemplateDeductionResult
  DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
                          const TemplateArgumentList &TemplateArgs,
                          sema::TemplateDeductionInfo &Info);

  TemplateDeductionResult
  DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
                          const TemplateArgumentList &TemplateArgs,
                          sema::TemplateDeductionInfo &Info);

  TemplateDeductionResult SubstituteExplicitTemplateArguments(
      FunctionTemplateDecl *FunctionTemplate,
      TemplateArgumentListInfo &ExplicitTemplateArgs,
      SmallVectorImpl<DeducedTemplateArgument> &Deduced,
      SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
      sema::TemplateDeductionInfo &Info);

  /// \brief A function argument from which we performed template argument
  /// deduction for a call.
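  ///
  /// Hedged sketch of how a deduction routine might record one of these
  /// (\c OriginalCallArgs, \c ParamType, \c ArgIdx and \c Args are assumed
  /// locals of the caller, not part of this header):
  /// \code
  ///   OriginalCallArgs.push_back(
  ///       Sema::OriginalCallArg(ParamType, /*DecomposedParam=*/false, ArgIdx,
  ///                             Args[ArgIdx]->getType()));
  /// \endcode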
struct OriginalCallArg { OriginalCallArg(QualType OriginalParamType, bool DecomposedParam, unsigned ArgIdx, QualType OriginalArgType) : OriginalParamType(OriginalParamType), DecomposedParam(DecomposedParam), ArgIdx(ArgIdx), OriginalArgType(OriginalArgType) {} QualType OriginalParamType; bool DecomposedParam; unsigned ArgIdx; QualType OriginalArgType; }; TemplateDeductionResult FinishTemplateArgumentDeduction( FunctionTemplateDecl *FunctionTemplate, SmallVectorImpl<DeducedTemplateArgument> &Deduced, unsigned NumExplicitlySpecified, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr, bool PartialOverloading = false, llvm::function_ref<bool()> CheckNonDependent = []{ return false; }); TemplateDeductionResult DeduceTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool PartialOverloading, llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ArgFunctionType, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, QualType ToType, CXXConversionDecl *&Specialization, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); /// Substitute Replacement for \p auto in \p TypeWithAuto QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement); /// Substitute Replacement for auto in TypeWithAuto TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto, QualType Replacement); /// Completely replace the \c auto in \p TypeWithAuto by /// \p Replacement. This does not retain any \c auto type sugar. QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement); TypeSourceInfo *ReplaceAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto, QualType Replacement); /// Result type of DeduceAutoType. enum DeduceAutoResult { DAR_Succeeded, DAR_Failed, DAR_FailedAlreadyDiagnosed }; DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None, bool IgnoreConstraints = false); DeduceAutoResult DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None, bool IgnoreConstraints = false); void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init); bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc, bool Diagnose = true); /// Declare implicit deduction guides for a class template if we've /// not already done so. 
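  ///
  /// Illustrative sketch only (\c S, \c ClassTemplate and \c Loc are assumed,
  /// not part of this header): the guides are declared before class template
  /// argument deduction is attempted against the template.
  /// \code
  ///   S.DeclareImplicitDeductionGuides(ClassTemplate, Loc);
  /// \endcode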
void DeclareImplicitDeductionGuides(TemplateDecl *Template, SourceLocation Loc); QualType DeduceTemplateSpecializationFromInitializer( TypeSourceInfo *TInfo, const InitializedEntity &Entity, const InitializationKind &Kind, MultiExprArg Init); QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name, QualType Type, TypeSourceInfo *TSI, SourceRange Range, bool DirectInit, Expr *Init); TypeLoc getReturnTypeLoc(FunctionDecl *FD) const; bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD, SourceLocation ReturnLoc, Expr *&RetExpr, AutoType *AT); FunctionTemplateDecl *getMoreSpecializedTemplate( FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc, TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1, unsigned NumCallArguments2, bool Reversed = false); UnresolvedSetIterator getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd, TemplateSpecCandidateSet &FailedCandidates, SourceLocation Loc, const PartialDiagnostic &NoneDiag, const PartialDiagnostic &AmbigDiag, const PartialDiagnostic &CandidateDiag, bool Complain = true, QualType TargetType = QualType()); ClassTemplatePartialSpecializationDecl * getMoreSpecializedPartialSpecialization( ClassTemplatePartialSpecializationDecl *PS1, ClassTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization( VarTemplatePartialSpecializationDecl *PS1, VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); bool isTemplateTemplateParameterAtLeastAsSpecializedAs( TemplateParameterList *PParam, TemplateDecl *AArg, SourceLocation Loc); void MarkUsedTemplateParameters(const Expr *E, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkDeducedTemplateParameters( const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced) { return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced); } static void MarkDeducedTemplateParameters(ASTContext &Ctx, const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced); //===--------------------------------------------------------------------===// // C++ Template Instantiation // MultiLevelTemplateArgumentList getTemplateInstantiationArgs(NamedDecl *D, const TemplateArgumentList *Innermost = nullptr, bool RelativeToPrimary = false, const FunctionDecl *Pattern = nullptr); /// A context in which code is being synthesized (where a source location /// alone is not sufficient to identify the context). This covers template /// instantiation and various forms of implicitly-generated functions. struct CodeSynthesisContext { /// The kind of template instantiation we are performing enum SynthesisKind { /// We are instantiating a template declaration. The entity is /// the declaration we're instantiating (e.g., a CXXRecordDecl). TemplateInstantiation, /// We are instantiating a default argument for a template /// parameter. The Entity is the template parameter whose argument is /// being instantiated, the Template is the template, and the /// TemplateArgs/NumTemplateArguments provide the template arguments as /// specified. 
DefaultTemplateArgumentInstantiation, /// We are instantiating a default argument for a function. /// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs /// provides the template arguments as specified. DefaultFunctionArgumentInstantiation, /// We are substituting explicit template arguments provided for /// a function template. The entity is a FunctionTemplateDecl. ExplicitTemplateArgumentSubstitution, /// We are substituting template argument determined as part of /// template argument deduction for either a class template /// partial specialization or a function template. The /// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or /// a TemplateDecl. DeducedTemplateArgumentSubstitution, /// We are substituting prior template arguments into a new /// template parameter. The template parameter itself is either a /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl. PriorTemplateArgumentSubstitution, /// We are checking the validity of a default template argument that /// has been used when naming a template-id. DefaultTemplateArgumentChecking, /// We are computing the exception specification for a defaulted special /// member function. ExceptionSpecEvaluation, /// We are instantiating the exception specification for a function /// template which was deferred until it was needed. ExceptionSpecInstantiation, /// We are instantiating a requirement of a requires expression. RequirementInstantiation, /// We are checking the satisfaction of a nested requirement of a requires /// expression. NestedRequirementConstraintsCheck, /// We are declaring an implicit special member function. DeclaringSpecialMember, /// We are declaring an implicit 'operator==' for a defaulted /// 'operator<=>'. DeclaringImplicitEqualityComparison, /// We are defining a synthesized function (such as a defaulted special /// member). DefiningSynthesizedFunction, // We are checking the constraints associated with a constrained entity or // the constraint expression of a concept. This includes the checks that // atomic constraints have the type 'bool' and that they can be constant // evaluated. ConstraintsCheck, // We are substituting template arguments into a constraint expression. ConstraintSubstitution, // We are normalizing a constraint expression. ConstraintNormalization, // We are substituting into the parameter mapping of an atomic constraint // during normalization. ParameterMappingSubstitution, /// We are rewriting a comparison operator in terms of an operator<=>. RewritingOperatorAsSpaceship, /// We are initializing a structured binding. InitializingStructuredBinding, /// We are marking a class as __dllexport. MarkingClassDllexported, /// Added for Template instantiation observation. /// Memoization means we are _not_ instantiating a template because /// it is already instantiated (but we entered a context where we /// would have had to if it was not already instantiated). Memoization } Kind; /// Was the enclosing context a non-instantiation SFINAE context? bool SavedInNonInstantiationSFINAEContext; /// The point of instantiation or synthesis within the source code. SourceLocation PointOfInstantiation; /// The entity that is being synthesized. Decl *Entity; /// The template (or partial specialization) in which we are /// performing the instantiation, for substitutions of prior template /// arguments. NamedDecl *Template; /// The list of template arguments we are substituting, if they /// are not part of the entity. 
const TemplateArgument *TemplateArgs; // FIXME: Wrap this union around more members, or perhaps store the // kind-specific members in the RAII object owning the context. union { /// The number of template arguments in TemplateArgs. unsigned NumTemplateArgs; /// The special member being declared or defined. CXXSpecialMember SpecialMember; }; ArrayRef<TemplateArgument> template_arguments() const { assert(Kind != DeclaringSpecialMember); return {TemplateArgs, NumTemplateArgs}; } /// The template deduction info object associated with the /// substitution or checking of explicit or deduced template arguments. sema::TemplateDeductionInfo *DeductionInfo; /// The source range that covers the construct that cause /// the instantiation, e.g., the template-id that causes a class /// template instantiation. SourceRange InstantiationRange; CodeSynthesisContext() : Kind(TemplateInstantiation), SavedInNonInstantiationSFINAEContext(false), Entity(nullptr), Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0), DeductionInfo(nullptr) {} /// Determines whether this template is an actual instantiation /// that should be counted toward the maximum instantiation depth. bool isInstantiationRecord() const; }; /// List of active code synthesis contexts. /// /// This vector is treated as a stack. As synthesis of one entity requires /// synthesis of another, additional contexts are pushed onto the stack. SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts; /// Specializations whose definitions are currently being instantiated. llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations; /// Non-dependent types used in templates that have already been instantiated /// by some template instantiation. llvm::DenseSet<QualType> InstantiatedNonDependentTypes; /// Extra modules inspected when performing a lookup during a template /// instantiation. Computed lazily. SmallVector<Module*, 16> CodeSynthesisContextLookupModules; /// Cache of additional modules that should be used for name lookup /// within the current template instantiation. Computed lazily; use /// getLookupModules() to get a complete set. llvm::DenseSet<Module*> LookupModulesCache; /// Get the set of additional modules that should be checked during /// name lookup. A module and its imports become visible when instanting a /// template defined within it. llvm::DenseSet<Module*> &getLookupModules(); /// Map from the most recent declaration of a namespace to the most /// recent visible declaration of that namespace. llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache; /// Whether we are in a SFINAE context that is not associated with /// template instantiation. /// /// This is used when setting up a SFINAE trap (\c see SFINAETrap) outside /// of a template instantiation or template argument deduction. bool InNonInstantiationSFINAEContext; /// The number of \p CodeSynthesisContexts that are not template /// instantiations and, therefore, should not be counted as part of the /// instantiation depth. /// /// When the instantiation depth reaches the user-configurable limit /// \p LangOptions::InstantiationDepth we will abort instantiation. // FIXME: Should we have a similar limit for other forms of synthesis? unsigned NonInstantiationEntries; /// The depth of the context stack at the point when the most recent /// error or warning was produced. /// /// This value is used to suppress printing of redundant context stacks /// when there are multiple errors or warnings in the same instantiation. // FIXME: Does this belong in Sema? 
It's tough to implement it anywhere else. unsigned LastEmittedCodeSynthesisContextDepth = 0; /// The template instantiation callbacks to trace or track /// instantiations (objects can be chained). /// /// This callbacks is used to print, trace or track template /// instantiations as they are being constructed. std::vector<std::unique_ptr<TemplateInstantiationCallback>> TemplateInstCallbacks; /// The current index into pack expansion arguments that will be /// used for substitution of parameter packs. /// /// The pack expansion index will be -1 to indicate that parameter packs /// should be instantiated as themselves. Otherwise, the index specifies /// which argument within the parameter pack will be used for substitution. int ArgumentPackSubstitutionIndex; /// RAII object used to change the argument pack substitution index /// within a \c Sema object. /// /// See \c ArgumentPackSubstitutionIndex for more information. class ArgumentPackSubstitutionIndexRAII { Sema &Self; int OldSubstitutionIndex; public: ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex) : Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) { Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex; } ~ArgumentPackSubstitutionIndexRAII() { Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex; } }; friend class ArgumentPackSubstitutionRAII; /// For each declaration that involved template argument deduction, the /// set of diagnostics that were suppressed during that template argument /// deduction. /// /// FIXME: Serialize this structure to the AST file. typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> > SuppressedDiagnosticsMap; SuppressedDiagnosticsMap SuppressedDiagnostics; /// A stack object to be created when performing template /// instantiation. /// /// Construction of an object of type \c InstantiatingTemplate /// pushes the current instantiation onto the stack of active /// instantiations. If the size of this stack exceeds the maximum /// number of recursive template instantiations, construction /// produces an error and evaluates true. /// /// Destruction of this object will pop the named instantiation off /// the stack. struct InstantiatingTemplate { /// Note that we are instantiating a class template, /// function template, variable template, alias template, /// or a member thereof. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, Decl *Entity, SourceRange InstantiationRange = SourceRange()); struct ExceptionSpecification {}; /// Note that we are instantiating an exception specification /// of a function template. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionDecl *Entity, ExceptionSpecification, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument in a /// template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateParameter Param, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting either explicitly-specified or /// deduced template arguments during function template argument deduction. 
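    ///
    /// Typical RAII usage, shown as a hedged sketch written as if inside a
    /// Sema member function (\c Loc, \c FunctionTemplate, \c DeducedArgs and
    /// \c Info are assumed locals, not part of this header):
    /// \code
    ///   InstantiatingTemplate Inst(
    ///       *this, Loc, FunctionTemplate, DeducedArgs,
    ///       CodeSynthesisContext::DeducedTemplateArgumentSubstitution, Info);
    ///   if (Inst.isInvalid()) // the instantiation depth limit was reached
    ///     return TDK_InstantiationDepth;
    ///   // ... perform the substitution; the context is popped automatically.
    /// \endcode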
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionTemplateDecl *FunctionTemplate, ArrayRef<TemplateArgument> TemplateArgs, CodeSynthesisContext::SynthesisKind Kind, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template declaration. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ClassTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a variable template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, VarTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument for a function /// parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ParmVarDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting prior template arguments into a /// non-type parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, NonTypeTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are substituting prior template arguments into a /// template template parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, TemplateTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are checking the default template argument /// against the template parameter for a given template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, NamedDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); struct ConstraintsCheck {}; /// \brief Note that we are checking the constraints associated with some /// constrained entity (a concept declaration or a template with associated /// constraints). InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ConstraintsCheck, NamedDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); struct ConstraintSubstitution {}; /// \brief Note that we are checking a constraint expression associated /// with a template declaration or as part of the satisfaction check of a /// concept. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ConstraintSubstitution, NamedDecl *Template, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange); struct ConstraintNormalization {}; /// \brief Note that we are normalizing a constraint expression. 
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ConstraintNormalization, NamedDecl *Template, SourceRange InstantiationRange); struct ParameterMappingSubstitution {}; /// \brief Note that we are subtituting into the parameter mapping of an /// atomic constraint during constraint normalization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ParameterMappingSubstitution, NamedDecl *Template, SourceRange InstantiationRange); /// \brief Note that we are substituting template arguments into a part of /// a requirement of a requires expression. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, concepts::Requirement *Req, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are checking the satisfaction of the constraint /// expression inside of a nested requirement. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, concepts::NestedRequirement *Req, ConstraintsCheck, SourceRange InstantiationRange = SourceRange()); /// Note that we have finished instantiating this template. void Clear(); ~InstantiatingTemplate() { Clear(); } /// Determines whether we have exceeded the maximum /// recursive template instantiations. bool isInvalid() const { return Invalid; } /// Determine whether we are already instantiating this /// specialization in some surrounding active instantiation. bool isAlreadyInstantiating() const { return AlreadyInstantiating; } private: Sema &SemaRef; bool Invalid; bool AlreadyInstantiating; bool CheckInstantiationDepth(SourceLocation PointOfInstantiation, SourceRange InstantiationRange); InstantiatingTemplate( Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind, SourceLocation PointOfInstantiation, SourceRange InstantiationRange, Decl *Entity, NamedDecl *Template = nullptr, ArrayRef<TemplateArgument> TemplateArgs = None, sema::TemplateDeductionInfo *DeductionInfo = nullptr); InstantiatingTemplate(const InstantiatingTemplate&) = delete; InstantiatingTemplate& operator=(const InstantiatingTemplate&) = delete; }; void pushCodeSynthesisContext(CodeSynthesisContext Ctx); void popCodeSynthesisContext(); /// Determine whether we are currently performing template instantiation. bool inTemplateInstantiation() const { return CodeSynthesisContexts.size() > NonInstantiationEntries; } void PrintContextStack() { if (!CodeSynthesisContexts.empty() && CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) { PrintInstantiationStack(); LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size(); } if (PragmaAttributeCurrentTargetDecl) PrintPragmaAttributeInstantiationPoint(); } void PrintInstantiationStack(); void PrintPragmaAttributeInstantiationPoint(); /// Determines whether we are currently in a context where /// template argument substitution failures are not considered /// errors. /// /// \returns An empty \c Optional if we're not in a SFINAE context. /// Otherwise, contains a pointer that, if non-NULL, contains the nearest /// template-deduction context object, which can be used to capture /// diagnostics that will be suppressed. Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const; /// Determines whether we are currently in a context that /// is not evaluated as per C++ [expr] p5. 
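  ///
  /// Hedged usage sketch (\c S is an assumed \c Sema reference): inside, e.g.,
  /// the operand of \c sizeof no code will be generated, so bookkeeping that
  /// only matters for evaluated operands can be skipped.
  /// \code
  ///   if (S.isUnevaluatedContext())
  ///     return; // evaluation-only work is unnecessary here
  /// \endcode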
bool isUnevaluatedContext() const { assert(!ExprEvalContexts.empty() && "Must be in an expression evaluation context"); return ExprEvalContexts.back().isUnevaluated(); } /// RAII class used to determine whether SFINAE has /// trapped any errors that occur during template argument /// deduction. class SFINAETrap { Sema &SemaRef; unsigned PrevSFINAEErrors; bool PrevInNonInstantiationSFINAEContext; bool PrevAccessCheckingSFINAE; bool PrevLastDiagnosticIgnored; public: explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false) : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors), PrevInNonInstantiationSFINAEContext( SemaRef.InNonInstantiationSFINAEContext), PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE), PrevLastDiagnosticIgnored( SemaRef.getDiagnostics().isLastDiagnosticIgnored()) { if (!SemaRef.isSFINAEContext()) SemaRef.InNonInstantiationSFINAEContext = true; SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE; } ~SFINAETrap() { SemaRef.NumSFINAEErrors = PrevSFINAEErrors; SemaRef.InNonInstantiationSFINAEContext = PrevInNonInstantiationSFINAEContext; SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE; SemaRef.getDiagnostics().setLastDiagnosticIgnored( PrevLastDiagnosticIgnored); } /// Determine whether any SFINAE errors have been trapped. bool hasErrorOccurred() const { return SemaRef.NumSFINAEErrors > PrevSFINAEErrors; } }; /// RAII class used to indicate that we are performing provisional /// semantic analysis to determine the validity of a construct, so /// typo-correction and diagnostics in the immediate context (not within /// implicitly-instantiated templates) should be suppressed. class TentativeAnalysisScope { Sema &SemaRef; // FIXME: Using a SFINAETrap for this is a hack. SFINAETrap Trap; bool PrevDisableTypoCorrection; public: explicit TentativeAnalysisScope(Sema &SemaRef) : SemaRef(SemaRef), Trap(SemaRef, true), PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) { SemaRef.DisableTypoCorrection = true; } ~TentativeAnalysisScope() { SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection; } }; /// The current instantiation scope used to store local /// variables. LocalInstantiationScope *CurrentInstantiationScope; /// Tracks whether we are in a context where typo correction is /// disabled. bool DisableTypoCorrection; /// The number of typos corrected by CorrectTypo. unsigned TyposCorrected; typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet; typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations; /// A cache containing identifiers for which typo correction failed and /// their locations, so that repeated attempts to correct an identifier in a /// given location are ignored if typo correction already failed for it. IdentifierSourceLocations TypoCorrectionFailures; /// Worker object for performing CFG-based warnings. sema::AnalysisBasedWarnings AnalysisWarnings; threadSafety::BeforeSet *ThreadSafetyDeclCache; /// An entity for which implicit template instantiation is required. /// /// The source location associated with the declaration is the first place in /// the source code where the declaration was "used". It is not necessarily /// the point of instantiation (which will be either before or after the /// namespace-scope declaration that triggered this implicit instantiation), /// However, it is the location that diagnostics should generally refer to, /// because users will need to know what code triggered the instantiation. 
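  ///
  /// Sketch (illustrative; \c Func and \c Loc are assumed): an entry is queued
  /// as
  /// \code
  ///   PendingInstantiations.push_back(std::make_pair(Func, Loc));
  /// \endcode
  /// and later drained by PerformPendingInstantiations().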
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation; /// The queue of implicit template instantiations that are required /// but have not yet been performed. std::deque<PendingImplicitInstantiation> PendingInstantiations; /// Queue of implicit template instantiations that cannot be performed /// eagerly. SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations; class GlobalEagerInstantiationScope { public: GlobalEagerInstantiationScope(Sema &S, bool Enabled) : S(S), Enabled(Enabled) { if (!Enabled) return; SavedPendingInstantiations.swap(S.PendingInstantiations); SavedVTableUses.swap(S.VTableUses); } void perform() { if (Enabled) { S.DefineUsedVTables(); S.PerformPendingInstantiations(); } } ~GlobalEagerInstantiationScope() { if (!Enabled) return; // Restore the set of pending vtables. assert(S.VTableUses.empty() && "VTableUses should be empty before it is discarded."); S.VTableUses.swap(SavedVTableUses); // Restore the set of pending implicit instantiations. if (S.TUKind != TU_Prefix || !S.LangOpts.PCHInstantiateTemplates) { assert(S.PendingInstantiations.empty() && "PendingInstantiations should be empty before it is discarded."); S.PendingInstantiations.swap(SavedPendingInstantiations); } else { // Template instantiations in the PCH may be delayed until the TU. S.PendingInstantiations.swap(SavedPendingInstantiations); S.PendingInstantiations.insert(S.PendingInstantiations.end(), SavedPendingInstantiations.begin(), SavedPendingInstantiations.end()); } } private: Sema &S; SmallVector<VTableUse, 16> SavedVTableUses; std::deque<PendingImplicitInstantiation> SavedPendingInstantiations; bool Enabled; }; /// The queue of implicit template instantiations that are required /// and must be performed within the current local scope. /// /// This queue is only used for member functions of local classes in /// templates, which must be instantiated in the same scope as their /// enclosing function, so that they can reference function-local /// types, static variables, enumerators, etc. std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations; class LocalEagerInstantiationScope { public: LocalEagerInstantiationScope(Sema &S) : S(S) { SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); } ~LocalEagerInstantiationScope() { assert(S.PendingLocalImplicitInstantiations.empty() && "there shouldn't be any pending local implicit instantiations"); SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } private: Sema &S; std::deque<PendingImplicitInstantiation> SavedPendingLocalImplicitInstantiations; }; /// A helper class for building up ExtParameterInfos. class ExtParameterInfoBuilder { SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos; bool HasInteresting = false; public: /// Set the ExtParameterInfo for the parameter at the given index, /// void set(unsigned index, FunctionProtoType::ExtParameterInfo info) { assert(Infos.size() <= index); Infos.resize(index); Infos.push_back(info); if (!HasInteresting) HasInteresting = (info != FunctionProtoType::ExtParameterInfo()); } /// Return a pointer (suitable for setting in an ExtProtoInfo) to the /// ExtParameterInfo array we've built up. 
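    ///
    /// Sketch of the intended build-then-query pattern (illustrative; the
    /// surrounding substitution loop, \c Proto, \c NewIdx, \c OldIdx,
    /// \c NumParams and the FunctionProtoType::ExtProtoInfo \c EPI are
    /// assumed, not part of this header):
    /// \code
    ///   Sema::ExtParameterInfoBuilder ParamInfos;
    ///   ParamInfos.set(NewIdx, Proto->getExtParameterInfo(OldIdx));
    ///   // ... call set() for each parameter whose info is "interesting" ...
    ///   EPI.ExtParameterInfos = ParamInfos.getPointerOrNull(NumParams);
    /// \endcode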
const FunctionProtoType::ExtParameterInfo * getPointerOrNull(unsigned numParams) { if (!HasInteresting) return nullptr; Infos.resize(numParams); return Infos.data(); } }; void PerformPendingInstantiations(bool LocalOnly = false); TypeSourceInfo *SubstType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, bool AllowDeducedTST = false); QualType SubstType(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstType(TypeLoc TL, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, CXXRecordDecl *ThisContext, Qualifiers ThisTypeQuals); void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto, const MultiLevelTemplateArgumentList &Args); bool SubstExceptionSpec(SourceLocation Loc, FunctionProtoType::ExceptionSpecInfo &ESI, SmallVectorImpl<QualType> &ExceptionStorage, const MultiLevelTemplateArgumentList &Args); ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, int indexAdjustment, Optional<unsigned> NumExpansions, bool ExpectParameterPack); bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params, const FunctionProtoType::ExtParameterInfo *ExtParamInfos, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<QualType> &ParamTypes, SmallVectorImpl<ParmVarDecl *> *OutParams, ExtParameterInfoBuilder &ParamInfos); ExprResult SubstExpr(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs); /// Substitute the given template arguments into a list of /// expressions, expanding pack expansions if required. /// /// \param Exprs The list of expressions to substitute into. /// /// \param IsCall Whether this is some form of call, in which case /// default arguments will be dropped. /// /// \param TemplateArgs The set of template arguments to substitute. /// /// \param Outputs Will receive all of the substituted arguments. /// /// \returns true if an error occurred, false otherwise. bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<Expr *> &Outputs); StmtResult SubstStmt(Stmt *S, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateParameterList * SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); bool SubstTemplateArguments(ArrayRef<TemplateArgumentLoc> Args, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateArgumentListInfo &Outputs); Decl *SubstDecl(Decl *D, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); /// Substitute the name and return type of a defaulted 'operator<=>' to form /// an implicit 'operator=='. 
FunctionDecl *SubstSpaceshipAsEqualEqual(CXXRecordDecl *RD, FunctionDecl *Spaceship); ExprResult SubstInitializer(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs, bool CXXDirectInit); bool SubstBaseSpecifiers(CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateClass(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK, bool Complain = true); bool InstantiateEnum(SourceLocation PointOfInstantiation, EnumDecl *Instantiation, EnumDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); bool InstantiateInClassInitializer( SourceLocation PointOfInstantiation, FieldDecl *Instantiation, FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); struct LateInstantiatedAttribute { const Attr *TmplAttr; LocalInstantiationScope *Scope; Decl *NewDecl; LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S, Decl *D) : TmplAttr(A), Scope(S), NewDecl(D) { } }; typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec; void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); void InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); void InstantiateDefaultCtorDefaultArgs(CXXConstructorDecl *Ctor); bool usesPartialOrExplicitSpecialization( SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec); bool InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK, bool Complain = true); void InstantiateClassMembers(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); void InstantiateClassTemplateSpecializationMembers( SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK); NestedNameSpecifierLoc SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS, const MultiLevelTemplateArgumentList &TemplateArgs); DeclarationNameInfo SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateName SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name, SourceLocation Loc, const MultiLevelTemplateArgumentList &TemplateArgs); bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs, TemplateArgumentListInfo &Result, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateDefaultArgument(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); void InstantiateExceptionSpec(SourceLocation PointOfInstantiation, FunctionDecl *Function); bool CheckInstantiatedFunctionTemplateConstraints( SourceLocation PointOfInstantiation, FunctionDecl *Decl, ArrayRef<TemplateArgument> TemplateArgs, ConstraintSatisfaction &Satisfaction); FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD, const TemplateArgumentList *Args, SourceLocation Loc); void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation, FunctionDecl *Function, bool Recursive = false, bool 
DefinitionRequired = false, bool AtEndOfTU = false); VarTemplateSpecializationDecl *BuildVarTemplateInstantiation( VarTemplateDecl *VarTemplate, VarDecl *FromVar, const TemplateArgumentList &TemplateArgList, const TemplateArgumentListInfo &TemplateArgsInfo, SmallVectorImpl<TemplateArgument> &Converted, SourceLocation PointOfInstantiation, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *StartingScope = nullptr); VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl( VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl, const MultiLevelTemplateArgumentList &TemplateArgs); void BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs, LateInstantiatedAttrVec *LateAttrs, DeclContext *Owner, LocalInstantiationScope *StartingScope, bool InstantiatingVarTemplate = false, VarTemplateSpecializationDecl *PrevVTSD = nullptr); void InstantiateVariableInitializer( VarDecl *Var, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateVariableDefinition(SourceLocation PointOfInstantiation, VarDecl *Var, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); void InstantiateMemInitializers(CXXConstructorDecl *New, const CXXConstructorDecl *Tmpl, const MultiLevelTemplateArgumentList &TemplateArgs); NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, bool FindingInstantiatedContext = false); DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC, const MultiLevelTemplateArgumentList &TemplateArgs); // Objective-C declarations. enum ObjCContainerKind { OCK_None = -1, OCK_Interface = 0, OCK_Protocol, OCK_Category, OCK_ClassExtension, OCK_Implementation, OCK_CategoryImplementation }; ObjCContainerKind getObjCContainerKind() const; DeclResult actOnObjCTypeParam(Scope *S, ObjCTypeParamVariance variance, SourceLocation varianceLoc, unsigned index, IdentifierInfo *paramName, SourceLocation paramLoc, SourceLocation colonLoc, ParsedType typeBound); ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc, ArrayRef<Decl *> typeParams, SourceLocation rAngleLoc); void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList); Decl *ActOnStartClassInterface( Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); void ActOnSuperClassOfClassInterface(Scope *S, SourceLocation AtInterfaceLoc, ObjCInterfaceDecl *IDecl, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange); void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs, SmallVectorImpl<SourceLocation> &ProtocolLocs, IdentifierInfo *SuperName, SourceLocation SuperLoc); Decl *ActOnCompatibilityAlias( SourceLocation AtCompatibilityAliasLoc, IdentifierInfo *AliasName, SourceLocation AliasLocation, IdentifierInfo *ClassName, SourceLocation ClassLocation); bool CheckForwardProtocolDeclarationForCircularDependency( IdentifierInfo *PName, SourceLocation &PLoc, SourceLocation PrevLoc, const ObjCList<ObjCProtocolDecl> &PList); Decl 
*ActOnStartProtocolInterface( SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName, SourceLocation ProtocolLoc, Decl *const *ProtoRefNames, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartCategoryInterface( SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *CategoryName, SourceLocation CategoryLoc, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperClassname, SourceLocation SuperClassLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *CatName, SourceLocation CatLoc, const ParsedAttributesView &AttrList); DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl, ArrayRef<Decl *> Decls); DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc, IdentifierInfo **IdentList, SourceLocation *IdentLocs, ArrayRef<ObjCTypeParamList *> TypeParamLists, unsigned NumElts); DeclGroupPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc, ArrayRef<IdentifierLocPair> IdentList, const ParsedAttributesView &attrList); void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer, ArrayRef<IdentifierLocPair> ProtocolId, SmallVectorImpl<Decl *> &Protocols); void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId, SourceLocation ProtocolLoc, IdentifierInfo *TypeArgId, SourceLocation TypeArgLoc, bool SelectProtocolFirst = false); /// Given a list of identifiers (and their locations), resolve the /// names to either Objective-C protocol qualifiers or type /// arguments, as appropriate. void actOnObjCTypeArgsOrProtocolQualifiers( Scope *S, ParsedType baseType, SourceLocation lAngleLoc, ArrayRef<IdentifierInfo *> identifiers, ArrayRef<SourceLocation> identifierLocs, SourceLocation rAngleLoc, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SourceLocation &protocolRAngleLoc, bool warnOnIncompleteProtocols); /// Build a an Objective-C protocol-qualified 'id' type where no /// base type was specified. TypeResult actOnObjCProtocolQualifierType( SourceLocation lAngleLoc, ArrayRef<Decl *> protocols, ArrayRef<SourceLocation> protocolLocs, SourceLocation rAngleLoc); /// Build a specialized and/or protocol-qualified Objective-C type. TypeResult actOnObjCTypeArgsAndProtocolQualifiers( Scope *S, SourceLocation Loc, ParsedType BaseType, SourceLocation TypeArgsLAngleLoc, ArrayRef<ParsedType> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<Decl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc); /// Build an Objective-C type parameter type. QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Build an Objective-C object pointer type. 
QualType BuildObjCObjectType(QualType BaseType, SourceLocation Loc, SourceLocation TypeArgsLAngleLoc, ArrayRef<TypeSourceInfo *> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Ensure attributes are consistent with type. /// \param [in, out] Attributes The attributes to check; they will /// be modified to be consistent with \p PropertyTy. void CheckObjCPropertyAttributes(Decl *PropertyPtrTy, SourceLocation Loc, unsigned &Attributes, bool propertyInPrimaryClass); /// Process the specified property declaration and create decls for the /// setters and getters as needed. /// \param property The property declaration being processed void ProcessPropertyDecl(ObjCPropertyDecl *property); void DiagnosePropertyMismatch(ObjCPropertyDecl *Property, ObjCPropertyDecl *SuperProperty, const IdentifierInfo *Name, bool OverridingProtocolProperty); void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT, ObjCInterfaceDecl *ID); Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods = None, ArrayRef<DeclGroupPtrTy> allTUVars = None); Decl *ActOnProperty(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, ObjCDeclSpec &ODS, Selector GetterSel, Selector SetterSel, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); Decl *ActOnPropertyImplDecl(Scope *S, SourceLocation AtLoc, SourceLocation PropertyLoc, bool ImplKind, IdentifierInfo *PropertyId, IdentifierInfo *PropertyIvar, SourceLocation PropertyIvarLoc, ObjCPropertyQueryKind QueryKind); enum ObjCSpecialMethodKind { OSMK_None, OSMK_Alloc, OSMK_New, OSMK_Copy, OSMK_RetainingInit, OSMK_NonRetainingInit }; struct ObjCArgInfo { IdentifierInfo *Name; SourceLocation NameLoc; // The Type is null if no type was specified, and the DeclSpec is invalid // in this case. ParsedType Type; ObjCDeclSpec DeclSpec; /// ArgAttrs - Attribute list for this argument. ParsedAttributesView ArgAttrs; }; Decl *ActOnMethodDeclaration( Scope *S, SourceLocation BeginLoc, // location of the + or -. SourceLocation EndLoc, // location of the ; or {. tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType, ArrayRef<SourceLocation> SelectorLocs, Selector Sel, // optional arguments. The number of types/arguments is obtained // from the Sel.getNumArgs(). ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind, bool isVariadic, bool MethodDefinition); ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel, const ObjCObjectPointerType *OPT, bool IsInstance); ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty, bool IsInstance); bool CheckARCMethodDecl(ObjCMethodDecl *method); bool inferObjCARCLifetime(ValueDecl *decl); void deduceOpenCLAddressSpace(ValueDecl *decl); ExprResult HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT, Expr *BaseExpr, SourceLocation OpLoc, DeclarationName MemberName, SourceLocation MemberLoc, SourceLocation SuperLoc, QualType SuperType, bool Super); ExprResult ActOnClassPropertyRefExpr(IdentifierInfo &receiverName, IdentifierInfo &propertyName, SourceLocation receiverNameLoc, SourceLocation propertyNameLoc); ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc); /// Describes the kind of message expression indicated by a message /// send that starts with an identifier. 
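  ///
  /// A hedged sketch of how a caller might branch on the result (\c SemaRef,
  /// \c CurScope, \c Name and \c NameLoc are assumed; they are not part of
  /// this header):
  /// \code
  ///   ParsedType ReceiverType;
  ///   switch (SemaRef.getObjCMessageKind(CurScope, Name, NameLoc,
  ///                                      /*IsSuper=*/false,
  ///                                      /*HasTrailingDot=*/false,
  ///                                      ReceiverType)) {
  ///   case Sema::ObjCSuperMessage:    /* build a 'super' message send */     break;
  ///   case Sema::ObjCInstanceMessage: /* Name named a value; instance send */ break;
  ///   case Sema::ObjCClassMessage:    /* Name named a type; class send */     break;
  ///   }
  /// \endcode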
enum ObjCMessageKind { /// The message is sent to 'super'. ObjCSuperMessage, /// The message is an instance message. ObjCInstanceMessage, /// The message is a class message, and the identifier is a type /// name. ObjCClassMessage }; ObjCMessageKind getObjCMessageKind(Scope *S, IdentifierInfo *Name, SourceLocation NameLoc, bool IsSuper, bool HasTrailingDot, ParsedType &ReceiverType); ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildClassMessageImplicit(QualType ReceiverType, bool isSuperReceiver, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnClassMessage(Scope *S, ParsedType Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildInstanceMessage(Expr *Receiver, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildInstanceMessageImplicit(Expr *Receiver, QualType ReceiverType, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnInstanceMessage(Scope *S, Expr *Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, TypeSourceInfo *TSInfo, Expr *SubExpr); ExprResult ActOnObjCBridgedCast(Scope *S, SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, ParsedType Type, SourceLocation RParenLoc, Expr *SubExpr); void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr); void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr); bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr, CastKind &Kind); bool checkObjCBridgeRelatedComponents(SourceLocation Loc, QualType DestType, QualType SrcType, ObjCInterfaceDecl *&RelatedClass, ObjCMethodDecl *&ClassMethod, ObjCMethodDecl *&InstanceMethod, TypedefNameDecl *&TDNDecl, bool CfToNs, bool Diagnose = true); bool CheckObjCBridgeRelatedConversions(SourceLocation Loc, QualType DestType, QualType SrcType, Expr *&SrcExpr, bool Diagnose = true); bool CheckConversionToObjCLiteral(QualType DstType, Expr *&SrcExpr, bool Diagnose = true); bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall); /// Check whether the given new method is a valid override of the /// given overridden method, and set any properties that should be inherited. void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod, const ObjCMethodDecl *Overridden); /// Describes the compatibility of a result type with its method. 
enum ResultTypeCompatibilityKind { RTC_Compatible, RTC_Incompatible, RTC_Unknown }; void CheckObjCMethodDirectOverrides(ObjCMethodDecl *method, ObjCMethodDecl *overridden); void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod, ObjCInterfaceDecl *CurrentClass, ResultTypeCompatibilityKind RTC); enum PragmaOptionsAlignKind { POAK_Native, // #pragma options align=native POAK_Natural, // #pragma options align=natural POAK_Packed, // #pragma options align=packed POAK_Power, // #pragma options align=power POAK_Mac68k, // #pragma options align=mac68k POAK_Reset // #pragma options align=reset }; /// ActOnPragmaClangSection - Called on well formed \#pragma clang section void ActOnPragmaClangSection(SourceLocation PragmaLoc, PragmaClangSectionAction Action, PragmaClangSectionKind SecKind, StringRef SecName); /// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align. void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind, SourceLocation PragmaLoc); /// ActOnPragmaPack - Called on well formed \#pragma pack(...). void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action, StringRef SlotLabel, Expr *Alignment); enum class PragmaAlignPackDiagnoseKind { NonDefaultStateAtInclude, ChangedStateAtExit }; void DiagnoseNonDefaultPragmaAlignPack(PragmaAlignPackDiagnoseKind Kind, SourceLocation IncludeLoc); void DiagnoseUnterminatedPragmaAlignPack(); /// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off]. void ActOnPragmaMSStruct(PragmaMSStructKind Kind); /// ActOnPragmaMSComment - Called on well formed /// \#pragma comment(kind, "arg"). void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind, StringRef Arg); /// ActOnPragmaMSPointersToMembers - called on well formed \#pragma /// pointers_to_members(representation method[, general purpose /// representation]). void ActOnPragmaMSPointersToMembers( LangOptions::PragmaMSPointersToMembersKind Kind, SourceLocation PragmaLoc); /// Called on well formed \#pragma vtordisp(). void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action, SourceLocation PragmaLoc, MSVtorDispMode Value); enum PragmaSectionKind { PSK_DataSeg, PSK_BSSSeg, PSK_ConstSeg, PSK_CodeSeg, }; bool UnifySection(StringRef SectionName, int SectionFlags, NamedDecl *TheDecl); bool UnifySection(StringRef SectionName, int SectionFlags, SourceLocation PragmaSectionLocation); /// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg. void ActOnPragmaMSSeg(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, StringLiteral *SegmentName, llvm::StringRef PragmaName); /// Called on well formed \#pragma section(). void ActOnPragmaMSSection(SourceLocation PragmaLocation, int SectionFlags, StringLiteral *SegmentName); /// Called on well-formed \#pragma init_seg(). void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation, StringLiteral *SegmentName); /// Called on #pragma clang __debug dump II void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II); /// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name, StringRef Value); /// Are precise floating point semantics currently enabled? 
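/// For reference, a minimal sketch of the source-level directive that ActOnPragmaPack
/// above is invoked for (illustrative only; the struct name is made up for the example):
/// \code
///   #pragma pack(push, 1)
///   struct WireHeader {      // laid out with 1-byte packing while the pragma is active
///     char tag;
///     unsigned length;
///   };
///   #pragma pack(pop)        // restores the previous packing state
/// \endcode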
bool isPreciseFPEnabled() { return !CurFPFeatures.getAllowFPReassociate() && !CurFPFeatures.getNoSignedZero() && !CurFPFeatures.getAllowReciprocal() && !CurFPFeatures.getAllowApproxFunc(); } /// ActOnPragmaFloatControl - Call on well-formed \#pragma float_control void ActOnPragmaFloatControl(SourceLocation Loc, PragmaMsStackAction Action, PragmaFloatControlKind Value); /// ActOnPragmaUnused - Called on well-formed '\#pragma unused'. void ActOnPragmaUnused(const Token &Identifier, Scope *curScope, SourceLocation PragmaLoc); /// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... . void ActOnPragmaVisibility(const IdentifierInfo* VisType, SourceLocation PragmaLoc); NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II, SourceLocation Loc); void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W); /// ActOnPragmaWeakID - Called on well formed \#pragma weak ident. void ActOnPragmaWeakID(IdentifierInfo* WeakName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc); /// ActOnPragmaRedefineExtname - Called on well formed /// \#pragma redefine_extname oldname newname. void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident. void ActOnPragmaWeakAlias(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaFPContract - Called on well formed /// \#pragma {STDC,OPENCL} FP_CONTRACT and /// \#pragma clang fp contract void ActOnPragmaFPContract(SourceLocation Loc, LangOptions::FPModeKind FPC); /// Called on well formed /// \#pragma clang fp reassociate void ActOnPragmaFPReassociate(SourceLocation Loc, bool IsEnabled); /// ActOnPragmaFenvAccess - Called on well formed /// \#pragma STDC FENV_ACCESS void ActOnPragmaFEnvAccess(SourceLocation Loc, bool IsEnabled); /// Called on well formed '\#pragma clang fp' that has option 'exceptions'. void ActOnPragmaFPExceptions(SourceLocation Loc, LangOptions::FPExceptionModeKind); /// Called to set constant rounding mode for floating point operations. void setRoundingMode(SourceLocation Loc, llvm::RoundingMode); /// Called to set exception behavior for floating point operations. void setExceptionMode(SourceLocation Loc, LangOptions::FPExceptionModeKind); /// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to /// a the record decl, to handle '\#pragma pack' and '\#pragma options align'. void AddAlignmentAttributesForRecord(RecordDecl *RD); /// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record. void AddMsStructLayoutForRecord(RecordDecl *RD); /// PushNamespaceVisibilityAttr - Note that we've entered a /// namespace with a visibility attribute. void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr, SourceLocation Loc); /// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used, /// add an appropriate visibility attribute. void AddPushedVisibilityAttribute(Decl *RD); /// PopPragmaVisibility - Pop the top element of the visibility stack; used /// for '\#pragma GCC visibility' and visibility attributes on namespaces. void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc); /// FreeVisContext - Deallocate and null out VisContext. 
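/// A minimal sketch of the floating-point pragmas the handlers above correspond to in
/// user code (illustrative only; exact placement rules follow the Clang language
/// extension documentation):
/// \code
///   float fused_mul_add(float a, float b, float c) {
///   #pragma clang fp contract(fast)
///     return a * b + c;          // the multiply-add may be contracted into an FMA
///   }
///   #pragma STDC FENV_ACCESS ON  // handled by ActOnPragmaFEnvAccess
/// \endcode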
void FreeVisContext(); /// AddCFAuditedAttribute - Check whether we're currently within /// '\#pragma clang arc_cf_code_audited' and, if so, consider adding /// the appropriate attribute. void AddCFAuditedAttribute(Decl *D); void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute, SourceLocation PragmaLoc, attr::ParsedSubjectMatchRuleSet Rules); void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc, const IdentifierInfo *Namespace); /// Called on well-formed '\#pragma clang attribute pop'. void ActOnPragmaAttributePop(SourceLocation PragmaLoc, const IdentifierInfo *Namespace); /// Adds the attributes that have been specified using the /// '\#pragma clang attribute push' directives to the given declaration. void AddPragmaAttributes(Scope *S, Decl *D); void DiagnoseUnterminatedPragmaAttribute(); /// Called on well formed \#pragma clang optimize. void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc); /// Get the location for the currently active "\#pragma clang optimize /// off". If this location is invalid, then the state of the pragma is "on". SourceLocation getOptimizeOffPragmaLocation() const { return OptimizeOffPragmaLocation; } /// Only called on function definitions; if there is a pragma in scope /// with the effect of a range-based optnone, consider marking the function /// with attribute optnone. void AddRangeBasedOptnone(FunctionDecl *FD); /// Adds the 'optnone' attribute to the function declaration if there /// are no conflicts; Loc represents the location causing the 'optnone' /// attribute to be added (usually because of a pragma). void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc); /// AddAlignedAttr - Adds an aligned attribute to a particular declaration. void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E, bool IsPackExpansion); void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, TypeSourceInfo *T, bool IsPackExpansion); /// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular /// declaration. void AddAssumeAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E, Expr *OE); /// AddAllocAlignAttr - Adds an alloc_align attribute to a particular /// declaration. void AddAllocAlignAttr(Decl *D, const AttributeCommonInfo &CI, Expr *ParamExpr); /// AddAlignValueAttr - Adds an align_value attribute to a particular /// declaration. void AddAlignValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E); /// AddAnnotationAttr - Adds an annotation Annot with Args arguments to D. void AddAnnotationAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Annot, MutableArrayRef<Expr *> Args); /// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular /// declaration. void AddLaunchBoundsAttr(Decl *D, const AttributeCommonInfo &CI, Expr *MaxThreads, Expr *MinBlocks); /// AddModeAttr - Adds a mode attribute to a particular declaration. void AddModeAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Name, bool InInstantiation = false); void AddParameterABIAttr(Decl *D, const AttributeCommonInfo &CI, ParameterABI ABI); enum class RetainOwnershipKind {NS, CF, OS}; void AddXConsumedAttr(Decl *D, const AttributeCommonInfo &CI, RetainOwnershipKind K, bool IsTemplateInstantiation); /// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size /// attribute to a particular declaration. void addAMDGPUFlatWorkGroupSizeAttr(Decl *D, const AttributeCommonInfo &CI, Expr *Min, Expr *Max); /// addAMDGPUWavePersEUAttr - Adds an amdgpu_waves_per_eu attribute to a /// particular declaration. 
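/// A sketch of the '#pragma clang attribute' stack maintained by
/// ActOnPragmaAttributeAttribute and ActOnPragmaAttributePop above (illustrative only;
/// the annotation string is arbitrary):
/// \code
///   #pragma clang attribute push (__attribute__((annotate("engine"))), apply_to = function)
///   void tick();     // receives the annotate attribute
///   void render();   // receives the annotate attribute
///   #pragma clang attribute pop
/// \endcode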
void addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI, Expr *Min, Expr *Max); bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type); //===--------------------------------------------------------------------===// // C++ Coroutines TS // bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc, StringRef Keyword); ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E); StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, UnresolvedLookupExpr* Lookup); ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E); StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs); bool buildCoroutineParameterMoves(SourceLocation Loc); VarDecl *buildCoroutinePromise(SourceLocation Loc); void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body); ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc, SourceLocation FuncLoc); /// Check that the expression co_await promise.final_suspend() shall not be /// potentially-throwing. bool checkFinalSuspendNoThrow(const Stmt *FinalSuspend); //===--------------------------------------------------------------------===// // OpenCL extensions. // private: std::string CurrOpenCLExtension; /// Extensions required by an OpenCL type. llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap; /// Extensions required by an OpenCL declaration. llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap; public: llvm::StringRef getCurrentOpenCLExtension() const { return CurrOpenCLExtension; } /// Check if a function declaration \p FD associates with any /// extensions present in OpenCLDeclExtMap and if so return the /// extension(s) name(s). std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD); /// Check if a function type \p FT associates with any /// extensions present in OpenCLTypeExtMap and if so return the /// extension(s) name(s). std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT); /// Find an extension in an appropriate extension map and return its name template<typename T, typename MapT> std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map); void setCurrentOpenCLExtension(llvm::StringRef Ext) { CurrOpenCLExtension = std::string(Ext); } /// Set OpenCL extensions for a type which can only be used when these /// OpenCL extensions are enabled. If \p Exts is empty, do nothing. /// \param Exts A space separated list of OpenCL extensions. void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts); /// Set OpenCL extensions for a declaration which can only be /// used when these OpenCL extensions are enabled. If \p Exts is empty, do /// nothing. /// \param Exts A space separated list of OpenCL extensions. void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts); /// Set current OpenCL extensions for a type which can only be used /// when these OpenCL extensions are enabled. If current OpenCL extension is /// empty, do nothing. void setCurrentOpenCLExtensionForType(QualType T); /// Set current OpenCL extensions for a declaration which /// can only be used when these OpenCL extensions are enabled. If current /// OpenCL extension is empty, do nothing. 
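/// A self-contained sketch of the coroutine shape that the ActOnCoawaitExpr,
/// ActOnCoreturnStmt and BuildCoroutineBodyStmt entry points above operate on
/// (illustrative only; assumes a C++20 <coroutine> standard header):
/// \code
///   #include <coroutine>
///   struct Task {
///     struct promise_type {
///       Task get_return_object() { return {}; }
///       std::suspend_never initial_suspend() noexcept { return {}; }
///       std::suspend_never final_suspend() noexcept { return {}; }
///       void return_void() {}
///       void unhandled_exception() {}
///     };
///   };
///   Task demo() {
///     co_await std::suspend_never{};  // rewritten via await_ready/await_suspend/await_resume
///     co_return;
///   }
/// \endcode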
void setCurrentOpenCLExtensionForDecl(Decl *FD); bool isOpenCLDisabledDecl(Decl *FD); /// Check if type \p T corresponding to declaration specifier \p DS /// is disabled due to required OpenCL extensions being disabled. If so, /// emit diagnostics. /// \return true if type is disabled. bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T); /// Check if declaration \p D used by expression \p E /// is disabled due to required OpenCL extensions being disabled. If so, /// emit diagnostics. /// \return true if type is disabled. bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E); //===--------------------------------------------------------------------===// // OpenMP directives and clauses. // private: void *VarDataSharingAttributesStack; /// Number of nested ' SmallVector<SourceLocation, 4> DeclareTargetNesting; /// Initialization of data-sharing attributes stack. void InitDataSharingAttributesStack(); void DestroyDataSharingAttributesStack(); ExprResult VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind, bool StrictlyPositive = true); /// Returns OpenMP nesting level for current directive. unsigned getOpenMPNestingLevel() const; /// Adjusts the function scopes index for the target-based regions. void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex, unsigned Level) const; /// Returns the number of scopes associated with the construct on the given /// OpenMP level. int getNumberOfConstructScopes(unsigned Level) const; /// Push new OpenMP function region for non-capturing function. void pushOpenMPFunctionRegion(); /// Pop OpenMP function region for non-capturing function. void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI); /// Checks if a type or a declaration is disabled due to the owning extension /// being disabled, and emits diagnostic messages if it is disabled. /// \param D type or declaration to be checked. /// \param DiagLoc source location for the diagnostic message. /// \param DiagInfo information to be emitted for the diagnostic message. /// \param SrcRange source range of the declaration. /// \param Map maps type or declaration to the extensions. /// \param Selector selects diagnostic message: 0 for type and 1 for /// declaration. /// \return true if the type or declaration is disabled. template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT> bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo, MapT &Map, unsigned Selector = 0, SourceRange SrcRange = SourceRange()); /// Helper to keep information about the current `omp begin/end declare /// variant` nesting. struct OMPDeclareVariantScope { /// The associated OpenMP context selector. OMPTraitInfo *TI; /// The associated OpenMP context selector mangling. std::string NameSuffix; OMPDeclareVariantScope(OMPTraitInfo &TI); }; /// Return the OMPTraitInfo for the surrounding scope, if any. OMPTraitInfo *getOMPTraitInfoForSurroundingScope() { return OMPDeclareVariantScopes.empty() ? nullptr : OMPDeclareVariantScopes.back().TI; } /// The current `omp begin/end declare variant` scopes. SmallVector<OMPDeclareVariantScope, 4> OMPDeclareVariantScopes; /// The current `omp begin/end assumes` scopes. SmallVector<AssumptionAttr *, 4> OMPAssumeScoped; /// All `omp assumes` we encountered so far. SmallVector<AssumptionAttr *, 4> OMPAssumeGlobal; public: /// The declarator \p D defines a function in the scope \p S which is nested /// in an `omp begin/end declare variant` scope. 
In this method we create a /// declaration for \p D and rename \p D according to the OpenMP context /// selector of the surrounding scope. Return all base functions in \p Bases. void ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope( Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, SmallVectorImpl<FunctionDecl *> &Bases); /// Register \p D as specialization of all base functions in \p Bases in the /// current `omp begin/end declare variant` scope. void ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope( Decl *D, SmallVectorImpl<FunctionDecl *> &Bases); /// Act on \p D, a function definition inside of an `omp [begin/end] assumes`. void ActOnFinishedFunctionDefinitionInOpenMPAssumeScope(Decl *D); /// Can we exit an OpenMP declare variant scope at the moment. bool isInOpenMPDeclareVariantScope() const { return !OMPDeclareVariantScopes.empty(); } /// Given the potential call expression \p Call, determine if there is a /// specialization via the OpenMP declare variant mechanism available. If /// there is, return the specialized call expression, otherwise return the /// original \p Call. ExprResult ActOnOpenMPCall(ExprResult Call, Scope *Scope, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig); /// Handle a `omp begin declare variant`. void ActOnOpenMPBeginDeclareVariant(SourceLocation Loc, OMPTraitInfo &TI); /// Handle a `omp end declare variant`. void ActOnOpenMPEndDeclareVariant(); /// Checks if the variant/multiversion functions are compatible. bool areMultiversionVariantFunctionsCompatible( const FunctionDecl *OldFD, const FunctionDecl *NewFD, const PartialDiagnostic &NoProtoDiagID, const PartialDiagnosticAt &NoteCausedDiagIDAt, const PartialDiagnosticAt &NoSupportDiagIDAt, const PartialDiagnosticAt &DiffDiagIDAt, bool TemplatesSupported, bool ConstexprSupported, bool CLinkageMayDiffer); /// Function tries to capture lambda's captured variables in the OpenMP region /// before the original lambda is captured. void tryCaptureOpenMPLambdas(ValueDecl *V); /// Return true if the provided declaration \a VD should be captured by /// reference. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. /// \param OpenMPCaptureLevel Capture level within an OpenMP construct. bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level, unsigned OpenMPCaptureLevel) const; /// Check if the specified variable is used in one of the private /// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP /// constructs. VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false, unsigned StopAt = 0); ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK, ExprObjectKind OK, SourceLocation Loc); /// If the current region is a loop-based region, mark the start of the loop /// construct. void startOpenMPLoop(); /// If the current region is a range loop-based region, mark the start of the /// loop construct. void startOpenMPCXXRangeFor(); /// Check if the specified variable is used in 'private' clause. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. OpenMPClauseKind isOpenMPPrivateDecl(ValueDecl *D, unsigned Level, unsigned CapLevel) const; /// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.) /// for \p FD based on DSA for the provided corresponding captured declaration /// \p D. 
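/// A sketch of the 'begin/end declare variant' bracket whose scopes are tracked by
/// ActOnOpenMPBeginDeclareVariant / ActOnOpenMPEndDeclareVariant above (illustrative
/// only; the saxpy function is made up for the example):
/// \code
///   void saxpy(int n, float a, const float *x, float *y);   // base function
///   #pragma omp begin declare variant match(device = {kind(gpu)})
///   void saxpy(int n, float a, const float *x, float *y) {
///     // GPU-specialized definition; call sites are resolved via ActOnOpenMPCall
///   }
///   #pragma omp end declare variant
/// \endcode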
void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level); /// Check if the specified variable is captured by 'target' directive. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level, unsigned CaptureLevel) const; /// Check if the specified global variable must be captured by outer capture /// regions. /// \param Level Relative level of nested OpenMP construct for that /// the check is performed. bool isOpenMPGlobalCapturedDecl(ValueDecl *D, unsigned Level, unsigned CaptureLevel) const; ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc, Expr *Op); /// Called on start of new data sharing attribute block. void StartOpenMPDSABlock(OpenMPDirectiveKind K, const DeclarationNameInfo &DirName, Scope *CurScope, SourceLocation Loc); /// Start analysis of clauses. void StartOpenMPClause(OpenMPClauseKind K); /// End analysis of clauses. void EndOpenMPClause(); /// Called on end of data sharing attribute block. void EndOpenMPDSABlock(Stmt *CurDirective); /// Check if the current region is an OpenMP loop region and if it is, /// mark loop control variable, used in \p Init for loop initialization, as /// private by default. /// \param Init First part of the for loop. void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init); // OpenMP directives and clauses. /// Called on correct id-expression from the '#pragma omp /// threadprivate'. ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id, OpenMPDirectiveKind Kind); /// Called on well-formed ' DeclGroupPtrTy ActOnOpenMPThreadprivateDirective( SourceLocation Loc, ArrayRef<Expr *> VarList); /// Builds a new OpenMPThreadPrivateDecl and checks its correctness. OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef<Expr *> VarList); /// Called on well-formed ' DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc, ArrayRef<Expr *> VarList, ArrayRef<OMPClause *> Clauses, DeclContext *Owner = nullptr); /// Called on well-formed ' void ActOnOpenMPAssumesDirective(SourceLocation Loc, OpenMPDirectiveKind DKind, ArrayRef<StringRef> Assumptions, bool SkippedClauses); /// Check if there is an active global `omp begin assumes` directive. bool isInOpenMPAssumeScope() const { return !OMPAssumeScoped.empty(); } /// Check if there is an active global `omp assumes` directive. bool hasGlobalOpenMPAssumes() const { return !OMPAssumeGlobal.empty(); } /// Called on well-formed ' void ActOnOpenMPEndAssumesDirective(); /// Called on well-formed ' DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc, ArrayRef<OMPClause *> ClauseList); /// Check restrictions on Requires directive OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc, ArrayRef<OMPClause *> Clauses); /// Check if the specified type is allowed to be used in 'omp declare /// reduction' construct. QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc, TypeResult ParsedType); /// Called on start of ' DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart( Scope *S, DeclContext *DC, DeclarationName Name, ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes, AccessSpecifier AS, Decl *PrevDeclInScope = nullptr); /// Initialize declare reduction construct initializer. void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D); /// Finish current declare reduction construct initializer. 
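/// For reference, a sketch of the declarative directives handled by
/// ActOnOpenMPThreadprivateDirective and the declare-reduction entry points above
/// (illustrative only; the reduction identifier and identity value are arbitrary):
/// \code
///   int counter = 0;
///   #pragma omp threadprivate(counter)
///
///   #pragma omp declare reduction(mymax : double : \
///       omp_out = omp_out > omp_in ? omp_out : omp_in) initializer(omp_priv = -1.0e300)
/// \endcode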
void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner); /// Initialize declare reduction construct initializer. /// \return omp_priv variable. VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D); /// Finish current declare reduction construct initializer. void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer, VarDecl *OmpPrivParm); /// Called at the end of '#pragma omp declare reduction'. DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd( Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid); /// Check variable declaration in 'omp declare mapper' construct. TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D); /// Check if the specified type is allowed to be used in 'omp declare /// mapper' construct. QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc, TypeResult ParsedType); /// Called on start of '#pragma omp declare mapper'. DeclGroupPtrTy ActOnOpenMPDeclareMapperDirective( Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType, SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS, Expr *MapperVarRef, ArrayRef<OMPClause *> Clauses, Decl *PrevDeclInScope = nullptr); /// Build the mapper variable of '#pragma omp declare mapper'. ExprResult ActOnOpenMPDeclareMapperDirectiveVarDecl(Scope *S, QualType MapperType, SourceLocation StartLoc, DeclarationName VN); bool isOpenMPDeclareMapperVarDeclAllowed(const VarDecl *VD) const; const ValueDecl *getOpenMPDeclareMapperVarName() const; /// Called on the start of target region i.e. '#pragma omp declare target'. bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc); /// Called at the end of target region i.e. '#pragma omp end declare target'. void ActOnFinishOpenMPDeclareTargetDirective(); /// Searches for the provided declaration name for OpenMP declare target /// directive. NamedDecl * lookupOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id, NamedDeclSetType &SameDirectiveDecls); /// Called on correct id-expression from the '#pragma omp declare target'. void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc, OMPDeclareTargetDeclAttr::MapTypeTy MT, OMPDeclareTargetDeclAttr::DevTypeTy DT); /// Check declaration inside target region. void checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D, SourceLocation IdLoc = SourceLocation()); /// Finishes analysis of the deferred function calls that may be declared as /// host/nohost during device/host compilation. void finalizeOpenMPDelayedAnalysis(const FunctionDecl *Caller, const FunctionDecl *Callee, SourceLocation Loc); /// Return true inside OpenMP declare target region. bool isInOpenMPDeclareTargetContext() const { return !DeclareTargetNesting.empty(); } /// Return true inside OpenMP target region. bool isInOpenMPTargetExecutionDirective() const; /// Return the number of captured regions created for an OpenMP directive. static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind); /// Initialization of captured region for OpenMP region. void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope); /// Called for syntactical loops (ForStmt or CXXForRangeStmt) associated with /// an OpenMP loop directive. StmtResult ActOnOpenMPCanonicalLoop(Stmt *AStmt); /// End of OpenMP region. /// /// \param S Statement associated with the current OpenMP region. /// \param Clauses List of clauses for the current OpenMP region. /// /// \returns Statement for finished OpenMP region.
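/// A sketch of the 'declare target' region whose nesting is tracked by
/// ActOnStartOpenMPDeclareTargetDirective / ActOnFinishOpenMPDeclareTargetDirective
/// above (illustrative only):
/// \code
///   #pragma omp declare target
///   int device_counter;        // mapped into the device data environment
///   void device_helper();      // also compiled for the device
///   #pragma omp end declare target
/// \endcode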
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses); StmtResult ActOnOpenMPExecutableDirective( OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName, OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\ /// of the associated statement. StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); using VarsWithInheritedDSAType = llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>; /// Called on well-formed '\ /// of the associated statement. StmtResult ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed ' /// the associated statement. StmtResult ActOnOpenMPTileDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\ /// of the associated statement. StmtResult ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\ /// of the associated statement. StmtResult ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\ /// of the associated statement. StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\ /// associated statement. StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\ /// associated statement. StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\ /// associated statement. StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\ /// associated statement. StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\ /// of the associated statement. StmtResult ActOnOpenMPParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\ /// parsing of the associated statement. StmtResult ActOnOpenMPParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\ /// parsing of the associated statement. StmtResult ActOnOpenMPParallelMasterDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\ /// parsing of the associated statement. StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\ /// associated statement. 
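/// A minimal sketch of the kind of executable construct the ActOnOpenMP*Directive
/// callbacks above are invoked for (illustrative only):
/// \code
///   void scale(double *v, int n, double f) {
///   #pragma omp parallel for
///     for (int i = 0; i < n; ++i)   // loop nest analyzed by ActOnOpenMPParallelForDirective
///       v[i] *= f;
///   }
/// \endcode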
StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\ StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\ StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\ StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\ StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\ StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\ StmtResult ActOnOpenMPDepobjDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\ StmtResult ActOnOpenMPScanDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\ /// associated statement. StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\ /// associated statement. StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\ /// associated statement. StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\ /// the associated statement. StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\ /// parsing of the associated statement. StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\ /// parsing of the associated statement. StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\ /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\ /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\ /// associated statement. StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\ StmtResult ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\ StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\ /// associated statement. StmtResult ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\ /// the associated statement. 
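/// A sketch of the tasking directives handled by ActOnOpenMPTaskDirective and
/// ActOnOpenMPTaskwaitDirective above (illustrative only; the Node type is made up and
/// the function is assumed to run inside an enclosing parallel/single region):
/// \code
///   struct Node { Node *left, *right; };
///   void traverse(Node *n) {
///     if (!n) return;
///   #pragma omp task firstprivate(n)
///     traverse(n->left);
///   #pragma omp task firstprivate(n)
///     traverse(n->right);
///   #pragma omp taskwait             // wait for the two child tasks
///   }
/// \endcode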
StmtResult ActOnOpenMPTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\ /// associated statement. StmtResult ActOnOpenMPMasterTaskLoopDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\ /// the associated statement. StmtResult ActOnOpenMPMasterTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\ /// parsing of the associated statement. StmtResult ActOnOpenMPParallelMasterTaskLoopDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\ /// parsing of the associated statement. StmtResult ActOnOpenMPParallelMasterTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\ /// of the associated statement. StmtResult ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\ StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\ /// parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\ /// after parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\ /// parsing of the associated statement. StmtResult ActOnOpenMPDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\ /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\ /// the associated statement. StmtResult ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\ /// the associated statement. StmtResult ActOnOpenMPTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\ /// of the associated statement. StmtResult ActOnOpenMPTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\ /// after parsing of the associated statement. 
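/// A sketch of a taskloop nest of the kind the ActOnOpenMPTaskLoop*Directive callbacks
/// above act on (illustrative only; work() is a placeholder):
/// \code
///   void work(int i);
///   void run(int n) {
///   #pragma omp parallel
///   #pragma omp single
///   #pragma omp taskloop grainsize(64)
///     for (int i = 0; i < n; ++i)
///       work(i);
///   }
/// \endcode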
StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\ /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\ /// associated statement. StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\ /// of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\ /// after parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\ /// simd' after parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\ /// parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\ StmtResult ActOnOpenMPInteropDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Checks correctness of linear modifiers. bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind, SourceLocation LinLoc); /// Checks that the specified declaration matches requirements for the linear /// decls. bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc, OpenMPLinearClauseKind LinKind, QualType Type, bool IsDeclareSimd = false); /// Called on well-formed '\ /// the associated method/function. DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective( DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS, Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds, ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears, ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR); /// Checks '\ /// functions after parsing of the associated method/function. /// \param DG Function declaration to which declare variant directive is /// applied to. /// \param VariantRef Expression that references the variant function, which /// must be used instead of the original one, specified in \p DG. /// \param TI The trait info object representing the match clause. /// \returns None, if the function/variant function are not compatible with /// the pragma, pair of original function/variant ref expression otherwise. Optional<std::pair<FunctionDecl *, Expr *>> checkOpenMPDeclareVariantFunction(DeclGroupPtrTy DG, Expr *VariantRef, OMPTraitInfo &TI, SourceRange SR); /// Called on well-formed '\ /// the associated method/function. /// \param FD Function declaration to which declare variant directive is /// applied to. 
/// \param VariantRef Expression that references the variant function, which /// must be used instead of the original one, specified in \p DG. /// \param TI The context traits associated with the function variant. void ActOnOpenMPDeclareVariantDirective(FunctionDecl *FD, Expr *VariantRef, OMPTraitInfo &TI, SourceRange SR); OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'allocator' clause. OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'if' clause. OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier, Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation NameModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'final' clause. OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_threads' clause. OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'safelen' clause. OMPClause *ActOnOpenMPSafelenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'simdlen' clause. OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-form 'sizes' clause. OMPClause *ActOnOpenMPSizesClause(ArrayRef<Expr *> SizeExprs, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'collapse' clause. OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'ordered' clause. OMPClause * ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc, SourceLocation LParenLoc = SourceLocation(), Expr *NumForLoops = nullptr); /// Called on well-formed 'grainsize' clause. OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_tasks' clause. OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'hint' clause. OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'detach' clause. OMPClause *ActOnOpenMPDetachClause(Expr *Evt, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind, unsigned Argument, SourceLocation ArgumentLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'default' clause. OMPClause *ActOnOpenMPDefaultClause(llvm::omp::DefaultKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'proc_bind' clause. OMPClause *ActOnOpenMPProcBindClause(llvm::omp::ProcBindKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'order' clause. 
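/// A sketch of how several of the clauses handled above (if, num_threads, collapse,
/// proc_bind) appear on a directive in user code (illustrative only):
/// \code
///   void zero(double *a, int n) {
///   #pragma omp parallel for if(parallel: n > 1024) num_threads(4) proc_bind(close) collapse(2)
///     for (int i = 0; i < n; ++i)
///       for (int j = 0; j < n; ++j)
///         a[i * n + j] = 0.0;
///   }
/// \endcode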
OMPClause *ActOnOpenMPOrderClause(OpenMPOrderClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'update' clause. OMPClause *ActOnOpenMPUpdateClause(OpenMPDependClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSingleExprWithArgClause( OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc, SourceLocation EndLoc); /// Called on well-formed 'schedule' clause. OMPClause *ActOnOpenMPScheduleClause( OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2, OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'nowait' clause. OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'untied' clause. OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'mergeable' clause. OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'read' clause. OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'write' clause. OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'update' clause. OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'capture' clause. OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'seq_cst' clause. OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'acq_rel' clause. OMPClause *ActOnOpenMPAcqRelClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'acquire' clause. OMPClause *ActOnOpenMPAcquireClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'release' clause. OMPClause *ActOnOpenMPReleaseClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'relaxed' clause. OMPClause *ActOnOpenMPRelaxedClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'init' clause. OMPClause *ActOnOpenMPInitClause(Expr *InteropVar, ArrayRef<Expr *> PrefExprs, bool IsTarget, bool IsTargetSync, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation VarLoc, SourceLocation EndLoc); /// Called on well-formed 'use' clause. OMPClause *ActOnOpenMPUseClause(Expr *InteropVar, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation VarLoc, SourceLocation EndLoc); /// Called on well-formed 'destroy' clause. OMPClause *ActOnOpenMPDestroyClause(Expr *InteropVar, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation VarLoc, SourceLocation EndLoc); /// Called on well-formed 'threads' clause. OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'simd' clause. OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'nogroup' clause. 
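/// A sketch of an atomic construct combining the 'capture' and memory-order clauses
/// parsed by the handlers above (illustrative only):
/// \code
///   void bump(int &x, int &old) {
///   #pragma omp atomic capture seq_cst
///     { old = x; x = x + 1; }
///   }
/// \endcode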
OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'unified_address' clause. OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'unified_address' clause. OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'reverse_offload' clause. OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'dynamic_allocators' clause. OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'atomic_default_mem_order' clause. OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause( OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPVarListClause( OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *DepModOrTailExpr, const OMPVarListLocTy &Locs, SourceLocation ColonLoc, CXXScopeSpec &ReductionOrMapperIdScopeSpec, DeclarationNameInfo &ReductionOrMapperId, int ExtraModifier, ArrayRef<OpenMPMapModifierKind> MapTypeModifiers, ArrayRef<SourceLocation> MapTypeModifiersLoc, bool IsMapTypeImplicit, SourceLocation ExtraModifierLoc, ArrayRef<OpenMPMotionModifierKind> MotionModifiers, ArrayRef<SourceLocation> MotionModifiersLoc); /// Called on well-formed 'inclusive' clause. OMPClause *ActOnOpenMPInclusiveClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'exclusive' clause. OMPClause *ActOnOpenMPExclusiveClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'allocate' clause. OMPClause * ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation ColonLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'private' clause. OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'firstprivate' clause. OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'lastprivate' clause. OMPClause *ActOnOpenMPLastprivateClause( ArrayRef<Expr *> VarList, OpenMPLastprivateModifier LPKind, SourceLocation LPKindLoc, SourceLocation ColonLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'shared' clause. OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'reduction' clause. OMPClause *ActOnOpenMPReductionClause( ArrayRef<Expr *> VarList, OpenMPReductionClauseModifier Modifier, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'task_reduction' clause. 
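/// A sketch of the data-sharing clauses (private/firstprivate/reduction) handled by the
/// clause entry points above (illustrative only):
/// \code
///   double dot(const double *x, const double *y, int n) {
///     double sum = 0.0;
///   #pragma omp parallel for reduction(+: sum)
///     for (int i = 0; i < n; ++i)
///       sum += x[i] * y[i];
///     return sum;
///   }
/// \endcode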
OMPClause *ActOnOpenMPTaskReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'in_reduction' clause. OMPClause *ActOnOpenMPInReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'linear' clause. OMPClause * ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step, SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind LinKind, SourceLocation LinLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'aligned' clause. OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList, Expr *Alignment, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'copyin' clause. OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'copyprivate' clause. OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'flush' pseudo clause. OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'depobj' pseudo clause. OMPClause *ActOnOpenMPDepobjClause(Expr *Depobj, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'depend' clause. OMPClause * ActOnOpenMPDependClause(Expr *DepModifier, OpenMPDependClauseKind DepKind, SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'device' clause. OMPClause *ActOnOpenMPDeviceClause(OpenMPDeviceClauseModifier Modifier, Expr *Device, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ModifierLoc, SourceLocation EndLoc); /// Called on well-formed 'map' clause. OMPClause * ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers, ArrayRef<SourceLocation> MapTypeModifiersLoc, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, OpenMPMapClauseKind MapType, bool IsMapTypeImplicit, SourceLocation MapLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'num_teams' clause. OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'thread_limit' clause. OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'priority' clause. OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'dist_schedule' clause. 
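/// A sketch of task dependences of the kind the 'depend' clause handler above checks
/// (illustrative only; produce/consume are placeholders):
/// \code
///   void produce(double *buf, int n);
///   void consume(const double *buf, int n);
///   void pipeline(double *buf, int n) {
///   #pragma omp task depend(out: buf[0:n])
///     produce(buf, n);
///   #pragma omp task depend(in: buf[0:n])
///     consume(buf, n);
///   #pragma omp taskwait
///   }
/// \endcode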
OMPClause *ActOnOpenMPDistScheduleClause( OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); /// Called on well-formed 'defaultmap' clause. OMPClause *ActOnOpenMPDefaultmapClause( OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc, SourceLocation KindLoc, SourceLocation EndLoc); /// Called on well-formed 'to' clause. OMPClause * ActOnOpenMPToClause(ArrayRef<OpenMPMotionModifierKind> MotionModifiers, ArrayRef<SourceLocation> MotionModifiersLoc, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'from' clause. OMPClause * ActOnOpenMPFromClause(ArrayRef<OpenMPMotionModifierKind> MotionModifiers, ArrayRef<SourceLocation> MotionModifiersLoc, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'use_device_ptr' clause. OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// Called on well-formed 'use_device_addr' clause. OMPClause *ActOnOpenMPUseDeviceAddrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// Called on well-formed 'is_device_ptr' clause. OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// Called on well-formed 'nontemporal' clause. OMPClause *ActOnOpenMPNontemporalClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Data for list of allocators. struct UsesAllocatorsData { /// Allocator. Expr *Allocator = nullptr; /// Allocator traits. Expr *AllocatorTraits = nullptr; /// Locations of '(' and ')' symbols. SourceLocation LParenLoc, RParenLoc; }; /// Called on well-formed 'uses_allocators' clause. OMPClause *ActOnOpenMPUsesAllocatorClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<UsesAllocatorsData> Data); /// Called on well-formed 'affinity' clause. OMPClause *ActOnOpenMPAffinityClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, Expr *Modifier, ArrayRef<Expr *> Locators); /// The kind of conversion being performed. enum CheckedConversionKind { /// An implicit conversion. CCK_ImplicitConversion, /// A C-style cast. CCK_CStyleCast, /// A functional-style cast. CCK_FunctionalCast, /// A cast other than a C-style cast. CCK_OtherCast, /// A conversion for an operand of a builtin overloaded operator. CCK_ForBuiltinOverloadedOp }; static bool isCast(CheckedConversionKind CCK) { return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast || CCK == CCK_OtherCast; } /// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit /// cast. If there is already an implicit cast, merge into the existing one. /// If isLvalue, the result of the cast is an lvalue. ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK, ExprValueKind VK = VK_RValue, const CXXCastPath *BasePath = nullptr, CheckedConversionKind CCK = CCK_ImplicitConversion); /// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding /// to the conversion from scalar type ScalarTy to the Boolean type. 
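/// A sketch of the 'to'/'from' motion clauses on 'target update' that
/// ActOnOpenMPToClause / ActOnOpenMPFromClause above handle (illustrative only):
/// \code
///   void refresh(double *a, int n) {
///   #pragma omp target update to(a[0:n])     // host -> device
///     /* ... device kernels launched elsewhere ... */
///   #pragma omp target update from(a[0:n])   // device -> host
///   }
/// \endcode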
static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy); /// IgnoredValueConversions - Given that an expression's result is /// syntactically ignored, perform any conversions that are /// required. ExprResult IgnoredValueConversions(Expr *E); // UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts // functions and arrays to their respective pointers (C99 6.3.2.1). ExprResult UsualUnaryConversions(Expr *E); /// CallExprUnaryConversions - a special case of an unary conversion /// performed on a function designator of a call expression. ExprResult CallExprUnaryConversions(Expr *E); // DefaultFunctionArrayConversion - converts functions and arrays // to their respective pointers (C99 6.3.2.1). ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true); // DefaultFunctionArrayLvalueConversion - converts functions and // arrays to their respective pointers and performs the // lvalue-to-rvalue conversion. ExprResult DefaultFunctionArrayLvalueConversion(Expr *E, bool Diagnose = true); // DefaultLvalueConversion - performs lvalue-to-rvalue conversion on // the operand. This function is a no-op if the operand has a function type // or an array type. ExprResult DefaultLvalueConversion(Expr *E); // DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that // do not have a prototype. Integer promotions are performed on each // argument, and arguments that have type float are promoted to double. ExprResult DefaultArgumentPromotion(Expr *E); /// If \p E is a prvalue denoting an unmaterialized temporary, materialize /// it as an xvalue. In C++98, the result will still be a prvalue, because /// we don't have xvalues there. ExprResult TemporaryMaterializationConversion(Expr *E); // Used for emitting the right warning by DefaultVariadicArgumentPromotion enum VariadicCallType { VariadicFunction, VariadicBlock, VariadicMethod, VariadicConstructor, VariadicDoesNotApply }; VariadicCallType getVariadicCallType(FunctionDecl *FDecl, const FunctionProtoType *Proto, Expr *Fn); // Used for determining in which context a type is allowed to be passed to a // vararg function. enum VarArgKind { VAK_Valid, VAK_ValidInCXX11, VAK_Undefined, VAK_MSVCUndefined, VAK_Invalid }; // Determines which VarArgKind fits an expression. VarArgKind isValidVarArgType(const QualType &Ty); /// Check to see if the given expression is a valid argument to a variadic /// function, issuing a diagnostic if not. void checkVariadicArgument(const Expr *E, VariadicCallType CT); /// Check to see if a given expression could have '.c_str()' called on it. bool hasCStrMethod(const Expr *E); /// GatherArgumentsForCall - Collector argument expressions for various /// form of call prototypes. bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl, const FunctionProtoType *Proto, unsigned FirstParam, ArrayRef<Expr *> Args, SmallVectorImpl<Expr *> &AllArgs, VariadicCallType CallType = VariadicDoesNotApply, bool AllowExplicit = false, bool IsListInitialization = false); // DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but // will create a runtime trap if the resulting type is not a POD type. ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT, FunctionDecl *FDecl); /// Context in which we're performing a usual arithmetic conversion. enum ArithConvKind { /// An arithmetic operation. ACK_Arithmetic, /// A bitwise operation. ACK_BitwiseOp, /// A comparison. ACK_Comparison, /// A conditional (?:) operator. 
ACK_Conditional, /// A compound assignment expression. ACK_CompAssign, }; // UsualArithmeticConversions - performs the UsualUnaryConversions on it's // operands and then handles various conversions that are common to binary // operators (C99 6.3.1.8). If both operands aren't arithmetic, this // routine returns the first non-arithmetic type found. The client is // responsible for emitting appropriate error diagnostics. QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, ArithConvKind ACK); /// AssignConvertType - All of the 'assignment' semantic checks return this /// enum to indicate whether the assignment was allowed. These checks are /// done for simple assignments, as well as initialization, return from /// function, argument passing, etc. The query is phrased in terms of a /// source and destination type. enum AssignConvertType { /// Compatible - the types are compatible according to the standard. Compatible, /// PointerToInt - The assignment converts a pointer to an int, which we /// accept as an extension. PointerToInt, /// IntToPointer - The assignment converts an int to a pointer, which we /// accept as an extension. IntToPointer, /// FunctionVoidPointer - The assignment is between a function pointer and /// void*, which the standard doesn't allow, but we accept as an extension. FunctionVoidPointer, /// IncompatiblePointer - The assignment is between two pointers types that /// are not compatible, but we accept them as an extension. IncompatiblePointer, /// IncompatibleFunctionPointer - The assignment is between two function /// pointers types that are not compatible, but we accept them as an /// extension. IncompatibleFunctionPointer, /// IncompatiblePointerSign - The assignment is between two pointers types /// which point to integers which have a different sign, but are otherwise /// identical. This is a subset of the above, but broken out because it's by /// far the most common case of incompatible pointers. IncompatiblePointerSign, /// CompatiblePointerDiscardsQualifiers - The assignment discards /// c/v/r qualifiers, which we accept as an extension. CompatiblePointerDiscardsQualifiers, /// IncompatiblePointerDiscardsQualifiers - The assignment /// discards qualifiers that we don't permit to be discarded, /// like address spaces. IncompatiblePointerDiscardsQualifiers, /// IncompatibleNestedPointerAddressSpaceMismatch - The assignment /// changes address spaces in nested pointer types which is not allowed. /// For instance, converting __private int ** to __generic int ** is /// illegal even though __private could be converted to __generic. IncompatibleNestedPointerAddressSpaceMismatch, /// IncompatibleNestedPointerQualifiers - The assignment is between two /// nested pointer types, and the qualifiers other than the first two /// levels differ e.g. char ** -> const char **, but we accept them as an /// extension. IncompatibleNestedPointerQualifiers, /// IncompatibleVectors - The assignment is between two vector types that /// have the same size, which we accept as an extension. IncompatibleVectors, /// IntToBlockPointer - The assignment converts an int to a block /// pointer. We disallow this. IntToBlockPointer, /// IncompatibleBlockPointer - The assignment is between two block /// pointers types that are not compatible. IncompatibleBlockPointer, /// IncompatibleObjCQualifiedId - The assignment is between a qualified /// id type and something else (that is incompatible with it). 
For example, /// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol. IncompatibleObjCQualifiedId, /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an /// object with __weak qualifier. IncompatibleObjCWeakRef, /// Incompatible - We reject this conversion outright, it is invalid to /// represent it in the AST. Incompatible }; /// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the /// assignment conversion type specified by ConvTy. This returns true if the /// conversion was invalid or false if the conversion was accepted. bool DiagnoseAssignmentResult(AssignConvertType ConvTy, SourceLocation Loc, QualType DstType, QualType SrcType, Expr *SrcExpr, AssignmentAction Action, bool *Complained = nullptr); /// IsValueInFlagEnum - Determine if a value is allowed as part of a flag /// enum. If AllowMask is true, then we also allow the complement of a valid /// value, to be used as a mask. bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val, bool AllowMask) const; /// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant /// integer not in the range of enum values. void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType, Expr *SrcExpr); /// CheckAssignmentConstraints - Perform type checking for assignment, /// argument passing, variable initialization, and function return values. /// C99 6.5.16. AssignConvertType CheckAssignmentConstraints(SourceLocation Loc, QualType LHSType, QualType RHSType); /// Check assignment constraints and optionally prepare for a conversion of /// the RHS to the LHS type. The conversion is prepared for if ConvertRHS /// is true. AssignConvertType CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS, CastKind &Kind, bool ConvertRHS = true); /// Check assignment constraints for an assignment of RHS to LHSType. /// /// \param LHSType The destination type for the assignment. /// \param RHS The source expression for the assignment. /// \param Diagnose If \c true, diagnostics may be produced when checking /// for assignability. If a diagnostic is produced, \p RHS will be /// set to ExprError(). Note that this function may still return /// without producing a diagnostic, even for an invalid assignment. /// \param DiagnoseCFAudited If \c true, the target is a function parameter /// in an audited Core Foundation API and does not need to be checked /// for ARC retain issues. /// \param ConvertRHS If \c true, \p RHS will be updated to model the /// conversions necessary to perform the assignment. If \c false, /// \p Diagnose must also be \c false. AssignConvertType CheckSingleAssignmentConstraints( QualType LHSType, ExprResult &RHS, bool Diagnose = true, bool DiagnoseCFAudited = false, bool ConvertRHS = true); // If the lhs type is a transparent union, check whether we // can initialize the transparent union with the given expression. 
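// Illustrative examples (C user code, not part of this header) of assignments
// that CheckAssignmentConstraints classifies with the enumerators above:
//
//   int i; int *p; unsigned *u; void *vp; void (*fp)(void);
//
//   i  = p;    // PointerToInt            (accepted as an extension, warned)
//   p  = i;    // IntToPointer            (accepted as an extension, warned)
//   vp = fp;   // FunctionVoidPointer     (extension)
//   u  = p;    // IncompatiblePointerSign (int * vs. unsigned int *)
//   p  = vp;   // Compatible              (void * converts to an object pointer in C)
//
// DiagnoseAssignmentResult then maps the returned AssignConvertType to the
// corresponding warning or error for the given AssignmentAction.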
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType, ExprResult &RHS); bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType); bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit = false); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const ImplicitConversionSequence& ICS, AssignmentAction Action, CheckedConversionKind CCK = CCK_ImplicitConversion); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const StandardConversionSequence& SCS, AssignmentAction Action, CheckedConversionKind CCK); ExprResult PerformQualificationConversion( Expr *E, QualType Ty, ExprValueKind VK = VK_RValue, CheckedConversionKind CCK = CCK_ImplicitConversion); /// the following "Check" methods will return a valid/converted QualType /// or a null QualType (indicating an error diagnostic was issued). /// type checking binary operators (subroutines of CreateBuiltinBinOp). QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType CheckPointerToMemberOperands( // C++ 5.5 ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, SourceLocation OpLoc, bool isIndirect); QualType CheckMultiplyDivideOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool IsDivide); QualType CheckRemainderOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign = false); QualType CheckAdditionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr); QualType CheckSubtractionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, QualType* CompLHSTy = nullptr); QualType CheckShiftOperands( // C99 6.5.7 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, bool IsCompAssign = false); void CheckPtrComparisonWithNullChar(ExprResult &E, ExprResult &NullE); QualType CheckCompareOperands( // C99 6.5.8/9 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckBitwiseOperands( // C99 6.5.[10...12] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckLogicalOperands( // C99 6.5.[13,14] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); // CheckAssignmentOperands is used for both simple and compound assignment. // For simple assignment, pass both expressions and a null converted type. // For compound assignment, pass both expressions and the converted type. 
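// Illustrative examples (C user code) of what the Check*Operands routines
// above compute for built-in binary operators:
//
//   int i = 3; double d = 1.5; int a[10]; int *p = a, *q = a + 4;
//
//   d + i;     // CheckAdditionOperands: usual arithmetic conversions,
//              //   'i' converts to double, result type double
//   p + i;     // CheckAdditionOperands: pointer + integer, result type int *
//   q - p;     // CheckSubtractionOperands: pointer difference, ptrdiff_t
//   i << 100;  // CheckShiftOperands: warns, shift count >= width of type
//   p < q;     // CheckCompareOperands: valid, both point into the same array
//
// Each routine returns the computed result type, or a null QualType after
// emitting a diagnostic for invalid operand combinations.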
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2] Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType); ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opcode, Expr *Op); ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opcode, Expr *LHS, Expr *RHS); ExprResult checkPseudoObjectRValue(Expr *E); Expr *recreateSyntacticForm(PseudoObjectExpr *E); QualType CheckConditionalOperands( // C99 6.5.15 ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc); QualType CXXCheckConditionalOperands( // C++ 5.16 ExprResult &cond, ExprResult &lhs, ExprResult &rhs, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc); QualType CheckVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2, bool ConvertArgs = true); QualType FindCompositePointerType(SourceLocation Loc, ExprResult &E1, ExprResult &E2, bool ConvertArgs = true) { Expr *E1Tmp = E1.get(), *E2Tmp = E2.get(); QualType Composite = FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs); E1 = E1Tmp; E2 = E2Tmp; return Composite; } QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr, SourceLocation QuestionLoc); void DiagnoseAlwaysNonNullPointer(Expr *E, Expr::NullPointerConstantKind NullType, bool IsEqual, SourceRange Range); /// type checking for vector binary operators. QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool AllowBothBool, bool AllowBoolConversion); QualType GetSignedVectorType(QualType V); QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc); /// Type checking for matrix binary operators. QualType CheckMatrixElementwiseOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign); QualType CheckMatrixMultiplyOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign); bool isValidSveBitcast(QualType srcType, QualType destType); bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType); bool isLaxVectorConversion(QualType srcType, QualType destType); /// type checking declaration initializers (C99 6.7.8) bool CheckForConstantInitializer(Expr *e, QualType t); // type checking C++ declaration initializers (C++ [dcl.init]). /// ReferenceCompareResult - Expresses the result of comparing two /// types (cv1 T1 and cv2 T2) to determine their compatibility for the /// purposes of initialization by reference (C++ [dcl.init.ref]p4). enum ReferenceCompareResult { /// Ref_Incompatible - The two types are incompatible, so direct /// reference binding is not possible. Ref_Incompatible = 0, /// Ref_Related - The two types are reference-related, which means /// that their unqualified forms (T1 and T2) are either the same /// or T1 is a base class of T2. Ref_Related, /// Ref_Compatible - The two types are reference-compatible. Ref_Compatible }; // Fake up a scoped enumeration that still contextually converts to bool. 
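// Illustrative example (C++ user code) for FindCompositePointerType: the
// conditional operator below has operands of type 'int *' and 'const int *';
// the composite pointer type, and therefore the type of the expression, is
// 'const int *':
//
//   int x = 0;
//   int *p = &x;
//   const int *q = &x;
//   bool cond = true;
//   auto *r = cond ? p : q;   // decltype(r) is const int *
//
// CXXCheckConditionalOperands uses the same machinery when the second and
// third operands are pointers (or pointers to members) of different types.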
struct ReferenceConversionsScope { /// The conversions that would be performed on an lvalue of type T2 when /// binding a reference of type T1 to it, as determined when evaluating /// whether T1 is reference-compatible with T2. enum ReferenceConversions { Qualification = 0x1, NestedQualification = 0x2, Function = 0x4, DerivedToBase = 0x8, ObjC = 0x10, ObjCLifetime = 0x20, LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/ObjCLifetime) }; }; using ReferenceConversions = ReferenceConversionsScope::ReferenceConversions; ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2, ReferenceConversions *Conv = nullptr); ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType, Expr *CastExpr, CastKind &CastKind, ExprValueKind &VK, CXXCastPath &Path); /// Force an expression with unknown-type to an expression of the /// given type. ExprResult forceUnknownAnyToType(Expr *E, QualType ToType); /// Type-check an expression that's being passed to an /// __unknown_anytype parameter. ExprResult checkUnknownAnyArg(SourceLocation callLoc, Expr *result, QualType &paramType); // CheckVectorCast - check type constraints for vectors. // Since vectors are an extension, there are no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size. // returns true if the cast is invalid bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty, CastKind &Kind); /// Prepare `SplattedExpr` for a vector splat operation, adding /// implicit casts if necessary. ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr); // CheckExtVectorCast - check type constraints for extended vectors. // Since vectors are an extension, there are no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size, // or vectors and the element type of that vector. // returns the cast expr ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr, CastKind &Kind); ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type, SourceLocation LParenLoc, Expr *CastExpr, SourceLocation RParenLoc); enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error }; /// Checks for invalid conversions and casts between /// retainable pointers and other pointer kinds for ARC and Weak. ARCConversionResult CheckObjCConversion(SourceRange castRange, QualType castType, Expr *&op, CheckedConversionKind CCK, bool Diagnose = true, bool DiagnoseCFAudited = false, BinaryOperatorKind Opc = BO_PtrMemD ); Expr *stripARCUnbridgedCast(Expr *e); void diagnoseARCUnbridgedCast(Expr *e); bool CheckObjCARCUnavailableWeakConversion(QualType castType, QualType ExprType); /// checkRetainCycles - Check whether an Objective-C message send /// might create an obvious retain cycle. void checkRetainCycles(ObjCMessageExpr *msg); void checkRetainCycles(Expr *receiver, Expr *argument); void checkRetainCycles(VarDecl *Var, Expr *Init); /// checkUnsafeAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained type. bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS); /// checkUnsafeExprAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained expression. void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS); /// CheckMessageArgumentTypes - Check types in an Obj-C message send. /// \param Method - May be null. /// \param [out] ReturnType - The return type of the send. 
/// \return true iff there were any incompatible types. bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType, MultiExprArg Args, Selector Sel, ArrayRef<SourceLocation> SelectorLocs, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage, SourceLocation lbrac, SourceLocation rbrac, SourceRange RecRange, QualType &ReturnType, ExprValueKind &VK); /// Determine the result of a message send expression based on /// the type of the receiver, the method expected to receive the message, /// and the form of the message send. QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage); /// If the given expression involves a message send to a method /// with a related result type, emit a note describing what happened. void EmitRelatedResultTypeNote(const Expr *E); /// Given that we had incompatible pointer types in a return /// statement, check whether we're in a method with a related result /// type, and if so, emit a note describing what happened. void EmitRelatedResultTypeNoteForReturn(QualType destType); class ConditionResult { Decl *ConditionVar; FullExprArg Condition; bool Invalid; bool HasKnownValue; bool KnownValue; friend class Sema; ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition, bool IsConstexpr) : ConditionVar(ConditionVar), Condition(Condition), Invalid(false), HasKnownValue(IsConstexpr && Condition.get() && !Condition.get()->isValueDependent()), KnownValue(HasKnownValue && !!Condition.get()->EvaluateKnownConstInt(S.Context)) {} explicit ConditionResult(bool Invalid) : ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid), HasKnownValue(false), KnownValue(false) {} public: ConditionResult() : ConditionResult(false) {} bool isInvalid() const { return Invalid; } std::pair<VarDecl *, Expr *> get() const { return std::make_pair(cast_or_null<VarDecl>(ConditionVar), Condition.get()); } llvm::Optional<bool> getKnownValue() const { if (!HasKnownValue) return None; return KnownValue; } }; static ConditionResult ConditionError() { return ConditionResult(true); } enum class ConditionKind { Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'. ConstexprIf, ///< A constant boolean condition from 'if constexpr'. Switch ///< An integral condition for a 'switch' statement. }; ConditionResult ActOnCondition(Scope *S, SourceLocation Loc, Expr *SubExpr, ConditionKind CK); ConditionResult ActOnConditionVariable(Decl *ConditionVar, SourceLocation StmtLoc, ConditionKind CK); DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D); ExprResult CheckConditionVariable(VarDecl *ConditionVar, SourceLocation StmtLoc, ConditionKind CK); ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond); /// CheckBooleanCondition - Diagnose problems involving the use of /// the given expression as a boolean condition (e.g. in an if /// statement). Also performs the standard function and array /// decays, possibly changing the input variable. /// /// \param Loc - A location associated with the condition, e.g. the /// 'if' keyword. /// \return true iff there were any errors ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E, bool IsConstexpr = false); /// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression /// found in an explicit(bool) specifier. ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E); /// tryResolveExplicitSpecifier - Attempt to resolve the explict specifier. 
/// Returns true if the explicit specifier is now resolved. bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec); /// DiagnoseAssignmentAsCondition - Given that an expression is /// being used as a boolean condition, warn if it's an assignment. void DiagnoseAssignmentAsCondition(Expr *E); /// Redundant parentheses over an equality comparison can indicate /// that the user intended an assignment used as condition. void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE); /// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid. ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false); /// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have /// the specified width and sign. If an overflow occurs, detect it and emit /// the specified diagnostic. void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal, unsigned NewWidth, bool NewSign, SourceLocation Loc, unsigned DiagID); /// Checks that the Objective-C declaration is declared in the global scope. /// Emits an error and marks the declaration as invalid if it's not declared /// in the global scope. bool CheckObjCDeclScope(Decl *D); /// Abstract base class used for diagnosing integer constant /// expression violations. class VerifyICEDiagnoser { public: bool Suppress; VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { } virtual SemaDiagnosticBuilder diagnoseNotICEType(Sema &S, SourceLocation Loc, QualType T); virtual SemaDiagnosticBuilder diagnoseNotICE(Sema &S, SourceLocation Loc) = 0; virtual SemaDiagnosticBuilder diagnoseFold(Sema &S, SourceLocation Loc); virtual ~VerifyICEDiagnoser() {} }; enum AllowFoldKind { NoFold, AllowFold, }; /// VerifyIntegerConstantExpression - Verifies that an expression is an ICE, /// and reports the appropriate diagnostics. Returns false on success. /// Can optionally return the value of the expression. ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, VerifyICEDiagnoser &Diagnoser, AllowFoldKind CanFold = NoFold); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, unsigned DiagID, AllowFoldKind CanFold = NoFold); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result = nullptr, AllowFoldKind CanFold = NoFold); ExprResult VerifyIntegerConstantExpression(Expr *E, AllowFoldKind CanFold = NoFold) { return VerifyIntegerConstantExpression(E, nullptr, CanFold); } /// VerifyBitField - verifies that a bit field expression is an ICE and has /// the correct width, and that the field type is valid. /// Returns false on success. /// Can optionally return whether the bit-field is of width 0 ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName, QualType FieldTy, bool IsMsStruct, Expr *BitWidth, bool *ZeroWidth = nullptr); private: unsigned ForceCUDAHostDeviceDepth = 0; public: /// Increments our count of the number of times we've seen a pragma forcing /// functions to be __host__ __device__. So long as this count is greater /// than zero, all functions encountered will be __host__ __device__. void PushForceCUDAHostDevice(); /// Decrements our count of the number of times we've seen a pragma forcing /// functions to be __host__ __device__. Returns false if the count is 0 /// before incrementing, so you can emit an error. bool PopForceCUDAHostDevice(); /// Diagnostics that are emitted only if we discover that the given function /// must be codegen'ed. 
Because handling these correctly adds overhead to /// compilation, this is currently only enabled for CUDA compilations. llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>, std::vector<PartialDiagnosticAt>> DeviceDeferredDiags; /// A pair of a canonical FunctionDecl and a SourceLocation. When used as the /// key in a hashtable, both the FD and location are hashed. struct FunctionDeclAndLoc { CanonicalDeclPtr<FunctionDecl> FD; SourceLocation Loc; }; /// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a /// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the /// same deferred diag twice. llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags; /// An inverse call graph, mapping known-emitted functions to one of their /// known-emitted callers (plus the location of the call). /// /// Functions that we can tell a priori must be emitted aren't added to this /// map. llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>, /* Caller = */ FunctionDeclAndLoc> DeviceKnownEmittedFns; /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current /// context is "used as device code". /// /// - If CurContext is a __host__ function, does not emit any diagnostics /// unless \p EmitOnBothSides is true. /// - If CurContext is a __device__ or __global__ function, emits the /// diagnostics immediately. /// - If CurContext is a __host__ __device__ function and we are compiling for /// the device, creates a diagnostic which is emitted if and when we realize /// that the function will be codegen'ed. /// /// Example usage: /// /// // Variable-length arrays are not allowed in CUDA device code. /// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget()) /// return ExprError(); /// // Otherwise, continue parsing as normal. SemaDiagnosticBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID); /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current /// context is "used as host code". /// /// Same as CUDADiagIfDeviceCode, with "host" and "device" switched. SemaDiagnosticBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID); /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current /// context is "used as device code". /// /// - If CurContext is a `declare target` function or it is known that the /// function is emitted for the device, emits the diagnostics immediately. /// - If CurContext is a non-`declare target` function and we are compiling /// for the device, creates a diagnostic which is emitted if and when we /// realize that the function will be codegen'ed. /// /// Example usage: /// /// // Variable-length arrays are not allowed in NVPTX device code. /// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported)) /// return ExprError(); /// // Otherwise, continue parsing as normal. SemaDiagnosticBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID, FunctionDecl *FD); /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current /// context is "used as host code". /// /// - If CurContext is a `declare target` function or it is known that the /// function is emitted for the host, emits the diagnostics immediately. /// - If CurContext is a non-host function, just ignore it. /// /// Example usage: /// /// // Variable-length arrays are not allowed in NVPTX device code. /// if (diagIfOpenMPHostode(Loc, diag::err_vla_unsupported)) /// return ExprError(); /// // Otherwise, continue parsing as normal. 
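// Illustrative example (CUDA user code) for the deferred-diagnostic machinery
// above: a __host__ __device__ function may reference host-only facilities,
// and the resulting "bad call" diagnostic is recorded in DeviceDeferredDiags
// and emitted only if the function is actually codegen'ed for the device:
//
//   void host_only();                       // implicitly __host__
//
//   __host__ __device__ void maybe_bad() {
//     host_only();       // wrong-side call when compiling for the device;
//   }                    // diagnosed only if maybe_bad() is emitted there
//
//   __global__ void kernel() { maybe_bad(); }  // forces device emission, so
//                                              // the deferred error fires
//
// CheckCUDACall records the deferred diagnostic for such wrong-side calls, and
// CUDADiagIfDeviceCode / CUDADiagIfHostCode follow the same emit-if-used
// strategy for individual diagnostics.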
SemaDiagnosticBuilder diagIfOpenMPHostCode(SourceLocation Loc, unsigned DiagID, FunctionDecl *FD); SemaDiagnosticBuilder targetDiag(SourceLocation Loc, unsigned DiagID, FunctionDecl *FD = nullptr); SemaDiagnosticBuilder targetDiag(SourceLocation Loc, const PartialDiagnostic &PD, FunctionDecl *FD = nullptr) { return targetDiag(Loc, PD.getDiagID(), FD) << PD; } /// Check if the expression is allowed to be used in expressions for the /// offloading devices. void checkDeviceDecl(ValueDecl *D, SourceLocation Loc); enum CUDAFunctionTarget { CFT_Device, CFT_Global, CFT_Host, CFT_HostDevice, CFT_InvalidTarget }; /// Determines whether the given function is a CUDA device/host/kernel/etc. /// function. /// /// Use this rather than examining the function's attributes yourself -- you /// will get it wrong. Returns CFT_Host if D is null. CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D, bool IgnoreImplicitHDAttr = false); CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs); /// Gets the CUDA target for the current context. CUDAFunctionTarget CurrentCUDATarget() { return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext)); } static bool isCUDAImplicitHostDeviceFunction(const FunctionDecl *D); // CUDA function call preference. Must be ordered numerically from // worst to best. enum CUDAFunctionPreference { CFP_Never, // Invalid caller/callee combination. CFP_WrongSide, // Calls from host-device to host or device // function that do not match current compilation // mode. CFP_HostDevice, // Any calls to host/device functions. CFP_SameSide, // Calls from host-device to host or device // function matching current compilation mode. CFP_Native, // host-to-host or device-to-device calls. }; /// Identifies relative preference of a given Caller/Callee /// combination, based on their host/device attributes. /// \param Caller function which needs address of \p Callee. /// nullptr in case of global context. /// \param Callee target function /// /// \returns preference value for particular Caller/Callee combination. CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller, const FunctionDecl *Callee); /// Determines whether Caller may invoke Callee, based on their CUDA /// host/device attributes. Returns false if the call is not allowed. /// /// Note: Will return true for CFP_WrongSide calls. These may appear in /// semantically correct CUDA programs, but only if they're never codegen'ed. bool IsAllowedCUDACall(const FunctionDecl *Caller, const FunctionDecl *Callee) { return IdentifyCUDAPreference(Caller, Callee) != CFP_Never; } /// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD, /// depending on FD and the current compilation settings. void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD, const LookupResult &Previous); /// May add implicit CUDAConstantAttr attribute to VD, depending on VD /// and current compilation settings. void MaybeAddCUDAConstantAttr(VarDecl *VD); public: /// Check whether we're allowed to call Callee from the current context. /// /// - If the call is never allowed in a semantically-correct program /// (CFP_Never), emits an error and returns false. /// /// - If the call is allowed in semantically-correct programs, but only if /// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to /// be emitted if and when the caller is codegen'ed, and returns true. 
/// /// Will only create deferred diagnostics for a given SourceLocation once, /// so you can safely call this multiple times without generating duplicate /// deferred errors. /// /// - Otherwise, returns true without emitting any diagnostics. bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee); void CUDACheckLambdaCapture(CXXMethodDecl *D, const sema::Capture &Capture); /// Set __device__ or __host__ __device__ attributes on the given lambda /// operator() method. /// /// CUDA lambdas by default is host device function unless it has explicit /// host or device attribute. void CUDASetLambdaAttrs(CXXMethodDecl *Method); /// Finds a function in \p Matches with highest calling priority /// from \p Caller context and erases all functions with lower /// calling priority. void EraseUnwantedCUDAMatches( const FunctionDecl *Caller, SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches); /// Given a implicit special member, infer its CUDA target from the /// calls it needs to make to underlying base/field special members. /// \param ClassDecl the class for which the member is being created. /// \param CSM the kind of special member. /// \param MemberDecl the special member itself. /// \param ConstRHS true if this is a copy operation with a const object on /// its RHS. /// \param Diagnose true if this call should emit diagnostics. /// \return true if there was an error inferring. /// The result of this call is implicit CUDA target attribute(s) attached to /// the member declaration. bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl, CXXSpecialMember CSM, CXXMethodDecl *MemberDecl, bool ConstRHS, bool Diagnose); /// \return true if \p CD can be considered empty according to CUDA /// (E.2.3.1 in CUDA 7.5 Programming guide). bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD); bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD); // \brief Checks that initializers of \p Var satisfy CUDA restrictions. In // case of error emits appropriate diagnostic and invalidates \p Var. // // \details CUDA allows only empty constructors as initializers for global // variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all // __shared__ variables whether they are local or not (they all are implicitly // static in CUDA). One exception is that CUDA allows constant initializers // for __constant__ and __device__ variables. void checkAllowedCUDAInitializer(VarDecl *VD); /// Check whether NewFD is a valid overload for CUDA. Emits /// diagnostics and invalidates NewFD if not. void checkCUDATargetOverload(FunctionDecl *NewFD, const LookupResult &Previous); /// Copies target attributes from the template TD to the function FD. void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD); /// Returns the name of the launch configuration function. This is the name /// of the function that will be called to configure kernel call, with the /// parameters specified via <<<>>>. std::string getCudaConfigureFuncName() const; /// \name Code completion //@{ /// Describes the context in which code completion occurs. enum ParserCompletionContext { /// Code completion occurs at top-level or namespace context. PCC_Namespace, /// Code completion occurs within a class, struct, or union. PCC_Class, /// Code completion occurs within an Objective-C interface, protocol, /// or category. 
PCC_ObjCInterface, /// Code completion occurs within an Objective-C implementation or /// category implementation PCC_ObjCImplementation, /// Code completion occurs within the list of instance variables /// in an Objective-C interface, protocol, category, or implementation. PCC_ObjCInstanceVariableList, /// Code completion occurs following one or more template /// headers. PCC_Template, /// Code completion occurs following one or more template /// headers within a class. PCC_MemberTemplate, /// Code completion occurs within an expression. PCC_Expression, /// Code completion occurs within a statement, which may /// also be an expression or a declaration. PCC_Statement, /// Code completion occurs at the beginning of the /// initialization statement (or expression) in a for loop. PCC_ForInit, /// Code completion occurs within the condition of an if, /// while, switch, or for statement. PCC_Condition, /// Code completion occurs within the body of a function on a /// recovery path, where we do not have a specific handle on our position /// in the grammar. PCC_RecoveryInFunction, /// Code completion occurs where only a type is permitted. PCC_Type, /// Code completion occurs in a parenthesized expression, which /// might also be a type cast. PCC_ParenthesizedExpression, /// Code completion occurs within a sequence of declaration /// specifiers within a function, method, or block. PCC_LocalDeclarationSpecifiers }; void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path); void CodeCompleteOrdinaryName(Scope *S, ParserCompletionContext CompletionContext); void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS, bool AllowNonIdentifiers, bool AllowNestedNameSpecifiers); struct CodeCompleteExpressionData; void CodeCompleteExpression(Scope *S, const CodeCompleteExpressionData &Data); void CodeCompleteExpression(Scope *S, QualType PreferredType, bool IsParenthesized = false); void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase, SourceLocation OpLoc, bool IsArrow, bool IsBaseExprStatement, QualType PreferredType); void CodeCompletePostfixExpression(Scope *S, ExprResult LHS, QualType PreferredType); void CodeCompleteTag(Scope *S, unsigned TagSpec); void CodeCompleteTypeQualifiers(DeclSpec &DS); void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D, const VirtSpecifiers *VS = nullptr); void CodeCompleteBracketDeclarator(Scope *S); void CodeCompleteCase(Scope *S); /// Determines the preferred type of the current function argument, by /// examining the signatures of all possible overloads. /// Returns null if unknown or ambiguous, or if code completion is off. /// /// If the code completion point has been reached, also reports the function /// signatures that were considered. /// /// FIXME: rename to GuessCallArgumentType to reduce confusion. QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type, SourceLocation Loc, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl, CXXScopeSpec SS, ParsedType TemplateTypeTy, ArrayRef<Expr *> ArgExprs, IdentifierInfo *II, SourceLocation OpenParLoc); void CodeCompleteInitializer(Scope *S, Decl *D); /// Trigger code completion for a record of \p BaseType. \p InitExprs are /// expressions in the initializer list seen so far and \p D is the current /// Designation being parsed. 
void CodeCompleteDesignator(const QualType BaseType, llvm::ArrayRef<Expr *> InitExprs, const Designation &D); void CodeCompleteAfterIf(Scope *S, bool IsBracedThen); void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext, bool IsUsingDeclaration, QualType BaseType, QualType PreferredType); void CodeCompleteUsing(Scope *S); void CodeCompleteUsingDirective(Scope *S); void CodeCompleteNamespaceDecl(Scope *S); void CodeCompleteNamespaceAliasDecl(Scope *S); void CodeCompleteOperatorName(Scope *S); void CodeCompleteConstructorInitializer( Decl *Constructor, ArrayRef<CXXCtorInitializer *> Initializers); void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro, bool AfterAmpersand); void CodeCompleteAfterFunctionEquals(Declarator &D); void CodeCompleteObjCAtDirective(Scope *S); void CodeCompleteObjCAtVisibility(Scope *S); void CodeCompleteObjCAtStatement(Scope *S); void CodeCompleteObjCAtExpression(Scope *S); void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS); void CodeCompleteObjCPropertyGetter(Scope *S); void CodeCompleteObjCPropertySetter(Scope *S); void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS, bool IsParameter); void CodeCompleteObjCMessageReceiver(Scope *S); void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression); void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, bool IsSuper = false); void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, ObjCInterfaceDecl *Super = nullptr); void CodeCompleteObjCForCollection(Scope *S, DeclGroupPtrTy IterationVar); void CodeCompleteObjCSelector(Scope *S, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCProtocolReferences( ArrayRef<IdentifierLocPair> Protocols); void CodeCompleteObjCProtocolDecl(Scope *S); void CodeCompleteObjCInterfaceDecl(Scope *S); void CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationDecl(Scope *S); void CodeCompleteObjCInterfaceCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCPropertyDefinition(Scope *S); void CodeCompleteObjCPropertySynthesizeIvar(Scope *S, IdentifierInfo *PropertyName); void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod, ParsedType ReturnType); void CodeCompleteObjCMethodDeclSelector(Scope *S, bool IsInstanceMethod, bool AtParameterName, ParsedType ReturnType, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName, SourceLocation ClassNameLoc, bool IsBaseExprStatement); void CodeCompletePreprocessorDirective(bool InConditional); void CodeCompleteInPreprocessorConditionalExclusion(Scope *S); void CodeCompletePreprocessorMacroName(bool IsDefinition); void CodeCompletePreprocessorExpression(); void CodeCompletePreprocessorMacroArgument(Scope *S, IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned Argument); void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled); void CodeCompleteNaturalLanguage(); void CodeCompleteAvailabilityPlatformName(); void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo, SmallVectorImpl<CodeCompletionResult> &Results); //@} 
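// Illustrative example (user code) for the signature-help entry points above:
// with the code-completion point at '^' in
//
//   void draw(int x, int y, float scale);
//   void draw(const char *name);
//   ...
//   draw(10, ^
//
// ProduceCallSignatureHelp receives the callee and the arguments seen so far
// ({10}); it reports the overloads that are still viable and returns the
// preferred type of the parameter being completed (int, for 'y'), which is in
// turn used to rank ordinary completion results.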
//===--------------------------------------------------------------------===// // Extra semantic analysis beyond the C type system public: SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL, unsigned ByteNo) const; private: void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, const ArraySubscriptExpr *ASE=nullptr, bool AllowOnePastEnd=true, bool IndexNegated=false); void CheckArrayAccess(const Expr *E); // Used to grab the relevant information from a FormatAttr and a // FunctionDeclaration. struct FormatStringInfo { unsigned FormatIdx; unsigned FirstDataArg; bool HasVAListArg; }; static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, FormatStringInfo *FSI); bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc, ArrayRef<const Expr *> Args); bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto); void CheckConstructorCall(FunctionDecl *FDecl, QualType ThisType, ArrayRef<const Expr *> Args, const FunctionProtoType *Proto, SourceLocation Loc); void CheckArgAlignment(SourceLocation Loc, NamedDecl *FDecl, StringRef ParamName, QualType ArgTy, QualType ParamTy); void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, const Expr *ThisArg, ArrayRef<const Expr *> Args, bool IsMemberFunction, SourceLocation Loc, SourceRange Range, VariadicCallType CallType); bool CheckObjCString(Expr *Arg); ExprResult CheckOSLogFormatStringArg(Expr *Arg); ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, CallExpr *TheCall); bool CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall); bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, unsigned MaxWidth); bool CheckNeonBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckARMCoprocessorImmediate(const TargetInfo &TI, const Expr *CoprocArg, bool WantCDE); bool CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckAArch64BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckBPFBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall); bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall, ArrayRef<int> ArgNums); bool CheckX86BuiltinTileDuplicate(CallExpr *TheCall, ArrayRef<int> 
ArgNums); bool CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall, ArrayRef<int> ArgNums); bool CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckRISCVBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call); bool SemaBuiltinUnorderedCompare(CallExpr *TheCall); bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs); bool SemaBuiltinComplex(CallExpr *TheCall); bool SemaBuiltinVSX(CallExpr *TheCall); bool SemaBuiltinOSLogFormat(CallExpr *TheCall); public: // Used by C++ template instantiation. ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall); ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, SourceLocation BuiltinLoc, SourceLocation RParenLoc); private: bool SemaBuiltinPrefetch(CallExpr *TheCall); bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall); bool SemaBuiltinAssume(CallExpr *TheCall); bool SemaBuiltinAssumeAligned(CallExpr *TheCall); bool SemaBuiltinLongjmp(CallExpr *TheCall); bool SemaBuiltinSetjmp(CallExpr *TheCall); ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult); ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult); ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op); ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult, bool IsDelete); bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, llvm::APSInt &Result); bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low, int High, bool RangeIsError = true); bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum, unsigned Multiple); bool SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum); bool SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum, unsigned ArgBits); bool SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, int ArgNum, unsigned ArgBits); bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, int ArgNum, unsigned ExpectedFieldNum, bool AllowName); bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinPPCMMACall(CallExpr *TheCall, const char *TypeDesc); bool CheckPPCMMAType(QualType Type, SourceLocation TypeLoc); // Matrix builtin handling. 
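// Illustrative example (user code, assuming the matrix types extension is
// enabled with -fenable-matrix) for the matrix builtins handled below:
//
//   typedef float m4x4_t __attribute__((matrix_type(4, 4)));
//
//   m4x4_t load_transposed(const float *buf, unsigned stride) {
//     m4x4_t m = __builtin_matrix_column_major_load(buf, 4, 4, stride);
//     return __builtin_matrix_transpose(m);
//   }
//
// SemaBuiltinMatrixColumnMajorLoad checks the pointer operand, the constant
// dimensions and the stride; SemaBuiltinMatrixTranspose computes the
// transposed matrix type for the result.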
ExprResult SemaBuiltinMatrixTranspose(CallExpr *TheCall, ExprResult CallResult); ExprResult SemaBuiltinMatrixColumnMajorLoad(CallExpr *TheCall, ExprResult CallResult); ExprResult SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall, ExprResult CallResult); public: enum FormatStringType { FST_Scanf, FST_Printf, FST_NSString, FST_Strftime, FST_Strfmon, FST_Kprintf, FST_FreeBSDKPrintf, FST_OSTrace, FST_OSLog, FST_Unknown }; static FormatStringType GetFormatStringType(const FormatAttr *Format); bool FormatStringHasSArg(const StringLiteral *FExpr); static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx); private: bool CheckFormatArguments(const FormatAttr *Format, ArrayRef<const Expr *> Args, bool IsCXXMember, VariadicCallType CallType, SourceLocation Loc, SourceRange Range, llvm::SmallBitVector &CheckedVarArgs); bool CheckFormatArguments(ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, VariadicCallType CallType, SourceLocation Loc, SourceRange range, llvm::SmallBitVector &CheckedVarArgs); void CheckAbsoluteValueFunction(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMemaccessArguments(const CallExpr *Call, unsigned BId, IdentifierInfo *FnName); void CheckStrlcpycatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckStrncatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckFreeArguments(const CallExpr *E); void CheckReturnValExpr(Expr *RetValExp, QualType lhsType, SourceLocation ReturnLoc, bool isObjCMethod = false, const AttrVec *Attrs = nullptr, const FunctionDecl *FD = nullptr); public: void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS); private: void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation()); void CheckBoolLikeConversion(Expr *E, SourceLocation CC); void CheckForIntOverflow(Expr *E); void CheckUnsequencedOperations(const Expr *E); /// Perform semantic checks on a completed expression. This will either /// be a full-expression or a default argument expression. void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(), bool IsConstexpr = false); void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field, Expr *Init); /// Check if there is a field shadowing. void CheckShadowInheritedFields(const SourceLocation &Loc, DeclarationName FieldName, const CXXRecordDecl *RD, bool DeclIsField = true); /// Check if the given expression contains 'break' or 'continue' /// statement that produces control flow different from GCC. void CheckBreakContinueBinding(Expr *E); /// Check whether receiver is mutable ObjC container which /// attempts to add itself into the container void CheckObjCCircularContainer(ObjCMessageExpr *Message); void CheckTCBEnforcement(const CallExpr *TheCall, const FunctionDecl *Callee); void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE); void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc, bool DeleteWasArrayForm); public: /// Register a magic integral constant to be used as a type tag. 
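// Illustrative example (C user code) of the format-string checking driven by
// CheckFormatArguments: the format attribute on the callee identifies the
// format string and the first variadic argument, and each conversion
// specifier is checked against the corresponding argument:
//
//   __attribute__((format(printf, 1, 2)))
//   void log_msg(const char *fmt, ...);
//
//   log_msg("%s took %d ms", "parse", 42);   // OK
//   log_msg("%s took %d ms", 42, "parse");   // -Wformat: '%s' expects a
//                                            //  'char *', '%d' expects 'int'
//
// FST_Printf, FST_Scanf, FST_NSString, etc. select which format-string
// grammar is used when walking the string.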
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind, uint64_t MagicValue, QualType Type, bool LayoutCompatible, bool MustBeNull); struct TypeTagData { TypeTagData() {} TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) : Type(Type), LayoutCompatible(LayoutCompatible), MustBeNull(MustBeNull) {} QualType Type; /// If true, \c Type should be compared with other expression's types for /// layout-compatibility. unsigned LayoutCompatible : 1; unsigned MustBeNull : 1; }; /// A pair of ArgumentKind identifier and magic value. This uniquely /// identifies the magic value. typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue; private: /// A map from magic value to type information. std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>> TypeTagForDatatypeMagicValues; /// Peform checks on a call of a function with argument_with_type_tag /// or pointer_with_type_tag attributes. void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr, const ArrayRef<const Expr *> ExprArgs, SourceLocation CallSiteLoc); /// Check if we are taking the address of a packed field /// as this may be a problem if the pointer value is dereferenced. void CheckAddressOfPackedMember(Expr *rhs); /// The parser's current scope. /// /// The parser maintains this state here. Scope *CurScope; mutable IdentifierInfo *Ident_super; mutable IdentifierInfo *Ident___float128; /// Nullability type specifiers. IdentifierInfo *Ident__Nonnull = nullptr; IdentifierInfo *Ident__Nullable = nullptr; IdentifierInfo *Ident__Nullable_result = nullptr; IdentifierInfo *Ident__Null_unspecified = nullptr; IdentifierInfo *Ident_NSError = nullptr; /// The handler for the FileChanged preprocessor events. /// /// Used for diagnostics that implement custom semantic analysis for #include /// directives, like -Wpragma-pack. sema::SemaPPCallbacks *SemaPPCallbackHandler; protected: friend class Parser; friend class InitializationSequence; friend class ASTReader; friend class ASTDeclReader; friend class ASTWriter; public: /// Retrieve the keyword associated IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability); /// The struct behind the CFErrorRef pointer. RecordDecl *CFError = nullptr; bool isCFError(RecordDecl *D); /// Retrieve the identifier "NSError". IdentifierInfo *getNSErrorIdent(); /// Retrieve the parser's current scope. /// /// This routine must only be used when it is certain that semantic analysis /// and the parser are in precisely the same context, which is not the case /// when, e.g., we are performing any kind of template instantiation. /// Therefore, the only safe places to use this scope are in the parser /// itself and in routines directly invoked from the parser and *never* from /// template substitution or instantiation. Scope *getCurScope() const { return CurScope; } void incrementMSManglingNumber() const { return CurScope->incrementMSManglingNumber(); } IdentifierInfo *getSuperIdentifier() const; IdentifierInfo *getFloat128Identifier() const; Decl *getObjCDeclContext() const; DeclContext *getCurLexicalContext() const { return OriginalLexicalContext ? OriginalLexicalContext : CurContext; } const DeclContext *getCurObjCLexicalContext() const { const DeclContext *DC = getCurLexicalContext(); // A category implicitly has the attribute of the interface. if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC)) DC = CatD->getClassInterface(); return DC; } /// Determine the number of levels of enclosing template parameters. This is /// only usable while parsing. 
Note that this does not include dependent /// contexts in which no template parameters have yet been declared, such as /// in a terse function template or generic lambda before the first 'auto' is /// encountered. unsigned getTemplateDepth(Scope *S) const; /// To be used for checking whether the arguments being passed to /// function exceeds the number of parameters expected for it. static bool TooManyArguments(size_t NumParams, size_t NumArgs, bool PartialOverloading = false) { // We check whether we're just after a comma in code-completion. if (NumArgs > 0 && PartialOverloading) return NumArgs + 1 > NumParams; // If so, we view as an extra argument. return NumArgs > NumParams; } // Emitting members of dllexported classes is delayed until the class // (including field initializers) is fully parsed. SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses; SmallVector<CXXMethodDecl*, 4> DelayedDllExportMemberFunctions; private: int ParsingClassDepth = 0; class SavePendingParsedClassStateRAII { public: SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); } ~SavePendingParsedClassStateRAII() { assert(S.DelayedOverridingExceptionSpecChecks.empty() && "there shouldn't be any pending delayed exception spec checks"); assert(S.DelayedEquivalentExceptionSpecChecks.empty() && "there shouldn't be any pending delayed exception spec checks"); swapSavedState(); } private: Sema &S; decltype(DelayedOverridingExceptionSpecChecks) SavedOverridingExceptionSpecChecks; decltype(DelayedEquivalentExceptionSpecChecks) SavedEquivalentExceptionSpecChecks; void swapSavedState() { SavedOverridingExceptionSpecChecks.swap( S.DelayedOverridingExceptionSpecChecks); SavedEquivalentExceptionSpecChecks.swap( S.DelayedEquivalentExceptionSpecChecks); } }; /// Helper class that collects misaligned member designations and /// their location info for delayed diagnostics. struct MisalignedMember { Expr *E; RecordDecl *RD; ValueDecl *MD; CharUnits Alignment; MisalignedMember() : E(), RD(), MD(), Alignment() {} MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment) : E(E), RD(RD), MD(MD), Alignment(Alignment) {} explicit MisalignedMember(Expr *E) : MisalignedMember(E, nullptr, nullptr, CharUnits()) {} bool operator==(const MisalignedMember &m) { return this->E == m.E; } }; /// Small set of gathered accesses to potentially misaligned members /// due to the packed attribute. SmallVector<MisalignedMember, 4> MisalignedMembers; /// Adds an expression to the set of gathered misaligned members. void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment); public: /// Diagnoses the current set of gathered accesses. This typically /// happens at full expression level. The set is cleared after emitting the /// diagnostics. void DiagnoseMisalignedMembers(); /// This function checks if the expression is in the sef of potentially /// misaligned members and it is converted to some pointer type T with lower /// or equal alignment requirements. If so it removes it. This is used when /// we do not want to diagnose such misaligned access (e.g. in conversions to /// void*). void DiscardMisalignedMemberAddress(const Type *T, Expr *E); /// This function calls Action when it determines that E designates a /// misaligned member due to the packed attribute. This is used to emit /// local diagnostics like in reference binding. 
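// Illustrative example (C user code) of the misaligned-member tracking above:
//
//   struct __attribute__((packed)) Packet {
//     char tag;
//     int  payload;          // only 1-byte aligned inside the packed struct
//   };
//
//   int *get_payload(struct Packet *pkt) {
//     return &pkt->payload;        // -Waddress-of-packed-member: taking the
//   }                              //  address of a packed member may produce
//                                  //  an unaligned pointer value
//
//   void use(struct Packet *pkt) {
//     void *p = &pkt->payload;     // not diagnosed: the conversion to void *
//   }                              //  goes through DiscardMisalignedMemberAddress
//
// The gathered accesses are diagnosed per full-expression in
// DiagnoseMisalignedMembers.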
void RefersToMemberWithReducedAlignment( Expr *E, llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)> Action); /// Describes the reason a calling convention specification was ignored, used /// for diagnostics. enum class CallingConventionIgnoredReason { ForThisTarget = 0, VariadicFunction, ConstructorDestructor, BuiltinFunction }; /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current /// context is "used as device code". /// /// - If CurLexicalContext is a kernel function or it is known that the /// function will be emitted for the device, emits the diagnostics /// immediately. /// - If CurLexicalContext is a function and we are compiling /// for the device, but we don't know that this function will be codegen'ed /// for devive yet, creates a diagnostic which is emitted if and when we /// realize that the function will be codegen'ed. /// /// Example usage: /// /// Diagnose __float128 type usage only from SYCL device code if the current /// target doesn't support it /// if (!S.Context.getTargetInfo().hasFloat128Type() && /// S.getLangOpts().SYCLIsDevice) /// SYCLDiagIfDeviceCode(Loc, diag::err_type_unsupported) << "__float128"; SemaDiagnosticBuilder SYCLDiagIfDeviceCode(SourceLocation Loc, unsigned DiagID); /// Check whether we're allowed to call Callee from the current context. /// /// - If the call is never allowed in a semantically-correct program /// emits an error and returns false. /// /// - If the call is allowed in semantically-correct programs, but only if /// it's never codegen'ed, creates a deferred diagnostic to be emitted if /// and when the caller is codegen'ed, and returns true. /// /// - Otherwise, returns true without emitting any diagnostics. /// /// Adds Callee to DeviceCallGraph if we don't know if its caller will be /// codegen'ed yet. bool checkSYCLDeviceFunction(SourceLocation Loc, FunctionDecl *Callee); }; /// RAII object that enters a new expression evaluation context. class EnterExpressionEvaluationContext { Sema &Actions; bool Entered = true; public: EnterExpressionEvaluationContext( Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext = Sema::ExpressionEvaluationContextRecord::EK_Other, bool ShouldEnter = true) : Actions(Actions), Entered(ShouldEnter) { if (Entered) Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl, ExprContext); } EnterExpressionEvaluationContext( Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Sema::ReuseLambdaContextDecl_t, Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext = Sema::ExpressionEvaluationContextRecord::EK_Other) : Actions(Actions) { Actions.PushExpressionEvaluationContext( NewContext, Sema::ReuseLambdaContextDecl, ExprContext); } enum InitListTag { InitList }; EnterExpressionEvaluationContext(Sema &Actions, InitListTag, bool ShouldEnter = true) : Actions(Actions), Entered(false) { // In C++11 onwards, narrowing checks are performed on the contents of // braced-init-lists, even when they occur within unevaluated operands. // Therefore we still need to instantiate constexpr functions used in such // a context. 
    if (ShouldEnter && Actions.isUnevaluatedContext() &&
        Actions.getLangOpts().CPlusPlus11) {
      Actions.PushExpressionEvaluationContext(
          Sema::ExpressionEvaluationContext::UnevaluatedList);
      Entered = true;
    }
  }

  ~EnterExpressionEvaluationContext() {
    if (Entered)
      Actions.PopExpressionEvaluationContext();
  }
};

DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
                         sema::TemplateDeductionInfo &Info);

/// Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
  CachedTokens Toks;
  /// The template function declaration to be late parsed.
  Decl *D;
};

template <>
void Sema::PragmaStack<Sema::AlignPackInfo>::Act(SourceLocation PragmaLocation,
                                                 PragmaMsStackAction Action,
                                                 llvm::StringRef StackSlotLabel,
                                                 AlignPackInfo Value);

} // end namespace clang

namespace llvm {
// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its
// SourceLocation.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> {
  using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc;
  using FDBaseInfo =
      DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>;

  static FunctionDeclAndLoc getEmptyKey() {
    return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()};
  }

  static FunctionDeclAndLoc getTombstoneKey() {
    return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()};
  }

  static unsigned getHashValue(const FunctionDeclAndLoc &FDL) {
    return hash_combine(FDBaseInfo::getHashValue(FDL.FD),
                        FDL.Loc.getHashValue());
  }

  static bool isEqual(const FunctionDeclAndLoc &LHS,
                      const FunctionDeclAndLoc &RHS) {
    return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc;
  }
};
} // namespace llvm

#endif
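// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of Sema.h): the
// EnterExpressionEvaluationContext class above is a conditional-entry RAII
// guard -- it pushes an expression evaluation context in its constructor and
// pops it in its destructor only when the Entered flag is set (the InitList
// form, for instance, only enters a context for C++11 unevaluated operands).
// The standalone snippet below shows that pattern using standard C++ only;
// EvalContextStack and ScopedContext are hypothetical names invented for the
// example and do not exist in Clang.
// ---------------------------------------------------------------------------
#include <cassert>
#include <vector>

enum class EvalContext { Unevaluated, PotentiallyEvaluated };

// A plain stack of evaluation contexts, standing in for Sema's
// ExprEvalContexts bookkeeping.
struct EvalContextStack {
  std::vector<EvalContext> Stack;
  void push(EvalContext C) { Stack.push_back(C); }
  void pop() { assert(!Stack.empty()); Stack.pop_back(); }
};

// RAII guard: pushes on construction (optionally), pops on destruction only
// if it actually pushed, so the stack stays balanced on every exit path.
class ScopedContext {
  EvalContextStack &Contexts;
  bool Entered;

public:
  ScopedContext(EvalContextStack &Contexts, EvalContext C,
                bool ShouldEnter = true)
      : Contexts(Contexts), Entered(ShouldEnter) {
    if (Entered)
      Contexts.push(C);
  }
  ~ScopedContext() {
    if (Entered)
      Contexts.pop();
  }
};

// Usage: the pushed context stays active for the lexical scope of the guard.
inline void analyzeUnevaluatedOperand(EvalContextStack &Contexts) {
  ScopedContext Guard(Contexts, EvalContext::Unevaluated);
  // ... analyze the operand while the unevaluated context is active ...
} // popped here, even on early return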
//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Sema class, which performs semantic analysis and // builds ASTs. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_SEMA_SEMA_H #define LLVM_CLANG_SEMA_SEMA_H #include "clang/AST/ASTConcept.h" #include "clang/AST/ASTFwd.h" #include "clang/AST/Attr.h" #include "clang/AST/Availability.h" #include "clang/AST/ComparisonCategories.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprConcepts.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/ExprOpenMP.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/LocInfoType.h" #include "clang/AST/MangleNumberingContext.h" #include "clang/AST/NSAPI.h" #include "clang/AST/PrettyPrinter.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/TypeLoc.h" #include "clang/AST/TypeOrdering.h" #include "clang/Basic/BitmaskEnum.h" #include "clang/Basic/ExpressionTraits.h" #include "clang/Basic/Module.h" #include "clang/Basic/OpenCLOptions.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/PragmaKinds.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TemplateKinds.h" #include "clang/Basic/TypeTraits.h" #include "clang/Sema/AnalysisBasedWarnings.h" #include "clang/Sema/CleanupInfo.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/ExternalSemaSource.h" #include "clang/Sema/IdentifierResolver.h" #include "clang/Sema/ObjCMethodList.h" #include "clang/Sema/Ownership.h" #include "clang/Sema/Scope.h" #include "clang/Sema/SemaConcept.h" #include "clang/Sema/TypoCorrection.h" #include "clang/Sema/Weak.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallBitVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/TinyPtrVector.h" #include "llvm/Frontend/OpenMP/OMPConstants.h" #include <deque> #include <memory> #include <string> #include <tuple> #include <vector> namespace llvm { class APSInt; template <typename ValueT> struct DenseMapInfo; template <typename ValueT, typename ValueInfoT> class DenseSet; class SmallBitVector; struct InlineAsmIdentifierInfo; } namespace clang { class ADLResult; class ASTConsumer; class ASTContext; class ASTMutationListener; class ASTReader; class ASTWriter; class ArrayType; class ParsedAttr; class BindingDecl; class BlockDecl; class CapturedDecl; class CXXBasePath; class CXXBasePaths; class CXXBindTemporaryExpr; typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath; class CXXConstructorDecl; class CXXConversionDecl; class CXXDeleteExpr; class CXXDestructorDecl; class CXXFieldCollector; class CXXMemberCallExpr; class CXXMethodDecl; class CXXScopeSpec; class CXXTemporary; class CXXTryStmt; class CallExpr; class ClassTemplateDecl; class ClassTemplatePartialSpecializationDecl; class ClassTemplateSpecializationDecl; class VarTemplatePartialSpecializationDecl; class CodeCompleteConsumer; class CodeCompletionAllocator; class CodeCompletionTUInfo; class CodeCompletionResult; class CoroutineBodyStmt; class Decl; class 
DeclAccessPair; class DeclContext; class DeclRefExpr; class DeclaratorDecl; class DeducedTemplateArgument; class DependentDiagnostic; class DesignatedInitExpr; class Designation; class EnableIfAttr; class EnumConstantDecl; class Expr; class ExtVectorType; class FormatAttr; class FriendDecl; class FunctionDecl; class FunctionProtoType; class FunctionTemplateDecl; class ImplicitConversionSequence; typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList; class InitListExpr; class InitializationKind; class InitializationSequence; class InitializedEntity; class IntegerLiteral; class LabelStmt; class LambdaExpr; class LangOptions; class LocalInstantiationScope; class LookupResult; class MacroInfo; typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath; class ModuleLoader; class MultiLevelTemplateArgumentList; class NamedDecl; class ObjCCategoryDecl; class ObjCCategoryImplDecl; class ObjCCompatibleAliasDecl; class ObjCContainerDecl; class ObjCImplDecl; class ObjCImplementationDecl; class ObjCInterfaceDecl; class ObjCIvarDecl; template <class T> class ObjCList; class ObjCMessageExpr; class ObjCMethodDecl; class ObjCPropertyDecl; class ObjCProtocolDecl; class OMPThreadPrivateDecl; class OMPRequiresDecl; class OMPDeclareReductionDecl; class OMPDeclareSimdDecl; class OMPClause; struct OMPVarListLocTy; struct OverloadCandidate; enum class OverloadCandidateParamOrder : char; enum OverloadCandidateRewriteKind : unsigned; class OverloadCandidateSet; class OverloadExpr; class ParenListExpr; class ParmVarDecl; class Preprocessor; class PseudoDestructorTypeStorage; class PseudoObjectExpr; class QualType; class StandardConversionSequence; class Stmt; class StringLiteral; class SwitchStmt; class TemplateArgument; class TemplateArgumentList; class TemplateArgumentLoc; class TemplateDecl; class TemplateInstantiationCallback; class TemplateParameterList; class TemplatePartialOrderingContext; class TemplateTemplateParmDecl; class Token; class TypeAliasDecl; class TypedefDecl; class TypedefNameDecl; class TypeLoc; class TypoCorrectionConsumer; class UnqualifiedId; class UnresolvedLookupExpr; class UnresolvedMemberExpr; class UnresolvedSetImpl; class UnresolvedSetIterator; class UsingDecl; class UsingShadowDecl; class ValueDecl; class VarDecl; class VarTemplateSpecializationDecl; class VisibilityAttr; class VisibleDeclConsumer; class IndirectFieldDecl; struct DeductionFailureInfo; class TemplateSpecCandidateSet; namespace sema { class AccessedEntity; class BlockScopeInfo; class Capture; class CapturedRegionScopeInfo; class CapturingScopeInfo; class CompoundScopeInfo; class DelayedDiagnostic; class DelayedDiagnosticPool; class FunctionScopeInfo; class LambdaScopeInfo; class PossiblyUnreachableDiag; class SemaPPCallbacks; class TemplateDeductionInfo; } namespace threadSafety { class BeforeSet; void threadSafetyCleanup(BeforeSet* Cache); } // FIXME: No way to easily map from TemplateTypeParmTypes to // TemplateTypeParmDecls, so we have this horrible PointerUnion. typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>, SourceLocation> UnexpandedParameterPack; /// Describes whether we've seen any nullability information for the given /// file. struct FileNullability { /// The first pointer declarator (of any pointer kind) in the file that does /// not have a corresponding nullability annotation. SourceLocation PointerLoc; /// The end location for the first pointer declarator in the file. Used for /// placing fix-its. 
SourceLocation PointerEndLoc; /// Which kind of pointer declarator we saw. uint8_t PointerKind; /// Whether we saw any type nullability annotations in the given file. bool SawTypeNullability = false; }; /// A mapping from file IDs to a record of whether we've seen nullability /// information in that file. class FileNullabilityMap { /// A mapping from file IDs to the nullability information for each file ID. llvm::DenseMap<FileID, FileNullability> Map; /// A single-element cache based on the file ID. struct { FileID File; FileNullability Nullability; } Cache; public: FileNullability &operator[](FileID file) { // Check the single-element cache. if (file == Cache.File) return Cache.Nullability; // It's not in the single-element cache; flush the cache if we have one. if (!Cache.File.isInvalid()) { Map[Cache.File] = Cache.Nullability; } // Pull this entry into the cache. Cache.File = file; Cache.Nullability = Map[file]; return Cache.Nullability; } }; /// Tracks expected type during expression parsing, for use in code completion. /// The type is tied to a particular token, all functions that update or consume /// the type take a start location of the token they are looking at as a /// parameter. This avoids updating the type on hot paths in the parser. class PreferredTypeBuilder { public: PreferredTypeBuilder(bool Enabled) : Enabled(Enabled) {} void enterCondition(Sema &S, SourceLocation Tok); void enterReturn(Sema &S, SourceLocation Tok); void enterVariableInit(SourceLocation Tok, Decl *D); /// Handles e.g. BaseType{ .D = Tok... void enterDesignatedInitializer(SourceLocation Tok, QualType BaseType, const Designation &D); /// Computing a type for the function argument may require running /// overloading, so we postpone its computation until it is actually needed. /// /// Clients should be very careful when using this funciton, as it stores a /// function_ref, clients should make sure all calls to get() with the same /// location happen while function_ref is alive. /// /// The callback should also emit signature help as a side-effect, but only /// if the completion point has been reached. void enterFunctionArgument(SourceLocation Tok, llvm::function_ref<QualType()> ComputeType); void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc); void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind, SourceLocation OpLoc); void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op); void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base); void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS); /// Handles all type casts, including C-style cast, C++ casts, etc. void enterTypeCast(SourceLocation Tok, QualType CastType); /// Get the expected type associated with this location, if any. /// /// If the location is a function argument, determining the expected type /// involves considering all function overloads and the arguments so far. /// In this case, signature help for these function overloads will be reported /// as a side-effect (only if the completion point has been reached). QualType get(SourceLocation Tok) const { if (!Enabled || Tok != ExpectedLoc) return QualType(); if (!Type.isNull()) return Type; if (ComputeType) return ComputeType(); return QualType(); } private: bool Enabled; /// Start position of a token for which we store expected type. SourceLocation ExpectedLoc; /// Expected type for a token starting at ExpectedLoc. QualType Type; /// A function to compute expected type at ExpectedLoc. It is only considered /// if Type is null. 
llvm::function_ref<QualType()> ComputeType; }; /// Sema - This implements semantic analysis and AST building for C. class Sema final { Sema(const Sema &) = delete; void operator=(const Sema &) = delete; /// A key method to reduce duplicate debug info from Sema. virtual void anchor(); ///Source of additional semantic information. ExternalSemaSource *ExternalSource; ///Whether Sema has generated a multiplexer and has to delete it. bool isMultiplexExternalSource; static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD); bool isVisibleSlow(const NamedDecl *D); /// Determine whether two declarations should be linked together, given that /// the old declaration might not be visible and the new declaration might /// not have external linkage. bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old, const NamedDecl *New) { if (isVisible(Old)) return true; // See comment in below overload for why it's safe to compute the linkage // of the new declaration here. if (New->isExternallyDeclarable()) { assert(Old->isExternallyDeclarable() && "should not have found a non-externally-declarable previous decl"); return true; } return false; } bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New); void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem, QualType ResultTy, ArrayRef<QualType> Args); public: /// The maximum alignment, same as in llvm::Value. We duplicate them here /// because that allows us not to duplicate the constants in clang code, /// which we must to since we can't directly use the llvm constants. /// The value is verified against llvm here: lib/CodeGen/CGDecl.cpp /// /// This is the greatest alignment value supported by load, store, and alloca /// instructions, and global values. static const unsigned MaxAlignmentExponent = 29; static const unsigned MaximumAlignment = 1u << MaxAlignmentExponent; typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef OpaquePtr<QualType> TypeTy; OpenCLOptions OpenCLFeatures; FPOptions CurFPFeatures; const LangOptions &LangOpts; Preprocessor &PP; ASTContext &Context; ASTConsumer &Consumer; DiagnosticsEngine &Diags; SourceManager &SourceMgr; /// Flag indicating whether or not to collect detailed statistics. bool CollectStats; /// Code-completion consumer. CodeCompleteConsumer *CodeCompleter; /// CurContext - This is the current declaration context of parsing. DeclContext *CurContext; /// Generally null except when we temporarily switch decl contexts, /// like in \see ActOnObjCTemporaryExitContainerContext. DeclContext *OriginalLexicalContext; /// VAListTagName - The declaration name corresponding to __va_list_tag. /// This is used as part of a hack to omit that class from ADL results. DeclarationName VAListTagName; bool MSStructPragmaOn; // True when \#pragma ms_struct on /// Controls member pointer representation format under the MS ABI. LangOptions::PragmaMSPointersToMembersKind MSPointerToMemberRepresentationMethod; /// Stack of active SEH __finally scopes. Can be empty. SmallVector<Scope*, 2> CurrentSEHFinally; /// Source location for newly created implicit MSInheritanceAttrs SourceLocation ImplicitMSInheritanceAttrLoc; /// Holds TypoExprs that are created from `createDelayedTypo`. This is used by /// `TransformTypos` in order to keep track of any TypoExprs that are created /// recursively during typo correction and wipe them away if the correction /// fails. 
llvm::SmallVector<TypoExpr *, 2> TypoExprs; /// pragma clang section kind enum PragmaClangSectionKind { PCSK_Invalid = 0, PCSK_BSS = 1, PCSK_Data = 2, PCSK_Rodata = 3, PCSK_Text = 4, PCSK_Relro = 5 }; enum PragmaClangSectionAction { PCSA_Set = 0, PCSA_Clear = 1 }; struct PragmaClangSection { std::string SectionName; bool Valid = false; SourceLocation PragmaLocation; }; PragmaClangSection PragmaClangBSSSection; PragmaClangSection PragmaClangDataSection; PragmaClangSection PragmaClangRodataSection; PragmaClangSection PragmaClangRelroSection; PragmaClangSection PragmaClangTextSection; enum PragmaMsStackAction { PSK_Reset = 0x0, // #pragma () PSK_Set = 0x1, // #pragma (value) PSK_Push = 0x2, // #pragma (push[, id]) PSK_Pop = 0x4, // #pragma (pop[, id]) PSK_Show = 0x8, // #pragma (show) -- only for "pack"! PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value) PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value) }; // #pragma pack and align. class AlignPackInfo { public: // `Native` represents default align mode, which may vary based on the // platform. enum Mode : unsigned char { Native, Natural, Packed, Mac68k }; // #pragma pack info constructor AlignPackInfo(AlignPackInfo::Mode M, unsigned Num, bool IsXL) : PackAttr(true), AlignMode(M), PackNumber(Num), XLStack(IsXL) { assert(Num == PackNumber && "The pack number has been truncated."); } // #pragma align info constructor AlignPackInfo(AlignPackInfo::Mode M, bool IsXL) : PackAttr(false), AlignMode(M), PackNumber(M == Packed ? 1 : UninitPackVal), XLStack(IsXL) {} explicit AlignPackInfo(bool IsXL) : AlignPackInfo(Native, IsXL) {} AlignPackInfo() : AlignPackInfo(Native, false) {} // When a AlignPackInfo itself cannot be used, this returns an 32-bit // integer encoding for it. This should only be passed to // AlignPackInfo::getFromRawEncoding, it should not be inspected directly. static uint32_t getRawEncoding(const AlignPackInfo &Info) { std::uint32_t Encoding{}; if (Info.IsXLStack()) Encoding |= IsXLMask; Encoding |= static_cast<uint32_t>(Info.getAlignMode()) << 1; if (Info.IsPackAttr()) Encoding |= PackAttrMask; Encoding |= static_cast<uint32_t>(Info.getPackNumber()) << 4; return Encoding; } static AlignPackInfo getFromRawEncoding(unsigned Encoding) { bool IsXL = static_cast<bool>(Encoding & IsXLMask); AlignPackInfo::Mode M = static_cast<AlignPackInfo::Mode>((Encoding & AlignModeMask) >> 1); int PackNumber = (Encoding & PackNumMask) >> 4; if (Encoding & PackAttrMask) return AlignPackInfo(M, PackNumber, IsXL); return AlignPackInfo(M, IsXL); } bool IsPackAttr() const { return PackAttr; } bool IsAlignAttr() const { return !PackAttr; } Mode getAlignMode() const { return AlignMode; } unsigned getPackNumber() const { return PackNumber; } bool IsPackSet() const { // #pragma align, #pragma pack(), and #pragma pack(0) do not set the pack // attriute on a decl. return PackNumber != UninitPackVal && PackNumber != 0; } bool IsXLStack() const { return XLStack; } bool operator==(const AlignPackInfo &Info) const { return std::tie(AlignMode, PackNumber, PackAttr, XLStack) == std::tie(Info.AlignMode, Info.PackNumber, Info.PackAttr, Info.XLStack); } bool operator!=(const AlignPackInfo &Info) const { return !(*this == Info); } private: /// \brief True if this is a pragma pack attribute, /// not a pragma align attribute. bool PackAttr; /// \brief The alignment mode that is in effect. Mode AlignMode; /// \brief The pack number of the stack. unsigned char PackNumber; /// \brief True if it is a XL #pragma align/pack stack. 
bool XLStack; /// \brief Uninitialized pack value. static constexpr unsigned char UninitPackVal = -1; // Masks to encode and decode an AlignPackInfo. static constexpr uint32_t IsXLMask{0x0000'0001}; static constexpr uint32_t AlignModeMask{0x0000'0006}; static constexpr uint32_t PackAttrMask{0x00000'0008}; static constexpr uint32_t PackNumMask{0x0000'01F0}; }; template<typename ValueType> struct PragmaStack { struct Slot { llvm::StringRef StackSlotLabel; ValueType Value; SourceLocation PragmaLocation; SourceLocation PragmaPushLocation; Slot(llvm::StringRef StackSlotLabel, ValueType Value, SourceLocation PragmaLocation, SourceLocation PragmaPushLocation) : StackSlotLabel(StackSlotLabel), Value(Value), PragmaLocation(PragmaLocation), PragmaPushLocation(PragmaPushLocation) {} }; void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, ValueType Value) { if (Action == PSK_Reset) { CurrentValue = DefaultValue; CurrentPragmaLocation = PragmaLocation; return; } if (Action & PSK_Push) Stack.emplace_back(StackSlotLabel, CurrentValue, CurrentPragmaLocation, PragmaLocation); else if (Action & PSK_Pop) { if (!StackSlotLabel.empty()) { // If we've got a label, try to find it and jump there. auto I = llvm::find_if(llvm::reverse(Stack), [&](const Slot &x) { return x.StackSlotLabel == StackSlotLabel; }); // If we found the label so pop from there. if (I != Stack.rend()) { CurrentValue = I->Value; CurrentPragmaLocation = I->PragmaLocation; Stack.erase(std::prev(I.base()), Stack.end()); } } else if (!Stack.empty()) { // We do not have a label, just pop the last entry. CurrentValue = Stack.back().Value; CurrentPragmaLocation = Stack.back().PragmaLocation; Stack.pop_back(); } } if (Action & PSK_Set) { CurrentValue = Value; CurrentPragmaLocation = PragmaLocation; } } // MSVC seems to add artificial slots to #pragma stacks on entering a C++ // method body to restore the stacks on exit, so it works like this: // // struct S { // #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>) // void Method {} // #pragma <name>(pop, InternalPragmaSlot) // }; // // It works even with #pragma vtordisp, although MSVC doesn't support // #pragma vtordisp(push [, id], n) // syntax. // // Push / pop a named sentinel slot. void SentinelAction(PragmaMsStackAction Action, StringRef Label) { assert((Action == PSK_Push || Action == PSK_Pop) && "Can only push / pop #pragma stack sentinels!"); Act(CurrentPragmaLocation, Action, Label, CurrentValue); } // Constructors. explicit PragmaStack(const ValueType &Default) : DefaultValue(Default), CurrentValue(Default) {} bool hasValue() const { return CurrentValue != DefaultValue; } SmallVector<Slot, 2> Stack; ValueType DefaultValue; // Value used for PSK_Reset action. ValueType CurrentValue; SourceLocation CurrentPragmaLocation; }; // FIXME: We should serialize / deserialize these if they occur in a PCH (but // we shouldn't do so if they're in a module). /// Whether to insert vtordisps prior to virtual bases in the Microsoft /// C++ ABI. Possible values are 0, 1, and 2, which mean: /// /// 0: Suppress all vtordisps /// 1: Insert vtordisps in the presence of vbase overrides and non-trivial /// structors /// 2: Always insert vtordisps to support RTTI on partially constructed /// objects PragmaStack<MSVtorDispMode> VtorDispStack; PragmaStack<AlignPackInfo> AlignPackStack; // The current #pragma align/pack values and locations at each #include. 
struct AlignPackIncludeState { AlignPackInfo CurrentValue; SourceLocation CurrentPragmaLocation; bool HasNonDefaultValue, ShouldWarnOnInclude; }; SmallVector<AlignPackIncludeState, 8> AlignPackIncludeStack; // Segment #pragmas. PragmaStack<StringLiteral *> DataSegStack; PragmaStack<StringLiteral *> BSSSegStack; PragmaStack<StringLiteral *> ConstSegStack; PragmaStack<StringLiteral *> CodeSegStack; // This stack tracks the current state of Sema.CurFPFeatures. PragmaStack<FPOptionsOverride> FpPragmaStack; FPOptionsOverride CurFPFeatureOverrides() { FPOptionsOverride result; if (!FpPragmaStack.hasValue()) { result = FPOptionsOverride(); } else { result = FpPragmaStack.CurrentValue; } return result; } // RAII object to push / pop sentinel slots for all MS #pragma stacks. // Actions should be performed only if we enter / exit a C++ method body. class PragmaStackSentinelRAII { public: PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct); ~PragmaStackSentinelRAII(); private: Sema &S; StringRef SlotLabel; bool ShouldAct; }; /// A mapping that describes the nullability we've seen in each header file. FileNullabilityMap NullabilityMap; /// Last section used with #pragma init_seg. StringLiteral *CurInitSeg; SourceLocation CurInitSegLoc; /// VisContext - Manages the stack for \#pragma GCC visibility. void *VisContext; // Really a "PragmaVisStack*" /// This an attribute introduced by \#pragma clang attribute. struct PragmaAttributeEntry { SourceLocation Loc; ParsedAttr *Attribute; SmallVector<attr::SubjectMatchRule, 4> MatchRules; bool IsUsed; }; /// A push'd group of PragmaAttributeEntries. struct PragmaAttributeGroup { /// The location of the push attribute. SourceLocation Loc; /// The namespace of this push group. const IdentifierInfo *Namespace; SmallVector<PragmaAttributeEntry, 2> Entries; }; SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack; /// The declaration that is currently receiving an attribute from the /// #pragma attribute stack. const Decl *PragmaAttributeCurrentTargetDecl; /// This represents the last location of a "#pragma clang optimize off" /// directive if such a directive has not been closed by an "on" yet. If /// optimizations are currently "on", this is set to an invalid location. SourceLocation OptimizeOffPragmaLocation; /// Flag indicating if Sema is building a recovery call expression. /// /// This flag is used to avoid building recovery call expressions /// if Sema is already doing so, which would cause infinite recursions. bool IsBuildingRecoveryCallExpr; /// Used to control the generation of ExprWithCleanups. CleanupInfo Cleanup; /// ExprCleanupObjects - This is the stack of objects requiring /// cleanup that are created by the current full expression. SmallVector<ExprWithCleanups::CleanupObject, 8> ExprCleanupObjects; /// Store a set of either DeclRefExprs or MemberExprs that contain a reference /// to a variable (constant) that may or may not be odr-used in this Expr, and /// we won't know until all lvalue-to-rvalue and discarded value conversions /// have been applied to all subexpressions of the enclosing full expression. /// This is cleared at the end of each full expression. using MaybeODRUseExprSet = llvm::SetVector<Expr *, SmallVector<Expr *, 4>, llvm::SmallPtrSet<Expr *, 4>>; MaybeODRUseExprSet MaybeODRUseExprs; std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope; /// Stack containing information about each of the nested /// function, block, and method scopes that are currently active. 
SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes; /// The index of the first FunctionScope that corresponds to the current /// context. unsigned FunctionScopesStart = 0; ArrayRef<sema::FunctionScopeInfo*> getFunctionScopes() const { return llvm::makeArrayRef(FunctionScopes.begin() + FunctionScopesStart, FunctionScopes.end()); } /// Stack containing information needed when in C++2a an 'auto' is encountered /// in a function declaration parameter type specifier in order to invent a /// corresponding template parameter in the enclosing abbreviated function /// template. This information is also present in LambdaScopeInfo, stored in /// the FunctionScopes stack. SmallVector<InventedTemplateParameterInfo, 4> InventedParameterInfos; /// The index of the first InventedParameterInfo that refers to the current /// context. unsigned InventedParameterInfosStart = 0; ArrayRef<InventedTemplateParameterInfo> getInventedParameterInfos() const { return llvm::makeArrayRef(InventedParameterInfos.begin() + InventedParameterInfosStart, InventedParameterInfos.end()); } typedef LazyVector<TypedefNameDecl *, ExternalSemaSource, &ExternalSemaSource::ReadExtVectorDecls, 2, 2> ExtVectorDeclsType; /// ExtVectorDecls - This is a list all the extended vector types. This allows /// us to associate a raw vector type with one of the ext_vector type names. /// This is only necessary for issuing pretty diagnostics. ExtVectorDeclsType ExtVectorDecls; /// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes. std::unique_ptr<CXXFieldCollector> FieldCollector; typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType; /// Set containing all declared private fields that are not used. NamedDeclSetType UnusedPrivateFields; /// Set containing all typedefs that are likely unused. llvm::SmallSetVector<const TypedefNameDecl *, 4> UnusedLocalTypedefNameCandidates; /// Delete-expressions to be analyzed at the end of translation unit /// /// This list contains class members, and locations of delete-expressions /// that could not be proven as to whether they mismatch with new-expression /// used in initializer of the field. typedef std::pair<SourceLocation, bool> DeleteExprLoc; typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs; llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs; typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy; /// PureVirtualClassDiagSet - a set of class declarations which we have /// emitted a list of pure virtual functions. Used to prevent emitting the /// same list more than once. std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet; /// ParsingInitForAutoVars - a set of declarations with auto types for which /// we are currently parsing the initializer. llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars; /// Look for a locally scoped extern "C" declaration by the given name. NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name); typedef LazyVector<VarDecl *, ExternalSemaSource, &ExternalSemaSource::ReadTentativeDefinitions, 2, 2> TentativeDefinitionsType; /// All the tentative definitions encountered in the TU. TentativeDefinitionsType TentativeDefinitions; /// All the external declarations encoutered and used in the TU. SmallVector<VarDecl *, 4> ExternalDeclarations; typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2> UnusedFileScopedDeclsType; /// The set of file scoped decls seen so far that have not been used /// and must warn if not used. Only contains the first declaration. 
UnusedFileScopedDeclsType UnusedFileScopedDecls; typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadDelegatingConstructors, 2, 2> DelegatingCtorDeclsType; /// All the delegating constructors seen so far in the file, used for /// cycle detection at the end of the TU. DelegatingCtorDeclsType DelegatingCtorDecls; /// All the overriding functions seen during a class definition /// that had their exception spec checks delayed, plus the overridden /// function. SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2> DelayedOverridingExceptionSpecChecks; /// All the function redeclarations seen during a class definition that had /// their exception spec checks delayed, plus the prior declaration they /// should be checked against. Except during error recovery, the new decl /// should always be a friend declaration, as that's the only valid way to /// redeclare a special member before its class is complete. SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2> DelayedEquivalentExceptionSpecChecks; typedef llvm::MapVector<const FunctionDecl *, std::unique_ptr<LateParsedTemplate>> LateParsedTemplateMapT; LateParsedTemplateMapT LateParsedTemplateMap; /// Callback to the parser to parse templated functions when needed. typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT); typedef void LateTemplateParserCleanupCB(void *P); LateTemplateParserCB *LateTemplateParser; LateTemplateParserCleanupCB *LateTemplateParserCleanup; void *OpaqueParser; void SetLateTemplateParser(LateTemplateParserCB *LTP, LateTemplateParserCleanupCB *LTPCleanup, void *P) { LateTemplateParser = LTP; LateTemplateParserCleanup = LTPCleanup; OpaqueParser = P; } class DelayedDiagnostics; class DelayedDiagnosticsState { sema::DelayedDiagnosticPool *SavedPool; friend class Sema::DelayedDiagnostics; }; typedef DelayedDiagnosticsState ParsingDeclState; typedef DelayedDiagnosticsState ProcessingContextState; /// A class which encapsulates the logic for delaying diagnostics /// during parsing and other processing. class DelayedDiagnostics { /// The current pool of diagnostics into which delayed /// diagnostics should go. sema::DelayedDiagnosticPool *CurPool; public: DelayedDiagnostics() : CurPool(nullptr) {} /// Adds a delayed diagnostic. void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h /// Determines whether diagnostics should be delayed. bool shouldDelayDiagnostics() { return CurPool != nullptr; } /// Returns the current delayed-diagnostics pool. sema::DelayedDiagnosticPool *getCurrentPool() const { return CurPool; } /// Enter a new scope. Access and deprecation diagnostics will be /// collected in this pool. DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = &pool; return state; } /// Leave a delayed-diagnostic state that was previously pushed. /// Do not emit any of the diagnostics. This is performed as part /// of the bookkeeping of popping a pool "properly". void popWithoutEmitting(DelayedDiagnosticsState state) { CurPool = state.SavedPool; } /// Enter a new scope where access and deprecation diagnostics are /// not delayed. DelayedDiagnosticsState pushUndelayed() { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = nullptr; return state; } /// Undo a previous pushUndelayed(). 
void popUndelayed(DelayedDiagnosticsState state) { assert(CurPool == nullptr); CurPool = state.SavedPool; } } DelayedDiagnostics; /// A RAII object to temporarily push a declaration context. class ContextRAII { private: Sema &S; DeclContext *SavedContext; ProcessingContextState SavedContextState; QualType SavedCXXThisTypeOverride; unsigned SavedFunctionScopesStart; unsigned SavedInventedParameterInfosStart; public: ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true) : S(S), SavedContext(S.CurContext), SavedContextState(S.DelayedDiagnostics.pushUndelayed()), SavedCXXThisTypeOverride(S.CXXThisTypeOverride), SavedFunctionScopesStart(S.FunctionScopesStart), SavedInventedParameterInfosStart(S.InventedParameterInfosStart) { assert(ContextToPush && "pushing null context"); S.CurContext = ContextToPush; if (NewThisContext) S.CXXThisTypeOverride = QualType(); // Any saved FunctionScopes do not refer to this context. S.FunctionScopesStart = S.FunctionScopes.size(); S.InventedParameterInfosStart = S.InventedParameterInfos.size(); } void pop() { if (!SavedContext) return; S.CurContext = SavedContext; S.DelayedDiagnostics.popUndelayed(SavedContextState); S.CXXThisTypeOverride = SavedCXXThisTypeOverride; S.FunctionScopesStart = SavedFunctionScopesStart; S.InventedParameterInfosStart = SavedInventedParameterInfosStart; SavedContext = nullptr; } ~ContextRAII() { pop(); } }; /// Whether the AST is currently being rebuilt to correct immediate /// invocations. Immediate invocation candidates and references to consteval /// functions aren't tracked when this is set. bool RebuildingImmediateInvocation = false; /// Used to change context to isConstantEvaluated without pushing a heavy /// ExpressionEvaluationContextRecord object. bool isConstantEvaluatedOverride; bool isConstantEvaluated() { return ExprEvalContexts.back().isConstantEvaluated() || isConstantEvaluatedOverride; } /// RAII object to handle the state changes required to synthesize /// a function body. class SynthesizedFunctionScope { Sema &S; Sema::ContextRAII SavedContext; bool PushedCodeSynthesisContext = false; public: SynthesizedFunctionScope(Sema &S, DeclContext *DC) : S(S), SavedContext(S, DC) { S.PushFunctionScope(); S.PushExpressionEvaluationContext( Sema::ExpressionEvaluationContext::PotentiallyEvaluated); if (auto *FD = dyn_cast<FunctionDecl>(DC)) FD->setWillHaveBody(true); else assert(isa<ObjCMethodDecl>(DC)); } void addContextNote(SourceLocation UseLoc) { assert(!PushedCodeSynthesisContext); Sema::CodeSynthesisContext Ctx; Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction; Ctx.PointOfInstantiation = UseLoc; Ctx.Entity = cast<Decl>(S.CurContext); S.pushCodeSynthesisContext(Ctx); PushedCodeSynthesisContext = true; } ~SynthesizedFunctionScope() { if (PushedCodeSynthesisContext) S.popCodeSynthesisContext(); if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext)) FD->setWillHaveBody(false); S.PopExpressionEvaluationContext(); S.PopFunctionScopeInfo(); } }; /// WeakUndeclaredIdentifiers - Identifiers contained in /// \#pragma weak before declared. rare. may alias another /// identifier, declared or undeclared llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers; /// ExtnameUndeclaredIdentifiers - Identifiers contained in /// \#pragma redefine_extname before declared. Used in Solaris system headers /// to define functions that occur in multiple standards to call the version /// in the currently selected standard. 
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers; /// Load weak undeclared identifiers from the external source. void LoadExternalWeakUndeclaredIdentifiers(); /// WeakTopLevelDecl - Translation-unit scoped declarations generated by /// \#pragma weak during processing of other Decls. /// I couldn't figure out a clean way to generate these in-line, so /// we store them here and handle separately -- which is a hack. /// It would be best to refactor this. SmallVector<Decl*,2> WeakTopLevelDecl; IdentifierResolver IdResolver; /// Translation Unit Scope - useful to Objective-C actions that need /// to lookup file scope declarations in the "ordinary" C decl namespace. /// For example, user-defined classes, built-in "id" type, etc. Scope *TUScope; /// The C++ "std" namespace, where the standard library resides. LazyDeclPtr StdNamespace; /// The C++ "std::bad_alloc" class, which is defined by the C++ /// standard library. LazyDeclPtr StdBadAlloc; /// The C++ "std::align_val_t" enum class, which is defined by the C++ /// standard library. LazyDeclPtr StdAlignValT; /// The C++ "std::experimental" namespace, where the experimental parts /// of the standard library resides. NamespaceDecl *StdExperimentalNamespaceCache; /// The C++ "std::initializer_list" template, which is defined in /// \<initializer_list>. ClassTemplateDecl *StdInitializerList; /// The C++ "std::coroutine_traits" template, which is defined in /// \<coroutine_traits> ClassTemplateDecl *StdCoroutineTraitsCache; /// The C++ "type_info" declaration, which is defined in \<typeinfo>. RecordDecl *CXXTypeInfoDecl; /// The MSVC "_GUID" struct, which is defined in MSVC header files. RecordDecl *MSVCGuidDecl; /// Caches identifiers/selectors for NSFoundation APIs. std::unique_ptr<NSAPI> NSAPIObj; /// The declaration of the Objective-C NSNumber class. ObjCInterfaceDecl *NSNumberDecl; /// The declaration of the Objective-C NSValue class. ObjCInterfaceDecl *NSValueDecl; /// Pointer to NSNumber type (NSNumber *). QualType NSNumberPointer; /// Pointer to NSValue type (NSValue *). QualType NSValuePointer; /// The Objective-C NSNumber methods used to create NSNumber literals. ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods]; /// The declaration of the Objective-C NSString class. ObjCInterfaceDecl *NSStringDecl; /// Pointer to NSString type (NSString *). QualType NSStringPointer; /// The declaration of the stringWithUTF8String: method. ObjCMethodDecl *StringWithUTF8StringMethod; /// The declaration of the valueWithBytes:objCType: method. ObjCMethodDecl *ValueWithBytesObjCTypeMethod; /// The declaration of the Objective-C NSArray class. ObjCInterfaceDecl *NSArrayDecl; /// The declaration of the arrayWithObjects:count: method. ObjCMethodDecl *ArrayWithObjectsMethod; /// The declaration of the Objective-C NSDictionary class. ObjCInterfaceDecl *NSDictionaryDecl; /// The declaration of the dictionaryWithObjects:forKeys:count: method. ObjCMethodDecl *DictionaryWithObjectsMethod; /// id<NSCopying> type. QualType QIDNSCopying; /// will hold 'respondsToSelector:' Selector RespondsToSelectorSel; /// A flag to remember whether the implicit forms of operator new and delete /// have been declared. bool GlobalNewDeleteDeclared; /// Describes how the expressions currently being parsed are /// evaluated at run-time, if at all. 
enum class ExpressionEvaluationContext { /// The current expression and its subexpressions occur within an /// unevaluated operand (C++11 [expr]p7), such as the subexpression of /// \c sizeof, where the type of the expression may be significant but /// no code will be generated to evaluate the value of the expression at /// run time. Unevaluated, /// The current expression occurs within a braced-init-list within /// an unevaluated operand. This is mostly like a regular unevaluated /// context, except that we still instantiate constexpr functions that are /// referenced here so that we can perform narrowing checks correctly. UnevaluatedList, /// The current expression occurs within a discarded statement. /// This behaves largely similarly to an unevaluated operand in preventing /// definitions from being required, but not in other ways. DiscardedStatement, /// The current expression occurs within an unevaluated /// operand that unconditionally permits abstract references to /// fields, such as a SIZE operator in MS-style inline assembly. UnevaluatedAbstract, /// The current context is "potentially evaluated" in C++11 terms, /// but the expression is evaluated at compile-time (like the values of /// cases in a switch statement). ConstantEvaluated, /// The current expression is potentially evaluated at run time, /// which means that code may be generated to evaluate the value of the /// expression at run time. PotentiallyEvaluated, /// The current expression is potentially evaluated, but any /// declarations referenced inside that expression are only used if /// in fact the current expression is used. /// /// This value is used when parsing default function arguments, for which /// we would like to provide diagnostics (e.g., passing non-POD arguments /// through varargs) but do not want to mark declarations as "referenced" /// until the default argument is used. PotentiallyEvaluatedIfUsed }; using ImmediateInvocationCandidate = llvm::PointerIntPair<ConstantExpr *, 1>; /// Data structure used to record current or nested /// expression evaluation contexts. struct ExpressionEvaluationContextRecord { /// The expression evaluation context. ExpressionEvaluationContext Context; /// Whether the enclosing context needed a cleanup. CleanupInfo ParentCleanup; /// The number of active cleanup objects when we entered /// this expression evaluation context. unsigned NumCleanupObjects; /// The number of typos encountered during this expression evaluation /// context (i.e. the number of TypoExprs created). unsigned NumTypos; MaybeODRUseExprSet SavedMaybeODRUseExprs; /// The lambdas that are present within this context, if it /// is indeed an unevaluated context. SmallVector<LambdaExpr *, 2> Lambdas; /// The declaration that provides context for lambda expressions /// and block literals if the normal declaration context does not /// suffice, e.g., in a default function argument. Decl *ManglingContextDecl; /// If we are processing a decltype type, a set of call expressions /// for which we have deferred checking the completeness of the return type. SmallVector<CallExpr *, 8> DelayedDecltypeCalls; /// If we are processing a decltype type, a set of temporary binding /// expressions for which we have deferred checking the destructor. SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds; llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs; /// Expressions appearing as the LHS of a volatile assignment in this /// context. 
We produce a warning for these when popping the context if /// they are not discarded-value expressions nor unevaluated operands. SmallVector<Expr*, 2> VolatileAssignmentLHSs; /// Set of candidates for starting an immediate invocation. llvm::SmallVector<ImmediateInvocationCandidate, 4> ImmediateInvocationCandidates; /// Set of DeclRefExprs referencing a consteval function when used in a /// context not already known to be immediately invoked. llvm::SmallPtrSet<DeclRefExpr *, 4> ReferenceToConsteval; /// \brief Describes whether we are in an expression constext which we have /// to handle differently. enum ExpressionKind { EK_Decltype, EK_TemplateArgument, EK_Other } ExprContext; ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context, unsigned NumCleanupObjects, CleanupInfo ParentCleanup, Decl *ManglingContextDecl, ExpressionKind ExprContext) : Context(Context), ParentCleanup(ParentCleanup), NumCleanupObjects(NumCleanupObjects), NumTypos(0), ManglingContextDecl(ManglingContextDecl), ExprContext(ExprContext) {} bool isUnevaluated() const { return Context == ExpressionEvaluationContext::Unevaluated || Context == ExpressionEvaluationContext::UnevaluatedAbstract || Context == ExpressionEvaluationContext::UnevaluatedList; } bool isConstantEvaluated() const { return Context == ExpressionEvaluationContext::ConstantEvaluated; } }; /// A stack of expression evaluation contexts. SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts; /// Emit a warning for all pending noderef expressions that we recorded. void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec); /// Compute the mangling number context for a lambda expression or /// block literal. Also return the extra mangling decl if any. /// /// \param DC - The DeclContext containing the lambda expression or /// block literal. std::tuple<MangleNumberingContext *, Decl *> getCurrentMangleNumberContext(const DeclContext *DC); /// SpecialMemberOverloadResult - The overloading result for a special member /// function. /// /// This is basically a wrapper around PointerIntPair. The lowest bits of the /// integer are used to determine whether overload resolution succeeded. class SpecialMemberOverloadResult { public: enum Kind { NoMemberOrDeleted, Ambiguous, Success }; private: llvm::PointerIntPair<CXXMethodDecl*, 2> Pair; public: SpecialMemberOverloadResult() : Pair() {} SpecialMemberOverloadResult(CXXMethodDecl *MD) : Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {} CXXMethodDecl *getMethod() const { return Pair.getPointer(); } void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); } Kind getKind() const { return static_cast<Kind>(Pair.getInt()); } void setKind(Kind K) { Pair.setInt(K); } }; class SpecialMemberOverloadResultEntry : public llvm::FastFoldingSetNode, public SpecialMemberOverloadResult { public: SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID) : FastFoldingSetNode(ID) {} }; /// A cache of special member function overload resolution results /// for C++ records. llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache; /// A cache of the flags available in enumerations with the flag_bits /// attribute. mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache; /// The kind of translation unit we are processing. /// /// When we're processing a complete translation unit, Sema will perform /// end-of-translation-unit semantic tasks (such as creating /// initializers for tentative definitions in C) once parsing has /// completed. 
Modules and precompiled headers perform different kinds of /// checks. TranslationUnitKind TUKind; llvm::BumpPtrAllocator BumpAlloc; /// The number of SFINAE diagnostics that have been trapped. unsigned NumSFINAEErrors; typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>> UnparsedDefaultArgInstantiationsMap; /// A mapping from parameters with unparsed default arguments to the /// set of instantiations of each parameter. /// /// This mapping is a temporary data structure used when parsing /// nested class templates or nested classes of class templates, /// where we might end up instantiating an inner class before the /// default arguments of its methods have been parsed. UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations; // Contains the locations of the beginning of unparsed default // argument locations. llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs; /// UndefinedInternals - all the used, undefined objects which require a /// definition in this translation unit. llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed; /// Determine if VD, which must be a variable or function, is an external /// symbol that nonetheless can't be referenced from outside this translation /// unit because its type has no linkage and it's not extern "C". bool isExternalWithNoLinkageType(ValueDecl *VD); /// Obtain a sorted list of functions that are undefined but ODR-used. void getUndefinedButUsed( SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined); /// Retrieves list of suspicious delete-expressions that will be checked at /// the end of translation unit. const llvm::MapVector<FieldDecl *, DeleteLocs> & getMismatchingDeleteExpressions() const; typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods; typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool; /// Method Pool - allows efficient lookup when typechecking messages to "id". /// We need to maintain a list, since selectors can have differing signatures /// across classes. In Cocoa, this happens to be extremely uncommon (only 1% /// of selectors are "overloaded"). /// At the head of the list it is recorded whether there were 0, 1, or >= 2 /// methods inside categories with a particular selector. GlobalMethodPool MethodPool; /// Method selectors used in a \@selector expression. Used for implementation /// of -Wselector. llvm::MapVector<Selector, SourceLocation> ReferencedSelectors; /// List of SourceLocations where 'self' is implicitly retained inside a /// block. llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1> ImplicitlyRetainedSelfLocs; /// Kinds of C++ special members. enum CXXSpecialMember { CXXDefaultConstructor, CXXCopyConstructor, CXXMoveConstructor, CXXCopyAssignment, CXXMoveAssignment, CXXDestructor, CXXInvalid }; typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember> SpecialMemberDecl; /// The C++ special members which we are currently in the process of /// declaring. If this process recursively triggers the declaration of the /// same special member, we should act as if it is not yet declared. llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared; /// Kinds of defaulted comparison operator functions. enum class DefaultedComparisonKind : unsigned char { /// This is not a defaultable comparison operator. None, /// This is an operator== that should be implemented as a series of /// subobject comparisons. 
Equal, /// This is an operator<=> that should be implemented as a series of /// subobject comparisons. ThreeWay, /// This is an operator!= that should be implemented as a rewrite in terms /// of a == comparison. NotEqual, /// This is an <, <=, >, or >= that should be implemented as a rewrite in /// terms of a <=> comparison. Relational, }; /// The function definitions which were renamed as part of typo-correction /// to match their respective declarations. We want to keep track of them /// to ensure that we don't emit a "redefinition" error if we encounter a /// correctly named definition after the renamed definition. llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions; /// Stack of types that correspond to the parameter entities that are /// currently being copy-initialized. Can be empty. llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes; void ReadMethodPool(Selector Sel); void updateOutOfDateSelector(Selector Sel); /// Private Helper predicate to check for 'self'. bool isSelfExpr(Expr *RExpr); bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method); /// Cause the active diagnostic on the DiagosticsEngine to be /// emitted. This is closely coupled to the SemaDiagnosticBuilder class and /// should not be used elsewhere. void EmitCurrentDiagnostic(unsigned DiagID); /// Records and restores the CurFPFeatures state on entry/exit of compound /// statements. class FPFeaturesStateRAII { public: FPFeaturesStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.CurFPFeatures) { OldOverrides = S.FpPragmaStack.CurrentValue; } ~FPFeaturesStateRAII() { S.CurFPFeatures = OldFPFeaturesState; S.FpPragmaStack.CurrentValue = OldOverrides; } FPOptionsOverride getOverrides() { return OldOverrides; } private: Sema& S; FPOptions OldFPFeaturesState; FPOptionsOverride OldOverrides; }; void addImplicitTypedef(StringRef Name, QualType T); bool WarnedStackExhausted = false; public: Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer, TranslationUnitKind TUKind = TU_Complete, CodeCompleteConsumer *CompletionConsumer = nullptr); ~Sema(); /// Perform initialization that occurs after the parser has been /// initialized but before it parses anything. void Initialize(); const LangOptions &getLangOpts() const { return LangOpts; } OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; } FPOptions &getCurFPFeatures() { return CurFPFeatures; } DiagnosticsEngine &getDiagnostics() const { return Diags; } SourceManager &getSourceManager() const { return SourceMgr; } Preprocessor &getPreprocessor() const { return PP; } ASTContext &getASTContext() const { return Context; } ASTConsumer &getASTConsumer() const { return Consumer; } ASTMutationListener *getASTMutationListener() const; ExternalSemaSource* getExternalSource() const { return ExternalSource; } ///Registers an external source. If an external source already exists, /// creates a multiplex external source and appends to it. /// ///\param[in] E - A non-null external sema source. /// void addExternalSource(ExternalSemaSource *E); void PrintStats() const; /// Warn that the stack is nearly exhausted. void warnStackExhausted(SourceLocation Loc); /// Run some code with "sufficient" stack space. (Currently, at least 256K is /// guaranteed). Produces a warning if we're low on stack space and allocates /// more in that case. Use this in code that may recurse deeply (for example, /// in template instantiation) to avoid stack overflow. 
  void runWithSufficientStackSpace(SourceLocation Loc,
                                   llvm::function_ref<void()> Fn);

  /// Helper class that creates diagnostics with optional
  /// template instantiation stacks.
  ///
  /// This class provides a wrapper around the basic DiagnosticBuilder
  /// class that emits diagnostics. ImmediateDiagBuilder is
  /// responsible for emitting the diagnostic (as DiagnosticBuilder
  /// does) and, if the diagnostic comes from inside a template
  /// instantiation, printing the template instantiation stack as
  /// well.
  class ImmediateDiagBuilder : public DiagnosticBuilder {
    Sema &SemaRef;
    unsigned DiagID;

  public:
    ImmediateDiagBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
        : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) {}
    ImmediateDiagBuilder(DiagnosticBuilder &&DB, Sema &SemaRef, unsigned DiagID)
        : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) {}

    // This is a cunning lie. DiagnosticBuilder actually performs move
    // construction in its copy constructor (but due to varied uses, it's not
    // possible to conveniently express this as actual move construction). So
    // the default copy ctor here is fine, because the base class disables the
    // source anyway, so the user-defined ~ImmediateDiagBuilder is a safe
    // no-op in that case.
    ImmediateDiagBuilder(const ImmediateDiagBuilder &) = default;

    ~ImmediateDiagBuilder() {
      // If we aren't active, there is nothing to do.
      if (!isActive())
        return;

      // Otherwise, we need to emit the diagnostic. First clear the diagnostic
      // builder itself so it won't emit the diagnostic in its own destructor.
      //
      // This seems wasteful, in that as written the DiagnosticBuilder dtor
      // will do its own needless checks to see if the diagnostic needs to be
      // emitted. However, because we take care to ensure that the builder
      // objects never escape, a sufficiently smart compiler will be able to
      // eliminate that code.
      Clear();

      // Dispatch to Sema to emit the diagnostic.
      SemaRef.EmitCurrentDiagnostic(DiagID);
    }

    /// Teach operator<< to produce an object of the correct type.
    template <typename T>
    friend const ImmediateDiagBuilder &
    operator<<(const ImmediateDiagBuilder &Diag, const T &Value) {
      const DiagnosticBuilder &BaseDiag = Diag;
      BaseDiag << Value;
      return Diag;
    }

    // It is necessary to limit this to rvalue reference to avoid calling this
    // function with a bitfield lvalue argument since non-const reference to
    // bitfield is not allowed.
    template <typename T, typename = typename std::enable_if<
                              !std::is_lvalue_reference<T>::value>::type>
    const ImmediateDiagBuilder &operator<<(T &&V) const {
      const DiagnosticBuilder &BaseDiag = *this;
      BaseDiag << std::move(V);
      return *this;
    }
  };

  /// A generic diagnostic builder for errors which may or may not be
  /// deferred.
  ///
  /// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch)
  /// which are not allowed to appear inside __device__ functions and are
  /// allowed to appear in __host__ __device__ functions only if the
  /// host+device function is never codegen'ed.
  ///
  /// To handle this, we use the notion of "deferred diagnostics", where we
  /// attach a diagnostic to a FunctionDecl that's emitted iff it's
  /// codegen'ed.
  ///
  /// This class lets you emit either a regular diagnostic, a deferred
  /// diagnostic, or no diagnostic at all, according to an argument you pass
  /// to its constructor, thus simplifying the process of creating these
  /// "maybe deferred" diagnostics.
  class SemaDiagnosticBuilder {
  public:
    enum Kind {
      /// Emit no diagnostics.
K_Nop, /// Emit the diagnostic immediately (i.e., behave like Sema::Diag()). K_Immediate, /// Emit the diagnostic immediately, and, if it's a warning or error, also /// emit a call stack showing how this function can be reached by an a /// priori known-emitted function. K_ImmediateWithCallStack, /// Create a deferred diagnostic, which is emitted only if the function /// it's attached to is codegen'ed. Also emit a call stack as with /// K_ImmediateWithCallStack. K_Deferred }; SemaDiagnosticBuilder(Kind K, SourceLocation Loc, unsigned DiagID, FunctionDecl *Fn, Sema &S); SemaDiagnosticBuilder(SemaDiagnosticBuilder &&D); SemaDiagnosticBuilder(const SemaDiagnosticBuilder &) = default; ~SemaDiagnosticBuilder(); bool isImmediate() const { return ImmediateDiag.hasValue(); } /// Convertible to bool: True if we immediately emitted an error, false if /// we didn't emit an error or we created a deferred error. /// /// Example usage: /// /// if (SemaDiagnosticBuilder(...) << foo << bar) /// return ExprError(); /// /// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably /// want to use these instead of creating a SemaDiagnosticBuilder yourself. operator bool() const { return isImmediate(); } template <typename T> friend const SemaDiagnosticBuilder & operator<<(const SemaDiagnosticBuilder &Diag, const T &Value) { if (Diag.ImmediateDiag.hasValue()) *Diag.ImmediateDiag << Value; else if (Diag.PartialDiagId.hasValue()) Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second << Value; return Diag; } // It is necessary to limit this to rvalue reference to avoid calling this // function with a bitfield lvalue argument since non-const reference to // bitfield is not allowed. template <typename T, typename = typename std::enable_if< !std::is_lvalue_reference<T>::value>::type> const SemaDiagnosticBuilder &operator<<(T &&V) const { if (ImmediateDiag.hasValue()) *ImmediateDiag << std::move(V); else if (PartialDiagId.hasValue()) S.DeviceDeferredDiags[Fn][*PartialDiagId].second << std::move(V); return *this; } friend const SemaDiagnosticBuilder & operator<<(const SemaDiagnosticBuilder &Diag, const PartialDiagnostic &PD) { if (Diag.ImmediateDiag.hasValue()) PD.Emit(*Diag.ImmediateDiag); else if (Diag.PartialDiagId.hasValue()) Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second = PD; return Diag; } void AddFixItHint(const FixItHint &Hint) const { if (ImmediateDiag.hasValue()) ImmediateDiag->AddFixItHint(Hint); else if (PartialDiagId.hasValue()) S.DeviceDeferredDiags[Fn][*PartialDiagId].second.AddFixItHint(Hint); } friend ExprResult ExprError(const SemaDiagnosticBuilder &) { return ExprError(); } friend StmtResult StmtError(const SemaDiagnosticBuilder &) { return StmtError(); } operator ExprResult() const { return ExprError(); } operator StmtResult() const { return StmtError(); } operator TypeResult() const { return TypeError(); } operator DeclResult() const { return DeclResult(true); } operator MemInitResult() const { return MemInitResult(true); } private: Sema &S; SourceLocation Loc; unsigned DiagID; FunctionDecl *Fn; bool ShowCallStack; // Invariant: At most one of these Optionals has a value. // FIXME: Switch these to a Variant once that exists. llvm::Optional<ImmediateDiagBuilder> ImmediateDiag; llvm::Optional<unsigned> PartialDiagId; }; /// Is the last error level diagnostic immediate. This is used to determined /// whether the next info diagnostic should be immediate. bool IsLastErrorImmediate = true; /// Emit a diagnostic. 
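  ///
  /// Illustrative sketch, not from the original header: a typical call site
  /// streams arguments into the returned builder and uses its boolean
  /// conversion to bail out when an immediate error was produced. The
  /// diagnostic ID `diag::err_example` and the declaration `D` are
  /// hypothetical.
  /// \code
  ///   if (Diag(D->getLocation(), diag::err_example) << D->getDeclName())
  ///     return ExprError();
  /// \endcode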
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID, bool DeferHint = false); /// Emit a partial diagnostic. SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic &PD, bool DeferHint = false); /// Build a partial diagnostic. PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h /// Whether uncompilable error has occurred. This includes error happens /// in deferred diagnostics. bool hasUncompilableErrorOccurred() const; bool findMacroSpelling(SourceLocation &loc, StringRef name); /// Get a string to suggest for zero-initialization of a type. std::string getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const; std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const; /// Calls \c Lexer::getLocForEndOfToken() SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0); /// Retrieve the module loader associated with the preprocessor. ModuleLoader &getModuleLoader() const; /// Invent a new identifier for parameters of abbreviated templates. IdentifierInfo * InventAbbreviatedTemplateParameterTypeName(IdentifierInfo *ParamName, unsigned Index); void emitAndClearUnusedLocalTypedefWarnings(); private: /// Function or variable declarations to be checked for whether the deferred /// diagnostics should be emitted. SmallVector<Decl *, 4> DeclsToCheckForDeferredDiags; public: // Emit all deferred diagnostics. void emitDeferredDiags(); enum TUFragmentKind { /// The global module fragment, between 'module;' and a module-declaration. Global, /// A normal translation unit fragment. For a non-module unit, this is the /// entire translation unit. Otherwise, it runs from the module-declaration /// to the private-module-fragment (if any) or the end of the TU (if not). Normal, /// The private module fragment, between 'module :private;' and the end of /// the translation unit. Private }; void ActOnStartOfTranslationUnit(); void ActOnEndOfTranslationUnit(); void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind); void CheckDelegatingCtorCycles(); Scope *getScopeForContext(DeclContext *Ctx); void PushFunctionScope(); void PushBlockScope(Scope *BlockScope, BlockDecl *Block); sema::LambdaScopeInfo *PushLambdaScope(); /// This is used to inform Sema what the current TemplateParameterDepth /// is during Parsing. Currently it is used to pass on the depth /// when parsing generic lambda 'auto' parameters. void RecordParsingTemplateParameterDepth(unsigned Depth); void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD, RecordDecl *RD, CapturedRegionKind K, unsigned OpenMPCaptureLevel = 0); /// Custom deleter to allow FunctionScopeInfos to be kept alive for a short /// time after they've been popped. class PoppedFunctionScopeDeleter { Sema *Self; public: explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {} void operator()(sema::FunctionScopeInfo *Scope) const; }; using PoppedFunctionScopePtr = std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>; PoppedFunctionScopePtr PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr, const Decl *D = nullptr, QualType BlockType = QualType()); sema::FunctionScopeInfo *getCurFunction() const { return FunctionScopes.empty() ? 
nullptr : FunctionScopes.back(); } sema::FunctionScopeInfo *getEnclosingFunction() const; void setFunctionHasBranchIntoScope(); void setFunctionHasBranchProtectedScope(); void setFunctionHasIndirectGoto(); void PushCompoundScope(bool IsStmtExpr); void PopCompoundScope(); sema::CompoundScopeInfo &getCurCompoundScope() const; bool hasAnyUnrecoverableErrorsInThisFunction() const; /// Retrieve the current block, if any. sema::BlockScopeInfo *getCurBlock(); /// Get the innermost lambda enclosing the current location, if any. This /// looks through intervening non-lambda scopes such as local functions and /// blocks. sema::LambdaScopeInfo *getEnclosingLambda() const; /// Retrieve the current lambda scope info, if any. /// \param IgnoreNonLambdaCapturingScope true if should find the top-most /// lambda scope info ignoring all inner capturing scopes that are not /// lambda scopes. sema::LambdaScopeInfo * getCurLambda(bool IgnoreNonLambdaCapturingScope = false); /// Retrieve the current generic lambda info, if any. sema::LambdaScopeInfo *getCurGenericLambda(); /// Retrieve the current captured region, if any. sema::CapturedRegionScopeInfo *getCurCapturedRegion(); /// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; } /// Called before parsing a function declarator belonging to a function /// declaration. void ActOnStartFunctionDeclarationDeclarator(Declarator &D, unsigned TemplateParameterDepth); /// Called after parsing a function declarator belonging to a function /// declaration. void ActOnFinishFunctionDeclarationDeclarator(Declarator &D); void ActOnComment(SourceRange Comment); //===--------------------------------------------------------------------===// // Type Analysis / Processing: SemaType.cpp. // QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs, const DeclSpec *DS = nullptr); QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA, const DeclSpec *DS = nullptr); QualType BuildPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildReferenceType(QualType T, bool LValueRef, SourceLocation Loc, DeclarationName Entity); QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM, Expr *ArraySize, unsigned Quals, SourceRange Brackets, DeclarationName Entity); QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc); QualType BuildExtVectorType(QualType T, Expr *ArraySize, SourceLocation AttrLoc); QualType BuildMatrixType(QualType T, Expr *NumRows, Expr *NumColumns, SourceLocation AttrLoc); QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace, SourceLocation AttrLoc); /// Same as above, but constructs the AddressSpace index if not provided. QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace, SourceLocation AttrLoc); bool CheckQualifiedFunctionForTypeId(QualType T, SourceLocation Loc); bool CheckFunctionReturnType(QualType T, SourceLocation Loc); /// Build a function type. /// /// This routine checks the function type according to C++ rules and /// under the assumption that the result type and parameter types have /// just been instantiated from a template. It therefore duplicates /// some of the behavior of GetTypeForDeclarator, but in a much /// simpler form that is only suitable for this narrow use case. /// /// \param T The return type of the function. /// /// \param ParamTypes The parameter types of the function. 
This array /// will be modified to account for adjustments to the types of the /// function parameters. /// /// \param Loc The location of the entity whose type involves this /// function type or, if there is no such entity, the location of the /// type that will have function type. /// /// \param Entity The name of the entity that involves the function /// type, if known. /// /// \param EPI Extra information about the function type. Usually this will /// be taken from an existing function with the same prototype. /// /// \returns A suitable function type, if there are no errors. The /// unqualified type will always be a FunctionProtoType. /// Otherwise, returns a NULL type. QualType BuildFunctionType(QualType T, MutableArrayRef<QualType> ParamTypes, SourceLocation Loc, DeclarationName Entity, const FunctionProtoType::ExtProtoInfo &EPI); QualType BuildMemberPointerType(QualType T, QualType Class, SourceLocation Loc, DeclarationName Entity); QualType BuildBlockPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildParenType(QualType T); QualType BuildAtomicType(QualType T, SourceLocation Loc); QualType BuildReadPipeType(QualType T, SourceLocation Loc); QualType BuildWritePipeType(QualType T, SourceLocation Loc); QualType BuildExtIntType(bool IsUnsigned, Expr *BitWidth, SourceLocation Loc); TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S); TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy); /// Package the given type and TSI into a ParsedType. ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo); DeclarationNameInfo GetNameForDeclarator(Declarator &D); DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name); static QualType GetTypeFromParser(ParsedType Ty, TypeSourceInfo **TInfo = nullptr); CanThrowResult canThrow(const Stmt *E); /// Determine whether the callee of a particular function call can throw. /// E, D and Loc are all optional. static CanThrowResult canCalleeThrow(Sema &S, const Expr *E, const Decl *D, SourceLocation Loc = SourceLocation()); const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc, const FunctionProtoType *FPT); void UpdateExceptionSpec(FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI); bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range); bool CheckDistantExceptionSpec(QualType T); bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New); bool CheckEquivalentExceptionSpec( const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool CheckEquivalentExceptionSpec( const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID, const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool handlerCanCatch(QualType HandlerType, QualType ExceptionType); bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID, const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const PartialDiagnostic &NoThrowDiagID, const FunctionProtoType *Superset, SourceLocation SuperLoc, const FunctionProtoType *Subset, SourceLocation SubLoc); bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const FunctionProtoType *Target, SourceLocation TargetLoc, const FunctionProtoType *Source, SourceLocation SourceLoc); TypeResult ActOnTypeName(Scope *S, Declarator &D); /// The parser has parsed the context-sensitive type 'instancetype' /// in an Objective-C message declaration. 
Return the appropriate type. ParsedType ActOnObjCInstanceType(SourceLocation Loc); /// Abstract class used to diagnose incomplete types. struct TypeDiagnoser { TypeDiagnoser() {} virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0; virtual ~TypeDiagnoser() {} }; static int getPrintable(int I) { return I; } static unsigned getPrintable(unsigned I) { return I; } static bool getPrintable(bool B) { return B; } static const char * getPrintable(const char *S) { return S; } static StringRef getPrintable(StringRef S) { return S; } static const std::string &getPrintable(const std::string &S) { return S; } static const IdentifierInfo *getPrintable(const IdentifierInfo *II) { return II; } static DeclarationName getPrintable(DeclarationName N) { return N; } static QualType getPrintable(QualType T) { return T; } static SourceRange getPrintable(SourceRange R) { return R; } static SourceRange getPrintable(SourceLocation L) { return L; } static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); } static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();} template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser { protected: unsigned DiagID; std::tuple<const Ts &...> Args; template <std::size_t... Is> void emit(const SemaDiagnosticBuilder &DB, std::index_sequence<Is...>) const { // Apply all tuple elements to the builder in order. bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...}; (void)Dummy; } public: BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args) : TypeDiagnoser(), DiagID(DiagID), Args(Args...) { assert(DiagID != 0 && "no diagnostic for type diagnoser"); } void diagnose(Sema &S, SourceLocation Loc, QualType T) override { const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID); emit(DB, std::index_sequence_for<Ts...>()); DB << T; } }; /// Do a check to make sure \p Name looks like a legal argument for the /// swift_name attribute applied to decl \p D. Raise a diagnostic if the name /// is invalid for the given declaration. /// /// \p AL is used to provide caret diagnostics in case of a malformed name. /// /// \returns true if the name is a valid swift name for \p D, false otherwise. bool DiagnoseSwiftName(Decl *D, StringRef Name, SourceLocation Loc, const ParsedAttr &AL, bool IsAsync); /// A derivative of BoundTypeDiagnoser for which the diagnostic's type /// parameter is preceded by a 0/1 enum that is 1 if the type is sizeless. /// For example, a diagnostic with no other parameters would generally have /// the form "...%select{incomplete|sizeless}0 type %1...". template <typename... Ts> class SizelessTypeDiagnoser : public BoundTypeDiagnoser<Ts...> { public: SizelessTypeDiagnoser(unsigned DiagID, const Ts &... Args) : BoundTypeDiagnoser<Ts...>(DiagID, Args...) {} void diagnose(Sema &S, SourceLocation Loc, QualType T) override { const SemaDiagnosticBuilder &DB = S.Diag(Loc, this->DiagID); this->emit(DB, std::index_sequence_for<Ts...>()); DB << T->isSizelessType() << T; } }; enum class CompleteTypeKind { /// Apply the normal rules for complete types. In particular, /// treat all sizeless types as incomplete. Normal, /// Relax the normal rules for complete types so that they include /// sizeless built-in types. AcceptSizeless, // FIXME: Eventually we should flip the default to Normal and opt in // to AcceptSizeless rather than opt out of it. Default = AcceptSizeless }; private: /// Methods for marking which expressions involve dereferencing a pointer /// marked with the 'noderef' attribute. 
Expressions are checked bottom up as /// they are parsed, meaning that a noderef pointer may not be accessed. For /// example, in `&*p` where `p` is a noderef pointer, we will first parse the /// `*p`, but need to check that `address of` is called on it. This requires /// keeping a container of all pending expressions and checking if the address /// of them are eventually taken. void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E); void CheckAddressOfNoDeref(const Expr *E); void CheckMemberAccessOfNoDeref(const MemberExpr *E); bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T, CompleteTypeKind Kind, TypeDiagnoser *Diagnoser); struct ModuleScope { SourceLocation BeginLoc; clang::Module *Module = nullptr; bool ModuleInterface = false; bool ImplicitGlobalModuleFragment = false; VisibleModuleSet OuterVisibleModules; }; /// The modules we're currently parsing. llvm::SmallVector<ModuleScope, 16> ModuleScopes; /// Namespace definitions that we will export when they finish. llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces; /// Get the module whose scope we are currently within. Module *getCurrentModule() const { return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module; } VisibleModuleSet VisibleModules; public: /// Get the module owning an entity. Module *getOwningModule(const Decl *Entity) { return Entity->getOwningModule(); } /// Make a merged definition of an existing hidden definition \p ND /// visible at the specified location. void makeMergedDefinitionVisible(NamedDecl *ND); bool isModuleVisible(const Module *M, bool ModulePrivate = false); // When loading a non-modular PCH files, this is used to restore module // visibility. void makeModuleVisible(Module *Mod, SourceLocation ImportLoc) { VisibleModules.setVisible(Mod, ImportLoc); } /// Determine whether a declaration is visible to name lookup. bool isVisible(const NamedDecl *D) { return D->isUnconditionallyVisible() || isVisibleSlow(D); } /// Determine whether any declaration of an entity is visible. bool hasVisibleDeclaration(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr) { return isVisible(D) || hasVisibleDeclarationSlow(D, Modules); } bool hasVisibleDeclarationSlow(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules); bool hasVisibleMergedDefinition(NamedDecl *Def); bool hasMergedDefinitionInCurrentModule(NamedDecl *Def); /// Determine if \p D and \p Suggested have a structurally compatible /// layout as described in C11 6.2.7/1. bool hasStructuralCompatLayout(Decl *D, Decl *Suggested); /// Determine if \p D has a visible definition. If not, suggest a declaration /// that should be made visible to expose the definition. bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested, bool OnlyNeedComplete = false); bool hasVisibleDefinition(const NamedDecl *D) { NamedDecl *Hidden; return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden); } /// Determine if the template parameter \p D has a visible default argument. bool hasVisibleDefaultArgument(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if there is a visible declaration of \p D that is an explicit /// specialization declaration for a specialization of a template. (For a /// member specialization, use hasVisibleMemberSpecialization.) 
bool hasVisibleExplicitSpecialization( const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if there is a visible declaration of \p D that is a member /// specialization declaration (as opposed to an instantiated declaration). bool hasVisibleMemberSpecialization( const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if \p A and \p B are equivalent internal linkage declarations /// from different modules, and thus an ambiguity error can be downgraded to /// an extension warning. bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A, const NamedDecl *B); void diagnoseEquivalentInternalLinkageDeclarations( SourceLocation Loc, const NamedDecl *D, ArrayRef<const NamedDecl *> Equiv); bool isUsualDeallocationFunction(const CXXMethodDecl *FD); bool isCompleteType(SourceLocation Loc, QualType T, CompleteTypeKind Kind = CompleteTypeKind::Default) { return !RequireCompleteTypeImpl(Loc, T, Kind, nullptr); } bool RequireCompleteType(SourceLocation Loc, QualType T, CompleteTypeKind Kind, TypeDiagnoser &Diagnoser); bool RequireCompleteType(SourceLocation Loc, QualType T, CompleteTypeKind Kind, unsigned DiagID); bool RequireCompleteType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser) { return RequireCompleteType(Loc, T, CompleteTypeKind::Default, Diagnoser); } bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID) { return RequireCompleteType(Loc, T, CompleteTypeKind::Default, DiagID); } template <typename... Ts> bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteType(Loc, T, Diagnoser); } template <typename... Ts> bool RequireCompleteSizedType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &... Args) { SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteType(Loc, T, CompleteTypeKind::Normal, Diagnoser); } /// Get the type of expression E, triggering instantiation to complete the /// type if necessary -- that is, if the expression refers to a templated /// static data member of incomplete array type. /// /// May still return an incomplete type if instantiation was not possible or /// if the type is incomplete for a different reason. Use /// RequireCompleteExprType instead if a diagnostic is expected for an /// incomplete expression type. QualType getCompletedType(Expr *E); void completeExprArrayBound(Expr *E); bool RequireCompleteExprType(Expr *E, CompleteTypeKind Kind, TypeDiagnoser &Diagnoser); bool RequireCompleteExprType(Expr *E, unsigned DiagID); template <typename... Ts> bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteExprType(E, CompleteTypeKind::Default, Diagnoser); } template <typename... Ts> bool RequireCompleteSizedExprType(Expr *E, unsigned DiagID, const Ts &... Args) { SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteExprType(E, CompleteTypeKind::Normal, Diagnoser); } bool RequireLiteralType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID); template <typename... 
Ts> bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireLiteralType(Loc, T, Diagnoser); } QualType getElaboratedType(ElaboratedTypeKeyword Keyword, const CXXScopeSpec &SS, QualType T, TagDecl *OwnedTagDecl = nullptr); QualType BuildTypeofExprType(Expr *E, SourceLocation Loc); /// If AsUnevaluated is false, E is treated as though it were an evaluated /// context, such as when building a type for decltype(auto). QualType BuildDecltypeType(Expr *E, SourceLocation Loc, bool AsUnevaluated = true); QualType BuildUnaryTransformType(QualType BaseType, UnaryTransformType::UTTKind UKind, SourceLocation Loc); //===--------------------------------------------------------------------===// // Symbol table / Decl tracking callbacks: SemaDecl.cpp. // struct SkipBodyInfo { SkipBodyInfo() : ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr), New(nullptr) {} bool ShouldSkip; bool CheckSameAsPrevious; NamedDecl *Previous; NamedDecl *New; }; DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr); void DiagnoseUseOfUnimplementedSelectors(); bool isSimpleTypeSpecifier(tok::TokenKind Kind) const; ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec *SS = nullptr, bool isClassName = false, bool HasTrailingDot = false, ParsedType ObjectType = nullptr, bool IsCtorOrDtorName = false, bool WantNontrivialTypeSourceInfo = false, bool IsClassTemplateDeductionContext = true, IdentifierInfo **CorrectedII = nullptr); TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S); bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S); void DiagnoseUnknownTypeName(IdentifierInfo *&II, SourceLocation IILoc, Scope *S, CXXScopeSpec *SS, ParsedType &SuggestedType, bool IsTemplateName = false); /// Attempt to behave like MSVC in situations where lookup of an unqualified /// type name has failed in a dependent context. In these situations, we /// automatically form a DependentTypeName that will retry lookup in a related /// scope during instantiation. ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II, SourceLocation NameLoc, bool IsTemplateTypeArg); /// Describes the result of the name lookup and resolution performed /// by \c ClassifyName(). enum NameClassificationKind { /// This name is not a type or template in this context, but might be /// something else. NC_Unknown, /// Classification failed; an error has been produced. NC_Error, /// The name has been typo-corrected to a keyword. NC_Keyword, /// The name was classified as a type. NC_Type, /// The name was classified as a specific non-type, non-template /// declaration. ActOnNameClassifiedAsNonType should be called to /// convert the declaration to an expression. NC_NonType, /// The name was classified as an ADL-only function name. /// ActOnNameClassifiedAsUndeclaredNonType should be called to convert the /// result to an expression. NC_UndeclaredNonType, /// The name denotes a member of a dependent type that could not be /// resolved. ActOnNameClassifiedAsDependentNonType should be called to /// convert the result to an expression. NC_DependentNonType, /// The name was classified as an overload set, and an expression /// representing that overload set has been formed. /// ActOnNameClassifiedAsOverloadSet should be called to form a suitable /// expression referencing the overload set. NC_OverloadSet, /// The name was classified as a template whose specializations are types. 
NC_TypeTemplate, /// The name was classified as a variable template name. NC_VarTemplate, /// The name was classified as a function template name. NC_FunctionTemplate, /// The name was classified as an ADL-only function template name. NC_UndeclaredTemplate, /// The name was classified as a concept name. NC_Concept, }; class NameClassification { NameClassificationKind Kind; union { ExprResult Expr; NamedDecl *NonTypeDecl; TemplateName Template; ParsedType Type; }; explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {} public: NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {} NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {} static NameClassification Error() { return NameClassification(NC_Error); } static NameClassification Unknown() { return NameClassification(NC_Unknown); } static NameClassification OverloadSet(ExprResult E) { NameClassification Result(NC_OverloadSet); Result.Expr = E; return Result; } static NameClassification NonType(NamedDecl *D) { NameClassification Result(NC_NonType); Result.NonTypeDecl = D; return Result; } static NameClassification UndeclaredNonType() { return NameClassification(NC_UndeclaredNonType); } static NameClassification DependentNonType() { return NameClassification(NC_DependentNonType); } static NameClassification TypeTemplate(TemplateName Name) { NameClassification Result(NC_TypeTemplate); Result.Template = Name; return Result; } static NameClassification VarTemplate(TemplateName Name) { NameClassification Result(NC_VarTemplate); Result.Template = Name; return Result; } static NameClassification FunctionTemplate(TemplateName Name) { NameClassification Result(NC_FunctionTemplate); Result.Template = Name; return Result; } static NameClassification Concept(TemplateName Name) { NameClassification Result(NC_Concept); Result.Template = Name; return Result; } static NameClassification UndeclaredTemplate(TemplateName Name) { NameClassification Result(NC_UndeclaredTemplate); Result.Template = Name; return Result; } NameClassificationKind getKind() const { return Kind; } ExprResult getExpression() const { assert(Kind == NC_OverloadSet); return Expr; } ParsedType getType() const { assert(Kind == NC_Type); return Type; } NamedDecl *getNonTypeDecl() const { assert(Kind == NC_NonType); return NonTypeDecl; } TemplateName getTemplateName() const { assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate || Kind == NC_VarTemplate || Kind == NC_Concept || Kind == NC_UndeclaredTemplate); return Template; } TemplateNameKind getTemplateNameKind() const { switch (Kind) { case NC_TypeTemplate: return TNK_Type_template; case NC_FunctionTemplate: return TNK_Function_template; case NC_VarTemplate: return TNK_Var_template; case NC_Concept: return TNK_Concept_template; case NC_UndeclaredTemplate: return TNK_Undeclared_template; default: llvm_unreachable("unsupported name classification."); } } }; /// Perform name lookup on the given name, classifying it based on /// the results of name lookup and the following token. /// /// This routine is used by the parser to resolve identifiers and help direct /// parsing. When the identifier cannot be found, this routine will attempt /// to correct the typo and classify based on the resulting name. /// /// \param S The scope in which we're performing name lookup. /// /// \param SS The nested-name-specifier that precedes the name. /// /// \param Name The identifier. If typo correction finds an alternative name, /// this pointer parameter will be updated accordingly. 
/// /// \param NameLoc The location of the identifier. /// /// \param NextToken The token following the identifier. Used to help /// disambiguate the name. /// /// \param CCC The correction callback, if typo correction is desired. NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name, SourceLocation NameLoc, const Token &NextToken, CorrectionCandidateCallback *CCC = nullptr); /// Act on the result of classifying a name as an undeclared (ADL-only) /// non-type declaration. ExprResult ActOnNameClassifiedAsUndeclaredNonType(IdentifierInfo *Name, SourceLocation NameLoc); /// Act on the result of classifying a name as an undeclared member of a /// dependent base class. ExprResult ActOnNameClassifiedAsDependentNonType(const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, bool IsAddressOfOperand); /// Act on the result of classifying a name as a specific non-type /// declaration. ExprResult ActOnNameClassifiedAsNonType(Scope *S, const CXXScopeSpec &SS, NamedDecl *Found, SourceLocation NameLoc, const Token &NextToken); /// Act on the result of classifying a name as an overload set. ExprResult ActOnNameClassifiedAsOverloadSet(Scope *S, Expr *OverloadSet); /// Describes the detailed kind of a template name. Used in diagnostics. enum class TemplateNameKindForDiagnostics { ClassTemplate, FunctionTemplate, VarTemplate, AliasTemplate, TemplateTemplateParam, Concept, DependentTemplate }; TemplateNameKindForDiagnostics getTemplateNameKindForDiagnostics(TemplateName Name); /// Determine whether it's plausible that E was intended to be a /// template-name. bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) { if (!getLangOpts().CPlusPlus || E.isInvalid()) return false; Dependent = false; if (auto *DRE = dyn_cast<DeclRefExpr>(E.get())) return !DRE->hasExplicitTemplateArgs(); if (auto *ME = dyn_cast<MemberExpr>(E.get())) return !ME->hasExplicitTemplateArgs(); Dependent = true; if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get())) return !DSDRE->hasExplicitTemplateArgs(); if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get())) return !DSME->hasExplicitTemplateArgs(); // Any additional cases recognized here should also be handled by // diagnoseExprIntendedAsTemplateName. 
return false; } void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName, SourceLocation Less, SourceLocation Greater); Decl *ActOnDeclarator(Scope *S, Declarator &D); NamedDecl *HandleDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists); void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S); bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info); bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC, DeclarationName Name, SourceLocation Loc, bool IsTemplateId); void diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals, SourceLocation FallbackLoc, SourceLocation ConstQualLoc = SourceLocation(), SourceLocation VolatileQualLoc = SourceLocation(), SourceLocation RestrictQualLoc = SourceLocation(), SourceLocation AtomicQualLoc = SourceLocation(), SourceLocation UnalignedQualLoc = SourceLocation()); static bool adjustContextForLocalExternDecl(DeclContext *&DC); void DiagnoseFunctionSpecifiers(const DeclSpec &DS); NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D, const LookupResult &R); NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R); NamedDecl *getShadowedDeclaration(const BindingDecl *D, const LookupResult &R); void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl, const LookupResult &R); void CheckShadow(Scope *S, VarDecl *D); /// Warn if 'E', which is an expression that is about to be modified, refers /// to a shadowing declaration. void CheckShadowingDeclModification(Expr *E, SourceLocation Loc); void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI); private: /// Map of current shadowing declarations to shadowed declarations. Warn if /// it looks like the user is trying to modify the shadowing declaration. llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls; public: void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange); void handleTagNumbering(const TagDecl *Tag, Scope *TagScope); void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec, TypedefNameDecl *NewTD); void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D); NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous); NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D, LookupResult &Previous, bool &Redeclaration); NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope, ArrayRef<BindingDecl *> Bindings = None); NamedDecl * ActOnDecompositionDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists); // Returns true if the variable declaration is a redeclaration bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous); void CheckVariableDeclarationType(VarDecl *NewVD); bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit, Expr *Init); void CheckCompleteVariableDeclaration(VarDecl *VD); void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD); void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D); NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope); bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD); enum class CheckConstexprKind { /// Diagnose issues that are non-constant or that are extensions. 
    Diagnose,
    /// Identify whether this function satisfies the formal rules for
    /// constexpr functions in the current language mode (with no extensions).
    CheckValid
  };

  bool CheckConstexprFunctionDefinition(const FunctionDecl *FD,
                                        CheckConstexprKind Kind);

  void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
  void FindHiddenVirtualMethods(CXXMethodDecl *MD,
                                SmallVectorImpl<CXXMethodDecl *> &OverloadedMethods);
  void NoteHiddenVirtualMethods(CXXMethodDecl *MD,
                                SmallVectorImpl<CXXMethodDecl *> &OverloadedMethods);

  // Returns true if the function declaration is a redeclaration
  bool CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
                                LookupResult &Previous,
                                bool IsMemberSpecialization);
  bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl);
  bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD,
                                      QualType NewT, QualType OldT);
  void CheckMain(FunctionDecl *FD, const DeclSpec &D);
  void CheckMSVCRTEntryPoint(FunctionDecl *FD);
  Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD,
                                                   bool IsDefinition);
  void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D);
  Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
  ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC, SourceLocation Loc,
                                          QualType T);
  ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
                              SourceLocation NameLoc, IdentifierInfo *Name,
                              QualType T, TypeSourceInfo *TSInfo,
                              StorageClass SC);
  void ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc,
                                 Expr *defarg);
  void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc,
                                         SourceLocation ArgLoc);
  void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
  ExprResult ConvertParamDefaultArgument(const ParmVarDecl *Param,
                                         Expr *DefaultArg,
                                         SourceLocation EqualLoc);
  void SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
                               SourceLocation EqualLoc);

  // Contexts where using non-trivial C union types can be disallowed. This is
  // passed to err_non_trivial_c_union_in_invalid_context.
  enum NonTrivialCUnionContext {
    // Function parameter.
    NTCUC_FunctionParam,
    // Function return.
    NTCUC_FunctionReturn,
    // Default-initialized object.
    NTCUC_DefaultInitializedObject,
    // Variable with automatic storage duration.
    NTCUC_AutoVar,
    // Initializer expression that might copy from another object.
    NTCUC_CopyInit,
    // Assignment.
    NTCUC_Assignment,
    // Compound literal.
    NTCUC_CompoundLiteral,
    // Block capture.
    NTCUC_BlockCapture,
    // lvalue-to-rvalue conversion of volatile type.
    NTCUC_LValueToRValueVolatile,
  };

  /// Emit diagnostics if the initializer or any of its explicit or
  /// implicitly-generated subexpressions require copying or
  /// default-initializing a type that is or contains a C union type that is
  /// non-trivial to copy or default-initialize.
  void checkNonTrivialCUnionInInitializer(const Expr *Init, SourceLocation Loc);

  // These flags are passed to checkNonTrivialCUnion.
  enum NonTrivialCUnionKind {
    NTCUK_Init = 0x1,
    NTCUK_Destruct = 0x2,
    NTCUK_Copy = 0x4,
  };

  /// Emit diagnostics if a non-trivial C union type or a struct that contains
  /// a non-trivial C union is used in an invalid context.
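  ///
  /// Illustrative example, not from the original header: under ARC, a C
  /// union with an ObjC-lifetime member is non-trivial to copy and to
  /// default-initialize, so using it in one of the contexts above is
  /// diagnosed. The user code below is hypothetical.
  /// \code
  ///   union U {
  ///     __strong id Obj;   // makes the union non-trivial under ARC
  ///     int N;
  ///   };
  ///   void f(union U u);   // checked with NTCUC_FunctionParam
  /// \endcode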
void checkNonTrivialCUnion(QualType QT, SourceLocation Loc, NonTrivialCUnionContext UseContext, unsigned NonTrivialKind); void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit); void ActOnUninitializedDecl(Decl *dcl); void ActOnInitializerError(Decl *Dcl); void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc); void ActOnCXXForRangeDecl(Decl *D); StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc, IdentifierInfo *Ident, ParsedAttributes &Attrs, SourceLocation AttrEnd); void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc); void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc); void CheckStaticLocalForDllExport(VarDecl *VD); void FinalizeDeclaration(Decl *D); DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS, ArrayRef<Decl *> Group); DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group); /// Should be called on all declarations that might have attached /// documentation comments. void ActOnDocumentableDecl(Decl *D); void ActOnDocumentableDecls(ArrayRef<Decl *> Group); void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D, SourceLocation LocAfterDecls); void CheckForFunctionRedefinition( FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D, SkipBodyInfo *SkipBody = nullptr); void ActOnStartTrailingRequiresClause(Scope *S, Declarator &D); ExprResult ActOnFinishTrailingRequiresClause(ExprResult ConstraintExpr); ExprResult ActOnRequiresClause(ExprResult ConstraintExpr); void ActOnStartOfObjCMethodDef(Scope *S, Decl *D); bool isObjCMethodDecl(Decl *D) { return D && isa<ObjCMethodDecl>(D); } /// Determine whether we can delay parsing the body of a function or /// function template until it is used, assuming we don't care about emitting /// code for that function. /// /// This will be \c false if we may need the body of the function in the /// middle of parsing an expression (where it's impractical to switch to /// parsing a different function), for instance, if it's constexpr in C++11 /// or has an 'auto' return type in C++14. These cases are essentially bugs. bool canDelayFunctionBody(const Declarator &D); /// Determine whether we can skip parsing the body of a function /// definition, assuming we don't care about analyzing its body or emitting /// code for that function. /// /// This will be \c false only if we may need the body of the function in /// order to parse the rest of the program (for instance, if it is /// \c constexpr in C++11 or has an 'auto' return type in C++14). bool canSkipFunctionBody(Decl *D); void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation); Decl *ActOnSkippedFunctionBody(Decl *Decl); void ActOnFinishInlineFunctionDef(FunctionDecl *D); /// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an /// attribute for which parsing is delayed. void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs); /// Diagnose any unused parameters in the given sequence of /// ParmVarDecl pointers. 
void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters); /// Diagnose whether the size of parameters or return value of a /// function or obj-c method definition is pass-by-value and larger than a /// specified threshold. void DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters, QualType ReturnTy, NamedDecl *D); void DiagnoseInvalidJumps(Stmt *Body); Decl *ActOnFileScopeAsmDecl(Expr *expr, SourceLocation AsmLoc, SourceLocation RParenLoc); /// Handle a C++11 empty-declaration and attribute-declaration. Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList, SourceLocation SemiLoc); enum class ModuleDeclKind { Interface, ///< 'export module X;' Implementation, ///< 'module X;' }; /// The parser has processed a module-declaration that begins the definition /// of a module interface or implementation. DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc, SourceLocation ModuleLoc, ModuleDeclKind MDK, ModuleIdPath Path, bool IsFirstDecl); /// The parser has processed a global-module-fragment declaration that begins /// the definition of the global module fragment of the current module unit. /// \param ModuleLoc The location of the 'module' keyword. DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc); /// The parser has processed a private-module-fragment declaration that begins /// the definition of the private module fragment of the current module unit. /// \param ModuleLoc The location of the 'module' keyword. /// \param PrivateLoc The location of the 'private' keyword. DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc, SourceLocation PrivateLoc); /// The parser has processed a module import declaration. /// /// \param StartLoc The location of the first token in the declaration. This /// could be the location of an '@', 'export', or 'import'. /// \param ExportLoc The location of the 'export' keyword, if any. /// \param ImportLoc The location of the 'import' keyword. /// \param Path The module access path. DeclResult ActOnModuleImport(SourceLocation StartLoc, SourceLocation ExportLoc, SourceLocation ImportLoc, ModuleIdPath Path); DeclResult ActOnModuleImport(SourceLocation StartLoc, SourceLocation ExportLoc, SourceLocation ImportLoc, Module *M, ModuleIdPath Path = {}); /// The parser has processed a module import translated from a /// #include or similar preprocessing directive. void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod); void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod); /// The parsed has entered a submodule. void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod); /// The parser has left a submodule. void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod); /// Create an implicit import of the given module at the given /// source location, for error recovery, if possible. /// /// This routine is typically used when an entity found by name lookup /// is actually hidden within a module that we know about but the user /// has forgotten to import. void createImplicitModuleImportForErrorRecovery(SourceLocation Loc, Module *Mod); /// Kinds of missing import. Note, the values of these enumerators correspond /// to %select values in diagnostics. enum class MissingImportKind { Declaration, Definition, DefaultArgument, ExplicitSpecialization, PartialSpecialization }; /// Diagnose that the specified declaration needs to be visible but /// isn't, and suggest a module import that would resolve the problem. 
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, MissingImportKind MIK, bool Recover = true); void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, SourceLocation DeclLoc, ArrayRef<Module *> Modules, MissingImportKind MIK, bool Recover); Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc, SourceLocation LBraceLoc); Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl, SourceLocation RBraceLoc); /// We've found a use of a templated declaration that would trigger an /// implicit instantiation. Check that any relevant explicit specializations /// and partial specializations are visible, and diagnose if not. void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec); /// Retrieve a suitable printing policy for diagnostics. PrintingPolicy getPrintingPolicy() const { return getPrintingPolicy(Context, PP); } /// Retrieve a suitable printing policy for diagnostics. static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx, const Preprocessor &PP); /// Scope actions. void ActOnPopScope(SourceLocation Loc, Scope *S); void ActOnTranslationUnitScope(Scope *S); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, RecordDecl *&AnonRecord); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, MultiTemplateParamsArg TemplateParams, bool IsExplicitInstantiation, RecordDecl *&AnonRecord); Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS, AccessSpecifier AS, RecordDecl *Record, const PrintingPolicy &Policy); Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS, RecordDecl *Record); /// Common ways to introduce type names without a tag for use in diagnostics. /// Keep in sync with err_tag_reference_non_tag. enum NonTagKind { NTK_NonStruct, NTK_NonClass, NTK_NonUnion, NTK_NonEnum, NTK_Typedef, NTK_TypeAlias, NTK_Template, NTK_TypeAliasTemplate, NTK_TemplateTemplateArgument, }; /// Given a non-tag type declaration, returns an enum useful for indicating /// what kind of non-tag type this is. 
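  ///
  /// Illustrative example, not from the original header: given the
  /// hypothetical user code
  /// \code
  ///   typedef int Foo;
  ///   struct Foo *P;   // diagnosed: 'Foo' is a typedef, not a struct
  /// \endcode
  /// the declaration of 'Foo' is classified as NTK_Typedef so that
  /// err_tag_reference_non_tag can name the right kind of entity.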
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK); bool isAcceptableTagRedeclaration(const TagDecl *Previous, TagTypeKind NewTag, bool isDefinition, SourceLocation NewTagLoc, const IdentifierInfo *Name); enum TagUseKind { TUK_Reference, // Reference to a tag: 'struct foo *X;' TUK_Declaration, // Fwd decl of a tag: 'struct foo;' TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;' TUK_Friend // Friend declaration: 'friend struct foo;' }; Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, AccessSpecifier AS, SourceLocation ModulePrivateLoc, MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl, bool &IsDependent, SourceLocation ScopedEnumKWLoc, bool ScopedEnumUsesClassTag, TypeResult UnderlyingType, bool IsTypeSpecifier, bool IsTemplateParamOrArg, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc, unsigned TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, MultiTemplateParamsArg TempParamLists); TypeResult ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK, const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation TagLoc, SourceLocation NameLoc); void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart, IdentifierInfo *ClassName, SmallVectorImpl<Decl *> &Decls); Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth); FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS); MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS, const ParsedAttr &MSPropertyAttr); FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T, TypeSourceInfo *TInfo, RecordDecl *Record, SourceLocation Loc, bool Mutable, Expr *BitfieldWidth, InClassInitStyle InitStyle, SourceLocation TSSL, AccessSpecifier AS, NamedDecl *PrevDecl, Declarator *D = nullptr); bool CheckNontrivialField(FieldDecl *FD); void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM); enum TrivialABIHandling { /// The triviality of a method unaffected by "trivial_abi". TAH_IgnoreTrivialABI, /// The triviality of a method affected by "trivial_abi". TAH_ConsiderTrivialABI }; bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM, TrivialABIHandling TAH = TAH_IgnoreTrivialABI, bool Diagnose = false); /// For a defaulted function, the kind of defaulted function that it is. 
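  ///
  /// Illustrative examples, not from the original header: a defaulted copy
  /// constructor is classified as the special member CXXCopyConstructor,
  /// while a defaulted equality operator is classified as the comparison
  /// DefaultedComparisonKind::Equal.
  /// \code
  ///   struct T {
  ///     T(const T &) = default;                      // CXXCopyConstructor
  ///     bool operator==(const T &) const = default;  // DefaultedComparisonKind::Equal
  ///   };
  /// \endcode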
class DefaultedFunctionKind { CXXSpecialMember SpecialMember : 8; DefaultedComparisonKind Comparison : 8; public: DefaultedFunctionKind() : SpecialMember(CXXInvalid), Comparison(DefaultedComparisonKind::None) { } DefaultedFunctionKind(CXXSpecialMember CSM) : SpecialMember(CSM), Comparison(DefaultedComparisonKind::None) {} DefaultedFunctionKind(DefaultedComparisonKind Comp) : SpecialMember(CXXInvalid), Comparison(Comp) {} bool isSpecialMember() const { return SpecialMember != CXXInvalid; } bool isComparison() const { return Comparison != DefaultedComparisonKind::None; } explicit operator bool() const { return isSpecialMember() || isComparison(); } CXXSpecialMember asSpecialMember() const { return SpecialMember; } DefaultedComparisonKind asComparison() const { return Comparison; } /// Get the index of this function kind for use in diagnostics. unsigned getDiagnosticIndex() const { static_assert(CXXInvalid > CXXDestructor, "invalid should have highest index"); static_assert((unsigned)DefaultedComparisonKind::None == 0, "none should be equal to zero"); return SpecialMember + (unsigned)Comparison; } }; DefaultedFunctionKind getDefaultedFunctionKind(const FunctionDecl *FD); CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD) { return getDefaultedFunctionKind(MD).asSpecialMember(); } DefaultedComparisonKind getDefaultedComparisonKind(const FunctionDecl *FD) { return getDefaultedFunctionKind(FD).asComparison(); } void ActOnLastBitfield(SourceLocation DeclStart, SmallVectorImpl<Decl *> &AllIvarDecls); Decl *ActOnIvar(Scope *S, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, tok::ObjCKeywordKind visibility); // This is used for both record definitions and ObjC interface declarations. void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl, ArrayRef<Decl *> Fields, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); /// ActOnTagStartDefinition - Invoked when we have entered the /// scope of a tag's definition (e.g., for an enumeration, class, /// struct, or union). void ActOnTagStartDefinition(Scope *S, Decl *TagDecl); /// Perform ODR-like check for C/ObjC when merging tag types from modules. /// Differently from C++, actually parse the body and reject / error out /// in case of a structural mismatch. bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev, SkipBodyInfo &SkipBody); typedef void *SkippedDefinitionContext; /// Invoked when we enter a tag definition that we're skipping. SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD); Decl *ActOnObjCContainerStartDefinition(Decl *IDecl); /// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a /// C++ record definition's base-specifiers clause and are starting its /// member declarations. void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl, SourceLocation FinalLoc, bool IsFinalSpelledSealed, SourceLocation LBraceLoc); /// ActOnTagFinishDefinition - Invoked once we have finished parsing /// the definition of a tag (enumeration, class, struct, or union). void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl, SourceRange BraceRange); void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context); void ActOnObjCContainerFinishDefinition(); /// Invoked when we must temporarily exit the objective-c container /// scope for parsing/looking-up C constructs. 
/// /// Must be followed by a call to \see ActOnObjCReenterContainerContext void ActOnObjCTemporaryExitContainerContext(DeclContext *DC); void ActOnObjCReenterContainerContext(DeclContext *DC); /// ActOnTagDefinitionError - Invoked when there was an unrecoverable /// error parsing the definition of a tag. void ActOnTagDefinitionError(Scope *S, Decl *TagDecl); EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum, EnumConstantDecl *LastEnumConst, SourceLocation IdLoc, IdentifierInfo *Id, Expr *val); bool CheckEnumUnderlyingType(TypeSourceInfo *TI); bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped, QualType EnumUnderlyingTy, bool IsFixed, const EnumDecl *Prev); /// Determine whether the body of an anonymous enumeration should be skipped. /// \param II The name of the first enumerator. SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II, SourceLocation IILoc); Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant, SourceLocation IdLoc, IdentifierInfo *Id, const ParsedAttributesView &Attrs, SourceLocation EqualLoc, Expr *Val); void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange, Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S, const ParsedAttributesView &Attr); /// Set the current declaration context until it gets popped. void PushDeclContext(Scope *S, DeclContext *DC); void PopDeclContext(); /// EnterDeclaratorContext - Used when we must lookup names in the context /// of a declarator's nested name specifier. void EnterDeclaratorContext(Scope *S, DeclContext *DC); void ExitDeclaratorContext(Scope *S); /// Enter a template parameter scope, after it's been associated with a particular /// DeclContext. Causes lookup within the scope to chain through enclosing contexts /// in the correct order. void EnterTemplatedContext(Scope *S, DeclContext *DC); /// Push the parameters of D, which must be a function, into scope. void ActOnReenterFunctionContext(Scope* S, Decl* D); void ActOnExitFunctionContext(); DeclContext *getFunctionLevelDeclContext(); /// getCurFunctionDecl - If inside of a function body, this returns a pointer /// to the function decl for the function being parsed. If we're currently /// in a 'block', this returns the containing context. FunctionDecl *getCurFunctionDecl(); /// getCurMethodDecl - If inside of a method body, this returns a pointer to /// the method decl for the method being parsed. If we're currently /// in a 'block', this returns the containing context. ObjCMethodDecl *getCurMethodDecl(); /// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method /// or C function we're in, otherwise return null. If we're currently /// in a 'block', this returns the containing context. NamedDecl *getCurFunctionOrMethodDecl(); /// Add this decl to the scope shadowed decl chains. void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true); /// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true /// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns /// true if 'D' belongs to the given declaration context. /// /// \param AllowInlineNamespace If \c true, allow the declaration to be in the /// enclosing namespace set of the context, rather than contained /// directly within it. bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr, bool AllowInlineNamespace = false); /// Finds the scope corresponding to the given decl context, if it /// happens to be an enclosing scope. Otherwise return NULL. 
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC); /// Subroutines of ActOnDeclarator(). TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T, TypeSourceInfo *TInfo); bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New); /// Describes the kind of merge to perform for availability /// attributes (including "deprecated", "unavailable", and "availability"). enum AvailabilityMergeKind { /// Don't merge availability attributes at all. AMK_None, /// Merge availability attributes for a redeclaration, which requires /// an exact match. AMK_Redeclaration, /// Merge availability attributes for an override, which requires /// an exact match or a weakening of constraints. AMK_Override, /// Merge availability attributes for an implementation of /// a protocol requirement. AMK_ProtocolImplementation, }; /// Describes the kind of priority given to an availability attribute. /// /// The sum of priorities determines the final priority of the attribute. /// The final priority determines how the attribute will be merged. /// An attribute with a lower priority will always remove higher priority /// attributes for the specified platform when it is being applied. An /// attribute with a higher priority will not be applied if the declaration /// already has an availability attribute with a lower priority for the /// specified platform. The final priority values are not expected to match /// the values in this enumeration, but instead should be treated as a plain /// integer value. This enumeration just names the priority weights that are /// used to calculate that final value. enum AvailabilityPriority : int { /// The availability attribute was specified explicitly next to the /// declaration. AP_Explicit = 0, /// The availability attribute was applied using '#pragma clang attribute'. AP_PragmaClangAttribute = 1, /// The availability attribute for a specific platform was inferred from /// an availability attribute for another platform. AP_InferredFromOtherPlatform = 2 }; /// Attribute merging methods. Return true if a new attribute was added.
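/// A hedged illustration, added for exposition and not part of the original
/// interface: per the comments above, the 'Priority' argument of the merge
/// entry points below is a plain integer formed by summing the weights in
/// AvailabilityPriority, and a lower value takes precedence. For instance, an
/// attribute inferred onto another platform from an explicit one might carry:
/// \code
///   int Inferred = AP_Explicit + AP_InferredFromOtherPlatform; // 0 + 2 == 2
///   int Written  = AP_Explicit;                                // 0
///   bool WrittenTakesPrecedence = Written < Inferred;          // lower wins
/// \endcode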
AvailabilityAttr * mergeAvailabilityAttr(NamedDecl *D, const AttributeCommonInfo &CI, IdentifierInfo *Platform, bool Implicit, VersionTuple Introduced, VersionTuple Deprecated, VersionTuple Obsoleted, bool IsUnavailable, StringRef Message, bool IsStrict, StringRef Replacement, AvailabilityMergeKind AMK, int Priority); TypeVisibilityAttr * mergeTypeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI, TypeVisibilityAttr::VisibilityType Vis); VisibilityAttr *mergeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI, VisibilityAttr::VisibilityType Vis); UuidAttr *mergeUuidAttr(Decl *D, const AttributeCommonInfo &CI, StringRef UuidAsWritten, MSGuidDecl *GuidDecl); DLLImportAttr *mergeDLLImportAttr(Decl *D, const AttributeCommonInfo &CI); DLLExportAttr *mergeDLLExportAttr(Decl *D, const AttributeCommonInfo &CI); MSInheritanceAttr *mergeMSInheritanceAttr(Decl *D, const AttributeCommonInfo &CI, bool BestCase, MSInheritanceModel Model); FormatAttr *mergeFormatAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Format, int FormatIdx, int FirstArg); SectionAttr *mergeSectionAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Name); CodeSegAttr *mergeCodeSegAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Name); AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, const AttributeCommonInfo &CI, const IdentifierInfo *Ident); MinSizeAttr *mergeMinSizeAttr(Decl *D, const AttributeCommonInfo &CI); NoSpeculativeLoadHardeningAttr * mergeNoSpeculativeLoadHardeningAttr(Decl *D, const NoSpeculativeLoadHardeningAttr &AL); SpeculativeLoadHardeningAttr * mergeSpeculativeLoadHardeningAttr(Decl *D, const SpeculativeLoadHardeningAttr &AL); SwiftNameAttr *mergeSwiftNameAttr(Decl *D, const SwiftNameAttr &SNA, StringRef Name); OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, const AttributeCommonInfo &CI); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const InternalLinkageAttr &AL); CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL); CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL); WebAssemblyImportNameAttr *mergeImportNameAttr( Decl *D, const WebAssemblyImportNameAttr &AL); WebAssemblyImportModuleAttr *mergeImportModuleAttr( Decl *D, const WebAssemblyImportModuleAttr &AL); EnforceTCBAttr *mergeEnforceTCBAttr(Decl *D, const EnforceTCBAttr &AL); EnforceTCBLeafAttr *mergeEnforceTCBLeafAttr(Decl *D, const EnforceTCBLeafAttr &AL); void mergeDeclAttributes(NamedDecl *New, Decl *Old, AvailabilityMergeKind AMK = AMK_Redeclaration); void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New, LookupResult &OldDecls); bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S, bool MergeTypeWithOld); bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old, Scope *S, bool MergeTypeWithOld); void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old); void MergeVarDecl(VarDecl *New, LookupResult &Previous); void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld); void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old); bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn); void notePreviousDefinition(const NamedDecl *Old, SourceLocation New); bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S); // AssignmentAction - This is used by all the assignment diagnostic functions // to represent what is actually causing the operation enum AssignmentAction { AA_Assigning, AA_Passing, AA_Returning, AA_Converting, AA_Initializing, 
AA_Sending, AA_Casting, AA_Passing_CFAudited }; /// C++ Overloading. enum OverloadKind { /// This is a legitimate overload: the existing declarations are /// functions or function templates with different signatures. Ovl_Overload, /// This is not an overload because the signature exactly matches /// an existing declaration. Ovl_Match, /// This is not an overload because the lookup results contain a /// non-function. Ovl_NonFunction }; OverloadKind CheckOverload(Scope *S, FunctionDecl *New, const LookupResult &OldDecls, NamedDecl *&OldDecl, bool IsForUsingDecl); bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl, bool ConsiderCudaAttrs = true, bool ConsiderRequiresClauses = true); enum class AllowedExplicit { /// Allow no explicit functions to be used. None, /// Allow explicit conversion functions but not explicit constructors. Conversions, /// Allow both explicit conversion functions and explicit constructors. All }; ImplicitConversionSequence TryImplicitConversion(Expr *From, QualType ToType, bool SuppressUserConversions, AllowedExplicit AllowExplicit, bool InOverloadResolution, bool CStyle, bool AllowObjCWritebackConversion); bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType); bool IsFloatingPointPromotion(QualType FromType, QualType ToType); bool IsComplexPromotion(QualType FromType, QualType ToType); bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCWritebackConversion(QualType FromType, QualType ToType, QualType &ConvertedType); bool IsBlockPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType); bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType, const FunctionProtoType *NewType, unsigned *ArgPos = nullptr); void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag, QualType FromType, QualType ToType); void maybeExtendBlockObject(ExprResult &E); CastKind PrepareCastToObjCObjectPointer(ExprResult &E); bool CheckPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath& BasePath, bool IgnoreBaseAccess, bool Diagnose = true); bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType &ConvertedType); bool CheckMemberPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath &BasePath, bool IgnoreBaseAccess); bool IsQualificationConversion(QualType FromType, QualType ToType, bool CStyle, bool &ObjCLifetimeConversion); bool IsFunctionConversion(QualType FromType, QualType ToType, QualType &ResultTy); bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType); bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg); ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity, const VarDecl *NRVOCandidate, QualType ResultType, Expr *Value, bool AllowNRVO = true); bool CanPerformAggregateInitializationForOverloadResolution( const InitializedEntity &Entity, InitListExpr *From); bool IsStringInit(Expr *Init, const ArrayType *AT); bool CanPerformCopyInitialization(const InitializedEntity &Entity, ExprResult Init); ExprResult PerformCopyInitialization(const InitializedEntity &Entity, SourceLocation EqualLoc, ExprResult Init, bool TopLevelOfInitList = false, bool AllowExplicit = false); ExprResult PerformObjectArgumentInitialization(Expr *From, NestedNameSpecifier 
*Qualifier, NamedDecl *FoundDecl, CXXMethodDecl *Method); /// Check that the lifetime of the initializer (and its subobjects) is /// sufficient for initializing the entity, and perform lifetime extension /// (when permitted) if not. void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init); ExprResult PerformContextuallyConvertToBool(Expr *From); ExprResult PerformContextuallyConvertToObjCPointer(Expr *From); /// Contexts in which a converted constant expression is required. enum CCEKind { CCEK_CaseValue, ///< Expression in a case label. CCEK_Enumerator, ///< Enumerator value with fixed underlying type. CCEK_TemplateArg, ///< Value of a non-type template parameter. CCEK_ArrayBound, ///< Array bound in array declarator or new-expression. CCEK_ConstexprIf, ///< Condition in a constexpr if statement. CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier. }; ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, llvm::APSInt &Value, CCEKind CCE); ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, APValue &Value, CCEKind CCE, NamedDecl *Dest = nullptr); /// Abstract base class used to perform a contextual implicit /// conversion from an expression to any type passing a filter. class ContextualImplicitConverter { public: bool Suppress; bool SuppressConversion; ContextualImplicitConverter(bool Suppress = false, bool SuppressConversion = false) : Suppress(Suppress), SuppressConversion(SuppressConversion) {} /// Determine whether the specified type is a valid destination type /// for this conversion. virtual bool match(QualType T) = 0; /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the expression has incomplete class type. virtual SemaDiagnosticBuilder diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the only matching conversion function /// is explicit. virtual SemaDiagnosticBuilder diagnoseExplicitConv( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; /// Emits a note for the explicit conversion function. virtual SemaDiagnosticBuilder noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when there are multiple possible conversion /// functions. virtual SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a note for one of the candidate conversions. virtual SemaDiagnosticBuilder noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when we picked a conversion function /// (for cases when we are not allowed to pick a conversion function). virtual SemaDiagnosticBuilder diagnoseConversion( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; virtual ~ContextualImplicitConverter() {} }; class ICEConvertDiagnoser : public ContextualImplicitConverter { bool AllowScopedEnumerations; public: ICEConvertDiagnoser(bool AllowScopedEnumerations, bool Suppress, bool SuppressConversion) : ContextualImplicitConverter(Suppress, SuppressConversion), AllowScopedEnumerations(AllowScopedEnumerations) {} /// Match an integral or (possibly scoped) enumeration type. 
bool match(QualType T) override; SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override { return diagnoseNotInt(S, Loc, T); } /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0; }; /// Perform a contextual implicit conversion. ExprResult PerformContextualImplicitConversion( SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter); enum ObjCSubscriptKind { OS_Array, OS_Dictionary, OS_Error }; ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE); // Note that LK_String is intentionally after the other literals, as // this is used for diagnostics logic. enum ObjCLiteralKind { LK_Array, LK_Dictionary, LK_Numeric, LK_Boxed, LK_String, LK_Block, LK_None }; ObjCLiteralKind CheckLiteralKind(Expr *FromE); ExprResult PerformObjectMemberConversion(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, NamedDecl *Member); // Members have to be NamespaceDecl* or TranslationUnitDecl*. // TODO: make this is a typesafe union. typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet; typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet; using ADLCallKind = CallExpr::ADLCallKind; void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = true, bool AllowExplicitConversion = false, ADLCallKind IsADLCandidate = ADLCallKind::NotADL, ConversionSequenceList EarlyConversions = None, OverloadCandidateParamOrder PO = {}); void AddFunctionCandidates(const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, bool SuppressUserConversions = false, bool PartialOverloading = false, bool FirstArgumentIsBase = false); void AddMethodCandidate(DeclAccessPair FoundDecl, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversion = false, OverloadCandidateParamOrder PO = {}); void AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, ConversionSequenceList EarlyConversions = None, OverloadCandidateParamOrder PO = {}); void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, OverloadCandidateParamOrder PO = {}); void AddTemplateOverloadCandidate( FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = true, ADLCallKind IsADLCandidate = ADLCallKind::NotADL, OverloadCandidateParamOrder PO = {}); bool CheckNonDependentConversions( FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes, ArrayRef<Expr *> 
Args, OverloadCandidateSet &CandidateSet, ConversionSequenceList &Conversions, bool SuppressUserConversions, CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(), Expr::Classification ObjectClassification = {}, OverloadCandidateParamOrder PO = {}); void AddConversionCandidate( CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddTemplateConversionCandidate( FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddSurrogateCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, const FunctionProtoType *Proto, Expr *Object, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddNonMemberOperatorCandidates( const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); void AddMemberOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, OverloadCandidateParamOrder PO = {}); void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool IsAssignmentOperator = false, unsigned NumContextualBoolArguments = 0); void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddArgumentDependentLookupCandidates(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, TemplateArgumentListInfo *ExplicitTemplateArgs, OverloadCandidateSet& CandidateSet, bool PartialOverloading = false); // Emit as a 'note' the specific overload candidate void NoteOverloadCandidate( NamedDecl *Found, FunctionDecl *Fn, OverloadCandidateRewriteKind RewriteKind = OverloadCandidateRewriteKind(), QualType DestType = QualType(), bool TakingAddress = false); // Emit as a series of 'note's all template and non-templates identified by // the expression Expr void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(), bool TakingAddress = false); /// Check the enable_if expressions on the given function. Returns the first /// failing attribute, or NULL if they were all successful. EnableIfAttr *CheckEnableIf(FunctionDecl *Function, SourceLocation CallLoc, ArrayRef<Expr *> Args, bool MissingImplicitThis = false); /// Find the failed Boolean condition within a given Boolean /// constant expression, and describe it with a string. std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// non-ArgDependent DiagnoseIfAttrs. /// /// Argument-dependent diagnose_if attributes should be checked each time a /// function is used as a direct callee of a function call. /// /// Returns true if any errors were emitted. bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function, const Expr *ThisArg, ArrayRef<const Expr *> Args, SourceLocation Loc); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// ArgDependent DiagnoseIfAttrs. /// /// Argument-independent diagnose_if attributes should be checked on every use /// of a function. 
/// /// Returns true if any errors were emitted. bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND, SourceLocation Loc); /// Returns whether the given function's address can be taken or not, /// optionally emitting a diagnostic if the address can't be taken. /// /// Returns false if taking the address of the function is illegal. bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function, bool Complain = false, SourceLocation Loc = SourceLocation()); // [PossiblyAFunctionType] --> [Return] // NonFunctionType --> NonFunctionType // R (A) --> R(A) // R (*)(A) --> R (A) // R (&)(A) --> R (A) // R (S::*)(A) --> R (A) QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType); FunctionDecl * ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr, QualType TargetType, bool Complain, DeclAccessPair &Found, bool *pHadMultipleCandidates = nullptr); FunctionDecl * resolveAddressOfSingleOverloadCandidate(Expr *E, DeclAccessPair &FoundResult); bool resolveAndFixAddressOfSingleOverloadCandidate( ExprResult &SrcExpr, bool DoFunctionPointerConversion = false); FunctionDecl * ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl, bool Complain = false, DeclAccessPair *Found = nullptr); bool ResolveAndFixSingleFunctionTemplateSpecialization( ExprResult &SrcExpr, bool DoFunctionPointerConverion = false, bool Complain = false, SourceRange OpRangeForComplaining = SourceRange(), QualType DestTypeForComplaining = QualType(), unsigned DiagIDForComplaining = 0); Expr *FixOverloadedFunctionReference(Expr *E, DeclAccessPair FoundDecl, FunctionDecl *Fn); ExprResult FixOverloadedFunctionReference(ExprResult, DeclAccessPair FoundDecl, FunctionDecl *Fn); void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool PartialOverloading = false); void AddOverloadedCallCandidates( LookupResult &R, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet); // An enum used to represent the different possible results of building a // range-based for loop. 
enum ForRangeStatus { FRS_Success, FRS_NoViableFunction, FRS_DiagnosticIssued }; ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc, SourceLocation RangeLoc, const DeclarationNameInfo &NameInfo, LookupResult &MemberLookup, OverloadCandidateSet *CandidateSet, Expr *Range, ExprResult *CallExpr); ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc, Expr *ExecConfig, bool AllowTypoCorrection=true, bool CalleesAddressIsTaken=false); bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, MultiExprArg Args, SourceLocation RParenLoc, OverloadCandidateSet *CandidateSet, ExprResult *Result); ExprResult CreateUnresolvedLookupExpr(CXXRecordDecl *NamingClass, NestedNameSpecifierLoc NNSLoc, DeclarationNameInfo DNI, const UnresolvedSetImpl &Fns, bool PerformADL = true); ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *input, bool RequiresADL = true); void LookupOverloadedBinOp(OverloadCandidateSet &CandidateSet, OverloadedOperatorKind Op, const UnresolvedSetImpl &Fns, ArrayRef<Expr *> Args, bool RequiresADL = true); ExprResult CreateOverloadedBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS, bool RequiresADL = true, bool AllowRewrittenCandidates = true, FunctionDecl *DefaultedFn = nullptr); ExprResult BuildSynthesizedThreeWayComparison(SourceLocation OpLoc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS, FunctionDecl *DefaultedFn); ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc, SourceLocation RLoc, Expr *Base,Expr *Idx); ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc, bool AllowRecovery = false); ExprResult BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc, bool *NoArrowOperatorFound = nullptr); /// CheckCallReturnType - Checks that a call expression's return type is /// complete. Returns true on failure. The location passed in is the location /// that best represents the call. bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc, CallExpr *CE, FunctionDecl *FD); /// Helpers for dealing with blocks and functions. bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters, bool CheckParameterNames); void CheckCXXDefaultArguments(FunctionDecl *FD); void CheckExtraCXXDefaultArguments(Declarator &D); Scope *getNonFieldDeclScope(Scope *S); /// \name Name lookup /// /// These routines provide name lookup that is used during semantic /// analysis to resolve the various kinds of names (identifiers, /// overloaded operator names, constructor names, etc.) into zero or /// more declarations within a particular scope. The major entry /// points are LookupName, which performs unqualified name lookup, /// and LookupQualifiedName, which performs qualified name lookup. /// /// All name lookup is performed based on some specific criteria, /// which specify what names will be visible to name lookup and how /// far name lookup should work. These criteria are important both /// for capturing language semantics (certain lookups will ignore /// certain names, for example) and for performance, since name /// lookup is often a bottleneck in the compilation of C++. 
Name /// lookup criteria is specified via the LookupCriteria enumeration. /// /// The results of name lookup can vary based on the kind of name /// lookup performed, the current language, and the translation /// unit. In C, for example, name lookup will either return nothing /// (no entity found) or a single declaration. In C++, name lookup /// can additionally refer to a set of overloaded functions or /// result in an ambiguity. All of the possible results of name /// lookup are captured by the LookupResult class, which provides /// the ability to distinguish among them. //@{ /// Describes the kind of name lookup to perform. enum LookupNameKind { /// Ordinary name lookup, which finds ordinary names (functions, /// variables, typedefs, etc.) in C and most kinds of names /// (functions, variables, members, types, etc.) in C++. LookupOrdinaryName = 0, /// Tag name lookup, which finds the names of enums, classes, /// structs, and unions. LookupTagName, /// Label name lookup. LookupLabel, /// Member name lookup, which finds the names of /// class/struct/union members. LookupMemberName, /// Look up of an operator name (e.g., operator+) for use with /// operator overloading. This lookup is similar to ordinary name /// lookup, but will ignore any declarations that are class members. LookupOperatorName, /// Look up a name following ~ in a destructor name. This is an ordinary /// lookup, but prefers tags to typedefs. LookupDestructorName, /// Look up of a name that precedes the '::' scope resolution /// operator in C++. This lookup completely ignores operator, object, /// function, and enumerator names (C++ [basic.lookup.qual]p1). LookupNestedNameSpecifierName, /// Look up a namespace name within a C++ using directive or /// namespace alias definition, ignoring non-namespace names (C++ /// [basic.lookup.udir]p1). LookupNamespaceName, /// Look up all declarations in a scope with the given name, /// including resolved using declarations. This is appropriate /// for checking redeclarations for a using declaration. LookupUsingDeclName, /// Look up an ordinary name that is going to be redeclared as a /// name with linkage. This lookup ignores any declarations that /// are outside of the current scope unless they have linkage. See /// C99 6.2.2p4-5 and C++ [basic.link]p6. LookupRedeclarationWithLinkage, /// Look up a friend of a local class. This lookup does not look /// outside the innermost non-class scope. See C++11 [class.friend]p11. LookupLocalFriendName, /// Look up the name of an Objective-C protocol. LookupObjCProtocolName, /// Look up implicit 'self' parameter of an objective-c method. LookupObjCImplicitSelfParam, /// Look up the name of an OpenMP user-defined reduction operation. LookupOMPReductionName, /// Look up the name of an OpenMP user-defined mapper. LookupOMPMapperName, /// Look up any declaration with any name. LookupAnyName }; /// Specifies whether (or how) name lookup is being performed for a /// redeclaration (vs. a reference). enum RedeclarationKind { /// The lookup is a reference to this name that is not for the /// purpose of redeclaring the name. NotForRedeclaration = 0, /// The lookup results will be used for redeclaration of a name, /// if an entity by that name already exists and is visible. ForVisibleRedeclaration, /// The lookup results will be used for redeclaration of a name /// with external linkage; non-visible lookup results with external linkage /// may also be found. 
ForExternalRedeclaration }; RedeclarationKind forRedeclarationInCurContext() { // A declaration with an owning module for linkage can never link against // anything that is not visible. We don't need to check linkage here; if // the context has internal linkage, redeclaration lookup won't find things // from other TUs, and we can't safely compute linkage yet in general. if (cast<Decl>(CurContext) ->getOwningModuleForLinkage(/*IgnoreLinkage*/true)) return ForVisibleRedeclaration; return ForExternalRedeclaration; } /// The possible outcomes of name lookup for a literal operator. enum LiteralOperatorLookupResult { /// The lookup resulted in an error. LOLR_Error, /// The lookup found no match but no diagnostic was issued. LOLR_ErrorNoDiagnostic, /// The lookup found a single 'cooked' literal operator, which /// expects a normal literal to be built and passed to it. LOLR_Cooked, /// The lookup found a single 'raw' literal operator, which expects /// a string literal containing the spelling of the literal token. LOLR_Raw, /// The lookup found an overload set of literal operator templates, /// which expect the characters of the spelling of the literal token to be /// passed as a non-type template argument pack. LOLR_Template, /// The lookup found an overload set of literal operator templates, /// which expect the character type and characters of the spelling of the /// string literal token to be passed as template arguments. LOLR_StringTemplatePack, }; SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D, CXXSpecialMember SM, bool ConstArg, bool VolatileArg, bool RValueThis, bool ConstThis, bool VolatileThis); typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator; typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)> TypoRecoveryCallback; private: bool CppLookupName(LookupResult &R, Scope *S); struct TypoExprState { std::unique_ptr<TypoCorrectionConsumer> Consumer; TypoDiagnosticGenerator DiagHandler; TypoRecoveryCallback RecoveryHandler; TypoExprState(); TypoExprState(TypoExprState &&other) noexcept; TypoExprState &operator=(TypoExprState &&other) noexcept; }; /// The set of unhandled TypoExprs and their associated state. llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos; /// Creates a new TypoExpr AST node. TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC, SourceLocation TypoLoc); // The set of known/encountered (unique, canonicalized) NamespaceDecls. // // The boolean value will be true to indicate that the namespace was loaded // from an AST/PCH file, or false otherwise. llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces; /// Whether we have already loaded known namespaces from an extenal /// source. bool LoadedExternalKnownNamespaces; /// Helper for CorrectTypo and CorrectTypoDelayed used to create and /// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction /// should be skipped entirely. std::unique_ptr<TypoCorrectionConsumer> makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, DeclContext *MemberContext, bool EnteringContext, const ObjCObjectPointerType *OPT, bool ErrorRecovery); public: const TypoExprState &getTypoExprState(TypoExpr *TE) const; /// Clears the state of the given TypoExpr. void clearDelayedTypo(TypoExpr *TE); /// Look up a name, looking for a single declaration. 
Return /// null if the results were absent, ambiguous, or overloaded. /// /// It is preferable to use the elaborated form and explicitly handle /// ambiguity and overloaded. NamedDecl *LookupSingleName(Scope *S, DeclarationName Name, SourceLocation Loc, LookupNameKind NameKind, RedeclarationKind Redecl = NotForRedeclaration); bool LookupBuiltin(LookupResult &R); void LookupNecessaryTypesForBuiltin(Scope *S, unsigned ID); bool LookupName(LookupResult &R, Scope *S, bool AllowBuiltinCreation = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, bool InUnqualifiedLookup = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, CXXScopeSpec &SS); bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS, bool AllowBuiltinCreation = false, bool EnteringContext = false); ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc, RedeclarationKind Redecl = NotForRedeclaration); bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class); void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S, UnresolvedSetImpl &Functions); LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc, SourceLocation GnuLabelLoc = SourceLocation()); DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class); CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class); CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class); bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id); LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R, ArrayRef<QualType> ArgTys, bool AllowRaw, bool AllowTemplate, bool AllowStringTemplate, bool DiagnoseMissing, StringLiteral *StringLit = nullptr); bool isKnownName(StringRef name); /// Status of the function emission on the CUDA/HIP/OpenMP host/device attrs. enum class FunctionEmissionStatus { Emitted, CUDADiscarded, // Discarded due to CUDA/HIP hostness OMPDiscarded, // Discarded due to OpenMP hostness TemplateDiscarded, // Discarded due to uninstantiated templates Unknown, }; FunctionEmissionStatus getEmissionStatus(FunctionDecl *Decl, bool Final = false); // Whether the callee should be ignored in CUDA/HIP/OpenMP host/device check. bool shouldIgnoreInHostDeviceCheck(FunctionDecl *Callee); void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, ADLResult &Functions); void LookupVisibleDecls(Scope *S, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool LoadExternal = true); void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool IncludeDependentBases = false, bool LoadExternal = true); enum CorrectTypoKind { CTK_NonError, // CorrectTypo used in a non error recovery situation. CTK_ErrorRecovery // CorrectTypo used in normal error recovery. 
}; TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr, bool RecordFailure = true); TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr); /// Process any TypoExprs in the given Expr and its children, /// generating diagnostics as appropriate and returning a new Expr if there /// were typos that were all successfully corrected and ExprError if one or /// more typos could not be corrected. /// /// \param E The Expr to check for TypoExprs. /// /// \param InitDecl A VarDecl to avoid because the Expr being corrected is its /// initializer. /// /// \param RecoverUncorrectedTypos If true, when typo correction fails, it /// will rebuild the given Expr with all TypoExprs degraded to RecoveryExprs. /// /// \param Filter A function applied to a newly rebuilt Expr to determine if /// it is an acceptable/usable result from a single combination of typo /// corrections. As long as the filter returns ExprError, different /// combinations of corrections will be tried until all are exhausted. ExprResult CorrectDelayedTyposInExpr( Expr *E, VarDecl *InitDecl = nullptr, bool RecoverUncorrectedTypos = false, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }); ExprResult CorrectDelayedTyposInExpr( ExprResult ER, VarDecl *InitDecl = nullptr, bool RecoverUncorrectedTypos = false, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }) { return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), InitDecl, RecoverUncorrectedTypos, Filter); } void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, bool ErrorRecovery = true); void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, const PartialDiagnostic &PrevNote, bool ErrorRecovery = true); void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F); void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc, ArrayRef<Expr *> Args, AssociatedNamespaceSet &AssociatedNamespaces, AssociatedClassSet &AssociatedClasses); void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S, bool ConsiderLinkage, bool AllowInlineNamespace); bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old); void DiagnoseAmbiguousLookup(LookupResult &Result); //@} /// Attempts to produce a RecoveryExpr after some AST node cannot be created. 
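/// A hedged usage sketch for the declaration that follows, added for
/// exposition only; 'SemaRef', 'LParenLoc', 'RParenLoc', and 'ParsedArgs'
/// are assumed to exist in the calling context:
/// \code
///   // Preserve the sub-expressions we did manage to build so later phases
///   // (and tooling) still see them; leaving the type parameter unspecified
///   // means the recovery expression carries no known result type.
///   ExprResult Recovery =
///       SemaRef.CreateRecoveryExpr(LParenLoc, RParenLoc, ParsedArgs);
/// \endcode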
ExprResult CreateRecoveryExpr(SourceLocation Begin, SourceLocation End, ArrayRef<Expr *> SubExprs, QualType T = QualType()); ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id, SourceLocation IdLoc, bool TypoCorrection = false); FunctionDecl *CreateBuiltin(IdentifierInfo *II, QualType Type, unsigned ID, SourceLocation Loc); NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID, Scope *S, bool ForRedeclaration, SourceLocation Loc); NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II, Scope *S); void AddKnownFunctionAttributesForReplaceableGlobalAllocationFunction( FunctionDecl *FD); void AddKnownFunctionAttributes(FunctionDecl *FD); // More parsing and symbol table subroutines. void ProcessPragmaWeak(Scope *S, Decl *D); // Decl attributes - this routine is the top level dispatcher. void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD); // Helper for delayed processing of attributes. void ProcessDeclAttributeDelayed(Decl *D, const ParsedAttributesView &AttrList); void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL, bool IncludeCXX11Attributes = true); bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl, const ParsedAttributesView &AttrList); void checkUnusedDeclAttributes(Declarator &D); /// Determine if type T is a valid subject for a nonnull and similar /// attributes. By default, we look through references (the behavior used by /// nonnull), but if the second parameter is true, then we treat a reference /// type as valid. bool isValidPointerAttrType(QualType T, bool RefOkay = false); bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value); bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC, const FunctionDecl *FD = nullptr); bool CheckAttrTarget(const ParsedAttr &CurrAttr); bool CheckAttrNoArgs(const ParsedAttr &CurrAttr); bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum, StringRef &Str, SourceLocation *ArgLocation = nullptr); bool checkSectionName(SourceLocation LiteralLoc, StringRef Str); bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str); bool checkMSInheritanceAttrOnDefinition( CXXRecordDecl *RD, SourceRange Range, bool BestCase, MSInheritanceModel SemanticSpelling); void CheckAlignasUnderalignment(Decl *D); /// Adjust the calling convention of a method to be the ABI default if it /// wasn't specified explicitly. This handles method types formed from /// function type typedefs and typename template arguments. void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor, SourceLocation Loc); // Check if there is an explicit attribute, but only look through parens. // The intent is to look for an attribute on the current declarator, but not // one that came from a typedef. bool hasExplicitCallingConv(QualType T); /// Get the outermost AttributedType node that sets a calling convention. /// Valid types should not have multiple attributes with different CCs. const AttributedType *getCallingConvAttributedType(QualType T) const; /// Stmt attributes - this routine is the top level dispatcher. 
StmtResult ProcessStmtAttributes(Stmt *Stmt, const ParsedAttributesView &Attrs, SourceRange Range); void WarnConflictingTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); void CheckConflictingOverridingMethod(ObjCMethodDecl *Method, ObjCMethodDecl *Overridden, bool IsProtocolMethodDecl); /// WarnExactTypedMethods - This routine issues a warning if method /// implementation declaration matches exactly that of its declaration. void WarnExactTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); typedef llvm::SmallPtrSet<Selector, 8> SelectorSet; /// CheckImplementationIvars - This routine checks if the instance variables /// listed in the implementation match those listed in the interface. void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl, ObjCIvarDecl **Fields, unsigned nIvars, SourceLocation Loc); /// ImplMethodsVsClassMethods - This is the main routine to warn if any method /// remains unimplemented in the class or category \@implementation. void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool IncompleteImpl = false); /// DiagnoseUnimplementedProperties - This routine warns on those properties /// which must be implemented by this implementation. void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl *CDecl, bool SynthesizeProperties); /// Diagnose any null-resettable synthesized setters. void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl); /// DefaultSynthesizeProperties - This routine default synthesizes all /// properties which must be synthesized in the class's \@implementation. void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl, ObjCInterfaceDecl *IDecl, SourceLocation AtEnd); void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd); /// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is /// an ivar synthesized for 'Method' and 'Method' is a property accessor /// declared in class 'IFace'. bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace, ObjCMethodDecl *Method, ObjCIvarDecl *IV); /// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the ivar which /// backs the property is not used in the property's accessor. void DiagnoseUnusedBackingIvarInAccessor(Scope *S, const ObjCImplementationDecl *ImplD); /// GetIvarBackingPropertyAccessor - If method is a property setter/getter and /// its property has a backing ivar, returns this ivar; otherwise, returns NULL. /// It also returns ivar's property on success. ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method, const ObjCPropertyDecl *&PDecl) const; /// Called by ActOnProperty to handle \@property declarations in /// class extensions. ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, SourceLocation GetterNameLoc, Selector SetterSel, SourceLocation SetterNameLoc, const bool isReadWrite, unsigned &Attributes, const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind); /// Called by ActOnProperty and HandlePropertyInClassExtension to /// handle creating the ObjcPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S, ObjCContainerDecl *CDecl, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, SourceLocation GetterNameLoc, Selector SetterSel, SourceLocation SetterNameLoc, const bool isReadWrite, const unsigned Attributes, const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); /// AtomicPropertySetterGetterRules - This routine enforces the rule (via /// warning) when an atomic property has one but not the other user-declared /// setter or getter. void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl, ObjCInterfaceDecl* IDecl); void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D); void DiagnoseMissingDesignatedInitOverrides( const ObjCImplementationDecl *ImplD, const ObjCInterfaceDecl *IFD); void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID); enum MethodMatchStrategy { MMS_loose, MMS_strict }; /// MatchTwoMethodDeclarations - Checks if two methods' types match and returns /// true or false accordingly. bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method, const ObjCMethodDecl *PrevMethod, MethodMatchStrategy strategy = MMS_strict); /// MatchAllMethodDeclarations - Check methods declared in an interface /// or protocol against those declared in their implementations. void MatchAllMethodDeclarations(const SelectorSet &InsMap, const SelectorSet &ClsMap, SelectorSet &InsMapSeen, SelectorSet &ClsMapSeen, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool &IncompleteImpl, bool ImmediateClass, bool WarnCategoryMethodImpl=false); /// CheckCategoryVsClassMethodMatches - Checks that methods implemented in /// a category match those implemented in its primary class and /// warns each time an exact match is found. void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP); /// Add the given method to the list of globally-known methods. void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method); /// Returns default addr space for method qualifiers. LangAS getDefaultCXXMethodAddrSpace() const; private: /// AddMethodToGlobalPool - Add an instance or factory method to the global /// pool. See description of AddInstanceMethodToGlobalPool. void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance); /// LookupMethodInGlobalPool - Returns the instance or factory method and /// optionally warns if there are multiple signatures. ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass, bool instance); public: /// - Returns instance or factory methods in the global method pool for the /// given selector. It checks the desired kind first; if none is found and the /// parameter checkTheOther is set, it then checks the other kind. If no such /// method or only one method is found, the function returns false; otherwise, it /// returns true.
bool CollectMultipleMethodsInGlobalPool(Selector Sel, SmallVectorImpl<ObjCMethodDecl*>& Methods, bool InstanceFirst, bool CheckTheOther, const ObjCObjectType *TypeBound = nullptr); bool AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod, SourceRange R, bool receiverIdOrClass, SmallVectorImpl<ObjCMethodDecl*>& Methods); void DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods, Selector Sel, SourceRange R, bool receiverIdOrClass); private: /// - Returns a selector which best matches given argument list or /// nullptr if none could be found ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args, bool IsInstance, SmallVectorImpl<ObjCMethodDecl*>& Methods); /// Record the typo correction failure and return an empty correction. TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc, bool RecordFailure = true) { if (RecordFailure) TypoCorrectionFailures[Typo].insert(TypoLoc); return TypoCorrection(); } public: /// AddInstanceMethodToGlobalPool - All instance methods in a translation /// unit are added to a global pool. This allows us to efficiently associate /// a selector with a method declaraation for purposes of typechecking /// messages sent to "id" (where the class of the object is unknown). void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) { AddMethodToGlobalPool(Method, impl, /*instance*/true); } /// AddFactoryMethodToGlobalPool - Same as above, but for factory methods. void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) { AddMethodToGlobalPool(Method, impl, /*instance*/false); } /// AddAnyMethodToGlobalPool - Add any method, instance or factory to global /// pool. void AddAnyMethodToGlobalPool(Decl *D); /// LookupInstanceMethodInGlobalPool - Returns the method and warns if /// there are multiple signatures. ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass=false) { return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, /*instance*/true); } /// LookupFactoryMethodInGlobalPool - Returns the method and warns if /// there are multiple signatures. ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass=false) { return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, /*instance*/false); } const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel, QualType ObjectType=QualType()); /// LookupImplementedMethodInGlobalPool - Returns the method which has an /// implementation. ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel); /// CollectIvarsToConstructOrDestruct - Collect those ivars which require /// initialization. void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI, SmallVectorImpl<ObjCIvarDecl*> &Ivars); //===--------------------------------------------------------------------===// // Statement Parsing Callbacks: SemaStmt.cpp. public: class FullExprArg { public: FullExprArg() : E(nullptr) { } FullExprArg(Sema &actions) : E(nullptr) { } ExprResult release() { return E; } Expr *get() const { return E; } Expr *operator->() { return E; } private: // FIXME: No need to make the entire Sema class a friend when it's just // Sema::MakeFullExpr that needs access to the constructor below. friend class Sema; explicit FullExprArg(Expr *expr) : E(expr) {} Expr *E; }; FullExprArg MakeFullExpr(Expr *Arg) { return MakeFullExpr(Arg, Arg ? 
Arg->getExprLoc() : SourceLocation()); } FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) { return FullExprArg( ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get()); } FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) { ExprResult FE = ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(), /*DiscardedValue*/ true); return FullExprArg(FE.get()); } StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true); StmtResult ActOnExprStmtError(); StmtResult ActOnNullStmt(SourceLocation SemiLoc, bool HasLeadingEmptyMacro = false); void ActOnStartOfCompoundStmt(bool IsStmtExpr); void ActOnAfterCompoundStatementLeadingPragmas(); void ActOnFinishOfCompoundStmt(); StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R, ArrayRef<Stmt *> Elts, bool isStmtExpr); /// A RAII object to enter scope of a compound statement. class CompoundScopeRAII { public: CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) { S.ActOnStartOfCompoundStmt(IsStmtExpr); } ~CompoundScopeRAII() { S.ActOnFinishOfCompoundStmt(); } private: Sema &S; }; /// An RAII helper that pops function a function scope on exit. struct FunctionScopeRAII { Sema &S; bool Active; FunctionScopeRAII(Sema &S) : S(S), Active(true) {} ~FunctionScopeRAII() { if (Active) S.PopFunctionScopeInfo(); } void disable() { Active = false; } }; StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl, SourceLocation StartLoc, SourceLocation EndLoc); void ActOnForEachDeclStmt(DeclGroupPtrTy Decl); StmtResult ActOnForEachLValueExpr(Expr *E); ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val); StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS, SourceLocation DotDotDotLoc, ExprResult RHS, SourceLocation ColonLoc); void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt); StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc, SourceLocation ColonLoc, Stmt *SubStmt, Scope *CurScope); StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl, SourceLocation ColonLoc, Stmt *SubStmt); StmtResult ActOnAttributedStmt(SourceLocation AttrLoc, ArrayRef<const Attr*> Attrs, Stmt *SubStmt); class ConditionResult; StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr, SourceLocation LParenLoc, Stmt *InitStmt, ConditionResult Cond, SourceLocation RParenLoc, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr, SourceLocation LParenLoc, Stmt *InitStmt, ConditionResult Cond, SourceLocation RParenLoc, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc, SourceLocation LParenLoc, Stmt *InitStmt, ConditionResult Cond, SourceLocation RParenLoc); StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch, Stmt *Body); StmtResult ActOnWhileStmt(SourceLocation WhileLoc, SourceLocation LParenLoc, ConditionResult Cond, SourceLocation RParenLoc, Stmt *Body); StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body, SourceLocation WhileLoc, SourceLocation CondLParen, Expr *Cond, SourceLocation CondRParen); StmtResult ActOnForStmt(SourceLocation ForLoc, SourceLocation LParenLoc, Stmt *First, ConditionResult Second, FullExprArg Third, SourceLocation RParenLoc, Stmt *Body); ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc, Expr *collection); StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc, Stmt *First, Expr *collection, SourceLocation RParenLoc); StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body); enum BuildForRangeKind { /// 
Initial building of a for-range statement. BFRK_Build, /// Instantiation or recovery rebuild of a for-range statement. Don't /// attempt any typo-correction. BFRK_Rebuild, /// Determining whether a for-range statement could be built. Avoid any /// unnecessary or irreversible actions. BFRK_Check }; StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, Stmt *LoopVar, SourceLocation ColonLoc, Expr *Collection, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, SourceLocation ColonLoc, Stmt *RangeDecl, Stmt *Begin, Stmt *End, Expr *Cond, Expr *Inc, Stmt *LoopVarDecl, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body); StmtResult ActOnGotoStmt(SourceLocation GotoLoc, SourceLocation LabelLoc, LabelDecl *TheDecl); StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc, Expr *DestExp); StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope); StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope); void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, unsigned NumParams); typedef std::pair<StringRef, QualType> CapturedParamNameType; void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, ArrayRef<CapturedParamNameType> Params, unsigned OpenMPCaptureLevel = 0); StmtResult ActOnCapturedRegionEnd(Stmt *S); void ActOnCapturedRegionError(); RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD, SourceLocation Loc, unsigned NumParams); enum CopyElisionSemanticsKind { CES_Strict = 0, CES_AllowParameters = 1, CES_AllowDifferentTypes = 2, CES_AllowExceptionVariables = 4, CES_AllowRValueReferenceType = 8, CES_ImplicitlyMovableCXX11CXX14CXX17 = (CES_AllowParameters | CES_AllowDifferentTypes), CES_ImplicitlyMovableCXX20 = (CES_AllowParameters | CES_AllowDifferentTypes | CES_AllowExceptionVariables | CES_AllowRValueReferenceType), }; VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E, CopyElisionSemanticsKind CESK); bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD, CopyElisionSemanticsKind CESK); StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, Scope *CurScope); StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple, bool IsVolatile, unsigned NumOutputs, unsigned NumInputs, IdentifierInfo **Names, MultiExprArg Constraints, MultiExprArg Exprs, Expr *AsmString, MultiExprArg Clobbers, unsigned NumLabels, SourceLocation RParenLoc); void FillInlineAsmIdentifierInfo(Expr *Res, llvm::InlineAsmIdentifierInfo &Info); ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool IsUnevaluatedContext); bool LookupInlineAsmField(StringRef Base, StringRef Member, unsigned &Offset, SourceLocation AsmLoc); ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member, SourceLocation AsmLoc); StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc, ArrayRef<Token> AsmToks, StringRef AsmString, unsigned NumOutputs, unsigned NumInputs, ArrayRef<StringRef> Constraints, ArrayRef<StringRef> Clobbers, ArrayRef<Expr*> Exprs, SourceLocation EndLoc); LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName, 
SourceLocation Location, bool AlwaysCreate); VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id, bool Invalid = false); Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D); StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen, Decl *Parm, Stmt *Body); StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body); StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try, MultiStmtArg Catch, Stmt *Finally); StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw); StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw, Scope *CurScope); ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc, Expr *operand); StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc, Expr *SynchExpr, Stmt *SynchBody); StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body); VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id); Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D); StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc, Decl *ExDecl, Stmt *HandlerBlock); StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock, ArrayRef<Stmt *> Handlers); StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ? SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); StmtResult ActOnSEHExceptBlock(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); void ActOnStartSEHFinallyBlock(); void ActOnAbortSEHFinallyBlock(); StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block); StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope); void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock); bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const; /// If it's a file scoped decl that must warn if not used, keep track /// of it. void MarkUnusedFileScopedDecl(const DeclaratorDecl *D); /// DiagnoseUnusedExprResult - If the statement passed in is an expression /// whose result is unused, warn. void DiagnoseUnusedExprResult(const Stmt *S); void DiagnoseUnusedNestedTypedefs(const RecordDecl *D); void DiagnoseUnusedDecl(const NamedDecl *ND); /// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null /// statement as a \p Body, and it is located on the same line. /// /// This helps prevent bugs due to typos, such as: /// if (condition); /// do_stuff(); void DiagnoseEmptyStmtBody(SourceLocation StmtLoc, const Stmt *Body, unsigned DiagID); /// Warn if a for/while loop statement \p S, which is followed by /// \p PossibleBody, has a suspicious null statement as a body. void DiagnoseEmptyLoopBody(const Stmt *S, const Stmt *PossibleBody); /// Warn if a value is moved to itself. void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, SourceLocation OpLoc); /// Warn if we're implicitly casting from a _Nullable pointer type to a /// _Nonnull one. void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType, SourceLocation Loc); /// Warn when implicitly casting 0 to nullptr. 
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E); ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) { return DelayedDiagnostics.push(pool); } void PopParsingDeclaration(ParsingDeclState state, Decl *decl); typedef ProcessingContextState ParsingClassState; ParsingClassState PushParsingClass() { ParsingClassDepth++; return DelayedDiagnostics.pushUndelayed(); } void PopParsingClass(ParsingClassState state) { ParsingClassDepth--; DelayedDiagnostics.popUndelayed(state); } void redelayDiagnostics(sema::DelayedDiagnosticPool &pool); void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs, const ObjCInterfaceDecl *UnknownObjCClass, bool ObjCPropertyAccess, bool AvoidPartialAvailabilityChecks = false, ObjCInterfaceDecl *ClassReceiver = nullptr); bool makeUnavailableInSystemHeader(SourceLocation loc, UnavailableAttr::ImplicitReason reason); /// Issue any -Wunguarded-availability warnings in \c FD void DiagnoseUnguardedAvailabilityViolations(Decl *FD); void handleDelayedAvailabilityCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); //===--------------------------------------------------------------------===// // Expression Parsing Callbacks: SemaExpr.cpp. bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid); bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs, const ObjCInterfaceDecl *UnknownObjCClass = nullptr, bool ObjCPropertyAccess = false, bool AvoidPartialAvailabilityChecks = false, ObjCInterfaceDecl *ClassReciever = nullptr); void NoteDeletedFunction(FunctionDecl *FD); void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD); bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD, ObjCMethodDecl *Getter, SourceLocation Loc); void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc, ArrayRef<Expr *> Args); void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl }; void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); void PopExpressionEvaluationContext(); void DiscardCleanupsInEvaluationContext(); ExprResult TransformToPotentiallyEvaluated(Expr *E); ExprResult HandleExprEvaluationContextForTypeof(Expr *E); ExprResult CheckUnevaluatedOperand(Expr *E); void CheckUnusedVolatileAssignment(Expr *E); ExprResult ActOnConstantExpression(ExprResult Res); // Functions for marking a declaration referenced. These functions also // contain the relevant logic for marking if a reference to a function or // variable is an odr-use (in the C++11 sense). There are separate variants // for expressions referring to a decl; these exist because odr-use marking // needs to be delayed for some constant variables when we build one of the // named expressions. // // MightBeOdrUse indicates whether the use could possibly be an odr-use, and // should usually be true. This only needs to be set to false if the lack of // odr-use cannot be determined from the current context (for instance, // because the name denotes a virtual function and was written without an // explicit nested-name-specifier). 
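  // Illustrative sketch (hedged): typical use of the reference-marking entry
  // points declared just below. When Sema builds an expression that names a
  // function, the declaration is marked referenced so that definitions and
  // template instantiations are triggered when the use is an odr-use. The
  // names 'SemaRef', 'Loc', 'FD', and 'Ref' are hypothetical.
  //
  //   SemaRef.MarkFunctionReferenced(Loc, FD);  // default MightBeOdrUse = true
  //   SemaRef.MarkDeclRefReferenced(Ref);       // Ref is the DeclRefExpr naming FD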
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse); void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func, bool MightBeOdrUse = true); void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var); void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr); void MarkMemberReferenced(MemberExpr *E); void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E); void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc, unsigned CapturingScopeIndex); ExprResult CheckLValueToRValueConversionOperand(Expr *E); void CleanupVarDeclMarking(); enum TryCaptureKind { TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef }; /// Try to capture the given variable. /// /// \param Var The variable to capture. /// /// \param Loc The location at which the capture occurs. /// /// \param Kind The kind of capture, which may be implicit (for either a /// block or a lambda), or explicit by-value or by-reference (for a lambda). /// /// \param EllipsisLoc The location of the ellipsis, if one is provided in /// an explicit lambda capture. /// /// \param BuildAndDiagnose Whether we are actually supposed to add the /// captures or diagnose errors. If false, this routine merely check whether /// the capture can occur without performing the capture itself or complaining /// if the variable cannot be captured. /// /// \param CaptureType Will be set to the type of the field used to capture /// this variable in the innermost block or lambda. Only valid when the /// variable can be captured. /// /// \param DeclRefType Will be set to the type of a reference to the capture /// from within the current scope. Only valid when the variable can be /// captured. /// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// variables that may or may not be used in certain specializations of /// a nested generic lambda. /// /// \returns true if an error occurred (i.e., the variable cannot be /// captured) and false if the capture succeeded. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind, SourceLocation EllipsisLoc, bool BuildAndDiagnose, QualType &CaptureType, QualType &DeclRefType, const unsigned *const FunctionScopeIndexToStopAt); /// Try to capture the given variable. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind = TryCapture_Implicit, SourceLocation EllipsisLoc = SourceLocation()); /// Checks if the variable must be captured. bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc); /// Given a variable, determine the type that a reference to that /// variable will have in the given scope. QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc); /// Mark all of the declarations referenced within a particular AST node as /// referenced. Used when template instantiation instantiates a non-dependent /// type -- entities referenced by the type are now referenced. void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T); void MarkDeclarationsReferencedInExpr(Expr *E, bool SkipLocalVariables = false); /// Try to recover by turning the given expression into a /// call. Returns true if recovery was attempted or an error was /// emitted; this may also leave the ExprResult invalid. 
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD, bool ForceComplain = false, bool (*IsPlausibleResult)(QualType) = nullptr); /// Figure out if an expression could be turned into a call. bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy, UnresolvedSetImpl &NonTemplateOverloads); /// Try to convert an expression \p E to type \p Ty. Returns the result of the /// conversion. ExprResult tryConvertExprToType(Expr *E, QualType Ty); /// Conditionally issue a diagnostic based on the current /// evaluation context. /// /// \param Statement If Statement is non-null, delay reporting the /// diagnostic until the function body is parsed, and then do a basic /// reachability analysis to determine if the statement is reachable. /// If it is unreachable, the diagnostic will not be emitted. bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement, const PartialDiagnostic &PD); /// Similar, but diagnostic is only produced if all the specified statements /// are reachable. bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts, const PartialDiagnostic &PD); // Primary Expressions. SourceRange getExprRange(Expr *E) const; ExprResult ActOnIdExpression( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand, CorrectionCandidateCallback *CCC = nullptr, bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr); void DecomposeUnqualifiedId(const UnqualifiedId &Id, TemplateArgumentListInfo &Buffer, DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *&TemplateArgs); bool DiagnoseDependentMemberLookup(LookupResult &R); bool DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R, CorrectionCandidateCallback &CCC, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr); DeclResult LookupIvarInObjCMethod(LookupResult &Lookup, Scope *S, IdentifierInfo *II); ExprResult BuildIvarRefExpr(Scope *S, SourceLocation Loc, ObjCIvarDecl *IV); ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S, IdentifierInfo *II, bool AllowBuiltinCreation=false); ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, bool isAddressOfOperand, const TemplateArgumentListInfo *TemplateArgs); /// If \p D cannot be odr-used in the current expression evaluation context, /// return a reason explaining why. Otherwise, return NOUR_None. 
NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D); DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, SourceLocation Loc, const CXXScopeSpec *SS = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, const CXXScopeSpec *SS = nullptr, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, NestedNameSpecifierLoc NNS, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); ExprResult BuildAnonymousStructUnionMemberReference( const CXXScopeSpec &SS, SourceLocation nameLoc, IndirectFieldDecl *indirectField, DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none), Expr *baseObjectExpr = nullptr, SourceLocation opLoc = SourceLocation()); ExprResult BuildPossibleImplicitMemberExpr( const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, UnresolvedLookupExpr *AsULE = nullptr); ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, bool IsDefiniteInstance, const Scope *S); bool UseArgumentDependentLookup(const CXXScopeSpec &SS, const LookupResult &R, bool HasTrailingLParen); ExprResult BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, bool IsAddressOfOperand, const Scope *S, TypeSourceInfo **RecoveryTSI = nullptr); ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R, bool NeedsADL, bool AcceptInvalidDecl = false); ExprResult BuildDeclarationNameExpr( const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr, bool AcceptInvalidDecl = false); ExprResult BuildLiteralOperatorCall(LookupResult &R, DeclarationNameInfo &SuffixInfo, ArrayRef<Expr *> Args, SourceLocation LitEndLoc, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); ExprResult BuildPredefinedExpr(SourceLocation Loc, PredefinedExpr::IdentKind IK); ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind); ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val); bool CheckLoopHintExpr(Expr *E, SourceLocation Loc); ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnCharacterConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E); ExprResult ActOnParenListExpr(SourceLocation L, SourceLocation R, MultiExprArg Val); /// ActOnStringLiteral - The specified tokens were lexed as pasted string /// fragments (e.g. "foo" "bar" L"baz"). 
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope = nullptr); ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<ParsedType> ArgTypes, ArrayRef<Expr *> ArgExprs); ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<TypeSourceInfo *> Types, ArrayRef<Expr *> Exprs); // Binary/Unary Operators. 'Tok' is the token for the operator. ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *InputExpr); ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *Input); ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Op, Expr *Input); bool isQualifiedMemberAccess(Expr *E); QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc); ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, SourceRange R); ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, bool IsType, void *TyOrEx, SourceRange ArgRange); ExprResult CheckPlaceholderExpr(Expr *E); bool CheckVecStepExpr(Expr *E); bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind); bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc, SourceRange ExprRange, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnSizeofParameterPackExpr(Scope *S, SourceLocation OpLoc, IdentifierInfo &Name, SourceLocation NameLoc, SourceLocation RParenLoc); ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Kind, Expr *Input); ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult CreateBuiltinMatrixSubscriptExpr(Expr *Base, Expr *RowIdx, Expr *ColumnIdx, SourceLocation RBLoc); ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc, Expr *LowerBound, SourceLocation ColonLocFirst, SourceLocation ColonLocSecond, Expr *Length, Expr *Stride, SourceLocation RBLoc); ExprResult ActOnOMPArrayShapingExpr(Expr *Base, SourceLocation LParenLoc, SourceLocation RParenLoc, ArrayRef<Expr *> Dims, ArrayRef<SourceRange> Brackets); /// Data structure for iterator expression. struct OMPIteratorData { IdentifierInfo *DeclIdent = nullptr; SourceLocation DeclIdentLoc; ParsedType Type; OMPIteratorExpr::IteratorRange Range; SourceLocation AssignLoc; SourceLocation ColonLoc; SourceLocation SecColonLoc; }; ExprResult ActOnOMPIteratorExpr(Scope *S, SourceLocation IteratorKwLoc, SourceLocation LLoc, SourceLocation RLoc, ArrayRef<OMPIteratorData> Data); // This struct is for use by ActOnMemberAccess to allow // BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after // changing the access operator from a '.' to a '->' (to see if that is the // change needed to fix an error about an unknown member, e.g. when the class // defines a custom operator->). 
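  // Illustrative sketch (hedged): how the extra-args struct declared just
  // below is passed into BuildMemberReferenceExpr so that a failed member
  // access written with '.' can be retried with '->'. The names 'SemaRef',
  // 'Base', 'OpLoc', 'SS', 'NameInfo', 'MemberId', 'CurScope', and
  // 'ObjCImpDecl' are hypothetical.
  //
  //   Sema::ActOnMemberAccessExtraArgs ExtraArgs = {CurScope, MemberId,
  //                                                 ObjCImpDecl};
  //   ExprResult Res = SemaRef.BuildMemberReferenceExpr(
  //       Base, Base->getType(), OpLoc, /*IsArrow=*/false, SS,
  //       /*TemplateKWLoc=*/SourceLocation(), /*FirstQualifierInScope=*/nullptr,
  //       NameInfo, /*TemplateArgs=*/nullptr, CurScope, &ExtraArgs);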
struct ActOnMemberAccessExtraArgs { Scope *S; UnqualifiedId &Id; Decl *ObjCImpDecl; }; ExprResult BuildMemberReferenceExpr( Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, bool SuppressQualifierCheck = false, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, FieldDecl *Field, DeclAccessPair FoundDecl, const DeclarationNameInfo &MemberNameInfo); ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow); bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType, const CXXScopeSpec &SS, const LookupResult &R); ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Member, Decl *ObjCImpDecl); MemberExpr * BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec *SS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); MemberExpr * BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); void ActOnDefaultCtorInitializers(Decl *CDtorDecl); bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn, FunctionDecl *FDecl, const FunctionProtoType *Proto, ArrayRef<Expr *> Args, SourceLocation RParenLoc, bool ExecConfig = false); void CheckStaticArrayArgument(SourceLocation CallLoc, ParmVarDecl *Param, const Expr *ArgExpr); /// ActOnCallExpr - Handle a call to Fn with the specified array of arguments. /// This provides the location of the left/right parens and a list of comma /// locations. 
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig = nullptr); ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig = nullptr, bool IsExecConfig = false, bool AllowRecovery = false); enum class AtomicArgumentOrder { API, AST }; ExprResult BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange, SourceLocation RParenLoc, MultiExprArg Args, AtomicExpr::AtomicOp Op, AtomicArgumentOrder ArgOrder = AtomicArgumentOrder::API); ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc, ArrayRef<Expr *> Arg, SourceLocation RParenLoc, Expr *Config = nullptr, bool IsExecConfig = false, ADLCallKind UsesADL = ADLCallKind::NotADL); ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc, MultiExprArg ExecConfig, SourceLocation GGGLoc); ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc, Declarator &D, ParsedType &Ty, SourceLocation RParenLoc, Expr *CastExpr); ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc, TypeSourceInfo *Ty, SourceLocation RParenLoc, Expr *Op); CastKind PrepareScalarCast(ExprResult &src, QualType destType); /// Build an altivec or OpenCL literal. ExprResult BuildVectorLiteral(SourceLocation LParenLoc, SourceLocation RParenLoc, Expr *E, TypeSourceInfo *TInfo); ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME); ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc, Expr *InitExpr); ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo, SourceLocation RParenLoc, Expr *LiteralExpr); ExprResult ActOnInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList, SourceLocation RBraceLoc); ExprResult BuildInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList, SourceLocation RBraceLoc); ExprResult ActOnDesignatedInitializer(Designation &Desig, SourceLocation EqualOrColonLoc, bool GNUSyntax, ExprResult Init); private: static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind); public: ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc, tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr); ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr); ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr); void LookupBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc, UnresolvedSetImpl &Functions); void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc); /// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null /// in the case of a the GNU conditional expr extension. ExprResult ActOnConditionalOp(SourceLocation QuestionLoc, SourceLocation ColonLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr); /// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo". ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc, LabelDecl *TheDecl); void ActOnStartStmtExpr(); ExprResult ActOnStmtExpr(Scope *S, SourceLocation LPLoc, Stmt *SubStmt, SourceLocation RPLoc); ExprResult BuildStmtExpr(SourceLocation LPLoc, Stmt *SubStmt, SourceLocation RPLoc, unsigned TemplateDepth); // Handle the final expression in a statement expression. 
ExprResult ActOnStmtExprResult(ExprResult E); void ActOnStmtExprError(); // __builtin_offsetof(type, identifier(.identifier|[expr])*) struct OffsetOfComponent { SourceLocation LocStart, LocEnd; bool isBrackets; // true if [expr], false if .ident union { IdentifierInfo *IdentInfo; Expr *E; } U; }; /// __builtin_offsetof(type, a.b[123][456].c) ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc, TypeSourceInfo *TInfo, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); ExprResult ActOnBuiltinOffsetOf(Scope *S, SourceLocation BuiltinLoc, SourceLocation TypeLoc, ParsedType ParsedArgTy, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); // __builtin_choose_expr(constExpr, expr1, expr2) ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr, SourceLocation RPLoc); // __builtin_va_arg(expr, type) ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty, SourceLocation RPLoc); ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E, TypeSourceInfo *TInfo, SourceLocation RPLoc); // __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(), // __builtin_COLUMN() ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc); // Build a potentially resolved SourceLocExpr. ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc, DeclContext *ParentContext); // __null ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc); bool CheckCaseExpression(Expr *E); /// Describes the result of an "if-exists" condition check. enum IfExistsResult { /// The symbol exists. IER_Exists, /// The symbol does not exist. IER_DoesNotExist, /// The name is a dependent name, so the results will differ /// from one instantiation to the next. IER_Dependent, /// An error occurred. IER_Error }; IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS, const DeclarationNameInfo &TargetNameInfo); IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name); StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, NestedNameSpecifierLoc QualifierLoc, DeclarationNameInfo NameInfo, Stmt *Nested); StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name, Stmt *Nested); //===------------------------- "Block" Extension ------------------------===// /// ActOnBlockStart - This callback is invoked when a block literal is /// started. void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockArguments - This callback allows processing of block arguments. /// If there are no arguments, this is still invoked. void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo, Scope *CurScope); /// ActOnBlockError - If there is an error parsing a block, this callback /// is invoked to pop the information about the block from the action impl. void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockStmtExpr - This is called when the body of a block statement /// literal was successfully completed. ^(int x){...} ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body, Scope *CurScope); //===---------------------------- Clang Extensions ----------------------===// /// __builtin_convertvector(...) 
  ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc);

  //===---------------------------- OpenCL Features -----------------------===//

  /// __builtin_astype(...)
  ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc);

  //===---------------------------- C++ Features --------------------------===//

  // Act on C++ namespaces
  Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc, SourceLocation NamespaceLoc, SourceLocation IdentLoc, IdentifierInfo *Ident, SourceLocation LBrace, const ParsedAttributesView &AttrList, UsingDirectiveDecl *&UsingDecl);
  void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);

  NamespaceDecl *getStdNamespace() const;
  NamespaceDecl *getOrCreateStdNamespace();
  NamespaceDecl *lookupStdExperimentalNamespace();

  CXXRecordDecl *getStdBadAlloc() const;
  EnumDecl *getStdAlignValT() const;

private:
  // A cache representing if we've fully checked the various comparison category
  // types stored in ASTContext. The bit-index corresponds to the integer value
  // of a ComparisonCategoryType enumerator.
  llvm::SmallBitVector FullyCheckedComparisonCategories;

  ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl, CXXScopeSpec &SS, ParsedType TemplateTypeTy, IdentifierInfo *MemberOrBase);

public:
  enum class ComparisonCategoryUsage {
    /// The '<=>' operator was used in an expression and a builtin operator
    /// was selected.
    OperatorInExpression,
    /// A defaulted 'operator<=>' needed the comparison category. This
    /// typically only applies to 'std::strong_ordering', due to the implicit
    /// fallback return value.
    DefaultedOperator,
  };

  /// Lookup the specified comparison category types in the standard
  /// library, and check the VarDecls possibly returned by the operator<=>
  /// builtins for that type.
  ///
  /// \return The type of the comparison category type corresponding to the
  /// specified Kind, or a null type if an error occurs
  QualType CheckComparisonCategoryType(ComparisonCategoryType Kind, SourceLocation Loc, ComparisonCategoryUsage Usage);

  /// Tests whether Ty is an instance of std::initializer_list and, if
  /// it is and Element is not NULL, assigns the element type to Element.
  bool isStdInitializerList(QualType Ty, QualType *Element);

  /// Looks for the std::initializer_list template and instantiates it
  /// with Element, or emits an error if it's not found.
  ///
  /// \returns The instantiated template, or null on error.
  QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);
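  // Illustrative sketch (hedged): querying the std::initializer_list helpers
  // declared above. The names 'SemaRef', 'ParamTy', and 'Loc' are
  // hypothetical and used only for illustration.
  //
  //   QualType Element;
  //   if (SemaRef.isStdInitializerList(ParamTy, &Element)) {
  //     // ParamTy is std::initializer_list<Element>; for example, rebuild it
  //     // after substituting the element type:
  //     QualType Rebuilt = SemaRef.BuildStdInitializerList(Element, Loc);
  //   }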
  /// Determine whether Ctor is an initializer-list constructor, as
  /// defined in [dcl.init.list]p2.
  bool isInitListConstructor(const FunctionDecl *Ctor);

  Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc, SourceLocation NamespcLoc, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *NamespcName, const ParsedAttributesView &AttrList);
  void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);

  Decl *ActOnNamespaceAliasDef(Scope *CurScope, SourceLocation NamespaceLoc, SourceLocation AliasLoc, IdentifierInfo *Alias, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *Ident);

  void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);

  bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target, const LookupResult &PreviousDecls, UsingShadowDecl *&PrevShadow);
  UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD, NamedDecl *Target, UsingShadowDecl *PrevDecl);

  bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc, bool HasTypenameKeyword, const CXXScopeSpec &SS, SourceLocation NameLoc, const LookupResult &Previous);
  bool CheckUsingDeclQualifier(SourceLocation UsingLoc, bool HasTypename, const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, SourceLocation NameLoc);

  NamedDecl *BuildUsingDeclaration(Scope *S, AccessSpecifier AS, SourceLocation UsingLoc, bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS, DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc, const ParsedAttributesView &AttrList, bool IsInstantiation);
  NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom, ArrayRef<NamedDecl *> Expansions);

  bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);

  /// Given a derived-class using shadow declaration for a constructor and the
  /// corresponding base class constructor, find or create the implicit
  /// synthesized derived class constructor to use for this initialization.
  CXXConstructorDecl *findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor, ConstructorUsingShadowDecl *DerivedShadow);

  Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS, SourceLocation UsingLoc, SourceLocation TypenameLoc, CXXScopeSpec &SS, UnqualifiedId &Name, SourceLocation EllipsisLoc, const ParsedAttributesView &AttrList);
  Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS, MultiTemplateParamsArg TemplateParams, SourceLocation UsingLoc, UnqualifiedId &Name, const ParsedAttributesView &AttrList, TypeResult Type, Decl *DeclFromDeclSpec);

  /// BuildCXXConstructExpr - Creates a complete call to a constructor,
  /// including handling of its default argument expressions.
  ///
  /// \param ConstructKind - a CXXConstructExpr::ConstructionKind
  ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl, CXXConstructorDecl *Constructor, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange);

  /// Build a CXXConstructExpr whose constructor has already been resolved if
  /// it denotes an inherited constructor.
  ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange);

  // FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
  // the constructor can be elidable?
ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field); /// Instantiate or parse a C++ default argument expression as necessary. /// Return true on error. bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating /// the default expr if needed. ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// FinalizeVarWithDestructor - Prepare for calling destructor on the /// constructed variable. void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType); /// Helper class that collects exception specifications for /// implicitly-declared special member functions. class ImplicitExceptionSpecification { // Pointer to allow copying Sema *Self; // We order exception specifications thus: // noexcept is the most restrictive, but is only used in C++11. // throw() comes next. // Then a throw(collected exceptions) // Finally no specification, which is expressed as noexcept(false). // throw(...) is used instead if any called function uses it. ExceptionSpecificationType ComputedEST; llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen; SmallVector<QualType, 4> Exceptions; void ClearExceptions() { ExceptionsSeen.clear(); Exceptions.clear(); } public: explicit ImplicitExceptionSpecification(Sema &Self) : Self(&Self), ComputedEST(EST_BasicNoexcept) { if (!Self.getLangOpts().CPlusPlus11) ComputedEST = EST_DynamicNone; } /// Get the computed exception specification type. ExceptionSpecificationType getExceptionSpecType() const { assert(!isComputedNoexcept(ComputedEST) && "noexcept(expr) should not be a possible result"); return ComputedEST; } /// The number of exceptions in the exception specification. unsigned size() const { return Exceptions.size(); } /// The set of exceptions in the exception specification. const QualType *data() const { return Exceptions.data(); } /// Integrate another called method into the collected data. void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method); /// Integrate an invoked expression into the collected data. void CalledExpr(Expr *E) { CalledStmt(E); } /// Integrate an invoked statement into the collected data. void CalledStmt(Stmt *S); /// Overwrite an EPI's exception specification with this /// computed exception specification. FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const { FunctionProtoType::ExceptionSpecInfo ESI; ESI.Type = getExceptionSpecType(); if (ESI.Type == EST_Dynamic) { ESI.Exceptions = Exceptions; } else if (ESI.Type == EST_None) { /// C++11 [except.spec]p14: /// The exception-specification is noexcept(false) if the set of /// potential exceptions of the special member function contains "any" ESI.Type = EST_NoexceptFalse; ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(), tok::kw_false).get(); } return ESI; } }; /// Evaluate the implicit exception specification for a defaulted /// special member function. void EvaluateImplicitExceptionSpec(SourceLocation Loc, FunctionDecl *FD); /// Check the given noexcept-specifier, convert its expression, and compute /// the appropriate ExceptionSpecificationType. 
ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr, ExceptionSpecificationType &EST); /// Check the given exception-specification and update the /// exception specification information with the results. void checkExceptionSpecification(bool IsTopLevel, ExceptionSpecificationType EST, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr, SmallVectorImpl<QualType> &Exceptions, FunctionProtoType::ExceptionSpecInfo &ESI); /// Determine if we're in a case where we need to (incorrectly) eagerly /// parse an exception specification to work around a libstdc++ bug. bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D); /// Add an exception-specification to the given member function /// (or member function template). The exception-specification was parsed /// after the method itself was declared. void actOnDelayedExceptionSpecification(Decl *Method, ExceptionSpecificationType EST, SourceRange SpecificationRange, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr); class InheritedConstructorInfo; /// Determine if a special member function should have a deleted /// definition when it is defaulted. bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM, InheritedConstructorInfo *ICI = nullptr, bool Diagnose = false); /// Produce notes explaining why a defaulted function was defined as deleted. void DiagnoseDeletedDefaultedFunction(FunctionDecl *FD); /// Declare the implicit default constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// default constructor will be added. /// /// \returns The implicitly-declared default constructor. CXXConstructorDecl *DeclareImplicitDefaultConstructor( CXXRecordDecl *ClassDecl); /// DefineImplicitDefaultConstructor - Checks for feasibility of /// defining this constructor as the default constructor. void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit destructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// destructor will be added. /// /// \returns The implicitly-declared destructor. CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl); /// DefineImplicitDestructor - Checks for feasibility of /// defining this destructor as the default destructor. void DefineImplicitDestructor(SourceLocation CurrentLocation, CXXDestructorDecl *Destructor); /// Build an exception spec for destructors that don't have one. /// /// C++11 says that user-defined destructors with no exception spec get one /// that looks as if the destructor was implicitly declared. void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor); /// Define the specified inheriting constructor. void DefineInheritingConstructor(SourceLocation UseLoc, CXXConstructorDecl *Constructor); /// Declare the implicit copy constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy constructor will be added. /// /// \returns The implicitly-declared copy constructor. CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitCopyConstructor - Checks for feasibility of /// defining this constructor as the copy constructor. void DefineImplicitCopyConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit move constructor for the given class. 
/// /// \param ClassDecl The Class declaration into which the implicit /// move constructor will be added. /// /// \returns The implicitly-declared move constructor, or NULL if it wasn't /// declared. CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitMoveConstructor - Checks for feasibility of /// defining this constructor as the move constructor. void DefineImplicitMoveConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit copy assignment operator for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy assignment operator will be added. /// /// \returns The implicitly-declared copy assignment operator. CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared copy assignment operator. void DefineImplicitCopyAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Declare the implicit move assignment operator for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move assignment operator will be added. /// /// \returns The implicitly-declared move assignment operator, or NULL if it /// wasn't declared. CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared move assignment operator. void DefineImplicitMoveAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Force the declaration of any implicitly-declared members of this /// class. void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class); /// Check a completed declaration of an implicit special member. void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD); /// Determine whether the given function is an implicitly-deleted /// special member function. bool isImplicitlyDeleted(FunctionDecl *FD); /// Check whether 'this' shows up in the type of a static member /// function after the (naturally empty) cv-qualifier-seq would be. /// /// \returns true if an error occurred. bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method); /// Whether this' shows up in the exception specification of a static /// member function. bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method); /// Check whether 'this' shows up in the attributes of the given /// static member function. /// /// \returns true if an error occurred. bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method); /// MaybeBindToTemporary - If the passed in expression has a record type with /// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise /// it simply returns the passed in expression. ExprResult MaybeBindToTemporary(Expr *E); /// Wrap the expression in a ConstantExpr if it is a potential immediate /// invocation. 
ExprResult CheckForImmediateInvocation(ExprResult E, FunctionDecl *Decl); bool CompleteConstructorCall(CXXConstructorDecl *Constructor, QualType DeclInitType, MultiExprArg ArgsPtr, SourceLocation Loc, SmallVectorImpl<Expr *> &ConvertedArgs, bool AllowExplicit = false, bool IsListInitialization = false); ParsedType getInheritingConstructorName(CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo &Name); ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, bool EnteringContext); ParsedType getDestructorName(SourceLocation TildeLoc, IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, ParsedType ObjectType, bool EnteringContext); ParsedType getDestructorTypeForDecltype(const DeclSpec &DS, ParsedType ObjectType); // Checks that reinterpret casts don't have undefined behavior. void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType, bool IsDereference, SourceRange Range); /// ActOnCXXNamedCast - Parse /// {dynamic,static,reinterpret,const,addrspace}_cast's. ExprResult ActOnCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, SourceLocation LAngleBracketLoc, Declarator &D, SourceLocation RAngleBracketLoc, SourceLocation LParenLoc, Expr *E, SourceLocation RParenLoc); ExprResult BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, TypeSourceInfo *Ty, Expr *E, SourceRange AngleBrackets, SourceRange Parens); ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl, ExprResult Operand, SourceLocation RParenLoc); ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI, Expr *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXTypeid - Parse typeid( something ). ExprResult ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXUuidof - Parse __uuidof( something ). ExprResult ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); /// Handle a C++1z fold-expression: ( expr op ... op expr ). ExprResult ActOnCXXFoldExpr(Scope *S, SourceLocation LParenLoc, Expr *LHS, tok::TokenKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc); ExprResult BuildCXXFoldExpr(UnresolvedLookupExpr *Callee, SourceLocation LParenLoc, Expr *LHS, BinaryOperatorKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc, Optional<unsigned> NumExpansions); ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc, BinaryOperatorKind Operator); //// ActOnCXXThis - Parse 'this' pointer. ExprResult ActOnCXXThis(SourceLocation loc); /// Build a CXXThisExpr and mark it referenced in the current context. Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit); void MarkThisReferenced(CXXThisExpr *This); /// Try to retrieve the type of the 'this' pointer. /// /// \returns The type of 'this', if possible. Otherwise, returns a NULL type. 
QualType getCurrentThisType(); /// When non-NULL, the C++ 'this' expression is allowed despite the /// current context not being a non-static member function. In such cases, /// this provides the type used for 'this'. QualType CXXThisTypeOverride; /// RAII object used to temporarily allow the C++ 'this' expression /// to be used, with the given qualifiers on the current class type. class CXXThisScopeRAII { Sema &S; QualType OldCXXThisTypeOverride; bool Enabled; public: /// Introduce a new scope where 'this' may be allowed (when enabled), /// using the given declaration (which is either a class template or a /// class) along with the given qualifiers. /// along with the qualifiers placed on '*this'. CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals, bool Enabled = true); ~CXXThisScopeRAII(); }; /// Make sure the value of 'this' is actually available in the current /// context, if it is a potentially evaluated context. /// /// \param Loc The location at which the capture of 'this' occurs. /// /// \param Explicit Whether 'this' is explicitly captured in a lambda /// capture list. /// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// 'this' that may or may not be used in certain specializations of /// a nested generic lambda (depending on whether the name resolves to /// a non-static member function or a static function). /// \return returns 'true' if failed, 'false' if success. bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false, bool BuildAndDiagnose = true, const unsigned *const FunctionScopeIndexToStopAt = nullptr, bool ByCopy = false); /// Determine whether the given type is the type of *this that is used /// outside of the body of a member function for a type that is currently /// being defined. bool isThisOutsideMemberFunctionBody(QualType BaseType); /// ActOnCXXBoolLiteral - Parse {true,false} literals. ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); /// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals. ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); ExprResult ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs, SourceLocation AtLoc, SourceLocation RParen); /// ActOnCXXNullPtrLiteral - Parse 'nullptr'. ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc); //// ActOnCXXThrow - Parse throw expressions. ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr); ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex, bool IsThrownVarInScope); bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E); /// ActOnCXXTypeConstructExpr - Parse construction of a specified type. /// Can be interpreted either as function-style casting ("int(x)") /// or class type construction ("ClassType(x,y,z)") /// or creation of a value-initialized type ("int()"). ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep, SourceLocation LParenOrBraceLoc, MultiExprArg Exprs, SourceLocation RParenOrBraceLoc, bool ListInitialization); ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc, bool ListInitialization); /// ActOnCXXNew - Parsed a C++ 'new' expression. 
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, Declarator &D, Expr *Initializer); ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, QualType AllocType, TypeSourceInfo *AllocTypeInfo, Optional<Expr *> ArraySize, SourceRange DirectInitRange, Expr *Initializer); /// Determine whether \p FD is an aligned allocation or deallocation /// function that is unavailable. bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const; /// Produce diagnostics if \p FD is an aligned allocation or deallocation /// function that is unavailable. void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD, SourceLocation Loc); bool CheckAllocatedType(QualType AllocType, SourceLocation Loc, SourceRange R); /// The scope in which to find allocation functions. enum AllocationFunctionScope { /// Only look for allocation functions in the global scope. AFS_Global, /// Only look for allocation functions in the scope of the /// allocated class. AFS_Class, /// Look for allocation functions in both the global scope /// and in the scope of the allocated class. AFS_Both }; /// Finds the overloads of operator new and delete that are appropriate /// for the allocation. bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range, AllocationFunctionScope NewScope, AllocationFunctionScope DeleteScope, QualType AllocType, bool IsArray, bool &PassAlignment, MultiExprArg PlaceArgs, FunctionDecl *&OperatorNew, FunctionDecl *&OperatorDelete, bool Diagnose = true); void DeclareGlobalNewDelete(); void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return, ArrayRef<QualType> Params); bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD, DeclarationName Name, FunctionDecl* &Operator, bool Diagnose = true); FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc, bool CanProvideSize, bool Overaligned, DeclarationName Name); FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc, CXXRecordDecl *RD); /// ActOnCXXDelete - Parsed a C++ 'delete' expression ExprResult ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal, bool ArrayForm, Expr *Operand); void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc, bool IsDelete, bool CallCanBeVirtual, bool WarnOnNonAbstractTypes, SourceLocation DtorLoc); ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen, Expr *Operand, SourceLocation RParen); ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand, SourceLocation RParen); /// Parsed one of the type trait support pseudo-functions. ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<ParsedType> Args, SourceLocation RParenLoc); ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<TypeSourceInfo *> Args, SourceLocation RParenLoc); /// ActOnArrayTypeTrait - Parsed one of the binary type trait support /// pseudo-functions. ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, ParsedType LhsTy, Expr *DimExpr, SourceLocation RParen); ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, TypeSourceInfo *TSInfo, Expr *DimExpr, SourceLocation RParen); /// ActOnExpressionTrait - Parsed one of the unary type trait support /// pseudo-functions. 
ExprResult ActOnExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult BuildExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, ParsedType &ObjectType, bool &MayBePseudoDestructor); ExprResult BuildPseudoDestructorExpr(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, const CXXScopeSpec &SS, TypeSourceInfo *ScopeType, SourceLocation CCLoc, SourceLocation TildeLoc, PseudoDestructorTypeStorage DestroyedType); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, UnqualifiedId &FirstTypeName, SourceLocation CCLoc, SourceLocation TildeLoc, UnqualifiedId &SecondTypeName); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, SourceLocation TildeLoc, const DeclSpec& DS); /// MaybeCreateExprWithCleanups - If the current full-expression /// requires any cleanups, surround it with a ExprWithCleanups node. /// Otherwise, just returns the passed-in expression. Expr *MaybeCreateExprWithCleanups(Expr *SubExpr); Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt); ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr); MaterializeTemporaryExpr * CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary, bool BoundToLvalueReference); ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) { return ActOnFinishFullExpr( Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue); } ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC, bool DiscardedValue, bool IsConstexpr = false); StmtResult ActOnFinishFullStmt(Stmt *Stmt); // Marks SS invalid if it represents an incomplete type. bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC); DeclContext *computeDeclContext(QualType T); DeclContext *computeDeclContext(const CXXScopeSpec &SS, bool EnteringContext = false); bool isDependentScopeSpecifier(const CXXScopeSpec &SS); CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS); /// The parser has parsed a global nested-name-specifier '::'. /// /// \param CCLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS); /// The parser has parsed a '__super' nested-name-specifier. /// /// \param SuperLoc The location of the '__super' keyword. /// /// \param ColonColonLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc, SourceLocation ColonColonLoc, CXXScopeSpec &SS); bool isAcceptableNestedNameSpecifier(const NamedDecl *SD, bool *CanCorrect = nullptr); NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS); /// Keeps information about an identifier in a nested-name-spec. /// struct NestedNameSpecInfo { /// The type of the object, if we're parsing nested-name-specifier in /// a member access expression. ParsedType ObjectType; /// The identifier preceding the '::'. IdentifierInfo *Identifier; /// The location of the identifier. 
SourceLocation IdentifierLoc; /// The location of the '::'. SourceLocation CCLoc; /// Creates info object for the most typical case. NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType()) : ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, QualType ObjectType) : ObjectType(ParsedType::make(ObjectType)), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } }; bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo); bool BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, NamedDecl *ScopeLookupResult, bool ErrorRecoveryLookup, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); /// The parser has parsed a nested-name-specifier 'identifier::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param IdInfo Parser information about an identifier in the /// nested-name-spec. /// /// \param EnteringContext Whether we're entering the context nominated by /// this nested-name-specifier. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param ErrorRecoveryLookup If true, then this method is called to improve /// error recovery. In this case do not emit error message. /// /// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':' /// are allowed. The bool value pointed by this parameter is set to 'true' /// if the identifier is treated as if it was followed by ':', not '::'. /// /// \param OnlyNamespace If true, only considers namespaces in lookup. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, bool ErrorRecoveryLookup = false, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); ExprResult ActOnDecltypeExpression(Expr *E); bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS, const DeclSpec &DS, SourceLocation ColonColonLoc); bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo, bool EnteringContext); /// The parser has parsed a nested-name-specifier /// 'template[opt] template-name < template-args >::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param TemplateKWLoc the location of the 'template' keyword, if any. /// \param TemplateName the template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). /// \param CCLoc The location of the '::'. /// /// \param EnteringContext Whether we're entering the context of the /// nested-name-specifier. /// /// /// \returns true if an error occurred, false otherwise. 
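  /// For illustration only (not part of the original header): the
  /// nested-name-specifier handled here ends in a template-id, as in
  /// \code
  ///   template <typename T> struct Outer { struct Inner {}; };
  ///   Outer<int>::Inner x;   // 'Outer<int>::' is the parsed specifier
  /// \endcode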
bool ActOnCXXNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, SourceLocation CCLoc, bool EnteringContext); /// Given a C++ nested-name-specifier, produce an annotation value /// that the parser can use later to reconstruct the given /// nested-name-specifier. /// /// \param SS A nested-name-specifier. /// /// \returns A pointer containing all of the information in the /// nested-name-specifier \p SS. void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS); /// Given an annotation pointer for a nested-name-specifier, restore /// the nested-name-specifier structure. /// /// \param Annotation The annotation pointer, produced by /// \c SaveNestedNameSpecifierAnnotation(). /// /// \param AnnotationRange The source range corresponding to the annotation. /// /// \param SS The nested-name-specifier that will be updated with the contents /// of the annotation pointer. void RestoreNestedNameSpecifierAnnotation(void *Annotation, SourceRange AnnotationRange, CXXScopeSpec &SS); bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global /// scope or nested-name-specifier) is parsed, part of a declarator-id. /// After this method is called, according to [C++ 3.4.3p3], names should be /// looked up in the declarator-id's scope, until the declarator is parsed and /// ActOnCXXExitDeclaratorScope is called. /// The 'SS' should be a non-empty valid CXXScopeSpec. bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS); /// ActOnCXXExitDeclaratorScope - Called when a declarator that previously /// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same /// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well. /// Used to indicate that names should revert to being looked up in the /// defining scope. void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an /// initializer for the declaration 'Dcl'. /// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a /// static data member of class X, names should be looked up in the scope of /// class X. void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl); /// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an /// initializer for the declaration 'Dcl'. void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl); /// Create a new lambda closure type. CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange, TypeSourceInfo *Info, bool KnownDependent, LambdaCaptureDefault CaptureDefault); /// Start the definition of a lambda expression. CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange, TypeSourceInfo *MethodType, SourceLocation EndLoc, ArrayRef<ParmVarDecl *> Params, ConstexprSpecKind ConstexprKind, Expr *TrailingRequiresClause); /// Number lambda for linkage purposes if necessary. void handleLambdaNumbering( CXXRecordDecl *Class, CXXMethodDecl *Method, Optional<std::tuple<bool, unsigned, unsigned, Decl *>> Mangling = None); /// Endow the lambda scope info with the relevant properties. 
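  /// For illustration only (not part of the original header): for a lambda
  /// such as
  /// \code
  ///   void demo() {                     // 'demo' is a hypothetical function
  ///     int x = 0;
  ///     auto f = [=, &x](int n) mutable -> int { return x += n; };
  ///   }
  /// \endcode
  /// the capture-default ('='), the explicit capture '&x', the parameter list,
  /// the trailing return type and 'mutable' are the pieces recorded on the
  /// lambda scope info.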
void buildLambdaScope(sema::LambdaScopeInfo *LSI, CXXMethodDecl *CallOperator, SourceRange IntroducerRange, LambdaCaptureDefault CaptureDefault, SourceLocation CaptureDefaultLoc, bool ExplicitParams, bool ExplicitResultType, bool Mutable); /// Perform initialization analysis of the init-capture and perform /// any implicit conversions such as an lvalue-to-rvalue conversion if /// not being used to initialize a reference. ParsedType actOnLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc, IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) { return ParsedType::make(buildLambdaInitCaptureInitialization( Loc, ByRef, EllipsisLoc, None, Id, InitKind != LambdaCaptureInitKind::CopyInit, Init)); } QualType buildLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit, Expr *&Init); /// Create a dummy variable within the declcontext of the lambda's /// call operator, for name lookup purposes for a lambda init capture. /// /// CodeGen handles emission of lambda captures, ignoring these dummy /// variables appropriately. VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc, QualType InitCaptureType, SourceLocation EllipsisLoc, IdentifierInfo *Id, unsigned InitStyle, Expr *Init); /// Add an init-capture to a lambda scope. void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var); /// Note that we have finished the explicit captures for the /// given lambda. void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI); /// \brief This is called after parsing the explicit template parameter list /// on a lambda (if it exists) in C++2a. void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc, ArrayRef<NamedDecl *> TParams, SourceLocation RAngleLoc, ExprResult RequiresClause); /// Introduce the lambda parameters into scope. void addLambdaParameters( ArrayRef<LambdaIntroducer::LambdaCapture> Captures, CXXMethodDecl *CallOperator, Scope *CurScope); /// Deduce a block or lambda's return type based on the return /// statements present in the body. void deduceClosureReturnType(sema::CapturingScopeInfo &CSI); /// ActOnStartOfLambdaDefinition - This is called just before we start /// parsing the body of a lambda; it analyzes the explicit captures and /// arguments, and sets up various data-structures for the body of the /// lambda. void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, Declarator &ParamInfo, Scope *CurScope); /// ActOnLambdaError - If there is an error parsing a lambda, this callback /// is invoked to pop the information about the lambda. void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope, bool IsInstantiation = false); /// ActOnLambdaExpr - This is called when the body of a lambda expression /// was successfully completed. ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, Scope *CurScope); /// Does copying/destroying the captured variable have side effects? bool CaptureHasSideEffects(const sema::Capture &From); /// Diagnose if an explicit lambda capture is unused. Returns true if a /// diagnostic is emitted. bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange, const sema::Capture &From); /// Build a FieldDecl suitable to hold the given capture. FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture); /// Initialize the given capture with a suitable expression. 
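  /// For illustration only (not part of the original header): captures that
  /// need nontrivial initialization include init-captures such as
  /// \code
  ///   auto g = [v = make_value()] { return v; };   // 'make_value' is hypothetical
  /// \endcode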
ExprResult BuildCaptureInit(const sema::Capture &Capture, SourceLocation ImplicitCaptureLoc, bool IsOpenMPMapping = false); /// Complete a lambda-expression having processed and attached the /// lambda body. ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc, sema::LambdaScopeInfo *LSI); /// Get the return type to use for a lambda's conversion function(s) to /// function pointer type, given the type of the call operator. QualType getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType, CallingConv CC); /// Define the "body" of the conversion from a lambda object to a /// function pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToFunctionPointerConversion( SourceLocation CurrentLoc, CXXConversionDecl *Conv); /// Define the "body" of the conversion from a lambda object to a /// block pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc, CXXConversionDecl *Conv); ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation, SourceLocation ConvLocation, CXXConversionDecl *Conv, Expr *Src); /// Check whether the given expression is a valid constraint expression. /// A diagnostic is emitted if it is not, false is returned, and /// PossibleNonPrimary will be set to true if the failure might be due to a /// non-primary expression being used as an atomic constraint. bool CheckConstraintExpression(const Expr *CE, Token NextToken = Token(), bool *PossibleNonPrimary = nullptr, bool IsTrailingRequiresClause = false); private: /// Caches pairs of template-like decls whose associated constraints were /// checked for subsumption and whether or not the first's constraints did in /// fact subsume the second's. llvm::DenseMap<std::pair<NamedDecl *, NamedDecl *>, bool> SubsumptionCache; /// Caches the normalized associated constraints of declarations (concepts or /// constrained declarations). If an error occurred while normalizing the /// associated constraints of the template or concept, nullptr will be cached /// here. llvm::DenseMap<NamedDecl *, NormalizedConstraint *> NormalizationCache; llvm::ContextualFoldingSet<ConstraintSatisfaction, const ASTContext &> SatisfactionCache; public: const NormalizedConstraint * getNormalizedAssociatedConstraints( NamedDecl *ConstrainedDecl, ArrayRef<const Expr *> AssociatedConstraints); /// \brief Check whether the given declaration's associated constraints are /// at least as constrained than another declaration's according to the /// partial ordering of constraints. /// /// \param Result If no error occurred, receives the result of true if D1 is /// at least constrained than D2, and false otherwise. /// /// \returns true if an error occurred, false otherwise. bool IsAtLeastAsConstrained(NamedDecl *D1, ArrayRef<const Expr *> AC1, NamedDecl *D2, ArrayRef<const Expr *> AC2, bool &Result); /// If D1 was not at least as constrained as D2, but would've been if a pair /// of atomic constraints involved had been declared in a concept and not /// repeated in two separate places in code. 
/// \returns true if such a diagnostic was emitted, false otherwise. bool MaybeEmitAmbiguousAtomicConstraintsDiagnostic(NamedDecl *D1, ArrayRef<const Expr *> AC1, NamedDecl *D2, ArrayRef<const Expr *> AC2); /// \brief Check whether the given list of constraint expressions are /// satisfied (as if in a 'conjunction') given template arguments. /// \param Template the template-like entity that triggered the constraints /// check (either a concept or a constrained entity). /// \param ConstraintExprs a list of constraint expressions, treated as if /// they were 'AND'ed together. /// \param TemplateArgs the list of template arguments to substitute into the /// constraint expression. /// \param TemplateIDRange The source range of the template id that /// caused the constraints check. /// \param Satisfaction if true is returned, will contain details of the /// satisfaction, with enough information to diagnose an unsatisfied /// expression. /// \returns true if an error occurred and satisfaction could not be checked, /// false otherwise. bool CheckConstraintSatisfaction( const NamedDecl *Template, ArrayRef<const Expr *> ConstraintExprs, ArrayRef<TemplateArgument> TemplateArgs, SourceRange TemplateIDRange, ConstraintSatisfaction &Satisfaction); /// \brief Check whether the given non-dependent constraint expression is /// satisfied. Returns false and updates Satisfaction with the satisfaction /// verdict if successful, emits a diagnostic and returns true if an error /// occured and satisfaction could not be determined. /// /// \returns true if an error occurred, false otherwise. bool CheckConstraintSatisfaction(const Expr *ConstraintExpr, ConstraintSatisfaction &Satisfaction); /// Check whether the given function decl's trailing requires clause is /// satisfied, if any. Returns false and updates Satisfaction with the /// satisfaction verdict if successful, emits a diagnostic and returns true if /// an error occured and satisfaction could not be determined. /// /// \returns true if an error occurred, false otherwise. bool CheckFunctionConstraints(const FunctionDecl *FD, ConstraintSatisfaction &Satisfaction, SourceLocation UsageLoc = SourceLocation()); /// \brief Ensure that the given template arguments satisfy the constraints /// associated with the given template, emitting a diagnostic if they do not. /// /// \param Template The template to which the template arguments are being /// provided. /// /// \param TemplateArgs The converted, canonicalized template arguments. /// /// \param TemplateIDRange The source range of the template id that /// caused the constraints check. /// /// \returns true if the constrains are not satisfied or could not be checked /// for satisfaction, false if the constraints are satisfied. bool EnsureTemplateArgumentListConstraints(TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange TemplateIDRange); /// \brief Emit diagnostics explaining why a constraint expression was deemed /// unsatisfied. /// \param First whether this is the first time an unsatisfied constraint is /// diagnosed for this error. void DiagnoseUnsatisfiedConstraint(const ConstraintSatisfaction &Satisfaction, bool First = true); /// \brief Emit diagnostics explaining why a constraint expression was deemed /// unsatisfied. void DiagnoseUnsatisfiedConstraint(const ASTConstraintSatisfaction &Satisfaction, bool First = true); // ParseObjCStringLiteral - Parse Objective-C string literals. 
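  // For illustration only (not part of the original header): adjacent
  // Objective-C string literals such as
  //   @"Hello, " @"world"
  // arrive here as the Strings array, with AtLocs holding each '@' location.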
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs, ArrayRef<Expr *> Strings); ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S); /// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the /// numeric literal expression. Type of the expression will be "NSNumber *" /// or "id" if NSNumber is unavailable. ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number); ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc, bool Value); ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements); /// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the /// '@' prefixed parenthesized expression. The type of the expression will /// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type /// of ValueType, which is allowed to be a built-in numeric type, "char *", /// "const char *" or C structure with attribute 'objc_boxable'. ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr); ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr, Expr *IndexExpr, ObjCMethodDecl *getterMethod, ObjCMethodDecl *setterMethod); ExprResult BuildObjCDictionaryLiteral(SourceRange SR, MutableArrayRef<ObjCDictionaryElement> Elements); ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc, TypeSourceInfo *EncodedTypeInfo, SourceLocation RParenLoc); ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl, CXXConversionDecl *Method, bool HadMultipleCandidates); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc, SourceLocation EncodeLoc, SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc); /// ParseObjCSelectorExpression - Build selector expression for \@selector ExprResult ParseObjCSelectorExpression(Selector Sel, SourceLocation AtLoc, SourceLocation SelLoc, SourceLocation LParenLoc, SourceLocation RParenLoc, bool WarnMultipleSelectors); /// ParseObjCProtocolExpression - Build protocol expression for \@protocol ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName, SourceLocation AtLoc, SourceLocation ProtoLoc, SourceLocation LParenLoc, SourceLocation ProtoIdLoc, SourceLocation RParenLoc); //===--------------------------------------------------------------------===// // C++ Declarations // Decl *ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc, Expr *LangStr, SourceLocation LBraceLoc); Decl *ActOnFinishLinkageSpecification(Scope *S, Decl *LinkageSpec, SourceLocation RBraceLoc); //===--------------------------------------------------------------------===// // C++ Classes // CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS); bool isCurrentClassName(const IdentifierInfo &II, Scope *S, const CXXScopeSpec *SS = nullptr); bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS); bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc, SourceLocation ColonLoc, const ParsedAttributesView &Attrs); NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, Expr *BitfieldWidth, const VirtSpecifiers &VS, InClassInitStyle InitStyle); void ActOnStartCXXInClassMemberInitializer(); void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl, SourceLocation EqualLoc, Expr *Init); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, SourceLocation LParenLoc, ArrayRef<Expr *> Args, SourceLocation 
RParenLoc, SourceLocation EllipsisLoc); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *InitList, SourceLocation EllipsisLoc); MemInitResult BuildMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *Init, SourceLocation EllipsisLoc); MemInitResult BuildMemberInitializer(ValueDecl *Member, Expr *Init, SourceLocation IdLoc); MemInitResult BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo, Expr *Init, CXXRecordDecl *ClassDecl, SourceLocation EllipsisLoc); MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init, CXXRecordDecl *ClassDecl); bool SetDelegatingInitializer(CXXConstructorDecl *Constructor, CXXCtorInitializer *Initializer); bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors, ArrayRef<CXXCtorInitializer *> Initializers = None); void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation); /// MarkBaseAndMemberDestructorsReferenced - Given a record decl, /// mark all the non-trivial destructors of its members and bases as /// referenced. void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc, CXXRecordDecl *Record); /// Mark destructors of virtual bases of this class referenced. In the Itanium /// C++ ABI, this is done when emitting a destructor for any non-abstract /// class. In the Microsoft C++ ABI, this is done any time a class's /// destructor is referenced. void MarkVirtualBaseDestructorsReferenced( SourceLocation Location, CXXRecordDecl *ClassDecl, llvm::SmallPtrSetImpl<const RecordType *> *DirectVirtualBases = nullptr); /// Do semantic checks to allow the complete destructor variant to be emitted /// when the destructor is defined in another translation unit. In the Itanium /// C++ ABI, destructor variants are emitted together. In the MS C++ ABI, they /// can be emitted in separate TUs. To emit the complete variant, run a subset /// of the checks performed when emitting a regular destructor. void CheckCompleteDestructorVariant(SourceLocation CurrentLocation, CXXDestructorDecl *Dtor); /// The list of classes whose vtables have been used within /// this translation unit, and the source locations at which the /// first use occurred. typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse; /// The list of vtables that are required but have not yet been /// materialized. SmallVector<VTableUse, 16> VTableUses; /// The set of classes whose vtables have been used within /// this translation unit, and a bit that will be true if the vtable is /// required to be emitted (otherwise, it should be emitted only if needed /// by code generation). llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed; /// Load any externally-stored vtable uses. void LoadExternalVTableUses(); /// Note that the vtable for the given class was used at the /// given location. void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class, bool DefinitionRequired = false); /// Mark the exception specifications of all virtual member functions /// in the given class as needed. void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc, const CXXRecordDecl *RD); /// MarkVirtualMembersReferenced - Will mark all members of the given /// CXXRecordDecl referenced. 
void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD, bool ConstexprOnly = false); /// Define all of the vtables that have been used in this /// translation unit and reference any virtual members used by those /// vtables. /// /// \returns true if any work was done, false otherwise. bool DefineUsedVTables(); void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl); void ActOnMemInitializers(Decl *ConstructorDecl, SourceLocation ColonLoc, ArrayRef<CXXCtorInitializer*> MemInits, bool AnyErrors); /// Check class-level dllimport/dllexport attribute. The caller must /// ensure that referenceDLLExportedClassMethods is called some point later /// when all outer classes of Class are complete. void checkClassLevelDLLAttribute(CXXRecordDecl *Class); void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class); void referenceDLLExportedClassMethods(); void propagateDLLAttrToBaseClassTemplate( CXXRecordDecl *Class, Attr *ClassAttr, ClassTemplateSpecializationDecl *BaseTemplateSpec, SourceLocation BaseLoc); /// Add gsl::Pointer attribute to std::container::iterator /// \param ND The declaration that introduces the name /// std::container::iterator. \param UnderlyingRecord The record named by ND. void inferGslPointerAttribute(NamedDecl *ND, CXXRecordDecl *UnderlyingRecord); /// Add [[gsl::Owner]] and [[gsl::Pointer]] attributes for std:: types. void inferGslOwnerPointerAttribute(CXXRecordDecl *Record); /// Add [[gsl::Pointer]] attributes for std:: types. void inferGslPointerAttribute(TypedefNameDecl *TD); void CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record); /// Check that the C++ class annoated with "trivial_abi" satisfies all the /// conditions that are needed for the attribute to have an effect. void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD); void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc, Decl *TagDecl, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); void ActOnFinishCXXMemberDecls(); void ActOnFinishCXXNonNestedClass(); void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param); unsigned ActOnReenterTemplateScope(Decl *Template, llvm::function_ref<Scope *()> EnterScope); void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param); void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnFinishDelayedMemberInitializers(Decl *Record); void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD, CachedTokens &Toks); void UnmarkAsLateParsedTemplate(FunctionDecl *FD); bool IsInsideALocalClassWithinATemplateFunction(); Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, Expr *AssertMessageExpr, SourceLocation RParenLoc); Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, StringLiteral *AssertMessageExpr, SourceLocation RParenLoc, bool Failed); FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart, SourceLocation FriendLoc, TypeSourceInfo *TSInfo); Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS, MultiTemplateParamsArg TemplateParams); NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParams); QualType CheckConstructorDeclarator(Declarator &D, QualType R, StorageClass& SC); void CheckConstructor(CXXConstructorDecl *Constructor); QualType 
CheckDestructorDeclarator(Declarator &D, QualType R, StorageClass& SC); bool CheckDestructor(CXXDestructorDecl *Destructor); void CheckConversionDeclarator(Declarator &D, QualType &R, StorageClass& SC); Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion); void CheckDeductionGuideDeclarator(Declarator &D, QualType &R, StorageClass &SC); void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD); void CheckExplicitlyDefaultedFunction(Scope *S, FunctionDecl *MD); bool CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM); void CheckDelayedMemberExceptionSpecs(); bool CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *MD, DefaultedComparisonKind DCK); void DeclareImplicitEqualityComparison(CXXRecordDecl *RD, FunctionDecl *Spaceship); void DefineDefaultedComparison(SourceLocation Loc, FunctionDecl *FD, DefaultedComparisonKind DCK); //===--------------------------------------------------------------------===// // C++ Derived Classes // /// ActOnBaseSpecifier - Parsed a base specifier CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class, SourceRange SpecifierRange, bool Virtual, AccessSpecifier Access, TypeSourceInfo *TInfo, SourceLocation EllipsisLoc); BaseResult ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange, ParsedAttributes &Attrs, bool Virtual, AccessSpecifier Access, ParsedType basetype, SourceLocation BaseLoc, SourceLocation EllipsisLoc); bool AttachBaseSpecifiers(CXXRecordDecl *Class, MutableArrayRef<CXXBaseSpecifier *> Bases); void ActOnBaseSpecifiers(Decl *ClassDecl, MutableArrayRef<CXXBaseSpecifier *> Bases); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base, CXXBasePaths &Paths); // FIXME: I don't like this name. void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, SourceLocation Loc, SourceRange Range, CXXCastPath *BasePath = nullptr, bool IgnoreAccess = false); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, unsigned InaccessibleBaseID, unsigned AmbiguousBaseConvID, SourceLocation Loc, SourceRange Range, DeclarationName Name, CXXCastPath *BasePath, bool IgnoreAccess = false); std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths); bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionReturnType - Checks whether the return types are /// covariant, according to C++ [class.virtual]p5. bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionExceptionSpec - Checks whether the exception /// spec is a subset of base spec. bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New, const CXXMethodDecl *Old); bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange); /// CheckOverrideControl - Check C++11 override control semantics. void CheckOverrideControl(NamedDecl *D); /// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was /// not used in the declaration of an overriding method. void DiagnoseAbsenceOfOverrideControl(NamedDecl *D, bool Inconsistent); /// CheckForFunctionMarkedFinal - Checks whether a virtual member function /// overrides a virtual member function marked 'final', according to /// C++11 [class.virtual]p4. 
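  /// For illustration only (not part of the original header):
  /// \code
  ///   struct B { virtual void f() final; };
  ///   struct D : B { void f(); };   // ill-formed: 'B::f' is marked 'final'
  /// \endcode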
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New, const CXXMethodDecl *Old); //===--------------------------------------------------------------------===// // C++ Access Control // enum AccessResult { AR_accessible, AR_inaccessible, AR_dependent, AR_delayed }; bool SetMemberAccessSpecifier(NamedDecl *MemberDecl, NamedDecl *PrevMemberDecl, AccessSpecifier LexicalAS); AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E, DeclAccessPair FoundDecl); AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E, DeclAccessPair FoundDecl); AccessResult CheckAllocationAccess(SourceLocation OperatorLoc, SourceRange PlacementRange, CXXRecordDecl *NamingClass, DeclAccessPair FoundDecl, bool Diagnose = true); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, bool IsCopyBindingRefToTemp = false); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, const PartialDiagnostic &PDiag); AccessResult CheckDestructorAccess(SourceLocation Loc, CXXDestructorDecl *Dtor, const PartialDiagnostic &PDiag, QualType objectType = QualType()); AccessResult CheckFriendAccess(NamedDecl *D); AccessResult CheckMemberAccess(SourceLocation UseLoc, CXXRecordDecl *NamingClass, DeclAccessPair Found); AccessResult CheckStructuredBindingMemberAccess(SourceLocation UseLoc, CXXRecordDecl *DecomposedClass, DeclAccessPair Field); AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr, Expr *ArgExpr, DeclAccessPair FoundDecl); AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr, DeclAccessPair FoundDecl); AccessResult CheckBaseClassAccess(SourceLocation AccessLoc, QualType Base, QualType Derived, const CXXBasePath &Path, unsigned DiagID, bool ForceCheck = false, bool ForceUnprivileged = false); void CheckLookupAccess(const LookupResult &R); bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass, QualType BaseType); bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass, DeclAccessPair Found, QualType ObjectType, SourceLocation Loc, const PartialDiagnostic &Diag); bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass, DeclAccessPair Found, QualType ObjectType) { return isMemberAccessibleForDeletion(NamingClass, Found, ObjectType, SourceLocation(), PDiag()); } void HandleDependentAccessCheck(const DependentDiagnostic &DD, const MultiLevelTemplateArgumentList &TemplateArgs); void PerformDependentDiagnostics(const DeclContext *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); /// When true, access checking violations are treated as SFINAE /// failures rather than hard errors. bool AccessCheckingSFINAE; enum AbstractDiagSelID { AbstractNone = -1, AbstractReturnType, AbstractParamType, AbstractVariableType, AbstractFieldType, AbstractIvarType, AbstractSynthesizedIvarType, AbstractArrayType }; bool isAbstractType(SourceLocation Loc, QualType T); bool RequireNonAbstractType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); template <typename... 
Ts> bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireNonAbstractType(Loc, T, Diagnoser); } void DiagnoseAbstractType(const CXXRecordDecl *RD); //===--------------------------------------------------------------------===// // C++ Overloaded Operators [C++ 13.5] // bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl); bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl); //===--------------------------------------------------------------------===// // C++ Templates [C++ 14] // void FilterAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true); bool hasAnyAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true, bool AllowNonTemplateFunctions = false); /// Try to interpret the lookup result D as a template-name. /// /// \param D A declaration found by name lookup. /// \param AllowFunctionTemplates Whether function templates should be /// considered valid results. /// \param AllowDependent Whether unresolved using declarations (that might /// name templates) should be considered valid results. static NamedDecl *getAsTemplateNameDecl(NamedDecl *D, bool AllowFunctionTemplates = true, bool AllowDependent = true); enum TemplateNameIsRequiredTag { TemplateNameIsRequired }; /// Whether and why a template name is required in this lookup. class RequiredTemplateKind { public: /// Template name is required if TemplateKWLoc is valid. RequiredTemplateKind(SourceLocation TemplateKWLoc = SourceLocation()) : TemplateKW(TemplateKWLoc) {} /// Template name is unconditionally required. RequiredTemplateKind(TemplateNameIsRequiredTag) : TemplateKW() {} SourceLocation getTemplateKeywordLoc() const { return TemplateKW.getValueOr(SourceLocation()); } bool hasTemplateKeyword() const { return getTemplateKeywordLoc().isValid(); } bool isRequired() const { return TemplateKW != SourceLocation(); } explicit operator bool() const { return isRequired(); } private: llvm::Optional<SourceLocation> TemplateKW; }; enum class AssumedTemplateKind { /// This is not assumed to be a template name. None, /// This is assumed to be a template name because lookup found nothing. FoundNothing, /// This is assumed to be a template name because lookup found one or more /// functions (but no function templates). FoundFunctions, }; bool LookupTemplateName( LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType, bool EnteringContext, bool &MemberOfUnknownSpecialization, RequiredTemplateKind RequiredTemplate = SourceLocation(), AssumedTemplateKind *ATK = nullptr, bool AllowTypoCorrection = true); TemplateNameKind isTemplateName(Scope *S, CXXScopeSpec &SS, bool hasTemplateKeyword, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool &MemberOfUnknownSpecialization, bool Disambiguation = false); /// Try to resolve an undeclared template name as a type template. /// /// Sets II to the identifier corresponding to the template name, and updates /// Name to a corresponding (typo-corrected) type template name and TNK to /// the corresponding kind, if possible. 
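  /// For illustration only (not part of the original header): this is reached
  /// during recovery for code along the lines of
  /// \code
  ///   vectr<int> v;   // undeclared name used like a type template
  /// \endcode
  /// where the name may be typo-corrected to a known type template.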
void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name, TemplateNameKind &TNK, SourceLocation NameLoc, IdentifierInfo *&II); bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name, SourceLocation NameLoc, bool Diagnose = true); /// Determine whether a particular identifier might be the name in a C++1z /// deduction-guide declaration. bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name, SourceLocation NameLoc, ParsedTemplateTy *Template = nullptr); bool DiagnoseUnknownTemplateName(const IdentifierInfo &II, SourceLocation IILoc, Scope *S, const CXXScopeSpec *SS, TemplateTy &SuggestedTemplate, TemplateNameKind &SuggestedKind); bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation, NamedDecl *Instantiation, bool InstantiatedFromMember, const NamedDecl *Pattern, const NamedDecl *PatternDef, TemplateSpecializationKind TSK, bool Complain = true); void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl); TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl); NamedDecl *ActOnTypeParameter(Scope *S, bool Typename, SourceLocation EllipsisLoc, SourceLocation KeyLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedType DefaultArg, bool HasTypeConstraint); bool ActOnTypeConstraint(const CXXScopeSpec &SS, TemplateIdAnnotation *TypeConstraint, TemplateTypeParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc); bool AttachTypeConstraint(NestedNameSpecifierLoc NS, DeclarationNameInfo NameInfo, ConceptDecl *NamedConcept, const TemplateArgumentListInfo *TemplateArgs, TemplateTypeParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc); bool AttachTypeConstraint(AutoTypeLoc TL, NonTypeTemplateParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc); bool RequireStructuralType(QualType T, SourceLocation Loc); QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI, SourceLocation Loc); QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc); NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, unsigned Depth, unsigned Position, SourceLocation EqualLoc, Expr *DefaultArg); NamedDecl *ActOnTemplateTemplateParameter(Scope *S, SourceLocation TmpLoc, TemplateParameterList *Params, SourceLocation EllipsisLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedTemplateArgument DefaultArg); TemplateParameterList * ActOnTemplateParameterList(unsigned Depth, SourceLocation ExportLoc, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ArrayRef<NamedDecl *> Params, SourceLocation RAngleLoc, Expr *RequiresClause); /// The context in which we are checking a template parameter list. 
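  /// For illustration only (not part of the original header): a parameter list
  /// such as
  /// \code
  ///   template <typename T, int N, template <class> class C> struct X;
  /// \endcode
  /// is checked differently depending on whether it introduces a class
  /// template, a function template, a member, a friend, or an alias template.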
enum TemplateParamListContext { TPC_ClassTemplate, TPC_VarTemplate, TPC_FunctionTemplate, TPC_ClassTemplateMember, TPC_FriendClassTemplate, TPC_FriendFunctionTemplate, TPC_FriendFunctionTemplateDefinition, TPC_TypeAliasTemplate }; bool CheckTemplateParameterList(TemplateParameterList *NewParams, TemplateParameterList *OldParams, TemplateParamListContext TPC, SkipBodyInfo *SkipBody = nullptr); TemplateParameterList *MatchTemplateParametersToScopeSpecifier( SourceLocation DeclStartLoc, SourceLocation DeclLoc, const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId, ArrayRef<TemplateParameterList *> ParamLists, bool IsFriend, bool &IsMemberSpecialization, bool &Invalid, bool SuppressDiagnostic = false); DeclResult CheckClassTemplate( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams, AccessSpecifier AS, SourceLocation ModulePrivateLoc, SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists, TemplateParameterList **OuterTemplateParamLists, SkipBodyInfo *SkipBody = nullptr); TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg, QualType NTTPType, SourceLocation Loc); /// Get a template argument mapping the given template parameter to itself, /// e.g. for X in \c template<int X>, this would return an expression template /// argument referencing X. TemplateArgumentLoc getIdentityTemplateArgumentLoc(NamedDecl *Param, SourceLocation Location); void translateTemplateArguments(const ASTTemplateArgsPtr &In, TemplateArgumentListInfo &Out); ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType); void NoteAllFoundTemplates(TemplateName Name); QualType CheckTemplateIdType(TemplateName Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs); TypeResult ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy Template, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, bool IsCtorOrDtorName = false, bool IsClassName = false); /// Parsed an elaborated-type-specifier that refers to a template-id, /// such as \c class T::template apply<U>. TypeResult ActOnTagTemplateIdType(TagUseKind TUK, TypeSpecifierType TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateD, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc); DeclResult ActOnVarTemplateSpecialization( Scope *S, Declarator &D, TypeSourceInfo *DI, SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams, StorageClass SC, bool IsPartialSpecialization); /// Get the specialization of the given variable template corresponding to /// the specified argument list, or a null-but-valid result if the arguments /// are dependent. DeclResult CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation TemplateNameLoc, const TemplateArgumentListInfo &TemplateArgs); /// Form a reference to the specialization of the given variable template /// corresponding to the specified argument list, or a null-but-valid result /// if the arguments are dependent. 
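  /// For illustration only (not part of the original header):
  /// \code
  ///   template <typename T> constexpr T zero = T();
  ///   int i = zero<int>;   // reference to the specialization 'zero<int>'
  /// \endcode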
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, VarTemplateDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); ExprResult CheckConceptTemplateId(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &ConceptNameInfo, NamedDecl *FoundDecl, ConceptDecl *NamedConcept, const TemplateArgumentListInfo *TemplateArgs); void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc); ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, bool RequiresADL, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); TemplateNameKind ActOnTemplateName( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool AllowInjectedClassName = false); DeclResult ActOnClassTemplateSpecialization( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, SourceLocation ModulePrivateLoc, CXXScopeSpec &SS, TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr, MultiTemplateParamsArg TemplateParameterLists, SkipBodyInfo *SkipBody = nullptr); bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc, TemplateDecl *PrimaryTemplate, unsigned NumExplicitArgs, ArrayRef<TemplateArgument> Args); void CheckTemplatePartialSpecialization( ClassTemplatePartialSpecializationDecl *Partial); void CheckTemplatePartialSpecialization( VarTemplatePartialSpecializationDecl *Partial); Decl *ActOnTemplateDeclarator(Scope *S, MultiTemplateParamsArg TemplateParameterLists, Declarator &D); bool CheckSpecializationInstantiationRedecl(SourceLocation NewLoc, TemplateSpecializationKind NewTSK, NamedDecl *PrevDecl, TemplateSpecializationKind PrevTSK, SourceLocation PrevPtOfInstantiation, bool &SuppressNew); bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD, const TemplateArgumentListInfo &ExplicitTemplateArgs, LookupResult &Previous); bool CheckFunctionTemplateSpecialization( FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs, LookupResult &Previous, bool QualifiedFriend = false); bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous); void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous); DeclResult ActOnExplicitInstantiation( Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS, TemplateTy Template, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, Declarator &D); TemplateArgumentLoc SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, Decl *Param, SmallVectorImpl<TemplateArgument> &Converted, bool &HasDefaultArg); /// Specifies the context in which a particular template /// argument is being checked. 
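  /// For illustration only (not part of the original header; 'demo' is a
  /// hypothetical function):
  /// \code
  ///   template <typename T, int N> void f(T (&arr)[N]);
  ///   void demo() {
  ///     int a[4];
  ///     f(a);   // 'T' and 'N' are deduced; 'N' comes from the array bound
  ///   }
  /// \endcode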
enum CheckTemplateArgumentKind { /// The template argument was specified in the code or was /// instantiated with some deduced template arguments. CTAK_Specified, /// The template argument was deduced via template argument /// deduction. CTAK_Deduced, /// The template argument was deduced from an array bound /// via template argument deduction. CTAK_DeducedFromArrayBound }; bool CheckTemplateArgument(NamedDecl *Param, TemplateArgumentLoc &Arg, NamedDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, unsigned ArgumentPackIndex, SmallVectorImpl<TemplateArgument> &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); /// Check that the given template arguments can be be provided to /// the given template, converting the arguments along the way. /// /// \param Template The template to which the template arguments are being /// provided. /// /// \param TemplateLoc The location of the template name in the source. /// /// \param TemplateArgs The list of template arguments. If the template is /// a template template parameter, this function may extend the set of /// template arguments to also include substituted, defaulted template /// arguments. /// /// \param PartialTemplateArgs True if the list of template arguments is /// intentionally partial, e.g., because we're checking just the initial /// set of template arguments. /// /// \param Converted Will receive the converted, canonicalized template /// arguments. /// /// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to /// contain the converted forms of the template arguments as written. /// Otherwise, \p TemplateArgs will not be modified. /// /// \param ConstraintsNotSatisfied If provided, and an error occured, will /// receive true if the cause for the error is the associated constraints of /// the template not being satisfied by the template arguments. /// /// \returns true if an error occurred, false otherwise. bool CheckTemplateArgumentList(TemplateDecl *Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs, bool PartialTemplateArgs, SmallVectorImpl<TemplateArgument> &Converted, bool UpdateArgsWithConversions = true, bool *ConstraintsNotSatisfied = nullptr); bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param, TemplateArgumentLoc &Arg, SmallVectorImpl<TemplateArgument> &Converted); bool CheckTemplateArgument(TemplateTypeParmDecl *Param, TypeSourceInfo *Arg); ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param, QualType InstantiatedParamType, Expr *Arg, TemplateArgument &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); bool CheckTemplateTemplateArgument(TemplateTemplateParmDecl *Param, TemplateParameterList *Params, TemplateArgumentLoc &Arg); ExprResult BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg, QualType ParamType, SourceLocation Loc); ExprResult BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg, SourceLocation Loc); /// Enumeration describing how template parameter lists are compared /// for equality. enum TemplateParameterListEqualKind { /// We are matching the template parameter lists of two templates /// that might be redeclarations. /// /// \code /// template<typename T> struct X; /// template<typename T> struct X; /// \endcode TPL_TemplateMatch, /// We are matching the template parameter lists of two template /// template parameters as part of matching the template parameter lists /// of two templates that might be redeclarations. 
/// /// \code /// template<template<int I> class TT> struct X; /// template<template<int Value> class Other> struct X; /// \endcode TPL_TemplateTemplateParmMatch, /// We are matching the template parameter lists of a template /// template argument against the template parameter lists of a template /// template parameter. /// /// \code /// template<template<int Value> class Metafun> struct X; /// template<int Value> struct integer_c; /// X<integer_c> xic; /// \endcode TPL_TemplateTemplateArgumentMatch }; bool TemplateParameterListsAreEqual(TemplateParameterList *New, TemplateParameterList *Old, bool Complain, TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc = SourceLocation()); bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams); /// Called when the parser has parsed a C++ typename /// specifier, e.g., "typename T::type". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param II the identifier we're retrieving (e.g., 'type' in the example). /// \param IdLoc the location of the identifier. TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, const IdentifierInfo &II, SourceLocation IdLoc); /// Called when the parser has parsed a C++ typename /// specifier that ends in a template-id, e.g., /// "typename MetaFun::template apply<T1, T2>". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param TemplateLoc the location of the 'template' keyword, if any. /// \param TemplateName The template name. /// \param TemplateII The identifier used to name the template. /// \param TemplateIILoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). 
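  /// For illustration only (not part of the original header):
  /// \code
  ///   template <typename MetaFun, typename T1, typename T2>
  ///   typename MetaFun::template apply<T1, T2>::type combine(T1, T2);
  /// \endcode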
TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, SourceLocation TemplateLoc, TemplateTy TemplateName, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc, TypeSourceInfo **TSI, bool DeducedTSTContext); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc, bool DeducedTSTContext = true); TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T, SourceLocation Loc, DeclarationName Name); bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS); ExprResult RebuildExprInCurrentInstantiation(Expr *E); bool RebuildTemplateParamsInCurrentInstantiation( TemplateParameterList *Params); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgumentList &Args); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgument *Args, unsigned NumArgs); //===--------------------------------------------------------------------===// // C++ Concepts //===--------------------------------------------------------------------===// Decl *ActOnConceptDefinition( Scope *S, MultiTemplateParamsArg TemplateParameterLists, IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr); RequiresExprBodyDecl * ActOnStartRequiresExpr(SourceLocation RequiresKWLoc, ArrayRef<ParmVarDecl *> LocalParameters, Scope *BodyScope); void ActOnFinishRequiresExpr(); concepts::Requirement *ActOnSimpleRequirement(Expr *E); concepts::Requirement *ActOnTypeRequirement( SourceLocation TypenameKWLoc, CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo *TypeName, TemplateIdAnnotation *TemplateId); concepts::Requirement *ActOnCompoundRequirement(Expr *E, SourceLocation NoexceptLoc); concepts::Requirement * ActOnCompoundRequirement( Expr *E, SourceLocation NoexceptLoc, CXXScopeSpec &SS, TemplateIdAnnotation *TypeConstraint, unsigned Depth); concepts::Requirement *ActOnNestedRequirement(Expr *Constraint); concepts::ExprRequirement * BuildExprRequirement( Expr *E, bool IsSatisfied, SourceLocation NoexceptLoc, concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement); concepts::ExprRequirement * BuildExprRequirement( concepts::Requirement::SubstitutionDiagnostic *ExprSubstDiag, bool IsSatisfied, SourceLocation NoexceptLoc, concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement); concepts::TypeRequirement *BuildTypeRequirement(TypeSourceInfo *Type); concepts::TypeRequirement * BuildTypeRequirement( concepts::Requirement::SubstitutionDiagnostic *SubstDiag); concepts::NestedRequirement *BuildNestedRequirement(Expr *E); concepts::NestedRequirement * BuildNestedRequirement( concepts::Requirement::SubstitutionDiagnostic *SubstDiag); ExprResult ActOnRequiresExpr(SourceLocation RequiresKWLoc, RequiresExprBodyDecl *Body, ArrayRef<ParmVarDecl *> LocalParameters, ArrayRef<concepts::Requirement *> Requirements, SourceLocation ClosingBraceLoc); //===--------------------------------------------------------------------===// // C++ Variadic Templates (C++0x [temp.variadic]) //===--------------------------------------------------------------------===// /// Determine whether an unexpanded parameter pack 
might be permitted in this /// location. Useful for error recovery. bool isUnexpandedParameterPackPermitted(); /// The context in which an unexpanded parameter pack is /// being diagnosed. /// /// Note that the values of this enumeration line up with the first /// argument to the \c err_unexpanded_parameter_pack diagnostic. enum UnexpandedParameterPackContext { /// An arbitrary expression. UPPC_Expression = 0, /// The base type of a class type. UPPC_BaseType, /// The type of an arbitrary declaration. UPPC_DeclarationType, /// The type of a data member. UPPC_DataMemberType, /// The size of a bit-field. UPPC_BitFieldWidth, /// The expression in a static assertion. UPPC_StaticAssertExpression, /// The fixed underlying type of an enumeration. UPPC_FixedUnderlyingType, /// The enumerator value. UPPC_EnumeratorValue, /// A using declaration. UPPC_UsingDeclaration, /// A friend declaration. UPPC_FriendDeclaration, /// A declaration qualifier. UPPC_DeclarationQualifier, /// An initializer. UPPC_Initializer, /// A default argument. UPPC_DefaultArgument, /// The type of a non-type template parameter. UPPC_NonTypeTemplateParameterType, /// The type of an exception. UPPC_ExceptionType, /// Partial specialization. UPPC_PartialSpecialization, /// Microsoft __if_exists. UPPC_IfExists, /// Microsoft __if_not_exists. UPPC_IfNotExists, /// Lambda expression. UPPC_Lambda, /// Block expression. UPPC_Block, /// A type constraint. UPPC_TypeConstraint, // A requirement in a requires-expression. UPPC_Requirement, // A requires-clause. UPPC_RequiresClause, }; /// Diagnose unexpanded parameter packs. /// /// \param Loc The location at which we should emit the diagnostic. /// /// \param UPPC The context in which we are diagnosing unexpanded /// parameter packs. /// /// \param Unexpanded the set of unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc, UnexpandedParameterPackContext UPPC, ArrayRef<UnexpandedParameterPack> Unexpanded); /// If the given type contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The source location where a diagnostic should be emitted. /// /// \param T The type that is being checked for unexpanded parameter /// packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T, UnexpandedParameterPackContext UPPC); /// If the given expression contains an unexpanded parameter /// pack, diagnose the error. /// /// \param E The expression that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(Expr *E, UnexpandedParameterPackContext UPPC = UPPC_Expression); /// If the given requires-expression contains an unexpanded reference to one /// of its own parameter packs, diagnose the error. /// /// \param RE The requires-expression that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPackInRequiresExpr(RequiresExpr *RE); /// If the given nested-name-specifier contains an unexpanded /// parameter pack, diagnose the error. /// /// \param SS The nested-name-specifier that is being checked for /// unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise.
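///
/// An illustrative nested-name-specifier that still refers to an
/// unexpanded pack (sketch only):
///
/// \code
/// template<typename ...Ts> struct S {
///   void f() { typename Ts::type x; } // 'Ts' is unexpanded in 'Ts::'
/// };
/// \endcode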
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS, UnexpandedParameterPackContext UPPC); /// If the given name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param NameInfo The name (with source location information) that /// is being checked for unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo, UnexpandedParameterPackContext UPPC); /// If the given template name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The location of the template name. /// /// \param Template The template name that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TemplateName Template, UnexpandedParameterPackContext UPPC); /// If the given template argument contains an unexpanded parameter /// pack, diagnose the error. /// /// \param Arg The template argument that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg, UnexpandedParameterPackContext UPPC); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgument Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param T The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(QualType T, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param TL The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TypeLoc TL, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// nested-name-specifier. /// /// \param NNS The nested-name-specifier that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// name. /// /// \param NameInfo The name that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Invoked when parsing a template argument followed by an /// ellipsis, which creates a pack expansion. /// /// \param Arg The template argument preceding the ellipsis, which /// may already be invalid. /// /// \param EllipsisLoc The location of the ellipsis. ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg, SourceLocation EllipsisLoc); /// Invoked when parsing a type followed by an ellipsis, which /// creates a pack expansion. 
/// /// \param Type The type preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc); /// Construct a pack expansion type from the pattern of the pack /// expansion. TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Construct a pack expansion type from the pattern of the pack /// expansion. QualType CheckPackExpansion(QualType Pattern, SourceRange PatternRange, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Determine whether we could expand a pack expansion with the /// given set of parameter packs into separate arguments by repeatedly /// transforming the pattern. /// /// \param EllipsisLoc The location of the ellipsis that identifies the /// pack expansion. /// /// \param PatternRange The source range that covers the entire pattern of /// the pack expansion. /// /// \param Unexpanded The set of unexpanded parameter packs within the /// pattern. /// /// \param ShouldExpand Will be set to \c true if the transformer should /// expand the corresponding pack expansions into separate arguments. When /// set, \c NumExpansions must also be set. /// /// \param RetainExpansion Whether the caller should add an unexpanded /// pack expansion after all of the expanded arguments. This is used /// when extending explicitly-specified template argument packs per /// C++0x [temp.arg.explicit]p9. /// /// \param NumExpansions The number of separate arguments that will be in /// the expanded form of the corresponding pack expansion. This is both an /// input and an output parameter, which can be set by the caller if the /// number of expansions is known a priori (e.g., due to a prior substitution) /// and will be set by the callee when the number of expansions is known. /// The callee must set this value when \c ShouldExpand is \c true; it may /// set this value in other cases. /// /// \returns true if an error occurred (e.g., because the parameter packs /// are to be instantiated with arguments of different lengths), false /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions) /// must be set. bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc, SourceRange PatternRange, ArrayRef<UnexpandedParameterPack> Unexpanded, const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand, bool &RetainExpansion, Optional<unsigned> &NumExpansions); /// Determine the number of arguments in the given pack expansion /// type. /// /// This routine assumes that the number of arguments in the expansion is /// consistent across all of the unexpanded parameter packs in its pattern. 
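///
/// For example (illustrative), with Ts bound to <int, float, char>:
///
/// \code
/// std::tuple<Ts...> t; // the expansion contributes three arguments
/// \endcode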
/// /// Returns an empty Optional if the type can't be expanded. Optional<unsigned> getNumArgumentsInExpansion(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs); /// Determine whether the given declarator contains any unexpanded /// parameter packs. /// /// This routine is used by the parser to disambiguate function declarators /// with an ellipsis prior to the ')', e.g., /// /// \code /// void f(T...); /// \endcode /// /// To determine whether we have an (unnamed) function parameter pack or /// a variadic function. /// /// \returns true if the declarator contains any unexpanded parameter packs, /// false otherwise. bool containsUnexpandedParameterPacks(Declarator &D); /// Returns the pattern of the pack expansion for a template argument. /// /// \param OrigLoc The template argument to expand. /// /// \param Ellipsis Will be set to the location of the ellipsis. /// /// \param NumExpansions Will be set to the number of expansions that will /// be generated from this pack expansion, if known a priori. TemplateArgumentLoc getTemplateArgumentPackExpansionPattern( TemplateArgumentLoc OrigLoc, SourceLocation &Ellipsis, Optional<unsigned> &NumExpansions) const; /// Given a template argument that contains an unexpanded parameter pack, but /// which has already been substituted, attempt to determine the number of /// elements that will be produced once this argument is fully-expanded. /// /// This is intended for use when transforming 'sizeof...(Arg)' in order to /// avoid actually expanding the pack where possible. Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg); //===--------------------------------------------------------------------===// // C++ Template Argument Deduction (C++ [temp.deduct]) //===--------------------------------------------------------------------===// /// Adjust the type \p ArgFunctionType to match the calling convention, /// noreturn, and optionally the exception specification of \p FunctionType. /// Deduction often wants to ignore these properties when matching function /// types. QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType, bool AdjustExceptionSpec = false); /// Describes the result of template argument deduction. /// /// The TemplateDeductionResult enumeration describes the result of /// template argument deduction, as returned from /// DeduceTemplateArguments(). The separate TemplateDeductionInfo /// structure provides additional information about the results of /// template argument deduction, e.g., the deduced template argument /// list (if successful) or the specific template parameters or /// deduced arguments that were involved in the failure. enum TemplateDeductionResult { /// Template argument deduction was successful. TDK_Success = 0, /// The declaration was invalid; do nothing. TDK_Invalid, /// Template argument deduction exceeded the maximum template /// instantiation depth (which has already been diagnosed). TDK_InstantiationDepth, /// Template argument deduction did not deduce a value /// for every template parameter. TDK_Incomplete, /// Template argument deduction did not deduce a value for every /// expansion of an expanded template parameter pack. TDK_IncompletePack, /// Template argument deduction produced inconsistent /// deduced values for the given template parameter. 
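///
/// For example (illustrative):
///
/// \code
/// template<typename T> void f(T, T);
/// // f(1, 2.5); // inconsistent deduction: T = int vs. T = double
/// \endcode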
TDK_Inconsistent, /// Template argument deduction failed due to inconsistent /// cv-qualifiers on a template parameter type that would /// otherwise be deduced, e.g., we tried to deduce T in "const T" /// but were given a non-const "X". TDK_Underqualified, /// Substitution of the deduced template argument values /// resulted in an error. TDK_SubstitutionFailure, /// After substituting deduced template arguments, a dependent /// parameter type did not match the corresponding argument. TDK_DeducedMismatch, /// After substituting deduced template arguments, an element of /// a dependent parameter type did not match the corresponding element /// of the corresponding argument (when deducing from an initializer list). TDK_DeducedMismatchNested, /// A non-dependent component of the parameter did not match the /// corresponding component of the argument. TDK_NonDeducedMismatch, /// When performing template argument deduction for a function /// template, there were too many call arguments. TDK_TooManyArguments, /// When performing template argument deduction for a function /// template, there were too few call arguments. TDK_TooFewArguments, /// The explicitly-specified template arguments were not valid /// template arguments for the given template. TDK_InvalidExplicitArguments, /// Checking non-dependent argument conversions failed. TDK_NonDependentConversionFailure, /// The deduced arguments did not satisfy the constraints associated /// with the template. TDK_ConstraintsNotSatisfied, /// Deduction failed; that's all we know. TDK_MiscellaneousDeductionFailure, /// CUDA Target attributes do not match. TDK_CUDATargetMismatch }; TemplateDeductionResult DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, sema::TemplateDeductionInfo &Info); TemplateDeductionResult SubstituteExplicitTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo &ExplicitTemplateArgs, SmallVectorImpl<DeducedTemplateArgument> &Deduced, SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType, sema::TemplateDeductionInfo &Info); /// \brief A function argument from which we performed template argument /// deduction for a call.
struct OriginalCallArg { OriginalCallArg(QualType OriginalParamType, bool DecomposedParam, unsigned ArgIdx, QualType OriginalArgType) : OriginalParamType(OriginalParamType), DecomposedParam(DecomposedParam), ArgIdx(ArgIdx), OriginalArgType(OriginalArgType) {} QualType OriginalParamType; bool DecomposedParam; unsigned ArgIdx; QualType OriginalArgType; }; TemplateDeductionResult FinishTemplateArgumentDeduction( FunctionTemplateDecl *FunctionTemplate, SmallVectorImpl<DeducedTemplateArgument> &Deduced, unsigned NumExplicitlySpecified, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr, bool PartialOverloading = false, llvm::function_ref<bool()> CheckNonDependent = []{ return false; }); TemplateDeductionResult DeduceTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool PartialOverloading, llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ArgFunctionType, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, QualType ToType, CXXConversionDecl *&Specialization, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); /// Substitute Replacement for \p auto in \p TypeWithAuto QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement); /// Substitute Replacement for auto in TypeWithAuto TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto, QualType Replacement); /// Completely replace the \c auto in \p TypeWithAuto by /// \p Replacement. This does not retain any \c auto type sugar. QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement); TypeSourceInfo *ReplaceAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto, QualType Replacement); /// Result type of DeduceAutoType. enum DeduceAutoResult { DAR_Succeeded, DAR_Failed, DAR_FailedAlreadyDiagnosed }; DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None, bool IgnoreConstraints = false); DeduceAutoResult DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None, bool IgnoreConstraints = false); void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init); bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc, bool Diagnose = true); /// Declare implicit deduction guides for a class template if we've /// not already done so. 
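///
/// For example (illustrative), class template argument deduction relies on
/// these guides:
///
/// \code
/// template<typename T> struct Pair { Pair(T, T); };
/// Pair p(1, 2); // implicit guide Pair(T, T) -> Pair<T> deduces Pair<int>
/// \endcode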
void DeclareImplicitDeductionGuides(TemplateDecl *Template, SourceLocation Loc); QualType DeduceTemplateSpecializationFromInitializer( TypeSourceInfo *TInfo, const InitializedEntity &Entity, const InitializationKind &Kind, MultiExprArg Init); QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name, QualType Type, TypeSourceInfo *TSI, SourceRange Range, bool DirectInit, Expr *Init); TypeLoc getReturnTypeLoc(FunctionDecl *FD) const; bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD, SourceLocation ReturnLoc, Expr *&RetExpr, AutoType *AT); FunctionTemplateDecl *getMoreSpecializedTemplate( FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc, TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1, unsigned NumCallArguments2, bool Reversed = false); UnresolvedSetIterator getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd, TemplateSpecCandidateSet &FailedCandidates, SourceLocation Loc, const PartialDiagnostic &NoneDiag, const PartialDiagnostic &AmbigDiag, const PartialDiagnostic &CandidateDiag, bool Complain = true, QualType TargetType = QualType()); ClassTemplatePartialSpecializationDecl * getMoreSpecializedPartialSpecialization( ClassTemplatePartialSpecializationDecl *PS1, ClassTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization( VarTemplatePartialSpecializationDecl *PS1, VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); bool isTemplateTemplateParameterAtLeastAsSpecializedAs( TemplateParameterList *PParam, TemplateDecl *AArg, SourceLocation Loc); void MarkUsedTemplateParameters(const Expr *E, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkDeducedTemplateParameters( const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced) { return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced); } static void MarkDeducedTemplateParameters(ASTContext &Ctx, const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced); //===--------------------------------------------------------------------===// // C++ Template Instantiation // MultiLevelTemplateArgumentList getTemplateInstantiationArgs(NamedDecl *D, const TemplateArgumentList *Innermost = nullptr, bool RelativeToPrimary = false, const FunctionDecl *Pattern = nullptr); /// A context in which code is being synthesized (where a source location /// alone is not sufficient to identify the context). This covers template /// instantiation and various forms of implicitly-generated functions. struct CodeSynthesisContext { /// The kind of template instantiation we are performing enum SynthesisKind { /// We are instantiating a template declaration. The entity is /// the declaration we're instantiating (e.g., a CXXRecordDecl). TemplateInstantiation, /// We are instantiating a default argument for a template /// parameter. The Entity is the template parameter whose argument is /// being instantiated, the Template is the template, and the /// TemplateArgs/NumTemplateArguments provide the template arguments as /// specified. 
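///
/// For example (illustrative):
///
/// \code
/// template<typename T, typename U = T*> struct S { };
/// S<int> s; // the default argument 'T*' is instantiated as 'int*'
/// \endcode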
DefaultTemplateArgumentInstantiation, /// We are instantiating a default argument for a function. /// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs /// provides the template arguments as specified. DefaultFunctionArgumentInstantiation, /// We are substituting explicit template arguments provided for /// a function template. The entity is a FunctionTemplateDecl. ExplicitTemplateArgumentSubstitution, /// We are substituting template argument determined as part of /// template argument deduction for either a class template /// partial specialization or a function template. The /// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or /// a TemplateDecl. DeducedTemplateArgumentSubstitution, /// We are substituting prior template arguments into a new /// template parameter. The template parameter itself is either a /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl. PriorTemplateArgumentSubstitution, /// We are checking the validity of a default template argument that /// has been used when naming a template-id. DefaultTemplateArgumentChecking, /// We are computing the exception specification for a defaulted special /// member function. ExceptionSpecEvaluation, /// We are instantiating the exception specification for a function /// template which was deferred until it was needed. ExceptionSpecInstantiation, /// We are instantiating a requirement of a requires expression. RequirementInstantiation, /// We are checking the satisfaction of a nested requirement of a requires /// expression. NestedRequirementConstraintsCheck, /// We are declaring an implicit special member function. DeclaringSpecialMember, /// We are declaring an implicit 'operator==' for a defaulted /// 'operator<=>'. DeclaringImplicitEqualityComparison, /// We are defining a synthesized function (such as a defaulted special /// member). DefiningSynthesizedFunction, // We are checking the constraints associated with a constrained entity or // the constraint expression of a concept. This includes the checks that // atomic constraints have the type 'bool' and that they can be constant // evaluated. ConstraintsCheck, // We are substituting template arguments into a constraint expression. ConstraintSubstitution, // We are normalizing a constraint expression. ConstraintNormalization, // We are substituting into the parameter mapping of an atomic constraint // during normalization. ParameterMappingSubstitution, /// We are rewriting a comparison operator in terms of an operator<=>. RewritingOperatorAsSpaceship, /// We are initializing a structured binding. InitializingStructuredBinding, /// We are marking a class as __dllexport. MarkingClassDllexported, /// Added for Template instantiation observation. /// Memoization means we are _not_ instantiating a template because /// it is already instantiated (but we entered a context where we /// would have had to if it was not already instantiated). Memoization } Kind; /// Was the enclosing context a non-instantiation SFINAE context? bool SavedInNonInstantiationSFINAEContext; /// The point of instantiation or synthesis within the source code. SourceLocation PointOfInstantiation; /// The entity that is being synthesized. Decl *Entity; /// The template (or partial specialization) in which we are /// performing the instantiation, for substitutions of prior template /// arguments. NamedDecl *Template; /// The list of template arguments we are substituting, if they /// are not part of the entity. 
const TemplateArgument *TemplateArgs; // FIXME: Wrap this union around more members, or perhaps store the // kind-specific members in the RAII object owning the context. union { /// The number of template arguments in TemplateArgs. unsigned NumTemplateArgs; /// The special member being declared or defined. CXXSpecialMember SpecialMember; }; ArrayRef<TemplateArgument> template_arguments() const { assert(Kind != DeclaringSpecialMember); return {TemplateArgs, NumTemplateArgs}; } /// The template deduction info object associated with the /// substitution or checking of explicit or deduced template arguments. sema::TemplateDeductionInfo *DeductionInfo; /// The source range that covers the construct that cause /// the instantiation, e.g., the template-id that causes a class /// template instantiation. SourceRange InstantiationRange; CodeSynthesisContext() : Kind(TemplateInstantiation), SavedInNonInstantiationSFINAEContext(false), Entity(nullptr), Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0), DeductionInfo(nullptr) {} /// Determines whether this template is an actual instantiation /// that should be counted toward the maximum instantiation depth. bool isInstantiationRecord() const; }; /// List of active code synthesis contexts. /// /// This vector is treated as a stack. As synthesis of one entity requires /// synthesis of another, additional contexts are pushed onto the stack. SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts; /// Specializations whose definitions are currently being instantiated. llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations; /// Non-dependent types used in templates that have already been instantiated /// by some template instantiation. llvm::DenseSet<QualType> InstantiatedNonDependentTypes; /// Extra modules inspected when performing a lookup during a template /// instantiation. Computed lazily. SmallVector<Module*, 16> CodeSynthesisContextLookupModules; /// Cache of additional modules that should be used for name lookup /// within the current template instantiation. Computed lazily; use /// getLookupModules() to get a complete set. llvm::DenseSet<Module*> LookupModulesCache; /// Get the set of additional modules that should be checked during /// name lookup. A module and its imports become visible when instanting a /// template defined within it. llvm::DenseSet<Module*> &getLookupModules(); /// Map from the most recent declaration of a namespace to the most /// recent visible declaration of that namespace. llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache; /// Whether we are in a SFINAE context that is not associated with /// template instantiation. /// /// This is used when setting up a SFINAE trap (\c see SFINAETrap) outside /// of a template instantiation or template argument deduction. bool InNonInstantiationSFINAEContext; /// The number of \p CodeSynthesisContexts that are not template /// instantiations and, therefore, should not be counted as part of the /// instantiation depth. /// /// When the instantiation depth reaches the user-configurable limit /// \p LangOptions::InstantiationDepth we will abort instantiation. // FIXME: Should we have a similar limit for other forms of synthesis? unsigned NonInstantiationEntries; /// The depth of the context stack at the point when the most recent /// error or warning was produced. /// /// This value is used to suppress printing of redundant context stacks /// when there are multiple errors or warnings in the same instantiation. // FIXME: Does this belong in Sema? 
It's tough to implement it anywhere else. unsigned LastEmittedCodeSynthesisContextDepth = 0; /// The template instantiation callbacks to trace or track /// instantiations (objects can be chained). /// /// This callbacks is used to print, trace or track template /// instantiations as they are being constructed. std::vector<std::unique_ptr<TemplateInstantiationCallback>> TemplateInstCallbacks; /// The current index into pack expansion arguments that will be /// used for substitution of parameter packs. /// /// The pack expansion index will be -1 to indicate that parameter packs /// should be instantiated as themselves. Otherwise, the index specifies /// which argument within the parameter pack will be used for substitution. int ArgumentPackSubstitutionIndex; /// RAII object used to change the argument pack substitution index /// within a \c Sema object. /// /// See \c ArgumentPackSubstitutionIndex for more information. class ArgumentPackSubstitutionIndexRAII { Sema &Self; int OldSubstitutionIndex; public: ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex) : Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) { Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex; } ~ArgumentPackSubstitutionIndexRAII() { Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex; } }; friend class ArgumentPackSubstitutionRAII; /// For each declaration that involved template argument deduction, the /// set of diagnostics that were suppressed during that template argument /// deduction. /// /// FIXME: Serialize this structure to the AST file. typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> > SuppressedDiagnosticsMap; SuppressedDiagnosticsMap SuppressedDiagnostics; /// A stack object to be created when performing template /// instantiation. /// /// Construction of an object of type \c InstantiatingTemplate /// pushes the current instantiation onto the stack of active /// instantiations. If the size of this stack exceeds the maximum /// number of recursive template instantiations, construction /// produces an error and evaluates true. /// /// Destruction of this object will pop the named instantiation off /// the stack. struct InstantiatingTemplate { /// Note that we are instantiating a class template, /// function template, variable template, alias template, /// or a member thereof. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, Decl *Entity, SourceRange InstantiationRange = SourceRange()); struct ExceptionSpecification {}; /// Note that we are instantiating an exception specification /// of a function template. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionDecl *Entity, ExceptionSpecification, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument in a /// template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateParameter Param, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting either explicitly-specified or /// deduced template arguments during function template argument deduction. 
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionTemplateDecl *FunctionTemplate, ArrayRef<TemplateArgument> TemplateArgs, CodeSynthesisContext::SynthesisKind Kind, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template declaration. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ClassTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a variable template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, VarTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument for a function /// parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ParmVarDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting prior template arguments into a /// non-type parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, NonTypeTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are substituting prior template arguments into a /// template template parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, TemplateTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are checking the default template argument /// against the template parameter for a given template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, NamedDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); struct ConstraintsCheck {}; /// \brief Note that we are checking the constraints associated with some /// constrained entity (a concept declaration or a template with associated /// constraints). InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ConstraintsCheck, NamedDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); struct ConstraintSubstitution {}; /// \brief Note that we are checking a constraint expression associated /// with a template declaration or as part of the satisfaction check of a /// concept. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ConstraintSubstitution, NamedDecl *Template, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange); struct ConstraintNormalization {}; /// \brief Note that we are normalizing a constraint expression. 
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ConstraintNormalization, NamedDecl *Template, SourceRange InstantiationRange); struct ParameterMappingSubstitution {}; /// \brief Note that we are subtituting into the parameter mapping of an /// atomic constraint during constraint normalization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ParameterMappingSubstitution, NamedDecl *Template, SourceRange InstantiationRange); /// \brief Note that we are substituting template arguments into a part of /// a requirement of a requires expression. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, concepts::Requirement *Req, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are checking the satisfaction of the constraint /// expression inside of a nested requirement. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, concepts::NestedRequirement *Req, ConstraintsCheck, SourceRange InstantiationRange = SourceRange()); /// Note that we have finished instantiating this template. void Clear(); ~InstantiatingTemplate() { Clear(); } /// Determines whether we have exceeded the maximum /// recursive template instantiations. bool isInvalid() const { return Invalid; } /// Determine whether we are already instantiating this /// specialization in some surrounding active instantiation. bool isAlreadyInstantiating() const { return AlreadyInstantiating; } private: Sema &SemaRef; bool Invalid; bool AlreadyInstantiating; bool CheckInstantiationDepth(SourceLocation PointOfInstantiation, SourceRange InstantiationRange); InstantiatingTemplate( Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind, SourceLocation PointOfInstantiation, SourceRange InstantiationRange, Decl *Entity, NamedDecl *Template = nullptr, ArrayRef<TemplateArgument> TemplateArgs = None, sema::TemplateDeductionInfo *DeductionInfo = nullptr); InstantiatingTemplate(const InstantiatingTemplate&) = delete; InstantiatingTemplate& operator=(const InstantiatingTemplate&) = delete; }; void pushCodeSynthesisContext(CodeSynthesisContext Ctx); void popCodeSynthesisContext(); /// Determine whether we are currently performing template instantiation. bool inTemplateInstantiation() const { return CodeSynthesisContexts.size() > NonInstantiationEntries; } void PrintContextStack() { if (!CodeSynthesisContexts.empty() && CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) { PrintInstantiationStack(); LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size(); } if (PragmaAttributeCurrentTargetDecl) PrintPragmaAttributeInstantiationPoint(); } void PrintInstantiationStack(); void PrintPragmaAttributeInstantiationPoint(); /// Determines whether we are currently in a context where /// template argument substitution failures are not considered /// errors. /// /// \returns An empty \c Optional if we're not in a SFINAE context. /// Otherwise, contains a pointer that, if non-NULL, contains the nearest /// template-deduction context object, which can be used to capture /// diagnostics that will be suppressed. Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const; /// Determines whether we are currently in a context that /// is not evaluated as per C++ [expr] p5. 
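///
/// Illustrative unevaluated contexts include the operands of 'sizeof',
/// 'decltype', and 'noexcept':
///
/// \code
/// int f();
/// int n = sizeof(f());  // 'f()' is an unevaluated operand
/// decltype(f()) m = 0;  // so is this use of 'f()'
/// \endcode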
bool isUnevaluatedContext() const { assert(!ExprEvalContexts.empty() && "Must be in an expression evaluation context"); return ExprEvalContexts.back().isUnevaluated(); } /// RAII class used to determine whether SFINAE has /// trapped any errors that occur during template argument /// deduction. class SFINAETrap { Sema &SemaRef; unsigned PrevSFINAEErrors; bool PrevInNonInstantiationSFINAEContext; bool PrevAccessCheckingSFINAE; bool PrevLastDiagnosticIgnored; public: explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false) : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors), PrevInNonInstantiationSFINAEContext( SemaRef.InNonInstantiationSFINAEContext), PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE), PrevLastDiagnosticIgnored( SemaRef.getDiagnostics().isLastDiagnosticIgnored()) { if (!SemaRef.isSFINAEContext()) SemaRef.InNonInstantiationSFINAEContext = true; SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE; } ~SFINAETrap() { SemaRef.NumSFINAEErrors = PrevSFINAEErrors; SemaRef.InNonInstantiationSFINAEContext = PrevInNonInstantiationSFINAEContext; SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE; SemaRef.getDiagnostics().setLastDiagnosticIgnored( PrevLastDiagnosticIgnored); } /// Determine whether any SFINAE errors have been trapped. bool hasErrorOccurred() const { return SemaRef.NumSFINAEErrors > PrevSFINAEErrors; } }; /// RAII class used to indicate that we are performing provisional /// semantic analysis to determine the validity of a construct, so /// typo-correction and diagnostics in the immediate context (not within /// implicitly-instantiated templates) should be suppressed. class TentativeAnalysisScope { Sema &SemaRef; // FIXME: Using a SFINAETrap for this is a hack. SFINAETrap Trap; bool PrevDisableTypoCorrection; public: explicit TentativeAnalysisScope(Sema &SemaRef) : SemaRef(SemaRef), Trap(SemaRef, true), PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) { SemaRef.DisableTypoCorrection = true; } ~TentativeAnalysisScope() { SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection; } }; /// The current instantiation scope used to store local /// variables. LocalInstantiationScope *CurrentInstantiationScope; /// Tracks whether we are in a context where typo correction is /// disabled. bool DisableTypoCorrection; /// The number of typos corrected by CorrectTypo. unsigned TyposCorrected; typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet; typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations; /// A cache containing identifiers for which typo correction failed and /// their locations, so that repeated attempts to correct an identifier in a /// given location are ignored if typo correction already failed for it. IdentifierSourceLocations TypoCorrectionFailures; /// Worker object for performing CFG-based warnings. sema::AnalysisBasedWarnings AnalysisWarnings; threadSafety::BeforeSet *ThreadSafetyDeclCache; /// An entity for which implicit template instantiation is required. /// /// The source location associated with the declaration is the first place in /// the source code where the declaration was "used". It is not necessarily /// the point of instantiation (which will be either before or after the /// namespace-scope declaration that triggered this implicit instantiation), /// However, it is the location that diagnostics should generally refer to, /// because users will need to know what code triggered the instantiation. 
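///
/// For example (illustrative):
///
/// \code
/// template<typename T> T twice(T v) { return v + v; }
/// int n = twice(21); // 'twice<int>' is first "used" here; its definition
///                    // is instantiated later, at the point of instantiation
/// \endcode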
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation; /// The queue of implicit template instantiations that are required /// but have not yet been performed. std::deque<PendingImplicitInstantiation> PendingInstantiations; /// Queue of implicit template instantiations that cannot be performed /// eagerly. SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations; class GlobalEagerInstantiationScope { public: GlobalEagerInstantiationScope(Sema &S, bool Enabled) : S(S), Enabled(Enabled) { if (!Enabled) return; SavedPendingInstantiations.swap(S.PendingInstantiations); SavedVTableUses.swap(S.VTableUses); } void perform() { if (Enabled) { S.DefineUsedVTables(); S.PerformPendingInstantiations(); } } ~GlobalEagerInstantiationScope() { if (!Enabled) return; // Restore the set of pending vtables. assert(S.VTableUses.empty() && "VTableUses should be empty before it is discarded."); S.VTableUses.swap(SavedVTableUses); // Restore the set of pending implicit instantiations. if (S.TUKind != TU_Prefix || !S.LangOpts.PCHInstantiateTemplates) { assert(S.PendingInstantiations.empty() && "PendingInstantiations should be empty before it is discarded."); S.PendingInstantiations.swap(SavedPendingInstantiations); } else { // Template instantiations in the PCH may be delayed until the TU. S.PendingInstantiations.swap(SavedPendingInstantiations); S.PendingInstantiations.insert(S.PendingInstantiations.end(), SavedPendingInstantiations.begin(), SavedPendingInstantiations.end()); } } private: Sema &S; SmallVector<VTableUse, 16> SavedVTableUses; std::deque<PendingImplicitInstantiation> SavedPendingInstantiations; bool Enabled; }; /// The queue of implicit template instantiations that are required /// and must be performed within the current local scope. /// /// This queue is only used for member functions of local classes in /// templates, which must be instantiated in the same scope as their /// enclosing function, so that they can reference function-local /// types, static variables, enumerators, etc. std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations; class LocalEagerInstantiationScope { public: LocalEagerInstantiationScope(Sema &S) : S(S) { SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); } ~LocalEagerInstantiationScope() { assert(S.PendingLocalImplicitInstantiations.empty() && "there shouldn't be any pending local implicit instantiations"); SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } private: Sema &S; std::deque<PendingImplicitInstantiation> SavedPendingLocalImplicitInstantiations; }; /// A helper class for building up ExtParameterInfos. class ExtParameterInfoBuilder { SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos; bool HasInteresting = false; public: /// Set the ExtParameterInfo for the parameter at the given index, /// void set(unsigned index, FunctionProtoType::ExtParameterInfo info) { assert(Infos.size() <= index); Infos.resize(index); Infos.push_back(info); if (!HasInteresting) HasInteresting = (info != FunctionProtoType::ExtParameterInfo()); } /// Return a pointer (suitable for setting in an ExtProtoInfo) to the /// ExtParameterInfo array we've built up. 
const FunctionProtoType::ExtParameterInfo * getPointerOrNull(unsigned numParams) { if (!HasInteresting) return nullptr; Infos.resize(numParams); return Infos.data(); } }; void PerformPendingInstantiations(bool LocalOnly = false); TypeSourceInfo *SubstType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, bool AllowDeducedTST = false); QualType SubstType(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstType(TypeLoc TL, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, CXXRecordDecl *ThisContext, Qualifiers ThisTypeQuals); void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto, const MultiLevelTemplateArgumentList &Args); bool SubstExceptionSpec(SourceLocation Loc, FunctionProtoType::ExceptionSpecInfo &ESI, SmallVectorImpl<QualType> &ExceptionStorage, const MultiLevelTemplateArgumentList &Args); ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, int indexAdjustment, Optional<unsigned> NumExpansions, bool ExpectParameterPack); bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params, const FunctionProtoType::ExtParameterInfo *ExtParamInfos, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<QualType> &ParamTypes, SmallVectorImpl<ParmVarDecl *> *OutParams, ExtParameterInfoBuilder &ParamInfos); ExprResult SubstExpr(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs); /// Substitute the given template arguments into a list of /// expressions, expanding pack expansions if required. /// /// \param Exprs The list of expressions to substitute into. /// /// \param IsCall Whether this is some form of call, in which case /// default arguments will be dropped. /// /// \param TemplateArgs The set of template arguments to substitute. /// /// \param Outputs Will receive all of the substituted arguments. /// /// \returns true if an error occurred, false otherwise. bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<Expr *> &Outputs); StmtResult SubstStmt(Stmt *S, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateParameterList * SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); bool SubstTemplateArguments(ArrayRef<TemplateArgumentLoc> Args, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateArgumentListInfo &Outputs); Decl *SubstDecl(Decl *D, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); /// Substitute the name and return type of a defaulted 'operator<=>' to form /// an implicit 'operator=='. 
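///
/// For example (illustrative):
///
/// \code
/// struct S {
///   int v;
///   friend auto operator<=>(const S &, const S &) = default;
///   // an implicit 'operator==' is formed from the defaulted 'operator<=>'
/// };
/// \endcode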
FunctionDecl *SubstSpaceshipAsEqualEqual(CXXRecordDecl *RD, FunctionDecl *Spaceship); ExprResult SubstInitializer(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs, bool CXXDirectInit); bool SubstBaseSpecifiers(CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateClass(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK, bool Complain = true); bool InstantiateEnum(SourceLocation PointOfInstantiation, EnumDecl *Instantiation, EnumDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); bool InstantiateInClassInitializer( SourceLocation PointOfInstantiation, FieldDecl *Instantiation, FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); struct LateInstantiatedAttribute { const Attr *TmplAttr; LocalInstantiationScope *Scope; Decl *NewDecl; LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S, Decl *D) : TmplAttr(A), Scope(S), NewDecl(D) { } }; typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec; void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); void InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); void InstantiateDefaultCtorDefaultArgs(CXXConstructorDecl *Ctor); bool usesPartialOrExplicitSpecialization( SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec); bool InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK, bool Complain = true); void InstantiateClassMembers(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); void InstantiateClassTemplateSpecializationMembers( SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK); NestedNameSpecifierLoc SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS, const MultiLevelTemplateArgumentList &TemplateArgs); DeclarationNameInfo SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateName SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name, SourceLocation Loc, const MultiLevelTemplateArgumentList &TemplateArgs); bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs, TemplateArgumentListInfo &Result, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateDefaultArgument(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); void InstantiateExceptionSpec(SourceLocation PointOfInstantiation, FunctionDecl *Function); bool CheckInstantiatedFunctionTemplateConstraints( SourceLocation PointOfInstantiation, FunctionDecl *Decl, ArrayRef<TemplateArgument> TemplateArgs, ConstraintSatisfaction &Satisfaction); FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD, const TemplateArgumentList *Args, SourceLocation Loc); void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation, FunctionDecl *Function, bool Recursive = false, bool 
DefinitionRequired = false, bool AtEndOfTU = false); VarTemplateSpecializationDecl *BuildVarTemplateInstantiation( VarTemplateDecl *VarTemplate, VarDecl *FromVar, const TemplateArgumentList &TemplateArgList, const TemplateArgumentListInfo &TemplateArgsInfo, SmallVectorImpl<TemplateArgument> &Converted, SourceLocation PointOfInstantiation, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *StartingScope = nullptr); VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl( VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl, const MultiLevelTemplateArgumentList &TemplateArgs); void BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs, LateInstantiatedAttrVec *LateAttrs, DeclContext *Owner, LocalInstantiationScope *StartingScope, bool InstantiatingVarTemplate = false, VarTemplateSpecializationDecl *PrevVTSD = nullptr); void InstantiateVariableInitializer( VarDecl *Var, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateVariableDefinition(SourceLocation PointOfInstantiation, VarDecl *Var, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); void InstantiateMemInitializers(CXXConstructorDecl *New, const CXXConstructorDecl *Tmpl, const MultiLevelTemplateArgumentList &TemplateArgs); NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, bool FindingInstantiatedContext = false); DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC, const MultiLevelTemplateArgumentList &TemplateArgs); // Objective-C declarations. enum ObjCContainerKind { OCK_None = -1, OCK_Interface = 0, OCK_Protocol, OCK_Category, OCK_ClassExtension, OCK_Implementation, OCK_CategoryImplementation }; ObjCContainerKind getObjCContainerKind() const; DeclResult actOnObjCTypeParam(Scope *S, ObjCTypeParamVariance variance, SourceLocation varianceLoc, unsigned index, IdentifierInfo *paramName, SourceLocation paramLoc, SourceLocation colonLoc, ParsedType typeBound); ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc, ArrayRef<Decl *> typeParams, SourceLocation rAngleLoc); void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList); Decl *ActOnStartClassInterface( Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); void ActOnSuperClassOfClassInterface(Scope *S, SourceLocation AtInterfaceLoc, ObjCInterfaceDecl *IDecl, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange); void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs, SmallVectorImpl<SourceLocation> &ProtocolLocs, IdentifierInfo *SuperName, SourceLocation SuperLoc); Decl *ActOnCompatibilityAlias( SourceLocation AtCompatibilityAliasLoc, IdentifierInfo *AliasName, SourceLocation AliasLocation, IdentifierInfo *ClassName, SourceLocation ClassLocation); bool CheckForwardProtocolDeclarationForCircularDependency( IdentifierInfo *PName, SourceLocation &PLoc, SourceLocation PrevLoc, const ObjCList<ObjCProtocolDecl> &PList); Decl 
*ActOnStartProtocolInterface( SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName, SourceLocation ProtocolLoc, Decl *const *ProtoRefNames, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartCategoryInterface( SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *CategoryName, SourceLocation CategoryLoc, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperClassname, SourceLocation SuperClassLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *CatName, SourceLocation CatLoc, const ParsedAttributesView &AttrList); DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl, ArrayRef<Decl *> Decls); DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc, IdentifierInfo **IdentList, SourceLocation *IdentLocs, ArrayRef<ObjCTypeParamList *> TypeParamLists, unsigned NumElts); DeclGroupPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc, ArrayRef<IdentifierLocPair> IdentList, const ParsedAttributesView &attrList); void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer, ArrayRef<IdentifierLocPair> ProtocolId, SmallVectorImpl<Decl *> &Protocols); void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId, SourceLocation ProtocolLoc, IdentifierInfo *TypeArgId, SourceLocation TypeArgLoc, bool SelectProtocolFirst = false); /// Given a list of identifiers (and their locations), resolve the /// names to either Objective-C protocol qualifiers or type /// arguments, as appropriate. void actOnObjCTypeArgsOrProtocolQualifiers( Scope *S, ParsedType baseType, SourceLocation lAngleLoc, ArrayRef<IdentifierInfo *> identifiers, ArrayRef<SourceLocation> identifierLocs, SourceLocation rAngleLoc, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SourceLocation &protocolRAngleLoc, bool warnOnIncompleteProtocols); /// Build a an Objective-C protocol-qualified 'id' type where no /// base type was specified. TypeResult actOnObjCProtocolQualifierType( SourceLocation lAngleLoc, ArrayRef<Decl *> protocols, ArrayRef<SourceLocation> protocolLocs, SourceLocation rAngleLoc); /// Build a specialized and/or protocol-qualified Objective-C type. TypeResult actOnObjCTypeArgsAndProtocolQualifiers( Scope *S, SourceLocation Loc, ParsedType BaseType, SourceLocation TypeArgsLAngleLoc, ArrayRef<ParsedType> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<Decl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc); /// Build an Objective-C type parameter type. QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Build an Objective-C object pointer type. 
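///
/// Illustrative Objective-C spellings that produce such types:
///
/// \code
/// NSArray<NSString *> *names;      // type arguments
/// id<NSCopying, NSCoding> object;  // protocol qualifiers
/// \endcode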
QualType BuildObjCObjectType(QualType BaseType, SourceLocation Loc, SourceLocation TypeArgsLAngleLoc, ArrayRef<TypeSourceInfo *> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Ensure attributes are consistent with type. /// \param [in, out] Attributes The attributes to check; they will /// be modified to be consistent with \p PropertyTy. void CheckObjCPropertyAttributes(Decl *PropertyPtrTy, SourceLocation Loc, unsigned &Attributes, bool propertyInPrimaryClass); /// Process the specified property declaration and create decls for the /// setters and getters as needed. /// \param property The property declaration being processed void ProcessPropertyDecl(ObjCPropertyDecl *property); void DiagnosePropertyMismatch(ObjCPropertyDecl *Property, ObjCPropertyDecl *SuperProperty, const IdentifierInfo *Name, bool OverridingProtocolProperty); void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT, ObjCInterfaceDecl *ID); Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods = None, ArrayRef<DeclGroupPtrTy> allTUVars = None); Decl *ActOnProperty(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, ObjCDeclSpec &ODS, Selector GetterSel, Selector SetterSel, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); Decl *ActOnPropertyImplDecl(Scope *S, SourceLocation AtLoc, SourceLocation PropertyLoc, bool ImplKind, IdentifierInfo *PropertyId, IdentifierInfo *PropertyIvar, SourceLocation PropertyIvarLoc, ObjCPropertyQueryKind QueryKind); enum ObjCSpecialMethodKind { OSMK_None, OSMK_Alloc, OSMK_New, OSMK_Copy, OSMK_RetainingInit, OSMK_NonRetainingInit }; struct ObjCArgInfo { IdentifierInfo *Name; SourceLocation NameLoc; // The Type is null if no type was specified, and the DeclSpec is invalid // in this case. ParsedType Type; ObjCDeclSpec DeclSpec; /// ArgAttrs - Attribute list for this argument. ParsedAttributesView ArgAttrs; }; Decl *ActOnMethodDeclaration( Scope *S, SourceLocation BeginLoc, // location of the + or -. SourceLocation EndLoc, // location of the ; or {. tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType, ArrayRef<SourceLocation> SelectorLocs, Selector Sel, // optional arguments. The number of types/arguments is obtained // from the Sel.getNumArgs(). ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind, bool isVariadic, bool MethodDefinition); ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel, const ObjCObjectPointerType *OPT, bool IsInstance); ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty, bool IsInstance); bool CheckARCMethodDecl(ObjCMethodDecl *method); bool inferObjCARCLifetime(ValueDecl *decl); void deduceOpenCLAddressSpace(ValueDecl *decl); ExprResult HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT, Expr *BaseExpr, SourceLocation OpLoc, DeclarationName MemberName, SourceLocation MemberLoc, SourceLocation SuperLoc, QualType SuperType, bool Super); ExprResult ActOnClassPropertyRefExpr(IdentifierInfo &receiverName, IdentifierInfo &propertyName, SourceLocation receiverNameLoc, SourceLocation propertyNameLoc); ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc); /// Describes the kind of message expression indicated by a message /// send that starts with an identifier. 
enum ObjCMessageKind { /// The message is sent to 'super'. ObjCSuperMessage, /// The message is an instance message. ObjCInstanceMessage, /// The message is a class message, and the identifier is a type /// name. ObjCClassMessage }; ObjCMessageKind getObjCMessageKind(Scope *S, IdentifierInfo *Name, SourceLocation NameLoc, bool IsSuper, bool HasTrailingDot, ParsedType &ReceiverType); ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildClassMessageImplicit(QualType ReceiverType, bool isSuperReceiver, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnClassMessage(Scope *S, ParsedType Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildInstanceMessage(Expr *Receiver, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildInstanceMessageImplicit(Expr *Receiver, QualType ReceiverType, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnInstanceMessage(Scope *S, Expr *Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, TypeSourceInfo *TSInfo, Expr *SubExpr); ExprResult ActOnObjCBridgedCast(Scope *S, SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, ParsedType Type, SourceLocation RParenLoc, Expr *SubExpr); void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr); void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr); bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr, CastKind &Kind); bool checkObjCBridgeRelatedComponents(SourceLocation Loc, QualType DestType, QualType SrcType, ObjCInterfaceDecl *&RelatedClass, ObjCMethodDecl *&ClassMethod, ObjCMethodDecl *&InstanceMethod, TypedefNameDecl *&TDNDecl, bool CfToNs, bool Diagnose = true); bool CheckObjCBridgeRelatedConversions(SourceLocation Loc, QualType DestType, QualType SrcType, Expr *&SrcExpr, bool Diagnose = true); bool CheckConversionToObjCLiteral(QualType DstType, Expr *&SrcExpr, bool Diagnose = true); bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall); /// Check whether the given new method is a valid override of the /// given overridden method, and set any properties that should be inherited. void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod, const ObjCMethodDecl *Overridden); /// Describes the compatibility of a result type with its method. 
enum ResultTypeCompatibilityKind { RTC_Compatible, RTC_Incompatible, RTC_Unknown }; void CheckObjCMethodDirectOverrides(ObjCMethodDecl *method, ObjCMethodDecl *overridden); void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod, ObjCInterfaceDecl *CurrentClass, ResultTypeCompatibilityKind RTC); enum PragmaOptionsAlignKind { POAK_Native, // #pragma options align=native POAK_Natural, // #pragma options align=natural POAK_Packed, // #pragma options align=packed POAK_Power, // #pragma options align=power POAK_Mac68k, // #pragma options align=mac68k POAK_Reset // #pragma options align=reset }; /// ActOnPragmaClangSection - Called on well formed \#pragma clang section void ActOnPragmaClangSection(SourceLocation PragmaLoc, PragmaClangSectionAction Action, PragmaClangSectionKind SecKind, StringRef SecName); /// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align. void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind, SourceLocation PragmaLoc); /// ActOnPragmaPack - Called on well formed \#pragma pack(...). void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action, StringRef SlotLabel, Expr *Alignment); enum class PragmaAlignPackDiagnoseKind { NonDefaultStateAtInclude, ChangedStateAtExit }; void DiagnoseNonDefaultPragmaAlignPack(PragmaAlignPackDiagnoseKind Kind, SourceLocation IncludeLoc); void DiagnoseUnterminatedPragmaAlignPack(); /// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off]. void ActOnPragmaMSStruct(PragmaMSStructKind Kind); /// ActOnPragmaMSComment - Called on well formed /// \#pragma comment(kind, "arg"). void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind, StringRef Arg); /// ActOnPragmaMSPointersToMembers - called on well formed \#pragma /// pointers_to_members(representation method[, general purpose /// representation]). void ActOnPragmaMSPointersToMembers( LangOptions::PragmaMSPointersToMembersKind Kind, SourceLocation PragmaLoc); /// Called on well formed \#pragma vtordisp(). void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action, SourceLocation PragmaLoc, MSVtorDispMode Value); enum PragmaSectionKind { PSK_DataSeg, PSK_BSSSeg, PSK_ConstSeg, PSK_CodeSeg, }; bool UnifySection(StringRef SectionName, int SectionFlags, NamedDecl *TheDecl); bool UnifySection(StringRef SectionName, int SectionFlags, SourceLocation PragmaSectionLocation); /// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg. void ActOnPragmaMSSeg(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, StringLiteral *SegmentName, llvm::StringRef PragmaName); /// Called on well formed \#pragma section(). void ActOnPragmaMSSection(SourceLocation PragmaLocation, int SectionFlags, StringLiteral *SegmentName); /// Called on well-formed \#pragma init_seg(). void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation, StringLiteral *SegmentName); /// Called on #pragma clang __debug dump II void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II); /// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name, StringRef Value); /// Are precise floating point semantics currently enabled? 
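/// (Illustrative note, not part of the upstream interface docs: "precise" here means value-changing rewrites such as reassociation are disallowed. For example, in IEEE double precision
/// \code
///   double x = (1e16 + -1e16) + 1.0;  // == 1.0
///   double y = 1e16 + (-1e16 + 1.0);  // == 0.0, the 1.0 is rounded away
/// \endcode
/// so a compiler may only rewrite x into y when reassociation is enabled.)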
bool isPreciseFPEnabled() { return !CurFPFeatures.getAllowFPReassociate() && !CurFPFeatures.getNoSignedZero() && !CurFPFeatures.getAllowReciprocal() && !CurFPFeatures.getAllowApproxFunc(); } /// ActOnPragmaFloatControl - Call on well-formed \#pragma float_control void ActOnPragmaFloatControl(SourceLocation Loc, PragmaMsStackAction Action, PragmaFloatControlKind Value); /// ActOnPragmaUnused - Called on well-formed '\#pragma unused'. void ActOnPragmaUnused(const Token &Identifier, Scope *curScope, SourceLocation PragmaLoc); /// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... . void ActOnPragmaVisibility(const IdentifierInfo* VisType, SourceLocation PragmaLoc); NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II, SourceLocation Loc); void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W); /// ActOnPragmaWeakID - Called on well formed \#pragma weak ident. void ActOnPragmaWeakID(IdentifierInfo* WeakName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc); /// ActOnPragmaRedefineExtname - Called on well formed /// \#pragma redefine_extname oldname newname. void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident. void ActOnPragmaWeakAlias(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaFPContract - Called on well formed /// \#pragma {STDC,OPENCL} FP_CONTRACT and /// \#pragma clang fp contract void ActOnPragmaFPContract(SourceLocation Loc, LangOptions::FPModeKind FPC); /// Called on well formed /// \#pragma clang fp reassociate void ActOnPragmaFPReassociate(SourceLocation Loc, bool IsEnabled); /// ActOnPragmaFEnvAccess - Called on well formed /// \#pragma STDC FENV_ACCESS void ActOnPragmaFEnvAccess(SourceLocation Loc, bool IsEnabled); /// Called on well formed '\#pragma clang fp' that has option 'exceptions'. void ActOnPragmaFPExceptions(SourceLocation Loc, LangOptions::FPExceptionModeKind); /// Called to set constant rounding mode for floating point operations. void setRoundingMode(SourceLocation Loc, llvm::RoundingMode); /// Called to set exception behavior for floating point operations. void setExceptionMode(SourceLocation Loc, LangOptions::FPExceptionModeKind); /// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to /// the record decl, to handle '\#pragma pack' and '\#pragma options align'. void AddAlignmentAttributesForRecord(RecordDecl *RD); /// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record. void AddMsStructLayoutForRecord(RecordDecl *RD); /// PushNamespaceVisibilityAttr - Note that we've entered a /// namespace with a visibility attribute. void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr, SourceLocation Loc); /// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used, /// add an appropriate visibility attribute. void AddPushedVisibilityAttribute(Decl *RD); /// PopPragmaVisibility - Pop the top element of the visibility stack; used /// for '\#pragma GCC visibility' and visibility attributes on namespaces. void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc); /// FreeVisContext - Deallocate and null out VisContext. 
void FreeVisContext(); /// AddCFAuditedAttribute - Check whether we're currently within /// '\#pragma clang arc_cf_code_audited' and, if so, consider adding /// the appropriate attribute. void AddCFAuditedAttribute(Decl *D); void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute, SourceLocation PragmaLoc, attr::ParsedSubjectMatchRuleSet Rules); void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc, const IdentifierInfo *Namespace); /// Called on well-formed '\#pragma clang attribute pop'. void ActOnPragmaAttributePop(SourceLocation PragmaLoc, const IdentifierInfo *Namespace); /// Adds the attributes that have been specified using the /// '\#pragma clang attribute push' directives to the given declaration. void AddPragmaAttributes(Scope *S, Decl *D); void DiagnoseUnterminatedPragmaAttribute(); /// Called on well formed \#pragma clang optimize. void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc); /// Get the location for the currently active "\#pragma clang optimize /// off". If this location is invalid, then the state of the pragma is "on". SourceLocation getOptimizeOffPragmaLocation() const { return OptimizeOffPragmaLocation; } /// Only called on function definitions; if there is a pragma in scope /// with the effect of a range-based optnone, consider marking the function /// with attribute optnone. void AddRangeBasedOptnone(FunctionDecl *FD); /// Adds the 'optnone' attribute to the function declaration if there /// are no conflicts; Loc represents the location causing the 'optnone' /// attribute to be added (usually because of a pragma). void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc); /// AddAlignedAttr - Adds an aligned attribute to a particular declaration. void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E, bool IsPackExpansion); void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, TypeSourceInfo *T, bool IsPackExpansion); /// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular /// declaration. void AddAssumeAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E, Expr *OE); /// AddAllocAlignAttr - Adds an alloc_align attribute to a particular /// declaration. void AddAllocAlignAttr(Decl *D, const AttributeCommonInfo &CI, Expr *ParamExpr); /// AddAlignValueAttr - Adds an align_value attribute to a particular /// declaration. void AddAlignValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E); /// AddAnnotationAttr - Adds an annotation Annot with Args arguments to D. void AddAnnotationAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Annot, MutableArrayRef<Expr *> Args); /// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular /// declaration. void AddLaunchBoundsAttr(Decl *D, const AttributeCommonInfo &CI, Expr *MaxThreads, Expr *MinBlocks); /// AddModeAttr - Adds a mode attribute to a particular declaration. void AddModeAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Name, bool InInstantiation = false); void AddParameterABIAttr(Decl *D, const AttributeCommonInfo &CI, ParameterABI ABI); enum class RetainOwnershipKind {NS, CF, OS}; void AddXConsumedAttr(Decl *D, const AttributeCommonInfo &CI, RetainOwnershipKind K, bool IsTemplateInstantiation); /// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size /// attribute to a particular declaration. void addAMDGPUFlatWorkGroupSizeAttr(Decl *D, const AttributeCommonInfo &CI, Expr *Min, Expr *Max); /// addAMDGPUWavePersEUAttr - Adds an amdgpu_waves_per_eu attribute to a /// particular declaration. 
void addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI, Expr *Min, Expr *Max); bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type); //===--------------------------------------------------------------------===// // C++ Coroutines TS // bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc, StringRef Keyword); ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E); StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, UnresolvedLookupExpr* Lookup); ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E); StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs); bool buildCoroutineParameterMoves(SourceLocation Loc); VarDecl *buildCoroutinePromise(SourceLocation Loc); void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body); ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc, SourceLocation FuncLoc); /// Check that the expression co_await promise.final_suspend() shall not be /// potentially-throwing. bool checkFinalSuspendNoThrow(const Stmt *FinalSuspend); //===--------------------------------------------------------------------===// // OpenCL extensions. // private: std::string CurrOpenCLExtension; /// Extensions required by an OpenCL type. llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap; /// Extensions required by an OpenCL declaration. llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap; public: llvm::StringRef getCurrentOpenCLExtension() const { return CurrOpenCLExtension; } /// Check if a function declaration \p FD associates with any /// extensions present in OpenCLDeclExtMap and if so return the /// extension(s) name(s). std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD); /// Check if a function type \p FT associates with any /// extensions present in OpenCLTypeExtMap and if so return the /// extension(s) name(s). std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT); /// Find an extension in an appropriate extension map and return its name template<typename T, typename MapT> std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map); void setCurrentOpenCLExtension(llvm::StringRef Ext) { CurrOpenCLExtension = std::string(Ext); } /// Set OpenCL extensions for a type which can only be used when these /// OpenCL extensions are enabled. If \p Exts is empty, do nothing. /// \param Exts A space separated list of OpenCL extensions. void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts); /// Set OpenCL extensions for a declaration which can only be /// used when these OpenCL extensions are enabled. If \p Exts is empty, do /// nothing. /// \param Exts A space separated list of OpenCL extensions. void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts); /// Set current OpenCL extensions for a type which can only be used /// when these OpenCL extensions are enabled. If current OpenCL extension is /// empty, do nothing. void setCurrentOpenCLExtensionForType(QualType T); /// Set current OpenCL extensions for a declaration which /// can only be used when these OpenCL extensions are enabled. If current /// OpenCL extension is empty, do nothing. 
void setCurrentOpenCLExtensionForDecl(Decl *FD); bool isOpenCLDisabledDecl(Decl *FD); /// Check if type \p T corresponding to declaration specifier \p DS /// is disabled due to required OpenCL extensions being disabled. If so, /// emit diagnostics. /// \return true if type is disabled. bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T); /// Check if declaration \p D used by expression \p E /// is disabled due to required OpenCL extensions being disabled. If so, /// emit diagnostics. /// \return true if type is disabled. bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E); //===--------------------------------------------------------------------===// // OpenMP directives and clauses. // private: void *VarDataSharingAttributesStack; /// Number of nested '#pragma omp declare target' directives. SmallVector<SourceLocation, 4> DeclareTargetNesting; /// Initialization of data-sharing attributes stack. void InitDataSharingAttributesStack(); void DestroyDataSharingAttributesStack(); ExprResult VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind, bool StrictlyPositive = true); /// Returns OpenMP nesting level for current directive. unsigned getOpenMPNestingLevel() const; /// Adjusts the function scopes index for the target-based regions. void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex, unsigned Level) const; /// Returns the number of scopes associated with the construct on the given /// OpenMP level. int getNumberOfConstructScopes(unsigned Level) const; /// Push new OpenMP function region for non-capturing function. void pushOpenMPFunctionRegion(); /// Pop OpenMP function region for non-capturing function. void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI); /// Checks if a type or a declaration is disabled due to the owning extension /// being disabled, and emits diagnostic messages if it is disabled. /// \param D type or declaration to be checked. /// \param DiagLoc source location for the diagnostic message. /// \param DiagInfo information to be emitted for the diagnostic message. /// \param SrcRange source range of the declaration. /// \param Map maps type or declaration to the extensions. /// \param Selector selects diagnostic message: 0 for type and 1 for /// declaration. /// \return true if the type or declaration is disabled. template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT> bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo, MapT &Map, unsigned Selector = 0, SourceRange SrcRange = SourceRange()); /// Helper to keep information about the current `omp begin/end declare /// variant` nesting. struct OMPDeclareVariantScope { /// The associated OpenMP context selector. OMPTraitInfo *TI; /// The associated OpenMP context selector mangling. std::string NameSuffix; OMPDeclareVariantScope(OMPTraitInfo &TI); }; /// Return the OMPTraitInfo for the surrounding scope, if any. OMPTraitInfo *getOMPTraitInfoForSurroundingScope() { return OMPDeclareVariantScopes.empty() ? nullptr : OMPDeclareVariantScopes.back().TI; } /// The current `omp begin/end declare variant` scopes. SmallVector<OMPDeclareVariantScope, 4> OMPDeclareVariantScopes; /// The current `omp begin/end assumes` scopes. SmallVector<AssumptionAttr *, 4> OMPAssumeScoped; /// All `omp assumes` we encountered so far. SmallVector<AssumptionAttr *, 4> OMPAssumeGlobal; public: /// The declarator \p D defines a function in the scope \p S which is nested /// in an `omp begin/end declare variant` scope. 
In this method we create a /// declaration for \p D and rename \p D according to the OpenMP context /// selector of the surrounding scope. Return all base functions in \p Bases. void ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope( Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, SmallVectorImpl<FunctionDecl *> &Bases); /// Register \p D as specialization of all base functions in \p Bases in the /// current `omp begin/end declare variant` scope. void ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope( Decl *D, SmallVectorImpl<FunctionDecl *> &Bases); /// Act on \p D, a function definition inside of an `omp [begin/end] assumes`. void ActOnFinishedFunctionDefinitionInOpenMPAssumeScope(Decl *D); /// Can we exit an OpenMP declare variant scope at the moment. bool isInOpenMPDeclareVariantScope() const { return !OMPDeclareVariantScopes.empty(); } /// Given the potential call expression \p Call, determine if there is a /// specialization via the OpenMP declare variant mechanism available. If /// there is, return the specialized call expression, otherwise return the /// original \p Call. ExprResult ActOnOpenMPCall(ExprResult Call, Scope *Scope, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig); /// Handle a `omp begin declare variant`. void ActOnOpenMPBeginDeclareVariant(SourceLocation Loc, OMPTraitInfo &TI); /// Handle a `omp end declare variant`. void ActOnOpenMPEndDeclareVariant(); /// Checks if the variant/multiversion functions are compatible. bool areMultiversionVariantFunctionsCompatible( const FunctionDecl *OldFD, const FunctionDecl *NewFD, const PartialDiagnostic &NoProtoDiagID, const PartialDiagnosticAt &NoteCausedDiagIDAt, const PartialDiagnosticAt &NoSupportDiagIDAt, const PartialDiagnosticAt &DiffDiagIDAt, bool TemplatesSupported, bool ConstexprSupported, bool CLinkageMayDiffer); /// Function tries to capture lambda's captured variables in the OpenMP region /// before the original lambda is captured. void tryCaptureOpenMPLambdas(ValueDecl *V); /// Return true if the provided declaration \a VD should be captured by /// reference. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. /// \param OpenMPCaptureLevel Capture level within an OpenMP construct. bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level, unsigned OpenMPCaptureLevel) const; /// Check if the specified variable is used in one of the private /// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP /// constructs. VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false, unsigned StopAt = 0); ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK, ExprObjectKind OK, SourceLocation Loc); /// If the current region is a loop-based region, mark the start of the loop /// construct. void startOpenMPLoop(); /// If the current region is a range loop-based region, mark the start of the /// loop construct. void startOpenMPCXXRangeFor(); /// Check if the specified variable is used in 'private' clause. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. OpenMPClauseKind isOpenMPPrivateDecl(ValueDecl *D, unsigned Level, unsigned CapLevel) const; /// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.) /// for \p FD based on DSA for the provided corresponding captured declaration /// \p D. 
void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level); /// Check if the specified variable is captured by 'target' directive. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level, unsigned CaptureLevel) const; /// Check if the specified global variable must be captured by outer capture /// regions. /// \param Level Relative level of nested OpenMP construct for that /// the check is performed. bool isOpenMPGlobalCapturedDecl(ValueDecl *D, unsigned Level, unsigned CaptureLevel) const; ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc, Expr *Op); /// Called on start of new data sharing attribute block. void StartOpenMPDSABlock(OpenMPDirectiveKind K, const DeclarationNameInfo &DirName, Scope *CurScope, SourceLocation Loc); /// Start analysis of clauses. void StartOpenMPClause(OpenMPClauseKind K); /// End analysis of clauses. void EndOpenMPClause(); /// Called on end of data sharing attribute block. void EndOpenMPDSABlock(Stmt *CurDirective); /// Check if the current region is an OpenMP loop region and if it is, /// mark loop control variable, used in \p Init for loop initialization, as /// private by default. /// \param Init First part of the for loop. void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init); // OpenMP directives and clauses. /// Called on correct id-expression from the '#pragma omp /// threadprivate'. ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id, OpenMPDirectiveKind Kind); /// Called on well-formed '#pragma omp threadprivate'. DeclGroupPtrTy ActOnOpenMPThreadprivateDirective( SourceLocation Loc, ArrayRef<Expr *> VarList); /// Builds a new OpenMPThreadPrivateDecl and checks its correctness. OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef<Expr *> VarList); /// Called on well-formed '#pragma omp allocate'. DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc, ArrayRef<Expr *> VarList, ArrayRef<OMPClause *> Clauses, DeclContext *Owner = nullptr); /// Called on well-formed '#pragma omp [begin] assume[s]'. void ActOnOpenMPAssumesDirective(SourceLocation Loc, OpenMPDirectiveKind DKind, ArrayRef<StringRef> Assumptions, bool SkippedClauses); /// Check if there is an active global `omp begin assumes` directive. bool isInOpenMPAssumeScope() const { return !OMPAssumeScoped.empty(); } /// Check if there is an active global `omp assumes` directive. bool hasGlobalOpenMPAssumes() const { return !OMPAssumeGlobal.empty(); } /// Called on well-formed '#pragma omp end assumes'. void ActOnOpenMPEndAssumesDirective(); /// Called on well-formed '#pragma omp requires'. DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc, ArrayRef<OMPClause *> ClauseList); /// Check restrictions on Requires directive OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc, ArrayRef<OMPClause *> Clauses); /// Check if the specified type is allowed to be used in 'omp declare /// reduction' construct. QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc, TypeResult ParsedType); /// Called on start of '#pragma omp declare reduction'. DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart( Scope *S, DeclContext *DC, DeclarationName Name, ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes, AccessSpecifier AS, Decl *PrevDeclInScope = nullptr); /// Initialize declare reduction construct initializer. 
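/// (Illustrative sketch of the construct these callbacks process; the reduction identifier 'maxint' is only an example name:
/// \code
///   #include <limits.h>
///   #pragma omp declare reduction(maxint : int :                            \
///       omp_out = omp_in > omp_out ? omp_in : omp_out)                      \
///       initializer(omp_priv = INT_MIN)
/// \endcode
/// The combiner expression after the second colon is handled by the
/// ActOnOpenMPDeclareReductionCombinerStart/End callbacks, and the optional
/// 'initializer(...)' part by the InitializerStart/InitializerEnd callbacks
/// declared below.)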
void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D); /// Finish current declare reduction construct initializer. void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner); /// Initialize declare reduction construct initializer. /// \return omp_priv variable. VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D); /// Finish current declare reduction construct initializer. void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer, VarDecl *OmpPrivParm); /// Called at the end of '#pragma omp declare reduction'. DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd( Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid); /// Check variable declaration in 'omp declare mapper' construct. TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D); /// Check if the specified type is allowed to be used in 'omp declare /// mapper' construct. QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc, TypeResult ParsedType); /// Called on start of '#pragma omp declare mapper'. DeclGroupPtrTy ActOnOpenMPDeclareMapperDirective( Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType, SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS, Expr *MapperVarRef, ArrayRef<OMPClause *> Clauses, Decl *PrevDeclInScope = nullptr); /// Build the mapper variable of '#pragma omp declare mapper'. ExprResult ActOnOpenMPDeclareMapperDirectiveVarDecl(Scope *S, QualType MapperType, SourceLocation StartLoc, DeclarationName VN); bool isOpenMPDeclareMapperVarDeclAllowed(const VarDecl *VD) const; const ValueDecl *getOpenMPDeclareMapperVarName() const; /// Called on the start of target region i.e. '#pragma omp declare target'. bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc); /// Called at the end of target region i.e. '#pragma omp end declare target'. void ActOnFinishOpenMPDeclareTargetDirective(); /// Searches for the provided declaration name for OpenMP declare target /// directive. NamedDecl * lookupOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id, NamedDeclSetType &SameDirectiveDecls); /// Called on correct id-expression from the '#pragma omp declare target'. void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc, OMPDeclareTargetDeclAttr::MapTypeTy MT, OMPDeclareTargetDeclAttr::DevTypeTy DT); /// Check declaration inside target region. void checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D, SourceLocation IdLoc = SourceLocation()); /// Finishes analysis of the deferred function calls that may be declared as /// host/nohost during device/host compilation. void finalizeOpenMPDelayedAnalysis(const FunctionDecl *Caller, const FunctionDecl *Callee, SourceLocation Loc); /// Return true inside OpenMP declare target region. bool isInOpenMPDeclareTargetContext() const { return !DeclareTargetNesting.empty(); } /// Return true inside OpenMP target region. bool isInOpenMPTargetExecutionDirective() const; /// Return the number of captured regions created for an OpenMP directive. static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind); /// Initialization of captured region for OpenMP region. void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope); /// Called for syntactical loops (ForStmt or CXXForRangeStmt) associated to /// an OpenMP loop directive. StmtResult ActOnOpenMPCanonicalLoop(Stmt *AStmt); /// End of OpenMP region. /// /// \param S Statement associated with the current OpenMP region. 
/// \param Clauses List of clauses for the current OpenMP region. /// /// \returns Statement for finished OpenMP region. StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses); StmtResult ActOnOpenMPExecutableDirective( OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName, OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); using VarsWithInheritedDSAType = llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>; /// Called on well-formed '\#pragma omp simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '#pragma omp tile' after parsing of its clauses and /// the associated statement. StmtResult ActOnOpenMPTileDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp for' after parsing /// of the associated statement. StmtResult ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp for simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp sections' after parsing /// of the associated statement. StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp section' after parsing of the /// associated statement. StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp single' after parsing of the /// associated statement. StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp master' after parsing of the /// associated statement. StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp critical' after parsing of the /// associated statement. StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel for' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel for simd' after /// parsing of the associated statement. 
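/// (Illustrative, self-contained example of the directive this callback handles; the function name 'saxpy' is just an example:
/// \code
///   void saxpy(int n, float a, const float *x, float *y) {
///   #pragma omp parallel for simd
///     for (int i = 0; i < n; ++i)
///       y[i] = a * x[i] + y[i];
///   }
/// \endcode
/// compiled with -fopenmp.)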
StmtResult ActOnOpenMPParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel master' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelMasterDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel sections' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp task' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskyield'. StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp barrier'. StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskwait'. StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskgroup'. StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp flush'. StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp depobj'. StmtResult ActOnOpenMPDepobjDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp scan'. StmtResult ActOnOpenMPScanDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp ordered' after parsing of the /// associated statement. StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp atomic' after parsing of the /// associated statement. StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target data' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target enter data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target exit data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target parallel' after /// parsing of the associated statement. 
StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target parallel for' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp cancellation point'. StmtResult ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp cancel'. StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp taskloop' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp taskloop simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp master taskloop' after parsing of the /// associated statement. StmtResult ActOnOpenMPMasterTaskLoopDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp master taskloop simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPMasterTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel master taskloop' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelMasterTaskLoopDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel master taskloop simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelMasterTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target update'. StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp distribute parallel for' after /// parsing of the associated statement. 
StmtResult ActOnOpenMPDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute' after parsing of /// the associated statement. StmtResult ActOnOpenMPTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target teams distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for' /// after parsing of the associated statement. 
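/// (Illustrative example of the combined offloading directive this callback handles; 'vadd' is just an example name:
/// \code
///   void vadd(float *a, const float *b, const float *c, int n) {
///   #pragma omp target teams distribute parallel for                        \
///       map(tofrom: a[0:n]) map(to: b[0:n], c[0:n])
///     for (int i = 0; i < n; ++i)
///       a[i] = b[i] + c[i];
///   }
/// \endcode )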
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for /// simd' after parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp interop'. StmtResult ActOnOpenMPInteropDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Checks correctness of linear modifiers. bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind, SourceLocation LinLoc); /// Checks that the specified declaration matches requirements for the linear /// decls. bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc, OpenMPLinearClauseKind LinKind, QualType Type, bool IsDeclareSimd = false); /// Called on well-formed '\#pragma omp declare simd' after parsing of /// the associated method/function. DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective( DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS, Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds, ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears, ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR); /// Checks '\#pragma omp declare variant' variant function and original /// functions after parsing of the associated method/function. /// \param DG Function declaration to which declare variant directive is /// applied to. /// \param VariantRef Expression that references the variant function, which /// must be used instead of the original one, specified in \p DG. /// \param TI The trait info object representing the match clause. /// \returns None, if the function/variant function are not compatible with /// the pragma, pair of original function/variant ref expression otherwise. Optional<std::pair<FunctionDecl *, Expr *>> checkOpenMPDeclareVariantFunction(DeclGroupPtrTy DG, Expr *VariantRef, OMPTraitInfo &TI, SourceRange SR); /// Called on well-formed '\#pragma omp declare variant' after parsing of /// the associated method/function. /// \param FD Function declaration to which declare variant directive is /// applied to. /// \param VariantRef Expression that references the variant function, which /// must be used instead of the original one, specified in \p DG. /// \param TI The context traits associated with the function variant. void ActOnOpenMPDeclareVariantDirective(FunctionDecl *FD, Expr *VariantRef, OMPTraitInfo &TI, SourceRange SR); OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'allocator' clause. OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'if' clause. 
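/// (Illustrative use of the clause, including the optional directive-name-modifier that populates \p NameModifier below; 'f' is just an example function:
/// \code
///   void f(float *p, int n) {
///   #pragma omp target parallel if(target: n > 4096) if(parallel: n > 64)
///     { p[0] = 0; }
///   }
/// \endcode )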
OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier, Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation NameModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'final' clause. OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_threads' clause. OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'safelen' clause. OMPClause *ActOnOpenMPSafelenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'simdlen' clause. OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'sizes' clause. OMPClause *ActOnOpenMPSizesClause(ArrayRef<Expr *> SizeExprs, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'collapse' clause. OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'ordered' clause. OMPClause * ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc, SourceLocation LParenLoc = SourceLocation(), Expr *NumForLoops = nullptr); /// Called on well-formed 'grainsize' clause. OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_tasks' clause. OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'hint' clause. OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'detach' clause. OMPClause *ActOnOpenMPDetachClause(Expr *Evt, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind, unsigned Argument, SourceLocation ArgumentLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'default' clause. OMPClause *ActOnOpenMPDefaultClause(llvm::omp::DefaultKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'proc_bind' clause. OMPClause *ActOnOpenMPProcBindClause(llvm::omp::ProcBindKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'order' clause. OMPClause *ActOnOpenMPOrderClause(OpenMPOrderClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'update' clause. OMPClause *ActOnOpenMPUpdateClause(OpenMPDependClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSingleExprWithArgClause( OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc, SourceLocation EndLoc); /// Called on well-formed 'schedule' clause. 
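/// (Illustrative use showing how the parameters below map onto source; 'f' is just an example function:
/// \code
///   void f(float *a, int n) {
///   #pragma omp parallel for schedule(monotonic: dynamic, 4)
///     for (int i = 0; i < n; ++i)
///       a[i] *= 2.0f;
///   }
/// \endcode
/// Here 'monotonic' arrives as \p M1, 'dynamic' as \p Kind, and '4' as \p ChunkSize.)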
OMPClause *ActOnOpenMPScheduleClause( OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2, OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'nowait' clause. OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'untied' clause. OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'mergeable' clause. OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'read' clause. OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'write' clause. OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'update' clause. OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'capture' clause. OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'seq_cst' clause. OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'acq_rel' clause. OMPClause *ActOnOpenMPAcqRelClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'acquire' clause. OMPClause *ActOnOpenMPAcquireClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'release' clause. OMPClause *ActOnOpenMPReleaseClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'relaxed' clause. OMPClause *ActOnOpenMPRelaxedClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'init' clause. OMPClause *ActOnOpenMPInitClause(Expr *InteropVar, ArrayRef<Expr *> PrefExprs, bool IsTarget, bool IsTargetSync, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation VarLoc, SourceLocation EndLoc); /// Called on well-formed 'use' clause. OMPClause *ActOnOpenMPUseClause(Expr *InteropVar, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation VarLoc, SourceLocation EndLoc); /// Called on well-formed 'destroy' clause. OMPClause *ActOnOpenMPDestroyClause(Expr *InteropVar, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation VarLoc, SourceLocation EndLoc); /// Called on well-formed 'threads' clause. OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'simd' clause. OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'nogroup' clause. OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'unified_address' clause. OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'unified_shared_memory' clause. OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'reverse_offload' clause. OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'dynamic_allocators' clause. 
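/// (Illustrative: this and the neighbouring requirement clauses appear on the requires directive, e.g.
/// \code
///   #pragma omp requires unified_shared_memory dynamic_allocators
/// \endcode )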
OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'atomic_default_mem_order' clause. OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause( OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPVarListClause( OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *DepModOrTailExpr, const OMPVarListLocTy &Locs, SourceLocation ColonLoc, CXXScopeSpec &ReductionOrMapperIdScopeSpec, DeclarationNameInfo &ReductionOrMapperId, int ExtraModifier, ArrayRef<OpenMPMapModifierKind> MapTypeModifiers, ArrayRef<SourceLocation> MapTypeModifiersLoc, bool IsMapTypeImplicit, SourceLocation ExtraModifierLoc, ArrayRef<OpenMPMotionModifierKind> MotionModifiers, ArrayRef<SourceLocation> MotionModifiersLoc); /// Called on well-formed 'inclusive' clause. OMPClause *ActOnOpenMPInclusiveClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'exclusive' clause. OMPClause *ActOnOpenMPExclusiveClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'allocate' clause. OMPClause * ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation ColonLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'private' clause. OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'firstprivate' clause. OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'lastprivate' clause. OMPClause *ActOnOpenMPLastprivateClause( ArrayRef<Expr *> VarList, OpenMPLastprivateModifier LPKind, SourceLocation LPKindLoc, SourceLocation ColonLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'shared' clause. OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'reduction' clause. OMPClause *ActOnOpenMPReductionClause( ArrayRef<Expr *> VarList, OpenMPReductionClauseModifier Modifier, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'task_reduction' clause. OMPClause *ActOnOpenMPTaskReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'in_reduction' clause. OMPClause *ActOnOpenMPInReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'linear' clause. 
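// Illustrative sketch (hypothetical caller, not from this header): the
// variable-list clauses above all receive the parsed variable references as
// an ArrayRef of expressions. 'Actions', 'Vars' and the source locations are
// example-only names.
//
//   SmallVector<Expr *, 8> Vars; // DeclRefExprs collected while parsing
//   OMPClause *Priv = Actions.ActOnOpenMPPrivateClause(
//       Vars, StartLoc, LParenLoc, EndLoc);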
OMPClause * ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step, SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind LinKind, SourceLocation LinLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'aligned' clause. OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList, Expr *Alignment, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'copyin' clause. OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'copyprivate' clause. OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'flush' pseudo clause. OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'depobj' pseudo clause. OMPClause *ActOnOpenMPDepobjClause(Expr *Depobj, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'depend' clause. OMPClause * ActOnOpenMPDependClause(Expr *DepModifier, OpenMPDependClauseKind DepKind, SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'device' clause. OMPClause *ActOnOpenMPDeviceClause(OpenMPDeviceClauseModifier Modifier, Expr *Device, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ModifierLoc, SourceLocation EndLoc); /// Called on well-formed 'map' clause. OMPClause * ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers, ArrayRef<SourceLocation> MapTypeModifiersLoc, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, OpenMPMapClauseKind MapType, bool IsMapTypeImplicit, SourceLocation MapLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'num_teams' clause. OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'thread_limit' clause. OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'priority' clause. OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'dist_schedule' clause. OMPClause *ActOnOpenMPDistScheduleClause( OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); /// Called on well-formed 'defaultmap' clause. OMPClause *ActOnOpenMPDefaultmapClause( OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc, SourceLocation KindLoc, SourceLocation EndLoc); /// Called on well-formed 'to' clause. 
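// Illustrative sketch (assumed caller): a 'device' clause takes an optional
// modifier in addition to the device-number expression. The names below,
// including the OMPC_DEVICE_unknown enumerator standing for "no modifier",
// are assumptions made only for this example.
//
//   OMPClause *Dev = Actions.ActOnOpenMPDeviceClause(
//       OMPC_DEVICE_unknown, DeviceExpr, StartLoc, LParenLoc,
//       /*ModifierLoc=*/SourceLocation(), EndLoc);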
OMPClause * ActOnOpenMPToClause(ArrayRef<OpenMPMotionModifierKind> MotionModifiers, ArrayRef<SourceLocation> MotionModifiersLoc, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'from' clause. OMPClause * ActOnOpenMPFromClause(ArrayRef<OpenMPMotionModifierKind> MotionModifiers, ArrayRef<SourceLocation> MotionModifiersLoc, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'use_device_ptr' clause. OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// Called on well-formed 'use_device_addr' clause. OMPClause *ActOnOpenMPUseDeviceAddrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// Called on well-formed 'is_device_ptr' clause. OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// Called on well-formed 'nontemporal' clause. OMPClause *ActOnOpenMPNontemporalClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Data for list of allocators. struct UsesAllocatorsData { /// Allocator. Expr *Allocator = nullptr; /// Allocator traits. Expr *AllocatorTraits = nullptr; /// Locations of '(' and ')' symbols. SourceLocation LParenLoc, RParenLoc; }; /// Called on well-formed 'uses_allocators' clause. OMPClause *ActOnOpenMPUsesAllocatorClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<UsesAllocatorsData> Data); /// Called on well-formed 'affinity' clause. OMPClause *ActOnOpenMPAffinityClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, Expr *Modifier, ArrayRef<Expr *> Locators); /// The kind of conversion being performed. enum CheckedConversionKind { /// An implicit conversion. CCK_ImplicitConversion, /// A C-style cast. CCK_CStyleCast, /// A functional-style cast. CCK_FunctionalCast, /// A cast other than a C-style cast. CCK_OtherCast, /// A conversion for an operand of a builtin overloaded operator. CCK_ForBuiltinOverloadedOp }; static bool isCast(CheckedConversionKind CCK) { return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast || CCK == CCK_OtherCast; } /// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit /// cast. If there is already an implicit cast, merge into the existing one. /// If isLvalue, the result of the cast is an lvalue. ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK, ExprValueKind VK = VK_RValue, const CXXCastPath *BasePath = nullptr, CheckedConversionKind CCK = CCK_ImplicitConversion); /// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding /// to the conversion from scalar type ScalarTy to the Boolean type. static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy); /// IgnoredValueConversions - Given that an expression's result is /// syntactically ignored, perform any conversions that are /// required. ExprResult IgnoredValueConversions(Expr *E); // UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts // functions and arrays to their respective pointers (C99 6.3.2.1). 
ExprResult UsualUnaryConversions(Expr *E); /// CallExprUnaryConversions - a special case of a unary conversion /// performed on a function designator of a call expression. ExprResult CallExprUnaryConversions(Expr *E); // DefaultFunctionArrayConversion - converts functions and arrays // to their respective pointers (C99 6.3.2.1). ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true); // DefaultFunctionArrayLvalueConversion - converts functions and // arrays to their respective pointers and performs the // lvalue-to-rvalue conversion. ExprResult DefaultFunctionArrayLvalueConversion(Expr *E, bool Diagnose = true); // DefaultLvalueConversion - performs lvalue-to-rvalue conversion on // the operand. This function is a no-op if the operand has a function type // or an array type. ExprResult DefaultLvalueConversion(Expr *E); // DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that // do not have a prototype. Integer promotions are performed on each // argument, and arguments that have type float are promoted to double. ExprResult DefaultArgumentPromotion(Expr *E); /// If \p E is a prvalue denoting an unmaterialized temporary, materialize /// it as an xvalue. In C++98, the result will still be a prvalue, because /// we don't have xvalues there. ExprResult TemporaryMaterializationConversion(Expr *E); // Used for emitting the right warning by DefaultVariadicArgumentPromotion. enum VariadicCallType { VariadicFunction, VariadicBlock, VariadicMethod, VariadicConstructor, VariadicDoesNotApply }; VariadicCallType getVariadicCallType(FunctionDecl *FDecl, const FunctionProtoType *Proto, Expr *Fn); // Used for determining in which context a type is allowed to be passed to a // vararg function. enum VarArgKind { VAK_Valid, VAK_ValidInCXX11, VAK_Undefined, VAK_MSVCUndefined, VAK_Invalid }; // Determines which VarArgKind fits an expression. VarArgKind isValidVarArgType(const QualType &Ty); /// Check to see if the given expression is a valid argument to a variadic /// function, issuing a diagnostic if not. void checkVariadicArgument(const Expr *E, VariadicCallType CT); /// Check to see if a given expression could have '.c_str()' called on it. bool hasCStrMethod(const Expr *E); /// GatherArgumentsForCall - Collects argument expressions for various /// forms of call prototypes. bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl, const FunctionProtoType *Proto, unsigned FirstParam, ArrayRef<Expr *> Args, SmallVectorImpl<Expr *> &AllArgs, VariadicCallType CallType = VariadicDoesNotApply, bool AllowExplicit = false, bool IsListInitialization = false); // DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but // will create a runtime trap if the resulting type is not a POD type. ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT, FunctionDecl *FDecl); /// Context in which we're performing a usual arithmetic conversion. enum ArithConvKind { /// An arithmetic operation. ACK_Arithmetic, /// A bitwise operation. ACK_BitwiseOp, /// A comparison. ACK_Comparison, /// A conditional (?:) operator. ACK_Conditional, /// A compound assignment expression. ACK_CompAssign, }; // UsualArithmeticConversions - performs the UsualUnaryConversions on its // operands and then handles various conversions that are common to binary // operators (C99 6.3.1.8). If the operands aren't both arithmetic, this // routine returns the first non-arithmetic type found. The client is // responsible for emitting appropriate error diagnostics.
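// Illustrative sketch (assumption, not from this header): the usual pattern
// for preparing the operands of a binary operator is to run the conversions
// above and bail out on error. 'LHS' and 'RHS' are hypothetical ExprResults.
//
//   LHS = DefaultFunctionArrayLvalueConversion(LHS.get());
//   RHS = DefaultFunctionArrayLvalueConversion(RHS.get());
//   if (LHS.isInvalid() || RHS.isInvalid())
//     return ExprError();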
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, ArithConvKind ACK); /// AssignConvertType - All of the 'assignment' semantic checks return this /// enum to indicate whether the assignment was allowed. These checks are /// done for simple assignments, as well as initialization, return from /// function, argument passing, etc. The query is phrased in terms of a /// source and destination type. enum AssignConvertType { /// Compatible - the types are compatible according to the standard. Compatible, /// PointerToInt - The assignment converts a pointer to an int, which we /// accept as an extension. PointerToInt, /// IntToPointer - The assignment converts an int to a pointer, which we /// accept as an extension. IntToPointer, /// FunctionVoidPointer - The assignment is between a function pointer and /// void*, which the standard doesn't allow, but we accept as an extension. FunctionVoidPointer, /// IncompatiblePointer - The assignment is between two pointers types that /// are not compatible, but we accept them as an extension. IncompatiblePointer, /// IncompatibleFunctionPointer - The assignment is between two function /// pointers types that are not compatible, but we accept them as an /// extension. IncompatibleFunctionPointer, /// IncompatiblePointerSign - The assignment is between two pointers types /// which point to integers which have a different sign, but are otherwise /// identical. This is a subset of the above, but broken out because it's by /// far the most common case of incompatible pointers. IncompatiblePointerSign, /// CompatiblePointerDiscardsQualifiers - The assignment discards /// c/v/r qualifiers, which we accept as an extension. CompatiblePointerDiscardsQualifiers, /// IncompatiblePointerDiscardsQualifiers - The assignment /// discards qualifiers that we don't permit to be discarded, /// like address spaces. IncompatiblePointerDiscardsQualifiers, /// IncompatibleNestedPointerAddressSpaceMismatch - The assignment /// changes address spaces in nested pointer types which is not allowed. /// For instance, converting __private int ** to __generic int ** is /// illegal even though __private could be converted to __generic. IncompatibleNestedPointerAddressSpaceMismatch, /// IncompatibleNestedPointerQualifiers - The assignment is between two /// nested pointer types, and the qualifiers other than the first two /// levels differ e.g. char ** -> const char **, but we accept them as an /// extension. IncompatibleNestedPointerQualifiers, /// IncompatibleVectors - The assignment is between two vector types that /// have the same size, which we accept as an extension. IncompatibleVectors, /// IntToBlockPointer - The assignment converts an int to a block /// pointer. We disallow this. IntToBlockPointer, /// IncompatibleBlockPointer - The assignment is between two block /// pointers types that are not compatible. IncompatibleBlockPointer, /// IncompatibleObjCQualifiedId - The assignment is between a qualified /// id type and something else (that is incompatible with it). For example, /// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol. IncompatibleObjCQualifiedId, /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an /// object with __weak qualifier. IncompatibleObjCWeakRef, /// Incompatible - We reject this conversion outright, it is invalid to /// represent it in the AST. 
Incompatible }; /// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the /// assignment conversion type specified by ConvTy. This returns true if the /// conversion was invalid or false if the conversion was accepted. bool DiagnoseAssignmentResult(AssignConvertType ConvTy, SourceLocation Loc, QualType DstType, QualType SrcType, Expr *SrcExpr, AssignmentAction Action, bool *Complained = nullptr); /// IsValueInFlagEnum - Determine if a value is allowed as part of a flag /// enum. If AllowMask is true, then we also allow the complement of a valid /// value, to be used as a mask. bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val, bool AllowMask) const; /// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant /// integer not in the range of enum values. void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType, Expr *SrcExpr); /// CheckAssignmentConstraints - Perform type checking for assignment, /// argument passing, variable initialization, and function return values. /// C99 6.5.16. AssignConvertType CheckAssignmentConstraints(SourceLocation Loc, QualType LHSType, QualType RHSType); /// Check assignment constraints and optionally prepare for a conversion of /// the RHS to the LHS type. The conversion is prepared for if ConvertRHS /// is true. AssignConvertType CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS, CastKind &Kind, bool ConvertRHS = true); /// Check assignment constraints for an assignment of RHS to LHSType. /// /// \param LHSType The destination type for the assignment. /// \param RHS The source expression for the assignment. /// \param Diagnose If \c true, diagnostics may be produced when checking /// for assignability. If a diagnostic is produced, \p RHS will be /// set to ExprError(). Note that this function may still return /// without producing a diagnostic, even for an invalid assignment. /// \param DiagnoseCFAudited If \c true, the target is a function parameter /// in an audited Core Foundation API and does not need to be checked /// for ARC retain issues. /// \param ConvertRHS If \c true, \p RHS will be updated to model the /// conversions necessary to perform the assignment. If \c false, /// \p Diagnose must also be \c false. AssignConvertType CheckSingleAssignmentConstraints( QualType LHSType, ExprResult &RHS, bool Diagnose = true, bool DiagnoseCFAudited = false, bool ConvertRHS = true); // If the lhs type is a transparent union, check whether we // can initialize the transparent union with the given expression. AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType, ExprResult &RHS); bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType); bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit = false); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const ImplicitConversionSequence& ICS, AssignmentAction Action, CheckedConversionKind CCK = CCK_ImplicitConversion); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const StandardConversionSequence& SCS, AssignmentAction Action, CheckedConversionKind CCK); ExprResult PerformQualificationConversion( Expr *E, QualType Ty, ExprValueKind VK = VK_RValue, CheckedConversionKind CCK = CCK_ImplicitConversion); /// the following "Check" methods will return a valid/converted QualType /// or a null QualType (indicating an error diagnostic was issued). 
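// Illustrative sketch (hypothetical use, with assumed names): a typical
// assignment check converts the RHS and then reports any problem through
// DiagnoseAssignmentResult. 'AA_Assigning' is the AssignmentAction value for
// plain assignment (an assumption of this example), and 'LHSType', 'RHS' and
// 'Loc' are example-only names.
//
//   QualType RHSType = RHS.get()->getType();
//   AssignConvertType ConvTy = CheckSingleAssignmentConstraints(LHSType, RHS);
//   if (RHS.isInvalid() ||
//       DiagnoseAssignmentResult(ConvTy, Loc, LHSType, RHSType, RHS.get(),
//                                AA_Assigning))
//     return QualType();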
/// type checking binary operators (subroutines of CreateBuiltinBinOp). QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType CheckPointerToMemberOperands( // C++ 5.5 ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, SourceLocation OpLoc, bool isIndirect); QualType CheckMultiplyDivideOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool IsDivide); QualType CheckRemainderOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign = false); QualType CheckAdditionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr); QualType CheckSubtractionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, QualType* CompLHSTy = nullptr); QualType CheckShiftOperands( // C99 6.5.7 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, bool IsCompAssign = false); void CheckPtrComparisonWithNullChar(ExprResult &E, ExprResult &NullE); QualType CheckCompareOperands( // C99 6.5.8/9 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckBitwiseOperands( // C99 6.5.[10...12] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckLogicalOperands( // C99 6.5.[13,14] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); // CheckAssignmentOperands is used for both simple and compound assignment. // For simple assignment, pass both expressions and a null converted type. // For compound assignment, pass both expressions and the converted type. QualType CheckAssignmentOperands( // C99 6.5.16.[1,2] Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType); ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opcode, Expr *Op); ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opcode, Expr *LHS, Expr *RHS); ExprResult checkPseudoObjectRValue(Expr *E); Expr *recreateSyntacticForm(PseudoObjectExpr *E); QualType CheckConditionalOperands( // C99 6.5.15 ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc); QualType CXXCheckConditionalOperands( // C++ 5.16 ExprResult &cond, ExprResult &lhs, ExprResult &rhs, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc); QualType CheckVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2, bool ConvertArgs = true); QualType FindCompositePointerType(SourceLocation Loc, ExprResult &E1, ExprResult &E2, bool ConvertArgs = true) { Expr *E1Tmp = E1.get(), *E2Tmp = E2.get(); QualType Composite = FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs); E1 = E1Tmp; E2 = E2Tmp; return Composite; } QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr, SourceLocation QuestionLoc); void DiagnoseAlwaysNonNullPointer(Expr *E, Expr::NullPointerConstantKind NullType, bool IsEqual, SourceRange Range); /// type checking for vector binary operators. 
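// Illustrative sketch (assumed caller): CreateBuiltinBinOp-style code asks one
// of the Check*Operands routines above for the result type and treats a null
// QualType as "already diagnosed". 'LHS', 'RHS', 'OpLoc' and 'Opc' are
// hypothetical.
//
//   QualType CompLHSTy;
//   QualType ResultTy =
//       CheckAdditionOperands(LHS, RHS, OpLoc, Opc, &CompLHSTy);
//   if (ResultTy.isNull() || LHS.isInvalid() || RHS.isInvalid())
//     return ExprError();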
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool AllowBothBool, bool AllowBoolConversion); QualType GetSignedVectorType(QualType V); QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc); /// Type checking for matrix binary operators. QualType CheckMatrixElementwiseOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign); QualType CheckMatrixMultiplyOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign); bool isValidSveBitcast(QualType srcType, QualType destType); bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType); bool isLaxVectorConversion(QualType srcType, QualType destType); /// type checking declaration initializers (C99 6.7.8) bool CheckForConstantInitializer(Expr *e, QualType t); // type checking C++ declaration initializers (C++ [dcl.init]). /// ReferenceCompareResult - Expresses the result of comparing two /// types (cv1 T1 and cv2 T2) to determine their compatibility for the /// purposes of initialization by reference (C++ [dcl.init.ref]p4). enum ReferenceCompareResult { /// Ref_Incompatible - The two types are incompatible, so direct /// reference binding is not possible. Ref_Incompatible = 0, /// Ref_Related - The two types are reference-related, which means /// that their unqualified forms (T1 and T2) are either the same /// or T1 is a base class of T2. Ref_Related, /// Ref_Compatible - The two types are reference-compatible. Ref_Compatible }; // Fake up a scoped enumeration that still contextually converts to bool. struct ReferenceConversionsScope { /// The conversions that would be performed on an lvalue of type T2 when /// binding a reference of type T1 to it, as determined when evaluating /// whether T1 is reference-compatible with T2. enum ReferenceConversions { Qualification = 0x1, NestedQualification = 0x2, Function = 0x4, DerivedToBase = 0x8, ObjC = 0x10, ObjCLifetime = 0x20, LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/ObjCLifetime) }; }; using ReferenceConversions = ReferenceConversionsScope::ReferenceConversions; ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2, ReferenceConversions *Conv = nullptr); ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType, Expr *CastExpr, CastKind &CastKind, ExprValueKind &VK, CXXCastPath &Path); /// Force an expression with unknown-type to an expression of the /// given type. ExprResult forceUnknownAnyToType(Expr *E, QualType ToType); /// Type-check an expression that's being passed to an /// __unknown_anytype parameter. ExprResult checkUnknownAnyArg(SourceLocation callLoc, Expr *result, QualType &paramType); // CheckVectorCast - check type constraints for vectors. // Since vectors are an extension, there is no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size. // returns true if the cast is invalid bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty, CastKind &Kind); /// Prepare `SplattedExpr` for a vector splat operation, adding /// implicit casts if necessary. ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr); // CheckExtVectorCast - check type constraints for extended vectors. // Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size, // or vectors and the element type of that vector. // returns the cast expr ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr, CastKind &Kind); ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type, SourceLocation LParenLoc, Expr *CastExpr, SourceLocation RParenLoc); enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error }; /// Checks for invalid conversions and casts between /// retainable pointers and other pointer kinds for ARC and Weak. ARCConversionResult CheckObjCConversion(SourceRange castRange, QualType castType, Expr *&op, CheckedConversionKind CCK, bool Diagnose = true, bool DiagnoseCFAudited = false, BinaryOperatorKind Opc = BO_PtrMemD ); Expr *stripARCUnbridgedCast(Expr *e); void diagnoseARCUnbridgedCast(Expr *e); bool CheckObjCARCUnavailableWeakConversion(QualType castType, QualType ExprType); /// checkRetainCycles - Check whether an Objective-C message send /// might create an obvious retain cycle. void checkRetainCycles(ObjCMessageExpr *msg); void checkRetainCycles(Expr *receiver, Expr *argument); void checkRetainCycles(VarDecl *Var, Expr *Init); /// checkUnsafeAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained type. bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS); /// checkUnsafeExprAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained expression. void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS); /// CheckMessageArgumentTypes - Check types in an Obj-C message send. /// \param Method - May be null. /// \param [out] ReturnType - The return type of the send. /// \return true iff there were any incompatible types. bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType, MultiExprArg Args, Selector Sel, ArrayRef<SourceLocation> SelectorLocs, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage, SourceLocation lbrac, SourceLocation rbrac, SourceRange RecRange, QualType &ReturnType, ExprValueKind &VK); /// Determine the result of a message send expression based on /// the type of the receiver, the method expected to receive the message, /// and the form of the message send. QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage); /// If the given expression involves a message send to a method /// with a related result type, emit a note describing what happened. void EmitRelatedResultTypeNote(const Expr *E); /// Given that we had incompatible pointer types in a return /// statement, check whether we're in a method with a related result /// type, and if so, emit a note describing what happened. 
void EmitRelatedResultTypeNoteForReturn(QualType destType); class ConditionResult { Decl *ConditionVar; FullExprArg Condition; bool Invalid; bool HasKnownValue; bool KnownValue; friend class Sema; ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition, bool IsConstexpr) : ConditionVar(ConditionVar), Condition(Condition), Invalid(false), HasKnownValue(IsConstexpr && Condition.get() && !Condition.get()->isValueDependent()), KnownValue(HasKnownValue && !!Condition.get()->EvaluateKnownConstInt(S.Context)) {} explicit ConditionResult(bool Invalid) : ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid), HasKnownValue(false), KnownValue(false) {} public: ConditionResult() : ConditionResult(false) {} bool isInvalid() const { return Invalid; } std::pair<VarDecl *, Expr *> get() const { return std::make_pair(cast_or_null<VarDecl>(ConditionVar), Condition.get()); } llvm::Optional<bool> getKnownValue() const { if (!HasKnownValue) return None; return KnownValue; } }; static ConditionResult ConditionError() { return ConditionResult(true); } enum class ConditionKind { Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'. ConstexprIf, ///< A constant boolean condition from 'if constexpr'. Switch ///< An integral condition for a 'switch' statement. }; ConditionResult ActOnCondition(Scope *S, SourceLocation Loc, Expr *SubExpr, ConditionKind CK); ConditionResult ActOnConditionVariable(Decl *ConditionVar, SourceLocation StmtLoc, ConditionKind CK); DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D); ExprResult CheckConditionVariable(VarDecl *ConditionVar, SourceLocation StmtLoc, ConditionKind CK); ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond); /// CheckBooleanCondition - Diagnose problems involving the use of /// the given expression as a boolean condition (e.g. in an if /// statement). Also performs the standard function and array /// decays, possibly changing the input variable. /// /// \param Loc - A location associated with the condition, e.g. the /// 'if' keyword. /// \return true iff there were any errors ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E, bool IsConstexpr = false); /// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression /// found in an explicit(bool) specifier. ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E); /// tryResolveExplicitSpecifier - Attempt to resolve the explicit specifier. /// Returns true if the explicit specifier is now resolved. bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec); /// DiagnoseAssignmentAsCondition - Given that an expression is /// being used as a boolean condition, warn if it's an assignment. void DiagnoseAssignmentAsCondition(Expr *E); /// Redundant parentheses over an equality comparison can indicate /// that the user intended an assignment used as condition. void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE); /// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid. ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false); /// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have /// the specified width and sign. If an overflow occurs, detect it and emit /// the specified diagnostic. void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal, unsigned NewWidth, bool NewSign, SourceLocation Loc, unsigned DiagID); /// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared /// in the global scope. bool CheckObjCDeclScope(Decl *D); /// Abstract base class used for diagnosing integer constant /// expression violations. class VerifyICEDiagnoser { public: bool Suppress; VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { } virtual SemaDiagnosticBuilder diagnoseNotICEType(Sema &S, SourceLocation Loc, QualType T); virtual SemaDiagnosticBuilder diagnoseNotICE(Sema &S, SourceLocation Loc) = 0; virtual SemaDiagnosticBuilder diagnoseFold(Sema &S, SourceLocation Loc); virtual ~VerifyICEDiagnoser() {} }; enum AllowFoldKind { NoFold, AllowFold, }; /// VerifyIntegerConstantExpression - Verifies that an expression is an ICE, /// and reports the appropriate diagnostics. Returns false on success. /// Can optionally return the value of the expression. ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, VerifyICEDiagnoser &Diagnoser, AllowFoldKind CanFold = NoFold); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, unsigned DiagID, AllowFoldKind CanFold = NoFold); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result = nullptr, AllowFoldKind CanFold = NoFold); ExprResult VerifyIntegerConstantExpression(Expr *E, AllowFoldKind CanFold = NoFold) { return VerifyIntegerConstantExpression(E, nullptr, CanFold); } /// VerifyBitField - verifies that a bit field expression is an ICE and has /// the correct width, and that the field type is valid. /// Returns false on success. /// Can optionally return whether the bit-field is of width 0. ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName, QualType FieldTy, bool IsMsStruct, Expr *BitWidth, bool *ZeroWidth = nullptr); private: unsigned ForceCUDAHostDeviceDepth = 0; public: /// Increments our count of the number of times we've seen a pragma forcing /// functions to be __host__ __device__. So long as this count is greater /// than zero, all functions encountered will be __host__ __device__. void PushForceCUDAHostDevice(); /// Decrements our count of the number of times we've seen a pragma forcing /// functions to be __host__ __device__. Returns false if the count is 0 /// before decrementing, so you can emit an error. bool PopForceCUDAHostDevice(); /// Diagnostics that are emitted only if we discover that the given function /// must be codegen'ed. Because handling these correctly adds overhead to /// compilation, this is currently only enabled for CUDA compilations. llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>, std::vector<PartialDiagnosticAt>> DeviceDeferredDiags; /// A pair of a canonical FunctionDecl and a SourceLocation. When used as the /// key in a hashtable, both the FD and location are hashed. struct FunctionDeclAndLoc { CanonicalDeclPtr<FunctionDecl> FD; SourceLocation Loc; }; /// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a /// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the /// same deferred diag twice. llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags; /// An inverse call graph, mapping known-emitted functions to one of their /// known-emitted callers (plus the location of the call). /// /// Functions that we can tell a priori must be emitted aren't added to this /// map.
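// Illustrative sketch (hypothetical caller): verifying that a parsed
// expression is an integer constant expression and reading its value through
// the overloads above. 'BitWidthExpr' is an example-only name.
//
//   llvm::APSInt Value;
//   ExprResult ICE = VerifyIntegerConstantExpression(BitWidthExpr, &Value);
//   if (ICE.isInvalid())
//     return ExprError();
//   // On success, 'Value' holds the evaluated constant and ICE.get() the
//   // (possibly converted) expression.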
llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>, /* Caller = */ FunctionDeclAndLoc> DeviceKnownEmittedFns; /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current /// context is "used as device code". /// /// - If CurContext is a __host__ function, does not emit any diagnostics /// unless \p EmitOnBothSides is true. /// - If CurContext is a __device__ or __global__ function, emits the /// diagnostics immediately. /// - If CurContext is a __host__ __device__ function and we are compiling for /// the device, creates a diagnostic which is emitted if and when we realize /// that the function will be codegen'ed. /// /// Example usage: /// /// // Variable-length arrays are not allowed in CUDA device code. /// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget()) /// return ExprError(); /// // Otherwise, continue parsing as normal. SemaDiagnosticBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID); /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current /// context is "used as host code". /// /// Same as CUDADiagIfDeviceCode, with "host" and "device" switched. SemaDiagnosticBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID); /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current /// context is "used as device code". /// /// - If CurContext is a `declare target` function or it is known that the /// function is emitted for the device, emits the diagnostics immediately. /// - If CurContext is a non-`declare target` function and we are compiling /// for the device, creates a diagnostic which is emitted if and when we /// realize that the function will be codegen'ed. /// /// Example usage: /// /// // Variable-length arrays are not allowed in NVPTX device code. /// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported)) /// return ExprError(); /// // Otherwise, continue parsing as normal. SemaDiagnosticBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID, FunctionDecl *FD); /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current /// context is "used as host code". /// /// - If CurContext is a `declare target` function or it is known that the /// function is emitted for the host, emits the diagnostics immediately. /// - If CurContext is a non-host function, just ignore it. /// /// Example usage: /// /// // Variable-length arrays are not allowed in NVPTX device code. /// if (diagIfOpenMPHostCode(Loc, diag::err_vla_unsupported)) /// return ExprError(); /// // Otherwise, continue parsing as normal. SemaDiagnosticBuilder diagIfOpenMPHostCode(SourceLocation Loc, unsigned DiagID, FunctionDecl *FD); SemaDiagnosticBuilder targetDiag(SourceLocation Loc, unsigned DiagID, FunctionDecl *FD = nullptr); SemaDiagnosticBuilder targetDiag(SourceLocation Loc, const PartialDiagnostic &PD, FunctionDecl *FD = nullptr) { return targetDiag(Loc, PD.getDiagID(), FD) << PD; } /// Check if the expression is allowed to be used in expressions for the /// offloading devices. void checkDeviceDecl(ValueDecl *D, SourceLocation Loc); enum CUDAFunctionTarget { CFT_Device, CFT_Global, CFT_Host, CFT_HostDevice, CFT_InvalidTarget }; /// Determines whether the given function is a CUDA device/host/kernel/etc. /// function. /// /// Use this rather than examining the function's attributes yourself -- you /// will get it wrong. Returns CFT_Host if D is null.
CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D, bool IgnoreImplicitHDAttr = false); CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs); /// Gets the CUDA target for the current context. CUDAFunctionTarget CurrentCUDATarget() { return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext)); } static bool isCUDAImplicitHostDeviceFunction(const FunctionDecl *D); // CUDA function call preference. Must be ordered numerically from // worst to best. enum CUDAFunctionPreference { CFP_Never, // Invalid caller/callee combination. CFP_WrongSide, // Calls from host-device to host or device // function that do not match current compilation // mode. CFP_HostDevice, // Any calls to host/device functions. CFP_SameSide, // Calls from host-device to host or device // function matching current compilation mode. CFP_Native, // host-to-host or device-to-device calls. }; /// Identifies relative preference of a given Caller/Callee /// combination, based on their host/device attributes. /// \param Caller function which needs address of \p Callee. /// nullptr in case of global context. /// \param Callee target function /// /// \returns preference value for particular Caller/Callee combination. CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller, const FunctionDecl *Callee); /// Determines whether Caller may invoke Callee, based on their CUDA /// host/device attributes. Returns false if the call is not allowed. /// /// Note: Will return true for CFP_WrongSide calls. These may appear in /// semantically correct CUDA programs, but only if they're never codegen'ed. bool IsAllowedCUDACall(const FunctionDecl *Caller, const FunctionDecl *Callee) { return IdentifyCUDAPreference(Caller, Callee) != CFP_Never; } /// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD, /// depending on FD and the current compilation settings. void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD, const LookupResult &Previous); /// May add implicit CUDAConstantAttr attribute to VD, depending on VD /// and current compilation settings. void MaybeAddCUDAConstantAttr(VarDecl *VD); public: /// Check whether we're allowed to call Callee from the current context. /// /// - If the call is never allowed in a semantically-correct program /// (CFP_Never), emits an error and returns false. /// /// - If the call is allowed in semantically-correct programs, but only if /// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to /// be emitted if and when the caller is codegen'ed, and returns true. /// /// Will only create deferred diagnostics for a given SourceLocation once, /// so you can safely call this multiple times without generating duplicate /// deferred errors. /// /// - Otherwise, returns true without emitting any diagnostics. bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee); void CUDACheckLambdaCapture(CXXMethodDecl *D, const sema::Capture &Capture); /// Set __device__ or __host__ __device__ attributes on the given lambda /// operator() method. /// /// A CUDA lambda is by default a host device function unless it has an /// explicit host or device attribute. void CUDASetLambdaAttrs(CXXMethodDecl *Method); /// Finds a function in \p Matches with highest calling priority /// from \p Caller context and erases all functions with lower /// calling priority.
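// Illustrative sketch (assumed caller): overload resolution and call checking
// consult the preference machinery above; a CFP_Never combination is rejected
// outright. 'Caller' and 'Callee' are hypothetical FunctionDecl pointers.
//
//   if (IdentifyCUDAPreference(Caller, Callee) == CFP_Never) {
//     // Not callable from this context; emit a diagnostic or drop the
//     // candidate from the overload set.
//   }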
void EraseUnwantedCUDAMatches( const FunctionDecl *Caller, SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches); /// Given an implicit special member, infer its CUDA target from the /// calls it needs to make to underlying base/field special members. /// \param ClassDecl the class for which the member is being created. /// \param CSM the kind of special member. /// \param MemberDecl the special member itself. /// \param ConstRHS true if this is a copy operation with a const object on /// its RHS. /// \param Diagnose true if this call should emit diagnostics. /// \return true if there was an error inferring. /// The result of this call is implicit CUDA target attribute(s) attached to /// the member declaration. bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl, CXXSpecialMember CSM, CXXMethodDecl *MemberDecl, bool ConstRHS, bool Diagnose); /// \return true if \p CD can be considered empty according to CUDA /// (E.2.3.1 in CUDA 7.5 Programming guide). bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD); bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD); // \brief Checks that initializers of \p Var satisfy CUDA restrictions. In // case of error emits appropriate diagnostic and invalidates \p Var. // // \details CUDA allows only empty constructors as initializers for global // variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all // __shared__ variables whether they are local or not (they all are implicitly // static in CUDA). One exception is that CUDA allows constant initializers // for __constant__ and __device__ variables. void checkAllowedCUDAInitializer(VarDecl *VD); /// Check whether NewFD is a valid overload for CUDA. Emits /// diagnostics and invalidates NewFD if not. void checkCUDATargetOverload(FunctionDecl *NewFD, const LookupResult &Previous); /// Copies target attributes from the template TD to the function FD. void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD); /// Returns the name of the launch configuration function. This is the name /// of the function that will be called to configure the kernel call, with the /// parameters specified via <<<>>>. std::string getCudaConfigureFuncName() const; /// \name Code completion //@{ /// Describes the context in which code completion occurs. enum ParserCompletionContext { /// Code completion occurs at top-level or namespace context. PCC_Namespace, /// Code completion occurs within a class, struct, or union. PCC_Class, /// Code completion occurs within an Objective-C interface, protocol, /// or category. PCC_ObjCInterface, /// Code completion occurs within an Objective-C implementation or /// category implementation. PCC_ObjCImplementation, /// Code completion occurs within the list of instance variables /// in an Objective-C interface, protocol, category, or implementation. PCC_ObjCInstanceVariableList, /// Code completion occurs following one or more template /// headers. PCC_Template, /// Code completion occurs following one or more template /// headers within a class. PCC_MemberTemplate, /// Code completion occurs within an expression. PCC_Expression, /// Code completion occurs within a statement, which may /// also be an expression or a declaration. PCC_Statement, /// Code completion occurs at the beginning of the /// initialization statement (or expression) in a for loop. PCC_ForInit, /// Code completion occurs within the condition of an if, /// while, switch, or for statement.
PCC_Condition, /// Code completion occurs within the body of a function on a /// recovery path, where we do not have a specific handle on our position /// in the grammar. PCC_RecoveryInFunction, /// Code completion occurs where only a type is permitted. PCC_Type, /// Code completion occurs in a parenthesized expression, which /// might also be a type cast. PCC_ParenthesizedExpression, /// Code completion occurs within a sequence of declaration /// specifiers within a function, method, or block. PCC_LocalDeclarationSpecifiers }; void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path); void CodeCompleteOrdinaryName(Scope *S, ParserCompletionContext CompletionContext); void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS, bool AllowNonIdentifiers, bool AllowNestedNameSpecifiers); struct CodeCompleteExpressionData; void CodeCompleteExpression(Scope *S, const CodeCompleteExpressionData &Data); void CodeCompleteExpression(Scope *S, QualType PreferredType, bool IsParenthesized = false); void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase, SourceLocation OpLoc, bool IsArrow, bool IsBaseExprStatement, QualType PreferredType); void CodeCompletePostfixExpression(Scope *S, ExprResult LHS, QualType PreferredType); void CodeCompleteTag(Scope *S, unsigned TagSpec); void CodeCompleteTypeQualifiers(DeclSpec &DS); void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D, const VirtSpecifiers *VS = nullptr); void CodeCompleteBracketDeclarator(Scope *S); void CodeCompleteCase(Scope *S); /// Determines the preferred type of the current function argument, by /// examining the signatures of all possible overloads. /// Returns null if unknown or ambiguous, or if code completion is off. /// /// If the code completion point has been reached, also reports the function /// signatures that were considered. /// /// FIXME: rename to GuessCallArgumentType to reduce confusion. QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type, SourceLocation Loc, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl, CXXScopeSpec SS, ParsedType TemplateTypeTy, ArrayRef<Expr *> ArgExprs, IdentifierInfo *II, SourceLocation OpenParLoc); void CodeCompleteInitializer(Scope *S, Decl *D); /// Trigger code completion for a record of \p BaseType. \p InitExprs are /// expressions in the initializer list seen so far and \p D is the current /// Designation being parsed. 
void CodeCompleteDesignator(const QualType BaseType, llvm::ArrayRef<Expr *> InitExprs, const Designation &D); void CodeCompleteAfterIf(Scope *S, bool IsBracedThen); void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext, bool IsUsingDeclaration, QualType BaseType, QualType PreferredType); void CodeCompleteUsing(Scope *S); void CodeCompleteUsingDirective(Scope *S); void CodeCompleteNamespaceDecl(Scope *S); void CodeCompleteNamespaceAliasDecl(Scope *S); void CodeCompleteOperatorName(Scope *S); void CodeCompleteConstructorInitializer( Decl *Constructor, ArrayRef<CXXCtorInitializer *> Initializers); void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro, bool AfterAmpersand); void CodeCompleteAfterFunctionEquals(Declarator &D); void CodeCompleteObjCAtDirective(Scope *S); void CodeCompleteObjCAtVisibility(Scope *S); void CodeCompleteObjCAtStatement(Scope *S); void CodeCompleteObjCAtExpression(Scope *S); void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS); void CodeCompleteObjCPropertyGetter(Scope *S); void CodeCompleteObjCPropertySetter(Scope *S); void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS, bool IsParameter); void CodeCompleteObjCMessageReceiver(Scope *S); void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression); void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, bool IsSuper = false); void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, ObjCInterfaceDecl *Super = nullptr); void CodeCompleteObjCForCollection(Scope *S, DeclGroupPtrTy IterationVar); void CodeCompleteObjCSelector(Scope *S, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCProtocolReferences( ArrayRef<IdentifierLocPair> Protocols); void CodeCompleteObjCProtocolDecl(Scope *S); void CodeCompleteObjCInterfaceDecl(Scope *S); void CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationDecl(Scope *S); void CodeCompleteObjCInterfaceCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCPropertyDefinition(Scope *S); void CodeCompleteObjCPropertySynthesizeIvar(Scope *S, IdentifierInfo *PropertyName); void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod, ParsedType ReturnType); void CodeCompleteObjCMethodDeclSelector(Scope *S, bool IsInstanceMethod, bool AtParameterName, ParsedType ReturnType, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName, SourceLocation ClassNameLoc, bool IsBaseExprStatement); void CodeCompletePreprocessorDirective(bool InConditional); void CodeCompleteInPreprocessorConditionalExclusion(Scope *S); void CodeCompletePreprocessorMacroName(bool IsDefinition); void CodeCompletePreprocessorExpression(); void CodeCompletePreprocessorMacroArgument(Scope *S, IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned Argument); void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled); void CodeCompleteNaturalLanguage(); void CodeCompleteAvailabilityPlatformName(); void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo, SmallVectorImpl<CodeCompletionResult> &Results); //@} 
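// Illustrative sketch (hypothetical parser-side call): the code-completion
// entry points above are invoked by the parser when it reaches the completion
// token, e.g. for an ordinary name at statement scope. 'Actions' and
// 'getCurScope()' are assumed to be the parser's Sema instance and current
// scope.
//
//   Actions.CodeCompleteOrdinaryName(getCurScope(),
//                                    Sema::PCC_Statement);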
//===--------------------------------------------------------------------===// // Extra semantic analysis beyond the C type system public: SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL, unsigned ByteNo) const; private: void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, const ArraySubscriptExpr *ASE=nullptr, bool AllowOnePastEnd=true, bool IndexNegated=false); void CheckArrayAccess(const Expr *E); // Used to grab the relevant information from a FormatAttr and a // FunctionDeclaration. struct FormatStringInfo { unsigned FormatIdx; unsigned FirstDataArg; bool HasVAListArg; }; static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, FormatStringInfo *FSI); bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc, ArrayRef<const Expr *> Args); bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto); void CheckConstructorCall(FunctionDecl *FDecl, QualType ThisType, ArrayRef<const Expr *> Args, const FunctionProtoType *Proto, SourceLocation Loc); void CheckArgAlignment(SourceLocation Loc, NamedDecl *FDecl, StringRef ParamName, QualType ArgTy, QualType ParamTy); void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, const Expr *ThisArg, ArrayRef<const Expr *> Args, bool IsMemberFunction, SourceLocation Loc, SourceRange Range, VariadicCallType CallType); bool CheckObjCString(Expr *Arg); ExprResult CheckOSLogFormatStringArg(Expr *Arg); ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, CallExpr *TheCall); bool CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall); bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, unsigned MaxWidth); bool CheckNeonBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckARMCoprocessorImmediate(const TargetInfo &TI, const Expr *CoprocArg, bool WantCDE); bool CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckAArch64BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckBPFBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall); bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall, ArrayRef<int> ArgNums); bool CheckX86BuiltinTileDuplicate(CallExpr *TheCall, ArrayRef<int> 
ArgNums); bool CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall, ArrayRef<int> ArgNums); bool CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckRISCVBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call); bool SemaBuiltinUnorderedCompare(CallExpr *TheCall); bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs); bool SemaBuiltinComplex(CallExpr *TheCall); bool SemaBuiltinVSX(CallExpr *TheCall); bool SemaBuiltinOSLogFormat(CallExpr *TheCall); public: // Used by C++ template instantiation. ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall); ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, SourceLocation BuiltinLoc, SourceLocation RParenLoc); private: bool SemaBuiltinPrefetch(CallExpr *TheCall); bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall); bool SemaBuiltinAssume(CallExpr *TheCall); bool SemaBuiltinAssumeAligned(CallExpr *TheCall); bool SemaBuiltinLongjmp(CallExpr *TheCall); bool SemaBuiltinSetjmp(CallExpr *TheCall); ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult); ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult); ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op); ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult, bool IsDelete); bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, llvm::APSInt &Result); bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low, int High, bool RangeIsError = true); bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum, unsigned Multiple); bool SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum); bool SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum, unsigned ArgBits); bool SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, int ArgNum, unsigned ArgBits); bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, int ArgNum, unsigned ExpectedFieldNum, bool AllowName); bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinPPCMMACall(CallExpr *TheCall, const char *TypeDesc); bool CheckPPCMMAType(QualType Type, SourceLocation TypeLoc); // Matrix builtin handling. 
ExprResult SemaBuiltinMatrixTranspose(CallExpr *TheCall, ExprResult CallResult); ExprResult SemaBuiltinMatrixColumnMajorLoad(CallExpr *TheCall, ExprResult CallResult); ExprResult SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall, ExprResult CallResult); public: enum FormatStringType { FST_Scanf, FST_Printf, FST_NSString, FST_Strftime, FST_Strfmon, FST_Kprintf, FST_FreeBSDKPrintf, FST_OSTrace, FST_OSLog, FST_Unknown }; static FormatStringType GetFormatStringType(const FormatAttr *Format); bool FormatStringHasSArg(const StringLiteral *FExpr); static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx); private: bool CheckFormatArguments(const FormatAttr *Format, ArrayRef<const Expr *> Args, bool IsCXXMember, VariadicCallType CallType, SourceLocation Loc, SourceRange Range, llvm::SmallBitVector &CheckedVarArgs); bool CheckFormatArguments(ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, VariadicCallType CallType, SourceLocation Loc, SourceRange range, llvm::SmallBitVector &CheckedVarArgs); void CheckAbsoluteValueFunction(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMemaccessArguments(const CallExpr *Call, unsigned BId, IdentifierInfo *FnName); void CheckStrlcpycatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckStrncatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckFreeArguments(const CallExpr *E); void CheckReturnValExpr(Expr *RetValExp, QualType lhsType, SourceLocation ReturnLoc, bool isObjCMethod = false, const AttrVec *Attrs = nullptr, const FunctionDecl *FD = nullptr); public: void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS); private: void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation()); void CheckBoolLikeConversion(Expr *E, SourceLocation CC); void CheckForIntOverflow(Expr *E); void CheckUnsequencedOperations(const Expr *E); /// Perform semantic checks on a completed expression. This will either /// be a full-expression or a default argument expression. void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(), bool IsConstexpr = false); void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field, Expr *Init); /// Check if there is a field shadowing. void CheckShadowInheritedFields(const SourceLocation &Loc, DeclarationName FieldName, const CXXRecordDecl *RD, bool DeclIsField = true); /// Check if the given expression contains 'break' or 'continue' /// statement that produces control flow different from GCC. void CheckBreakContinueBinding(Expr *E); /// Check whether receiver is mutable ObjC container which /// attempts to add itself into the container void CheckObjCCircularContainer(ObjCMessageExpr *Message); void CheckTCBEnforcement(const CallExpr *TheCall, const FunctionDecl *Callee); void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE); void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc, bool DeleteWasArrayForm); public: /// Register a magic integral constant to be used as a type tag. 
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind, uint64_t MagicValue, QualType Type, bool LayoutCompatible, bool MustBeNull); struct TypeTagData { TypeTagData() {} TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) : Type(Type), LayoutCompatible(LayoutCompatible), MustBeNull(MustBeNull) {} QualType Type; /// If true, \c Type should be compared with other expression's types for /// layout-compatibility. unsigned LayoutCompatible : 1; unsigned MustBeNull : 1; }; /// A pair of ArgumentKind identifier and magic value. This uniquely /// identifies the magic value. typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue; private: /// A map from magic value to type information. std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>> TypeTagForDatatypeMagicValues; /// Peform checks on a call of a function with argument_with_type_tag /// or pointer_with_type_tag attributes. void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr, const ArrayRef<const Expr *> ExprArgs, SourceLocation CallSiteLoc); /// Check if we are taking the address of a packed field /// as this may be a problem if the pointer value is dereferenced. void CheckAddressOfPackedMember(Expr *rhs); /// The parser's current scope. /// /// The parser maintains this state here. Scope *CurScope; mutable IdentifierInfo *Ident_super; mutable IdentifierInfo *Ident___float128; /// Nullability type specifiers. IdentifierInfo *Ident__Nonnull = nullptr; IdentifierInfo *Ident__Nullable = nullptr; IdentifierInfo *Ident__Nullable_result = nullptr; IdentifierInfo *Ident__Null_unspecified = nullptr; IdentifierInfo *Ident_NSError = nullptr; /// The handler for the FileChanged preprocessor events. /// /// Used for diagnostics that implement custom semantic analysis for #include /// directives, like -Wpragma-pack. sema::SemaPPCallbacks *SemaPPCallbackHandler; protected: friend class Parser; friend class InitializationSequence; friend class ASTReader; friend class ASTDeclReader; friend class ASTWriter; public: /// Retrieve the keyword associated IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability); /// The struct behind the CFErrorRef pointer. RecordDecl *CFError = nullptr; bool isCFError(RecordDecl *D); /// Retrieve the identifier "NSError". IdentifierInfo *getNSErrorIdent(); /// Retrieve the parser's current scope. /// /// This routine must only be used when it is certain that semantic analysis /// and the parser are in precisely the same context, which is not the case /// when, e.g., we are performing any kind of template instantiation. /// Therefore, the only safe places to use this scope are in the parser /// itself and in routines directly invoked from the parser and *never* from /// template substitution or instantiation. Scope *getCurScope() const { return CurScope; } void incrementMSManglingNumber() const { return CurScope->incrementMSManglingNumber(); } IdentifierInfo *getSuperIdentifier() const; IdentifierInfo *getFloat128Identifier() const; Decl *getObjCDeclContext() const; DeclContext *getCurLexicalContext() const { return OriginalLexicalContext ? OriginalLexicalContext : CurContext; } const DeclContext *getCurObjCLexicalContext() const { const DeclContext *DC = getCurLexicalContext(); // A category implicitly has the attribute of the interface. if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC)) DC = CatD->getClassInterface(); return DC; } /// Determine the number of levels of enclosing template parameters. This is /// only usable while parsing. 
Note that this does not include dependent /// contexts in which no template parameters have yet been declared, such as /// in a terse function template or generic lambda before the first 'auto' is /// encountered. unsigned getTemplateDepth(Scope *S) const; /// To be used for checking whether the arguments being passed to /// function exceeds the number of parameters expected for it. static bool TooManyArguments(size_t NumParams, size_t NumArgs, bool PartialOverloading = false) { // We check whether we're just after a comma in code-completion. if (NumArgs > 0 && PartialOverloading) return NumArgs + 1 > NumParams; // If so, we view as an extra argument. return NumArgs > NumParams; } // Emitting members of dllexported classes is delayed until the class // (including field initializers) is fully parsed. SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses; SmallVector<CXXMethodDecl*, 4> DelayedDllExportMemberFunctions; private: int ParsingClassDepth = 0; class SavePendingParsedClassStateRAII { public: SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); } ~SavePendingParsedClassStateRAII() { assert(S.DelayedOverridingExceptionSpecChecks.empty() && "there shouldn't be any pending delayed exception spec checks"); assert(S.DelayedEquivalentExceptionSpecChecks.empty() && "there shouldn't be any pending delayed exception spec checks"); swapSavedState(); } private: Sema &S; decltype(DelayedOverridingExceptionSpecChecks) SavedOverridingExceptionSpecChecks; decltype(DelayedEquivalentExceptionSpecChecks) SavedEquivalentExceptionSpecChecks; void swapSavedState() { SavedOverridingExceptionSpecChecks.swap( S.DelayedOverridingExceptionSpecChecks); SavedEquivalentExceptionSpecChecks.swap( S.DelayedEquivalentExceptionSpecChecks); } }; /// Helper class that collects misaligned member designations and /// their location info for delayed diagnostics. struct MisalignedMember { Expr *E; RecordDecl *RD; ValueDecl *MD; CharUnits Alignment; MisalignedMember() : E(), RD(), MD(), Alignment() {} MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment) : E(E), RD(RD), MD(MD), Alignment(Alignment) {} explicit MisalignedMember(Expr *E) : MisalignedMember(E, nullptr, nullptr, CharUnits()) {} bool operator==(const MisalignedMember &m) { return this->E == m.E; } }; /// Small set of gathered accesses to potentially misaligned members /// due to the packed attribute. SmallVector<MisalignedMember, 4> MisalignedMembers; /// Adds an expression to the set of gathered misaligned members. void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment); public: /// Diagnoses the current set of gathered accesses. This typically /// happens at full expression level. The set is cleared after emitting the /// diagnostics. void DiagnoseMisalignedMembers(); /// This function checks if the expression is in the sef of potentially /// misaligned members and it is converted to some pointer type T with lower /// or equal alignment requirements. If so it removes it. This is used when /// we do not want to diagnose such misaligned access (e.g. in conversions to /// void*). void DiscardMisalignedMemberAddress(const Type *T, Expr *E); /// This function calls Action when it determines that E designates a /// misaligned member due to the packed attribute. This is used to emit /// local diagnostics like in reference binding. 
void RefersToMemberWithReducedAlignment( Expr *E, llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)> Action); /// Describes the reason a calling convention specification was ignored, used /// for diagnostics. enum class CallingConventionIgnoredReason { ForThisTarget = 0, VariadicFunction, ConstructorDestructor, BuiltinFunction }; /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current /// context is "used as device code". /// /// - If CurLexicalContext is a kernel function or it is known that the /// function will be emitted for the device, emits the diagnostics /// immediately. /// - If CurLexicalContext is a function and we are compiling /// for the device, but we don't know that this function will be codegen'ed /// for devive yet, creates a diagnostic which is emitted if and when we /// realize that the function will be codegen'ed. /// /// Example usage: /// /// Diagnose __float128 type usage only from SYCL device code if the current /// target doesn't support it /// if (!S.Context.getTargetInfo().hasFloat128Type() && /// S.getLangOpts().SYCLIsDevice) /// SYCLDiagIfDeviceCode(Loc, diag::err_type_unsupported) << "__float128"; SemaDiagnosticBuilder SYCLDiagIfDeviceCode(SourceLocation Loc, unsigned DiagID); /// Check whether we're allowed to call Callee from the current context. /// /// - If the call is never allowed in a semantically-correct program /// emits an error and returns false. /// /// - If the call is allowed in semantically-correct programs, but only if /// it's never codegen'ed, creates a deferred diagnostic to be emitted if /// and when the caller is codegen'ed, and returns true. /// /// - Otherwise, returns true without emitting any diagnostics. /// /// Adds Callee to DeviceCallGraph if we don't know if its caller will be /// codegen'ed yet. bool checkSYCLDeviceFunction(SourceLocation Loc, FunctionDecl *Callee); }; /// RAII object that enters a new expression evaluation context. class EnterExpressionEvaluationContext { Sema &Actions; bool Entered = true; public: EnterExpressionEvaluationContext( Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext = Sema::ExpressionEvaluationContextRecord::EK_Other, bool ShouldEnter = true) : Actions(Actions), Entered(ShouldEnter) { if (Entered) Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl, ExprContext); } EnterExpressionEvaluationContext( Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Sema::ReuseLambdaContextDecl_t, Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext = Sema::ExpressionEvaluationContextRecord::EK_Other) : Actions(Actions) { Actions.PushExpressionEvaluationContext( NewContext, Sema::ReuseLambdaContextDecl, ExprContext); } enum InitListTag { InitList }; EnterExpressionEvaluationContext(Sema &Actions, InitListTag, bool ShouldEnter = true) : Actions(Actions), Entered(false) { // In C++11 onwards, narrowing checks are performed on the contents of // braced-init-lists, even when they occur within unevaluated operands. // Therefore we still need to instantiate constexpr functions used in such // a context. 
if (ShouldEnter && Actions.isUnevaluatedContext() && Actions.getLangOpts().CPlusPlus11) { Actions.PushExpressionEvaluationContext( Sema::ExpressionEvaluationContext::UnevaluatedList); Entered = true; } } ~EnterExpressionEvaluationContext() { if (Entered) Actions.PopExpressionEvaluationContext(); } }; DeductionFailureInfo MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK, sema::TemplateDeductionInfo &Info); /// Contains a late templated function. /// Will be parsed at the end of the translation unit, used by Sema & Parser. struct LateParsedTemplate { CachedTokens Toks; /// The template function declaration to be late parsed. Decl *D; }; template <> void Sema::PragmaStack<Sema::AlignPackInfo>::Act(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, AlignPackInfo Value); } // end namespace clang namespace llvm { // Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its // SourceLocation. template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> { using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc; using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>; static FunctionDeclAndLoc getEmptyKey() { return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()}; } static FunctionDeclAndLoc getTombstoneKey() { return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()}; } static unsigned getHashValue(const FunctionDeclAndLoc &FDL) { return hash_combine(FDBaseInfo::getHashValue(FDL.FD), FDL.Loc.getHashValue()); } static bool isEqual(const FunctionDeclAndLoc &LHS, const FunctionDeclAndLoc &RHS) { return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc; } }; } // namespace llvm #endif
GB_unop__ainv_uint64_uint64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__ainv_uint64_uint64) // op(A') function: GB (_unop_tran__ainv_uint64_uint64) // C type: uint64_t // A type: uint64_t // cast: uint64_t cij = aij // unaryop: cij = -aij #define GB_ATYPE \ uint64_t #define GB_CTYPE \ uint64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = -x ; // casting #define GB_CAST(z, aij) \ uint64_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ uint64_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ uint64_t z = aij ; \ Cx [pC] = -z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_AINV || GxB_NO_UINT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__ainv_uint64_uint64) ( uint64_t *Cx, // Cx and Ax may be aliased const uint64_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint64_t aij = Ax [p] ; uint64_t z = aij ; Cx [p] = -z ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; uint64_t aij = Ax [p] ; uint64_t z = aij ; Cx [p] = -z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__ainv_uint64_uint64) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__ainv_uint64_uint64) // op(A') function: GB (_unop_tran__ainv_uint64_uint64) // C type: uint64_t // A type: uint64_t // cast: uint64_t cij = aij // unaryop: cij = -aij #define GB_ATYPE \ uint64_t #define GB_CTYPE \ uint64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = -x ; // casting #define GB_CAST(z, aij) \ uint64_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ uint64_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ uint64_t z = aij ; \ Cx [pC] = -z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_AINV || GxB_NO_UINT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__ainv_uint64_uint64) ( uint64_t *Cx, // Cx and Ax may be aliased const uint64_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { for (p = 0 ; p < anz ; p++) { uint64_t aij = Ax [p] ; uint64_t z = aij ; Cx [p] = -z ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; uint64_t aij = Ax [p] ; uint64_t z = aij ; Cx [p] = -z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__ainv_uint64_uint64) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__ainv_uint64_uint64) // op(A') function: GB (_unop_tran__ainv_uint64_uint64) // C type: uint64_t // A type: uint64_t // cast: uint64_t cij = aij // unaryop: cij = -aij #define GB_ATYPE \ uint64_t #define GB_CTYPE \ uint64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = -x ; // casting #define GB_CAST(z, aij) \ uint64_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ uint64_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ uint64_t z = aij ; \ Cx [pC] = -z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_AINV || GxB_NO_UINT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__ainv_uint64_uint64) ( uint64_t *Cx, // Cx and Ax may be aliased const uint64_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint64_t aij = Ax [p] ; uint64_t z = aij ; Cx [p] = -z ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; uint64_t aij = Ax [p] ; uint64_t z = aij ; Cx [p] = -z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__ainv_uint64_uint64) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
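The three variants of GB_unop__ainv_uint64_uint64.c above differ only in whether the two "#pragma omp parallel for num_threads(nthreads) schedule(static)" lines are present; the no-OpenMP variant runs the same loops serially. Below is a minimal standalone sketch of that pattern (the name apply_neg_u64 and its signature are illustrative, not part of GraphBLAS):

#include <stdint.h>

/* Minimal sketch (not GraphBLAS code): apply z = -x over a dense or
   bitmap-masked uint64_t array, parallelized exactly like the apply
   kernel above.  The loop index p is predetermined private by OpenMP. */
void apply_neg_u64(uint64_t *Cx, const uint64_t *Ax, const int8_t *Ab,
                   int64_t anz, int nthreads)
{
    int64_t p;
    if (Ab == NULL)
    {
        /* dense case: every entry is present */
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0; p < anz; p++)
        {
            Cx[p] = -Ax[p];     /* additive inverse, wraps modulo 2^64 */
        }
    }
    else
    {
        /* bitmap case: entries with Ab[p] == 0 are absent and skipped */
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0; p < anz; p++)
        {
            if (!Ab[p]) continue;
            Cx[p] = -Ax[p];
        }
    }
}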
macro-2.c
// { dg-do compile } #define p parallel #define s(x) shared(x##1, x##2) #define d(x) default(x) void bar(int, int, int, int); void foo(void) { int a1, a2, b1, b2; #pragma omp p s(a) s(b) d(none) bar(a1, a2, b1, b2); }
// { dg-do compile } #define p parallel #define s(x) shared(x##1, x##2) #define d(x) default(x) void bar(int, int, int, int); void foo(void) { int a1, a2, b1, b2; bar(a1, a2, b1, b2); }
// { dg-do compile } #define p parallel #define s(x) shared(x##1, x##2) #define d(x) default(x) void bar(int, int, int, int); void foo(void) { int a1, a2, b1, b2; #pragma omp p s(a) s(b) d(none) bar(a1, a2, b1, b2); }
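macro-2.c is a compile-only test that the tokens following #pragma omp undergo macro replacement. With the three macros defined above, the directive in foo() expands to: parallel shared(a1, a2) shared(b1, b2) default(none). A hand-expanded equivalent is sketched below (foo_expanded is an illustrative name; the test itself keeps foo):

void bar(int, int, int, int);

void foo_expanded(void)
{
  int a1, a2, b1, b2;
  /* what "#pragma omp p s(a) s(b) d(none)" becomes after preprocessing */
#pragma omp parallel shared(a1, a2) shared(b1, b2) default(none)
  bar(a1, a2, b1, b2);
}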
ccl_utils.c
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <gsl/gsl_errno.h> #include "ccl.h" /* ------- ROUTINE: ccl_linear spacing ------ INPUTS: [xmin,xmax] of the interval to be divided in N bins OUTPUT: bin edges in range [xmin,xmax] */ double * ccl_linear_spacing(double xmin, double xmax, int N) { double dx = (xmax-xmin)/(N -1.); double * x = malloc(sizeof(double)*N); if (x==NULL) { ccl_raise_warning( CCL_ERROR_MEMORY, "ERROR: Could not allocate memory for linear-spaced array (N=%d)\n", N); return x; } for (int i=0; i<N; i++) { x[i] = xmin + dx*i; } x[0]=xmin; //Make sure roundoff errors don't spoil edges x[N-1]=xmax; //Make sure roundoff errors don't spoil edges return x; } /* ------- ROUTINE: ccl_linlog spacing ------ * INPUTS: [xminlog,xmax] of the interval to be divided in bins * xmin when linear spacing starts * Nlog number of logarithmically spaced bins * Nlin number of linearly spaced bins * OUTPUT: bin edges in range [xminlog,xmax] * */ double * ccl_linlog_spacing(double xminlog, double xmin, double xmax, int Nlog, int Nlin) { if (Nlog<2) { ccl_raise_warning( CCL_ERROR_LINLOGSPACE, "ERROR: Cannot make log-spaced array with %d points - need at least 2\n", Nlog); return NULL; } if (!(xminlog>0 && xmin>0)) { ccl_raise_warning( CCL_ERROR_LINLOGSPACE, "ERROR: Cannot make log-spaced array xminlog or xmin non-positive (had %le, %le)\n", xminlog, xmin); return NULL; } if (xminlog>xmin){ ccl_raise_warning(CCL_ERROR_LINLOGSPACE, "ERROR: xminlog must be smaller as xmin"); return NULL; } if (xmin>xmax){ ccl_raise_warning(CCL_ERROR_LINLOGSPACE, "ERROR: xmin must be smaller as xmax"); return NULL; } double * x = malloc(sizeof(double)*(Nlin+Nlog-1)); if (x==NULL) { ccl_raise_warning( CCL_ERROR_MEMORY, "ERROR: Could not allocate memory for array of size (Nlin+Nlog-1)=%d)\n", (Nlin+Nlog-1)); return x; } double dx = (xmax-xmin)/(Nlin -1.); double log_xchange = log(xmin); double log_xmin = log(xminlog); double dlog_x = (log_xchange - log_xmin) / (Nlog-1.); for (int i=0; i<Nlin+Nlog-1; i++) { if (i<Nlog) x[i] = exp(log_xmin + dlog_x*i); if (i>=Nlog) x[i] = xmin + dx*(i-Nlog+1); } x[0]=xminlog; //Make sure roundoff errors don't spoil edges x[Nlog-1]=xmin; //Make sure roundoff errors don't spoil edges x[Nlin+Nlog-2]=xmax; //Make sure roundoff errors don't spoil edges return x; } /* ------- ROUTINE: ccl_log spacing ------ INPUTS: [xmin,xmax] of the interval to be divided logarithmically in N bins TASK: divide an interval in N logarithmic bins OUTPUT: bin edges in range [xmin,xmax] */ double * ccl_log_spacing(double xmin, double xmax, int N) { if (N<2) { ccl_raise_warning( CCL_ERROR_LOGSPACE, "ERROR: Cannot make log-spaced array with %d points - need at least 2\n", N); return NULL; } if (!(xmin>0 && xmax>0)) { ccl_raise_warning( CCL_ERROR_LOGSPACE, "ERROR: Cannot make log-spaced array xmax or xmax non-positive (had %le, %le)\n", xmin, xmax); return NULL; } double log_xmax = log(xmax); double log_xmin = log(xmin); double dlog_x = (log_xmax - log_xmin) / (N-1.); double * x = malloc(sizeof(double)*N); if (x==NULL) { ccl_raise_warning( CCL_ERROR_MEMORY, "ERROR: Could not allocate memory for log-spaced array (N=%d)\n", N); return x; } double xratio = exp(dlog_x); x[0] = xmin; //Make sure roundoff errors don't spoil edges for (int i=1; i<N-1; i++) { x[i] = x[i-1] * xratio; } x[N-1]=xmax; //Make sure roundoff errors don't spoil edges return x; } #define CCL_GAMMA1 2.6789385347077476336556 //Gamma(1/3) #define CCL_GAMMA2 1.3541179394264004169452 //Gamma(2/3) #define CCL_ROOTPI12 
21.269446210866192327578 //12*sqrt(pi) double ccl_j_bessel(int l,double x) { double jl; double ax=fabs(x); double ax2=x*x; if(l<0) { fprintf(stderr,"CosmoMas: l>0 for Bessel function"); exit(1); } if(l<7) { if(l==0) { if(ax<0.1) jl=1-ax2*(1-ax2/20.)/6.; else jl=sin(x)/x; } else if(l==1) { if(ax<0.2) jl=ax*(1-ax2*(1-ax2/28)/10)/3; else jl=(sin(x)/ax-cos(x))/ax; } else if(l==2) { if(ax<0.3) jl=ax2*(1-ax2*(1-ax2/36)/14)/15; else jl=(-3*cos(x)/ax-sin(x)*(1-3/ax2))/ax; } else if(l==3) { if(ax<0.4) jl=ax*ax2*(1-ax2*(1-ax2/44)/18)/105; else jl=(cos(x)*(1-15/ax2)-sin(x)*(6-15/ax2)/ax)/ax; } else if(l==4) { if(ax<0.6) jl=ax2*ax2*(1-ax2*(1-ax2/52)/22)/945; else jl=(sin(x)*(1-(45-105/ax2)/ax2)+cos(x)*(10-105/ax2)/ax)/ax; } else if(l==5) { if(ax<1.0) jl=ax2*ax2*ax*(1-ax2*(1-ax2/60)/26)/10395; else { jl=(sin(x)*(15-(420-945/ax2)/ax2)/ax- cos(x)*(1-(105-945/ax2)/ax2))/ax; } } else { if(ax<1.0) jl=ax2*ax2*ax2*(1-ax2*(1-ax2/68)/30)/135135; else { jl=(sin(x)*(-1+(210-(4725-10395/ax2)/ax2)/ax2)+ cos(x)*(-21+(1260-10395/ax2)/ax2)/ax)/ax; } } } else { double nu=l+0.5; double nu2=nu*nu; if(ax<1.0E-40) jl=0; else if((ax2/l)<0.5) { jl=(exp(l*log(ax/nu)-M_LN2+nu*(1-M_LN2)-(1-(1-3.5/nu2)/(30*nu2))/(12*nu))/nu)* (1-ax2/(4*nu+4)*(1-ax2/(8*nu+16)*(1-ax2/(12*nu+36)))); } else if((l*l/ax)<0.5) { double beta=ax-0.5*M_PI*(l+1); jl=(cos(beta)*(1-(nu2-0.25)*(nu2-2.25)/(8*ax2)*(1-(nu2-6.25)*(nu2-12.25)/(48*ax2)))- sin(beta)*(nu2-0.25)/(2*ax)*(1-(nu2-2.25)*(nu2-6.25)/(24*ax2)* (1-(nu2-12.25)*(nu2-20.25)/(80*ax2))))/ax; } else { double l3=pow(nu,0.325); if(ax<nu-1.31*l3) { double cosb=nu/ax; double sx=sqrt(nu2-ax2); double cotb=nu/sx; double secb=ax/nu; double beta=log(cosb+sx/ax); double cot3b=cotb*cotb*cotb; double cot6b=cot3b*cot3b; double sec2b=secb*secb; double expterm=((2+3*sec2b)*cot3b/24 -((4+sec2b)*sec2b*cot6b/16 +((16-(1512+(3654+375*sec2b)*sec2b)*sec2b)*cot3b/5760 +(32+(288+(232+13*sec2b)*sec2b)*sec2b)*sec2b*cot6b/(128*nu))* cot6b/nu)/nu)/nu; jl=sqrt(cotb*cosb)/(2*nu)*exp(-nu*beta+nu/cotb-expterm); } else if(ax>nu+1.48*l3) { double cosb=nu/ax; double sx=sqrt(ax2-nu2); double cotb=nu/sx; double secb=ax/nu; double beta=acos(cosb); double cot3b=cotb*cotb*cotb; double cot6b=cot3b*cot3b; double sec2b=secb*secb; double trigarg=nu/cotb-nu*beta-0.25*M_PI- ((2+3*sec2b)*cot3b/24+(16-(1512+(3654+375*sec2b)*sec2b)*sec2b)* cot3b*cot6b/(5760*nu2))/nu; double expterm=((4+sec2b)*sec2b*cot6b/16- (32+(288+(232+13*sec2b)*sec2b)*sec2b)* sec2b*cot6b*cot6b/(128*nu2))/nu2; jl=sqrt(cotb*cosb)/nu*exp(-expterm)*cos(trigarg); } else { double beta=ax-nu; double beta2=beta*beta; double sx=6/ax; double sx2=sx*sx; double secb=pow(sx,0.3333333333333333333333); double sec2b=secb*secb; jl=(CCL_GAMMA1*secb+beta*CCL_GAMMA2*sec2b -(beta2/18-1.0/45.0)*beta*sx*secb*CCL_GAMMA1 -((beta2-1)*beta2/36+1.0/420.0)*sx*sec2b*CCL_GAMMA2 +(((beta2/1620-7.0/3240.0)*beta2+1.0/648.0)*beta2-1.0/8100.0)*sx2*secb*CCL_GAMMA1 +(((beta2/4536-1.0/810.0)*beta2+19.0/11340.0)*beta2-13.0/28350.0)*beta*sx2*sec2b*CCL_GAMMA2 -((((beta2/349920-1.0/29160.0)*beta2+71.0/583200.0)*beta2-121.0/874800.0)* beta2+7939.0/224532000.0)*beta*sx2*sx*secb*CCL_GAMMA1)*sqrt(sx)/CCL_ROOTPI12; } } } if((x<0)&&(l%2!=0)) jl=-jl; return jl; } void ccl_integ_spline(int ny, int nx,double *x,double **y, double a, double b, double *result, const gsl_interp_type *T, int *status) { if(b==a) { int iyy; for(iyy=0; iyy<ny; iyy++) result[iyy]=0; return; } if(b<a) { b=x[nx-1]; a=x[0]; } if((b>x[nx-1]) || (a<x[0])) { ccl_raise_warning(CCL_ERROR_SPLINE, "ERROR: integration limits beyond interpolated range\n"); 
*status = CCL_ERROR_SPLINE; return; } if(*status==0) { #pragma omp parallel default(none) \ shared(nx, ny, x, y, a, b, result, T, status) { int iy; int local_status=0; gsl_interp_accel *ia = NULL; gsl_spline *s = NULL; s = gsl_spline_alloc(T, nx); if(s == NULL) local_status = CCL_ERROR_MEMORY; if(!local_status) { ia = gsl_interp_accel_alloc(); if(ia == NULL) local_status = CCL_ERROR_MEMORY; } if(!local_status) { #pragma omp for for(iy=0; iy<ny; iy++) { if(!local_status) { if(gsl_spline_init(s, x, y[iy], nx)) { local_status = CCL_ERROR_SPLINE; result[iy] = NAN; } } if(!local_status) { int sstat = gsl_spline_eval_integ_e(s, a, b, ia, &(result[iy])); if(sstat) { local_status = CCL_ERROR_SPLINE_EV; result[iy] = NAN; } } } } gsl_spline_free(s); gsl_interp_accel_free(ia); if (local_status) { #pragma omp atomic write *status = local_status; } } //end omp parallel } }
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <gsl/gsl_errno.h> #include "ccl.h" /* ------- ROUTINE: ccl_linear spacing ------ INPUTS: [xmin,xmax] of the interval to be divided in N bins OUTPUT: bin edges in range [xmin,xmax] */ double * ccl_linear_spacing(double xmin, double xmax, int N) { double dx = (xmax-xmin)/(N -1.); double * x = malloc(sizeof(double)*N); if (x==NULL) { ccl_raise_warning( CCL_ERROR_MEMORY, "ERROR: Could not allocate memory for linear-spaced array (N=%d)\n", N); return x; } for (int i=0; i<N; i++) { x[i] = xmin + dx*i; } x[0]=xmin; //Make sure roundoff errors don't spoil edges x[N-1]=xmax; //Make sure roundoff errors don't spoil edges return x; } /* ------- ROUTINE: ccl_linlog spacing ------ * INPUTS: [xminlog,xmax] of the interval to be divided in bins * xmin when linear spacing starts * Nlog number of logarithmically spaced bins * Nlin number of linearly spaced bins * OUTPUT: bin edges in range [xminlog,xmax] * */ double * ccl_linlog_spacing(double xminlog, double xmin, double xmax, int Nlog, int Nlin) { if (Nlog<2) { ccl_raise_warning( CCL_ERROR_LINLOGSPACE, "ERROR: Cannot make log-spaced array with %d points - need at least 2\n", Nlog); return NULL; } if (!(xminlog>0 && xmin>0)) { ccl_raise_warning( CCL_ERROR_LINLOGSPACE, "ERROR: Cannot make log-spaced array xminlog or xmin non-positive (had %le, %le)\n", xminlog, xmin); return NULL; } if (xminlog>xmin){ ccl_raise_warning(CCL_ERROR_LINLOGSPACE, "ERROR: xminlog must be smaller as xmin"); return NULL; } if (xmin>xmax){ ccl_raise_warning(CCL_ERROR_LINLOGSPACE, "ERROR: xmin must be smaller as xmax"); return NULL; } double * x = malloc(sizeof(double)*(Nlin+Nlog-1)); if (x==NULL) { ccl_raise_warning( CCL_ERROR_MEMORY, "ERROR: Could not allocate memory for array of size (Nlin+Nlog-1)=%d)\n", (Nlin+Nlog-1)); return x; } double dx = (xmax-xmin)/(Nlin -1.); double log_xchange = log(xmin); double log_xmin = log(xminlog); double dlog_x = (log_xchange - log_xmin) / (Nlog-1.); for (int i=0; i<Nlin+Nlog-1; i++) { if (i<Nlog) x[i] = exp(log_xmin + dlog_x*i); if (i>=Nlog) x[i] = xmin + dx*(i-Nlog+1); } x[0]=xminlog; //Make sure roundoff errors don't spoil edges x[Nlog-1]=xmin; //Make sure roundoff errors don't spoil edges x[Nlin+Nlog-2]=xmax; //Make sure roundoff errors don't spoil edges return x; } /* ------- ROUTINE: ccl_log spacing ------ INPUTS: [xmin,xmax] of the interval to be divided logarithmically in N bins TASK: divide an interval in N logarithmic bins OUTPUT: bin edges in range [xmin,xmax] */ double * ccl_log_spacing(double xmin, double xmax, int N) { if (N<2) { ccl_raise_warning( CCL_ERROR_LOGSPACE, "ERROR: Cannot make log-spaced array with %d points - need at least 2\n", N); return NULL; } if (!(xmin>0 && xmax>0)) { ccl_raise_warning( CCL_ERROR_LOGSPACE, "ERROR: Cannot make log-spaced array xmax or xmax non-positive (had %le, %le)\n", xmin, xmax); return NULL; } double log_xmax = log(xmax); double log_xmin = log(xmin); double dlog_x = (log_xmax - log_xmin) / (N-1.); double * x = malloc(sizeof(double)*N); if (x==NULL) { ccl_raise_warning( CCL_ERROR_MEMORY, "ERROR: Could not allocate memory for log-spaced array (N=%d)\n", N); return x; } double xratio = exp(dlog_x); x[0] = xmin; //Make sure roundoff errors don't spoil edges for (int i=1; i<N-1; i++) { x[i] = x[i-1] * xratio; } x[N-1]=xmax; //Make sure roundoff errors don't spoil edges return x; } #define CCL_GAMMA1 2.6789385347077476336556 //Gamma(1/3) #define CCL_GAMMA2 1.3541179394264004169452 //Gamma(2/3) #define CCL_ROOTPI12 
21.269446210866192327578 //12*sqrt(pi) double ccl_j_bessel(int l,double x) { double jl; double ax=fabs(x); double ax2=x*x; if(l<0) { fprintf(stderr,"CosmoMas: l>0 for Bessel function"); exit(1); } if(l<7) { if(l==0) { if(ax<0.1) jl=1-ax2*(1-ax2/20.)/6.; else jl=sin(x)/x; } else if(l==1) { if(ax<0.2) jl=ax*(1-ax2*(1-ax2/28)/10)/3; else jl=(sin(x)/ax-cos(x))/ax; } else if(l==2) { if(ax<0.3) jl=ax2*(1-ax2*(1-ax2/36)/14)/15; else jl=(-3*cos(x)/ax-sin(x)*(1-3/ax2))/ax; } else if(l==3) { if(ax<0.4) jl=ax*ax2*(1-ax2*(1-ax2/44)/18)/105; else jl=(cos(x)*(1-15/ax2)-sin(x)*(6-15/ax2)/ax)/ax; } else if(l==4) { if(ax<0.6) jl=ax2*ax2*(1-ax2*(1-ax2/52)/22)/945; else jl=(sin(x)*(1-(45-105/ax2)/ax2)+cos(x)*(10-105/ax2)/ax)/ax; } else if(l==5) { if(ax<1.0) jl=ax2*ax2*ax*(1-ax2*(1-ax2/60)/26)/10395; else { jl=(sin(x)*(15-(420-945/ax2)/ax2)/ax- cos(x)*(1-(105-945/ax2)/ax2))/ax; } } else { if(ax<1.0) jl=ax2*ax2*ax2*(1-ax2*(1-ax2/68)/30)/135135; else { jl=(sin(x)*(-1+(210-(4725-10395/ax2)/ax2)/ax2)+ cos(x)*(-21+(1260-10395/ax2)/ax2)/ax)/ax; } } } else { double nu=l+0.5; double nu2=nu*nu; if(ax<1.0E-40) jl=0; else if((ax2/l)<0.5) { jl=(exp(l*log(ax/nu)-M_LN2+nu*(1-M_LN2)-(1-(1-3.5/nu2)/(30*nu2))/(12*nu))/nu)* (1-ax2/(4*nu+4)*(1-ax2/(8*nu+16)*(1-ax2/(12*nu+36)))); } else if((l*l/ax)<0.5) { double beta=ax-0.5*M_PI*(l+1); jl=(cos(beta)*(1-(nu2-0.25)*(nu2-2.25)/(8*ax2)*(1-(nu2-6.25)*(nu2-12.25)/(48*ax2)))- sin(beta)*(nu2-0.25)/(2*ax)*(1-(nu2-2.25)*(nu2-6.25)/(24*ax2)* (1-(nu2-12.25)*(nu2-20.25)/(80*ax2))))/ax; } else { double l3=pow(nu,0.325); if(ax<nu-1.31*l3) { double cosb=nu/ax; double sx=sqrt(nu2-ax2); double cotb=nu/sx; double secb=ax/nu; double beta=log(cosb+sx/ax); double cot3b=cotb*cotb*cotb; double cot6b=cot3b*cot3b; double sec2b=secb*secb; double expterm=((2+3*sec2b)*cot3b/24 -((4+sec2b)*sec2b*cot6b/16 +((16-(1512+(3654+375*sec2b)*sec2b)*sec2b)*cot3b/5760 +(32+(288+(232+13*sec2b)*sec2b)*sec2b)*sec2b*cot6b/(128*nu))* cot6b/nu)/nu)/nu; jl=sqrt(cotb*cosb)/(2*nu)*exp(-nu*beta+nu/cotb-expterm); } else if(ax>nu+1.48*l3) { double cosb=nu/ax; double sx=sqrt(ax2-nu2); double cotb=nu/sx; double secb=ax/nu; double beta=acos(cosb); double cot3b=cotb*cotb*cotb; double cot6b=cot3b*cot3b; double sec2b=secb*secb; double trigarg=nu/cotb-nu*beta-0.25*M_PI- ((2+3*sec2b)*cot3b/24+(16-(1512+(3654+375*sec2b)*sec2b)*sec2b)* cot3b*cot6b/(5760*nu2))/nu; double expterm=((4+sec2b)*sec2b*cot6b/16- (32+(288+(232+13*sec2b)*sec2b)*sec2b)* sec2b*cot6b*cot6b/(128*nu2))/nu2; jl=sqrt(cotb*cosb)/nu*exp(-expterm)*cos(trigarg); } else { double beta=ax-nu; double beta2=beta*beta; double sx=6/ax; double sx2=sx*sx; double secb=pow(sx,0.3333333333333333333333); double sec2b=secb*secb; jl=(CCL_GAMMA1*secb+beta*CCL_GAMMA2*sec2b -(beta2/18-1.0/45.0)*beta*sx*secb*CCL_GAMMA1 -((beta2-1)*beta2/36+1.0/420.0)*sx*sec2b*CCL_GAMMA2 +(((beta2/1620-7.0/3240.0)*beta2+1.0/648.0)*beta2-1.0/8100.0)*sx2*secb*CCL_GAMMA1 +(((beta2/4536-1.0/810.0)*beta2+19.0/11340.0)*beta2-13.0/28350.0)*beta*sx2*sec2b*CCL_GAMMA2 -((((beta2/349920-1.0/29160.0)*beta2+71.0/583200.0)*beta2-121.0/874800.0)* beta2+7939.0/224532000.0)*beta*sx2*sx*secb*CCL_GAMMA1)*sqrt(sx)/CCL_ROOTPI12; } } } if((x<0)&&(l%2!=0)) jl=-jl; return jl; } void ccl_integ_spline(int ny, int nx,double *x,double **y, double a, double b, double *result, const gsl_interp_type *T, int *status) { if(b==a) { int iyy; for(iyy=0; iyy<ny; iyy++) result[iyy]=0; return; } if(b<a) { b=x[nx-1]; a=x[0]; } if((b>x[nx-1]) || (a<x[0])) { ccl_raise_warning(CCL_ERROR_SPLINE, "ERROR: integration limits beyond interpolated range\n"); 
*status = CCL_ERROR_SPLINE; return; } if(*status==0) { shared(nx, ny, x, y, a, b, result, T, status) { int iy; int local_status=0; gsl_interp_accel *ia = NULL; gsl_spline *s = NULL; s = gsl_spline_alloc(T, nx); if(s == NULL) local_status = CCL_ERROR_MEMORY; if(!local_status) { ia = gsl_interp_accel_alloc(); if(ia == NULL) local_status = CCL_ERROR_MEMORY; } if(!local_status) { for(iy=0; iy<ny; iy++) { if(!local_status) { if(gsl_spline_init(s, x, y[iy], nx)) { local_status = CCL_ERROR_SPLINE; result[iy] = NAN; } } if(!local_status) { int sstat = gsl_spline_eval_integ_e(s, a, b, ia, &(result[iy])); if(sstat) { local_status = CCL_ERROR_SPLINE_EV; result[iy] = NAN; } } } } gsl_spline_free(s); gsl_interp_accel_free(ia); if (local_status) { *status = local_status; } } //end omp parallel } }
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <gsl/gsl_errno.h> #include "ccl.h" /* ------- ROUTINE: ccl_linear spacing ------ INPUTS: [xmin,xmax] of the interval to be divided in N bins OUTPUT: bin edges in range [xmin,xmax] */ double * ccl_linear_spacing(double xmin, double xmax, int N) { double dx = (xmax-xmin)/(N -1.); double * x = malloc(sizeof(double)*N); if (x==NULL) { ccl_raise_warning( CCL_ERROR_MEMORY, "ERROR: Could not allocate memory for linear-spaced array (N=%d)\n", N); return x; } for (int i=0; i<N; i++) { x[i] = xmin + dx*i; } x[0]=xmin; //Make sure roundoff errors don't spoil edges x[N-1]=xmax; //Make sure roundoff errors don't spoil edges return x; } /* ------- ROUTINE: ccl_linlog spacing ------ * INPUTS: [xminlog,xmax] of the interval to be divided in bins * xmin when linear spacing starts * Nlog number of logarithmically spaced bins * Nlin number of linearly spaced bins * OUTPUT: bin edges in range [xminlog,xmax] * */ double * ccl_linlog_spacing(double xminlog, double xmin, double xmax, int Nlog, int Nlin) { if (Nlog<2) { ccl_raise_warning( CCL_ERROR_LINLOGSPACE, "ERROR: Cannot make log-spaced array with %d points - need at least 2\n", Nlog); return NULL; } if (!(xminlog>0 && xmin>0)) { ccl_raise_warning( CCL_ERROR_LINLOGSPACE, "ERROR: Cannot make log-spaced array xminlog or xmin non-positive (had %le, %le)\n", xminlog, xmin); return NULL; } if (xminlog>xmin){ ccl_raise_warning(CCL_ERROR_LINLOGSPACE, "ERROR: xminlog must be smaller as xmin"); return NULL; } if (xmin>xmax){ ccl_raise_warning(CCL_ERROR_LINLOGSPACE, "ERROR: xmin must be smaller as xmax"); return NULL; } double * x = malloc(sizeof(double)*(Nlin+Nlog-1)); if (x==NULL) { ccl_raise_warning( CCL_ERROR_MEMORY, "ERROR: Could not allocate memory for array of size (Nlin+Nlog-1)=%d)\n", (Nlin+Nlog-1)); return x; } double dx = (xmax-xmin)/(Nlin -1.); double log_xchange = log(xmin); double log_xmin = log(xminlog); double dlog_x = (log_xchange - log_xmin) / (Nlog-1.); for (int i=0; i<Nlin+Nlog-1; i++) { if (i<Nlog) x[i] = exp(log_xmin + dlog_x*i); if (i>=Nlog) x[i] = xmin + dx*(i-Nlog+1); } x[0]=xminlog; //Make sure roundoff errors don't spoil edges x[Nlog-1]=xmin; //Make sure roundoff errors don't spoil edges x[Nlin+Nlog-2]=xmax; //Make sure roundoff errors don't spoil edges return x; } /* ------- ROUTINE: ccl_log spacing ------ INPUTS: [xmin,xmax] of the interval to be divided logarithmically in N bins TASK: divide an interval in N logarithmic bins OUTPUT: bin edges in range [xmin,xmax] */ double * ccl_log_spacing(double xmin, double xmax, int N) { if (N<2) { ccl_raise_warning( CCL_ERROR_LOGSPACE, "ERROR: Cannot make log-spaced array with %d points - need at least 2\n", N); return NULL; } if (!(xmin>0 && xmax>0)) { ccl_raise_warning( CCL_ERROR_LOGSPACE, "ERROR: Cannot make log-spaced array xmax or xmax non-positive (had %le, %le)\n", xmin, xmax); return NULL; } double log_xmax = log(xmax); double log_xmin = log(xmin); double dlog_x = (log_xmax - log_xmin) / (N-1.); double * x = malloc(sizeof(double)*N); if (x==NULL) { ccl_raise_warning( CCL_ERROR_MEMORY, "ERROR: Could not allocate memory for log-spaced array (N=%d)\n", N); return x; } double xratio = exp(dlog_x); x[0] = xmin; //Make sure roundoff errors don't spoil edges for (int i=1; i<N-1; i++) { x[i] = x[i-1] * xratio; } x[N-1]=xmax; //Make sure roundoff errors don't spoil edges return x; } #define CCL_GAMMA1 2.6789385347077476336556 //Gamma(1/3) #define CCL_GAMMA2 1.3541179394264004169452 //Gamma(2/3) #define CCL_ROOTPI12 
21.269446210866192327578 //12*sqrt(pi) double ccl_j_bessel(int l,double x) { double jl; double ax=fabs(x); double ax2=x*x; if(l<0) { fprintf(stderr,"CosmoMas: l>0 for Bessel function"); exit(1); } if(l<7) { if(l==0) { if(ax<0.1) jl=1-ax2*(1-ax2/20.)/6.; else jl=sin(x)/x; } else if(l==1) { if(ax<0.2) jl=ax*(1-ax2*(1-ax2/28)/10)/3; else jl=(sin(x)/ax-cos(x))/ax; } else if(l==2) { if(ax<0.3) jl=ax2*(1-ax2*(1-ax2/36)/14)/15; else jl=(-3*cos(x)/ax-sin(x)*(1-3/ax2))/ax; } else if(l==3) { if(ax<0.4) jl=ax*ax2*(1-ax2*(1-ax2/44)/18)/105; else jl=(cos(x)*(1-15/ax2)-sin(x)*(6-15/ax2)/ax)/ax; } else if(l==4) { if(ax<0.6) jl=ax2*ax2*(1-ax2*(1-ax2/52)/22)/945; else jl=(sin(x)*(1-(45-105/ax2)/ax2)+cos(x)*(10-105/ax2)/ax)/ax; } else if(l==5) { if(ax<1.0) jl=ax2*ax2*ax*(1-ax2*(1-ax2/60)/26)/10395; else { jl=(sin(x)*(15-(420-945/ax2)/ax2)/ax- cos(x)*(1-(105-945/ax2)/ax2))/ax; } } else { if(ax<1.0) jl=ax2*ax2*ax2*(1-ax2*(1-ax2/68)/30)/135135; else { jl=(sin(x)*(-1+(210-(4725-10395/ax2)/ax2)/ax2)+ cos(x)*(-21+(1260-10395/ax2)/ax2)/ax)/ax; } } } else { double nu=l+0.5; double nu2=nu*nu; if(ax<1.0E-40) jl=0; else if((ax2/l)<0.5) { jl=(exp(l*log(ax/nu)-M_LN2+nu*(1-M_LN2)-(1-(1-3.5/nu2)/(30*nu2))/(12*nu))/nu)* (1-ax2/(4*nu+4)*(1-ax2/(8*nu+16)*(1-ax2/(12*nu+36)))); } else if((l*l/ax)<0.5) { double beta=ax-0.5*M_PI*(l+1); jl=(cos(beta)*(1-(nu2-0.25)*(nu2-2.25)/(8*ax2)*(1-(nu2-6.25)*(nu2-12.25)/(48*ax2)))- sin(beta)*(nu2-0.25)/(2*ax)*(1-(nu2-2.25)*(nu2-6.25)/(24*ax2)* (1-(nu2-12.25)*(nu2-20.25)/(80*ax2))))/ax; } else { double l3=pow(nu,0.325); if(ax<nu-1.31*l3) { double cosb=nu/ax; double sx=sqrt(nu2-ax2); double cotb=nu/sx; double secb=ax/nu; double beta=log(cosb+sx/ax); double cot3b=cotb*cotb*cotb; double cot6b=cot3b*cot3b; double sec2b=secb*secb; double expterm=((2+3*sec2b)*cot3b/24 -((4+sec2b)*sec2b*cot6b/16 +((16-(1512+(3654+375*sec2b)*sec2b)*sec2b)*cot3b/5760 +(32+(288+(232+13*sec2b)*sec2b)*sec2b)*sec2b*cot6b/(128*nu))* cot6b/nu)/nu)/nu; jl=sqrt(cotb*cosb)/(2*nu)*exp(-nu*beta+nu/cotb-expterm); } else if(ax>nu+1.48*l3) { double cosb=nu/ax; double sx=sqrt(ax2-nu2); double cotb=nu/sx; double secb=ax/nu; double beta=acos(cosb); double cot3b=cotb*cotb*cotb; double cot6b=cot3b*cot3b; double sec2b=secb*secb; double trigarg=nu/cotb-nu*beta-0.25*M_PI- ((2+3*sec2b)*cot3b/24+(16-(1512+(3654+375*sec2b)*sec2b)*sec2b)* cot3b*cot6b/(5760*nu2))/nu; double expterm=((4+sec2b)*sec2b*cot6b/16- (32+(288+(232+13*sec2b)*sec2b)*sec2b)* sec2b*cot6b*cot6b/(128*nu2))/nu2; jl=sqrt(cotb*cosb)/nu*exp(-expterm)*cos(trigarg); } else { double beta=ax-nu; double beta2=beta*beta; double sx=6/ax; double sx2=sx*sx; double secb=pow(sx,0.3333333333333333333333); double sec2b=secb*secb; jl=(CCL_GAMMA1*secb+beta*CCL_GAMMA2*sec2b -(beta2/18-1.0/45.0)*beta*sx*secb*CCL_GAMMA1 -((beta2-1)*beta2/36+1.0/420.0)*sx*sec2b*CCL_GAMMA2 +(((beta2/1620-7.0/3240.0)*beta2+1.0/648.0)*beta2-1.0/8100.0)*sx2*secb*CCL_GAMMA1 +(((beta2/4536-1.0/810.0)*beta2+19.0/11340.0)*beta2-13.0/28350.0)*beta*sx2*sec2b*CCL_GAMMA2 -((((beta2/349920-1.0/29160.0)*beta2+71.0/583200.0)*beta2-121.0/874800.0)* beta2+7939.0/224532000.0)*beta*sx2*sx*secb*CCL_GAMMA1)*sqrt(sx)/CCL_ROOTPI12; } } } if((x<0)&&(l%2!=0)) jl=-jl; return jl; } void ccl_integ_spline(int ny, int nx,double *x,double **y, double a, double b, double *result, const gsl_interp_type *T, int *status) { if(b==a) { int iyy; for(iyy=0; iyy<ny; iyy++) result[iyy]=0; return; } if(b<a) { b=x[nx-1]; a=x[0]; } if((b>x[nx-1]) || (a<x[0])) { ccl_raise_warning(CCL_ERROR_SPLINE, "ERROR: integration limits beyond interpolated range\n"); 
*status = CCL_ERROR_SPLINE; return; } if(*status==0) { #pragma omp parallel default(none) \ shared(nx, ny, x, y, a, b, result, T, status) { int iy; int local_status=0; gsl_interp_accel *ia = NULL; gsl_spline *s = NULL; s = gsl_spline_alloc(T, nx); if(s == NULL) local_status = CCL_ERROR_MEMORY; if(!local_status) { ia = gsl_interp_accel_alloc(); if(ia == NULL) local_status = CCL_ERROR_MEMORY; } if(!local_status) { #pragma omp for for(iy=0; iy<ny; iy++) { if(!local_status) { if(gsl_spline_init(s, x, y[iy], nx)) { local_status = CCL_ERROR_SPLINE; result[iy] = NAN; } } if(!local_status) { int sstat = gsl_spline_eval_integ_e(s, a, b, ia, &(result[iy])); if(sstat) { local_status = CCL_ERROR_SPLINE_EV; result[iy] = NAN; } } } } gsl_spline_free(s); gsl_interp_accel_free(ia); if (local_status) { #pragma omp atomic write *status = local_status; } } //end omp parallel } }
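In ccl_integ_spline above, every thread allocates its own gsl_spline and gsl_interp_accel inside the parallel region, keeps a thread-local status, and publishes any failure to the shared *status with #pragma omp atomic write, so the error report is race-free. A minimal sketch of that error-propagation pattern without the GSL dependency follows (integrate_rows and the trapezoid rule standing in for the spline integral are illustrative assumptions, not CCL code):

#include <math.h>

/* Sketch only: integrate ny sampled functions y[iy] over the grid x,
   one row per loop iteration, reporting failures through *status. */
void integrate_rows(int ny, int nx, const double *x, double **y,
                    double *result, int *status)
{
    #pragma omp parallel default(none) shared(ny, nx, x, y, result, status)
    {
        int local_status = 0;   /* thread-local error flag */

        #pragma omp for
        for (int iy = 0; iy < ny; iy++)
        {
            double sum = 0.0;   /* trapezoid rule stands in for gsl_spline_eval_integ_e */
            for (int i = 1; i < nx; i++)
                sum += 0.5 * (y[iy][i] + y[iy][i - 1]) * (x[i] - x[i - 1]);

            if (isnan(sum))
            {
                local_status = 1;
                result[iy] = NAN;
            }
            else
            {
                result[iy] = sum;
            }
        }

        if (local_status)
        {
            /* race-free: any nonzero report is equivalent, so last writer wins */
            #pragma omp atomic write
            *status = local_status;
        }
    }
}

As in the original, a nonzero status is only ever set, never cleared, so concurrent failing threads cannot mask each other's reports.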
generator.h
// Copyright (c) 2015, The Regents of the University of California (Regents) // See LICENSE.txt for license details #ifndef GENERATOR_H_ #define GENERATOR_H_ #include <algorithm> #include <cinttypes> #include <random> #include "graph.h" #include "pvector.h" #include "util.h" /* GAP Benchmark Suite Class: Generator Author: Scott Beamer Given scale and degree, generates edgelist for synthetic graph - Intended to be called from Builder - GenerateEL(uniform) generates and returns the edgelist - Can generate uniform random (uniform=true) or R-MAT graph according to Graph500 parameters (uniform=false) - Can also randomize weights within a weighted edgelist (InsertWeights) - Blocking/reseeding is for parallelism with deterministic output edgelist */ template <typename NodeID_, typename DestID_ = NodeID_, typename WeightT_ = NodeID_> class Generator { typedef EdgePair<NodeID_, DestID_> Edge; typedef EdgePair<NodeID_, NodeWeight<NodeID_, WeightT_>> WEdge; typedef pvector<Edge> EdgeList; public: Generator(int scale, int degree) { scale_ = scale; num_nodes_ = 1l << scale; num_edges_ = num_nodes_ * degree; if (num_nodes_ > std::numeric_limits<NodeID_>::max()) { std::cout << "NodeID type (max: " << std::numeric_limits<NodeID_>::max(); std::cout << ") too small to hold " << num_nodes_ << std::endl; std::cout << "Recommend changing NodeID (typedef'd in src/benchmark.h)"; std::cout << " to a wider type and recompiling" << std::endl; std::exit(-31); } } void PermuteIDs(EdgeList &el) { pvector<NodeID_> permutation(num_nodes_); std::mt19937 rng(kRandSeed); #pragma omp parallel for for (NodeID_ n=0; n < num_nodes_; n++) permutation[n] = n; shuffle(permutation.begin(), permutation.end(), rng); #pragma omp parallel for for (int64_t e=0; e < num_edges_; e++) el[e] = Edge(permutation[el[e].u], permutation[el[e].v]); } EdgeList MakeUniformEL() { EdgeList el(num_edges_); #pragma omp parallel { std::mt19937 rng; std::uniform_int_distribution<NodeID_> udist(0, num_nodes_-1); #pragma omp for for (int64_t block=0; block < num_edges_; block+=block_size) { rng.seed(kRandSeed + block/block_size); for (int64_t e=block; e < std::min(block+block_size, num_edges_); e++) { el[e] = Edge(udist(rng), udist(rng)); } } } return el; } EdgeList MakeRMatEL() { const float A = 0.57f, B = 0.19f, C = 0.19f; EdgeList el(num_edges_); #pragma omp parallel { std::mt19937 rng; std::uniform_real_distribution<float> udist(0, 1.0f); #pragma omp for for (int64_t block=0; block < num_edges_; block+=block_size) { rng.seed(kRandSeed + block/block_size); for (int64_t e=block; e < std::min(block+block_size, num_edges_); e++) { NodeID_ src = 0, dst = 0; for (int depth=0; depth < scale_; depth++) { float rand_point = udist(rng); src = src << 1; dst = dst << 1; if (rand_point < A+B) { if (rand_point > A) dst++; } else { src++; if (rand_point > A+B+C) dst++; } } el[e] = Edge(src, dst); } } } PermuteIDs(el); // TIME_PRINT("Shuffle", std::shuffle(el.begin(), el.end(), // std::mt19937())); return el; } EdgeList GenerateEL(bool uniform) { EdgeList el; Timer t; t.Start(); if (uniform) el = MakeUniformEL(); else el = MakeRMatEL(); t.Stop(); PrintTime("Generate Time", t.Seconds()); return el; } static void InsertWeights(pvector<EdgePair<NodeID_, NodeID_>> &el) {} // Overwrites existing weights with random from [1,255] static void InsertWeights(pvector<WEdge> &el) { #pragma omp parallel { std::mt19937 rng; std::uniform_int_distribution<int> udist(1, 255); int64_t el_size = el.size(); #pragma omp for for (int64_t block=0; block < el_size; block+=block_size) { 
rng.seed(kRandSeed + block/block_size); for (int64_t e=block; e < std::min(block+block_size, el_size); e++) { el[e].v.w = static_cast<WeightT_>(udist(rng)); } } } } private: int scale_; int64_t num_nodes_; int64_t num_edges_; static const int64_t block_size = 1<<18; }; #endif // GENERATOR_H_
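The header comment above notes that blocking/reseeding exists so that parallel generation still produces a deterministic edge list: each block of block_size iterations reseeds its generator from kRandSeed plus the block index, so the output depends only on the block, never on which thread runs it or in what order. A minimal C sketch of that idea (fill_random, BLOCK, and the LCG constants are illustrative choices, not the benchmark's own):

#include <stdint.h>

#define BLOCK 4096          /* illustrative block size */

/* one step of a 64-bit linear congruential generator (Knuth's MMIX constants) */
static inline uint64_t lcg_next(uint64_t *s)
{
    *s = *s * 6364136223846793005ULL + 1442695040888963407ULL;
    return *s;
}

/* Fill out[0..n-1] with pseudo-random values.  The result is identical
   for any thread count or schedule, because each block derives its RNG
   state from the seed and the block index alone. */
void fill_random(uint64_t *out, int64_t n, uint64_t seed)
{
    #pragma omp parallel for schedule(dynamic)
    for (int64_t block = 0; block < n; block += BLOCK)
    {
        uint64_t state = seed + (uint64_t)(block / BLOCK);  /* reseed per block */
        int64_t end = (block + BLOCK < n) ? block + BLOCK : n;
        for (int64_t e = block; e < end; e++)
            out[e] = lcg_next(&state);
    }
}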
// Copyright (c) 2015, The Regents of the University of California (Regents) // See LICENSE.txt for license details #ifndef GENERATOR_H_ #define GENERATOR_H_ #include <algorithm> #include <cinttypes> #include <random> #include "graph.h" #include "pvector.h" #include "util.h" /* * GAP Benchmark Suite Class: Generator Author: Scott Beamer * * Given scale and degree, generates edgelist for synthetic graph - Intended * to be called from Builder - GenerateEL(uniform) generates and returns * the edgelist - Can generate uniform random (uniform=true) or R-MAT * graph according to Graph500 parameters (uniform=false) - Can also * randomize weights within a weighted edgelist (InsertWeights) - * Blocking/reseeding is for parallelism with deterministic output * edgelist */ template <typename NodeID_, typename DestID_ = NodeID_, typename WeightT_ = NodeID_> class Generator { typedef EdgePair<NodeID_, DestID_> Edge; typedef EdgePair<NodeID_, NodeWeight<NodeID_, WeightT_>> WEdge; typedef pvector<Edge> EdgeList; public: Generator(int scale, int degree) { scale_ = scale; num_nodes_ = 1l << scale; num_edges_ = num_nodes_ * degree; if (num_nodes_ > std::numeric_limits<NodeID_>::max()) { std::cout << "NodeID type (max: " << std::numeric_limits<NodeID_>::max(); std::cout << ") too small to hold " << num_nodes_ << std::endl; std::cout << "Recommend changing NodeID (typedef'd in src/benchmark.h)"; std::cout << " to a wider type and recompiling" << std::endl; std::exit(-31); } } void PermuteIDs(EdgeList &el) { pvector<NodeID_> permutation(num_nodes_); std::mt19937 rng(kRandSeed); for (NodeID_ n = 0; n < num_nodes_; n++) permutation[n] = n; shuffle(permutation.begin(), permutation.end(), rng); for (int64_t e = 0; e < num_edges_; e++) el[e] = Edge(permutation[el[e].u], permutation[el[e].v]); } EdgeList MakeUniformEL() { EdgeList el(num_edges_); std::mt19937 rng; std::uniform_int_distribution<NodeID_> udist(0, num_nodes_ - 1); for (int64_t block = 0; block < num_edges_; block += block_size) { rng.seed(kRandSeed + block / block_size); for (int64_t e = block; e < std::min(block + block_size, num_edges_); e++) { el[e] = Edge(udist(rng), udist(rng)); } } return el; } EdgeList MakeRMatEL() { const float A = 0.57f, B = 0.19f, C = 0.19f; EdgeList el(num_edges_); std::mt19937 rng; std::uniform_real_distribution<float> udist(0, 1.0f); for (int64_t block = 0; block < num_edges_; block += block_size) { rng.seed(kRandSeed + block / block_size); for (int64_t e = block; e < std::min(block + block_size, num_edges_); e++) { NodeID_ src = 0, dst = 0; for (int depth = 0; depth < scale_; depth++) { float rand_point = udist(rng); src = src << 1; dst = dst << 1; if (rand_point < A + B) { if (rand_point > A) dst++; } else { src++; if (rand_point > A + B + C) dst++; } } el[e] = Edge(src, dst); } } PermuteIDs(el); //TIME_PRINT("Shuffle", std::shuffle(el.begin(), el.end(), //std::mt19937())); return el; } EdgeList GenerateEL(bool uniform) { EdgeList el; Timer t; t.Start(); if (uniform) el = MakeUniformEL(); else el = MakeRMatEL(); t.Stop(); PrintTime("Generate Time", t.Seconds()); return el; } static void InsertWeights(pvector<EdgePair<NodeID_, NodeID_>> &el) { } //Overwrites existing weights with random from [1, 255] static void InsertWeights(pvector<WEdge> &el) { std::mt19937 rng; std::uniform_int_distribution<int> udist(1, 255); int64_t el_size = el.size(); for (int64_t block = 0; block < el_size; block += block_size) { rng.seed(kRandSeed + block / block_size); for (int64_t e = block; e < std::min(block + block_size, el_size); e++) { el[e].v.w = static_cast<WeightT_>(udist(rng)); } } } private: int scale_; int64_t num_nodes_; int64_t num_edges_; static const int64_t block_size = 1 << 18; }; #endif // GENERATOR_H_
// Copyright (c) 2015, The Regents of the University of California (Regents) // See LICENSE.txt for license details #ifndef GENERATOR_H_ #define GENERATOR_H_ #include <algorithm> #include <cinttypes> #include <random> #include "graph.h" #include "pvector.h" #include "util.h" /* * GAP Benchmark Suite Class: Generator Author: Scott Beamer * * Given scale and degree, generates edgelist for synthetic graph - Intended * to be called from Builder - GenerateEL(uniform) generates and returns * the edgelist - Can generate uniform random (uniform=true) or R-MAT * graph according to Graph500 parameters (uniform=false) - Can also * randomize weights within a weighted edgelist (InsertWeights) - * Blocking/reseeding is for parallelism with deterministic output * edgelist */ template <typename NodeID_, typename DestID_ = NodeID_, typename WeightT_ = NodeID_> class Generator { typedef EdgePair<NodeID_, DestID_> Edge; typedef EdgePair<NodeID_, NodeWeight<NodeID_, WeightT_>> WEdge; typedef pvector<Edge> EdgeList; public: Generator(int scale, int degree) { scale_ = scale; num_nodes_ = 1l << scale; num_edges_ = num_nodes_ * degree; if (num_nodes_ > std::numeric_limits<NodeID_>::max()) { std::cout << "NodeID type (max: " << std::numeric_limits<NodeID_>::max(); std::cout << ") too small to hold " << num_nodes_ << std::endl; std::cout << "Recommend changing NodeID (typedef'd in src/benchmark.h)"; std::cout << " to a wider type and recompiling" << std::endl; std::exit(-31); } } void PermuteIDs(EdgeList &el) { pvector<NodeID_> permutation(num_nodes_); std::mt19937 rng(kRandSeed); #pragma omp parallel for for (NodeID_ n = 0; n < num_nodes_; n++) permutation[n] = n; shuffle(permutation.begin(), permutation.end(), rng); #pragma omp parallel for for (int64_t e = 0; e < num_edges_; e++) el[e] = Edge(permutation[el[e].u], permutation[el[e].v]); } EdgeList MakeUniformEL() { EdgeList el(num_edges_); #pragma omp parallel { std::mt19937 rng; std::uniform_int_distribution<NodeID_> udist(0, num_nodes_ - 1); #pragma omp for for (int64_t block = 0; block < num_edges_; block += block_size) { rng.seed(kRandSeed + block / block_size); for (int64_t e = block; e < std::min(block + block_size, num_edges_); e++) { el[e] = Edge(udist(rng), udist(rng)); } } } return el; } EdgeList MakeRMatEL() { const float A = 0.57f, B = 0.19f, C = 0.19f; EdgeList el(num_edges_); #pragma omp parallel { std::mt19937 rng; std::uniform_real_distribution<float> udist(0, 1.0f); #pragma omp for for (int64_t block = 0; block < num_edges_; block += block_size) { rng.seed(kRandSeed + block / block_size); for (int64_t e = block; e < std::min(block + block_size, num_edges_); e++) { NodeID_ src = 0, dst = 0; for (int depth = 0; depth < scale_; depth++) { float rand_point = udist(rng); src = src << 1; dst = dst << 1; if (rand_point < A + B) { if (rand_point > A) dst++; } else { src++; if (rand_point > A + B + C) dst++; } } el[e] = Edge(src, dst); } } } PermuteIDs(el); //TIME_PRINT("Shuffle", std::shuffle(el.begin(), el.end(), //std::mt19937())); return el; } EdgeList GenerateEL(bool uniform) { EdgeList el; Timer t; t.Start(); if (uniform) el = MakeUniformEL(); else el = MakeRMatEL(); t.Stop(); PrintTime("Generate Time", t.Seconds()); return el; } static void InsertWeights(pvector<EdgePair<NodeID_, NodeID_>> &el) { } //Overwrites existing weights with random from [1, 255] static void InsertWeights(pvector<WEdge> &el) { #pragma omp parallel { std::mt19937 rng; std::uniform_int_distribution<int> udist(1, 255); int64_t el_size = el.size(); #pragma omp for for (int64_t block = 0; block < el_size; block += block_size) { rng.seed(kRandSeed + block / block_size); for (int64_t e = block; e < std::min(block + block_size, el_size); e++) { el[e].v.w = static_cast<WeightT_>(udist(rng)); } } } } private: int scale_; int64_t num_nodes_; int64_t num_edges_; static const int64_t block_size = 1 << 18; }; #endif // GENERATOR_H_
thread_scale.c
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */ /* * See COPYRIGHT in top-level directory. */ #define _GNU_SOURCE #include <stdio.h> #include <omp.h> #include <sched.h> #include <unistd.h> #include <pthread.h> #include "zmtest_abslock.h" #define TEST_NITER (1<<22) #define WARMUP_ITER 128 #define CACHELINE_SZ 64 #define ARRAY_LEN 10 char cache_lines[CACHELINE_SZ*ARRAY_LEN] = {0}; #if ARRAY_LEN == 10 int indices [] = {3,6,1,7,0,2,9,4,8,5}; #elif ARRAY_LEN == 4 int indices [] = {2,1,3,0}; #endif zm_abslock_t lock; #if defined (ZM_BIND_MANUAL) void bind_compact(){ int tid = omp_get_thread_num(); /* Compute the target core */ int tgt_core = tid; cpu_set_t set; CPU_ZERO(&set); CPU_SET(tgt_core, &set); if (pthread_setaffinity_np(pthread_self(), sizeof(set), &set) < 0) { perror("pthread_setaffinity_np"); } } #else #define bind_compact() #endif static void test_thruput() { unsigned nthreads = omp_get_max_threads(); zm_abslock_init(&lock); int cur_nthreads; /* Throughput = lock acquisitions per second */ printf("nthreads,thruput,lat\n"); for(cur_nthreads=1; cur_nthreads <= nthreads; cur_nthreads+= ((cur_nthreads==1) ? 1 : 2)) { double start_time, stop_time; #pragma omp parallel num_threads(cur_nthreads) { bind_compact(); int tid = omp_get_thread_num(); /* Warmup */ for(int iter=0; iter < WARMUP_ITER; iter++) { zm_abslock_acquire(&lock); /* Computation */ for(int i = 0; i < ARRAY_LEN; i++) cache_lines[indices[i]] += cache_lines[indices[ARRAY_LEN-1-i]]; zm_abslock_release(&lock); } #pragma omp barrier #pragma omp single { start_time = omp_get_wtime(); } #pragma omp for schedule(static) for(int iter = 0; iter < TEST_NITER; iter++) { zm_abslock_acquire(&lock); /* Computation */ for(int i = 0; i < ARRAY_LEN; i++) cache_lines[indices[i]] += cache_lines[indices[ARRAY_LEN-1-i]]; zm_abslock_release(&lock); } } stop_time = omp_get_wtime(); double elapsed_time = stop_time - start_time; double thruput = (double)TEST_NITER/elapsed_time; double latency = elapsed_time*1e9/TEST_NITER; // latency in nanoseconds printf("%d,%.2lf,%.2lf\n", cur_nthreads, thruput, latency); } } int main(int argc, char **argv) { test_thruput(); return 0; }
#define _GNU_SOURCE #include <stdio.h> #include <omp.h> #include <sched.h> #include <unistd.h> #include <pthread.h> #include "zmtest_abslock.h" #define TEST_NITER (1<<22) #define WARMUP_ITER 128 #define CACHELINE_SZ 64 #define ARRAY_LEN 10 char cache_lines[CACHELINE_SZ * ARRAY_LEN] = {0}; #if ARRAY_LEN == 10 int indices[] = {3, 6, 1, 7, 0, 2, 9, 4, 8, 5}; #elif ARRAY_LEN == 4 int indices[] = {2, 1, 3, 0}; #endif zm_abslock_t lock; #if defined (ZM_BIND_MANUAL) void bind_compact() { int tid = omp_get_thread_num(); /* Compute the target core */ int tgt_core = tid; cpu_set_t set; CPU_ZERO(&set); CPU_SET(tgt_core, &set); if (pthread_setaffinity_np(pthread_self(), sizeof(set), &set) < 0) { perror("pthread_setaffinity_np"); } } #else #define bind_compact() #endif static void test_thruput() { unsigned nthreads = omp_get_max_threads(); zm_abslock_init(&lock); int cur_nthreads; /* Throughput = lock acquisitions per second */ printf("nthreads,thruput,lat\n"); for (cur_nthreads = 1; cur_nthreads <= nthreads; cur_nthreads += ((cur_nthreads == 1) ? 1 : 2)) { double start_time, stop_time; bind_compact(); int tid = omp_get_thread_num(); /* Warmup */ for (int iter = 0; iter < WARMUP_ITER; iter++) { zm_abslock_acquire(&lock); /* Computation */ for (int i = 0; i < ARRAY_LEN; i++) cache_lines[indices[i]] += cache_lines[indices[ARRAY_LEN - 1 - i]]; zm_abslock_release(&lock); } start_time = omp_get_wtime(); for (int iter = 0; iter < TEST_NITER; iter++) { zm_abslock_acquire(&lock); /* Computation */ for (int i = 0; i < ARRAY_LEN; i++) cache_lines[indices[i]] += cache_lines[indices[ARRAY_LEN - 1 - i]]; zm_abslock_release(&lock); } stop_time = omp_get_wtime(); double elapsed_time = stop_time - start_time; double thruput = (double)TEST_NITER / elapsed_time; double latency = elapsed_time * 1e9 / TEST_NITER; //latency in nanoseconds printf("%d,%.2lf,%.2lf\n", cur_nthreads, thruput, latency); } } int main(int argc, char **argv) { test_thruput(); return 0; }
#define _GNU_SOURCE #include <stdio.h> #include <omp.h> #include <sched.h> #include <unistd.h> #include <pthread.h> #include "zmtest_abslock.h" #define TEST_NITER (1<<22) #define WARMUP_ITER 128 #define CACHELINE_SZ 64 #define ARRAY_LEN 10 char cache_lines[CACHELINE_SZ * ARRAY_LEN] = {0}; #if ARRAY_LEN == 10 int indices[] = {3, 6, 1, 7, 0, 2, 9, 4, 8, 5}; #elif ARRAY_LEN == 4 int indices[] = {2, 1, 3, 0}; #endif zm_abslock_t lock; #if defined (ZM_BIND_MANUAL) void bind_compact() { int tid = omp_get_thread_num(); /* Compute the target core */ int tgt_core = tid; cpu_set_t set; CPU_ZERO(&set); CPU_SET(tgt_core, &set); if (pthread_setaffinity_np(pthread_self(), sizeof(set), &set) < 0) { perror("pthread_setaffinity_np"); } } #else #define bind_compact() #endif static void test_thruput() { unsigned nthreads = omp_get_max_threads(); zm_abslock_init(&lock); int cur_nthreads; /* Throughput = lock acquisitions per second */ printf("nthreads,thruput,lat\n"); for (cur_nthreads = 1; cur_nthreads <= nthreads; cur_nthreads += ((cur_nthreads == 1) ? 1 : 2)) { double start_time, stop_time; #pragma omp parallel num_threads(cur_nthreads) { bind_compact(); int tid = omp_get_thread_num(); /* Warmup */ for (int iter = 0; iter < WARMUP_ITER; iter++) { zm_abslock_acquire(&lock); /* Computation */ for (int i = 0; i < ARRAY_LEN; i++) cache_lines[indices[i]] += cache_lines[indices[ARRAY_LEN - 1 - i]]; zm_abslock_release(&lock); } #pragma omp barrier #pragma omp single { start_time = omp_get_wtime(); } #pragma omp for schedule(static) for (int iter = 0; iter < TEST_NITER; iter++) { zm_abslock_acquire(&lock); /* Computation */ for (int i = 0; i < ARRAY_LEN; i++) cache_lines[indices[i]] += cache_lines[indices[ARRAY_LEN - 1 - i]]; zm_abslock_release(&lock); } } stop_time = omp_get_wtime(); double elapsed_time = stop_time - start_time; double thruput = (double)TEST_NITER / elapsed_time; double latency = elapsed_time * 1e9 / TEST_NITER; //latency in nanoseconds printf("%d,%.2lf,%.2lf\n", cur_nthreads, thruput, latency); } } int main(int argc, char **argv) { test_thruput(); return 0; }
wgs64.c
#include <stdio.h> #include <omp.h> int n =64; int main(void) { int fail = 0; int a = -1; // #if 1 #pragma omp target { //nothing } #endif #pragma omp target teams distribute thread_limit(64) for (int k =0; k < n; k++) { // nothing } printf("Succeeded\n"); return fail; }
#include <stdio.h> #include <omp.h> int n = 64; int main(void) { int fail = 0; int a = -1; // #if 1 //nothing #endif for (int k = 0; k < n; k++) { //nothing } printf("Succeeded\n"); return fail; }
#include <stdio.h> #include <omp.h> int n = 64; int main(void) { int fail = 0; int a = -1; // #if 1 #pragma omp target { //nothing } #endif #pragma omp target teams distribute thread_limit(64) for (int k = 0; k < n; k++) { //nothing } printf("Succeeded\n"); return fail; }
ellipticBuildJacobi.c
/* The MIT License (MIT) Copyright (c) 2017 Tim Warburton, Noel Chalmers, Jesse Chan, Ali Karakus Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include "elliptic.h" void BuildLocalIpdgBBDiagTri2D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dfloat *MS, dlong eM, dfloat *A); void BuildLocalIpdgDiagTri2D (elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dfloat *MS, dlong eM, dfloat *A); void BuildLocalIpdgDiagQuad2D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dfloat *MS, dfloat *B, dfloat *Br, dfloat *Bs, dlong eM, dfloat *A); void BuildLocalIpdgDiagTet3D (elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dfloat *MS, dlong eM, dfloat *A); void BuildLocalIpdgDiagHex3D (elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dfloat *MS, dfloat *B, dfloat *Br, dfloat *Bs, dfloat *Bt, dlong eM, dfloat *A); void BuildLocalContinuousDiagTri2D (elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dlong eM, dfloat *A); void BuildLocalContinuousDiagQuad2D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dlong eM, dfloat *B, dfloat *Br, dfloat *Bs, dfloat *A); void BuildLocalContinuousDiagTet3D (elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dlong eM, dfloat *A); void BuildLocalContinuousDiagHex3D (elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dlong eM, dfloat *B, dfloat *Br, dfloat *Bs, dfloat *Bt, dfloat *A); void ellipticBuildJacobi(elliptic_t* elliptic, dfloat lambda, dfloat **invDiagA){ mesh_t *mesh = elliptic->mesh; setupAide options = elliptic->options; // surface mass matrices MS = MM*LIFT dfloat *MS = (dfloat *) calloc(mesh->Nfaces*mesh->Nfp*mesh->Nfp,sizeof(dfloat)); for (int f=0;f<mesh->Nfaces;f++) { for (int n=0;n<mesh->Nfp;n++) { int fn = mesh->faceNodes[f*mesh->Nfp+n]; for (int m=0;m<mesh->Nfp;m++) { dfloat MSnm = 0; for (int i=0;i<mesh->Np;i++){ MSnm += mesh->MM[fn+i*mesh->Np]*mesh->LIFT[i*mesh->Nfp*mesh->Nfaces+f*mesh->Nfp+m]; } MS[m+n*mesh->Nfp + f*mesh->Nfp*mesh->Nfp] = MSnm; } } } // build some monolithic basis arrays (for quads and hexes) dfloat *B = (dfloat*) calloc(mesh->Np*mesh->Np, sizeof(dfloat)); dfloat *Br = (dfloat*) calloc(mesh->Np*mesh->Np, sizeof(dfloat)); dfloat *Bs = (dfloat*) calloc(mesh->Np*mesh->Np, sizeof(dfloat)); dfloat *Bt = (dfloat*) calloc(mesh->Np*mesh->Np, sizeof(dfloat)); if (elliptic->elementType==QUADRILATERALS) { int mode = 0; for(int nj=0;nj<mesh->N+1;++nj){ for(int ni=0;ni<mesh->N+1;++ni){ int node = 0; for(int j=0;j<mesh->N+1;++j){ for(int i=0;i<mesh->N+1;++i){ if(nj==j && ni==i) B[mode*mesh->Np+node] = 1; if(nj==j) Br[mode*mesh->Np+node] = mesh->D[ni+mesh->Nq*i]; 
if(ni==i) Bs[mode*mesh->Np+node] = mesh->D[nj+mesh->Nq*j]; ++node; } } ++mode; } } } if (elliptic->elementType==HEXAHEDRA) { int mode = 0; for(int nk=0;nk<mesh->N+1;++nk){ for(int nj=0;nj<mesh->N+1;++nj){ for(int ni=0;ni<mesh->N+1;++ni){ int node = 0; for(int k=0;k<mesh->N+1;++k){ for(int j=0;j<mesh->N+1;++j){ for(int i=0;i<mesh->N+1;++i){ if(nk==k && nj==j && ni==i) B[mode*mesh->Np+node] = 1; if(nj==j && nk==k) Br[mode*mesh->Np+node] = mesh->D[ni+mesh->Nq*i]; if(ni==i && nk==k) Bs[mode*mesh->Np+node] = mesh->D[nj+mesh->Nq*j]; if(ni==i && nj==j) Bt[mode*mesh->Np+node] = mesh->D[nk+mesh->Nq*k]; ++node; } } } ++mode; } } } } dlong diagNnum = mesh->Np*mesh->Nelements; dfloat *diagA = (dfloat*) calloc(diagNnum, sizeof(dfloat)); if(mesh->rank==0) printf("Building diagonal...");fflush(stdout); if (options.compareArgs("DISCRETIZATION","IPDG")) { switch(elliptic->elementType){ case TRIANGLES: if (options.compareArgs("BASIS","BERN")) { #pragma omp parallel for for(dlong eM=0;eM<mesh->Nelements;++eM) BuildLocalIpdgBBDiagTri2D(elliptic, mesh, lambda, MS, eM, diagA + eM*mesh->Np); } else { #pragma omp parallel for for(dlong eM=0;eM<mesh->Nelements;++eM) BuildLocalIpdgDiagTri2D(elliptic, mesh, lambda, MS, eM, diagA + eM*mesh->Np); } break; case QUADRILATERALS: #pragma omp parallel for for(dlong eM=0;eM<mesh->Nelements;++eM) BuildLocalIpdgDiagQuad2D(elliptic, mesh, lambda, MS, B, Br, Bs, eM, diagA + eM*mesh->Np); break; case TETRAHEDRA: #pragma omp parallel for for(dlong eM=0;eM<mesh->Nelements;++eM) BuildLocalIpdgDiagTet3D(elliptic, mesh, lambda, MS, eM, diagA + eM*mesh->Np); break; case HEXAHEDRA: #pragma omp parallel for for(dlong eM=0;eM<mesh->Nelements;++eM) BuildLocalIpdgDiagHex3D(elliptic, mesh, lambda, MS, B, Br, Bs, Bt, eM, diagA + eM*mesh->Np); break; } } else if (options.compareArgs("DISCRETIZATION","CONTINUOUS")) { switch(elliptic->elementType){ case TRIANGLES: #pragma omp parallel for for(dlong eM=0;eM<mesh->Nelements;++eM) BuildLocalContinuousDiagTri2D(elliptic, mesh, lambda, eM, diagA + eM*mesh->Np); break; case QUADRILATERALS: #pragma omp parallel for for(dlong eM=0;eM<mesh->Nelements;++eM) BuildLocalContinuousDiagQuad2D(elliptic, mesh, lambda, eM, B, Br, Bs, diagA + eM*mesh->Np); break; case TETRAHEDRA: #pragma omp parallel for for(dlong eM=0;eM<mesh->Nelements;++eM) BuildLocalContinuousDiagTet3D(elliptic, mesh, lambda, eM, diagA + eM*mesh->Np); break; case HEXAHEDRA: #pragma omp parallel for for(dlong eM=0;eM<mesh->Nelements;++eM) BuildLocalContinuousDiagHex3D(elliptic, mesh, lambda, eM, B, Br, Bs, Bt, diagA + eM*mesh->Np); break; } } if (options.compareArgs("DISCRETIZATION","CONTINUOUS")) gsParallelGatherScatter(mesh->hostGsh, diagA, dfloatString, "add"); *invDiagA = (dfloat*) calloc(diagNnum, sizeof(dfloat)); for (dlong n=0;n<mesh->Nelements*mesh->Np;n++) { (*invDiagA)[n] = 1/diagA[n]; } if(mesh->rank==0) printf("done.\n"); free(diagA); free(MS); free(B); free(Br); free(Bs); free(Bt); } void BuildLocalIpdgDiagTri2D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dfloat *MS, dlong eM, dfloat *A) { dlong vbase = eM*mesh->Nvgeo; dfloat drdx = mesh->vgeo[vbase+RXID]; dfloat drdy = mesh->vgeo[vbase+RYID]; dfloat dsdx = mesh->vgeo[vbase+SXID]; dfloat dsdy = mesh->vgeo[vbase+SYID]; dfloat J = mesh->vgeo[vbase+JID]; /* start with stiffness matrix */ for(int n=0;n<mesh->Np;++n){ A[n] = J*lambda*mesh->MM[n*mesh->Np+n]; A[n] += J*drdx*drdx*mesh->Srr[n*mesh->Np+n]; A[n] += J*drdx*dsdx*mesh->Srs[n*mesh->Np+n]; A[n] += J*dsdx*drdx*mesh->Ssr[n*mesh->Np+n]; A[n] += 
J*dsdx*dsdx*mesh->Sss[n*mesh->Np+n]; A[n] += J*drdy*drdy*mesh->Srr[n*mesh->Np+n]; A[n] += J*drdy*dsdy*mesh->Srs[n*mesh->Np+n]; A[n] += J*dsdy*drdy*mesh->Ssr[n*mesh->Np+n]; A[n] += J*dsdy*dsdy*mesh->Sss[n*mesh->Np+n]; } //add the rank boost for the allNeumann Poisson problem if (elliptic->allNeumann) { for(int n=0;n<mesh->Np;++n){ A[n] += elliptic->allNeumannPenalty*elliptic->allNeumannScale*elliptic->allNeumannScale; } } for (int fM=0;fM<mesh->Nfaces;fM++) { // load surface geofactors for this face dlong sid = mesh->Nsgeo*(eM*mesh->Nfaces+fM); dfloat nx = mesh->sgeo[sid+NXID]; dfloat ny = mesh->sgeo[sid+NYID]; dfloat sJ = mesh->sgeo[sid+SJID]; dfloat hinv = mesh->sgeo[sid+IHID]; int bc = mesh->EToB[fM+mesh->Nfaces*eM]; //raw boundary flag dfloat penalty = elliptic->tau*hinv; int bcD = 0, bcN =0; int bcType = 0; if(bc>0) bcType = elliptic->BCType[bc]; //find its type (Dirichlet/Neumann) // this needs to be double checked (and the code where these are used) if(bcType==1){ // Dirichlet bcD = 1; bcN = 0; } else if(bcType==2){ // Neumann bcD = 0; bcN = 1; } // mass matrix for this face dfloat *MSf = MS+fM*mesh->Nfp*mesh->Nfp; // penalty term just involves face nodes for(int n=0;n<mesh->Nfp;++n){ int nM = mesh->faceNodes[fM*mesh->Nfp+n]; for(int m=0;m<mesh->Nfp;++m){ int mM = mesh->faceNodes[fM*mesh->Nfp+m]; if (mM == nM) { // OP11 = OP11 + 0.5*( gtau*mmE ) dfloat MSfnm = sJ*MSf[n*mesh->Nfp+m]; A[nM] += 0.5*(1.-bcN)*(1.+bcD)*penalty*MSfnm; } } } // now add differential surface terms for(int n=0;n<mesh->Nfp;++n){ int nM = mesh->faceNodes[fM*mesh->Nfp+n]; for(int i=0;i<mesh->Nfp;++i){ int iM = mesh->faceNodes[fM*mesh->Nfp+i]; dfloat MSfni = sJ*MSf[n*mesh->Nfp+i]; // surface Jacobian built in dfloat DxMim = drdx*mesh->Dr[iM*mesh->Np+nM] + dsdx*mesh->Ds[iM*mesh->Np+nM]; dfloat DyMim = drdy*mesh->Dr[iM*mesh->Np+nM] + dsdy*mesh->Ds[iM*mesh->Np+nM]; // OP11 = OP11 + 0.5*( - mmE*Dn1) A[nM] += -0.5*nx*(1+bcD)*(1-bcN)*MSfni*DxMim; A[nM] += -0.5*ny*(1+bcD)*(1-bcN)*MSfni*DyMim; } } for(int n=0;n<mesh->Np;++n){ for(int m=0;m<mesh->Nfp;++m){ int mM = mesh->faceNodes[fM*mesh->Nfp+m]; if (mM==n) { for(int i=0;i<mesh->Nfp;++i){ int iM = mesh->faceNodes[fM*mesh->Nfp+i]; dfloat MSfim = sJ*MSf[i*mesh->Nfp+m]; dfloat DxMin = drdx*mesh->Dr[iM*mesh->Np+n] + dsdx*mesh->Ds[iM*mesh->Np+n]; dfloat DyMin = drdy*mesh->Dr[iM*mesh->Np+n] + dsdy*mesh->Ds[iM*mesh->Np+n]; // OP11 = OP11 + (- Dn1'*mmE ); A[n] += -0.5*nx*(1+bcD)*(1-bcN)*DxMin*MSfim; A[n] += -0.5*ny*(1+bcD)*(1-bcN)*DyMin*MSfim; } } } } } } void BuildLocalIpdgPatchAxTri2D(elliptic_t* elliptic, mesh_t* mesh, int basisNp, dfloat *basis, dfloat lambda, dfloat *MS, dlong eM, dfloat *A); //generate the BB diagonal by extracting it from the transformed patch void BuildLocalIpdgBBDiagTri2D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dfloat *MS, dlong eM, dfloat *A) { dfloat *patchA = (dfloat *) calloc(mesh->Np*mesh->Np,sizeof(dfloat)); int basisNp = mesh->Np; dfloat *basis = mesh->VB; BuildLocalIpdgPatchAxTri2D(elliptic, mesh, basisNp, basis, lambda, MS, eM, patchA); for(int n=0;n<mesh->Np;++n) { A[n] = patchA[n*mesh->Np+n]; //store the diagonal entry } free(patchA); } //returns the continuous C0 patch A matrix for element eM void BuildLocalContinuousDiagTri2D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dlong eM, dfloat *A) { dlong gbase = eM*mesh->Nggeo; dfloat Grr = mesh->ggeo[gbase + G00ID]; dfloat Grs = mesh->ggeo[gbase + G01ID]; dfloat Gss = mesh->ggeo[gbase + G11ID]; dfloat J = mesh->ggeo[gbase + GWJID]; /* start with stiffness matrix */ for(int 
n=0;n<mesh->Np;++n){ if (elliptic->mapB[n+eM*mesh->Np]!=1) { //dont fill rows for masked nodes A[n] = J*lambda*mesh->MM[n+n*mesh->Np]; A[n] += Grr*mesh->Srr[n+n*mesh->Np]; A[n] += Grs*mesh->Srs[n+n*mesh->Np]; A[n] += Grs*mesh->Ssr[n+n*mesh->Np]; A[n] += Gss*mesh->Sss[n+n*mesh->Np]; } else { A[n] = 1; //just put a 1 so A is invertable } } //add the rank boost for the allNeumann Poisson problem if (elliptic->allNeumann) { for(int n=0;n<mesh->Np;++n){ if (elliptic->mapB[n+eM*mesh->Np]!=1) { //dont fill rows for masked nodes A[n] += elliptic->allNeumannPenalty*elliptic->allNeumannScale*elliptic->allNeumannScale; } } } } void BuildLocalIpdgDiagQuad2D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dfloat *MS, dfloat *B, dfloat *Br, dfloat *Bs, dlong eM, dfloat *A) { /* start with stiffness matrix */ for(int n=0;n<mesh->Np;++n){ A[n] = 0; // (grad phi_n, grad phi_m)_{D^e} for(int i=0;i<mesh->Np;++i){ dlong base = eM*mesh->Np*mesh->Nvgeo + i; dfloat drdx = mesh->vgeo[base+mesh->Np*RXID]; dfloat drdy = mesh->vgeo[base+mesh->Np*RYID]; dfloat dsdx = mesh->vgeo[base+mesh->Np*SXID]; dfloat dsdy = mesh->vgeo[base+mesh->Np*SYID]; dfloat JW = mesh->vgeo[base+mesh->Np*JWID]; int idn = n*mesh->Np+i; dfloat dlndx = drdx*Br[idn] + dsdx*Bs[idn]; dfloat dlndy = drdy*Br[idn] + dsdy*Bs[idn]; A[n] += JW*(dlndx*dlndx+dlndy*dlndy); A[n] += lambda*JW*B[idn]*B[idn]; } for (int fM=0;fM<mesh->Nfaces;fM++) { // accumulate flux terms for negative and positive traces for(int i=0;i<mesh->Nfp;++i){ int vidM = mesh->faceNodes[i+fM*mesh->Nfp]; // grab vol geofacs at surface nodes dlong baseM = eM*mesh->Np*mesh->Nvgeo + vidM; dfloat drdxM = mesh->vgeo[baseM+mesh->Np*RXID]; dfloat drdyM = mesh->vgeo[baseM+mesh->Np*RYID]; dfloat dsdxM = mesh->vgeo[baseM+mesh->Np*SXID]; dfloat dsdyM = mesh->vgeo[baseM+mesh->Np*SYID]; // grab surface geometric factors dlong base = mesh->Nsgeo*(eM*mesh->Nfp*mesh->Nfaces + fM*mesh->Nfp + i); dfloat nx = mesh->sgeo[base+NXID]; dfloat ny = mesh->sgeo[base+NYID]; dfloat wsJ = mesh->sgeo[base+WSJID]; dfloat hinv = mesh->sgeo[base+IHID]; // form negative trace terms in IPDG int idnM = n*mesh->Np+vidM; dfloat dlndxM = drdxM*Br[idnM] + dsdxM*Bs[idnM]; dfloat dlndyM = drdyM*Br[idnM] + dsdyM*Bs[idnM]; dfloat ndotgradlnM = nx*dlndxM+ny*dlndyM; dfloat lnM = B[idnM]; dfloat penalty = elliptic->tau*hinv; int bc = mesh->EToB[fM+mesh->Nfaces*eM]; //raw boundary flag int bcD = 0, bcN =0; int bcType = 0; if(bc>0) bcType = elliptic->BCType[bc]; //find its type (Dirichlet/Neumann) // this needs to be double checked (and the code where these are used) if(bcType==1){ // Dirichlet bcD = 1; bcN = 0; } else if(bcType==2){ // Neumann bcD = 0; bcN = 1; } A[n] += -0.5*(1+bcD)*(1-bcN)*wsJ*lnM*ndotgradlnM; // -(ln^-, N.grad lm^-) A[n] += -0.5*(1+bcD)*(1-bcN)*wsJ*ndotgradlnM*lnM; // -(N.grad ln^-, lm^-) A[n] += +0.5*(1+bcD)*(1-bcN)*wsJ*penalty*lnM*lnM; // +((tau/h)*ln^-,lm^-) } } } } void BuildLocalContinuousDiagQuad2D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dlong eM, dfloat *B, dfloat *Br, dfloat* Bs, dfloat *A) { for (int ny=0;ny<mesh->Nq;ny++) { for (int nx=0;nx<mesh->Nq;nx++) { int iid = nx+ny*mesh->Nq; if (elliptic->mapB[nx+ny*mesh->Nq+eM*mesh->Np]!=1) { A[iid] = 0; for (int k=0;k<mesh->Nq;k++) { int id = k+ny*mesh->Nq; dfloat Grr = mesh->ggeo[eM*mesh->Np*mesh->Nggeo + id + G00ID*mesh->Np]; A[iid] += Grr*mesh->D[nx+k*mesh->Nq]*mesh->D[nx+k*mesh->Nq]; } for (int k=0;k<mesh->Nq;k++) { int id = nx+k*mesh->Nq; dfloat Gss = mesh->ggeo[eM*mesh->Np*mesh->Nggeo + id + G11ID*mesh->Np]; A[iid] += 
Gss*mesh->D[ny+k*mesh->Nq]*mesh->D[ny+k*mesh->Nq]; } int id = nx+ny*mesh->Nq; dfloat Grs = mesh->ggeo[eM*mesh->Np*mesh->Nggeo + id + G01ID*mesh->Np]; A[iid] += 2*Grs*mesh->D[nx+nx*mesh->Nq]*mesh->D[ny+ny*mesh->Nq]; dfloat JW = mesh->ggeo[eM*mesh->Np*mesh->Nggeo + id + GWJID*mesh->Np]; A[iid] += JW*lambda; } else { A[iid] = 1; //just put a 1 so A is invertable } } } //add the rank boost for the allNeumann Poisson problem if (elliptic->allNeumann) { for(int n=0;n<mesh->Np;++n){ if (elliptic->mapB[n+eM*mesh->Np]!=1) { //dont fill rows for masked nodes A[n] += elliptic->allNeumannPenalty*elliptic->allNeumannScale*elliptic->allNeumannScale; } } } } void BuildLocalIpdgDiagTet3D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dfloat *MS, dlong eM, dfloat *A) { dlong vbase = eM*mesh->Nvgeo; dfloat drdx = mesh->vgeo[vbase+RXID]; dfloat drdy = mesh->vgeo[vbase+RYID]; dfloat drdz = mesh->vgeo[vbase+RZID]; dfloat dsdx = mesh->vgeo[vbase+SXID]; dfloat dsdy = mesh->vgeo[vbase+SYID]; dfloat dsdz = mesh->vgeo[vbase+SZID]; dfloat dtdx = mesh->vgeo[vbase+TXID]; dfloat dtdy = mesh->vgeo[vbase+TYID]; dfloat dtdz = mesh->vgeo[vbase+TZID]; dfloat J = mesh->vgeo[vbase+JID]; dfloat G00 = drdx*drdx + drdy*drdy + drdz*drdz; dfloat G01 = drdx*dsdx + drdy*dsdy + drdz*dsdz; dfloat G02 = drdx*dtdx + drdy*dtdy + drdz*dtdz; dfloat G10 = dsdx*drdx + dsdy*drdy + dsdz*drdz; dfloat G11 = dsdx*dsdx + dsdy*dsdy + dsdz*dsdz; dfloat G12 = dsdx*dtdx + dsdy*dtdy + dsdz*dtdz; dfloat G20 = dtdx*drdx + dtdy*drdy + dtdz*drdz; dfloat G21 = dtdx*dsdx + dtdy*dsdy + dtdz*dsdz; dfloat G22 = dtdx*dtdx + dtdy*dtdy + dtdz*dtdz; /* start with stiffness matrix */ for(int n=0;n<mesh->Np;++n){ A[n] = J*lambda*mesh->MM[n*mesh->Np+n]; A[n] += J*G00*mesh->Srr[n*mesh->Np+n]; A[n] += J*G01*mesh->Srs[n*mesh->Np+n]; A[n] += J*G02*mesh->Srt[n*mesh->Np+n]; A[n] += J*G10*mesh->Ssr[n*mesh->Np+n]; A[n] += J*G11*mesh->Sss[n*mesh->Np+n]; A[n] += J*G12*mesh->Sst[n*mesh->Np+n]; A[n] += J*G20*mesh->Str[n*mesh->Np+n]; A[n] += J*G21*mesh->Sts[n*mesh->Np+n]; A[n] += J*G22*mesh->Stt[n*mesh->Np+n]; } //add the rank boost for the allNeumann Poisson problem if (elliptic->allNeumann) { for(int n=0;n<mesh->Np;++n){ A[n] += elliptic->allNeumannPenalty*elliptic->allNeumannScale*elliptic->allNeumannScale; } } for (int fM=0;fM<mesh->Nfaces;fM++) { // load surface geofactors for this face dlong sid = mesh->Nsgeo*(eM*mesh->Nfaces+fM); dfloat nx = mesh->sgeo[sid+NXID]; dfloat ny = mesh->sgeo[sid+NYID]; dfloat nz = mesh->sgeo[sid+NZID]; dfloat sJ = mesh->sgeo[sid+SJID]; dfloat hinv = mesh->sgeo[sid+IHID]; int bc = mesh->EToB[fM+mesh->Nfaces*eM]; //raw boundary flag dfloat penalty = elliptic->tau*hinv; int bcD = 0, bcN =0; int bcType = 0; if(bc>0) bcType = elliptic->BCType[bc]; //find its type (Dirichlet/Neumann) // this needs to be double checked (and the code where these are used) if(bcType==1){ // Dirichlet bcD = 1; bcN = 0; } else if(bcType==2){ // Neumann bcD = 0; bcN = 1; } // mass matrix for this face dfloat *MSf = MS+fM*mesh->Nfp*mesh->Nfp; // penalty term just involves face nodes for(int n=0;n<mesh->Nfp;++n){ for(int m=0;m<mesh->Nfp;++m){ int nM = mesh->faceNodes[fM*mesh->Nfp+n]; int mM = mesh->faceNodes[fM*mesh->Nfp+m]; if (mM==nM) { // OP11 = OP11 + 0.5*( gtau*mmE ) dfloat MSfnm = sJ*MSf[n*mesh->Nfp+m]; A[nM] += 0.5*(1.-bcN)*(1.+bcD)*penalty*MSfnm; } } } // now add differential surface terms for(int n=0;n<mesh->Nfp;++n){ int nM = mesh->faceNodes[fM*mesh->Nfp+n]; for(int i=0;i<mesh->Nfp;++i){ int iM = mesh->faceNodes[fM*mesh->Nfp+i]; dfloat MSfni = 
sJ*MSf[n*mesh->Nfp+i]; // surface Jacobian built in dfloat DxMim = drdx*mesh->Dr[iM*mesh->Np+nM] + dsdx*mesh->Ds[iM*mesh->Np+nM] + dtdx*mesh->Dt[iM*mesh->Np+nM]; dfloat DyMim = drdy*mesh->Dr[iM*mesh->Np+nM] + dsdy*mesh->Ds[iM*mesh->Np+nM] + dtdy*mesh->Dt[iM*mesh->Np+nM]; dfloat DzMim = drdz*mesh->Dr[iM*mesh->Np+nM] + dsdz*mesh->Ds[iM*mesh->Np+nM] + dtdz*mesh->Dt[iM*mesh->Np+nM]; // OP11 = OP11 + 0.5*( - mmE*Dn1) A[nM] += -0.5*nx*(1+bcD)*(1-bcN)*MSfni*DxMim; A[nM] += -0.5*ny*(1+bcD)*(1-bcN)*MSfni*DyMim; A[nM] += -0.5*nz*(1+bcD)*(1-bcN)*MSfni*DzMim; } } for(int n=0;n<mesh->Np;++n){ for(int m=0;m<mesh->Nfp;++m){ int mM = mesh->faceNodes[fM*mesh->Nfp+m]; if (mM==n) { for(int i=0;i<mesh->Nfp;++i){ int iM = mesh->faceNodes[fM*mesh->Nfp+i]; dfloat MSfim = sJ*MSf[i*mesh->Nfp+m]; dfloat DxMin = drdx*mesh->Dr[iM*mesh->Np+n] + dsdx*mesh->Ds[iM*mesh->Np+n] + dtdx*mesh->Dt[iM*mesh->Np+n]; dfloat DyMin = drdy*mesh->Dr[iM*mesh->Np+n] + dsdy*mesh->Ds[iM*mesh->Np+n] + dtdy*mesh->Dt[iM*mesh->Np+n]; dfloat DzMin = drdz*mesh->Dr[iM*mesh->Np+n] + dsdz*mesh->Ds[iM*mesh->Np+n] + dtdz*mesh->Dt[iM*mesh->Np+n]; // OP11 = OP11 + (- Dn1'*mmE ); A[n] += -0.5*nx*(1+bcD)*(1-bcN)*DxMin*MSfim; A[n] += -0.5*ny*(1+bcD)*(1-bcN)*DyMin*MSfim; A[n] += -0.5*nz*(1+bcD)*(1-bcN)*DzMin*MSfim; } } } } } } void BuildLocalContinuousDiagTet3D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dlong eM, dfloat *A) { dlong gbase = eM*mesh->Nggeo; dfloat Grr = mesh->ggeo[gbase + G00ID]; dfloat Grs = mesh->ggeo[gbase + G01ID]; dfloat Grt = mesh->ggeo[gbase + G02ID]; dfloat Gss = mesh->ggeo[gbase + G11ID]; dfloat Gst = mesh->ggeo[gbase + G12ID]; dfloat Gtt = mesh->ggeo[gbase + G22ID]; dfloat J = mesh->ggeo[gbase + GWJID]; /* start with stiffness matrix */ for(int n=0;n<mesh->Np;++n){ if (elliptic->mapB[n+eM*mesh->Np]!=1) { //dont fill rows for masked nodes A[n] = J*lambda*mesh->MM[n+n*mesh->Np]; A[n] += Grr*mesh->Srr[n+n*mesh->Np]; A[n] += Grs*mesh->Srs[n+n*mesh->Np]; A[n] += Grt*mesh->Srt[n+n*mesh->Np]; A[n] += Grs*mesh->Ssr[n+n*mesh->Np]; A[n] += Gss*mesh->Sss[n+n*mesh->Np]; A[n] += Gst*mesh->Sst[n+n*mesh->Np]; A[n] += Grt*mesh->Str[n+n*mesh->Np]; A[n] += Gst*mesh->Sts[n+n*mesh->Np]; A[n] += Gtt*mesh->Stt[n+n*mesh->Np]; } else { A[n] = 1; //just put a 1 so A is invertable } } //add the rank boost for the allNeumann Poisson problem if (elliptic->allNeumann) { for(int n=0;n<mesh->Np;++n){ if (elliptic->mapB[n+eM*mesh->Np]!=1) { //dont fill rows for masked nodes A[n] += elliptic->allNeumannPenalty*elliptic->allNeumannScale*elliptic->allNeumannScale; } } } } void BuildLocalIpdgDiagHex3D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dfloat *MS, dfloat *B, dfloat *Br, dfloat *Bs, dfloat *Bt, dlong eM, dfloat *A) { /* start with stiffness matrix */ for(int n=0;n<mesh->Np;++n){ A[n] = 0; // (grad phi_n, grad phi_m)_{D^e} for(int i=0;i<mesh->Np;++i){ dlong base = eM*mesh->Np*mesh->Nvgeo + i; dfloat drdx = mesh->vgeo[base+mesh->Np*RXID]; dfloat drdy = mesh->vgeo[base+mesh->Np*RYID]; dfloat drdz = mesh->vgeo[base+mesh->Np*RZID]; dfloat dsdx = mesh->vgeo[base+mesh->Np*SXID]; dfloat dsdy = mesh->vgeo[base+mesh->Np*SYID]; dfloat dsdz = mesh->vgeo[base+mesh->Np*SZID]; dfloat dtdx = mesh->vgeo[base+mesh->Np*TXID]; dfloat dtdy = mesh->vgeo[base+mesh->Np*TYID]; dfloat dtdz = mesh->vgeo[base+mesh->Np*TZID]; dfloat JW = mesh->vgeo[base+mesh->Np*JWID]; int idn = n*mesh->Np+i; dfloat dlndx = drdx*Br[idn] + dsdx*Bs[idn] + dtdx*Bt[idn]; dfloat dlndy = drdy*Br[idn] + dsdy*Bs[idn] + dtdy*Bt[idn]; dfloat dlndz = drdz*Br[idn] + dsdz*Bs[idn] + dtdz*Bt[idn]; A[n] 
+= JW*(dlndx*dlndx+dlndy*dlndy+dlndz*dlndz); A[n] += lambda*JW*B[idn]*B[idn]; } for (int fM=0;fM<mesh->Nfaces;fM++) { // accumulate flux terms for negative and positive traces for(int i=0;i<mesh->Nfp;++i){ int vidM = mesh->faceNodes[i+fM*mesh->Nfp]; // grab vol geofacs at surface nodes dlong baseM = eM*mesh->Np*mesh->Nvgeo + vidM; dfloat drdxM = mesh->vgeo[baseM+mesh->Np*RXID]; dfloat drdyM = mesh->vgeo[baseM+mesh->Np*RYID]; dfloat drdzM = mesh->vgeo[baseM+mesh->Np*RZID]; dfloat dsdxM = mesh->vgeo[baseM+mesh->Np*SXID]; dfloat dsdyM = mesh->vgeo[baseM+mesh->Np*SYID]; dfloat dsdzM = mesh->vgeo[baseM+mesh->Np*SZID]; dfloat dtdxM = mesh->vgeo[baseM+mesh->Np*TXID]; dfloat dtdyM = mesh->vgeo[baseM+mesh->Np*TYID]; dfloat dtdzM = mesh->vgeo[baseM+mesh->Np*TZID]; // grab surface geometric factors dlong base = mesh->Nsgeo*(eM*mesh->Nfp*mesh->Nfaces + fM*mesh->Nfp + i); dfloat nx = mesh->sgeo[base+NXID]; dfloat ny = mesh->sgeo[base+NYID]; dfloat nz = mesh->sgeo[base+NZID]; dfloat wsJ = mesh->sgeo[base+WSJID]; dfloat hinv = mesh->sgeo[base+IHID]; // form negative trace terms in IPDG int idnM = n*mesh->Np+vidM; dfloat dlndxM = drdxM*Br[idnM] + dsdxM*Bs[idnM] + dtdxM*Bt[idnM]; dfloat dlndyM = drdyM*Br[idnM] + dsdyM*Bs[idnM] + dtdyM*Bt[idnM]; dfloat dlndzM = drdzM*Br[idnM] + dsdzM*Bs[idnM] + dtdzM*Bt[idnM]; dfloat ndotgradlnM = nx*dlndxM+ny*dlndyM+nz*dlndzM; dfloat lnM = B[idnM]; dfloat penalty = elliptic->tau*hinv; int bc = mesh->EToB[fM+mesh->Nfaces*eM]; //raw boundary flag int bcD = 0, bcN =0; int bcType = 0; if(bc>0) bcType = elliptic->BCType[bc]; //find its type (Dirichlet/Neumann) // this needs to be double checked (and the code where these are used) if(bcType==1){ // Dirichlet bcD = 1; bcN = 0; } else if(bcType==2){ // Neumann bcD = 0; bcN = 1; } A[n] += -0.5*(1+bcD)*(1-bcN)*wsJ*lnM*ndotgradlnM; // -(ln^-, N.grad lm^-) A[n] += -0.5*(1+bcD)*(1-bcN)*wsJ*ndotgradlnM*lnM; // -(N.grad ln^-, lm^-) A[n] += +0.5*(1+bcD)*(1-bcN)*wsJ*penalty*lnM*lnM; // +((tau/h)*ln^-,lm^-) } } } } void BuildLocalContinuousDiagHex3D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dlong eM, dfloat *B, dfloat *Br, dfloat *Bs, dfloat *Bt, dfloat *A) { for (int nz=0;nz<mesh->Nq;nz++) { for (int ny=0;ny<mesh->Nq;ny++) { for (int nx=0;nx<mesh->Nq;nx++) { int idn = nx+ny*mesh->Nq+nz*mesh->Nq*mesh->Nq; if (elliptic->mapB[idn+eM*mesh->Np]!=1) { A[idn] = 0; int id = nx+ny*mesh->Nq+nz*mesh->Nq*mesh->Nq; dlong base = eM*mesh->Np*mesh->Nggeo; dfloat Grs = mesh->ggeo[base + id + G01ID*mesh->Np]; A[idn] += 2*Grs*mesh->D[nx+nx*mesh->Nq]*mesh->D[ny+ny*mesh->Nq]; dfloat Grt = mesh->ggeo[base + id + G02ID*mesh->Np]; A[idn] += 2*Grt*mesh->D[nx+nx*mesh->Nq]*mesh->D[nz+nz*mesh->Nq]; dfloat Gst = mesh->ggeo[base + id + G12ID*mesh->Np]; A[idn] += 2*Gst*mesh->D[ny+ny*mesh->Nq]*mesh->D[nz+nz*mesh->Nq]; for (int k=0;k<mesh->Nq;k++) { int iid = k+ny*mesh->Nq+nz*mesh->Nq*mesh->Nq; dfloat Grr = mesh->ggeo[base + iid + G00ID*mesh->Np]; A[idn] += Grr*mesh->D[nx+k*mesh->Nq]*mesh->D[nx+k*mesh->Nq]; } for (int k=0;k<mesh->Nq;k++) { int iid = nx+k*mesh->Nq+nz*mesh->Nq*mesh->Nq; dfloat Gss = mesh->ggeo[base + iid + G11ID*mesh->Np]; A[idn] += Gss*mesh->D[ny+k*mesh->Nq]*mesh->D[ny+k*mesh->Nq]; } for (int k=0;k<mesh->Nq;k++) { int iid = nx+ny*mesh->Nq+k*mesh->Nq*mesh->Nq; dfloat Gtt = mesh->ggeo[base + iid + G22ID*mesh->Np]; A[idn] += Gtt*mesh->D[nz+k*mesh->Nq]*mesh->D[nz+k*mesh->Nq]; } dfloat JW = mesh->ggeo[base + id + GWJID*mesh->Np]; A[idn] += JW*lambda; } else { A[idn] = 1; //just put a 1 so A is invertable } } } } //add the rank boost for the allNeumann 
Poisson problem if (elliptic->allNeumann) { for(int n=0;n<mesh->Np;++n){ if (elliptic->mapB[n+eM*mesh->Np]!=1) { //dont fill rows for masked nodes A[n] += elliptic->allNeumannPenalty*elliptic->allNeumannScale*elliptic->allNeumannScale; } } } }
#include "elliptic.h" void BuildLocalIpdgBBDiagTri2D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dfloat *MS, dlong eM, dfloat *A); void BuildLocalIpdgDiagTri2D (elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dfloat *MS, dlong eM, dfloat *A); void BuildLocalIpdgDiagQuad2D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dfloat *MS, dfloat *B, dfloat *Br, dfloat *Bs, dlong eM, dfloat *A); void BuildLocalIpdgDiagTet3D (elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dfloat *MS, dlong eM, dfloat *A); void BuildLocalIpdgDiagHex3D (elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dfloat *MS, dfloat *B, dfloat *Br, dfloat *Bs, dfloat *Bt, dlong eM, dfloat *A); void BuildLocalContinuousDiagTri2D (elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dlong eM, dfloat *A); void BuildLocalContinuousDiagQuad2D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dlong eM, dfloat *B, dfloat *Br, dfloat *Bs, dfloat *A); void BuildLocalContinuousDiagTet3D (elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dlong eM, dfloat *A); void BuildLocalContinuousDiagHex3D (elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dlong eM, dfloat *B, dfloat *Br, dfloat *Bs, dfloat *Bt, dfloat *A); void ellipticBuildJacobi(elliptic_t* elliptic, dfloat lambda, dfloat **invDiagA){ mesh_t *mesh = elliptic->mesh; setupAide options = elliptic->options; // surface mass matrices MS = MM*LIFT dfloat *MS = (dfloat *) calloc(mesh->Nfaces*mesh->Nfp*mesh->Nfp,sizeof(dfloat)); for (int f=0;f<mesh->Nfaces;f++) { for (int n=0;n<mesh->Nfp;n++) { int fn = mesh->faceNodes[f*mesh->Nfp+n]; for (int m=0;m<mesh->Nfp;m++) { dfloat MSnm = 0; for (int i=0;i<mesh->Np;i++){ MSnm += mesh->MM[fn+i*mesh->Np]*mesh->LIFT[i*mesh->Nfp*mesh->Nfaces+f*mesh->Nfp+m]; } MS[m+n*mesh->Nfp + f*mesh->Nfp*mesh->Nfp] = MSnm; } } } // build some monolithic basis arrays (for quads and hexes) dfloat *B = (dfloat*) calloc(mesh->Np*mesh->Np, sizeof(dfloat)); dfloat *Br = (dfloat*) calloc(mesh->Np*mesh->Np, sizeof(dfloat)); dfloat *Bs = (dfloat*) calloc(mesh->Np*mesh->Np, sizeof(dfloat)); dfloat *Bt = (dfloat*) calloc(mesh->Np*mesh->Np, sizeof(dfloat)); if (elliptic->elementType==QUADRILATERALS) { int mode = 0; for(int nj=0;nj<mesh->N+1;++nj){ for(int ni=0;ni<mesh->N+1;++ni){ int node = 0; for(int j=0;j<mesh->N+1;++j){ for(int i=0;i<mesh->N+1;++i){ if(nj==j && ni==i) B[mode*mesh->Np+node] = 1; if(nj==j) Br[mode*mesh->Np+node] = mesh->D[ni+mesh->Nq*i]; if(ni==i) Bs[mode*mesh->Np+node] = mesh->D[nj+mesh->Nq*j]; ++node; } } ++mode; } } } if (elliptic->elementType==HEXAHEDRA) { int mode = 0; for(int nk=0;nk<mesh->N+1;++nk){ for(int nj=0;nj<mesh->N+1;++nj){ for(int ni=0;ni<mesh->N+1;++ni){ int node = 0; for(int k=0;k<mesh->N+1;++k){ for(int j=0;j<mesh->N+1;++j){ for(int i=0;i<mesh->N+1;++i){ if(nk==k && nj==j && ni==i) B[mode*mesh->Np+node] = 1; if(nj==j && nk==k) Br[mode*mesh->Np+node] = mesh->D[ni+mesh->Nq*i]; if(ni==i && nk==k) Bs[mode*mesh->Np+node] = mesh->D[nj+mesh->Nq*j]; if(ni==i && nj==j) Bt[mode*mesh->Np+node] = mesh->D[nk+mesh->Nq*k]; ++node; } } } ++mode; } } } } dlong diagNnum = mesh->Np*mesh->Nelements; dfloat *diagA = (dfloat*) calloc(diagNnum, sizeof(dfloat)); if(mesh->rank==0) printf("Building diagonal...");fflush(stdout); if (options.compareArgs("DISCRETIZATION","IPDG")) { switch(elliptic->elementType){ case TRIANGLES: if (options.compareArgs("BASIS","BERN")) { for(dlong eM=0;eM<mesh->Nelements;++eM) BuildLocalIpdgBBDiagTri2D(elliptic, mesh, lambda, MS, eM, diagA + eM*mesh->Np); } else { for(dlong eM=0;eM<mesh->Nelements;++eM) 
BuildLocalIpdgDiagTri2D(elliptic, mesh, lambda, MS, eM, diagA + eM*mesh->Np); } break; case QUADRILATERALS: for(dlong eM=0;eM<mesh->Nelements;++eM) BuildLocalIpdgDiagQuad2D(elliptic, mesh, lambda, MS, B, Br, Bs, eM, diagA + eM*mesh->Np); break; case TETRAHEDRA: for(dlong eM=0;eM<mesh->Nelements;++eM) BuildLocalIpdgDiagTet3D(elliptic, mesh, lambda, MS, eM, diagA + eM*mesh->Np); break; case HEXAHEDRA: for(dlong eM=0;eM<mesh->Nelements;++eM) BuildLocalIpdgDiagHex3D(elliptic, mesh, lambda, MS, B, Br, Bs, Bt, eM, diagA + eM*mesh->Np); break; } } else if (options.compareArgs("DISCRETIZATION","CONTINUOUS")) { switch(elliptic->elementType){ case TRIANGLES: for(dlong eM=0;eM<mesh->Nelements;++eM) BuildLocalContinuousDiagTri2D(elliptic, mesh, lambda, eM, diagA + eM*mesh->Np); break; case QUADRILATERALS: for(dlong eM=0;eM<mesh->Nelements;++eM) BuildLocalContinuousDiagQuad2D(elliptic, mesh, lambda, eM, B, Br, Bs, diagA + eM*mesh->Np); break; case TETRAHEDRA: for(dlong eM=0;eM<mesh->Nelements;++eM) BuildLocalContinuousDiagTet3D(elliptic, mesh, lambda, eM, diagA + eM*mesh->Np); break; case HEXAHEDRA: for(dlong eM=0;eM<mesh->Nelements;++eM) BuildLocalContinuousDiagHex3D(elliptic, mesh, lambda, eM, B, Br, Bs, Bt, diagA + eM*mesh->Np); break; } } if (options.compareArgs("DISCRETIZATION","CONTINUOUS")) gsParallelGatherScatter(mesh->hostGsh, diagA, dfloatString, "add"); *invDiagA = (dfloat*) calloc(diagNnum, sizeof(dfloat)); for (dlong n=0;n<mesh->Nelements*mesh->Np;n++) { (*invDiagA)[n] = 1/diagA[n]; } if(mesh->rank==0) printf("done.\n"); free(diagA); free(MS); free(B); free(Br); free(Bs); free(Bt); } void BuildLocalIpdgDiagTri2D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dfloat *MS, dlong eM, dfloat *A) { dlong vbase = eM*mesh->Nvgeo; dfloat drdx = mesh->vgeo[vbase+RXID]; dfloat drdy = mesh->vgeo[vbase+RYID]; dfloat dsdx = mesh->vgeo[vbase+SXID]; dfloat dsdy = mesh->vgeo[vbase+SYID]; dfloat J = mesh->vgeo[vbase+JID]; /* start with stiffness matrix */ for(int n=0;n<mesh->Np;++n){ A[n] = J*lambda*mesh->MM[n*mesh->Np+n]; A[n] += J*drdx*drdx*mesh->Srr[n*mesh->Np+n]; A[n] += J*drdx*dsdx*mesh->Srs[n*mesh->Np+n]; A[n] += J*dsdx*drdx*mesh->Ssr[n*mesh->Np+n]; A[n] += J*dsdx*dsdx*mesh->Sss[n*mesh->Np+n]; A[n] += J*drdy*drdy*mesh->Srr[n*mesh->Np+n]; A[n] += J*drdy*dsdy*mesh->Srs[n*mesh->Np+n]; A[n] += J*dsdy*drdy*mesh->Ssr[n*mesh->Np+n]; A[n] += J*dsdy*dsdy*mesh->Sss[n*mesh->Np+n]; } //add the rank boost for the allNeumann Poisson problem if (elliptic->allNeumann) { for(int n=0;n<mesh->Np;++n){ A[n] += elliptic->allNeumannPenalty*elliptic->allNeumannScale*elliptic->allNeumannScale; } } for (int fM=0;fM<mesh->Nfaces;fM++) { // load surface geofactors for this face dlong sid = mesh->Nsgeo*(eM*mesh->Nfaces+fM); dfloat nx = mesh->sgeo[sid+NXID]; dfloat ny = mesh->sgeo[sid+NYID]; dfloat sJ = mesh->sgeo[sid+SJID]; dfloat hinv = mesh->sgeo[sid+IHID]; int bc = mesh->EToB[fM+mesh->Nfaces*eM]; //raw boundary flag dfloat penalty = elliptic->tau*hinv; int bcD = 0, bcN =0; int bcType = 0; if(bc>0) bcType = elliptic->BCType[bc]; //find its type (Dirichlet/Neumann) // this needs to be double checked (and the code where these are used) if(bcType==1){ // Dirichlet bcD = 1; bcN = 0; } else if(bcType==2){ // Neumann bcD = 0; bcN = 1; } // mass matrix for this face dfloat *MSf = MS+fM*mesh->Nfp*mesh->Nfp; // penalty term just involves face nodes for(int n=0;n<mesh->Nfp;++n){ int nM = mesh->faceNodes[fM*mesh->Nfp+n]; for(int m=0;m<mesh->Nfp;++m){ int mM = mesh->faceNodes[fM*mesh->Nfp+m]; if (mM == nM) { // OP11 = OP11 + 0.5*( 
gtau*mmE ) dfloat MSfnm = sJ*MSf[n*mesh->Nfp+m]; A[nM] += 0.5*(1.-bcN)*(1.+bcD)*penalty*MSfnm; } } } // now add differential surface terms for(int n=0;n<mesh->Nfp;++n){ int nM = mesh->faceNodes[fM*mesh->Nfp+n]; for(int i=0;i<mesh->Nfp;++i){ int iM = mesh->faceNodes[fM*mesh->Nfp+i]; dfloat MSfni = sJ*MSf[n*mesh->Nfp+i]; // surface Jacobian built in dfloat DxMim = drdx*mesh->Dr[iM*mesh->Np+nM] + dsdx*mesh->Ds[iM*mesh->Np+nM]; dfloat DyMim = drdy*mesh->Dr[iM*mesh->Np+nM] + dsdy*mesh->Ds[iM*mesh->Np+nM]; // OP11 = OP11 + 0.5*( - mmE*Dn1) A[nM] += -0.5*nx*(1+bcD)*(1-bcN)*MSfni*DxMim; A[nM] += -0.5*ny*(1+bcD)*(1-bcN)*MSfni*DyMim; } } for(int n=0;n<mesh->Np;++n){ for(int m=0;m<mesh->Nfp;++m){ int mM = mesh->faceNodes[fM*mesh->Nfp+m]; if (mM==n) { for(int i=0;i<mesh->Nfp;++i){ int iM = mesh->faceNodes[fM*mesh->Nfp+i]; dfloat MSfim = sJ*MSf[i*mesh->Nfp+m]; dfloat DxMin = drdx*mesh->Dr[iM*mesh->Np+n] + dsdx*mesh->Ds[iM*mesh->Np+n]; dfloat DyMin = drdy*mesh->Dr[iM*mesh->Np+n] + dsdy*mesh->Ds[iM*mesh->Np+n]; // OP11 = OP11 + (- Dn1'*mmE ); A[n] += -0.5*nx*(1+bcD)*(1-bcN)*DxMin*MSfim; A[n] += -0.5*ny*(1+bcD)*(1-bcN)*DyMin*MSfim; } } } } } } void BuildLocalIpdgPatchAxTri2D(elliptic_t* elliptic, mesh_t* mesh, int basisNp, dfloat *basis, dfloat lambda, dfloat *MS, dlong eM, dfloat *A); //generate the BB diagonal by extracting it from the transformed patch void BuildLocalIpdgBBDiagTri2D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dfloat *MS, dlong eM, dfloat *A) { dfloat *patchA = (dfloat *) calloc(mesh->Np*mesh->Np,sizeof(dfloat)); int basisNp = mesh->Np; dfloat *basis = mesh->VB; BuildLocalIpdgPatchAxTri2D(elliptic, mesh, basisNp, basis, lambda, MS, eM, patchA); for(int n=0;n<mesh->Np;++n) { A[n] = patchA[n*mesh->Np+n]; //store the diagonal entry } free(patchA); } //returns the continuous C0 patch A matrix for element eM void BuildLocalContinuousDiagTri2D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dlong eM, dfloat *A) { dlong gbase = eM*mesh->Nggeo; dfloat Grr = mesh->ggeo[gbase + G00ID]; dfloat Grs = mesh->ggeo[gbase + G01ID]; dfloat Gss = mesh->ggeo[gbase + G11ID]; dfloat J = mesh->ggeo[gbase + GWJID]; /* start with stiffness matrix */ for(int n=0;n<mesh->Np;++n){ if (elliptic->mapB[n+eM*mesh->Np]!=1) { //dont fill rows for masked nodes A[n] = J*lambda*mesh->MM[n+n*mesh->Np]; A[n] += Grr*mesh->Srr[n+n*mesh->Np]; A[n] += Grs*mesh->Srs[n+n*mesh->Np]; A[n] += Grs*mesh->Ssr[n+n*mesh->Np]; A[n] += Gss*mesh->Sss[n+n*mesh->Np]; } else { A[n] = 1; //just put a 1 so A is invertable } } //add the rank boost for the allNeumann Poisson problem if (elliptic->allNeumann) { for(int n=0;n<mesh->Np;++n){ if (elliptic->mapB[n+eM*mesh->Np]!=1) { //dont fill rows for masked nodes A[n] += elliptic->allNeumannPenalty*elliptic->allNeumannScale*elliptic->allNeumannScale; } } } } void BuildLocalIpdgDiagQuad2D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dfloat *MS, dfloat *B, dfloat *Br, dfloat *Bs, dlong eM, dfloat *A) { /* start with stiffness matrix */ for(int n=0;n<mesh->Np;++n){ A[n] = 0; // (grad phi_n, grad phi_m)_{D^e} for(int i=0;i<mesh->Np;++i){ dlong base = eM*mesh->Np*mesh->Nvgeo + i; dfloat drdx = mesh->vgeo[base+mesh->Np*RXID]; dfloat drdy = mesh->vgeo[base+mesh->Np*RYID]; dfloat dsdx = mesh->vgeo[base+mesh->Np*SXID]; dfloat dsdy = mesh->vgeo[base+mesh->Np*SYID]; dfloat JW = mesh->vgeo[base+mesh->Np*JWID]; int idn = n*mesh->Np+i; dfloat dlndx = drdx*Br[idn] + dsdx*Bs[idn]; dfloat dlndy = drdy*Br[idn] + dsdy*Bs[idn]; A[n] += JW*(dlndx*dlndx+dlndy*dlndy); A[n] += lambda*JW*B[idn]*B[idn]; } for 
(int fM=0;fM<mesh->Nfaces;fM++) { // accumulate flux terms for negative and positive traces for(int i=0;i<mesh->Nfp;++i){ int vidM = mesh->faceNodes[i+fM*mesh->Nfp]; // grab vol geofacs at surface nodes dlong baseM = eM*mesh->Np*mesh->Nvgeo + vidM; dfloat drdxM = mesh->vgeo[baseM+mesh->Np*RXID]; dfloat drdyM = mesh->vgeo[baseM+mesh->Np*RYID]; dfloat dsdxM = mesh->vgeo[baseM+mesh->Np*SXID]; dfloat dsdyM = mesh->vgeo[baseM+mesh->Np*SYID]; // grab surface geometric factors dlong base = mesh->Nsgeo*(eM*mesh->Nfp*mesh->Nfaces + fM*mesh->Nfp + i); dfloat nx = mesh->sgeo[base+NXID]; dfloat ny = mesh->sgeo[base+NYID]; dfloat wsJ = mesh->sgeo[base+WSJID]; dfloat hinv = mesh->sgeo[base+IHID]; // form negative trace terms in IPDG int idnM = n*mesh->Np+vidM; dfloat dlndxM = drdxM*Br[idnM] + dsdxM*Bs[idnM]; dfloat dlndyM = drdyM*Br[idnM] + dsdyM*Bs[idnM]; dfloat ndotgradlnM = nx*dlndxM+ny*dlndyM; dfloat lnM = B[idnM]; dfloat penalty = elliptic->tau*hinv; int bc = mesh->EToB[fM+mesh->Nfaces*eM]; //raw boundary flag int bcD = 0, bcN =0; int bcType = 0; if(bc>0) bcType = elliptic->BCType[bc]; //find its type (Dirichlet/Neumann) // this needs to be double checked (and the code where these are used) if(bcType==1){ // Dirichlet bcD = 1; bcN = 0; } else if(bcType==2){ // Neumann bcD = 0; bcN = 1; } A[n] += -0.5*(1+bcD)*(1-bcN)*wsJ*lnM*ndotgradlnM; // -(ln^-, N.grad lm^-) A[n] += -0.5*(1+bcD)*(1-bcN)*wsJ*ndotgradlnM*lnM; // -(N.grad ln^-, lm^-) A[n] += +0.5*(1+bcD)*(1-bcN)*wsJ*penalty*lnM*lnM; // +((tau/h)*ln^-,lm^-) } } } } void BuildLocalContinuousDiagQuad2D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dlong eM, dfloat *B, dfloat *Br, dfloat* Bs, dfloat *A) { for (int ny=0;ny<mesh->Nq;ny++) { for (int nx=0;nx<mesh->Nq;nx++) { int iid = nx+ny*mesh->Nq; if (elliptic->mapB[nx+ny*mesh->Nq+eM*mesh->Np]!=1) { A[iid] = 0; for (int k=0;k<mesh->Nq;k++) { int id = k+ny*mesh->Nq; dfloat Grr = mesh->ggeo[eM*mesh->Np*mesh->Nggeo + id + G00ID*mesh->Np]; A[iid] += Grr*mesh->D[nx+k*mesh->Nq]*mesh->D[nx+k*mesh->Nq]; } for (int k=0;k<mesh->Nq;k++) { int id = nx+k*mesh->Nq; dfloat Gss = mesh->ggeo[eM*mesh->Np*mesh->Nggeo + id + G11ID*mesh->Np]; A[iid] += Gss*mesh->D[ny+k*mesh->Nq]*mesh->D[ny+k*mesh->Nq]; } int id = nx+ny*mesh->Nq; dfloat Grs = mesh->ggeo[eM*mesh->Np*mesh->Nggeo + id + G01ID*mesh->Np]; A[iid] += 2*Grs*mesh->D[nx+nx*mesh->Nq]*mesh->D[ny+ny*mesh->Nq]; dfloat JW = mesh->ggeo[eM*mesh->Np*mesh->Nggeo + id + GWJID*mesh->Np]; A[iid] += JW*lambda; } else { A[iid] = 1; //just put a 1 so A is invertable } } } //add the rank boost for the allNeumann Poisson problem if (elliptic->allNeumann) { for(int n=0;n<mesh->Np;++n){ if (elliptic->mapB[n+eM*mesh->Np]!=1) { //dont fill rows for masked nodes A[n] += elliptic->allNeumannPenalty*elliptic->allNeumannScale*elliptic->allNeumannScale; } } } } void BuildLocalIpdgDiagTet3D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dfloat *MS, dlong eM, dfloat *A) { dlong vbase = eM*mesh->Nvgeo; dfloat drdx = mesh->vgeo[vbase+RXID]; dfloat drdy = mesh->vgeo[vbase+RYID]; dfloat drdz = mesh->vgeo[vbase+RZID]; dfloat dsdx = mesh->vgeo[vbase+SXID]; dfloat dsdy = mesh->vgeo[vbase+SYID]; dfloat dsdz = mesh->vgeo[vbase+SZID]; dfloat dtdx = mesh->vgeo[vbase+TXID]; dfloat dtdy = mesh->vgeo[vbase+TYID]; dfloat dtdz = mesh->vgeo[vbase+TZID]; dfloat J = mesh->vgeo[vbase+JID]; dfloat G00 = drdx*drdx + drdy*drdy + drdz*drdz; dfloat G01 = drdx*dsdx + drdy*dsdy + drdz*dsdz; dfloat G02 = drdx*dtdx + drdy*dtdy + drdz*dtdz; dfloat G10 = dsdx*drdx + dsdy*drdy + dsdz*drdz; dfloat G11 = dsdx*dsdx + 
dsdy*dsdy + dsdz*dsdz; dfloat G12 = dsdx*dtdx + dsdy*dtdy + dsdz*dtdz; dfloat G20 = dtdx*drdx + dtdy*drdy + dtdz*drdz; dfloat G21 = dtdx*dsdx + dtdy*dsdy + dtdz*dsdz; dfloat G22 = dtdx*dtdx + dtdy*dtdy + dtdz*dtdz; /* start with stiffness matrix */ for(int n=0;n<mesh->Np;++n){ A[n] = J*lambda*mesh->MM[n*mesh->Np+n]; A[n] += J*G00*mesh->Srr[n*mesh->Np+n]; A[n] += J*G01*mesh->Srs[n*mesh->Np+n]; A[n] += J*G02*mesh->Srt[n*mesh->Np+n]; A[n] += J*G10*mesh->Ssr[n*mesh->Np+n]; A[n] += J*G11*mesh->Sss[n*mesh->Np+n]; A[n] += J*G12*mesh->Sst[n*mesh->Np+n]; A[n] += J*G20*mesh->Str[n*mesh->Np+n]; A[n] += J*G21*mesh->Sts[n*mesh->Np+n]; A[n] += J*G22*mesh->Stt[n*mesh->Np+n]; } //add the rank boost for the allNeumann Poisson problem if (elliptic->allNeumann) { for(int n=0;n<mesh->Np;++n){ A[n] += elliptic->allNeumannPenalty*elliptic->allNeumannScale*elliptic->allNeumannScale; } } for (int fM=0;fM<mesh->Nfaces;fM++) { // load surface geofactors for this face dlong sid = mesh->Nsgeo*(eM*mesh->Nfaces+fM); dfloat nx = mesh->sgeo[sid+NXID]; dfloat ny = mesh->sgeo[sid+NYID]; dfloat nz = mesh->sgeo[sid+NZID]; dfloat sJ = mesh->sgeo[sid+SJID]; dfloat hinv = mesh->sgeo[sid+IHID]; int bc = mesh->EToB[fM+mesh->Nfaces*eM]; //raw boundary flag dfloat penalty = elliptic->tau*hinv; int bcD = 0, bcN =0; int bcType = 0; if(bc>0) bcType = elliptic->BCType[bc]; //find its type (Dirichlet/Neumann) // this needs to be double checked (and the code where these are used) if(bcType==1){ // Dirichlet bcD = 1; bcN = 0; } else if(bcType==2){ // Neumann bcD = 0; bcN = 1; } // mass matrix for this face dfloat *MSf = MS+fM*mesh->Nfp*mesh->Nfp; // penalty term just involves face nodes for(int n=0;n<mesh->Nfp;++n){ for(int m=0;m<mesh->Nfp;++m){ int nM = mesh->faceNodes[fM*mesh->Nfp+n]; int mM = mesh->faceNodes[fM*mesh->Nfp+m]; if (mM==nM) { // OP11 = OP11 + 0.5*( gtau*mmE ) dfloat MSfnm = sJ*MSf[n*mesh->Nfp+m]; A[nM] += 0.5*(1.-bcN)*(1.+bcD)*penalty*MSfnm; } } } // now add differential surface terms for(int n=0;n<mesh->Nfp;++n){ int nM = mesh->faceNodes[fM*mesh->Nfp+n]; for(int i=0;i<mesh->Nfp;++i){ int iM = mesh->faceNodes[fM*mesh->Nfp+i]; dfloat MSfni = sJ*MSf[n*mesh->Nfp+i]; // surface Jacobian built in dfloat DxMim = drdx*mesh->Dr[iM*mesh->Np+nM] + dsdx*mesh->Ds[iM*mesh->Np+nM] + dtdx*mesh->Dt[iM*mesh->Np+nM]; dfloat DyMim = drdy*mesh->Dr[iM*mesh->Np+nM] + dsdy*mesh->Ds[iM*mesh->Np+nM] + dtdy*mesh->Dt[iM*mesh->Np+nM]; dfloat DzMim = drdz*mesh->Dr[iM*mesh->Np+nM] + dsdz*mesh->Ds[iM*mesh->Np+nM] + dtdz*mesh->Dt[iM*mesh->Np+nM]; // OP11 = OP11 + 0.5*( - mmE*Dn1) A[nM] += -0.5*nx*(1+bcD)*(1-bcN)*MSfni*DxMim; A[nM] += -0.5*ny*(1+bcD)*(1-bcN)*MSfni*DyMim; A[nM] += -0.5*nz*(1+bcD)*(1-bcN)*MSfni*DzMim; } } for(int n=0;n<mesh->Np;++n){ for(int m=0;m<mesh->Nfp;++m){ int mM = mesh->faceNodes[fM*mesh->Nfp+m]; if (mM==n) { for(int i=0;i<mesh->Nfp;++i){ int iM = mesh->faceNodes[fM*mesh->Nfp+i]; dfloat MSfim = sJ*MSf[i*mesh->Nfp+m]; dfloat DxMin = drdx*mesh->Dr[iM*mesh->Np+n] + dsdx*mesh->Ds[iM*mesh->Np+n] + dtdx*mesh->Dt[iM*mesh->Np+n]; dfloat DyMin = drdy*mesh->Dr[iM*mesh->Np+n] + dsdy*mesh->Ds[iM*mesh->Np+n] + dtdy*mesh->Dt[iM*mesh->Np+n]; dfloat DzMin = drdz*mesh->Dr[iM*mesh->Np+n] + dsdz*mesh->Ds[iM*mesh->Np+n] + dtdz*mesh->Dt[iM*mesh->Np+n]; // OP11 = OP11 + (- Dn1'*mmE ); A[n] += -0.5*nx*(1+bcD)*(1-bcN)*DxMin*MSfim; A[n] += -0.5*ny*(1+bcD)*(1-bcN)*DyMin*MSfim; A[n] += -0.5*nz*(1+bcD)*(1-bcN)*DzMin*MSfim; } } } } } } void BuildLocalContinuousDiagTet3D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dlong eM, dfloat *A) { dlong gbase = 
eM*mesh->Nggeo; dfloat Grr = mesh->ggeo[gbase + G00ID]; dfloat Grs = mesh->ggeo[gbase + G01ID]; dfloat Grt = mesh->ggeo[gbase + G02ID]; dfloat Gss = mesh->ggeo[gbase + G11ID]; dfloat Gst = mesh->ggeo[gbase + G12ID]; dfloat Gtt = mesh->ggeo[gbase + G22ID]; dfloat J = mesh->ggeo[gbase + GWJID]; /* start with stiffness matrix */ for(int n=0;n<mesh->Np;++n){ if (elliptic->mapB[n+eM*mesh->Np]!=1) { //dont fill rows for masked nodes A[n] = J*lambda*mesh->MM[n+n*mesh->Np]; A[n] += Grr*mesh->Srr[n+n*mesh->Np]; A[n] += Grs*mesh->Srs[n+n*mesh->Np]; A[n] += Grt*mesh->Srt[n+n*mesh->Np]; A[n] += Grs*mesh->Ssr[n+n*mesh->Np]; A[n] += Gss*mesh->Sss[n+n*mesh->Np]; A[n] += Gst*mesh->Sst[n+n*mesh->Np]; A[n] += Grt*mesh->Str[n+n*mesh->Np]; A[n] += Gst*mesh->Sts[n+n*mesh->Np]; A[n] += Gtt*mesh->Stt[n+n*mesh->Np]; } else { A[n] = 1; //just put a 1 so A is invertable } } //add the rank boost for the allNeumann Poisson problem if (elliptic->allNeumann) { for(int n=0;n<mesh->Np;++n){ if (elliptic->mapB[n+eM*mesh->Np]!=1) { //dont fill rows for masked nodes A[n] += elliptic->allNeumannPenalty*elliptic->allNeumannScale*elliptic->allNeumannScale; } } } } void BuildLocalIpdgDiagHex3D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dfloat *MS, dfloat *B, dfloat *Br, dfloat *Bs, dfloat *Bt, dlong eM, dfloat *A) { /* start with stiffness matrix */ for(int n=0;n<mesh->Np;++n){ A[n] = 0; // (grad phi_n, grad phi_m)_{D^e} for(int i=0;i<mesh->Np;++i){ dlong base = eM*mesh->Np*mesh->Nvgeo + i; dfloat drdx = mesh->vgeo[base+mesh->Np*RXID]; dfloat drdy = mesh->vgeo[base+mesh->Np*RYID]; dfloat drdz = mesh->vgeo[base+mesh->Np*RZID]; dfloat dsdx = mesh->vgeo[base+mesh->Np*SXID]; dfloat dsdy = mesh->vgeo[base+mesh->Np*SYID]; dfloat dsdz = mesh->vgeo[base+mesh->Np*SZID]; dfloat dtdx = mesh->vgeo[base+mesh->Np*TXID]; dfloat dtdy = mesh->vgeo[base+mesh->Np*TYID]; dfloat dtdz = mesh->vgeo[base+mesh->Np*TZID]; dfloat JW = mesh->vgeo[base+mesh->Np*JWID]; int idn = n*mesh->Np+i; dfloat dlndx = drdx*Br[idn] + dsdx*Bs[idn] + dtdx*Bt[idn]; dfloat dlndy = drdy*Br[idn] + dsdy*Bs[idn] + dtdy*Bt[idn]; dfloat dlndz = drdz*Br[idn] + dsdz*Bs[idn] + dtdz*Bt[idn]; A[n] += JW*(dlndx*dlndx+dlndy*dlndy+dlndz*dlndz); A[n] += lambda*JW*B[idn]*B[idn]; } for (int fM=0;fM<mesh->Nfaces;fM++) { // accumulate flux terms for negative and positive traces for(int i=0;i<mesh->Nfp;++i){ int vidM = mesh->faceNodes[i+fM*mesh->Nfp]; // grab vol geofacs at surface nodes dlong baseM = eM*mesh->Np*mesh->Nvgeo + vidM; dfloat drdxM = mesh->vgeo[baseM+mesh->Np*RXID]; dfloat drdyM = mesh->vgeo[baseM+mesh->Np*RYID]; dfloat drdzM = mesh->vgeo[baseM+mesh->Np*RZID]; dfloat dsdxM = mesh->vgeo[baseM+mesh->Np*SXID]; dfloat dsdyM = mesh->vgeo[baseM+mesh->Np*SYID]; dfloat dsdzM = mesh->vgeo[baseM+mesh->Np*SZID]; dfloat dtdxM = mesh->vgeo[baseM+mesh->Np*TXID]; dfloat dtdyM = mesh->vgeo[baseM+mesh->Np*TYID]; dfloat dtdzM = mesh->vgeo[baseM+mesh->Np*TZID]; // grab surface geometric factors dlong base = mesh->Nsgeo*(eM*mesh->Nfp*mesh->Nfaces + fM*mesh->Nfp + i); dfloat nx = mesh->sgeo[base+NXID]; dfloat ny = mesh->sgeo[base+NYID]; dfloat nz = mesh->sgeo[base+NZID]; dfloat wsJ = mesh->sgeo[base+WSJID]; dfloat hinv = mesh->sgeo[base+IHID]; // form negative trace terms in IPDG int idnM = n*mesh->Np+vidM; dfloat dlndxM = drdxM*Br[idnM] + dsdxM*Bs[idnM] + dtdxM*Bt[idnM]; dfloat dlndyM = drdyM*Br[idnM] + dsdyM*Bs[idnM] + dtdyM*Bt[idnM]; dfloat dlndzM = drdzM*Br[idnM] + dsdzM*Bs[idnM] + dtdzM*Bt[idnM]; dfloat ndotgradlnM = nx*dlndxM+ny*dlndyM+nz*dlndzM; dfloat lnM = B[idnM]; dfloat penalty = 
elliptic->tau*hinv; int bc = mesh->EToB[fM+mesh->Nfaces*eM]; //raw boundary flag int bcD = 0, bcN =0; int bcType = 0; if(bc>0) bcType = elliptic->BCType[bc]; //find its type (Dirichlet/Neumann) // this needs to be double checked (and the code where these are used) if(bcType==1){ // Dirichlet bcD = 1; bcN = 0; } else if(bcType==2){ // Neumann bcD = 0; bcN = 1; } A[n] += -0.5*(1+bcD)*(1-bcN)*wsJ*lnM*ndotgradlnM; // -(ln^-, N.grad lm^-) A[n] += -0.5*(1+bcD)*(1-bcN)*wsJ*ndotgradlnM*lnM; // -(N.grad ln^-, lm^-) A[n] += +0.5*(1+bcD)*(1-bcN)*wsJ*penalty*lnM*lnM; // +((tau/h)*ln^-,lm^-) } } } } void BuildLocalContinuousDiagHex3D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dlong eM, dfloat *B, dfloat *Br, dfloat *Bs, dfloat *Bt, dfloat *A) { for (int nz=0;nz<mesh->Nq;nz++) { for (int ny=0;ny<mesh->Nq;ny++) { for (int nx=0;nx<mesh->Nq;nx++) { int idn = nx+ny*mesh->Nq+nz*mesh->Nq*mesh->Nq; if (elliptic->mapB[idn+eM*mesh->Np]!=1) { A[idn] = 0; int id = nx+ny*mesh->Nq+nz*mesh->Nq*mesh->Nq; dlong base = eM*mesh->Np*mesh->Nggeo; dfloat Grs = mesh->ggeo[base + id + G01ID*mesh->Np]; A[idn] += 2*Grs*mesh->D[nx+nx*mesh->Nq]*mesh->D[ny+ny*mesh->Nq]; dfloat Grt = mesh->ggeo[base + id + G02ID*mesh->Np]; A[idn] += 2*Grt*mesh->D[nx+nx*mesh->Nq]*mesh->D[nz+nz*mesh->Nq]; dfloat Gst = mesh->ggeo[base + id + G12ID*mesh->Np]; A[idn] += 2*Gst*mesh->D[ny+ny*mesh->Nq]*mesh->D[nz+nz*mesh->Nq]; for (int k=0;k<mesh->Nq;k++) { int iid = k+ny*mesh->Nq+nz*mesh->Nq*mesh->Nq; dfloat Grr = mesh->ggeo[base + iid + G00ID*mesh->Np]; A[idn] += Grr*mesh->D[nx+k*mesh->Nq]*mesh->D[nx+k*mesh->Nq]; } for (int k=0;k<mesh->Nq;k++) { int iid = nx+k*mesh->Nq+nz*mesh->Nq*mesh->Nq; dfloat Gss = mesh->ggeo[base + iid + G11ID*mesh->Np]; A[idn] += Gss*mesh->D[ny+k*mesh->Nq]*mesh->D[ny+k*mesh->Nq]; } for (int k=0;k<mesh->Nq;k++) { int iid = nx+ny*mesh->Nq+k*mesh->Nq*mesh->Nq; dfloat Gtt = mesh->ggeo[base + iid + G22ID*mesh->Np]; A[idn] += Gtt*mesh->D[nz+k*mesh->Nq]*mesh->D[nz+k*mesh->Nq]; } dfloat JW = mesh->ggeo[base + id + GWJID*mesh->Np]; A[idn] += JW*lambda; } else { A[idn] = 1; //just put a 1 so A is invertable } } } } //add the rank boost for the allNeumann Poisson problem if (elliptic->allNeumann) { for(int n=0;n<mesh->Np;++n){ if (elliptic->mapB[n+eM*mesh->Np]!=1) { //dont fill rows for masked nodes A[n] += elliptic->allNeumannPenalty*elliptic->allNeumannScale*elliptic->allNeumannScale; } } } }
#include "elliptic.h" void BuildLocalIpdgBBDiagTri2D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dfloat *MS, dlong eM, dfloat *A); void BuildLocalIpdgDiagTri2D (elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dfloat *MS, dlong eM, dfloat *A); void BuildLocalIpdgDiagQuad2D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dfloat *MS, dfloat *B, dfloat *Br, dfloat *Bs, dlong eM, dfloat *A); void BuildLocalIpdgDiagTet3D (elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dfloat *MS, dlong eM, dfloat *A); void BuildLocalIpdgDiagHex3D (elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dfloat *MS, dfloat *B, dfloat *Br, dfloat *Bs, dfloat *Bt, dlong eM, dfloat *A); void BuildLocalContinuousDiagTri2D (elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dlong eM, dfloat *A); void BuildLocalContinuousDiagQuad2D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dlong eM, dfloat *B, dfloat *Br, dfloat *Bs, dfloat *A); void BuildLocalContinuousDiagTet3D (elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dlong eM, dfloat *A); void BuildLocalContinuousDiagHex3D (elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dlong eM, dfloat *B, dfloat *Br, dfloat *Bs, dfloat *Bt, dfloat *A); void ellipticBuildJacobi(elliptic_t* elliptic, dfloat lambda, dfloat **invDiagA){ mesh_t *mesh = elliptic->mesh; setupAide options = elliptic->options; // surface mass matrices MS = MM*LIFT dfloat *MS = (dfloat *) calloc(mesh->Nfaces*mesh->Nfp*mesh->Nfp,sizeof(dfloat)); for (int f=0;f<mesh->Nfaces;f++) { for (int n=0;n<mesh->Nfp;n++) { int fn = mesh->faceNodes[f*mesh->Nfp+n]; for (int m=0;m<mesh->Nfp;m++) { dfloat MSnm = 0; for (int i=0;i<mesh->Np;i++){ MSnm += mesh->MM[fn+i*mesh->Np]*mesh->LIFT[i*mesh->Nfp*mesh->Nfaces+f*mesh->Nfp+m]; } MS[m+n*mesh->Nfp + f*mesh->Nfp*mesh->Nfp] = MSnm; } } } // build some monolithic basis arrays (for quads and hexes) dfloat *B = (dfloat*) calloc(mesh->Np*mesh->Np, sizeof(dfloat)); dfloat *Br = (dfloat*) calloc(mesh->Np*mesh->Np, sizeof(dfloat)); dfloat *Bs = (dfloat*) calloc(mesh->Np*mesh->Np, sizeof(dfloat)); dfloat *Bt = (dfloat*) calloc(mesh->Np*mesh->Np, sizeof(dfloat)); if (elliptic->elementType==QUADRILATERALS) { int mode = 0; for(int nj=0;nj<mesh->N+1;++nj){ for(int ni=0;ni<mesh->N+1;++ni){ int node = 0; for(int j=0;j<mesh->N+1;++j){ for(int i=0;i<mesh->N+1;++i){ if(nj==j && ni==i) B[mode*mesh->Np+node] = 1; if(nj==j) Br[mode*mesh->Np+node] = mesh->D[ni+mesh->Nq*i]; if(ni==i) Bs[mode*mesh->Np+node] = mesh->D[nj+mesh->Nq*j]; ++node; } } ++mode; } } } if (elliptic->elementType==HEXAHEDRA) { int mode = 0; for(int nk=0;nk<mesh->N+1;++nk){ for(int nj=0;nj<mesh->N+1;++nj){ for(int ni=0;ni<mesh->N+1;++ni){ int node = 0; for(int k=0;k<mesh->N+1;++k){ for(int j=0;j<mesh->N+1;++j){ for(int i=0;i<mesh->N+1;++i){ if(nk==k && nj==j && ni==i) B[mode*mesh->Np+node] = 1; if(nj==j && nk==k) Br[mode*mesh->Np+node] = mesh->D[ni+mesh->Nq*i]; if(ni==i && nk==k) Bs[mode*mesh->Np+node] = mesh->D[nj+mesh->Nq*j]; if(ni==i && nj==j) Bt[mode*mesh->Np+node] = mesh->D[nk+mesh->Nq*k]; ++node; } } } ++mode; } } } } dlong diagNnum = mesh->Np*mesh->Nelements; dfloat *diagA = (dfloat*) calloc(diagNnum, sizeof(dfloat)); if(mesh->rank==0) printf("Building diagonal...");fflush(stdout); if (options.compareArgs("DISCRETIZATION","IPDG")) { switch(elliptic->elementType){ case TRIANGLES: if (options.compareArgs("BASIS","BERN")) { #pragma omp parallel for for(dlong eM=0;eM<mesh->Nelements;++eM) BuildLocalIpdgBBDiagTri2D(elliptic, mesh, lambda, MS, eM, diagA + eM*mesh->Np); } else { #pragma omp parallel for for(dlong 
eM=0;eM<mesh->Nelements;++eM) BuildLocalIpdgDiagTri2D(elliptic, mesh, lambda, MS, eM, diagA + eM*mesh->Np); } break; case QUADRILATERALS: #pragma omp parallel for for(dlong eM=0;eM<mesh->Nelements;++eM) BuildLocalIpdgDiagQuad2D(elliptic, mesh, lambda, MS, B, Br, Bs, eM, diagA + eM*mesh->Np); break; case TETRAHEDRA: #pragma omp parallel for for(dlong eM=0;eM<mesh->Nelements;++eM) BuildLocalIpdgDiagTet3D(elliptic, mesh, lambda, MS, eM, diagA + eM*mesh->Np); break; case HEXAHEDRA: #pragma omp parallel for for(dlong eM=0;eM<mesh->Nelements;++eM) BuildLocalIpdgDiagHex3D(elliptic, mesh, lambda, MS, B, Br, Bs, Bt, eM, diagA + eM*mesh->Np); break; } } else if (options.compareArgs("DISCRETIZATION","CONTINUOUS")) { switch(elliptic->elementType){ case TRIANGLES: #pragma omp parallel for for(dlong eM=0;eM<mesh->Nelements;++eM) BuildLocalContinuousDiagTri2D(elliptic, mesh, lambda, eM, diagA + eM*mesh->Np); break; case QUADRILATERALS: #pragma omp parallel for for(dlong eM=0;eM<mesh->Nelements;++eM) BuildLocalContinuousDiagQuad2D(elliptic, mesh, lambda, eM, B, Br, Bs, diagA + eM*mesh->Np); break; case TETRAHEDRA: #pragma omp parallel for for(dlong eM=0;eM<mesh->Nelements;++eM) BuildLocalContinuousDiagTet3D(elliptic, mesh, lambda, eM, diagA + eM*mesh->Np); break; case HEXAHEDRA: #pragma omp parallel for for(dlong eM=0;eM<mesh->Nelements;++eM) BuildLocalContinuousDiagHex3D(elliptic, mesh, lambda, eM, B, Br, Bs, Bt, diagA + eM*mesh->Np); break; } } if (options.compareArgs("DISCRETIZATION","CONTINUOUS")) gsParallelGatherScatter(mesh->hostGsh, diagA, dfloatString, "add"); *invDiagA = (dfloat*) calloc(diagNnum, sizeof(dfloat)); for (dlong n=0;n<mesh->Nelements*mesh->Np;n++) { (*invDiagA)[n] = 1/diagA[n]; } if(mesh->rank==0) printf("done.\n"); free(diagA); free(MS); free(B); free(Br); free(Bs); free(Bt); } void BuildLocalIpdgDiagTri2D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dfloat *MS, dlong eM, dfloat *A) { dlong vbase = eM*mesh->Nvgeo; dfloat drdx = mesh->vgeo[vbase+RXID]; dfloat drdy = mesh->vgeo[vbase+RYID]; dfloat dsdx = mesh->vgeo[vbase+SXID]; dfloat dsdy = mesh->vgeo[vbase+SYID]; dfloat J = mesh->vgeo[vbase+JID]; /* start with stiffness matrix */ for(int n=0;n<mesh->Np;++n){ A[n] = J*lambda*mesh->MM[n*mesh->Np+n]; A[n] += J*drdx*drdx*mesh->Srr[n*mesh->Np+n]; A[n] += J*drdx*dsdx*mesh->Srs[n*mesh->Np+n]; A[n] += J*dsdx*drdx*mesh->Ssr[n*mesh->Np+n]; A[n] += J*dsdx*dsdx*mesh->Sss[n*mesh->Np+n]; A[n] += J*drdy*drdy*mesh->Srr[n*mesh->Np+n]; A[n] += J*drdy*dsdy*mesh->Srs[n*mesh->Np+n]; A[n] += J*dsdy*drdy*mesh->Ssr[n*mesh->Np+n]; A[n] += J*dsdy*dsdy*mesh->Sss[n*mesh->Np+n]; } //add the rank boost for the allNeumann Poisson problem if (elliptic->allNeumann) { for(int n=0;n<mesh->Np;++n){ A[n] += elliptic->allNeumannPenalty*elliptic->allNeumannScale*elliptic->allNeumannScale; } } for (int fM=0;fM<mesh->Nfaces;fM++) { // load surface geofactors for this face dlong sid = mesh->Nsgeo*(eM*mesh->Nfaces+fM); dfloat nx = mesh->sgeo[sid+NXID]; dfloat ny = mesh->sgeo[sid+NYID]; dfloat sJ = mesh->sgeo[sid+SJID]; dfloat hinv = mesh->sgeo[sid+IHID]; int bc = mesh->EToB[fM+mesh->Nfaces*eM]; //raw boundary flag dfloat penalty = elliptic->tau*hinv; int bcD = 0, bcN =0; int bcType = 0; if(bc>0) bcType = elliptic->BCType[bc]; //find its type (Dirichlet/Neumann) // this needs to be double checked (and the code where these are used) if(bcType==1){ // Dirichlet bcD = 1; bcN = 0; } else if(bcType==2){ // Neumann bcD = 0; bcN = 1; } // mass matrix for this face dfloat *MSf = MS+fM*mesh->Nfp*mesh->Nfp; // penalty term just 
involves face nodes for(int n=0;n<mesh->Nfp;++n){ int nM = mesh->faceNodes[fM*mesh->Nfp+n]; for(int m=0;m<mesh->Nfp;++m){ int mM = mesh->faceNodes[fM*mesh->Nfp+m]; if (mM == nM) { // OP11 = OP11 + 0.5*( gtau*mmE ) dfloat MSfnm = sJ*MSf[n*mesh->Nfp+m]; A[nM] += 0.5*(1.-bcN)*(1.+bcD)*penalty*MSfnm; } } } // now add differential surface terms for(int n=0;n<mesh->Nfp;++n){ int nM = mesh->faceNodes[fM*mesh->Nfp+n]; for(int i=0;i<mesh->Nfp;++i){ int iM = mesh->faceNodes[fM*mesh->Nfp+i]; dfloat MSfni = sJ*MSf[n*mesh->Nfp+i]; // surface Jacobian built in dfloat DxMim = drdx*mesh->Dr[iM*mesh->Np+nM] + dsdx*mesh->Ds[iM*mesh->Np+nM]; dfloat DyMim = drdy*mesh->Dr[iM*mesh->Np+nM] + dsdy*mesh->Ds[iM*mesh->Np+nM]; // OP11 = OP11 + 0.5*( - mmE*Dn1) A[nM] += -0.5*nx*(1+bcD)*(1-bcN)*MSfni*DxMim; A[nM] += -0.5*ny*(1+bcD)*(1-bcN)*MSfni*DyMim; } } for(int n=0;n<mesh->Np;++n){ for(int m=0;m<mesh->Nfp;++m){ int mM = mesh->faceNodes[fM*mesh->Nfp+m]; if (mM==n) { for(int i=0;i<mesh->Nfp;++i){ int iM = mesh->faceNodes[fM*mesh->Nfp+i]; dfloat MSfim = sJ*MSf[i*mesh->Nfp+m]; dfloat DxMin = drdx*mesh->Dr[iM*mesh->Np+n] + dsdx*mesh->Ds[iM*mesh->Np+n]; dfloat DyMin = drdy*mesh->Dr[iM*mesh->Np+n] + dsdy*mesh->Ds[iM*mesh->Np+n]; // OP11 = OP11 + (- Dn1'*mmE ); A[n] += -0.5*nx*(1+bcD)*(1-bcN)*DxMin*MSfim; A[n] += -0.5*ny*(1+bcD)*(1-bcN)*DyMin*MSfim; } } } } } } void BuildLocalIpdgPatchAxTri2D(elliptic_t* elliptic, mesh_t* mesh, int basisNp, dfloat *basis, dfloat lambda, dfloat *MS, dlong eM, dfloat *A); //generate the BB diagonal by extracting it from the transformed patch void BuildLocalIpdgBBDiagTri2D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dfloat *MS, dlong eM, dfloat *A) { dfloat *patchA = (dfloat *) calloc(mesh->Np*mesh->Np,sizeof(dfloat)); int basisNp = mesh->Np; dfloat *basis = mesh->VB; BuildLocalIpdgPatchAxTri2D(elliptic, mesh, basisNp, basis, lambda, MS, eM, patchA); for(int n=0;n<mesh->Np;++n) { A[n] = patchA[n*mesh->Np+n]; //store the diagonal entry } free(patchA); } //returns the continuous C0 patch A matrix for element eM void BuildLocalContinuousDiagTri2D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dlong eM, dfloat *A) { dlong gbase = eM*mesh->Nggeo; dfloat Grr = mesh->ggeo[gbase + G00ID]; dfloat Grs = mesh->ggeo[gbase + G01ID]; dfloat Gss = mesh->ggeo[gbase + G11ID]; dfloat J = mesh->ggeo[gbase + GWJID]; /* start with stiffness matrix */ for(int n=0;n<mesh->Np;++n){ if (elliptic->mapB[n+eM*mesh->Np]!=1) { //dont fill rows for masked nodes A[n] = J*lambda*mesh->MM[n+n*mesh->Np]; A[n] += Grr*mesh->Srr[n+n*mesh->Np]; A[n] += Grs*mesh->Srs[n+n*mesh->Np]; A[n] += Grs*mesh->Ssr[n+n*mesh->Np]; A[n] += Gss*mesh->Sss[n+n*mesh->Np]; } else { A[n] = 1; //just put a 1 so A is invertable } } //add the rank boost for the allNeumann Poisson problem if (elliptic->allNeumann) { for(int n=0;n<mesh->Np;++n){ if (elliptic->mapB[n+eM*mesh->Np]!=1) { //dont fill rows for masked nodes A[n] += elliptic->allNeumannPenalty*elliptic->allNeumannScale*elliptic->allNeumannScale; } } } } void BuildLocalIpdgDiagQuad2D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dfloat *MS, dfloat *B, dfloat *Br, dfloat *Bs, dlong eM, dfloat *A) { /* start with stiffness matrix */ for(int n=0;n<mesh->Np;++n){ A[n] = 0; // (grad phi_n, grad phi_m)_{D^e} for(int i=0;i<mesh->Np;++i){ dlong base = eM*mesh->Np*mesh->Nvgeo + i; dfloat drdx = mesh->vgeo[base+mesh->Np*RXID]; dfloat drdy = mesh->vgeo[base+mesh->Np*RYID]; dfloat dsdx = mesh->vgeo[base+mesh->Np*SXID]; dfloat dsdy = mesh->vgeo[base+mesh->Np*SYID]; dfloat JW = 
mesh->vgeo[base+mesh->Np*JWID]; int idn = n*mesh->Np+i; dfloat dlndx = drdx*Br[idn] + dsdx*Bs[idn]; dfloat dlndy = drdy*Br[idn] + dsdy*Bs[idn]; A[n] += JW*(dlndx*dlndx+dlndy*dlndy); A[n] += lambda*JW*B[idn]*B[idn]; } for (int fM=0;fM<mesh->Nfaces;fM++) { // accumulate flux terms for negative and positive traces for(int i=0;i<mesh->Nfp;++i){ int vidM = mesh->faceNodes[i+fM*mesh->Nfp]; // grab vol geofacs at surface nodes dlong baseM = eM*mesh->Np*mesh->Nvgeo + vidM; dfloat drdxM = mesh->vgeo[baseM+mesh->Np*RXID]; dfloat drdyM = mesh->vgeo[baseM+mesh->Np*RYID]; dfloat dsdxM = mesh->vgeo[baseM+mesh->Np*SXID]; dfloat dsdyM = mesh->vgeo[baseM+mesh->Np*SYID]; // grab surface geometric factors dlong base = mesh->Nsgeo*(eM*mesh->Nfp*mesh->Nfaces + fM*mesh->Nfp + i); dfloat nx = mesh->sgeo[base+NXID]; dfloat ny = mesh->sgeo[base+NYID]; dfloat wsJ = mesh->sgeo[base+WSJID]; dfloat hinv = mesh->sgeo[base+IHID]; // form negative trace terms in IPDG int idnM = n*mesh->Np+vidM; dfloat dlndxM = drdxM*Br[idnM] + dsdxM*Bs[idnM]; dfloat dlndyM = drdyM*Br[idnM] + dsdyM*Bs[idnM]; dfloat ndotgradlnM = nx*dlndxM+ny*dlndyM; dfloat lnM = B[idnM]; dfloat penalty = elliptic->tau*hinv; int bc = mesh->EToB[fM+mesh->Nfaces*eM]; //raw boundary flag int bcD = 0, bcN =0; int bcType = 0; if(bc>0) bcType = elliptic->BCType[bc]; //find its type (Dirichlet/Neumann) // this needs to be double checked (and the code where these are used) if(bcType==1){ // Dirichlet bcD = 1; bcN = 0; } else if(bcType==2){ // Neumann bcD = 0; bcN = 1; } A[n] += -0.5*(1+bcD)*(1-bcN)*wsJ*lnM*ndotgradlnM; // -(ln^-, N.grad lm^-) A[n] += -0.5*(1+bcD)*(1-bcN)*wsJ*ndotgradlnM*lnM; // -(N.grad ln^-, lm^-) A[n] += +0.5*(1+bcD)*(1-bcN)*wsJ*penalty*lnM*lnM; // +((tau/h)*ln^-,lm^-) } } } } void BuildLocalContinuousDiagQuad2D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dlong eM, dfloat *B, dfloat *Br, dfloat* Bs, dfloat *A) { for (int ny=0;ny<mesh->Nq;ny++) { for (int nx=0;nx<mesh->Nq;nx++) { int iid = nx+ny*mesh->Nq; if (elliptic->mapB[nx+ny*mesh->Nq+eM*mesh->Np]!=1) { A[iid] = 0; for (int k=0;k<mesh->Nq;k++) { int id = k+ny*mesh->Nq; dfloat Grr = mesh->ggeo[eM*mesh->Np*mesh->Nggeo + id + G00ID*mesh->Np]; A[iid] += Grr*mesh->D[nx+k*mesh->Nq]*mesh->D[nx+k*mesh->Nq]; } for (int k=0;k<mesh->Nq;k++) { int id = nx+k*mesh->Nq; dfloat Gss = mesh->ggeo[eM*mesh->Np*mesh->Nggeo + id + G11ID*mesh->Np]; A[iid] += Gss*mesh->D[ny+k*mesh->Nq]*mesh->D[ny+k*mesh->Nq]; } int id = nx+ny*mesh->Nq; dfloat Grs = mesh->ggeo[eM*mesh->Np*mesh->Nggeo + id + G01ID*mesh->Np]; A[iid] += 2*Grs*mesh->D[nx+nx*mesh->Nq]*mesh->D[ny+ny*mesh->Nq]; dfloat JW = mesh->ggeo[eM*mesh->Np*mesh->Nggeo + id + GWJID*mesh->Np]; A[iid] += JW*lambda; } else { A[iid] = 1; //just put a 1 so A is invertable } } } //add the rank boost for the allNeumann Poisson problem if (elliptic->allNeumann) { for(int n=0;n<mesh->Np;++n){ if (elliptic->mapB[n+eM*mesh->Np]!=1) { //dont fill rows for masked nodes A[n] += elliptic->allNeumannPenalty*elliptic->allNeumannScale*elliptic->allNeumannScale; } } } } void BuildLocalIpdgDiagTet3D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dfloat *MS, dlong eM, dfloat *A) { dlong vbase = eM*mesh->Nvgeo; dfloat drdx = mesh->vgeo[vbase+RXID]; dfloat drdy = mesh->vgeo[vbase+RYID]; dfloat drdz = mesh->vgeo[vbase+RZID]; dfloat dsdx = mesh->vgeo[vbase+SXID]; dfloat dsdy = mesh->vgeo[vbase+SYID]; dfloat dsdz = mesh->vgeo[vbase+SZID]; dfloat dtdx = mesh->vgeo[vbase+TXID]; dfloat dtdy = mesh->vgeo[vbase+TYID]; dfloat dtdz = mesh->vgeo[vbase+TZID]; dfloat J = mesh->vgeo[vbase+JID]; 
dfloat G00 = drdx*drdx + drdy*drdy + drdz*drdz; dfloat G01 = drdx*dsdx + drdy*dsdy + drdz*dsdz; dfloat G02 = drdx*dtdx + drdy*dtdy + drdz*dtdz; dfloat G10 = dsdx*drdx + dsdy*drdy + dsdz*drdz; dfloat G11 = dsdx*dsdx + dsdy*dsdy + dsdz*dsdz; dfloat G12 = dsdx*dtdx + dsdy*dtdy + dsdz*dtdz; dfloat G20 = dtdx*drdx + dtdy*drdy + dtdz*drdz; dfloat G21 = dtdx*dsdx + dtdy*dsdy + dtdz*dsdz; dfloat G22 = dtdx*dtdx + dtdy*dtdy + dtdz*dtdz; /* start with stiffness matrix */ for(int n=0;n<mesh->Np;++n){ A[n] = J*lambda*mesh->MM[n*mesh->Np+n]; A[n] += J*G00*mesh->Srr[n*mesh->Np+n]; A[n] += J*G01*mesh->Srs[n*mesh->Np+n]; A[n] += J*G02*mesh->Srt[n*mesh->Np+n]; A[n] += J*G10*mesh->Ssr[n*mesh->Np+n]; A[n] += J*G11*mesh->Sss[n*mesh->Np+n]; A[n] += J*G12*mesh->Sst[n*mesh->Np+n]; A[n] += J*G20*mesh->Str[n*mesh->Np+n]; A[n] += J*G21*mesh->Sts[n*mesh->Np+n]; A[n] += J*G22*mesh->Stt[n*mesh->Np+n]; } //add the rank boost for the allNeumann Poisson problem if (elliptic->allNeumann) { for(int n=0;n<mesh->Np;++n){ A[n] += elliptic->allNeumannPenalty*elliptic->allNeumannScale*elliptic->allNeumannScale; } } for (int fM=0;fM<mesh->Nfaces;fM++) { // load surface geofactors for this face dlong sid = mesh->Nsgeo*(eM*mesh->Nfaces+fM); dfloat nx = mesh->sgeo[sid+NXID]; dfloat ny = mesh->sgeo[sid+NYID]; dfloat nz = mesh->sgeo[sid+NZID]; dfloat sJ = mesh->sgeo[sid+SJID]; dfloat hinv = mesh->sgeo[sid+IHID]; int bc = mesh->EToB[fM+mesh->Nfaces*eM]; //raw boundary flag dfloat penalty = elliptic->tau*hinv; int bcD = 0, bcN =0; int bcType = 0; if(bc>0) bcType = elliptic->BCType[bc]; //find its type (Dirichlet/Neumann) // this needs to be double checked (and the code where these are used) if(bcType==1){ // Dirichlet bcD = 1; bcN = 0; } else if(bcType==2){ // Neumann bcD = 0; bcN = 1; } // mass matrix for this face dfloat *MSf = MS+fM*mesh->Nfp*mesh->Nfp; // penalty term just involves face nodes for(int n=0;n<mesh->Nfp;++n){ for(int m=0;m<mesh->Nfp;++m){ int nM = mesh->faceNodes[fM*mesh->Nfp+n]; int mM = mesh->faceNodes[fM*mesh->Nfp+m]; if (mM==nM) { // OP11 = OP11 + 0.5*( gtau*mmE ) dfloat MSfnm = sJ*MSf[n*mesh->Nfp+m]; A[nM] += 0.5*(1.-bcN)*(1.+bcD)*penalty*MSfnm; } } } // now add differential surface terms for(int n=0;n<mesh->Nfp;++n){ int nM = mesh->faceNodes[fM*mesh->Nfp+n]; for(int i=0;i<mesh->Nfp;++i){ int iM = mesh->faceNodes[fM*mesh->Nfp+i]; dfloat MSfni = sJ*MSf[n*mesh->Nfp+i]; // surface Jacobian built in dfloat DxMim = drdx*mesh->Dr[iM*mesh->Np+nM] + dsdx*mesh->Ds[iM*mesh->Np+nM] + dtdx*mesh->Dt[iM*mesh->Np+nM]; dfloat DyMim = drdy*mesh->Dr[iM*mesh->Np+nM] + dsdy*mesh->Ds[iM*mesh->Np+nM] + dtdy*mesh->Dt[iM*mesh->Np+nM]; dfloat DzMim = drdz*mesh->Dr[iM*mesh->Np+nM] + dsdz*mesh->Ds[iM*mesh->Np+nM] + dtdz*mesh->Dt[iM*mesh->Np+nM]; // OP11 = OP11 + 0.5*( - mmE*Dn1) A[nM] += -0.5*nx*(1+bcD)*(1-bcN)*MSfni*DxMim; A[nM] += -0.5*ny*(1+bcD)*(1-bcN)*MSfni*DyMim; A[nM] += -0.5*nz*(1+bcD)*(1-bcN)*MSfni*DzMim; } } for(int n=0;n<mesh->Np;++n){ for(int m=0;m<mesh->Nfp;++m){ int mM = mesh->faceNodes[fM*mesh->Nfp+m]; if (mM==n) { for(int i=0;i<mesh->Nfp;++i){ int iM = mesh->faceNodes[fM*mesh->Nfp+i]; dfloat MSfim = sJ*MSf[i*mesh->Nfp+m]; dfloat DxMin = drdx*mesh->Dr[iM*mesh->Np+n] + dsdx*mesh->Ds[iM*mesh->Np+n] + dtdx*mesh->Dt[iM*mesh->Np+n]; dfloat DyMin = drdy*mesh->Dr[iM*mesh->Np+n] + dsdy*mesh->Ds[iM*mesh->Np+n] + dtdy*mesh->Dt[iM*mesh->Np+n]; dfloat DzMin = drdz*mesh->Dr[iM*mesh->Np+n] + dsdz*mesh->Ds[iM*mesh->Np+n] + dtdz*mesh->Dt[iM*mesh->Np+n]; // OP11 = OP11 + (- Dn1'*mmE ); A[n] += -0.5*nx*(1+bcD)*(1-bcN)*DxMin*MSfim; A[n] += 
-0.5*ny*(1+bcD)*(1-bcN)*DyMin*MSfim; A[n] += -0.5*nz*(1+bcD)*(1-bcN)*DzMin*MSfim; } } } } } } void BuildLocalContinuousDiagTet3D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dlong eM, dfloat *A) { dlong gbase = eM*mesh->Nggeo; dfloat Grr = mesh->ggeo[gbase + G00ID]; dfloat Grs = mesh->ggeo[gbase + G01ID]; dfloat Grt = mesh->ggeo[gbase + G02ID]; dfloat Gss = mesh->ggeo[gbase + G11ID]; dfloat Gst = mesh->ggeo[gbase + G12ID]; dfloat Gtt = mesh->ggeo[gbase + G22ID]; dfloat J = mesh->ggeo[gbase + GWJID]; /* start with stiffness matrix */ for(int n=0;n<mesh->Np;++n){ if (elliptic->mapB[n+eM*mesh->Np]!=1) { //dont fill rows for masked nodes A[n] = J*lambda*mesh->MM[n+n*mesh->Np]; A[n] += Grr*mesh->Srr[n+n*mesh->Np]; A[n] += Grs*mesh->Srs[n+n*mesh->Np]; A[n] += Grt*mesh->Srt[n+n*mesh->Np]; A[n] += Grs*mesh->Ssr[n+n*mesh->Np]; A[n] += Gss*mesh->Sss[n+n*mesh->Np]; A[n] += Gst*mesh->Sst[n+n*mesh->Np]; A[n] += Grt*mesh->Str[n+n*mesh->Np]; A[n] += Gst*mesh->Sts[n+n*mesh->Np]; A[n] += Gtt*mesh->Stt[n+n*mesh->Np]; } else { A[n] = 1; //just put a 1 so A is invertable } } //add the rank boost for the allNeumann Poisson problem if (elliptic->allNeumann) { for(int n=0;n<mesh->Np;++n){ if (elliptic->mapB[n+eM*mesh->Np]!=1) { //dont fill rows for masked nodes A[n] += elliptic->allNeumannPenalty*elliptic->allNeumannScale*elliptic->allNeumannScale; } } } } void BuildLocalIpdgDiagHex3D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dfloat *MS, dfloat *B, dfloat *Br, dfloat *Bs, dfloat *Bt, dlong eM, dfloat *A) { /* start with stiffness matrix */ for(int n=0;n<mesh->Np;++n){ A[n] = 0; // (grad phi_n, grad phi_m)_{D^e} for(int i=0;i<mesh->Np;++i){ dlong base = eM*mesh->Np*mesh->Nvgeo + i; dfloat drdx = mesh->vgeo[base+mesh->Np*RXID]; dfloat drdy = mesh->vgeo[base+mesh->Np*RYID]; dfloat drdz = mesh->vgeo[base+mesh->Np*RZID]; dfloat dsdx = mesh->vgeo[base+mesh->Np*SXID]; dfloat dsdy = mesh->vgeo[base+mesh->Np*SYID]; dfloat dsdz = mesh->vgeo[base+mesh->Np*SZID]; dfloat dtdx = mesh->vgeo[base+mesh->Np*TXID]; dfloat dtdy = mesh->vgeo[base+mesh->Np*TYID]; dfloat dtdz = mesh->vgeo[base+mesh->Np*TZID]; dfloat JW = mesh->vgeo[base+mesh->Np*JWID]; int idn = n*mesh->Np+i; dfloat dlndx = drdx*Br[idn] + dsdx*Bs[idn] + dtdx*Bt[idn]; dfloat dlndy = drdy*Br[idn] + dsdy*Bs[idn] + dtdy*Bt[idn]; dfloat dlndz = drdz*Br[idn] + dsdz*Bs[idn] + dtdz*Bt[idn]; A[n] += JW*(dlndx*dlndx+dlndy*dlndy+dlndz*dlndz); A[n] += lambda*JW*B[idn]*B[idn]; } for (int fM=0;fM<mesh->Nfaces;fM++) { // accumulate flux terms for negative and positive traces for(int i=0;i<mesh->Nfp;++i){ int vidM = mesh->faceNodes[i+fM*mesh->Nfp]; // grab vol geofacs at surface nodes dlong baseM = eM*mesh->Np*mesh->Nvgeo + vidM; dfloat drdxM = mesh->vgeo[baseM+mesh->Np*RXID]; dfloat drdyM = mesh->vgeo[baseM+mesh->Np*RYID]; dfloat drdzM = mesh->vgeo[baseM+mesh->Np*RZID]; dfloat dsdxM = mesh->vgeo[baseM+mesh->Np*SXID]; dfloat dsdyM = mesh->vgeo[baseM+mesh->Np*SYID]; dfloat dsdzM = mesh->vgeo[baseM+mesh->Np*SZID]; dfloat dtdxM = mesh->vgeo[baseM+mesh->Np*TXID]; dfloat dtdyM = mesh->vgeo[baseM+mesh->Np*TYID]; dfloat dtdzM = mesh->vgeo[baseM+mesh->Np*TZID]; // grab surface geometric factors dlong base = mesh->Nsgeo*(eM*mesh->Nfp*mesh->Nfaces + fM*mesh->Nfp + i); dfloat nx = mesh->sgeo[base+NXID]; dfloat ny = mesh->sgeo[base+NYID]; dfloat nz = mesh->sgeo[base+NZID]; dfloat wsJ = mesh->sgeo[base+WSJID]; dfloat hinv = mesh->sgeo[base+IHID]; // form negative trace terms in IPDG int idnM = n*mesh->Np+vidM; dfloat dlndxM = drdxM*Br[idnM] + dsdxM*Bs[idnM] + dtdxM*Bt[idnM]; dfloat 
dlndyM = drdyM*Br[idnM] + dsdyM*Bs[idnM] + dtdyM*Bt[idnM]; dfloat dlndzM = drdzM*Br[idnM] + dsdzM*Bs[idnM] + dtdzM*Bt[idnM]; dfloat ndotgradlnM = nx*dlndxM+ny*dlndyM+nz*dlndzM; dfloat lnM = B[idnM]; dfloat penalty = elliptic->tau*hinv; int bc = mesh->EToB[fM+mesh->Nfaces*eM]; //raw boundary flag int bcD = 0, bcN =0; int bcType = 0; if(bc>0) bcType = elliptic->BCType[bc]; //find its type (Dirichlet/Neumann) // this needs to be double checked (and the code where these are used) if(bcType==1){ // Dirichlet bcD = 1; bcN = 0; } else if(bcType==2){ // Neumann bcD = 0; bcN = 1; } A[n] += -0.5*(1+bcD)*(1-bcN)*wsJ*lnM*ndotgradlnM; // -(ln^-, N.grad lm^-) A[n] += -0.5*(1+bcD)*(1-bcN)*wsJ*ndotgradlnM*lnM; // -(N.grad ln^-, lm^-) A[n] += +0.5*(1+bcD)*(1-bcN)*wsJ*penalty*lnM*lnM; // +((tau/h)*ln^-,lm^-) } } } } void BuildLocalContinuousDiagHex3D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dlong eM, dfloat *B, dfloat *Br, dfloat *Bs, dfloat *Bt, dfloat *A) { for (int nz=0;nz<mesh->Nq;nz++) { for (int ny=0;ny<mesh->Nq;ny++) { for (int nx=0;nx<mesh->Nq;nx++) { int idn = nx+ny*mesh->Nq+nz*mesh->Nq*mesh->Nq; if (elliptic->mapB[idn+eM*mesh->Np]!=1) { A[idn] = 0; int id = nx+ny*mesh->Nq+nz*mesh->Nq*mesh->Nq; dlong base = eM*mesh->Np*mesh->Nggeo; dfloat Grs = mesh->ggeo[base + id + G01ID*mesh->Np]; A[idn] += 2*Grs*mesh->D[nx+nx*mesh->Nq]*mesh->D[ny+ny*mesh->Nq]; dfloat Grt = mesh->ggeo[base + id + G02ID*mesh->Np]; A[idn] += 2*Grt*mesh->D[nx+nx*mesh->Nq]*mesh->D[nz+nz*mesh->Nq]; dfloat Gst = mesh->ggeo[base + id + G12ID*mesh->Np]; A[idn] += 2*Gst*mesh->D[ny+ny*mesh->Nq]*mesh->D[nz+nz*mesh->Nq]; for (int k=0;k<mesh->Nq;k++) { int iid = k+ny*mesh->Nq+nz*mesh->Nq*mesh->Nq; dfloat Grr = mesh->ggeo[base + iid + G00ID*mesh->Np]; A[idn] += Grr*mesh->D[nx+k*mesh->Nq]*mesh->D[nx+k*mesh->Nq]; } for (int k=0;k<mesh->Nq;k++) { int iid = nx+k*mesh->Nq+nz*mesh->Nq*mesh->Nq; dfloat Gss = mesh->ggeo[base + iid + G11ID*mesh->Np]; A[idn] += Gss*mesh->D[ny+k*mesh->Nq]*mesh->D[ny+k*mesh->Nq]; } for (int k=0;k<mesh->Nq;k++) { int iid = nx+ny*mesh->Nq+k*mesh->Nq*mesh->Nq; dfloat Gtt = mesh->ggeo[base + iid + G22ID*mesh->Np]; A[idn] += Gtt*mesh->D[nz+k*mesh->Nq]*mesh->D[nz+k*mesh->Nq]; } dfloat JW = mesh->ggeo[base + id + GWJID*mesh->Np]; A[idn] += JW*lambda; } else { A[idn] = 1; //just put a 1 so A is invertable } } } } //add the rank boost for the allNeumann Poisson problem if (elliptic->allNeumann) { for(int n=0;n<mesh->Np;++n){ if (elliptic->mapB[n+eM*mesh->Np]!=1) { //dont fill rows for masked nodes A[n] += elliptic->allNeumannPenalty*elliptic->allNeumannScale*elliptic->allNeumannScale; } } } }
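The routines above only build the diagonal and its inverse (invDiagA); applying the resulting Jacobi preconditioner is just an elementwise scaling of a residual vector. A minimal sketch follows, assuming illustrative names Ntotal, r and z that are not identifiers from this file (Ntotal would correspond to mesh->Nelements*mesh->Np):

// Hedged sketch, not part of this file: apply the diagonal (Jacobi)
// preconditioner z = invDiagA .* r built by ellipticBuildJacobi.
void ellipticJacobiApply(dlong Ntotal, const dfloat *invDiagA,
                         const dfloat *r, dfloat *z)
{
  #pragma omp parallel for
  for (dlong n = 0; n < Ntotal; ++n) {
    z[n] = invDiagA[n] * r[n];   // each entry scaled independently
  }
}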
integral_omp_sync.c
// Author: Fabio Rodrigues Pereira // E-mail: fabior@uio.no // compiling & running // clang -Xpreprocessor -fopenmp integral_omp_sync.c -lomp // ./a.out // SPMD technique *video9 #include <stdlib.h> // rand, malloc, calloc and free. #include <stdio.h> // printf #include <math.h> #include <time.h> #include <omp.h> #define NUM_THREADS 2 int main() { static long num_steps = 100000; double step; double pi=0.0; step=1.0/(double)num_steps; omp_set_num_threads(NUM_THREADS); #pragma omp parallel { int i, id, nthrds; double x, sum; // being sure that the compiler gave a correct num of threads id = omp_get_thread_num(); nthrds = omp_get_num_threads(); for (i = id, sum = 0.0; i < num_steps; i = i + nthrds) { x = (i + 0.5) * step; sum += 4.0 / (1.0 + x * x); } #pragma omp critical pi+=sum*step; } printf("%f", pi); return 0; }
// Author: Fabio Rodrigues Pereira // E-mail: fabior@uio.no // compiling & running // clang -Xpreprocessor -fopenmp integral_omp_sync.c -lomp // ./a.out // SPMD technique *video9 #include <stdlib.h> // rand, malloc, calloc and free. #include <stdio.h> // printf #include <math.h> #include <time.h> #include <omp.h> #define NUM_THREADS 2 int main() { static long num_steps = 100000; double step; double pi=0.0; step=1.0/(double)num_steps; omp_set_num_threads(NUM_THREADS); int i, id, nthrds; double x, sum; // being sure that the compiler gave a correct num of threads id = omp_get_thread_num(); nthrds = omp_get_num_threads(); for (i = id, sum = 0.0; i < num_steps; i = i + nthrds) { x = (i + 0.5) * step; sum += 4.0 / (1.0 + x * x); } pi+=sum*step; printf("%f", pi); return 0; }
// Author: Fabio Rodrigues Pereira // E-mail: fabior@uio.no // compiling & running // clang -Xpreprocessor -fopenmp integral_omp_sync.c -lomp // ./a.out // SPMD technique *video9 #include <stdlib.h> // rand, malloc, calloc and free. #include <stdio.h> // printf #include <math.h> #include <time.h> #include <omp.h> #define NUM_THREADS 2 int main() { static long num_steps = 100000; double step; double pi=0.0; step=1.0/(double)num_steps; omp_set_num_threads(NUM_THREADS); #pragma omp parallel { int i, id, nthrds; double x, sum; // being sure that the compiler gave a correct num of threads id = omp_get_thread_num(); nthrds = omp_get_num_threads(); for (i = id, sum = 0.0; i < num_steps; i = i + nthrds) { x = (i + 0.5) * step; sum += 4.0 / (1.0 + x * x); } #pragma omp critical pi+=sum*step; } printf("%f", pi); return 0; }
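The file above uses the SPMD pattern: each thread strides over the iterations, keeps a private partial sum, and folds it into pi inside a critical section. The same integral can be written with an OpenMP reduction clause, which removes both the manual thread indexing and the critical section. A minimal sketch with the same step count and thread count (an alternative, not code from the file):

#include <stdio.h>
#include <omp.h>
#define NUM_THREADS 2

int main() {
    static long num_steps = 100000;
    double step = 1.0 / (double) num_steps;
    double pi = 0.0;
    omp_set_num_threads(NUM_THREADS);
    // The reduction clause gives each thread a private copy of pi and
    // combines them at the end, so no critical section is needed.
    #pragma omp parallel for reduction(+:pi)
    for (long i = 0; i < num_steps; i++) {
        double x = (i + 0.5) * step;
        pi += 4.0 / (1.0 + x * x);
    }
    pi *= step;
    printf("%f\n", pi);
    return 0;
}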
GB_unaryop__abs_int64_uint64.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__abs_int64_uint64 // op(A') function: GB_tran__abs_int64_uint64 // C type: int64_t // A type: uint64_t // cast: int64_t cij = (int64_t) aij // unaryop: cij = GB_IABS (aij) #define GB_ATYPE \ uint64_t #define GB_CTYPE \ int64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IABS (x) ; // casting #define GB_CASTING(z, x) \ int64_t z = (int64_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ABS || GxB_NO_INT64 || GxB_NO_UINT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__abs_int64_uint64 ( int64_t *restrict Cx, const uint64_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__abs_int64_uint64 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__abs_int64_uint64 // op(A') function: GB_tran__abs_int64_uint64 // C type: int64_t // A type: uint64_t // cast: int64_t cij = (int64_t) aij // unaryop: cij = GB_IABS (aij) #define GB_ATYPE \ uint64_t #define GB_CTYPE \ int64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IABS (x) ; // casting #define GB_CASTING(z, x) \ int64_t z = (int64_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ABS || GxB_NO_INT64 || GxB_NO_UINT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__abs_int64_uint64 ( int64_t *restrict Cx, const uint64_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__abs_int64_uint64 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__abs_int64_uint64 // op(A') function: GB_tran__abs_int64_uint64 // C type: int64_t // A type: uint64_t // cast: int64_t cij = (int64_t) aij // unaryop: cij = GB_IABS (aij) #define GB_ATYPE \ uint64_t #define GB_CTYPE \ int64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IABS (x) ; // casting #define GB_CASTING(z, x) \ int64_t z = (int64_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ABS || GxB_NO_INT64 || GxB_NO_UINT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__abs_int64_uint64 ( int64_t *restrict Cx, const uint64_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__abs_int64_uint64 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
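With the macros substituted, the hot loop of GB_unop__abs_int64_uint64 above reduces to a typecast followed by an integer absolute value, applied elementwise under the same parallel-for directive. A standalone sketch of that expansion, assuming GB_IABS is the usual integer absolute value (this is an illustration, not code from the generated file):

#include <stdint.h>

// Hedged expansion sketch of the macro-generated kernel above.
void abs_int64_from_uint64 (int64_t *Cx, const uint64_t *Ax,
                            int64_t anz, int nthreads)
{
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        int64_t x = (int64_t) Ax [p] ;    // GB_CASTING: typecast aij
        Cx [p] = (x < 0) ? (-x) : x ;     // GB_OP: GB_IABS
    }
}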
linear.c
//IN method. //Linear transformation (weights only, no biases) of Ni inputs to No outputs. //Input X has Ni neurons and output Y has No neurons. //The vecs of length Ni are always contiguous in memory, such that: //If col-major: Y[:,l] = W' * X[:,l] //where: //X has size Ni x L //Y has size No x L //W has size Ni x No //If row-major: Y[l,:] = X[l,:] * W' //X has size L x Ni //Y has size L x No //W has size No x Ni //For a different set-up that allows linear transformation of vecs in //any orientation, use the linear function from math. //#include <omp.h> #ifdef __cplusplus namespace codee { extern "C" { #endif int linear_s (float *Y, const float *X, const float *W, const size_t Ni, const size_t No, const size_t L); int linear_d (double *Y, const double *X, const double *W, const size_t Ni, const size_t No, const size_t L); int linear_c (float *Y, const float *X, const float *W, const size_t Ni, const size_t No, const size_t L); int linear_z (double *Y, const double *X, const double *W, const size_t Ni, const size_t No, const size_t L); int linear_s (float *Y, const float *X, const float *W, const size_t Ni, const size_t No, const size_t L) { const size_t Nw = Ni*No; float sm; for (size_t l=L; l>0u; --l, W-=Nw, X+=Ni) { for (size_t o=No; o>0u; --o, X-=Ni, ++Y) { sm = 0.0f; for (size_t i=Ni; i>0u; --i, ++X, ++W) { sm += *X * *W; } *Y = sm; } } return 0; } int linear_d (double *Y, const double *X, const double *W, const size_t Ni, const size_t No, const size_t L) { const size_t Nw = Ni*No; double sm; for (size_t l=L; l>0u; --l, W-=Nw, X+=Ni) { for (size_t o=No; o>0u; --o, X-=Ni, ++Y) { sm = 0.0; for (size_t i=Ni; i>0u; --i, ++X, ++W) { sm += *X * *W; } *Y = sm; } } return 0; } int linear_c (float *Y, const float *X, const float *W, const size_t Ni, const size_t No, const size_t L) { const size_t Nw = Ni*No; float smr, smi; for (size_t l=L; l>0u; --l, W-=2u*Nw, X+=2u*Ni) { for (size_t o=No; o>0u; --o, X-=2u*Ni) { smr = smi = 0.0f; for (size_t i=Ni; i>0u; --i, X+=2, W+=2) { smr += *X**W - *(X+1)**(W+1); smi += *X**(W+1) + *(X+1)**W; } *Y++ = smr; *Y++ = smi; } } return 0; } int linear_z (double *Y, const double *X, const double *W, const size_t Ni, const size_t No, const size_t L) { const size_t Nw = Ni*No; double smr, smi; for (size_t l=L; l>0u; --l, W-=2u*Nw, X+=2u*Ni) { for (size_t o=No; o>0u; --o, X-=2u*Ni) { smr = smi = 0.0; for (size_t i=Ni; i>0u; --i, X+=2, W+=2) { smr += *X**W - *(X+1)**(W+1); smi += *X**(W+1) + *(X+1)**W; } *Y++ = smr; *Y++ = smi; } } return 0; } //Although this compiles and runs, it does not give the right output // int linear_omp_s (float *Y, const float *X, const float *W, const size_t Ni, const size_t No, const size_t L) // { // for (size_t l=L; l>0u; --l) // { // #pragma omp parallel for // for (size_t o=No; o>0u; --o) // { // float sm = 0.0f; // for (size_t i=Ni; i>0u; --i) // { // sm += X[i+l*Ni] * W[i+o*Ni]; // } // Y[o] = sm; // } // } // return 0; // } #ifdef __cplusplus } } #endif
//IN method. //Linear transformation (weights only, no biases) of Ni inputs to No outputs. //Input X has Ni neurons and output Y has No neurons. //The vecs of length Ni are always contiguous in memory, such that: //If col-major: Y[:,l] = W' * X[:,l] //where: //X has size Ni x L //Y has size No x L //W has size Ni x No //If row-major: Y[l,:] = X[l,:] * W' //X has size L x Ni //Y has size L x No //W has size No x Ni //For a different set-up that allows linear transformation of vecs in //any orientation, use the linear function from math. //#include <omp.h> #ifdef __cplusplus namespace codee { extern "C" { #endif int linear_s (float *Y, const float *X, const float *W, const size_t Ni, const size_t No, const size_t L); int linear_d (double *Y, const double *X, const double *W, const size_t Ni, const size_t No, const size_t L); int linear_c (float *Y, const float *X, const float *W, const size_t Ni, const size_t No, const size_t L); int linear_z (double *Y, const double *X, const double *W, const size_t Ni, const size_t No, const size_t L); int linear_s (float *Y, const float *X, const float *W, const size_t Ni, const size_t No, const size_t L) { const size_t Nw = Ni*No; float sm; for (size_t l=L; l>0u; --l, W-=Nw, X+=Ni) { for (size_t o=No; o>0u; --o, X-=Ni, ++Y) { sm = 0.0f; for (size_t i=Ni; i>0u; --i, ++X, ++W) { sm += *X * *W; } *Y = sm; } } return 0; } int linear_d (double *Y, const double *X, const double *W, const size_t Ni, const size_t No, const size_t L) { const size_t Nw = Ni*No; double sm; for (size_t l=L; l>0u; --l, W-=Nw, X+=Ni) { for (size_t o=No; o>0u; --o, X-=Ni, ++Y) { sm = 0.0; for (size_t i=Ni; i>0u; --i, ++X, ++W) { sm += *X * *W; } *Y = sm; } } return 0; } int linear_c (float *Y, const float *X, const float *W, const size_t Ni, const size_t No, const size_t L) { const size_t Nw = Ni*No; float smr, smi; for (size_t l=L; l>0u; --l, W-=2u*Nw, X+=2u*Ni) { for (size_t o=No; o>0u; --o, X-=2u*Ni) { smr = smi = 0.0f; for (size_t i=Ni; i>0u; --i, X+=2, W+=2) { smr += *X**W - *(X+1)**(W+1); smi += *X**(W+1) + *(X+1)**W; } *Y++ = smr; *Y++ = smi; } } return 0; } int linear_z (double *Y, const double *X, const double *W, const size_t Ni, const size_t No, const size_t L) { const size_t Nw = Ni*No; double smr, smi; for (size_t l=L; l>0u; --l, W-=2u*Nw, X+=2u*Ni) { for (size_t o=No; o>0u; --o, X-=2u*Ni) { smr = smi = 0.0; for (size_t i=Ni; i>0u; --i, X+=2, W+=2) { smr += *X**W - *(X+1)**(W+1); smi += *X**(W+1) + *(X+1)**W; } *Y++ = smr; *Y++ = smi; } } return 0; } //Although this compiles and runs, it does not give the right output // int linear_omp_s (float *Y, const float *X, const float *W, const size_t Ni, const size_t No, const size_t L) // { // for (size_t l=L; l>0u; --l) // { // // for (size_t o=No; o>0u; --o) // { // float sm = 0.0f; // for (size_t i=Ni; i>0u; --i) // { // sm += X[i+l*Ni] * W[i+o*Ni]; // } // Y[o] = sm; // } // } // return 0; // } #ifdef __cplusplus } } #endif
//IN method. //Linear transformation (weights only, no biases) of Ni inputs to No outputs. //Input X has Ni neurons and output Y has No neurons. //The vecs of length Ni are always contiguous in memory, such that: //If col-major: Y[:,l] = W' * X[:,l] //where: //X has size Ni x L //Y has size No x L //W has size Ni x No //If row-major: Y[l,:] = X[l,:] * W' //X has size L x Ni //Y has size L x No //W has size No x Ni //For a different set-up that allows linear transformation of vecs in //any orientation, use the linear function from math. //#include <omp.h> #ifdef __cplusplus namespace codee { extern "C" { #endif int linear_s (float *Y, const float *X, const float *W, const size_t Ni, const size_t No, const size_t L); int linear_d (double *Y, const double *X, const double *W, const size_t Ni, const size_t No, const size_t L); int linear_c (float *Y, const float *X, const float *W, const size_t Ni, const size_t No, const size_t L); int linear_z (double *Y, const double *X, const double *W, const size_t Ni, const size_t No, const size_t L); int linear_s (float *Y, const float *X, const float *W, const size_t Ni, const size_t No, const size_t L) { const size_t Nw = Ni*No; float sm; for (size_t l=L; l>0u; --l, W-=Nw, X+=Ni) { for (size_t o=No; o>0u; --o, X-=Ni, ++Y) { sm = 0.0f; for (size_t i=Ni; i>0u; --i, ++X, ++W) { sm += *X * *W; } *Y = sm; } } return 0; } int linear_d (double *Y, const double *X, const double *W, const size_t Ni, const size_t No, const size_t L) { const size_t Nw = Ni*No; double sm; for (size_t l=L; l>0u; --l, W-=Nw, X+=Ni) { for (size_t o=No; o>0u; --o, X-=Ni, ++Y) { sm = 0.0; for (size_t i=Ni; i>0u; --i, ++X, ++W) { sm += *X * *W; } *Y = sm; } } return 0; } int linear_c (float *Y, const float *X, const float *W, const size_t Ni, const size_t No, const size_t L) { const size_t Nw = Ni*No; float smr, smi; for (size_t l=L; l>0u; --l, W-=2u*Nw, X+=2u*Ni) { for (size_t o=No; o>0u; --o, X-=2u*Ni) { smr = smi = 0.0f; for (size_t i=Ni; i>0u; --i, X+=2, W+=2) { smr += *X**W - *(X+1)**(W+1); smi += *X**(W+1) + *(X+1)**W; } *Y++ = smr; *Y++ = smi; } } return 0; } int linear_z (double *Y, const double *X, const double *W, const size_t Ni, const size_t No, const size_t L) { const size_t Nw = Ni*No; double smr, smi; for (size_t l=L; l>0u; --l, W-=2u*Nw, X+=2u*Ni) { for (size_t o=No; o>0u; --o, X-=2u*Ni) { smr = smi = 0.0; for (size_t i=Ni; i>0u; --i, X+=2, W+=2) { smr += *X**W - *(X+1)**(W+1); smi += *X**(W+1) + *(X+1)**W; } *Y++ = smr; *Y++ = smi; } } return 0; } //Although this compiles and runs, it does not give the right output // int linear_omp_s (float *Y, const float *X, const float *W, const size_t Ni, const size_t No, const size_t L) // { // for (size_t l=L; l>0u; --l) // { // #pragma omp parallel for // for (size_t o=No; o>0u; --o) // { // float sm = 0.0f; // for (size_t i=Ni; i>0u; --i) // { // sm += X[i+l*Ni] * W[i+o*Ni]; // } // Y[o] = sm; // } // } // return 0; // } #ifdef __cplusplus } } #endif
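The commented-out linear_omp_s above appears to fail because its countdown loops run from N down to 1, so X[i+l*Ni] and W[i+o*Ni] read one element past the intended positions (out of bounds for the last l), and Y[o] is never offset by the sample index l. A minimal sketch of a corrected OpenMP variant, using the flattened indices implied by the pointer arithmetic of linear_s (Y[o + l*No], X[i + l*Ni], W[i + o*Ni]); this is an assumption-level rewrite, not code from the file:

// Hedged sketch of a working OpenMP version of linear_s (not in the file).
int linear_omp_s (float *Y, const float *X, const float *W,
                  const size_t Ni, const size_t No, const size_t L)
{
    for (size_t l=0u; l<L; ++l)
    {
        #pragma omp parallel for
        for (size_t o=0u; o<No; ++o)
        {
            float sm = 0.0f;
            for (size_t i=0u; i<Ni; ++i)
            {
                sm += X[i + l*Ni] * W[i + o*Ni];
            }
            Y[o + l*No] = sm;   // each output written by exactly one thread
        }
    }
    return 0;
}

Parallelising over the output index o means every Y element is written by a single thread, so no reduction or critical section is required.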
raytracer.h
#pragma once #include "resource.h" #include <linalg.h> #include <memory> #include <omp.h> #include <random> #include <time.h> using namespace linalg::aliases; namespace cg::renderer { struct ray { ray(float3 position, float3 direction) : position(position) { this->direction = normalize(direction); } float3 position; float3 direction; }; struct payload { float t; float3 bary; cg::color color; }; template<typename VB> struct triangle { triangle(const VB& vertex_a, const VB& vertex_b, const VB& vertex_c); float3 a; float3 b; float3 c; float3 ba; float3 ca; float3 na; float3 nb; float3 nc; float3 ambient; float3 diffuse; float3 emissive; }; template<typename VB> inline triangle<VB>::triangle(const VB& vertex_a, const VB& vertex_b, const VB& vertex_c) { a = float3{ vertex_a.x, vertex_a.y, vertex_a.z }; b = float3{ vertex_b.x, vertex_b.y, vertex_b.z }; c = float3{ vertex_c.x, vertex_c.y, vertex_c.z }; ba = b - a; ca = c - a; na = float3{ vertex_a.nx, vertex_a.ny, vertex_a.nz }; nb = float3{ vertex_b.nx, vertex_b.ny, vertex_b.nz }; nc = float3{ vertex_c.nx, vertex_c.ny, vertex_c.nz }; ambient = { vertex_a.ambient_r, vertex_a.ambient_g, vertex_a.ambient_b, }; diffuse = { vertex_a.diffuse_r, vertex_a.diffuse_g, vertex_a.diffuse_b, }; emissive = { vertex_a.emissive_r, vertex_a.emissive_g, vertex_a.emissive_b, }; } template<typename VB> class aabb { public: void add_triangle(const triangle<VB> triangle); const std::vector<triangle<VB>>& get_traingles() const; bool aabb_test(const ray& ray) const; protected: std::vector<triangle<VB>> triangles; float3 aabb_min; float3 aabb_max; }; struct light { float3 position; float3 color; }; template<typename VB, typename RT> class raytracer { public: raytracer(){}; ~raytracer(){}; void set_render_target(std::shared_ptr<resource<RT>> in_render_target); void clear_render_target(const RT& in_clear_value); void set_viewport(size_t in_width, size_t in_height); void set_per_shape_vertex_buffer( std::vector<std::shared_ptr<cg::resource<VB>>> in_per_shape_vertex_buffer); void build_acceleration_structure(); std::vector<aabb<VB>> acceleration_structures; void ray_generation(float3 position, float3 direction, float3 right, float3 up); payload trace_ray(const ray& ray, size_t depth, float max_t = 1000.f, float min_t = 0.001f) const; payload intersection_shader(const triangle<VB>& triangle, const ray& ray) const; std::function<payload(const ray& ray)> miss_shader = nullptr; std::function<payload(const ray& ray, payload& payload, const triangle<VB>& triangle)> closest_hit_shader = nullptr; std::function<payload(const ray& ray, payload& payload, const triangle<VB>& triangle)> any_hit_shader = nullptr; protected: std::shared_ptr<cg::resource<RT>> render_target; std::vector<std::shared_ptr<cg::resource<VB>>> per_shape_vertex_buffer; float get_random(const int thread_num, float range = 0.1f) const; size_t width = 1920; size_t height = 1080; }; template<typename VB, typename RT> inline void raytracer<VB, RT>::set_render_target(std::shared_ptr<resource<RT>> in_render_target) { render_target = in_render_target; } template<typename VB, typename RT> inline void raytracer<VB, RT>::clear_render_target(const RT& in_clear_value) { for (size_t i = 0; i < render_target->get_number_of_elements(); i++) { render_target->item(i) = in_clear_value; } } template<typename VB, typename RT> inline void raytracer<VB, RT>::set_per_shape_vertex_buffer( std::vector<std::shared_ptr<cg::resource<VB>>> in_per_shape_vertex_buffer) { per_shape_vertex_buffer = in_per_shape_vertex_buffer; } template<typename 
VB, typename RT> inline void raytracer<VB, RT>::build_acceleration_structure() { for (auto& vertex_buffer : per_shape_vertex_buffer) { size_t vertex_id = 0; aabb<VB> aabb; while (vertex_id < vertex_buffer->get_number_of_elements()) { triangle<VB> triangle( vertex_buffer->item(vertex_id++), vertex_buffer->item(vertex_id++), vertex_buffer->item(vertex_id++)); aabb.add_triangle(triangle); } acceleration_structures.push_back(aabb); } } template<typename VB, typename RT> inline void raytracer<VB, RT>::set_viewport(size_t in_width, size_t in_height) { width = in_width; height = in_height; } template<typename VB, typename RT> inline void raytracer<VB, RT>::ray_generation( float3 position, float3 direction, float3 right, float3 up) { for (int x = 0; x < width; x++) { #pragma omp parallel for for (int y = 0; y < height; y++) { // from [0, width-1] to [-1, 1] float u = 2.f * x / static_cast<float>(width - 1) - 1.f; u *= static_cast<float>(width) / static_cast<float>(height); float v = 2.f * y / static_cast<float>(height - 1) - 1.f; float3 ray_direction = direction + u * right - v * up; ray ray(position, ray_direction); payload payload = trace_ray(ray, 1); render_target->item(x, y) = RT::from_color(payload.color); } } } template<typename VB, typename RT> inline payload raytracer<VB, RT>::trace_ray(const ray& ray, size_t depth, float max_t, float min_t) const { if (depth == 0) { return miss_shader(ray); } depth--; payload closest_hit_payload = {}; closest_hit_payload.t = max_t; const triangle<VB>* closest_triangle = nullptr; for (auto& aabb : acceleration_structures) { if (!aabb.aabb_test(ray)) continue; for (auto& triangle : aabb.get_traingles()) { payload payload = intersection_shader(triangle, ray); if (payload.t > min_t && payload.t < closest_hit_payload.t) { closest_hit_payload = payload; closest_triangle = &triangle; if (any_hit_shader) return any_hit_shader(ray, payload, triangle); } } } if (closest_hit_payload.t < max_t) { if (closest_hit_shader) { return closest_hit_shader(ray, closest_hit_payload, *closest_triangle); } } return miss_shader(ray); } template<typename VB, typename RT> inline payload raytracer<VB, RT>::intersection_shader(const triangle<VB>& triangle, const ray& ray) const { payload payload{}; payload.t = -1.f; float3 pvec = cross(ray.direction, triangle.ca); float det = dot(triangle.ba, pvec); const float eps = 1e-8f; if (abs(det) < eps) { return payload; } float inv_det = 1.f / det; float3 tvec = ray.position - triangle.a; float u = dot(tvec, pvec) * inv_det; if (u < 0.f || u > 1.f) { return payload; } float3 qvec = cross(tvec, triangle.ba); float v = dot(ray.direction, qvec) * inv_det; if (v < 0.f || (u + v) > 1.f) { return payload; } payload.t = dot(triangle.ca, qvec) * inv_det; payload.bary = float3{ 1.f - u - v, u, v }; return payload; } template<typename VB, typename RT> inline float raytracer<VB, RT>::get_random(const int thread_num, const float range) const { static std::default_random_engine generator(thread_num); static std::normal_distribution<float> distribution(0.f, range); return distribution(generator); } template<typename VB> inline void aabb<VB>::add_triangle(const triangle<VB> triangle) { if (triangles.empty()) aabb_max = aabb_min = triangle.a; triangles.push_back(triangle); aabb_max = max(triangle.a, aabb_max); aabb_max = max(triangle.b, aabb_max); aabb_max = max(triangle.c, aabb_max); aabb_min = min(triangle.a, aabb_min); aabb_min = min(triangle.b, aabb_min); aabb_min = min(triangle.c, aabb_min); } template<typename VB> inline const 
std::vector<triangle<VB>>& aabb<VB>::get_traingles() const { return triangles; } template<typename VB> inline bool aabb<VB>::aabb_test(const ray& ray) const { float3 inv_ray_direction = float3(1.f) / ray.direction; float3 t0 = (aabb_max - ray.position) * inv_ray_direction; float3 t1 = (aabb_min - ray.position) * inv_ray_direction; float3 tmin = min(t0, t1); float3 tmax = max(t0, t1); return maxelem(tmin) <= minelem(tmax); } } // namespace cg::renderer
#pragma once #include "resource.h" #include <linalg.h> #include <memory> #include <omp.h> #include <random> #include <time.h> using namespace linalg::aliases; namespace cg::renderer { struct ray { ray(float3 position, float3 direction):position(position) { this->direction = normalize(direction); } float3 position; float3 direction; }; struct payload { float t; float3 bary; cg: : color color; }; template < typename VB > struct triangle { triangle(const VB & vertex_a, const VB & vertex_b, const VB & vertex_c); float3 a; float3 b; float3 c; float3 ba; float3 ca; float3 na; float3 nb; float3 nc; float3 ambient; float3 diffuse; float3 emissive; }; template < typename VB > inline triangle < VB >: : triangle(const VB & vertex_a, const VB & vertex_b, const VB & vertex_c) { a = float3 { vertex_a.x, vertex_a.y, vertex_a.z }; b = float3 { vertex_b.x, vertex_b.y, vertex_b.z }; c = float3 { vertex_c.x, vertex_c.y, vertex_c.z }; ba = b - a; ca = c - a; na = float3 { vertex_a.nx, vertex_a.ny, vertex_a.nz }; nb = float3 { vertex_b.nx, vertex_b.ny, vertex_b.nz }; nc = float3 { vertex_c.nx, vertex_c.ny, vertex_c.nz }; ambient = { vertex_a.ambient_r, vertex_a.ambient_g, vertex_a.ambient_b, }; diffuse = { vertex_a.diffuse_r, vertex_a.diffuse_g, vertex_a.diffuse_b, }; emissive = { vertex_a.emissive_r, vertex_a.emissive_g, vertex_a.emissive_b, }; } template < typename VB > class aabb { public: void add_triangle(const triangle < VB > triangle); const std::vector < triangle < VB >> &get_traingles() const; bool aabb_test(const ray & ray)const; protected: std: : vector < triangle < VB >> triangles; float3 aabb_min; float3 aabb_max; }; struct light { float3 position; float3 color; }; template < typename VB, typename RT > class raytracer { public: raytracer() { }; ~raytracer() { }; void set_render_target(std::shared_ptr < resource < RT >> in_render_target); void clear_render_target(const RT & in_clear_value); void set_viewport(size_t in_width, size_t in_height); void set_per_shape_vertex_buffer( std::vector < std::shared_ptr < cg::resource < VB >> >in_per_shape_vertex_buffer); void build_acceleration_structure(); std: : vector < aabb < VB >> acceleration_structures; void ray_generation(float3 position, float3 direction, float3 right, float3 up); payload trace_ray(const ray & ray, size_t depth, float max_t = 1000. 
f, float min_t = 0.001 f)const; payload intersection_shader(const triangle < VB > &triangle, const ray & ray)const; std: : function < payload(const ray & ray)>miss_shader = nullptr; std: : function < payload(const ray & ray, payload & payload, const triangle < VB > &triangle)>closest_hit_shader = nullptr; std: : function < payload(const ray & ray, payload & payload, const triangle < VB > &triangle)>any_hit_shader = nullptr; protected: std: : shared_ptr < cg: :resource < RT >> render_target; std: : vector < std: : shared_ptr < cg: :resource < VB >> >per_shape_vertex_buffer; float get_random(const int thread_num, float range = 0.1 f)const; size_t width = 1920; size_t height = 1080; }; template < typename VB, typename RT > inline void raytracer < VB, RT >::set_render_target(std::shared_ptr < resource < RT >> in_render_target) { render_target = in_render_target; } template < typename VB, typename RT > inline void raytracer < VB, RT >::clear_render_target(const RT & in_clear_value) { for (size_t i = 0; i < render_target->get_number_of_elements(); i++) { render_target->item(i) = in_clear_value; } } template < typename VB, typename RT > inline void raytracer < VB, RT >::set_per_shape_vertex_buffer( std::vector < std::shared_ptr < cg::resource < VB >> >in_per_shape_vertex_buffer) { per_shape_vertex_buffer = in_per_shape_vertex_buffer; } template < typename VB, typename RT > inline void raytracer < VB, RT >::build_acceleration_structure() { for (auto & vertex_buffer:per_shape_vertex_buffer) { size_t vertex_id = 0; aabb < VB > aabb; while (vertex_id < vertex_buffer->get_number_of_elements()) { triangle < VB > triangle( vertex_buffer->item(vertex_id++), vertex_buffer->item(vertex_id++), vertex_buffer->item(vertex_id++)); aabb.add_triangle(triangle); } acceleration_structures.push_back(aabb); } } template < typename VB, typename RT > inline void raytracer < VB, RT >::set_viewport(size_t in_width, size_t in_height) { width = in_width; height = in_height; } template < typename VB, typename RT > inline void raytracer < VB, RT >::ray_generation( float3 position, float3 direction, float3 right, float3 up) { for (int x = 0; x < width; x++) { for (int y = 0; y < height; y++) { //from[0, width - 1] to[-1, 1] float u = 2. f * x / static_cast < float >(width - 1) - 1. f; u *= static_cast < float >(width) / static_cast < float >(height); float v = 2. f * y / static_cast < float >(height - 1) - 1. 
f; float3 ray_direction = direction + u * right - v * up; ray ray(position, ray_direction); payload payload = trace_ray(ray, 1); render_target->item(x, y) = RT: : from_color(payload.color); } } } template < typename VB, typename RT > inline payload raytracer < VB, RT >: : trace_ray(const ray & ray, size_t depth, float max_t, float min_t)const { if (depth == 0) { return miss_shader(ray); } depth--; payload closest_hit_payload = {}; closest_hit_payload.t = max_t; const triangle < VB > *closest_triangle = nullptr; for (auto & aabb: acceleration_structures) { if (!aabb.aabb_test(ray)) continue; for (auto & triangle:aabb.get_traingles()) { payload payload = intersection_shader(triangle, ray); if (payload.t > min_t && payload.t < closest_hit_payload.t) { closest_hit_payload = payload; closest_triangle = &triangle; if (any_hit_shader) return any_hit_shader(ray, payload, triangle); } } } if (closest_hit_payload.t < max_t) { if (closest_hit_shader) { return closest_hit_shader(ray, closest_hit_payload, *closest_triangle); } } return miss_shader(ray); } template < typename VB, typename RT > inline payload raytracer < VB, RT >: :intersection_shader(const triangle < VB > &triangle, const ray & ray)const { payload payload { }; payload.t = -1. f; float3 pvec = cross(ray.direction, triangle.ca); float det = dot(triangle.ba, pvec); const float eps = 1e-8 f; if (abs(det) < eps) { return payload; } float inv_det = 1. f / det; float3 tvec = ray.position - triangle.a; float u = dot(tvec, pvec) * inv_det; if (u < 0. f || u > 1. f) { return payload; } float3 qvec = cross(tvec, triangle.ba); float v = dot(ray.direction, qvec) * inv_det; if (v < 0. f || (u + v) > 1. f) { return payload; } payload.t = dot(triangle.ca, qvec) * inv_det; payload.bary = float3 { 1. f - u - v, u, v }; return payload; } template < typename VB, typename RT > inline float raytracer < VB, RT >::get_random(const int thread_num, const float range)const { static std::default_random_engine generator(thread_num); static std::normal_distribution < float >distribution(0. f, range); return distribution(generator); } template < typename VB > inline void aabb < VB >::add_triangle(const triangle < VB > triangle) { if (triangles.empty()) aabb_max = aabb_min = triangle.a; triangles.push_back(triangle); aabb_max = max(triangle.a, aabb_max); aabb_max = max(triangle.b, aabb_max); aabb_max = max(triangle.c, aabb_max); aabb_min = min(triangle.a, aabb_min); aabb_min = min(triangle.b, aabb_min); aabb_min = min(triangle.c, aabb_min); } template < typename VB > inline const std::vector < triangle < VB >> &aabb < VB >::get_traingles() const { return triangles; } template < typename VB > inline bool aabb < VB >: : aabb_test(const ray & ray)const { float3 inv_ray_direction = float3(1. f) / ray.direction; float3 t0 = (aabb_max - ray.position) * inv_ray_direction; float3 t1 = (aabb_min - ray.position) * inv_ray_direction; float3 tmin = min(t0, t1); float3 tmax = max(t0, t1); return maxelem(tmin) <= minelem(tmax); } } //namespace cg: : renderer
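A note on aabb_test() in the listing above: it is the standard slab test, intersecting the ray with the three pairs of axis-aligned bounding planes and reporting a hit when the latest per-axis entry time is no later than the earliest per-axis exit time (the maxelem(tmin) <= minelem(tmax) comparison). Below is a standalone sketch of the same predicate; Vec3 is a hypothetical stand-in for the project's linalg float3, and the ray in main() is illustrative only.

#include <algorithm>
#include <cstdio>

struct Vec3 { float x, y, z; };

/* Slab test: t0/t1 are the ray parameters at the two bounding planes of each
   axis; the box is hit iff the largest entry time <= the smallest exit time. */
static bool slab_test(const Vec3 &origin, const Vec3 &inv_dir,
                      const Vec3 &box_min, const Vec3 &box_max)
{
    const float o[3]  = {origin.x, origin.y, origin.z};
    const float id[3] = {inv_dir.x, inv_dir.y, inv_dir.z};
    const float lo[3] = {box_min.x, box_min.y, box_min.z};
    const float hi[3] = {box_max.x, box_max.y, box_max.z};
    float t_enter = -1e30f, t_exit = 1e30f;
    for (int axis = 0; axis < 3; ++axis) {
        const float t0 = (lo[axis] - o[axis]) * id[axis];
        const float t1 = (hi[axis] - o[axis]) * id[axis];
        t_enter = std::max(t_enter, std::min(t0, t1)); /* latest entry  */
        t_exit  = std::min(t_exit,  std::max(t0, t1)); /* earliest exit */
    }
    return t_enter <= t_exit;
}

int main()
{
    const Vec3 origin{0.f, 0.f, -5.f};
    const Vec3 inv_dir{1e30f, 1e30f, 1.f};  /* ray along +z; 1/0 clamped for the sketch */
    const Vec3 box_min{-1.f, -1.f, -1.f}, box_max{1.f, 1.f, 1.f};
    std::printf("hit: %d\n", slab_test(origin, inv_dir, box_min, box_max) ? 1 : 0); /* hit: 1 */
    return 0;
}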
#pragma once #include "resource.h" #include <linalg.h> #include <memory> #include <omp.h> #include <random> #include <time.h> using namespace linalg::aliases; namespace cg::renderer { struct ray { ray(float3 position, float3 direction):position(position) { this->direction = normalize(direction); } float3 position; float3 direction; }; struct payload { float t; float3 bary; cg: : color color; }; template < typename VB > struct triangle { triangle(const VB & vertex_a, const VB & vertex_b, const VB & vertex_c); float3 a; float3 b; float3 c; float3 ba; float3 ca; float3 na; float3 nb; float3 nc; float3 ambient; float3 diffuse; float3 emissive; }; template < typename VB > inline triangle < VB >: : triangle(const VB & vertex_a, const VB & vertex_b, const VB & vertex_c) { a = float3 { vertex_a.x, vertex_a.y, vertex_a.z }; b = float3 { vertex_b.x, vertex_b.y, vertex_b.z }; c = float3 { vertex_c.x, vertex_c.y, vertex_c.z }; ba = b - a; ca = c - a; na = float3 { vertex_a.nx, vertex_a.ny, vertex_a.nz }; nb = float3 { vertex_b.nx, vertex_b.ny, vertex_b.nz }; nc = float3 { vertex_c.nx, vertex_c.ny, vertex_c.nz }; ambient = { vertex_a.ambient_r, vertex_a.ambient_g, vertex_a.ambient_b, }; diffuse = { vertex_a.diffuse_r, vertex_a.diffuse_g, vertex_a.diffuse_b, }; emissive = { vertex_a.emissive_r, vertex_a.emissive_g, vertex_a.emissive_b, }; } template < typename VB > class aabb { public: void add_triangle(const triangle < VB > triangle); const std::vector < triangle < VB >> &get_traingles() const; bool aabb_test(const ray & ray)const; protected: std: : vector < triangle < VB >> triangles; float3 aabb_min; float3 aabb_max; }; struct light { float3 position; float3 color; }; template < typename VB, typename RT > class raytracer { public: raytracer() { }; ~raytracer() { }; void set_render_target(std::shared_ptr < resource < RT >> in_render_target); void clear_render_target(const RT & in_clear_value); void set_viewport(size_t in_width, size_t in_height); void set_per_shape_vertex_buffer( std::vector < std::shared_ptr < cg::resource < VB >> >in_per_shape_vertex_buffer); void build_acceleration_structure(); std: : vector < aabb < VB >> acceleration_structures; void ray_generation(float3 position, float3 direction, float3 right, float3 up); payload trace_ray(const ray & ray, size_t depth, float max_t = 1000. 
f, float min_t = 0.001 f)const; payload intersection_shader(const triangle < VB > &triangle, const ray & ray)const; std: : function < payload(const ray & ray)>miss_shader = nullptr; std: : function < payload(const ray & ray, payload & payload, const triangle < VB > &triangle)>closest_hit_shader = nullptr; std: : function < payload(const ray & ray, payload & payload, const triangle < VB > &triangle)>any_hit_shader = nullptr; protected: std: : shared_ptr < cg: :resource < RT >> render_target; std: : vector < std: : shared_ptr < cg: :resource < VB >> >per_shape_vertex_buffer; float get_random(const int thread_num, float range = 0.1 f)const; size_t width = 1920; size_t height = 1080; }; template < typename VB, typename RT > inline void raytracer < VB, RT >::set_render_target(std::shared_ptr < resource < RT >> in_render_target) { render_target = in_render_target; } template < typename VB, typename RT > inline void raytracer < VB, RT >::clear_render_target(const RT & in_clear_value) { for (size_t i = 0; i < render_target->get_number_of_elements(); i++) { render_target->item(i) = in_clear_value; } } template < typename VB, typename RT > inline void raytracer < VB, RT >::set_per_shape_vertex_buffer( std::vector < std::shared_ptr < cg::resource < VB >> >in_per_shape_vertex_buffer) { per_shape_vertex_buffer = in_per_shape_vertex_buffer; } template < typename VB, typename RT > inline void raytracer < VB, RT >::build_acceleration_structure() { for (auto & vertex_buffer:per_shape_vertex_buffer) { size_t vertex_id = 0; aabb < VB > aabb; while (vertex_id < vertex_buffer->get_number_of_elements()) { triangle < VB > triangle( vertex_buffer->item(vertex_id++), vertex_buffer->item(vertex_id++), vertex_buffer->item(vertex_id++)); aabb.add_triangle(triangle); } acceleration_structures.push_back(aabb); } } template < typename VB, typename RT > inline void raytracer < VB, RT >::set_viewport(size_t in_width, size_t in_height) { width = in_width; height = in_height; } template < typename VB, typename RT > inline void raytracer < VB, RT >::ray_generation( float3 position, float3 direction, float3 right, float3 up) { for (int x = 0; x < width; x++) { #pragma omp parallel for for (int y = 0; y < height; y++) { //from[0, width - 1] to[-1, 1] float u = 2. f * x / static_cast < float >(width - 1) - 1. f; u *= static_cast < float >(width) / static_cast < float >(height); float v = 2. f * y / static_cast < float >(height - 1) - 1. 
f; float3 ray_direction = direction + u * right - v * up; ray ray(position, ray_direction); payload payload = trace_ray(ray, 1); render_target->item(x, y) = RT: : from_color(payload.color); } } } template < typename VB, typename RT > inline payload raytracer < VB, RT >: : trace_ray(const ray & ray, size_t depth, float max_t, float min_t)const { if (depth == 0) { return miss_shader(ray); } depth--; payload closest_hit_payload = {}; closest_hit_payload.t = max_t; const triangle < VB > *closest_triangle = nullptr; for (auto & aabb: acceleration_structures) { if (!aabb.aabb_test(ray)) continue; for (auto & triangle:aabb.get_traingles()) { payload payload = intersection_shader(triangle, ray); if (payload.t > min_t && payload.t < closest_hit_payload.t) { closest_hit_payload = payload; closest_triangle = &triangle; if (any_hit_shader) return any_hit_shader(ray, payload, triangle); } } } if (closest_hit_payload.t < max_t) { if (closest_hit_shader) { return closest_hit_shader(ray, closest_hit_payload, *closest_triangle); } } return miss_shader(ray); } template < typename VB, typename RT > inline payload raytracer < VB, RT >: :intersection_shader(const triangle < VB > &triangle, const ray & ray)const { payload payload { }; payload.t = -1. f; float3 pvec = cross(ray.direction, triangle.ca); float det = dot(triangle.ba, pvec); const float eps = 1e-8 f; if (abs(det) < eps) { return payload; } float inv_det = 1. f / det; float3 tvec = ray.position - triangle.a; float u = dot(tvec, pvec) * inv_det; if (u < 0. f || u > 1. f) { return payload; } float3 qvec = cross(tvec, triangle.ba); float v = dot(ray.direction, qvec) * inv_det; if (v < 0. f || (u + v) > 1. f) { return payload; } payload.t = dot(triangle.ca, qvec) * inv_det; payload.bary = float3 { 1. f - u - v, u, v }; return payload; } template < typename VB, typename RT > inline float raytracer < VB, RT >::get_random(const int thread_num, const float range)const { static std::default_random_engine generator(thread_num); static std::normal_distribution < float >distribution(0. f, range); return distribution(generator); } template < typename VB > inline void aabb < VB >::add_triangle(const triangle < VB > triangle) { if (triangles.empty()) aabb_max = aabb_min = triangle.a; triangles.push_back(triangle); aabb_max = max(triangle.a, aabb_max); aabb_max = max(triangle.b, aabb_max); aabb_max = max(triangle.c, aabb_max); aabb_min = min(triangle.a, aabb_min); aabb_min = min(triangle.b, aabb_min); aabb_min = min(triangle.c, aabb_min); } template < typename VB > inline const std::vector < triangle < VB >> &aabb < VB >::get_traingles() const { return triangles; } template < typename VB > inline bool aabb < VB >: : aabb_test(const ray & ray)const { float3 inv_ray_direction = float3(1. f) / ray.direction; float3 t0 = (aabb_max - ray.position) * inv_ray_direction; float3 t1 = (aabb_min - ray.position) * inv_ray_direction; float3 tmin = min(t0, t1); float3 tmax = max(t0, t1); return maxelem(tmin) <= minelem(tmax); } } //namespace cg: : renderer
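The second raytracer listing differs from the first in the #pragma omp parallel for placed on the inner y loop of ray_generation(); each iteration writes its own pixel of the render target, so no extra synchronization is needed. A minimal self-contained sketch of that pattern, with a trivial per-pixel expression standing in for trace_ray() (compile with -fopenmp or the equivalent):

#include <omp.h>
#include <cstdio>
#include <vector>

int main()
{
    const int width = 640, height = 480;
    std::vector<float> framebuffer(static_cast<size_t>(width) * height, 0.f);
    for (int x = 0; x < width; x++) {
        /* Same placement as the OpenMP listing above: parallelize the inner
           loop over y; every thread writes distinct pixels of column x. */
        #pragma omp parallel for
        for (int y = 0; y < height; y++) {
            const float u = 2.f * x / (width - 1) - 1.f;   /* [0, width-1]  -> [-1, 1] */
            const float v = 2.f * y / (height - 1) - 1.f;  /* [0, height-1] -> [-1, 1] */
            framebuffer[static_cast<size_t>(y) * width + x] = u * u + v * v;
        }
    }
    std::printf("corner value: %f\n", framebuffer[0]);
    return 0;
}

Because the pragma sits on the inner loop, a thread team is forked once per column; moving it to the outer x loop, or using collapse(2) on the perfectly nested pair, would typically amortize that overhead better, but the sketch keeps the placement used above.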
matrix.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M M AAA TTTTT RRRR IIIII X X % % MM MM A A T R R I X X % % M M M AAAAA T RRRR I X % % M M A A T R R I X X % % M M A A T R R IIIII X X % % % % % % MagickCore Matrix Methods % % % % Software Design % % Cristy % % August 2007 % % % % % % Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/image-private.h" #include "MagickCore/matrix.h" #include "MagickCore/memory_.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/resource_.h" #include "MagickCore/semaphore.h" #include "MagickCore/thread-private.h" #include "MagickCore/utility.h" /* Typedef declaration. */ struct _MatrixInfo { CacheType type; size_t columns, rows, stride; MagickSizeType length; MagickBooleanType mapped, synchronize; char path[MagickPathExtent]; int file; void *elements; SemaphoreInfo *semaphore; size_t signature; }; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e M a t r i x I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireMatrixInfo() allocates the ImageInfo structure. % % The format of the AcquireMatrixInfo method is: % % MatrixInfo *AcquireMatrixInfo(const size_t columns,const size_t rows, % const size_t stride,ExceptionInfo *exception) % % A description of each parameter follows: % % o columns: the matrix columns. % % o rows: the matrix rows. % % o stride: the matrix stride. % % o exception: return any errors or warnings in this structure. 
% */ #if defined(SIGBUS) static void MatrixSignalHandler(int status) { ThrowFatalException(CacheFatalError,"UnableToExtendMatrixCache"); } #endif static inline MagickOffsetType WriteMatrixElements( const MatrixInfo *magick_restrict matrix_info,const MagickOffsetType offset, const MagickSizeType length,const unsigned char *magick_restrict buffer) { register MagickOffsetType i; ssize_t count; #if !defined(MAGICKCORE_HAVE_PWRITE) LockSemaphoreInfo(matrix_info->semaphore); if (lseek(matrix_info->file,offset,SEEK_SET) < 0) { UnlockSemaphoreInfo(matrix_info->semaphore); return((MagickOffsetType) -1); } #endif count=0; for (i=0; i < (MagickOffsetType) length; i+=count) { #if !defined(MAGICKCORE_HAVE_PWRITE) count=write(matrix_info->file,buffer+i,(size_t) MagickMin(length-i, (MagickSizeType) SSIZE_MAX)); #else count=pwrite(matrix_info->file,buffer+i,(size_t) MagickMin(length-i, (MagickSizeType) SSIZE_MAX),(off_t) (offset+i)); #endif if (count <= 0) { count=0; if (errno != EINTR) break; } } #if !defined(MAGICKCORE_HAVE_PWRITE) UnlockSemaphoreInfo(matrix_info->semaphore); #endif return(i); } static MagickBooleanType SetMatrixExtent( MatrixInfo *magick_restrict matrix_info, MagickSizeType length) { MagickOffsetType count, extent, offset; if (length != (MagickSizeType) ((MagickOffsetType) length)) return(MagickFalse); offset=(MagickOffsetType) lseek(matrix_info->file,0,SEEK_END); if (offset < 0) return(MagickFalse); if ((MagickSizeType) offset >= length) return(MagickTrue); extent=(MagickOffsetType) length-1; count=WriteMatrixElements(matrix_info,extent,1,(const unsigned char *) ""); #if defined(MAGICKCORE_HAVE_POSIX_FALLOCATE) if (matrix_info->synchronize != MagickFalse) (void) posix_fallocate(matrix_info->file,offset+1,extent-offset); #endif #if defined(SIGBUS) (void) signal(SIGBUS,MatrixSignalHandler); #endif return(count != (MagickOffsetType) 1 ? 
MagickFalse : MagickTrue); } MagickExport MatrixInfo *AcquireMatrixInfo(const size_t columns, const size_t rows,const size_t stride,ExceptionInfo *exception) { char *synchronize; MagickBooleanType status; MatrixInfo *matrix_info; matrix_info=(MatrixInfo *) AcquireMagickMemory(sizeof(*matrix_info)); if (matrix_info == (MatrixInfo *) NULL) return((MatrixInfo *) NULL); (void) ResetMagickMemory(matrix_info,0,sizeof(*matrix_info)); matrix_info->signature=MagickCoreSignature; matrix_info->columns=columns; matrix_info->rows=rows; matrix_info->stride=stride; matrix_info->semaphore=AcquireSemaphoreInfo(); synchronize=GetEnvironmentValue("MAGICK_SYNCHRONIZE"); if (synchronize != (const char *) NULL) { matrix_info->synchronize=IsStringTrue(synchronize); synchronize=DestroyString(synchronize); } matrix_info->length=(MagickSizeType) columns*rows*stride; if (matrix_info->columns != (size_t) (matrix_info->length/rows/stride)) { (void) ThrowMagickException(exception,GetMagickModule(),CacheError, "CacheResourcesExhausted","`%s'","matrix cache"); return(DestroyMatrixInfo(matrix_info)); } matrix_info->type=MemoryCache; status=AcquireMagickResource(AreaResource,matrix_info->length); if ((status != MagickFalse) && (matrix_info->length == (MagickSizeType) ((size_t) matrix_info->length))) { status=AcquireMagickResource(MemoryResource,matrix_info->length); if (status != MagickFalse) { matrix_info->mapped=MagickFalse; matrix_info->elements=AcquireMagickMemory((size_t) matrix_info->length); if (matrix_info->elements == NULL) { matrix_info->mapped=MagickTrue; matrix_info->elements=MapBlob(-1,IOMode,0,(size_t) matrix_info->length); } if (matrix_info->elements == (unsigned short *) NULL) RelinquishMagickResource(MemoryResource,matrix_info->length); } } matrix_info->file=(-1); if (matrix_info->elements == (unsigned short *) NULL) { status=AcquireMagickResource(DiskResource,matrix_info->length); if (status == MagickFalse) { (void) ThrowMagickException(exception,GetMagickModule(),CacheError, "CacheResourcesExhausted","`%s'","matrix cache"); return(DestroyMatrixInfo(matrix_info)); } matrix_info->type=DiskCache; (void) AcquireMagickResource(MemoryResource,matrix_info->length); matrix_info->file=AcquireUniqueFileResource(matrix_info->path); if (matrix_info->file == -1) return(DestroyMatrixInfo(matrix_info)); status=AcquireMagickResource(MapResource,matrix_info->length); if (status != MagickFalse) { status=SetMatrixExtent(matrix_info,matrix_info->length); if (status != MagickFalse) { matrix_info->elements=(void *) MapBlob(matrix_info->file,IOMode,0, (size_t) matrix_info->length); if (matrix_info->elements != NULL) matrix_info->type=MapCache; else RelinquishMagickResource(MapResource,matrix_info->length); } } } return(matrix_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e M a g i c k M a t r i x % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireMagickMatrix() allocates and returns a matrix in the form of an % array of pointers to an array of doubles, with all values pre-set to zero. % % This used to generate the two dimensional matrix, and vectors required % for the GaussJordanElimination() method below, solving some system of % simultanious equations. 
% % The format of the AcquireMagickMatrix method is: % % double **AcquireMagickMatrix(const size_t number_rows, % const size_t size) % % A description of each parameter follows: % % o number_rows: the number pointers for the array of pointers % (first dimension). % % o size: the size of the array of doubles each pointer points to % (second dimension). % */ MagickExport double **AcquireMagickMatrix(const size_t number_rows, const size_t size) { double **matrix; register ssize_t i, j; matrix=(double **) AcquireQuantumMemory(number_rows,sizeof(*matrix)); if (matrix == (double **) NULL) return((double **) NULL); for (i=0; i < (ssize_t) number_rows; i++) { matrix[i]=(double *) AcquireQuantumMemory(size,sizeof(*matrix[i])); if (matrix[i] == (double *) NULL) { for (j=0; j < i; j++) matrix[j]=(double *) RelinquishMagickMemory(matrix[j]); matrix=(double **) RelinquishMagickMemory(matrix); return((double **) NULL); } for (j=0; j < (ssize_t) size; j++) matrix[i][j]=0.0; } return(matrix); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s t r o y M a t r i x I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyMatrixInfo() dereferences a matrix, deallocating memory associated % with the matrix. % % The format of the DestroyImage method is: % % MatrixInfo *DestroyMatrixInfo(MatrixInfo *matrix_info) % % A description of each parameter follows: % % o matrix_info: the matrix. % */ MagickExport MatrixInfo *DestroyMatrixInfo(MatrixInfo *matrix_info) { assert(matrix_info != (MatrixInfo *) NULL); assert(matrix_info->signature == MagickCoreSignature); LockSemaphoreInfo(matrix_info->semaphore); switch (matrix_info->type) { case MemoryCache: { if (matrix_info->mapped == MagickFalse) matrix_info->elements=RelinquishMagickMemory(matrix_info->elements); else { (void) UnmapBlob(matrix_info->elements,(size_t) matrix_info->length); matrix_info->elements=(unsigned short *) NULL; } RelinquishMagickResource(MemoryResource,matrix_info->length); break; } case MapCache: { (void) UnmapBlob(matrix_info->elements,(size_t) matrix_info->length); matrix_info->elements=NULL; RelinquishMagickResource(MapResource,matrix_info->length); } case DiskCache: { if (matrix_info->file != -1) (void) close(matrix_info->file); (void) RelinquishUniqueFileResource(matrix_info->path); RelinquishMagickResource(DiskResource,matrix_info->length); break; } default: break; } UnlockSemaphoreInfo(matrix_info->semaphore); RelinquishSemaphoreInfo(&matrix_info->semaphore); return((MatrixInfo *) RelinquishMagickMemory(matrix_info)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G a u s s J o r d a n E l i m i n a t i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GaussJordanElimination() returns a matrix in reduced row echelon form, % while simultaneously reducing and thus solving the augumented results % matrix. % % See also http://en.wikipedia.org/wiki/Gauss-Jordan_elimination % % The format of the GaussJordanElimination method is: % % MagickBooleanType GaussJordanElimination(double **matrix, % double **vectors,const size_t rank,const size_t number_vectors) % % A description of each parameter follows: % % o matrix: the matrix to be reduced, as an 'array of row pointers'. % % o vectors: the additional matrix argumenting the matrix for row reduction. % Producing an 'array of column vectors'. 
% % o rank: The size of the matrix (both rows and columns). % Also represents the number terms that need to be solved. % % o number_vectors: Number of vectors columns, argumenting the above matrix. % Usally 1, but can be more for more complex equation solving. % % Note that the 'matrix' is given as a 'array of row pointers' of rank size. % That is values can be assigned as matrix[row][column] where 'row' is % typically the equation, and 'column' is the term of the equation. % That is the matrix is in the form of a 'row first array'. % % However 'vectors' is a 'array of column pointers' which can have any number % of columns, with each column array the same 'rank' size as 'matrix'. % % This allows for simpler handling of the results, especially is only one % column 'vector' is all that is required to produce the desired solution. % % For example, the 'vectors' can consist of a pointer to a simple array of % doubles. when only one set of simultanious equations is to be solved from % the given set of coefficient weighted terms. % % double **matrix = AcquireMagickMatrix(8UL,8UL); % double coefficents[8]; % ... % GaussJordanElimination(matrix, &coefficents, 8UL, 1UL); % % However by specifing more 'columns' (as an 'array of vector columns', % you can use this function to solve a set of 'separable' equations. % % For example a distortion function where u = U(x,y) v = V(x,y) % And the functions U() and V() have separate coefficents, but are being % generated from a common x,y->u,v data set. % % Another example is generation of a color gradient from a set of colors at % specific coordients, such as a list x,y -> r,g,b,a. % % You can also use the 'vectors' to generate an inverse of the given 'matrix' % though as a 'column first array' rather than a 'row first array'. 
For % details see http://en.wikipedia.org/wiki/Gauss-Jordan_elimination % */ MagickPrivate MagickBooleanType GaussJordanElimination(double **matrix, double **vectors,const size_t rank,const size_t number_vectors) { #define GaussJordanSwap(x,y) \ { \ if ((x) != (y)) \ { \ (x)+=(y); \ (y)=(x)-(y); \ (x)=(x)-(y); \ } \ } double max, scale; register ssize_t i, j, k; ssize_t column, *columns, *pivots, row, *rows; columns=(ssize_t *) AcquireQuantumMemory(rank,sizeof(*columns)); rows=(ssize_t *) AcquireQuantumMemory(rank,sizeof(*rows)); pivots=(ssize_t *) AcquireQuantumMemory(rank,sizeof(*pivots)); if ((rows == (ssize_t *) NULL) || (columns == (ssize_t *) NULL) || (pivots == (ssize_t *) NULL)) { if (pivots != (ssize_t *) NULL) pivots=(ssize_t *) RelinquishMagickMemory(pivots); if (columns != (ssize_t *) NULL) columns=(ssize_t *) RelinquishMagickMemory(columns); if (rows != (ssize_t *) NULL) rows=(ssize_t *) RelinquishMagickMemory(rows); return(MagickFalse); } (void) ResetMagickMemory(columns,0,rank*sizeof(*columns)); (void) ResetMagickMemory(rows,0,rank*sizeof(*rows)); (void) ResetMagickMemory(pivots,0,rank*sizeof(*pivots)); column=0; row=0; for (i=0; i < (ssize_t) rank; i++) { max=0.0; for (j=0; j < (ssize_t) rank; j++) if (pivots[j] != 1) { for (k=0; k < (ssize_t) rank; k++) if (pivots[k] != 0) { if (pivots[k] > 1) return(MagickFalse); } else if (fabs(matrix[j][k]) >= max) { max=fabs(matrix[j][k]); row=j; column=k; } } pivots[column]++; if (row != column) { for (k=0; k < (ssize_t) rank; k++) GaussJordanSwap(matrix[row][k],matrix[column][k]); for (k=0; k < (ssize_t) number_vectors; k++) GaussJordanSwap(vectors[k][row],vectors[k][column]); } rows[i]=row; columns[i]=column; if (matrix[column][column] == 0.0) return(MagickFalse); /* sigularity */ scale=PerceptibleReciprocal(matrix[column][column]); matrix[column][column]=1.0; for (j=0; j < (ssize_t) rank; j++) matrix[column][j]*=scale; for (j=0; j < (ssize_t) number_vectors; j++) vectors[j][column]*=scale; for (j=0; j < (ssize_t) rank; j++) if (j != column) { scale=matrix[j][column]; matrix[j][column]=0.0; for (k=0; k < (ssize_t) rank; k++) matrix[j][k]-=scale*matrix[column][k]; for (k=0; k < (ssize_t) number_vectors; k++) vectors[k][j]-=scale*vectors[k][column]; } } for (j=(ssize_t) rank-1; j >= 0; j--) if (columns[j] != rows[j]) for (i=0; i < (ssize_t) rank; i++) GaussJordanSwap(matrix[i][rows[j]],matrix[i][columns[j]]); pivots=(ssize_t *) RelinquishMagickMemory(pivots); rows=(ssize_t *) RelinquishMagickMemory(rows); columns=(ssize_t *) RelinquishMagickMemory(columns); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t M a t r i x C o l u m n s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetMatrixColumns() returns the number of columns in the matrix. % % The format of the GetMatrixColumns method is: % % size_t GetMatrixColumns(const MatrixInfo *matrix_info) % % A description of each parameter follows: % % o matrix_info: the matrix. % */ MagickExport size_t GetMatrixColumns(const MatrixInfo *matrix_info) { assert(matrix_info != (MatrixInfo *) NULL); assert(matrix_info->signature == MagickCoreSignature); return(matrix_info->columns); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t M a t r i x E l e m e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetMatrixElement() returns the specifed element in the matrix. 
% % The format of the GetMatrixElement method is: % % MagickBooleanType GetMatrixElement(const MatrixInfo *matrix_info, % const ssize_t x,const ssize_t y,void *value) % % A description of each parameter follows: % % o matrix_info: the matrix columns. % % o x: the matrix x-offset. % % o y: the matrix y-offset. % % o value: return the matrix element in this buffer. % */ static inline ssize_t EdgeX(const ssize_t x,const size_t columns) { if (x < 0L) return(0L); if (x >= (ssize_t) columns) return((ssize_t) (columns-1)); return(x); } static inline ssize_t EdgeY(const ssize_t y,const size_t rows) { if (y < 0L) return(0L); if (y >= (ssize_t) rows) return((ssize_t) (rows-1)); return(y); } static inline MagickOffsetType ReadMatrixElements( const MatrixInfo *magick_restrict matrix_info,const MagickOffsetType offset, const MagickSizeType length,unsigned char *magick_restrict buffer) { register MagickOffsetType i; ssize_t count; #if !defined(MAGICKCORE_HAVE_PREAD) LockSemaphoreInfo(matrix_info->semaphore); if (lseek(matrix_info->file,offset,SEEK_SET) < 0) { UnlockSemaphoreInfo(matrix_info->semaphore); return((MagickOffsetType) -1); } #endif count=0; for (i=0; i < (MagickOffsetType) length; i+=count) { #if !defined(MAGICKCORE_HAVE_PREAD) count=read(matrix_info->file,buffer+i,(size_t) MagickMin(length-i, (MagickSizeType) SSIZE_MAX)); #else count=pread(matrix_info->file,buffer+i,(size_t) MagickMin(length-i, (MagickSizeType) SSIZE_MAX),(off_t) (offset+i)); #endif if (count <= 0) { count=0; if (errno != EINTR) break; } } #if !defined(MAGICKCORE_HAVE_PREAD) UnlockSemaphoreInfo(matrix_info->semaphore); #endif return(i); } MagickExport MagickBooleanType GetMatrixElement(const MatrixInfo *matrix_info, const ssize_t x,const ssize_t y,void *value) { MagickOffsetType count, i; assert(matrix_info != (const MatrixInfo *) NULL); assert(matrix_info->signature == MagickCoreSignature); i=(MagickOffsetType) EdgeY(y,matrix_info->rows)*matrix_info->columns+ EdgeX(x,matrix_info->columns); if (matrix_info->type != DiskCache) { (void) memcpy(value,(unsigned char *) matrix_info->elements+i* matrix_info->stride,matrix_info->stride); return(MagickTrue); } count=ReadMatrixElements(matrix_info,i*matrix_info->stride, matrix_info->stride,(unsigned char *) value); if (count != (MagickOffsetType) matrix_info->stride) return(MagickFalse); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t M a t r i x R o w s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetMatrixRows() returns the number of rows in the matrix. % % The format of the GetMatrixRows method is: % % size_t GetMatrixRows(const MatrixInfo *matrix_info) % % A description of each parameter follows: % % o matrix_info: the matrix. % */ MagickExport size_t GetMatrixRows(const MatrixInfo *matrix_info) { assert(matrix_info != (const MatrixInfo *) NULL); assert(matrix_info->signature == MagickCoreSignature); return(matrix_info->rows); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + L e a s t S q u a r e s A d d T e r m s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % LeastSquaresAddTerms() adds one set of terms and associate results to the % given matrix and vectors for solving using least-squares function fitting. 
% % The format of the AcquireMagickMatrix method is: % % void LeastSquaresAddTerms(double **matrix,double **vectors, % const double *terms,const double *results,const size_t rank, % const size_t number_vectors); % % A description of each parameter follows: % % o matrix: the square matrix to add given terms/results to. % % o vectors: the result vectors to add terms/results to. % % o terms: the pre-calculated terms (without the unknown coefficent % weights) that forms the equation being added. % % o results: the result(s) that should be generated from the given terms % weighted by the yet-to-be-solved coefficents. % % o rank: the rank or size of the dimensions of the square matrix. % Also the length of vectors, and number of terms being added. % % o number_vectors: Number of result vectors, and number or results being % added. Also represents the number of separable systems of equations % that is being solved. % % Example of use... % % 2 dimensional Affine Equations (which are separable) % c0*x + c2*y + c4*1 => u % c1*x + c3*y + c5*1 => v % % double **matrix = AcquireMagickMatrix(3UL,3UL); % double **vectors = AcquireMagickMatrix(2UL,3UL); % double terms[3], results[2]; % ... % for each given x,y -> u,v % terms[0] = x; % terms[1] = y; % terms[2] = 1; % results[0] = u; % results[1] = v; % LeastSquaresAddTerms(matrix,vectors,terms,results,3UL,2UL); % ... % if ( GaussJordanElimination(matrix,vectors,3UL,2UL) ) { % c0 = vectors[0][0]; % c2 = vectors[0][1]; % c4 = vectors[0][2]; % c1 = vectors[1][0]; % c3 = vectors[1][1]; % c5 = vectors[1][2]; % } % else % printf("Matrix unsolvable\n); % RelinquishMagickMatrix(matrix,3UL); % RelinquishMagickMatrix(vectors,2UL); % */ MagickPrivate void LeastSquaresAddTerms(double **matrix,double **vectors, const double *terms,const double *results,const size_t rank, const size_t number_vectors) { register ssize_t i, j; for (j=0; j < (ssize_t) rank; j++) { for (i=0; i < (ssize_t) rank; i++) matrix[i][j]+=terms[i]*terms[j]; for (i=0; i < (ssize_t) number_vectors; i++) vectors[i][j]+=results[i]*terms[j]; } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a t r i x T o I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MatrixToImage() returns a matrix as an image. The matrix elements must be % of type double otherwise nonsense is returned. % % The format of the MatrixToImage method is: % % Image *MatrixToImage(const MatrixInfo *matrix_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o matrix_info: the matrix. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *MatrixToImage(const MatrixInfo *matrix_info, ExceptionInfo *exception) { CacheView *image_view; double max_value, min_value, scale_factor, value; Image *image; MagickBooleanType status; ssize_t y; assert(matrix_info != (const MatrixInfo *) NULL); assert(matrix_info->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (matrix_info->stride < sizeof(double)) return((Image *) NULL); /* Determine range of matrix. 
*/ (void) GetMatrixElement(matrix_info,0,0,&value); min_value=value; max_value=value; for (y=0; y < (ssize_t) matrix_info->rows; y++) { register ssize_t x; for (x=0; x < (ssize_t) matrix_info->columns; x++) { if (GetMatrixElement(matrix_info,x,y,&value) == MagickFalse) continue; if (value < min_value) min_value=value; else if (value > max_value) max_value=value; } } if ((min_value == 0.0) && (max_value == 0.0)) scale_factor=0; else if (min_value == max_value) { scale_factor=(double) QuantumRange/min_value; min_value=0; } else scale_factor=(double) QuantumRange/(max_value-min_value); /* Convert matrix to image. */ image=AcquireImage((ImageInfo *) NULL,exception); image->columns=matrix_info->columns; image->rows=matrix_info->rows; image->colorspace=GRAYColorspace; status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { double value; register Quantum *q; register ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { if (GetMatrixElement(matrix_info,x,y,&value) == MagickFalse) continue; value=scale_factor*(value-min_value); *q=ClampToQuantum(value); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (status == MagickFalse) image=DestroyImage(image); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % N u l l M a t r i x % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % NullMatrix() sets all elements of the matrix to zero. % % The format of the ResetMagickMemory method is: % % MagickBooleanType *NullMatrix(MatrixInfo *matrix_info) % % A description of each parameter follows: % % o matrix_info: the matrix. % */ MagickExport MagickBooleanType NullMatrix(MatrixInfo *matrix_info) { register ssize_t x; ssize_t count, y; unsigned char value; assert(matrix_info != (const MatrixInfo *) NULL); assert(matrix_info->signature == MagickCoreSignature); if (matrix_info->type != DiskCache) { (void) ResetMagickMemory(matrix_info->elements,0,(size_t) matrix_info->length); return(MagickTrue); } value=0; (void) lseek(matrix_info->file,0,SEEK_SET); for (y=0; y < (ssize_t) matrix_info->rows; y++) { for (x=0; x < (ssize_t) matrix_info->length; x++) { count=write(matrix_info->file,&value,sizeof(value)); if (count != (ssize_t) sizeof(value)) break; } if (x < (ssize_t) matrix_info->length) break; } return(y < (ssize_t) matrix_info->rows ? MagickFalse : MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e l i n q u i s h M a g i c k M a t r i x % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RelinquishMagickMatrix() frees the previously acquired matrix (array of % pointers to arrays of doubles). 
% % The format of the RelinquishMagickMatrix method is: % % double **RelinquishMagickMatrix(double **matrix, % const size_t number_rows) % % A description of each parameter follows: % % o matrix: the matrix to relinquish % % o number_rows: the first dimension of the acquired matrix (number of % pointers) % */ MagickExport double **RelinquishMagickMatrix(double **matrix, const size_t number_rows) { register ssize_t i; if (matrix == (double **) NULL ) return(matrix); for (i=0; i < (ssize_t) number_rows; i++) matrix[i]=(double *) RelinquishMagickMemory(matrix[i]); matrix=(double **) RelinquishMagickMemory(matrix); return(matrix); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t M a t r i x E l e m e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetMatrixElement() sets the specifed element in the matrix. % % The format of the SetMatrixElement method is: % % MagickBooleanType SetMatrixElement(const MatrixInfo *matrix_info, % const ssize_t x,const ssize_t y,void *value) % % A description of each parameter follows: % % o matrix_info: the matrix columns. % % o x: the matrix x-offset. % % o y: the matrix y-offset. % % o value: set the matrix element to this value. % */ MagickExport MagickBooleanType SetMatrixElement(const MatrixInfo *matrix_info, const ssize_t x,const ssize_t y,const void *value) { MagickOffsetType count, i; assert(matrix_info != (const MatrixInfo *) NULL); assert(matrix_info->signature == MagickCoreSignature); i=(MagickOffsetType) y*matrix_info->columns+x; if ((i < 0) || ((MagickSizeType) (i*matrix_info->stride) >= matrix_info->length)) return(MagickFalse); if (matrix_info->type != DiskCache) { (void) memcpy((unsigned char *) matrix_info->elements+i* matrix_info->stride,value,matrix_info->stride); return(MagickTrue); } count=WriteMatrixElements(matrix_info,i*matrix_info->stride, matrix_info->stride,(unsigned char *) value); if (count != (MagickOffsetType) matrix_info->stride) return(MagickFalse); return(MagickTrue); }
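The LeastSquaresAddTerms() comment in the listing above sketches how the two MagickPrivate helpers combine to fit the six coefficients of a separable 2-D affine map (c0*x + c2*y + c4 => u, c1*x + c3*y + c5 => v). Written out as a compilable helper it reads roughly as below; FitAffine2D is a hypothetical name, the x, y, u, v sample arrays come from the caller, the unterminated printf string from the comment is closed, and the code would have to live inside MagickCore (for example in this translation unit) because the two solvers are not exported.

static void FitAffine2D(const double *x,const double *y,const double *u,
  const double *v,const size_t number_points,double coefficients[6])
{
  double
    **matrix,
    results[2],
    terms[3],
    **vectors;

  ssize_t
    i;

  matrix=AcquireMagickMatrix(3UL,3UL);    /* 3x3 normal equations, zero-filled */
  vectors=AcquireMagickMatrix(2UL,3UL);   /* one 3-vector per output (u and v) */
  if ((matrix == (double **) NULL) || (vectors == (double **) NULL))
    {
      matrix=RelinquishMagickMatrix(matrix,3UL);
      vectors=RelinquishMagickMatrix(vectors,2UL);
      return;
    }
  for (i=0; i < (ssize_t) number_points; i++)
  {
    terms[0]=x[i];                        /* for each given x,y -> u,v */
    terms[1]=y[i];
    terms[2]=1.0;
    results[0]=u[i];
    results[1]=v[i];
    LeastSquaresAddTerms(matrix,vectors,terms,results,3UL,2UL);
  }
  if (GaussJordanElimination(matrix,vectors,3UL,2UL) != MagickFalse)
    {
      coefficients[0]=vectors[0][0];      /* c0 */
      coefficients[2]=vectors[0][1];      /* c2 */
      coefficients[4]=vectors[0][2];      /* c4 */
      coefficients[1]=vectors[1][0];      /* c1 */
      coefficients[3]=vectors[1][1];      /* c3 */
      coefficients[5]=vectors[1][2];      /* c5 */
    }
  else
    (void) printf("Matrix unsolvable\n");
  matrix=RelinquishMagickMatrix(matrix,3UL);
  vectors=RelinquishMagickMatrix(vectors,2UL);
}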
/* * Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/image-private.h" #include "MagickCore/matrix.h" #include "MagickCore/memory_.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/resource_.h" #include "MagickCore/semaphore.h" #include "MagickCore/thread-private.h" #include "MagickCore/utility.h" /* * Typedef declaration. */ struct _MatrixInfo { CacheType type; size_t columns, rows, stride; MagickSizeType length; MagickBooleanType mapped, synchronize; char path[MagickPathExtent]; int file; void *elements; SemaphoreInfo * semaphore; size_t signature; }; /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % A c q u i r e M a t r i x I n f o * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % AcquireMatrixInfo() allocates the ImageInfo structure. % % The * format of the AcquireMatrixInfo method is: % % MatrixInfo * *AcquireMatrixInfo(const size_t columns,const size_t rows, % const * size_t stride,ExceptionInfo *exception) % % A description of each * parameter follows: % % o columns: the matrix columns. % % o rows: * the matrix rows. % % o stride: the matrix stride. % % o exception: * return any errors or warnings in this structure. % */ #if defined(SIGBUS) static void MatrixSignalHandler(int status) { ThrowFatalException(CacheFatalError, "UnableToExtendMatrixCache"); } #endif static inline MagickOffsetType WriteMatrixElements( const MatrixInfo * magick_restrict matrix_info, const MagickOffsetType offset, const MagickSizeType length, const unsigned char *magick_restrict buffer) { register MagickOffsetType i; ssize_t count; #if !defined(MAGICKCORE_HAVE_PWRITE) LockSemaphoreInfo(matrix_info->semaphore); if (lseek(matrix_info->file, offset, SEEK_SET) < 0) { UnlockSemaphoreInfo(matrix_info->semaphore); return ((MagickOffsetType) - 1); } #endif count = 0; for (i = 0; i < (MagickOffsetType) length; i += count) { #if !defined(MAGICKCORE_HAVE_PWRITE) count = write(matrix_info->file, buffer + i, (size_t) MagickMin(length - i, (MagickSizeType) SSIZE_MAX)); #else count = pwrite(matrix_info->file, buffer + i, (size_t) MagickMin(length - i, (MagickSizeType) SSIZE_MAX), (off_t) (offset + i)); #endif if (count <= 0) { count = 0; if (errno != EINTR) break; } } #if !defined(MAGICKCORE_HAVE_PWRITE) UnlockSemaphoreInfo(matrix_info->semaphore); #endif return (i); } static MagickBooleanType SetMatrixExtent( MatrixInfo * magick_restrict matrix_info, MagickSizeType length) { MagickOffsetType count, extent, offset; if (length != (MagickSizeType) ((MagickOffsetType) length)) return (MagickFalse); offset = (MagickOffsetType) lseek(matrix_info->file, 0, SEEK_END); if (offset < 0) return (MagickFalse); if ((MagickSizeType) offset >= length) return (MagickTrue); extent = (MagickOffsetType) length - 1; count = WriteMatrixElements(matrix_info, extent, 1, (const unsigned char *)""); #if defined(MAGICKCORE_HAVE_POSIX_FALLOCATE) if (matrix_info->synchronize != MagickFalse) (void)posix_fallocate(matrix_info->file, offset + 1, extent - offset); #endif #if defined(SIGBUS) (void)signal(SIGBUS, MatrixSignalHandler); #endif return (count != (MagickOffsetType) 1 ? 
MagickFalse : MagickTrue); } MagickExport MatrixInfo * AcquireMatrixInfo(const size_t columns, const size_t rows, const size_t stride, ExceptionInfo * exception) { char *synchronize; MagickBooleanType status; MatrixInfo * matrix_info; matrix_info = (MatrixInfo *) AcquireMagickMemory(sizeof(*matrix_info)); if (matrix_info == (MatrixInfo *) NULL) return ((MatrixInfo *) NULL); (void)ResetMagickMemory(matrix_info, 0, sizeof(*matrix_info)); matrix_info->signature = MagickCoreSignature; matrix_info->columns = columns; matrix_info->rows = rows; matrix_info->stride = stride; matrix_info->semaphore = AcquireSemaphoreInfo(); synchronize = GetEnvironmentValue("MAGICK_SYNCHRONIZE"); if (synchronize != (const char *)NULL) { matrix_info->synchronize = IsStringTrue(synchronize); synchronize = DestroyString(synchronize); } matrix_info->length = (MagickSizeType) columns *rows * stride; if (matrix_info->columns != (size_t) (matrix_info->length / rows / stride)) { (void)ThrowMagickException(exception, GetMagickModule(), CacheError, "CacheResourcesExhausted", "`%s'", "matrix cache"); return (DestroyMatrixInfo(matrix_info)); } matrix_info->type = MemoryCache; status = AcquireMagickResource(AreaResource, matrix_info->length); if ((status != MagickFalse) && (matrix_info->length == (MagickSizeType) ((size_t) matrix_info->length))) { status = AcquireMagickResource(MemoryResource, matrix_info->length); if (status != MagickFalse) { matrix_info->mapped = MagickFalse; matrix_info->elements = AcquireMagickMemory((size_t) matrix_info->length); if (matrix_info->elements == NULL) { matrix_info->mapped = MagickTrue; matrix_info->elements = MapBlob(-1, IOMode, 0, (size_t) matrix_info->length); } if (matrix_info->elements == (unsigned short *)NULL) RelinquishMagickResource(MemoryResource, matrix_info->length); } } matrix_info->file = (-1); if (matrix_info->elements == (unsigned short *)NULL) { status = AcquireMagickResource(DiskResource, matrix_info->length); if (status == MagickFalse) { (void)ThrowMagickException(exception, GetMagickModule(), CacheError, "CacheResourcesExhausted", "`%s'", "matrix cache"); return (DestroyMatrixInfo(matrix_info)); } matrix_info->type = DiskCache; (void)AcquireMagickResource(MemoryResource, matrix_info->length); matrix_info->file = AcquireUniqueFileResource(matrix_info->path); if (matrix_info->file == -1) return (DestroyMatrixInfo(matrix_info)); status = AcquireMagickResource(MapResource, matrix_info->length); if (status != MagickFalse) { status = SetMatrixExtent(matrix_info, matrix_info->length); if (status != MagickFalse) { matrix_info->elements = (void *)MapBlob(matrix_info->file, IOMode, 0, (size_t) matrix_info->length); if (matrix_info->elements != NULL) matrix_info->type = MapCache; else RelinquishMagickResource(MapResource, matrix_info->length); } } } return (matrix_info); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % A c q u i r e M a g i c k M a t r i x * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % AcquireMagickMatrix() allocates and returns a matrix in the form * of an % array of pointers to an array of doubles, with all values pre-set * to zero. % % This used to generate the two dimensional matrix, and * vectors required % for the GaussJordanElimination() method below, solving * some system of % simultanious equations. 
% % The format of the * AcquireMagickMatrix method is: % % double **AcquireMagickMatrix(const * size_t number_rows, % const size_t size) % % A description of each * parameter follows: % % o number_rows: the number pointers for the array * of pointers % (first dimension). % % o size: the size of the array * of doubles each pointer points to % (second dimension). % */ MagickExport double ** AcquireMagickMatrix(const size_t number_rows, const size_t size) { double **matrix; register ssize_t i, j; matrix = (double **)AcquireQuantumMemory(number_rows, sizeof(*matrix)); if (matrix == (double **)NULL) return ((double **)NULL); for (i = 0; i < (ssize_t) number_rows; i++) { matrix[i] = (double *)AcquireQuantumMemory(size, sizeof(*matrix[i])); if (matrix[i] == (double *)NULL) { for (j = 0; j < i; j++) matrix[j] = (double *)RelinquishMagickMemory(matrix[j]); matrix = (double **)RelinquishMagickMemory(matrix); return ((double **)NULL); } for (j = 0; j < (ssize_t) size; j++) matrix[i][j] = 0.0; } return (matrix); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % D e s t r o y M a t r i x I n f o * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % DestroyMatrixInfo() dereferences a matrix, deallocating memory * associated % with the matrix. % % The format of the DestroyImage method * is: % % MatrixInfo *DestroyMatrixInfo(MatrixInfo *matrix_info) % % A * description of each parameter follows: % % o matrix_info: the matrix. % */ MagickExport MatrixInfo * DestroyMatrixInfo(MatrixInfo * matrix_info) { assert(matrix_info != (MatrixInfo *) NULL); assert(matrix_info->signature == MagickCoreSignature); LockSemaphoreInfo(matrix_info->semaphore); switch (matrix_info->type) { case MemoryCache: { if (matrix_info->mapped == MagickFalse) matrix_info->elements = RelinquishMagickMemory(matrix_info->elements); else { (void)UnmapBlob(matrix_info->elements, (size_t) matrix_info->length); matrix_info->elements = (unsigned short *)NULL; } RelinquishMagickResource(MemoryResource, matrix_info->length); break; } case MapCache: { (void)UnmapBlob(matrix_info->elements, (size_t) matrix_info->length); matrix_info->elements = NULL; RelinquishMagickResource(MapResource, matrix_info->length); } case DiskCache: { if (matrix_info->file != -1) (void)close(matrix_info->file); (void)RelinquishUniqueFileResource(matrix_info->path); RelinquishMagickResource(DiskResource, matrix_info->length); break; } default: break; } UnlockSemaphoreInfo(matrix_info->semaphore); RelinquishSemaphoreInfo(&matrix_info->semaphore); return ((MatrixInfo *) RelinquishMagickMemory(matrix_info)); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % + G a u s s J o r d a n E l i m i n a t i o n * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % GaussJordanElimination() returns a matrix in reduced row echelon * form, % while simultaneously reducing and thus solving the augumented * results % matrix. % % See also * http://en.wikipedia.org/wiki/Gauss-Jordan_elimination % % The format of * the GaussJordanElimination method is: % % MagickBooleanType * GaussJordanElimination(double **matrix, % double **vectors,const * size_t rank,const size_t number_vectors) % % A description of each * parameter follows: % % o matrix: the matrix to be reduced, as an 'array * of row pointers'. 
% % o vectors: the additional matrix argumenting the * matrix for row reduction. % Producing an 'array of column * vectors'. % % o rank: The size of the matrix (both rows and columns). * % Also represents the number terms that need to be solved. % % * o number_vectors: Number of vectors columns, argumenting the above matrix. * % Usally 1, but can be more for more complex equation solving. * % % Note that the 'matrix' is given as a 'array of row pointers' of rank * size. % That is values can be assigned as matrix[row][column] where * 'row' is % typically the equation, and 'column' is the term of the * equation. % That is the matrix is in the form of a 'row first array'. % % * However 'vectors' is a 'array of column pointers' which can have any * number % of columns, with each column array the same 'rank' size as * 'matrix'. % % This allows for simpler handling of the results, especially * is only one % column 'vector' is all that is required to produce the * desired solution. % % For example, the 'vectors' can consist of a pointer * to a simple array of % doubles. when only one set of simultanious * equations is to be solved from % the given set of coefficient weighted * terms. % % double **matrix = AcquireMagickMatrix(8UL,8UL); % * double coefficents[8]; % ... % GaussJordanElimination(matrix, * &coefficents, 8UL, 1UL); % % However by specifing more 'columns' (as an * 'array of vector columns', % you can use this function to solve a set of * 'separable' equations. % % For example a distortion function where u = * U(x,y) v = V(x,y) % And the functions U() and V() have separate * coefficents, but are being % generated from a common x,y->u,v data set. * % % Another example is generation of a color gradient from a set of * colors at % specific coordients, such as a list x,y -> r,g,b,a. % % You * can also use the 'vectors' to generate an inverse of the given 'matrix' % * though as a 'column first array' rather than a 'row first array'. 
For % * details see http://en.wikipedia.org/wiki/Gauss-Jordan_elimination % */ MagickPrivate MagickBooleanType GaussJordanElimination(double **matrix, double **vectors, const size_t rank, const size_t number_vectors) { #define GaussJordanSwap(x,y) \ { \ if ((x) != (y)) \ { \ (x)+=(y); \ (y)=(x)-(y); \ (x)=(x)-(y); \ } \ } double max, scale; register ssize_t i, j, k; ssize_t column, *columns, *pivots, row, *rows; columns = (ssize_t *) AcquireQuantumMemory(rank, sizeof(*columns)); rows = (ssize_t *) AcquireQuantumMemory(rank, sizeof(*rows)); pivots = (ssize_t *) AcquireQuantumMemory(rank, sizeof(*pivots)); if ((rows == (ssize_t *) NULL) || (columns == (ssize_t *) NULL) || (pivots == (ssize_t *) NULL)) { if (pivots != (ssize_t *) NULL) pivots = (ssize_t *) RelinquishMagickMemory(pivots); if (columns != (ssize_t *) NULL) columns = (ssize_t *) RelinquishMagickMemory(columns); if (rows != (ssize_t *) NULL) rows = (ssize_t *) RelinquishMagickMemory(rows); return (MagickFalse); } (void)ResetMagickMemory(columns, 0, rank * sizeof(*columns)); (void)ResetMagickMemory(rows, 0, rank * sizeof(*rows)); (void)ResetMagickMemory(pivots, 0, rank * sizeof(*pivots)); column = 0; row = 0; for (i = 0; i < (ssize_t) rank; i++) { max = 0.0; for (j = 0; j < (ssize_t) rank; j++) if (pivots[j] != 1) { for (k = 0; k < (ssize_t) rank; k++) if (pivots[k] != 0) { if (pivots[k] > 1) return (MagickFalse); } else if (fabs(matrix[j][k]) >= max) { max = fabs(matrix[j][k]); row = j; column = k; } } pivots[column]++; if (row != column) { for (k = 0; k < (ssize_t) rank; k++) GaussJordanSwap(matrix[row][k], matrix[column][k]); for (k = 0; k < (ssize_t) number_vectors; k++) GaussJordanSwap(vectors[k][row], vectors[k][column]); } rows[i] = row; columns[i] = column; if (matrix[column][column] == 0.0) return (MagickFalse); /* sigularity */ scale = PerceptibleReciprocal(matrix[column][column]); matrix[column][column] = 1.0; for (j = 0; j < (ssize_t) rank; j++) matrix[column][j] *= scale; for (j = 0; j < (ssize_t) number_vectors; j++) vectors[j][column] *= scale; for (j = 0; j < (ssize_t) rank; j++) if (j != column) { scale = matrix[j][column]; matrix[j][column] = 0.0; for (k = 0; k < (ssize_t) rank; k++) matrix[j][k] -= scale * matrix[column][k]; for (k = 0; k < (ssize_t) number_vectors; k++) vectors[k][j] -= scale * vectors[k][column]; } } for (j = (ssize_t) rank - 1; j >= 0; j--) if (columns[j] != rows[j]) for (i = 0; i < (ssize_t) rank; i++) GaussJordanSwap(matrix[i][rows[j]], matrix[i][columns[j]]); pivots = (ssize_t *) RelinquishMagickMemory(pivots); rows = (ssize_t *) RelinquishMagickMemory(rows); columns = (ssize_t *) RelinquishMagickMemory(columns); return (MagickTrue); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % G e t M a t r i x C o l u m n s * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % GetMatrixColumns() returns the number of columns in the matrix. * % % The format of the GetMatrixColumns method is: % % size_t * GetMatrixColumns(const MatrixInfo *matrix_info) % % A description of each * parameter follows: % % o matrix_info: the matrix. 
% */ MagickExport size_t GetMatrixColumns(const MatrixInfo * matrix_info) { assert(matrix_info != (MatrixInfo *) NULL); assert(matrix_info->signature == MagickCoreSignature); return (matrix_info->columns); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % G e t M a t r i x E l e m e n t * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % GetMatrixElement() returns the specifed element in the matrix. % * % The format of the GetMatrixElement method is: % % * MagickBooleanType GetMatrixElement(const MatrixInfo *matrix_info, % * const ssize_t x,const ssize_t y,void *value) % % A description of each * parameter follows: % % o matrix_info: the matrix columns. % % o x: * the matrix x-offset. % % o y: the matrix y-offset. % % o value: * return the matrix element in this buffer. % */ static inline ssize_t EdgeX(const ssize_t x, const size_t columns) { if (x < 0L) return (0L); if (x >= (ssize_t) columns) return ((ssize_t) (columns - 1)); return (x); } static inline ssize_t EdgeY(const ssize_t y, const size_t rows) { if (y < 0L) return (0L); if (y >= (ssize_t) rows) return ((ssize_t) (rows - 1)); return (y); } static inline MagickOffsetType ReadMatrixElements( const MatrixInfo * magick_restrict matrix_info, const MagickOffsetType offset, const MagickSizeType length, unsigned char *magick_restrict buffer) { register MagickOffsetType i; ssize_t count; #if !defined(MAGICKCORE_HAVE_PREAD) LockSemaphoreInfo(matrix_info->semaphore); if (lseek(matrix_info->file, offset, SEEK_SET) < 0) { UnlockSemaphoreInfo(matrix_info->semaphore); return ((MagickOffsetType) - 1); } #endif count = 0; for (i = 0; i < (MagickOffsetType) length; i += count) { #if !defined(MAGICKCORE_HAVE_PREAD) count = read(matrix_info->file, buffer + i, (size_t) MagickMin(length - i, (MagickSizeType) SSIZE_MAX)); #else count = pread(matrix_info->file, buffer + i, (size_t) MagickMin(length - i, (MagickSizeType) SSIZE_MAX), (off_t) (offset + i)); #endif if (count <= 0) { count = 0; if (errno != EINTR) break; } } #if !defined(MAGICKCORE_HAVE_PREAD) UnlockSemaphoreInfo(matrix_info->semaphore); #endif return (i); } MagickExport MagickBooleanType GetMatrixElement(const MatrixInfo * matrix_info, const ssize_t x, const ssize_t y, void *value) { MagickOffsetType count, i; assert(matrix_info != (const MatrixInfo *)NULL); assert(matrix_info->signature == MagickCoreSignature); i = (MagickOffsetType) EdgeY(y, matrix_info->rows) * matrix_info->columns + EdgeX(x, matrix_info->columns); if (matrix_info->type != DiskCache) { (void)memcpy(value, (unsigned char *)matrix_info->elements + i * matrix_info->stride, matrix_info->stride); return (MagickTrue); } count = ReadMatrixElements(matrix_info, i * matrix_info->stride, matrix_info->stride, (unsigned char *)value); if (count != (MagickOffsetType) matrix_info->stride) return (MagickFalse); return (MagickTrue); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % G e t M a t r i x R o w s * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % GetMatrixRows() returns the number of rows in the matrix. % % * The format of the GetMatrixRows method is: % % size_t * GetMatrixRows(const MatrixInfo *matrix_info) % % A description of each * parameter follows: % % o matrix_info: the matrix. 
% */ MagickExport size_t GetMatrixRows(const MatrixInfo * matrix_info) { assert(matrix_info != (const MatrixInfo *)NULL); assert(matrix_info->signature == MagickCoreSignature); return (matrix_info->rows); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % + L e a s t S q u a r e s A d d T e r m s * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % LeastSquaresAddTerms() adds one set of terms and associate * results to the % given matrix and vectors for solving using least-squares * function fitting. % % The format of the AcquireMagickMatrix method is: % * % void LeastSquaresAddTerms(double **matrix,double **vectors, % * const double *terms,const double *results,const size_t rank, % * const size_t number_vectors); % % A description of each parameter * follows: % % o matrix: the square matrix to add given terms/results to. * % % o vectors: the result vectors to add terms/results to. % % o * terms: the pre-calculated terms (without the unknown coefficent % * weights) that forms the equation being added. % % o results: the * result(s) that should be generated from the given terms % * weighted by the yet-to-be-solved coefficents. % % o rank: the rank or * size of the dimensions of the square matrix. % Also the length * of vectors, and number of terms being added. % % o number_vectors: * Number of result vectors, and number or results being % added. Also * represents the number of separable systems of equations % that is * being solved. % % Example of use... % % 2 dimensional Affine * Equations (which are separable) % c0*x + c2*y + c4*1 => u % * c1*x + c3*y + c5*1 => v % % double **matrix = * AcquireMagickMatrix(3UL,3UL); % double **vectors = * AcquireMagickMatrix(2UL,3UL); % double terms[3], results[2]; % ... * % for each given x,y -> u,v % terms[0] = x; % terms[1] = * y; % terms[2] = 1; % results[0] = u; % results[1] = * v; % LeastSquaresAddTerms(matrix,vectors,terms,results,3UL,2UL); % * ... % if ( GaussJordanElimination(matrix,vectors,3UL,2UL) ) { % * c0 = vectors[0][0]; % c2 = vectors[0][1]; % c4 = * vectors[0][2]; % c1 = vectors[1][0]; % c3 = vectors[1][1]; % * c5 = vectors[1][2]; % } % else % printf("Matrix * unsolvable\n); % RelinquishMagickMatrix(matrix,3UL); % * RelinquishMagickMatrix(vectors,2UL); % */ MagickPrivate void LeastSquaresAddTerms(double **matrix, double **vectors, const double *terms, const double *results, const size_t rank, const size_t number_vectors) { register ssize_t i, j; for (j = 0; j < (ssize_t) rank; j++) { for (i = 0; i < (ssize_t) rank; i++) matrix[i][j] += terms[i] * terms[j]; for (i = 0; i < (ssize_t) number_vectors; i++) vectors[i][j] += results[i] * terms[j]; } } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % M a t r i x T o I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % MatrixToImage() returns a matrix as an image. The matrix * elements must be % of type double otherwise nonsense is returned. % % * The format of the MatrixToImage method is: % % Image * *MatrixToImage(const MatrixInfo *matrix_info, % ExceptionInfo * *exception) % % A description of each parameter follows: % % o * matrix_info: the matrix. % % o exception: return any errors or warnings * in this structure. 
% */ MagickExport Image * MatrixToImage(const MatrixInfo * matrix_info, ExceptionInfo * exception) { CacheView * image_view; double max_value, min_value, scale_factor, value; Image * image; MagickBooleanType status; ssize_t y; assert(matrix_info != (const MatrixInfo *)NULL); assert(matrix_info->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (matrix_info->stride < sizeof(double)) return ((Image *) NULL); /* * Determine range of matrix. */ (void)GetMatrixElement(matrix_info, 0, 0, &value); min_value = value; max_value = value; for (y = 0; y < (ssize_t) matrix_info->rows; y++) { register ssize_t x; for (x = 0; x < (ssize_t) matrix_info->columns; x++) { if (GetMatrixElement(matrix_info, x, y, &value) == MagickFalse) continue; if (value < min_value) min_value = value; else if (value > max_value) max_value = value; } } if ((min_value == 0.0) && (max_value == 0.0)) scale_factor = 0; else if (min_value == max_value) { scale_factor = (double)QuantumRange / min_value; min_value = 0; } else scale_factor = (double)QuantumRange / (max_value - min_value); /* * Convert matrix to image. */ image = AcquireImage((ImageInfo *) NULL, exception); image->columns = matrix_info->columns; image->rows = matrix_info->rows; image->colorspace = GRAYColorspace; status = MagickTrue; image_view = AcquireAuthenticCacheView(image, exception); for (y = 0; y < (ssize_t) image->rows; y++) { double value; register Quantum * q; register ssize_t x; if (status == MagickFalse) continue; q = QueueCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { if (GetMatrixElement(matrix_info, x, y, &value) == MagickFalse) continue; value = scale_factor * (value - min_value); *q = ClampToQuantum(value); q += GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; } image_view = DestroyCacheView(image_view); if (status == MagickFalse) image = DestroyImage(image); return (image); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % N u l l M a t r i x * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % NullMatrix() sets all elements of the matrix to zero. % % The * format of the ResetMagickMemory method is: % % MagickBooleanType * *NullMatrix(MatrixInfo *matrix_info) % % A description of each parameter * follows: % % o matrix_info: the matrix. % */ MagickExport MagickBooleanType NullMatrix(MatrixInfo * matrix_info) { register ssize_t x; ssize_t count, y; unsigned char value; assert(matrix_info != (const MatrixInfo *)NULL); assert(matrix_info->signature == MagickCoreSignature); if (matrix_info->type != DiskCache) { (void)ResetMagickMemory(matrix_info->elements, 0, (size_t) matrix_info->length); return (MagickTrue); } value = 0; (void)lseek(matrix_info->file, 0, SEEK_SET); for (y = 0; y < (ssize_t) matrix_info->rows; y++) { for (x = 0; x < (ssize_t) matrix_info->length; x++) { count = write(matrix_info->file, &value, sizeof(value)); if (count != (ssize_t) sizeof(value)) break; } if (x < (ssize_t) matrix_info->length) break; } return (y < (ssize_t) matrix_info->rows ? 
MagickFalse : MagickTrue); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % R e l i n q u i s h M a g i c k M a t r i x * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % RelinquishMagickMatrix() frees the previously acquired matrix * (array of % pointers to arrays of doubles). % % The format of the * RelinquishMagickMatrix method is: % % double * **RelinquishMagickMatrix(double **matrix, % const size_t * number_rows) % % A description of each parameter follows: % % o * matrix: the matrix to relinquish % % o number_rows: the first dimension * of the acquired matrix (number of % pointers) % */ MagickExport double ** RelinquishMagickMatrix(double **matrix, const size_t number_rows) { register ssize_t i; if (matrix == (double **)NULL) return (matrix); for (i = 0; i < (ssize_t) number_rows; i++) matrix[i] = (double *)RelinquishMagickMemory(matrix[i]); matrix = (double **)RelinquishMagickMemory(matrix); return (matrix); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % S e t M a t r i x E l e m e n t * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % SetMatrixElement() sets the specifed element in the matrix. % % * The format of the SetMatrixElement method is: % % MagickBooleanType * SetMatrixElement(const MatrixInfo *matrix_info, % const ssize_t * x,const ssize_t y,void *value) % % A description of each parameter * follows: % % o matrix_info: the matrix columns. % % o x: the matrix * x-offset. % % o y: the matrix y-offset. % % o value: set the matrix * element to this value. % */ MagickExport MagickBooleanType SetMatrixElement(const MatrixInfo * matrix_info, const ssize_t x, const ssize_t y, const void *value) { MagickOffsetType count, i; assert(matrix_info != (const MatrixInfo *)NULL); assert(matrix_info->signature == MagickCoreSignature); i = (MagickOffsetType) y *matrix_info->columns + x; if ((i < 0) || ((MagickSizeType) (i * matrix_info->stride) >= matrix_info->length)) return (MagickFalse); if (matrix_info->type != DiskCache) { (void)memcpy((unsigned char *)matrix_info->elements + i * matrix_info->stride, value, matrix_info->stride); return (MagickTrue); } count = WriteMatrixElements(matrix_info, i * matrix_info->stride, matrix_info->stride, (unsigned char *)value); if (count != (MagickOffsetType) matrix_info->stride) return (MagickFalse); return (MagickTrue); }
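/*
 * A minimal sketch (not part of the library source) of the least-squares
 * workflow described in the LeastSquaresAddTerms() comment above: fit the
 * six coefficients of a 2D affine map u = c0*x + c2*y + c4, v = c1*x +
 * c3*y + c5 from a list of x,y -> u,v samples, then solve with
 * GaussJordanElimination().  Both helpers are MagickPrivate, so this sketch
 * assumes it is compiled inside MagickCore; the function name FitAffine and
 * its parameters are invented for illustration.
 */
static MagickBooleanType FitAffine(const double *x,const double *y,
  const double *u,const double *v,const size_t n,double *coefficients)
{
  double
    **matrix,
    **vectors,
    terms[3],
    results[2];

  MagickBooleanType
    status;

  register ssize_t
    i;

  matrix=AcquireMagickMatrix(3UL,3UL);
  vectors=AcquireMagickMatrix(2UL,3UL);
  if ((matrix == (double **) NULL) || (vectors == (double **) NULL))
    {
      matrix=RelinquishMagickMatrix(matrix,3UL);
      vectors=RelinquishMagickMatrix(vectors,2UL);
      return(MagickFalse);
    }
  for (i=0; i < (ssize_t) n; i++)
  {
    terms[0]=x[i];
    terms[1]=y[i];
    terms[2]=1.0;
    results[0]=u[i];
    results[1]=v[i];
    LeastSquaresAddTerms(matrix,vectors,terms,results,3UL,2UL);
  }
  status=GaussJordanElimination(matrix,vectors,3UL,2UL);
  if (status != MagickFalse)
    {
      coefficients[0]=vectors[0][0];  /* c0 */
      coefficients[2]=vectors[0][1];  /* c2 */
      coefficients[4]=vectors[0][2];  /* c4 */
      coefficients[1]=vectors[1][0];  /* c1 */
      coefficients[3]=vectors[1][1];  /* c3 */
      coefficients[5]=vectors[1][2];  /* c5 */
    }
  matrix=RelinquishMagickMatrix(matrix,3UL);
  vectors=RelinquishMagickMatrix(vectors,2UL);
  return(status);
}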
/* * Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/image-private.h" #include "MagickCore/matrix.h" #include "MagickCore/memory_.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/resource_.h" #include "MagickCore/semaphore.h" #include "MagickCore/thread-private.h" #include "MagickCore/utility.h" /* * Typedef declaration. */ struct _MatrixInfo { CacheType type; size_t columns, rows, stride; MagickSizeType length; MagickBooleanType mapped, synchronize; char path[MagickPathExtent]; int file; void *elements; SemaphoreInfo * semaphore; size_t signature; }; /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % A c q u i r e M a t r i x I n f o * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % AcquireMatrixInfo() allocates the ImageInfo structure. % % The * format of the AcquireMatrixInfo method is: % % MatrixInfo * *AcquireMatrixInfo(const size_t columns,const size_t rows, % const * size_t stride,ExceptionInfo *exception) % % A description of each * parameter follows: % % o columns: the matrix columns. % % o rows: * the matrix rows. % % o stride: the matrix stride. % % o exception: * return any errors or warnings in this structure. % */ #if defined(SIGBUS) static void MatrixSignalHandler(int status) { ThrowFatalException(CacheFatalError, "UnableToExtendMatrixCache"); } #endif static inline MagickOffsetType WriteMatrixElements( const MatrixInfo * magick_restrict matrix_info, const MagickOffsetType offset, const MagickSizeType length, const unsigned char *magick_restrict buffer) { register MagickOffsetType i; ssize_t count; #if !defined(MAGICKCORE_HAVE_PWRITE) LockSemaphoreInfo(matrix_info->semaphore); if (lseek(matrix_info->file, offset, SEEK_SET) < 0) { UnlockSemaphoreInfo(matrix_info->semaphore); return ((MagickOffsetType) - 1); } #endif count = 0; for (i = 0; i < (MagickOffsetType) length; i += count) { #if !defined(MAGICKCORE_HAVE_PWRITE) count = write(matrix_info->file, buffer + i, (size_t) MagickMin(length - i, (MagickSizeType) SSIZE_MAX)); #else count = pwrite(matrix_info->file, buffer + i, (size_t) MagickMin(length - i, (MagickSizeType) SSIZE_MAX), (off_t) (offset + i)); #endif if (count <= 0) { count = 0; if (errno != EINTR) break; } } #if !defined(MAGICKCORE_HAVE_PWRITE) UnlockSemaphoreInfo(matrix_info->semaphore); #endif return (i); } static MagickBooleanType SetMatrixExtent( MatrixInfo * magick_restrict matrix_info, MagickSizeType length) { MagickOffsetType count, extent, offset; if (length != (MagickSizeType) ((MagickOffsetType) length)) return (MagickFalse); offset = (MagickOffsetType) lseek(matrix_info->file, 0, SEEK_END); if (offset < 0) return (MagickFalse); if ((MagickSizeType) offset >= length) return (MagickTrue); extent = (MagickOffsetType) length - 1; count = WriteMatrixElements(matrix_info, extent, 1, (const unsigned char *)""); #if defined(MAGICKCORE_HAVE_POSIX_FALLOCATE) if (matrix_info->synchronize != MagickFalse) (void)posix_fallocate(matrix_info->file, offset + 1, extent - offset); #endif #if defined(SIGBUS) (void)signal(SIGBUS, MatrixSignalHandler); #endif return (count != (MagickOffsetType) 1 ? 
MagickFalse : MagickTrue); } MagickExport MatrixInfo * AcquireMatrixInfo(const size_t columns, const size_t rows, const size_t stride, ExceptionInfo * exception) { char *synchronize; MagickBooleanType status; MatrixInfo * matrix_info; matrix_info = (MatrixInfo *) AcquireMagickMemory(sizeof(*matrix_info)); if (matrix_info == (MatrixInfo *) NULL) return ((MatrixInfo *) NULL); (void)ResetMagickMemory(matrix_info, 0, sizeof(*matrix_info)); matrix_info->signature = MagickCoreSignature; matrix_info->columns = columns; matrix_info->rows = rows; matrix_info->stride = stride; matrix_info->semaphore = AcquireSemaphoreInfo(); synchronize = GetEnvironmentValue("MAGICK_SYNCHRONIZE"); if (synchronize != (const char *)NULL) { matrix_info->synchronize = IsStringTrue(synchronize); synchronize = DestroyString(synchronize); } matrix_info->length = (MagickSizeType) columns *rows * stride; if (matrix_info->columns != (size_t) (matrix_info->length / rows / stride)) { (void)ThrowMagickException(exception, GetMagickModule(), CacheError, "CacheResourcesExhausted", "`%s'", "matrix cache"); return (DestroyMatrixInfo(matrix_info)); } matrix_info->type = MemoryCache; status = AcquireMagickResource(AreaResource, matrix_info->length); if ((status != MagickFalse) && (matrix_info->length == (MagickSizeType) ((size_t) matrix_info->length))) { status = AcquireMagickResource(MemoryResource, matrix_info->length); if (status != MagickFalse) { matrix_info->mapped = MagickFalse; matrix_info->elements = AcquireMagickMemory((size_t) matrix_info->length); if (matrix_info->elements == NULL) { matrix_info->mapped = MagickTrue; matrix_info->elements = MapBlob(-1, IOMode, 0, (size_t) matrix_info->length); } if (matrix_info->elements == (unsigned short *)NULL) RelinquishMagickResource(MemoryResource, matrix_info->length); } } matrix_info->file = (-1); if (matrix_info->elements == (unsigned short *)NULL) { status = AcquireMagickResource(DiskResource, matrix_info->length); if (status == MagickFalse) { (void)ThrowMagickException(exception, GetMagickModule(), CacheError, "CacheResourcesExhausted", "`%s'", "matrix cache"); return (DestroyMatrixInfo(matrix_info)); } matrix_info->type = DiskCache; (void)AcquireMagickResource(MemoryResource, matrix_info->length); matrix_info->file = AcquireUniqueFileResource(matrix_info->path); if (matrix_info->file == -1) return (DestroyMatrixInfo(matrix_info)); status = AcquireMagickResource(MapResource, matrix_info->length); if (status != MagickFalse) { status = SetMatrixExtent(matrix_info, matrix_info->length); if (status != MagickFalse) { matrix_info->elements = (void *)MapBlob(matrix_info->file, IOMode, 0, (size_t) matrix_info->length); if (matrix_info->elements != NULL) matrix_info->type = MapCache; else RelinquishMagickResource(MapResource, matrix_info->length); } } } return (matrix_info); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % A c q u i r e M a g i c k M a t r i x * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % AcquireMagickMatrix() allocates and returns a matrix in the form * of an % array of pointers to an array of doubles, with all values pre-set * to zero. % % This used to generate the two dimensional matrix, and * vectors required % for the GaussJordanElimination() method below, solving * some system of % simultanious equations. 
% % The format of the * AcquireMagickMatrix method is: % % double **AcquireMagickMatrix(const * size_t number_rows, % const size_t size) % % A description of each * parameter follows: % % o number_rows: the number pointers for the array * of pointers % (first dimension). % % o size: the size of the array * of doubles each pointer points to % (second dimension). % */ MagickExport double ** AcquireMagickMatrix(const size_t number_rows, const size_t size) { double **matrix; register ssize_t i, j; matrix = (double **)AcquireQuantumMemory(number_rows, sizeof(*matrix)); if (matrix == (double **)NULL) return ((double **)NULL); for (i = 0; i < (ssize_t) number_rows; i++) { matrix[i] = (double *)AcquireQuantumMemory(size, sizeof(*matrix[i])); if (matrix[i] == (double *)NULL) { for (j = 0; j < i; j++) matrix[j] = (double *)RelinquishMagickMemory(matrix[j]); matrix = (double **)RelinquishMagickMemory(matrix); return ((double **)NULL); } for (j = 0; j < (ssize_t) size; j++) matrix[i][j] = 0.0; } return (matrix); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % D e s t r o y M a t r i x I n f o * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % DestroyMatrixInfo() dereferences a matrix, deallocating memory * associated % with the matrix. % % The format of the DestroyImage method * is: % % MatrixInfo *DestroyMatrixInfo(MatrixInfo *matrix_info) % % A * description of each parameter follows: % % o matrix_info: the matrix. % */ MagickExport MatrixInfo * DestroyMatrixInfo(MatrixInfo * matrix_info) { assert(matrix_info != (MatrixInfo *) NULL); assert(matrix_info->signature == MagickCoreSignature); LockSemaphoreInfo(matrix_info->semaphore); switch (matrix_info->type) { case MemoryCache: { if (matrix_info->mapped == MagickFalse) matrix_info->elements = RelinquishMagickMemory(matrix_info->elements); else { (void)UnmapBlob(matrix_info->elements, (size_t) matrix_info->length); matrix_info->elements = (unsigned short *)NULL; } RelinquishMagickResource(MemoryResource, matrix_info->length); break; } case MapCache: { (void)UnmapBlob(matrix_info->elements, (size_t) matrix_info->length); matrix_info->elements = NULL; RelinquishMagickResource(MapResource, matrix_info->length); } case DiskCache: { if (matrix_info->file != -1) (void)close(matrix_info->file); (void)RelinquishUniqueFileResource(matrix_info->path); RelinquishMagickResource(DiskResource, matrix_info->length); break; } default: break; } UnlockSemaphoreInfo(matrix_info->semaphore); RelinquishSemaphoreInfo(&matrix_info->semaphore); return ((MatrixInfo *) RelinquishMagickMemory(matrix_info)); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % + G a u s s J o r d a n E l i m i n a t i o n * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % GaussJordanElimination() returns a matrix in reduced row echelon * form, % while simultaneously reducing and thus solving the augumented * results % matrix. % % See also * http://en.wikipedia.org/wiki/Gauss-Jordan_elimination % % The format of * the GaussJordanElimination method is: % % MagickBooleanType * GaussJordanElimination(double **matrix, % double **vectors,const * size_t rank,const size_t number_vectors) % % A description of each * parameter follows: % % o matrix: the matrix to be reduced, as an 'array * of row pointers'. 
% % o vectors: the additional matrix argumenting the * matrix for row reduction. % Producing an 'array of column * vectors'. % % o rank: The size of the matrix (both rows and columns). * % Also represents the number terms that need to be solved. % % * o number_vectors: Number of vectors columns, argumenting the above matrix. * % Usally 1, but can be more for more complex equation solving. * % % Note that the 'matrix' is given as a 'array of row pointers' of rank * size. % That is values can be assigned as matrix[row][column] where * 'row' is % typically the equation, and 'column' is the term of the * equation. % That is the matrix is in the form of a 'row first array'. % % * However 'vectors' is a 'array of column pointers' which can have any * number % of columns, with each column array the same 'rank' size as * 'matrix'. % % This allows for simpler handling of the results, especially * is only one % column 'vector' is all that is required to produce the * desired solution. % % For example, the 'vectors' can consist of a pointer * to a simple array of % doubles. when only one set of simultanious * equations is to be solved from % the given set of coefficient weighted * terms. % % double **matrix = AcquireMagickMatrix(8UL,8UL); % * double coefficents[8]; % ... % GaussJordanElimination(matrix, * &coefficents, 8UL, 1UL); % % However by specifing more 'columns' (as an * 'array of vector columns', % you can use this function to solve a set of * 'separable' equations. % % For example a distortion function where u = * U(x,y) v = V(x,y) % And the functions U() and V() have separate * coefficents, but are being % generated from a common x,y->u,v data set. * % % Another example is generation of a color gradient from a set of * colors at % specific coordients, such as a list x,y -> r,g,b,a. % % You * can also use the 'vectors' to generate an inverse of the given 'matrix' % * though as a 'column first array' rather than a 'row first array'. 
For % * details see http://en.wikipedia.org/wiki/Gauss-Jordan_elimination % */ MagickPrivate MagickBooleanType GaussJordanElimination(double **matrix, double **vectors, const size_t rank, const size_t number_vectors) { #define GaussJordanSwap(x,y) \ { \ if ((x) != (y)) \ { \ (x)+=(y); \ (y)=(x)-(y); \ (x)=(x)-(y); \ } \ } double max, scale; register ssize_t i, j, k; ssize_t column, *columns, *pivots, row, *rows; columns = (ssize_t *) AcquireQuantumMemory(rank, sizeof(*columns)); rows = (ssize_t *) AcquireQuantumMemory(rank, sizeof(*rows)); pivots = (ssize_t *) AcquireQuantumMemory(rank, sizeof(*pivots)); if ((rows == (ssize_t *) NULL) || (columns == (ssize_t *) NULL) || (pivots == (ssize_t *) NULL)) { if (pivots != (ssize_t *) NULL) pivots = (ssize_t *) RelinquishMagickMemory(pivots); if (columns != (ssize_t *) NULL) columns = (ssize_t *) RelinquishMagickMemory(columns); if (rows != (ssize_t *) NULL) rows = (ssize_t *) RelinquishMagickMemory(rows); return (MagickFalse); } (void)ResetMagickMemory(columns, 0, rank * sizeof(*columns)); (void)ResetMagickMemory(rows, 0, rank * sizeof(*rows)); (void)ResetMagickMemory(pivots, 0, rank * sizeof(*pivots)); column = 0; row = 0; for (i = 0; i < (ssize_t) rank; i++) { max = 0.0; for (j = 0; j < (ssize_t) rank; j++) if (pivots[j] != 1) { for (k = 0; k < (ssize_t) rank; k++) if (pivots[k] != 0) { if (pivots[k] > 1) return (MagickFalse); } else if (fabs(matrix[j][k]) >= max) { max = fabs(matrix[j][k]); row = j; column = k; } } pivots[column]++; if (row != column) { for (k = 0; k < (ssize_t) rank; k++) GaussJordanSwap(matrix[row][k], matrix[column][k]); for (k = 0; k < (ssize_t) number_vectors; k++) GaussJordanSwap(vectors[k][row], vectors[k][column]); } rows[i] = row; columns[i] = column; if (matrix[column][column] == 0.0) return (MagickFalse); /* sigularity */ scale = PerceptibleReciprocal(matrix[column][column]); matrix[column][column] = 1.0; for (j = 0; j < (ssize_t) rank; j++) matrix[column][j] *= scale; for (j = 0; j < (ssize_t) number_vectors; j++) vectors[j][column] *= scale; for (j = 0; j < (ssize_t) rank; j++) if (j != column) { scale = matrix[j][column]; matrix[j][column] = 0.0; for (k = 0; k < (ssize_t) rank; k++) matrix[j][k] -= scale * matrix[column][k]; for (k = 0; k < (ssize_t) number_vectors; k++) vectors[k][j] -= scale * vectors[k][column]; } } for (j = (ssize_t) rank - 1; j >= 0; j--) if (columns[j] != rows[j]) for (i = 0; i < (ssize_t) rank; i++) GaussJordanSwap(matrix[i][rows[j]], matrix[i][columns[j]]); pivots = (ssize_t *) RelinquishMagickMemory(pivots); rows = (ssize_t *) RelinquishMagickMemory(rows); columns = (ssize_t *) RelinquishMagickMemory(columns); return (MagickTrue); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % G e t M a t r i x C o l u m n s * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % GetMatrixColumns() returns the number of columns in the matrix. * % % The format of the GetMatrixColumns method is: % % size_t * GetMatrixColumns(const MatrixInfo *matrix_info) % % A description of each * parameter follows: % % o matrix_info: the matrix. 
% */ MagickExport size_t GetMatrixColumns(const MatrixInfo * matrix_info) { assert(matrix_info != (MatrixInfo *) NULL); assert(matrix_info->signature == MagickCoreSignature); return (matrix_info->columns); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % G e t M a t r i x E l e m e n t * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % GetMatrixElement() returns the specifed element in the matrix. % * % The format of the GetMatrixElement method is: % % * MagickBooleanType GetMatrixElement(const MatrixInfo *matrix_info, % * const ssize_t x,const ssize_t y,void *value) % % A description of each * parameter follows: % % o matrix_info: the matrix columns. % % o x: * the matrix x-offset. % % o y: the matrix y-offset. % % o value: * return the matrix element in this buffer. % */ static inline ssize_t EdgeX(const ssize_t x, const size_t columns) { if (x < 0L) return (0L); if (x >= (ssize_t) columns) return ((ssize_t) (columns - 1)); return (x); } static inline ssize_t EdgeY(const ssize_t y, const size_t rows) { if (y < 0L) return (0L); if (y >= (ssize_t) rows) return ((ssize_t) (rows - 1)); return (y); } static inline MagickOffsetType ReadMatrixElements( const MatrixInfo * magick_restrict matrix_info, const MagickOffsetType offset, const MagickSizeType length, unsigned char *magick_restrict buffer) { register MagickOffsetType i; ssize_t count; #if !defined(MAGICKCORE_HAVE_PREAD) LockSemaphoreInfo(matrix_info->semaphore); if (lseek(matrix_info->file, offset, SEEK_SET) < 0) { UnlockSemaphoreInfo(matrix_info->semaphore); return ((MagickOffsetType) - 1); } #endif count = 0; for (i = 0; i < (MagickOffsetType) length; i += count) { #if !defined(MAGICKCORE_HAVE_PREAD) count = read(matrix_info->file, buffer + i, (size_t) MagickMin(length - i, (MagickSizeType) SSIZE_MAX)); #else count = pread(matrix_info->file, buffer + i, (size_t) MagickMin(length - i, (MagickSizeType) SSIZE_MAX), (off_t) (offset + i)); #endif if (count <= 0) { count = 0; if (errno != EINTR) break; } } #if !defined(MAGICKCORE_HAVE_PREAD) UnlockSemaphoreInfo(matrix_info->semaphore); #endif return (i); } MagickExport MagickBooleanType GetMatrixElement(const MatrixInfo * matrix_info, const ssize_t x, const ssize_t y, void *value) { MagickOffsetType count, i; assert(matrix_info != (const MatrixInfo *)NULL); assert(matrix_info->signature == MagickCoreSignature); i = (MagickOffsetType) EdgeY(y, matrix_info->rows) * matrix_info->columns + EdgeX(x, matrix_info->columns); if (matrix_info->type != DiskCache) { (void)memcpy(value, (unsigned char *)matrix_info->elements + i * matrix_info->stride, matrix_info->stride); return (MagickTrue); } count = ReadMatrixElements(matrix_info, i * matrix_info->stride, matrix_info->stride, (unsigned char *)value); if (count != (MagickOffsetType) matrix_info->stride) return (MagickFalse); return (MagickTrue); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % G e t M a t r i x R o w s * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % GetMatrixRows() returns the number of rows in the matrix. % % * The format of the GetMatrixRows method is: % % size_t * GetMatrixRows(const MatrixInfo *matrix_info) % % A description of each * parameter follows: % % o matrix_info: the matrix. 
% */ MagickExport size_t GetMatrixRows(const MatrixInfo * matrix_info) { assert(matrix_info != (const MatrixInfo *)NULL); assert(matrix_info->signature == MagickCoreSignature); return (matrix_info->rows); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % + L e a s t S q u a r e s A d d T e r m s * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % LeastSquaresAddTerms() adds one set of terms and associate * results to the % given matrix and vectors for solving using least-squares * function fitting. % % The format of the AcquireMagickMatrix method is: % * % void LeastSquaresAddTerms(double **matrix,double **vectors, % * const double *terms,const double *results,const size_t rank, % * const size_t number_vectors); % % A description of each parameter * follows: % % o matrix: the square matrix to add given terms/results to. * % % o vectors: the result vectors to add terms/results to. % % o * terms: the pre-calculated terms (without the unknown coefficent % * weights) that forms the equation being added. % % o results: the * result(s) that should be generated from the given terms % * weighted by the yet-to-be-solved coefficents. % % o rank: the rank or * size of the dimensions of the square matrix. % Also the length * of vectors, and number of terms being added. % % o number_vectors: * Number of result vectors, and number or results being % added. Also * represents the number of separable systems of equations % that is * being solved. % % Example of use... % % 2 dimensional Affine * Equations (which are separable) % c0*x + c2*y + c4*1 => u % * c1*x + c3*y + c5*1 => v % % double **matrix = * AcquireMagickMatrix(3UL,3UL); % double **vectors = * AcquireMagickMatrix(2UL,3UL); % double terms[3], results[2]; % ... * % for each given x,y -> u,v % terms[0] = x; % terms[1] = * y; % terms[2] = 1; % results[0] = u; % results[1] = * v; % LeastSquaresAddTerms(matrix,vectors,terms,results,3UL,2UL); % * ... % if ( GaussJordanElimination(matrix,vectors,3UL,2UL) ) { % * c0 = vectors[0][0]; % c2 = vectors[0][1]; % c4 = * vectors[0][2]; % c1 = vectors[1][0]; % c3 = vectors[1][1]; % * c5 = vectors[1][2]; % } % else % printf("Matrix * unsolvable\n); % RelinquishMagickMatrix(matrix,3UL); % * RelinquishMagickMatrix(vectors,2UL); % */ MagickPrivate void LeastSquaresAddTerms(double **matrix, double **vectors, const double *terms, const double *results, const size_t rank, const size_t number_vectors) { register ssize_t i, j; for (j = 0; j < (ssize_t) rank; j++) { for (i = 0; i < (ssize_t) rank; i++) matrix[i][j] += terms[i] * terms[j]; for (i = 0; i < (ssize_t) number_vectors; i++) vectors[i][j] += results[i] * terms[j]; } } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % M a t r i x T o I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % MatrixToImage() returns a matrix as an image. The matrix * elements must be % of type double otherwise nonsense is returned. % % * The format of the MatrixToImage method is: % % Image * *MatrixToImage(const MatrixInfo *matrix_info, % ExceptionInfo * *exception) % % A description of each parameter follows: % % o * matrix_info: the matrix. % % o exception: return any errors or warnings * in this structure. 
% */ MagickExport Image * MatrixToImage(const MatrixInfo * matrix_info, ExceptionInfo * exception) { CacheView * image_view; double max_value, min_value, scale_factor, value; Image * image; MagickBooleanType status; ssize_t y; assert(matrix_info != (const MatrixInfo *)NULL); assert(matrix_info->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (matrix_info->stride < sizeof(double)) return ((Image *) NULL); /* * Determine range of matrix. */ (void)GetMatrixElement(matrix_info, 0, 0, &value); min_value = value; max_value = value; for (y = 0; y < (ssize_t) matrix_info->rows; y++) { register ssize_t x; for (x = 0; x < (ssize_t) matrix_info->columns; x++) { if (GetMatrixElement(matrix_info, x, y, &value) == MagickFalse) continue; if (value < min_value) min_value = value; else if (value > max_value) max_value = value; } } if ((min_value == 0.0) && (max_value == 0.0)) scale_factor = 0; else if (min_value == max_value) { scale_factor = (double)QuantumRange / min_value; min_value = 0; } else scale_factor = (double)QuantumRange / (max_value - min_value); /* * Convert matrix to image. */ image = AcquireImage((ImageInfo *) NULL, exception); image->columns = matrix_info->columns; image->rows = matrix_info->rows; image->colorspace = GRAYColorspace; status = MagickTrue; image_view = AcquireAuthenticCacheView(image, exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y = 0; y < (ssize_t) image->rows; y++) { double value; register Quantum * q; register ssize_t x; if (status == MagickFalse) continue; q = QueueCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { if (GetMatrixElement(matrix_info, x, y, &value) == MagickFalse) continue; value = scale_factor * (value - min_value); *q = ClampToQuantum(value); q += GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; } image_view = DestroyCacheView(image_view); if (status == MagickFalse) image = DestroyImage(image); return (image); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % N u l l M a t r i x * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % NullMatrix() sets all elements of the matrix to zero. % % The * format of the ResetMagickMemory method is: % % MagickBooleanType * *NullMatrix(MatrixInfo *matrix_info) % % A description of each parameter * follows: % % o matrix_info: the matrix. % */ MagickExport MagickBooleanType NullMatrix(MatrixInfo * matrix_info) { register ssize_t x; ssize_t count, y; unsigned char value; assert(matrix_info != (const MatrixInfo *)NULL); assert(matrix_info->signature == MagickCoreSignature); if (matrix_info->type != DiskCache) { (void)ResetMagickMemory(matrix_info->elements, 0, (size_t) matrix_info->length); return (MagickTrue); } value = 0; (void)lseek(matrix_info->file, 0, SEEK_SET); for (y = 0; y < (ssize_t) matrix_info->rows; y++) { for (x = 0; x < (ssize_t) matrix_info->length; x++) { count = write(matrix_info->file, &value, sizeof(value)); if (count != (ssize_t) sizeof(value)) break; } if (x < (ssize_t) matrix_info->length) break; } return (y < (ssize_t) matrix_info->rows ? 
MagickFalse : MagickTrue); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % R e l i n q u i s h M a g i c k M a t r i x * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % RelinquishMagickMatrix() frees the previously acquired matrix * (array of % pointers to arrays of doubles). % % The format of the * RelinquishMagickMatrix method is: % % double * **RelinquishMagickMatrix(double **matrix, % const size_t * number_rows) % % A description of each parameter follows: % % o * matrix: the matrix to relinquish % % o number_rows: the first dimension * of the acquired matrix (number of % pointers) % */ MagickExport double ** RelinquishMagickMatrix(double **matrix, const size_t number_rows) { register ssize_t i; if (matrix == (double **)NULL) return (matrix); for (i = 0; i < (ssize_t) number_rows; i++) matrix[i] = (double *)RelinquishMagickMemory(matrix[i]); matrix = (double **)RelinquishMagickMemory(matrix); return (matrix); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % S e t M a t r i x E l e m e n t * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % SetMatrixElement() sets the specifed element in the matrix. % % * The format of the SetMatrixElement method is: % % MagickBooleanType * SetMatrixElement(const MatrixInfo *matrix_info, % const ssize_t * x,const ssize_t y,void *value) % % A description of each parameter * follows: % % o matrix_info: the matrix columns. % % o x: the matrix * x-offset. % % o y: the matrix y-offset. % % o value: set the matrix * element to this value. % */ MagickExport MagickBooleanType SetMatrixElement(const MatrixInfo * matrix_info, const ssize_t x, const ssize_t y, const void *value) { MagickOffsetType count, i; assert(matrix_info != (const MatrixInfo *)NULL); assert(matrix_info->signature == MagickCoreSignature); i = (MagickOffsetType) y *matrix_info->columns + x; if ((i < 0) || ((MagickSizeType) (i * matrix_info->stride) >= matrix_info->length)) return (MagickFalse); if (matrix_info->type != DiskCache) { (void)memcpy((unsigned char *)matrix_info->elements + i * matrix_info->stride, value, matrix_info->stride); return (MagickTrue); } count = WriteMatrixElements(matrix_info, i * matrix_info->stride, matrix_info->stride, (unsigned char *)value); if (count != (MagickOffsetType) matrix_info->stride) return (MagickFalse); return (MagickTrue); }
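/*
 * A minimal sketch (assumed caller code, not part of matrix.c) of the public
 * MatrixInfo workflow implemented above: allocate a columns x rows matrix of
 * doubles with AcquireMatrixInfo(), fill it with SetMatrixElement(), and
 * render it with MatrixToImage(), which linearly rescales the element range
 * onto [0,QuantumRange].  The umbrella include and the function name
 * RampImage are assumptions made so the example is self-contained.
 */
#include <MagickCore/MagickCore.h>

static Image *RampImage(const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  double
    value;

  Image
    *image;

  MatrixInfo
    *matrix_info;

  ssize_t
    x,
    y;

  matrix_info=AcquireMatrixInfo(columns,rows,sizeof(double),exception);
  if (matrix_info == (MatrixInfo *) NULL)
    return((Image *) NULL);
  for (y=0; y < (ssize_t) rows; y++)
    for (x=0; x < (ssize_t) columns; x++)
    {
      value=(double) (x+y);  /* arbitrary sample data: a diagonal ramp */
      (void) SetMatrixElement(matrix_info,x,y,&value);
    }
  image=MatrixToImage(matrix_info,exception);
  matrix_info=DestroyMatrixInfo(matrix_info);
  return(image);
}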
generator_spgemm_csr_asparse.c
/****************************************************************************** ** Copyright (c) 2015-2019, Intel Corporation ** ** All rights reserved. ** ** ** ** Redistribution and use in source and binary forms, with or without ** ** modification, are permitted provided that the following conditions ** ** are met: ** ** 1. Redistributions of source code must retain the above copyright ** ** notice, this list of conditions and the following disclaimer. ** ** 2. Redistributions in binary form must reproduce the above copyright ** ** notice, this list of conditions and the following disclaimer in the ** ** documentation and/or other materials provided with the distribution. ** ** 3. Neither the name of the copyright holder nor the names of its ** ** contributors may be used to endorse or promote products derived ** ** from this software without specific prior written permission. ** ** ** ** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ** ** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ** ** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ** ** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ** ** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ** ** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED ** ** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR ** ** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF ** ** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING ** ** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ** ** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ** ******************************************************************************/ /* Alexander Heinecke (Intel Corp.) 
******************************************************************************/ #include "generator_spgemm_csr_asparse.h" #include "generator_common.h" #include "libxsmm_main.h" #if defined(LIBXSMM_OFFLOAD_TARGET) # pragma offload_attribute(push,target(LIBXSMM_OFFLOAD_TARGET)) #endif #include <stdlib.h> #include <string.h> #include <stdio.h> #if defined(LIBXSMM_OFFLOAD_TARGET) # pragma offload_attribute(pop) #endif LIBXSMM_API_INTERN void libxsmm_generator_spgemm_csr_asparse( libxsmm_generated_code* io_generated_code, const libxsmm_gemm_descriptor* i_xgemm_desc, const char* i_arch, const unsigned int* i_row_idx, const unsigned int* i_column_idx, const double* i_values ) { unsigned int l_m; unsigned int l_z; unsigned int l_row_elements; unsigned int l_flop_count = 0; char l_new_code[512]; int l_max_code_length = 511; int l_code_length = 0; LIBXSMM_UNUSED(i_values); l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " unsigned int l_n = 0;\n"); libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length ); /* reset C if beta is zero */ if (0 != (LIBXSMM_GEMM_FLAG_BETA_0 & i_xgemm_desc->flags)) { /* Beta=0 */ l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " unsigned int l_m = 0;\n"); libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length ); l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " for ( l_m = 0; l_m < %u; l_m++) {\n", (unsigned int)i_xgemm_desc->m); libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length ); if ( i_xgemm_desc->m > 1 ) { l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " #pragma simd\n"); libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length ); l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " #pragma vector aligned\n"); libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length ); } if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) { l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " for ( l_n = 0; l_n < %u; l_n++) { C[(l_m*%u)+l_n] = 0.0; }\n", (unsigned int)i_xgemm_desc->ldc, (unsigned int)i_xgemm_desc->ldc); } else { l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " for ( l_n = 0; l_n < %u; l_n++) { C[(l_m*%u)+l_n] = 0.0f; }\n", (unsigned int)i_xgemm_desc->ldc, (unsigned int)i_xgemm_desc->ldc); } libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length ); l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " }\n"); libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length ); } l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "\n"); libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length ); /* determine the correct simd pragma for each architecture */ if ( ( strcmp( i_arch, "noarch" ) == 0 ) || ( strcmp( i_arch, "wsm" ) == 0 ) || ( strcmp( i_arch, "snb" ) == 0 ) || ( strcmp( i_arch, "hsw" ) == 0 ) ) { if ( i_xgemm_desc->n > 7 ) { l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " #pragma simd vectorlength(8)\n"); libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length ); } else if ( i_xgemm_desc->n > 3 ) { l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " #pragma simd vectorlength(4)\n"); libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length ); } else if ( i_xgemm_desc->n > 1 ) { l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " #pragma simd vectorlength(2)\n"); 
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length ); } else {} } else if ( ( strcmp( i_arch, "knl" ) == 0 ) || ( strcmp( i_arch, "knm" ) == 0 ) || ( strcmp( i_arch, "skx" ) == 0 ) || ( strcmp( i_arch, "clx" ) == 0 ) || ( strcmp( i_arch, "cpx" ) == 0 ) ) { if ( (i_xgemm_desc->n > 1) ) { l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " #pragma simd vectorlength(16)\n"); libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length ); } } else { LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_ARCH ); return; } if ( (i_xgemm_desc->n > 1) && ((LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0) && ((LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0) ) { l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " #pragma vector aligned\n"); libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length ); } /* generate the actuel kernel */ l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " for ( l_n = 0; l_n < %u; l_n++) {\n", (unsigned int)i_xgemm_desc->n); libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length ); for ( l_m = 0; l_m < (unsigned int)i_xgemm_desc->m; l_m++ ) { l_row_elements = i_row_idx[l_m+1] - i_row_idx[l_m]; for ( l_z = 0; l_z < l_row_elements; l_z++ ) { /* check k such that we just use columns which actually need to be multiplied */ if ( i_column_idx[i_row_idx[l_m] + l_z] < (unsigned int)i_xgemm_desc->k ) { l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " C[%u+l_n] += A[%u] * B[%u+l_n];\n", l_m * i_xgemm_desc->ldc, i_row_idx[l_m] + l_z, i_column_idx[i_row_idx[l_m] + l_z]*i_xgemm_desc->ldb ); libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length ); l_flop_count += 2; } } } l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " }\n"); libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length ); /* add flop counter */ l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "\n#ifndef NDEBUG\n#ifdef _OPENMP\n#pragma omp atomic\n#endif\nlibxsmm_num_total_flops += %u;\n#endif\n", l_flop_count * (unsigned int)i_xgemm_desc->m); libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length ); }
/* Alexander Heinecke (Intel Corp.) ******************************************************************************/ #include "generator_spgemm_csr_asparse.h" #include "generator_common.h" #include "libxsmm_main.h" #if defined(LIBXSMM_OFFLOAD_TARGET) # pragma offload_attribute(push,target(LIBXSMM_OFFLOAD_TARGET)) #endif #include <stdlib.h> #include <string.h> #include <stdio.h> #if defined(LIBXSMM_OFFLOAD_TARGET) # pragma offload_attribute(pop) #endif LIBXSMM_API_INTERN void libxsmm_generator_spgemm_csr_asparse( libxsmm_generated_code* io_generated_code, const libxsmm_gemm_descriptor* i_xgemm_desc, const char* i_arch, const unsigned int* i_row_idx, const unsigned int* i_column_idx, const double* i_values ) { unsigned int l_m; unsigned int l_z; unsigned int l_row_elements; unsigned int l_flop_count = 0; char l_new_code[512]; int l_max_code_length = 511; int l_code_length = 0; LIBXSMM_UNUSED(i_values); l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " unsigned int l_n = 0;\n"); libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length ); /* reset C if beta is zero */ if (0 != (LIBXSMM_GEMM_FLAG_BETA_0 & i_xgemm_desc->flags)) { /* Beta=0 */ l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " unsigned int l_m = 0;\n"); libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length ); l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " for ( l_m = 0; l_m < %u; l_m++) {\n", (unsigned int)i_xgemm_desc->m); libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length ); if ( i_xgemm_desc->m > 1 ) { l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " #pragma simd\n"); libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length ); l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " #pragma vector aligned\n"); libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length ); } if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) { l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " for ( l_n = 0; l_n < %u; l_n++) { C[(l_m*%u)+l_n] = 0.0; }\n", (unsigned int)i_xgemm_desc->ldc, (unsigned int)i_xgemm_desc->ldc); } else { l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " for ( l_n = 0; l_n < %u; l_n++) { C[(l_m*%u)+l_n] = 0.0f; }\n", (unsigned int)i_xgemm_desc->ldc, (unsigned int)i_xgemm_desc->ldc); } libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length ); l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " }\n"); libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length ); } l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "\n"); libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length ); /* determine the correct simd pragma for each architecture */ if ( ( strcmp( i_arch, "noarch" ) == 0 ) || ( strcmp( i_arch, "wsm" ) == 0 ) || ( strcmp( i_arch, "snb" ) == 0 ) || ( strcmp( i_arch, "hsw" ) == 0 ) ) { if ( i_xgemm_desc->n > 7 ) { l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " #pragma simd vectorlength(8)\n"); libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length ); } else if ( i_xgemm_desc->n > 3 ) { l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " #pragma simd vectorlength(4)\n"); libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length ); } else if ( i_xgemm_desc->n > 1 ) { l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " 
#pragma simd vectorlength(2)\n"); libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length ); } else {} } else if ( ( strcmp( i_arch, "knl" ) == 0 ) || ( strcmp( i_arch, "knm" ) == 0 ) || ( strcmp( i_arch, "skx" ) == 0 ) || ( strcmp( i_arch, "clx" ) == 0 ) || ( strcmp( i_arch, "cpx" ) == 0 ) ) { if ( (i_xgemm_desc->n > 1) ) { l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " #pragma simd vectorlength(16)\n"); libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length ); } } else { LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_ARCH ); return; } if ( (i_xgemm_desc->n > 1) && ((LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0) && ((LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0) ) { l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " #pragma vector aligned\n"); libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length ); } /* generate the actuel kernel */ l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " for ( l_n = 0; l_n < %u; l_n++) {\n", (unsigned int)i_xgemm_desc->n); libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length ); for ( l_m = 0; l_m < (unsigned int)i_xgemm_desc->m; l_m++ ) { l_row_elements = i_row_idx[l_m+1] - i_row_idx[l_m]; for ( l_z = 0; l_z < l_row_elements; l_z++ ) { /* check k such that we just use columns which actually need to be multiplied */ if ( i_column_idx[i_row_idx[l_m] + l_z] < (unsigned int)i_xgemm_desc->k ) { l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " C[%u+l_n] += A[%u] * B[%u+l_n];\n", l_m * i_xgemm_desc->ldc, i_row_idx[l_m] + l_z, i_column_idx[i_row_idx[l_m] + l_z]*i_xgemm_desc->ldb ); libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length ); l_flop_count += 2; } } } l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " }\n"); libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length ); /* add flop counter */ l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "\n#ifndef NDEBUG\n#ifdef _OPENMP\n libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length ); }
/* * Alexander Heinecke (Intel Corp.) **************************************************************************** */ #include "generator_spgemm_csr_asparse.h" #include "generator_common.h" #include "libxsmm_main.h" #if defined(LIBXSMM_OFFLOAD_TARGET) #pragma offload_attribute(push,target(LIBXSMM_OFFLOAD_TARGET)) #endif #include <stdlib.h> #include <string.h> #include <stdio.h> #if defined(LIBXSMM_OFFLOAD_TARGET) #pragma offload_attribute(pop) #endif LIBXSMM_API_INTERN void libxsmm_generator_spgemm_csr_asparse(libxsmm_generated_code * io_generated_code, const libxsmm_gemm_descriptor * i_xgemm_desc, const char *i_arch, const unsigned int *i_row_idx, const unsigned int *i_column_idx, const double *i_values) { unsigned int l_m; unsigned int l_z; unsigned int l_row_elements; unsigned int l_flop_count = 0; char l_new_code[512]; int l_max_code_length = 511; int l_code_length = 0; LIBXSMM_UNUSED(i_values); l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " unsigned int l_n = 0;\n"); libxsmm_append_code_as_string(io_generated_code, l_new_code, l_code_length); /* reset C if beta is zero */ if (0 != (LIBXSMM_GEMM_FLAG_BETA_0 & i_xgemm_desc->flags)) { /* Beta=0 */ l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " unsigned int l_m = 0;\n"); libxsmm_append_code_as_string(io_generated_code, l_new_code, l_code_length); l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " for ( l_m = 0; l_m < %u; l_m++) {\n", (unsigned int)i_xgemm_desc->m); libxsmm_append_code_as_string(io_generated_code, l_new_code, l_code_length); if (i_xgemm_desc->m > 1) { l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " #pragma simd\n"); libxsmm_append_code_as_string(io_generated_code, l_new_code, l_code_length); l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " #pragma vector aligned\n"); libxsmm_append_code_as_string(io_generated_code, l_new_code, l_code_length); } if (LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP(i_xgemm_desc->datatype)) { l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " for ( l_n = 0; l_n < %u; l_n++) { C[(l_m*%u)+l_n] = 0.0; }\n", (unsigned int)i_xgemm_desc->ldc, (unsigned int)i_xgemm_desc->ldc); } else { l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " for ( l_n = 0; l_n < %u; l_n++) { C[(l_m*%u)+l_n] = 0.0f; }\n", (unsigned int)i_xgemm_desc->ldc, (unsigned int)i_xgemm_desc->ldc); } libxsmm_append_code_as_string(io_generated_code, l_new_code, l_code_length); l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " }\n"); libxsmm_append_code_as_string(io_generated_code, l_new_code, l_code_length); } l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "\n"); libxsmm_append_code_as_string(io_generated_code, l_new_code, l_code_length); /* determine the correct simd pragma for each architecture */ if ((strcmp(i_arch, "noarch") == 0) || (strcmp(i_arch, "wsm") == 0) || (strcmp(i_arch, "snb") == 0) || (strcmp(i_arch, "hsw") == 0)) { if (i_xgemm_desc->n > 7) { l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " #pragma simd vectorlength(8)\n"); libxsmm_append_code_as_string(io_generated_code, l_new_code, l_code_length); } else if (i_xgemm_desc->n > 3) { l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " #pragma simd vectorlength(4)\n"); libxsmm_append_code_as_string(io_generated_code, l_new_code, l_code_length); } else if (i_xgemm_desc->n > 1) { l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " #pragma simd vectorlength(2)\n"); 
libxsmm_append_code_as_string(io_generated_code, l_new_code, l_code_length); } else { } } else if ((strcmp(i_arch, "knl") == 0) || (strcmp(i_arch, "knm") == 0) || (strcmp(i_arch, "skx") == 0) || (strcmp(i_arch, "clx") == 0) || (strcmp(i_arch, "cpx") == 0)) { if ((i_xgemm_desc->n > 1)) { l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " #pragma simd vectorlength(16)\n"); libxsmm_append_code_as_string(io_generated_code, l_new_code, l_code_length); } } else { LIBXSMM_HANDLE_ERROR(io_generated_code, LIBXSMM_ERR_ARCH); return; } if ((i_xgemm_desc->n > 1) && ((LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0) && ((LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0)) { l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " #pragma vector aligned\n"); libxsmm_append_code_as_string(io_generated_code, l_new_code, l_code_length); } /* generate the actuel kernel */ l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " for ( l_n = 0; l_n < %u; l_n++) {\n", (unsigned int)i_xgemm_desc->n); libxsmm_append_code_as_string(io_generated_code, l_new_code, l_code_length); for (l_m = 0; l_m < (unsigned int)i_xgemm_desc->m; l_m++) { l_row_elements = i_row_idx[l_m + 1] - i_row_idx[l_m]; for (l_z = 0; l_z < l_row_elements; l_z++) { /* * check k such that we just use columns which actually need to * be multiplied */ if (i_column_idx[i_row_idx[l_m] + l_z] < (unsigned int)i_xgemm_desc->k) { l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " C[%u+l_n] += A[%u] * B[%u+l_n];\n", l_m * i_xgemm_desc->ldc, i_row_idx[l_m] + l_z, i_column_idx[i_row_idx[l_m] + l_z] * i_xgemm_desc->ldb); libxsmm_append_code_as_string(io_generated_code, l_new_code, l_code_length); l_flop_count += 2; } } } l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " }\n"); libxsmm_append_code_as_string(io_generated_code, l_new_code, l_code_length); /* add flop counter */ l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "\n#ifndef NDEBUG\n#ifdef _OPENMP\n#pragma omp atomic\n#endif\nlibxsmm_num_total_flops += %u;\n#endif\n", l_flop_count * (unsigned int)i_xgemm_desc->m); libxsmm_append_code_as_string(io_generated_code, l_new_code, l_code_length); }
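/*
 * Illustration only: the C source that libxsmm_generator_spgemm_csr_asparse()
 * above would plausibly emit for a hypothetical FP64 descriptor with M=2,
 * N=4, K=3, ldb=ldc=4, beta=0, aligned A and C, arch "hsw", and a CSR matrix
 * A with row_ptr = {0, 2, 3} and col_idx = {0, 2, 1} (three stored non-zeros).
 * The wrapping function signature and kernel name are invented; the generator
 * itself only produces the body through the LIBXSMM_SNPRINTF format strings
 * shown above.
 */
void spgemm_csr_asparse_2x4x3(const double *A, const double *B, double *C)
{
  unsigned int l_n = 0;
  unsigned int l_m = 0;
  /* beta = 0: clear C row by row */
  for ( l_m = 0; l_m < 2; l_m++) {
    #pragma simd
    #pragma vector aligned
    for ( l_n = 0; l_n < 4; l_n++) { C[(l_m*4)+l_n] = 0.0; }
  }

  /* one fully unrolled statement per stored element of A */
  #pragma simd vectorlength(4)
  #pragma vector aligned
  for ( l_n = 0; l_n < 4; l_n++) {
    C[0+l_n] += A[0] * B[0+l_n];  /* A(0,0) */
    C[0+l_n] += A[1] * B[8+l_n];  /* A(0,2) */
    C[4+l_n] += A[2] * B[4+l_n];  /* A(1,1) */
  }
  /* in debug builds the generator additionally appends an OpenMP-atomic
     increment of libxsmm_num_total_flops (by l_flop_count*M = 12 here,
     following the generator's own counting) */
}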
rhs.c
//-------------------------------------------------------------------------// // // // This benchmark is an OpenMP C version of the NPB SP code. This OpenMP // // C version is developed by the Center for Manycore Programming at Seoul // // National University and derived from the OpenMP Fortran versions in // // "NPB3.3-OMP" developed by NAS. // // // // Permission to use, copy, distribute and modify this software for any // // purpose with or without fee is hereby granted. This software is // // provided "as is" without express or implied warranty. // // // // Information on NPB 3.3, including the technical report, the original // // specifications, source code, results and information on how to submit // // new results, is available at: // // // // http://www.nas.nasa.gov/Software/NPB/ // // // // Send comments or suggestions for this OpenMP C version to // // cmp@aces.snu.ac.kr // // // // Center for Manycore Programming // // School of Computer Science and Engineering // // Seoul National University // // Seoul 151-744, Korea // // // // E-mail: cmp@aces.snu.ac.kr // // // //-------------------------------------------------------------------------// //-------------------------------------------------------------------------// // Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo, // // and Jaejin Lee // //-------------------------------------------------------------------------// #include <math.h> #include "header.h" void compute_rhs() { int i, j, k, m; double aux, rho_inv, uijk, up1, um1, vijk, vp1, vm1, wijk, wp1, wm1; /* //kai int k1,k2,k3,k4,k5,k6,k7,k8,k9,k10, k11; consistent_data(&k1, "int", 1); consistent_data(&k2, "int", 1); consistent_data(&k3, "int", 1); consistent_data(&k4, "int", 1); consistent_data(&k5, "int", 1); consistent_data(&k6, "int", 1); consistent_data(&k7, "int", 1); consistent_data(&k8, "int", 1); consistent_data(&k9, "int", 1); consistent_data(&k10, "int", 1); consistent_data(&k11, "int", 1); */ if (timeron) timer_start(t_rhs); #pragma omp parallel default(shared) private(i,j,k,m,rho_inv,aux,uijk, \ up1,um1,vijk,vp1,vm1,wijk,wp1,wm1) { //--------------------------------------------------------------------- // compute the reciprocal of density, and the kinetic energy, // and the speed of sound. 
//--------------------------------------------------------------------- #pragma omp for schedule(static) nowait for (k = k1+1; k <= grid_points[2]-1; k++) { for (j = 0; j <= grid_points[1]-1; j++) { for (i = 0; i <= grid_points[0]-1; i++) { rho_inv = 1.0/u[k][j][i][0]; rho_i[k][j][i] = rho_inv; us[k][j][i] = u[k][j][i][1] * rho_inv; vs[k][j][i] = u[k][j][i][2] * rho_inv; ws[k][j][i] = u[k][j][i][3] * rho_inv; square[k][j][i] = 0.5* ( u[k][j][i][1]*u[k][j][i][1] + u[k][j][i][2]*u[k][j][i][2] + u[k][j][i][3]*u[k][j][i][3] ) * rho_inv; qs[k][j][i] = square[k][j][i] * rho_inv; //------------------------------------------------------------------- // (don't need speed and ainx until the lhs computation) //------------------------------------------------------------------- aux = c1c2*rho_inv* (u[k][j][i][4] - square[k][j][i]); speed[k][j][i] = sqrt(aux); } } //kai k1 = 0; } //--------------------------------------------------------------------- // copy the exact forcing term to the right hand side; because // this forcing term is known, we can store it on the whole grid // including the boundary //--------------------------------------------------------------------- #pragma omp for schedule(static) for (k = k2+1; k <= nz2+1; k++) { for (j = 0; j <= ny2+1; j++) { for (i = 0; i <= nx2+1; i++) { for (m = 0; m < 5; m++) { rhs[k][j][i][m] = forcing[k][j][i][m]; } } } //kai k2 = 0; } //--------------------------------------------------------------------- // compute xi-direction fluxes //--------------------------------------------------------------------- #pragma omp master if (timeron) timer_start(t_rhsx); #pragma omp for schedule(static) nowait for (k = k3+1; k <= nz2; k++) { for (j = 1; j <= ny2; j++) { for (i = 1; i <= nx2; i++) { uijk = us[k][j][i]; up1 = us[k][j][i+1]; um1 = us[k][j][i-1]; rhs[k][j][i][0] = rhs[k][j][i][0] + dx1tx1 * (u[k][j][i+1][0] - 2.0*u[k][j][i][0] + u[k][j][i-1][0]) - tx2 * (u[k][j][i+1][1] - u[k][j][i-1][1]); rhs[k][j][i][1] = rhs[k][j][i][1] + dx2tx1 * (u[k][j][i+1][1] - 2.0*u[k][j][i][1] + u[k][j][i-1][1]) + xxcon2*con43 * (up1 - 2.0*uijk + um1) - tx2 * (u[k][j][i+1][1]*up1 - u[k][j][i-1][1]*um1 + (u[k][j][i+1][4] - square[k][j][i+1] - u[k][j][i-1][4] + square[k][j][i-1]) * c2); rhs[k][j][i][2] = rhs[k][j][i][2] + dx3tx1 * (u[k][j][i+1][2] - 2.0*u[k][j][i][2] + u[k][j][i-1][2]) + xxcon2 * (vs[k][j][i+1] - 2.0*vs[k][j][i] + vs[k][j][i-1]) - tx2 * (u[k][j][i+1][2]*up1 - u[k][j][i-1][2]*um1); rhs[k][j][i][3] = rhs[k][j][i][3] + dx4tx1 * (u[k][j][i+1][3] - 2.0*u[k][j][i][3] + u[k][j][i-1][3]) + xxcon2 * (ws[k][j][i+1] - 2.0*ws[k][j][i] + ws[k][j][i-1]) - tx2 * (u[k][j][i+1][3]*up1 - u[k][j][i-1][3]*um1); rhs[k][j][i][4] = rhs[k][j][i][4] + dx5tx1 * (u[k][j][i+1][4] - 2.0*u[k][j][i][4] + u[k][j][i-1][4]) + xxcon3 * (qs[k][j][i+1] - 2.0*qs[k][j][i] + qs[k][j][i-1]) + xxcon4 * (up1*up1 - 2.0*uijk*uijk + um1*um1) + xxcon5 * (u[k][j][i+1][4]*rho_i[k][j][i+1] - 2.0*u[k][j][i][4]*rho_i[k][j][i] + u[k][j][i-1][4]*rho_i[k][j][i-1]) - tx2 * ( (c1*u[k][j][i+1][4] - c2*square[k][j][i+1])*up1 - (c1*u[k][j][i-1][4] - c2*square[k][j][i-1])*um1 ); } } //--------------------------------------------------------------------- // add fourth order xi-direction dissipation //--------------------------------------------------------------------- for (j = 1; j <= ny2; j++) { i = 1; for (m = 0; m < 5; m++) { rhs[k][j][i][m] = rhs[k][j][i][m]- dssp * (5.0*u[k][j][i][m] - 4.0*u[k][j][i+1][m] + u[k][j][i+2][m]); } i = 2; for (m = 0; m < 5; m++) { rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * 
(-4.0*u[k][j][i-1][m] + 6.0*u[k][j][i][m] - 4.0*u[k][j][i+1][m] + u[k][j][i+2][m]); } } for (j = 1; j <= ny2; j++) { for (i = 3; i <= nx2-2; i++) { for (m = 0; m < 5; m++) { rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * ( u[k][j][i-2][m] - 4.0*u[k][j][i-1][m] + 6.0*u[k][j][i][m] - 4.0*u[k][j][i+1][m] + u[k][j][i+2][m] ); } } } for (j = 1; j <= ny2; j++) { i = nx2-1; for (m = 0; m < 5; m++) { rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * ( u[k][j][i-2][m] - 4.0*u[k][j][i-1][m] + 6.0*u[k][j][i][m] - 4.0*u[k][j][i+1][m] ); } i = nx2; for (m = 0; m < 5; m++) { rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * ( u[k][j][i-2][m] - 4.0*u[k][j][i-1][m] + 5.0*u[k][j][i][m] ); } } //kai k3 = 0; } #pragma omp master { if (timeron) timer_stop(t_rhsx); //--------------------------------------------------------------------- // compute eta-direction fluxes //--------------------------------------------------------------------- if (timeron) timer_start(t_rhsy); } #pragma omp for schedule(static) for (k = k4+1; k <= nz2; k++) { for (j = 1; j <= ny2; j++) { for (i = 1; i <= nx2; i++) { vijk = vs[k][j][i]; vp1 = vs[k][j+1][i]; vm1 = vs[k][j-1][i]; rhs[k][j][i][0] = rhs[k][j][i][0] + dy1ty1 * (u[k][j+1][i][0] - 2.0*u[k][j][i][0] + u[k][j-1][i][0]) - ty2 * (u[k][j+1][i][2] - u[k][j-1][i][2]); rhs[k][j][i][1] = rhs[k][j][i][1] + dy2ty1 * (u[k][j+1][i][1] - 2.0*u[k][j][i][1] + u[k][j-1][i][1]) + yycon2 * (us[k][j+1][i] - 2.0*us[k][j][i] + us[k][j-1][i]) - ty2 * (u[k][j+1][i][1]*vp1 - u[k][j-1][i][1]*vm1); rhs[k][j][i][2] = rhs[k][j][i][2] + dy3ty1 * (u[k][j+1][i][2] - 2.0*u[k][j][i][2] + u[k][j-1][i][2]) + yycon2*con43 * (vp1 - 2.0*vijk + vm1) - ty2 * (u[k][j+1][i][2]*vp1 - u[k][j-1][i][2]*vm1 + (u[k][j+1][i][4] - square[k][j+1][i] - u[k][j-1][i][4] + square[k][j-1][i]) * c2); rhs[k][j][i][3] = rhs[k][j][i][3] + dy4ty1 * (u[k][j+1][i][3] - 2.0*u[k][j][i][3] + u[k][j-1][i][3]) + yycon2 * (ws[k][j+1][i] - 2.0*ws[k][j][i] + ws[k][j-1][i]) - ty2 * (u[k][j+1][i][3]*vp1 - u[k][j-1][i][3]*vm1); rhs[k][j][i][4] = rhs[k][j][i][4] + dy5ty1 * (u[k][j+1][i][4] - 2.0*u[k][j][i][4] + u[k][j-1][i][4]) + yycon3 * (qs[k][j+1][i] - 2.0*qs[k][j][i] + qs[k][j-1][i]) + yycon4 * (vp1*vp1 - 2.0*vijk*vijk + vm1*vm1) + yycon5 * (u[k][j+1][i][4]*rho_i[k][j+1][i] - 2.0*u[k][j][i][4]*rho_i[k][j][i] + u[k][j-1][i][4]*rho_i[k][j-1][i]) - ty2 * ((c1*u[k][j+1][i][4] - c2*square[k][j+1][i]) * vp1 - (c1*u[k][j-1][i][4] - c2*square[k][j-1][i]) * vm1); } } //--------------------------------------------------------------------- // add fourth order eta-direction dissipation //--------------------------------------------------------------------- j = 1; for (i = 1; i <= nx2; i++) { for (m = 0; m < 5; m++) { rhs[k][j][i][m] = rhs[k][j][i][m]- dssp * ( 5.0*u[k][j][i][m] - 4.0*u[k][j+1][i][m] + u[k][j+2][i][m]); } } j = 2; for (i = 1; i <= nx2; i++) { for (m = 0; m < 5; m++) { rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * (-4.0*u[k][j-1][i][m] + 6.0*u[k][j][i][m] - 4.0*u[k][j+1][i][m] + u[k][j+2][i][m]); } } for (j = 3; j <= ny2-2; j++) { for (i = 1; i <= nx2; i++) { for (m = 0; m < 5; m++) { rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * ( u[k][j-2][i][m] - 4.0*u[k][j-1][i][m] + 6.0*u[k][j][i][m] - 4.0*u[k][j+1][i][m] + u[k][j+2][i][m] ); } } } j = ny2-1; for (i = 1; i <= nx2; i++) { for (m = 0; m < 5; m++) { rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * ( u[k][j-2][i][m] - 4.0*u[k][j-1][i][m] + 6.0*u[k][j][i][m] - 4.0*u[k][j+1][i][m] ); } } j = ny2; for (i = 1; i <= nx2; i++) { for (m = 0; m < 5; m++) { rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * ( u[k][j-2][i][m] - 
4.0*u[k][j-1][i][m] + 5.0*u[k][j][i][m] ); } } //kai k4 = 0; } #pragma omp master { if (timeron) timer_stop(t_rhsy); //--------------------------------------------------------------------- // compute zeta-direction fluxes //--------------------------------------------------------------------- if (timeron) timer_start(t_rhsz); } #pragma omp for schedule(static) for (k = k5+1; k <= grid_points[2]-2; k++) { for (j = 1; j <= grid_points[1]-2; j++) { for (i = 1; i <= grid_points[0]-2; i++) { wijk = ws[k][j][i]; wp1 = ws[k+1][j][i]; wm1 = ws[k-1][j][i]; rhs[k][j][i][0] = rhs[k][j][i][0] + dz1tz1 * (u[k+1][j][i][0] - 2.0*u[k][j][i][0] + u[k-1][j][i][0]) - tz2 * (u[k+1][j][i][3] - u[k-1][j][i][3]); rhs[k][j][i][1] = rhs[k][j][i][1] + dz2tz1 * (u[k+1][j][i][1] - 2.0*u[k][j][i][1] + u[k-1][j][i][1]) + zzcon2 * (us[k+1][j][i] - 2.0*us[k][j][i] + us[k-1][j][i]) - tz2 * (u[k+1][j][i][1]*wp1 - u[k-1][j][i][1]*wm1); rhs[k][j][i][2] = rhs[k][j][i][2] + dz3tz1 * (u[k+1][j][i][2] - 2.0*u[k][j][i][2] + u[k-1][j][i][2]) + zzcon2 * (vs[k+1][j][i] - 2.0*vs[k][j][i] + vs[k-1][j][i]) - tz2 * (u[k+1][j][i][2]*wp1 - u[k-1][j][i][2]*wm1); rhs[k][j][i][3] = rhs[k][j][i][3] + dz4tz1 * (u[k+1][j][i][3] - 2.0*u[k][j][i][3] + u[k-1][j][i][3]) + zzcon2*con43 * (wp1 - 2.0*wijk + wm1) - tz2 * (u[k+1][j][i][3]*wp1 - u[k-1][j][i][3]*wm1 + (u[k+1][j][i][4] - square[k+1][j][i] - u[k-1][j][i][4] + square[k-1][j][i]) * c2); rhs[k][j][i][4] = rhs[k][j][i][4] + dz5tz1 * (u[k+1][j][i][4] - 2.0*u[k][j][i][4] + u[k-1][j][i][4]) + zzcon3 * (qs[k+1][j][i] - 2.0*qs[k][j][i] + qs[k-1][j][i]) + zzcon4 * (wp1*wp1 - 2.0*wijk*wijk + wm1*wm1) + zzcon5 * (u[k+1][j][i][4]*rho_i[k+1][j][i] - 2.0*u[k][j][i][4]*rho_i[k][j][i] + u[k-1][j][i][4]*rho_i[k-1][j][i]) - tz2 * ((c1*u[k+1][j][i][4] - c2*square[k+1][j][i])*wp1 - (c1*u[k-1][j][i][4] - c2*square[k-1][j][i])*wm1); } } //kai k5 = 0; } //--------------------------------------------------------------------- // add fourth order zeta-direction dissipation //--------------------------------------------------------------------- k = 1; #pragma omp for schedule(static) nowait for (j = k6+1; j <= grid_points[1]-2; j++) { for (i = 1; i <= grid_points[0]-2; i++) { for (m = 0; m < 5; m++) { rhs[k][j][i][m] = rhs[k][j][i][m]- dssp * (5.0*u[k][j][i][m] - 4.0*u[k+1][j][i][m] + u[k+2][j][i][m]); } } //kai k6 = 0; } k = 2; #pragma omp for schedule(static) nowait for (j = k7+1; j <= grid_points[1]-2; j++) { for (i = 1; i <= grid_points[0]-2; i++) { for (m = 0; m < 5; m++) { rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * (-4.0*u[k-1][j][i][m] + 6.0*u[k][j][i][m] - 4.0*u[k+1][j][i][m] + u[k+2][j][i][m]); } } //kai k7 = 0; } #pragma omp for schedule(static) nowait for (k = k8+1; k <= grid_points[2]-4; k++) { for (j = 1; j <= grid_points[1]-2; j++) { for (i = 1; i <= grid_points[0]-2; i++) { for (m = 0; m < 5; m++) { rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * ( u[k-2][j][i][m] - 4.0*u[k-1][j][i][m] + 6.0*u[k][j][i][m] - 4.0*u[k+1][j][i][m] + u[k+2][j][i][m] ); } } } //kai k8 = 2; } k = grid_points[2]-3; #pragma omp for schedule(static) nowait for (j = k9+1; j <= grid_points[1]-2; j++) { for (i = 1; i <= grid_points[0]-2; i++) { for (m = 0; m < 5; m++) { rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * ( u[k-2][j][i][m] - 4.0*u[k-1][j][i][m] + 6.0*u[k][j][i][m] - 4.0*u[k+1][j][i][m] ); } } //kai k9 = 0; } k = grid_points[2]-2; #pragma omp for schedule(static) for (j = k10+1; j <= grid_points[1]-2; j++) { for (i = 1; i <= grid_points[0]-2; i++) { for (m = 0; m < 5; m++) { rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * ( 
u[k-2][j][i][m] - 4.0*u[k-1][j][i][m] + 5.0*u[k][j][i][m] ); } } //kai k10 = 0; } #pragma omp master if (timeron) timer_stop(t_rhsz); #pragma omp for schedule(static) nowait for (k = k11+1; k <= nz2; k++) { for (j = 1; j <= ny2; j++) { for (i = 1; i <= nx2; i++) { for (m = 0; m < 5; m++) { rhs[k][j][i][m] = rhs[k][j][i][m] * dt; } } } //kai k11 = 0; } } //end parallel if (timeron) timer_stop(t_rhs); }
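A side note on the //kai edits in this variant of rhs.c: every worksharing loop starts at kN+1 instead of its normal lower bound and zeroes kN again once its body has executed, and the commented-out consistent_data() calls suggest k1..k11 are meant to live in checkpointed storage so that a re-executed compute_rhs can skip planes that already finished. The file itself never shows where kN is advanced, so the following is only a minimal, self-contained sketch of that resume idiom under the assumption that the progress counter is updated after each completed plane (the k1 = k update is added here for illustration and is not in the benchmark source):

#include <stdio.h>

/* k1 stands in for a progress counter kept in persistent/checkpointed
 * storage (cf. the commented-out consistent_data() calls above). */
static int k1 = 0;

static void process_plane(int k) { printf("processing plane %d\n", k); }

/* Resumable sweep over planes 1..nz: a fresh run starts at plane 1; after a
 * fault and re-execution, planes up to k1 are skipped. Once the sweep
 * completes, k1 is reset so the next full sweep starts from the beginning,
 * mirroring the "//kai kN = 0;" statements in compute_rhs. */
void resumable_sweep(int nz)
{
  int k;
  for (k = k1 + 1; k <= nz; k++) {
    process_plane(k);
    k1 = k;   /* illustration only: record progress after each plane */
  }
  k1 = 0;     /* sweep done: clear the marker for the next invocation */
}

int main(void) { resumable_sweep(4); return 0; }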
//-------------------------------------------------------------------------// // // // This benchmark is an OpenMP C version of the NPB SP code. This OpenMP // // C version is developed by the Center for Manycore Programming at Seoul // // National University and derived from the OpenMP Fortran versions in // // "NPB3.3-OMP" developed by NAS. // // // // Permission to use, copy, distribute and modify this software for any // // purpose with or without fee is hereby granted. This software is // // provided "as is" without express or implied warranty. // // // // Information on NPB 3.3, including the technical report, the original // // specifications, source code, results and information on how to submit // // new results, is available at: // // // // http://www.nas.nasa.gov/Software/NPB/ // // // // Send comments or suggestions for this OpenMP C version to // // cmp@aces.snu.ac.kr // // // // Center for Manycore Programming // // School of Computer Science and Engineering // // Seoul National University // // Seoul 151-744, Korea // // // // E-mail: cmp@aces.snu.ac.kr // // // //-------------------------------------------------------------------------// //-------------------------------------------------------------------------// // Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo, // // and Jaejin Lee // //-------------------------------------------------------------------------// #include <math.h> #include "header.h" void compute_rhs() { int i, j, k, m; double aux, rho_inv, uijk, up1, um1, vijk, vp1, vm1, wijk, wp1, wm1; /* //kai int k1,k2,k3,k4,k5,k6,k7,k8,k9,k10, k11; consistent_data(&k1, "int", 1); consistent_data(&k2, "int", 1); consistent_data(&k3, "int", 1); consistent_data(&k4, "int", 1); consistent_data(&k5, "int", 1); consistent_data(&k6, "int", 1); consistent_data(&k7, "int", 1); consistent_data(&k8, "int", 1); consistent_data(&k9, "int", 1); consistent_data(&k10, "int", 1); consistent_data(&k11, "int", 1); */ if (timeron) timer_start(t_rhs); { //--------------------------------------------------------------------- // compute the reciprocal of density, and the kinetic energy, // and the speed of sound. 
//--------------------------------------------------------------------- for (k = k1+1; k <= grid_points[2]-1; k++) { for (j = 0; j <= grid_points[1]-1; j++) { for (i = 0; i <= grid_points[0]-1; i++) { rho_inv = 1.0/u[k][j][i][0]; rho_i[k][j][i] = rho_inv; us[k][j][i] = u[k][j][i][1] * rho_inv; vs[k][j][i] = u[k][j][i][2] * rho_inv; ws[k][j][i] = u[k][j][i][3] * rho_inv; square[k][j][i] = 0.5* ( u[k][j][i][1]*u[k][j][i][1] + u[k][j][i][2]*u[k][j][i][2] + u[k][j][i][3]*u[k][j][i][3] ) * rho_inv; qs[k][j][i] = square[k][j][i] * rho_inv; //------------------------------------------------------------------- // (don't need speed and ainx until the lhs computation) //------------------------------------------------------------------- aux = c1c2*rho_inv* (u[k][j][i][4] - square[k][j][i]); speed[k][j][i] = sqrt(aux); } } //kai k1 = 0; } //--------------------------------------------------------------------- // copy the exact forcing term to the right hand side; because // this forcing term is known, we can store it on the whole grid // including the boundary //--------------------------------------------------------------------- for (k = k2+1; k <= nz2+1; k++) { for (j = 0; j <= ny2+1; j++) { for (i = 0; i <= nx2+1; i++) { for (m = 0; m < 5; m++) { rhs[k][j][i][m] = forcing[k][j][i][m]; } } } //kai k2 = 0; } //--------------------------------------------------------------------- // compute xi-direction fluxes //--------------------------------------------------------------------- if (timeron) timer_start(t_rhsx); for (k = k3+1; k <= nz2; k++) { for (j = 1; j <= ny2; j++) { for (i = 1; i <= nx2; i++) { uijk = us[k][j][i]; up1 = us[k][j][i+1]; um1 = us[k][j][i-1]; rhs[k][j][i][0] = rhs[k][j][i][0] + dx1tx1 * (u[k][j][i+1][0] - 2.0*u[k][j][i][0] + u[k][j][i-1][0]) - tx2 * (u[k][j][i+1][1] - u[k][j][i-1][1]); rhs[k][j][i][1] = rhs[k][j][i][1] + dx2tx1 * (u[k][j][i+1][1] - 2.0*u[k][j][i][1] + u[k][j][i-1][1]) + xxcon2*con43 * (up1 - 2.0*uijk + um1) - tx2 * (u[k][j][i+1][1]*up1 - u[k][j][i-1][1]*um1 + (u[k][j][i+1][4] - square[k][j][i+1] - u[k][j][i-1][4] + square[k][j][i-1]) * c2); rhs[k][j][i][2] = rhs[k][j][i][2] + dx3tx1 * (u[k][j][i+1][2] - 2.0*u[k][j][i][2] + u[k][j][i-1][2]) + xxcon2 * (vs[k][j][i+1] - 2.0*vs[k][j][i] + vs[k][j][i-1]) - tx2 * (u[k][j][i+1][2]*up1 - u[k][j][i-1][2]*um1); rhs[k][j][i][3] = rhs[k][j][i][3] + dx4tx1 * (u[k][j][i+1][3] - 2.0*u[k][j][i][3] + u[k][j][i-1][3]) + xxcon2 * (ws[k][j][i+1] - 2.0*ws[k][j][i] + ws[k][j][i-1]) - tx2 * (u[k][j][i+1][3]*up1 - u[k][j][i-1][3]*um1); rhs[k][j][i][4] = rhs[k][j][i][4] + dx5tx1 * (u[k][j][i+1][4] - 2.0*u[k][j][i][4] + u[k][j][i-1][4]) + xxcon3 * (qs[k][j][i+1] - 2.0*qs[k][j][i] + qs[k][j][i-1]) + xxcon4 * (up1*up1 - 2.0*uijk*uijk + um1*um1) + xxcon5 * (u[k][j][i+1][4]*rho_i[k][j][i+1] - 2.0*u[k][j][i][4]*rho_i[k][j][i] + u[k][j][i-1][4]*rho_i[k][j][i-1]) - tx2 * ( (c1*u[k][j][i+1][4] - c2*square[k][j][i+1])*up1 - (c1*u[k][j][i-1][4] - c2*square[k][j][i-1])*um1 ); } } //--------------------------------------------------------------------- // add fourth order xi-direction dissipation //--------------------------------------------------------------------- for (j = 1; j <= ny2; j++) { i = 1; for (m = 0; m < 5; m++) { rhs[k][j][i][m] = rhs[k][j][i][m]- dssp * (5.0*u[k][j][i][m] - 4.0*u[k][j][i+1][m] + u[k][j][i+2][m]); } i = 2; for (m = 0; m < 5; m++) { rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * (-4.0*u[k][j][i-1][m] + 6.0*u[k][j][i][m] - 4.0*u[k][j][i+1][m] + u[k][j][i+2][m]); } } for (j = 1; j <= ny2; j++) { for (i = 3; i <= nx2-2; i++) { 
for (m = 0; m < 5; m++) { rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * ( u[k][j][i-2][m] - 4.0*u[k][j][i-1][m] + 6.0*u[k][j][i][m] - 4.0*u[k][j][i+1][m] + u[k][j][i+2][m] ); } } } for (j = 1; j <= ny2; j++) { i = nx2-1; for (m = 0; m < 5; m++) { rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * ( u[k][j][i-2][m] - 4.0*u[k][j][i-1][m] + 6.0*u[k][j][i][m] - 4.0*u[k][j][i+1][m] ); } i = nx2; for (m = 0; m < 5; m++) { rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * ( u[k][j][i-2][m] - 4.0*u[k][j][i-1][m] + 5.0*u[k][j][i][m] ); } } //kai k3 = 0; } if (timeron) timer_stop(t_rhsx); //--------------------------------------------------------------------- // compute eta-direction fluxes //--------------------------------------------------------------------- if (timeron) timer_start(t_rhsy); for (k = k4+1; k <= nz2; k++) { for (j = 1; j <= ny2; j++) { for (i = 1; i <= nx2; i++) { vijk = vs[k][j][i]; vp1 = vs[k][j+1][i]; vm1 = vs[k][j-1][i]; rhs[k][j][i][0] = rhs[k][j][i][0] + dy1ty1 * (u[k][j+1][i][0] - 2.0*u[k][j][i][0] + u[k][j-1][i][0]) - ty2 * (u[k][j+1][i][2] - u[k][j-1][i][2]); rhs[k][j][i][1] = rhs[k][j][i][1] + dy2ty1 * (u[k][j+1][i][1] - 2.0*u[k][j][i][1] + u[k][j-1][i][1]) + yycon2 * (us[k][j+1][i] - 2.0*us[k][j][i] + us[k][j-1][i]) - ty2 * (u[k][j+1][i][1]*vp1 - u[k][j-1][i][1]*vm1); rhs[k][j][i][2] = rhs[k][j][i][2] + dy3ty1 * (u[k][j+1][i][2] - 2.0*u[k][j][i][2] + u[k][j-1][i][2]) + yycon2*con43 * (vp1 - 2.0*vijk + vm1) - ty2 * (u[k][j+1][i][2]*vp1 - u[k][j-1][i][2]*vm1 + (u[k][j+1][i][4] - square[k][j+1][i] - u[k][j-1][i][4] + square[k][j-1][i]) * c2); rhs[k][j][i][3] = rhs[k][j][i][3] + dy4ty1 * (u[k][j+1][i][3] - 2.0*u[k][j][i][3] + u[k][j-1][i][3]) + yycon2 * (ws[k][j+1][i] - 2.0*ws[k][j][i] + ws[k][j-1][i]) - ty2 * (u[k][j+1][i][3]*vp1 - u[k][j-1][i][3]*vm1); rhs[k][j][i][4] = rhs[k][j][i][4] + dy5ty1 * (u[k][j+1][i][4] - 2.0*u[k][j][i][4] + u[k][j-1][i][4]) + yycon3 * (qs[k][j+1][i] - 2.0*qs[k][j][i] + qs[k][j-1][i]) + yycon4 * (vp1*vp1 - 2.0*vijk*vijk + vm1*vm1) + yycon5 * (u[k][j+1][i][4]*rho_i[k][j+1][i] - 2.0*u[k][j][i][4]*rho_i[k][j][i] + u[k][j-1][i][4]*rho_i[k][j-1][i]) - ty2 * ((c1*u[k][j+1][i][4] - c2*square[k][j+1][i]) * vp1 - (c1*u[k][j-1][i][4] - c2*square[k][j-1][i]) * vm1); } } //--------------------------------------------------------------------- // add fourth order eta-direction dissipation //--------------------------------------------------------------------- j = 1; for (i = 1; i <= nx2; i++) { for (m = 0; m < 5; m++) { rhs[k][j][i][m] = rhs[k][j][i][m]- dssp * ( 5.0*u[k][j][i][m] - 4.0*u[k][j+1][i][m] + u[k][j+2][i][m]); } } j = 2; for (i = 1; i <= nx2; i++) { for (m = 0; m < 5; m++) { rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * (-4.0*u[k][j-1][i][m] + 6.0*u[k][j][i][m] - 4.0*u[k][j+1][i][m] + u[k][j+2][i][m]); } } for (j = 3; j <= ny2-2; j++) { for (i = 1; i <= nx2; i++) { for (m = 0; m < 5; m++) { rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * ( u[k][j-2][i][m] - 4.0*u[k][j-1][i][m] + 6.0*u[k][j][i][m] - 4.0*u[k][j+1][i][m] + u[k][j+2][i][m] ); } } } j = ny2-1; for (i = 1; i <= nx2; i++) { for (m = 0; m < 5; m++) { rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * ( u[k][j-2][i][m] - 4.0*u[k][j-1][i][m] + 6.0*u[k][j][i][m] - 4.0*u[k][j+1][i][m] ); } } j = ny2; for (i = 1; i <= nx2; i++) { for (m = 0; m < 5; m++) { rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * ( u[k][j-2][i][m] - 4.0*u[k][j-1][i][m] + 5.0*u[k][j][i][m] ); } } //kai k4 = 0; } if (timeron) timer_stop(t_rhsy); //--------------------------------------------------------------------- // compute zeta-direction fluxes 
//--------------------------------------------------------------------- if (timeron) timer_start(t_rhsz); for (k = k5+1; k <= grid_points[2]-2; k++) { for (j = 1; j <= grid_points[1]-2; j++) { for (i = 1; i <= grid_points[0]-2; i++) { wijk = ws[k][j][i]; wp1 = ws[k+1][j][i]; wm1 = ws[k-1][j][i]; rhs[k][j][i][0] = rhs[k][j][i][0] + dz1tz1 * (u[k+1][j][i][0] - 2.0*u[k][j][i][0] + u[k-1][j][i][0]) - tz2 * (u[k+1][j][i][3] - u[k-1][j][i][3]); rhs[k][j][i][1] = rhs[k][j][i][1] + dz2tz1 * (u[k+1][j][i][1] - 2.0*u[k][j][i][1] + u[k-1][j][i][1]) + zzcon2 * (us[k+1][j][i] - 2.0*us[k][j][i] + us[k-1][j][i]) - tz2 * (u[k+1][j][i][1]*wp1 - u[k-1][j][i][1]*wm1); rhs[k][j][i][2] = rhs[k][j][i][2] + dz3tz1 * (u[k+1][j][i][2] - 2.0*u[k][j][i][2] + u[k-1][j][i][2]) + zzcon2 * (vs[k+1][j][i] - 2.0*vs[k][j][i] + vs[k-1][j][i]) - tz2 * (u[k+1][j][i][2]*wp1 - u[k-1][j][i][2]*wm1); rhs[k][j][i][3] = rhs[k][j][i][3] + dz4tz1 * (u[k+1][j][i][3] - 2.0*u[k][j][i][3] + u[k-1][j][i][3]) + zzcon2*con43 * (wp1 - 2.0*wijk + wm1) - tz2 * (u[k+1][j][i][3]*wp1 - u[k-1][j][i][3]*wm1 + (u[k+1][j][i][4] - square[k+1][j][i] - u[k-1][j][i][4] + square[k-1][j][i]) * c2); rhs[k][j][i][4] = rhs[k][j][i][4] + dz5tz1 * (u[k+1][j][i][4] - 2.0*u[k][j][i][4] + u[k-1][j][i][4]) + zzcon3 * (qs[k+1][j][i] - 2.0*qs[k][j][i] + qs[k-1][j][i]) + zzcon4 * (wp1*wp1 - 2.0*wijk*wijk + wm1*wm1) + zzcon5 * (u[k+1][j][i][4]*rho_i[k+1][j][i] - 2.0*u[k][j][i][4]*rho_i[k][j][i] + u[k-1][j][i][4]*rho_i[k-1][j][i]) - tz2 * ((c1*u[k+1][j][i][4] - c2*square[k+1][j][i])*wp1 - (c1*u[k-1][j][i][4] - c2*square[k-1][j][i])*wm1); } } //kai k5 = 0; } //--------------------------------------------------------------------- // add fourth order zeta-direction dissipation //--------------------------------------------------------------------- k = 1; for (j = k6+1; j <= grid_points[1]-2; j++) { for (i = 1; i <= grid_points[0]-2; i++) { for (m = 0; m < 5; m++) { rhs[k][j][i][m] = rhs[k][j][i][m]- dssp * (5.0*u[k][j][i][m] - 4.0*u[k+1][j][i][m] + u[k+2][j][i][m]); } } //kai k6 = 0; } k = 2; for (j = k7+1; j <= grid_points[1]-2; j++) { for (i = 1; i <= grid_points[0]-2; i++) { for (m = 0; m < 5; m++) { rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * (-4.0*u[k-1][j][i][m] + 6.0*u[k][j][i][m] - 4.0*u[k+1][j][i][m] + u[k+2][j][i][m]); } } //kai k7 = 0; } for (k = k8+1; k <= grid_points[2]-4; k++) { for (j = 1; j <= grid_points[1]-2; j++) { for (i = 1; i <= grid_points[0]-2; i++) { for (m = 0; m < 5; m++) { rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * ( u[k-2][j][i][m] - 4.0*u[k-1][j][i][m] + 6.0*u[k][j][i][m] - 4.0*u[k+1][j][i][m] + u[k+2][j][i][m] ); } } } //kai k8 = 2; } k = grid_points[2]-3; for (j = k9+1; j <= grid_points[1]-2; j++) { for (i = 1; i <= grid_points[0]-2; i++) { for (m = 0; m < 5; m++) { rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * ( u[k-2][j][i][m] - 4.0*u[k-1][j][i][m] + 6.0*u[k][j][i][m] - 4.0*u[k+1][j][i][m] ); } } //kai k9 = 0; } k = grid_points[2]-2; for (j = k10+1; j <= grid_points[1]-2; j++) { for (i = 1; i <= grid_points[0]-2; i++) { for (m = 0; m < 5; m++) { rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * ( u[k-2][j][i][m] - 4.0*u[k-1][j][i][m] + 5.0*u[k][j][i][m] ); } } //kai k10 = 0; } if (timeron) timer_stop(t_rhsz); for (k = k11+1; k <= nz2; k++) { for (j = 1; j <= ny2; j++) { for (i = 1; i <= nx2; i++) { for (m = 0; m < 5; m++) { rhs[k][j][i][m] = rhs[k][j][i][m] * dt; } } } //kai k11 = 0; } } //end parallel if (timeron) timer_stop(t_rhs); }
//-------------------------------------------------------------------------// // // // This benchmark is an OpenMP C version of the NPB SP code. This OpenMP // // C version is developed by the Center for Manycore Programming at Seoul // // National University and derived from the OpenMP Fortran versions in // // "NPB3.3-OMP" developed by NAS. // // // // Permission to use, copy, distribute and modify this software for any // // purpose with or without fee is hereby granted. This software is // // provided "as is" without express or implied warranty. // // // // Information on NPB 3.3, including the technical report, the original // // specifications, source code, results and information on how to submit // // new results, is available at: // // // // http://www.nas.nasa.gov/Software/NPB/ // // // // Send comments or suggestions for this OpenMP C version to // // cmp@aces.snu.ac.kr // // // // Center for Manycore Programming // // School of Computer Science and Engineering // // Seoul National University // // Seoul 151-744, Korea // // // // E-mail: cmp@aces.snu.ac.kr // // // //-------------------------------------------------------------------------// //-------------------------------------------------------------------------// // Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo, // // and Jaejin Lee // //-------------------------------------------------------------------------// #include <math.h> #include "header.h" void compute_rhs() { int i, j, k, m; double aux, rho_inv, uijk, up1, um1, vijk, vp1, vm1, wijk, wp1, wm1; /* //kai int k1,k2,k3,k4,k5,k6,k7,k8,k9,k10, k11; consistent_data(&k1, "int", 1); consistent_data(&k2, "int", 1); consistent_data(&k3, "int", 1); consistent_data(&k4, "int", 1); consistent_data(&k5, "int", 1); consistent_data(&k6, "int", 1); consistent_data(&k7, "int", 1); consistent_data(&k8, "int", 1); consistent_data(&k9, "int", 1); consistent_data(&k10, "int", 1); consistent_data(&k11, "int", 1); */ if (timeron) timer_start(t_rhs); #pragma omp parallel default(shared) private(i,j,k,m,rho_inv,aux,uijk, \ up1,um1,vijk,vp1,vm1,wijk,wp1,wm1) { //--------------------------------------------------------------------- // compute the reciprocal of density, and the kinetic energy, // and the speed of sound. 
//--------------------------------------------------------------------- #pragma omp for schedule(static) nowait for (k = k1+1; k <= grid_points[2]-1; k++) { for (j = 0; j <= grid_points[1]-1; j++) { for (i = 0; i <= grid_points[0]-1; i++) { rho_inv = 1.0/u[k][j][i][0]; rho_i[k][j][i] = rho_inv; us[k][j][i] = u[k][j][i][1] * rho_inv; vs[k][j][i] = u[k][j][i][2] * rho_inv; ws[k][j][i] = u[k][j][i][3] * rho_inv; square[k][j][i] = 0.5* ( u[k][j][i][1]*u[k][j][i][1] + u[k][j][i][2]*u[k][j][i][2] + u[k][j][i][3]*u[k][j][i][3] ) * rho_inv; qs[k][j][i] = square[k][j][i] * rho_inv; //------------------------------------------------------------------- // (don't need speed and ainx until the lhs computation) //------------------------------------------------------------------- aux = c1c2*rho_inv* (u[k][j][i][4] - square[k][j][i]); speed[k][j][i] = sqrt(aux); } } //kai k1 = 0; } //--------------------------------------------------------------------- // copy the exact forcing term to the right hand side; because // this forcing term is known, we can store it on the whole grid // including the boundary //--------------------------------------------------------------------- #pragma omp for schedule(static) for (k = k2+1; k <= nz2+1; k++) { for (j = 0; j <= ny2+1; j++) { for (i = 0; i <= nx2+1; i++) { for (m = 0; m < 5; m++) { rhs[k][j][i][m] = forcing[k][j][i][m]; } } } //kai k2 = 0; } //--------------------------------------------------------------------- // compute xi-direction fluxes //--------------------------------------------------------------------- #pragma omp master if (timeron) timer_start(t_rhsx); #pragma omp for schedule(static) nowait for (k = k3+1; k <= nz2; k++) { for (j = 1; j <= ny2; j++) { for (i = 1; i <= nx2; i++) { uijk = us[k][j][i]; up1 = us[k][j][i+1]; um1 = us[k][j][i-1]; rhs[k][j][i][0] = rhs[k][j][i][0] + dx1tx1 * (u[k][j][i+1][0] - 2.0*u[k][j][i][0] + u[k][j][i-1][0]) - tx2 * (u[k][j][i+1][1] - u[k][j][i-1][1]); rhs[k][j][i][1] = rhs[k][j][i][1] + dx2tx1 * (u[k][j][i+1][1] - 2.0*u[k][j][i][1] + u[k][j][i-1][1]) + xxcon2*con43 * (up1 - 2.0*uijk + um1) - tx2 * (u[k][j][i+1][1]*up1 - u[k][j][i-1][1]*um1 + (u[k][j][i+1][4] - square[k][j][i+1] - u[k][j][i-1][4] + square[k][j][i-1]) * c2); rhs[k][j][i][2] = rhs[k][j][i][2] + dx3tx1 * (u[k][j][i+1][2] - 2.0*u[k][j][i][2] + u[k][j][i-1][2]) + xxcon2 * (vs[k][j][i+1] - 2.0*vs[k][j][i] + vs[k][j][i-1]) - tx2 * (u[k][j][i+1][2]*up1 - u[k][j][i-1][2]*um1); rhs[k][j][i][3] = rhs[k][j][i][3] + dx4tx1 * (u[k][j][i+1][3] - 2.0*u[k][j][i][3] + u[k][j][i-1][3]) + xxcon2 * (ws[k][j][i+1] - 2.0*ws[k][j][i] + ws[k][j][i-1]) - tx2 * (u[k][j][i+1][3]*up1 - u[k][j][i-1][3]*um1); rhs[k][j][i][4] = rhs[k][j][i][4] + dx5tx1 * (u[k][j][i+1][4] - 2.0*u[k][j][i][4] + u[k][j][i-1][4]) + xxcon3 * (qs[k][j][i+1] - 2.0*qs[k][j][i] + qs[k][j][i-1]) + xxcon4 * (up1*up1 - 2.0*uijk*uijk + um1*um1) + xxcon5 * (u[k][j][i+1][4]*rho_i[k][j][i+1] - 2.0*u[k][j][i][4]*rho_i[k][j][i] + u[k][j][i-1][4]*rho_i[k][j][i-1]) - tx2 * ( (c1*u[k][j][i+1][4] - c2*square[k][j][i+1])*up1 - (c1*u[k][j][i-1][4] - c2*square[k][j][i-1])*um1 ); } } //--------------------------------------------------------------------- // add fourth order xi-direction dissipation //--------------------------------------------------------------------- for (j = 1; j <= ny2; j++) { i = 1; for (m = 0; m < 5; m++) { rhs[k][j][i][m] = rhs[k][j][i][m]- dssp * (5.0*u[k][j][i][m] - 4.0*u[k][j][i+1][m] + u[k][j][i+2][m]); } i = 2; for (m = 0; m < 5; m++) { rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * 
(-4.0*u[k][j][i-1][m] + 6.0*u[k][j][i][m] - 4.0*u[k][j][i+1][m] + u[k][j][i+2][m]); } } for (j = 1; j <= ny2; j++) { for (i = 3; i <= nx2-2; i++) { for (m = 0; m < 5; m++) { rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * ( u[k][j][i-2][m] - 4.0*u[k][j][i-1][m] + 6.0*u[k][j][i][m] - 4.0*u[k][j][i+1][m] + u[k][j][i+2][m] ); } } } for (j = 1; j <= ny2; j++) { i = nx2-1; for (m = 0; m < 5; m++) { rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * ( u[k][j][i-2][m] - 4.0*u[k][j][i-1][m] + 6.0*u[k][j][i][m] - 4.0*u[k][j][i+1][m] ); } i = nx2; for (m = 0; m < 5; m++) { rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * ( u[k][j][i-2][m] - 4.0*u[k][j][i-1][m] + 5.0*u[k][j][i][m] ); } } //kai k3 = 0; } #pragma omp master { if (timeron) timer_stop(t_rhsx); //--------------------------------------------------------------------- // compute eta-direction fluxes //--------------------------------------------------------------------- if (timeron) timer_start(t_rhsy); } #pragma omp for schedule(static) for (k = k4+1; k <= nz2; k++) { for (j = 1; j <= ny2; j++) { for (i = 1; i <= nx2; i++) { vijk = vs[k][j][i]; vp1 = vs[k][j+1][i]; vm1 = vs[k][j-1][i]; rhs[k][j][i][0] = rhs[k][j][i][0] + dy1ty1 * (u[k][j+1][i][0] - 2.0*u[k][j][i][0] + u[k][j-1][i][0]) - ty2 * (u[k][j+1][i][2] - u[k][j-1][i][2]); rhs[k][j][i][1] = rhs[k][j][i][1] + dy2ty1 * (u[k][j+1][i][1] - 2.0*u[k][j][i][1] + u[k][j-1][i][1]) + yycon2 * (us[k][j+1][i] - 2.0*us[k][j][i] + us[k][j-1][i]) - ty2 * (u[k][j+1][i][1]*vp1 - u[k][j-1][i][1]*vm1); rhs[k][j][i][2] = rhs[k][j][i][2] + dy3ty1 * (u[k][j+1][i][2] - 2.0*u[k][j][i][2] + u[k][j-1][i][2]) + yycon2*con43 * (vp1 - 2.0*vijk + vm1) - ty2 * (u[k][j+1][i][2]*vp1 - u[k][j-1][i][2]*vm1 + (u[k][j+1][i][4] - square[k][j+1][i] - u[k][j-1][i][4] + square[k][j-1][i]) * c2); rhs[k][j][i][3] = rhs[k][j][i][3] + dy4ty1 * (u[k][j+1][i][3] - 2.0*u[k][j][i][3] + u[k][j-1][i][3]) + yycon2 * (ws[k][j+1][i] - 2.0*ws[k][j][i] + ws[k][j-1][i]) - ty2 * (u[k][j+1][i][3]*vp1 - u[k][j-1][i][3]*vm1); rhs[k][j][i][4] = rhs[k][j][i][4] + dy5ty1 * (u[k][j+1][i][4] - 2.0*u[k][j][i][4] + u[k][j-1][i][4]) + yycon3 * (qs[k][j+1][i] - 2.0*qs[k][j][i] + qs[k][j-1][i]) + yycon4 * (vp1*vp1 - 2.0*vijk*vijk + vm1*vm1) + yycon5 * (u[k][j+1][i][4]*rho_i[k][j+1][i] - 2.0*u[k][j][i][4]*rho_i[k][j][i] + u[k][j-1][i][4]*rho_i[k][j-1][i]) - ty2 * ((c1*u[k][j+1][i][4] - c2*square[k][j+1][i]) * vp1 - (c1*u[k][j-1][i][4] - c2*square[k][j-1][i]) * vm1); } } //--------------------------------------------------------------------- // add fourth order eta-direction dissipation //--------------------------------------------------------------------- j = 1; for (i = 1; i <= nx2; i++) { for (m = 0; m < 5; m++) { rhs[k][j][i][m] = rhs[k][j][i][m]- dssp * ( 5.0*u[k][j][i][m] - 4.0*u[k][j+1][i][m] + u[k][j+2][i][m]); } } j = 2; for (i = 1; i <= nx2; i++) { for (m = 0; m < 5; m++) { rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * (-4.0*u[k][j-1][i][m] + 6.0*u[k][j][i][m] - 4.0*u[k][j+1][i][m] + u[k][j+2][i][m]); } } for (j = 3; j <= ny2-2; j++) { for (i = 1; i <= nx2; i++) { for (m = 0; m < 5; m++) { rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * ( u[k][j-2][i][m] - 4.0*u[k][j-1][i][m] + 6.0*u[k][j][i][m] - 4.0*u[k][j+1][i][m] + u[k][j+2][i][m] ); } } } j = ny2-1; for (i = 1; i <= nx2; i++) { for (m = 0; m < 5; m++) { rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * ( u[k][j-2][i][m] - 4.0*u[k][j-1][i][m] + 6.0*u[k][j][i][m] - 4.0*u[k][j+1][i][m] ); } } j = ny2; for (i = 1; i <= nx2; i++) { for (m = 0; m < 5; m++) { rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * ( u[k][j-2][i][m] - 
4.0*u[k][j-1][i][m] + 5.0*u[k][j][i][m] ); } } //kai k4 = 0; } #pragma omp master { if (timeron) timer_stop(t_rhsy); //--------------------------------------------------------------------- // compute zeta-direction fluxes //--------------------------------------------------------------------- if (timeron) timer_start(t_rhsz); } #pragma omp for schedule(static) for (k = k5+1; k <= grid_points[2]-2; k++) { for (j = 1; j <= grid_points[1]-2; j++) { for (i = 1; i <= grid_points[0]-2; i++) { wijk = ws[k][j][i]; wp1 = ws[k+1][j][i]; wm1 = ws[k-1][j][i]; rhs[k][j][i][0] = rhs[k][j][i][0] + dz1tz1 * (u[k+1][j][i][0] - 2.0*u[k][j][i][0] + u[k-1][j][i][0]) - tz2 * (u[k+1][j][i][3] - u[k-1][j][i][3]); rhs[k][j][i][1] = rhs[k][j][i][1] + dz2tz1 * (u[k+1][j][i][1] - 2.0*u[k][j][i][1] + u[k-1][j][i][1]) + zzcon2 * (us[k+1][j][i] - 2.0*us[k][j][i] + us[k-1][j][i]) - tz2 * (u[k+1][j][i][1]*wp1 - u[k-1][j][i][1]*wm1); rhs[k][j][i][2] = rhs[k][j][i][2] + dz3tz1 * (u[k+1][j][i][2] - 2.0*u[k][j][i][2] + u[k-1][j][i][2]) + zzcon2 * (vs[k+1][j][i] - 2.0*vs[k][j][i] + vs[k-1][j][i]) - tz2 * (u[k+1][j][i][2]*wp1 - u[k-1][j][i][2]*wm1); rhs[k][j][i][3] = rhs[k][j][i][3] + dz4tz1 * (u[k+1][j][i][3] - 2.0*u[k][j][i][3] + u[k-1][j][i][3]) + zzcon2*con43 * (wp1 - 2.0*wijk + wm1) - tz2 * (u[k+1][j][i][3]*wp1 - u[k-1][j][i][3]*wm1 + (u[k+1][j][i][4] - square[k+1][j][i] - u[k-1][j][i][4] + square[k-1][j][i]) * c2); rhs[k][j][i][4] = rhs[k][j][i][4] + dz5tz1 * (u[k+1][j][i][4] - 2.0*u[k][j][i][4] + u[k-1][j][i][4]) + zzcon3 * (qs[k+1][j][i] - 2.0*qs[k][j][i] + qs[k-1][j][i]) + zzcon4 * (wp1*wp1 - 2.0*wijk*wijk + wm1*wm1) + zzcon5 * (u[k+1][j][i][4]*rho_i[k+1][j][i] - 2.0*u[k][j][i][4]*rho_i[k][j][i] + u[k-1][j][i][4]*rho_i[k-1][j][i]) - tz2 * ((c1*u[k+1][j][i][4] - c2*square[k+1][j][i])*wp1 - (c1*u[k-1][j][i][4] - c2*square[k-1][j][i])*wm1); } } //kai k5 = 0; } //--------------------------------------------------------------------- // add fourth order zeta-direction dissipation //--------------------------------------------------------------------- k = 1; #pragma omp for schedule(static) nowait for (j = k6+1; j <= grid_points[1]-2; j++) { for (i = 1; i <= grid_points[0]-2; i++) { for (m = 0; m < 5; m++) { rhs[k][j][i][m] = rhs[k][j][i][m]- dssp * (5.0*u[k][j][i][m] - 4.0*u[k+1][j][i][m] + u[k+2][j][i][m]); } } //kai k6 = 0; } k = 2; #pragma omp for schedule(static) nowait for (j = k7+1; j <= grid_points[1]-2; j++) { for (i = 1; i <= grid_points[0]-2; i++) { for (m = 0; m < 5; m++) { rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * (-4.0*u[k-1][j][i][m] + 6.0*u[k][j][i][m] - 4.0*u[k+1][j][i][m] + u[k+2][j][i][m]); } } //kai k7 = 0; } #pragma omp for schedule(static) nowait for (k = k8+1; k <= grid_points[2]-4; k++) { for (j = 1; j <= grid_points[1]-2; j++) { for (i = 1; i <= grid_points[0]-2; i++) { for (m = 0; m < 5; m++) { rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * ( u[k-2][j][i][m] - 4.0*u[k-1][j][i][m] + 6.0*u[k][j][i][m] - 4.0*u[k+1][j][i][m] + u[k+2][j][i][m] ); } } } //kai k8 = 2; } k = grid_points[2]-3; #pragma omp for schedule(static) nowait for (j = k9+1; j <= grid_points[1]-2; j++) { for (i = 1; i <= grid_points[0]-2; i++) { for (m = 0; m < 5; m++) { rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * ( u[k-2][j][i][m] - 4.0*u[k-1][j][i][m] + 6.0*u[k][j][i][m] - 4.0*u[k+1][j][i][m] ); } } //kai k9 = 0; } k = grid_points[2]-2; #pragma omp for schedule(static) for (j = k10+1; j <= grid_points[1]-2; j++) { for (i = 1; i <= grid_points[0]-2; i++) { for (m = 0; m < 5; m++) { rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * ( 
u[k-2][j][i][m] - 4.0*u[k-1][j][i][m] + 5.0*u[k][j][i][m] ); } } //kai k10 = 0; } #pragma omp master if (timeron) timer_stop(t_rhsz); #pragma omp for schedule(static) nowait for (k = k11+1; k <= nz2; k++) { for (j = 1; j <= ny2; j++) { for (i = 1; i <= nx2; i++) { for (m = 0; m < 5; m++) { rhs[k][j][i][m] = rhs[k][j][i][m] * dt; } } } //kai k11 = 0; } } //end parallel if (timeron) timer_stop(t_rhs); }
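For readers comparing the stripped and the OpenMP columns: the parallel version keeps a single #pragma omp parallel region alive across the whole of compute_rhs and strings many #pragma omp for worksharing loops inside it, adding nowait where the following loop touches independent data (or the identical static schedule lets each thread revisit its own iterations), and using #pragma omp master purely for the timers. A minimal, self-contained sketch of that structure (not taken from the benchmark; array names and sizes are made up) is:

#include <stdio.h>
#include <omp.h>

#define N 64
static double a[N], b[N];

/* One parallel region, several worksharing loops inside it: the shape used
 * throughout compute_rhs. nowait drops the barrier after a loop when the next
 * loop touches independent data; master blocks carry no barrier at all. */
void sweep(void)
{
  int i;
  #pragma omp parallel default(shared) private(i)
  {
    #pragma omp for schedule(static) nowait   /* safe: next loop only writes b */
    for (i = 0; i < N; i++) a[i] = (double)i;

    #pragma omp for schedule(static)          /* implicit barrier: b is complete below */
    for (i = 0; i < N; i++) b[i] = 2.0 * (double)i;

    #pragma omp master                        /* timers in compute_rhs sit in blocks like this */
    { printf("running with %d threads\n", omp_get_num_threads()); }

    #pragma omp for schedule(static)
    for (i = 0; i < N; i++) a[i] += b[i];
  }
}

int main(void) { sweep(); printf("a[N-1] = %f\n", a[N-1]); return 0; }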