#include "mpi/cipher_3d.hpp"
#include "hypercube/hypercube_cryto_agent.hpp"
#include "intel_itt_wrapper.hpp"
#include "mpi/get_matrix_blk.hpp"
#include "mpi/partition_3d.hpp"

// Return the base-2 logarithm of the ciphertext's total noise bound.
static inline double log2_noise_bound(const helib::Ctxt &ctxt) {
  // log2(x) = ln(x) / ln(2), computed in NTL extended-double precision.
  const NTL::xdouble two(2l);
  return NTL::log(ctxt.totalNoiseBound()) / NTL::log(two);
}

template <typename Scheme, typename ExecOutput>
int cipher_3d<Scheme, ExecOutput>::index_to_rank_exec(
    const int pgrid_index[AXIS_NUM]) {
  // With no explicit coordinates, use this process's own grid position.
  const int *idx = (pgrid_index != nullptr) ? pgrid_index : pgrid_index_;
  // Linearize (M, N, K) coordinates: K is the slowest-varying axis,
  // M the fastest — matching the decomposition done in the constructor.
  const int plane = pgrid_size_[M] * pgrid_size_[N];
  return idx[K] * plane + idx[N] * pgrid_size_[M] + idx[M];
}

template <typename Scheme, typename ExecOutput>
void cipher_3d<Scheme, ExecOutput>::report(std::ostream &ss) const {
  // Probe the concrete engine/agent subtypes; every cast is CHECKED so the
  // optional statistics below are printed only when available.
  auto hmme = dynamic_cast<hypercube_hmme *>(this->hmm_engine_);
  auto hmmcc = dynamic_cast<hmm_crypto_client<Scheme> *>(this->hmm_cagent_);
  //! BUGFIX: was static_cast — on non-root ranks the agent is a
  //! hypercube_hmmcs, so the unchecked cast yielded a bogus non-null
  //! pointer and the null test below never fired.
  auto hhmmcc = dynamic_cast<hypercube_hmmcc *>(this->hmm_cagent_);
  // clang-format off
  ss << "====================== Cipher 3D ======================\n";
  ss << "* Setup Time            : " << stat_.setup_time << " s\n";
  if (hhmmcc != nullptr) {
  ss << "  * Find Params         : " << hhmmcc->find_params_time() << " s\n";
  }
  ss << "  * Build Context       : " << this->hmm_cagent_->build_context_time() << " s\n";
  if (hmmcc != nullptr) {
  ss << "  * KeyGen Time         : " << hmmcc->key_gen_time() << " s\n";
  }
  ss << "  * Bcast PubKey        : " << stat_.bcast_pk_time << " s\n";
  ss << "  * HMM Encoding        : " << stat_.hmm_encoding_time << " s\n";
  //! BUGFIX: hmme comes from a dynamic_cast and may be null; guard it.
  if (hmme != nullptr) {
  ss << "  * No Encoding Reuse   : " << hmme->get_encoding_time() << " s\n";
  }
  ss << "* Execution time        : " << stat_.exec_time << " s\n";
  ss << "  * Broadcast A & B     : " << stat_.cbcast_time << " s\n";
  ss << "  * Local CGEMM(s)      : " << stat_.cgemm_time << " s\n";
  if (stat_.cgemm_ra_time != 0.0)  {
  if (stat_.creduce_ra_time != 0) {
  ss << "    * Batch Rotate      : " << stat_.cgemm_ra_time << " s\n";
  ss << "    * All-reduce        : " << stat_.creduce_ra_time << " s\n";
  } else {
  ss << "    * RotateAlign       : " << stat_.cgemm_ra_time << " s\n";
  }
  if (stat_.cgemm_shift_time != 0) {
  ss << "    * Batch Shift       : " << stat_.cgemm_shift_time << " s\n";
  ss << "    * All-gather        : " << stat_.cgather_sc_time << " s\n";
  ss << "    * Compute           : " << stat_.cgemm_compute_time << " s\n";
  } else {
  ss << "    * ShiftCompute      : " << stat_.cgemm_compute_time << " s\n";
  }
  }
  ss << "  * Reduce partial C    : " << stat_.creduce_time << " s\n";
  ss << "  * (De)serialization   : " << stat_.serialize_time << " s\n";
  ss << "* Public Key Size       : " << stat_.pk_size << " MB\n";
  ss << "* Communication Traffic : " << stat_.computing_comm_size << " MB\n";
  //! Typo fix: "nosie" -> "noise" in the two labels below.
  ss << "* Final noise bound     : " << stat_.final_noise << std::endl;
  ss << "  * Init noise bound    : " << stat_.init_noise << std::endl;
  ss << "  + CGEMM(depth = 1)    : " << stat_.cgemm_noise << std::endl;
  ss << "  + Add partial sum     : " << stat_.add_partial_noise << std::endl;
  ss << "=======================================================\n";
  // clang-format on
}

// Construct the 3D process-grid decomposition for an encrypted GEMM of
// dims m x k (A) times k x n (B).  Chooses a block view (bv) and a process
// grid (mp, np, kp), splits `comm` into bcast-A / bcast-B / reduce-C
// sub-communicators, and records this rank's grid coordinates.
// `opt != 0` selects the multi-ciphertext (mhhmm_3d) path; `opt` is the
// per-ciphertext edge length used to size block views.
template <typename Scheme, typename ExecOutput>
cipher_3d<Scheme, ExecOutput>::cipher_3d(int root, MPI_Comm comm, long m,
                                         long n, long k, bool fhe4d, int mp,
                                         int np, int kp, int opt)
    : cipher_mpi_base<Scheme, ExecOutput>(root, comm),
      comm_bcast_A_(MPI_COMM_NULL), comm_bcast_B_(MPI_COMM_NULL),
      comm_reduce_C_(MPI_COMM_NULL) {
  dims_[M] = m;
  dims_[N] = n;
  dims_[K] = k;

  // Get MPI metadata
  int rank, p;
  MPI_Comm_rank(comm, &rank);
  MPI_Comm_size(comm, &p);

  if (opt != 0) {
    /* mhhmm_3d */
    // Decide how many ciphertext blocks the local cuboid is split into
    // along each axis (the "block view" bvm x bvn x bvk).
    const int opt_sq = opt * opt;
    long bvm, bvn, bvk;
    if (k >= opt) {
      // The three branches below cover all (m, n) cases when k >= opt.
      if (m >= opt && n >= opt) {
        bvm = m / opt;
        bvn = n / opt;
        bvk = k / opt;
      } else if (m < opt && n < opt) {
        bvm = 1;
        bvn = 1;
        bvk = k / opt;
      } else if (m < opt || n < opt) {
        // Exactly one of m, n is smaller than opt: fold the small axis
        // into the other one.
        if (m < opt) {
          bvm = 1;
          bvn = m * n / opt_sq;
          bvk = k / m;
        } else {
          bvn = 1;
          bvm = m * n / opt_sq;
          bvk = k / n;
        }
      }
    } else {
      // k < opt: sort (m, n, k) descending via pointer aliases so the
      // largest dimension is *x and the smallest is *z.
      long *x, *y, *z, *bvx, *bvy, *bvz;
      if (m >= n && n >= k) {
        x = &m, y = &n, z = &k, bvx = &bvm, bvy = &bvn, bvz = &bvk;
      } else if (m >= k && k >= n) {
        x = &m, y = &k, z = &n, bvx = &bvm, bvy = &bvk, bvz = &bvn;
      } else if (n >= m && m >= k) {
        x = &n, y = &m, z = &k, bvx = &bvn, bvy = &bvm, bvz = &bvk;
      } else if (n >= k && k >= m) {
        x = &n, y = &k, z = &m, bvx = &bvn, bvy = &bvk, bvz = &bvm;
      } else {
        // NOTE(review): if ERROR_PRINTF does not abort, x/y/z and
        // bvx/bvy/bvz are used uninitialized below — confirm ERROR_PRINTF
        // terminates the process.
        ERROR_PRINTF("Single CGEMM is enough\n");
      }
      int P1 = (*x) * (*y) / opt_sq, P2 = (*y) / (*z);
      if (m >= opt && n >= opt) {
        if (fhe4d) {
          // Split m*n/opt^2 blocks as evenly as possible into a 2^a x b grid.
          *bvy =
              1 << static_cast<long>(std::floor(std::log2(m * n / opt_sq) / 2));
          *bvx = m * n / opt_sq / (*bvy);
        } else {
          // Double bvy until bvx fits under min(P1, P2).
          auto upper_bound = std::min(P1, P2);
          for (*bvy = 1, *bvx = m * n / opt_sq; *bvx > upper_bound;
               *bvy *= 2, *bvx = m * n / opt_sq / (*bvy)) {
          }
        }
        *bvz = 1;
      } else {
        *bvy = std::min(P1, P2);
        if (fhe4d && *bvy == P1) {
          *bvy = 1 << static_cast<long>(std::floor(std::log2(P1) / 2));
        }
        *bvx = P1 / (*bvy), *bvz = 1;
      }
    }
    bv_[M] = bvm, bv_[N] = bvn, bv_[K] = bvk;
    if (rank == root) {
      std::cout << "Block View bm * bn * bk   : " << bvm << " * " << bvn
                << " * " << bvk << std::endl;
      std::cout.flush();
    }
  }

  // Auto-derive the process grid when the caller did not fix all of
  // mp/np/kp.  rp_ counts ranks left idle by the decomposition.
  if (mp == 0 || np == 0 || kp == 0) {
    if (opt != 0) {
      /* mhhmm_3d */
      if (fhe4d) {
        auto eh_mnk =
            shmm_engine::get_expected_mnk(m / bvm(), n / bvn(), k / bvk());
        const int R =
            std::min(eh_mnk.size(0), std::min(eh_mnk.size(1), eh_mnk.size(2)));
        Solution optimal = findOptimal(p, bvm(), bvn(), bvk(), R);
        rp_ = 0;
        mp = optimal.pm, np = optimal.pn, kp = optimal.pk;
      } else {
        calc_3d_decomposition(p, bvm(), bvn(), bvk(), &mp, &np, &kp, &rp_);
      }
    } else {
      // TODO: remove Cannon's requirement: 'mod(max(mp,np),min(mp,np))=0'
      calc_3d_decomposition(p, m, n, k, &mp, &np, &kp, &rp_);
    }
  }
  pgrid_size_[M] = mp, pgrid_size_[N] = np, pgrid_size_[K] = kp;
  np_ = mp * np * kp;
  // Create the execution communicator
  this->create_comm_exec(np_, comm);

  //* (mm, nn, kk)
  //* From: TOP-LEFT corner on the FRONT face  (0, 0, 0)
  //* To: BOTTOM-RIGHT corner on the BACK face (mp, np, kp)
  // Inverse of index_to_rank_exec(): M varies fastest, K slowest.
  int kk = rank / (mp * np);
  int rkk = rank % (mp * np);
  int nn = rkk / mp;
  int mm = rkk % mp;
  pgrid_index_[M] = mm, pgrid_index_[N] = nn, pgrid_index_[K] = kk;

  // Aliases for the matrix size
  blk_size_[M] = m / mp, blk_size_[N] = n / np, blk_size_[K] = k / kp;

  if (rank == root) {
    std::stringstream ss;
    ss << "Problem Size m  * n  * k  : " << m << " * " << n << " * " << k
       << std::endl;
    ss << "Process Grid mp * np * kp : " << mp << " * " << np << " * " << kp
       << std::endl;
    ss << "Work cuboid  mb * nb * kb : " << block_m() << " * " << block_n()
       << " * " << block_k() << std::endl;
    ss << "Process utilization       : "
       << static_cast<double>(np_) * 100.0 / static_cast<double>(np_ + rp_)
       << "% (" << np_ << "/" << np_ + rp_ << ")" << std::endl;
    std::cout << ss.str();
    std::flush(std::cout);
  }

  // Idle ranks take no part in the communicator construction below.
  if (!this->active_exec_) {
    return;
  }

  //* Create Broadcast Group
  // Bcast Group A: ranks sharing (mm, kk) — A blocks travel along N.
  int keyA = nn;
  int colorA = mm * kp + kk;
  // Bcast Group B: ranks sharing (nn, kk) — B blocks travel along M.
  int keyB = mm;
  int colorB = kk * np + nn;

  MPI_Comm_split(this->comm_exec_, colorA, keyA, &comm_bcast_A_);
  MPI_Comm_split(this->comm_exec_, colorB, keyB, &comm_bcast_B_);

  //* Create Reduce Group C
  // Partial C results are summed along the K axis.
  int keyC = kk;
  int colorC = mm * np + nn;
  MPI_Comm_split(this->comm_exec_, colorC, keyC, &comm_reduce_C_);

  // NOTE(review): rank_A/p_A (and the declared-but-unset rank_B/p_B) are
  // never used afterwards — likely leftover debugging code.
  int rank_A, p_A;
  int rank_B, p_B;
  MPI_Comm_rank(comm_bcast_A_, &rank_A);
  MPI_Comm_size(comm_bcast_A_, &p_A);
}

//* Single-Ciphertext Version
// Set up the single-ciphertext pipeline: build the crypto agent (full
// client with secret key on root, server elsewhere), broadcast the public
// key, and register the hypercube HMM engine.  All phases are timed into
// stat_ via the -=/+= MPI_Wtime() bracketing pattern.
shhmm_3d::shhmm_3d(const params<helib::BGV> &params,
                   const hmm_status<shmm_engine> &status, int root,
                   MPI_Comm comm, long m, long n, long k, bool fhe4d, int mp,
                   int np, int kp)
    : cipher_3d(root, comm, m, n, k, fhe4d, mp, np, kp) {
  stat_.setup_time -= MPI_Wtime(); //! Setup - Start

  // Get MPI metadata
  int rank;
  MPI_Comm_rank(comm, &rank);

  // Initialize HMM cryptograph systems
  // The hypercube dimensions come from this rank's local block sizes.
  const auto eh_cube =
      hypercube_hmme::get_hypercube(blk_size_[M], blk_size_[N], blk_size_[K]);
  // Root gets the crypto *client* (can encrypt/decrypt); all other ranks
  // get the crypto *server* (public-key operations only).
  hmm_cagent_ = rank == root ? new hypercube_hmmcc(params, eh_cube, true)
                             : new hypercube_hmmcs(params, eh_cube, true);

  // Initialize HMM engine
  auto hhmme = new hypercube_hmme(status);
  hmm_engine_ = hhmme;

  if (rank == root) {
    // Print out the HElib context
    hmm_cagent_->report_context(std::cout);
    std::flush(std::cout);
  }

  // Broadcast the public key
  stat_.bcast_pk_time -= MPI_Wtime(); //! Bcast PK - Start
  stat_.pk_size = this->bcast_pubkey();
  stat_.bcast_pk_time += MPI_Wtime(); //! Bcast PK - Stop

  // Acativate HMM engine
  INTEL_ITT_RESUME;
  stat_.hmm_encoding_time -= MPI_Wtime(); //! HMM Encoding - Start
  auto eh_mnk = hypercube_hmme::get_expected_mnk(blk_size_[M], blk_size_[N],
                                                 blk_size_[K]);
  type_ = hypercube_hmme::parse_matrix_type(eh_mnk);
  hhmme->register_engine(hmm_cagent_->context()->getEA(), eh_mnk);
  stat_.hmm_encoding_time += MPI_Wtime(); //! HMM Encoding - Stop
  INTEL_ITT_PAUSE;

  stat_.setup_time += MPI_Wtime(); //! Setup - Stop
}

// Root-side: encrypt every (X, Y)-surface block of `mat` and ship it to
// the owning rank (point-to-point); the root's own block lands in `buf`.
// Non-root surface ranks: receive their serialized ciphertext into `buf`.
// Ranks not on surface-XY (pgrid_index_[Z] != 0) return immediately.
template <typename T>
void shhmm_3d::init_data_p2p(int root, int rank, MPI_Comm comm,
                             std::string &buf, const T *mat, AXIS X, AXIS Y) {
  // The third axis is determined by the two surface axes (M+N+K is constant).
  AXIS Z = static_cast<AXIS>(M + N + K - X - Y);
  if (pgrid_index_[Z] != 0) {
    // Not on the surface-XY
    return;
  }
  if (rank == root) {
    //! Checked cast (was static_cast): only the root holds the full crypto
    //! client, and only it can encrypt.
    auto hmmcc = dynamic_cast<hypercube_hmmcc *>(hmm_cagent_);
    ASSERT_PRINTF(hmmcc != nullptr, "Invalid root\n");

    int sf[AXIS_NUM] = {0, 0, 0};
    for (sf[X] = 0; sf[X] < pgrid_size_[X]; sf[X]++) {
      for (sf[Y] = 0; sf[Y] < pgrid_size_[Y]; sf[Y]++) {
        std::stringstream ss;
        auto blk = get_blk(mat, dims_[X], dims_[Y], sf[X], sf[Y], blk_size_[X],
                           blk_size_[Y]);
        auto ctxt_blk = hmmcc->encrypt(blk);
        ctxt_blk.writeTo(ss);

        int recipient = this->index_to_rank_exec(sf);
        if (recipient == root) {
          buf = ss.str();
          // Record the initial ciphertext noise for the root's own block
          // (merged from a second, redundant `recipient == root` check).
          stat_.init_noise =
              std::max(stat_.init_noise, log2_noise_bound(ctxt_blk));
        } else {
          // FIXME: Using an asynchronous Send will fail
          //? Maybe `MPI_Isend_String()` has some bugs
          MPI_Send_String(ss.str(), recipient, 0, comm);
        }
      }
    }
  } else {
    MPI_Recv_String(buf, root, 0, comm);
  }
}

void shhmm_3d::init_data(const int *A, const int *B) {
  init_data_p2p<int>(MPI_COMM_EXEC_ROOT, this->rank_exec_, this->comm_exec_,
                     local_A_, A, M, K);
  init_data_p2p<int>(MPI_COMM_EXEC_ROOT, this->rank_exec_, this->comm_exec_,
                     local_B_, B, K, N);
}

// Run one encrypted GEMM, writing this rank's serialized partial C into
// `partial_C`.  `fhe4d` selects the 4D (batch rotate/shift) pipeline.
void shhmm_3d::exec(std::string &partial_C, bool fhe4d) {
  if (!fhe4d) {
    exec_base(partial_C);
    return;
  }
  exec_4d(partial_C);
}
// Baseline execution path: broadcast A/B ciphertexts along their comm
// groups, run the local encrypted GEMM, then sum partial C along the K
// axis.  On K=0 ranks `partial` receives the serialized reduced block;
// elsewhere it is cleared.  Timing/noise stats are accumulated into stat_.
void shhmm_3d::exec_base(std::string &partial) {
  MPI_Barrier(comm_exec_);
  stat_.exec_time -= MPI_Wtime(); //! Exec - Start
  MPI_Pcontrol(1);                //! Activate mpiP

  // Broadcast
  stat_.cbcast_time -= MPI_Wtime(); //! Bcast - Start
  MPI_Bcast_String(local_A_, 0, comm_bcast_A_);
  MPI_Bcast_String(local_B_, 0, comm_bcast_B_);
  stat_.cbcast_time += MPI_Wtime(); //! Bcast - Stop
  // Traffic estimate: a tree broadcast moves ~ceil(log2(P)) copies.
  stat_.computing_comm_size +=
      (local_A_.size() * std::ceil(std::log2(np_)) / 1024.0 / 1024.0);
  stat_.computing_comm_size +=
      (local_B_.size() * std::ceil(std::log2(np_)) / 1024.0 / 1024.0);

  // Serialization
  stat_.serialize_time -= MPI_Wtime(); //! Serial - Start
  std::stringstream A_ss, B_ss;
  A_ss << local_A_;
  B_ss << local_B_;
  auto A = helib::Ctxt::readFrom(A_ss, *pk_);
  auto B = helib::Ctxt::readFrom(B_ss, *pk_);
  stat_.serialize_time += MPI_Wtime(); //! Serial - Stop

  // Local CGEMM
  // Noise delta: subtract the worse input bound now, add the output later.
  stat_.cgemm_noise -= std::max(log2_noise_bound(A), log2_noise_bound(B));
  stat_.cgemm_time -= MPI_Wtime(); //! HMM - Start
  helib::Ctxt AB(*pk_);
  hmm_engine_->cgemm(type_, AB, A, B);
  stat_.cgemm_time += MPI_Wtime(); //! HMM - Stop
  stat_.cgemm_noise += log2_noise_bound(AB);

  // Reduction
  stat_.creduce_time -= MPI_Wtime(); //! Reduction - Start

  // Old point-to-point reduction, kept for reference:
  // if (pgrid_index_[K] == 0) {
  //   for (int sender = 1; sender < pgrid_size_[K]; sender++) {
  //     std::stringstream ss;
  //     std::string recv_partial;
  //     MPI_Recv_String(recv_partial, sender, 0, comm_reduce_C_);
  //     ss << recv_partial;
  //     auto recv_partial_ctxt = helib::Ctxt::readFrom(ss, *pk_);

  //     stat_.add_partial_noise -= log2_noise_bound(AB);
  //     AB += recv_partial_ctxt;
  //     stat_.add_partial_noise += log2_noise_bound(AB);
  //   }
  // } else {
  //   std::stringstream ss;
  //   AB.writeTo(ss);
  //   MPI_Send_String(ss.str(), 0, 0, comm_reduce_C_);
  // }

  // New Reduction
  stat_.add_partial_noise -= log2_noise_bound(AB);
  ctxt_reduce_add(AB, &AB, 0, comm_reduce_C_);
  stat_.add_partial_noise += log2_noise_bound(AB);
  stat_.creduce_time += MPI_Wtime(); //! Reduction - Stop

  stat_.serialize_time -= MPI_Wtime(); //! Serial - Start
  if (pgrid_index_[K] == 0) {
    std::stringstream ss;
    AB.writeTo(ss);
    partial = ss.str();
  } else {
    partial.clear();
  }
  stat_.serialize_time += MPI_Wtime(); //! Serial - Stop

  MPI_Pcontrol(0); //! Deactivate mpiP
  MPI_Barrier(comm_exec_);
  stat_.exec_time += MPI_Wtime(); //! Exec - Stop

  stat_.final_noise = log2_noise_bound(AB);
}

// 4D execution path: batch RotateAlign + all-reduce, then batch
// ShiftCompute with all-gathered rotation slices, local multiply/add,
// and a point-to-point reduction of partial C along K.
// On K=0 ranks `partial` gets the serialized result; elsewhere cleared.
//
// BUGFIX (see "FOR B" unpack loop below): the large-message unpack for B
// iterated over np() ranks, but comm_bcast_B_ has mp() ranks and `recv`
// is sized mp() * max_len — out-of-bounds whenever np() > mp().
void shhmm_3d::exec_4d(std::string &partial) {
  MPI_Barrier(comm_exec_);
  stat_.exec_time -= MPI_Wtime(); //! Exec - Start
  MPI_Pcontrol(1);                //! Activate mpiP

  // Broadcast
  stat_.cbcast_time -= MPI_Wtime(); //! Bcast - Start
  MPI_Bcast_String(local_A_, 0, comm_bcast_A_);
  MPI_Bcast_String(local_B_, 0, comm_bcast_B_);
  stat_.cbcast_time += MPI_Wtime(); //! Bcast - Stop
  stat_.computing_comm_size +=
      (local_A_.size() * std::ceil(std::log2(np_)) / 1024.0 / 1024.0);
  stat_.computing_comm_size +=
      (local_B_.size() * std::ceil(std::log2(np_)) / 1024.0 / 1024.0);

  // Serialization
  stat_.serialize_time -= MPI_Wtime(); //! Serial - Start
  std::stringstream A_ss, B_ss;
  A_ss << local_A_;
  B_ss << local_B_;
  auto A = helib::Ctxt::readFrom(A_ss, *pk_);
  auto B = helib::Ctxt::readFrom(B_ss, *pk_);
  stat_.serialize_time += MPI_Wtime(); //! Serial - Stop
  // clear buffer
  A_ss.str(""), B_ss.str("");

  // Local CGEMM
  const auto &ea = hmm_cagent_->context()->getEA();
  auto engine = static_cast<hypercube_hmme *>(hmm_engine_);
  stat_.cgemm_noise -= std::max(log2_noise_bound(A), log2_noise_bound(B));
  stat_.cgemm_time -= MPI_Wtime(); //! HMM - Start

  int k = ea.sizeOfDimension(0);
  int rank_A, rank_B;
  MPI_Comm_rank(comm_bcast_A_, &rank_A);
  MPI_Comm_rank(comm_bcast_B_, &rank_B);

  // Partition `size` rotations among `num` ranks; writes this rank's
  // starting offset and slice length.  Requires an exact division.
  auto get = [](int idx, int num, int size, int &offset, int &blk_size) {
    blk_size = size / num;
    int r = size % num;
    ASSERT_PRINTF(r == 0, "Invalid: MPI_Allgather");
    if (idx < r) {
      blk_size++;
      offset = idx * blk_size;
    } else {
      offset = idx * blk_size + r;
    }
  };
  int rot_pos_A, rot_size_A;
  int rot_pos_B, rot_size_B;
  get(rank_A, np(), k, rot_pos_A, rot_size_A);
  get(rank_B, mp(), k, rot_pos_B, rot_size_B);

  // Batch RotateAlign
  stat_.cgemm_ra_time -= MPI_Wtime(); //! RotateAlign - Start
  engine->rotate_align_A(type_, rot_pos_A, rot_size_A, A, A);
  engine->rotate_align_B(type_, rot_pos_B, rot_size_B, B, B);
  stat_.cgemm_ra_time += MPI_Wtime(); //! RotateAlign - Stop

  stat_.creduce_ra_time -= MPI_Wtime(); //! All-reduce - Start
  // NOTE(review): these serializations are never read afterwards —
  // possibly leftover; kept to preserve measured timing behavior.
  A.writeTo(A_ss), B.writeTo(B_ss);
  ctxt_allreduce_add(A, &A, comm_bcast_A_);
  ctxt_allreduce_add(B, &B, comm_bcast_B_);
  stat_.creduce_ra_time += MPI_Wtime(); //! All-reduce - Stop

  // Batch ShiftCompute
  auto cube = hypercube_hmme::get_expected_mnk(block_m(), block_n(), block_k());
  const auto R = std::min(std::min(cube.size(0), cube.size(1)), cube.size(2));
  std::vector<helib::Ctxt> Ais(R, helib::Ctxt(*pk_));
  std::vector<helib::Ctxt> Bis(R, helib::Ctxt(*pk_));
  {
    get(rank_A, np(), R, rot_pos_A, rot_size_A);
    get(rank_B, mp(), R, rot_pos_B, rot_size_B);
    std::vector<int> aux_A(rot_size_A);
    std::vector<int> aux_B(rot_size_B);
    std::vector<std::string> As(rot_size_A);
    std::vector<std::string> Bs(rot_size_B);
    {
      // Compute this rank's shift slices of A and B in parallel, then
      // serialize them for the all-gather (aux_* records each length).
      std::vector<helib::Ctxt> my_Ais(rot_size_A, helib::Ctxt(*pk_));
      std::vector<helib::Ctxt> my_Bis(rot_size_B, helib::Ctxt(*pk_));
      stat_.cgemm_shift_time -= MPI_Wtime(); //! Shift - Start
      omp_set_nested(1);
#pragma omp parallel
#pragma omp single
      {
#pragma omp task
        engine->shift_compute_A(type_, rot_pos_A, my_Ais, A);
#pragma omp task
        engine->shift_compute_B(type_, rot_pos_B, my_Bis, B);
#pragma omp taskwait
#pragma omp taskloop nogroup
        for (int r = 0; r < rot_size_A; r++) {
          Ais[rot_pos_A + r] = std::move(my_Ais[r]);
          std::stringstream ss;
          Ais[rot_pos_A + r].writeTo(ss);
          As[r] = ss.str();
          aux_A[r] = As[r].size();
        }
#pragma omp taskloop nogroup
        for (int r = 0; r < rot_size_B; r++) {
          Bis[rot_pos_B + r] = std::move(my_Bis[r]);
          std::stringstream ss;
          Bis[rot_pos_B + r].writeTo(ss);
          Bs[r] = ss.str();
          aux_B[r] = Bs[r].size();
        }
      }
      omp_set_nested(0);
      stat_.cgemm_shift_time += MPI_Wtime(); //! Shift - Stop
    }

    // MPI_Allgather counts are 'int'; chunks above this size are sent in
    // THRESHOLD-sized packages.
    const unsigned long THRESHOLD = 1UL << 30;
    do { /* FOR A */
      if (np() == 1) {
        break;
      }
      stat_.cgather_sc_time -= MPI_Wtime(); //! All-gather - Start
                                            // Packing
      for (int i = 1; i < As.size(); i++) {
        As[0] += As[i];
      }
      // All-reduce MAX(len)
      unsigned long len = As[0].size();
      unsigned long max_len = len;
      MPI_Allreduce(&len, &max_len, 1, MPI_UNSIGNED_LONG, MPI_MAX,
                    comm_bcast_A_);
      As[0].resize(max_len);
      // All-gather AUX
      std::vector<int> recv_aux(R);
      MPI_Allgather(aux_A.data(), rot_size_A, MPI_INT,
                    const_cast<int *>(recv_aux.data()), rot_size_A, MPI_INT,
                    comm_bcast_A_);

      // All-gather packed string
      std::string recv;
      recv.resize(np() * max_len);
      if (max_len > THRESHOLD) {
        //! displacement array is also 'INT'
        auto package_num = max_len / THRESHOLD;
        std::vector<std::string> packages(package_num + 1);
        for (unsigned long i = 0; i < package_num; i++) {
          packages[i].resize(static_cast<unsigned long>(np()) * THRESHOLD);
          MPI_Allgather(As[0].data() + i * THRESHOLD, THRESHOLD, MPI_CHAR,
                        const_cast<char *>(packages[i].data()), THRESHOLD,
                        MPI_CHAR, comm_bcast_A_);
        }
        auto tail_count = max_len - package_num * THRESHOLD;
        packages[package_num].resize(np() * tail_count);
        MPI_Allgather(As[0].data() + package_num * THRESHOLD, tail_count,
                      MPI_CHAR,
                      const_cast<char *>(packages[package_num].data()),
                      tail_count, MPI_CHAR, comm_bcast_A_);

        // Re-interleave per-rank packages back into per-rank order.
#pragma omp parallel
#pragma omp single
#pragma omp taskloop
        for (unsigned long i = 0; i < np(); i++) {
          for (unsigned long j = 0; j < package_num; j++) {
            std::copy(packages[j].begin() + i * THRESHOLD,
                      packages[j].begin() + (i + 1) * THRESHOLD,
                      recv.begin() + i * max_len + j * THRESHOLD);
          }
          std::copy(packages[package_num].begin() + i * tail_count,
                    packages[package_num].begin() + (i + 1) * tail_count,
                    recv.begin() + i * max_len + package_num * THRESHOLD);
        }
      } else {
        MPI_Allgather(As[0].data(), max_len, MPI_CHAR,
                      const_cast<char *>(recv.data()), max_len, MPI_CHAR,
                      comm_bcast_A_);
      }
      stat_.cgather_sc_time += MPI_Wtime(); //! All-gather - Stop

      // Deserialize the slices received from other ranks (own slices are
      // already in Ais via std::move above).
      stat_.serialize_time -= MPI_Wtime(); //! Serial - Start
#pragma omp parallel
#pragma omp single
#pragma omp taskloop
      for (int k = 0; k < recv_aux.size(); k++) {
        int gidx = k / rot_size_A;
        if (gidx == rank_A) {
          continue;
        }
        unsigned long gstart = gidx * max_len;
        int lidx = k % rot_size_A;
        unsigned long lstart = 0;
        for (int i = 0; i < lidx; i++) {
          lstart += recv_aux[gidx * rot_size_A + i];
        }
        std::string ctxt_str = recv.substr(gstart + lstart, recv_aux[k]);
        std::stringstream ss;
        ss << ctxt_str;
        Ais[k] = helib::Ctxt::readFrom(ss, *pk_);
      }
      stat_.serialize_time += MPI_Wtime(); //! Serial - Stop
    } while (false);
    do { /* FOR B */
      if (mp() == 1) {
        break;
      }
      stat_.cgather_sc_time -= MPI_Wtime(); //! All-gather - Start
      // Packing
      for (int i = 1; i < Bs.size(); i++) {
        Bs[0] += Bs[i];
      }
      // All-reduce MAX(len)
      unsigned long len = Bs[0].size();
      unsigned long max_len = len;
      MPI_Allreduce(&len, &max_len, 1, MPI_UNSIGNED_LONG, MPI_MAX,
                    comm_bcast_B_);
      Bs[0].resize(max_len);
      // All-gather AUX
      std::vector<int> recv_aux(R);
      MPI_Allgather(aux_B.data(), rot_size_B, MPI_INT,
                    const_cast<int *>(recv_aux.data()), rot_size_B, MPI_INT,
                    comm_bcast_B_);

      // All-gather packed string
      std::string recv;
      recv.resize(mp() * max_len);
      if (max_len > THRESHOLD) {
        //! displacement array is also 'INT'
        auto package_num = max_len / THRESHOLD;
        std::vector<std::string> packages(package_num + 1);
        for (unsigned long i = 0; i < package_num; i++) {
          packages[i].resize(static_cast<unsigned long>(mp()) * THRESHOLD);
          MPI_Allgather(Bs[0].data() + i * THRESHOLD, THRESHOLD, MPI_CHAR,
                        const_cast<char *>(packages[i].data()), THRESHOLD,
                        MPI_CHAR, comm_bcast_B_);
        }
        auto tail_count = max_len - package_num * THRESHOLD;
        packages[package_num].resize(mp() * tail_count);
        MPI_Allgather(Bs[0].data() + package_num * THRESHOLD, tail_count,
                      MPI_CHAR,
                      const_cast<char *>(packages[package_num].data()),
                      tail_count, MPI_CHAR, comm_bcast_B_);

        //! BUGFIX: was `i < np()` — comm_bcast_B_ has mp() ranks and
        //! `recv` holds mp() segments, so iterating np() ranks read/wrote
        //! out of bounds whenever np() != mp().
#pragma omp parallel
#pragma omp single
#pragma omp taskloop
        for (unsigned long i = 0; i < mp(); i++) {
          for (unsigned long j = 0; j < package_num; j++) {
            std::copy(packages[j].begin() + i * THRESHOLD,
                      packages[j].begin() + (i + 1) * THRESHOLD,
                      recv.begin() + i * max_len + j * THRESHOLD);
          }
          std::copy(packages[package_num].begin() + i * tail_count,
                    packages[package_num].begin() + (i + 1) * tail_count,
                    recv.begin() + i * max_len + package_num * THRESHOLD);
        }
      } else {
        MPI_Allgather(Bs[0].data(), max_len, MPI_CHAR,
                      const_cast<char *>(recv.data()), max_len, MPI_CHAR,
                      comm_bcast_B_);
      }
      stat_.cgather_sc_time += MPI_Wtime(); //! All-gather - Stop

      stat_.serialize_time -= MPI_Wtime(); //! Serial - Start
#pragma omp parallel
#pragma omp single
#pragma omp taskloop
      for (int k = 0; k < recv_aux.size(); k++) {
        int gidx = k / rot_size_B;
        if (gidx == rank_B) {
          continue;
        }
        unsigned long gstart = gidx * max_len;
        int lidx = k % rot_size_B;
        unsigned long lstart = 0;
        for (int i = 0; i < lidx; i++) {
          lstart += recv_aux[gidx * rot_size_B + i];
        }
        std::string ctxt_str =
            recv.substr(gstart + lstart, recv_aux[gidx * rot_size_B + lidx]);
        std::stringstream ss;
        ss << ctxt_str;
        Bis[k] = helib::Ctxt::readFrom(ss, *pk_);
      }
      stat_.serialize_time += MPI_Wtime(); //! Serial - Stop
    } while (false);
  }

  // Element-wise multiply the R rotation pairs, then sum them into AB.
  stat_.cgemm_compute_time -= MPI_Wtime(); //! Compute - Start
#pragma omp parallel
#pragma omp single
#pragma omp taskloop
  for (int i = 0; i < R; i++) {
    Ais[i] *= Bis[i];
  }
  helib::Ctxt AB = Ais[0];
  for (int i = 1; i < Ais.size(); i++) {
    AB += Ais[i];
  }
  stat_.cgemm_compute_time += MPI_Wtime(); //! Compute - Stop
  stat_.cgemm_time += MPI_Wtime();         //! HMM - Stop
  stat_.cgemm_noise += log2_noise_bound(AB);

  // Reduction
  stat_.creduce_time -= MPI_Wtime(); //! Reduction - Start

  // Old Reduction
  if (pgrid_index_[K] == 0) {
    for (int sender = 1; sender < pgrid_size_[K]; sender++) {
      std::stringstream ss;
      std::string recv_partial;
      MPI_Recv_String(recv_partial, sender, 0, comm_reduce_C_);
      ss << recv_partial;
      auto recv_partial_ctxt = helib::Ctxt::readFrom(ss, *pk_);

      stat_.add_partial_noise -= log2_noise_bound(AB);
      AB += recv_partial_ctxt;
      stat_.add_partial_noise += log2_noise_bound(AB);
    }
  } else {
    std::stringstream ss;
    AB.writeTo(ss);
    MPI_Send_String(ss.str(), 0, 0, comm_reduce_C_);
  }

  // New Reduction
  // stat_.add_partial_noise -= log2_noise_bound(AB);
  // ctxt_reduce_add(AB, &AB, 0, comm_reduce_C_);
  // stat_.add_partial_noise += log2_noise_bound(AB);
  stat_.creduce_time += MPI_Wtime(); //! Reduction - Stop

  stat_.serialize_time -= MPI_Wtime(); //! Serial - Start
  if (pgrid_index_[K] == 0) {
    std::stringstream ss;
    AB.writeTo(ss);
    partial = ss.str();
  } else {
    partial.clear();
  }
  stat_.serialize_time += MPI_Wtime(); //! Serial - Stop

  MPI_Pcontrol(0); //! Deactivate mpiP
  MPI_Barrier(comm_exec_);
  stat_.exec_time += MPI_Wtime(); //! Exec - Stop

  stat_.final_noise = log2_noise_bound(AB);
}

// Gather and decrypt the partial C blocks on the exec root.
// Only the K=0 surface holds reduced results; other ranks return at once.
// `local == true`: decrypt only this root's own block into result[0].
// Otherwise root receives every (M, N) block and decrypts into
// result[m * np + n]; non-root K=0 ranks just send their block.
void shhmm_3d::collect_data(const std::string &partial_C,
                            std::vector<NTL::mat_ZZ> &result, bool local) {
  if (pgrid_index_[K] != 0) {
    return;
  }
  if (local) {
    if (rank_exec_ == MPI_COMM_EXEC_ROOT) {
      result.resize(1);
      //! Checked cast (was static_cast, which made the assert vacuous):
      //! only the root's agent is the full crypto client that can decrypt.
      auto hmmcc = dynamic_cast<hypercube_hmmcc *>(hmm_cagent_);
      ASSERT_PRINTF(hmmcc != nullptr, "Invalid root\n");
      std::stringstream ss;
      ss << partial_C;
      auto ctxt = helib::Ctxt::readFrom(ss, *pk_);
      try {
        hmmcc->decrypt(result[0], ctxt);
      } catch (const std::exception &e) {
        // Decryption failure is usually noise overflow — report the bound.
        std::cerr << e.what() << ":" << log2_noise_bound(ctxt) << std::endl;
        std::flush(std::cerr);
        MPI_Abort(comm_exec_, 1);
      }
    }
    return;
  }

  if (rank_exec_ == MPI_COMM_EXEC_ROOT) {
    result.resize(np_);
    //! Checked cast (was static_cast) — see above.
    auto hmmcc = dynamic_cast<hypercube_hmmcc *>(hmm_cagent_);
    ASSERT_PRINTF(hmmcc != nullptr, "Invalid root\n");

    // Receive partial C
    int sf_C[AXIS_NUM] = {0, 0, 0};
    for (sf_C[M] = 0; sf_C[M] < pgrid_size_[M]; sf_C[M]++) {
      for (sf_C[N] = 0; sf_C[N] < pgrid_size_[N]; sf_C[N]++) {
        std::stringstream ss;
        std::string ctxt_str;
        int sender = this->index_to_rank_exec(sf_C);
        if (sender == MPI_COMM_EXEC_ROOT) {
          ctxt_str = partial_C;
        } else {
          MPI_Recv_String(ctxt_str, sender, 0, comm_exec_);
        }
        ss << ctxt_str;
        auto ctxt = helib::Ctxt::readFrom(ss, *pk_);
        try {
          const int idx = sf_C[M] * pgrid_size_[N] + sf_C[N];
          hmmcc->decrypt(result[idx], ctxt);
        } catch (const std::exception &e) {
          std::cerr << e.what() << ":" << log2_noise_bound(ctxt) << std::endl;
          std::flush(std::cerr);
          MPI_Abort(comm_exec_, 1);
        }
      }
    }
  } else {
    MPI_Send_String(partial_C, MPI_COMM_EXEC_ROOT, 0, comm_exec_);
  }
}

//* Multi-Ciphertext Version
// Derive the HMM block edge length (`szblk`) and the number of blocks
// (`nblk`) along one axis, given the axis length and the requested split.
// TODO: handle splits that do not evenly divide `szdim`.
static inline void get_blk_size(long &szblk, long &nblk, long szdim,
                                long split) {
  const long edge = split; // each block spans `split` elements
  szblk = edge;
  nblk = szdim / edge;
}
// Multi-ciphertext setup: split each local work cuboid into an
// hmm_grid of smaller HMM blocks (sized by the block view from the base
// ctor), then build the crypto agent, broadcast the public key, and
// register the engine.  Mirrors the shhmm_3d constructor otherwise.
mhhmm_3d::mhhmm_3d(const params<helib::BGV> &params,
                   const hmm_status<shmm_engine> &status, int root,
                   MPI_Comm comm, long m, long n, long k, long opt, bool fhe4d,
                   int mp, int np, int kp)
    : cipher_3d(root, comm, m, n, k, fhe4d, mp, np, kp, opt) {
  stat_.setup_time -= MPI_Wtime(); //! Setup - Start
  int rank;
  MPI_Comm_rank(comm, &rank);

  // Initialize HMM engine //! but not active
  // NOTE(review): member name `hmm_szie_` looks like a typo for
  // `hmm_size_` — declared elsewhere, so it cannot be renamed here.
  get_blk_size(hmm_szie_[M], hmm_grid_size_[M], block_m(), m / bvm());
  get_blk_size(hmm_szie_[N], hmm_grid_size_[N], block_n(), n / bvn());
  get_blk_size(hmm_szie_[K], hmm_grid_size_[K], block_k(), k / bvk());
  if (rank == root) {
    std::cout << "Work cuboid grid x * y * z: " << m_nblk() << " * " << n_nblk()
              << " * " << k_nblk() << std::endl;
    std::cout << "HMM cuboid mbb * nbb * kbb: " << mbb() << " * " << nbb()
              << " * " << kbb() << std::endl;
    std::flush(std::cout);
  }
  auto hhmme = new hypercube_hmme(status);
  hmm_engine_ = hhmme;

  // Initialize HMM cryptograph systems
  // Root gets the crypto client (can decrypt); others get the server.
  const auto eh_cube = hypercube_hmme::get_hypercube(mbb(), nbb(), kbb());
  hmm_cagent_ = rank == root ? new hypercube_hmmcc(params, eh_cube, true)
                             : new hypercube_hmmcs(params, eh_cube, true);

  if (rank == root) {
    // Print out the HElib context
    hmm_cagent_->report_context(std::cout);
    std::flush(std::cout);
  }

  // Broadcast the public key
  stat_.bcast_pk_time -= MPI_Wtime(); //! Bcast PK - Start
  stat_.pk_size = this->bcast_pubkey();
  stat_.bcast_pk_time += MPI_Wtime(); //! Bcast PK - Stop

  // Acativate HMM engine
  INTEL_ITT_RESUME;
  stat_.hmm_encoding_time -= MPI_Wtime(); //! HMM Encoding - Start
  auto eh_mnk = hypercube_hmme::get_expected_mnk(mbb(), nbb(), kbb());
  type_ = hypercube_hmme::parse_matrix_type(eh_mnk);
  hhmme->register_engine(hmm_cagent_->context()->getEA(), eh_mnk);
  stat_.hmm_encoding_time += MPI_Wtime(); //! HMM Encoding - Stop
  INTEL_ITT_PAUSE;

  stat_.setup_time += MPI_Wtime(); //! Setup - Stop
}

template <typename T>
void mhhmm_3d::init_data_p2p(int root, int rank, MPI_Comm comm,
                             std::vector<std::string> &buf, const T *mat,
                             AXIS X, AXIS Y) {
  /// Encrypt the plaintext matrix `mat` on `root` and scatter its
  /// serialized ciphertext blocks point-to-point to every process on the
  /// grid surface spanned by axes X and Y (processes with index 0 on the
  /// remaining axis Z).
  ///
  /// @param root  rank that owns the plaintext and performs encryption
  /// @param rank  calling process' rank within `comm`
  /// @param comm  communicator covering the execution grid
  /// @param buf   [out] serialized ciphertext blocks owned by this process
  /// @param mat   plaintext matrix (only dereferenced on `root`)
  /// @param X,Y   grid axes of the surface this matrix is partitioned on
  static_assert(std::is_same<T, int>::value ||
                    std::is_same<T, unsigned int>::value ||
                    std::is_same<T, double>::value,
                "Only `int`, `unsigned int` and `double` are allowed.");

  const auto nblk = hmm_grid_size_[X] * hmm_grid_size_[Y];

  // M + N + K enumerates all axes, so subtracting X and Y yields the third.
  AXIS Z = static_cast<AXIS>(M + N + K - X - Y);
  if (pgrid_index_[Z] != 0) {
    // Not on the surface-XY: this rank holds no initial data for `mat`.
    return;
  }
  if (rank == root) {
    auto hmmcc = static_cast<hypercube_hmmcc *>(hmm_cagent_);
    ASSERT_PRINTF(hmmcc != nullptr, "Invalid root\n");

    int sf[AXIS_NUM] = {0, 0, 0};
    for (sf[X] = 0; sf[X] < pgrid_size_[X]; sf[X]++) {
      for (sf[Y] = 0; sf[Y] < pgrid_size_[Y]; sf[Y]++) {
        const int recipient = this->index_to_rank_exec(sf);

        std::vector<std::string> ctxt_blks(nblk);
        std::vector<std::vector<int>> size_buffs;
        std::vector<std::string> pack_buffs;

        // Encrypt and serialize every HMM block destined for `recipient`.
#pragma omp parallel for collapse(2)
        for (int ii = 0; ii < hmm_grid_size_[X]; ii++) {
          for (int jj = 0; jj < hmm_grid_size_[Y]; jj++) {
            std::stringstream ss;
            auto blk =
                get_blk(mat, dims_[X], dims_[Y], sf[X], sf[Y], blk_size_[X],
                        blk_size_[Y], ii, jj, hmm_szie_[X], hmm_szie_[Y]);
            auto ctxt_blk = hmmcc->encrypt(blk);
            ctxt_blk.writeTo(ss);
            ctxt_blks[ii * hmm_grid_size_[Y] + jj] = ss.str();

            // Record the fresh-ciphertext noise once per destination; only
            // the (0,0) iteration writes, so this is race-free.
            if (ii == 0 && jj == 0) {
              stat_.init_noise =
                  std::max(stat_.init_noise, log2_noise_bound(ctxt_blk));
            }
          }
        }

        if (recipient == root) {
          buf = std::move(ctxt_blks);
        } else {
          // Pack all blocks into one message; wait for completion so the
          // send buffers (ctxt_blks/pack_buffs/size_buffs) outlive the send.
          MPI_Request req;
          MPI_PackIsend_Strings(ctxt_blks, pack_buffs, size_buffs, recipient, 0,
                                comm, &req);
          MPI_Wait(&req, MPI_STATUS_IGNORE);
        }
        std::cout << "Surface-" << X << Y << ": initial data in rank "
                  << recipient << " - Ready: " << MPI_Wtime() << " s\n";
        std::cout.flush();
      }
    }
  } else {
    //! MPI_Send(): arg 'count' may be overflow
    // A single packed message may exceed MPI's `int` count limit, so the
    // multi-pack variant splits the transfer.
    MPI_MultiPackRecv_Strings(buf, nblk, root, 0, comm);
  }
}

void mhhmm_3d::init_data(const int *A, const int *B) {
  init_data_p2p<int>(MPI_COMM_EXEC_ROOT, this->rank_exec_, this->comm_exec_,
                     local_A_, A, M, K);
  init_data_p2p<int>(MPI_COMM_EXEC_ROOT, this->rank_exec_, this->comm_exec_,
                     local_B_, B, K, N);
}

void mhhmm_3d::strassen(std::vector<helib::Ctxt> &C, const mat_ctxt &A,
                        const mat_ctxt &B, int m1, int m2, int k1, int k2,
                        int n1, int n2) {
  /// One 2x2x2 Strassen step over ciphertext blocks (task-parallel).
  /// Accumulates the product of the 2x2 tiles of A (at m1,k1) and
  /// B (at k1,n1) into the 2x2 tile of C at (m1,n1), using 7 ciphertext
  /// multiplications M0..M6 instead of 8. OpenMP task dependences order
  /// each product before the accumulation tasks consuming it.

  ASSERT_PRINTF(m2 - m1 == 2 && n2 - n1 == 2 && k2 - k1 == 2,
                "Error calling strassen\n");

  // References into the 2x2 output tile (row-major, stride n_nblk()).
  helib::Ctxt &C00 = C[m1 * n_nblk() + n1 + 0];
  helib::Ctxt &C01 = C[m1 * n_nblk() + n1 + 1];
  helib::Ctxt &C10 = C[(m1 + 1) * n_nblk() + n1 + 0];
  helib::Ctxt &C11 = C[(m1 + 1) * n_nblk() + n1 + 1];
  const helib::Ctxt &A00 = A[m1 + 0][k1 + 0];
  const helib::Ctxt &A01 = A[m1 + 0][k1 + 1];
  const helib::Ctxt &A10 = A[m1 + 1][k1 + 0];
  const helib::Ctxt &A11 = A[m1 + 1][k1 + 1];
  const helib::Ctxt &B00 = B[k1 + 0][n1 + 0];
  const helib::Ctxt &B01 = B[k1 + 0][n1 + 1];
  const helib::Ctxt &B10 = B[k1 + 1][n1 + 0];
  const helib::Ctxt &B11 = B[k1 + 1][n1 + 1];

  // Each Mi is seeded with a copy of one operand; the remaining additions
  // and the product are applied in place inside its task below.
  helib::Ctxt M0 = A00;
  helib::Ctxt M1 = A10;
  helib::Ctxt M2 = B01;
  helib::Ctxt M3 = B10;
  helib::Ctxt M4 = A00;
  helib::Ctxt M5 = A10;
  helib::Ctxt M6 = A01;

  // Allow nested parallelism in case cgemm opens its own parallel region.
  omp_set_nested(1);
#pragma omp parallel
#pragma omp single
  {
#pragma omp task depend(out : M0)
    {
      // M0 = (A_00 + A_11)(B_00 + B_11)
      // Noise sampling brackets the cgemm of M0 only.
      stat_.cgemm_noise = log2_noise_bound(A00);
      helib::Ctxt tmp = B00;
      M0 += A11;
      tmp += B11;
      hmm_engine_->cgemm(type_, M0, M0, tmp);
      stat_.cgemm_noise = log2_noise_bound(M0) - stat_.cgemm_noise;
    }
#pragma omp task depend(out : M1)
    {
      // M1 = (A_10 + A_11)B_00
      M1 += A11;
      hmm_engine_->cgemm(type_, M1, M1, B00);
    }

#pragma omp task depend(out : M2)
    {
      // M2 = A_00(B_01 - B_11)
      M2 -= B11;
      hmm_engine_->cgemm(type_, M2, A00, M2);
    }

#pragma omp task depend(out : M3)
    {
      // M3 = A_11(B_10 - B_00)
      M3 -= B00;
      hmm_engine_->cgemm(type_, M3, A11, M3);
    }

#pragma omp task depend(out : M4)
    {
      // M4 = (A_00 + A_01)B_11
      M4 += A01;
      hmm_engine_->cgemm(type_, M4, M4, B11);
    }

#pragma omp task depend(out : M5)
    {
      // M5 = (A_10 - A_00)(B_00 + B_01)
      helib::Ctxt tmp = B00;
      M5 -= A00;
      tmp += B01;
      hmm_engine_->cgemm(type_, M5, M5, tmp);
    }

#pragma omp task depend(out : M6)
    {
      // M6 = (A_01 - A_11)(B_10 + B_11)
      helib::Ctxt tmp = B10;
      M6 -= A11;
      tmp += B11;
      hmm_engine_->cgemm(type_, M6, M6, tmp);
    }

    // The four accumulation tasks only read the Mi's (depend(in)) and each
    // writes a distinct C block, so they may run concurrently with each
    // other once their producers finish.
#pragma omp task depend(in : M0, M3, M4, M6)
    {
      // C_00 = M0 + M3 - M4 + M6
      C00 += M0;
      C00 += M3;
      C00 -= M4;
      C00 += M6;
    }
#pragma omp task depend(in : M2, M4)
    {
      // C_01 = M2 + M4
      C01 += M2;
      C01 += M4;
    }

#pragma omp task depend(in : M1, M3)
    {
      // C_10 = M1 + M3
      C10 += M1;
      C10 += M3;
    }

#pragma omp task depend(in : M0, M1, M2, M5)
    {
      // C_11 = M0 - M1 + M2 + M5
      C11 += M0;
      C11 -= M1;
      C11 += M2;
      C11 += M5;
    }
  }
  omp_set_nested(0);
}

void mhhmm_3d::strassen_seq(std::vector<helib::Ctxt> &C, const mat_ctxt &A,
                            const mat_ctxt &B, int m1, int m2, int k1, int k2,
                            int n1, int n2) {
  /// Sequential counterpart of strassen(): the same 7-product 2x2x2
  /// Strassen step, without OpenMP tasks. Kept in lockstep with the
  /// parallel version (see the commented alternative in strassen_r()).

  ASSERT_PRINTF(m2 - m1 == 2 && n2 - n1 == 2 && k2 - k1 == 2,
                "Error calling strassen\n");

  // References into the 2x2 output tile (row-major, stride n_nblk()).
  helib::Ctxt &C00 = C[m1 * n_nblk() + n1 + 0];
  helib::Ctxt &C01 = C[m1 * n_nblk() + n1 + 1];
  helib::Ctxt &C10 = C[(m1 + 1) * n_nblk() + n1 + 0];
  helib::Ctxt &C11 = C[(m1 + 1) * n_nblk() + n1 + 1];
  const helib::Ctxt &A00 = A[m1 + 0][k1 + 0];
  const helib::Ctxt &A01 = A[m1 + 0][k1 + 1];
  const helib::Ctxt &A10 = A[m1 + 1][k1 + 0];
  const helib::Ctxt &A11 = A[m1 + 1][k1 + 1];
  const helib::Ctxt &B00 = B[k1 + 0][n1 + 0];
  const helib::Ctxt &B01 = B[k1 + 0][n1 + 1];
  const helib::Ctxt &B10 = B[k1 + 1][n1 + 0];
  const helib::Ctxt &B11 = B[k1 + 1][n1 + 1];

  // Each Mi is seeded with a copy of one operand; the remaining additions
  // and the product are applied in place below.
  helib::Ctxt M0 = A00;
  helib::Ctxt M1 = A10;
  helib::Ctxt M2 = B01;
  helib::Ctxt M3 = B10;
  helib::Ctxt M4 = A00;
  helib::Ctxt M5 = A10;
  helib::Ctxt M6 = A01;

  // M0 = (A_00 + A_11)(B_00 + B_11)
  // Noise sampling brackets the first cgemm only.
  stat_.cgemm_noise = log2_noise_bound(A00);
  helib::Ctxt tmp = B00;
  M0 += A11;
  tmp += B11;
  hmm_engine_->cgemm(type_, M0, M0, tmp);
  stat_.cgemm_noise = log2_noise_bound(M0) - stat_.cgemm_noise;

  // M1 = (A_10 + A_11)B_00
  M1 += A11;
  hmm_engine_->cgemm(type_, M1, M1, B00);

  // M2 = A_00(B_01 - B_11)
  M2 -= B11;
  hmm_engine_->cgemm(type_, M2, A00, M2);

  // M3 = A_11(B_10 - B_00)
  M3 -= B00;
  hmm_engine_->cgemm(type_, M3, A11, M3);

  // M4 = (A_00 + A_01)B_11
  M4 += A01;
  hmm_engine_->cgemm(type_, M4, M4, B11);

  // M5 = (A_10 - A_00)(B_00 + B_01)
  tmp = B00;
  M5 -= A00;
  tmp += B01;
  hmm_engine_->cgemm(type_, M5, M5, tmp);

  // M6 = (A_01 - A_11)(B_10 + B_11)
  tmp = B10;
  M6 -= A11;
  tmp += B11;
  hmm_engine_->cgemm(type_, M6, M6, tmp);

  // C_00 = M0 + M3 - M4 + M6
  C00 += M0;
  C00 += M3;
  C00 -= M4;
  C00 += M6;

  // C_01 = M2 + M4
  C01 += M2;
  C01 += M4;

  // C_10 = M1 + M3
  C10 += M1;
  C10 += M3;

  // C_11 = M0 - M1 + M2 + M5
  C11 += M0;
  C11 -= M1;
  C11 += M2;
  C11 += M5;
}

void mhhmm_3d::stdblk(std::vector<helib::Ctxt> &C,
                      const std::vector<std::vector<helib::Ctxt>> &A,
                      const std::vector<std::vector<helib::Ctxt>> &B, int m1,
                      int m2, int k1, int k2, int n1, int n2) {
  /// Standard blocked ciphertext GEMM restricted to the index sub-ranges
  /// [m1,m2) x [n1,n2) x [k1,k2): C[i][j] += A[i][k] * B[k][j].
  /// Used as the fallback kernel of strassen_r() for slices too thin
  /// to split 2x2x2. Sequential; the noise statistics are sampled on
  /// global block (0,0) only, so they are updated only when the range
  /// covers it.
  for (int i = m1; i < m2; i++) {
    for (int j = n1; j < n2; j++) {
      for (int k = k1; k < k2; k++) {
        // Local block CGEMM
        helib::Ctxt AB(*pk_);
        hmm_engine_->cgemm(type_, AB, A[i][k], B[k][j]);

        // Get HMM nosie
        if (i == 0 && j == 0 && k == 0) {
          stat_.cgemm_noise = log2_noise_bound(AB) - log2_noise_bound(A[0][0]);
        }

        if (i == 0 && j == 0) { //! Get HAdd noise - Start
          stat_.add_partial_noise -= log2_noise_bound(C[i * n_nblk() + j]);
        }
        // Add partial sum
        C[i * n_nblk() + j] += AB;
        if (i == 0 && j == 0) { //! Get HAdd noise - Stop
          stat_.add_partial_noise += log2_noise_bound(C[i * n_nblk() + j]);
        }
      }
    }
  }
}

void mhhmm_3d::stdblk(std::vector<helib::Ctxt> &C, const mat_ctxt &A,
                      const mat_ctxt &B) {
  /// Standard blocked ciphertext GEMM over the whole local tile:
  /// C[i][j] += sum_k A[i][k] * B[k][j]. Runs the triple loop with
  /// OpenMP when `nested_` is set, sequentially otherwise. The noise
  /// statistics are sampled on block (0,0) only.
  if (nested_) {
    omp_set_nested(1);
#pragma omp parallel for collapse(3)
    for (int i = 0; i < m_nblk(); i++) {
      for (int j = 0; j < n_nblk(); j++) {
        for (int k = 0; k < k_nblk(); k++) {
          // Local block CGEMM
          helib::Ctxt AB(*pk_);
          hmm_engine_->cgemm(type_, AB, A[i][k], B[k][j]);

          // Get HMM noise (only the single (0,0,0) iteration writes)
          if (i == 0 && j == 0 && k == 0) {
            stat_.cgemm_noise =
                log2_noise_bound(AB) - log2_noise_bound(A[0][0]);
          }

          // The noise bookkeeping reads/writes C[0] and stat_, and with
          // collapse(3) several k-iterations of the (i==0, j==0) block run
          // concurrently — so it must share the critical section with the
          // accumulation, otherwise it races.
#pragma omp critical
          {
            if (i == 0 && j == 0) { //! Get HAdd noise - Start
              stat_.add_partial_noise -= log2_noise_bound(C[i * n_nblk() + j]);
            }
            // Add partial sum
            C[i * n_nblk() + j] += AB;
            if (i == 0 && j == 0) { //! Get HAdd noise - Stop
              stat_.add_partial_noise += log2_noise_bound(C[i * n_nblk() + j]);
            }
          }
        }
      }
    }
    omp_set_nested(0);
  } else {
    // Sequential path: identical arithmetic, no synchronization needed.
    for (int i = 0; i < m_nblk(); i++) {
      for (int j = 0; j < n_nblk(); j++) {
        for (int k = 0; k < k_nblk(); k++) {
          // Local block CGEMM
          helib::Ctxt AB(*pk_);
          hmm_engine_->cgemm(type_, AB, A[i][k], B[k][j]);

          // Get HMM noise
          if (i == 0 && j == 0 && k == 0) {
            stat_.cgemm_noise =
                log2_noise_bound(AB) - log2_noise_bound(A[0][0]);
          }

          if (i == 0 && j == 0) { //! Get HAdd noise - Start
            stat_.add_partial_noise -= log2_noise_bound(C[i * n_nblk() + j]);
          }
          // Add partial sum
          C[i * n_nblk() + j] += AB;
          if (i == 0 && j == 0) { //! Get HAdd noise - Stop
            stat_.add_partial_noise += log2_noise_bound(C[i * n_nblk() + j]);
          }
        }
      }
    }
  }
}
/// Recursively partition the block index space [m1,m2) x [k1,k2) x [n1,n2)
/// and apply the 2x2x2 Strassen kernel wherever an exact 2x2x2 tile
/// remains, falling back to the standard blocked product on slices of
/// width one. Recursion visits sub-problems in (m, n, k) order.
void mhhmm_3d::strassen_r(std::vector<helib::Ctxt> &C, const mat_ctxt &A,
                          const mat_ctxt &B, int m1, int m2, int k1, int k2,
                          int n1, int n2) {
  // Empty range on any axis: nothing to multiply.
  if (m1 == m2 || n1 == n2 || k1 == k2) {
    return;
  }

  const int dm = m2 - m1;
  const int dn = n2 - n1;
  const int dk = k2 - k1;

  if (dm == 1 || dn == 1 || dk == 1) {
    // A width-one slice cannot be split 2x2x2: use the standard kernel.
    stdblk(C, A, B, m1, m2, k1, k2, n1, n2);
    return;
  }

  if (dm == 2 && dn == 2 && dk == 2) {
    // Exact 2x2x2 tile: task-parallel Strassen step.
    strassen(C, A, B, m1, m2, k1, k2, n1, n2);
    // strassen_seq(C, A, B, m1, m2, k1, k2, n1, n2); // sequential variant
    return;
  }

  // Cut each axis after its first two blocks and recurse over the eight
  // sub-problems.
  const int m_cut = std::min(m1 + 2, m2);
  const int k_cut = std::min(k1 + 2, k2);
  const int n_cut = std::min(n1 + 2, n2);
  const int m_lo[2] = {m1, m_cut};
  const int m_hi[2] = {m_cut, m2};
  const int k_lo[2] = {k1, k_cut};
  const int k_hi[2] = {k_cut, k2};
  const int n_lo[2] = {n1, n_cut};
  const int n_hi[2] = {n_cut, n2};

  for (int mi = 0; mi < 2; mi++) {
    for (int ni = 0; ni < 2; ni++) {
      for (int ki = 0; ki < 2; ki++) {
        strassen_r(C, A, B, m_lo[mi], m_hi[mi], k_lo[ki], k_hi[ki], n_lo[ni],
                   n_hi[ni]);
      }
    }
  }
}

/// Entry point for the distributed ciphertext multiplication. Dispatches
/// to the 4D-packing execution path when `fhe4d` is set, otherwise to the
/// baseline path. Results are appended to `partial_C` on the K==0 surface.
void mhhmm_3d::exec(std::vector<std::string> &partial_C, bool fhe4d) {
  if (!fhe4d) {
    exec_base(partial_C);
    return;
  }
  exec_4d(partial_C);
}
void mhhmm_3d::exec_base(std::vector<std::string> &partial_C) {
  /// Baseline 3D execution path: broadcast serialized operands along the
  /// grid axes, deserialize, run local blocked CGEMMs, reduce partial sums
  /// along the K axis onto the K==0 surface, and serialize the result
  /// into `partial_C` (only filled on K==0 ranks).
  MPI_Barrier(comm_exec_);
  stat_.exec_time -= MPI_Wtime(); //! Exec - Start
  MPI_Pcontrol(1);                //! Activate mpiP

  // Broadcast
  stat_.cbcast_time -= MPI_Wtime(); //! Bcast - Start
  MPI_Bcast_Strings(local_A_, 0, comm_bcast_A_);
  MPI_Bcast_Strings(local_B_, 0, comm_bcast_B_);
  stat_.cbcast_time += MPI_Wtime(); //! Bcast - Stop
  // NOTE(review): the ceil(log2(np)) factor presumably models the hop
  // count of a tree broadcast — confirm against the cost model used
  // elsewhere. Also assumes local_A_/local_B_ are non-empty here.
  stat_.computing_comm_size += (local_A_.size() * local_A_[0].size() *
                                std::ceil(std::log2(np_)) / 1024.0 / 1024.0);
  stat_.computing_comm_size += (local_B_.size() * local_B_[0].size() *
                                std::ceil(std::log2(np_)) / 1024.0 / 1024.0);

  // Serialization (actually deserialization: strings -> helib::Ctxt)
  mat_ctxt ctxt_A;
  mat_ctxt ctxt_B;
  ctxt_A.resize(m_nblk());
  ctxt_B.resize(k_nblk());
  stat_.serialize_time -= MPI_Wtime();
  for (std::size_t i = 0; i < m_nblk(); i++) {
    for (std::size_t j = 0; j < k_nblk(); j++) {
      std::stringstream A_ss;
      A_ss << local_A_[i * k_nblk() + j];
      ctxt_A[i].emplace_back(helib::Ctxt::readFrom(A_ss, *pk_));
    }
  }
  for (int i = 0; i < k_nblk(); i++) {
    for (int j = 0; j < n_nblk(); j++) {
      std::stringstream B_ss;
      B_ss << local_B_[i * n_nblk() + j];
      ctxt_B[i].emplace_back(helib::Ctxt::readFrom(B_ss, *pk_));
    }
  }
  stat_.serialize_time += MPI_Wtime();

  // Local CGEMMs: accumulate into a tile of encrypted zeros.
  helib::Ctxt zero(*pk_);
  zero.DummyEncrypt(NTL::ZZX(0l));
  std::vector<helib::Ctxt> partial(m_nblk() * n_nblk(), zero);
  stat_.cgemm_time -= MPI_Wtime(); //! HMM - Start
  stdblk(partial, ctxt_A, ctxt_B);
  // strassen_r(partial, ctxt_A, ctxt_B, 0, m_nblk(), 0, k_nblk(), 0, n_nblk());
  stat_.cgemm_time += MPI_Wtime(); //! HMM - Stop

  // Reduction along K: ranks with K index != 0 send their partial tile to
  // the K==0 rank of their column, which accumulates.
  std::string pack_buff;
  std::vector<int> size_buff;
  std::vector<std::string> local_C;

  stat_.creduce_time -= MPI_Wtime(); //! Reduction - Start
  if (pgrid_index_[K] == 0) {
    for (int sender = 1; sender < pgrid_size_[K]; sender++) {
      MPI_PackRecv_Strings(local_C, m_nblk() * n_nblk(), sender, 0,
                           comm_reduce_C_);
      // Deserialize and accumulate each received block in parallel; the
      // taskloop iterations touch disjoint partial[l] entries.
#pragma omp parallel
#pragma omp single
#pragma omp taskloop
      for (int l = 0; l < local_C.size(); l++) {
        std::stringstream ss;
        ss << local_C[l];
        auto recv_partial_ctxt = helib::Ctxt::readFrom(ss, *pk_);
        partial[l] += recv_partial_ctxt;
      }
    }
  } else {
    local_C.resize(m_nblk() * n_nblk());
    size_buff.resize(m_nblk() * n_nblk());
#pragma omp parallel
#pragma omp single
#pragma omp taskloop
    for (int l = 0; l < local_C.size(); l++) {
      std::stringstream ss;
      partial[l].writeTo(ss);
      local_C[l] = ss.str();
    }

    // NOTE(review): `req` is never waited on or freed. The send buffers
    // (local_C/pack_buff/size_buff) do stay alive until the function-end
    // barrier, but the request object leaks — consider MPI_Wait (as
    // init_data_p2p does) or MPI_Request_free before returning.
    MPI_Request req = MPI_REQUEST_NULL;
    MPI_PackIsend_Strings(local_C, pack_buff, size_buff.data(), 0, 0,
                          comm_reduce_C_, &req);
  }
  stat_.creduce_time += MPI_Wtime(); //! Reduction - Stop

  // Serialization of the reduced result (root surface only).
  stat_.serialize_time -= MPI_Wtime(); //! Serial - Start
  for (auto i = 0; i < partial.size(); i++) {
    if (pgrid_index_[K] == 0) {
      std::stringstream ss;
      partial[i].writeTo(ss);
      partial_C.push_back(ss.str());
    }
  }
  stat_.serialize_time += MPI_Wtime(); //! Serial - Stop

  MPI_Pcontrol(0); //! Deactivate mpiP
  MPI_Barrier(comm_exec_);
  stat_.exec_time += MPI_Wtime(); //! Exec - Stop

  stat_.final_noise = log2_noise_bound(partial[0]);
}

INTEL_ITT_DOMAIN_CREATE(local_hhmm_domain, "src.local.hhmm");
INTEL_ITT_STRING_HANDLE_CREATE(hhmm_ra_handle, "RotateAlign");
INTEL_ITT_STRING_HANDLE_CREATE(hhmm_sc_handle, "ShiftCompute");
/// Public wrapper for the shared-memory 4D execution path.
/// `lagre_mem` (sic — presumably "large_mem"; kept to match the
/// declaration) selects the memory-hungry variants that keep all rotation
/// buffers of a k- or r-slice alive at once; see _exec_4d_shared().
void mhhmm_3d::exec_4d_shared(std::vector<std::string> &partial_C,
                              bool lagre_mem) {
  _exec_4d_shared(partial_C, lagre_mem);
}
void mhhmm_3d::_exec_4d_shared(std::vector<std::string> &partial_C,
                               bool lagre_mem) {
  /// Shared-memory 4D execution path. Pipeline:
  ///   1. broadcast serialized operands along the grid axes,
  ///   2. rotate-align A and B and build automorphism precomputations,
  ///   3. shift-compute local CGEMMs, choosing one of four
  ///      parallelization strategies over (rotation r, block k),
  ///   4. reduce partial tiles along K onto the K==0 surface,
  ///   5. serialize the result into `partial_C` (K==0 ranks only).
  /// `lagre_mem` (sic) enables the variants that materialize a whole
  /// slice of rotated operands at once.
  MPI_Barrier(comm_exec_);
  stat_.exec_time -= MPI_Wtime(); //! Exec - Start
  MPI_Pcontrol(1);                //! Activate mpiP

  // Broadcast
  stat_.cbcast_time -= MPI_Wtime(); //! Bcast - Start
  MPI_Bcast_Strings(local_A_, 0, comm_bcast_A_);
  MPI_Bcast_Strings(local_B_, 0, comm_bcast_B_);
  stat_.cbcast_time += MPI_Wtime(); //! Bcast - Stop
  // NOTE(review): ceil(log2(np)) presumably models tree-broadcast hops.
  stat_.computing_comm_size += (local_A_.size() * local_A_[0].size() *
                                std::ceil(std::log2(np_)) / 1024.0 / 1024.0);
  stat_.computing_comm_size += (local_B_.size() * local_B_[0].size() *
                                std::ceil(std::log2(np_)) / 1024.0 / 1024.0);

  const int b_m = m_nblk();
  const int b_n = n_nblk();
  const int b_k = k_nblk();

  // Local CGEMMs
  // R is the rotation count of the hypercube algorithm (smallest block
  // dimension of the packed matrices).
  const auto R = std::min(std::min(mbb(), nbb()), kbb());
  const auto &ea = hmm_cagent_->context()->getEA();
  auto engine = static_cast<hypercube_hmme *>(hmm_engine_);

  helib::Ctxt zero(*pk_);
  zero.DummyEncrypt(NTL::ZZX(0l));
  std::vector<helib::Ctxt> C(b_m * b_n, zero);
  std::vector<std::string> local_C;
  // Automorphism precomputations for every block of A and B.
  std::vector<std::shared_ptr<helib::GeneralAutomorphPrecon_FULL>> ctxt_Ap(b_m *
                                                                           b_k);
  std::vector<std::shared_ptr<helib::GeneralAutomorphPrecon_FULL>> ctxt_Bp(b_k *
                                                                           b_n);

  INTEL_ITT_TASK_BEGIN(local_hhmm_domain, hhmm_ra_handle); //! ITT - Begin
  stat_.cgemm_time -= MPI_Wtime();                         //! HMM - Start
  stat_.cgemm_ra_time -= MPI_Wtime();                      //! RA - Start
  // Deserialize + rotate-align both operands; the two taskloops are
  // independent (nogroup) and joined at the end of the single region.
#pragma omp parallel
#pragma omp single
  {
#pragma omp taskloop nogroup
    for (int l = 0; l < b_m * b_k; l++) {
      std::stringstream A_ss;
      A_ss << local_A_[l];
      auto ctxt_A = helib::Ctxt::readFrom(A_ss, *pk_);
      engine->rotate_align_A(type_, ctxt_A, ctxt_A);
      ctxt_Ap[l] =
          std::make_shared<helib::GeneralAutomorphPrecon_FULL>(ctxt_A, 1, ea);
    }
#pragma omp taskloop nogroup
    for (int l = 0; l < b_k * b_n; l++) {
      std::stringstream B_ss;
      B_ss << local_B_[l];
      auto ctxt_B = helib::Ctxt::readFrom(B_ss, *pk_);
      engine->rotate_align_B(type_, ctxt_B, ctxt_B);
      ctxt_Bp[l] =
          std::make_shared<helib::GeneralAutomorphPrecon_FULL>(ctxt_B, 0, ea);
    }
  }
  stat_.cgemm_ra_time += MPI_Wtime();    //! RA - Stop
  INTEL_ITT_TASK_END(local_hhmm_domain); //! ITT - End

  INTEL_ITT_TASK_BEGIN(local_hhmm_domain, hhmm_sc_handle); //! ITT - Begin
  stat_.cgemm_compute_time -= MPI_Wtime();                 //! SC - Start

  {
    // `partial[p]` accumulates one independent slice of the product;
    // `parallel_pivot` is the number of slices (b_k or R depending on the
    // strategy). The four branches below are exhaustive, so it is always
    // assigned before use.
    std::vector<std::vector<helib::Ctxt>> partial;
    int parallel_pivot;
    if (lagre_mem && b_k <= R) { /* Large Memory */
      // Strategy 1: loop over rotations, materialize all (k, *) rotated
      // blocks of a rotation at once; slices are indexed by k.
      parallel_pivot = b_k;
      partial.resize(parallel_pivot, C);
      for (int r = 0; r < R; r++) {
        std::vector<helib::Ctxt> A_ik(b_m * b_k, helib::Ctxt(*pk_));
        std::vector<std::shared_ptr<helib::Ctxt>> B_kj(b_n * b_k);
#pragma omp parallel
#pragma omp single
        {
#pragma omp taskloop collapse(2) nogroup
          for (int k = 0; k < b_k; k++) {
            for (int j = 0; j < b_n; j++) {
              B_kj[k * b_n + j] = ctxt_Bp[k * b_n + j]->automorph(-r);
            }
          }
#pragma omp taskloop collapse(2) nogroup
          for (int k = 0; k < b_k; k++) {
            for (int i = 0; i < b_m; i++) {
              A_ik[k * b_m + i] = std::move(
                  engine->shift_compute_A(type_, ctxt_Ap[i * b_k + k], r));
            }
          }
          // Both rotation taskloops must finish before the products below
          // consume A_ik/B_kj.
#pragma omp taskwait
#pragma omp taskloop collapse(3)
          for (int k = 0; k < b_k; k++) {
            for (int i = 0; i < b_m; i++) {
              for (int j = 0; j < b_n; j++) {
                auto L = A_ik[k * b_m + i];
                L.multiplyBy(*B_kj[k * b_n + j]);
                partial[k][i * b_n + j] += L;
              }
            }
          }
        }
      }
    } else if (lagre_mem && b_k > R) { /* Large Memory */
      // Strategy 2: loop over k blocks, materialize all (r, *) rotated
      // blocks of a k at once; slices are indexed by r.
      parallel_pivot = R;
      partial.resize(parallel_pivot, C);
      for (int k = 0; k < b_k; k++) {
        std::vector<helib::Ctxt> A_ir(b_m * R, helib::Ctxt(*pk_));
        std::vector<std::shared_ptr<helib::Ctxt>> B_jr(b_n * R);
#pragma omp parallel
#pragma omp single
        {
#pragma omp taskloop collapse(2) nogroup
          for (int r = 0; r < R; r++) {
            for (int j = 0; j < b_n; j++) {
              B_jr[r * b_n + j] = ctxt_Bp[k * b_n + j]->automorph(-r);
            }
          }
#pragma omp taskloop collapse(2) nogroup
          for (int r = 0; r < R; r++) {
            for (int i = 0; i < b_m; i++) {
              A_ir[r * b_m + i] = std::move(
                  engine->shift_compute_A(type_, ctxt_Ap[i * b_k + k], r));
            }
          }
          // Join the rotation taskloops before consuming A_ir/B_jr.
#pragma omp taskwait
#pragma omp taskloop collapse(3)
          for (int r = 0; r < R; r++) {
            for (int i = 0; i < b_m; i++) {
              for (int j = 0; j < b_n; j++) {
                auto L = A_ir[r * b_m + i];
                L.multiplyBy(*B_jr[r * b_n + j]);
                partial[r][i * b_n + j] += L;
              }
            }
          }
        }
      }
    } else if (R >= omp_get_max_threads() ||
               (b_k < omp_get_max_threads() && R >= b_k)) {
      // Strategy 3 (low memory): parallelize over rotations; each thread
      // owns slice r, so no synchronization is needed on partial[r].
      parallel_pivot = R;
      partial.resize(parallel_pivot, C);
#pragma omp parallel for
      for (int r = 0; r < R; r++) {
        for (int k = 0; k < b_k; k++) {
          std::vector<std::shared_ptr<helib::Ctxt>> B_j(b_n);
          for (int j = 0; j < b_n; j++) {
            B_j[j] = ctxt_Bp[k * b_n + j]->automorph(-r);
          }
          for (int i = 0; i < b_m; i++) {
            auto A_ikr = std::move(
                engine->shift_compute_A(type_, ctxt_Ap[i * b_k + k], r));
            for (int j = 0; j < b_n; j++) {
              auto L = A_ikr;
              L.multiplyBy(*B_j[j]);
              partial[r][i * b_n + j] += L;
            }
          }
        }
      }
    } else if (b_k >= omp_get_max_threads() ||
               (R < omp_get_max_threads() && b_k >= R)) {
      // Strategy 4 (low memory): parallelize over k blocks; each thread
      // owns slice k.
      parallel_pivot = b_k;
      partial.resize(parallel_pivot, C);
#pragma omp parallel for
      for (int k = 0; k < b_k; k++) {
        for (int r = 0; r < R; r++) {
          std::vector<std::shared_ptr<helib::Ctxt>> B_j(b_n);
          for (int j = 0; j < b_n; j++) {
            B_j[j] = ctxt_Bp[k * b_n + j]->automorph(-r);
          }
          for (int i = 0; i < b_m; i++) {
            auto A_ikr = std::move(
                engine->shift_compute_A(type_, ctxt_Ap[i * b_k + k], r));
            for (int j = 0; j < b_n; j++) {
              auto L = A_ikr;
              L.multiplyBy(*B_j[j]);
              partial[k][i * b_n + j] += L;
            }
          }
        }
      }
    }
    // Add partial sum: fold the per-slice accumulators into C.
#pragma omp parallel
#pragma omp single
#pragma omp taskloop
    for (int l = 0; l < b_m * b_n; l++) {
      for (int r = 0; r < parallel_pivot; r++) {
        C[l] += partial[r][l];
      }
    }
  }

  stat_.cgemm_compute_time += MPI_Wtime(); //! SC - Stop
  stat_.cgemm_time += MPI_Wtime();         //! HMM - Stop
  INTEL_ITT_TASK_END(local_hhmm_domain);   //! ITT - End

  // Reduction along K onto the K==0 surface.
  std::string pack_buff;
  std::vector<int> size_buff;

  stat_.creduce_time -= MPI_Wtime(); //! Reduction - Start
  // Packed MPI
  if (pgrid_index_[K] == 0) {
    for (int sender = 1; sender < pgrid_size_[K]; sender++) {
      MPI_PackRecv_Strings(local_C, b_m * b_n, sender, 0, comm_reduce_C_);
      // Deserialize and accumulate in parallel; iterations touch
      // disjoint C[l] entries.
#pragma omp parallel
#pragma omp single
#pragma omp taskloop
      for (int l = 0; l < local_C.size(); l++) {
        std::stringstream ss;
        ss << local_C[l];
        auto recv_partial_ctxt = helib::Ctxt::readFrom(ss, *pk_);
        C[l] += recv_partial_ctxt;
      }
    }
  } else {
    local_C.resize(b_m * b_n);
    size_buff.resize(b_m * b_n);
#pragma omp parallel
#pragma omp single
#pragma omp taskloop
    for (int l = 0; l < local_C.size(); l++) {
      std::stringstream ss;
      C[l].writeTo(ss);
      local_C[l] = ss.str();
    }

    // NOTE(review): `req` is never waited on or freed. The send buffers
    // stay alive until the function-end barrier, but the request object
    // leaks — consider MPI_Wait or MPI_Request_free (cf. init_data_p2p).
    MPI_Request req = MPI_REQUEST_NULL;
    MPI_PackIsend_Strings(local_C, pack_buff, size_buff.data(), 0, 0,
                          comm_reduce_C_, &req);
  }
  stat_.creduce_time += MPI_Wtime(); //! Reduction - Stop

  // Serialization of the reduced result (root surface only).
  stat_.serialize_time -= MPI_Wtime(); //! Serial - Start
  for (auto i = 0; i < C.size(); i++) {
    if (pgrid_index_[K] == 0) {
      std::stringstream ss;
      C[i].writeTo(ss);
      partial_C.push_back(ss.str());
    }
  }
  stat_.serialize_time += MPI_Wtime(); //! Serial - Stop

  MPI_Pcontrol(0); //! Deactivate mpiP
  MPI_Barrier(comm_exec_);
  stat_.exec_time += MPI_Wtime(); //! Exec - Stop

  stat_.final_noise = log2_noise_bound(C[0]);
}

void mhhmm_3d::exec_4d(std::vector<std::string> &partial_C) {
  MPI_Barrier(comm_exec_);
  stat_.exec_time -= MPI_Wtime(); //! Exec - Start
  MPI_Pcontrol(1);                //! Activate mpiP

  // Broadcast
  stat_.cbcast_time -= MPI_Wtime(); //! Bcast - Start
  MPI_Bcast_Strings(local_A_, 0, comm_bcast_A_);
  MPI_Bcast_Strings(local_B_, 0, comm_bcast_B_);
  stat_.cbcast_time += MPI_Wtime(); //! Bcast - Stop
  stat_.computing_comm_size += (local_A_.size() * local_A_[0].size() *
                                std::ceil(std::log2(np_)) / 1024.0 / 1024.0);
  stat_.computing_comm_size += (local_B_.size() * local_B_[0].size() *
                                std::ceil(std::log2(np_)) / 1024.0 / 1024.0);

  // Local CGEMMs
  const auto R = std::min(std::min(mbb(), nbb()), kbb());
  const auto &ea = hmm_cagent_->context()->getEA();
  auto engine = static_cast<hypercube_hmme *>(hmm_engine_);

  helib::Ctxt zero(*pk_);
  zero.DummyEncrypt(NTL::ZZX(0l));
  std::vector<helib::Ctxt> C(m_nblk() * n_nblk(), zero);
  std::vector<std::string> local_C;
  std::vector<std::vector<helib::Ctxt>> partial(R, C);
  std::vector<std::shared_ptr<helib::GeneralAutomorphPrecon_FULL>> ctxt_Ap(
      m_nblk() * k_nblk());
  std::vector<std::shared_ptr<helib::GeneralAutomorphPrecon_FULL>> ctxt_Bp(
      k_nblk() * n_nblk());

  INTEL_ITT_TASK_BEGIN(local_hhmm_domain, hhmm_ra_handle); //! ITT - Begin
  stat_.cgemm_time -= MPI_Wtime();                         //! HMM - Start
  stat_.cgemm_ra_time -= MPI_Wtime();                      //! RA - Start
#pragma omp parallel
#pragma omp single
  {
#pragma omp taskloop nogroup
    for (int l = 0; l < m_nblk() * k_nblk(); l++) {
      std::stringstream A_ss;
      A_ss << local_A_[l];
      auto ctxt_A = helib::Ctxt::readFrom(A_ss, *pk_);
      engine->rotate_align_A(type_, ctxt_A, ctxt_A);
      ctxt_Ap[l] =
          std::make_shared<helib::GeneralAutomorphPrecon_FULL>(ctxt_A, 1, ea);
    }
#pragma omp taskloop nogroup
    for (int l = 0; l < k_nblk() * n_nblk(); l++) {
      std::stringstream B_ss;
      B_ss << local_B_[l];
      auto ctxt_B = helib::Ctxt::readFrom(B_ss, *pk_);
      engine->rotate_align_B(type_, ctxt_B, ctxt_B);
      ctxt_Bp[l] =
          std::make_shared<helib::GeneralAutomorphPrecon_FULL>(ctxt_B, 0, ea);
    }
  }
  stat_.cgemm_ra_time += MPI_Wtime();    //! RA - Stop
  INTEL_ITT_TASK_END(local_hhmm_domain); //! ITT - End

  INTEL_ITT_TASK_BEGIN(local_hhmm_domain, hhmm_sc_handle); //! ITT - Begin
  const int b_m = m_nblk();
  const int b_n = n_nblk();
  const int b_k = k_nblk();
  int rank_A, rank_B;
  MPI_Comm_rank(comm_bcast_A_, &rank_A);
  MPI_Comm_rank(comm_bcast_B_, &rank_B);
  int rot_pos_A, rot_size_A;
  int rot_pos_B, rot_size_B;
  auto get = [](int idx, int num, int size, int &offset, int &blk_size) {
    blk_size = size / num;
    int r = size % num;
    ASSERT_PRINTF(r == 0, "Invaild: MPI_Allgather");
    if (idx < r) {
      blk_size++;
      offset = idx * blk_size;
    } else {
      offset = idx * blk_size + r;
    }
  };
  if (np() <= R && mp() <= R) {
    get(rank_A, np(), R, rot_pos_A, rot_size_A);
    get(rank_B, mp(), R, rot_pos_B, rot_size_B);
    for (int k = 0; k < b_k; k++) {
      std::vector<helib::Ctxt> A_ir(R * b_m, helib::Ctxt(*pk_));
      std::vector<helib::Ctxt> B_jr(R * b_n, helib::Ctxt(*pk_));
      {
        std::vector<int> aux_A(rot_size_A * b_m);
        std::vector<int> aux_B(rot_size_B * b_n);
        std::vector<std::string> As(rot_size_A * b_m);
        std::vector<std::string> Bs(rot_size_B * b_n);
        stat_.cgemm_shift_time -= MPI_Wtime(); //! Shift - Start
#pragma omp parallel
#pragma omp single
        {
#pragma omp taskloop collapse(2) shared(aux_A, As) nogroup
          for (int r = 0; r < rot_size_A; r++) {
            for (int i = 0; i < b_m; i++) {
              long rot = rot_pos_A + r;
              A_ir[rot * b_m + i] = std::move(
                  engine->shift_compute_A(type_, ctxt_Ap[i * b_k + k], rot));

              std::stringstream ss;
              A_ir[rot * b_m + i].writeTo(ss);
              As[r * b_m + i] = ss.str();
              aux_A[r * b_m + i] = As[r * b_m + i].size();
            }
          }
#pragma omp taskloop collapse(2) shared(aux_B, Bs) nogroup
          for (int r = 0; r < rot_size_B; r++) {
            for (int j = 0; j < b_n; j++) {
              long rot = rot_pos_B + r;
              B_jr[rot * b_n + j] = *ctxt_Bp[k * b_n + j]->automorph(-rot);

              std::stringstream ss;
              B_jr[rot * b_n + j].writeTo(ss);
              Bs[r * b_n + j] = ss.str();
              aux_B[r * b_n + j] = Bs[r * b_n + j].size();
            }
          }
        }
        stat_.cgemm_shift_time += MPI_Wtime(); //! Shift - Stop

        const unsigned long THRESHOLD = 1UL << 30;
        do { /* FOR A */
          if (np() == 1) {
            break;
          }
          stat_.cgather_sc_time -= MPI_Wtime(); //! All-gather - Start
          // Packing
          for (int i = 1; i < As.size(); i++) {
            As[0] += As[i];
          }
          //! the first collective is slow
          // All-reduce MAX(len)
          unsigned long len = As[0].size();
          unsigned long max_len = len;
          MPI_Allreduce(&len, &max_len, 1, MPI_UNSIGNED_LONG, MPI_MAX,
                        comm_bcast_A_);
          // std::vector<unsigned long> recv_len(np());
          // MPI_Allgather(&len, 1, MPI_UNSIGNED_LONG,
          //               const_cast<unsigned long *>(recv_len.data()), 1,
          //               MPI_UNSIGNED_LONG, comm_bcast_A_);
          // for (int i = 0; i < recv_len.size(); i++) {
          //   max_len = std::max(max_len, recv_len[i]);
          // }
          As[0].resize(max_len);
          // All-gather AUX
          std::vector<int> recv_aux(b_m * R);
          MPI_Allgather(aux_A.data(), b_m * rot_size_A, MPI_INT,
                        const_cast<int *>(recv_aux.data()), b_m * rot_size_A,
                        MPI_INT, comm_bcast_A_);

          // All-gather packed string
          std::string recv;
          recv.resize(np() * max_len);
          if (max_len > THRESHOLD) {
            //! displacement array is also 'INT'
            auto package_num = max_len / THRESHOLD;
            std::vector<std::string> packages(package_num + 1);
            for (unsigned long i = 0; i < package_num; i++) {
              packages[i].resize(static_cast<unsigned long>(np()) * THRESHOLD);
              MPI_Allgather(As[0].data() + i * THRESHOLD, THRESHOLD, MPI_CHAR,
                            const_cast<char *>(packages[i].data()), THRESHOLD,
                            MPI_CHAR, comm_bcast_A_);
            }
            auto tail_count = max_len - package_num * THRESHOLD;
            packages[package_num].resize(np() * tail_count);
            MPI_Allgather(As[0].data() + package_num * THRESHOLD, tail_count,
                          MPI_CHAR,
                          const_cast<char *>(packages[package_num].data()),
                          tail_count, MPI_CHAR, comm_bcast_A_);

#pragma omp parallel
#pragma omp single
#pragma omp taskloop
            for (unsigned long i = 0; i < np(); i++) {
              for (unsigned long j = 0; j < package_num; j++) {
                std::copy(packages[j].begin() + i * THRESHOLD,
                          packages[j].begin() + (i + 1) * THRESHOLD,
                          recv.begin() + i * max_len + j * THRESHOLD);
              }
              std::copy(packages[package_num].begin() + i * tail_count,
                        packages[package_num].begin() + (i + 1) * tail_count,
                        recv.begin() + i * max_len + package_num * THRESHOLD);
            }
          } else {
            MPI_Allgather(As[0].data(), max_len, MPI_CHAR,
                          const_cast<char *>(recv.data()), max_len, MPI_CHAR,
                          comm_bcast_A_);
          }
          stat_.cgather_sc_time += MPI_Wtime(); //! All-gather - Stop

          stat_.serialize_time -= MPI_Wtime(); //! Serial - Start
#pragma omp parallel
#pragma omp single
#pragma omp taskloop
          for (int k = 0; k < recv_aux.size(); k++) {
            int block_size = rot_size_A * b_m;
            int gidx = k / block_size;

            if (gidx == rank_A) {
              continue;
            }

            unsigned long gstart = gidx * max_len;
            int lidx = k % block_size;
            unsigned long lstart = 0;
            for (int i = 0; i < lidx; i++) {
              lstart += recv_aux[gidx * block_size + i];
            }
            std::string ctxt_str = recv.substr(
                gstart + lstart, recv_aux[gidx * block_size + lidx]);
            std::stringstream ss;
            ss << ctxt_str;
            A_ir[k] = helib::Ctxt::readFrom(ss, *pk_);
          }
          stat_.serialize_time += MPI_Wtime(); //! Serial - Stop
        } while (false);

        do { /* FOR B */
          if (mp() == 1) {
            break;
          }
          stat_.cgather_sc_time -= MPI_Wtime(); //! All-gather - Start
          // Packing
          for (int i = 1; i < Bs.size(); i++) {
            Bs[0] += Bs[i];
          }
          //! the first collective is slow
          // All-reduce MAX(len)
          unsigned long len = Bs[0].size();
          unsigned long max_len;
          MPI_Allreduce(&len, &max_len, 1, MPI_UNSIGNED_LONG, MPI_MAX,
                        comm_bcast_B_);
          // std::vector<unsigned long> recv_len(mp());
          // MPI_Allgather(&len, 1, MPI_UNSIGNED_LONG,
          //               const_cast<unsigned long *>(recv_len.data()), 1,
          //               MPI_UNSIGNED_LONG, comm_bcast_B_);
          // for (int i = 0; i < recv_len.size(); i++) {
          //   max_len = std::max(max_len, recv_len[i]);
          // }
          Bs[0].resize(max_len);
          // All-gather AUX
          std::vector<int> recv_aux(b_n * R);
          MPI_Allgather(aux_B.data(), b_n * rot_size_B, MPI_INT,
                        const_cast<int *>(recv_aux.data()), b_n * rot_size_B,
                        MPI_INT, comm_bcast_B_);

          // All-gather packed string
          std::string recv;
          recv.resize(mp() * max_len);
          if (max_len > THRESHOLD) {
            //! displacement array is also 'INT'
            auto package_num = max_len / THRESHOLD;
            std::vector<std::string> packages(package_num + 1);
            for (unsigned long i = 0; i < package_num; i++) {
              packages[i].resize(static_cast<unsigned long>(mp()) * THRESHOLD);
              MPI_Allgather(Bs[0].data() + i * THRESHOLD, THRESHOLD, MPI_CHAR,
                            const_cast<char *>(packages[i].data()), THRESHOLD,
                            MPI_CHAR, comm_bcast_B_);
            }
            auto tail_count = max_len - package_num * THRESHOLD;
            packages[package_num].resize(mp() * tail_count);
            MPI_Allgather(Bs[0].data() + package_num * THRESHOLD, tail_count,
                          MPI_CHAR,
                          const_cast<char *>(packages[package_num].data()),
                          tail_count, MPI_CHAR, comm_bcast_B_);

#pragma omp parallel
#pragma omp single
#pragma omp taskloop
            for (unsigned long i = 0; i < mp(); i++) {
              for (unsigned long j = 0; j < package_num; j++) {
                std::copy(packages[j].begin() + i * THRESHOLD,
                          packages[j].begin() + (i + 1) * THRESHOLD,
                          recv.begin() + i * max_len + j * THRESHOLD);
              }
              std::copy(packages[package_num].begin() + i * tail_count,
                        packages[package_num].begin() + (i + 1) * tail_count,
                        recv.begin() + i * max_len + package_num * THRESHOLD);
            }
          } else {
            MPI_Allgather(Bs[0].data(), max_len, MPI_CHAR,
                          const_cast<char *>(recv.data()), max_len, MPI_CHAR,
                          comm_bcast_B_);
          }
          stat_.cgather_sc_time += MPI_Wtime(); //! All-gather - Stop

          stat_.serialize_time -= MPI_Wtime(); //! Serial - Start
#pragma omp parallel
#pragma omp single
#pragma omp taskloop
          for (int k = 0; k < recv_aux.size(); k++) {
            int block_size = rot_size_B * b_n;
            int gidx = k / block_size;

            if (gidx == rank_B) {
              continue;
            }

            unsigned long gstart = gidx * max_len;
            int lidx = k % block_size;
            unsigned long lstart = 0;
            for (int i = 0; i < lidx; i++) {
              lstart += recv_aux[gidx * block_size + i];
            }
            std::string ctxt_str = recv.substr(
                gstart + lstart, recv_aux[gidx * block_size + lidx]);
            std::stringstream ss;
            ss << ctxt_str;
            B_jr[k] = helib::Ctxt::readFrom(ss, *pk_);
          }
          stat_.serialize_time += MPI_Wtime(); //! Serial - Stop
        } while (false);
      }

      stat_.cgemm_compute_time -= MPI_Wtime(); //! Compute - Start
#pragma omp parallel
#pragma omp single
#pragma omp taskloop collapse(3)
      for (int j = 0; j < b_n; j++) {
        for (int i = 0; i < b_m; i++) {
          for (int r = 0; r < R; r++) {
            auto L = A_ir[r * b_m + i];
            L.multiplyBy(B_jr[r * b_n + j]);
            partial[r][i * b_n + j] += L;
          }
        }
      }
      stat_.cgemm_compute_time += MPI_Wtime(); //! Compute - Stop
    }
  } else {
    get(rank_A, np(), R * b_m, rot_pos_A, rot_size_A);
    get(rank_B, mp(), R * b_n, rot_pos_B, rot_size_B);
    for (int k = 0; k < b_k; k++) {
      std::vector<helib::Ctxt> A_ir(R * b_m, helib::Ctxt(*pk_));
      std::vector<helib::Ctxt> B_jr(R * b_n, helib::Ctxt(*pk_));
      {
        std::vector<int> aux_A(rot_size_A);
        std::vector<int> aux_B(rot_size_B);
        std::vector<std::string> As(rot_size_A);
        std::vector<std::string> Bs(rot_size_B);
        stat_.cgemm_shift_time -= MPI_Wtime(); //! Shift - Start
#pragma omp parallel
#pragma omp single
        {
#pragma omp taskloop shared(aux_A, As) nogroup
          for (long l = 0; l < rot_size_A; l++) {
            auto idx = rot_pos_A + l;
            auto i = idx % b_m;
            auto rot = idx / b_m;
            A_ir[rot * b_m + i] = std::move(
                engine->shift_compute_A(type_, ctxt_Ap[i * b_k + k], rot));

            std::stringstream ss;
            A_ir[rot * b_m + i].writeTo(ss);
            As[l] = ss.str();
            aux_A[l] = As[l].size();
          }
#pragma omp taskloop shared(aux_B, Bs) nogroup
          for (int l = 0; l < rot_size_B; l++) {
            long idx = rot_pos_B + l;
            auto j = idx % b_n;
            auto rot = idx / b_n;
            B_jr[rot * b_n + j] = *ctxt_Bp[k * b_n + j]->automorph(-rot);

            std::stringstream ss;
            B_jr[rot * b_n + j].writeTo(ss);
            Bs[l] = ss.str();
            aux_B[l] = Bs[l].size();
          }
        }
        stat_.cgemm_shift_time += MPI_Wtime(); //! Shift - Stop

        const unsigned long THRESHOLD = 1UL << 30;
        do { /* FOR A */
          if (np() == 1) {
            break;
          }
          stat_.cgather_sc_time -= MPI_Wtime(); //! All-gather - Start
          // Packing
          for (int i = 1; i < As.size(); i++) {
            As[0] += As[i];
          }
          //! the first collective is slow
          // All-reduce MAX(len)
          unsigned long len = As[0].size();
          unsigned long max_len = len;
          MPI_Allreduce(&len, &max_len, 1, MPI_UNSIGNED_LONG, MPI_MAX,
                        comm_bcast_A_);
          // std::vector<unsigned long> recv_len(np());
          // MPI_Allgather(&len, 1, MPI_UNSIGNED_LONG,
          //               const_cast<unsigned long *>(recv_len.data()), 1,
          //               MPI_UNSIGNED_LONG, comm_bcast_A_);
          // for (int i = 0; i < recv_len.size(); i++) {
          //   max_len = std::max(max_len, recv_len[i]);
          // }
          As[0].resize(max_len);
          // All-gather AUX
          std::vector<int> recv_aux(b_m * R);
          MPI_Allgather(aux_A.data(), rot_size_A, MPI_INT,
                        const_cast<int *>(recv_aux.data()), rot_size_A, MPI_INT,
                        comm_bcast_A_);

          // All-gather packed string
          std::string recv;
          recv.resize(np() * max_len);
          if (max_len > THRESHOLD) {
            //! displacement array is also 'INT'
            auto package_num = max_len / THRESHOLD;
            std::vector<std::string> packages(package_num + 1);
            for (unsigned long i = 0; i < package_num; i++) {
              packages[i].resize(static_cast<unsigned long>(np()) * THRESHOLD);
              MPI_Allgather(As[0].data() + i * THRESHOLD, THRESHOLD, MPI_CHAR,
                            const_cast<char *>(packages[i].data()), THRESHOLD,
                            MPI_CHAR, comm_bcast_A_);
            }
            auto tail_count = max_len - package_num * THRESHOLD;
            packages[package_num].resize(np() * tail_count);
            MPI_Allgather(As[0].data() + package_num * THRESHOLD, tail_count,
                          MPI_CHAR,
                          const_cast<char *>(packages[package_num].data()),
                          tail_count, MPI_CHAR, comm_bcast_A_);

#pragma omp parallel
#pragma omp single
#pragma omp taskloop
            for (unsigned long i = 0; i < np(); i++) {
              for (unsigned long j = 0; j < package_num; j++) {
                std::copy(packages[j].begin() + i * THRESHOLD,
                          packages[j].begin() + (i + 1) * THRESHOLD,
                          recv.begin() + i * max_len + j * THRESHOLD);
              }
              std::copy(packages[package_num].begin() + i * tail_count,
                        packages[package_num].begin() + (i + 1) * tail_count,
                        recv.begin() + i * max_len + package_num * THRESHOLD);
            }
          } else {
            MPI_Allgather(As[0].data(), max_len, MPI_CHAR,
                          const_cast<char *>(recv.data()), max_len, MPI_CHAR,
                          comm_bcast_A_);
          }
          stat_.cgather_sc_time += MPI_Wtime(); //! All-gather - Stop

          stat_.serialize_time -= MPI_Wtime(); //! Serial - Start
#pragma omp parallel
#pragma omp single
#pragma omp taskloop
          for (int k = 0; k < recv_aux.size(); k++) {
            int block_size = rot_size_A;
            int gidx = k / block_size;

            if (gidx == rank_A) {
              continue;
            }

            unsigned long gstart = gidx * max_len;
            int lidx = k % block_size;
            unsigned long lstart = 0;
            for (int i = 0; i < lidx; i++) {
              lstart += recv_aux[gidx * block_size + i];
            }
            std::string ctxt_str = recv.substr(
                gstart + lstart, recv_aux[gidx * block_size + lidx]);
            std::stringstream ss;
            ss << ctxt_str;
            A_ir[k] = helib::Ctxt::readFrom(ss, *pk_);
          }
          stat_.serialize_time += MPI_Wtime(); //! Serial - Stop
        } while (false);

        do { /* FOR B */
          if (mp() == 1) {
            break;
          }
          stat_.cgather_sc_time -= MPI_Wtime(); //! All-gather - Start
          // Packing
          for (int i = 1; i < Bs.size(); i++) {
            Bs[0] += Bs[i];
          }
          //! the first collective is slow
          // All-reduce MAX(len)
          unsigned long len = Bs[0].size();
          unsigned long max_len;
          MPI_Allreduce(&len, &max_len, 1, MPI_UNSIGNED_LONG, MPI_MAX,
                        comm_bcast_B_);
          // std::vector<unsigned long> recv_len(mp());
          // MPI_Allgather(&len, 1, MPI_UNSIGNED_LONG,
          //               const_cast<unsigned long *>(recv_len.data()), 1,
          //               MPI_UNSIGNED_LONG, comm_bcast_B_);
          // for (int i = 0; i < recv_len.size(); i++) {
          //   max_len = std::max(max_len, recv_len[i]);
          // }
          Bs[0].resize(max_len);
          // All-gather AUX
          std::vector<int> recv_aux(b_n * R);
          MPI_Allgather(aux_B.data(), rot_size_B, MPI_INT,
                        const_cast<int *>(recv_aux.data()), rot_size_B, MPI_INT,
                        comm_bcast_B_);

          // All-gather packed string
          std::string recv;
          recv.resize(mp() * max_len);
          if (max_len > THRESHOLD) {
            //! displacement array is also 'INT'
            auto package_num = max_len / THRESHOLD;
            std::vector<std::string> packages(package_num + 1);
            for (unsigned long i = 0; i < package_num; i++) {
              packages[i].resize(static_cast<unsigned long>(mp()) * THRESHOLD);
              MPI_Allgather(Bs[0].data() + i * THRESHOLD, THRESHOLD, MPI_CHAR,
                            const_cast<char *>(packages[i].data()), THRESHOLD,
                            MPI_CHAR, comm_bcast_B_);
            }
            auto tail_count = max_len - package_num * THRESHOLD;
            packages[package_num].resize(mp() * tail_count);
            MPI_Allgather(Bs[0].data() + package_num * THRESHOLD, tail_count,
                          MPI_CHAR,
                          const_cast<char *>(packages[package_num].data()),
                          tail_count, MPI_CHAR, comm_bcast_B_);

#pragma omp parallel
#pragma omp single
#pragma omp taskloop
            for (unsigned long i = 0; i < mp(); i++) {
              for (unsigned long j = 0; j < package_num; j++) {
                std::copy(packages[j].begin() + i * THRESHOLD,
                          packages[j].begin() + (i + 1) * THRESHOLD,
                          recv.begin() + i * max_len + j * THRESHOLD);
              }
              std::copy(packages[package_num].begin() + i * tail_count,
                        packages[package_num].begin() + (i + 1) * tail_count,
                        recv.begin() + i * max_len + package_num * THRESHOLD);
            }
          } else {
            MPI_Allgather(Bs[0].data(), max_len, MPI_CHAR,
                          const_cast<char *>(recv.data()), max_len, MPI_CHAR,
                          comm_bcast_B_);
          }
          stat_.cgather_sc_time += MPI_Wtime(); //! All-gather - Stop

          stat_.serialize_time -= MPI_Wtime(); //! Serial - Start
#pragma omp parallel
#pragma omp single
#pragma omp taskloop
          for (int k = 0; k < recv_aux.size(); k++) {
            int block_size = rot_size_B;
            int gidx = k / block_size;

            if (gidx == rank_B) {
              continue;
            }

            unsigned long gstart = gidx * max_len;
            int lidx = k % block_size;
            unsigned long lstart = 0;
            for (int i = 0; i < lidx; i++) {
              lstart += recv_aux[gidx * block_size + i];
            }
            std::string ctxt_str = recv.substr(
                gstart + lstart, recv_aux[gidx * block_size + lidx]);
            std::stringstream ss;
            ss << ctxt_str;
            B_jr[k] = helib::Ctxt::readFrom(ss, *pk_);
          }
          stat_.serialize_time += MPI_Wtime(); //! Serial - Stop
        } while (false);
      }

      stat_.cgemm_compute_time -= MPI_Wtime(); //! Compute - Start
#pragma omp parallel
#pragma omp single
#pragma omp taskloop collapse(3)
      for (int j = 0; j < b_n; j++) {
        for (int i = 0; i < b_m; i++) {
          for (int r = 0; r < R; r++) {
            auto L = A_ir[r * b_m + i];
            L.multiplyBy(B_jr[r * b_n + j]);
            partial[r][i * b_n + j] += L;
          }
        }
      }
      stat_.cgemm_compute_time += MPI_Wtime(); //! Compute - Stop
    }
  }
  // Add partial sum
  stat_.cgemm_compute_time -= MPI_Wtime(); //! Compute - Start
#pragma omp parallel
#pragma omp single
#pragma omp taskloop
  // #pragma omp parallel for
  for (int l = 0; l < m_nblk() * n_nblk(); l++) {
    for (int r = 0; r < R; r++) {
      C[l] += partial[r][l];
    }
  }
  stat_.cgemm_compute_time += MPI_Wtime(); //! Compute - Stop
  stat_.cgemm_time += MPI_Wtime();         //! HMM - Stop
  INTEL_ITT_TASK_END(local_hhmm_domain);   //! ITT - End

  // Reduction
  std::string pack_buff;
  std::vector<int> size_buff;

  stat_.creduce_time -= MPI_Wtime(); //! Reduction - Start
  // Packed MPI
  if (pgrid_index_[K] == 0) {
    for (int sender = 1; sender < pgrid_size_[K]; sender++) {
      MPI_PackRecv_Strings(local_C, m_nblk() * n_nblk(), sender, 0,
                           comm_reduce_C_);
#pragma omp parallel
#pragma omp single
#pragma omp taskloop
      for (int l = 0; l < local_C.size(); l++) {
        std::stringstream ss;
        ss << local_C[l];
        auto recv_partial_ctxt = helib::Ctxt::readFrom(ss, *pk_);
        C[l] += recv_partial_ctxt;
      }
    }
  } else {
    local_C.resize(m_nblk() * n_nblk());
    size_buff.resize(m_nblk() * n_nblk());
#pragma omp parallel
#pragma omp single
#pragma omp taskloop
    for (int l = 0; l < local_C.size(); l++) {
      std::stringstream ss;
      C[l].writeTo(ss);
      local_C[l] = ss.str();
    }

    MPI_Request req = MPI_REQUEST_NULL;
    MPI_PackIsend_Strings(local_C, pack_buff, size_buff.data(), 0, 0,
                          comm_reduce_C_, &req);
  }
  stat_.creduce_time += MPI_Wtime(); //! Reduction - Stop

  // Deserialization
  stat_.serialize_time -= MPI_Wtime(); //! Serial - Start
  for (auto i = 0; i < C.size(); i++) {
    if (pgrid_index_[K] == 0) {
      std::stringstream ss;
      C[i].writeTo(ss);
      partial_C.push_back(ss.str());
    }
  }
  stat_.serialize_time += MPI_Wtime(); //! Serial - Stop

  MPI_Pcontrol(0); //! Deactivate mpiP
  MPI_Barrier(comm_exec_);
  stat_.exec_time += MPI_Wtime(); //! Exec - Stop

  stat_.final_noise = log2_noise_bound(C[0]);
}

/// Collect (and decrypt) the distributed result matrix C on the root rank(s).
///
/// @param partial_C serialized ciphertext blocks owned by this rank
///                  (m_nblk() * n_nblk() entries, row-major over (i, j)).
/// @param result    output plaintext matrices; resized to 1 (local mode) or
///                  np_ (gather mode). Only written on decrypting root ranks.
/// @param local     when true, each exec root decrypts only its own blocks;
///                  when false, all blocks are gathered to MPI_COMM_EXEC_ROOT.
///
/// Ranks outside the K = 0 plane of the process grid hold no reduced C blocks
/// and return immediately. Decryption failure aborts the job via MPI_Abort.
void mhhmm_3d::collect_data(const std::vector<std::string> &partial_C,
                            std::vector<NTL::mat_ZZ> &result, bool local) {
  if (pgrid_index_[K] != 0) {
    return;
  }

  // Decrypt a full m_nblk() x n_nblk() grid of serialized ciphertext blocks
  // and scatter the plaintext sub-blocks into `out` (block_m() x block_n()).
  // Shared by the local and gather paths below.
  auto decrypt_blocks = [&](hypercube_hmmcc *cc,
                            const std::vector<std::string> &ctxt_str,
                            NTL::mat_ZZ &out) {
    out.SetDims(block_m(), block_n());
#pragma omp parallel
#pragma omp single
#pragma omp taskloop
    for (int i = 0; i < m_nblk(); i++) {
      for (int j = 0; j < n_nblk(); j++) {
        std::stringstream ss;
        ss << ctxt_str[i * n_nblk() + j];
        auto ctxt = helib::Ctxt::readFrom(ss, *pk_);

        NTL::mat_ZZ res;
        try {
          cc->decrypt(res, ctxt);
        } catch (const std::exception &e) {
          // Report the noise level alongside the error to aid debugging
          // (decryption typically fails when the noise budget is exhausted).
          std::cerr << e.what() << ":" << log2_noise_bound(ctxt) << std::endl;
          std::flush(std::cerr);
          MPI_Abort(comm_exec_, 1);
        }

        // Place the decrypted sub-block at position (i, j) of the output.
        for (int ii = 0; ii < mbb(); ii++) {
          for (int jj = 0; jj < nbb(); jj++) {
            out[i * mbb() + ii][j * nbb() + jj] = res[ii][jj];
          }
        }
      }
    }
  };

  if (local) {
    if (rank_exec_ == MPI_COMM_EXEC_ROOT) {
      // dynamic_cast so the null check below actually validates the agent's
      // concrete type — static_cast of a non-null pointer never yields
      // nullptr, which made the original assertion vacuous.
      auto hmmcc = dynamic_cast<hypercube_hmmcc *>(hmm_cagent_);
      ASSERT_PRINTF(hmmcc != nullptr, "Invalid root\n");
      result.resize(1);
      decrypt_blocks(hmmcc, partial_C, result[0]);
    }
    return;
  }

  if (rank_exec_ == MPI_COMM_EXEC_ROOT) {
    result.resize(np_);
    auto hmmcc = dynamic_cast<hypercube_hmmcc *>(hmm_cagent_);
    ASSERT_PRINTF(hmmcc != nullptr, "Invalid root\n");

    // Receive (or reuse) the partial C blocks of every (M, N) grid cell,
    // then decrypt them into the corresponding slot of `result`.
    int sf_C[AXIS_NUM] = {0, 0, 0};
    for (sf_C[M] = 0; sf_C[M] < pgrid_size_[M]; sf_C[M]++) {
      for (sf_C[N] = 0; sf_C[N] < pgrid_size_[N]; sf_C[N]++) {
        std::vector<std::string> ctxt_str;
        const int sender = this->index_to_rank_exec(sf_C);
        if (sender == MPI_COMM_EXEC_ROOT) {
          ctxt_str = partial_C; // our own blocks: no communication needed
        } else {
          MPI_Recv_Strings(ctxt_str, m_nblk() * n_nblk(), sender, 0,
                           comm_exec_);
        }

        const int idx = sf_C[M] * pgrid_size_[N] + sf_C[N];
        decrypt_blocks(hmmcc, ctxt_str, result[idx]);
      }
    }
  } else {
    // Non-root ranks ship their serialized blocks to the exec root.
    MPI_Send_Strings(partial_C, MPI_COMM_EXEC_ROOT, 0, comm_exec_);
  }
}