#include "cipher_cannon.hpp"
#include "get_matrix_blk.hpp"
#include "hypercube/hypercube_cryto_agent.hpp"
#include "intel_itt_wrapper.hpp"

// Return log2 of the ciphertext's total noise bound.
// Computed as ln(bound) / ln(2) in NTL's extended-range xdouble to avoid
// overflow on very large noise bounds.
static inline double log2_noise_bound(const helib::Ctxt &ctxt) {
  static const double kLn2 = NTL::log(NTL::xdouble(2l));
  return NTL::log(ctxt.totalNoiseBound()) / kLn2;
}

template <typename Scheme, typename ExecOutput>
cipher_cannon<Scheme, ExecOutput>::cipher_cannon(int root, MPI_Comm comm,
                                                 long m, long n, long k)
    : cipher_mpi_base<Scheme, ExecOutput>(root, comm), m_(m), n_(n), k_(k),
      cannon_comm_(MPI_COMM_NULL) {
  stat_.setup_time -= MPI_Wtime(); //! Setup - Start
  // Get the size of `comm`
  int np;
  MPI_Comm_size(comm, &np);
  // Largest D with D * D <= np.  Round-then-adjust instead of a plain
  // truncation: for large perfect squares std::sqrt may return D - epsilon,
  // and static_cast<int> would then silently drop a whole row/column of the
  // process grid.
  sqrt_np_ = static_cast<int>(std::round(std::sqrt(static_cast<double>(np))));
  while (sqrt_np_ * sqrt_np_ > np) {
    sqrt_np_--;
  }
  np_ = sqrt_np_ * sqrt_np_; // processes participating in the grid
  rp_ = np - np_;            // leftover (idle) processes
  // Create the execution communicator
  this->create_comm_exec(np_, comm);

  // Per-process matrix block sizes (ceiling division so the grid covers the
  // whole matrix even when dimensions are not divisible by D)
  mb_ = (m + sqrt_np_ - 1) / sqrt_np_;
  nb_ = (n + sqrt_np_ - 1) / sqrt_np_;
  kb_ = (k + sqrt_np_ - 1) / sqrt_np_;

  if (!this->active_exec_) {
    return;
  }

  // Initialize the topology of Cannon's algorithm
  int dims[AXIS_NUM] = {sqrt_np_, sqrt_np_};
  int periods[AXIS_NUM] = {true, true};
  // Create a communicator given the 2D torus topology.
  MPI_Cart_create(this->comm_exec_, AXIS_NUM, dims, periods, true,
                  &cannon_comm_);

  // Get the new rank in `cannon_comm_`
  int cannon_rank;
  MPI_Comm_rank(cannon_comm_, &cannon_rank);

  // Get my coordinates in the new communicator
  int coords[AXIS_NUM];
  MPI_Cart_coords(cannon_comm_, cannon_rank, AXIS_NUM, coords);

  // Get source/destination pair for RotateAlign & ShiftCompute.
  // The align shifts are skewed by the process's own coordinate; the compute
  // shifts move by one step per iteration.
  MPI_Cart_shift(cannon_comm_, Y, -coords[X], &sa_rank_[R], &sa_rank_[L]);
  MPI_Cart_shift(cannon_comm_, X, -coords[Y], &sa_rank_[D], &sa_rank_[U]);
  MPI_Cart_shift(cannon_comm_, Y, -1, &sc_rank_[R], &sc_rank_[L]);
  MPI_Cart_shift(cannon_comm_, X, -1, &sc_rank_[D], &sc_rank_[U]);
  stat_.setup_time += MPI_Wtime(); //! Setup - Stop
}

template <typename Scheme, typename ExecOutput>
void cipher_cannon<Scheme, ExecOutput>::report(std::ostream &ss) const {
  // Both downcasts must be dynamic_cast: the nullptr checks below are only
  // meaningful when the cast actually verifies the runtime type (a
  // static_cast would "succeed" on any non-null agent, including servers
  // that have no key-gen / find-params timings).
  auto hmmcc = dynamic_cast<hmm_crypto_client<Scheme> *>(this->hmm_cagent_);
  auto hhmmcc = dynamic_cast<hypercube_hmmcc *>(this->hmm_cagent_);
  // clang-format off
  ss << "==================== Cipher Cannon ====================\n";
  ss << "Problem Size m * n * k    : " << m_ << " * " << n_ << " * " << k_ << std::endl;
  ss << "Process Grid D * D        : " << sqrt_np_ << " * " << sqrt_np_ << std::endl;
  ss << "Work cuboid  mb * nb * kb : " << block_m() << " * " << block_n() << " * " << block_k() << std::endl;
  ss << "Process utilization       : "
     << static_cast<double>(np_) * 100.0 / static_cast<double>(np_ + rp_)
     << "% (" << np_ << "/" << np_ + rp_ << ")"
     << std::endl;
  ss << "* Setup Time              : " << stat_.setup_time << " s\n";
  if (hhmmcc != nullptr) {
  ss << "  * Find Params           : " << hhmmcc->find_params_time() << " s\n";
  }
  ss << "  * Build Context         : " << this->hmm_cagent_->build_context_time() << " s\n";
  if (hmmcc != nullptr) {
  ss << "  * KeyGen Time           : " << hmmcc->key_gen_time() << " s\n";
  }
  ss << "  * Bcast PubKey          : " << stat_.bcast_pk_time << " s\n";
  ss << "  * HMM Encoding          : " << stat_.hmm_encoding_time << " s\n";
  ss << "* Execution time          : " << stat_.exec_time << " s\n";
  ss << "  * Shift Align           : " << stat_.shift_align_time << " s\n";
  ss << "  * Shift Compute         : " << stat_.shift_compute_time << " s\n";
  ss << "    * Shift A & B         : " << stat_.sc_comm_time << " s\n";
  ss << "    * Local CGEMMs        : " << stat_.sc_cgemm_time << " s\n";
  int cnt = 0;
  // Cap the per-CGEMM listing at 32 entries to keep the report readable.
  for (auto cgemm_time: stat_.cgemm_times) {
  if (cnt == 32) {
  ss << "      * .....";
  break;
  } 
  ss << "      * CGEMM             : " << cgemm_time << " s\n";
  cnt ++;
  }
  ss << "    * (De)serialization   : " << stat_.sc_serialize_time << " s\n";
  ss << "* Public Key Size         : " << stat_.pk_size << " MB\n";
  ss << "* Communication Traffic   : " << stat_.computing_comm_size / 1024.0 << " MB\n";
  ss << "* Final noise bound       : " << stat_.final_noise << std::endl;
  ss << "  * Init noise bound      : " << stat_.init_noise << std::endl;
  ss << "  + CGEMM(depth = 1)      : " << stat_.cgemm_noise << std::endl;
  ss << "  + Add partial sum       : " << stat_.add_partial_noise << std::endl;
  ss << "=======================================================\n";
  // clang-format on
}

// Build the crypto agent for one rank: the root constructs the full
// client-side agent and serializes its HElib context; all other ranks
// reconstruct a server-side agent from the broadcast context so every rank
// shares identical parameters.
static hmm_crypto_agent *
setup_hypercube_crypto(int root, int rank, const params<helib::BGV> &params,
                       const cube_signature &cube, MPI_Comm comm) {
  const bool is_root = (rank == root);
  hmm_crypto_agent *cagent = nullptr;
  std::stringstream ss;
  std::string ctx_str;

  if (is_root) {
    cagent = new hypercube_hmmcc(params, cube, true);
    cagent->context()->writeTo(ss);
    ctx_str = ss.str();
  }
  // Everyone participates in the broadcast of the serialized context.
  MPI_Bcast_String(ctx_str, root, comm);
  if (!is_root) {
    ss << ctx_str;
    cagent = new hypercube_hmmcs(ss);
  }
  return cagent;
}

// Thin wrapper: install the hypercube crypto agent (client on `root`,
// server elsewhere) as this instance's `hmm_cagent_`.
void shhmm_cannon::setup_crypto(int root, int rank,
                                const params<helib::BGV> &params,
                                const cube_signature &cube, MPI_Comm comm) {
  hmm_cagent_ = setup_hypercube_crypto(root, rank, params, cube, comm);
}

//* Single-Ciphertext Version
// Single-ciphertext Cannon: each process holds exactly one ciphertext per
// matrix block. Sets up the HMM engine, the crypto agents, broadcasts the
// public key, and registers the encoding.
shhmm_cannon::shhmm_cannon(int root, MPI_Comm comm, long m, long n, long k,
                           const params<helib::BGV> &params,
                           const hmm_status<shmm_engine> &status)
    : cipher_cannon(root, comm, m, n, k) {
  stat_.setup_time -= MPI_Wtime(); //! Setup - Start
  // Get MPI metadata
  int rank;
  MPI_Comm_rank(comm, &rank);

  // Initialize HMM engine
  auto hhmme = new hypercube_hmme(status);
  hmm_engine_ = hhmme;

  // Initialize HMM cryptographic systems
  const auto eh_cube = hypercube_hmme::get_expected_mnk(mb_, nb_, kb_);
#if 1 // Find parameters respectively
  hmm_cagent_ = rank == root ? new hypercube_hmmcc(params, eh_cube, true)
                             : new hypercube_hmmcs(params, eh_cube, true);
#else // Broadcast the HElib context
  setup_crypto(root, rank, params, eh_cube, comm);
#endif
  if (rank == root) {
    // Print out the HElib context
    hmm_cagent_->report_context(std::cout);
    std::flush(std::cout);
  }

  // Broadcast the public key
  stat_.bcast_pk_time -= MPI_Wtime(); //! Bcast PK - Start
  stat_.pk_size = this->bcast_pubkey();
  stat_.bcast_pk_time += MPI_Wtime(); //! Bcast PK - Stop

  // Activate HMM engine
  INTEL_ITT_RESUME;
  stat_.hmm_encoding_time -= MPI_Wtime(); //! HMM Encoding - Start
  // `eh_cube` already holds get_expected_mnk(mb_, nb_, kb_); reuse it
  // instead of recomputing the identical signature.
  type_ = hypercube_hmme::parse_matrix_type(eh_cube);
  hhmme->register_engine(hmm_cagent_->context()->getEA(), eh_cube);
  stat_.hmm_encoding_time += MPI_Wtime(); //! HMM Encoding - Stop
  INTEL_ITT_PAUSE;

  stat_.setup_time += MPI_Wtime(); //! Setup - Stop
}

// Encrypt and scatter the input matrices A (m x k) and B (k x n): the root
// encrypts one block per grid process and sends it; every other rank
// receives its serialized local blocks.
template <typename T>
void shhmm_cannon::init_data(int root, int rank, MPI_Comm comm, const T *A,
                             const T *B) {
  static_assert(std::is_same<T, int>::value ||
                    std::is_same<T, unsigned int>::value ||
                    std::is_same<T, double>::value,
                "Only `int`, `unsigned int` and `double` are allowed.");

  if (rank == root) {
    // dynamic_cast so the assertion genuinely verifies the agent is the
    // client (only the client holds the secret key needed to encrypt).
    auto hmmcc = dynamic_cast<hypercube_hmmcc *>(hmm_cagent_);
    ASSERT_PRINTF(hmmcc != nullptr, "Invalid root\n");
    for (int recipient = 0; recipient < np_; recipient++) {
      // Grid coordinates of the recipient (row-major rank layout).
      const int i = recipient / sqrt_np_;
      const int j = recipient % sqrt_np_;

      std::stringstream A_ss, B_ss;
      auto A_blk = get_blk(A, m_, k_, i, j, mb_, kb_);
      auto B_blk = get_blk(B, k_, n_, i, j, kb_, nb_);
      auto ctxt_A_blk = hmmcc->encrypt(A_blk);
      auto ctxt_B_blk = hmmcc->encrypt(B_blk);
      ctxt_A_blk.writeTo(A_ss);
      ctxt_B_blk.writeTo(B_ss);
      if (recipient == root) {
        // Root keeps its own blocks locally; record the fresh-ciphertext
        // noise for the report.
        local_A_ = A_ss.str();
        local_B_ = B_ss.str();
        stat_.init_noise = log2_noise_bound(ctxt_A_blk);
      } else {
        MPI_Send_String(A_ss.str(), recipient, 0, comm);
        MPI_Send_String(B_ss.str(), recipient, 0, comm);
      }
    }
  } else {
    MPI_Recv_String(local_A_, root, 0, comm);
    MPI_Recv_String(local_B_, root, 0, comm);
  }
}
// Distribute plaintext integer matrices evenly over `comm_exec_` using
// Cannon's block layout.
void shhmm_cannon::init_data(const int *A, const int *B) {
  //! The template overload must be named explicitly here.
  init_data<int>(MPI_COMM_EXEC_ROOT, rank_exec_, comm_exec_, A, B);
}

// Extract the (i, j)-th xb-by-yb block of `matrix`.
// Assumes the matrix is large enough to contain the requested block.
static inline NTL::mat_ZZ get_blk(const NTL::mat_ZZ &matrix, int i, int j,
                                  long xb, long yb) {
  NTL::mat_ZZ blk;
  blk.SetDims(xb, yb);
  for (long ii = 0; ii < xb; ii++) {
    for (long jj = 0; jj < yb; jj++) {
      blk[ii][jj] = matrix[i * xb + ii][j * yb + jj];
    }
  }
  // Plain return: `return std::move(blk)` would inhibit NRVO and force a
  // move where the compiler can elide the copy entirely.
  return blk;
}

// Encrypt and scatter NTL big-integer matrices over `comm_exec_`, one block
// per process of the Cannon grid.
void shhmm_cannon::init_data(const NTL::mat_ZZ &A, const NTL::mat_ZZ &B) {
  if (rank_exec_ == MPI_COMM_EXEC_ROOT) {
    // dynamic_cast so the assertion genuinely verifies the agent is the
    // client (only the client can encrypt).
    auto hmmcc = dynamic_cast<hypercube_hmmcc *>(hmm_cagent_);
    ASSERT_PRINTF(hmmcc != nullptr, "Invalid root\n");
    for (int recipient = 0; recipient < np_; recipient++) {
      // Grid coordinates of the recipient (row-major rank layout).
      const int i = recipient / sqrt_np_;
      const int j = recipient % sqrt_np_;

      std::stringstream A_ss, B_ss;
      auto A_blk = get_blk(A, i, j, mb_, kb_);
      auto B_blk = get_blk(B, i, j, kb_, nb_);
      auto ctxt_A_blk = hmmcc->encrypt(A_blk);
      auto ctxt_B_blk = hmmcc->encrypt(B_blk);
      ctxt_A_blk.writeTo(A_ss);
      ctxt_B_blk.writeTo(B_ss);
      if (recipient == MPI_COMM_EXEC_ROOT) {
        // Root keeps its own blocks locally; record the fresh-ciphertext
        // noise for the report.
        local_A_ = A_ss.str();
        local_B_ = B_ss.str();
        stat_.init_noise = log2_noise_bound(ctxt_A_blk);
      } else {
        MPI_Send_String(A_ss.str(), recipient, 0, comm_exec_);
        MPI_Send_String(B_ss.str(), recipient, 0, comm_exec_);
      }
    }
  } else {
    MPI_Recv_String(local_A_, MPI_COMM_EXEC_ROOT, 0, comm_exec_);
    MPI_Recv_String(local_B_, MPI_COMM_EXEC_ROOT, 0, comm_exec_);
  }
}

// Gather each process's serialized partial result on the root and decrypt
// it into `res` (one matrix per sender). Non-root ranks just send.
void shhmm_cannon::collect_data(const std::string &partial_C,
                                std::vector<NTL::mat_ZZ> &res, bool local) {
  // TODO: local check
  if (rank_exec_ == MPI_COMM_EXEC_ROOT) {
    res.resize(np_);
    // dynamic_cast so the assertion genuinely verifies the agent is the
    // client (only the client holds the secret key needed to decrypt).
    auto hmmcc = dynamic_cast<hypercube_hmmcc *>(hmm_cagent_);
    ASSERT_PRINTF(hmmcc != nullptr, "Invalid root\n");
    for (int sender = 0; sender < np_; sender++) {
      std::stringstream ss;
      std::string ctxt_str;
      if (sender == MPI_COMM_EXEC_ROOT) {
        ctxt_str = partial_C;
      } else {
        MPI_Recv_String(ctxt_str, sender, 0, comm_exec_);
      }
      ss << ctxt_str;
      auto ctxt = helib::Ctxt::readFrom(ss, *pk_);
      try {
        hmmcc->decrypt(res[sender], ctxt);
      } catch (const std::exception &e) {
        // Decryption failure is typically noise overflow; report the noise
        // level before aborting the whole job.
        std::cerr << e.what() << ":" << log2_noise_bound(ctxt) << std::endl;
        std::flush(std::cerr);
        MPI_Abort(comm_exec_, 1);
      }
    }
  } else {
    MPI_Send_String(partial_C, MPI_COMM_EXEC_ROOT, 0, comm_exec_);
  }
}

// Run Cannon's algorithm on the local encrypted blocks: one align shift,
// then sqrt(np) rounds of (shift, deserialize, homomorphic GEMM, add).
// On return, `partial_str` holds this rank's serialized partial C block.
// Noise statistics are sampled on the first iteration only.
void shhmm_cannon::exec(std::string &partial_str, bool fhe4d) {
  MPI_Barrier(cannon_comm_);
  stat_.exec_time -= MPI_Wtime();
  MPI_Pcontrol(1); //! Activate mpiP

  // clang-format off
  // Shift Align: skew row i of A left by i, column j of B up by j.
  stat_.shift_align_time -= MPI_Wtime();
  MPI_Sendrecv_String_replace(local_A_, sa_rank_[L], 0, sa_rank_[R], 0, cannon_comm_);
  MPI_Sendrecv_String_replace(local_B_, sa_rank_[U], 1, sa_rank_[D], 1, cannon_comm_);
  stat_.shift_align_time += MPI_Wtime();
  stat_.computing_comm_size += (local_A_.size() / 1024.0); // KB
  stat_.computing_comm_size += (local_B_.size() / 1024.0);
  // clang-format on

  stat_.shift_compute_time -= MPI_Wtime();
  // Shift Compute
  // `partial` accumulates the local C block, starting from a zero
  // "dummy" ciphertext (no real encryption needed for the identity).
  helib::Ctxt partial(*pk_);
  partial.DummyEncrypt(NTL::ZZX(0l));
  for (int i = 0; i < sqrt_np_; i++) {
    if (i) {
      // Rotate A one step left and B one step up (skip on first round —
      // the align shift already positioned the blocks).
      // clang-format off
      stat_.sc_comm_time -= MPI_Wtime();
      MPI_Sendrecv_String_replace(local_A_, sc_rank_[L], 0, sc_rank_[R], 0, cannon_comm_);
      MPI_Sendrecv_String_replace(local_B_, sc_rank_[U], 1, sc_rank_[D], 1, cannon_comm_);
      stat_.sc_comm_time += MPI_Wtime();
      stat_.computing_comm_size += (local_A_.size() / 1024.0);
      stat_.computing_comm_size += (local_B_.size() / 1024.0);
      // clang-format on
    }

    // Deserialization: rebuild ciphertexts from the shifted byte strings.
    stat_.sc_serialize_time -= MPI_Wtime();
    std::stringstream A_ss, B_ss;
    A_ss << local_A_;
    B_ss << local_B_;
    auto A = helib::Ctxt::readFrom(A_ss, *pk_);
    auto B = helib::Ctxt::readFrom(B_ss, *pk_);
    stat_.sc_serialize_time += MPI_Wtime();

    // Local CGEMM (noise growth sampled on iteration 0 only).
    stat_.cgemm_noise -= i == 0 ? log2_noise_bound(A) : 0.0;
    auto start_t = MPI_Wtime();
    helib::Ctxt AB(*pk_);
    hmm_engine_->cgemm(type_, AB, A, B);
    auto stop_t = MPI_Wtime();
    stat_.cgemm_times.push_back(stop_t - start_t);
    stat_.sc_cgemm_time += stat_.cgemm_times.back();
    stat_.cgemm_noise += i == 0 ? log2_noise_bound(AB) : 0.0;

    // Add partial sum (homomorphic add; noise delta tracked every round).
    stat_.add_partial_noise -=
        i == 0 ? log2_noise_bound(AB) : log2_noise_bound(partial);
    partial += AB;
    stat_.add_partial_noise += log2_noise_bound(partial);
  }
  // Serialization: write the accumulated partial C block back to a string.
  stat_.sc_serialize_time -= MPI_Wtime();
  std::stringstream ss;
  partial.writeTo(ss);
  partial_str = ss.str();
  stat_.sc_serialize_time += MPI_Wtime();
  stat_.shift_compute_time += MPI_Wtime();

  MPI_Pcontrol(0); //! Deactivate mpiP
  MPI_Barrier(cannon_comm_);
  stat_.exec_time += MPI_Wtime();

  stat_.final_noise = log2_noise_bound(partial);
}

//* Multi-Ciphertext Version
// Split a dimension of size `szdim` into blocks of size `split`.
// NOTE(review): the division truncates, so any remainder (szdim % split)
// is silently dropped — callers appear to assume `split` divides `szdim`
// evenly; confirm before relying on non-divisible sizes.
static inline void get_blk_size(long &szblk, long &nblk, long szdim,
                                long split) {
  // TODO:
  szblk = split;
  nblk = szdim / szblk;
}
// Multi-ciphertext Cannon: each per-process block is further split into
// `opt`-sized sub-blocks, each encrypted as its own ciphertext. Sets up the
// HMM engine, crypto agents, broadcasts the public key, and registers the
// encoding for the sub-block shape.
mhhmm_cannon::mhhmm_cannon(int root, MPI_Comm comm, long m, long n, long k,
                           long opt, const params<helib::BGV> &params,
                           const hmm_status<shmm_engine> &status)
    : cipher_cannon(root, comm, m, n, k) {
  stat_.setup_time -= MPI_Wtime(); //! Setup - Start
  int rank;
  MPI_Comm_rank(comm, &rank);

  // Initialize HMM engine //! but not active yet
  // Sub-block sizes (mbb_, nbb_, kbb_) and counts per dimension.
  get_blk_size(mbb_, m_nblk_, mb_, opt);
  get_blk_size(nbb_, n_nblk_, nb_, opt);
  get_blk_size(kbb_, k_nblk_, kb_, opt);
  if (rank == root) {
    std::cout << "dim-m: size = " << mb_ << " szblk = " << mbb_
              << ", # of blks = " << m_nblk_ << std::endl;
    std::cout << "dim-n: size = " << nb_ << " szblk = " << nbb_
              << ", # of blks = " << n_nblk_ << std::endl;
    std::cout << "dim-k: size = " << kb_ << " szblk = " << kbb_
              << ", # of blks = " << k_nblk_ << std::endl;
  }
  auto hhmme = new hypercube_hmme(status);
  hmm_engine_ = hhmme;

  // Initialize HMM cryptographic systems (parameters derived from the
  // sub-block cube, not the full per-process block)
  const auto eh_cube = hypercube_hmme::get_hypercube(mbb_, nbb_, kbb_);
  hmm_cagent_ = rank == root ? new hypercube_hmmcc(params, eh_cube, true)
                             : new hypercube_hmmcs(params, eh_cube, true);

  if (rank == root) {
    // Print out the HElib context
    hmm_cagent_->report_context(std::cout);
    std::flush(std::cout);
  }

  // Broadcast the public key
  stat_.bcast_pk_time -= MPI_Wtime(); //! Bcast PK - Start
  stat_.pk_size = this->bcast_pubkey();
  stat_.bcast_pk_time += MPI_Wtime(); //! Bcast PK - Stop

  // Activate HMM engine
  INTEL_ITT_RESUME;
  stat_.hmm_encoding_time -= MPI_Wtime(); //! HMM Encoding - Start
  const auto eh_mnk = hypercube_hmme::get_expected_mnk(mbb_, nbb_, kbb_);
  type_ = hypercube_hmme::parse_matrix_type(eh_mnk);
  hhmme->register_engine(hmm_cagent_->context()->getEA(), eh_mnk);
  stat_.hmm_encoding_time += MPI_Wtime(); //! HMM Encoding - Stop
  INTEL_ITT_PAUSE;

  stat_.setup_time += MPI_Wtime(); //! Setup - Stop
}

template <typename T>
void mhhmm_cannon::init_data(int root, int rank, MPI_Comm comm, const T *A,
                             const T *B) {
  static_assert(std::is_same<T, int>::value ||
                    std::is_same<T, unsigned int>::value ||
                    std::is_same<T, double>::value,
                "Only `int`, `unsigned int` and `double` are allowed.");

  const auto A_nblk = m_nblk_ * k_nblk_;
  const auto B_nblk = k_nblk_ * n_nblk_;

  if (rank == root) {
    auto hmmcc = static_cast<hypercube_hmmcc *>(hmm_cagent_);
    ASSERT_PRINTF(hmmcc != nullptr, "Invalid root\n");
    for (int recipient = 0; recipient < np_; recipient++) {
      const int i = recipient / sqrt_np_;
      const int j = recipient % sqrt_np_;

      MPI_Request reqs_A[A_nblk];
      MPI_Request reqs_B[B_nblk];
      std::vector<std::string> ctxt_A_blks;
      std::vector<std::string> ctxt_B_blks;
      // Send Matrix A
      for (int ii = 0; ii < m_nblk_; ii++) {
        for (int jj = 0; jj < k_nblk_; jj++) {
          std::stringstream ss;
          auto A_blk = get_blk(A, m_, k_,
                               /* pi */ i, /* pj */ j, mb_, kb_,
                               /* blki */ ii, /* blkj */ jj, mbb_, kbb_);
          auto ctxt_A_blk = hmmcc->encrypt(A_blk);
          ctxt_A_blk.writeTo(ss);
          ctxt_A_blks.push_back(ss.str());

          if (ii == 0 && jj == 0) {
            stat_.init_noise = log2_noise_bound(ctxt_A_blk);
          }
        }
      }
      // Send Matrix B
      for (int ii = 0; ii < k_nblk_; ii++) {
        for (int jj = 0; jj < n_nblk_; jj++) {
          std::stringstream ss;
          auto B_blk = get_blk(B, k_, n_,
                               /* pi */ i, /* pj */ j, kb_, nb_,
                               /* blki */ ii, /* blkj */ jj, kbb_, nbb_);
          auto ctxt_B_blk = hmmcc->encrypt(B_blk);
          ctxt_B_blk.writeTo(ss);
          ctxt_B_blks.push_back(ss.str());
        }
      }
      if (recipient == root) {
        local_A_ = std::move(ctxt_A_blks);
        local_B_ = std::move(ctxt_B_blks);
      } else {
        MPI_Isend_Strings(ctxt_A_blks, recipient, 0, comm, reqs_A);
        MPI_Isend_Strings(ctxt_B_blks, recipient, 0, comm, reqs_B);
        MPI_Waitall(A_nblk, reqs_A, MPI_STATUSES_IGNORE);
        MPI_Waitall(B_nblk, reqs_B, MPI_STATUSES_IGNORE);
      }
    }
  } else {
    MPI_Recv_Strings(local_A_, A_nblk, root, 0, comm);
    MPI_Recv_Strings(local_B_, B_nblk, root, 0, comm);
  }
}

// Distribute plaintext integer matrices evenly over `comm_exec_` as
// encrypted sub-block grids.
void mhhmm_cannon::init_data(const int *A, const int *B) {
  //! The template overload must be named explicitly here.
  init_data<int>(MPI_COMM_EXEC_ROOT, rank_exec_, comm_exec_, A, B);
}

// Strassen's algorithm on a 2x2 grid of ciphertext sub-blocks: seven
// homomorphic multiplications (M0..M6) instead of eight, combined into the
// four C sub-blocks. OpenMP task dependences order the combine tasks after
// the products they read.
// Requires m_nblk_ == n_nblk_ == k_nblk_ == 2 (indices are hard-coded).
// NOTE(review): stat_.sc_cgemm_time and stat_.cgemm_noise are updated from
// several concurrently-runnable tasks without synchronization — looks like
// a benign-at-best data race on the statistics; confirm/fix if the timings
// matter.
void mhhmm_cannon::strassen(std::vector<helib::Ctxt> &C, const mat_ctxt &A,
                            const mat_ctxt &B) {
  helib::Ctxt &C00 = C[0 * n_nblk_ + 0];
  helib::Ctxt &C01 = C[0 * n_nblk_ + 1];
  helib::Ctxt &C10 = C[1 * n_nblk_ + 0];
  helib::Ctxt &C11 = C[1 * n_nblk_ + 1];
  // Each M starts as a copy of the first operand of its sum/difference.
  helib::Ctxt M0 = A[0][0];
  helib::Ctxt M1 = A[1][0];
  helib::Ctxt M2 = B[0][1];
  helib::Ctxt M3 = B[1][0];
  helib::Ctxt M4 = A[0][0];
  helib::Ctxt M5 = A[1][0];
  helib::Ctxt M6 = A[0][1];

#pragma omp parallel
#pragma omp single
  {
#pragma omp task depend(out : M0)
    {
      // M0 = (A_00 + A_11)(B_00 + B_11)
      stat_.cgemm_noise -= log2_noise_bound(A[0][0]);
      helib::Ctxt tmp = B[0][0];
      M0 += A[1][1];
      tmp += B[1][1];
      stat_.sc_cgemm_time -= omp_get_wtime();
      hmm_engine_->cgemm(type_, M0, M0, tmp);
      stat_.sc_cgemm_time += omp_get_wtime();
      stat_.cgemm_noise += log2_noise_bound(M0);
    }
#pragma omp task depend(out : M1)
    {
      // M1 = (A_10 + A_11)B_00
      M1 += A[1][1];
      stat_.sc_cgemm_time -= omp_get_wtime();
      hmm_engine_->cgemm(type_, M1, M1, B[0][0]);
      stat_.sc_cgemm_time += omp_get_wtime();
    }

#pragma omp task depend(out : M2)
    {
      // M2 = A_00(B_01 - B_11)
      M2 -= B[1][1];
      stat_.sc_cgemm_time -= omp_get_wtime();
      hmm_engine_->cgemm(type_, M2, A[0][0], M2);
      stat_.sc_cgemm_time += omp_get_wtime();
    }

#pragma omp task depend(out : M3)
    {
      // M3 = A_11(B_10 - B_00)
      M3 -= B[0][0];
      stat_.sc_cgemm_time -= omp_get_wtime();
      hmm_engine_->cgemm(type_, M3, A[1][1], M3);
      stat_.sc_cgemm_time += omp_get_wtime();
    }

#pragma omp task depend(out : M4)
    {
      // M4 = (A_00 + A_01)B_11
      M4 += A[0][1];
      stat_.sc_cgemm_time -= omp_get_wtime();
      hmm_engine_->cgemm(type_, M4, M4, B[1][1]);
      stat_.sc_cgemm_time += omp_get_wtime();
    }

#pragma omp task depend(out : M5)
    {
      // M5 = (A_10 - A_00)(B_00 + B_01)
      helib::Ctxt tmp = B[0][0];
      M5 -= A[0][0];
      tmp += B[0][1];
      stat_.sc_cgemm_time -= omp_get_wtime();
      hmm_engine_->cgemm(type_, M5, M5, tmp);
      stat_.sc_cgemm_time += omp_get_wtime();
    }

#pragma omp task depend(out : M6)
    {
      // M6 = (A_01 - A_11)(B_10 + B_11)
      helib::Ctxt tmp = B[1][0];
      M6 -= A[1][1];
      tmp += B[1][1];
      stat_.sc_cgemm_time -= omp_get_wtime();
      hmm_engine_->cgemm(type_, M6, M6, tmp);
      stat_.sc_cgemm_time += omp_get_wtime();
    }

#pragma omp task depend(in : M0, M3, M4, M6)
    {
      // C_00 = M0 + M3 - M4 + M6
      C00 += M0;
      C00 += M3;
      C00 -= M4;
      C00 += M6;
    }
#pragma omp task depend(in : M2, M4)
    {
      // C_01 = M2 + M4
      C01 += M2;
      C01 += M4;
    }

#pragma omp task depend(in : M1, M3)
    {
      // C_10 = M1 + M3
      C10 += M1;
      C10 += M3;
    }

#pragma omp task depend(in : M0, M1, M2, M5)
    {
      // C_11 = M0 - M1 + M2 + M5
      C11 += M0;
      C11 -= M1;
      C11 += M2;
      C11 += M5;
    }
  }
}

// Standard blocked multiply over the sub-block grid: for each (i, j),
// accumulate sum_k A[i][k] * B[k][j] into C[i * n_nblk_ + j].
// The k iterations run as concurrent OpenMP tasks, so all shared-state
// updates (the C accumulation, the stats vector, and the noise counters)
// are serialized in a critical section — the expensive cgemm itself stays
// fully parallel. The original code mutated C[i][j] and stat_ from
// concurrent tasks, which was a data race.
void mhhmm_cannon::stdblk(std::vector<helib::Ctxt> &C, const mat_ctxt &A,
                          const mat_ctxt &B) {
#pragma omp parallel
#pragma omp single
  for (int i = 0; i < m_nblk_; i++) {
    for (int j = 0; j < n_nblk_; j++) {
#pragma omp taskloop
      for (int k = 0; k < k_nblk_; k++) {
        // Local block CGEMM (parallel, no shared state touched)
        auto start_t = MPI_Wtime();
        helib::Ctxt AB(*pk_);
        hmm_engine_->cgemm(type_, AB, A[i][k], B[k][j]);
        auto stop_t = MPI_Wtime();

#pragma omp critical(stdblk_accumulate)
        {
          stat_.cgemm_times.push_back(stop_t - start_t);
          stat_.sc_cgemm_time += stat_.cgemm_times.back();

          // Get HMM noise (sampled once, on the first sub-block)
          if (i == 0 && j == 0 && k == 0) {
            stat_.cgemm_noise =
                log2_noise_bound(AB) - log2_noise_bound(A[0][0]);
          }

          if (i == 0 && j == 0) { //! Get HAdd noise - Start
            stat_.add_partial_noise -= log2_noise_bound(C[i * n_nblk_ + j]);
          }
          // Add partial sum
          C[i * n_nblk_ + j] += AB;
          if (i == 0 && j == 0) { //! Get HAdd noise - Stop
            stat_.add_partial_noise += log2_noise_bound(C[i * n_nblk_ + j]);
          }
        }
      }
    }
  }
}

void mhhmm_cannon::exec(std::vector<std::string> &partial_C, bool fhe4d) {
  MPI_Barrier(cannon_comm_);
  stat_.exec_time -= MPI_Wtime(); //! EXEC - Start
  MPI_Pcontrol(1);                //! Activate mpiP

  // clang-format off
  // Shift Align
  stat_.shift_align_time -= MPI_Wtime();  //! SA - Start
  MPI_Sendrecv_String_replace(local_A_, sa_rank_[L], 0, sa_rank_[R], 0, cannon_comm_);
  MPI_Sendrecv_String_replace(local_B_, sa_rank_[U], 1, sa_rank_[D], 1, cannon_comm_);
  stat_.shift_align_time += MPI_Wtime(); //! SA - Stop
  stat_.computing_comm_size += (local_A_[0].size() * local_A_.size() / 1024.0);
  stat_.computing_comm_size += (local_B_[0].size() * local_B_.size() / 1024.0);
  // clang-format on

  stat_.shift_compute_time -= MPI_Wtime(); //! SC - Start
  // Shift Compute
  helib::Ctxt zero(*pk_);
  zero.DummyEncrypt(NTL::ZZX(0l));
  std::vector<helib::Ctxt> partial(m_nblk_ * n_nblk_, zero);
  for (int l = 0; l < sqrt_np_; l++) {
    if (l) {
      // clang-format off
      stat_.sc_comm_time -= MPI_Wtime();
      MPI_Sendrecv_String_replace(local_A_, sc_rank_[L], 0, sc_rank_[R], 0, cannon_comm_);
      MPI_Sendrecv_String_replace(local_B_, sc_rank_[U], 1, sc_rank_[D], 1, cannon_comm_);
      stat_.sc_comm_time += MPI_Wtime();
      stat_.computing_comm_size += (local_A_[0].size() * local_A_.size() / 1024.0);
      stat_.computing_comm_size += (local_B_[0].size() * local_B_.size() / 1024.0);
      // clang-format on
    }

    // Serialization
    mat_ctxt ctxt_A;
    mat_ctxt ctxt_B;
    ctxt_A.resize(m_nblk_);
    ctxt_B.resize(k_nblk_);
    stat_.sc_serialize_time -= MPI_Wtime();
    for (std::size_t i = 0; i < m_nblk_; i++) {
      for (std::size_t j = 0; j < k_nblk_; j++) {
        std::stringstream A_ss;
        A_ss << local_A_[i * k_nblk_ + j];
        ctxt_A[i].emplace_back(helib::Ctxt::readFrom(A_ss, *pk_));
      }
    }
    for (int i = 0; i < k_nblk_; i++) {
      for (int j = 0; j < n_nblk_; j++) {
        std::stringstream B_ss;
        B_ss << local_B_[i * n_nblk_ + j];
        ctxt_B[i].emplace_back(helib::Ctxt::readFrom(B_ss, *pk_));
      }
    }
    stat_.sc_serialize_time += MPI_Wtime();

    omp_set_nested(1);
    // Local CGEMM
    if (use_strassen_) {
      strassen(partial, ctxt_A, ctxt_B);
    } else {
      stdblk(partial, ctxt_A, ctxt_B);
    }
    omp_set_nested(0);
  }

  // Deserialization
  stat_.sc_serialize_time -= MPI_Wtime();
  for (auto i = 0; i < partial.size(); i++) {
    std::stringstream ss;
    partial[i].writeTo(ss);
    partial_C.push_back(ss.str());
  }
  stat_.sc_serialize_time += MPI_Wtime();
  stat_.shift_compute_time += MPI_Wtime(); //! SC - Stop

  MPI_Pcontrol(0); //! Deactivate mpiP
  MPI_Barrier(cannon_comm_);
  stat_.exec_time += MPI_Wtime(); //! EXEC - Stop

  stat_.final_noise = log2_noise_bound(partial[0]);
}

// Gather each process's serialized partial C sub-blocks on the root,
// decrypt them, and reassemble the per-process mb_ x nb_ result matrices.
// Non-root ranks just send their sub-block strings.
void mhhmm_cannon::collect_data(const std::vector<std::string> &partial_C,
                                std::vector<NTL::mat_ZZ> &result, bool local) {
  // TODO: local check
  if (rank_exec_ == MPI_COMM_EXEC_ROOT) {
    result.resize(np_);
    // dynamic_cast so the assertion genuinely verifies the agent is the
    // client (only the client holds the secret key needed to decrypt).
    auto hmmcc = dynamic_cast<hypercube_hmmcc *>(hmm_cagent_);
    ASSERT_PRINTF(hmmcc != nullptr, "Invalid root\n");
    for (int sender = 0; sender < np_; sender++) {
      std::vector<std::string> ctxt_str;
      if (sender == MPI_COMM_EXEC_ROOT) {
        ctxt_str = partial_C;
      } else {
        MPI_Recv_Strings(ctxt_str, m_nblk_ * n_nblk_, sender, 0, comm_exec_);
      }

      result[sender].SetDims(mb_, nb_);
      for (int i = 0; i < m_nblk_; i++) {
        for (int j = 0; j < n_nblk_; j++) {
          std::stringstream ss;
          ss << ctxt_str[i * n_nblk_ + j];
          auto ctxt = helib::Ctxt::readFrom(ss, *pk_);

          NTL::mat_ZZ res;
          try {
            hmmcc->decrypt(res, ctxt);
          } catch (const std::exception &e) {
            // Decryption failure is typically noise overflow; report the
            // noise level before aborting the whole job.
            std::cerr << e.what() << ":" << log2_noise_bound(ctxt) << std::endl;
            std::flush(std::cerr);
            MPI_Abort(comm_exec_, 1);
          }

          // Copy sub-block (i, j) into its place in the full local result.
          for (int ii = 0; ii < mbb_; ii++) {
            for (int jj = 0; jj < nbb_; jj++) {
              result[sender][i * mbb_ + ii][j * nbb_ + jj] = res[ii][jj];
            }
          }
        }
      }
    }
  } else {
    MPI_Send_Strings(partial_C, MPI_COMM_EXEC_ROOT, 0, comm_exec_);
  }
}