#include "CNN/mpi_bgv_mnist_4d.hpp"
#include "CNN/mpi_mnist_4d.hpp"
#include "Ctxt.h"
#include "hypercube/hypercube_cryto_agent.hpp"
#include "intel_itt_wrapper.hpp"
#include "math.cpp"
#include "math_util.hpp"
#include "mem_usage.hpp"
#include <mpi.h>
#include <sstream>
#include <string>
#include <unistd.h>
#include <vector>

// Intel ITT (Instrumentation and Tracing Technology) domain and string
// handles used to annotate the phases of the homomorphic forward pass in a
// profiler (e.g. VTune) trace.
INTEL_ITT_DOMAIN_CREATE(mnist_forward_test, "test.cnn.mnist.forward");
INTEL_ITT_STRING_HANDLE_CREATE(hcnn_conv, "HConv");
INTEL_ITT_STRING_HANDLE_CREATE(hcnn_sq1, "HSquare1");
INTEL_ITT_STRING_HANDLE_CREATE(hcnn_fc1, "HFC1");
INTEL_ITT_STRING_HANDLE_CREATE(hcnn_sq2_fc2, "HSquare2-HFC2");

// Returns log2 of the ciphertext's total noise bound.  HElib reports the
// bound as an NTL extended double; NTL::log gives the natural logarithm,
// so we rescale by ln(2).
static inline double log2_noise_bound(const helib::Ctxt &ctxt) {
  const NTL::xdouble noise_bound = ctxt.totalNoiseBound();
  const NTL::xdouble two(2l);
  return NTL::log(noise_bound) / NTL::log(two);
}

// Maps every matrix entry from the range [0, t) to its signed
// representative: entries strictly greater than t/2 are shifted down by t.
//
// Fix vs. original: the comparison is done directly on the NTL::ZZ entries.
// The previous `NTL::conv<int>(a[i][j])` silently truncates entries that do
// not fit in an int, which could mis-classify large values.  Loop indices
// are `long` to match NumRows()/NumCols().
static inline void trans_neg(NTL::mat_ZZ &a, long t) {
  const long half = t / 2;
  for (long i = 0; i < a.NumRows(); i++)
    for (long j = 0; j < a.NumCols(); j++) {
      if (a[i][j] > half) {
        a[i][j] -= t;
      }
    }
}

// Shifts a single residue into the symmetric range: values in the upper
// half of [0, t) (i.e. >= t/2) are reduced by t to recover the signed value.
static inline void trans_neg_single(NTL::ZZ &a, const NTL::ZZ &t) {
  const NTL::ZZ half = t / 2;
  if (a >= half)
    a -= t;
}

// Quantizes a 2-D float tensor into an NTL matrix of residues mod t.
//
// Each entry is scaled by SCALE and converted to an integer; negative values
// are first lifted by the full CRT modulus T so that the residue mod t
// encodes the signed value consistently with trans_neg()/trans_neg_single().
//
// Fix vs. original: each tensor element is read once per entry
// (a[i][j].item<float>() is a comparatively expensive accessor that was
// previously evaluated up to three times per entry).
static inline void Tensor2Matrix(NTL::mat_ZZ &mat, int row, int col,
                                 const torch::Tensor &a, const NTL::ZZ &T,
                                 const NTL::ZZ &t) {
  mat.SetDims(row, col);
  for (int i = 0; i < row; i++)
    for (int j = 0; j < col; j++) {
      const float v = a[i][j].item<float>();
      NTL::ZZ z = NTL::conv<NTL::ZZ>(v * SCALE);
      if (v < 0)
        z += T; // lift negatives into [0, T) before the reduction
      mat[i][j] = z % t;
    }
}

// Fills an entire row x col matrix with the quantized value of a[0][0]
// (used to replicate a single convolution-kernel weight across all slots).
//
// Fix vs. original: the value is read and quantized once outside the loops.
// The previous version re-evaluated the loop-invariant
// a[0][0].item<float>() — an expensive accessor — plus the ZZ conversion
// and modular reduction for every single matrix entry.
static inline void Tensor2Matrix_conv(NTL::mat_ZZ &mat, int row, int col,
                                      const torch::Tensor &a, const NTL::ZZ &T,
                                      const NTL::ZZ &t) {
  const float v = a[0][0].item<float>();
  NTL::ZZ z = NTL::conv<NTL::ZZ>(v * SCALE);
  if (v < 0)
    z += T; // lift negatives into [0, T) before the reduction
  z %= t;

  mat.SetDims(row, col);
  for (int i = 0; i < row; i++)
    for (int j = 0; j < col; j++)
      mat[i][j] = z;
}

// Converts an NTL integer matrix back into a float tensor of shape
// {row, col}.  Entries are narrowed through NTL::conv<int>; callers are
// expected to have reduced values into int range (e.g. via trans_neg).
static inline torch::Tensor Matrix2Tensor(const NTL::mat_ZZ &mat, int row,
                                          int col) {
  torch::Tensor a = torch::zeros({row, col});
  for (int i = 0; i < row; i++)
    for (int j = 0; j < col; j++)
      a[i][j] = NTL::conv<int>(mat[i][j]);
  // Return by value: NRVO/move applies.  The original `return std::move(a)`
  // needlessly suppressed copy elision (clang-tidy: performance-move-*).
  return a;
}

// Encrypts one 2-D slice of the im2matrix-ed input under the CRT component
// selected by t_index.
//
// a       : tensor whose slice a[asz0][0] (shape a.size(2) x a.size(3)) is
//           quantized and encrypted.
// asz0    : index into the first tensor dimension.
// t_index : which CRT modulus / crypto context to use.
// Returns the resulting ciphertext.
helib::Ctxt mpi_bgv_hmnist_client::init_hx(const torch::Tensor &a, int asz0,
                                           int t_index) const {
  // Quantize the input slice to residues mod ti_[t_index].
  NTL::mat_ZZ ptxt;
  Tensor2Matrix(ptxt, a.size(2), a.size(3), a[asz0][0], T_, ti_[t_index]);

  // Encrypt under the matching hypercube crypto context.  Returning by
  // value lets NRVO/move apply; the original `return std::move(ctxt)`
  // suppressed copy elision.
  return hmmccs_[t_index]->encrypt(ptxt, key);
}

// Encrypts one convolution-kernel weight a[asz0][asz1][0][0], replicated
// across a 64x64 plaintext matrix, under CRT component t_index.
helib::Ctxt mpi_bgv_hmnist_client::init_hconv(const torch::Tensor &a, int asz0,
                                              int asz1, int t_index) const {
  // Quantize the (scalar) kernel weight into a constant 64x64 matrix.
  NTL::mat_ZZ ptxt;
  Tensor2Matrix_conv(ptxt, 64, 64, a[asz0][asz1], T_, ti_[t_index]);
  // Encrypt under the matching hypercube crypto context.
  auto ctxt = hmmccs_[t_index]->encrypt(ptxt, key);
  // Debug aid: decrypt and compare against the original weight.
  // auto AB = hmmccs_[t_index]->decrypt(ctxt);
  // trans_neg(AB, NTL::conv<long>(t));
  // std::cout << a[asz0][asz1][0][0].item() << ", "
  //           << NTL::conv<float>(AB[0][0]) / 1000 << std::endl;
  // Return by value (NRVO/move); `return std::move(ctxt)` suppressed
  // copy elision.
  return ctxt;
}

// Encrypts one FC1 weight block a[asz0] (shape a.size(1) x a.size(2)) under
// CRT component t_index.
helib::Ctxt mpi_bgv_hmnist_client::init_hfc1(const torch::Tensor &a, int asz0,
                                             int t_index) const {
  // Quantize the weight block to residues mod ti_[t_index].
  NTL::mat_ZZ ptxt;
  // std::cout << a.size(1) << " " << a.size(2) << std::endl;
  Tensor2Matrix(ptxt, a.size(1), a.size(2), a[asz0], T_, ti_[t_index]);

  // Encrypt under the matching hypercube crypto context.  Returning by
  // value lets NRVO/move apply; the original `return std::move(ctxt)`
  // suppressed copy elision.
  return hmmccs_[t_index]->encrypt(ptxt, key);
}

// Encrypts the full FC2 weight matrix a (shape a.size(0) x a.size(1)) under
// CRT component t_index.
helib::Ctxt mpi_bgv_hmnist_client::init_hfc2(const torch::Tensor &a,
                                             int t_index) const {
  // Quantize the weight matrix to residues mod ti_[t_index].
  NTL::mat_ZZ ptxt;
  std::cout << a.size(0) << "," << a.size(1) << std::endl;
  Tensor2Matrix(ptxt, a.size(0), a.size(1), a, T_, ti_[t_index]);

  // Encrypt under the matching hypercube crypto context.  Returning by
  // value lets NRVO/move apply; the original `return std::move(ctxt)`
  // suppressed copy elision.
  return hmmccs_[t_index]->encrypt(ptxt, key);
}

// Builds the client-side crypto state: one hypercube crypto context per CRT
// component, the per-component plaintext moduli ti_, and their product T_
// (the composite CRT modulus used to lift signed values).
mpi_bgv_hmnist_client::mpi_bgv_hmnist_client(const params<helib::BGV> &params,
                                             int crt_num)
    : crt_num_(crt_num) {
  hmmccs_.resize(crt_num);
  ti_.resize(crt_num);

  auto start_t = MPI_Wtime();
  // NOTE(review): context creation kept serial; the parallel version is
  // commented out — presumably not thread-safe, confirm before enabling.
  // #pragma omp parallel for
  for (int i = 0; i < crt_num; i++) {
    hmmccs_[i] = new hypercube_hmmcc(params, MNIST_HYPERCUBE, true);
    // ti_[i] is the plaintext modulus p of the i-th context.
    ti_[i] = hmmccs_[i]->context()->getP();
  }

  // T_ accumulates the product of all per-component moduli.
  T_ = NTL::to_ZZ(1l);
  contexts_.resize(crt_num);
  for (int i = 0; i < crt_num; i++) {
    contexts_[i] = hmmccs_[i]->context().get();
    std::cout << "client_context_0_" << i << std::endl;
    hmmccs_[i]->report_context();
    T_ *= ti_[i];
  }
  auto end_t = MPI_Wtime();
  std::cout << "* Setup Helib Context           : " << (end_t - start_t) << " s"
            << std::endl;
}

// Encrypts the trained model weights for the homomorphic forward pass:
// the convolution kernels, the FC1 blocks and the FC2 matrix, each
// replicated across the crt_num_ CRT components.
//
// Resulting layouts in ctxt_model:
//   conv: [crt][b.size(0)][b.size(1)]
//   fc1 : [crt][c.size(0)]
//   fc2 : [crt]
//
// Ciphertext containers are first pre-populated with public-key-constructed
// placeholder slots (helib::Ctxt has no default constructor) and then
// filled in parallel via OpenMP taskloops inside a single region.
void mpi_bgv_hmnist_client::init_hcnn(encrypted_model<helib::BGV> &ctxt_model,
                                      const torch::nn::Conv2d &conv1,
                                      const torch::nn::Linear &fc1,
                                      const torch::nn::Linear &fc2) const {
  auto init_hcnn_start_t = MPI_Wtime();
  // Reshape the plaintext weights into the encryption-friendly layouts.
  std::cout << conv1->weight.data().sizes() << std::endl; // 4,1,7,7
  auto b = init_conv_kernel(conv1->weight.data());        // [49,4,1,1]
  std::cout << b.sizes() << std::endl;
  std::cout << "init_conv_kernel finished" << std::endl;
  std::cout << fc1->weight.data().sizes() << std::endl; // 64,256
  // print(fc1->weight.data());
  auto c = init_fc1_kernel(fc1->weight.data(), 4); // [4,64,64]
  std::cout << c.sizes() << std::endl;
  std::cout << "init_fc1_kernel finished" << std::endl;
  // auto c = fc1->weight.data().t(); // [256,64]
  std::cout << fc2->weight.data().sizes() << std::endl; // [10,64]
  // print(fc2->weight.data());
  auto d = init_fc2_kernel(fc2->weight.data()); // [10,64]
  std::cout << d.sizes() << std::endl;
  std::cout << "init_fc2_kernel finished" << std::endl;
  // Memory accounting (resident set size deltas, reported in GB).
  std::size_t mem_begin_ = 0.0;
  std::size_t mem_end_ = 0.0;
  double mem_hconv = 0.0; // GB
  const double GB = static_cast<double>(1l << 30l);
  // NOTE(review): MB is currently unused.
  const double MB = static_cast<double>(1l << 20l);
  // init Convolution layer
  // 5,49,4
  mem_begin_ = getCurrentRSS();
  std::cout << "mem_begin_=" << static_cast<double>(mem_begin_) / GB
            << std::endl;
#pragma omp parallel
#pragma omp single
  {
    auto start_t = MPI_Wtime();
    // Pre-size conv slots serially (container growth is not thread-safe),
    // then encrypt into them with a taskloop.
    ctxt_model.conv.resize(crt_num_);
    for (int k = 0; k < crt_num_; k++) {
      ctxt_model.conv[k].resize(b.size(0));
      for (int i = 0; i < b.size(0); i++) {
        for (int j = 0; j < b.size(1); j++) {
          ctxt_model.conv[k][i].emplace_back(hmmccs_[k]->public_key());
        }
      }
    }
#pragma omp taskloop collapse(3) nogroup
    for (int k = 0; k < crt_num_; k++) {
      for (int i = 0; i < b.size(0); i++) {
        for (int j = 0; j < b.size(1); j++) {
          ctxt_model.conv[k][i][j] = std::move(init_hconv(b, i, j, k));
        }
      }
    }
    mem_end_ = getCurrentRSS();
    mem_hconv = static_cast<double>(mem_end_ - mem_begin_) / GB;
    std::cout << "mem_hconv=" << mem_hconv << std::endl;
    auto end_t = MPI_Wtime();
    std::cout << "  * Init convolution layer      : " << (end_t - start_t)
              << " s\n";

    // init fully connected layer1
    // 5,4
    start_t = MPI_Wtime();
    ctxt_model.fc1.resize(crt_num_);
    for (int k = 0; k < crt_num_; k++) {
      for (int i = 0; i < c.size(0); i++) {
        ctxt_model.fc1[k].emplace_back(hmmccs_[k]->public_key());
      }
    }
#pragma omp taskloop collapse(2) nogroup
    for (int k = 0; k < crt_num_; k++) {
      for (int i = 0; i < c.size(0); i++) {
        ctxt_model.fc1[k][i] = std::move(init_hfc1(c, i, k));
      }
    }
    end_t = MPI_Wtime();
    std::cout << "  * Init fully connected layer1 : " << (end_t - start_t)
              << " s\n";

    // init fully connected layer2
    // 5
    start_t = MPI_Wtime();

    for (int k = 0; k < crt_num_; k++) {
      ctxt_model.fc2.emplace_back(hmmccs_[k]->public_key());
    }
#pragma omp taskloop nogroup
    for (int k = 0; k < crt_num_; k++) {
      ctxt_model.fc2[k] = std::move(init_hfc2(d, k));
    }
    end_t = MPI_Wtime();
    std::cout << "  * Init fully connected layer2 : " << (end_t - start_t)
              << " s\n";
    auto init_hcnn_end_t = MPI_Wtime();
    std::cout << "* Init HCNN                     : "
              << (init_hcnn_end_t - init_hcnn_start_t) << " s\n";
  }
}

// Encrypts a full input batch for the server-side forward pass.
//
// x is the raw image batch; im2matrix unrolls it into the 5-D tensor `a` so
// the convolution becomes elementwise ciphertext multiplications.  For every
// mini-batch slice l, CRT component k and index i over a.size(1), the same
// slice is encrypted once per index j over a.size(2), mirroring the model
// layout built in init_hcnn().
//
// Fixes vs. original: the slice a.index({l, "..."}) depends only on l but
// was recomputed inside the innermost (k, i, j) loop; it is now hoisted to
// once per l.  Two unused local vectors were removed.
void mpi_bgv_hmnist_client::init_input(encrypted_input<helib::BGV> &ctxt_input,
                                       const torch::Tensor &filter,
                                       const torch::Tensor &x) const {
  // x 2048,1,28,28
  auto a = im2matrix(x, filter, 3);
  std::cout << "x: " << x.size(0) << " " << x.size(1) << " " << x.size(2) << " "
            << x.size(3) << std::endl;
  std::cout << "a: " << a.size(0) << " " << a.size(1) << " " << a.size(2) << " "
            << a.size(3) << " " << a.size(4) << std::endl;

  // init input
  // a 8,49,4,64,256
  // 8,5,49,4

  auto start_t = MPI_Wtime();
  ctxt_input.data.resize(x.size(0) / T_BATCHSIZE);
#pragma omp parallel for
  for (int l = 0; l < x.size(0) / T_BATCHSIZE; l++) {
    // Pre-populate the nested containers with public-key-constructed slots
    // (helib::Ctxt has no default constructor), then overwrite below.
    ctxt_input.data[l].resize(crt_num_);
    for (int k = 0; k < crt_num_; k++) {
      ctxt_input.data[l][k].resize(a.size(1));
      for (int i = 0; i < a.size(1); i++) {
        for (int j = 0; j < a.size(2); j++) {
          ctxt_input.data[l][k][i].emplace_back(hmmccs_[k]->public_key());
        }
      }
    }
    // Hoisted: depends only on l; the original re-evaluated this indexing
    // for every (k, i, j) iteration.
    const auto slice = a.index({l, "..."});
    for (int k = 0; k < crt_num_; k++) {
      for (int i = 0; i < a.size(1); i++) {
        for (int j = 0; j < a.size(2); j++) {
          ctxt_input.data[l][k][i][j] = init_hx(slice, i, k);
        }
      }
    }
  }
  auto end_t = MPI_Wtime();
  std::cout << "* Init input                    : " << (end_t - start_t)
            << " s\n";
}

// 4,5
// On the root rank: decrypts the gathered per-CRT-component result
// ciphertexts in `in` (in.size() groups of crt_num_ serialized strings),
// recombines each slot via the Chinese Remainder Theorem, rescales by
// SCALE^11, and writes the final plaintext outputs into `out`.
// Non-root ranks return after the rank/size queries.
//
// Fix vs. original: the residue table was a C variable-length array
// (non-standard C++, and a stack-overflow risk for large batches); it is
// now a flattened heap-allocated std::vector with the crt_num_ residues of
// one slot kept contiguous so Ch_remainder_theorem() still receives a
// pointer to a contiguous run.
void mpi_bgv_hmnist_client::recover_result(
    torch::Tensor &out, const std::vector<std::vector<std::string>> &in,
    MPI_Comm comm, int root) const {
  int rank, p;
  MPI_Comm_rank(comm, &rank);
  MPI_Comm_size(comm, &p);
  // 10,4 * 64
  torch::Tensor tmp =
      torch::zeros({out.size(0), static_cast<long>(in.size()) * out.size(1)});
  auto start_t = MPI_Wtime();
  // CRT-Pre
  if (rank == root) {
    const long ni = static_cast<long>(in.size());
    const long nj = out.size(0);
    const long nk = out.size(1);
    const long nc = crt_num_;
    // Flattened [ni][nj][nk][nc] table; nc innermost keeps each slot's
    // residues contiguous.
    std::vector<CH_Remainder> crt(static_cast<std::size_t>(ni * nj * nk * nc));
    auto slot = [&](long i, long j, long k) -> CH_Remainder * {
      return crt.data() + ((i * nj + j) * nk + k) * nc;
    };
    for (long i = 0; i < ni; i++) {
      for (int kcrt = 0; kcrt < crt_num_; kcrt++) {
        std::stringstream ss;
        ss << in[i][kcrt];
        auto ctxt = helib::Ctxt::readFrom(ss, hmmccs_[kcrt]->public_key());
        NTL::mat_ZZ res;
        hmmccs_[kcrt]->decrypt(res, ctxt);
        std::cout << res.NumCols() << " " << res.NumRows() << std::endl;
        for (long j = 0; j < nj; j++)
          for (long k = 0; k < nk; k++) {
            slot(i, j, k)[kcrt].result = NTL::conv<NTL::ZZ>(res[j][k]);
            slot(i, j, k)[kcrt].mod_num = NTL::conv<NTL::ZZ>(ti_[kcrt]);
          }
      }
    }

    // CRT recombination, then rescale by SCALE^11 (the quantization factor
    // accumulated across the network's layers).
    NTL::ZZ pow, base(SCALE);
    NTL::power(pow, base, 11);

    // 10,4,64
    for (long i = 0; i < ni; i++) {
      for (long j = 0; j < nj; j++) {
        for (long k = 0; k < nk; k++) {
          NTL::ZZ temp = Ch_remainder_theorem(slot(i, j, k), crt_num_);
          trans_neg_single(temp, T_); // back to the signed representative
          tmp.index({j, i * out.size(1) + k}) = NTL::conv<float>(
              (NTL::conv<NTL::xdouble>(temp) / NTL::conv<NTL::xdouble>(pow)));
        }
      }
    }
    tmp = tmp.transpose(0, 1);
    // 256,10
    out = tmp.clone();
    auto end_t = MPI_Wtime();
    std::cout << "CRT: " << (end_t - start_t) << " s" << std::endl;
  }
}

// Server-side setup (overload without client contexts): non-root ranks
// build their own crypto contexts from `params`; GEMM engines and the MPI
// communicators for the pipeline stages are created on all ranks.
//
// NOTE(review): in this overload, contexts_[i] is never assigned on the
// root rank, yet the engine-registration loop below dereferences it on
// every rank — confirm the root rank only uses the other constructor
// (which takes the client's contexts).
mpi_bgv_hmnist_server::mpi_bgv_hmnist_server(
    const hmm_status<shmm_engine> &status, MPI_Comm comm, int crt_num,
    int conv_size0, const params<helib::BGV> &params)
    : crt_num_(crt_num) {
  int rank, p;
  MPI_Comm_rank(comm, &rank);
  MPI_Comm_size(comm, &p);
  metrics_.resize(crt_num);
  hmmes_.resize(crt_num);
  hmmcss_.resize(crt_num);
  // Setup time is accumulated into metrics_[0] only.
  metrics_[0].hmm_encoding_time -= MPI_Wtime();
  contexts_.resize(crt_num);

  // Non-root ranks build one crypto context per CRT component.
  if (rank != COMM_WORLD_ROOT)
    for (int i = 0; i < crt_num; i++) {
      hmmcss_[i] = new hypercube_hmmcs(params, MNIST_HYPERCUBE, true);
      contexts_[i] = hmmcss_[i]->context().get();
    }

  for (int i = 0; i < crt_num; i++) {
    hmmes_[i] = new hypercube_hmme(status);
    // pk_[i] = new helib::PubKey(hmmcc[i]->public_key());
  }

  // Register one matrix-multiplication engine per CRT component for each
  // FC layer's (m, n, k) shape.
#pragma omp parallel for
  for (int i = 0; i < crt_num; i++) {
    // hmmes_[i] = new hypercube_hmme(status);
    auto fc1 = hypercube_hmme::get_expected_mnk(MATRIX_DIM_M, MATRIX_DIM_N,
                                                MATRIX_DIM_K);
    hmmes_[i]->register_engine(contexts_[i]->getEA(), fc1);
    auto fc2 =
        hypercube_hmme::get_expected_mnk(10l, MATRIX_DIM_N, MATRIX_DIM_K);
    hmmes_[i]->register_engine(contexts_[i]->getEA(), fc2);
    // pk_[i] = new helib::PubKey(hmmcc[i]->public_key());
  }

  // comm_Bcast: ranks with rank % (p / conv_size0) == 0, keyed by
  // rank / (p / conv_size0); other ranks get MPI_COMM_NULL.
  MPI_Comm_split(comm, (rank % (p / conv_size0)) ? MPI_UNDEFINED : 1,
                 rank / (p / conv_size0), &comm_Bcast);

  // comm_reducfc2: ranks with rank / (p / conv_size0) == 0 (FC2 reduction).
  MPI_Comm_split(comm, (rank / (p / conv_size0) == 0) ? 1 : MPI_UNDEFINED,
                 rank % (p / conv_size0), &comm_reducfc2);

  // comm_conv: one communicator per value of rank / (p / conv_size0).
  MPI_Comm_split(comm, rank / (p / conv_size0), rank % (p / conv_size0),
                 &comm_conv);

  // comm_reducfc1: one communicator per value of rank % (p / conv_size0).
  MPI_Comm_split(comm, rank % (p / conv_size0), rank / (p / conv_size0),
                 &comm_reducfc1);
  metrics_[0].hmm_encoding_time += MPI_Wtime();
}

// Server-side setup (overload with client contexts): identical to the other
// constructor except that the root rank reuses the client's crypto contexts
// from `hmmcc` instead of leaving contexts_ unset.
mpi_bgv_hmnist_server::mpi_bgv_hmnist_server(
    const hmm_status<shmm_engine> &status, MPI_Comm comm, int crt_num,
    int conv_size0, const params<helib::BGV> &params,
    const std::vector<hypercube_hmmcc *> &hmmcc)
    : crt_num_(crt_num) {
  int rank, p;
  MPI_Comm_rank(comm, &rank);
  MPI_Comm_size(comm, &p);
  metrics_.resize(crt_num);
  hmmes_.resize(crt_num);
  hmmcss_.resize(crt_num);
  // Setup time is accumulated into metrics_[0] only.
  metrics_[0].hmm_encoding_time -= MPI_Wtime();
  contexts_.resize(crt_num);

  // Non-root ranks build their own contexts; the root borrows the client's.
  if (rank != COMM_WORLD_ROOT)
    for (int i = 0; i < crt_num; i++) {
      hmmcss_[i] = new hypercube_hmmcs(params, MNIST_HYPERCUBE, true);
      contexts_[i] = hmmcss_[i]->context().get();
    }
  else
    for (int i = 0; i < crt_num; i++) {
      contexts_[i] = hmmcc[i]->context().get();
    }

  for (int i = 0; i < crt_num; i++) {
    hmmes_[i] = new hypercube_hmme(status);
    // pk_[i] = new helib::PubKey(hmmcc[i]->public_key());
  }

  // Register one matrix-multiplication engine per CRT component for each
  // FC layer's (m, n, k) shape.
#pragma omp parallel for
  for (int i = 0; i < crt_num; i++) {
    // hmmes_[i] = new hypercube_hmme(status);
    auto fc1 = hypercube_hmme::get_expected_mnk(MATRIX_DIM_M, MATRIX_DIM_N,
                                                MATRIX_DIM_K);
    hmmes_[i]->register_engine(contexts_[i]->getEA(), fc1);
    auto fc2 =
        hypercube_hmme::get_expected_mnk(10l, MATRIX_DIM_N, MATRIX_DIM_K);
    hmmes_[i]->register_engine(contexts_[i]->getEA(), fc2);
    // pk_[i] = new helib::PubKey(hmmcc[i]->public_key());
  }

  // Communicator layout (same as the other constructor):
  // comm_Bcast: ranks with rank % (p / conv_size0) == 0.
  MPI_Comm_split(comm, (rank % (p / conv_size0)) ? MPI_UNDEFINED : 1,
                 rank / (p / conv_size0), &comm_Bcast);

  // comm_reducfc2: ranks with rank / (p / conv_size0) == 0.
  MPI_Comm_split(comm, (rank / (p / conv_size0) == 0) ? 1 : MPI_UNDEFINED,
                 rank % (p / conv_size0), &comm_reducfc2);

  // comm_conv: one communicator per value of rank / (p / conv_size0).
  MPI_Comm_split(comm, rank / (p / conv_size0), rank % (p / conv_size0),
                 &comm_conv);

  // comm_reducfc1: one communicator per value of rank % (p / conv_size0).
  MPI_Comm_split(comm, rank % (p / conv_size0), rank / (p / conv_size0),
                 &comm_reducfc1);

  metrics_[0].hmm_encoding_time += MPI_Wtime();
}

// 5,49
// Homomorphic convolution: for each CRT component i, computes the inner
// product sum_j in[i][j] * hconv[i][j] and appends it to `out`.
//
// Fixes two metric-bookkeeping defects in the original: the initial noise
// was always sampled from in[0][0] (not in[i][0]), and conv_noise was
// re-measured on the *input* ciphertext (which this function never
// modifies) against metrics[0], so the reported growth was always ~0.
// The growth is now measured on the accumulated result, per component.
void mpi_bgv_hmnist_server::hconv_server(
    std::vector<helib::Ctxt> &out,
    const std::vector<std::vector<helib::Ctxt>> &in,
    const std::vector<std::vector<helib::Ctxt>> &hconv) const {
  auto &metrics = const_cast<std::vector<metric> &>(metrics_);

  for (int i = 0; i < in.size(); i++) {
    metrics[i].init_noise = log2_noise_bound(in[i][0]);
    metrics[i].conv_noise = metrics[i].init_noise;
    // INTEL_ITT_TASK_BEGIN(mnist_forward_test, hcnn_conv); //! ITT - Begin
    metrics[i].hconv_time -= MPI_Wtime();
    // acc = sum_j in[i][j] * hconv[i][j]
    helib::Ctxt acc(in[i][0]);
    acc *= hconv[i][0];
    for (int j = 1; j < in[i].size(); j++) {
      helib::Ctxt prod(in[i][j]);
      prod *= hconv[i][j];
      acc += prod;
    }
    out.push_back(acc);
    metrics[i].hconv_time += MPI_Wtime();
    // Noise growth of the convolution output relative to the input.
    metrics[i].conv_noise = log2_noise_bound(out.back()) - metrics[i].init_noise;
  }
}

// Gathers each rank's per-CRT convolution results onto the world root.
// Non-root ranks serialize their crt_num_ ciphertexts and send them; the
// root keeps its own results in ctxt[0] and deserializes one vector of
// ciphertexts from every other rank into ctxt[src].
void mpi_bgv_hmnist_server::back_hconv(
    const std::vector<helib::Ctxt> &out,
    std::vector<std::vector<helib::Ctxt>> &ctxt) const {
  int rank, p;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &p);
  if (rank == COMM_WORLD_ROOT) {
    ctxt.resize(p);
    ctxt[0] = out; // the root's own share needs no transport
    for (int src = 1; src < p; src++) {
      std::vector<std::string> wire;
      MPI_Recv_Strings(wire, crt_num_, src, src, MPI_COMM_WORLD);
      for (int c = 0; c < crt_num_; c++) {
        std::stringstream buf(wire[c]);
        ctxt[src].push_back(helib::Ctxt::readFrom(buf, get_pk(c)));
      }
    }
  } else {
    std::vector<std::string> wire;
    for (int c = 0; c < crt_num_; c++) {
      std::stringstream buf;
      out[c].writeTo(buf);
      wire.push_back(buf.str());
    }
    MPI_Send_Strings(wire, COMM_WORLD_ROOT, rank, MPI_COMM_WORLD);
  }
}

// First homomorphic squaring (the network's x^2 activation), applied in
// place to every CRT component, with per-component timing and noise-growth
// bookkeeping.
void mpi_bgv_hmnist_server::hsqr1_server(std::vector<helib::Ctxt> &ctxt) const {
  auto &metrics = const_cast<std::vector<metric> &>(metrics_);
  for (std::size_t idx = 0; idx < ctxt.size(); ++idx) {
    auto &m = metrics[idx];
    const double noise_before = log2_noise_bound(ctxt[idx]);
    // INTEL_ITT_TASK_BEGIN(mnist_forward_test, hcnn_sq1); //! ITT - Begin
    const double t0 = MPI_Wtime();
    ctxt[idx].square();
    m.hsq1_time += MPI_Wtime() - t0;
    // INTEL_ITT_TASK_END(mnist_forward_test); //! ITT - End
    m.sq1_noise = log2_noise_bound(ctxt[idx]) - noise_before;
  }
}

// 5
// FC1: runs a ciphertext GEMM per CRT component in parallel, then reduces
// the partial products onto `root` of `comm` by deserializing every other
// rank's ciphertexts and adding them homomorphically.
void mpi_bgv_hmnist_server::hfc1_server(std::vector<helib::Ctxt> &ctxt,
                                        const std::vector<helib::Ctxt> &hfc1,
                                        MPI_Comm comm, int rank,
                                        int root) const {
  auto &metrics = const_cast<std::vector<metric> &>(metrics_);
  std::vector<std::string> tmp;
  int p;
  MPI_Comm_size(comm, &p);
  // One GEMM per CRT component; in-place (output aliases input).
#pragma omp parallel for
  for (int i = 0; i < ctxt.size(); i++) {
    // printf("I am thread %d / %d \n", omp_get_thread_num(),
    //        omp_get_num_threads());
    // printf("%d\n", omp_in_parallel());
    // printf("%d\n", omp_get_max_threads());
    // printf("%d\n", omp_get_num_procs());
    metrics[i].fc1_noise = log2_noise_bound(ctxt[i]);
    // INTEL_ITT_TASK_BEGIN(mnist_forward_test, hcnn_fc1); //! ITT - Begin
    metrics[i].hfc1_time -= MPI_Wtime();
    hmmes_[i]->cgemm(MATRIX_TYPE::SQUARE, ctxt[i], hfc1, ctxt[i]);
    metrics[i].hfc1_time += MPI_Wtime();
    // INTEL_ITT_TASK_END(mnist_forward_test); //! ITT - End
    metrics[i].fc1_noise = log2_noise_bound(ctxt[i]) - metrics[i].fc1_noise;
  }
  // Serialize the local partial results for the reduction.
  for (int i = 0; i < ctxt.size(); i++) {
    std::stringstream ss;
    ctxt[i].writeTo(ss);
    tmp.push_back(ss.str());
  }
  // MPI_Barrier(comm);
  if (rank != root) {
    MPI_Send_Strings(tmp, root, rank, comm);
  } else {
    // Root accumulates every other rank's partials into its own ctxt.
    for (int i = 1; i < p; i++) {
      std::vector<std::string> recv_partial;
      MPI_Recv_Strings(recv_partial, ctxt.size(), i, i, comm);
      for (int j = 0; j < ctxt.size(); j++) {
        std::stringstream ss;
        ss << recv_partial[j];
        // NOTE(review): get_pk() is called without an index here, while
        // other deserialization sites use get_pk(j) per CRT component —
        // confirm this is intentional.
        auto recv_partial_ctxt = helib::Ctxt::readFrom(ss, get_pk());
        ctxt[j] += recv_partial_ctxt;
      }
    }
  }
}

// 4,5
// Gathers each rank's per-CRT FC1 results onto the world root: non-root
// ranks serialize and send crt_num_ ciphertexts; the root keeps its own
// share in ctxt[0] and deserializes one vector per other rank.
//
// NOTE(review): `comm` is only used for the rank/size queries — all sends
// and receives go over MPI_COMM_WORLD.  If callers ever pass a communicator
// with a different rank numbering, this mismatch would misroute messages;
// confirm the intent.
void mpi_bgv_hmnist_server::back_hfc1(
    const std::vector<helib::Ctxt> &out,
    std::vector<std::vector<helib::Ctxt>> &ctxt, MPI_Comm comm) const {
  int rank, p;
  MPI_Comm_rank(comm, &rank);
  MPI_Comm_size(comm, &p);
  if (rank != COMM_WORLD_ROOT) {
    // Serialize the local crt_num_ ciphertexts and ship them to the root.
    std::vector<std::string> tmp;
    for (int i = 0; i < crt_num_; i++) {
      std::stringstream ss;
      out[i].writeTo(ss);
      tmp.push_back(ss.str());
    }
    MPI_Send_Strings(tmp, COMM_WORLD_ROOT, rank, MPI_COMM_WORLD);
  } else {
    ctxt.resize(p);
    for (int i = 0; i < p; i++) {
      if (i == 0) {
        ctxt[i] = out; // the root's own share needs no transport
      } else {
        std::vector<std::string> tmp;
        MPI_Recv_Strings(tmp, crt_num_, i, i, MPI_COMM_WORLD);
        for (int j = 0; j < crt_num_; j++) {
          {
            std::stringstream ss;
            ss << tmp[j];
            // Deserialize with the public key of CRT component j.
            auto t = helib::Ctxt::readFrom(ss, get_pk(j));
            ctxt[i].push_back(t);
          }
        }
      }
    }
  }
}

// Second homomorphic squaring, applied in place to every CRT component,
// recording per-component timing and noise growth.
void mpi_bgv_hmnist_server::hsqr2_server(std::vector<helib::Ctxt> &ctxt) const {
  auto &metrics = const_cast<std::vector<metric> &>(metrics_);
  for (std::size_t idx = 0; idx < ctxt.size(); ++idx) {
    auto &m = metrics[idx];
    const double noise_before = log2_noise_bound(ctxt[idx]);
    // INTEL_ITT_TASK_BEGIN(mnist_forward_test, hcnn_sq2_fc2); //! ITT - Begin
    const double t0 = MPI_Wtime();
    ctxt[idx].square();
    m.hsq2_time += MPI_Wtime() - t0;
    m.sq2_noise = log2_noise_bound(ctxt[idx]) - noise_before;
  }
}

// FC2: runs the final ciphertext GEMM per CRT component in parallel, then
// gathers the serialized results on `root` of `comm` into `buf` (the root's
// own results first, followed by one vector per other rank).  Unlike
// hfc1_server, the root only collects strings — no homomorphic reduction.
void mpi_bgv_hmnist_server::hfc2_server(
    std::vector<helib::Ctxt> &ctxt, std::vector<std::vector<std::string>> &buf,
    const std::vector<helib::Ctxt> &hfc2, MPI_Comm comm, int rank,
    int root) const {
  auto &metrics = const_cast<std::vector<metric> &>(metrics_);
  std::vector<std::string> tmp;
  int p;
  MPI_Comm_size(comm, &p);
  // One GEMM per CRT component; also record final noise and remaining
  // ciphertext capacity for reporting.
#pragma omp parallel for
  for (int i = 0; i < crt_num_; i++) {
    metrics[i].fc2_noise = log2_noise_bound(ctxt[i]);
    metrics[i].hfc2_time -= MPI_Wtime();
    hmmes_[i]->cgemm(MATRIX_TYPE::MAX_K, ctxt[i], hfc2[i], ctxt[i]);
    metrics[i].hfc2_time += MPI_Wtime();
    // INTEL_ITT_TASK_END(mnist_forward_test); //! ITT - End
    metrics[i].fc2_noise = log2_noise_bound(ctxt[i]) - metrics[i].fc2_noise;
    metrics[i].final_noise = log2_noise_bound(ctxt[i]);
    metrics[i].left_cap = ctxt[i].capacity();
  }
  // Serialize the local results.
  for (int i = 0; i < crt_num_; i++) {
    std::stringstream ss;
    ctxt[i].writeTo(ss);
    tmp.push_back(ss.str());
  }
  // MPI_Barrier(comm);
  if (rank != root) {
    MPI_Send_Strings(tmp, root, rank, comm);
  }
  if (rank == root) {
    buf.push_back(tmp);
    for (int i = 1; i < p; i++) {
      std::vector<std::string> recv_partial;
      MPI_Recv_Strings(recv_partial, crt_num_, i, i, comm);
      buf.push_back(recv_partial);
    }
  }
}

// Runs one full homomorphic forward pass on this rank's encrypted input
// shard: deserialize input -> HConv -> HSquare1 -> HFC1 (reduced over
// comm_reducfc1) -> HSquare2 -> HFC2 (reduced over comm_reducfc2, results
// gathered into `out`).  Metrics are reset at entry; total wall time is
// accumulated into metrics[0].forword_test_time.
void mpi_bgv_hmnist_server::forward_test(
    std::vector<std::vector<std::string>> &out,
    std::vector<std::vector<std::string>> &buf_input,
    const std::vector<std::vector<helib::Ctxt>> &hconv,
    const std::vector<helib::Ctxt> &hfc1,
    const std::vector<helib::Ctxt> &hfc2) const {
  MPI_Barrier(MPI_COMM_WORLD);
  auto &metrics = const_cast<std::vector<metric> &>(metrics_);
  // Reset metrics
  for (auto &metric : metrics) {
    metric.reset();
  }
  int rank, p;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &p);
  std::vector<std::vector<helib::Ctxt>> ctxt;
  ctxt.resize(buf_input.size());
  metrics[0].forword_test_time -= MPI_Wtime();
  // NOTE(review): omp_set_nested is deprecated in OpenMP 5.0 in favor of
  // omp_set_max_active_levels — consider migrating.
  omp_set_nested(1);
  // #pragma omp parallel
  // Deserialize this rank's input shard: buf_input[i] holds the serialized
  // ciphertexts of CRT component i.
  for (int i = 0; i < buf_input.size(); i++) {
    for (int j = 0; j < buf_input[i].size(); j++) {
      std::stringstream ss;
      ss << buf_input[i][j];
      ctxt[i].push_back(helib::Ctxt::readFrom(ss, get_pk(i)));
    }
  }

  if (rank == COMM_WORLD_ROOT)
    std::cout << "readinput" << rank << "fininsh" << std::endl;

  // MPI_Barrier(MPI_COMM_WORLD);
  // Convolution layer.
  std::vector<helib::Ctxt> ct;
  hconv_server(ct, ctxt, hconv);
  if (rank == COMM_WORLD_ROOT)
    std::cout << "hconv" << rank << "fininsh" << std::endl;
  // MPI_Barrier(MPI_COMM_WORLD);

  // sqr1
  hsqr1_server(ct);
  if (rank == COMM_WORLD_ROOT)
    std::cout << "hsqr1" << rank << "fininsh" << std::endl;
  // MPI_Barrier(MPI_COMM_WORLD);

  // hfc1 — partial results are reduced onto rank 0 of comm_reducfc1.
  int rank_fc1;
  MPI_Comm_rank(comm_reducfc1, &rank_fc1);
  hfc1_server(ct, hfc1, comm_reducfc1, rank_fc1, 0);
  if (rank == COMM_WORLD_ROOT)
    std::cout << "hfc1" << rank << "fininsh" << std::endl;
  // MPI_Barrier(MPI_COMM_WORLD);

  // sqr2 — only on ranks that belong to the FC2 reduction communicator.
  if (comm_reducfc2 != MPI_COMM_NULL) {
    hsqr2_server(ct);
    if (rank == COMM_WORLD_ROOT)
      std::cout << "hsqr2" << rank << "fininsh" << std::endl;
    // MPI_Barrier(comm_reducfc2);
  }

  // hfc2
  // MPI_Barrier(MPI_COMM_WORLD);
  if (comm_reducfc2 != MPI_COMM_NULL) {
    int rank_fc2;
    MPI_Comm_rank(comm_reducfc2, &rank_fc2);
    hfc2_server(ct, out, hfc2, comm_reducfc2, rank_fc2, 0);
    if (rank == COMM_WORLD_ROOT)
      std::cout << "hfc2" << rank << "fininsh" << std::endl;
  }
  omp_set_nested(0);
  MPI_Barrier(MPI_COMM_WORLD);
  metrics[0].forword_test_time += MPI_Wtime();
}

// 4,5,49,4 ->5,49
// Distributes the encrypted input from `root` to every rank of `comm`.
//
// Rank i receives, for every CRT component kcrt, the serialized ciphertexts
// of slice (kbatch = i % batches, kconv = i / batches); the root keeps its
// own share directly in `buf` ([crt_num_][...]) without a self-send.
//
// Fixes vs. original: ctxt_in.data is no longer deep-copied into a local
// (it is a nested vector of ciphertexts — the copy was pure overhead), and
// the idempotent buf.resize(crt_num_) is hoisted out of the per-rank loop.
void mpi_bgv_hmnist_server::send_input_p2p(
    const encrypted_input<helib::BGV> &ctxt_in, MPI_Comm comm,
    std::vector<std::vector<std::string>> &buf, int root) const {
  int rank, p;
  MPI_Comm_rank(comm, &rank);
  MPI_Comm_size(comm, &p);
  if (rank == root) {
    const auto &ctxt = ctxt_in.data;
    buf.resize(crt_num_);
    for (int i = 0; i < p; i++) {
      std::vector<std::string> send;
      // MPI_Request reqs[crt_num_ * conv_size];
      auto kbatch = i % ctxt.size();
      auto kconv = i / ctxt.size();
      for (int kcrt = 0; kcrt < crt_num_; kcrt++) {
        for (int j = 0; j < ctxt[kbatch][kcrt].size(); j++) {
          std::stringstream ss;
          ctxt[kbatch][kcrt][j][kconv].writeTo(ss);
          send.push_back(ss.str());
        }
      }
      if (i == root) {
        // The root keeps its own share locally instead of self-sending.
        int num = 0;
        for (int kcrt = 0; kcrt < crt_num_; kcrt++)
          for (int j = 0; j < ctxt[kbatch][kcrt].size(); j++) {
            buf[kcrt].push_back(send[num++]);
          }
      } else {
        MPI_Send_Strings(send, i, i, comm);
        // MPI_Isend_Strings(send, i, i, comm, reqs);
        // MPI_Waitall(p, reqs, MPI_STATUSES_IGNORE);
      }
    }
  } else {
    buf.resize(crt_num_);
    std::vector<std::string> rec;
    MPI_Recv_Strings(rec, crt_num_ * conv_size, root, rank, comm);
    // Unflatten: conv_size consecutive strings per CRT component.
    int num = 0;
    for (int i = 0; i < crt_num_; i++) {
      for (int j = 0; j < conv_size; j++) {
        buf[i].push_back(rec[num++]);
      }
    }
  }
}

// [5,49,4] -> 49
// Scatters convolution-stage ciphertexts from `root`: rank i of `comm`
// receives conv_size serialized ciphertexts taken from the slice
// (kcrt = i % ctxt.size(), kconv = i / ctxt.size()); the root keeps its
// own share directly in `buf`.
//
// NOTE(review): on the receiver path, buf.resize(crt_num) followed by
// push_back leaves crt_num empty strings at the front of buf — this looks
// like it should be reserve() (or no resize at all); confirm against the
// consumers of `buf`.
// NOTE(review): on the root's own share, tmp holds conv_size entries but
// the copy loop reads ctxt[kcrt].size() of them — confirm these counts
// always match.
void mpi_bgv_hmnist_server::send_honv_p2p(
    const std::vector<std::vector<std::vector<helib::Ctxt>>> &ctxt,
    MPI_Comm comm, std::vector<std::string> &buf, int root, int rank,
    int crt_num) const {
  int p;
  MPI_Comm_size(comm, &p);
  if (rank == root) {
    std::cout << "conv_size_" << ctxt.size() << "," << ctxt[0].size() << ","
              << ctxt[0][0].size() << std::endl;
    for (int i = 0; i < p; i++) {
      // buf.resize(crt_num_);
      // Destination rank i gets the (kcrt, kconv) slice.
      auto kcrt = i % ctxt.size();
      auto kconv = i / ctxt.size();
      std::vector<std::string> tmp;
      // MPI_Request reqs[crt_num_ * conv_size];
      // for (int j = 0; j < crt_num_; j++) {
      for (int k = 0; k < conv_size; k++) {
        std::stringstream ss;
        ctxt[kcrt][k][kconv].writeTo(ss);
        tmp.push_back(ss.str());
        // }
      }
      if (i == root) {
        // The root keeps its own share locally instead of self-sending.
        int num = 0;
        // for (int j = 0; j < crt_num_; j++) {
        for (int k = 0; k < ctxt[kcrt].size(); k++) {
          buf.push_back(tmp[num++]);
          // }
        }
      } else {
        MPI_Send_Strings(tmp, i, i, comm);
        // MPI_Isend_Strings(tmp, i, i, comm, reqs);
        // MPI_Waitall(crt_num_ * conv_size, reqs, MPI_STATUSES_IGNORE);
      }
    }
  } else {
    buf.resize(crt_num);
    std::vector<std::string> tmp;
    MPI_Recv_Strings(tmp, conv_size, root, rank, comm);
    int num = 0;
    // for (int j = 0; j < crt_num_; j++) {
    for (int k = 0; k < conv_size; k++) {
      buf.push_back(tmp[num++]);
    }
    // }
  }
}

// ctxt[5,49,4] [4,5,49]
// void mpi_bgv_hmnist_server::back_honv_p2p(
//     const std::vector<std::vector<std::string>> &ctxt, MPI_Comm comm,
//     std::vector<std::vector<std::vector<std::string>>> &buf, int root, int
//     rank, int crt_num) const {
//   int p;
//   MPI_Comm_size(comm, &p);
//   std::vector<std::string> tmp;
//   if (rank != root) {
//     for (int i = 0; i < crt_num_; i++) {
//       for (int j = 0; j < conv_size; j++) {
//         tmp.push_back(ctxt[i][j]);
//       }
//     }
//     // std::cout << "hconv_backsend_" << rank << "start" << std::endl;
//     MPI_Send_Strings(tmp, root, rank, comm);
//     // std::cout << "hconv_backsend_" << rank << "finished" << std::endl;
//   } else {
//     buf.resize(p);
//     for (int i = 0; i < p; i++) {
//       if (i != root) {
//         // std::cout << "hconv_backrec_" << rank << "start" << std::endl;
//         MPI_Recv_Strings(tmp, crt_num_ * conv_size, i, i, comm);
//         // std::cout << "hconv_backrec_" << rank << "finished" << std::endl;
//         int num = 0;
//         buf[i].resize(crt_num_);
//         for (int j = 0; j < crt_num_; j++) {
//           for (int k = 0; k < conv_size; k++) {
//             buf[i][j].push_back(tmp[j * conv_size + k]);
//           }
//         }
//       } else {
//         buf[0] = ctxt;
//       }
//     }
//   }
// }

// 5,4 -> 1
// Scatters the FC1-stage ciphertexts: rank i of `comm` receives the single
// serialized ciphertext ctxt[i % ctxt.size()][i / ctxt.size()]; the root
// keeps its own share in `buf` without a self-send.
void mpi_bgv_hmnist_server::send_hfc1_p2p(
    std::vector<std::vector<helib::Ctxt>> &ctxt, MPI_Comm comm,
    std::string &buf, int root, int rank, int crt_num) const {
  int p;
  MPI_Comm_size(comm, &p);
  if (rank != root) {
    MPI_Recv_String(buf, root, rank, comm);
    return;
  }
  for (int dst = 0; dst < p; dst++) {
    const auto kcrt = dst % ctxt.size();
    const auto kconv = dst / ctxt.size();
    std::stringstream ss;
    ctxt[kcrt][kconv].writeTo(ss);
    std::string payload = ss.str();
    if (dst == root) {
      buf = payload; // the root's own share stays local
    } else {
      MPI_Send_String(payload, dst, dst, comm);
    }
  }
}

// Gathers per-rank FC1 result strings onto `root` over `comm`: buf[i] ends
// up holding rank i's crt_num_ serialized ciphertexts, with the root's own
// share placed in buf[0] without a self-send.
//
// Fixes vs. original: removes a dead std::stringstream that was built and
// discarded for every received string, and uses a fresh receive buffer per
// source rank — matching the sibling gather routines (back_hconv,
// hfc1_server) — instead of reusing one vector across receives.
void mpi_bgv_hmnist_server::back_hfc1_p2p(
    const std::vector<std::string> &ctxt, MPI_Comm comm,
    std::vector<std::vector<std::string>> &buf, int root, int rank,
    int crt_num) const {
  int p;
  MPI_Comm_size(comm, &p);
  if (rank != root) {
    // Forward this rank's crt_num_ result strings to the root.
    std::vector<std::string> tmp;
    for (int i = 0; i < crt_num_; i++) {
      tmp.push_back(ctxt[i]);
    }
    MPI_Send_Strings(tmp, root, rank, comm);
  } else {
    buf.resize(p);
    for (int i = 0; i < p; i++) {
      if (i != root) {
        std::vector<std::string> recv_partial;
        MPI_Recv_Strings(recv_partial, crt_num_, i, i, comm);
        for (int j = 0; j < crt_num_; j++) {
          buf[i].push_back(recv_partial[j]);
        }
      } else {
        buf[0] = ctxt; // the root's own share needs no transport
      }
    }
  }
}

// 5 -> 1
// Scatters the FC2-stage ciphertexts over `comm`: rank i receives the
// serialized ciphertext ctxt[i % ctxt.size()]; the root keeps its own share
// in `buf`.  Ranks outside the communicator (MPI_COMM_NULL) do nothing.
void mpi_bgv_hmnist_server::send_hfc2_p2p(const std::vector<helib::Ctxt> &ctxt,
                                          MPI_Comm comm, std::string &buf,
                                          int root, int rank) const {
  if (comm == MPI_COMM_NULL)
    return;
  int p;
  MPI_Comm_size(comm, &p);
  if (rank != root) {
    MPI_Recv_String(buf, root, rank, comm);
    return;
  }
  for (int dst = 0; dst < p; dst++) {
    std::stringstream ss;
    ctxt[dst % ctxt.size()].writeTo(ss);
    std::string payload = ss.str();
    if (dst == root) {
      buf = payload; // the root's own share stays local
    } else {
      MPI_Send_String(payload, dst, dst, comm);
      // MPI_Isend_Strings(tmp, i, i, comm, reqs);
      // MPI_Waitall(crt_num_, reqs, MPI_STATUSES_IGNORE);
    }
  }
}

// 5,49
// 4,5,49
// void mpi_bgv_hmnist_server::bcast_hconv_server(
//     const std::vector<std::vector<std::string>> &in, MPI_Comm comm,
//     std::vector<std::vector<std::string>> &buf, int root, int rank,
//     int crt_num) const {
//   std::vector<std::string> tmp;
//   buf.resize(crt_num_);
//   if (rank == root) {
//     for (int i = 0; i < crt_num_; i++) {
//       for (int j = 0; j < in[i].size(); j++) {
//         std::stringstream ss;
//         ss << in[i][j];
//         tmp.push_back(ss.str());
//         // tmp.push_back("test");
//       }
//     }
//   }
//   MPI_Bcast_Strings(tmp, root, comm);
//   int num = 0;
//   for (int i = 0; i < crt_num_; i++)
//     for (int j = 0; j < conv_size; j++) {
//       buf[i].push_back(tmp[num++]);
//     }
// }

// // 5,49 -> 4,5,49
// void mpi_bgv_hmnist_server::back_hconv_server(
//     const std::vector<std::vector<std::string>> &ctxt, MPI_Comm comm,
//     std::vector<std::vector<std::vector<std::string>>> &buf, int root, int rank,
//     int crt_num) const {
//   int p;
//   MPI_Comm_size(comm, &p);
//   // // MPI_Barrier(comm);
//   // std::cout << ctxt.size() << " " << ctxt[0].size() << std::endl;
//   if (rank != root) {
//     std::vector<std::string> send;
//     for (int i = 0; i < crt_num_; i++)
//       for (int j = 0; j < conv_size; j++) {
//         send.push_back(ctxt[i][j]);
//       }
//     std::cout << "hconv_backpush_" << rank << "finished" << std::endl;
//     // sleep(1000);
//     std::cout << "hconv_backsend_" << rank << "start" << std::endl;
//     // MPI_Send_Strings(tmp, root, rank, comm);
//     MPI_Send_Strings(send, root, rank, comm);
//     std::cout << "hconv_backsend_" << rank << "finished" << std::endl;
//   } else {
//     std::vector<std::string> rec;
//     buf.resize(p);
//     for (int i = 0; i < p; i++) {
//       if (i != root) {
//         buf[i].resize(crt_num_);
//         std::cout << "hconv_backrec_" << i << "start" << std::endl;
//         MPI_Recv_Strings(rec, crt_num_ * conv_size, i, i, comm);
//         std::cout << "hconv_backrec_" << i << "finished" << std::endl;
//         for (int j = 0; j < crt_num_; j++)
//           for (int k = 0; k < conv_size; k++) {
//             std::stringstream ss;
//             ss << rec[j * conv_size + k];
//             buf[i][j].push_back(rec[j * conv_size + k]);
//           }
//       } else {
//         buf[0] = ctxt;
//       }
//     }
//   }
// }

// // 5
// void mpi_bgv_hmnist_server::bcast_hfc1_server(
//     const std::vector<std::string> &in, MPI_Comm comm,
//     std::vector<std::string> &buf, int root, int rank, int crt_num) const {
//   if (rank == root) {
//     for (int i = 0; i < crt_num_; i++) {
//       std::stringstream ss;
//       ss << in[i];
//       buf.push_back(ss.str());
//     }
//   }
//   MPI_Bcast_Strings(buf, root, comm);
// }

// // 5,49 -> 16,5,49
// void mpi_bgv_hmnist_server::back_input_server(
//     const std::vector<std::vector<std::string>> &ctxt, MPI_Comm comm,
//     std::vector<std::vector<std::vector<std::string>>> &buf, int root, int rank,
//     int crt_num) const {
//   int p, size;
//   MPI_Comm_size(comm, &p);
//   std::vector<std::string> tmp;
//   if (rank != root) {
//     std::cout << crt_num << " " << rank << " " << ctxt[2].size() << std::endl;
//     for (int i = 0; i < crt_num_; i++)
//       for (int j = 0; j < ctxt[i].size(); j++) {
//         tmp.push_back(ctxt[i][j]);
//         // MPI_Send_String(t, root, rank, comm);
//       }
//     std::cout << "input_backsend_" << rank << " " << tmp.size() << "start"
//               << std::endl;
//     // MPI_Bcast(&size, 1, MPI_INT, 0, comm);
//     MPI_Send_Strings(tmp, root, rank, comm);
//     std::cout << "input_backsend_" << rank << "finished" << std::endl;
//   } else {
//     buf.resize(p);
//     for (int i = 0; i < p; i++) {
//       if (i != root) {
//         buf[i].resize(crt_num_);
//         // std::cout << "input_backrec_" << i << "start" << std::endl;
//         MPI_Recv_Strings(tmp, crt_num_ * conv_size, i, i, comm);
//         // std::cout << "input_backrec_" << i << "finished" << std::endl;
//         for (int j = 0; j < crt_num_; j++)
//           for (int k = 0; k < ctxt[j].size(); k++) {
//             std::stringstream ss;
//             ss << tmp[j * ctxt[j].size() + k];
//             buf[i][j].push_back(tmp[j * ctxt[j].size() + k]);
//             // std::cout << tmp[j * ctxt[j].size() + k] << std::endl;
//           }
//       } else {
//         buf[i] = ctxt;
//       }
//     }
//   }
// }

void mpi_bgv_hmnist_server::report_metrics(std::ostream &ss) const {
  // clang-format off
  // Pretty-print this rank's per-round timing and noise metrics to `ss`.
  // Timings are in seconds; the noise fields are assumed to be the log2
  // noise bounds recorded during the forward pass — TODO confirm units.
  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  ss << "================= Cipher CNN("<< rank << ")=================\n";
  ss << "* HMM Encoding       : " << metrics_[0].hmm_encoding_time << " s\n";
  ss << "* HCNN Time          : " << metrics_[0].forword_test_time << " s\n";
  // size_t index avoids the signed/unsigned comparison of `auto i = 0`.
  for (std::size_t i = 0; i < metrics_.size(); i++) {
  ss << "  * Round " << i << "          : \n";
  ss << "    * HConv time     : " << metrics_[i].hconv_time << " s\n";
  ss << "    * HSquare1 time  : " << metrics_[i].hsq1_time << " s\n";
  ss << "    * HFC1 time      : " << metrics_[i].hfc1_time << " s\n";
  ss << "    * HSquare2 time  : " << metrics_[i].hsq2_time << " s\n";
  ss << "    * HFC2 time      : " << metrics_[i].hfc2_time << " s\n";
  }
  // Fixed "nosie" -> "noise" typos in the emitted report text.
  ss << "* Final noise bound  : " << metrics_[0].final_noise << std::endl;
  ss << "  * Init noise bound : " << metrics_[0].init_noise << std::endl;
  ss << "  + HConv noise      : " << metrics_[0].conv_noise << std::endl;
  ss << "  + HSquare1 noise   : " << metrics_[0].sq1_noise << std::endl;
  ss << "  + HFC1 noise       : " << metrics_[0].fc1_noise << std::endl;
  ss << "  + HSquare2 noise   : " << metrics_[0].sq2_noise << std::endl;
  ss << "  + HFC2 noise       : " << metrics_[0].fc2_noise << std::endl;
  ss << "* Left capacity      : " << metrics_[0].left_cap << std::endl;
  ss << "==============================================\n";
  // clang-format on
}
