#include "Cifar10/mpi_Cifar10_test.hpp"
#include "Cifar10/mpi_bgv_Cifar10_test.hpp"
#include "Ctxt.h"
#include "hypercube/hypercube_cryto_agent.hpp"
#include "intel_itt_wrapper.hpp"
#include "math.cpp"
#include "math_util.hpp"
#include "mem_usage.hpp"
#include <ATen/core/TensorAccessor.h>
#include <iomanip>
#include <iostream>
#include <mpi.h>
#include <sstream>
#include <string>
#include <unistd.h>

// extern bool no_encoding_reuse;

INTEL_ITT_DOMAIN_CREATE(Cifar10_forward_test, "test.cnn.Cifar10.forward");
INTEL_ITT_STRING_HANDLE_CREATE(hinit, "HInit");
INTEL_ITT_STRING_HANDLE_CREATE(hcnn_conv, "HConv");
INTEL_ITT_STRING_HANDLE_CREATE(hcnn_input, "HInput");
INTEL_ITT_STRING_HANDLE_CREATE(hcnn_sq1, "HSquare1");
INTEL_ITT_STRING_HANDLE_CREATE(hcnn_fc1, "HFC1");
INTEL_ITT_STRING_HANDLE_CREATE(hcnn_sq2_fc2, "HSquare2-HFC2");

// Return log2 of a ciphertext's total noise bound (NTL::log is natural
// log, so divide by ln 2 to change base).
static inline double log2_noise_bound(const helib::Ctxt &ctxt) {
  const double log_of_2 = NTL::log(NTL::xdouble(2l));
  const double log_noise = NTL::log(ctxt.totalNoiseBound());
  return log_noise / log_of_2;
}

// Map every matrix entry from the residue range [0, t) to the centered
// range: any entry strictly greater than t/2 is shifted down by t.
//
// Fix: the original converted each entry with NTL::conv<int> before
// comparing, which silently overflows for entries wider than an int.
// NTL compares ZZ against long directly, so no conversion is needed.
// NOTE(review): trans_neg_single uses `>=` for the same boundary while
// this uses `>` — confirm which convention is intended.
static inline void trans_neg(NTL::mat_ZZ &a, long t) {
  const long half = t / 2; // loop-invariant threshold
  for (long i = 0; i < a.NumRows(); i++)
    for (long j = 0; j < a.NumCols(); j++) {
      if (a[i][j] > half) {
        a[i][j] -= t;
      }
    }
}

// Shift a single residue from [0, t) into the centered representation:
// values at or above t/2 are made negative by subtracting the modulus.
static inline void trans_neg_single(NTL::ZZ &a, const NTL::ZZ &t) {
  const NTL::ZZ half = t / 2;
  if (a >= half) {
    a -= t;
  }
}

// Quantize a 2-D float accessor into an NTL integer matrix mod t.
// Each value is scaled by SCALE; negative values are lifted by the full
// CRT modulus T before reduction so they land on the correct residue.
static inline void Tensor2Matrix(NTL::mat_ZZ &mat, int row, int col,
                                 const torch::TensorAccessor<float, 2> &a,
                                 const NTL::ZZ &T, const NTL::ZZ &t) {
  mat.SetDims(row, col);
  for (int r = 0; r < row; r++) {
    for (int c = 0; c < col; c++) {
      const float v = a[r][c];
      NTL::ZZ scaled = NTL::conv<NTL::ZZ>(v * SCALE);
      if (v < 0)
        scaled += T; // lift negatives before reduction
      mat[r][c] = scaled % t;
    }
  }
}

// Quantize one block of a 1-D convolution-kernel accessor into an NTL
// matrix mod t. Row r of block l is filled with kernel weight
// a[(row*l + r) / HCONV_IN] (each weight repeats across HCONV_IN
// consecutive rows); rows whose source index runs past the accessor are
// zero padding. Negative weights are lifted by the full CRT modulus T
// before reduction, as in Tensor2Matrix.
//
// The per-row source index and quantized value are hoisted out of the
// column loop (the original recomputed them for every column), and the
// zero branch writes 0 directly instead of `(conv<ZZ>(0*SCALE)) % t`.
static inline void Tensor2Matrix_conv(NTL::mat_ZZ &mat, int row, int col,
                                      const torch::TensorAccessor<float, 1> &a,
                                      int l, const NTL::ZZ &T,
                                      const NTL::ZZ &t) {
  mat.SetDims(row, col);
  for (int r = 0; r < row; r++) {
    const int src = (row * l + r) / HCONV_IN; // kernel index feeding row r
    if (src >= a.size(0)) {
      // Past the end of the kernel: the row is zero padding.
      for (int c = 0; c < col; c++)
        mat[r][c] = 0l;
      continue;
    }
    const float v = a[src];
    NTL::ZZ scaled = NTL::conv<NTL::ZZ>(v * SCALE);
    if (v < 0)
      scaled += T; // lift negatives before reduction
    scaled = scaled % t;
    for (int c = 0; c < col; c++)
      mat[r][c] = scaled; // same residue broadcast across the row
  }
}

// clang-format off
// Convert an NTL integer matrix back into a float torch tensor.
static inline torch::Tensor Matrix2Tensor(const NTL::mat_ZZ &mat, int row, int col) {
  // clang-format on
  torch::Tensor a = torch::zeros({row, col});
  for (int i = 0; i < row; i++)
    for (int j = 0; j < col; j++)
      a[i][j] = NTL::conv<int>(mat[i][j]);
  // Return by value: `return std::move(a)` would inhibit NRVO/copy
  // elision (clang-tidy: performance-pessimizing-move).
  return a;
}

// Quantize one 2-D input block mod ti_[t_index] (negatives lifted by
// the composite modulus T_) and encrypt it under CRT channel t_index.
helib::Ctxt
mpi_bgv_hCifar10_client::init_hx(const torch::TensorAccessor<float, 2> &a,
                                 int t_index) const {
  // input data
  NTL::mat_ZZ ptxt;
  Tensor2Matrix(ptxt, a.size(0), a.size(1), a, T_, ti_[t_index]);

  // Encryption. Return the result directly: the original's
  // `return std::move(ctxt)` blocked copy elision for no benefit.
  return hmmccs_[t_index]->encrypt(ptxt, key);
}

// Same as init_hx: quantize a 2-D block mod ti_[t_index] and encrypt it
// under CRT channel t_index (kept as a separate entry point for the
// single-destination send path).
helib::Ctxt mpi_bgv_hCifar10_client::init_hx_single(
    const torch::TensorAccessor<float, 2> &a, int t_index) const {
  // input data
  NTL::mat_ZZ ptxt;
  Tensor2Matrix(ptxt, a.size(0), a.size(1), a, T_, ti_[t_index]);

  // Encryption. Return directly — `return std::move(local)` is a
  // pessimization (blocks copy elision).
  return hmmccs_[t_index]->encrypt(ptxt, key);
}

// //121,64,1,1
// //121,32
// //121,100,1,1
// 121,50
// 25,3,6400,128 ->25,3,50,128,128
// Encrypt convolution-kernel slice a[asz0][asz1], expanded for output
// block l (see Tensor2Matrix_conv), under CRT channel t_index.
helib::Ctxt
mpi_bgv_hCifar10_client::init_hconv(const torch::TensorAccessor<float, 3> &a,
                                    int asz0, int asz1, int l,
                                    int t_index) const {
  // input data
  NTL::mat_ZZ ptxt;
  Tensor2Matrix_conv(ptxt, T_BATCHSIZE, T_BATCHSIZE, a[asz0][asz1], l, T_,
                     ti_[t_index]);
  // Encryption. Return directly — the original's `return std::move(ctxt)`
  // blocked copy elision.
  return hmmccs_[t_index]->encrypt(ptxt, key);
}

// Encrypt fully-connected layer-1 weight block a[asz0] (a 2-D slice of
// the 3-D kernel tensor) under CRT channel t_index.
helib::Ctxt
mpi_bgv_hCifar10_client::init_hfc1(const torch::TensorAccessor<float, 3> &a,
                                   int asz0, int t_index) const {
  // input data
  NTL::mat_ZZ ptxt;
  Tensor2Matrix(ptxt, a.size(1), a.size(2), a[asz0], T_, ti_[t_index]);

  // Encryption. Return directly — `return std::move(local)` would
  // inhibit copy elision.
  return hmmccs_[t_index]->encrypt(ptxt, key);
}

// Encrypt the full fully-connected layer-2 weight matrix under CRT
// channel t_index.
helib::Ctxt
mpi_bgv_hCifar10_client::init_hfc2(const torch::TensorAccessor<float, 2> &a,
                                   int t_index) const {
  // input data
  NTL::mat_ZZ ptxt;
  Tensor2Matrix(ptxt, a.size(0), a.size(1), a, T_, ti_[t_index]);

  // Encryption. Return directly — `return std::move(local)` would
  // inhibit copy elision.
  return hmmccs_[t_index]->encrypt(ptxt, key);
}

// Build one hypercube crypto context per CRT channel, record each
// channel's plaintext modulus ti_, and accumulate the composite modulus
// T_ = prod(ti_). Reports every context and the total setup time.
// NOTE(review): hmmccs_ entries are raw-new'd with no visible delete —
// confirm the class destructor releases them.
mpi_bgv_hCifar10_client::mpi_bgv_hCifar10_client(
    const params<helib::BGV> &params, int crt_num)
    : crt_num_(crt_num) {
  hmmccs_.resize(crt_num);
  ti_.resize(crt_num);

  const auto setup_begin = MPI_Wtime();
  for (int idx = 0; idx < crt_num; idx++) {
    hmmccs_[idx] = new hypercube_hmmcc(params, MNIST_HYPERCUBE, true);
    ti_[idx] = hmmccs_[idx]->context()->getP();
  }

  T_ = NTL::to_ZZ(1l);
  contexts_.resize(crt_num);
  for (int idx = 0; idx < crt_num; idx++) {
    contexts_[idx] = hmmccs_[idx]->context().get();
    std::cout << "client_context_0_" << idx << std::endl;
    hmmccs_[idx]->report_context();
    T_ *= ti_[idx];
  }
  const auto setup_end = MPI_Wtime();
  std::cout << "* Setup Helib Context           : " << (setup_end - setup_begin)
            << " s" << std::endl;
}

// Encrypt the trained model weights into ctxt_model, one ciphertext per
// CRT channel: the convolution kernel (per output block / input channel),
// the fc1 weight blocks, and the fc2 weight matrix. All containers are
// pre-sized with placeholder ciphertexts, then filled in parallel with
// OpenMP taskloops. Timing for each stage is printed to stdout.
void mpi_bgv_hCifar10_client::init_hcnn(encrypted_model<helib::BGV> &ctxt_model,
                                        const torch::nn::Conv2d &conv,
                                        const torch::nn::Linear &fc1,
                                        const torch::nn::Linear &fc2) const {
  auto init_hcnn_start_t = MPI_Wtime();
  // Reshape the raw layer weights into the block layouts expected by the
  // encrypted matrix-multiply engine (shapes noted per line).
  auto b =
      init_conv_kernel(conv->weight.data());    // [25,64,3,1,1] [25,3,6400,128]
  auto c = init_fc1_kernel(fc1->weight.data()); // [32,128,128] [50,128,128]
  auto d = init_fc2_kernel(fc2->weight.data()); // [10,128]     [10,128]
  auto b_data = b.accessor<float, 3>();
  auto c_data = c.accessor<float, 3>();
  auto d_data = d.accessor<float, 2>();
  std::cout << b.sizes() << std::endl;
  std::cout << c.sizes() << std::endl;
  std::cout << d.sizes() << std::endl;
  // int conv_num = fc1->weight.data().size(1) / T_BATCHSIZE;
  // Number of convolution output blocks per kernel slice.
  int conv_num = k_nblk * kp;
  // int conv_num = 8;
  std::cout << "init_fc2_kernel finished" << std::endl;
  // NOTE(review): mem_end_, mem_hconv and MB are declared but unused in
  // this function (leftovers from the commented-out instrumentation).
  std::size_t mem_begin_ = 0.0;
  std::size_t mem_end_ = 0.0;
  double mem_hconv = 0.0; // GB
  const double GB = static_cast<double>(1l << 30l);
  const double MB = static_cast<double>(1l << 20l);
  // init Convolution layer
  mem_begin_ = getCurrentRSS();
  std::cout << "mem_begin_=" << static_cast<double>(mem_begin_) / GB
            << std::endl;
  // INTEL_ITT_RESUME;
  // 5,25,32,3
  // Pre-size conv as [crt][kernel-slice][block][channel] with placeholder
  // ciphertexts bound to the matching public key; the parallel fill below
  // overwrites them.
  ctxt_model.conv.resize(crt_num_);
  for (int k = 0; k < crt_num_; k++) {
    ctxt_model.conv[k].resize(b.size(0));
    for (int i = 0; i < b.size(0); i++) {
      ctxt_model.conv[k][i].resize(conv_num);
      for (int l = 0; l < conv_num; l++) {
        for (int j = 0; j < b.size(1); j++) {
          ctxt_model.conv[k][i][l].emplace_back(hmmccs_[k]->public_key());
        }
      }
    }
  }

  // 5,32
  // Pre-size fc1 as [crt][block] with placeholders.
  ctxt_model.fc1.resize(crt_num_);
  for (int k = 0; k < crt_num_; k++) {
    for (int i = 0; i < c.size(0); i++) {
      ctxt_model.fc1[k].emplace_back(hmmccs_[k]->public_key());
    }
  }

  // 5
  // Pre-size fc2 as [crt] with placeholders.
  for (int k = 0; k < crt_num_; k++) {
    ctxt_model.fc2.emplace_back(hmmccs_[k]->public_key());
  }
  std::cout << "init_half finished" << std::endl;
  // INTEL_ITT_TASK_BEGIN(Cifar10_forward_test, hinit);
  // for (int k = 0; k < crt_num_; k++) {
  //   for (int i = 0; i < b.size(0); i++) {
  //     for (int j = 0; j < conv_num; j++) {
  //       // mem_begin_ = getCurrentRSS();
  //       // std::cout << "mem_now_=" << static_cast<double>(mem_begin_) / GB
  //       //           << std::endl;
  //       std::cout << k * b.size(0) * conv_num + i * conv_num + j <<
  //       std::endl; ctxt_model.conv[k][i][j] = std::move(init_hconv(b_data, i,
  //       j, k));
  //     }
  //   }
  // }
// One parallel region; a single thread generates tasks for all three
// taskloops (nogroup lets the loops' tasks overlap).
#pragma omp parallel
#pragma omp single
  {
    // 5,121,32
    // 5,25,50,3
    auto start_t = MPI_Wtime();
// collapse(3) parallelizes over (k, i, j); the innermost l loop over
// input channels runs sequentially inside each task.
#pragma omp taskloop collapse(3) nogroup
    // 5,25,50,3
    for (int k = 0; k < crt_num_; k++) {
      for (int i = 0; i < b.size(0); i++) {
        for (int j = 0; j < conv_num; j++) {
          for (int l = 0; l < b.size(1); l++) {
            // mem_begin_ = getCurrentRSS();
            // std::cout << "mem_now_=" << static_cast<double>(mem_begin_) / GB
            //           << std::endl;
            // std::cout << k * b.size(0) * conv_num + i * conv_num + j <<
            // std::endl;
            ctxt_model.conv[k][i][j][l] =
                std::move(init_hconv(b_data, i, l, j, k));
          }
        }
      }
    }
    auto end_t = MPI_Wtime();
    std::cout << "  * Init convolution layer      : " << (end_t - start_t)
              << " s\n";

    // init fully connected layer1
    // 5,32
    // 5,50
    start_t = MPI_Wtime();
#pragma omp taskloop collapse(2) nogroup
    for (int k = 0; k < crt_num_; k++) {
      for (int i = 0; i < c.size(0); i++) {
        ctxt_model.fc1[k][i] = std::move(init_hfc1(c_data, i, k));
      }
    }
    end_t = MPI_Wtime();
    std::cout << "  * Init fully connected layer1 : " << (end_t - start_t)
              << " s\n";

    // init fully connected layer2
    // 5
    start_t = MPI_Wtime();
#pragma omp taskloop nogroup
    for (int k = 0; k < crt_num_; k++) {
      ctxt_model.fc2[k] = std::move(init_hfc2(d_data, k));
    }
    end_t = MPI_Wtime();
    std::cout << "  * Init fully connected layer2 : " << (end_t - start_t)
              << " s\n";
    auto init_hcnn_end_t = MPI_Wtime();
    std::cout << "* Init HCifar10                     : "
              << (init_hcnn_end_t - init_hcnn_start_t) << " s\n";
  }
  // INTEL_ITT_TASK_END(Cifar10_forward_test);
}

// Encrypt a batch of input images into ctxt_input. The images are first
// expanded to matrix form via im2matrix (6-D result, shapes logged), the
// nested ciphertext containers are pre-sized with placeholders, and the
// blocks are then encrypted in parallel — one ciphertext per CRT channel
// per block.
void mpi_bgv_hCifar10_client::init_input(
    encrypted_input<helib::BGV> &ctxt_input, const torch::Tensor &filter,
    const torch::Tensor &x) const {
  // // x 2048,1,28,28
  // x 256,3,32,32
  std::cout << "x:" << x.sizes() << std::endl;
  // std::cout << "x: " << x.size(0) << " " << x.size(1) << " " << x.size(2) <<
  // " "
  //           << x.size(3) << std::endl;
  auto a = im2matrix(x, filter, 3);
  auto a_data = a.accessor<float, 6>();
  std::cout << "a:" << a.sizes() << std::endl;
  // std::cout << "a: " << a.size(0) << " " << a.size(1) << " " << a.size(2) <<
  // " "
  //           << a.size(3) << " " << a.size(4) << std::endl;
  // NOTE(review): this `tmp` is never used — the inner loop below declares
  // its own `tmp` accessor that shadows nothing here but makes this one dead.
  std::vector<helib::Ctxt> tmp;
  // std::vector<std::vector<helib::Ctxt>> tmp2;
  const auto asz0 = a.size(0), asz1 = a.size(1), asz2 = a.size(2),
             asz3 = a.size(3);

  // init input
  //// a 4,49,4,64,256
  // //4,5,49,4
  // // a 8,49,4,64,256
  // // 8,5,49,4
  // // a 2,25,3,50,128,128
  // a 32,2,121,3,128,128
  // 32,2,5,121,3
  auto start_t = MPI_Wtime();
  // Pre-size data as [asz0][asz1][crt][asz2][asz3] with placeholder
  // ciphertexts bound to each channel's public key.
  ctxt_input.data.resize(asz0);
  // for (int l = 0; l < x.size(0) / T_BATCHSIZE; l++) {
  //   ctxt_input.data[l].resize(crt_num_);
  //   for (int k = 0; k < crt_num_; k++) {
  //     ctxt_input.data[l][k].resize(a.size(1));
  //   }
  // }
  // #pragma omp parallel for collapse(3)
  for (int l = 0; l < asz0; l++) {
    ctxt_input.data[l].resize(asz1);
    for (int i = 0; i < asz1; i++) {
      ctxt_input.data[l][i].resize(crt_num_);
      for (int k = 0; k < crt_num_; k++) {
        ctxt_input.data[l][i][k].resize(asz2);
        for (int j = 0; j < asz2; j++) {
          for (int m = 0; m < asz3; m++) {
            ctxt_input.data[l][i][k][j].emplace_back(hmmccs_[k]->public_key());
          }
        }
      }
    }
  }
  INTEL_ITT_TASK_BEGIN(Cifar10_forward_test, hcnn_input);
// collapse(2) parallelizes over (l, i); the j/m/k loops run sequentially
// inside each task.
#pragma omp parallel
#pragma omp single
#pragma omp taskloop collapse(2) nogroup
  for (int l = 0; l < asz0; l++) {
    for (int i = 0; i < asz1; i++) {
      for (int j = 0; j < asz2; j++) {
        for (int m = 0; m < asz3; m++) {
          for (int k = 0; k < crt_num_; k++) {
            // 2-D slice of the expanded input for this block.
            auto tmp = a_data[l][i][j][m];
            ctxt_input.data[l][i][k][j][m] = std::move(init_hx(tmp, k));
            // Coarse progress logging (every 10th m index).
            if (!(m % 10)) {
              std::cout << m << "," << k << std::endl;
              std::cout.flush();
            }
            // std::cout << l << "," << i << "," << k << "," << j << "," << m
            //           << std::endl;
          }
        }
      }
    }
  }
  INTEL_ITT_TASK_END(Cifar10_forward_test);
  INTEL_ITT_DETACH;
  auto end_t = MPI_Wtime();
  std::cout << "* Init input                    : " << (end_t - start_t)
            << " s\n";
}

// Encrypt and serialize the slice of the expanded input tensor destined
// for MPI rank `dst`, filling `send` with wire-format ciphertext strings.
// dst is decomposed into (kconv, kbatch) block coordinates; the flat
// index into `send` is ordered [m][l][kcrt][j][in].
void mpi_bgv_hCifar10_client::init_input_single(std::vector<std::string> &send,
                                                const torch::Tensor &a,
                                                int dst) const {
  // //a 32,2,121,3,128,128
  // //2,2,5,121,3
  // a 50,2,25,3,128,128
  // 4,2,5,25,3
  // 8,1,5,25,3
  int rank, p;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &p);

  auto a_data = a.accessor<float, 6>();
  // NOTE(review): asz0..asz3 are unused in this function.
  const auto asz0 = a.size(0), asz1 = a.size(1), asz2 = a.size(2),
             asz3 = a.size(3);
  auto nblk = k_nblk * n_nblk;

  send.resize(conv_size * nblk * crt_num_ * FC_IN);
  // Decompose the destination rank into batch/conv block coordinates.
  const auto kbatch = dst % (a_data.size(1) / n_nblk); // 0
  const auto kconv = dst / (a_data.size(1) / n_nblk);  // dst
#pragma omp parallel
#pragma omp single
#pragma omp taskloop collapse(4)
  for (std::size_t m = 0; m < k_nblk; m++) {
    for (std::size_t l = 0; l < n_nblk; l++) {
      for (std::size_t kcrt = 0; kcrt < crt_num_; kcrt++) {
        for (std::size_t in = 0; in < FC_IN; in++) {
          for (std::size_t j = 0; j < conv_size; j++) {
            // for (int j = 0; j < ctxt[kbatch][kcrt].size(); j++) {
            // 2-D block to encrypt for this (m, l, j, in) coordinate.
            auto tmp = a_data[kconv * k_nblk + m][kbatch * n_nblk + l][j][in];
            std::stringstream ss;
            auto send_ctxt = init_hx_single(tmp, kcrt);
            send_ctxt.writeTo(ss);
            // Flat index ordered [m][l][kcrt][j][in].
            send[m * n_nblk * crt_num_ * conv_size * FC_IN +
                 l * crt_num_ * conv_size * FC_IN + kcrt * conv_size * FC_IN +
                 j * FC_IN + in] = ss.str();
          }
        }
      }
    }
  }
}

// Variant of init_input_single that tolerates a conv-block index past the
// end of the tensor: out-of-range blocks are encrypted as all-zero 128x128
// matrices instead of indexing out of bounds. Same [m][l][kcrt][j][in]
// flat ordering of the serialized ciphertexts in `send`.
void mpi_bgv_hCifar10_client::init_input_single2(std::vector<std::string> &send,
                                                 const torch::Tensor &a,
                                                 int dst) const {
  // //a 32,2,121,3,128,128
  // //2,2,5,121,3
  // a 50,2,5,25,3,128,128
  // 4,2,5,25,3
  // 8,1,5,25,3
  int rank, p;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &p);

  auto a_data = a.accessor<float, 6>();
  // NOTE(review): asz0..asz3 are unused in this function.
  const auto asz0 = a.size(0), asz1 = a.size(1), asz2 = a.size(2),
             asz3 = a.size(3);
  auto nblk = k_nblk * n_nblk;

  send.resize(conv_size * nblk * crt_num_ * FC_IN);
  // const auto kbatch = dst / (a_data.size(0) / k_nblk); // 0
  // const auto kconv = dst / (a_data.size(1) / n_nblk);  // dst
  // const auto kbatch = 0;  // 0
  // const auto kconv = dst; // dst
  // Decompose the destination rank into batch/conv block coordinates.
  const auto kbatch = dst % (a_data.size(1) / n_nblk); // 0
  const auto kconv = dst / (a_data.size(1) / n_nblk);  // dst
#pragma omp parallel
#pragma omp single
#pragma omp taskloop collapse(4)
  for (std::size_t m = 0; m < k_nblk; m++) {
    for (std::size_t l = 0; l < n_nblk; l++) {
      for (std::size_t kcrt = 0; kcrt < crt_num_; kcrt++) {
        for (std::size_t in = 0; in < FC_IN; in++) {
          for (std::size_t j = 0; j < conv_size; j++) {
            // for (int j = 0; j < ctxt[kbatch][kcrt].size(); j++) {
            // Default to a zero 128x128 block; rebind the accessor to the
            // real slice only when the conv-block index is in range.
            torch::Tensor t = torch::zeros({128, 128});
            auto tmp = t.accessor<float, 2>();
            if (kconv * k_nblk + m < a.size(0))
              tmp = a_data[kconv * k_nblk + m][kbatch * n_nblk + l][j][in];
            std::stringstream ss;
            auto send_ctxt = init_hx_single(tmp, kcrt);
            send_ctxt.writeTo(ss);
            // Flat index ordered [m][l][kcrt][j][in].
            send[m * n_nblk * crt_num_ * conv_size * FC_IN +
                 l * crt_num_ * conv_size * FC_IN + kcrt * conv_size * FC_IN +
                 j * FC_IN + in] = ss.str();
          }
        }
      }
    }
  }
}

// 2,5
void mpi_bgv_hCifar10_client::recover_result(
    torch::Tensor &out, const std::vector<std::vector<std::string>> &in,
    MPI_Comm comm, int root) const {
  int rank, p;
  MPI_Comm_rank(comm, &rank);
  MPI_Comm_size(comm, &p);
  // std::cout << out.size(0) << "," << static_cast<long>(in.size()) << ","
  //           << out.size(1) << std::endl;
  // 10,2 * 128
  torch::Tensor tmp =
      torch::zeros({out.size(0), static_cast<long>(in.size()) * out.size(1)});
  auto start_t = MPI_Wtime();
  // CRT-Pre
  if (rank == root) {
    // //8,10,64,5
    // 2,10,128,4
    CH_Remainder crt[in.size()][out.size(0)][out.size(1)][crt_num_];
#pragma omp parallel for collapse(2)
    for (int i = 0; i < in.size(); i++) {
      for (int kcrt = 0; kcrt < crt_num_; kcrt++) {
        std::stringstream ss;
        ss << in[i][kcrt];
        auto ctxt = helib::Ctxt::readFrom(ss, hmmccs_[kcrt]->public_key());
        // auto ctxt = in[i][kcrt];
        NTL::mat_ZZ res;
        hmmccs_[kcrt]->decrypt(res, ctxt);
        // std::cout << res.NumCols() << " " << res.NumRows() << std::endl;
        for (int j = 0; j < out.size(0); j++)
          for (int k = 0; k < out.size(1); k++) {
            crt[i][j][k][kcrt].result = NTL::conv<NTL::ZZ>(res[j][k]);
            crt[i][j][k][kcrt].mod_num = NTL::conv<NTL::ZZ>(ti_[kcrt]);
          }
      }
    }

    // CRT && Scale
    NTL::ZZ pow, base(SCALE);
    NTL::power(pow, base, 11);

// // 10,4,64
// 2,10,128
#pragma omp parallel for collapse(3)
    for (int i = 0; i < in.size(); i++) {
      for (int j = 0; j < out.size(0); j++) {
        for (int k = 0; k < out.size(1); k++) {
          NTL::ZZ temp = Ch_remainder_theorem(crt[i][j][k], crt_num_);
          trans_neg_single(temp, T_);
          tmp.index({j, i * out.size(1) + k}) = NTL::conv<float>(
              (NTL::conv<NTL::xdouble>(temp) / NTL::conv<NTL::xdouble>(pow)));
        }
      }
    }
    tmp = tmp.transpose(0, 1);
    // print(tmp);
    // 256,10
    out = tmp.clone();
    auto end_t = MPI_Wtime();
    std::cout << "CRT: " << (end_t - start_t) << " s" << std::endl;
  }
}

// Server-side setup: build per-CRT-channel contexts and matrix-multiply
// engines, register the fc1/fc2 engine shapes, and split MPI_COMM_WORLD
// into the four communicators used by the pipeline. Ranks are treated as
// a (rank/np, rank%np) grid.
// NOTE(review): hmmcss_/hmmes_ entries are raw-new'd with no visible
// delete — confirm the destructor releases them.
mpi_bgv_hCifar10_server::mpi_bgv_hCifar10_server(
    const hmm_status<shmm_engine> &status, MPI_Comm comm, int crt_num,
    int conv_size0, const params<helib::BGV> &params)
    : crt_num_(crt_num) {
  int rank, p;
  MPI_Comm_rank(comm, &rank);
  MPI_Comm_size(comm, &p);
  metrics_.resize(crt_num);
  hmmes_.resize(crt_num);
  hmmcss_.resize(crt_num);
  contexts_.resize(crt_num);

  // Non-root ranks construct their own crypto contexts from `params`.
  if (rank != COMM_WORLD_ROOT) {
    // #pragma omp parallel for
    for (int i = 0; i < crt_num; i++) {
      hmmcss_[i] = new hypercube_hmmcs(params, MNIST_HYPERCUBE, true);
      contexts_[i] = hmmcss_[i]->context().get();
    }
  }

  // One matrix-multiply engine per CRT channel.
  // #pragma omp parallel for
  for (int i = 0; i < crt_num; i++) {
    hmmes_[i] = new hypercube_hmme(status);
    // hmmes_[i]->status()->reset(false);
    // hmmes_[i]->status()->set_encoding_reuse(!no_encoding_reuse);
    // pk_[i] = new helib::PubKey(hmmcc[i]->public_key());
  }

  // Register the two engine shapes: full fc1 (M x N x K) and the 10-row
  // fc2 output. Time spent here is charged to hmm_encoding_time.
  metrics_[0].hmm_encoding_time -= MPI_Wtime();
  // #pragma omp parallel for
  for (int i = 0; i < crt_num; i++) {
    // hmmes_[i] = new hypercube_hmme(status);
    auto fc1 = hypercube_hmme::get_expected_mnk(MATRIX_DIM_M, MATRIX_DIM_N,
                                                MATRIX_DIM_K);
    hmmes_[i]->register_engine(contexts_[i]->getEA(), fc1);
    auto fc2 =
        hypercube_hmme::get_expected_mnk(10l, MATRIX_DIM_N, MATRIX_DIM_K);
    hmmes_[i]->register_engine(contexts_[i]->getEA(), fc2);
    // pk_[i] = new helib::PubKey(hmmcc[i]->public_key());
  }
  metrics_[0].hmm_encoding_time += MPI_Wtime();

  // comm_Bcast: first column of the grid (rank % np == 0), keyed by row.
  MPI_Comm_split(comm, (rank % np) ? MPI_UNDEFINED : 1, rank / np, &comm_Bcast);

  // comm_reducfc2: first row of the grid (rank / np == 0), keyed by column.
  MPI_Comm_split(comm, (rank / np == 0) ? 1 : MPI_UNDEFINED, rank % np,
                 &comm_reducfc2);

  // comm_conv: one communicator per grid row.
  MPI_Comm_split(comm, rank / np, rank % np, &comm_conv);

  // comm_reducfc1: one communicator per grid column.
  MPI_Comm_split(comm, rank % np, rank / np, &comm_reducfc1);
}

// Overload of the server constructor for the rank that also hosts the
// client: the root reuses the client's existing contexts (`hmmcc`)
// instead of building new ones; all other ranks construct their own.
// Otherwise identical to the first constructor (engine registration and
// the four communicator splits over the (rank/np, rank%np) grid).
// NOTE(review): hmmcss_/hmmes_ entries are raw-new'd with no visible
// delete — confirm the destructor releases them.
mpi_bgv_hCifar10_server::mpi_bgv_hCifar10_server(
    const hmm_status<shmm_engine> &status, MPI_Comm comm, int crt_num,
    int conv_size0, const params<helib::BGV> &params,
    const std::vector<hypercube_hmmcc *> &hmmcc)
    : crt_num_(crt_num) {
  int rank, p;
  MPI_Comm_rank(comm, &rank);
  MPI_Comm_size(comm, &p);
  metrics_.resize(crt_num);
  hmmes_.resize(crt_num);
  hmmcss_.resize(crt_num);

  contexts_.resize(crt_num);
  std::cout << rank << std::endl;
  if (rank != COMM_WORLD_ROOT) {
    // Non-root: build fresh contexts from `params`.
    for (int i = 0; i < crt_num; i++) {
      hmmcss_[i] = new hypercube_hmmcs(params, MNIST_HYPERCUBE, true);
      contexts_[i] = hmmcss_[i]->context().get();
    }
  } else {
    // Root: borrow the client's contexts (not owned here).
    for (int i = 0; i < crt_num; i++) {
      contexts_[i] = hmmcc[i]->context().get();
    }
  }
  // One matrix-multiply engine per CRT channel.
  // #pragma omp parallel for
  for (int i = 0; i < crt_num; i++) {
    hmmes_[i] = new hypercube_hmme(status);
    // hmmes_[i]->status()->reset(false);
    // hmmes_[i]->status()->set_encoding_reuse(!no_encoding_reuse);
    // pk_[i] = new helib::PubKey(hmmcc[i]->public_key());
  }

  // Register the fc1 (M x N x K) and fc2 (10 x N x K) engine shapes;
  // time charged to hmm_encoding_time.
  metrics_[0].hmm_encoding_time -= MPI_Wtime();
  for (int i = 0; i < crt_num; i++) {
    // hmmes_[i] = new hypercube_hmme(status);
    auto fc1 = hypercube_hmme::get_expected_mnk(MATRIX_DIM_M, MATRIX_DIM_N,
                                                MATRIX_DIM_K);
    hmmes_[i]->register_engine(contexts_[i]->getEA(), fc1);
    auto fc2 =
        hypercube_hmme::get_expected_mnk(10l, MATRIX_DIM_N, MATRIX_DIM_K);
    hmmes_[i]->register_engine(contexts_[i]->getEA(), fc2);
    // pk_[i] = new helib::PubKey(hmmcc[i]->public_key());
  }
  metrics_[0].hmm_encoding_time += MPI_Wtime();
  std::cout << rank
            << "* HMM Encoding       : " << metrics_[0].hmm_encoding_time
            << std::endl;
  std::cout.flush();

  // comm_Bcast: first column of the grid (rank % np == 0), keyed by row.
  MPI_Comm_split(comm, (rank % np) ? MPI_UNDEFINED : 1, rank / np, &comm_Bcast);

  // comm_reducfc2: first row of the grid (rank / np == 0), keyed by column.
  MPI_Comm_split(comm, (rank / np == 0) ? 1 : MPI_UNDEFINED, rank % np,
                 &comm_reducfc2);

  // comm_conv: one communicator per grid row.
  MPI_Comm_split(comm, rank / np, rank % np, &comm_conv);

  // comm_reducfc1: one communicator per grid column.
  MPI_Comm_split(comm, rank % np, rank / np, &comm_reducfc1);
}

// //2,5,49
// in 2,2,5,121,3
// hconv 5,121,2
// out 2,2,5

// in 4,2,5,25,3
// hconv 5,25,4,3
// out 4,2,5
// Homomorphic convolution: for each output coordinate (i,k,j), multiply
// every input ciphertext in[i][k][j][m][n] by the matching kernel
// ciphertext hconv[j][m][i][n] (products computed in parallel into
// `test`), then accumulate the insize3*insize4 products sequentially
// into out[i][k][j]. Wall time is charged to metrics[0].hconv_time.
void mpi_bgv_hCifar10_server::hconv_server(
    std::vector<std::vector<std::vector<helib::Ctxt>>> &out,
    const std::vector<
        std::vector<std::vector<std::vector<std::vector<helib::Ctxt>>>>> &in,
    const std::vector<std::vector<std::vector<std::vector<helib::Ctxt>>>>
        &hconv) const {
  // metrics_ is logically mutable timing state on a const method.
  auto &metrics = const_cast<std::vector<metric> &>(metrics_);
  const auto insize0 = in.size(), insize1 = in[0].size(),
             insize2 = in[0][0].size(), insize3 = in[0][0][0].size(),
             insize4 = in[0][0][0][0].size();
  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  metrics[0].hconv_time -= MPI_Wtime();
  // std::cout << rank << "conv start_" << insize0 << "_" << insize1 <<
  // "_"
  //           << insize2 << std::endl;
  // std::vector<std::vector<helib::Ctxt>> test;
  // Scratch products, flattened over (m,n) in the innermost dimension.
  // The placeholders here (and in `out`) are bound to the CRT channel j's
  // public key and overwritten below.
  std::vector<std::vector<std::vector<std::vector<helib::Ctxt>>>> test;
  test.resize(insize0);
  out.resize(insize0);
  for (int i = 0; i < insize0; i++) {
    test[i].resize(insize1);
    out[i].resize(insize1);
    for (int k = 0; k < insize1; k++) {
      test[i][k].resize(insize2);
      // if (!rank)
      //   std::cout << i << "_" << k << std::endl;
      for (int j = 0; j < insize2; j++) {
        out[i][k].emplace_back(get_pk(j));
        for (int m = 0; m < insize3; m++) {
          for (int n = 0; n < insize4; n++) {
            test[i][k][j].emplace_back(get_pk(j));
          }
        }
      }
    }
  }
// if (!rank)
//   std::cout << rank << "conv init" << std::endl;
// Products in parallel: collapse(4) spans (i,k,j,n); the m loop runs
// sequentially inside each task. Each task writes a distinct
// test[i][k][j][insize4*m+n] slot, so there are no write conflicts.
#pragma omp parallel
#pragma omp single
#pragma omp taskloop collapse(4) nogroup
  for (int i = 0; i < insize0; i++) {
    for (int k = 0; k < insize1; k++) {
      for (int j = 0; j < insize2; j++) {
        for (int n = 0; n < insize4; n++) {
          for (int m = 0; m < insize3; m++) {
            test[i][k][j][insize4 * m + n] = in[i][k][j][m][n];
            test[i][k][j][insize4 * m + n] *= hconv[j][m][i][n];
          }
        }
      }
    }
  }
  // if (!rank)
  //   std::cout << rank << "conv mul finished" << std::endl;
  // Sequential reduction: the (m,n)==(0,0) product initializes
  // out[i][k][j] (replacing the placeholder); the rest are added.
  for (int i = 0; i < insize0; i++)
    for (int k = 0; k < insize1; k++)
      for (int j = 0; j < insize2; j++) {
        for (int m = 0; m < insize3; m++) {
          for (int n = 0; n < insize4; n++) {
            if (m + n)
              out[i][k][j] += test[i][k][j][insize4 * m + n];
            else
              out[i][k][j] = test[i][k][j][insize4 * m + n];
          }
        }
      }
  metrics[0].hconv_time += MPI_Wtime();
  // if (!rank)
  //   std::cout << rank << "conv add finished" << std::endl;

  // std::vector<std::vector<helib::Ctxt>> test;
  // out.resize(in.size());
  // // #pragma omp parallel for collapse(2)
  // for (int i = 0; i < in.size(); i++) {
  //   for (int k = 0; k < in[i].size(); k++) {
  //     metrics[k].init_noise = log2_noise_bound(in[0][0][0]);
  //     metrics[k].conv_noise = metrics[k].init_noise;
  //     // INTEL_ITT_TASK_BEGIN(Cifar10_forward_test, hcnn_conv); //! ITT -
  //     Begin metrics[k].hconv_time -= MPI_Wtime(); helib::Ctxt
  //     test(in[i][k][0]); test *= hconv[k][0]; for (int j = 1; j <
  //     in[i][k].size(); j++) {
  //       helib::Ctxt tmp(in[i][k][j]);
  //       tmp *= hconv[k][j];
  //       test += tmp;
  //     }
  //     out[i].push_back(test);
  //     metrics[k].hconv_time += MPI_Wtime();
  //     metrics[k].conv_noise =
  //         log2_noise_bound(in[i][k][0]) - metrics[0].conv_noise;
  //   }
  // }
}

// Gather the per-rank convolution outputs on COMM_WORLD_ROOT. Non-root
// ranks serialize their crt_num_ ciphertexts and send them to the root;
// the root keeps its own `out` in slot 0 and deserializes one vector of
// ciphertexts per remote rank into `ctxt`.
void mpi_bgv_hCifar10_server::back_hconv(
    const std::vector<helib::Ctxt> &out,
    std::vector<std::vector<helib::Ctxt>> &ctxt) const {
  int rank, p;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &p);

  if (rank == COMM_WORLD_ROOT) {
    ctxt.resize(p);
    ctxt[0] = out; // the root's own share needs no transport
    for (int src = 1; src < p; src++) {
      std::vector<std::string> wire;
      MPI_Recv_Strings(wire, crt_num_, src, src, MPI_COMM_WORLD);
      for (int ch = 0; ch < crt_num_; ch++) {
        std::stringstream ss;
        ss << wire[ch];
        auto recovered = helib::Ctxt::readFrom(ss, get_pk(ch));
        ctxt[src].push_back(recovered);
      }
    }
  } else {
    std::vector<std::string> wire;
    for (int ch = 0; ch < crt_num_; ch++) {
      std::stringstream ss;
      out[ch].writeTo(ss);
      wire.push_back(ss.str());
    }
    MPI_Send_Strings(wire, COMM_WORLD_ROOT, rank, MPI_COMM_WORLD);
  }
}

// 2,2,5
void mpi_bgv_hCifar10_server::hsqr1_server(
    std::vector<std::vector<std::vector<helib::Ctxt>>> &ctxt) const {
  auto &metrics = const_cast<std::vector<metric> &>(metrics_);
  metrics[0].sq1_noise = log2_noise_bound(ctxt[0][0][0]);
  metrics[0].hsq1_time -= MPI_Wtime();
#pragma omp parallel
#pragma omp single
#pragma omp taskloop collapse(3)
  for (int i = 0; i < ctxt.size(); i++) {
    for (int j = 0; j < ctxt[0].size(); j++) {
      for (int k = 0; k < ctxt[0][0].size(); k++) {
        // INTEL_ITT_TASK_BEGIN(Cifar10_forward_test, hcnn_sq1); //! ITT - Begin
        // metrics[j].hsq1_time -= MPI_Wtime();
        ctxt[i][j][k].square();
        // metrics[j].hsq1_time += MPI_Wtime();
        // INTEL_ITT_TASK_END(Cifar10_forward_test); //! ITT - End
      }
    }
  }
  metrics[0].hsq1_time += MPI_Wtime();
  metrics[0].sq1_noise = log2_noise_bound(ctxt[0][0][0]) - metrics[0].sq1_noise;
}

// ctxt 4,2,5
// hfc1 5,4
// out 2,5
void mpi_bgv_hCifar10_server::hfc1_server(
    std::vector<std::vector<std::vector<helib::Ctxt>>> &ctxt,
    const std::vector<std::vector<helib::Ctxt>> &hfc1,
    std::vector<std::vector<helib::Ctxt>> &out, MPI_Comm comm, int rank,
    int root) const {
  auto &metrics = const_cast<std::vector<metric> &>(metrics_);
  std::vector<std::string> tmp;
  int p;
  MPI_Comm_size(comm, &p);
  out.resize(m_nblk * n_nblk);
  if (rank == 0) {
    std::cout << rank << "ctxt:" << ctxt.size() << "," << ctxt[0].size() << ","
              << ctxt[0][0].size() << std::endl;
    std::cout << rank << "hfc1:" << hfc1.size() << "," << hfc1[0].size()
              << std::endl;
  }
  // #pragma omp parallel for collapse(3)
  //   for (int i = 0; i < ctxt.size(); i++) {
  //     for (int j = 0; j < ctxt[0].size(); j++) {
  //       for (int k = 0; k < ctxt[0][0].size(); k++) {
  //         const auto &ea = get_context()[k]->getEA();
  //         std::vector<std::shared_ptr<helib::GeneralAutomorphPrecon_FULL>>
  //         ctmp(
  //             1);
  //         ctmp[0] = std::make_shared<helib::GeneralAutomorphPrecon_FULL>(
  //             ctxt[i][j][k], 0, ea);
  //         std::shared_ptr<helib::Ctxt> send_ctxt1;
  //         send_ctxt1 = ctmp[0]->automorph(0);
  //         if (send_ctxt1->getContext() == ctxt[i][j][k].getContext())
  //           std::cout << rank << "_automorph hfc1" << std::endl;
  //       }
  //     }
  //   }
  for (int k = 0; k < crt_num_; k++) {
    // std::cout << rank << "-" << k << std::endl;
    metrics[0].hfc1_time -= MPI_Wtime();
    auto eh_mnk = hypercube_hmme::get_expected_mnk(MATRIX_DIM_M, MATRIX_DIM_N,
                                                   MATRIX_DIM_K);
    MATRIX_TYPE type_ = hypercube_hmme::parse_matrix_type(eh_mnk);
    const auto R = std::min(std::min(MATRIX_DIM_M, MATRIX_DIM_N), MATRIX_DIM_K);
    const auto &ea = get_context()[k]->getEA();
    helib::Ctxt zero(get_pk(k));
    zero.DummyEncrypt(NTL::ZZX(0l));
    std::vector<helib::Ctxt> C(m_nblk * n_nblk, zero);
    std::vector<std::shared_ptr<helib::GeneralAutomorphPrecon_FULL>> ctxt_Ap(
        m_nblk * k_nblk);
    std::vector<std::shared_ptr<helib::GeneralAutomorphPrecon_FULL>> ctxt_Bp(
        k_nblk * n_nblk);
    // std::cout << rank << "," << k << "hfc1 init finished" << std::endl;
#pragma omp parallel
#pragma omp single
    {
#pragma omp taskloop nogroup
      for (int l = 0; l < m_nblk * k_nblk; l++) {
        // std::stringstream A_ss;
        // A_ss << ctxt[k][0][l];
        // auto ctxt_A = helib::Ctxt::readFrom(A_ss, *pk_);
        auto ctxt_A(hfc1[k][l]);
        hmmes_[k]->rotate_align_A(type_, ctxt_A, ctxt_A);
        ctxt_Ap[l] =
            std::make_shared<helib::GeneralAutomorphPrecon_FULL>(ctxt_A, 1, ea);
      }
      // std::cout << rank << "," << k << "ctxt_Ap init finished" << std::endl;
#pragma omp taskloop nogroup
      // #pragma omp taskloop
      for (int l = 0; l < k_nblk * n_nblk; l++) {
        // std::stringstream B_ss;
        // B_ss << ctxt_model.fc1[k][l];
        // auto ctxt_B = helib::Ctxt::readFrom(B_ss, *pk_);
        // auto ctxt_B(hfc1[l / n_nblk][l % n_nblk][k]);
        auto ctxt_B(ctxt[l / n_nblk][l % n_nblk][k]);
        hmmes_[k]->rotate_align_B(type_, ctxt_B, ctxt_B);
        ctxt_Bp[l] =
            std::make_shared<helib::GeneralAutomorphPrecon_FULL>(ctxt_B, 0, ea);
        // auto tmp = ctxt_Bp[l]->automorph(0);
        // std::cout << rank << "," << k << "," << l
        // << "ctxt_Bp automorph finished" << std::endl;
      }
    }
    // std::cout << rank << "," << k << "ctxt_Bp init finished" << std::endl;
    std::vector<std::vector<helib::Ctxt>> partial;
    int parallel_pivot;
    // std::cout << omp_get_max_threads() << std::endl;
    bool large_mem = false;
    // MPI_Barrier(comm);
    //       if (large_mem && k_nblk <= R) { /* Large Memory */
    //         parallel_pivot = k_nblk;
    //         partial.resize(parallel_pivot, C);
    //         for (int r = 0; r < R; r++) {
    //           std::vector<helib::Ctxt> A_ik(m_nblk * k_nblk,
    //                                         helib::Ctxt(get_pk(k)));
    //           std::vector<std::shared_ptr<helib::Ctxt>> B_kj(n_nblk *
    //           k_nblk);
    // #pragma omp parallel
    // #pragma omp single
    //           {
    // #pragma omp taskloop collapse(2) nogroup
    //             for (int i = 0; i < k_nblk; i++) {
    //               for (int j = 0; j < n_nblk; j++) {
    //                 // std::cout << i << "B_kj finish" << j << std::endl;
    //                 B_kj[i * n_nblk + j] = ctxt_Bp[i * n_nblk +
    //                 j]->automorph(-r);
    //               }
    //             }
    // #pragma omp taskloop collapse(2) nogroup
    //             for (int i = 0; i < k_nblk; i++) {
    //               for (int j = 0; j < m_nblk; j++) {
    //                 // std::cout << i << "A_ik finish" << j << std::endl;
    //                 A_ik[i * m_nblk + j] =
    //                 std::move(hmmes_[k]->shift_compute_A(
    //                     type_, ctxt_Ap[j * k_nblk + i], r));
    //               }
    //             }
    // #pragma omp taskwait
    // #pragma omp taskloop collapse(3)
    //             for (int l = 0; l < k_nblk; l++) {
    //               for (int i = 0; i < m_nblk; i++) {
    //                 for (int j = 0; j < n_nblk; j++) {
    //                   auto L = A_ik[l * m_nblk + i];
    //                   L.multiplyBy(*B_kj[l * n_nblk + j]);
    //                   partial[l][i * n_nblk + j] += L;
    //                   // std::cout << l << "partial finish" << i << j <<
    //                   std::endl;
    //                 }
    //               }
    //             }
    //           }
    //         }
    //       } else
    if (R >= omp_get_max_threads() ||
        (k_nblk < omp_get_max_threads() && R >= k_nblk)) {
      parallel_pivot = R;
      partial.resize(parallel_pivot, C);
#pragma omp parallel for
      // #pragma omp taskloop collapse(2)
      for (int r = 0; r < R; r++) {
        for (int l = 0; l < k_nblk; l++) {
          // std::cout << r << "_k_nblk_" << l << std::endl;
          std::vector<std::shared_ptr<helib::Ctxt>> B_j(n_nblk);
          for (int j = 0; j < n_nblk; j++) {
            // std::cout << r << "_" << k << "_" << l << "_ctxtbp_" << j
            //           << std::endl;
            // // auto tmp = ctxt_Bp[l * n_nblk + j]->automorph(-r);
            // std::cout << rank << "_" << r << "_" << k << "_" << l
            //           << "_B_j_ begin" << j << std::endl;
            // B_j.push_back(tmp);
            B_j[j] = (ctxt_Bp[l * n_nblk + j]->automorph(-r));
            // B_j[j] = std::move(hmmes_[k]->shift_compute_B(
            //     type_, ctxt_Bp[l * n_nblk + j], -r));
            // std::cout << rank << "_" << r << "_" << k << "_" << l << "_B_j_
            // end"
            //           << j << std::endl;
          }
          for (int i = 0; i < m_nblk; i++) {
            // std::cout << rank << "_" << r << "_" << k << "_" << l
            //           << "_shift_compute_A_ begin" << i << std::endl;
            auto A_ikr = std::move(
                hmmes_[k]->shift_compute_A(type_, ctxt_Ap[i * k_nblk + l], r));
            // std::cout << rank << "_" << r << "_" << k << "_" << l
            //           << "_shift_compute_A_ end" << i << std::endl;
            for (int j = 0; j < n_nblk; j++) {
              auto L = A_ikr;
              L.multiplyBy(*B_j[j]);
              partial[r][i * n_nblk + j] += L;
              // std::cout << r << "_" << k << "_" << l << "_partial_" << i <<
              // "_"
              //           << j << std::endl;
            }
          }
        }
      } //! omp for: Implicit synchronization
    }
    // }980 2180
    else if (k_nblk >= omp_get_max_threads() ||
             (R < omp_get_max_threads() && k_nblk >= R)) {
      parallel_pivot = k_nblk;
      partial.resize(parallel_pivot, C);
#pragma omp parallel for
      for (int k = 0; k < k_nblk; k++) {
        for (int r = 0; r < R; r++) {
          std::vector<std::shared_ptr<helib::Ctxt>> B_j(n_nblk);
          for (int j = 0; j < n_nblk; j++) {
            B_j[j] = ctxt_Bp[k * n_nblk + j]->automorph(-r);
          }
          for (int i = 0; i < m_nblk; i++) {
            auto A_ikr = std::move(
                hmmes_[k]->shift_compute_A(type_, ctxt_Ap[i * k_nblk + k], r));
            for (int j = 0; j < n_nblk; j++) {
              auto L = A_ikr;
              L.multiplyBy(*B_j[j]);
              partial[k][i * n_nblk + j] += L;
            }
          }
        }
      }
    }
// Add partial sum
#pragma omp parallel
#pragma omp single
#pragma omp taskloop
    // #pragma omp parallel for
    for (int l = 0; l < m_nblk * n_nblk; l++) {
      for (int r = 0; r < parallel_pivot; r++) {
        C[l] += partial[r][l];
      }
    }
    metrics[0].hfc1_time += MPI_Wtime();
    // std::cout << rank << "hfc1c finished" << std::endl;

    std::string pack_buff;
    std::vector<int> size_buff;
    std::vector<std::string> local_C;
    metrics[0].mpi_time -= MPI_Wtime();
    if (rank == 0) {
      for (int sender = 1; sender < p; sender++) {
        // std::cout << k << "_" << rank << "sender:" << sender << " "
        //           << std::endl;
        MPI_PackRecv_Strings(local_C, m_nblk * n_nblk, sender, 0, comm);
#pragma omp parallel
#pragma omp single
#pragma omp taskloop
        for (int l = 0; l < local_C.size(); l++) {
          std::stringstream ss;
          ss << local_C[l];
          auto recv_partial_ctxt = helib::Ctxt::readFrom(ss, get_pk(k));
          C[l] += recv_partial_ctxt;
        }
      }
      // std::cout << std::endl;
    } else {
      local_C.resize(m_nblk * n_nblk);
      size_buff.resize(m_nblk * n_nblk);
#pragma omp parallel
#pragma omp single
#pragma omp taskloop
      for (int l = 0; l < local_C.size(); l++) {
        // std::cout << k << "_" << rank << "l:" << l << " " << std::endl;
        std::stringstream ss;
        C[l].writeTo(ss);
        local_C[l] = ss.str();
      }
      MPI_PackSend_Strings(local_C, pack_buff, size_buff.data(), 0, 0, comm);
    }
    metrics[0].mpi_time += MPI_Wtime();

    metrics[0].hfc1_time -= MPI_Wtime();
    for (int l = 0; l < m_nblk * n_nblk; l++) {
      out[l].emplace_back(C[l]);
    }
    // std::cout << rank << "hfc1 finish" << std::endl;
    metrics[0].hfc1_time += MPI_Wtime();
    // }
  }
}

// 4,5
void mpi_bgv_hCifar10_server::back_hfc1(
    const std::vector<helib::Ctxt> &out,
    std::vector<std::vector<helib::Ctxt>> &ctxt, MPI_Comm comm) const {
  // Gather every rank's per-CRT hfc1 outputs onto COMM_WORLD_ROOT.
  // Non-root ranks serialize their `out` ciphertexts and send one message;
  // the root keeps its own slice and deserializes one message per peer
  // into ctxt[peer].
  int rank = 0;
  int p = 0;
  MPI_Comm_rank(comm, &rank);
  MPI_Comm_size(comm, &p);
  if (rank == COMM_WORLD_ROOT) {
    ctxt.resize(p);
    ctxt[0] = out; // root's own result: copied directly, no self-message
    for (int src = 1; src < p; src++) {
      std::vector<std::string> serialized;
      // NOTE(review): ranks are taken from `comm` but traffic goes over
      // MPI_COMM_WORLD -- the two must coincide; confirm at call sites.
      MPI_Recv_Strings(serialized, crt_num_, src, src, MPI_COMM_WORLD);
      for (int k = 0; k < crt_num_; k++) {
        std::stringstream ss;
        ss << serialized[k];
        ctxt[src].push_back(helib::Ctxt::readFrom(ss, get_pk(k)));
      }
    }
  } else {
    std::vector<std::string> serialized;
    for (int k = 0; k < crt_num_; k++) {
      std::stringstream ss;
      out[k].writeTo(ss);
      serialized.push_back(ss.str());
    }
    MPI_Send_Strings(serialized, COMM_WORLD_ROOT, rank, MPI_COMM_WORLD);
  }
}

// 2,5
void mpi_bgv_hCifar10_server::hsqr2_server(
    std::vector<std::vector<helib::Ctxt>> &ctxt) const {
  // Second squaring activation: square every ciphertext in place and
  // record the noise growth and elapsed time in metrics[0].
  // metrics_ is logically-mutable bookkeeping on a const method; the
  // file-wide idiom is to cast away const.
  auto &metrics = const_cast<std::vector<metric> &>(metrics_);
  metrics[0].sq2_noise = log2_noise_bound(ctxt[0][0]);
  metrics[0].hsq2_time -= MPI_Wtime();
  const int rows = static_cast<int>(ctxt.size());
  const int cols = static_cast<int>(ctxt[0].size());
#pragma omp parallel
#pragma omp single
#pragma omp taskloop collapse(2)
  for (int r = 0; r < rows; r++) {
    for (int c = 0; c < cols; c++) {
      ctxt[r][c].square();
    }
  }
  metrics[0].hsq2_time += MPI_Wtime();
  // Noise delta introduced by this layer (sampled from ctxt[0][0]).
  metrics[0].sq2_noise = log2_noise_bound(ctxt[0][0]) - metrics[0].sq2_noise;
}

// ctxt 2,5
// hfc2 5
// void mpi_bgv_hCifar10_server::hfc2_server(
//     std::vector<std::vector<helib::Ctxt>> &ctxt,
//     std::vector<std::vector<std::string>> &buf,
//     const std::vector<helib::Ctxt> &hfc2, MPI_Comm comm, int rank,
//     int root) const {
//   auto &metrics = const_cast<std::vector<metric> &>(metrics_);
//   std::vector<std::string> tmp;
//   int p;
//   MPI_Comm_size(comm, &p);
//   buf.resize(ctxt.size() * p);
//   const double MB = static_cast<double>(1l << 20l);
//   // std::cout << p << std::endl;
//   metrics[0].hfc2_time -= MPI_Wtime();
// #pragma omp parallel for collapse(2)
//   for (int j = 0; j < ctxt.size(); j++) {
//     for (int i = 0; i < crt_num_; i++) {
//       metrics[i].fc2_noise = log2_noise_bound(ctxt[j][i]);
//       // metrics[i].hfc2_time -= MPI_Wtime();
//       hmmes_[i]->cgemm(MATRIX_TYPE::MAX_K, ctxt[j][i], hfc2[i], ctxt[j][i]);
//       // metrics[i].hfc2_time += MPI_Wtime();
//       // INTEL_ITT_TASK_END(Cifar10_forward_test); //! ITT - End
//       metrics[i].fc2_noise =
//           log2_noise_bound(ctxt[j][i]) - metrics[i].fc2_noise;
//       metrics[i].final_noise = log2_noise_bound(ctxt[j][i]);
//       metrics[i].left_cap = ctxt[j][i].capacity();
//     }
//   }
//   metrics[0].hfc2_time += MPI_Wtime();
//   for (int j = 0; j < ctxt.size(); j++) {
//     for (int i = 0; i < crt_num_; i++) {
//       std::stringstream ss;
//       ctxt[j][i].writeTo(ss);
//       tmp.push_back(ss.str());
//     }
//   }
//   if (rank == 0)
//     std::cout << "ctxt_result_size:"
//               << static_cast<double>(tmp[0].size() * sizeof(char)) / MB <<
//               "MB"
//               << std::endl;
//   // MPI_Barrier(comm);
//   metrics[0].mpi_time -= MPI_Wtime();
//   if (rank != root) {
//     MPI_Send_Strings(tmp, root, rank, comm);
//   }
//   if (rank == root) {
//     // std::cout << ctxt.size() << "_" << ctxt[0].size() << std::endl;
//     for (int j = 0; j < ctxt.size(); j++) {
//       buf[j].resize(crt_num_);
//       for (int k = 0; k < crt_num_; k++) {
//         // std::cout << j << "." << k << std::endl;
//         buf[j][k] = tmp[j * crt_num_ + k];
//       }
//     }
//     // std::cout << "buf0_finished" << std::endl;
//     for (int i = 1; i < p; i++) {
//       std::vector<std::string> recv_partial;
//       MPI_Recv_Strings(recv_partial, crt_num_ * ctxt.size(), i, i, comm);
//       for (int j = 0; j < ctxt.size(); j++) {
//         buf[i * ctxt.size() + j].resize(crt_num_);
//         for (int k = 0; k < crt_num_; k++) {
//           // std::cout << i << "," << j << "," << k << std::endl;
//           buf[i * ctxt.size() + j][k] = recv_partial[j * crt_num_ + k];
//         }
//       }
//     }
//   }
//   metrics[0].mpi_time += MPI_Wtime();
//   // double encoding = 0.0;
//   // for (int i = 0; i < crt_num_; i++) {
//   //   encoding += this->get_hmme()[i]->get_encoding_time();
//   // }
//   // metrics[0].runtime_encoding_time = encoding;
// }

// ctxt 2,5
// hfc2 5

// void mpi_bgv_hCifar10_server::hfc2_server(
//     std::vector<std::vector<helib::Ctxt>> &ctxt,
//     const std::vector<helib::Ctxt> &hfc2,
//     std::vector<std::vector<std::string>> &buf, MPI_Comm comm, int rank,
//     int root) const {
//   auto &metrics = const_cast<std::vector<metric> &>(metrics_);
//   std::vector<std::string> tmp;
//   int p;
//   MPI_Comm_size(comm, &p);
//   // buf.resize(ctxt.size() * p);
//   const double MB = static_cast<double>(1l << 20l);
//   // std::cout << p << std::endl;
//   metrics[0].hfc2_time -= MPI_Wtime();
// #pragma omp parallel for collapse(2)
//   for (int j = 0; j < ctxt.size(); j++) {
//     for (int i = 0; i < crt_num_; i++) {
//       metrics[i].fc2_noise = log2_noise_bound(ctxt[j][i]);
//       // metrics[i].hfc2_time -= MPI_Wtime();
//       hmmes_[i]->cgemm(MATRIX_TYPE::SQUARE, ctxt[j][i], hfc2[i], ctxt[j][i]);
//       // metrics[i].hfc2_time += MPI_Wtime();
//       // INTEL_ITT_TASK_END(Cifar10_forward_test); //! ITT - End
//       metrics[i].fc2_noise =
//           log2_noise_bound(ctxt[j][i]) - metrics[i].fc2_noise;
//       metrics[i].final_noise = log2_noise_bound(ctxt[j][i]);
//       metrics[i].left_cap = ctxt[j][i].capacity();
//     }
//   }
//   metrics[0].hfc2_time += MPI_Wtime();
//   for (int j = 0; j < ctxt.size(); j++) {
//     for (int i = 0; i < crt_num_; i++) {
//       std::stringstream ss;
//       ctxt[j][i].writeTo(ss);
//       tmp.push_back(ss.str());
//     }
//   }
//   if (rank == 0)
//     std::cout << "ctxt_result_size:"
//               << static_cast<double>(tmp[0].size() * sizeof(char)) / MB <<
//               "MB"
//               << std::endl;
//   // MPI_Barrier(comm);
//   metrics[0].mpi_time -= MPI_Wtime();
//   if (rank != root) {
//     MPI_Send_Strings(tmp, root, rank, comm);
//   }
//   if (rank == root) {
//     // std::cout << ctxt.size() << "_" << ctxt[0].size() << std::endl;
//     for (int j = 0; j < ctxt.size(); j++) {
//       buf[j].resize(crt_num_);
//       for (int k = 0; k < crt_num_; k++) {
//         // std::cout << j << "." << k << std::endl;
//         buf[j][k] = tmp[j * crt_num_ + k];
//       }
//     }
//     // std::cout << "buf0_finished" << std::endl;
//     for (int i = 1; i < p; i++) {
//       std::vector<std::string> recv_partial;
//       MPI_Recv_Strings(recv_partial, crt_num_ * ctxt.size(), i, i, comm);
//       for (int j = 0; j < ctxt.size(); j++) {
//         buf[i * ctxt.size() + j].resize(crt_num_);
//         for (int k = 0; k < crt_num_; k++) {
//           // std::cout << i << "," << j << "," << k << std::endl;
//           buf[i * ctxt.size() + j][k] = recv_partial[j * crt_num_ + k];
//         }
//       }
//     }
//   }
//   metrics[0].mpi_time += MPI_Wtime();
//   // double encoding = 0.0;
//   // for (int i = 0; i < crt_num_; i++) {
//   //   encoding += this->get_hmme()[i]->get_encoding_time();
//   // }
//   // metrics[0].runtime_encoding_time = encoding;
// }

// Final fully-connected layer. ctxt is [batch][crt] ciphertexts, hfc2 holds
// the [crt] encrypted FC2 weight blocks. Each ciphertext is multiplied in
// place via a SQUARE-shaped cgemm, serialized, and every rank's serialized
// results are gathered into `buf` on `root` (buf slot = rank*batch + batch_idx).
void mpi_bgv_hCifar10_server::hfc2_server(
    std::vector<std::vector<helib::Ctxt>> &ctxt,
    std::vector<std::vector<std::string>> &buf,
    const std::vector<helib::Ctxt> &hfc2, MPI_Comm comm, int rank,
    int root) const {
  // metrics_ is mutable bookkeeping on a const method; file-wide idiom is
  // to cast away const.
  auto &metrics = const_cast<std::vector<metric> &>(metrics_);
  std::vector<std::string> tmp; // this rank's serialized results, row-major
  int p;
  MPI_Comm_size(comm, &p);
  buf.resize(ctxt.size() * p); // one slot per (rank, batch) pair
  const double MB = static_cast<double>(1l << 20l);
  // std::cout << p << std::endl;
  metrics[0].hfc2_time -= MPI_Wtime();
#pragma omp parallel for collapse(2)
  for (int j = 0; j < ctxt.size(); j++) {
    for (int i = 0; i < crt_num_; i++) {
      // NOTE(review): under the collapsed parallel for, every j iteration of
      // a given i writes metrics[i] concurrently -- data race, last writer
      // wins. Acceptable only if these metrics are best-effort; confirm.
      metrics[i].fc2_noise = log2_noise_bound(ctxt[j][i]);
      // metrics[i].hfc2_time -= MPI_Wtime();
      hmmes_[i]->cgemm(MATRIX_TYPE::SQUARE, ctxt[j][i], hfc2[i], ctxt[j][i]);
      // metrics[i].hfc2_time += MPI_Wtime();
      // INTEL_ITT_TASK_END(mnist_forward_test); //! ITT - End
      metrics[i].fc2_noise =
          log2_noise_bound(ctxt[j][i]) - metrics[i].fc2_noise;
      metrics[i].final_noise = log2_noise_bound(ctxt[j][i]);
      metrics[i].left_cap = ctxt[j][i].capacity();
    }
  }
  metrics[0].hfc2_time += MPI_Wtime();
  // Serialize all local results (batch-major, CRT-minor -- same order the
  // root uses when unpacking below).
  for (int j = 0; j < ctxt.size(); j++) {
    for (int i = 0; i < crt_num_; i++) {
      std::stringstream ss;
      ctxt[j][i].writeTo(ss);
      tmp.push_back(ss.str());
    }
  }
  if (rank == 0)
    std::cout << "ctxt_result_size:"
              << static_cast<double>(tmp[0].size() * sizeof(char)) / MB << "MB"
              << std::endl;
  // MPI_Barrier(comm);
  metrics[0].mpi_time -= MPI_Wtime();
  if (rank != root) {
    MPI_Send_Strings(tmp, root, rank, comm);
  }
  if (rank == root) {
    // std::cout << ctxt.size() << "_" << ctxt[0].size() << std::endl;
    // Root's own results occupy buf[0 .. ctxt.size()).
    for (int j = 0; j < ctxt.size(); j++) {
      buf[j].resize(crt_num_);
      for (int k = 0; k < crt_num_; k++) {
        // std::cout << j << "." << k << std::endl;
        buf[j][k] = tmp[j * crt_num_ + k];
      }
    }
    // std::cout << "buf0_finished" << std::endl;
    // One message per peer rank i, unpacked into buf[i*ctxt.size() + j].
    for (int i = 1; i < p; i++) {
      std::vector<std::string> recv_partial;
      MPI_Recv_Strings(recv_partial, crt_num_ * ctxt.size(), i, i, comm);
      for (int j = 0; j < ctxt.size(); j++) {
        buf[i * ctxt.size() + j].resize(crt_num_);
        for (int k = 0; k < crt_num_; k++) {
          // std::cout << i << "," << j << "," << k << std::endl;
          buf[i * ctxt.size() + j][k] = recv_partial[j * crt_num_ + k];
        }
      }
    }
  }
  metrics[0].mpi_time += MPI_Wtime();
  // double encoding = 0.0;
  // for (int i = 0; i < crt_num_; i++) {
  //   encoding += this->get_hmme()[i]->get_encoding_time();
  // }
  // metrics[0].runtime_encoding_time = encoding;
}

// 2,2,5,121,3
// Full encrypted forward pass: deserialize inputs -> conv -> square ->
// fc1 (reduced over comm_reducfc1) -> square -> fc2 (over comm_reducfc2,
// only on ranks that belong to that communicator). `out` receives the
// serialized fc2 results gathered on the fc2-communicator root. buf_input
// is a 5-D array of serialized ciphertexts; per the file's annotations it
// is shaped 2,2,5,121,3 with axis 2 the CRT index (get_pk(k) is keyed on it).
void mpi_bgv_hCifar10_server::forward_test(
    std::vector<std::vector<std::string>> &out,
    std::vector<std::vector<std::vector<std::vector<std::vector<std::string>>>>>
        &buf_input,
    const std::vector<std::vector<std::vector<std::vector<helib::Ctxt>>>>
        &hconv,
    const std::vector<std::vector<helib::Ctxt>> &hfc1,
    const std::vector<helib::Ctxt> &hfc2) const {
  MPI_Barrier(MPI_COMM_WORLD);
  MPI_Pcontrol(1); //! Re-enable mpiP profiling (MPI_Pcontrol(0) below disables)
  auto &metrics = const_cast<std::vector<metric> &>(metrics_);
  // Reset metrics
  for (auto &metric : metrics) {
    metric.reset();
  }
  int rank, p;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &p);
  std::vector<std::vector<std::vector<std::vector<std::vector<helib::Ctxt>>>>>
      ctxt;
  ctxt.resize(buf_input.size());
  if (rank == 0)
    std::cout << "forward_test_" << buf_input.size() << ","
              << buf_input[0].size() << "," << buf_input[0][0].size() << ","
              << buf_input[0][0][0].size() << ","
              << buf_input[0][0][0][0].size() << std::endl;
  metrics[0].initinput_time -= MPI_Wtime();
  // metrics[0].Deserialization_time -= MPI_Wtime();
  // #pragma omp parallel
  // Pass 1 (serial): size the 5-D ctxt container and default-construct one
  // ciphertext per input string (public key chosen by CRT index k) so the
  // parallel pass below can assign into pre-existing slots.
  for (int i = 0; i < buf_input.size(); i++) {
    ctxt[i].resize(buf_input[i].size());
    for (int j = 0; j < buf_input[0].size(); j++) {
      ctxt[i][j].resize(buf_input[i][j].size());
      for (int k = 0; k < buf_input[i][j].size(); k++) {
        ctxt[i][j][k].resize(buf_input[i][j][k].size());
        for (int l = 0; l < buf_input[i][j][k].size(); l++) {
          for (int m = 0; m < buf_input[i][j][k][l].size(); m++) {
            ctxt[i][j][k][l].emplace_back(get_pk(k));
          }
        }
      }
    }
  }
  // Pass 2 (parallel): deserialize every string into its ciphertext slot.
#pragma omp parallel for collapse(3)
  for (int i = 0; i < buf_input.size(); i++) {
    for (int j = 0; j < buf_input[0].size(); j++) {
      for (int k = 0; k < buf_input[0][0].size(); k++) {
        for (int l = 0; l < buf_input[0][0][0].size(); l++) {
          for (int m = 0; m < buf_input[0][0][0][0].size(); m++) {
            std::stringstream ss;
            ss << buf_input[i][j][k][l][m];
            ctxt[i][j][k][l][m] = helib::Ctxt::readFrom(ss, get_pk(k));
          }
        }
      }
    }
  }
  // // metrics[0].Deserialization_time += MPI_Wtime();
  // for (int i = 0; i < buf_input.size(); i++) {
  //   ctxt[i].resize(buf_input[i].size());
  //   for (int j = 0; j < buf_input[i].size(); j++) {
  //     for (int k = 0; k < buf_input[i][j].size(); k++) {
  //       std::stringstream ss;
  //       ss << buf_input[i][j][k];
  //       ctxt[i][j].push_back(helib::Ctxt::readFrom(ss, get_pk(j)));
  //     }
  //   }
  // }
  // MPI_Barrier(MPI_COMM_WORLD);
  if (rank == COMM_WORLD_ROOT)
    std::cout << "readinput" << rank << "fininsh" << std::endl;
  // MPI_Barrier(MPI_COMM_WORLD);
  metrics[0].initinput_time += MPI_Wtime();
  metrics[0].forword_test_time -= MPI_Wtime();
  // Convolution layer.
  std::vector<std::vector<std::vector<helib::Ctxt>>> ct;
  // std::vector<std::vector<helib::Ctxt>> ct1;
  hconv_server(ct, ctxt, hconv);
  if (rank == COMM_WORLD_ROOT)
    std::cout << "hconv" << rank << "fininsh" << std::endl;
  // MPI_Barrier(MPI_COMM_WORLD);

  // sqr1
  hsqr1_server(ct);
  if (rank == COMM_WORLD_ROOT)
    std::cout << "hsqr1" << rank << "fininsh" << std::endl;
  // MPI_Barrier(MPI_COMM_WORLD);

  // hfc1: ranks are re-indexed within the fc1 reduction communicator.
  int rank_fc1;
  std::vector<std::vector<helib::Ctxt>> ct1;
  MPI_Comm_rank(comm_reducfc1, &rank_fc1);
  hfc1_server(ct, hfc1, ct1, comm_reducfc1, rank_fc1, 0);
  if (rank == COMM_WORLD_ROOT)
    std::cout << "hfc1" << rank << "fininsh" << std::endl;
  // MPI_Barrier(MPI_COMM_WORLD);

  // sqr2: only ranks belonging to the fc2 communicator participate.
  if (comm_reducfc2 != MPI_COMM_NULL) {
    hsqr2_server(ct1);
    if (rank == COMM_WORLD_ROOT)
      std::cout << "hsqr2" << rank << "fininsh" << std::endl;
    // MPI_Barrier(comm_reducfc2);
  }

  // hfc2
  // MPI_Barrier(MPI_COMM_WORLD);
  if (comm_reducfc2 != MPI_COMM_NULL) {
    int rank_fc2;
    MPI_Comm_rank(comm_reducfc2, &rank_fc2);
    hfc2_server(ct1, out, hfc2, comm_reducfc2, rank_fc2, 0);
    if (rank == COMM_WORLD_ROOT)
      std::cout << "hfc2" << rank << "fininsh" << std::endl;
  }
  MPI_Pcontrol(0); //! Deactivate mpiP
  metrics[0].forword_test_time += MPI_Wtime();
  MPI_Barrier(MPI_COMM_WORLD);
}

// //4,5,49,4 ->5,49
// //8,5,49,4 ->2,5,49
// 32,2,5,121,3 -> 2,2,5,121,3
// Scatter the encrypted input over all ranks. The root serializes, for each
// destination rank i, the (k_nblk x n_nblk x crt x conv x FC_IN) slice it
// owns and sends it as one flat message; each receiver unpacks its message
// into the 5-D `buf`. The flat index written below and the num++ read order
// (m, l, kcrt, j, in) use the same row-major layout, so pack and unpack agree.
void mpi_bgv_hCifar10_server::send_input_p2p(
    const encrypted_input<helib::BGV> &ctxt_in, MPI_Comm comm,
    std::vector<std::vector<std::vector<std::vector<std::vector<std::string>>>>>
        &buf,
    int root) const {
  int rank, p;
  MPI_Comm_rank(comm, &rank);
  MPI_Comm_size(comm, &p);
  // if (rank == root)
  //   std::cout << hmm_grid_size_[K] << "," << hmm_grid_size_[N] << std::endl;
  // auto nblk = hmm_grid_size_[K] * hmm_grid_size_[N];
  auto nblk = k_nblk * n_nblk;
  buf.resize(k_nblk);
  if (rank == root) {
    auto ctxt = ctxt_in.data;
    for (int i = 0; i < p; i++) {
      // MPI_Request reqs[nblk * crt_num_ * conv_size];
      // const auto kbatch = i / (ctxt.size() / k_nblk);      // 0
      // const auto kconv = i / (ctxt[0].size() / n_nblk);    // i
      // Map destination rank i onto a (batch, conv) tile of the input.
      // NOTE(review): named per the in-line annotations; verify the mapping
      // against how comm ranks are laid out.
      const auto kbatch = i % (ctxt[0].size() / n_nblk); // 0
      const auto kconv = i / (ctxt[0].size() / n_nblk);  // dst
      std::vector<std::string> send(conv_size * nblk * crt_num_ * FC_IN);
      // int num = 0;
#pragma omp parallel
#pragma omp single
#pragma omp taskloop collapse(4)
      for (std::size_t m = 0; m < k_nblk; m++) {
        for (std::size_t l = 0; l < n_nblk; l++) {
          for (std::size_t kcrt = 0; kcrt < crt_num_; kcrt++) {
            for (std::size_t in = 0; in < FC_IN; in++) {
              for (std::size_t j = 0; j < conv_size; j++) {
                // for (int j = 0; j < ctxt[kbatch][kcrt].size(); j++) {
                std::stringstream ss;
                ctxt[kconv * k_nblk + m][kbatch * n_nblk + l][kcrt][j][in]
                    .writeTo(ss);
                // send.push_back(ss.str());
                // Flat row-major slot: (m, l, kcrt, j, in).
                send[m * n_nblk * crt_num_ * conv_size * FC_IN +
                     l * crt_num_ * conv_size * FC_IN +
                     kcrt * conv_size * FC_IN + j * FC_IN + in] = ss.str();
              }
            }
          }
        }
      }
      if (i == root) {
        // Root keeps its own slice: unpack `send` locally, no self-message.
        int num = 0;
        for (int m = 0; m < k_nblk; m++) {
          buf[m].resize(n_nblk);
          for (int l = 0; l < n_nblk; l++) {
            buf[m][l].resize(crt_num_);
            for (int kcrt = 0; kcrt < crt_num_; kcrt++) {
              buf[m][l][kcrt].resize(conv_size);
              for (int j = 0; j < conv_size; j++) {
                for (int in = 0; in < FC_IN; in++) {
                  buf[m][l][kcrt][j].push_back(send[num++]);
                }
              }
            }
          }
        }
      } else {
        MPI_Send_Strings(send, i, i, comm);
        // MPI_Isend_Strings(send, i, i, comm, reqs);
        // MPI_Waitall(p, reqs, MPI_STATUSES_IGNORE);
      }
    }
  } else {
    // Receiver: one flat message from root, unpacked in the same row-major
    // (m, l, crt, j, in) order it was packed in.
    std::vector<std::string> rec;
    MPI_Recv_Strings(rec, nblk * crt_num_ * conv_size * FC_IN, root, rank,
                     comm);
    int num = 0;
    for (std::size_t m = 0; m < k_nblk; m++) {
      buf[m].resize(n_nblk);
      for (std::size_t l = 0; l < n_nblk; l++) {
        buf[m][l].resize(crt_num_);
        for (int i = 0; i < crt_num_; i++) {
          buf[m][l][i].resize(conv_size);
          for (int j = 0; j < conv_size; j++) {
            for (int in = 0; in < FC_IN; in++) {
              buf[m][l][i][j].push_back(rec[num++]);
            }
          }
        }
      }
    }
  }
}

// 2,2,5,121,3
// 4,2,5,25,3
void mpi_bgv_hCifar10_server::send_input_p2p_single(
    std::vector<std::string> &send, MPI_Comm comm,
    std::vector<std::vector<std::vector<std::vector<std::vector<std::string>>>>>
        &buf,
    int root, int dst) const {
  // Point-to-point distribution of one destination's serialized input slice.
  // root either keeps `send` locally (dst == root) or ships it to dst; the
  // other side of the pair receives its slice from root.
  // NOTE(review): a non-root rank other than `dst` would block in the
  // receive below -- callers apparently invoke this only on the (root, dst)
  // pair; confirm at the call sites.
  int rank, p;
  MPI_Comm_rank(comm, &rank);
  MPI_Comm_size(comm, &p);
  auto nblk = k_nblk * n_nblk;
  buf.resize(k_nblk);
  // Unpack a flat, row-major string vector into the 5-D layout
  // buf[k_blk][n_blk][crt][conv_idx][fc_in], appending along the last axis.
  // (Previously this loop nest was duplicated verbatim in both branches.)
  const auto unpack = [&](const std::vector<std::string> &flat) {
    std::size_t num = 0;
    for (std::size_t m = 0; m < k_nblk; m++) {
      buf[m].resize(n_nblk);
      for (std::size_t l = 0; l < n_nblk; l++) {
        buf[m][l].resize(crt_num_);
        for (std::size_t kcrt = 0; kcrt < crt_num_; kcrt++) {
          buf[m][l][kcrt].resize(conv_size);
          for (std::size_t j = 0; j < conv_size; j++) {
            for (std::size_t in = 0; in < FC_IN; in++) {
              buf[m][l][kcrt][j].push_back(flat[num++]);
            }
          }
        }
      }
    }
  };
  if (rank == root) {
    if (dst == root) {
      unpack(send); // root keeps its own slice; no self-message
    } else {
      MPI_Send_Strings(send, dst, dst, comm);
    }
  } else {
    std::vector<std::string> rec;
    MPI_Recv_Strings(rec, nblk * crt_num_ * conv_size * FC_IN, root, rank,
                     comm);
    unpack(rec);
  }
  if (rank == 0) {
    // Wall-clock timestamp for per-destination send tracing.
    std::cout.setf(std::ios::fixed, std::ios::floatfield);
    std::cout.precision(2);
    std::cout << dst << "_send_input_time:" << MPI_Wtime() << std::endl;
  }
}

// //4,5,49,4 ->5,49
// 8,5,49,4 ->2,5,49
// void mpi_bgv_hCifar10_server::send_input_p2p_single(
//     const std::vector<std::string> &send, MPI_Comm comm,
//     std::vector<std::vector<std::vector<std::string>>> &buf, int root,
//     int dst) const {
//   int rank, p;
//   MPI_Comm_rank(comm, &rank);
//   MPI_Comm_size(comm, &p);
//   auto nblk = k_nblk * n_nblk;
//   buf.resize(nblk);
//   if (rank == root) {
//     if (dst == root) {
//       int num = 0;
//       for (int l = 0; l < nblk; l++) {
//         buf[l].resize(crt_num_);
//         for (int kcrt = 0; kcrt < crt_num_; kcrt++)
//           for (int j = 0; j < conv_size; j++) {
//             buf[l][kcrt].push_back(send[num++]);
//           }
//       }
//     } else {
//       MPI_Send_Strings(send, dst, dst, comm);
//     }
//   } else {
//     std::vector<std::string> rec;
//     MPI_Recv_Strings(rec, nblk * crt_num_ * conv_size, root, rank, comm);
//     int num = 0;
//     for (int l = 0; l < nblk; l++) {
//       buf[l].resize(crt_num_);
//       for (int i = 0; i < crt_num_; i++) {
//         for (int j = 0; j < conv_size; j++) {
//           buf[l][i].push_back(rec[num++]);
//         }
//       }
//     }
//   }
//   if (rank == 0)
//     std::cout << std::setprecision(9) << dst
//               << "_send_input_time:" << MPI_Wtime() << std::endl;
// }

// void mpi_bgv_hCifar10_server::send_input_p2p_single(
//     const std::vector<std::string> &send, MPI_Comm comm,
//     std::vector<std::vector<std::vector<std::string>>> &buf, int root, int
//     dst, std::vector<int> &buffer_attached_size, std::vector<char *>
//     &buffer_attached, int sendbatchsize, MPI_Request *reqs) const {
//   int rank, p;
//   MPI_Comm_rank(comm, &rank);
//   MPI_Comm_size(comm, &p);
//   auto nblk = k_nblk * n_nblk;
//   buf.resize(nblk);
//   if (rank == root) {
//     std::string pack_buff;
//     std::vector<int> size_buff;
//     size_buff.resize(nblk * crt_num_ * conv_size);
//     if (dst == root) {
//       int num = 0;
//       for (int l = 0; l < nblk; l++) {
//         buf[l].resize(crt_num_);
//         for (int kcrt = 0; kcrt < crt_num_; kcrt++)
//           for (int j = 0; j < conv_size; j++) {
//             buf[l][kcrt].push_back(send[num++]);
//           }
//       }
//     } else {
//       // MPI_Request req = MPI_REQUEST_NULL;
//       // MPI_Request req2 = MPI_REQUEST_NULL;
//       for (auto i = 0; i < send.size(); i++) {
//         size_buff[i] = send[i].size();
//         pack_buff += send[i];
//       }
//       int *size = size_buff.data();
//       int buffer_attached_size2 = 2 * MPI_BSEND_OVERHEAD +
//                                   pack_buff.size() * sizeof(char) +
//                                   send.size() * sizeof(int);
//       // char *buffer_attached2 = (char *)malloc(buffer_attached_size2);
//       // MPI_Buffer_attach(buffer_attached2, buffer_attached_size2);
//       buffer_attached[dst] = (char *)malloc(buffer_attached_size2);
//       MPI_Buffer_attach(buffer_attached[dst], buffer_attached_size2);
//       // MPI_PackIBsend_Strings(send, pack_buff, size_buff.data(), dst, dst,
//       // comm,
//       //                        &req, &req2);
//       MPI_Ibsend(size, send.size(), MPI_INT, dst, dst, comm,
//                  &reqs[(dst) % sendbatchsize]);
//       MPI_Ibsend(const_cast<char *>(pack_buff.data()), pack_buff.size(),
//                  MPI_CHAR, dst, dst, comm,
//                  &reqs[((dst) % sendbatchsize + sendbatchsize)]);
//       // MPI_Wait(&req, MPI_STATUS_IGNORE);
//       // MPI_Wait(&req2, MPI_STATUS_IGNORE);
//       // MPI_Buffer_detach(&buffer_attached2, &buffer_attached_size2);
//       // free(buffer_attached2);
//       // MPI_Buffer_detach(&buffer_attached[dst], &buffer_attached_size2);
//       // free(buffer_attached[dst]);
//       if (!((dst + 1) % sendbatchsize)) {
//         // MPI_Wait(&req, MPI_STATUS_IGNORE);
//         MPI_Waitall(sendbatchsize, reqs, MPI_STATUSES_IGNORE);
//         for (int i = dst - sendbatchsize + 1; i <= dst; i++) {
//           if (i) {
//             MPI_Buffer_detach(&buffer_attached[i], &buffer_attached_size[i]);
//             free(buffer_attached[i]);
//           }
//         }
//       }
//       // MPI_PackBsend_Strings(send, pack_buff, size_buff.data(), dst, dst,
//       // comm); MPI_Bsend_Strings(send, dst, dst, comm);
//     }
//   } else {
//     std::vector<std::string> rec;
//     // MPI_Recv_Strings(rec, nblk * crt_num_ * conv_size, root, rank, comm);
//     MPI_PackRecv_Strings(rec, nblk * crt_num_ * conv_size, root, rank, comm);
//     int num = 0;
//     for (int l = 0; l < nblk; l++) {
//       buf[l].resize(crt_num_);
//       for (int i = 0; i < crt_num_; i++) {
//         for (int j = 0; j < conv_size; j++) {
//           buf[l][i].push_back(rec[num++]);
//         }
//       }
//     }
//   }
//   if (rank == 0) {
//     // send.clear();
//     std::cout << std::setprecision(9) << dst
//               << "_send_input_time:" << MPI_Wtime() << std::endl;
//   }
// }

// // 5,49,4
// 5,121,32 -> 5,121,2

// 5,25,50,3 -> 5,25,4,3
void mpi_bgv_hCifar10_server::send_hconv_p2p(
    const std::vector<std::vector<std::vector<std::vector<helib::Ctxt>>>> &ctxt,
    MPI_Comm comm,
    std::vector<std::vector<std::vector<std::vector<std::string>>>> &buf,
    int root, int rank, int crt_num) const {
  // Scatter serialized HConv ciphertexts from `root` to every rank in `comm`.
  // Rank i is given the k_nblk block rows [i*k_nblk, (i+1)*k_nblk) of `ctxt`;
  // slots beyond ctxt[0][0].size() are padded with a zero ciphertext so every
  // rank receives the same message count.  On exit `buf` holds this rank's
  // strings indexed [crt prime][conv index][block][FC_IN position], filled in
  // the same j/k/l/m order the sender flattened them.
  // NOTE(review): the `crt_num` parameter is only used for the receiver-side
  // resize while every loop bound is the member crt_num_ — presumably the two
  // are always equal; confirm against callers.
  int p;
  MPI_Comm_size(comm, &p);
  if (rank == root) {
    // std::cout << "send_hconv_p2p_" << ctxt.size() << "," << ctxt[0].size()
    // << "," << ctxt[0][0].size() << "," << ctxt[0][0][0].size()
    // << std::endl;
    for (int i = 0; i < p; i++) {
      // std::cout << "send_hconv_to_" << i << std::endl;
      std::vector<std::string> tmp;
      // One request per string sent below.  Runtime-sized array is a VLA —
      // a compiler extension, not standard C++.
      MPI_Request reqs[crt_num_ * conv_size * k_nblk * FC_IN];
      // Serialize destination i's slice.  Note this re-serializes for every
      // destination, including root's own slice.
      for (int j = 0; j < crt_num_; j++) {
        for (int k = 0; k < conv_size; k++) {
          for (int l = 0; l < k_nblk; l++) {
            for (int m = 0; m < FC_IN; m++) {
              std::stringstream ss;
              // helib::Ctxt zero(get_pk(j));
              // zero.DummyEncrypt(NTL::ZZX(0l));
              if (i * k_nblk + l < ctxt[0][0].size())
                ctxt[j][k][i * k_nblk + l][m].writeTo(ss);
              else {
                // Pad with a zero ciphertext built by subtracting a
                // ciphertext from itself (keeps matching parameters).
                helib::Ctxt t(ctxt[j][0][0][0]);
                t -= ctxt[j][0][0][0];
                t.writeTo(ss);
              }
              // zero.writeTo(ss);
              tmp.push_back(ss.str());
            }
          }
        }
      }
      if (i == root) {
        // Root keeps its own slice locally instead of self-sending.
        // std::cout << "root_hconv_begin" << i << std::endl;
        buf.resize(crt_num_);
        int num = 0;
        for (int j = 0; j < crt_num_; j++) {
          buf[j].resize(ctxt[j].size());
          // NOTE(review): this unpack walks ctxt[j].size() conv entries but
          // tmp was filled with conv_size of them — confirm the two agree.
          for (int k = 0; k < ctxt[j].size(); k++) {
            buf[j][k].resize(k_nblk);
            for (int l = 0; l < k_nblk; l++) {
              for (int m = 0; m < FC_IN; m++) {
                buf[j][k][l].push_back(tmp[num++]);
              }
            }
          }
        }
      } else {
        // MPI_Send_Strings(tmp, i, i, comm);
        // Non-blocking sends followed immediately by Waitall — effectively a
        // blocking send of the whole batch, but issued as one group.
        MPI_Isend_Strings(tmp, i, i, comm, reqs);
        MPI_Waitall(crt_num_ * conv_size * k_nblk * FC_IN, reqs,
                    MPI_STATUSES_IGNORE);
      }
    }
  } else {
    // Receiver side: pull this rank's flattened slice and rebuild the
    // 4-level structure in the sender's flatten order.
    // std::cout << "rec_honv_p2p_" << rank << std::endl;
    buf.resize(crt_num);
    std::vector<std::string> tmp;
    MPI_Recv_Strings(tmp, crt_num_ * conv_size * k_nblk * FC_IN, root, rank,
                     comm);
    int num = 0;
    for (int j = 0; j < crt_num_; j++) {
      buf[j].resize(conv_size);
      for (int k = 0; k < conv_size; k++) {
        buf[j][k].resize(k_nblk);
        for (int l = 0; l < k_nblk; l++) {
          for (int m = 0; m < FC_IN; m++) {
            buf[j][k][l].push_back(tmp[num++]);
          }
        }
      }
    }
  }
}

// ctxt[5,49,4] [4,5,49]
void mpi_bgv_hCifar10_server::back_honv_p2p(
    const std::vector<std::vector<std::string>> &ctxt, MPI_Comm comm,
    std::vector<std::vector<std::vector<std::string>>> &buf, int root, int rank,
    int crt_num) const {
  // Gather per-rank HConv result strings back to `root`.
  //
  // Non-root ranks flatten their [crt_num_][conv_size] string matrix in
  // row-major order and send it to root; root collects one matrix per rank
  // into buf[rank][crt][conv].  Root's own contribution is copied locally.
  //
  // @param ctxt  this rank's serialized ciphertexts, [crt_num_][conv_size]
  // @param buf   (root only) gathered results, [p][crt_num_][conv_size]
  int p;
  MPI_Comm_size(comm, &p);
  std::vector<std::string> tmp;
  if (rank != root) {
    for (int i = 0; i < crt_num_; i++) {
      for (int j = 0; j < conv_size; j++) {
        tmp.push_back(ctxt[i][j]);
      }
    }
    MPI_Send_Strings(tmp, root, rank, comm);
  } else {
    buf.resize(p);
    for (int i = 0; i < p; i++) {
      if (i != root) {
        MPI_Recv_Strings(tmp, crt_num_ * conv_size, i, i, comm);
        buf[i].resize(crt_num_);
        for (int j = 0; j < crt_num_; j++) {
          for (int k = 0; k < conv_size; k++) {
            buf[i][j].push_back(tmp[j * conv_size + k]);
          }
        }
      } else {
        // Bug fix: root's own slot is buf[i] (== buf[root]); the previous
        // unconditional buf[0] was only correct when root == 0.  Matches
        // the handling in back_input_server.
        buf[i] = ctxt;
      }
    }
  }
}

// // 5,4
// //5,32 -> 5,2
// 5,50 -> 5,4
void mpi_bgv_hCifar10_server::send_hfc1_p2p(
    std::vector<std::vector<helib::Ctxt>> &ctxt, MPI_Comm comm,
    std::vector<std::string> &buf, int root, int rank, int crt_num) const {
  // Scatter serialized HFC1 ciphertexts from `root` to every rank in `comm`.
  // Rank i receives the k_nblk columns [i*k_nblk, (i+1)*k_nblk) of each CRT
  // row of `ctxt`; columns past ctxt[0].size() are padded with a zero
  // ciphertext so every rank receives crt_num_ * k_nblk strings.  On exit
  // `buf` holds this rank's flattened slice in [crt prime][block] order.
  // NOTE(review): `crt_num` is unused here (loops use the member crt_num_) —
  // presumably kept for signature symmetry with the other p2p helpers.
  int p;
  MPI_Comm_size(comm, &p);
  if (rank == root) {
    // std::cout << "send_hfc1_p2p_" << ctxt.size() << "," << ctxt[0].size()
    //           << std::endl;
    for (int i = 0; i < p; i++) {
      std::vector<std::string> tmp;
      // One request per string; runtime-sized array is a VLA (compiler
      // extension, not standard C++).
      MPI_Request reqs[crt_num_ * k_nblk];
      for (int j = 0; j < crt_num_; j++) {
        for (int k = 0; k < k_nblk; k++) {
          std::stringstream ss;
          // helib::Ctxt zero(get_pk(j));
          // zero.DummyEncrypt(NTL::ZZX(0l));
          if (i * k_nblk + k < ctxt[0].size())
            ctxt[j][i * k_nblk + k].writeTo(ss);
          else {
            // Pad with a zero ciphertext (ciphertext minus itself).
            helib::Ctxt t(ctxt[j][0]);
            t -= ctxt[j][0];
            t.writeTo(ss);
          }
          tmp.push_back(ss.str());
        }
      }
      if (i == root) {
        // Root keeps its own slice locally instead of self-sending.
        // std::cout << "root_hfc1_begin" << i << std::endl;
        int num = 0;
        for (int j = 0; j < crt_num_; j++) {
          for (int k = 0; k < k_nblk; k++) {
            buf.push_back(tmp[num++]);
          }
        }
      } else {
        // MPI_Send_Strings(tmp, i, i, comm);
        // Non-blocking batch send, completed immediately with Waitall.
        MPI_Isend_Strings(tmp, i, i, comm, reqs);
        MPI_Waitall(crt_num_ * k_nblk, reqs, MPI_STATUSES_IGNORE);
      }
    }
  } else {
    // Receiver side: take the flattened slice as-is.
    // std::cout << "rec_hfc1_p2p_" << rank << std::endl;
    MPI_Recv_Strings(buf, crt_num_ * k_nblk, root, rank, comm);
  }
}

void mpi_bgv_hCifar10_server::back_hfc1_p2p(
    const std::vector<std::string> &ctxt, MPI_Comm comm,
    std::vector<std::vector<std::string>> &buf, int root, int rank,
    int crt_num) const {
  // Gather per-rank HFC1 result strings (one per CRT prime) back to `root`.
  //
  // Non-root ranks send their crt_num_ strings; root collects one vector per
  // rank into buf[rank][crt].  Root's own contribution is copied locally.
  //
  // @param ctxt  this rank's serialized ciphertexts, [crt_num_]
  // @param buf   (root only) gathered results, [p][crt_num_]
  int p;
  MPI_Comm_size(comm, &p);
  std::vector<std::string> tmp;
  if (rank != root) {
    for (int i = 0; i < crt_num_; i++) {
      tmp.push_back(ctxt[i]);
    }
    MPI_Send_Strings(tmp, root, rank, comm);
  } else {
    buf.resize(p);
    for (int i = 0; i < p; i++) {
      if (i != root) {
        MPI_Recv_Strings(tmp, crt_num_, i, i, comm);
        for (int j = 0; j < crt_num_; j++) {
          // Removed a dead stringstream round-trip that formatted tmp[j]
          // and then discarded the result.
          buf[i].push_back(tmp[j]);
        }
      } else {
        // Bug fix: root's slot is buf[i] (== buf[root]), not buf[0];
        // the old code was only correct when root == 0.
        buf[i] = ctxt;
      }
    }
  }
}

// 5
void mpi_bgv_hCifar10_server::bcast_hfc2_client(
    const std::vector<helib::Ctxt> &ctxt, MPI_Comm comm,
    std::vector<std::string> &buf, int root, int rank) const {
  // Broadcast the serialized HFC2 ciphertexts (one per CRT prime) from
  // `root` to every rank of `comm`.  Does nothing for ranks outside `comm`.
  if (comm == MPI_COMM_NULL)
    return;
  if (rank == root) {
    // Serialize root's ciphertexts before the collective.
    for (int crt = 0; crt < crt_num_; crt++) {
      std::stringstream serialized;
      ctxt[crt].writeTo(serialized);
      buf.emplace_back(serialized.str());
    }
  }
  MPI_Bcast_Strings(buf, root, comm);
}

// //5,49
// //4,5,49
// 5,25,4,3
void mpi_bgv_hCifar10_server::bcast_hconv_server(
    const std::vector<std::vector<std::vector<std::vector<std::string>>>> &in,
    MPI_Comm comm,
    std::vector<std::vector<std::vector<std::vector<std::string>>>> &buf,
    int root, int rank, int crt_num) const {
  // Broadcast the 4-level HConv string structure from `root` to all ranks of
  // `comm`: root flattens `in` in [crt][conv][block][FC_IN] order, the flat
  // vector is broadcast, then every rank (root included) rebuilds `buf` with
  // the same shape and ordering.
  // Removed: an unused rank_global lookup and a per-string stringstream
  // round-trip that only produced an extra copy of each string.
  std::vector<std::string> tmp;
  if (rank == root) {
    tmp.reserve(static_cast<size_t>(crt_num_) * conv_size * k_nblk * FC_IN);
    for (int i = 0; i < crt_num_; i++) {
      for (int j = 0; j < conv_size; j++) {
        for (int k = 0; k < k_nblk; k++) {
          for (int l = 0; l < FC_IN; l++) {
            tmp.push_back(in[i][j][k][l]);
          }
        }
      }
    }
  }
  MPI_Bcast_Strings(tmp, root, comm);
  // Unflatten into buf on every rank; tmp is local so its strings can be
  // moved out.
  int num = 0;
  buf.resize(crt_num_);
  for (int i = 0; i < crt_num_; i++) {
    buf[i].resize(conv_size);
    for (int j = 0; j < conv_size; j++) {
      buf[i][j].resize(k_nblk);
      for (int k = 0; k < k_nblk; k++) {
        for (int l = 0; l < FC_IN; l++) {
          buf[i][j][k].push_back(std::move(tmp[num++]));
        }
      }
    }
  }
}

// 5,49 -> 4,5,49
void mpi_bgv_hCifar10_server::back_hconv_server(
    const std::vector<std::vector<std::string>> &ctxt, MPI_Comm comm,
    std::vector<std::vector<std::vector<std::string>>> &buf, int root, int rank,
    int crt_num) const {
  // Gather each rank's [crt_num_][conv_size] HConv string matrix to `root`
  // into buf[rank][crt][conv].  Root's own contribution is copied locally.
  int p;
  MPI_Comm_size(comm, &p);
  if (rank != root) {
    std::vector<std::string> send;
    for (int i = 0; i < crt_num_; i++)
      for (int j = 0; j < conv_size; j++) {
        send.push_back(ctxt[i][j]);
      }
    std::cout << "hconv_backpush_" << rank << "finished" << std::endl;
    std::cout << "hconv_backsend_" << rank << "start" << std::endl;
    MPI_Send_Strings(send, root, rank, comm);
    std::cout << "hconv_backsend_" << rank << "finished" << std::endl;
  } else {
    std::vector<std::string> rec;
    buf.resize(p);
    for (int i = 0; i < p; i++) {
      if (i != root) {
        buf[i].resize(crt_num_);
        std::cout << "hconv_backrec_" << i << "start" << std::endl;
        MPI_Recv_Strings(rec, crt_num_ * conv_size, i, i, comm);
        std::cout << "hconv_backrec_" << i << "finished" << std::endl;
        for (int j = 0; j < crt_num_; j++)
          // Bug fix: the inner condition tested `i < conv_size` instead of
          // `k < conv_size`, so k grew unbounded and rec[j*conv_size+k]
          // ran past the end of the buffer.  Also dropped a dead
          // stringstream that formatted the string and discarded it.
          for (int k = 0; k < conv_size; k++) {
            buf[i][j].push_back(rec[j * conv_size + k]);
          }
      } else {
        // Bug fix: root's slot is buf[i] (== buf[root]); the old buf[0]
        // was only correct when root == 0.
        buf[i] = ctxt;
      }
    }
  }
}

// 5
void mpi_bgv_hCifar10_server::bcast_hfc1_server(
    const std::vector<std::string> &in, MPI_Comm comm,
    std::vector<std::string> &buf, int root, int rank, int crt_num) const {
  // Broadcast the first crt_num_ * k_nblk HFC1 strings from `root` to every
  // rank of `comm`; root copies them into `buf` first, everyone ends with
  // the same flat [crt * k_nblk + blk] layout.
  if (rank == root) {
    for (int i = 0; i < crt_num_; i++) {
      for (int j = 0; j < k_nblk; j++) {
        // Direct copy; the previous stringstream round-trip allocated and
        // copied each string an extra time for no behavioral effect.
        buf.push_back(in[i * k_nblk + j]);
      }
    }
  }
  MPI_Bcast_Strings(buf, root, comm);
}

// 5,49 -> 16,5,49
void mpi_bgv_hCifar10_server::back_input_server(
    const std::vector<std::vector<std::string>> &ctxt, MPI_Comm comm,
    std::vector<std::vector<std::vector<std::string>>> &buf, int root, int rank,
    int crt_num) const {
  // Gather each rank's encrypted-input string matrix to `root` into
  // buf[rank][crt][...].  Root copies its own matrix locally.
  // Removed: an unused local `size` (referenced only by commented-out code)
  // and a dead stringstream round-trip in the unpack loop.
  int p;
  MPI_Comm_size(comm, &p);
  std::vector<std::string> tmp;
  if (rank != root) {
    // NOTE(review): this debug print indexes ctxt[2] unconditionally —
    // assumes crt_num >= 3; confirm, or it is out of range on smaller
    // configurations.
    std::cout << crt_num << " " << rank << " " << ctxt[2].size() << std::endl;
    for (int i = 0; i < crt_num_; i++)
      for (size_t j = 0; j < ctxt[i].size(); j++) {
        tmp.push_back(ctxt[i][j]);
      }
    std::cout << "input_backsend_" << rank << " " << tmp.size() << "start"
              << std::endl;
    MPI_Send_Strings(tmp, root, rank, comm);
    std::cout << "input_backsend_" << rank << "finished" << std::endl;
  } else {
    buf.resize(p);
    for (int i = 0; i < p; i++) {
      if (i != root) {
        buf[i].resize(crt_num_);
        // NOTE(review): the receive count is crt_num_ * conv_size but the
        // unpack walks root's own ctxt[j].size() — assumes every rank's row
        // sizes equal conv_size; confirm against the senders.
        MPI_Recv_Strings(tmp, crt_num_ * conv_size, i, i, comm);
        for (int j = 0; j < crt_num_; j++)
          for (size_t k = 0; k < ctxt[j].size(); k++) {
            buf[i][j].push_back(tmp[j * ctxt[j].size() + k]);
          }
      } else {
        buf[i] = ctxt;
      }
    }
  }
}

void mpi_bgv_hCifar10_server::report_metrics(std::ostream &ss) const {
  // Write a human-readable timing/noise report for this rank's run of the
  // encrypted Cifar10 pipeline to `ss`, using the first entry of metrics_.
  // Typo fix: "nosie" -> "noise" in the report labels.
  // clang-format off
  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  ss << "================= Cipher Cifar10("<< rank << ")=================\n";
  ss << "* HMM Encoding       : " << metrics_[0].hmm_encoding_time << " s\n";
  ss << "* HCifar10 Time          : " << metrics_[0].forword_test_time << " s\n";
  ss << "* Init_Input Time    : " << metrics_[0].initinput_time << " s\n";
  ss << "* MPI Time           : " << metrics_[0].mpi_time << " s\n";
  // Per-layer timings (round 0 only; per-round reporting is available via
  // metrics_[i] if ever needed).
  ss << "* HConv time     : " << metrics_[0].hconv_time << " s\n";
  ss << "* HSquare1 time  : " << metrics_[0].hsq1_time << " s\n";
  ss << "* HFC1 time      : " << metrics_[0].hfc1_time << " s\n";
  ss << "* HSquare2 time  : " << metrics_[0].hsq2_time << " s\n";
  ss << "* HFC2 time      : " << metrics_[0].hfc2_time << " s\n";
  ss << "* Final noise bound  : " << metrics_[0].final_noise << std::endl;
  // Per-layer noise breakdown is available in metrics_[0] (init/conv/sq1/
  // fc1/sq2/fc2 noise fields) should it need to be re-enabled.
  ss << "* Left capacity      : " << metrics_[0].left_cap << std::endl;
  ss << "==============================================\n";
  // clang-format on
}
