
#include "CNN/mpi_mnist_base2.hpp"
#include "CNN/emnist.hpp"
#include "CNN/mpi_bgv_mnist_base2.hpp"
#include "Ctxt.h"
#include "h_mat_mul.hpp"
#include "intel_itt_wrapper.hpp"
#include "keys.h"
#include "mem_usage.hpp"
#include "mpi/mpi_util.hpp"
#include <filesystem>
#include <mpi.h>
#include <sstream>
#include <torch/csrc/autograd/generated/variable_factories.h>
#include <unistd.h>

INTEL_ITT_DOMAIN_CREATE(mnist_test, "test.cnn.mnist");
INTEL_ITT_STRING_HANDLE_CREATE(hcnn, "HCNN");
INTEL_ITT_STRING_HANDLE_CREATE(hcnn_init, "HCNN-Init");
INTEL_ITT_STRING_HANDLE_CREATE(hcnn_inferr, "HCNN-Inferr");

// When true, encodings are recomputed instead of reused.
// NOTE(review): not written anywhere in this translation unit — confirm
// which component toggles it.
bool no_encoding_reuse = false;

// The batch size for training.
const int64_t kTrainBatchSize = 64;

// The batch size for testing.
const int64_t kTestBatchSize = 512;

// The number of epochs to train.
const int64_t kNumberOfEpochs = 10;

// After how many batches to log a new update with the loss value.
const int64_t kLogInterval = 10;

// Explicit instantiations of the MNIST client/server templates for both
// HElib schemes; the template definitions live in the included headers.
template class mpi_h_mnist_client<helib::BGV>;
template class mpi_h_mnist_server<helib::BGV>;
template class mpi_h_mnist_client<helib::CKKS>;
template class mpi_h_mnist_server<helib::CKKS>;

// Rearrange a conv kernel tensor of shape [C_out, 1, K, K] into
// [K*K, C_out, 1, 1]: entry (i*K + j, k, 0, 0) = a[k][0][i][j], i.e. one
// 1x1 per-position filter per kernel cell — the layout conv_nmm() expects.
torch::Tensor init_conv_kernel(const torch::Tensor &a) {
  const int64_t ksz = a.size(2);   // spatial kernel size K
  const int64_t cout = a.size(0);  // number of output channels
  torch::Tensor out = torch::zeros({ksz * ksz, cout, 1, 1});
  std::cout << a.size(0) << "," << a.size(1) << "," << a.size(2) << std::endl;
  for (int64_t i = 0; i < ksz; i++)
    for (int64_t j = 0; j < ksz; j++)
      for (int64_t k = 0; k < cout; k++)
        out[i * ksz + j][k][0][0] = a[k][0][i][j];
  // Plain return enables NRVO; `return std::move(local)` pessimized it
  // (clang-tidy: performance-no-automatic-move / -Wpessimizing-move).
  return out;
}

// Split the fc1 weight matrix column-wise into `conv_channels` square
// [a.size(0) x a.size(0)] slices, one slice per conv output channel.
torch::Tensor init_fc1_kernel(const torch::Tensor &a, int conv_channels) {
  const int64_t dim = a.size(0);
  torch::Tensor out = torch::zeros({conv_channels, dim, dim});
  for (int i = 0; i < conv_channels; i++)
    out[i] =
        a.index({"...", torch::indexing::Slice(i * dim, i * dim + dim)});
  // Plain return enables NRVO; std::move(local) would block copy elision.
  return out;
}

// Copy the fc2 weight matrix unchanged. A zero-padding variant is kept
// below, disabled, for reference.
torch::Tensor init_fc2_kernel(const torch::Tensor &a) {
  torch::Tensor out = a.clone();
  // for (int i = 0; i < a.size(1); i++)
  //   out = torch::cat({out, torch::zeros({1, a.size(1)})}, 0);
  // Plain return enables NRVO; std::move(local) would block copy elision.
  return out;
}

// torch::Tensor im2matrix(const torch::Tensor &x, const torch::Tensor &filter,
//                         int stride) {
//   int out_size = (x.size(2) - filter.size(2)) / stride + 1;
//   int aRow_ = out_size * out_size, aCol_ = x.size(0);
//   // 49,4,64,256
//   torch::Tensor A = torch::zeros(
//       {filter.size(2) * filter.size(2), filter.size(0), aRow_, aCol_});
//   // 4,49,4,64,64
//   torch::Tensor A_ =
//       torch::zeros({aCol_ / T_BATCHSIZE, filter.size(2) * filter.size(2),
//                     filter.size(0), aRow_, T_BATCHSIZE});
//   int sqrfsz2 = filter.size(2) * filter.size(2), fsz0 = filter.size(0),
//       fsz2 = filter.size(2);
//   for (int i = 0; i < sqrfsz2; i++) {
//     for (int j = 0; j < filter.size(0); j++)
//       for (int k = 0; k < aCol_; k++)
//         A.index({i, j, "...", k}) =
//             x.index({k, "...",
//                      torch::indexing::Slice(
//                          i / fsz2, i / fsz2 + stride * out_size, stride),
//                      torch::indexing::Slice(
//                          i % fsz2, i % fsz2 + stride * out_size, stride)})
//                 .reshape({aRow_});
//   }
//   for (int i = 0; i < aCol_ / T_BATCHSIZE; i++) {
//     A_[i] = A.index({"...", torch::indexing::Slice(i * 64, i * 64 + 64, 1)});
//   }
//   return std::move(A_);
// }

// im2col-style expansion of an input batch [B, 1, H, W] for the HE
// matrix-multiply convolution. Produces a tensor of shape
// [B/T_BATCHSIZE, K*K, C_out, out_size^2, T_BATCHSIZE] where
// out_size = (H - K) / stride + 1: for each kernel cell (i/K, i%K) the
// strided window of every image is flattened into one column.
torch::Tensor im2matrix(const torch::Tensor &x, const torch::Tensor &filter,
                        int stride) {
  int out_size = (x.size(2) - filter.size(2)) / stride + 1;
  int aRow_ = out_size * out_size, aCol_ = x.size(0);
  // A: [K*K, C_out, out_size^2, batch]
  torch::Tensor A = torch::zeros(
      {filter.size(2) * filter.size(2), filter.size(0), aRow_, aCol_});
  // A_: same data with the batch axis re-chunked into T_BATCHSIZE slabs:
  // [batch/T_BATCHSIZE, K*K, C_out, out_size^2, T_BATCHSIZE]
  torch::Tensor A_ =
      torch::zeros({aCol_ / T_BATCHSIZE, filter.size(2) * filter.size(2),
                    filter.size(0), aRow_, T_BATCHSIZE});
  int sqrfsz2 = filter.size(2) * filter.size(2);
  int fsz2 = filter.size(2); // fsz0 was computed here but never used
  for (int i = 0; i < sqrfsz2; i++) {
    // Kernel offset (i / fsz2, i % fsz2): gather the strided window of
    // every image into column k of A[i][j].
    for (int j = 0; j < filter.size(0); j++)
      for (int k = 0; k < aCol_; k++)
        A.index({i, j, "...", k}) =
            x.index({k, "...",
                     torch::indexing::Slice(
                         i / fsz2, i / fsz2 + stride * out_size, stride),
                     torch::indexing::Slice(
                         i % fsz2, i % fsz2 + stride * out_size, stride)})
                .reshape({aRow_});
  }
  // Split the batch dimension into T_BATCHSIZE-wide slabs.
  for (int i = 0; i < aCol_ / T_BATCHSIZE; i++) {
    A_[i] = A.index(
        {"...", torch::indexing::Slice(i * T_BATCHSIZE,
                                       i * T_BATCHSIZE + T_BATCHSIZE, 1)});
  }
  // Plain return enables NRVO; std::move(local) would block copy elision.
  return A_;
}

// Ptxt version
// Square activation x^2 — the HE-friendly stand-in for ReLU (plaintext
// version).
torch::Tensor sqractive(const torch::Tensor &x) {
  // torch::square already yields a prvalue; the previous std::move was
  // redundant (clang-tidy: performance-move-const-arg).
  return torch::square(x);
}

// Thin wrapper over torch::matmul, kept for call-site readability.
torch::Tensor gemm(const torch::Tensor &a, const torch::Tensor &b) {
  // matmul returns a prvalue; the previous std::move was redundant.
  return torch::matmul(a, b);
}

// Convolution expressed over the im2matrix()/init_conv_kernel() layouts:
// element-wise multiply each kernel-position slab, then accumulate all
// K*K products into slab 0 and return it, i.e.
// result[j] = sum_i x[i][j] * filter[i][j].
torch::Tensor conv_nmm(const torch::Tensor &x, const torch::Tensor &filter) {
  torch::Tensor out =
      torch::zeros({x.size(0), x.size(1), x.size(2), x.size(3)});
  for (int i = 0; i < x.size(0); i++)
    for (int j = 0; j < x.size(1); j++) {
      out[i][j] = x[i][j].mul(filter[i][j]);
      if (i) // fold each kernel position into the running sum at out[0]
        out[0][j] = out[0][j].add(out[i][j]);
    }
  // index(...) returns a prvalue; the previous std::move was redundant.
  return out.index({0, "..."});
}
// First fully-connected layer over the channel-sliced fc1 kernel from
// init_fc1_kernel(): out = sum_i filter[i] @ x[i].
torch::Tensor fct1(const torch::Tensor &x, const torch::Tensor &filter) {
  torch::Tensor out = torch::zeros({x.size(1), x.size(2)});
  for (int i = 0; i < x.size(0); i++)
    out = out.add(gemm(filter.index({i, "..."}), x.index({i, "..."})));
  // Plain return enables NRVO; std::move(local) would block copy elision.
  return out;
}
// Second fully-connected layer: plain matrix product filter @ x.
torch::Tensor fct2(const torch::Tensor &x, const torch::Tensor &filter) {
  // The previous torch::zeros({x.size(0), x.size(1)}) allocation was
  // immediately overwritten by the product — return it directly instead.
  return gemm(filter, x);
}
// Fully-connected layer with the operands in (input, weight) order:
// out = x @ filter.
torch::Tensor fullconnect(const torch::Tensor &x, const torch::Tensor &filter) {
  // gemm returns a prvalue; the previous std::move was redundant.
  return gemm(x, filter);
}

// Construct the plaintext torch layers (conv 1->4 channels, 7x7 kernel,
// stride 3; fc 256->64; fc 64->10; all bias-free) plus the HE client /
// server pair, then broadcast the T_NUM serialized public keys from the
// root rank to every other rank in `comm`.
// Only the root rank constructs a client; all ranks construct a server.
Net::Net(int root, MPI_Comm comm, const params<helib::BGV> &params)
    : conv1(torch::nn::Conv2dOptions(1, 4, /*kernel_size=*/7)
                .stride(3)
                .bias(false)),
      fc1(torch::nn::LinearOptions(256, 64).bias(false)),
      fc2(torch::nn::LinearOptions(64, 10).bias(false)) {

  // Get MPI metadata
  int rank, p;
  MPI_Comm_rank(comm, &rank);
  MPI_Comm_size(comm, &p);
  hmm_status<shmm_engine> hmm_status(METHOD::BSGS, 0);
  if (rank == root) {
    // Root: create the client first so the server can be built from the
    // client's crypto contexts (get_hmmcc()).
    client_ = new mpi_bgv_hmnist_client(params, T_NUM);
    auto bgv_client = static_cast<mpi_bgv_hmnist_client *>(
        const_cast<mpi_h_mnist_client<helib::BGV> *>(client_));
    server_ = new mpi_bgv_hmnist_server(hmm_status, comm, T_NUM,
                                        conv1->weight.data().size(0), params,
                                        bgv_client->get_hmmcc());
  } else {
    // Non-root ranks build the server from params alone (no client).
    server_ = new mpi_bgv_hmnist_server(hmm_status, comm, T_NUM,
                                        conv1->weight.data().size(0), params);
  }
  // MPI_Barrier(MPI_COMM_WORLD);
  // bcast_pubkey
  if (rank == root) {
    std::cout << "bcast_pk - start: " << MPI_Wtime() << std::endl;
    std::cout.flush();
  }
  std::vector<std::string> pk_str;
  // NOTE(review): on non-root ranks client_ is never assigned in this
  // constructor before the cast below; bgv_client is only dereferenced
  // under `rank == root`, but confirm client_ is default-initialized in
  // the class declaration.
  auto bgv_client = static_cast<mpi_bgv_hmnist_client *>(
      const_cast<mpi_h_mnist_client<helib::BGV> *>(client_));
  auto bgv_server = static_cast<mpi_bgv_hmnist_server *>(
      const_cast<mpi_h_mnist_server<helib::BGV> *>(server_));
  bgv_server->pk_.resize(T_NUM);
  if (rank == root) {
    // Serialize each of the T_NUM public keys into a string for broadcast.
    for (int i = 0; i < T_NUM; i++) {
      std::stringstream pk_ss;
      bgv_server->pk_[i] =
          new helib::PubKey(bgv_client->get_hmmcc()[i]->public_key());
      bgv_server->pk_[i]->writeTo(pk_ss);
      pk_str.push_back(pk_ss.str());
    }
  }
  MPI_Bcast_Strings(pk_str, root, comm);
  if (rank != root) {
    // Deserialize the broadcast keys against this rank's own contexts.
#pragma omp parallel for
    for (int i = 0; i < T_NUM; i++) {
      std::stringstream pk_ss;
      pk_ss << pk_str[i];
      bgv_server->pk_[i] = new helib::PubKey(
          helib::PubKey::readFrom(pk_ss, *bgv_server->get_context()[i]));
    }
  }
  if (rank == root) {
    std::cout << "bcast_pk - finished: " << MPI_Wtime() << std::endl;
    std::cout.flush();
  }

  register_module("conv1", conv1);
  register_module("conv1_drop", conv1_drop);
  register_module("fc1", fc1);
  register_module("fc2", fc2);
}

// double Net::bcast_seckey(int root, int rank, MPI_Comm comm) {
//   std::vector<std::string> sk_str;
//   auto bgv_client = static_cast<mpi_bgv_hmnist_client *>(
//       const_cast<mpi_h_mnist_client<helib::BGV> *>(client_));
//   auto bgv_server = static_cast<mpi_bgv_hmnist_server *>(
//       const_cast<mpi_h_mnist_server<helib::BGV> *>(server_));
//   for (int i = 0; i < bgv_server->get_crt_num(); i++) {
//     sk_[i] = new helib::SecKey(*bgv_client->get_context()[i]);
//     sk_[i]->GenSecKey();
//     helib::addSome1DMatrices(*sk_[i]);
//   }
//   return sk_str.size() / 1024.0 / 1024.0;
// }

// Distribute the encrypted model to all ranks: the world root encrypts
// the conv/fc1/fc2 weights (init_hcnn), the serialized ciphertexts are
// sent point-to-point / broadcast over the dedicated sub-communicators
// (comm_Bcast, comm_conv, comm_reducfc2), and each rank deserializes its
// share into hconv / hfc1 / hfc2.
void Net::CNN_init() {
  auto bgv_client = static_cast<mpi_bgv_hmnist_client *>(
      const_cast<mpi_h_mnist_client<helib::BGV> *>(client_));
  auto bgv_server = static_cast<mpi_bgv_hmnist_server *>(
      const_cast<mpi_h_mnist_server<helib::BGV> *>(server_));
  int rank, p;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &p);
  // NOTE(review): rank_Bcast stays uninitialized when this rank is not in
  // comm_Bcast; it is only read inside comm_Bcast-guarded blocks below.
  int rank_Bcast;
  if (server_->comm_Bcast != MPI_COMM_NULL)
    MPI_Comm_rank(server_->comm_Bcast, &rank_Bcast);
  int rank_conv;
  MPI_Comm_rank(server_->comm_conv, &rank_conv);

  // Only the world root encrypts the model weights.
  encrypted_model<helib::BGV> ctxt_model;
  if (rank == COMM_WORLD_ROOT) {
    client_->init_hcnn(ctxt_model, conv1, fc1, fc2);
  }

  // MPI_Barrier(MPI_COMM_WORLD);
  // hconv && hfc1
  std::vector<std::vector<std::string>> buf_client_conv;
  std::vector<std::string> buf_client_fc1;
  std::vector<std::vector<std::vector<std::string>>> test_conv;
  std::vector<std::vector<std::string>> test_fc1;
  std::vector<std::string> test_fc2;

  // Stage 1: root ships the serialized conv ciphertexts over comm_Bcast.
  if (server_->comm_Bcast != MPI_COMM_NULL) {
    server_->send_honv_p2p(ctxt_model.conv, server_->comm_Bcast,
                           buf_client_conv, MPI_Bcast_ROOT, rank_Bcast,
                           bgv_server->get_crt_num());
  }
  if (rank == COMM_WORLD_ROOT)
    std::cout << rank << "_hconv finished" << std::endl;
  // MPI_Barrier(MPI_COMM_WORLD);

  if (server_->comm_Bcast != MPI_COMM_NULL) {
    // std::cout << "bcast_hfc1_client[" << rank1 << "]start" << std::endl;
    server_->send_hfc1_p2p(ctxt_model.fc1, server_->comm_Bcast, buf_client_fc1,
                           MPI_Bcast_ROOT, rank_Bcast,
                           bgv_server->get_crt_num());
  }
  // MPI_Barrier(MPI_COMM_WORLD);
  std::vector<std::vector<std::string>> buf_server_hconv;
  std::vector<std::string> buf_server_hfc1;

  // Stage 2: fan the conv ciphertexts out within each conv communicator,
  // then deserialize them into hconv (one vector per CRT limb).
  server_->bcast_hconv_server(buf_client_conv, server_->comm_conv,
                              buf_server_hconv, 0, rank_conv,
                              bgv_server->get_crt_num());
  // MPI_Barrier(MPI_COMM_WORLD);
  hconv.resize(bgv_server->get_crt_num());
  for (int i = 0; i < bgv_server->get_crt_num(); i++) {
    for (int j = 0; j < buf_server_hconv[i].size(); j++) {
      std::stringstream ss;
      ss << buf_server_hconv[i][j];
      hconv[i].push_back(helib::Ctxt::readFrom(ss, bgv_server->get_pk(i)));
    }
  }
  if (rank == COMM_WORLD_ROOT)
    std::cout << "hconvinit_" << rank << "finished" << std::endl;
  // MPI_Barrier(MPI_COMM_WORLD);

  // Same fan-out and deserialization for the fc1 ciphertexts.
  server_->bcast_hfc1_server(buf_client_fc1, server_->comm_conv,
                             buf_server_hfc1, 0, rank_conv,
                             bgv_server->get_crt_num());

  for (int i = 0; i < bgv_server->get_crt_num(); i++) {
    std::stringstream ss;
    ss << buf_server_hfc1[i];
    hfc1.push_back(helib::Ctxt::readFrom(ss, bgv_server->get_pk(i)));
  }
  if (rank == COMM_WORLD_ROOT)
    std::cout << "hfc1init_" << rank << "finished" << std::endl;
  // MPI_Barrier(MPI_COMM_WORLD);

  // Stage 3: fc2 ciphertexts go only to ranks in the fc2 reduction
  // communicator. rank_reduc2 is likewise only read when that
  // communicator is non-null.
  std::vector<std::string> buf_hfc2;
  int rank_reduc2;
  if (server_->comm_reducfc2 != MPI_COMM_NULL)
    MPI_Comm_rank(server_->comm_reducfc2, &rank_reduc2);
  if (server_->comm_reducfc2 != MPI_COMM_NULL) {
    server_->bcast_hfc2_client(ctxt_model.fc2, server_->comm_reducfc2, buf_hfc2,
                               COMM_WORLD_ROOT, rank_reduc2);
  }

  if (server_->comm_reducfc2 != MPI_COMM_NULL) {
    for (int i = 0; i < bgv_server->get_crt_num(); i++) {
      std::stringstream ss;
      ss << buf_hfc2[i];
      hfc2.push_back(helib::Ctxt::readFrom(ss, bgv_server->get_pk(i)));
    }
  }
  if (server_->comm_reducfc2 != MPI_COMM_NULL && rank == COMM_WORLD_ROOT)
    std::cout << "hfc2init_" << rank << "finished" << std::endl;
}

// Plaintext training forward pass: conv -> x^2 -> fc1 -> x^2 -> dropout
// -> fc2 -> log_softmax. Mirrors the encrypted inference pipeline but
// uses standard torch layers so autograd works.
torch::Tensor Net::forward_train(torch::Tensor x) {
  x = sqractive(conv1->forward(x));
  x = x.view({-1, 256}); // flatten the conv output to [batch, 256]
  x = fc1->forward(x);
  x = sqractive(x);
  x = torch::dropout(x, /*p=*/0.1, /*training=*/is_training());
  x = fc2->forward(x);
  // log_softmax returns a prvalue; the previous std::move was redundant.
  return torch::log_softmax(x, /*dim=*/1);
}

// Plaintext reference run of the encrypted inference pipeline, using the
// same rearranged kernel layouts; prints the resulting logits so they can
// be compared against the HE path.
void Net::ptxt_forward_test(const torch::Tensor &x) {
  auto a = im2matrix(x, conv1->weight.data(), 3);
  auto b = init_conv_kernel(conv1->weight.data());
  auto c = init_fc1_kernel(fc1->weight.data(), 4);
  auto d = init_fc2_kernel(fc2->weight.data());
  auto X = conv_nmm(a, b); // dead `auto X = x;` copy removed
  X = sqractive(X);
  X = fct1(X, c);
  X = sqractive(X);
  X = torch::dropout(X, /*p=*/0.1, /*training=*/is_training());
  // BUG FIX: fct2 was fed the raw input `x` instead of the activations
  // `X` flowing through the pipeline.
  X = fct2(X, d);
  // Keep the 10 class rows. Fixed Slice(0, 10) — the previous brace form
  // Slice({0, 10}) did not spell the (start, stop) arguments correctly.
  X = X.index({torch::indexing::Slice(0, 10), "..."});
  X = X.transpose(0, 1);
  std::cout << X << std::endl;
}

// Encrypt one input batch on the client side into ctxt_in, using the conv
// weights to determine the input packing.
void Net::init_in(const torch::Tensor &x,
                  encrypted_input<helib::BGV> &ctxt_in) {
  // Unused locals removed: batchsize/fconvsize were computed but never
  // read, `p` (and its MPI_Comm_size call) and buf_input were never used.
  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  client_->init_input(ctxt_in, conv1->weight.data(), x);
  std::cout << "initinput" << rank << "finish" << std::endl;
}

// Run one encrypted forward pass over the batch `x`. Rank 0 performs the
// im2col expansion and encrypts one input slab per rank, ships each slab
// point-to-point, then every rank executes the server-side pipeline into
// `ctxt_out` (still encrypted). Also reports timing, per-rank encoding
// time, and RSS memory usage.
void Net::forward_test(const torch::Tensor &x,
                       std::vector<std::vector<std::string>> &ctxt_out) {
  // MPI_Barrier(MPI_COMM_WORLD);
  // auto start_t = MPI_Wtime();
  int rank, p;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &p);
  std::size_t mem_begin_ = 0.0;
  std::size_t mem_end_ = 0.0;
  const double GB = static_cast<double>(1l << 30l);
  const double MB = static_cast<double>(1l << 20l);
  mem_begin_ = getCurrentRSS();
  // Per-rank receive buffers for the encrypted input slabs.
  // encrypted_input<helib::BGV> ctxt_in;
  std::vector<std::vector<std::vector<std::string>>> buf_input;
  // std::vector<std::vector<helib::Ctxt>> ctxt_input;
  // if (rank == COMM_WORLD_ROOT)
  //   init_in(x, ctxt_in);
  // return;
  // MPI_Barrier(MPI_COMM_WORLD);
  // 8,5,49,4 ->2,5,49
  // server_->send_input_p2p(ctxt_in, MPI_COMM_WORLD, buf_input,
  // COMM_WORLD_ROOT); MPI_Barrier(MPI_COMM_WORLD);
  // Only rank 0 holds the plaintext batch; it builds the im2col tensor.
  torch::Tensor a;
  if (rank == 0) {
    std::cout << "x: " << x.size(0) << " " << x.size(1) << " " << x.size(2)
              << " " << x.size(3) << std::endl;
    a = im2matrix(x, conv1->weight.data(), 3);
    std::cout << "a: " << a.size(0) << " " << a.size(1) << " " << a.size(2)
              << " " << a.size(3) << " " << a.size(4) << std::endl;
  }
  // Encrypt and ship one input slab per rank; timed as "Init input".
  double Init_input = 0.0;
  Init_input -= MPI_Wtime();
  for (int i = 0; i < p; i++) {
    // MPI_Barrier(MPI_COMM_WORLD);
    std::vector<std::string> send;
    if (rank == 0) {
      client_->init_input_single(send, a, i);
      if (i == 0) {
        std::cout << "ctxt_send_size:"
                  << static_cast<double>(send[0].size() * sizeof(char)) / MB
                  << "MB" << std::endl;
      }
    }
    // Only the sender (rank 0) and the receiver (rank i) take part in
    // each point-to-point transfer.
    if (rank == i || rank == 0) {
      server_->send_input_p2p_single(send, MPI_COMM_WORLD, buf_input,
                                     COMM_WORLD_ROOT, i);
    }
  }
  Init_input += MPI_Wtime();
  if (rank == 0)
    std::cout << "* Init input                    : " << Init_input << " s\n";
  // if (rank == COMM_WORLD_ROOT)
  //   std::cout << "sendinput" << rank << "finished" << std::endl;

  // Encrypted inference proper: conv + fc1 + fc2 over the HE ciphertexts.
  auto start_t = MPI_Wtime();
  server_->forward_test(ctxt_out, buf_input, hconv, hfc1, hfc2);
  // if (rank == COMM_WORLD_ROOT)
  //   std::cout << "forward_test" << rank << "finish" << std::endl;
  MPI_Barrier(MPI_COMM_WORLD);
  if (rank == COMM_WORLD_ROOT) {
    auto end_t = MPI_Wtime();
    std::cout << "forward_test: " << (end_t - start_t) << " s" << std::endl;
    std::cout.flush();
  }
  mem_end_ = getPeakRSS();
  // Report metrics rank by rank (the barrier serializes the output).
  for (int i = 0; i < p; i++) {
    MPI_Barrier(MPI_COMM_WORLD);
    if (i == rank) {
      std::stringstream ss;
      // Sum the encoding time across all T_NUM engine instances.
      double encoding = 0.0;
      for (int j = 0; j < T_NUM; j++) {
        // double encodingj = 0.0;
        encoding += static_cast<mpi_bgv_hmnist_server *>(
                        const_cast<mpi_h_mnist_server<helib::BGV> *>(server_))
                        ->get_hmme()[j]
                        ->get_encoding_time();
        // encoding += encodingj;
        // std::stringstream ss1;
        // ss1 << "* No Encoding Reusej : " << j << "_" << encodingj << " s\n";
        // std::cout << ss1.str();
        // std::cout.flush();
      }
      server_->report_metrics(ss);
      ss << "* No Encoding Reuse  : " << encoding << " s\n";
      ss << rank
         << "mem_forward_begin_ = " << static_cast<double>(mem_begin_) / GB
         << " GB" << std::endl;
      ss << rank << "mem_forward_peak_ = " << static_cast<double>(mem_end_) / GB
         << " GB" << std::endl;
      ss << rank << "mem_forward_use_ = "
         << static_cast<double>(mem_end_) / GB -
                static_cast<double>(mem_begin_) / GB
         << " GB" << std::endl;
      std::cout << ss.str();
      std::cout.flush();
    }
  }
  // if (rank == COMM_WORLD_ROOT) {
  //   auto end_t = MPI_Wtime();
  //   std::cout << "forward_test: " << (end_t - start_t) << " s" << std::endl;
  // }
  // for (int i = 0; i < p; i++) {
  //   MPI_Barrier(MPI_COMM_WORLD);
  //   if (i == rank) {
  //     double encoding = 0.0;
  //     for (int j = 0; j < T_NUM; j++) {
  //       encoding += static_cast<mpi_bgv_hmnist_server *>(
  //                       const_cast<mpi_h_mnist_server<helib::BGV>
  //                       *>(server_))
  //                       ->get_hmme()[j]
  //                       ->get_encoding_time();
  //     }
  //     std::stringstream ss;
  //     ss << "* No Encoding Reuse  : " << encoding << " s\n";
  //     server_->report_metrics(ss);
  //     std::cout << ss.str();
  //     std::cout.flush();
  //   }
  // }
}

// Decrypt and assemble the final logits. Only the world root that is a
// member of the fc2 reduction communicator may call this; any other
// caller is a programming error and aborts the job.
torch::Tensor
Net::forward_result(const std::vector<std::vector<std::string>> &ctxt_out) {
  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  if (rank != COMM_WORLD_ROOT || server_->comm_reducfc2 == MPI_COMM_NULL) {
    // BUG FIX: abort on MPI_COMM_WORLD — comm_reducfc2 can be
    // MPI_COMM_NULL in exactly this branch, and passing a null
    // communicator to MPI_Abort is invalid.
    MPI_Abort(MPI_COMM_WORLD, 1);
  }
  torch::Tensor out = torch::zeros({fc2->weight.data().size(0), MATRIX_DIM_N});
  client_->recover_result(out, ctxt_out, server_->comm_reducfc2,
                          COMM_WORLD_ROOT);
  std::cout << "recover_result" << rank << "finish" << std::endl;
  return torch::log_softmax(out, /*dim=*/1);
}

// Run one training epoch: SGD over data_loader with NLL loss, logging
// progress every kLogInterval batches.
template <typename DataLoader>
void train(size_t epoch, Net &model, torch::Device device,
           DataLoader &data_loader, torch::optim::Optimizer &optimizer,
           size_t dataset_size) {
  model.train();
  size_t batch_idx = 0;
  for (auto &batch : data_loader) {
    auto data = batch.data.to(device), targets = batch.target.to(device);
    optimizer.zero_grad();
    auto output = model.forward_train(data);
    auto loss = torch::nll_loss(output, targets);
    AT_ASSERT(!std::isnan(loss.template item<float>()));
    loss.backward();
    optimizer.step();

    if (batch_idx++ % kLogInterval == 0) {
      // BUG FIX: %zu matches the size_t arguments; the previous %ld is
      // wrong wherever size_t != long (e.g. LLP64 Windows).
      std::printf("Train Epoch: %zu [%5zu/%5zu] Loss: %.4f\n", epoch,
                  static_cast<size_t>(batch_idx * batch.data.size(0)),
                  dataset_size, loss.template item<float>());
    }
  }
}

// Load model weights from pt_file. If the checkpoint does not exist yet,
// train the model on EMNIST from scratch, save it, and then load it back.
void load_model(Net &model, const torch::Device &device,
                const std::string &pt_file) {
  std::string kDataRoot = "build/data";
  if (!std::filesystem::exists(pt_file)) {
    // No checkpoint found: train and persist one.
    auto train_dataset =
        EMNIST(kDataRoot)
            .map(torch::data::transforms::Normalize<>(0.1307, 0.3081))
            .map(torch::data::transforms::Stack<>());
    const size_t dataset_size = train_dataset.size().value();
    auto loader =
        torch::data::make_data_loader<torch::data::samplers::SequentialSampler>(
            std::move(train_dataset), kTrainBatchSize);

    torch::optim::SGD optimizer(model.parameters(),
                                torch::optim::SGDOptions(0.01).momentum(0.5));

    // Input batches are [batchsize, 1, 28, 28].
    for (size_t epoch = 1; epoch <= kNumberOfEpochs; ++epoch)
      train(epoch, model, device, *loader, optimizer, dataset_size);

    torch::serialize::OutputArchive out_archive;
    model.save(out_archive);
    out_archive.save_to(pt_file);
  }
  // Load the (possibly just-written) checkpoint into the model.
  torch::serialize::InputArchive in_archive;
  in_archive.load_from(pt_file);
  model.load(in_archive);
}

// Encrypted-inference evaluation: initialize the HE model once, then run
// up to test_num EMNIST test batches through the MPI forward path,
// decrypting and scoring only on the world root.
void test(Net &model, const torch::Device &device, const std::string &root,
          std::size_t test_num) {
  // Unused locals removed: `p` was declared but never set or read, and
  // test_dataset_size was computed but never used.
  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  auto test_dataset =
      EMNIST(root, EMNIST::Mode::kTest)
          .map(torch::data::transforms::Normalize<>(0.1307, 0.3081))
          .map(torch::data::transforms::Stack<>());
  auto test_loader =
      torch::data::make_data_loader(std::move(test_dataset), kTestBatchSize);

  torch::NoGradGuard no_grad;
  model.eval();
  // test_loss accumulates the summed NLL on the root; it is tracked but
  // never printed (kept as in the original flow).
  double test_loss = 0;
  int32_t correct = 0;
  size_t batch_idx = 0;
  // One-time setup: encrypt and distribute the model weights to all ranks.
  model.CNN_init();
  for (const auto &batch : *test_loader) {
    if (batch_idx >= test_num) {
      break;
    }
    auto data = batch.data.to(device), targets = batch.target.to(device);
    // Every rank participates in the encrypted forward pass...
    std::vector<std::vector<std::string>> ctxt_out;
    model.forward_test(data, ctxt_out);
    // ...but only the root decrypts, scores, and reports accuracy.
    if (rank == COMM_WORLD_ROOT) {
      auto output = model.forward_result(ctxt_out);
      test_loss += torch::nll_loss(output, targets,
                                   /*weight=*/{}, torch::Reduction::Sum)
                       .template item<float>();
      auto pred = output.argmax(1);
      correct += pred.eq(targets).sum().template item<int64_t>();
      // NOTE(review): denominator assumes every batch holds exactly
      // kTestBatchSize samples; a partial final batch skews this figure.
      std::printf("Accuracy: %.3f\n", static_cast<double>(correct) /
                                          ((batch_idx + 1) * kTestBatchSize));
    }
    batch_idx++;
  }
}
