
#include "CNN/mnist_helper_shared.hpp"
#include "CNN/bgv_mnist_helper_shared.hpp"
#include "h_mat_mul.hpp"
#include "intel_itt_wrapper.hpp"
#include <filesystem>

INTEL_ITT_DOMAIN_CREATE(mnist_test, "test.cnn.mnist");
INTEL_ITT_STRING_HANDLE_CREATE(hcnn, "HCNN");
INTEL_ITT_STRING_HANDLE_CREATE(hcnn_init, "HCNN-Init");
INTEL_ITT_STRING_HANDLE_CREATE(hcnn_inferr, "HCNN-Inferr");

// The batch size for training.
const int64_t kTrainBatchSize = 64;

// The batch size for testing.
const int64_t kTestBatchSize = 64;

// The number of epochs to train.
const int64_t kNumberOfEpochs = 10;

// After how many batches to log a new update with the loss value.
const int64_t kLogInterval = 10;

template class h_mnist_client<helib::BGV>;
template class h_mnist_server<helib::BGV>;
template class h_mnist_client<helib::CKKS>;
template class h_mnist_server<helib::CKKS>;

// Rearranges a conv weight tensor of shape [out_ch, 1, k, k] into
// [k*k, out_ch, 1, 1]: one 1x1 weight per (row, col) tap of the k x k
// filter, as required by the matrix-multiply convolution path.
// Assumes a single input channel and a square kernel
// (a.size(2) == a.size(3)) -- TODO(review): confirm with callers.
torch::Tensor init_conv_kernel(const torch::Tensor &a) {
  const int64_t k = a.size(2);      // kernel spatial size
  const int64_t out_ch = a.size(0); // number of output channels
  torch::Tensor a_ = torch::zeros({k * k, out_ch, 1, 1});
  for (int64_t i = 0; i < k; i++)
    for (int64_t j = 0; j < k; j++)
      for (int64_t c = 0; c < out_ch; c++)
        a_[i * k + j][c][0][0] = a[c][0][i][j];
  // Plain return: NRVO/implicit move applies; an explicit std::move
  // here would pessimize by blocking copy elision.
  return a_;
}

// Splits the fc1 weight matrix column-wise into `conv_channels` square
// blocks of size [a.size(0), a.size(0)], one per conv output channel,
// producing a [conv_channels, rows, rows] tensor.
torch::Tensor init_fc1_kernel(const torch::Tensor &a, int conv_channels) {
  const int64_t rows = a.size(0); // fc1 output features (square block size)
  torch::Tensor a_ = torch::zeros({conv_channels, rows, rows});
  for (int i = 0; i < conv_channels; i++)
    a_[i] =
        a.index({"...", torch::indexing::Slice(i * rows, (i + 1) * rows)});
  // Plain return enables NRVO; std::move would inhibit copy elision.
  return a_;
}

// Prepares the fc2 weight for the plaintext reference pass; currently a
// plain deep copy (the zero-row padding below is disabled).
torch::Tensor init_fc2_kernel(const torch::Tensor &a) {
  torch::Tensor a_ = a.clone();
  // for (int i = 0; i < a.size(1); i++)
  //   a_ = torch::cat({a_, torch::zeros({1, a.size(1)})}, 0);
  // Plain return: std::move on a local blocks NRVO (pessimizing move).
  return a_;
}

// im2col-style unfolding: for each of the k*k filter taps, gathers the
// strided window of x that tap touches into a column matrix, producing
// A_ of shape [k*k, filter_out_ch, out_size*out_size, batch].
// Assumes square input (x.size(2) == x.size(3)) and square filter --
// TODO(review): confirm.
torch::Tensor im2matrix(const torch::Tensor &x, const torch::Tensor &filter,
                        int stride) {
  const int64_t fsz2 = filter.size(2); // filter spatial size
  const int64_t fsz0 = filter.size(0); // filter output channels
  const int out_size = (x.size(2) - fsz2) / stride + 1;
  const int aRow_ = out_size * out_size, aCol_ = x.size(0);
  torch::Tensor A_ = torch::zeros({fsz2 * fsz2, fsz0, aRow_, aCol_});
  for (int64_t i = 0; i < fsz2 * fsz2; i++) {
    // i / fsz2 and i % fsz2 are the tap's (row, col) offset inside the
    // filter; the strided slice picks every output position for that tap.
    for (int64_t j = 0; j < fsz0; j++)
      for (int k = 0; k < aCol_; k++)
        A_.index({i, j, "...", k}) =
            x.index({k, "...",
                     torch::indexing::Slice(
                         i / fsz2, i / fsz2 + stride * out_size, stride),
                     torch::indexing::Slice(
                         i % fsz2, i % fsz2 + stride * out_size, stride)})
                .reshape({aRow_});
  }
  // Plain return enables NRVO; std::move would block copy elision.
  return A_;
}

// Ptxt version
// Squaring "activation": the low-degree polynomial stand-in for ReLU so
// the same network can be evaluated homomorphically.
torch::Tensor sqractive(const torch::Tensor &x) {
  // Return the prvalue directly; std::move on a temporary is a
  // pessimizing move that inhibits copy elision.
  return torch::square(x);
}

// Thin wrapper over torch::matmul, kept as a named seam so the HE path
// can mirror the plaintext computation call-for-call.
torch::Tensor gemm(const torch::Tensor &a, const torch::Tensor &b) {
  // std::move on the returned temporary was a pessimizing move.
  return torch::matmul(a, b);
}

// Convolution expressed as per-tap elementwise multiply-accumulate over
// the unfolded input (see im2matrix): each tap's product is summed into
// slot 0, and that accumulated slice is returned.
torch::Tensor conv_nmm(const torch::Tensor &x, const torch::Tensor &filter) {
  torch::Tensor out =
      torch::zeros({x.size(0), x.size(1), x.size(2), x.size(3)});
  for (int64_t i = 0; i < x.size(0); i++)
    for (int64_t j = 0; j < x.size(1); j++) {
      out[i][j] = x[i][j].mul(filter[i][j]);
      // Fold every tap i > 0 into the running sum held at out[0][j].
      if (i)
        out[0][j] = out[0][j].add(out[i][j]);
    }
  // The index() result already shares storage via the tensor handle;
  // the previous std::move on it was a pessimizing no-op.
  return out.index({0, "..."});
}
// First fully-connected layer in block form: sums filter[i] @ x[i]
// over the leading (conv-channel) axis.
torch::Tensor fct1(const torch::Tensor &x, const torch::Tensor &filter) {
  torch::Tensor out = torch::zeros({x.size(1), x.size(2)});
  for (int64_t i = 0; i < x.size(0); i++)
    out = out.add(gemm(filter.index({i, "..."}), x.index({i, "..."})));
  // Plain return: NRVO applies; std::move would block copy elision.
  return out;
}
// Second fully-connected layer: a single matrix product filter @ x.
torch::Tensor fct2(const torch::Tensor &x, const torch::Tensor &filter) {
  // The original zero-initialized an `out` tensor and immediately
  // overwrote it; the product alone suffices, and returning the
  // prvalue directly avoids the previous pessimizing std::move.
  return gemm(filter, x);
}
// Fully-connected layer applied as x @ filter (operand order differs
// from fct2, which computes filter @ x).
torch::Tensor fullconnect(const torch::Tensor &x, const torch::Tensor &filter) {
  // Return the result directly; std::move on the local blocked NRVO.
  return gemm(x, filter);
}

// template <typename Scheme>
// Constructs the HE-CNN: a small bias-free MNIST network
// (conv 1->4 kernel 7 stride 3, fc 256->64, fc 64->10) together with the
// BGV client/server pair used for encrypted inference.
// Order matters below: the client must exist first, because the server
// is built from the client's CRT count and HMM crypto contexts.
// NOTE(review): client_ and server_ are raw `new` allocations with no
// visible delete here -- ownership/cleanup presumably lives elsewhere
// (or leaks for the process lifetime); confirm.
Net::Net(const params<helib::BGV> &params)
    : conv1(torch::nn::Conv2dOptions(1, 4, /*kernel_size=*/7)
                .stride(3)
                .bias(false)),
      fc1(torch::nn::LinearOptions(256, 64).bias(false)),
      fc2(torch::nn::LinearOptions(64, 10).bias(false)) {
  // bcast_pubkey
  // auto bgv_server = static_cast<bgv_hmnist_server *>(
  //     const_cast<h_mnist_server<helib::BGV> *>(server_));
  client_ = new bgv_hmnist_client(params, T_NUM);
  std::cout << "client finished" << std::endl;
  // Create a HMM status instance
  // hmm_status<shmm_engine> hmm_status(METHOD::BSGS, omp_get_max_threads());
  // Downcast the scheme-generic member to the concrete BGV client to
  // reach BGV-specific accessors (const_cast strips the stored const).
  auto bgv_client = static_cast<bgv_hmnist_client *>(
      const_cast<h_mnist_client<helib::BGV> *>(client_));
  hmm_status<shmm_engine> hmm_status(METHOD::BSGS, 0);
  server_ = new bgv_hmnist_server(hmm_status, bgv_client->get_crt_num(), params,
                                  bgv_client->get_hmmcc());
  std::cout << "server finished" << std::endl;
  auto bgv_server = static_cast<bgv_hmnist_server *>(
      const_cast<h_mnist_server<helib::BGV> *>(server_));
  // Hand the server one public key per crypto context (T_NUM of them)
  // so it can operate on ciphertexts from each context.
  bgv_server->pk_.resize(T_NUM);
  for (int i = 0; i < T_NUM; i++) {
    bgv_server->pk_[i] =
        new helib::PubKey(bgv_client->get_hmmcc()[i]->public_key());
  }
  std::cout << "pk finished" << std::endl;
  // MPI_Barrier(MPI_COMM_WORLD);
  register_module("conv1", conv1);
  register_module("conv1_drop", conv1_drop);
  register_module("fc1", fc1);
  register_module("fc2", fc2);
}

// template <typename Scheme>
// Prepares the encrypted model: delegates to the client helper, which
// populates ctxt_model from the trained conv1/fc1/fc2 weights.
void Net::CNN_init() { client_->init_hcnn(ctxt_model, conv1, fc1, fc2); }

// template <typename Scheme>
// Training-time (plaintext) forward pass: conv -> square -> flatten ->
// fc1 -> square -> dropout -> fc2 -> log-softmax. Squaring replaces
// ReLU so the learned weights match the HE-friendly inference circuit.
torch::Tensor Net::forward_train(torch::Tensor x) {
  x = sqractive(conv1->forward(x));
  // Flatten conv output to 256 features (4 channels x 8 x 8 for 28x28
  // MNIST input with kernel 7, stride 3 -- see conv1 options).
  x = x.view({-1, 256});
  x = fc1->forward(x);
  x = sqractive(x);
  x = torch::dropout(x, /*p=*/0.1, /*training=*/is_training());
  x = fc2->forward(x);
  // x.print();
  // Return the prvalue directly; the previous std::move on the
  // temporary was a pessimizing move.
  return torch::log_softmax(x, /*dim=*/1);
}

// template <typename Scheme>
// Plaintext reference of the encrypted forward pass, for debugging:
// evaluates the same matrix-multiply formulation the server runs
// homomorphically and prints the resulting logits.
void Net::ptxt_forward_test(const torch::Tensor &x) {
  auto a = im2matrix(x, conv1->weight.data(), 3);
  auto b = init_conv_kernel(conv1->weight.data());
  auto c = init_fc1_kernel(fc1->weight.data(), 4);
  auto d = init_fc2_kernel(fc2->weight.data());
  auto X = conv_nmm(a, b); // (was `auto X = x;` then overwritten)
  X = sqractive(X);
  X = fct1(X, c);
  X = sqractive(X);
  X = torch::dropout(X, /*p=*/0.1, /*training=*/is_training());
  // Bug fix: the final layer must consume the fc1 activations X, not
  // the raw input x -- the original `fct2(x, d)` silently discarded
  // everything computed above.
  X = fct2(X, d);
  X = X.index({torch::indexing::Slice({0, 10}), "..."});
  X = X.transpose(0, 1);
  std::cout << X << std::endl;
}

// template <typename Scheme>
// Encrypted forward pass for one batch: the client encrypts the input,
// the server evaluates the network on ciphertexts, and the client
// decrypts/recovers the logits into `out` before log-softmax.
torch::Tensor Net::forward_test(const torch::Tensor &x) {
  int batchsize = x.size(0),
      fconvsize = conv1->weight.data().size(2) * conv1->weight.data().size(3);
  // out is [num_classes, batch]: one logit column per sample.
  auto out = torch::zeros({fc2->weight.data().size(0), batchsize});
  encrypted_input<helib::BGV> ctxt_in;

  // Ctxt version
  // Downcast to the concrete BGV client for get_hmmcc().
  auto bgv_client = static_cast<bgv_hmnist_client *>(
      const_cast<h_mnist_client<helib::BGV> *>(client_));
  // auto bgv_server = static_cast<bgv_hmnist_server *>(
  //     const_cast<h_mnist_server<helib::BGV> *>(server_));
  client_->init_input(ctxt_in, conv1->weight.data(), x);
  std::cout << "init_input finished" << std::endl;
  encrypted_output<helib::BGV> ctxt_out;
  server_->forward_test(ctxt_out, ctxt_in, ctxt_model, bgv_client->get_hmmcc());
  std::cout << "forward_test finished" << std::endl;
  server_->report_metrics();

  // Decrypt the ciphertext outputs back into the plaintext logits.
  client_->recover_result(out, ctxt_out);

  // Ptxt version
  // ptxt_forward_test(x);

  return torch::log_softmax(out, /*dim=*/1);
}

// template class Net<helib::BGV>;
// template class Net<helib::CKKS>;

// Runs one training epoch over `data_loader`, logging the NLL loss
// every kLogInterval batches.
// @param epoch         1-based epoch number (for logging only).
// @param model         the network being trained.
// @param device        device the batch tensors are moved to.
// @param optimizer     optimizer stepped after each backward pass.
// @param dataset_size  total sample count (for the progress display).
template <typename DataLoader>
void train(size_t epoch, Net &model, torch::Device device,
           DataLoader &data_loader, torch::optim::Optimizer &optimizer,
           size_t dataset_size) {
  model.train();
  size_t batch_idx = 0;
  for (auto &batch : data_loader) {
    auto data = batch.data.to(device), targets = batch.target.to(device);
    optimizer.zero_grad();
    auto output = model.forward_train(data);
    auto loss = torch::nll_loss(output, targets);
    AT_ASSERT(!std::isnan(loss.template item<float>()));
    loss.backward();
    optimizer.step();

    if (batch_idx++ % kLogInterval == 0) {
      // Bug fix: `%ld` with size_t arguments is a format-specifier
      // mismatch (UB where size_t != long); use %zu with explicit casts.
      std::printf("Train Epoch: %zu [%5zu/%5zu] Loss: %.4f\n", epoch,
                  batch_idx * static_cast<size_t>(batch.data.size(0)),
                  dataset_size, loss.template item<float>());
    }
  }
}

// template <typename Scheme>
// Loads trained weights from `pt_file` into `model`. If the archive
// does not exist yet, trains the network on MNIST for kNumberOfEpochs
// epochs, saves the weights to `pt_file`, then loads them back.
void load_model(Net &model, const torch::Device &device,
                const std::string &pt_file) {
  std::string kDataRoot = "build/data";
  if (!std::filesystem::exists(pt_file)) {
    // No saved model yet: train from scratch on the MNIST train split.
    auto dataset =
        torch::data::datasets::MNIST(kDataRoot)
            .map(torch::data::transforms::Normalize<>(0.1307, 0.3081))
            .map(torch::data::transforms::Stack<>());
    const size_t num_samples = dataset.size().value();
    auto loader =
        torch::data::make_data_loader<torch::data::samplers::SequentialSampler>(
            std::move(dataset), kTrainBatchSize);

    torch::optim::SGD sgd(model.parameters(),
                          torch::optim::SGDOptions(0.01).momentum(0.5));

    // Each batch is [batchsize, 1, 28, 28].
    for (size_t epoch = 1; epoch <= kNumberOfEpochs; ++epoch)
      train(epoch, model, device, *loader, sgd, num_samples);

    torch::serialize::OutputArchive out_archive;
    model.save(out_archive);
    out_archive.save_to(pt_file);
  }
  // Load the (freshly trained or pre-existing) weights.
  torch::serialize::InputArchive in_archive;
  in_archive.load_from(pt_file);
  model.load(in_archive);
}
// template void load_model(Net &model, const torch::Device &device,
//                          const std::string &pt_file);
// template void load_model(Net<helib::CKKS> &model, const torch::Device
// &device,
//                          const std::string &pt_file);

// template <typename Scheme>
// Runs encrypted inference over up to `test_num` MNIST test batches,
// printing a running accuracy after each batch. ITT markers bracket the
// model-encryption (init) and per-batch inference phases for profiling.
void test(Net &model, const torch::Device &device, const std::string &root,
          std::size_t test_num) {
  auto test_dataset =
      torch::data::datasets::MNIST(root,
                                   torch::data::datasets::MNIST::Mode::kTest)
          .map(torch::data::transforms::Normalize<>(0.1307, 0.3081))
          .map(torch::data::transforms::Stack<>());

  const size_t test_dataset_size = test_dataset.size().value();
  auto test_loader =
      torch::data::make_data_loader(std::move(test_dataset), kTestBatchSize);

  torch::NoGradGuard no_grad;
  model.eval();
  double test_loss = 0;
  int32_t correct = 0;
  size_t batch_idx = 0;
  int64_t samples_seen = 0; // samples evaluated so far
  INTEL_ITT_RESUME;
  INTEL_ITT_TASK_BEGIN(mnist_test, hcnn);      //! ITT - Begin
  INTEL_ITT_TASK_BEGIN(mnist_test, hcnn_init); //! ITT - Begin
  model.CNN_init();
  INTEL_ITT_TASK_END(mnist_test); //! ITT - End
  for (const auto &batch : *test_loader) {
    if (batch_idx >= test_num) {
      break;
    }
    auto data = batch.data.to(device), targets = batch.target.to(device);
    INTEL_ITT_TASK_BEGIN(mnist_test, hcnn_inferr); //! ITT - Begin
    auto output = model.forward_test(data);
    INTEL_ITT_TASK_END(mnist_test); //! ITT - End
    test_loss += torch::nll_loss(output, targets,
                                 /*weight=*/{}, torch::Reduction::Sum)
                     .template item<float>();
    auto pred = output.argmax(1);
    correct += pred.eq(targets).sum().template item<int64_t>();
    samples_seen += targets.size(0);
    batch_idx++;
    // Bug fix: divide by the samples actually processed rather than
    // batch_idx * kTestBatchSize, which over-counts when the final
    // batch is smaller than kTestBatchSize.
    std::printf("Accuracy: %.3f\n",
                static_cast<double>(correct) / samples_seen);
  }
  INTEL_ITT_TASK_END(mnist_test); //! ITT - End
  INTEL_ITT_DETACH;

  // test_loss /= dataset_size;
  // std::printf("\nTest set: Average loss: %.4f | Accuracy: %.3f\n",
  // test_loss,
  //             static_cast<double>(correct) / dataset_size);
}
// template void test(Net &model, const torch::Device &device,
//                    const std::string &root, std::size_t test_num);
// template void test(Net<helib::CKKS> &model, const torch::Device &device,
//                    const std::string &root, std::size_t test_num);