#include "CNN/cifar10.hpp"
#include "CNN/cifar10_model_test.hpp"
#include <filesystem>

// Run configuration / hyper-parameters.
const int64_t kTrainBatchSize = 100;  // samples per training batch
const int64_t kTestBatchSize = 128;  // samples per test batch
const int64_t kNumberOfEpochs = 10;  // full passes over the training set
const int64_t kLogInterval = 50;  // training batches between progress printouts

// Directory containing the CIFAR-10 binary batch files.
const std::string kRoot = "build/data/cifar-10-batches-bin";

// All computation runs on CPU (no CUDA device is requested anywhere here).
const torch::Device device(torch::kCPU);

/// Runs one training epoch of `model` over `data_loader`.
///
/// @param dataset_size total number of training samples (progress output only)
/// @param epoch        1-based epoch index (progress output only)
/// @param model        network exposing ptxt_forward_test(); updated in place
/// @param device       device tensors are moved to before the forward pass
/// @param data_loader  batched training data loader
/// @param optimizer    optimizer stepping the model's parameters
/// @param loss_fn      callable mapping (output, targets) -> scalar loss tensor
template <typename DataLoader, typename Net, typename LossFn>
void train(size_t dataset_size, size_t epoch, Net &model, torch::Device device,
           DataLoader &data_loader, torch::optim::Optimizer &optimizer,
           LossFn &loss_fn) {
  float running_loss = 0.0;
  size_t batch_idx = 0;
  for (auto &batch : data_loader) {
    auto data = batch.data.to(device);
    auto targets = batch.target.to(device);

    optimizer.zero_grad();
    auto output = model.ptxt_forward_test(data);

    auto loss = loss_fn(output, targets);
    // A NaN loss means training has diverged; fail fast instead of
    // silently producing garbage weights.
    AT_ASSERT(!std::isnan(loss.template item<float>()));

    loss.backward();
    optimizer.step();

    running_loss += loss.template item<float>();
    ++batch_idx;
    // Log every kLogInterval batches. (Replaces the obfuscated but
    // equivalent `(batch_idx++ % k) + 1 == k` test.)
    if (batch_idx % kLogInterval == 0) {
      auto pred = output.argmax(1);
      auto correct = pred.eq(targets).sum().template item<int64_t>();
      const auto batch_size = batch.data.size(0);
      // %zu matches size_t; the previous "%ld" is undefined behavior on
      // platforms where size_t is not long. Accuracy uses the actual batch
      // size rather than kTrainBatchSize so a trailing partial batch is
      // reported correctly.
      std::printf("Train Epoch: %zu [%5zu/%5zu] Loss: %.4f Accuracy: %.3f\n",
                  epoch, batch_idx * static_cast<size_t>(batch_size),
                  dataset_size, running_loss / kLogInterval,
                  static_cast<double>(correct) /
                      static_cast<double>(batch_size));
      running_loss = 0.0;
    }
  }
}

/// Evaluates `model` on up to `test_num` batches of the CIFAR-10 test set
/// and prints the resulting accuracy.
///
/// @param root     directory containing the CIFAR-10 binary batch files
/// @param test_num maximum number of test batches to evaluate
/// @param model    network exposing ptxt_forward_test(), eval(), and the
///                 conv/fc1/fc2 members printed below
/// @param device   device tensors are moved to before the forward pass
template <typename Net>
void test(const std::string &root, std::size_t test_num, Net &model,
          const torch::Device &device) {
  auto test_dataset = CIFAR10(root, CIFAR10::Mode::kTest)
                          .map(torch::data::transforms::Normalize<>(
                              CIFAR10::CIFAR10_MEANS, CIFAR10::CIFAR10_STDS))
                          .map(torch::data::transforms::Stack<>());
  const size_t test_dataset_size = test_dataset.size().value();
  (void)test_dataset_size;  // dataset must be loadable; size itself unused
  auto test_loader =
      torch::data::make_data_loader<torch::data::samplers::SequentialSampler>(
          std::move(test_dataset), kTestBatchSize);

  // Inference only: disable autograd bookkeeping and switch to eval mode.
  torch::NoGradGuard no_grad;
  model.eval();

  // int64_t to match item<int64_t>() below — the old int32_t accumulator
  // narrowed silently.
  int64_t correct = 0;
  int64_t total = 0;
  size_t batch_idx = 0;
  for (const auto &batch : *test_loader) {
    if (batch_idx >= test_num) {
      break;
    }
    auto data = batch.data.to(device);
    auto targets = batch.target.to(device);
    auto output = model.ptxt_forward_test(data);
    auto pred = output.argmax(1);
    correct += pred.eq(targets).sum().template item<int64_t>();
    total += targets.size(0);
    batch_idx++;
  }
  // Each batch holds at most kTestBatchSize samples; the final batch may be
  // partial, so the previous exact-equality assert would abort on it.
  assert(total <= static_cast<int64_t>(batch_idx) * kTestBatchSize);
  if (total == 0) {
    std::printf("No test batches evaluated (test_num == 0?)\n");
    return;
  }
  std::cout << model.conv << "," << model.fc1 << "," << model.fc2 << std::endl;
  std::printf("Accuracy of the network on the %lld test images: %.3f\n",
              static_cast<long long>(total),
              static_cast<double>(correct) / static_cast<double>(total));
}

/// Loads model weights from `pt_file`. If the file does not exist yet, the
/// model is first trained on CIFAR-10, its weights are saved to `pt_file`,
/// and then loaded back so the in-memory model always matches the archive.
///
/// @param pt_file path of the serialized weight archive
/// @param root    directory containing the CIFAR-10 binary batch files
/// @param model   network to train/populate (modified in place)
/// @param device  device used during training
template <typename Net>
void load_model(const std::string &pt_file, const std::string &root, Net &model,
                const torch::Device &device) {
  if (!std::filesystem::exists(pt_file)) {
    std::cout << "Model '" << pt_file
              << "' does not exist.\n"
                 "Training first ..."
              << std::endl;
    // Dataset loader: shuffled sampling (RandomSampler); a SequentialSampler
    // variant was tried previously and is intentionally not used.
    auto train_dataset = CIFAR10(root, CIFAR10::Mode::kTrain)
                             .map(torch::data::transforms::Normalize<>(
                                 CIFAR10::CIFAR10_MEANS, CIFAR10::CIFAR10_STDS))
                             .map(torch::data::transforms::Stack<>());
    const size_t train_dataset_size = train_dataset.size().value();
    auto train_loader =
        torch::data::make_data_loader<torch::data::samplers::RandomSampler>(
            std::move(train_dataset), kTrainBatchSize);

    // Optimizer: Adam with lr = 2e-4 (SGD + momentum was tried and rejected).
    torch::optim::Adam optimizer(model.parameters(),
                                 torch::optim::AdamOptions(0.0002));

    // Loss function for 10-class classification.
    auto criterion = torch::nn::CrossEntropyLoss();

    // Training loop. Explicit cast avoids a signed/unsigned comparison
    // between the size_t loop counter and the int64_t epoch constant.
    model.train();
    for (size_t epoch = 1; epoch <= static_cast<size_t>(kNumberOfEpochs);
         ++epoch) {
      train(train_dataset_size, epoch, model, device, *train_loader, optimizer,
            criterion);
    }

    torch::serialize::OutputArchive archive;
    model.save(archive);
    archive.save_to(pt_file);
  }
  // Always load from the archive so the fresh-training and cached paths
  // produce identical in-memory state.
  torch::serialize::InputArchive archive;
  archive.load_from(pt_file);
  model.load(archive);
}

int main() {
  // Fixed seed so training runs are reproducible.
  torch::manual_seed(1);

  // Swap in LoLaNet5 here to evaluate the alternative architecture.
  NaiveNoPaddingNet5 model;

  // Train-or-load the weights, then evaluate two test batches.
  const std::string pt_file = kRoot + "/" + model.pt_name;
  load_model(pt_file, kRoot, model, device);
  test(kRoot, 2, model, device);
  return 0;
}