#include <torch/torch.h>

// CIFAR-10 input geometry: 3-channel 32x32 images, 10 classes.
// `inline constexpr` (C++17): guaranteed compile-time constants that are
// ODR-safe if this header is included from multiple translation units
// (plain namespace-scope `const` would give each TU its own copy).
inline constexpr int64_t CIFAR10_CHANNEL_IN = 3;
inline constexpr int64_t CIFAR10_CLASS_NUM = 10;

// Naive: Conv -> Square -> FC1 -> Square -> FC2
struct NaiveNoPaddingNet5 : public torch::nn::Module {
public:
  // Conv
  constexpr static int64_t CIFAR10_CHANNEL_OUT = 64;
  constexpr static int64_t CIFAR10_KERNEL_SIZE = 5;
  constexpr static int64_t CIFAR10_STRIDE = 3;

  // FC1
  constexpr static int64_t CIFAR10_FC1_INH =
      (((32 - CIFAR10_KERNEL_SIZE) + CIFAR10_STRIDE) / CIFAR10_STRIDE);
  constexpr static int64_t CIFAR10_FC1_INW = CIFAR10_FC1_INH;
  constexpr static int64_t CIFAR10_FC1_IN =
      CIFAR10_FC1_INH * CIFAR10_FC1_INW * CIFAR10_CHANNEL_OUT;
  constexpr static int64_t CIFAR10_FC1_OUT = 128;
  /*
   * CIFAR10_CHANNEL_OUT = 128 CIFAR10_FC1_IN = 12800:
   * CIFAR10_FC1_OUT = 128: 8192 60.0; 2048 61.1; 1024 60.1
   */

  /*
   * CIFAR10_CHANNEL_OUT = 64 CIFAR10_FC1_IN = 6400:
   * CIFAR10_FC1_OUT = 3200: 8192 60.7
   * CIFAR10_FC1_OUT = 1600: 8192 59.9; 2048 60.1; 1024 60.4
   * CIFAR10_FC1_OUT = 800: 8192 59.2; 2048 59.6; 1024 60.6
   * CIFAR10_FC1_OUT = 256: 8192 59.7; 2048 60.1; 1024 60.3
   * CIFAR10_FC1_OUT = 128: 8192 58.2; 2048 58.1; 1024 57.4
   */

  /*
   * CIFAR10_CHANNEL_OUT = 32 CIFAR10_FC1_IN = 3200:
   * CIFAR10_FC1_OUT = 128: 8192 56.2; 2048 55.8; 1024 56.9
   */

  /*
   * CIFAR10_CHANNEL_OUT = 4 CIFAR10_FC1_IN = 256:
   * CIFAR10_FC1_OUT = 128: 8192 56.2; 2048 55.8; 1024 56.9
   */
public:
  constexpr static const char *pt_name = "cifar10_naive5.pt";

  NaiveNoPaddingNet5()
      : conv(torch::nn::Conv2dOptions(CIFAR10_CHANNEL_IN, CIFAR10_CHANNEL_OUT,
                                      CIFAR10_KERNEL_SIZE)
                 .stride(CIFAR10_STRIDE)
                 .bias(false)),
        fc1(torch::nn::LinearOptions(CIFAR10_FC1_IN, CIFAR10_FC1_OUT)
                .bias(false)),
        fc2(torch::nn::LinearOptions(CIFAR10_FC1_OUT, CIFAR10_CLASS_NUM)
                .bias(false)) {
    register_module("conv", conv);
    register_module("conv_drop", conv_drop);
    register_module("fc1", fc1);
    register_module("fc2", fc2);
  }

  ~NaiveNoPaddingNet5() = default;

  torch::Tensor forward(torch::Tensor x) {
    x = torch::square(conv_drop(conv(x)));
    // x = torch::square(conv(x));
    x = x.view({-1, CIFAR10_FC1_IN});
    x = torch::square(fc1(x));
    x = fc2(x);
    return std::move(x);
  }

public:
  torch::nn::Conv2d conv;
  torch::nn::Dropout2d conv_drop;
  torch::nn::Linear fc1;
  torch::nn::Linear fc2;
};

// LoLa: Conv1 -> Square -> Conv2 -> Square -> FC
struct LoLaNet5 : public torch::nn::Module {
protected:
  // Conv1
  constexpr static int64_t CONV1_CHANNEL_OUT = 83;
  constexpr static int64_t CONV1_KERNEL_SIZE = 8;
  constexpr static int64_t CONV1_STRIDE = 2;
  constexpr static int64_t CONV1_PADDING = 1;

  // Conv2
  constexpr static int64_t CONV2_CHANNEL_OUT = 163;
  constexpr static int64_t CONV2_KERNEL_SIZE = 6;
  constexpr static int64_t CONV2_STRIDE = 2;

  // FC
  constexpr static int64_t CIFAR10_FC1_INH = 5;
  constexpr static int64_t CIFAR10_FC1_INW = CIFAR10_FC1_INH;
  constexpr static int64_t CIFAR10_FC1_IN =
      CIFAR10_FC1_INH * CIFAR10_FC1_INW * CONV2_CHANNEL_OUT;

public:
  constexpr static const char *pt_name = "cifar10_lola5.pt";

  LoLaNet5()
      : conv1(torch::nn::Conv2dOptions(CIFAR10_CHANNEL_IN, CONV1_CHANNEL_OUT,
                                       CONV1_KERNEL_SIZE)
                  .stride(CONV1_STRIDE)
                  .padding(CONV1_PADDING)
                  .bias(false)),
        conv2(torch::nn::Conv2dOptions(CONV1_CHANNEL_OUT, CONV2_CHANNEL_OUT,
                                       CONV2_KERNEL_SIZE)
                  .stride(CONV2_STRIDE)
                  .bias(false)),
        fc(torch::nn::LinearOptions(CIFAR10_FC1_IN, CIFAR10_CLASS_NUM)
               .bias(false)) {
    register_module("conv1", conv1);
    register_module("conv2", conv2);
    register_module("fc", fc);
  }

  ~LoLaNet5() = default;

  torch::Tensor forward(torch::Tensor x) {
    x = torch::square(conv1(x));
    x = torch::square(conv2(x));
    x = torch::flatten(x, 1);
    x = fc(x);
    return std::move(x);
  }

public:
  torch::nn::Conv2d conv1;
  torch::nn::Conv2d conv2;
  torch::nn::Linear fc;
};