#include <torch/torch.h>

// Tile edge length used when packing tensors into square blocks for the
// tiled matrix kernels below (im2matrix / init_fc1_kernel split on it).
#define T_BATCHSIZE 128

// CIFAR-10 inputs have 3 channels; the task has 10 output classes.
const int64_t CIFAR10_CHANNEL_IN = 3;
const int64_t CIFAR10_CLASS_NUM = 10;

// Repack a conv weight tensor [out_ch, in_ch, kh, kw] (square kernel) into
// [kh*kw, out_ch, 1, 1]: each leading slice holds one spatial tap of every
// output filter.
// NOTE(review): only a[k][0][i][j] is read — this assumes input channel 0 is
// the relevant slice for this packing; confirm against the caller.
torch::Tensor init_conv_kernel(const torch::Tensor &a) {
  torch::Tensor a_ = torch::zeros({a.size(2) * a.size(2), a.size(0), 1, 1});
  std::cout << a.size(0) << "," << a.size(1) << "," << a.size(2) << std::endl;
  for (int i = 0; i < a.size(2); i++)
    for (int j = 0; j < a.size(2); j++)
      for (int k = 0; k < a.size(0); k++)
        a_[i * a.size(2) + j][k][0][0] = a[k][0][i][j];
  // Return the local by value: wrapping it in std::move inhibits NRVO.
  return a_;
}

// Split an FC weight matrix [out, in] into in/T_BATCHSIZE chunks of shape
// [out, T_BATCHSIZE] along the input dimension, stacked on a new leading axis.
torch::Tensor init_fc1_kernel(const torch::Tensor &a) {
  torch::Tensor a_ =
      torch::zeros({a.size(1) / T_BATCHSIZE, a.size(0), T_BATCHSIZE});
  for (int i = 0; i < a.size(1) / T_BATCHSIZE; i++)
    // Chunk width is T_BATCHSIZE (the destination's last dim). The original
    // sliced by a.size(0), which only coincides when out == T_BATCHSIZE.
    a_[i] = a.index({"...", torch::indexing::Slice(
                                i * T_BATCHSIZE, (i + 1) * T_BATCHSIZE)});
  // Return by value so NRVO applies.
  return a_;
}

// FC2 weights are used unchanged; return an owning copy so callers can
// mutate it without touching the module's parameters.
torch::Tensor init_fc2_kernel(const torch::Tensor &a) {
  // clone() already yields a prvalue — returning it directly enables copy
  // elision; the original's std::move only suppressed it.
  return a.clone();
}

// Unfold activations x (e.g. [N, C, H, W] = 256,3,32,32) against a square
// filter [F, C, kh, kw] into the tiled layout consumed by conv_nmm:
//   A  : [kh*kw, C, F*out*out, N]                — one spatial tap per slice
//   A_ : [F*out*out/T, N/T, kh*kw, C, T, T]      — T = T_BATCHSIZE tiles
// Requires F*out*out and N to be multiples of T_BATCHSIZE.
torch::Tensor im2matrix(const torch::Tensor &x, const torch::Tensor &filter,
                        int stride) {
  const int fsz2 = filter.size(2);       // square-kernel side length
  const int sqrfsz2 = fsz2 * fsz2;       // spatial taps per kernel
  const int fsz0 = filter.size(0);       // number of output filters
  const int out_size = (x.size(2) - fsz2) / stride + 1;
  const int aRow_ = out_size * out_size; // spatial outputs per filter
  const int aCol_ = x.size(0);           // batch size

  // conv_size*conv_size, channels, filters*out*out, batch  (e.g. 25,3,6400,256)
  torch::Tensor A = torch::zeros({sqrfsz2, x.size(1), fsz0 * aRow_, aCol_});
  // Tiled reshape of A's last two dims (e.g. 50,2,25,3,128,128).
  torch::Tensor A_ =
      torch::zeros({fsz0 * aRow_ / T_BATCHSIZE, aCol_ / T_BATCHSIZE, sqrfsz2,
                    x.size(1), T_BATCHSIZE, T_BATCHSIZE});

  for (int i = 0; i < sqrfsz2; i++) {
    for (int l = 0; l < x.size(1); l++) {
      for (int k = 0; k < aCol_; k++) {
        // The strided window for tap (i/fsz2, i%fsz2) does not depend on j:
        // extract it once, then replicate it for every output filter.
        torch::Tensor patch =
            x.index({k, l,
                     torch::indexing::Slice(
                         i / fsz2, i / fsz2 + stride * out_size, stride),
                     torch::indexing::Slice(
                         i % fsz2, i % fsz2 + stride * out_size, stride)})
                .reshape({aRow_});
        for (int j = 0; j < fsz0; j++)
          A.index({i, l,
                   torch::indexing::Slice(j * aRow_, j * aRow_ + aRow_, 1),
                   k}) = patch;
      }
    }
  }
  // Carve A into T_BATCHSIZE x T_BATCHSIZE tiles over its last two dims.
  for (int i = 0; i < fsz0 * aRow_ / T_BATCHSIZE; i++)
    for (int j = 0; j < aCol_ / T_BATCHSIZE; j++) {
      A_[i][j] = A.index(
          {"...",
           torch::indexing::Slice(i * T_BATCHSIZE, (i + 1) * T_BATCHSIZE, 1),
           torch::indexing::Slice(j * T_BATCHSIZE, (j + 1) * T_BATCHSIZE, 1)});
    }
  // Return by value (NRVO); the original's std::move prevented elision.
  return A_;
}

// Plaintext (Ptxt) stand-in for the homomorphic square activation: x -> x*x.
torch::Tensor sqractive(const torch::Tensor &x) {
  // Return the prvalue directly; std::move on a temporary blocks copy elision.
  return torch::square(x);
}

// Thin wrapper over torch::matmul so the plaintext kernels read like the
// homomorphic pipeline they mirror.
torch::Tensor gemm(const torch::Tensor &a, const torch::Tensor &b) {
  // Return the prvalue directly; std::move on a temporary blocks copy elision.
  return torch::matmul(a, b);
}

// Tiled "no-matmul" convolution: multiply each x[i][j] slice elementwise by
// the matching filter[i][j] slice and reduce over the leading dimension i.
// Returns the reduction, shaped like x with the first dimension dropped.
torch::Tensor conv_nmm(const torch::Tensor &x, const torch::Tensor &filter) {
  torch::Tensor out =
      torch::zeros({x.size(0), x.size(1), x.size(2), x.size(3)});
  for (int i = 0; i < x.size(0); i++)
    for (int j = 0; j < x.size(1); j++) {
      out[i][j] = x[i][j].mul(filter[i][j]);
      // Row 0 doubles as the accumulator for the sum over i.
      if (i)
        out[0][j] = out[0][j].add(out[i][j]);
    }
  // index() already returns a fresh Tensor; std::move on it is a pessimizing
  // no-op, so return the expression directly.
  return out.index({0, "..."});
}
// Plaintext FC1: sum of per-chunk products, out = sum_i filter[i] @ x[i].
torch::Tensor fct1(const torch::Tensor &x, const torch::Tensor &filter) {
  torch::Tensor out = torch::zeros({x.size(1), x.size(2)});
  for (int i = 0; i < x.size(0); i++)
    out = out.add(gemm(filter.index({i, "..."}), x.index({i, "..."})));
  // Return by value so NRVO applies (std::move would inhibit it).
  return out;
}
// Plaintext FC2: a single dense layer, out = filter @ x.
torch::Tensor fct2(const torch::Tensor &x, const torch::Tensor &filter) {
  // The original zero-initialized a buffer and immediately overwrote it —
  // dead allocation. Return the product directly (also enables elision).
  return gemm(filter, x);
}
// Generic fully-connected layer: out = x @ filter.
torch::Tensor fullconnect(const torch::Tensor &x, const torch::Tensor &filter) {
  // Return the prvalue directly; std::move on it only blocks copy elision.
  return gemm(x, filter);
}

// Naive: Conv -> Square -> FC1 -> Square -> FC2
struct NaiveNoPaddingNet5 : public torch::nn::Module {
public:
  // Conv
  constexpr static int64_t CIFAR10_CHANNEL_OUT = 64;
  constexpr static int64_t CIFAR10_KERNEL_SIZE = 5;
  constexpr static int64_t CIFAR10_STRIDE = 3;

  // FC1: input size is the flattened conv output (HxWxC of the feature map).
  constexpr static int64_t CIFAR10_FC1_INH =
      (((32 - CIFAR10_KERNEL_SIZE) + CIFAR10_STRIDE) / CIFAR10_STRIDE);
  constexpr static int64_t CIFAR10_FC1_INW = CIFAR10_FC1_INH;
  constexpr static int64_t CIFAR10_FC1_IN =
      CIFAR10_FC1_INH * CIFAR10_FC1_INW * CIFAR10_CHANNEL_OUT;
  constexpr static int64_t CIFAR10_FC1_OUT = 128;
  /*
   * CIFAR10_CHANNEL_OUT = 128 CIFAR10_FC1_IN = 12800:
   * CIFAR10_FC1_OUT = 128: 8192 60.0; 2048 61.1; 1024 60.1
   */

  /*
   * CIFAR10_CHANNEL_OUT = 64 CIFAR10_FC1_IN = 6400:
   * CIFAR10_FC1_OUT = 3200: 8192 60.7
   * CIFAR10_FC1_OUT = 1600: 8192 59.9; 2048 60.1; 1024 60.4
   * CIFAR10_FC1_OUT = 800: 8192 59.2; 2048 59.6; 1024 60.6
   * CIFAR10_FC1_OUT = 256: 8192 59.7; 2048 60.1; 1024 60.3
   * CIFAR10_FC1_OUT = 128: 8192 58.2; 2048 58.1; 1024 57.4
   */

  /*
   * CIFAR10_CHANNEL_OUT = 32 CIFAR10_FC1_IN = 3200:
   * CIFAR10_FC1_OUT = 128: 8192 56.2; 2048 55.8; 1024 56.9
   */

  /*
   * CIFAR10_CHANNEL_OUT = 4 CIFAR10_FC1_IN = 256:
   * CIFAR10_FC1_OUT = 128: 8192 56.2; 2048 55.8; 1024 56.9
   */
public:
  constexpr static const char *pt_name = "cifar10_naive5.pt";

  // All layers are bias-free so the plaintext kernels above can reproduce
  // them with pure multiply/accumulate.
  NaiveNoPaddingNet5()
      : conv(torch::nn::Conv2dOptions(CIFAR10_CHANNEL_IN, CIFAR10_CHANNEL_OUT,
                                      CIFAR10_KERNEL_SIZE)
                 .stride(CIFAR10_STRIDE)
                 .bias(false)),
        fc1(torch::nn::LinearOptions(CIFAR10_FC1_IN, CIFAR10_FC1_OUT)
                .bias(false)),
        fc2(torch::nn::LinearOptions(CIFAR10_FC1_OUT, CIFAR10_CLASS_NUM)
                .bias(false)) {
    register_module("conv", conv);
    register_module("conv_drop", conv_drop);
    register_module("fc1", fc1);
    register_module("fc2", fc2);
  }

  ~NaiveNoPaddingNet5() = default;

  // Training/eval forward pass using torch's own conv/linear kernels.
  torch::Tensor forward(torch::Tensor x) {
    x = torch::square(conv_drop(conv(x)));
    // x = torch::square(conv(x));
    x = x.view({-1, CIFAR10_FC1_IN});
    x = torch::square(fc1(x));
    x = fc2(x);
    // Function parameters are implicitly moved on return; no std::move needed.
    return x;
  }

  // Plaintext reference of the tiled evaluation path: repack the trained
  // weights with the init_* helpers, then run
  // conv_nmm -> square -> fct1 -> square -> dropout -> fct2.
  torch::Tensor ptxt_forward_test(const torch::Tensor &x) {
    auto a = im2matrix(x, conv->weight.data(), 3);
    auto b = init_conv_kernel(conv->weight.data()); // [121,64,1,1] [25,64,1,1]
    auto c = init_fc1_kernel(fc1->weight.data());   // [32,128,128] [50,128,128]
    auto d = init_fc2_kernel(fc2->weight.data());   // [10,128]     [10,128]
    std::cout << conv->weight.data().sizes() << std::endl;
    std::cout << fc1->weight.data().sizes() << std::endl;
    std::cout << fc2->weight.data().sizes() << std::endl;
    std::cout << a.sizes() << std::endl;
    std::cout << b.sizes() << std::endl;
    std::cout << c.sizes() << std::endl;
    std::cout << d.sizes() << std::endl;
    auto X = x;
    X = conv_nmm(a, b);
    std::cout << "conv_nmm" << std::endl;
    X = sqractive(X);
    std::cout << "sqractive1" << std::endl;
    X = fct1(X, c);
    std::cout << "fct1" << std::endl;
    X = sqractive(X);
    std::cout << "sqractive2" << std::endl;
    X = torch::dropout(X, /*p=*/0.1, /*training=*/is_training());
    // BUGFIX: feed the accumulated activations X into the final layer. The
    // original passed the raw input x, discarding the whole pipeline above.
    X = fct2(X, d);
    std::cout << "fct2" << std::endl;
    // Keep the first 10 rows (class logits). Slice takes (start, stop)
    // arguments; the braced form Slice({0, 10}) does not match that ctor.
    X = X.index({torch::indexing::Slice(0, 10), "..."});
    X = X.transpose(0, 1);
    std::cout << "finish" << std::endl;
    return X;
  }

public:
  torch::nn::Conv2d conv;
  torch::nn::Dropout2d conv_drop; // default-constructed (p = 0.5)
  torch::nn::Linear fc1;
  torch::nn::Linear fc2;
};

// LoLa: Conv1 -> Square -> Conv2 -> Square -> FC
struct LoLaNet5 : public torch::nn::Module {
protected:
  // Conv1: 3 -> 83 channels, 8x8 kernel, stride 2, padding 1.
  constexpr static int64_t CONV1_CHANNEL_OUT = 83;
  constexpr static int64_t CONV1_KERNEL_SIZE = 8;
  constexpr static int64_t CONV1_STRIDE = 2;
  constexpr static int64_t CONV1_PADDING = 1;

  // Conv2: 83 -> 163 channels, 6x6 kernel, stride 2, no padding.
  constexpr static int64_t CONV2_CHANNEL_OUT = 163;
  constexpr static int64_t CONV2_KERNEL_SIZE = 6;
  constexpr static int64_t CONV2_STRIDE = 2;

  // FC: flattened 5x5 feature map times CONV2_CHANNEL_OUT channels.
  constexpr static int64_t CIFAR10_FC1_INH = 5;
  constexpr static int64_t CIFAR10_FC1_INW = CIFAR10_FC1_INH;
  constexpr static int64_t CIFAR10_FC1_IN =
      CIFAR10_FC1_INH * CIFAR10_FC1_INW * CONV2_CHANNEL_OUT;

public:
  constexpr static const char *pt_name = "cifar10_lola5.pt";

  // Every layer is bias-free; the squared activation stands in for a
  // homomorphic-friendly non-linearity.
  LoLaNet5()
      : conv1(torch::nn::Conv2dOptions(CIFAR10_CHANNEL_IN, CONV1_CHANNEL_OUT,
                                       CONV1_KERNEL_SIZE)
                  .stride(CONV1_STRIDE)
                  .padding(CONV1_PADDING)
                  .bias(false)),
        conv2(torch::nn::Conv2dOptions(CONV1_CHANNEL_OUT, CONV2_CHANNEL_OUT,
                                       CONV2_KERNEL_SIZE)
                  .stride(CONV2_STRIDE)
                  .bias(false)),
        fc(torch::nn::LinearOptions(CIFAR10_FC1_IN, CIFAR10_CLASS_NUM)
               .bias(false)) {
    register_module("conv1", conv1);
    register_module("conv2", conv2);
    register_module("fc", fc);
  }

  ~LoLaNet5() = default;

  // Two squared conv stages, then a single linear readout over the
  // flattened feature map.
  torch::Tensor forward(torch::Tensor x) {
    auto h = torch::square(conv1(x));
    h = torch::square(conv2(h));
    return fc(torch::flatten(h, 1));
  }

public:
  torch::nn::Conv2d conv1;
  torch::nn::Conv2d conv2;
  torch::nn::Linear fc;
};