#include <THC/THC.h>
#include <device_launch_parameters.h>
#include <torch/extension.h>
#include <torch/types.h>

#include <iostream>
#include <vector>

#include "cpp_helper.h"

namespace rolfma {
// Gathers kernel-sized neighborhoods of a dense CHW image into a column
// matrix: row r of data_col_ holds, for every spatial position, the r-th tap
// of the flattened (channel, kh, kw) window.  One thread per position (i, j).
// Out-of-bounds taps (implicit zero padding) are skipped, leaving the
// caller's pre-zeroed entry untouched.
// NOTE(review): height_col is unused, and the write offset (i * width + j)
// assumes width_col == height * width (a "same"-size output); callers must
// guarantee this or writes go out of bounds — confirm at each call site.
template <typename scalar_t>
__global__ void im2col_cuda_kernel(const scalar_t* __restrict__ data_im_,
                                   const int height, const int width,
                                   const int ksize_h, const int ksize_w,
                                   const int channels, const int height_col,
                                   const int width_col,
                                   scalar_t* __restrict__ data_col_) {
  CUDA_KERNEL_LOOP(index, height * width) {
    int64_t i = index / width;  // spatial row of this thread's position
    int64_t j = index % width;  // spatial column of this thread's position

    int64_t cnt = 0;  // running row index into the column matrix
    // Iterate over channels.
    for (int64_t k = 0; k < channels; k++) {
      // Iterate over the kernel window centered on (i, j).
      for (int64_t m = i - ksize_h / 2; m < i + ksize_h / 2 + 1; m++) {
        for (int64_t n = j - ksize_w / 2; n < j + ksize_w / 2 + 1; n++) {
          if (m >= 0 && m < height && n >= 0 && n < width) {
            data_col_[(cnt++) * width_col + (i * width + j)] =
                data_im_[(k * height + m) * width + n];

          } else
            cnt++;  // padding tap: keep the zero already in data_col_
        }
      }
    }
  }
}

// im2col on the GPU for a single (C, H, W) image.
// Supported configuration (enforced below): stride 1, padding 1, square odd
// kernel whose output matches the input size (i.e. kernel_size == 3).
// Returns a (C*k*k) x (H*W) column matrix on the CUDA device with the same
// dtype as `self`.
TORCH_API Tensor im2col_cuda(const Tensor& self, IntArrayRef kernel_size,
                             IntArrayRef dilation, IntArrayRef padding,
                             IntArrayRef stride) {
  TORCH_CHECK(self.dim() == 3);
  TORCH_CHECK(dilation == torch::IntArrayRef({1, 1}));
  TORCH_CHECK(padding == torch::IntArrayRef({1, 1}));
  TORCH_CHECK(stride == torch::IntArrayRef({1, 1}));
  TORCH_CHECK(kernel_size[0] == kernel_size[1]);
  TORCH_CHECK(kernel_size[0] % 2 == 1);

  int64_t c = self.size(0);
  int64_t h = self.size(1);
  int64_t w = self.size(2);
  int64_t k_s = kernel_size[0];

  int64_t o_h = (h + 2 * 1 - k_s) / 1 + 1;
  int64_t o_w = (w + 2 * 1 - k_s) / 1 + 1;

  // The kernel writes column (i * w + j) for every input position, i.e. it
  // assumes a "same"-size output.  With pad=1/stride=1 that only holds for a
  // 3x3 kernel; reject anything else instead of writing out of bounds.
  TORCH_CHECK(o_h == h && o_w == w,
              "im2col_cuda: only same-size output is supported "
              "(kernel_size must be 3 with padding 1 and stride 1)");

  int64_t size_1 = c * k_s * k_s;
  int64_t size_2 = o_h * o_w;

  // Allocate with self's dtype: a default-dtype tensor would make
  // data_ptr<scalar_t>() throw when dispatch picks double.
  torch::Tensor cols = torch::zeros(torch::IntArrayRef({size_1, size_2}),
                                    self.options().device(torch::kCUDA));
  // The kernel indexes raw dense CHW memory, so force contiguity.
  torch::Tensor im = self.contiguous().cuda();

  int block_size = 32;

  AT_DISPATCH_FLOATING_TYPES(
      self.scalar_type(), "im2col_cuda", ([&] {
        im2col_cuda_kernel<<<(h * w / block_size) + 1, block_size>>>(
            im.data_ptr<scalar_t>(), h, w, k_s, k_s, c, size_1, size_2,
            cols.data_ptr<scalar_t>());
      }));
  // Launches are asynchronous; surface configuration errors eagerly.
  cudaError_t err = cudaGetLastError();
  TORCH_CHECK(err == cudaSuccess,
              "im2col_cuda launch failed: ", cudaGetErrorString(err));

  return cols;
}

// Scatters a (C*k*k) x (H*W) column matrix back into a dense CHW image,
// accumulating overlapping contributions (the adjoint of im2col_cuda_kernel).
// One thread per spatial position (i, j).
//
// Fixes vs. the original:
//  * data_im_ is the OUTPUT here and data_col_ the input, so the const
//    qualifiers were inverted (writing through a const pointer).
//  * Neighboring positions write to the same image pixels, so the plain
//    `+=` was a data race; accumulation must be atomic.
// NOTE: atomicAdd on double requires compute capability 6.0+.
template <typename scalar_t>
__global__ void col2im_cuda_kernel(scalar_t* __restrict__ data_im_,
                                   const int height, const int width,
                                   const int ksize_h, const int ksize_w,
                                   const int channels, const int height_col,
                                   const int width_col,
                                   const scalar_t* __restrict__ data_col_) {
  CUDA_KERNEL_LOOP(index, height * width) {
    int64_t i = index / width;
    int64_t j = index % width;

    int64_t cnt = 0;  // running row index into the column matrix
    // Iterate over channels.
    for (int64_t k = 0; k < channels; k++) {
      // Iterate over the kernel window centered on (i, j).
      for (int64_t m = i - ksize_h / 2; m < i + ksize_h / 2 + 1; m++) {
        for (int64_t n = j - ksize_w / 2; n < j + ksize_w / 2 + 1; n++) {
          if (m >= 0 && m < height && n >= 0 && n < width) {
            // Overlapping windows from different threads touch the same
            // pixel: use an atomic read-modify-write.
            atomicAdd(&data_im_[(k * height + m) * width + n],
                      data_col_[cnt * width_col + (i * width + j)]);
            cnt++;
          } else
            cnt++;  // padding tap: contributes nothing
        }
      }
    }
  }
}
// col2im (the adjoint of im2col) on the GPU: folds a (C*k*k) x (H*W) column
// matrix into a (C, H, W) image, summing overlapping contributions.
// Supported configuration: stride 1, padding 1, square odd kernel.
TORCH_API Tensor col2im_cuda(const Tensor& self, IntArrayRef output_size,
                             IntArrayRef kernel_size, IntArrayRef dilation,
                             IntArrayRef padding, IntArrayRef stride) {
  TORCH_CHECK(self.dim() == 2);
  TORCH_CHECK(dilation == torch::IntArrayRef({1, 1}));
  TORCH_CHECK(padding == torch::IntArrayRef({1, 1}));
  TORCH_CHECK(stride == torch::IntArrayRef({1, 1}));
  TORCH_CHECK(kernel_size[0] == kernel_size[1]);
  TORCH_CHECK(kernel_size[0] % 2 == 1);

  int64_t output_height = output_size[0];
  int64_t output_width = output_size[1];
  int64_t channel_num = self.size(0) / (kernel_size[0] * kernel_size[1]);
  TORCH_CHECK(self.size(1) == output_height * output_width);
  TORCH_CHECK(self.size(0) == channel_num * kernel_size[0] * kernel_size[1]);

  // Match self's dtype so data_ptr<scalar_t>() is valid for double inputs.
  torch::Tensor output = torch::zeros(
      torch::IntArrayRef({channel_num, output_height, output_width}),
      self.options().device(torch::kCUDA));
  // The kernel reads raw dense memory, so force a contiguous CUDA view.
  torch::Tensor cols = self.contiguous().cuda();

  int64_t k_s = kernel_size[0];
  int block_size = 32;
  int64_t size_1 = channel_num * k_s * k_s;
  int64_t size_2 = output_height * output_width;

  AT_DISPATCH_FLOATING_TYPES(
      self.scalar_type(), "col2im_cuda", ([&] {
        // BUGFIX: the original launched im2col_cuda_kernel here, which reads
        // the freshly zeroed output and overwrites the input columns.
        col2im_cuda_kernel<<<(output_height * output_width / block_size) + 1,
                             block_size>>>(
            output.data_ptr<scalar_t>(), output_height, output_width, k_s, k_s,
            channel_num, size_1, size_2, cols.data_ptr<scalar_t>());
      }));
  // Launches are asynchronous; surface configuration errors eagerly.
  cudaError_t err = cudaGetLastError();
  TORCH_CHECK(err == cudaSuccess,
              "col2im_cuda launch failed: ", cudaGetErrorString(err));

  return output;
}

// CPU reference implementation of im2col for a single (C, H, W) image.
// Returns a (C*k*k) x (H*W) column matrix; taps that fall outside the image
// (implicit zero padding) are left as zero.
// Supported configuration: stride 1, padding 1, square odd kernel.
TORCH_API Tensor im2col(const Tensor& self, IntArrayRef kernel_size,
                        IntArrayRef dilation, IntArrayRef padding,
                        IntArrayRef stride) {
  TORCH_CHECK(self.dim() == 3);
  TORCH_CHECK(dilation == torch::IntArrayRef({1, 1}));
  TORCH_CHECK(padding == torch::IntArrayRef({1, 1}));
  TORCH_CHECK(stride == torch::IntArrayRef({1, 1}));
  TORCH_CHECK(kernel_size[0] == kernel_size[1]);
  TORCH_CHECK(kernel_size[0] % 2 == 1);

  int64_t c = self.size(0);
  int64_t h = self.size(1);
  int64_t w = self.size(2);
  int64_t k_s = kernel_size[0];

  int64_t size_1 = c * k_s * k_s;
  int64_t size_2 = h * w;

  // Allocate with self's dtype: the original default-dtype zeros silently
  // downcast non-float inputs.
  torch::Tensor cols =
      torch::zeros(torch::IntArrayRef({size_1, size_2}), self.options());

  // Iterate over every spatial position (i, j).
  for (int64_t i = 0; i < h; i++) {
    for (int64_t j = 0; j < w; j++) {
      int64_t cnt = 0;  // row index into the column matrix
      // Iterate over channels.
      for (int64_t k = 0; k < c; k++) {
        // Iterate over the kernel window centered on (i, j).
        for (int64_t m = i - k_s / 2; m < i + k_s / 2 + 1; m++) {
          for (int64_t n = j - k_s / 2; n < j + k_s / 2 + 1; n++) {
            if (m >= 0 && m < h && n >= 0 && n < w)
              cols[cnt++][i * w + j] = self[k][m][n];
            else
              cnt++;  // padding tap: keep the zero in place
          }
        }
      }
    }
  }
  return cols;
}

// CPU reference implementation of col2im (the adjoint of im2col): folds a
// (C*k*k) x (H*W) column matrix into a (C, H, W) image, summing overlapping
// contributions.  Supported configuration: stride 1, padding 1, square odd
// kernel.
TORCH_API Tensor col2im(const Tensor& self, IntArrayRef output_size,
                        IntArrayRef kernel_size, IntArrayRef dilation,
                        IntArrayRef padding, IntArrayRef stride) {
  TORCH_CHECK(self.dim() == 2);
  TORCH_CHECK(dilation == torch::IntArrayRef({1, 1}));
  TORCH_CHECK(padding == torch::IntArrayRef({1, 1}));
  TORCH_CHECK(stride == torch::IntArrayRef({1, 1}));
  TORCH_CHECK(kernel_size[0] == kernel_size[1]);
  TORCH_CHECK(kernel_size[0] % 2 == 1);

  int64_t output_height = output_size[0];
  int64_t output_width = output_size[1];
  int64_t channel_num = self.size(0) / (kernel_size[0] * kernel_size[1]);
  TORCH_CHECK(self.size(1) == output_height * output_width);
  TORCH_CHECK(self.size(0) == channel_num * kernel_size[0] * kernel_size[1]);

  // Allocate with self's dtype (default-dtype zeros would downcast doubles).
  // The unused per-pixel `alpha` counter tensor from the original has been
  // removed — it was filled element-by-element and never read.
  torch::Tensor output = torch::zeros(
      torch::IntArrayRef({channel_num, output_height, output_width}),
      self.options());

  int64_t k_s = kernel_size[0];
  // Iterate over every spatial position (i, j).
  for (int64_t i = 0; i < output_height; i++) {
    for (int64_t j = 0; j < output_width; j++) {
      int64_t cnt = 0;  // row index into the column matrix
      // Iterate over channels.
      for (int64_t k = 0; k < channel_num; k++) {
        // Iterate over the kernel window centered on (i, j).
        for (int64_t m = i - k_s / 2; m < i + k_s / 2 + 1; m++) {
          for (int64_t n = j - k_s / 2; n < j + k_s / 2 + 1; n++) {
            if (m >= 0 && m < output_height && n >= 0 && n < output_width) {
              output[k][m][n] += self[cnt][i * output_width + j];
              cnt++;
            } else
              cnt++;  // padding tap: contributes nothing
          }
        }
      }
    }
  }
  return output;
}

}  // namespace rolfma

// 2-D convolution forward pass via im2col + matrix multiply.
//   input:   (N, C_in, H, W) CUDA tensor
//   weights: (C_out, C_in * kH * kW when flattened) CUDA tensor
//   bias:    per-output-channel bias, added only when is_bias is true
// Returns {output} with output of shape (N, C_out, H_out, W_out).
std::vector<torch::Tensor> conv_forward(torch::Tensor input,
                                        torch::Tensor weights,
                                        torch::Tensor bias, int64_t kW,
                                        int64_t kH, int64_t dW, int64_t dH,
                                        int64_t padW, int64_t padH,
                                        bool is_bias) {
  CHECK_CUDA_INPUT(input);
  CHECK_CUDA_INPUT(weights);
  CHECK_CUDA_INPUT(bias);

  int64_t batch_size = input.size(0);
  int64_t nInputPlane = input.size(1);
  int64_t inputHeight = input.size(2);
  int64_t inputWidth = input.size(3);

  int64_t nOutputPlane = weights.size(0);
  int64_t outputHeight = (inputHeight + 2 * padH - kH) / dH + 1;
  int64_t outputWidth = (inputWidth + 2 * padW - kW) / dW + 1;

  // Allocate with the input's dtype/device; the original default-dtype zeros
  // followed by .cuda() always produced float tensors regardless of input.
  torch::Tensor output = torch::zeros(
      torch::IntArrayRef({batch_size, nOutputPlane, outputHeight, outputWidth}),
      input.options());
  torch::Tensor ones = torch::ones(
      torch::IntArrayRef({1, outputHeight * outputWidth}), input.options());

  // Flatten parameters for matrix multiplication.  (Tensors are already on
  // the CUDA device per the checks above.)
  weights = weights.reshape(
      torch::IntArrayRef({nOutputPlane, nInputPlane * kW * kH}));
  bias = bias.reshape(torch::IntArrayRef({nOutputPlane, 1}));

  for (int elt = 0; elt < batch_size; elt++) {
    torch::Tensor input_n = input[elt];

    if (is_bias) {
      // Broadcast the per-channel bias over every output position.
      output[elt].add_(bias.mm(ones).reshape(torch::IntArrayRef(
                           {nOutputPlane, outputHeight, outputWidth})),
                       1);
    }
    // columns: (C_in * kW * kH) x (H_out * W_out)
    torch::Tensor columns = rolfma::im2col_cuda(
        input_n, torch::IntArrayRef({kW, kH}), torch::IntArrayRef({1, 1}),
        torch::IntArrayRef({padW, padH}), torch::IntArrayRef({dW, dH}));
    // conv(weights, columns) reshaped back to a (C_out, H_out, W_out) image.
    output[elt].add_(
        weights.mm(columns).reshape(
            torch::IntArrayRef({nOutputPlane, outputHeight, outputWidth})),
        1);
  }
  return {output};
}

// Gradient of the convolution w.r.t. its input:
//   gradInput[b] = col2im(weights^T * gradOutput[b]).
// Shapes: input (N, C_in, H, W); gradOutput (N, C_out, H_out, W_out);
// weights flattenable to (C_out, C_in * kW * kH).
torch::Tensor backward_gradInput(torch::Tensor input, torch::Tensor gradOutput,
                                 torch::Tensor weights, int64_t kW, int64_t kH,
                                 int64_t dW, int64_t dH, int64_t padW,
                                 int64_t padH) {
  int64_t batch_size = input.size(0);
  int64_t nInputPlane = input.size(1);
  int64_t inputHeight = input.size(2);
  int64_t inputWidth = input.size(3);

  int64_t nOutputPlane = gradOutput.size(1);
  int64_t outputHeight = gradOutput.size(2);
  int64_t outputWidth = gradOutput.size(3);

  // Allocate with the input's dtype/device (the original default-dtype zeros
  // always produced float regardless of the input dtype).  The original also
  // kept an unused clone of `weights` — dropped.
  torch::Tensor gradInput = torch::zeros(
      torch::IntArrayRef({batch_size, nInputPlane, inputHeight, inputWidth}),
      input.options());

  // Reshape weights to (C_in * kW * kH) x C_out for the transposed product.
  weights =
      weights.reshape(torch::IntArrayRef({nOutputPlane, nInputPlane * kW * kH}))
          .t();

  for (int elt = 0; elt < batch_size; elt++) {
    // gradOutput_n: C_out x (H_out * W_out)
    torch::Tensor gradOutput_n = gradOutput[elt].reshape(
        torch::IntArrayRef({nOutputPlane, outputHeight * outputWidth}));

    // gradColumns: (C_in * kW * kH) x (H_out * W_out)
    torch::Tensor gradColumns = weights.mm(gradOutput_n);

    torch::Tensor im = rolfma::col2im_cuda(
        gradColumns,
        /*output_size=*/torch::IntArrayRef({inputHeight, inputWidth}),
        /*kernel_size=*/torch::IntArrayRef({kW, kH}),
        /*dilation=*/torch::IntArrayRef({1, 1}),
        /*padding=*/torch::IntArrayRef({padW, padH}),
        /*stride=*/torch::IntArrayRef({dW, dH}));
    gradInput[elt].add_(im, 1);
  }
  return gradInput;
}

// Gradients of the convolution w.r.t. its parameters, accumulated over the
// batch:
//   gradWeights += gradOutput[b] * im2col(input[b])^T
//   gradBias    += row-sums of gradOutput[b]            (only when is_bias)
// Returns {gradWeights, gradBias}; gradBias stays zero when is_bias is false.
std::vector<torch::Tensor> backward_gradParameters(
    torch::Tensor input, torch::Tensor gradOutput, torch::Tensor weights,
    int64_t kW, int64_t kH, int64_t dW, int64_t dH, int64_t padW, int64_t padH,
    bool is_bias) {
  int64_t batch_size = input.size(0);
  int64_t nInputPlane = input.size(1);

  int64_t nOutputPlane = gradOutput.size(1);
  int64_t outputHeight = gradOutput.size(2);
  int64_t outputWidth = gradOutput.size(3);

  // Allocate accumulators with the weights' dtype/device (the original
  // default-dtype zeros always produced float regardless of parameter dtype).
  torch::Tensor gradWeights = torch::zeros_like(weights);
  torch::Tensor gradBias =
      torch::zeros(torch::IntArrayRef({nOutputPlane}), weights.options());
  torch::Tensor ones = torch::ones(
      torch::IntArrayRef({outputHeight * outputWidth, 1}), weights.options());

  for (int elt = 0; elt < batch_size; elt++) {
    // gradOutput_n: C_out x (H_out * W_out)
    torch::Tensor gradOutput_n = gradOutput[elt].reshape(
        torch::IntArrayRef({nOutputPlane, outputHeight * outputWidth}));

    // columns^T: (H_out * W_out) x (C_in * kW * kH)
    torch::Tensor columns =
        rolfma::im2col_cuda(input[elt],
                            /*kernel_size=*/torch::IntArrayRef({kW, kH}),
                            /*dilation=*/torch::IntArrayRef({1, 1}),
                            /*padding=*/torch::IntArrayRef({padW, padH}),
                            /*stride=*/torch::IntArrayRef({dW, dH}))
            .t();
    gradWeights.add_(gradOutput_n.mm(columns).reshape(torch::IntArrayRef(
                         {nOutputPlane, nInputPlane, kW, kH})),
                     1);

    if (is_bias) {
      // Sum gradOutput over all spatial positions per output channel.
      gradBias.add_(
          gradOutput_n.mm(ones).reshape(torch::IntArrayRef({nOutputPlane})), 1);
    }
  }
  return {gradWeights, gradBias};
}

// Full convolution backward pass.  Validates the CUDA inputs, then delegates
// to backward_gradInput (gradient w.r.t. the input image) and
// backward_gradParameters (gradients w.r.t. weights and bias).
// Returns {gradInput, gradWeights, gradBias}.
std::vector<torch::Tensor> conv_backward(torch::Tensor input,
                                         torch::Tensor gradOutput,
                                         torch::Tensor weights, int64_t kW,
                                         int64_t kH, int64_t dW, int64_t dH,
                                         int64_t padW, int64_t padH,
                                         bool is_bias) {
  CHECK_CUDA_INPUT(gradOutput);
  CHECK_CUDA_INPUT(weights);
  CHECK_CUDA_INPUT(input);

  torch::Tensor inputGrad =
      backward_gradInput(input, gradOutput, weights, kW, kH, dW, dH, padW, padH)
          .cuda();

  std::vector<torch::Tensor> paramGrads = backward_gradParameters(
      input, gradOutput, weights, kW, kH, dW, dH, padW, padH, is_bias);

  return {inputGrad, paramGrads[0].cuda(), paramGrads[1].cuda()};
}

// Python bindings: expose the convolution forward/backward entry points to
// the torch extension module.
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  m.def("forward", &conv_forward, "conv forward (CUDA)");
  m.def("backward", &conv_backward, "conv backward (CUDA)");
}
