#include <ATen/ATen.h>

#include <cuda.h>
#include <cuda_runtime.h>

#include <vector>

// CUDA: grid-stride loop over the flat index `i` in [0, n).
// Correct for any launch configuration, including a single block.
#define CUDA_KERNEL_LOOP(i, n) \
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
       i < (n); \
       i += blockDim.x * gridDim.x)

// CUDA: use 512 threads per block
const int CAFFE_CUDA_NUM_THREADS = 512;

// CUDA: number of blocks needed to cover N elements (ceiling division).
// The addition is done in 64-bit: `N + 511` overflows a 32-bit int when
// N is within 511 of INT_MAX, yielding a negative block count.
inline int CAFFE_GET_BLOCKS(const int N) {
  return static_cast<int>(
      (static_cast<long long>(N) + CAFFE_CUDA_NUM_THREADS - 1) /
      CAFFE_CUDA_NUM_THREADS);
}


// Depthwise "global" convolution forward: for each (n, c),
//   y[n, c] = sum_i x[n, c, i] * w[c, i] + b[c]
// where i runs over the spatial_dim flattened spatial positions.
// One thread handles one element of x (nThreads == batch_size * channels *
// spatial_dim); the per-(n, c) partial products are accumulated into y with
// atomics. y must be zero-initialized by the caller.
template <typename scalar_t>
__global__ void gdw_conv_cuda_forward_kernel(
    const scalar_t* __restrict__ x,
    size_t batch_size,
    size_t channels,
    size_t spatial_dim,
    const scalar_t* __restrict__ w,
    const scalar_t* __restrict__ b,
    scalar_t* __restrict__ y,
    size_t nThreads) {
    CUDA_KERNEL_LOOP(index, nThreads) {
        const size_t n = index / channels / spatial_dim;
        const size_t c = (index / spatial_dim) % channels;
        const size_t i = index % spatial_dim;
        // Fold the bias into thread i == 0's contribution so each thread
        // issues exactly one atomic.
        scalar_t val = x[index] * w[c * spatial_dim + i];
        if (i == 0) {
            val += b[c];
        }
        // Many threads (one per spatial position) accumulate into the same
        // y[n, c]; a plain += here would be a data race. Note atomicAdd on
        // double requires SM60+.
        atomicAdd(&y[n * channels + c], val);
    }
}


// Host wrapper for the forward kernel.
// x: (batch, channels, H, W) CUDA tensor; w: per-channel spatial weights
// with channels * H * W elements; b: per-channel bias with `channels`
// elements. Returns y of shape (batch, channels, 1, 1).
at::Tensor gdw_conv_cuda_forward(
    at::Tensor x,
    at::Tensor w,
    at::Tensor b) {
    const auto count = x.numel();
    const auto batch_size = x.size(0);
    const auto channels = x.size(1);
    const auto spatial_dim = x.size(2) * x.size(3);

    // Kernel accumulates with atomicAdd, so y must start zeroed.
    // at::zeros produces a contiguous tensor; write into it directly
    // (a `.contiguous()` temporary here would discard the results).
    auto y = at::zeros(x.type(), {batch_size, channels, 1, 1});

    AT_DISPATCH_FLOATING_TYPES(x.type(), "gdw_conv_cuda_forward", ([&] {
        gdw_conv_cuda_forward_kernel<scalar_t><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
            x.data<scalar_t>(),
            batch_size,
            channels,
            spatial_dim,
            w.data<scalar_t>(),
            b.data<scalar_t>(),
            y.data<scalar_t>(),
            count);  // nThreads: one thread per element of x
    }));
    // Kernel launches are asynchronous; surface launch-configuration errors
    // here instead of at some unrelated later call.
    AT_ASSERTM(cudaGetLastError() == cudaSuccess,
               "gdw_conv_cuda_forward kernel launch failed");
    return y;
}

// Backward pass, expressed with ATen broadcasting ops (no custom kernel).
// grad_y: (batch, channels, 1, 1); x: (batch, channels, H, W);
// w: per-channel spatial weights viewable as (1, channels, H, W).
// Returns {d_x, d_w, d_b}.
std::vector<at::Tensor> gdw_conv_cuda_backward(
    at::Tensor grad_y,
    at::Tensor x,
    at::Tensor w,
    at::Tensor b) {
    const auto batch_size = x.size(0);
    const auto in_channels = x.size(1);
    const auto height = x.size(2);
    const auto width = x.size(3);
    const auto out_channels = w.size(0);

    // dL/dx[n,c,h,w] = w[c,h,w] * grad_y[n,c] (broadcast over batch).
    auto d_x = w.view({1, out_channels, height, width}) * grad_y;
    // dL/dw[c,h,w] = sum_n x[n,c,h,w] * grad_y[n,c].
    auto d_w = at::sum(x * grad_y, 0).unsqueeze(1);
    // dL/db[c] = sum_n grad_y[n,c]: the forward adds b[c] exactly once per
    // sample, so the bias gradient is the batch SUM. Using mean here would
    // understate it by a factor of 1/batch_size and be inconsistent with
    // the sums used for d_w and d_x.
    auto d_b = at::sum(grad_y, /*dim=*/0).squeeze();

    return {d_x, d_w, d_b};
}
