#include <torch/extension.h>

// __constant__ int const_sz_for_test = 1024;
// Forward declaration of the device kernel defined below, so the host
// launcher can reference it.  Copies `in_tensor` into `out_tensor` through
// a block-local shared-memory staging buffer (one block per row, one
// flattened thread index per column).
template <typename scalar_t>
__global__ void my_cuda_kernel(
    torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> in_tensor,
    torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> out_tensor
);

// Round-trips a 2-D CUDA floating-point tensor through device shared memory
// and returns the copy.  Launch layout: one block per row (grid 4 x dim0/4),
// one flattened thread per column (block 128 x dim1/128), e.g. shape (8, 256).
//
// Preconditions (checked): input is CUDA-resident, dim0 % 4 == 0,
// dim1 % 128 == 0, and dim1 <= 256 (the kernel's static shared-memory size).
torch::Tensor my_cuda_test(torch::Tensor in_tensor_2)
{
    const auto dim0 = in_tensor_2.size(0);
    const auto dim1 = in_tensor_2.size(1);

    TORCH_CHECK(in_tensor_2.is_cuda(), "my_cuda_test expects a CUDA tensor");
    // The integer divisions below must tile the tensor exactly, otherwise
    // trailing rows/columns are silently left unprocessed.
    TORCH_CHECK(dim0 % 4 == 0, "dim0 must be a multiple of 4, got ", dim0);
    TORCH_CHECK(dim1 % 128 == 0, "dim1 must be a multiple of 128, got ", dim1);
    // The kernel stages each row in a static __shared__ buffer of 256 elements.
    TORCH_CHECK(dim1 <= 256, "dim1 must be at most 256 (shared-memory size), got ", dim1);

    // 2-D block of dim1 threads and 2-D grid of dim0 blocks.
    const dim3 my_blk(128, dim1 / 128);
    const dim3 my_grid(4, dim0 / 4);

    // Allocate the output directly on the input's device with the input's
    // dtype.  The previous torch::randn({...}).to(device) round-tripped
    // through the CPU and was always float32, which broke the double path
    // of the dispatch below (accessor dtype mismatch).
    auto my_out_cuda_tensor = torch::empty({dim0, dim1}, in_tensor_2.options());

    // scalar_type() replaces the deprecated Tensor::type() for dispatch.
    AT_DISPATCH_FLOATING_TYPES(in_tensor_2.scalar_type(), "CUDA exercise", ([&] {
        my_cuda_kernel<scalar_t><<<my_grid, my_blk>>>(
            in_tensor_2.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(),
            my_out_cuda_tensor.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>()
        );
    }));
    // Kernel launches do not return errors; surface launch-configuration
    // errors immediately (execution errors surface at the next sync).
    const cudaError_t launch_err = cudaGetLastError();
    TORCH_CHECK(launch_err == cudaSuccess,
                "my_cuda_kernel launch failed: ", cudaGetErrorString(launch_err));
    return my_out_cuda_tensor;
}

// Copies one tensor row per block into the output via a shared-memory
// staging buffer.
//
// Expected launch (see my_cuda_test): flattened block index == row,
// flattened thread index == column, blockDim.x * blockDim.y <= 256.
// Shared memory: 256 * sizeof(scalar_t) bytes, statically declared.
template <typename scalar_t>
__global__ void my_cuda_kernel(
    torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> in_tensor,
    torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> my_out_tensor
)
{
    // Flatten the 2-D thread/block coordinates into column/row offsets.
    const int thd_local_idx = threadIdx.x + threadIdx.y * blockDim.x;
    const int blk_idx = blockIdx.x + blockIdx.y * gridDim.x;

    // Static shared memory needs a compile-time constant size; 256 matches
    // the largest block my_cuda_test launches.  (A runtime-sized buffer
    // would need `extern __shared__` plus a dynamic-size launch argument.)
    __shared__ scalar_t shared_mem[256];

    // Bounds guard: protects against launch configurations that overrun the
    // tensor or the shared buffer.  Computed once so the barrier below can
    // stay outside the divergent branch.
    const bool in_range = blk_idx < in_tensor.size(0)
                       && thd_local_idx < in_tensor.size(1)
                       && thd_local_idx < 256;

    if (in_range)
    {
        shared_mem[thd_local_idx] = in_tensor[blk_idx][thd_local_idx];
    }
    // Barrier between the shared-memory write and read.  It must be reached
    // by ALL threads in the block, hence it is not inside the guard.
    __syncthreads();
    if (in_range)
    {
        my_out_tensor[blk_idx][thd_local_idx] = shared_mem[thd_local_idx];
    }
    // Removed: unused `blk_sz` local and the unused `sqrtf(global_val)`
    // experiment (sqrtf would also silently narrow when scalar_t is double;
    // use ::sqrt or sycl-style overloads for type-generic math).
}

