#include <vector>
#include <torch/extension.h>

// Constant-memory parameters of the CUDA program (cached, broadcast on
// uniform reads across a warp).
// NOTE(review): these are compile-time initialized and never refreshed from
// the host via cudaMemcpyToSymbol — confirm 1211 matches the grids actually
// launched by the host wrappers.
__constant__ int num_blk = 1211;              // number of logical blocks the kernels process
__constant__ float clamp_min_bound = 1e-12f;  // 'f' suffix: avoid double->float narrowing

// Forward-kernel declaration (definition appears after the host wrappers).
// Params: (in) transposed weight matrix accessor, (out) result accessor.
template <typename scalar_t>
__global__ void costh_cuda_fwd_kernel(
    const torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits>, 
    torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits>
);

// Backward-kernel declaration (definition appears after the host wrappers).
// Params: (in) upstream gradient accessor, (in) transposed weight accessor,
// (out) weight-gradient accessor.
template <typename scalar_t>
__global__ void costh_cuda_bkwd_kernel(
    const torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits>, 
    const torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits>, 
    torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits>
);

// Host wrapper for the forward pass.
// Launches the normalization kernel over the transposed weights, then returns
// x_norm @ (normalized weights) as the cos-theta output.
// Shapes (assumed from usage): x_norm is (bs, feat), w is (feat, out) — TODO
// confirm against the Python-side caller.
torch::Tensor costh_cuda_fwd(const torch::Tensor x_norm, const torch::Tensor w)
{
    auto w_trans = torch::permute(w, {1, 0}).contiguous();
    const auto thd_per_blk = w_trans.size(1);
    const auto blk_per_grid = w_trans.size(0);
    auto costh_out_trans = torch::zeros_like(w_trans);

    // The kernel assumes blockDim.x == 128 and a static 768-element shared
    // buffer per block; reject inputs that would violate either assumption
    // (thd_per_blk / 128 == 0 would otherwise produce an invalid blockDim.y).
    TORCH_CHECK(thd_per_blk % 128 == 0 && thd_per_blk >= 128 && thd_per_blk <= 768,
                "costh_cuda_fwd: w_trans.size(1) must be a multiple of 128 in [128, 768], got ",
                thd_per_blk);
    // NOTE(review): the kernel guards with the __constant__ num_blk (1211),
    // not blk_per_grid — confirm the two agree for the intended weight sizes.

    const dim3 blk_layout(128, thd_per_blk / 128);
    const dim3 grid_layout(256, (blk_per_grid + 256 - 1) / 256);  // ceil-div over a 2D grid

    // template CUDA kernel func calling
    // Using static shared memory (dynamic one conflicts with template)
    // .scalar_type() replaces the deprecated .type() dispatch key.
    AT_DISPATCH_FLOATING_TYPES(w_trans.scalar_type(), "costh_cuda_forward_kernel", ([&] {
        costh_cuda_fwd_kernel<scalar_t><<<grid_layout, blk_layout>>>(
            w_trans.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), 
            costh_out_trans.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>()
        );
    }));
    // Kernel launches do not return errors directly; surface bad launch
    // configurations here instead of failing mysteriously at the next sync.
    const cudaError_t launch_err = cudaGetLastError();
    TORCH_CHECK(launch_err == cudaSuccess,
                "costh_cuda_fwd kernel launch failed: ", cudaGetErrorString(launch_err));

    // Fix: the original returned the undefined name `costh_out`.
    return torch::mm(x_norm, torch::permute(costh_out_trans, {1, 0}).contiguous());
}

// Host wrapper for the backward pass (stub — kernel launch not implemented).
// TODO(review): implement the backward launch mirroring costh_cuda_fwd and
// return {grad_x_norm, grad_w}. The original body fell off the end of a
// non-void function, which is undefined behavior; return an empty vector
// explicitly until the gradients are computed.
std::vector<torch::Tensor> costh_cuda_bkwd(const torch::Tensor grad_mm_out, const torch::Tensor x_norm, 
                                           const torch::Tensor w)
{
    return {};
}

// Forward kernel: each logical block loads one row of `w_trans` into shared
// memory, squares it, and (once the reduction stubs are filled in) reduces to
// the row's squared L2 norm, inverts it, broadcasts it, and rescales.
// Expected launch: 2D grid; 2D block with blockDim.x == 128 and
// blockDim.x * blockDim.y <= 768 (size of the static shared buffer below).
template <typename scalar_t>
__global__ void costh_cuda_fwd_kernel(
    const torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> w_trans, 
    torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> costh_out
)
{
    // Flatten the 2D grid index. All threads of a block share blk_idx, so the
    // guard below is uniform per block and the __syncthreads() calls inside it
    // are reached by every thread of the block (no barrier divergence).
    const int blk_idx = blockIdx.x + blockIdx.y * gridDim.x;
    if (blk_idx < num_blk)
    {
        const int thd_idx = threadIdx.x + threadIdx.y * blockDim.x;
        // (1) static shared memory array declaration & assignment
        __shared__ scalar_t shared_mem[768];
        shared_mem[thd_idx] = w_trans[blk_idx][thd_idx];

        // (2) squaring of `norm` operation in register
        scalar_t val = shared_mem[thd_idx];
        val *= val;
        shared_mem[thd_idx] = val;
        __syncthreads();

        // (3) reduction-sum of the shared memory-1
        // the innermost dimension of a thread block
        // TODO(review): reduction body was empty in the original — implement
        // pairwise sums with a __syncthreads() per step.
        for (int offset = blockDim.x / 2; offset >= 1; offset >>= 1)
        {}

        // (4) reduction-sum of the shared memory-2
        // the outermost dimension of a thread block
        // TODO(review): also an empty stub in the original.
        for (int offset = blockDim.y / 2; offset >= 1; offset >>= 1)
        {
            if (offset == 1) // reduction-sum of the first three elements
            {}
            else  // normal reduction-sum
            {}
        }
        // (5) broadcast the first element in the shared memory to all other
        // positions. All lanes reading shared_mem[0] is a broadcast access and
        // causes no bank conflict.
        if (thd_idx == 0)
        {
            shared_mem[thd_idx] = 1 / shared_mem[thd_idx];
        }
        __syncthreads();  // fix: make thread 0's write visible before other threads read shared_mem[0]
        if (thd_idx != 0)
        {
            shared_mem[thd_idx] = shared_mem[0];  // broadcast mechanism, no bank conflict
        }
        // NOTE(review): `val` holds the SQUARED element here (step 2 mutated
        // it), so this computes w_i^2 / sum — confirm a sqrt / un-squared `val`
        // isn't intended for a true L2 normalization.
        shared_mem[thd_idx] *= val;  // last step of norm operator
        if (shared_mem[thd_idx] <= clamp_min_bound)
        {
            shared_mem[thd_idx] = clamp_min_bound;  // clamp operator
        }
        // TODO(review): `costh_out` is never written — the result stays in
        // shared memory. A final store such as
        // costh_out[blk_idx][thd_idx] = shared_mem[thd_idx]; appears missing.
    }
}

// Backward kernel (stub): intended to compute grad_w_trans from the upstream
// gradient and the transposed weights. Body not implemented in the original.
// Fix: removed the trailing comma after the last parameter (compile error).
template <typename scalar_t>
__global__ void costh_cuda_bkwd_kernel(
    const torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> grad_costh_out_trans, 
    const torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> w_trans, 
    torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> grad_w_trans
)
{
    // TODO(review): backward computation not implemented yet.
}
