#include <torch/extension.h>

#include <c10/cuda/CUDAException.h>

#include <vector>

// constant memory of CUDA program
// `num_blk`: number of weight columns that do real work; both kernels skip
// any thread block whose flat index is >= num_blk.  Initialized to 1211 —
// presumably the class count of the original model; NOTE(review): nothing in
// this file syncs it with w.size(1), confirm the host keeps it consistent
// (e.g. via cudaMemcpyToSymbol) when the weight width differs.
__constant__ int num_blk = 1211;
// Lower bound applied to each column norm before dividing (see the forward
// kernel): norms below this floor are replaced by it, avoiding div-by-zero.
__constant__ float clamp_min_bound = 1e-12;

// Forward kernel (defined below): one thread block L2-normalizes one row of
// the transposed weight.  Args: (in) w_trans [cols x rows], (out) per-row
// norms, (out) normalized rows.  Declared here so the host wrappers above
// the definition can launch it.
template <typename scalar_t>
__global__ void costh_cuda_fwd_kernel(
    const torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits>, 
    torch::PackedTensorAccessor32<scalar_t, 1, torch::RestrictPtrTraits>, 
    torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits>
);

// Backward kernel (defined below): gradient of the per-row normalization.
// Args: (in) grad wrt normalized rows, (in) w_trans, (in) per-row norms from
// the forward pass, (out) grad wrt w_trans.
template <typename scalar_t>
__global__ void costh_cuda_bkwd_kernel(
    const torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits>, 
    const torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits>, 
    const torch::PackedTensorAccessor32<scalar_t, 1, torch::RestrictPtrTraits>, 
    torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits>
);

// Forward pass: L2-normalize each column of `w`, then multiply by `x_norm`.
// Returns {x_norm @ costh_out, normalized weights, per-column norms}; the
// latter two are cached for the backward pass.
std::vector<torch::Tensor> costh_cuda_fwd(const torch::Tensor x_norm, const torch::Tensor w)
{
    // Work on the transpose so each thread block reads one contiguous row.
    const auto w_trans = torch::transpose(w, 0, 1).contiguous();
    const auto thd_per_blk = w_trans.size(1);   // elements reduced per block
    const auto blk_per_grid = w_trans.size(0);  // one block per weight column

    // The kernel uses a fixed 768-element static shared buffer and has no
    // per-thread bounds guard, so the row length must fit and be covered
    // exactly by whole 128-thread rows of the block.
    TORCH_CHECK(thd_per_blk <= 768,
                "costh_cuda_fwd: w.size(0) must be <= 768, got ", thd_per_blk);
    TORCH_CHECK(thd_per_blk % 128 == 0,
                "costh_cuda_fwd: w.size(0) must be a multiple of 128, got ", thd_per_blk);

    // Allocate outputs on the weight's device with the weight's dtype.
    // (A bare torch::zeros(n) is always float32 and broke the double path of
    // the AT_DISPATCH below when the accessor expected a double tensor.)
    auto norm_vals = torch::zeros({blk_per_grid}, w_trans.options());
    auto costh_out_trans = torch::zeros_like(w_trans);

    const dim3 blk_layout(128, (thd_per_blk + 128 - 1) / 128);
    const dim3 grid_layout(256, (blk_per_grid + 256 - 1) / 256);

    // Publish the actual column count to constant memory instead of relying
    // on the hard-coded initializer, so any weight width works.
    const int n_blk = static_cast<int>(blk_per_grid);
    C10_CUDA_CHECK(cudaMemcpyToSymbol(num_blk, &n_blk, sizeof(int)));

    // template CUDA kernel func calling
    // Using static shared memory (dynamic one conflicts with template)
    AT_DISPATCH_FLOATING_TYPES(w_trans.scalar_type(), "costh_cuda_forward_kernel", ([&] {
        costh_cuda_fwd_kernel<scalar_t><<<grid_layout, blk_layout>>>(
            w_trans.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), 
            norm_vals.packed_accessor32<scalar_t, 1, torch::RestrictPtrTraits>(), 
            costh_out_trans.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>()
        );
    }));
    C10_CUDA_KERNEL_LAUNCH_CHECK();  // surface bad-config / async kernel errors

    auto costh_out = torch::transpose(costh_out_trans, 0, 1).contiguous();

    return {torch::mm(x_norm, costh_out), costh_out, norm_vals};
}

// Backward pass of costh_cuda_fwd.  Returns {grad wrt x_norm, grad wrt w}.
// `w` and `costh_out` have the same shape, so `w` also serves as the shape
// reference for the kernel launch.
std::vector<torch::Tensor> costh_cuda_bkwd(const torch::Tensor grad_mm, const torch::Tensor x_norm, 
                                           const torch::Tensor w, const torch::Tensor costh_out, 
                                           const torch::Tensor norm_vals)
{
    const auto thd_per_blk = w.size(0);
    const auto blk_per_grid = w.size(1);

    // Same shared-memory / coverage constraints as the forward wrapper
    // (768-element static shared buffer, no per-thread bounds guard).
    TORCH_CHECK(thd_per_blk <= 768,
                "costh_cuda_bkwd: w.size(0) must be <= 768, got ", thd_per_blk);
    TORCH_CHECK(thd_per_blk % 128 == 0,
                "costh_cuda_bkwd: w.size(0) must be a multiple of 128, got ", thd_per_blk);

    const dim3 blk_layout(128, (thd_per_blk + 128 - 1) / 128);
    const dim3 grid_layout(256, (blk_per_grid + 256 - 1) / 256);

    // Keep the constant-memory block count in sync with the actual weight
    // width instead of trusting the hard-coded initializer.
    const int n_blk = static_cast<int>(blk_per_grid);
    C10_CUDA_CHECK(cudaMemcpyToSymbol(num_blk, &n_blk, sizeof(int)));

    // Gradient wrt x_norm flows straight through the final mm of the forward.
    const auto costh_out_trans = torch::transpose(costh_out, 0, 1).contiguous();
    auto grad_x_norm = torch::mm(grad_mm, costh_out_trans);
    // mm(transpose<B>, transpose<A>) equals to transpose<mm(A, B)>
    const auto grad_costh_out_trans = torch::mm(torch::transpose(grad_mm, 0, 1).contiguous(), x_norm);

    const auto w_trans = torch::transpose(w, 0, 1).contiguous();
    auto grad_w_trans = torch::zeros_like(w_trans);

    AT_DISPATCH_FLOATING_TYPES(costh_out_trans.scalar_type(), "costh_cuda_backward_kernel", ([&] {
        costh_cuda_bkwd_kernel<scalar_t><<<grid_layout, blk_layout>>>(
            grad_costh_out_trans.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), 
            w_trans.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), 
            norm_vals.packed_accessor32<scalar_t, 1, torch::RestrictPtrTraits>(), 
            grad_w_trans.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>()
        );
    }));
    C10_CUDA_KERNEL_LAUNCH_CHECK();  // surface bad-config / async kernel errors

    auto grad_w = torch::transpose(grad_w_trans, 0, 1).contiguous();

    return {grad_x_norm, grad_w};
}

// One thread block L2-normalizes one row of `w_trans` and records its norm:
//   costh_out_trans[b][t] = w_trans[b][t] / max(||w_trans[b]||_2, clamp_min_bound)
// Expected launch (see host wrapper): blockDim.x == 128, blockDim.y ==
// ceil(row_len / 128) — the comments below note it defaults to 6 (768
// threads); one logical block per row, flattened over a 2D grid.
// NOTE(review): there is no `thd_idx < row length` guard, so the block must
// cover the row exactly — the host wrapper should enforce row_len % 128 == 0.
template <typename scalar_t>
__global__ void costh_cuda_fwd_kernel(
    const torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> w_trans, 
    torch::PackedTensorAccessor32<scalar_t, 1, torch::RestrictPtrTraits> norm_vals, 
    torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> costh_out_trans)
{
    const int blk_idx = blockIdx.x + blockIdx.y * gridDim.x;
    if (blk_idx < num_blk)  // uniform within a block, so barriers below are safe
    {
        const int thd_idx = threadIdx.x + threadIdx.y * blockDim.x;
        // (1) static shared memory declaration & assignment
        //     (768 = 128 x 6, the maximum thread count this kernel supports)
        __shared__ scalar_t shared_mem[768];
        shared_mem[thd_idx] = w_trans[blk_idx][thd_idx];

        // (2) keep the raw element in a register; square it in shared memory
        const scalar_t init_val = shared_mem[thd_idx];
        scalar_t val;
        val = init_val * init_val;
        shared_mem[thd_idx] = val;
        __syncthreads();

        // (3) tree reduction along dimension `x` down to warp width;
        //     cross-warp steps need a full block barrier
        for (int offset = blockDim.x >> 1; offset >= 32; offset >>= 1)
        {
            if (threadIdx.x < offset)
            {
                shared_mem[thd_idx] += shared_mem[thd_idx + offset];
            }
            __syncthreads();
        }
        // intra-warp tail: __syncwarp is sufficient within one warp
        for (int offset = 16; offset > 0; offset >>= 1)
        {
            if (threadIdx.x < offset)
            {
                shared_mem[thd_idx] += shared_mem[thd_idx + offset];
            }
            __syncwarp();
        }

        // (4) reduce the per-row partial sums across dimension `y`.
        // NOTE(review): the halving schedule plus the explicit
        // `+= shared_mem[2 * blockDim.x]` at offset == 1 is only correct for
        // blockDim.y in {1, 3, 6} (e.g. y == 4 would double-count row 2);
        // confirm launches always use y == 6 as the comment suggests.
        for (int offset = blockDim.y >> 1; offset > 0; offset >>= 1)  // blockDim.y default is 6
        {
            if ((thd_idx / blockDim.x < offset) && (thd_idx % blockDim.x == 0))
            {
                shared_mem[thd_idx] += shared_mem[thd_idx + offset * blockDim.x];
            }
            __syncthreads();
            if ((offset == 1) && (thd_idx == 0))
            {
                shared_mem[0] += shared_mem[2 * blockDim.x];
            }
        }
        // BUGFIX: thread 0's final leftover add above happens after the last
        // in-loop barrier; without this sync other threads could broadcast-read
        // a stale shared_mem[0].
        __syncthreads();
        scalar_t norm_res = shared_mem[0];  // no bank conflict, due to broadcast mechanism
        // BUGFIX: use the scalar_t overload — sqrtf truncated double inputs to float
        norm_res = sqrt(norm_res);
        if (norm_res < clamp_min_bound)
        {
            norm_res = clamp_min_bound;  // clamp operator
        }
        val = init_val / norm_res;  // div operator
        // write to global mem directly, without shared mem
        costh_out_trans[blk_idx][thd_idx] = val;
        if (thd_idx == 0)
        {
            norm_vals[blk_idx] = norm_res;
        }
    }
}

// Gradient of the per-row normalization.  For an unclamped row:
//   grad_w = out_grad / norm - w * dot(out_grad, w) / norm^3
// Launch layout mirrors the forward kernel: blockDim.x == 128, one logical
// block per row of the transposed tensors.
template <typename scalar_t>
__global__ void costh_cuda_bkwd_kernel(
    const torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> grad_costh_out_trans, 
    const torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> w_trans, 
    const torch::PackedTensorAccessor32<scalar_t, 1, torch::RestrictPtrTraits> norm_vals, 
    torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> grad_w_trans)
{
    const int blk_idx = blockIdx.x + blockIdx.y * gridDim.x;
    if (blk_idx < num_blk)  // uniform within a block, so barriers below are safe
    {
        const int thd_idx = threadIdx.x + threadIdx.y * blockDim.x;
        // broadcast this row's cached norm through shared memory
        __shared__ scalar_t norm_val;
        if (thd_idx == 0)
        {
            norm_val = norm_vals[blk_idx];
        }
        // BUGFIX: without this barrier every thread except thread 0 read
        // norm_val before it was written (uninitialized shared memory), and
        // the branch below could then diverge within the block, making its
        // interior __syncthreads() calls undefined behavior.
        __syncthreads();
        const auto out_grad = grad_costh_out_trans[blk_idx][thd_idx];
        if (norm_val != clamp_min_bound)
        {
            __shared__ scalar_t reduction_mem[768];  // 128 x 6 max threads
            const auto w = w_trans[blk_idx][thd_idx];
            auto reduction_val = out_grad * w;
            reduction_mem[thd_idx] = reduction_val;
            // BUGFIX: the forward kernel syncs after its shared-memory store,
            // but this one didn't — the first cross-warp reduction step below
            // read neighbors another warp may not have written yet.
            __syncthreads();

            // reduction-sum, operating on multiplication between out_grad and w
            // like forward CUDA kernel, reduction in the inner dimension of thread block
            for (int offset = blockDim.x >> 1; offset >= 32; offset >>= 1)
            {
                if (threadIdx.x < offset)
                {
                    reduction_mem[thd_idx] += reduction_mem[thd_idx + offset];
                }
                __syncthreads();
            }
            // intra-warp tail: __syncwarp is sufficient within one warp
            for (int offset = 16; offset >= 1; offset >>= 1)
            {
                if (threadIdx.x < offset)
                {
                    reduction_mem[thd_idx] += reduction_mem[thd_idx + offset];
                }
                __syncwarp();
            }

            // reduction of the outer dimension of the thread block.
            // NOTE(review): like the forward kernel, the trailing
            // `+= reduction_mem[2 * blockDim.x]` step is specialized for
            // blockDim.y in {1, 3, 6} — confirm launches use y == 6.
            for (int offset = blockDim.y / 2; offset >= 1; offset >>= 1)
            {
                if ((threadIdx.y < offset) && (threadIdx.x == 0))
                {
                    reduction_mem[thd_idx] += reduction_mem[thd_idx + offset * blockDim.x];
                }
                __syncthreads();
                if ((offset == 1) && (thd_idx == 0))
                {
                    reduction_mem[0] += reduction_mem[2 * blockDim.x];
                }
            }
            // BUGFIX: thread 0's final leftover add happens after the last
            // in-loop barrier; sync before every thread broadcast-reads it.
            __syncthreads();

            const auto reduction_res = reduction_mem[0];  // dot(out_grad, w)
            const auto mul_factor = (-1) * w / (norm_val * norm_val * norm_val);
            const auto sum_factor = 1 / norm_val * out_grad;
            auto grad_w = reduction_res * mul_factor + sum_factor;
            grad_w_trans[blk_idx][thd_idx] = grad_w;
        }
        else
        {
            // clamped row: the forward pass divided by the constant floor,
            // so only the direct 1/clamp term contributes to the gradient
            auto grad_w = out_grad / clamp_min_bound;
            grad_w_trans[blk_idx][thd_idx] = grad_w;
        }
    }
}
