#include <vector>
#include <torch/extension.h>

// Lower bound used to clamp the per-column L2 norms, stored in CUDA
// __constant__ memory so both kernels can read it cheaply (broadcast load).
// The forward pass clamps with it; the backward pass compares against it to
// detect columns whose norm was clamped.
// Note: use a float literal (1e-12f) — the original double literal was
// silently narrowed to float.
__constant__ float clamp_min_bound = 1e-12f;

// Forward kernel (defined at the bottom of this file): computes the clamped
// L2 norm of each column of `w` and the normalized columns.
template <typename scalar_t>
__global__ void costh_cuda_fwd_kernel(
    const torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits>,  // w: input matrix
    torch::PackedTensorAccessor32<scalar_t, 1, torch::RestrictPtrTraits>,  // norm_vals: output, one norm per column
    torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits>,  // costh_out: output, normalized columns
    int, int, int  // epoch, remainder, grid_dim (= number of columns of w)
);

// Backward kernel (defined at the bottom of this file): computes the gradient
// of `w` from the gradient of the normalized output. All 2-D arguments are
// transposed relative to the forward pass (one block walks one contiguous row).
template <typename scalar_t>
__global__ void costh_cuda_bkwd_kernel(
    const torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits>,  // grad_costh_out_trans: upstream grad, transposed
    const torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits>,  // w_trans: input matrix, transposed
    const torch::PackedTensorAccessor32<scalar_t, 1, torch::RestrictPtrTraits>,  // norm_vals: per-column norms from forward
    torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits>  // grad_w_trans: output grad, transposed
);

std::vector<torch::Tensor> costh_cuda_fwd(const torch::Tensor w)
{
    // Forward pass: for every column j of the 2-D tensor `w`, compute
    //   norm_j      = max(||w[:, j]||_2, clamp_min_bound)
    //   costh_out[:, j] = w[:, j] / norm_j
    // Returns {costh_out, norm_vals}. One CUDA block handles one column.
    TORCH_CHECK(w.dim() == 2, "costh_cuda_fwd: expected a 2-D tensor");
    const auto dim0 = w.size(0);  // rows: reduced per column
    const auto dim1 = w.size(1);  // columns: one block each
    // Output buffers; the kernel fills every entry, so plain zeros suffice
    // (the original built norm_vals with an unused torch::sum — dead work).
    auto norm_vals = torch::zeros({dim1}, w.options());
    auto costh_out = torch::zeros_like(w);

    // Block size is limited both by the device thread limit and by shared
    // memory: the kernel needs one scalar of reduction scratch per thread.
    // Budget with the actual element size (double needs twice the float's).
    cudaDeviceProp device_props;
    cudaGetDeviceProperties(&device_props, 0);
    const int elem_size = (int)w.element_size();
    int max_thd_per_blk = device_props.maxThreadsPerBlock;
    if ((int)(device_props.sharedMemPerBlock / elem_size) < max_thd_per_blk)
        max_thd_per_blk = (int)(device_props.sharedMemPerBlock / elem_size);

    // epoch     : number of row tiles each block iterates over
    // remainder : number of valid rows in the last tile (1..thd_per_blk)
    int thd_per_blk, epoch, remainder;
    if (dim0 <= max_thd_per_blk)
    {
        // One tile covers all rows: pick the smallest power of two >= dim0.
        thd_per_blk = 1;
        while (thd_per_blk < dim0)
            thd_per_blk *= 2;
        epoch = 1;
        remainder = (int)dim0;  // NOT thd_per_blk — padding threads must stay idle
    }
    else
    {
        // Round the device limit down to a power of two so the in-kernel
        // tree reduction stays simple, then tile the rows (ceil division so
        // the tail tile is not dropped, as the original floor division did).
        thd_per_blk = 1;
        while (2 * thd_per_blk <= max_thd_per_blk)
            thd_per_blk *= 2;
        epoch = (int)((dim0 + thd_per_blk - 1) / thd_per_blk);
        remainder = (int)(dim0 - (int64_t)(epoch - 1) * thd_per_blk);
    }

    const dim3 blk_layout(thd_per_blk);
    // Padded 2-D grid with at least one block per column; the kernel guards
    // against the (up to 255) excess blocks via its grid_dim argument.
    const dim3 grid_layout(256, (dim1 + 256 - 1) / 256);

    // Dispatch on dtype; dynamic shared memory is one scalar_t per thread.
    AT_DISPATCH_FLOATING_TYPES(w.scalar_type(), "costh_cuda_forward_kernel", ([&] {
        costh_cuda_fwd_kernel<scalar_t><<<grid_layout, blk_layout, sizeof(scalar_t) * thd_per_blk>>>(
            w.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), 
            norm_vals.packed_accessor32<scalar_t, 1, torch::RestrictPtrTraits>(), 
            costh_out.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(),
            epoch, remainder, (int)dim1
        );
    }));
    // Kernel launches do not return errors directly — check explicitly.
    TORCH_CHECK(cudaGetLastError() == cudaSuccess,
                "costh_cuda_fwd: kernel launch failed");

    return {costh_out, norm_vals};
}

torch::Tensor costh_cuda_bkwd(const torch::Tensor grad_costh_out, const torch::Tensor w, 
                              const torch::Tensor costh_out, const torch::Tensor norm_vals)
{
    // Backward pass of the column normalization: one CUDA block per column
    // of `w`. The 2-D tensors are transposed up front so every block reads a
    // contiguous row (coalesced global loads).
    // `costh_out` is intentionally unused here; it is kept in the signature
    // so callers do not break. The original also transposed it — dead work
    // that has been removed.
    const auto dim0 = w.size(0);  // per-column reduction length
    const auto dim1 = w.size(1);  // number of columns -> one block each

    // Pad the block up to a multiple of 128 threads; the kernel masks off
    // the padding threads itself. An oversized request (dim0 beyond the
    // device per-block thread limit) surfaces via the launch check below.
    const dim3 blk_layout(128, (dim0 + 128 - 1) / 128);
    const unsigned int thds_per_blk = 128 * ((dim0 + 128 - 1) / 128);
    // Padded 2-D grid over columns; the kernel guards the excess blocks.
    const dim3 grid_layout(256, (dim1 + 256 - 1) / 256);

    const auto grad_costh_out_trans = torch::transpose(grad_costh_out, 0, 1).contiguous();
    const auto w_trans = torch::transpose(w, 0, 1).contiguous();
    auto grad_w_trans = torch::zeros_like(w_trans);

    // Dynamic shared memory: one scalar_t of reduction scratch per thread.
    AT_DISPATCH_FLOATING_TYPES(w_trans.scalar_type(), "costh_cuda_backward_kernel", ([&] {
        costh_cuda_bkwd_kernel<scalar_t><<<grid_layout, blk_layout, sizeof(scalar_t) * thds_per_blk>>>(
            grad_costh_out_trans.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), 
            w_trans.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), 
            norm_vals.packed_accessor32<scalar_t, 1, torch::RestrictPtrTraits>(), 
            grad_w_trans.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>()
        );
    }));
    // Kernel launches do not return errors directly — check explicitly.
    TORCH_CHECK(cudaGetLastError() == cudaSuccess,
                "costh_cuda_bkwd: kernel launch failed");

    // Transpose back to the caller's layout.
    return torch::transpose(grad_w_trans, 0, 1).contiguous();
}

template <typename scalar_t>
__global__ void costh_cuda_fwd_kernel(
    const torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> w, 
    torch::PackedTensorAccessor32<scalar_t, 1, torch::RestrictPtrTraits> norm_vals, 
    torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> costh_out, 
    int epoch, int remainder, int grid_dim)
{
    // One block per column `blk_idx` of `w` (grid_dim = number of columns).
    // The block's threads cooperate to compute the column's L2 norm, clamp it
    // from below by `clamp_min_bound`, write the normalized column into
    // `costh_out`, and publish the norm into `norm_vals`.
    // Requires sizeof(scalar_t) * (threads per block) bytes of dynamic shared
    // memory. Row coverage is derived from w.size(0) directly, so the kernel
    // is correct for any launch configuration; `epoch`/`remainder` are kept
    // only so the declared interface is unchanged.
    // The original body had empty reduction-loop placeholders, never
    // accumulated the last tile, and used a float shared array for the
    // double instantiation — all fixed here.
    (void)epoch;
    (void)remainder;

    const int blk_idx = blockIdx.x + blockIdx.y * gridDim.x +
                        blockIdx.z * gridDim.y * gridDim.x;
    if (blk_idx >= grid_dim)  // grid is padded; excess blocks do nothing
        return;
    const int thd_idx = threadIdx.x + threadIdx.y * blockDim.x +
                        threadIdx.z * blockDim.y * blockDim.x;
    const int n_thds = blockDim.x * blockDim.y * blockDim.z;
    const int n_rows = w.size(0);

    // Dynamic shared memory, typed through a byte buffer so both the float
    // and double template instantiations are valid.
    extern __shared__ unsigned char fwd_smem_bytes[];
    scalar_t* shared_mem = reinterpret_cast<scalar_t*>(fwd_smem_bytes);

    // Each thread accumulates the squares of a strided slice of the column.
    scalar_t partial = 0;
    for (int row = thd_idx; row < n_rows; row += n_thds)
    {
        const scalar_t v = w[row][blk_idx];
        partial += v * v;
    }
    shared_mem[thd_idx] = partial;
    __syncthreads();

    // Shared-memory tree reduction; the (active + 1) >> 1 step also handles
    // non-power-of-two block sizes. Barriers are outside the divergent `if`.
    for (int active = n_thds; active > 1; )
    {
        const int half = (active + 1) >> 1;
        if (thd_idx + half < active)
            shared_mem[thd_idx] += shared_mem[thd_idx + half];
        __syncthreads();
        active = half;
    }

    // sqrt resolves to the float/double device overload per scalar_t.
    scalar_t norm = sqrt(shared_mem[0]);
    if (norm < clamp_min_bound)  // clamp so the division below is safe
        norm = clamp_min_bound;

    // Normalize the column and publish the (clamped) norm.
    for (int row = thd_idx; row < n_rows; row += n_thds)
        costh_out[row][blk_idx] = w[row][blk_idx] / norm;
    if (thd_idx == 0)
        norm_vals[blk_idx] = norm;
}

template <typename scalar_t>
__global__ void costh_cuda_bkwd_kernel(
    const torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> grad_costh_out_trans, 
    const torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> w_trans, 
    const torch::PackedTensorAccessor32<scalar_t, 1, torch::RestrictPtrTraits> norm_vals, 
    torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> grad_w_trans)
{
    // One block per row `blk_idx` of the transposed tensors (i.e. per column
    // of the original `w`). For an unclamped column this computes
    //   grad_w = grad_out / norm - w * dot(grad_out, w) / norm^3
    // and for a column whose forward norm was clamped to clamp_min_bound,
    //   grad_w = grad_out / clamp_min_bound   (d(w/c)/dw = 1/c elementwise).
    // Requires sizeof(scalar_t) * (threads per block) bytes of dynamic shared
    // memory. Fixes vs. the original: `num_blk` was undeclared (taken from
    // the accessor now), padding threads no longer read out of bounds, and
    // `norm_val` is published with a barrier before anyone reads it.
    const int blk_idx = blockIdx.x + blockIdx.y * gridDim.x;
    const int num_cols = grad_costh_out_trans.size(0);
    if (blk_idx >= num_cols)  // grid is padded; excess blocks do nothing
        return;

    const int thd_idx = threadIdx.x + threadIdx.y * blockDim.x;
    const int n_thds = blockDim.x * blockDim.y;
    const int dim = w_trans.size(1);
    // The host pads the block to a multiple of 128 threads; padding threads
    // contribute zero to the reduction and write nothing.
    const bool in_range = thd_idx < dim;

    // Dynamic shared memory, typed through a byte buffer so both the float
    // and double template instantiations are valid.
    extern __shared__ unsigned char bkwd_smem_bytes[];
    scalar_t* reduction_mem = reinterpret_cast<scalar_t*>(bkwd_smem_bytes);

    __shared__ scalar_t norm_val;
    if (thd_idx == 0)
        norm_val = norm_vals[blk_idx];

    scalar_t out_grad = 0;
    scalar_t w_val = 0;
    if (in_range)
    {
        out_grad = grad_costh_out_trans[blk_idx][thd_idx];
        w_val = w_trans[blk_idx][thd_idx];
    }
    reduction_mem[thd_idx] = out_grad * w_val;
    __syncthreads();  // publishes norm_val AND the reduction inputs

    // Shared-memory tree reduction of dot(grad_out, w); the (active + 1) >> 1
    // step handles any block size. Barriers are outside the divergent `if`.
    for (int active = n_thds; active > 1; )
    {
        const int half = (active + 1) >> 1;
        if (thd_idx + half < active)
            reduction_mem[thd_idx] += reduction_mem[thd_idx + half];
        __syncthreads();
        active = half;
    }

    if (!in_range)  // safe: no barriers are executed past this point
        return;

    if (norm_val != static_cast<scalar_t>(clamp_min_bound))
    {
        const scalar_t dot_val = reduction_mem[0];
        const scalar_t inv_norm = static_cast<scalar_t>(1) / norm_val;
        grad_w_trans[blk_idx][thd_idx] =
            out_grad * inv_norm - w_val * dot_val * inv_norm * inv_norm * inv_norm;
    }
    else
    {
        // Forward pass clamped this column's norm.
        grad_w_trans[blk_idx][thd_idx] = out_grad / static_cast<scalar_t>(clamp_min_bound);
    }
}
