/**
 * @file hs_cuda_scan.cuh
 * @author iam002
 * @brief 并行应用场景: 扫描操作 scan
 * @version 1.0
 * @date 2024-10-15
 * 
 * @copyright Copyright (c) 2024
 * 
 */
#ifndef __HS_CUDA_SCAN_CUH__
#define __HS_CUDA_SCAN_CUH__

#include "hs_cuda_utils.cuh"
#include "hs_cuda_elemwise.cuh"


namespace hs
{

namespace cuda
{

namespace scan
{

/* Helper functor: element-wise addition of two values. */
template<typename T>
struct AddFunctor
{
    __device__ T operator()(T lhs, T rhs) const
    {
        return lhs + rhs;
    }
};

/* Helper functor: adds one fixed constant to each element. */
template<typename T>
struct AddOneElemFunctor
{
    AddOneElemFunctor(T k) : m_k(k) { }

    __device__ T operator()(T value) const
    {
        return value + m_k;
    }

private:
    T m_k;  // constant added to every element (captured on the host)
};

/**
 * @brief Number of padding slots to insert before shared-memory index `id`
 *  so that the tree-based scan avoids bank conflicts: one extra slot per
 *  128 bytes of elements (i.e. every 32 elements for 4-byte types such as
 *  float/int, every 16 for double).
 *
 * @tparam T element type stored in shared memory
 * @param id raw shared-memory index
 * @return number of padding slots preceding `id`
 */
template<typename T>
__device__ __host__ inline int padding(int id)
{
    // one full sweep of the banks spans 128 bytes
    constexpr int elems_per_sweep = 128 / (int)sizeof(T);
    return id / elems_per_sweep;
}

/*
  输入: a[0], a[1], ..., a[N-1]

闭scan: b[0] = a[0]
        b[1] = a[0] + a[1]
        b[2] = a[0] + a[1] + a[2]
        ...
        b[N-1] = a[0] + ... + a[N-1]

开scan: c[0] = 0
        c[1] = a[0]
        c[2] = a[0] + a[1]
        ...
        c[N-1] = a[0] + ... + a[N-2]

开scan + 输入 = 闭scan
*/

/**
 * @brief Computes the exclusive (open) prefix sum of one block's slice of
 *  p_in using the up-sweep/down-sweep (Blelloch) algorithm in shared memory.
 *
 * @note Requires blockDim.x to be a power of two (the sweep index arithmetic
 *  assumes it). Dynamic shared memory must be at least
 *  (blockDim.x + padding<T>(blockDim.x)) * sizeof(T) bytes.
 * @note Reference: https://blog.csdn.net/qq_44161734/article/details/134620943
 *
 * @tparam T element type
 * @param N array length
 * @param p_in input array
 * @param p_out exclusive prefix-sum output
 * @param p_block_sum optional (may be null); entry blockIdx.x receives the
 *  total sum of this block's valid elements (= the inclusive scan tail)
 */
template<typename T>
__global__ void prefixOpenSumKernel(int N, const T* p_in, T* p_out, T* p_block_sum)
{
    // FIX: was `extern __shared__ char* shared_buffer[]` — an array of char
    // pointers; the intended raw byte buffer is an array of char.
    extern __shared__ char shared_buffer[];
    T* shared_mem = (T*)shared_buffer;

    int tid = threadIdx.x;
    int idx = blockIdx.x * blockDim.x + threadIdx.x;

    // load this block's slice; pad the tail beyond N with the identity (0)
    if (idx < N) {
        shared_mem[tid + padding<T>(tid)] = p_in[idx];
    }
    else {
        shared_mem[tid + padding<T>(tid)] = 0;
    }
    __syncthreads();

    // Up-sweep (reduce): build partial sums in place
    for (int stride = 1; stride < blockDim.x; stride <<= 1) {
        int index = (tid + 1) * stride * 2 - 1;
        if (index < blockDim.x) {
            shared_mem[index + padding<T>(index)] += shared_mem[index - stride + padding<T>(index - stride)];
        }
        __syncthreads();
    }

    // Down-sweep: clear the root, then distribute partial sums back down
    if (tid == blockDim.x - 1) {
        shared_mem[blockDim.x - 1 + padding<T>(blockDim.x - 1)] = 0;
    }
    __syncthreads();

    for (int stride = blockDim.x >> 1; stride > 0; stride >>= 1) {
        int index = (tid + 1) * stride * 2 - 1;
        if (index < blockDim.x) {
            int id0 = index - stride + padding<T>(index - stride);
            int id1 = index + padding<T>(index);
            // FIX: was `int val`, which truncated float/double values during
            // the swap and produced wrong scans for non-integer T.
            T val = shared_mem[id0];
            shared_mem[id0] = shared_mem[id1];
            shared_mem[id1] += val;
        }
        __syncthreads();
    }

    if (idx < N) {
        p_out[idx] = shared_mem[tid + padding<T>(tid)];
    }

    // The last thread records the block total: the exclusive-scan value at
    // the last slot plus the last input element (the zero-padded tail
    // contributes nothing).
    // FIX: previously read p_out[idx] with idx >= N — an out-of-bounds read
    // for a partially-filled last block; the shared-memory value is always
    // valid and equals the same quantity.
    if (p_block_sum && tid == blockDim.x - 1) {
        T total = shared_mem[tid + padding<T>(tid)];
        if (idx < N) {
            total += p_in[idx];
        }
        p_block_sum[blockIdx.x] = total;
    }
}

/**
 * @brief Adds the accumulated total of all preceding blocks to each element
 *  of a per-block scan result, turning it into a global scan.
 *
 * @tparam T element type
 * @param N array length
 * @param p_out per-block scan results, updated in place
 * @param p_block_sum entry i-1 holds the sum contributed by blocks 0..i-1;
 *  block 0 has nothing to add
 */
template<typename T>
__global__ void addBlockPrefixSumKernel(int N, T* p_out, T* p_block_sum)
{
    int gid = blockDim.x * blockIdx.x + threadIdx.x;
    if (blockIdx.x == 0 || gid >= N) {
        return;
    }
    p_out[gid] += p_block_sum[blockIdx.x - 1];
}

/**
 * @brief Computes the exclusive (open) prefix sum of p_in into p_out.
 *
 * Scans the array in groups of at most grid_size * block_size elements,
 * carrying the running total (cumsum) across groups; block partial sums
 * within a group are themselves scanned (recursively if needed).
 *
 * @tparam T element type
 * @param N array length
 * @param p_in input device array
 * @param p_out output device array (exclusive prefix sum)
 * @return cudaError_t first error encountered, cudaSuccess otherwise
 */
template<typename T>
cudaError_t PrefixOpenSum(int N, const T* p_in, T* p_out)
{
    cudaError_t err = cudaSuccess;

    // derive the block count from the array length N
    int grid_size;
    err = GetNumBlocks(N, &grid_size);
    if (err != cudaSuccess) {
        return err;
    }
    int block_size = KERNEL_BLOCK_SIZE;

    // dynamic shared memory: one T per thread plus bank-conflict padding
    int shared_size = (block_size + padding<T>(block_size)) * sizeof(T);

    // per-block inclusive partial sums, and their scan
    T* p_block_sum = nullptr;
    T* p_scan_block_sum = nullptr;

    // FIX: every early error return used to leak these buffers; free them on
    // all exit paths (cudaFree(nullptr) is a harmless no-op).
    auto cleanup = [&]() {
        cudaFree(p_block_sum);
        cudaFree(p_scan_block_sum);
    };

    err = cudaMalloc(&p_block_sum, sizeof(T) * grid_size);
    if (err != cudaSuccess) {
        return err;
    }

    err = cudaMalloc(&p_scan_block_sum, sizeof(T) * grid_size);
    if (err != cudaSuccess) {
        cleanup();
        return err;
    }

    // NOTE N may exceed the maximum grid capacity; process group by group
    int group_size = grid_size * block_size;
    int blocks_group_num = (N + group_size - 1) / group_size;

    T cumsum = 0;           // running total of all elements before this group
    T cur_group_cumsum;

    for (int gid = 0; gid < blocks_group_num; ++gid) {
        // pointer offset of this group
        int offset = gid * group_size;

        // actual size of this group (last group may be short)
        int N0 = (N - offset < group_size) ? (N - offset) : (group_size);

        grid_size = (N0 + block_size - 1) / block_size;

        // scan the current group per block
        prefixOpenSumKernel<<<grid_size, block_size, shared_size>>>(N0, p_in + offset, p_out + offset, p_block_sum);
        err = cudaDeviceSynchronize();
        // FIX: the first launch was never error-checked at all
        if (err == cudaSuccess) {
            err = cudaPeekAtLastError();
        }
        if (err != cudaSuccess) {
            cleanup();
            return err;
        }

        if (grid_size > 1) {

            // scan the per-block totals (open scan of the inclusive sums)
            if (grid_size <= block_size) {
                prefixOpenSumKernel<<<1, block_size, shared_size>>>(grid_size, p_block_sum, p_scan_block_sum, (T*)nullptr);
                err = cudaDeviceSynchronize();
                if (err == cudaSuccess) {
                    err = cudaPeekAtLastError();
                }
                // FIX: previously returned `err` while it still held
                // cudaSuccess, swallowing the launch error
                if (err != cudaSuccess) {
                    cleanup();
                    return err;
                }
            }
            else {
                // recursive call for very large block counts
                err = PrefixOpenSum(grid_size, p_block_sum, p_scan_block_sum);
                if (err != cudaSuccess) {
                    cleanup();
                    return err;
                }
            }

            // convert the open scan to a closed (inclusive) scan
            err = hs::cuda::elemwise::Binary(grid_size, p_block_sum, p_scan_block_sum, AddFunctor<T>(), p_scan_block_sum);
            if (err == cudaSuccess) {
                err = cudaDeviceSynchronize();
            }
            if (err != cudaSuccess) {
                cleanup();
                return err;
            }

            addBlockPrefixSumKernel<<<grid_size, block_size, 0>>>(N0, p_out + offset, p_scan_block_sum);
            err = cudaDeviceSynchronize();
            if (err == cudaSuccess) {
                err = cudaPeekAtLastError();
            }
            // FIX: same swallowed-error bug as above
            if (err != cudaSuccess) {
                cleanup();
                return err;
            }
        }

        // carry the total of all previous groups into this group
        if (gid > 0) {
            err = hs::cuda::elemwise::Unary(N0, p_out + offset, AddOneElemFunctor<T>(cumsum), p_out + offset);
            if (err == cudaSuccess) {
                err = cudaDeviceSynchronize();
            }
            if (err != cudaSuccess) {
                cleanup();
                return err;
            }
        }

        // new carry = last open-scan value of this group + last input element
        err = cudaMemcpy(&cur_group_cumsum, p_out + offset + N0 - 1, sizeof(T), cudaMemcpyDeviceToHost);
        if (err != cudaSuccess) {
            cleanup();
            return err;
        }
        cumsum = cur_group_cumsum;

        err = cudaMemcpy(&cur_group_cumsum, p_in + offset + N0 - 1, sizeof(T), cudaMemcpyDeviceToHost);
        if (err != cudaSuccess) {
            cleanup();
            return err;
        }
        cumsum = cumsum + cur_group_cumsum;
    }

    cleanup();
    return cudaSuccess;
}

/**
 * @brief Computes the inclusive (closed) prefix sum of p_in into p_out.
 *
 * Runs an exclusive scan first, then adds the input element-wise
 * (closed scan = open scan + input).
 *
 * @tparam T element type
 * @param N array length
 * @param p_in input device array
 * @param p_out output device array
 * @return cudaError_t first error encountered, cudaSuccess otherwise
 */
template<typename T>
cudaError_t PrefixSum(int N, const T* p_in, T* p_out)
{
    cudaError_t status = PrefixOpenSum<T>(N, p_in, p_out);
    if (status == cudaSuccess) {
        status = hs::cuda::elemwise::Binary(N, p_in, p_out, AddFunctor<T>(), p_out);
    }
    return status;
}


} // end namespace scan

} // end namespace cuda

} // end namespace hs

#endif

