/**
 * @file hs_cuda_reduce.cuh
 * @author iam002
 * @brief 并行应用场景: 规约运算 reduction
 * @version 0.1
 * @date 2024-10-15
 * 
 * @copyright Copyright (c) 2024
 * 
 */
#ifndef __HS_CUDA_REDUCTION_CUH__
#define __HS_CUDA_REDUCTION_CUH__

#include "hs_cuda_utils.cuh"

namespace hs
{

namespace cuda
{

namespace reduce
{

/**************************************************************

规约算子的要求:
    1. 算子的运算必须是符合结合律的二元操作
    2. 记得使用 const 修饰

算子实例:

template<typename T>
struct ReduceMaxFunctor
{
    __device__ __forceinline__ T operator()(T a, T b) const
    {
        return (a > b) ? a : b;
    }
};

***************************************************************/


/**
 * @brief Block-level reduction kernel.
 *
 * Each block folds a grid-strided slice of `input` into one value and
 * stores it at output[blockIdx.x]; a host-side loop combines the per-block
 * partials into the final result.
 *
 * Preconditions:
 *   - blockDim.x is a power of two (the tree phase halves the stride).
 *   - Launched with blockDim.x * sizeof(DataType) bytes of dynamic shared memory.
 *   - init_value is the identity element of `functor`.
 *
 * @tparam DataType    element type
 * @tparam FunctorType associative binary operator: DataType(DataType, DataType)
 * @param N          number of elements to reduce
 * @param input      device pointer to the input data
 * @param functor    reduction operator
 * @param init_value identity value of the reduction
 * @param output     device pointer with one result slot per block
 */
template<typename DataType, typename FunctorType>
__global__ void reduceKernel(int N, const DataType* input, FunctorType functor, DataType init_value, DataType* output)
{
    extern __shared__ char shared_mem[];
    DataType* block_partials = reinterpret_cast<DataType*>(shared_mem);

    const int lane = threadIdx.x;
    const int grid_stride = blockDim.x * gridDim.x;

    // Grid-stride pass: fold this thread's slice into a register accumulator,
    // then publish it to shared memory once.
    DataType acc = init_value;
    for (int i = blockIdx.x * blockDim.x + lane; i < N; i += grid_stride) {
        acc = functor(acc, input[i]);
    }
    block_partials[lane] = acc;
    __syncthreads();

    // Shared-memory tree reduction (requires power-of-two blockDim.x).
    for (int half = blockDim.x >> 1; half > 0; half >>= 1) {
        if (lane < half) {
            block_partials[lane] = functor(block_partials[lane], block_partials[lane + half]);
        }
        __syncthreads();
    }

    if (lane == 0) {
        output[blockIdx.x] = block_partials[0];
    }
}


/**
 * @brief Full reduction of N elements into a single host-side result.
 *
 * Launches reduceKernel repeatedly, shrinking the problem each pass, until
 * one value remains, then copies it back to the host (the blocking
 * cudaMemcpy doubles as the final synchronization point).
 *
 * Fixes over the naive multi-pass scheme:
 *   - Intermediate passes ping-pong between two device buffers. Reducing a
 *     buffer into itself races: block b writes element b while block 0 may
 *     still be reading it.
 *   - Kernel launches are checked via cudaGetLastError().
 *   - Device buffers are freed on every exit path (no leak on error).
 *
 * @tparam DataType 输入数据类型 (element type)
 * @tparam FunctorType 二元运算单元 (associative binary functor)
 * @param N 处理数据长度 (number of elements)
 * @param input 输入数据指针 (device pointer to input)
 * @param functor 规约算子 (reduction operator)
 * @param init_value 规约操作的初始值 (identity value of the reduction)
 * @param host_output 主机端的输出结果 (host pointer receiving the result)
 * @return cudaError_t first error encountered, or cudaSuccess
 */
template<typename DataType, typename FunctorType>
cudaError_t apply(int N, const DataType* input, FunctorType functor, DataType init_value, DataType* host_output)
{
    int grid_size;
    cudaError_t err = GetNumBlocks(N, &grid_size);
    if (err != cudaSuccess) {
        return err;
    }

    // Two ping-pong buffers; later passes always shrink, so grid_size
    // elements is enough for both.
    DataType* buffers[2] = {nullptr, nullptr};
    err = cudaMalloc(&buffers[0], grid_size * sizeof(DataType));
    if (err == cudaSuccess) {
        err = cudaMalloc(&buffers[1], grid_size * sizeof(DataType));
    }

    int cur = 0;  // index of the buffer holding the latest partials
    if (err == cudaSuccess) {
        const int shared_mem_size = sizeof(DataType) * KERNEL_BLOCK_SIZE;

        reduceKernel<<<grid_size, KERNEL_BLOCK_SIZE, shared_mem_size>>>(
            N, input, functor, init_value, buffers[cur]);
        err = cudaGetLastError();

        // Keep reducing the partials until a single value remains.
        while (err == cudaSuccess && grid_size > 1) {
            const int prev_grid_size = grid_size;
            err = GetNumBlocks(prev_grid_size, &grid_size);
            if (err != cudaSuccess) {
                break;
            }
            reduceKernel<<<grid_size, KERNEL_BLOCK_SIZE, shared_mem_size>>>(
                prev_grid_size, buffers[cur], functor, init_value, buffers[1 - cur]);
            err = cudaGetLastError();
            cur = 1 - cur;
        }
    }

    if (err == cudaSuccess) {
        err = cudaMemcpy(host_output, buffers[cur], sizeof(DataType), cudaMemcpyDeviceToHost);
    }

    // Always release both buffers; cudaFree(nullptr) is a successful no-op.
    // Report a free failure only if nothing went wrong earlier.
    for (int i = 0; i < 2; ++i) {
        cudaError_t free_err = cudaFree(buffers[i]);
        if (err == cudaSuccess) {
            err = free_err;
        }
    }

    return err;
}

} // end namespace reduce

namespace reduceIndex
{

/**************************************************************

带索引规约算子的要求:
    1. 算子输入形参为 (T a, int aid, T b, int bid, T* output, int* output_index)
        a/b 是输入元素值
        aid/bid 为元素对应的索引值
        *output 为当前二元运算的输出结果
        *output_index 为当前二元运算的输出索引
    2. 记得使用 const 修饰

算子实例:

template<typename T>
struct ReduceMaxIndexFunctor
{
    __device__ __forceinline__ void operator()(T a, int aid, T b, int bid, T* output, int* output_index) const
    {
        if (a > b) {
            (*output) = a;
            (*output_index) = aid;
        }
        else {
            (*output) = b;
            (*output_index) = bid;
        }
    }
};

**************************************************************/


/**
 * @brief Block-level reduction kernel that also tracks the winning index.
 *
 * Each block folds a grid-strided slice of `input` into one (value, index)
 * pair and stores it at output[blockIdx.x] / output_index[blockIdx.x].
 *
 * Preconditions:
 *   - blockDim.x is a power of two (the tree phase halves the stride).
 *   - Launched with (sizeof(DataType) + sizeof(int)) * blockDim.x bytes of
 *     dynamic shared memory.
 *   - init_value is the identity element of `functor`.
 *
 * @tparam DataType    element type
 * @tparam FunctorType indexed functor:
 *                     void(T a, int aid, T b, int bid, T* out, int* out_id)
 * @param N            number of elements to reduce
 * @param input        device pointer to the input data
 * @param input_index  device pointer to per-element indices, or nullptr to
 *                     use the element's own position (first pass)
 * @param functor      indexed reduction operator
 * @param init_value   identity value of the reduction (paired with index -1)
 * @param output       device pointer, one value slot per block
 * @param output_index device pointer, one index slot per block
 */
template<typename DataType, typename FunctorType>
__global__ void reduceIndexKernel(int N, const DataType* input, const int* input_index, FunctorType functor, DataType init_value, DataType* output, int* output_index)
{
    extern __shared__ char shared_mem[];
    DataType* vals = reinterpret_cast<DataType*>(shared_mem);
    int* idxs = reinterpret_cast<int*>(vals + blockDim.x);

    const int lane = threadIdx.x;
    const int grid_stride = blockDim.x * gridDim.x;

    // Seed with the identity; -1 marks "no element seen yet".
    vals[lane] = init_value;
    idxs[lane] = -1;

    // Grid-stride pass over this thread's slice.
    for (int gid = blockIdx.x * blockDim.x + lane; gid < N; gid += grid_stride) {
        // Uniform branch: all threads agree on whether input_index is null.
        const int elem_id = (input_index == nullptr) ? gid : input_index[gid];
        functor(vals[lane], idxs[lane], input[gid], elem_id, &vals[lane], &idxs[lane]);
    }
    __syncthreads();

    // Shared-memory tree reduction (requires power-of-two blockDim.x).
    for (int half = blockDim.x >> 1; half > 0; half >>= 1) {
        if (lane < half) {
            functor(vals[lane], idxs[lane], vals[lane + half], idxs[lane + half], &vals[lane], &idxs[lane]);
        }
        __syncthreads();
    }

    if (lane == 0) {
        output[blockIdx.x] = vals[0];
        output_index[blockIdx.x] = idxs[0];
    }
}

/**
 * @brief Full indexed reduction of N elements into a host-side (value, index) pair.
 *
 * Launches reduceIndexKernel repeatedly, shrinking the problem each pass,
 * until one pair remains, then copies it back to the host (the blocking
 * cudaMemcpy doubles as the final synchronization point).
 *
 * Fixes over the naive multi-pass scheme:
 *   - Intermediate passes ping-pong between two buffer pairs. Reducing a
 *     buffer into itself races: block b writes element b while block 0 may
 *     still be reading it.
 *   - Kernel launches are checked via cudaGetLastError().
 *   - All device buffers are freed on every exit path (the original leaked
 *     partial_result when the index allocation failed, and leaked both
 *     buffers on memcpy errors).
 *
 * @tparam DataType 输入数据类型 (element type)
 * @tparam FunctorType 二元运算单元 (indexed reduction functor)
 * @param N 处理数据长度 (number of elements)
 * @param input 输入数据指针 (device pointer to input)
 * @param functor 带索引规约算子 (indexed reduction operator)
 * @param init_value 初始值 (identity value of the reduction)
 * @param host_output 主机端输出值 (host pointer receiving the value)
 * @param host_output_index 主机端对应的索引值 (host pointer receiving the index)
 * @return cudaError_t first error encountered, or cudaSuccess
 */
template<typename DataType, typename FunctorType>
cudaError_t apply(int N, const DataType* input, FunctorType functor, DataType init_value, DataType* host_output, int* host_output_index)
{
    int grid_size;
    cudaError_t err = GetNumBlocks(N, &grid_size);
    if (err != cudaSuccess) {
        return err;
    }

    // Two ping-pong (value, index) buffer pairs; later passes always shrink,
    // so grid_size elements is enough for all of them.
    DataType* val_buf[2] = {nullptr, nullptr};
    int* idx_buf[2] = {nullptr, nullptr};
    for (int i = 0; i < 2 && err == cudaSuccess; ++i) {
        err = cudaMalloc(&val_buf[i], grid_size * sizeof(DataType));
        if (err == cudaSuccess) {
            err = cudaMalloc(&idx_buf[i], grid_size * sizeof(int));
        }
    }

    int cur = 0;  // index of the buffer pair holding the latest partials
    if (err == cudaSuccess) {
        const int shared_mem_size = (sizeof(DataType) + sizeof(int)) * KERNEL_BLOCK_SIZE;

        // First pass: nullptr index input means "use each element's position".
        reduceIndexKernel<<<grid_size, KERNEL_BLOCK_SIZE, shared_mem_size>>>(
            N, input, nullptr, functor, init_value, val_buf[cur], idx_buf[cur]);
        err = cudaGetLastError();

        // Keep reducing the partial pairs until a single pair remains.
        while (err == cudaSuccess && grid_size > 1) {
            const int prev_grid_size = grid_size;
            err = GetNumBlocks(prev_grid_size, &grid_size);
            if (err != cudaSuccess) {
                break;
            }
            reduceIndexKernel<<<grid_size, KERNEL_BLOCK_SIZE, shared_mem_size>>>(
                prev_grid_size, val_buf[cur], idx_buf[cur], functor, init_value,
                val_buf[1 - cur], idx_buf[1 - cur]);
            err = cudaGetLastError();
            cur = 1 - cur;
        }
    }

    if (err == cudaSuccess) {
        err = cudaMemcpy(host_output, val_buf[cur], sizeof(DataType), cudaMemcpyDeviceToHost);
    }
    if (err == cudaSuccess) {
        err = cudaMemcpy(host_output_index, idx_buf[cur], sizeof(int), cudaMemcpyDeviceToHost);
    }

    // Always release everything; cudaFree(nullptr) is a successful no-op.
    // Report a free failure only if nothing went wrong earlier.
    for (int i = 0; i < 2; ++i) {
        cudaError_t free_err = cudaFree(val_buf[i]);
        if (err == cudaSuccess) {
            err = free_err;
        }
        free_err = cudaFree(idx_buf[i]);
        if (err == cudaSuccess) {
            err = free_err;
        }
    }

    return err;
}


} // end namespace reduceIndex

} // end namespace cuda

} // end namespace hs


#endif
