/**
 * @file hs_cuda_outputwise.cuh
 * @author iam002
 * @brief 并行应用场景: 输出逐元素运算
 * @version 1.0
 * @date 2024-10-15
 * 
 * @copyright Copyright (c) 2024
 * 
 */
#ifndef __HS_CUDA_OUTPUTWISE_CUH__
#define __HS_CUDA_OUTPUTWISE_CUH__

#include "hs_cuda_utils.cuh"

namespace hs
{

namespace cuda
{

namespace outputwise
{

/**************************************************************

功能算子演示:

struct FillValueFunctor
{

    FillValueFunctor(float fill_value)
        : m_fill_value(fill_value)
    { } 

    __device__ void operator()(float & out_val)
    {
        out_val = m_fill_value;
    }

private:
    float m_fill_value;

};

**************************************************************/

/* Unpack a group of Packed values and run the functor on every element
 * slot, expanding the variadic output packs in lockstep.
 * Functor call shape: functor(a, b, ...) where each argument is a
 * reference to the k-th element of the matching pack.
 */
template<int pack_size, typename FunctorType, typename... OutputType>
__device__ void applyPack(FunctorType functor
    , Packed<pack_size, OutputType>&... dst_pack)
{
    #pragma unroll
    for (int k = 0; k < pack_size; ++k) {
        functor((dst_pack.elem[k])...);
    }
}


/* Kernel. Note: any array/pointer kernel parameter must point to device
 * memory — this really matters. Also, kernel parameters must not be
 * references.
 *
 * Grid layout: 1-D grid of 1-D blocks (KERNEL_BLOCK_SIZE threads each).
 * Full packs are walked with a grid-stride loop; the num_tail leftover
 * elements are handled one each by the first num_tail threads of the grid.
 */
template<int pack_size, typename FunctorType, typename... OutputType>
__global__ void __launch_bounds__(KERNEL_BLOCK_SIZE)
applyGeneric(FunctorType functor
        , int num_pack
        , Packed<pack_size, OutputType>*... dst_pack
        , int num_tail
        , OutputType*... p_out_tail)
{
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const int stride = blockDim.x * gridDim.x;

    /* Process whole packs. */
    for (int pack_id = tid; pack_id < num_pack; pack_id += stride) {
        applyPack<pack_size, FunctorType, OutputType...>(
            functor, (dst_pack[pack_id])...
        );
    }

    /* Process the tail data left over after packing. */
    if (tid < num_tail) {
        functor((p_out_tail[tid])...);
    }
}


/* Preparation work for launching the kernel.
 *
 * Splits the N output elements into num_pack full packs of pack_size
 * elements plus a tail of num_tail (< pack_size) leftover elements,
 * then launches applyGeneric over both (default stream, no dynamic
 * shared memory).
 *
 * Precondition: every pointer in p_out... refers to device memory and,
 * for pack_size > 1, is aligned for Packed<pack_size, T> (callers check
 * this via isAlignedForPack in GenericLauncher).
 */
template<size_t pack_size, typename FunctorType, typename... OutputType>
cudaError_t launchKernel(FunctorType functor, int N, OutputType*... p_out)
{
    /* Empty (or invalid negative) range: nothing to do, skip the launch. */
    if (N <= 0) {
        return cudaSuccess;
    }

    /* Compute the pack count and the number of leftover tail elements. */
    const int num_pack = N / pack_size;
    const int tail_offset = num_pack * pack_size;
    const int num_tail = N - tail_offset;

    /* Compute the number of thread blocks. */
    int num_blocks;
    {
        // Does not call into hs_cuda_utils.cu.
        cudaError_t err = GetNumBlocks(num_pack, &num_blocks);
        if (err != cudaSuccess) {
            return err;
        }
    }
    /* Defensive: when N < pack_size there are zero packs but a non-empty
     * tail. If GetNumBlocks then reports zero blocks, the launch config
     * would be invalid and the tail never processed — guarantee at least
     * one block. (NOTE(review): confirm GetNumBlocks's behavior for 0.) */
    if (num_blocks < 1) {
        num_blocks = 1;
    }

    /* Launch the kernel. */
    applyGeneric<pack_size, FunctorType, OutputType...><<<num_blocks, KERNEL_BLOCK_SIZE, 0>>>(
        functor
        , num_pack
        , (reinterpret_cast<Packed<pack_size, OutputType>*>(p_out))...
        , num_tail
        , (p_out + tail_offset)...
    );

    /* Peek (rather than get) so the sticky error state is left untouched
     * for the caller's own error handling. */
    return cudaPeekAtLastError();
}


/* Wrapper that hides the pack_size choice from callers: picks the
 * vectorized pack size for these output types, but falls back to the
 * scalar (pack_size == 1) instantiation whenever any output pointer is
 * not aligned for packed access. */
template<typename FunctorType, typename... OutputType>
struct GenericLauncher
{
    static cudaError_t launch(FunctorType functor, int N, OutputType*... p_out)
    {
        constexpr int pack_size = PackSize<OutputType...>();

        if (!isAlignedForPack<pack_size, OutputType...>(p_out...)) {
            // Unaligned pointers: process elements one by one.
            return launchKernel<1, FunctorType, OutputType...>(functor, N, p_out...);
        }
        return launchKernel<pack_size, FunctorType, OutputType...>(functor, N, p_out...);
    }
};


/**
 * @brief Unary operation template.
 *
 * Applies functor(out) to every element of the output array.
 *
 * @tparam FunctorType struct overloading operator()
 * @tparam OutputType output element type
 * @param N array length
 * @param functor the functor (copied to the device by the kernel launch)
 * @param p_out output data pointer (must be device memory)
 * @return cudaError_t
 */
template<typename FunctorType, typename OutputType>
inline cudaError_t Unary(int N, FunctorType functor, OutputType* p_out)
{  
    return GenericLauncher<FunctorType, OutputType>::launch(functor, N, p_out);
}


/**
 * @brief Binary operation template.
 *
 * Applies functor(out_a, out_b) element-wise over the two output arrays.
 * The functor is taken by value for consistency with Unary — the kernel
 * copies it to the device anyway — which also allows passing temporaries
 * and const functors (the previous non-const reference rejected both).
 *
 * @tparam FunctorType struct overloading operator()
 * @tparam OutputTypeA output element type A
 * @tparam OutputTypeB output element type B
 * @param N array length
 * @param functor the functor
 * @param p_out_a output data pointer A (must be device memory)
 * @param p_out_b output data pointer B (must be device memory)
 * @return cudaError_t
 */
template<typename FunctorType, typename OutputTypeA, typename OutputTypeB>
inline cudaError_t Binary(int N, FunctorType functor, OutputTypeA* p_out_a, OutputTypeB* p_out_b)
{
    return GenericLauncher<FunctorType, OutputTypeA, OutputTypeB>::launch(functor, N, p_out_a, p_out_b);
}


/**
 * @brief Ternary operation template.
 *
 * Applies functor(out_a, out_b, out_c) element-wise over the three output
 * arrays. The functor is taken by value for consistency with Unary — the
 * kernel copies it to the device anyway — which also allows passing
 * temporaries and const functors (the previous non-const reference
 * rejected both).
 *
 * @tparam FunctorType struct overloading operator()
 * @tparam OutputTypeA output element type A
 * @tparam OutputTypeB output element type B
 * @tparam OutputTypeC output element type C
 * @param N array length
 * @param functor the functor
 * @param p_out_a output data pointer A (must be device memory)
 * @param p_out_b output data pointer B (must be device memory)
 * @param p_out_c output data pointer C (must be device memory)
 * @return cudaError_t
 */
template<typename FunctorType, typename OutputTypeA, typename OutputTypeB, typename OutputTypeC>
inline cudaError_t Ternary(int N, FunctorType functor, OutputTypeA* p_out_a, OutputTypeB* p_out_b, OutputTypeC* p_out_c)
{
    return GenericLauncher<FunctorType, OutputTypeA, OutputTypeB, OutputTypeC>::launch(functor, N, p_out_a, p_out_b, p_out_c);
}

} // end namespace outputwise

namespace outputwiseIndex
{

/**************************************************************
 
功能算子:


struct SubRasterFunctor
{
    SubRasterFunctor(int offset_x, int offset_y
        , int sub_size_x, int sub_size_y
        , int src_size_x, int src_size_y, float* p_src_datas)
        : m_offset_x(offset_x), m_offset_y(offset_y)
        , m_sub_size_x(sub_size_x), m_sub_size_y(sub_size_y)
        , m_src_size_x(src_size_x), m_src_size_y(src_size_y)
        , m_p_src_datas(p_src_datas)
    { }

    __device__ void operator()(int id, float & out_val)
    {
        if (id < m_sub_size_x * m_sub_size_y) {
            // 将输出数据的线性索引转换为平面索引
            int sub_yid = id / m_sub_size_x;
            int sub_xid = id - sub_yid * m_sub_size_x;

            sub_yid += m_offset_y;
            sub_xid += m_offset_x;

            // 将平面索引转换为输入原图像的线性索引
            int src_ind = sub_xid + sub_yid * m_src_size_x;
            if (src_ind < m_src_size_x * m_src_size_y) {
                out_val = m_p_src_datas[src_ind];
            }
            else {
                out_val = 0;
            }
        }
    }

private:
    int m_offset_x;
    int m_offset_y;
    int m_sub_size_x;
    int m_sub_size_y;

    int m_src_size_x;
    int m_src_size_y;
    float* m_p_src_datas; 
};


**************************************************************/

/* Unpack a group of Packed values and run the functor on every element
 * slot, expanding the variadic output packs in lockstep.
 * Functor call shape: functor(ID, a, b, ...) where ID is the linear
 * element index and a, b are references to the matching pack elements.
 */
template<int pack_size, typename FunctorType, typename... OutputType>
__device__ void applyPack(FunctorType functor
    , int pack_id
    , Packed<pack_size, OutputType>&... dst_pack)
{
    /* Linear index of the first element in this pack. */
    const int base = pack_id * pack_size;
    #pragma unroll
    for (int k = 0; k < pack_size; ++k) {
        functor(base + k, (dst_pack.elem[k])...);
    }
}


/* Kernel. Note: any array/pointer kernel parameter must point to device
 * memory — this really matters. Also, kernel parameters must not be
 * references.
 *
 * Grid layout: 1-D grid of 1-D blocks (KERNEL_BLOCK_SIZE threads each).
 * Full packs are walked with a grid-stride loop; the num_tail leftover
 * elements are handled by the first num_tail threads of the grid. The
 * functor receives the element's linear index as its first argument.
 */
template<int pack_size, typename FunctorType, typename... OutputType>
__global__ void __launch_bounds__(KERNEL_BLOCK_SIZE)
applyGeneric(FunctorType functor
        , int num_pack
        , Packed<pack_size, OutputType>*... dst_pack
        , int num_tail
        , OutputType*... p_out_tail)
{
    const int global_tid = threadIdx.x + blockIdx.x * blockDim.x;
    for (int i = global_tid; i < num_pack; i += blockDim.x * gridDim.x) {
        /* Process a pack; i is forwarded as the pack id so applyPack can
         * reconstruct each element's linear index. */
        applyPack<pack_size, FunctorType, OutputType...>(
            functor, i, (dst_pack[i])...
        );
    }
    if (global_tid < num_tail) {
        /* Process the tail data; element indices continue past the packed
         * region, so offset by the number of packed elements. */
        const int tail_offset = num_pack * pack_size;
        functor(global_tid + tail_offset, (p_out_tail[global_tid])...);
    }
}


/* Preparation work for launching the kernel.
 *
 * Splits the N output elements into num_pack full packs of pack_size
 * elements plus a tail of num_tail (< pack_size) leftover elements,
 * then launches applyGeneric over both (default stream, no dynamic
 * shared memory). The functor receives each element's linear index.
 *
 * Precondition: every pointer in p_out... refers to device memory and,
 * for pack_size > 1, is aligned for Packed<pack_size, T> (callers check
 * this via isAlignedForPack in GenericLauncher).
 */
template<size_t pack_size, typename FunctorType, typename... OutputType>
cudaError_t launchKernel(FunctorType functor, int N, OutputType*... p_out)
{
    /* Empty (or invalid negative) range: nothing to do, skip the launch. */
    if (N <= 0) {
        return cudaSuccess;
    }

    /* Compute the pack count and the number of leftover tail elements. */
    const int num_pack = N / pack_size;
    const int tail_offset = num_pack * pack_size;
    const int num_tail = N - tail_offset;

    /* Compute the number of thread blocks. */
    int num_blocks;
    {
        // Does not call into hs_cuda_utils.cu.
        cudaError_t err = GetNumBlocks(num_pack, &num_blocks);
        if (err != cudaSuccess) {
            return err;
        }
    }
    /* Defensive: when N < pack_size there are zero packs but a non-empty
     * tail. If GetNumBlocks then reports zero blocks, the launch config
     * would be invalid and the tail never processed — guarantee at least
     * one block. (NOTE(review): confirm GetNumBlocks's behavior for 0.) */
    if (num_blocks < 1) {
        num_blocks = 1;
    }

    /* Launch the kernel. */
    applyGeneric<pack_size, FunctorType, OutputType...><<<num_blocks, KERNEL_BLOCK_SIZE, 0>>>(
        functor
        , num_pack
        , (reinterpret_cast<Packed<pack_size, OutputType>*>(p_out))...
        , num_tail
        , (p_out + tail_offset)...
    );

    /* Peek (rather than get) so the sticky error state is left untouched
     * for the caller's own error handling. */
    return cudaPeekAtLastError();
}


/* Wrapper that hides the pack_size choice from callers: picks the
 * vectorized pack size for these output types, but falls back to the
 * scalar (pack_size == 1) instantiation whenever any output pointer is
 * not aligned for packed access. */
template<typename FunctorType, typename... OutputType>
struct GenericLauncher
{
    static cudaError_t launch(FunctorType functor, int N, OutputType*... p_out)
    {
        constexpr int pack_size = PackSize<OutputType...>();

        if (!isAlignedForPack<pack_size, OutputType...>(p_out...)) {
            // Unaligned pointers: process elements one by one.
            return launchKernel<1, FunctorType, OutputType...>(functor, N, p_out...);
        }
        return launchKernel<pack_size, FunctorType, OutputType...>(functor, N, p_out...);
    }
};


/**
 * @brief Unary operation template (index-aware variant).
 *
 * Applies functor(index, out) to every element of the output array, where
 * index is the element's linear position.
 *
 * @tparam FunctorType struct overloading operator()
 * @tparam OutputType output element type
 * @param N array length
 * @param functor the functor (copied to the device by the kernel launch)
 * @param p_out output data pointer (must be device memory)
 * @return cudaError_t
 */
template<typename FunctorType, typename OutputType>
inline cudaError_t Unary(int N, FunctorType functor, OutputType* p_out)
{  
    return GenericLauncher<FunctorType, OutputType>::launch(functor, N, p_out);
}


/**
 * @brief Binary operation template (index-aware variant).
 *
 * Applies functor(index, out_a, out_b) element-wise over the two output
 * arrays. The functor is taken by value for consistency with Unary — the
 * kernel copies it to the device anyway — which also allows passing
 * temporaries and const functors (the previous non-const reference
 * rejected both).
 *
 * @tparam FunctorType struct overloading operator()
 * @tparam OutputTypeA output element type A
 * @tparam OutputTypeB output element type B
 * @param N array length
 * @param functor the functor
 * @param p_out_a output data pointer A (must be device memory)
 * @param p_out_b output data pointer B (must be device memory)
 * @return cudaError_t
 */
template<typename FunctorType, typename OutputTypeA, typename OutputTypeB>
inline cudaError_t Binary(int N, FunctorType functor, OutputTypeA* p_out_a, OutputTypeB* p_out_b)
{
    return GenericLauncher<FunctorType, OutputTypeA, OutputTypeB>::launch(functor, N, p_out_a, p_out_b);
}


/**
 * @brief Ternary operation template (index-aware variant).
 *
 * Applies functor(index, out_a, out_b, out_c) element-wise over the three
 * output arrays. The functor is taken by value for consistency with Unary —
 * the kernel copies it to the device anyway — which also allows passing
 * temporaries and const functors (the previous non-const reference
 * rejected both).
 *
 * @tparam FunctorType struct overloading operator()
 * @tparam OutputTypeA output element type A
 * @tparam OutputTypeB output element type B
 * @tparam OutputTypeC output element type C
 * @param N array length
 * @param functor the functor
 * @param p_out_a output data pointer A (must be device memory)
 * @param p_out_b output data pointer B (must be device memory)
 * @param p_out_c output data pointer C (must be device memory)
 * @return cudaError_t
 */
template<typename FunctorType, typename OutputTypeA, typename OutputTypeB, typename OutputTypeC>
inline cudaError_t Ternary(int N, FunctorType functor, OutputTypeA* p_out_a, OutputTypeB* p_out_b, OutputTypeC* p_out_c)
{
    return GenericLauncher<FunctorType, OutputTypeA, OutputTypeB, OutputTypeC>::launch(functor, N, p_out_a, p_out_b, p_out_c);
}

} // end namespace outputwiseIndex

} // end namespace cuda

} // end namespace hs

#endif
