#pragma once
#include "MyRecurse.h"

//xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx CUDA kernel function qualifiers
// NOTE(review): CUDA_GLBL bundles the `void` return type, but CUDA_EXTC_GLBL
// does not — users of the latter must write `void` themselves; confirm this
// asymmetry is intended.
#define CUDA_GLBL __global__ void
#define CUDA_EXTC_GLBL extern "C" __global__

// CUDA device function qualifiers
#define CUDA_DVCE __device__
#define CUDA_EXTC_DVCE extern "C" __device__

// CUDA host+device function qualifiers
#define CUDA_HOST_DVCE __host__ __device__
#define CUDA_EXTC_HOST_DVCE extern "C" __host__ __device__

// CUDA host function qualifiers
#define CUDA_HOST __host__
#define CUDA_EXTC_HOST extern "C" __host__

// plain extern "C" linkage qualifier
#define CUDA_EXTC extern "C"

///xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx exception handling
// Two terminal error policies: hard exit, or throw the given object.
#define MY_CUDA_ERR_EXIT(x) exit(x)
#define MY_CUDA_ERR_EXCPT(err) throw err
// Error dispatch point: currently configured to throw; swap to
// MY_CUDA_ERR_EXIT to abort the process instead.
#define MY_CUDA_ERR(x) MY_CUDA_ERR_EXCPT(x)

// Throw a std::runtime_error with a plain message.
#define MY_CUDA_ERR_MSG(msg) throw std::runtime_error(msg)
// Throw a std::runtime_error with a fmt-formatted message.
// The parameter must NOT be named `fmt`: macro substitution would also
// rewrite the `fmt` in `fmt::format`, producing e.g. `"msg"::format(...)`.
#define MY_CUDA_ERR_MSG_FMT(fmt_str, ...) throw std::runtime_error(fmt::format(fmt_str, ##__VA_ARGS__))
// Report any pending CUDA error, then throw a fmt-formatted runtime_error.
// The original threw FIRST, making the entire CUDA check unreachable dead
// code; the check now runs before the throw. Wrapped in do/while(0) so the
// multi-statement body acts as one statement and the local `err` cannot
// collide with names in the caller's scope. Parameter is `fmt_str`, not
// `fmt`, to avoid clobbering the `fmt::` namespace qualifier.
#define MY_CUDA_ERR_MSG_FMT_CUDA(fmt_str, ...)                                               \
    do                                                                                       \
    {                                                                                        \
        cudaError_t err = cudaGetLastError();                                                \
        if (err != cudaSuccess)                                                              \
        {                                                                                    \
            printf("CUDA Error: %s (%s:%d)\n", cudaGetErrorString(err), __FILE__, __LINE__); \
        }                                                                                    \
        throw std::runtime_error(fmt::format(fmt_str, ##__VA_ARGS__));                       \
    } while (0)

///xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx CUDA call checking
/// call with check: evaluate `call`, print and dispatch the error on failure.
// do/while(0) makes the macro a single statement so it composes safely with
// if/else (a bare {...} followed by the caller's `;` is a syntax error
// before `else`). `call` is parenthesized to keep expression arguments intact.
#define CUDA_CALLC(call)                                                                     \
    do                                                                                       \
    {                                                                                        \
        cudaError_t err = (call);                                                            \
        if (err != cudaSuccess)                                                              \
        {                                                                                    \
            printf("CUDA Error: %s (%s:%d)\n", cudaGetErrorString(err), __FILE__, __LINE__); \
            MY_CUDA_ERR(err);                                                                \
        }                                                                                    \
    } while (0)

///================================ check whether the previous call/launch failed
// Fetches (and clears) the last CUDA error — intended for use right after a
// kernel launch, which does not return an error code itself. do/while(0)
// keeps it a single statement (it is invoked with a trailing `;`, e.g. in
// CUDA_KRNL_LUNC) and scopes the local `err`.
#define CUDA_CHECK_LAST                                                                      \
    do                                                                                       \
    {                                                                                        \
        cudaError_t err = cudaGetLastError();                                                \
        if (err != cudaSuccess)                                                              \
        {                                                                                    \
            printf("CUDA Error: %s (%s:%d)\n", cudaGetErrorString(err), __FILE__, __LINE__); \
            MY_CUDA_ERR(err);                                                                \
        }                                                                                    \
    } while (0)

///xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx CUDA memory management

///================================ cuda malloc with check; simplified CUDA allocation/free
// Allocate `size` bytes on the device into `ptr`; print and dispatch the
// error on failure. Deliberately a bare {...} block, NOT do/while(0):
// MYHLP_CUDA_MALOCN below pastes this expansion directly after a declaration
// with no separating semicolon, which a do/while form would break.
// NOTE(review): `size` and `ptr` are substituted unparenthesized — pass
// simple identifiers/values rather than arbitrary expressions.
#define CUDA_MALOC(size, ptr)                                                                        \
    {                                                                                                \
        cudaError_t err = cudaMalloc((void**)&ptr, size);                                            \
        if (err != cudaSuccess)                                                                      \
        {                                                                                            \
            printf("CUDA Malloc Failed: %s (%s:%d)\n", cudaGetErrorString(err), __FILE__, __LINE__); \
            MY_CUDA_ERR(err);                                                                        \
        }                                                                                            \
    }

// Declare `type* ptr` and allocate `len` device elements for it.
// `len` is parenthesized so expression arguments (e.g. rows * cols) are
// multiplied by sizeof(type) as a unit — the original `sizeof(type) * len`
// mis-grouped such expressions.
#define MYHLP_CUDA_MALOCN(type, len, ptr) \
    type* ptr;                            \
    CUDA_MALOC((sizeof(type) * (len)), ptr)

// Declare and allocate several same-type/same-length device pointers:
// CUDA_MALOCNS(float, n, a, b, c);  (expansion driven by MyRecurse.h)
#define CUDA_MALOCNS(type, len, ...) MYHLP_RCUR2_SELECT_FN(__VA_ARGS__)(MYHLP_CUDA_MALOCN, type, len, __VA_ARGS__)

///xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx host memory management
///================================ host allocation with check
// Allocate `size` bytes for `ptr` (cast to type*); print and dispatch an
// error on failure. The body is now wrapped in a brace block so the trailing
// `if` cannot capture an `else` from the surrounding code — the original
// unbraced two-statement form was unsafe inside if/else. A bare block (not
// do/while) is used because MYHLE_HOST_MALOCN pastes this expansion after a
// declaration with no separating semicolon.
#define HOST_MALOC(type, size, ptr)                                    \
    {                                                                  \
        ptr = (type*)malloc(size);                                     \
        if (ptr == nullptr)                                            \
        {                                                              \
            printf("Host Malloc Failed: %s:%d\n", __FILE__, __LINE__); \
            MY_CUDA_ERR(std::runtime_error("Host Malloc Failed"));     \
        }                                                              \
    }

///================================ allocate multiple host pointers of the same type and length
// Declare `type* ptr{nullptr}` and allocate `len` host elements for it.
// Renamed to the MYHLP_ prefix used by every other helper in this file
// (the original MYHLE_ spelling was a typo); `len` is parenthesized so
// expression arguments multiply as a unit.
#define MYHLP_HOST_MALOCN(type, len, ptr) \
    type* ptr{nullptr};                   \
    HOST_MALOC(type, (sizeof(type) * (len)), ptr)

// Backward-compatible alias for the old (typo'd) helper name.
#define MYHLE_HOST_MALOCN(type, len, ptr) MYHLP_HOST_MALOCN(type, len, ptr)

// HOST_MALOCNS(float, n, a, b, c);  declares and allocates a, b, c.
#define HOST_MALOCNS(type, len, ...) MYHLP_RCUR2_SELECT_FN(__VA_ARGS__)(MYHLP_HOST_MALOCN, type, len, __VA_ARGS__)

///================================ host free with check
// Free a host pointer and null it out; a no-op when the pointer is already
// null, so double invocation is safe. `ptr` is parenthesized so it is
// compared and assigned as a unit. Kept as a plain if-block (not do/while):
// HOST_FREEC appears to paste several expansions back to back with no
// separating semicolons (see MyRecurse.h), which a do/while form would break.
#define MYHLP_HOST_FREEC(ptr) \
    if ((ptr) != nullptr)     \
    {                         \
        free(ptr);            \
        (ptr) = nullptr;      \
    }
#define HOST_FREEC(...) MYHLP_RCUR0_SELECT_FN(__VA_ARGS__)(MYHLP_HOST_FREEC, __VA_ARGS__)

///================================ cuda memset with check
// Byte-wise cudaMemset of `size` bytes at `ptr` to `value`; print and
// dispatch the error on failure. Kept as a bare {...} block (not do/while)
// so CUDA_MSETC can paste several expansions back to back without
// separating semicolons (expansion driven by MyRecurse.h).
#define MYHLP_CUDA_MSETC(value, size, ptr)                                                           \
    {                                                                                                \
        cudaError_t err = cudaMemset(ptr, value, size);                                              \
        if (err != cudaSuccess)                                                                      \
        {                                                                                            \
            printf("CUDA Memset Failed: %s (%s:%d)\n", cudaGetErrorString(err), __FILE__, __LINE__); \
            MY_CUDA_ERR(err);                                                                        \
        }                                                                                            \
    }
// CUDA_MSETC(0, bytes, a, b);  memsets each listed pointer.
#define CUDA_MSETC(value, size, ...) MYHLP_RCUR2_SELECT_FN(__VA_ARGS__)(MYHLP_CUDA_MSETC, value, size, __VA_ARGS__)

///================================ host memset
// Byte-wise memset of `size` bytes at `ptr` to `value`. The trailing
// semicolon is part of the expansion so HOST_MSETC can paste several
// calls back to back (expansion driven by MyRecurse.h).
#define MYHLP_HOST_MSETC(value, size, ptr) memset(ptr, value, size);
// HOST_MSETC(0, bytes, a, b);  memsets each listed pointer.
#define HOST_MSETC(value, size, ...) MYHLP_RCUR2_SELECT_FN(__VA_ARGS__)(MYHLP_HOST_MSETC, value, size, __VA_ARGS__)

// Initialize the first `len` elements of `ptr` to (type)(value).
// `len` and `value` are now parenthesized: the original `(type)value`
// mis-grouped expression values (e.g. (double)1/2 instead of (double)(1/2)),
// and an unparenthesized `len` could mis-bind in the loop condition.
#define MYHLP_HOST_AINIT(type, value, len, ptr) \
    for (int i = 0; i < (len); ++i)             \
    {                                           \
        ptr[i] = (type)(value);                 \
    }
// HOST_AINIT(float, 0, n, a, b);  initializes each listed array.
#define HOST_AINIT(type, value, len, ...) MYHLP_RCUR3_SELECT_FN(__VA_ARGS__)(MYHLP_HOST_AINIT, type, value, len, __VA_ARGS__)

///================================ device memory free with check
// cudaFree `ptr`; print and dispatch the error on failure. Kept as a bare
// {...} block (not do/while) so CUDA_FREEC can paste several expansions
// back to back without separating semicolons (expansion driven by
// MyRecurse.h). Note: unlike MYHLP_HOST_FREEC, this does not null the
// pointer after freeing.
#define MYHLP_CUDA_FREEC(ptr)                                                                      \
    {                                                                                              \
        cudaError_t err = cudaFree(ptr);                                                           \
        if (err != cudaSuccess)                                                                    \
        {                                                                                          \
            printf("CUDA Free Failed: %s (%s:%d)\n", cudaGetErrorString(err), __FILE__, __LINE__); \
            MY_CUDA_ERR(err);                                                                      \
        }                                                                                          \
    }
// CUDA_FREEC(a, b, c);  frees each listed device pointer.
#define CUDA_FREEC(...) MYHLP_RCUR0_SELECT_FN(__VA_ARGS__)(MYHLP_CUDA_FREEC, __VA_ARGS__)

///============================================== cuda memcpy with check
// Copy `len` elements of `type` from `src` to `dst` in the given
// cudaMemcpyKind `direction`; print and dispatch the error on failure.
// `len` is parenthesized so expression arguments (e.g. rows * cols) are
// multiplied by sizeof(type) as a unit — the original mis-grouped them.
#define CUDA_MCPYC(type, dst, src, len, direction)                                                 \
    {                                                                                              \
        cudaError_t err = cudaMemcpy(dst, src, sizeof(type) * (len), direction);                   \
        if (err != cudaSuccess)                                                                    \
        {                                                                                          \
            printf("CUDA Copy Failed: %s (%s:%d)\n", cudaGetErrorString(err), __FILE__, __LINE__); \
            MY_CUDA_ERR(err);                                                                      \
        }                                                                                          \
    }

///xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx CUDA kernel helpers
///======== grid-stride loop: thread-local index `i` covers [0, n) for any
///         grid size, advancing by the total thread count each iteration.
// NOTE(review): index arithmetic is int — overflows when n or the grid
// exceeds INT_MAX; confirm callers never need size_t-scale problem sizes.
#define CUDA_KRNL_LOOP(i, n) for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); i += blockDim.x * gridDim.x)

///======== launch a kernel and immediately check for launch errors.
// Wrapped in do/while(0) so launch + check act as ONE statement: with the
// original two-statement form, `if (cond) CUDA_KRNL_LUNC(...);` launched
// conditionally but ran the error check unconditionally. Uses the default
// stream and no dynamic shared memory.
#define CUDA_KRNL_LUNC(kernel, blocks, threads, ...)  \
    do                                                \
    {                                                 \
        kernel<<<(blocks), (threads)>>>(__VA_ARGS__); \
        CUDA_CHECK_LAST;                              \
    } while (0)

//xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx thread-index helper macros
// Flatten multi-dimensional coordinates into a linear offset.
// Every argument is parenthesized so expressions can be passed directly.
#define MY_IDX_L1(i, iDim, j) (((iDim) * (j)) + (i))  // row major: i is the fastest-varying axis
#define MY_IDX_L2(j, jDim, i) (((jDim) * (i)) + (j))  // column major: j is the fastest-varying axis

// 3D linear index, i fastest, then j, then k (Horner form of i + j*iDim + k*iDim*jDim)
#define MY_IDX3D_L1(i, j, k, iDim, jDim) ((i) + (iDim) * ((j) + (jDim) * (k)))

//xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx loop-optimization macros
// Hint the compiler to unroll (or not unroll) the loop that follows.
#define MY_UNRL_LOOP _Pragma("unroll")
#define MY_NO_UNRL_LOOP _Pragma("nounroll")

// Example usage (compiled out)
#if 0
template <typename T>
CUDA_GLBL processArray(T* data, int size)
{
    MY_UNRL_LOOP
    for (int i = 0; i < 4; i++)
    {
        // loop body
    }
}
#endif