#include <cuda_runtime_api.h>
#include <driver_types.h>

#include "CuMacro.h"
#include "MyPrtFns.h"
#include "Test0.cuh"

///xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx cuda 硬件基本信息
namespace te0_0
{
    int tt()
    {
        MY_ECHOFUNPATH;
        // Query device 0 and log the hardware properties we care about.
        constexpr int devId = 0;
        cudaDeviceProp prop;
        CUDA_CALLC(cudaGetDeviceProperties(&prop, devId));

        MYINFO("使用GPU device: {} -> {}", devId, prop.name);
        MYINFO("SM的数量: {}", prop.multiProcessorCount);                            // e.g. 70
        MYINFO("每个线程块的共享内存大小：{} KB", prop.sharedMemPerBlock / 1024.0);  // e.g. 48 KB
        MYINFO("每个线程块的最大线程数：{}", prop.maxThreadsPerBlock);
        MYINFO("每个SM的最大线程数: {}", prop.maxThreadsPerMultiProcessor);          // e.g. 1536
        MYINFO("每个SM的最大线程束数: {}", prop.maxThreadsPerMultiProcessor / 32);   // 32 threads per warp

        return 0;
    }
}  // namespace te0_0

///xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx 简单测试程序
namespace te0_1
{

    ///========================= kernel: element-wise vector add z = x + y; 1-D grid, 1-D block.
    // Uses a grid-stride loop, so any launch configuration covers all n elements
    // (a grid smaller than n simply makes each thread process several elements).
    CUDA_GLBL add(float* x, float* y, float* z, int n)
    {
        // Flat global thread index; blockDim = threads per block, gridDim = blocks per grid.
        int index = threadIdx.x + blockIdx.x * blockDim.x;
        // Total threads in the grid = elements handled per loop round.
        int stride = blockDim.x * gridDim.x;
        for (int i = index; i < n; i += stride)
        {
            z[i] = x[i] + y[i];
        }
    }

    ///========================= run on cuda (manual malloc/memcpy, now with error checks)
    // Adds two N-element vectors on the GPU and verifies the result on the host.
    // Returns 0 on success, -1 if host allocation fails.
    int tt0()
    {
        MY_ECHOFUNPATH;

        constexpr int N = 1 << 20;
        constexpr int nBytes = N * sizeof(float);

        /// Allocate host memory; bail out cleanly if any allocation fails.
        float* x = (float*)malloc(nBytes);
        float* y = (float*)malloc(nBytes);
        float* z = (float*)malloc(nBytes);
        if (!x || !y || !z)
        {
            free(x);
            free(y);
            free(z);
            return -1;
        }

        /// Initialize inputs: x = 10, y = 20 -> every z element should be 30.
        for (int i = 0; i < N; ++i)
        {
            x[i] = 10.0f;
            y[i] = 20.0f;
        }

        // Allocate device memory; CUDA_CALLC checks each call (consistent with tt()).
        float *d_x, *d_y, *d_z;
        CUDA_CALLC(cudaMalloc((void**)&d_x, nBytes));
        CUDA_CALLC(cudaMalloc((void**)&d_y, nBytes));
        CUDA_CALLC(cudaMalloc((void**)&d_z, nBytes));

        /// Copy inputs host -> device.
        CUDA_CALLC(cudaMemcpy((void*)d_x, (void*)x, nBytes, cudaMemcpyHostToDevice));
        CUDA_CALLC(cudaMemcpy((void*)d_y, (void*)y, nBytes, cudaMemcpyHostToDevice));

        ///========================== Kernel launch configuration: 1-D, 256 threads/block.
        dim3 blockSize(256);
        dim3 gridSize((N + blockSize.x - 1) / blockSize.x);  // ceil(N / blockSize.x)

        // Launch, then immediately catch launch-configuration errors.
        add<<<gridSize, blockSize>>>(d_x, d_y, d_z, N);
        CUDA_CALLC(cudaGetLastError());

        /// Copy result device -> host (blocking cudaMemcpy synchronizes with the kernel).
        CUDA_CALLC(cudaMemcpy(z, d_z, nBytes, cudaMemcpyDeviceToHost));

        // Verify: maximum absolute deviation from the expected value 30.
        float maxError = 0.0;
        for (int i = 0; i < N; i++) maxError = fmax(maxError, fabs(z[i] - 30.0));
        std::cout << "Vector add, max error: " << maxError << std::endl;

        /// Free device memory, then host memory.
        CUDA_CALLC(cudaFree(d_x));
        CUDA_CALLC(cudaFree(d_y));
        CUDA_CALLC(cudaFree(d_z));
        free(x);
        free(y);
        free(z);

        return 0;
    }

    ///================================ Fully equivalent to tt0(), but using the project's
    /// checked macros / template helpers: shorter, and every CUDA call is error-checked.
    int tt()
    {
        MY_ECHOFUNPATH;
        constexpr int N = 1 << 20;  // helpers take element counts, so no byte count needed

        /// Allocate host memory for the three N-float vectors.
        float *x, *y, *z;
        HostMallocC<float>(N, x, y, z);

        /// Initialize inputs: x = 10, y = 20 -> expected z = 30.
        HostFillC<float>(10, N, x);
        HostFillC<float>(20, N, y);

        // Allocate device memory (equivalent to the three cudaMalloc calls in tt0()).
        float *d_x, *d_y, *d_z;
        CudaMaloc<float>(N, d_x, d_y, d_z);

        /// Copy inputs host -> device.
        CUDA_MCPYC(float, d_x, x, N, cudaMemcpyHostToDevice);
        CUDA_MCPYC(float, d_y, y, N, cudaMemcpyHostToDevice);

        ///========================== Kernel launch configuration.
        dim3 blockSize(256); // 256 threads per block = 8 warps; one element per thread
        dim3 gridSize(MYCEIL(N, blockSize.x));

        /// Launch the kernel (checked launch macro).
        CUDA_KRNL_LUNC(add, gridSize, blockSize, d_x, d_y, d_z, N)

        /// Copy the result device -> host.
        CUDA_MCPYC(float, z, d_z, N, cudaMemcpyDeviceToHost)

        // Verify: maximum absolute deviation from the expected value 30.
        float maxError = 0.0;
        for (int i = 0; i < N; i++) maxError = fmax(maxError, fabs(z[i] - 30.0));
        std::cout << "Vector add, max error: " << maxError << std::endl;

        /// Free device memory, then host memory.
        CudaFreeC(d_x, d_y, d_z);
        HostFreeC(x, y, z);

        return 0;
    }

    ///================================ Managed (unified) memory variant, functionally
    /// equivalent to tt(): one allocation is visible from both host and device.
    int tt1()
    {
        int N = 1 << 20;  // helpers take element counts, so no byte count needed

        /// Allocate managed memory (accessible from host and device).
        float *x, *y, *z;
        CudaMalocMC<float>(N, x, y, z);

        /// Initialize inputs on the host: x = 10, y = 20 -> expected z = 30.
        HostFillC<float>(10, N, x);
        HostFillC<float>(20, N, y);

        // Kernel launch configuration: 1-D, 256 threads per block.
        dim3 blockSize(256);
        dim3 gridSize((N + blockSize.x - 1) / blockSize.x);
        /// Launch the kernel directly on the managed buffers.
        CUDA_KRNL_LUNC(add, gridSize, blockSize, x, y, z, N);

        /// Synchronize so the host can safely read the managed result buffer;
        /// checked, consistent with the rest of the file.
        CUDA_CALLC(cudaDeviceSynchronize());

        /// Verify: maximum absolute deviation from the expected value 30.
        float maxError = 0.0;
        for (int i = 0; i < N; i++) maxError = fmax(maxError, fabs(z[i] - 30.0));
        std::cout << "最大误差: " << maxError << std::endl;

        // Free the managed buffers.
        CudaFreeC(x, y, z);

        return 0;
    }
}  // namespace te0_1

///xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
namespace te0_2
{

    // Element-wise vector add: c[i] = a[i] + b[i].
    // Expects a single 1-D block with one thread per element; there is no bounds
    // guard, so the launch must use exactly as many threads as there are elements.
    CUDA_GLBL addKernel(int* c, const int* a, const int* b)
    {
        const int idx = threadIdx.x;
        c[idx] = a[idx] + b[idx];
    }

    // Adds two int vectors on the GPU: c = a + b, `len` elements each.
    // Returns cudaSuccess on success (CUDA_CALLC handles/reports CUDA failures).
    // Precondition: `len` must not exceed the device's max threads per block,
    // because the kernel is launched as a single block of `len` threads.
    cudaError_t addWithCuda(int* c, const int* a, const int* b, unsigned int len)
    {
        // Choose which GPU to run on; change this on a multi-GPU system.
        CUDA_CALLC(cudaSetDevice(0));

        // Allocate device buffers for the two inputs and the output.
        int *dev_a, *dev_b, *dev_c;
        CudaMaloc<int>(len, dev_a, dev_b, dev_c);

        // Copy input vectors from host memory to GPU buffers. dev_c is write-only:
        // the kernel overwrites every element, so no host-to-device copy is needed.
        CUDA_MCPYC(int, dev_a, a, len, cudaMemcpyHostToDevice);
        CUDA_MCPYC(int, dev_b, b, len, cudaMemcpyHostToDevice);

        /// Launch one block with one thread per element.
        CUDA_KRNL_LUNC(addKernel, 1, len, dev_c, dev_a, dev_b);

        // Wait for the kernel to finish and surface any execution errors.
        CUDA_CALLC(cudaDeviceSynchronize());

        /// Copy the output vector from the GPU buffer back to host memory.
        CUDA_MCPYC(int, c, dev_c, len, cudaMemcpyDeviceToHost);

        CudaFreeC(dev_a, dev_b, dev_c);

        return cudaSuccess;
    }

    // Runs addWithCuda on two small constant vectors and logs the result.
    int tt()
    {
        MY_ECHOFUNPATH;
        constexpr int arraySize = 5;
        constexpr int a[arraySize] = {1, 2, 3, 4, 11};
        constexpr int b[arraySize] = {10, 20, 30, 40, 161};
        int c[arraySize] = {0};

        // Add vectors in parallel using CUDA.
        CUDA_CALLC(addWithCuda(c, a, b, arraySize));

        // Log the result (message fixed to match the actual inputs above).
        spdlog::info("addWithCuda result: [1,2,3,4,11] + [10,20,30,40,161] = [{},{},{},{},{}]", c[0], c[1], c[2], c[3], c[4]);

        return 0;
    }

    //---- 计算 block num of grid; 如果 N 恰好被整除, 会多算一个
    // dim3 numBlocks(N / threadsPerBlock.x + 1, N / threadsPerBlock.y + 1);

    //---- 跟下面等价; 除法向下取整; 加上 1-eps; eps 是最小分辨率
    // dim3 numBlocks((N + threadsPerBlock.x - 1) / threadsPerBlock.x, (N + threadsPerBlock.y - 1) / threadsPerBlock.y)

    //---- 如果 N 刚好被整除, 上面+1 会多执行一次; 所以这里用 (N - 1) / threadsPerBlock.x + 1
    // dim3 numBlocks((N - 1) / threadsPerBlock.x + 1, (N - 1) / threadsPerBlock.y + 1)

}  // namespace te0_2
