#include <stdio.h>

#include <numeric>
#include <vector>

#include "CuFUtils.cuh"
#include "CuMacro.h"
#include "Logger.h"
#include "MyMacro.h"
#include "MyPrtFns.h"
#include "Test1.cuh"

static constexpr int BLOCK_SIZE = 256;

namespace te1_0
{

    // Element-wise vector addition: c[i] = a[i] + b[i] for i in [0, n).
    // CUDA_KRNL_LOOP presumably expands to a bounds-checked grid-stride
    // loop over i -- TODO confirm against CuMacro.h.
    CUDA_GLBL addVectors(float* a, float* b, float* c, int n)
    {
        CUDA_KRNL_LOOP(i, n)  //
        {
            c[i] = a[i] + b[i];
        }
    }

    // Demo: allocate two device vectors, add them on the GPU, and log a
    // sample of the result. Returns 0 on completion.
    int tt()
    {
        MY_ECHOFUNPATH;
        constexpr int len = 1000;

        // Allocate the three device buffers with the project helper.
        float *a, *b, *c;
        CudaMaloc<float>(len, a, b, c);

        logInfo("initial data");
        // Build the host-side inputs. std::vector replaces the original raw
        // malloc/free pairs: RAII cleanup, no leak on early exit, and no
        // unchecked-malloc hazard.
        std::vector<float> host_a(len);
        std::vector<float> host_b(len);
        for (int i = 0; i < len; ++i)
        {
            host_a[i] = static_cast<float>(i);
            host_b[i] = static_cast<float>(i * 2);
        }
        // Copy the inputs host -> device.
        CUDA_MCPYC(float, a, host_a.data(), len, cudaMemcpyHostToDevice);
        CUDA_MCPYC(float, b, host_b.data(), len, cudaMemcpyHostToDevice);

        // Note: device memory cannot be dereferenced on the host, so e.g.
        // printf("a[0] = %f\n", a[0]); would be invalid here.

        // Launch the kernel with a ceil-div grid covering all len elements.
        constexpr int threads = 256;
        constexpr int blocks = (len + threads - 1) / threads;
        CUDA_KRNL_LUNC(addVectors, blocks, threads, a, b, c, len);

        // Wait for the kernel to finish (also surfaces async errors).
        CUDA_CALLC(cudaDeviceSynchronize());

        // Copy the result device -> host for printing.
        std::vector<float> host_c(len);
        CUDA_MCPYC(float, host_c.data(), c, len, cudaMemcpyDeviceToHost);

        // Log 10 evenly spaced samples of c.
        spdlog::info("计算结果 c 的 10 个等间隔值：");
        const int step = len / 9;  // stride between the sampled indices
        for (int i = 0; i < 10; i++)
        {
            int index = i * step;
            if (index >= len) index = len - 1;  // clamp to the last element
            spdlog::info("\tc[{}] = {}", index, host_c[index]);
        }

        // Release the device buffers.
        CudaFreeC(a, b, c);

        return 0;
    }
}  // namespace te1_0

namespace te1_1
{

    // Demo: fill a device array with 0..lenArr-1, run the CuReduceMinMax
    // kernel over it, and log the min/max found.
    void tt()
    {
        constexpr int lenArr = 1 << 10;
        const int nBlocks = MYCEIL(lenArr, BLOCK_SIZE);

        // Device buffers: the input doses plus single-element min/max
        // outputs. The original also declared d_volumes (allocated, never
        // used) and d_binVolumes (never even allocated) -- both removed.
        double *d_doses, *d_minDose, *d_maxDose;
        CudaMaloc<double>(lenArr, d_doses);
        CudaMaloc<double>(1, d_minDose, d_maxDose);

        std::vector<double> doses(lenArr);
        std::iota(begin(doses), end(doses), 0.0);
        CUDA_MCPYC(double, d_doses, doses.data(), lenArr, cudaMemcpyHostToDevice);

        CuReduceMinMax<<<nBlocks, BLOCK_SIZE>>>(lenArr, d_doses, d_minDose, d_maxDose);

        // The blocking device->host copies below also synchronize with the
        // kernel, so no explicit cudaDeviceSynchronize is needed here.
        double minDose, maxDose;
        CUDA_MCPYC(double, &minDose, d_minDose, 1, cudaMemcpyDeviceToHost);
        CUDA_MCPYC(double, &maxDose, d_maxDose, 1, cudaMemcpyDeviceToHost);

        MYINFO("min dose -> {}, max dose -> {}", minDose, maxDose);

        // FIX: the original leaked every device allocation; free them the
        // same way te1_0::tt() does.
        CudaFreeC(d_doses, d_minDose, d_maxDose);
    }

};  // namespace te1_1

//xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

namespace te1_2
{
    // Broadcast x from lane srcLane to every lane of the warp (full mask;
    // all 32 lanes must be active).
    __device__ float broadcast(float x, int srcLane)
    {
        return __shfl_sync(0xffffffff, x, srcLane);
    }

    //===================== inclusive scan (prefix sum) within a warp
    __device__ int warp_scan(int val)
    {
#pragma unroll
        for (int off = 1; off < 32; off <<= 1)
        {
            const int tmp = __shfl_up_sync(MYFULL_MASK, val, off);
            // Boundary check: only lanes whose lane index >= off received a
            // valid value from a lower lane and may accumulate it.
            if ((threadIdx.x & 0x1f) >= off)
            {
                val += tmp;
            }
        }
        return val;
    }

    //===================== tree reduction (sum) within a warp
    __device__ float warp_reduce_sum(float val)
    {
        MYLANE_ID_X(laneID);
        for (int off = 16; off > 0; off >>= 1)
        {
            // BUG FIX: tmp was declared `int`, silently truncating the
            // shuffled float partial sums (the removed #else branch's
            // "ternary also fails" note was a symptom of the same bug).
            const float tmp = __shfl_down_sync(MYFULL_MASK, val, off);
            if (laneID < off) val += tmp;
        }

        return val;  // the final sum is valid only on lane 0
    }
}  // namespace te1_2

// Kernel exercising the te1_2 warp primitives: broadcast, inclusive scan,
// and sum-reduction; results are printed from selected lanes only.
// NOTE(review): `len` is never used -- there is no bounds guard, so the
// launch must cover exactly whole warps (te1_2::tt() uses 32 threads per
// block); confirm all callers do the same.
__global__ void te1_2_run_shfl(int len)
{
    MYIDX_X(idx);       // presumably the flat global thread index -- see MyMacro.h
    MYLANE_ID_X(lidx);  // presumably the lane index within the warp
    const float xf = lidx;
    const int srcLane = 3;
    //========================== broadcast lane 3's value to the whole warp
    const auto val = te1_2::broadcast(xf, srcLane);
    if (lidx <= 5)  // print only a few lanes to keep output readable
    {
        printf("idx %d, lane %d, warped value -> %f\n", idx, lidx, val);
    }

    //========================== inclusive scan of lane ids; lane 31 holds the warp total
    const int xd = lidx;
    const auto xd_sum = te1_2::warp_scan(xd);
    if (lidx == 31)
    {
        printf("idx %d lane %d, xd_sum = %d\n", idx, lidx, xd_sum);
    }

    //========================== warp sum-reduction; result valid on lane 0 only
    const auto xf_sum = te1_2::warp_reduce_sum(xf);
    if (lidx == 0)
    {
        printf("idx %d lane %d, xf_sum = %f\n", idx, lidx, xf_sum);
    }
}

namespace te1_2
{

    // Launch te1_2_run_shfl with two full warps (64 threads, 32 per block)
    // and wait for it to finish.
    void tt()
    {
        constexpr int nThrds = 32;
        constexpr int len = nThrds * 2;
        const int nBlks = MYCEIL(len, nThrds);
        MYINFO("blocks = {}", nBlks);

        te1_2_run_shfl<<<nBlks, nThrds>>>(len);

        // FIX: the original returned without synchronizing, so the kernel's
        // printf output could be lost if the process exits, and launch or
        // execution errors were never surfaced. Sync the same way
        // te1_0::tt() does.
        CUDA_CALLC(cudaDeviceSynchronize());
    }
}  // namespace te1_2