// reduce0.cu
#include <cuda_runtime.h>

#include <cmath>    // std::abs(double)
#include <cstdlib>  // std::exit, EXIT_FAILURE
#include <iomanip>
#include <iostream>
#include <numeric>
#include <random>
#include <vector>

#ifndef THREAD_PER_BLOCK
#define THREAD_PER_BLOCK 256   // must be a power of two; the example kernel requires it
#endif

// Simple CUDA error-checking macro: prints the error string with file/line and exits.
#define CUDA_CHECK(call)                                                              \
    do {                                                                              \
        cudaError_t err__ = (call);                                                   \
        if (err__ != cudaSuccess) {                                                   \
            std::cerr << "CUDA error " << cudaGetErrorString(err__)                   \
                      << " at " << __FILE__ << ":" << __LINE__ << std::endl;          \
            std::exit(EXIT_FAILURE);                                                  \
        }                                                                             \
    } while (0)

// Baseline interleaved-addressing sum reduction ("reduce0").
//
// Grid/block contract (no bounds guard — caller must guarantee both):
//   - blockDim.x == THREAD_PER_BLOCK and must be a power of two; otherwise
//     the interleaved step `sdata[tid + s]` indexes past the shared array.
//   - gridDim.x * blockDim.x == input length; every thread loads exactly
//     one element of d_in.
//
// Each block writes its partial sum to d_out[blockIdx.x]; the host (or a
// second kernel pass) combines the per-block partials.
//
// Known inefficiency, kept intentionally as the baseline version: the
// `tid % (2*s) == 0` predicate diverges within warps and uses an expensive
// modulo. Later variants (reduce1, reduce2, ...) fix this.
__global__ void reduce0(const float* __restrict__ d_in, float* __restrict__ d_out) {
    __shared__ float sdata[THREAD_PER_BLOCK];

    // Each thread stages one element from global into shared memory.
    unsigned int i   = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int tid = threadIdx.x;
    sdata[tid] = d_in[i];
    __syncthreads();  // all loads must complete before any thread reduces

    // Interleaved-addressing tree reduction in shared memory.
    for (unsigned int s = 1; s < blockDim.x; s *= 2) {
        if (tid % (2 * s) == 0) {
            sdata[tid] += sdata[tid + s];
        }
        __syncthreads();  // barrier kept outside the divergent branch (required)
    }

    // Thread 0 publishes this block's partial sum.
    if (tid == 0) d_out[blockIdx.x] = sdata[0];
}

// Host driver: fills an array with seeded randoms, reduces it on the GPU
// (one partial sum per block), finishes the sum on the host, and compares
// against a double-precision CPU reference.
int main() {
    // ---------------- Configuration and data preparation ----------------
    const size_t N = 1 << 20;  // 1,048,576 elements

    // reduce0 has no bounds guard and uses interleaved addressing, so these
    // preconditions are hard requirements, not style choices.
    static_assert((THREAD_PER_BLOCK & (THREAD_PER_BLOCK - 1)) == 0,
                  "THREAD_PER_BLOCK must be a power of two.");
    if (N % THREAD_PER_BLOCK != 0) {
        std::cerr << "N must be a multiple of THREAD_PER_BLOCK for this kernel.\n";
        return EXIT_FAILURE;
    }

    const int threads = THREAD_PER_BLOCK;
    const int blocks  = static_cast<int>(N / THREAD_PER_BLOCK);

    std::cout << "N=" << N
              << ", THREAD_PER_BLOCK=" << threads
              << ", blocks=" << blocks << std::endl;

    // Host input: seeded uniform randoms so every run is reproducible.
    std::vector<float> h_in(N);
    std::mt19937 rng(12345);
    std::uniform_real_distribution<float> dist(0.0f, 1.0f);
    for (size_t i = 0; i < N; ++i) h_in[i] = dist(rng);

    // CPU reference sum, accumulated in double to limit rounding error.
    double ref_sum = 0.0;
    for (size_t i = 0; i < N; ++i) ref_sum += static_cast<double>(h_in[i]);

    // ---------------- Device allocations ----------------
    float *d_in  = nullptr;
    float *d_out = nullptr;
    CUDA_CHECK(cudaMalloc(&d_in,  N * sizeof(float)));
    CUDA_CHECK(cudaMalloc(&d_out, blocks * sizeof(float)));

    // Copy the input to the device.
    CUDA_CHECK(cudaMemcpy(d_in, h_in.data(), N * sizeof(float), cudaMemcpyHostToDevice));

    // ---------------- Kernel launch ----------------
    reduce0<<<blocks, threads>>>(d_in, d_out);
    CUDA_CHECK(cudaGetLastError());       // catches launch-configuration errors
    CUDA_CHECK(cudaDeviceSynchronize());  // catches asynchronous execution errors

    // ---------------- Gather the per-block partial sums ----------------
    std::vector<float> h_partial(blocks);
    CUDA_CHECK(cudaMemcpy(h_partial.data(), d_out, blocks * sizeof(float), cudaMemcpyDeviceToHost));

    // Final combine on the host, again in double precision.
    double gpu_sum = 0.0;
    for (int b = 0; b < blocks; ++b) gpu_sum += static_cast<double>(h_partial[b]);

    // ---------------- Verification ----------------
    double abs_err = std::abs(gpu_sum - ref_sum);
    double rel_err = abs_err / (std::abs(ref_sum) + 1e-12);

    std::cout << std::fixed << std::setprecision(6);
    std::cout << "CPU sum = " << ref_sum << "\n";
    std::cout << "GPU sum = " << gpu_sum << "\n";
    std::cout << "abs err = " << abs_err  << ", rel err = " << rel_err << "\n";

    // ---------------- Cleanup ----------------
    CUDA_CHECK(cudaFree(d_in));
    CUDA_CHECK(cudaFree(d_out));

    // Relative tolerance, not exact equality: the GPU sums in float while the
    // reference accumulates in double, so bit-exact agreement is not expected.
    std::cout << (rel_err < 1e-6 ? "PASS" : "CHECK") << std::endl;
    return 0;
}
