#include <cuda_runtime.h>

#include <sys/time.h>
#include <time.h>

#include <cmath>
#include <cstdio>
#include <cstdlib>

// Threads per block; must be a multiple of warpSize (32).
#define THREAD_PER_BLOCK 256
// Ceiling integer division: ceil(a / b) for positive integers.
// BUGFIX: fully parenthesized so the result composes safely inside larger
// expressions (e.g. `x / ceil(a, b)` previously expanded to `x / (a+b-1) / b`).
// NOTE(review): this function-like macro shadows <cmath> ceil(x) for
// two-argument call sites; no single-argument ceil() call exists in this file.
#define ceil(a, b) (((a) + (b) - 1) / (b))

// Final 64 -> 1 stage of a shared-memory block reduction: strides 32, 16,
// ..., 1 fold the upper half onto the lower half; the total lands in
// shared[0].
//
// Preconditions: exactly the first 32 threads of the block call this
// (all lanes of warp 0), and shared[] holds at least 64 valid floats.
__device__ void warpReduce(volatile float *shared, unsigned int tid){
    // Halve the stride from 32 down to 1.
    for (int stride = 32; stride > 0; stride >>= 1) {
        if (tid < stride) {
            shared[tid] += shared[tid + stride];
        }
        // BUGFIX: since Volta (SM70) lanes of a warp no longer execute in
        // guaranteed lockstep; the read-after-write dependency between
        // iterations must be ordered explicitly — `volatile` alone is not
        // sufficient. All 32 lanes reach this barrier (the `if` above only
        // guards the store), so the full-warp sync is well-formed.
        __syncwarp();
    }
}

// Block-level sum reduction using warp shuffle intrinsics instead of shared
// memory for the intra-warp stage (requires SM30+; the *_sync variants
// require CUDA 9+).
//
// Launch layout: 1-D grid, blockDim.x == THREAD_PER_BLOCK. Each block
// reduces the DATA_PER_BLOCK consecutive input elements starting at
// d_in[blockIdx.x * DATA_PER_BLOCK] and writes one partial sum to
// d_out[blockIdx.x]. Precondition: DATA_PER_BLOCK ==
// THREAD_PER_BLOCK * DATA_PER_THREAD and the input tiles exactly.
template <unsigned int DATA_PER_BLOCK, unsigned int DATA_PER_THREAD>
__global__ void reduce7(float *d_in, float *d_out) {
    const int tid = threadIdx.x;

    // 1. Each thread accumulates DATA_PER_THREAD elements from global memory
    //    into a register. Adjacent threads read adjacent addresses, so each
    //    warp's loads coalesce.
    //    BUGFIX: the block's segment starts at blockIdx.x * DATA_PER_BLOCK
    //    (the old code used threadIdx.x as the base and DATA_PER_BLOCK as the
    //    per-iteration stride, reading the wrong — and for later blocks
    //    out-of-range — elements).
    float sum = 0.0f;
    const float *block_begin = d_in + 1ull * blockIdx.x * DATA_PER_BLOCK;
    for (unsigned int i = 0; i < DATA_PER_THREAD; ++i) {
        sum += block_begin[tid + i * blockDim.x];
    }

    // 2. Intra-warp reduction: after these shuffles, lane 0 of every warp
    //    holds the sum over all 32 lanes of that warp (other lanes hold
    //    meaningless partials).
    sum += __shfl_down_sync(0xffffffff, sum, 16);
    sum += __shfl_down_sync(0xffffffff, sum, 8);
    sum += __shfl_down_sync(0xffffffff, sum, 4);
    sum += __shfl_down_sync(0xffffffff, sum, 2);
    sum += __shfl_down_sync(0xffffffff, sum, 1);

    // 3. Lane 0 of each warp publishes its partial sum to shared memory.
    constexpr int warpPerBlock = ceil(THREAD_PER_BLOCK, 32);
    __shared__ float warpLevelSums[warpPerBlock];
    const int laneId = tid % warpSize;
    const int warpId = tid / warpSize;
    if (laneId == 0) {
        warpLevelSums[warpId] = sum;
    }
    // Block-wide barrier: warp 0 must see every warp's published partial.
    __syncthreads();

    // 4. Second-level reduction: warp 0 folds the per-warp partials. Lanes
    //    beyond warpPerBlock contribute a neutral 0 so the full-warp
    //    shuffles stay valid.
    if (warpId == 0) {
        sum = (laneId < warpPerBlock) ? warpLevelSums[laneId] : 0.0f;
        sum += __shfl_down_sync(0xffffffff, sum, 16);
        sum += __shfl_down_sync(0xffffffff, sum, 8);
        sum += __shfl_down_sync(0xffffffff, sum, 4);
        sum += __shfl_down_sync(0xffffffff, sum, 2);
        sum += __shfl_down_sync(0xffffffff, sum, 1);
    }

    if (tid == 0)
        d_out[blockIdx.x] = sum;
}


// Element-wise comparison of two float arrays within an absolute tolerance.
//
// res: expected values (CPU reference); out: values under test; n: length.
// Returns false as soon as any |res[i] - out[i]| exceeds 5e-4; returns true
// otherwise (vacuously for n == 0).
bool check(const float *res, const float *out, int n) {
    for (int i = 0; i < n; ++i) {
        // BUGFIX: use the float overload explicitly; an unqualified abs() can
        // resolve to the integer version, truncating every sub-1.0 difference
        // to zero and making the comparison vacuously pass.
        if (fabsf(res[i] - out[i]) > 0.0005f) {
            return false;
        }
    }
    return true;
}

// Plan A: shrink the number of blocks while keeping the block size fixed,
// i.e. let each thread accumulate several input elements.

// Abort with a readable message when a CUDA runtime call fails.
static void checkCuda(cudaError_t err, const char *what) {
    if (err != cudaSuccess) {
        fprintf(stderr, "CUDA error in %s: %s\n", what, cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}

int main() {
    constexpr int N = 32 * 1024 * 1024;
    float *h_a = (float *)malloc(N * sizeof(float));
    float *d_a = nullptr;
    checkCuda(cudaMalloc((void **)&d_a, N * sizeof(float)), "cudaMalloc d_a");

    // Two elements per thread -> half as many blocks as a one-element-per-
    // thread launch: block_nums = 65536, data_per_block = 512, per thread = 2.
    constexpr int block_nums = ceil(N, THREAD_PER_BLOCK) / 2;
    constexpr int data_per_block = N / block_nums;
    constexpr int data_per_thread = data_per_block / THREAD_PER_BLOCK;
    static_assert(block_nums * data_per_block == N,
                  "grid must tile the input exactly");

    // One partial sum per block.
    float *h_out = (float *)malloc(block_nums * sizeof(float));
    float *d_out = nullptr;
    checkCuda(cudaMalloc((void **)&d_out, block_nums * sizeof(float)),
              "cudaMalloc d_out");
    float *res = (float *)malloc(block_nums * sizeof(float));

    for (int i = 0; i < N; ++i) {
        h_a[i] = 1;
    }

    // CPU reference: per-block partial sums.
    for (int i = 0; i < block_nums; ++i) {
        float cur = 0;
        for (int j = 0; j < data_per_block; ++j) {
            cur += h_a[i * data_per_block + j];
        }
        res[i] = cur;
    }

    // Copy the input to the device.
    checkCuda(cudaMemcpy(d_a, h_a, N * sizeof(float), cudaMemcpyHostToDevice),
              "H2D copy");

    // First pass: one partial sum per block.
    dim3 grid_size(block_nums, 1);
    dim3 block_size(THREAD_PER_BLOCK, 1);
    reduce7<data_per_block, data_per_thread><<<grid_size, block_size>>>(d_a, d_out);
    checkCuda(cudaGetLastError(), "reduce7 launch");

    // Copy partial sums back to the host (blocking cudaMemcpy also
    // synchronizes with the kernel above).
    checkCuda(cudaMemcpy(h_out, d_out, block_nums * sizeof(float),
                         cudaMemcpyDeviceToHost), "D2H copy");

    if (check(res, h_out, block_nums)) {
        printf("the result is right\n");
    } else {
        printf("the result is wrong\n");
        for (int i = 0; i < block_nums; ++i) {
            printf("%.2f ", h_out[i]);
        }
        printf("\n");
    }

    // Second pass: reduce the block_nums partial sums to a single value with
    // one block. BUGFIX: the template arguments must describe THIS launch
    // (block_nums inputs handled by one block), not the first one — the old
    // call reused <data_per_block, data_per_thread> and so summed only the
    // first 512 of the 65536 partials (and read d_out out of bounds).
    static_assert(block_nums % THREAD_PER_BLOCK == 0,
                  "second pass assumes an exact tiling of the partial sums");
    float h_final_res = 0, *d_final_out = nullptr;
    checkCuda(cudaMalloc((void **)&d_final_out, sizeof(float)),
              "cudaMalloc d_final_out");
    reduce7<block_nums, block_nums / THREAD_PER_BLOCK><<<1, block_size>>>(d_out, d_final_out);
    checkCuda(cudaGetLastError(), "reduce7 final launch");
    float d_final_res = 0;
    checkCuda(cudaMemcpy(&d_final_res, d_final_out, sizeof(float),
                         cudaMemcpyDeviceToHost), "final D2H copy");

    // CPU reference for the grand total.
    for (int i = 0; i < block_nums; ++i) {
        h_final_res += h_out[i];
    }

    if (fabsf(d_final_res - h_final_res) < 1e-2f) {
        printf("the final res is right\n");
    } else {
        printf("the final res is wrong\n h_final_res is :%.2f, d_final_res is :%.2f\n",
               h_final_res, d_final_res);
    }
    printf("block_nums: %d\n", block_nums);

    free(h_a);
    free(h_out);   // BUGFIX: was leaked
    free(res);
    cudaFree(d_a);
    cudaFree(d_out);
    cudaFree(d_final_out);
    return 0;
}