#include <cuda_runtime.h>
#include <cstdio>
#include <time.h>
#include <sys/time.h>
#include <cmath>

#define THREAD_PER_BLOCK 256
// Ceiling division: number of size-b chunks needed to cover a elements.
// The whole expansion is parenthesized so the macro composes safely inside
// larger expressions (the original `((a)+(b)-1)/(b)` mis-grouped when used
// as a divisor, e.g. `x / ceil(a, b)`).
// NOTE: the name shadows the one-argument ::ceil from <cmath>.
#define ceil(a, b) (((a) + (b) - 1) / (b))

// Final reduction stage executed by a single warp (threads 0..31 of the
// block): the stride is halved from 32 down to 1, folding 64 partial sums
// in shared[0..63] into shared[0].
// `volatile` forces shared-memory operands to be re-read every step, and
// __syncwarp() provides the intra-warp ordering guarantee that lock-step
// execution no longer gives on Volta+ (independent thread scheduling) —
// the old "implicit warp synchronization" assumption is unsafe there.
__device__ void warpReduce(volatile float *shared, unsigned int tid){
    for (int stride = 32; stride > 0; stride >>= 1) {
        if (tid < stride) {
            shared[tid] += shared[tid + stride];
        }
        // All 32 lanes reach this point (the branch above has re-converged),
        // so a full-mask warp barrier is safe here.
        __syncwarp();
    }
}

// Block-sum reduction with the last warp fully unrolled: once the number of
// working threads drops to one warp, warpReduce() replaces the block-wide
// __syncthreads() and removes its overhead from the final log2(32) steps.
//
// Launch layout: 1-D grid, 1-D blocks of THREAD_PER_BLOCK threads. Block i
// reduces the 2 * THREAD_PER_BLOCK consecutive floats starting at
// d_in[i * 2 * blockDim.x] and writes the partial sum to d_out[i].
__global__ void reduce1(float *d_in, float *d_out) {
    const int tid = threadIdx.x;
    __shared__ float shared[THREAD_PER_BLOCK];
    // Idle-thread optimization: each thread sums two elements while loading.
    // No zero-initialization of shared[] is needed — every slot is written
    // before it is read.
    float *input_begin = d_in + blockIdx.x * blockDim.x * 2;
    shared[tid] = input_begin[tid] + input_begin[tid + blockDim.x];
    __syncthreads();

    // Fully unrolled tree reduction. THREAD_PER_BLOCK is a compile-time
    // constant, so the dead guards are removed by the compiler.
    // __syncthreads() must be reached by ALL threads of the block, so it is
    // placed OUTSIDE the divergent `tid < ...` branches (the original placed
    // it inside, which is undefined behavior / deadlock once the branch
    // actually diverges, e.g. with 512-thread blocks).
    if (THREAD_PER_BLOCK >= 512) {
        if (tid < 256) {
            shared[tid] += shared[tid + 256];
        }
        __syncthreads();
    }

    if (THREAD_PER_BLOCK >= 256) {
        if (tid < 128) {
            shared[tid] += shared[tid + 128];
        }
        __syncthreads();
    }

    // Guard must be >= 128 (not >= 64 as in the original): with a 64-thread
    // block only 64 partial sums exist, and shared[tid + 64] would read past
    // the end of the shared array.
    if (THREAD_PER_BLOCK >= 128) {
        if (tid < 64) {
            shared[tid] += shared[tid + 64];
        }
        __syncthreads();
    }

    // Only the first warp (threads 0..31) finishes the reduction.
    if (tid < 32) {
        warpReduce(shared, tid);
    }

    if (tid == 0) {
        d_out[blockIdx.x] = shared[0];
    }
}


// Compare the GPU partial sums against the CPU reference element-wise.
// Returns true iff every pair differs by at most 5e-4 in absolute value
// (n == 0 trivially passes).
bool check(const float *res, const float *out, int n) {
    for (int i = 0; i < n; ++i) {
        // fabsf, not plain `abs`: unqualified abs can bind to the integer
        // overload, truncating any sub-1.0 difference to zero and silently
        // accepting wrong results.
        if (fabsf(res[i] - out[i]) > 0.0005f) {
            return false;
        }
    }
    return true;
}

// Plan A: halve the number of blocks while keeping the threads per block —
// each thread loads and pre-adds two input elements.
int main() {
    const int N = 32 * 1024 * 1024;
    float *h_a = (float*)malloc(N * sizeof(float));
    float *d_a = nullptr;
    cudaMalloc((void **)&d_a, N * sizeof(float));

    // Each block reduces 2 * THREAD_PER_BLOCK inputs into one partial sum,
    // so the grid needs half the blocks a plain reduction would.
    int block_nums = ceil(N, THREAD_PER_BLOCK) / 2;
    float *h_out = (float*)malloc(block_nums * sizeof(float));
    float *d_out = nullptr;
    cudaMalloc((void **)&d_out, block_nums * sizeof(float));
    float *res = (float*)malloc(block_nums * sizeof(float));

    for (int i = 0; i < N; ++i) {
        h_a[i] = 1;
    }

    // CPU reference: per-block partial sums over the same
    // 2 * THREAD_PER_BLOCK slices the kernel will reduce.
    for (int i = 0; i < block_nums; ++i) {
        float cur = 0;
        for (int j = 0; j < THREAD_PER_BLOCK * 2; ++ j) {
            cur += h_a[i * THREAD_PER_BLOCK * 2 + j];
        }
        res[i] = cur;
    }

    // Copy the input from host to device.
    cudaMemcpy(d_a, h_a, N * sizeof(float), cudaMemcpyHostToDevice);

    // Launch the reduction: one partial sum per block.
    dim3 grid_size(block_nums, 1);
    dim3 block_size(THREAD_PER_BLOCK, 1);
    reduce1<<<grid_size, block_size>>>(d_a, d_out);
    // Kernel launches do not return errors directly — catch bad launch
    // configurations here; the blocking memcpy below surfaces any
    // asynchronous execution errors.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
    }

    // Copy the per-block partial sums back to the host (blocking memcpy,
    // so it also synchronizes with the kernel).
    cudaMemcpy(h_out, d_out, block_nums * sizeof(float), cudaMemcpyDeviceToHost);

    if (check(res, h_out, block_nums)) {
        printf("the result is right\n");
    } else {
        printf("the result is wrong\n");
        for (int i = 0; i < block_nums; ++i) {
            printf("%.2lf", h_out[i]);
        }
        printf("\n");
    }

    free(h_a);
    free(h_out);   // was leaked in the original
    free(res);
    cudaFree(d_a);
    cudaFree(d_out);

    return 0;
}