#include <cuda_runtime.h>
#include <sys/time.h>
#include <time.h>

#include <cmath>
#include <cstdio>
#include <cstdlib>

#define THREAD_PER_BLOCK 128
// Ceiling division: number of size-b chunks needed to cover a elements.
// Fully parenthesized so it composes safely inside larger expressions
// (without the outer parens, `x % ceil(a, b)` would parse as
// `(x % ((a)+(b)-1)) / (b)`).
// NOTE(review): this macro shadows ::ceil from <cmath>; consider renaming
// to CEIL_DIV in a follow-up that also updates the call sites.
#define ceil(a, b) (((a) + (b) - 1) / (b))

// Block-level sum reduction with a "first add during global load" step.
//
// Launch contract (see main): 1-D grid, blockDim.x == THREAD_PER_BLOCK,
// and blockDim.x must be a power of two. Each block consumes
// 2 * blockDim.x consecutive floats of d_in and writes their sum to
// d_out[blockIdx.x]; d_in must hold at least gridDim.x * blockDim.x * 2
// elements (no bounds check is performed).
__global__ void reduce1(float *d_in, float *d_out) {
    // Stage partial sums in shared memory; each thread pre-adds two
    // global elements so the tree reduction starts with half the work.
    __shared__ float shared[THREAD_PER_BLOCK];
    float *block_begin = d_in + blockIdx.x * blockDim.x * 2;
    shared[threadIdx.x] = block_begin[threadIdx.x] + block_begin[threadIdx.x + blockDim.x];
    __syncthreads();

    // i is the stride between the two elements each active thread adds;
    // it halves every round (sequential addressing).
    for (int i = blockDim.x / 2; i > 0; i /= 2) {
        // Keep the active threads packed at the low thread IDs so whole
        // warps retire together instead of diverging inside a warp.
        // Note: active thread t does NOT work on shared[t]'s "own" data;
        // the interleaved alternative below maps thread t to index t*2*i:
        // if (threadIdx.x < blockDim.x / (2 * i)) {
        //     int index = threadIdx.x * 2 * i;
        //     shared[index] += shared[index + i];
        // }
        // With that interleaved scheme, take one warp at i == 1 as an
        // example: thread 0 touches shared[0]/shared[1] while thread 16
        // touches shared[32]/shared[33] — lanes of the same warp hit the
        // same shared-memory banks and the accesses serialize (conflicts).
        if (threadIdx.x < i) {
            shared[threadIdx.x] += shared[threadIdx.x + i];
        }
        // Barrier stays outside the divergent if: all threads must reach it.
        __syncthreads();
    }

    // Thread 0 publishes this block's total.
    if (threadIdx.x == 0) {
        d_out[blockIdx.x] = shared[0];
    }
}


// Element-wise comparison of a CPU reference (res) against GPU output (out).
// Returns true when every pair of the first n values differs by at most
// 0.0005; an empty range (n == 0) is trivially true.
bool check(const float *res, const float *out, int n) {
    for (int i = 0; i < n; ++i) {
        // fabsf, not unqualified abs: at global scope abs can resolve to the
        // C int overload, truncating the float difference to 0 and silently
        // accepting wrong results.
        if (fabsf(res[i] - out[i]) > 0.0005f) {
            return false;
        }
    }
    return true;
}

// Plan B:
// keep the block count but halve the threads per block; each thread loads
// and pre-adds two elements, so one block still covers 2*THREAD_PER_BLOCK
// inputs.

// Abort with a readable message if a CUDA runtime call failed.
// `what` names the failing call site for the error report.
static void checkCuda(cudaError_t err, const char *what) {
    if (err != cudaSuccess) {
        fprintf(stderr, "CUDA error (%s): %s\n", what, cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}

int main() {
    const int N = 32 * 1024 * 1024;
    float *h_a = (float*)malloc(N * sizeof(float));
    float *d_a = nullptr;
    checkCuda(cudaMalloc((void **)&d_a, N * sizeof(float)), "cudaMalloc d_a");

    // Each block reduces THREAD_PER_BLOCK * 2 inputs, hence the / 2.
    int block_nums = ceil(N, THREAD_PER_BLOCK) / 2;
    // One partial sum per block.
    float *h_out = (float*)malloc(block_nums * sizeof(float));
    float *d_out = nullptr;
    checkCuda(cudaMalloc((void **)&d_out, block_nums * sizeof(float)), "cudaMalloc d_out");
    float *res = (float*)malloc(block_nums * sizeof(float));

    for (int i = 0; i < N; ++i) {
        h_a[i] = 1;
    }

    // CPU reference: per-block partial sums, mirroring the kernel's tiling.
    for (int i = 0; i < block_nums; ++i) {
        float cur = 0;
        for (int j = 0; j < THREAD_PER_BLOCK * 2; ++j) {
            cur += h_a[i * THREAD_PER_BLOCK * 2 + j];
        }
        res[i] = cur;
    }

    // Copy the input to the device.
    checkCuda(cudaMemcpy(d_a, h_a, N * sizeof(float), cudaMemcpyHostToDevice),
              "H2D memcpy");

    // Launch: one block per output element, THREAD_PER_BLOCK threads each.
    dim3 grid_size(block_nums, 1);
    dim3 block_size(THREAD_PER_BLOCK, 1);
    reduce1<<<grid_size, block_size>>>(d_a, d_out);
    checkCuda(cudaGetLastError(), "kernel launch");          // bad launch config
    checkCuda(cudaDeviceSynchronize(), "kernel execution");  // async in-kernel faults

    // Copy the per-block results back to the host.
    checkCuda(cudaMemcpy(h_out, d_out, block_nums * sizeof(float), cudaMemcpyDeviceToHost),
              "D2H memcpy");

    if (check(res, h_out, block_nums)) {
        printf("the result is right\n");
    } else {
        printf("the result is wrong\n");
        for (int i = 0; i < block_nums; ++i) {
            // Space-separated so the debug dump is readable.
            printf("%.2f ", h_out[i]);
        }
        printf("\n");
    }

    free(h_a);
    free(h_out);  // was leaked in the original
    free(res);
    cudaFree(d_a);
    cudaFree(d_out);

    return 0;
}