#include <iostream>
#include <cuda_runtime.h>
#include "../common/common.h"
#define DEVICENUM 0

// 1. Neighbored-pair reduction
// Neighbored-pair reduction: each block reduces its blockDim.x-wide slice of
// array_input IN PLACE and writes the block's partial sum to
// array_output[blockIdx.x]. The host must add up array_output (one entry per
// block) to obtain the final total. NOTE: array_input is destroyed.
// The (threadIdx.x % (2*stride)) test deliberately demonstrates warp divergence.
__global__ void reduceNeighbored(float *array_input, float *array_output, const int nElem)
{
    unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x;
    // Base address of this block's slice: the length-nElem array is split into
    // per-block chunks that are reduced independently.
    float *idata = array_input + blockIdx.x * blockDim.x;

    // Sum neighbours at distance `stride`, doubling the stride each pass.
    for (int stride = 1; stride < blockDim.x; stride *= 2)
    {
        // Bounds are checked HERE instead of with an early return at the top:
        // returning before __syncthreads() would leave a partial last block
        // stuck at the barrier (undefined behavior). The idx + stride < nElem
        // test also prevents reading past the end of the input in that block.
        if ((threadIdx.x % (2 * stride)) == 0 && idx + stride < nElem)
        {
            idata[threadIdx.x] += idata[threadIdx.x + stride];
        }
        // Partial sums of this pass must be visible before the next pass reads them.
        __syncthreads();
    }
    // Thread 0 publishes this block's partial sum to global memory.
    if (threadIdx.x == 0 && idx < nElem)
        array_output[blockIdx.x] = idata[0];
}

// 2. Interleaved-pair reduction
// Interleaved-pair reduction: like reduceNeighbored, each block reduces its
// blockDim.x-wide slice of array_input IN PLACE and writes the partial sum to
// array_output[blockIdx.x], but pairs elements blockDim.x/2 apart and halves
// the stride each pass. Thread IDs below `stride` stay active, so whole warps
// retire together and warp divergence is avoided. NOTE: array_input is destroyed.
__global__ void reduceNeighboredStep(float *array_input, float *array_output, const int nElem)
{
    unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x;
    // Base address of this block's slice of the input array.
    float *idata = array_input + blockIdx.x * blockDim.x;

    // stride >>= 1 halves the distance between paired elements each pass.
    for (int stride = blockDim.x / 2; stride > 0; stride >>= 1)
    {
        // Bounds are checked in the condition rather than with an early return
        // so that EVERY thread of the block reaches __syncthreads() below —
        // an early return before a barrier is undefined behavior. The
        // idx + stride < nElem test also keeps a partial last block from
        // reading past the end of the input.
        if (threadIdx.x < stride && idx + stride < nElem)
        {
            idata[threadIdx.x] += idata[threadIdx.x + stride];
        }
        // All sums of this pass must land before the next (shorter) pass reads them.
        __syncthreads();
    }
    // Thread 0 publishes this block's partial sum to global memory.
    if (threadIdx.x == 0 && idx < nElem)
        array_output[blockIdx.x] = idata[0];
}

// Host driver: generates random input, runs both reduction kernels, and sums
// the per-block partial results on the CPU to produce each kernel's total.
int main(int argc, char **argv)
{
    CHECK(cudaSetDevice(DEVICENUM));
    int nElem = 1 << 24;
    size_t nBytes = sizeof(float) * nElem;

    // Pick the launch configuration FIRST so the output buffer can be sized
    // from the real grid (the old hard-coded "nElem / 512 + 1" silently
    // overflowed whenever block_dims() chose fewer than 512 threads).
    dim3 block_size = block_dims(nElem);
    dim3 grid_size = grid_dims(nElem);
    // One partial sum per block.
    size_t nBytesOut = sizeof(float) * grid_size.x;

    // Host memory: input data plus a buffer for the per-block partial sums.
    float *host_array_input = (float *)malloc(nBytes);
    float *host_array_output = (float *)malloc(nBytesOut);
    initialDataRandom(host_array_input, nElem);

    // Device memory.
    float *device_array_input = nullptr;
    float *device_array_output = nullptr;
    CHECK(cudaMalloc((float **)&device_array_input, nBytes));
    CHECK(cudaMalloc((float **)&device_array_output, nBytesOut));
    CHECK(cudaMemcpy(device_array_input, host_array_input, nBytes, cudaMemcpyHostToDevice));
    CHECK(cudaMemset(device_array_output, 0, nBytesOut));

    // --- Kernel 1: neighbored-pair reduction ---
    reduceNeighbored<<<grid_size, block_size>>>(device_array_input, device_array_output, nElem);
    CHECK(cudaGetLastError()); // catch launch-configuration errors
    CHECK(cudaDeviceSynchronize());
    CHECK(cudaMemcpy(host_array_output, device_array_output, nBytesOut, cudaMemcpyDeviceToHost));
    double sumNeighbored = 0.0; // accumulate in double to limit rounding error
    for (unsigned int i = 0; i < grid_size.x; ++i)
        sumNeighbored += host_array_output[i];
    std::cout << "reduceNeighbored     sum: " << sumNeighbored << std::endl;

    // The kernels reduce IN PLACE and destroy the input, so the original data
    // must be re-uploaded before the second kernel (the old code reused the
    // clobbered buffer and produced a wrong result).
    CHECK(cudaMemcpy(device_array_input, host_array_input, nBytes, cudaMemcpyHostToDevice));
    CHECK(cudaMemset(device_array_output, 0, nBytesOut));

    // --- Kernel 2: interleaved-pair reduction ---
    reduceNeighboredStep<<<grid_size, block_size>>>(device_array_input, device_array_output, nElem);
    CHECK(cudaGetLastError());
    CHECK(cudaDeviceSynchronize());
    CHECK(cudaMemcpy(host_array_output, device_array_output, nBytesOut, cudaMemcpyDeviceToHost));
    double sumStep = 0.0;
    for (unsigned int i = 0; i < grid_size.x; ++i)
        sumStep += host_array_output[i];
    std::cout << "reduceNeighboredStep sum: " << sumStep << std::endl;

    // Release device and host memory.
    CHECK(cudaFree(device_array_input));
    CHECK(cudaFree(device_array_output));
    free(host_array_input);
    free(host_array_output);

    return 0;
}