// Project partly adapted from https://developer.download.nvidia.com/assets/cuda/files/reduction.pdf

#include <iostream>
#include <math.h>

// Reduces `sum` across the lanes of a warp with shuffle-down exchanges;
// the full total ends up in lane 0 (other lanes hold partial results).
// `blockSize` is a compile-time constant, so each guard below is resolved
// at compile time and shuffles for smaller block sizes are elided.
template <unsigned int blockSize>
__device__ __forceinline__ float warpReduceSum(float sum){
#pragma unroll
    for (unsigned int offset = 16; offset > 0; offset >>= 1) {
        if (blockSize >= 2 * offset) {
            sum += __shfl_down_sync(0xffffffff, sum, offset);
        }
    }
    return sum;
}

// This will output the proper CUDA error strings in the event
// that a CUDA host call returns an error
#define checkGPUErrors(val) checkCudaResult((val), #val, __FILE__, __LINE__)

// Aborts the program with a descriptive message when `result` is not
// cudaSuccess. `func` is the stringified expression that produced the
// error; `file`/`line` locate the call site.
// (Renamed from `__check`: identifiers starting with a double underscore
// are reserved for the C++ implementation.)
void checkCudaResult(cudaError_t result, char const *const func,
                     const char *const file, int const line) {
  if (result != cudaSuccess) {
    fprintf(stderr, "CUDA error at %s:%d code=%d(%s) \"%s\" \n", file, line,
            static_cast<unsigned int>(result), cudaGetErrorName(result), func);
    exit(EXIT_FAILURE);
  }
}

const int NUM_ELEMENT = 32 * 1024 * 1024; // 32 M elements
const int THREAD_PER_BLOCK = 256;
// Must be a power of two so the grid dimension computes evenly.
const int ELEMENT_PER_THREAD = 16;
const int ELEMENT_PER_BLOCK = (THREAD_PER_BLOCK * ELEMENT_PER_THREAD);

// Each block accumulates ELEMENT_PER_BLOCK input values and writes the sum to z[blockIdx.x].
// Block-level sum reduction: each block sums ELEMENT_PER_BLOCK elements of
// x and writes its partial sum to z[blockIdx.x].
// Preconditions (not checked on the device):
//   - blockDim.x == THREAD_PER_BLOCK (a power of two, >= 64)
//   - gridDim.x * ELEMENT_PER_BLOCK equals the number of elements in x;
//     there is NO tail guard, so the grid must tile x exactly.
__global__ void reduce(const float * __restrict__ x, float * __restrict__ z){
    __shared__ float sdata[THREAD_PER_BLOCK];

    unsigned int tid = threadIdx.x;
    // First element this thread reads; successive reads are blockDim.x
    // apart so every pass is fully coalesced across the block.
    unsigned int i = blockIdx.x * (blockDim.x * ELEMENT_PER_THREAD) + threadIdx.x;

    // Serial phase: accumulate ELEMENT_PER_THREAD values in a register
    // (one shared-memory store instead of a read-modify-write per element).
    float acc = 0.0f;
#pragma unroll
    for(unsigned int s = 0; s < ELEMENT_PER_THREAD; s++) {
         acc += x[i + s * blockDim.x];
    }
    sdata[tid] = acc;
    __syncthreads();

    // Tree reduction in shared memory down to 32 partial sums.
    for (unsigned int s=blockDim.x/2; s>=32; s>>=1) {
        if (tid < s) {
            sdata[tid] += sdata[tid + s];
        }
        __syncthreads();
    }

    // The last 32 partials are reduced inside the first warp with shuffles;
    // no __syncthreads() is needed since only one warp participates.
    const int WARP_SIZE = 32;
    float partial;
    if (tid < WARP_SIZE) {
        partial = sdata[tid];
        partial = warpReduceSum<32>(partial);
    }
    // Lane 0 of warp 0 holds the block total; publish it to global memory.
    if (tid == 0) z[blockIdx.x] = partial;
}

// Driver: fills 32M floats with 1.0f, reduces them block-wise on the GPU,
// verifies every partial sum equals ELEMENT_PER_BLOCK, and reports timing,
// throughput, and effective DRAM bandwidth.
int main(void)
{
    int array_size = NUM_ELEMENT; // 32 M elements
    int z_size = array_size/ELEMENT_PER_BLOCK; // one partial sum per block

    // Host buffers.
    float *x, *z;
    x = (float*)malloc(array_size*sizeof(float));
    z = (float*)malloc(z_size*sizeof(float));
    if (x == NULL || z == NULL) {
        fprintf(stderr, "Host allocation failed\n");
        return EXIT_FAILURE;
    }

    // Device buffers.
    float *x_gpu, *z_gpu;
    checkGPUErrors(cudaMalloc(&x_gpu, array_size*sizeof(float)));
    checkGPUErrors(cudaMalloc(&z_gpu, z_size*sizeof(float)));

    // Initialize input to all ones so every block's sum is ELEMENT_PER_BLOCK.
    for (int i = 0; i < array_size; i++) {
        x[i] = 1.0f;
    }
    checkGPUErrors(cudaMemcpy(x_gpu, x, array_size*sizeof(float), cudaMemcpyHostToDevice));

    // Time the kernel with CUDA events (GPU-side timestamps).
    cudaEvent_t gpu_start, gpu_stop;
    checkGPUErrors(cudaEventCreate(&gpu_start));
    checkGPUErrors(cudaEventCreate(&gpu_stop));
    checkGPUErrors(cudaEventRecord(gpu_start));
    int block_dim = THREAD_PER_BLOCK;
    int grid_dim = array_size / ELEMENT_PER_BLOCK;
    reduce<<<grid_dim, block_dim>>>(x_gpu, z_gpu);
    // Kernel launches do not return errors directly; surface bad launch
    // configurations explicitly.
    checkGPUErrors(cudaGetLastError());
    checkGPUErrors(cudaEventRecord(gpu_stop));
    // Block until the kernel and the stop event have completed.
    checkGPUErrors(cudaEventSynchronize(gpu_stop));
    float milliseconds = 0;
    checkGPUErrors(cudaEventElapsedTime(&milliseconds, gpu_start, gpu_stop));

    printf("Execution time measured on GPU: %f us.\n", milliseconds*1000);

    // Verify: every partial sum must equal ELEMENT_PER_BLOCK exactly
    // (summing 4096 ones is exact in float, so a tight tolerance is fine).
    checkGPUErrors(cudaMemcpy(z, z_gpu, z_size*sizeof(float), cudaMemcpyDeviceToHost));
    float maxError = 0.0f;
    for(int i = 0; i<z_size; i++) {
      maxError = fmax(maxError, fabs(z[i] - ELEMENT_PER_BLOCK));
    }
    if(maxError < 1e-6)
        std::cout << "PASS. MaxError: " << maxError << std::endl;
    else
        std::cout << "FAIL. MaxError: " << maxError << std::endl;

    // Reducing ELEMENT_PER_BLOCK numbers takes ELEMENT_PER_BLOCK-1 additions
    // per block. Divisions by 1024 use binary prefixes, consistent with the
    // bandwidth figure below.
    float gflops = (float)(ELEMENT_PER_BLOCK - 1) * grid_dim / 1024 / 1024 / milliseconds;
    printf("Throughput: %f GFLOP/s.\n", gflops);
    // Each input element is read from DRAM exactly once; the tiny output
    // array is ignored in the traffic estimate.
    float bandwidth = (float)sizeof(float) * array_size / 1024 / 1024 / milliseconds;
    printf("DRAM Bandwidth: %f GByte/s.\n", bandwidth);
    printf("Arithmetic Intensity: %f FLOP/Byte.\n", gflops/bandwidth);

    // Release all resources (events included — they were leaked before).
    checkGPUErrors(cudaEventDestroy(gpu_start));
    checkGPUErrors(cudaEventDestroy(gpu_stop));
    checkGPUErrors(cudaFree(x_gpu));
    checkGPUErrors(cudaFree(z_gpu));
    free(x);
    free(z);

    return 0;
}
