#include <climits>
#include <cstdlib>
#include <ctime>
#include <iostream>
#include <random>
#include <string>

#include <cuda_runtime.h>

// Total element count; later divided by 4 in main() because the kernel
// loads one float4 (4 floats) per thread.
size_t g_eln = 1024 * 1024 * 32;
const int BLOCK_SIZE = 256;

// Two-level concatenation so __COUNTER__ is expanded BEFORE pasting.
// The previous single-level form `perf_##name##__COUNTER__` pasted the
// literal token "__COUNTER__" into the identifier (## suppresses macro
// expansion of its operands), so the counter never expanded and two PERF()
// uses of the same name in one scope would collide.
#define PERF_CONCAT_INNER(a, b) a##b
#define PERF_CONCAT(a, b) PERF_CONCAT_INNER(a, b)
// Declares a scoped RAII timer; prints elapsed GPU time when the scope ends.
#define PERF(name) Perf PERF_CONCAT(perf_##name##_, __COUNTER__)(#name)

// RAII GPU timer: records a CUDA event at construction and another at
// destruction, then prints the elapsed device time in milliseconds.
// Timing uses the legacy default stream, so it measures work submitted
// between construction and destruction on that stream.
class Perf
{
public:
    explicit Perf(const std::string& name) : m_name(name) {
        cudaEventCreate(&m_start);
        cudaEventCreate(&m_end);
        cudaEventRecord(m_start);
        cudaEventSynchronize(m_start);
    }

    ~Perf() {
        cudaEventRecord(m_end);
        // Block until the end event completes so the elapsed time is valid.
        cudaEventSynchronize(m_end);
        float elapsed_time = 0.0f;
        cudaEventElapsedTime(&elapsed_time, m_start, m_end);
        std::cout << m_name << " elapse: " << elapsed_time << " ms" << std::endl;
        // Fix: events were previously leaked — release them with the timer.
        cudaEventDestroy(m_start);
        cudaEventDestroy(m_end);
    }

    // Non-copyable: a copy would destroy the same events twice.
    Perf(const Perf&) = delete;
    Perf& operator=(const Perf&) = delete;

private:
    std::string m_name;              // label printed with the timing
    cudaEvent_t m_start, m_end;      // start/end timestamps on the device
}; // class Perf

// Block-level sum reduction with vectorized float4 loads.
// Each thread loads 4 consecutive floats, so launch with
//   <<<ceil((n/4) / blockDim.x), 256>>>
// Preconditions: blockDim.x == 256 (sdata holds 256/32 = 8 warp partials),
// `in` is 16-byte aligned, and n is a multiple of 4 * blockDim.x (no tail
// guard). Writes one partial sum per block to out[blockIdx.x].
// Requires the *_sync warp intrinsics (SM70+ toolchains).
__global__ void reduce_shuffle4(float* in, float* out)
{
    const int tid = threadIdx.x;
    const int offset_idx = blockIdx.x * blockDim.x + tid;

    // One 16-byte vector load: 4 elements per thread, fully coalesced.
    const float4 v = reinterpret_cast<const float4*>(in)[offset_idx];
    float sum = v.x + v.y + v.z + v.w;

    // Stage 1: full-warp tree reduction; lane 0 ends with the warp total.
    #pragma unroll
    for (int offset = 16; offset > 0; offset >>= 1) {
        sum += __shfl_down_sync(0xffffffffu, sum, offset);
    }

    // One partial per warp (256 threads / 32 lanes = 8 warps).
    __shared__ float sdata[8];
    const int warp_idx = tid >> 5;   // tid / 32
    const int lane_idx = tid & 31;   // tid % 32
    if (lane_idx == 0) sdata[warp_idx] = sum;
    // Barrier outside any divergent branch: every thread reaches it.
    __syncthreads();

    // Stage 2: lanes 0-7 of warp 0 reduce the 8 warp partials.
    if (tid < 8) {
        // Fix: was sdata[warp_idx], which is sdata[0] for all tid < 8 —
        // that made the block result 8x the first warp's partial.
        sum = sdata[tid];
        // Fix: only lanes 0-7 execute here, so the participant mask is
        // 0xff; passing 0xffffffff with 24 non-participating lanes is UB.
        sum += __shfl_down_sync(0xffu, sum, 4);
        sum += __shfl_down_sync(0xffu, sum, 2);
        sum += __shfl_down_sync(0xffu, sum, 1);
        if (tid == 0) out[blockIdx.x] = sum;
    }
}

// Host driver: fills a random array, computes a CPU reference sum, runs the
// float4 shuffle-reduction kernel, and sums the per-block partials on host.
int main(int argc, char* argv[])
{
    std::cout << "start to run reduce base..." << std::endl;

    std::srand(std::time(NULL));
    float* input = (float*)malloc(g_eln * sizeof(float));
    if (input == NULL) {
        std::cerr << "failed to allocate host input buffer" << std::endl;
        return 1;
    }
    // Init input with pseudo-random values in [0, 1].
    // Fix: scale by RAND_MAX (the actual upper bound of rand()), not
    // INT_MAX — they only coincide on some platforms.
    for (size_t i = 0; i < g_eln; ++i) {
        input[i] = rand() * 1.0f / RAND_MAX;
    }

    // CPU reference result.
    float output = 0.0f;
    for (size_t i = 0; i < g_eln; ++i) {
        output += input[i];
    }
    std::cout << "CPU result: " << output << std::endl;

    // GPU
    float* d_input = NULL;
    cudaMalloc((void**)&d_input, g_eln * sizeof(float));
    cudaMemcpy(d_input, input, g_eln * sizeof(float), cudaMemcpyHostToDevice);

    // The kernel loads one float4 per thread, so the grid covers g_eln / 4
    // thread-elements. g_eln (32M) is a multiple of 4 * BLOCK_SIZE, so
    // there is no tail to guard.
    g_eln /= 4;
    int block_num = (g_eln - 1) / BLOCK_SIZE + 1;
    float* d_output = NULL;
    cudaMalloc((void**)&d_output, block_num * sizeof(float));
    float* h_output = (float*)malloc(block_num * sizeof(float));

    {
        PERF(reduce_index_gpu);
        reduce_shuffle4<<<block_num, BLOCK_SIZE>>>(d_input, d_output);
        cudaDeviceSynchronize();
        cudaMemcpy(h_output, d_output, block_num * sizeof(float), cudaMemcpyDeviceToHost);
    }
    // Fix: surface launch/execution errors instead of printing garbage.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        std::cerr << "CUDA error: " << cudaGetErrorString(err) << std::endl;
    }

    // Final host-side accumulation of the per-block partial sums.
    output = 0.0f;
    for (int i = 0; i < block_num; ++i) {
        output += h_output[i];
    }
    std::cout << "GPU result: " << output << std::endl;

    // Fix: release device and host buffers (previously leaked).
    cudaFree(d_input);
    cudaFree(d_output);
    free(h_output);
    free(input);

    return 0;
}