#include <climits>       // INT_MAX (used to normalize rand() output)
#include <cstdlib>       // std::srand, rand, RAND_MAX
#include <ctime>
#include <iostream>
#include <string>        // std::string (Perf::m_name)
#include <sys/time.h>

#include <cuda_runtime.h>

// Number of input elements to reduce: 32M floats (a multiple of BLOCK_SIZE,
// so every launched thread maps to a valid element).
size_t g_eln = 1024 * 1024 * 32;
// Threads per block used for every kernel launch; power of two as required
// by the tree reduction in reduce_base.
const int BLOCK_SIZE = 128;

#define PERF(name) Perf perf_##name##__COUNTER__(#name)

// RAII timer around CUDA events: records a start event at construction and,
// at destruction, records the end event, waits for it, and prints the elapsed
// GPU time in milliseconds. Both events are recorded on the default stream,
// so the measured span covers work submitted to it between the two points.
class Perf
{
public:
    explicit Perf(const std::string& name) {
        m_name = name;
        cudaEventCreate(&m_start);
        cudaEventCreate(&m_end);
        cudaEventRecord(m_start);
        // Make sure the start event has actually been reached before timing.
        cudaEventSynchronize(m_start);
    }

    ~Perf() {
        cudaEventRecord(m_end);
        cudaEventSynchronize(m_end);
        float elapsed_time = 0.0f;
        cudaEventElapsedTime(&elapsed_time, m_start, m_end);
        std::cout << m_name << " elapse: " << elapsed_time << " ms" << std::endl;
        // Release the events -- the original leaked two events per timing scope.
        cudaEventDestroy(m_start);
        cudaEventDestroy(m_end);
    }

    // Non-copyable: a copy would destroy the same events twice.
    Perf(const Perf&) = delete;
    Perf& operator=(const Perf&) = delete;

private:
    std::string m_name;           // label printed alongside the timing
    cudaEvent_t m_start, m_end;   // created in ctor, destroyed in dtor
}; // class Perf

// Baseline tree reduction: each block sums blockDim.x consecutive elements of
// `in` into out[blockIdx.x]; the host (or another pass) sums the partials.
// Launch requirements: blockDim.x is a power of two, dynamic shared memory of
// blockDim.x * sizeof(float) bytes, gridDim.x = ceil(eln / blockDim.x).
// NOTE: the `tid % (2*s)` schedule is intentionally naive (warp-divergent,
// modulo-heavy) -- this is the unoptimized reference version.
__global__ void reduce_base(float* in, float* out, int eln)
{
    extern __shared__ float sdata[];
    int total_id = threadIdx.x + blockIdx.x * blockDim.x;
    int tid = threadIdx.x;
    // Tail guard: threads past the end of the input contribute the additive
    // identity instead of reading out of bounds (the original read in[total_id]
    // unconditionally, which is UB whenever eln % blockDim.x != 0).
    sdata[tid] = (total_id < eln) ? in[total_id] : 0.0f;
    __syncthreads();

    // Interleaved-pair reduction: stride s doubles each step; the survivor at
    // tid == 0 holds the block's sum.
    for (int s = 1; s < blockDim.x; s *= 2) {
        if (tid % (2 * s) == 0) {
            sdata[tid] += sdata[tid + s];
        }
        // Barrier sits outside the divergent branch so all threads reach it.
        __syncthreads();
    }

    if (tid == 0) out[blockIdx.x] = sdata[0];
}

// Entry point: fills g_eln random floats in [0, 1], sums them on the CPU in
// two accumulation orders (forward/backward, to illustrate float rounding
// sensitivity), then runs the one-pass GPU block reduction and sums the
// per-block partials on the host for comparison.
int main(int argc, char* argv[])
{
    std::cout << "start to run reduce base..." << std::endl;

    std::srand(std::time(NULL));
    float* input = (float*)malloc(g_eln * sizeof(float));
    if (input == NULL) {
        std::cerr << "host malloc failed" << std::endl;
        return 1;
    }
    float output = 0;
    // init input: uniform values in [0, 1]. RAND_MAX (not INT_MAX) is the
    // actual upper bound of rand(), so the normalization is portable and the
    // values never exceed 1.
    for (size_t i = 0; i < g_eln; ++i) {
        input[i] = rand() * 1.0 / RAND_MAX;
    }

    // CPU reference, forward accumulation order.
    for (size_t i = 0; i < g_eln; ++i) {
        output += input[i];
    }
    std::cout << "CPU result: " << output << std::endl;

    // CPU reference, reverse order -- result may differ in the low bits due
    // to float rounding. long long index avoids overflow for large g_eln.
    output = 0;
    for (long long i = (long long)g_eln - 1; i >= 0; --i) {
        output += input[i];
    }
    std::cout << "CPU result: " << output << std::endl;

    // GPU
    float* d_input = NULL;
    cudaError_t err = cudaMalloc((void**)&d_input, g_eln * sizeof(float));
    if (err != cudaSuccess) {
        std::cerr << "cudaMalloc(d_input) failed: " << cudaGetErrorString(err) << std::endl;
        free(input);
        return 1;
    }
    cudaMemcpy(d_input, input, g_eln * sizeof(float), cudaMemcpyHostToDevice);

    int block_num = (g_eln - 1) / BLOCK_SIZE + 1;  // ceil-div
    float* d_output = NULL;
    err = cudaMalloc((void**)&d_output, block_num * sizeof(float));
    if (err != cudaSuccess) {
        std::cerr << "cudaMalloc(d_output) failed: " << cudaGetErrorString(err) << std::endl;
        cudaFree(d_input);
        free(input);
        return 1;
    }

    float* h_output = (float*)malloc(block_num * sizeof(float));
    if (h_output == NULL) {
        std::cerr << "host malloc failed" << std::endl;
        cudaFree(d_output);
        cudaFree(d_input);
        free(input);
        return 1;
    }

    {
        PERF(reduce_base_gpu);
        reduce_base<<<block_num, BLOCK_SIZE, BLOCK_SIZE * sizeof(float)>>>(d_input, d_output, g_eln);
        // A kernel launch reports config errors only via cudaGetLastError().
        err = cudaGetLastError();
        if (err != cudaSuccess) {
            std::cerr << "kernel launch failed: " << cudaGetErrorString(err) << std::endl;
        }
        // g_eln = g_eln / BLOCK_SIZE;
        // block_num = (g_eln - 1) / BLOCK_SIZE + 1;
        // reduce_base<<<block_num, BLOCK_SIZE, BLOCK_SIZE * sizeof(float)>>>(d_output, d_input, g_eln);
        // g_eln = g_eln / BLOCK_SIZE;
        // block_num = (g_eln - 1) / BLOCK_SIZE + 1;
        // reduce_base<<<block_num, BLOCK_SIZE, BLOCK_SIZE * sizeof(float)>>>(d_input, d_output, g_eln);
        cudaDeviceSynchronize();
        cudaMemcpy(h_output, d_output, block_num * sizeof(float), cudaMemcpyDeviceToHost);
    }
    // Final pass: sum the per-block partials on the host.
    output = 0.0;
    for (int i = 0; i < block_num; ++i) {
        output += h_output[i];
    }
    std::cout << "GPU result: " << output << std::endl;

    // Release device and host buffers (the original leaked all four).
    cudaFree(d_output);
    cudaFree(d_input);
    free(h_output);
    free(input);

    return 0;
}