#include <cstddef>
#include <cstdlib>
#include <ctime>
#include <iostream>
#include <random>
#include <string>

#include <cuda_runtime.h>

// Problem size (elements) and launch-configuration constants.
size_t g_eln = 1024 * 1024 * 32;
const int BLOCK_SIZE = 256;
const int WARP_SIZE = 32;

// Two-level concatenation so __COUNTER__ is macro-expanded BEFORE pasting.
// The original `perf_##name##__COUNTER__` pasted the literal token
// "__COUNTER__" into the identifier (## suppresses expansion of its
// operands), so two PERF() uses in one scope would redeclare the same name.
#define PERF_CONCAT_IMPL(a, b) a##b
#define PERF_CONCAT(a, b) PERF_CONCAT_IMPL(a, b)
#define PERF(name) Perf PERF_CONCAT(perf_##name##_, __COUNTER__)(#name)

// RAII GPU timer: records a CUDA event on construction and, on destruction,
// records a second event, synchronizes, and prints the elapsed time in ms.
// Events are recorded on the default stream, so it times default-stream work
// issued during the object's lifetime.
class Perf
{
public:
    explicit Perf(const std::string& name) : m_name(name) {
        cudaEventCreate(&m_start);
        cudaEventCreate(&m_end);
        cudaEventRecord(m_start);
        cudaEventSynchronize(m_start);
    }

    ~Perf() {
        cudaEventRecord(m_end);
        cudaEventSynchronize(m_end);
        float elapsed_time = 0.0f;  // float literal; 0.0 was a double
        cudaEventElapsedTime(&elapsed_time, m_start, m_end);
        std::cout << m_name << " elapse: " << elapsed_time << " ms" << std::endl;
        // Fix: the original leaked both events on every timing scope.
        cudaEventDestroy(m_start);
        cudaEventDestroy(m_end);
    }

    // Non-copyable: a copy would double-destroy the events.
    Perf(const Perf&) = delete;
    Perf& operator=(const Perf&) = delete;

private:
    std::string m_name;          // label printed with the timing
    cudaEvent_t m_start, m_end;  // created in ctor, destroyed in dtor
}; // class Perf


// Inclusive per-block prefix sum (Hillis-Steele scan) over shared memory.
// Each block scans its own BLOCK_SIZE-element slice independently.
// Preconditions: blockDim.x == BLOCK_SIZE and gridDim.x * blockDim.x equals
// the element count (no tail guard — the launch must divide exactly).
__global__ void PrefixBlock(float* in, float* out) // 438 us, 547gb/s, bc: 27w
{
    int buffer_idx = threadIdx.x + blockDim.x * blockIdx.x;

    __shared__ float cache[BLOCK_SIZE];
    cache[threadIdx.x] = in[buffer_idx];
    __syncthreads();

    for (int stride = 1; stride < blockDim.x; stride *= 2) {
        // Fix: the original did `cache[t] += cache[t - stride]` with no
        // barrier between the read and the write, racing with thread
        // (t - stride) updating that same slot in the same round.
        // Read the partner value first, barrier, then write.
        float partner = 0.0f;
        if (threadIdx.x >= stride) {
            partner = cache[threadIdx.x - stride];
        }
        __syncthreads();
        if (threadIdx.x >= stride) {
            cache[threadIdx.x] += partner;
        }
        __syncthreads();
    }

    // Write each thread's scanned value back to global memory.
    out[buffer_idx] = cache[threadIdx.x];
}

// Inclusive per-block prefix sum, keeping each thread's running value in a
// register so the final store avoids a shared-memory read.
// Preconditions: blockDim.x == BLOCK_SIZE and gridDim.x * blockDim.x equals
// the element count (no tail guard — the launch must divide exactly).
__global__ void PrefixBlockRegister(float* in, float* out) // 397us, 600gb/s, bc: 14w
{
    int buffer_idx = threadIdx.x + blockDim.x * blockIdx.x;

    __shared__ float cache[BLOCK_SIZE];
    cache[threadIdx.x] = in[buffer_idx];
    float sum = cache[threadIdx.x];  // this thread's running inclusive sum
    __syncthreads();

    for (int stride = 1; stride < blockDim.x; stride *= 2) {
        // Fix: the original read cache[t - stride] and wrote cache[t] in
        // the same round with no intervening barrier — a data race with
        // thread (t - stride) rewriting its slot concurrently.
        // Snapshot the partner value, barrier, then update.
        float partner = 0.0f;
        if (threadIdx.x >= stride) {
            partner = cache[threadIdx.x - stride];
        }
        __syncthreads();
        if (threadIdx.x >= stride) {
            sum += partner;
            cache[threadIdx.x] = sum;
        }
        __syncthreads();
    }

    // Each thread writes its result straight from the register.
    out[buffer_idx] = sum;
}

// CPU reference: inclusive prefix sum of input[0..len) into output.
// Fix: the original used `output[i] += ...`, folding whatever garbage the
// uninitialized (malloc'd) output buffer held into every element.
// Also guards len <= 0, where the original unconditionally wrote output[0].
void PrefixSum(const float* input, float* output, int len)
{
    if (len <= 0) {
        return;
    }
    output[0] = input[0];
    for (int i = 1; i < len; ++i) {
        output[i] = output[i - 1] + input[i];
    }
}

// Minimal error check: print and bail on any CUDA failure. The original
// checked nothing, so a failed malloc/copy/launch produced silent garbage.
#define CHECK_CUDA(call)                                                      \
    do {                                                                      \
        cudaError_t err_ = (call);                                            \
        if (err_ != cudaSuccess) {                                            \
            std::cerr << "CUDA error " << __FILE__ << ":" << __LINE__ << ": " \
                      << cudaGetErrorString(err_) << std::endl;               \
            return 1;                                                         \
        }                                                                     \
    } while (0)

// Drives the per-block prefix-sum kernel, times it, and verifies the first
// block against the CPU full scan (the two agree exactly on block 0, since a
// per-block scan of block 0 is the global scan's prefix).
int main(int argc, char* argv[])
{
    std::cout << "start to run reduce base..." << std::endl;

    std::srand(std::time(NULL));
    float* input = (float*)malloc(g_eln * sizeof(float));
    float* output = (float*)malloc(g_eln * sizeof(float));
    float* h_output = (float*)malloc(g_eln * sizeof(float));
    if (input == NULL || output == NULL || h_output == NULL) {
        std::cerr << "host allocation failed" << std::endl;
        free(input);
        free(output);
        free(h_output);
        return 1;
    }

    // init input
    for (size_t i = 0; i < g_eln; ++i) {
        input[i] = (float)i;
    }

    // CPU reference (full inclusive scan).
    PrefixSum(input, output, (int)g_eln);

    // GPU buffers + upload.
    float* d_input = NULL;
    float* d_output = NULL;
    CHECK_CUDA(cudaMalloc((void**)&d_input, g_eln * sizeof(float)));
    CHECK_CUDA(cudaMalloc((void**)&d_output, g_eln * sizeof(float)));
    CHECK_CUDA(cudaMemcpy(d_input, input, g_eln * sizeof(float), cudaMemcpyHostToDevice));

    int block_num = (g_eln + BLOCK_SIZE - 1) / BLOCK_SIZE;

    std::cout << "block_num: " << block_num << std::endl;

    {
        PERF(prefix_base);
        PrefixBlockRegister<<<block_num, BLOCK_SIZE>>>(d_input, d_output);
        CHECK_CUDA(cudaGetLastError());  // catch bad launch config
        CHECK_CUDA(cudaDeviceSynchronize());
        CHECK_CUDA(cudaMemcpy(h_output, d_output, g_eln * sizeof(float), cudaMemcpyDeviceToHost));
    }

    // Verify block 0, where the per-block scan must match the full scan.
    bool ok = true;
    for (int i = 0; i < BLOCK_SIZE; ++i) {
        if (h_output[i] != output[i]) {
            std::cerr << "mismatch at " << i << ": gpu=" << h_output[i]
                      << " cpu=" << output[i] << std::endl;
            ok = false;
            break;
        }
    }
    std::cout << (ok ? "check passed" : "check FAILED") << std::endl;

    // Release everything the original leaked.
    cudaFree(d_input);
    cudaFree(d_output);
    free(input);
    free(output);
    free(h_output);

    return ok ? 0 : 1;
}