#include <cmath>    // std::abs (float overload) in the verification loop
#include <cstdlib>  // std::exit, EXIT_FAILURE
#include <iostream>

#include <cuda_runtime.h>

#include <cub/cub.cuh>

#define N 1000000
#define K 100
#define NUM_WINDOWS (N - K + 1)
const float inv100 = 1.0f / K;

__global__ void compute_means(const float* prefix_sum, float* means) {
    extern __shared__ float s_prefix[];
    
    const int block_start = blockIdx.x * (blockDim.x << 2);
    const int num_elements = (blockDim.x << 2) + K;
    const int tid = threadIdx.x;

    // 协作加载前缀和到共享内存
    for (int i = tid; i < num_elements; i += blockDim.x) {
        const int global_idx = block_start + i;
        s_prefix[i] = (global_idx <= N) ? prefix_sum[global_idx] : 0.0f;
    }
    __syncthreads();

    // 处理四个窗口
    const int global_i = block_start + (tid << 2);
    if (global_i < NUM_WINDOWS) {
        const int local_start = tid << 2;
        // const int valid_elements = min(4, NUM_WINDOWS - global_i);
        
        // 向量化读取共享内存
        float4 start = *reinterpret_cast<float4*>(&s_prefix[local_start]);
        float4 end = *reinterpret_cast<float4*>(&s_prefix[local_start + K]);
        
        // 计算均值
        float4 sum = {end.x - start.x, end.y - start.y, 
                     end.z - start.z, end.w - start.w};
        float4 mean = {sum.x * inv100, sum.y * inv100,
                      sum.z * inv100, sum.w * inv100};
        
        // 向量化存储结果
        *reinterpret_cast<float4*>(&means[global_i]) = mean;
    }
}

int main() {
    cudaSetDevice(1); // sometime it's free
    // 分配主机内存
    float *h_in = new float[N];
    float *h_out = new float[NUM_WINDOWS];
    for (int i = 0; i < N; ++i) h_in[i] = i*1.0f;

    float *d_data, *d_prefix_sum, *d_means;
    cudaMalloc(&d_data, N * sizeof(float));
    cudaMalloc(&d_prefix_sum, (N + 1) * sizeof(float));
    cudaMalloc(&d_means, NUM_WINDOWS * sizeof(float));
    cudaMemcpy(d_data, h_in, N * sizeof(float), cudaMemcpyHostToDevice);

    // 计算前缀和
    void *d_temp = nullptr;
    size_t temp_bytes = 0;
    cub::DeviceScan::InclusiveSum(d_temp, temp_bytes, d_data, d_prefix_sum + 1, N);
    cudaMalloc(&d_temp, temp_bytes);
    cudaMemset(d_prefix_sum, 0, sizeof(float));

    // 配置kernel参数
    const int block_size = 256;
    const int elements_per_block = block_size << 2;
    const int grid_size = (NUM_WINDOWS + elements_per_block - 1) / elements_per_block;
    const size_t shared_mem_size = (block_size * 4 + K) * sizeof(float);

    // 预热
    compute_means<<<grid_size, block_size, shared_mem_size>>>(d_prefix_sum, d_means);
    cudaDeviceSynchronize();

    // 计时
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    const int num_runs = 100;
    float total_time = 0;

    cudaEventRecord(start);
    for (int i = 0; i < num_runs; ++i) {
        cub::DeviceScan::InclusiveSum(d_temp, temp_bytes, d_data, d_prefix_sum + 1, N);
        compute_means<<<grid_size, block_size, shared_mem_size>>>(d_prefix_sum, d_means);
    }
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    
    float elapsed_ms;
    cudaEventElapsedTime(&elapsed_ms, start, stop);
    total_time = elapsed_ms / 1000.0f;

    // 输出性能指标
    std::cout << "Average Time: " << elapsed_ms / num_runs << " ms" << std::endl;
    std::cout << "Window Throughput: " 
              << (num_runs * NUM_WINDOWS) / (total_time * 1e9) << " E9 elements/s" << std::endl;
    std::cout << "Memory Throughput: " 
              << (num_runs * NUM_WINDOWS * 2 * sizeof(float)) / (total_time * 1e9) << " GB/s" << std::endl;

    // 验证结果
    cudaMemcpy(h_out, d_means, NUM_WINDOWS * sizeof(float), cudaMemcpyDeviceToHost);
    for (int i = 0; i < NUM_WINDOWS; ++i) {
        if (std::abs(h_out[i] - 1.0f) > 1e-4) {
            std::cout << "Error at index " << i << ": " << h_out[i] << std::endl;
            break;
        }
    }

    // 释放资源
    cudaFree(d_data);
    cudaFree(d_prefix_sum);
    cudaFree(d_means);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    return 0;
}
