#include <cmath>
#include <cstdlib>
#include <iostream>

#include <cuda_runtime.h>

#include <cub/cub.cuh>

#define N 1000000
#define K 100
#define NUM_WINDOWS (N - K + 1)

// Computes the mean of every length-K sliding window from an (N+1)-entry
// inclusive prefix-sum array. Precondition: prefix_sum[0] == 0 and
// prefix_sum[j] holds the sum of the first j input elements.
// Launch with at least NUM_WINDOWS total threads; one thread per window.
// Pointers are marked const __restrict__ so the compiler may route the
// prefix-sum loads through the read-only data cache.
__global__ void compute_means(const float* __restrict__ prefix_sum,
                              float* __restrict__ means) {
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < NUM_WINDOWS) {
        // Window sum as a prefix-sum difference: sum of elements [i, i+K).
        const float sum = prefix_sum[i + K] - prefix_sum[i];
        // Multiply by the reciprocal instead of dividing; K is a
        // compile-time constant so 1.0f / K is folded by the compiler.
        means[i] = sum * (1.0f / K);
    }
}

int main() {
    // Abort with a readable message on any CUDA failure. Leaking on the
    // abort path is acceptable since the process terminates immediately.
    auto check = [](cudaError_t err, const char* what) {
        if (err != cudaSuccess) {
            std::cerr << what << " failed: " << cudaGetErrorString(err) << std::endl;
            std::exit(-1);
        }
    };

    // Prefer device 1 (original behavior) but fall back to device 0 so the
    // program also runs on single-GPU machines.
    if (cudaSetDevice(1) != cudaSuccess) {
        cudaGetLastError(); // clear the error from the failed set
        check(cudaSetDevice(0), "cudaSetDevice(0)");
    }

    // Host buffers. The input is the pattern 0,1,...,K-1 repeating, so every
    // length-K window contains exactly one full period and every window mean
    // is (K-1)/2. (The previous fill of i % N == i made each window mean
    // i + 49.5, so the verification below failed for every i > 0.)
    float* h_in = new float[N];
    float* h_out = new float[NUM_WINDOWS];
    for (int i = 0; i < N; ++i) h_in[i] = 1.0f * (i % K);

    // Device buffers: raw input, N+1 prefix sums (prefix_sum[0] == 0),
    // and one mean per sliding window.
    float *d_data, *d_prefix_sum, *d_means;
    check(cudaMalloc(&d_data, N * sizeof(float)), "cudaMalloc d_data");
    check(cudaMalloc(&d_prefix_sum, (N + 1) * sizeof(float)), "cudaMalloc d_prefix_sum");
    check(cudaMalloc(&d_means, NUM_WINDOWS * sizeof(float)), "cudaMalloc d_means");
    check(cudaMemcpy(d_data, h_in, N * sizeof(float), cudaMemcpyHostToDevice),
          "cudaMemcpy H2D");

    // First CUB call only queries the scratch-buffer size; allocate it, then
    // zero prefix_sum[0] so window sums are prefix[i+K] - prefix[i].
    void* d_temp = nullptr;
    size_t temp_bytes = 0;
    check(cub::DeviceScan::InclusiveSum(d_temp, temp_bytes, d_data, d_prefix_sum + 1, N),
          "InclusiveSum (size query)");
    check(cudaMalloc(&d_temp, temp_bytes), "cudaMalloc d_temp");
    check(cudaMemset(d_prefix_sum, 0, sizeof(float)), "cudaMemset prefix_sum[0]");

    // Launch configuration: one thread per window, ceil-div for the grid.
    const int block_size = 256;
    const int grid_size = (NUM_WINDOWS + block_size - 1) / block_size;

    // Warm-up run (module load, caches) before timing.
    check(cub::DeviceScan::InclusiveSum(d_temp, temp_bytes, d_data, d_prefix_sum + 1, N),
          "InclusiveSum (warm-up)");
    compute_means<<<grid_size, block_size>>>(d_prefix_sum, d_means);
    check(cudaGetLastError(), "compute_means launch (warm-up)");
    check(cudaDeviceSynchronize(), "warm-up sync");

    // Time num_runs iterations of scan + means with CUDA events.
    cudaEvent_t start, stop;
    check(cudaEventCreate(&start), "cudaEventCreate start");
    check(cudaEventCreate(&stop), "cudaEventCreate stop");
    const int num_runs = 1;

    check(cudaEventRecord(start), "cudaEventRecord start");
    for (int i = 0; i < num_runs; ++i) {
        check(cub::DeviceScan::InclusiveSum(d_temp, temp_bytes, d_data, d_prefix_sum + 1, N),
              "InclusiveSum (timed)");
        compute_means<<<grid_size, block_size>>>(d_prefix_sum, d_means);
        check(cudaGetLastError(), "compute_means launch (timed)");
    }
    check(cudaEventRecord(stop), "cudaEventRecord stop");
    check(cudaEventSynchronize(stop), "cudaEventSynchronize");

    float elapsed_ms = 0.0f;
    check(cudaEventElapsedTime(&elapsed_ms, start, stop), "cudaEventElapsedTime");
    const float total_time = elapsed_ms / 1000.0f; // seconds

    // Throughput: windows per second, plus an approximate traffic estimate
    // (per window: two 4-byte prefix-sum reads + one 4-byte mean write = 12 B).
    const float window_throughput = (num_runs * (float)NUM_WINDOWS) / total_time;
    const float data_processed_gb = (num_runs * (float)NUM_WINDOWS * 12.0f) / 1e9f;
    const float memory_throughput = data_processed_gb / total_time;

    std::cout << "Average Time: " << total_time / num_runs * 1000 << " ms" << std::endl;
    std::cout << "Window Throughput: " << window_throughput / 1e9 << " E9 elements/s" << std::endl;
    std::cout << "Memory Throughput: " << memory_throughput << " E9 Bytes/s" << std::endl;

    // Verify on the host. The float32 prefix sums reach ~5e7 where one ulp
    // is 4, so differenced window sums can be off by several units and the
    // means by a few hundredths; the previous 1e-6 tolerance was unrealistic
    // for single precision.
    check(cudaMemcpy(h_out, d_means, NUM_WINDOWS * sizeof(float), cudaMemcpyDeviceToHost),
          "cudaMemcpy D2H");
    const float expected = (K - 1) * 0.5f; // 49.5 for K == 100
    int status = 0;
    for (int i = 0; i < NUM_WINDOWS; ++i) {
        if (std::abs(h_out[i] - expected) > 0.5f) {
            std::cout << "Error: Mean at index " << i << " is incorrect."
                      << " Expected: " << expected << ", Actual: " << h_out[i] << std::endl;
            status = -1;
            break;
        }
    }

    // Release all resources (previously d_temp and the host arrays leaked).
    cudaFree(d_temp);
    cudaFree(d_data);
    cudaFree(d_prefix_sum);
    cudaFree(d_means);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    delete[] h_in;
    delete[] h_out;

    return status;
}
