#include <cmath>
#include <cstdlib>
#include <iostream>

#include <cooperative_groups.h>
#include <cub/cub.cuh>
#include <cuda_runtime.h>

#define N 1000000
#define K 100
#define NUM_WINDOWS (N - K + 1)
const float inv100 = 1.0f / K;


// 第一阶段：块内扫描（Brent-Kung优化版）
template <int BLOCK_DIM, int ELEMENTS_PER_THREAD>
__global__ void inclusive_scan_kernel(const float* __restrict__ input,
                                      float* __restrict__ output,
                                      float* __restrict__ block_sums,
                                      int num_elements) {
  namespace cg = cooperative_groups;
  constexpr int TILE_SIZE = BLOCK_DIM * ELEMENTS_PER_THREAD;
  
  cg::thread_block tb = cg::this_thread_block();
  __shared__ float smem[2 * BLOCK_DIM];

  // 每个线程处理4个元素（向量化加载）
  float4 vec_data = {0.0f, 0.0f, 0.0f, 0.0f};
  const int global_tid = blockIdx.x * TILE_SIZE + threadIdx.x * ELEMENTS_PER_THREAD;
  
  if (global_tid + 3 < num_elements) {
    vec_data = *reinterpret_cast<const float4*>(&input[global_tid]);
  } else { // 处理尾部不足4个元素的情况
    const int remain = num_elements - global_tid;
    if (remain > 0) vec_data.x = input[global_tid];
    if (remain > 1) vec_data.y = input[global_tid+1];
    if (remain > 2) vec_data.z = input[global_tid+2];
  }

  // 本地前缀和计算（展开循环）
  float local_sums[ELEMENTS_PER_THREAD];
  local_sums[0] = vec_data.x;
  #pragma unroll
  for (int i = 1; i < ELEMENTS_PER_THREAD; ++i) {
    local_sums[i] = local_sums[i-1] + (&vec_data.x)[i];
  }

  // 块内归约（Brent-Kung结构）
  const float thread_sum = local_sums[ELEMENTS_PER_THREAD - 1];
  smem[threadIdx.x] = thread_sum;
  tb.sync();

  // 上行阶段
  for (int stride = 1; stride < BLOCK_DIM; stride *= 2) {
    if (threadIdx.x >= stride) {
      smem[BLOCK_DIM + threadIdx.x] = smem[threadIdx.x] + smem[threadIdx.x - stride];
    }
    tb.sync();
    if (threadIdx.x >= stride) {
      smem[threadIdx.x] = smem[BLOCK_DIM + threadIdx.x];
    }
    tb.sync();
  }

  // 下行阶段（仅需BLOCK_DIM/2线程）
  if (threadIdx.x < BLOCK_DIM/2) {
    const int idx = BLOCK_DIM/2 + threadIdx.x;
    smem[BLOCK_DIM + idx] = smem[idx] + smem[idx - 1];
  }
  tb.sync();

  // 合并结果
  const float block_prefix = (threadIdx.x == 0) ? 0.0f : smem[threadIdx.x - 1];
  float carry = block_prefix;

  // 应用前缀到本地数据
  #pragma unroll
  for (int i = 0; i < ELEMENTS_PER_THREAD; ++i) {
    local_sums[i] += carry;
    carry += (&vec_data.x)[i];
  }

  // 存储块总和
  if (block_sums && threadIdx.x == BLOCK_DIM - 1) {
    block_sums[blockIdx.x] = smem[BLOCK_DIM - 1];
  }

  // 向量化存储结果
  if (global_tid + 3 < num_elements) {
    *reinterpret_cast<float4*>(&output[global_tid]) = 
      make_float4(local_sums[0], local_sums[1], local_sums[2], local_sums[3]);
  } else {
    const int remain = num_elements - global_tid;
    if (remain > 0) output[global_tid]   = local_sums[0];
    if (remain > 1) output[global_tid+1] = local_sums[1];
    if (remain > 2) output[global_tid+2] = local_sums[2];
  }
}

// 第二阶段：块间扫描合并
template <int BLOCK_DIM>
__global__ void add_block_sums_kernel(float* __restrict__ scanned_blocks,
                                      const float* __restrict__ block_sums,
                                      int num_elements) {
  __shared__ float shared_sum;

  const int block_id = blockIdx.x;
  const int global_base = block_id * BLOCK_DIM * 4; // 每个块处理BLOCK_DIM*4元素
  
  // 加载当前块的基值
  if (threadIdx.x == 0) {
    shared_sum = (block_id > 0) ? block_sums[block_id-1] : 0.0f;
  }
  __syncthreads();

  // 向量化添加基值
  const int global_offset = global_base + threadIdx.x * 4;
  if (global_offset + 3 < num_elements) {
    float4 data = *reinterpret_cast<float4*>(&scanned_blocks[global_offset]);
    data.x += shared_sum;
    data.y += shared_sum;
    data.z += shared_sum;
    data.w += shared_sum;
    *reinterpret_cast<float4*>(&scanned_blocks[global_offset]) = data;
  } else {
    const int remain = num_elements - global_offset;
    if (remain > 0) scanned_blocks[global_offset]   += shared_sum;
    if (remain > 1) scanned_blocks[global_offset+1] += shared_sum;
    if (remain > 2) scanned_blocks[global_offset+2] += shared_sum;
  }
}

// 包装函数（需在主函数中调用）
void inclusive_scan(const float* d_input, 
    float* d_output, 
    int num_elements,
    cudaStream_t stream = 0) {
    constexpr int BLOCK_DIM = 256;
    constexpr int ELEMENTS_PER_THREAD = 4;
    const int grid_size = (num_elements + BLOCK_DIM*ELEMENTS_PER_THREAD - 1) 
            / (BLOCK_DIM*ELEMENTS_PER_THREAD);

    float* d_block_sums;
    cudaMallocAsync(&d_block_sums, grid_size * sizeof(float), stream);

    // 第一步：块内扫描
    inclusive_scan_kernel<BLOCK_DIM, ELEMENTS_PER_THREAD>
    <<<grid_size, BLOCK_DIM, 0, stream>>>(d_input, d_output, d_block_sums, num_elements);

    // 第二步：扫描块和数组
    float* d_final_sum;
    cudaMallocAsync(&d_final_sum, sizeof(float), stream);
    inclusive_scan_kernel<1, 1><<<1, 1, 0, stream>>>(d_block_sums, d_block_sums, nullptr, grid_size);

    // 第三步：添加块和到各个块
    add_block_sums_kernel<BLOCK_DIM>
    <<<grid_size, BLOCK_DIM, 0, stream>>>(d_output, d_block_sums, num_elements);

    cudaFreeAsync(d_block_sums, stream);
    cudaFreeAsync(d_final_sum, stream);
}
__global__ void compute_means(const float* prefix_sum, float* means) {
    extern __shared__ float s_prefix[];
    
    const int block_start = blockIdx.x * (blockDim.x << 2);
    const int num_elements = (blockDim.x << 2) + K;
    const int tid = threadIdx.x;

    // 协作加载前缀和到共享内存
    for (int i = tid; i < num_elements; i += blockDim.x) {
        const int global_idx = block_start + i;
        s_prefix[i] = (global_idx <= N) ? prefix_sum[global_idx] : 0.0f;
    }
    __syncthreads();

    // 处理四个窗口
    const int global_i = block_start + (tid << 2);
    if (global_i < NUM_WINDOWS) {
        const int local_start = tid << 2;
        // const int valid_elements = min(4, NUM_WINDOWS - global_i);
        
        // 向量化读取共享内存
        float4 start = *reinterpret_cast<float4*>(&s_prefix[local_start]);
        float4 end = *reinterpret_cast<float4*>(&s_prefix[local_start + K]);
        
        // 计算均值
        float4 sum = {end.x - start.x, end.y - start.y, 
                     end.z - start.z, end.w - start.w};
        float4 mean = {sum.x * inv100, sum.y * inv100,
                      sum.z * inv100, sum.w * inv100};
        
        // 向量化存储结果
        *reinterpret_cast<float4*>(&means[global_i]) = mean;
    }
}

int main() {
    cudaSetDevice(1); // sometime it's free
    // 分配主机内存
    float *h_in = new float[N];
    float *h_out = new float[NUM_WINDOWS];
    for (int i = 0; i < N; ++i) h_in[i] = 1.0f;

    float *d_data, *d_prefix_sum, *d_means;
    cudaMalloc(&d_data, N * sizeof(float));
    cudaMalloc(&d_prefix_sum, (N + 1) * sizeof(float));
    cudaMalloc(&d_means, NUM_WINDOWS * sizeof(float));
    cudaMemcpy(d_data, h_in, N * sizeof(float), cudaMemcpyHostToDevice);

    // 计算前缀和
    // void *d_temp = nullptr;
    // size_t temp_bytes = 0;
    // cub::DeviceScan::InclusiveSum(d_temp, temp_bytes, d_data, d_prefix_sum + 1, N);
    // cudaMalloc(&d_temp, temp_bytes);
    // cudaMemset(d_prefix_sum, 0, sizeof(float));

    // 配置kernel参数
    const int block_size = 256;
    const int elements_per_block = block_size << 2;
    const int grid_size = (NUM_WINDOWS + elements_per_block - 1) / elements_per_block;
    const size_t shared_mem_size = (block_size * 4 + K) * sizeof(float);

    // 预热
    compute_means<<<grid_size, block_size, shared_mem_size>>>(d_prefix_sum, d_means);
    cudaDeviceSynchronize();

    // 创建CUDA流
    cudaStream_t stream;
    cudaStreamCreate(&stream);
    // 计时
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    const int num_runs = 100;
    float total_time = 0;

    cudaEventRecord(start, stream);
    for (int i = 0; i < num_runs; ++i) {
        // cub::DeviceScan::InclusiveSum(d_temp, temp_bytes, d_data, d_prefix_sum + 1, N);
        inclusive_scan(d_data, d_prefix_sum, N, stream); // 调用包装函数
        compute_means<<<grid_size, block_size, shared_mem_size>>>(d_prefix_sum, d_means);
    }
    cudaEventRecord(stop, stream);
    cudaEventSynchronize(stop);
    
    float elapsed_ms;
    cudaEventElapsedTime(&elapsed_ms, start, stop);
    total_time = elapsed_ms / 1000.0f;

    // 输出性能指标
    std::cout << "Average Time: " << elapsed_ms / num_runs << " ms" << std::endl;
    std::cout << "Window Throughput: " 
              << (num_runs * NUM_WINDOWS) / (total_time * 1e9) << " E9 elements/s" << std::endl;
    std::cout << "Memory Throughput: " 
              << (num_runs * NUM_WINDOWS * 2 * sizeof(float)) / (total_time * 1e9) << " GB/s" << std::endl;

    // 验证结果
    cudaMemcpy(h_out, d_means, NUM_WINDOWS * sizeof(float), cudaMemcpyDeviceToHost);
    for (int i = 0; i < NUM_WINDOWS; ++i) {
        if (std::abs(h_out[i] - 1.0f) > 1e-4) {
            std::cout << "Error at index " << i << ": " << h_out[i] << std::endl;
            break;
        }
    }

    // 释放资源
    cudaFree(d_data);
    cudaFree(d_prefix_sum);
    cudaFree(d_means);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    return 0;
}
