#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define N 1000000
#define BLOCK_SIZE 512  // Must be power of 2
#define MAX_BLOCKS ((N + BLOCK_SIZE - 1) / BLOCK_SIZE)


// Abort with file/line context when a CUDA runtime call fails.
// (call) is parenthesized so complex expressions expand safely, and the
// trailing underscore on err_ avoids shadowing a caller-scope `err`.
#define CUDA_CHECK(call) \
    do { \
        cudaError_t err_ = (call); \
        if (err_ != cudaSuccess) { \
            fprintf(stderr, "CUDA error at %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err_)); \
            exit(1); \
        } \
    } while(0)

// CPU reference: exclusive prefix sum.
// out[0] = 0, out[i] = in[0] + in[1] + ... + in[i-1]. No-op when n == 0.
void cpu_prefix_sum_exclusive(unsigned int* in, unsigned int* out, int n) {
    if (n == 0) return;
    unsigned int running = 0;  // sum of all elements strictly before i
    for (int i = 0; i < n; i++) {
        out[i] = running;
        running += in[i];
    }
}

// Phase 3: add each block's offset (the exclusive prefix sum of all earlier
// blocks' totals) to every element that block scanned in phase 1.
// Generalized to use blockDim.x instead of the hard-coded BLOCK_SIZE, so the
// kernel is correct for any launch configuration matching phase 1's layout.
__global__ void add_block_offsets(unsigned int* output, unsigned int* block_offsets, int n) {
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    // Offset for this block = sum of all elements in the preceding blocks.
    unsigned int offset = block_offsets[blockIdx.x];
    if (gid < n) {
        output[gid] += offset;
    }
}

// Device helper: in-place Blelloch (work-efficient) exclusive scan over the
// shared-memory array s_data[0..block_size-1].
// Preconditions: block_size is a power of 2, every thread of the block calls
// this with tid == threadIdx.x (the loops contain __syncthreads(), so no
// divergent callers), and the caller has issued a __syncthreads() after
// populating s_data.
// Returns the total sum of the block's elements, but ONLY on tid == 0;
// every other thread receives 0.
__device__ unsigned int blelloch_scan_exclusive(unsigned int* s_data, int tid, int block_size) {
    int offset = 1;
    // Up-sweep (reduce) phase: build partial sums up the implicit binary tree;
    // after it completes, s_data[block_size-1] holds the grand total.
    for (int d = block_size >> 1; d > 0; d >>= 1) {
        if (tid < d) {
            int ai = offset * (2 * tid + 1) - 1;
            int bi = offset * (2 * tid + 2) - 1;
            s_data[bi] += s_data[ai];
        }
        offset *= 2;
        __syncthreads();
    }
    // Save total sum of this block (last element before clearing)
    unsigned int total_sum = (tid == 0) ? s_data[block_size - 1] : 0;
    // Clear last element for down-sweep (this is what makes the scan exclusive)
    if (tid == 0) {
        s_data[block_size - 1] = 0;
    }
    __syncthreads();
    // Down-sweep phase: walk back down the tree, swapping and accumulating so
    // each slot ends with the sum of all elements strictly to its left.
    for (int d = 1; d < block_size; d <<= 1) {
        offset >>= 1;
        if (tid < d) {
            int ai = offset * (2 * tid + 1) - 1;
            int bi = offset * (2 * tid + 2) - 1;
            unsigned int t = s_data[ai];
            s_data[ai] = s_data[bi];
            s_data[bi] += t;
        }
        __syncthreads();
    }
    return total_sum;
}

// Phase 1: each block performs an exclusive Blelloch scan of its BLOCK_SIZE
// elements and records the block's total sum for the later offset pass.
// Launch: ceil(N / BLOCK_SIZE) blocks of BLOCK_SIZE threads, with
// BLOCK_SIZE * sizeof(unsigned int) bytes of dynamic shared memory.
__global__ void blelloch_scan_block(unsigned int* input, unsigned int* output, unsigned int* block_sums) {
    extern __shared__ unsigned int s_data[];
    int tid = threadIdx.x;
    int bid = blockIdx.x;
    int gid = bid * BLOCK_SIZE + tid;
    // Load data; out-of-range lanes contribute the additive identity (0).
    if (gid < N) {
        s_data[tid] = input[gid];
    } else {
        s_data[tid] = 0;
    }
    __syncthreads();
    // In-place exclusive scan; block_sum is valid on tid == 0 only.
    unsigned int block_sum = blelloch_scan_exclusive(s_data, tid, BLOCK_SIZE);
    // Write out the exclusive scan result (each block's segment starts at 0).
    if (gid < N) {
        output[gid] = s_data[tid];
    }
    __syncthreads();
    // Record this block's total for the subsequent cross-block offset scan.
    if (tid == 0) {
        block_sums[bid] = block_sum;
    }
}

// Hierarchical scan, level 1: each block exclusive-scans one segment of the
// block-sums array and emits that segment's total for the level-2 scan.
// Previously duplicated the whole Blelloch up/down-sweep inline (with a dead
// `bi < block_size` guard); now delegates to blelloch_scan_exclusive for
// consistency with blelloch_scan_block.
// Launch: blockDim.x must be a power of 2 (callers use BLOCK_SIZE), with
// blockDim.x * sizeof(unsigned int) bytes of dynamic shared memory.
__global__ void hierarchical_scan_level1(unsigned int* input, unsigned int* output, unsigned int* block_totals, int n) {
    extern __shared__ unsigned int s_data[];
    int tid = threadIdx.x;
    int bid = blockIdx.x;
    int block_size = blockDim.x;
    int start = bid * block_size;

    // Load this block's segment; pad the tail with the additive identity (0).
    s_data[tid] = (start + tid < n) ? input[start + tid] : 0u;
    __syncthreads();

    // In-place exclusive scan; the returned segment total is valid on tid 0.
    unsigned int total = blelloch_scan_exclusive(s_data, tid, block_size);

    // Record this segment's total so level 2 can compute segment offsets.
    if (tid == 0) {
        block_totals[bid] = total;
    }

    // Write out the per-segment exclusive scan result.
    if (start + tid < n) {
        output[start + tid] = s_data[tid];
    }
}

// Hierarchical scan, level 2: a single block exclusive-scans the level-1
// segment totals into per-segment offsets.
// Previously carried its own Blelloch implementation with two redundant
// bounds guards (always true, since bi <= 2*d*offset - 1 == block_size - 1
// whenever tid < d); now delegates to blelloch_scan_exclusive.
// Launch: exactly 1 block; blockDim.x must be a power of 2 with
// blockDim.x >= num_blocks, plus blockDim.x * sizeof(unsigned int) bytes of
// dynamic shared memory.
__global__ void hierarchical_scan_level2(unsigned int* block_totals, unsigned int* block_offsets, int num_blocks) {
    extern __shared__ unsigned int s_data[];
    int tid = threadIdx.x;
    // Load the totals; lanes past num_blocks hold the additive identity (0).
    s_data[tid] = (tid < num_blocks) ? block_totals[tid] : 0u;
    __syncthreads();
    // In-place exclusive scan of the totals (grand total is not needed here).
    blelloch_scan_exclusive(s_data, tid, blockDim.x);
    // Write out each segment's offset (prefix sum of the preceding totals).
    if (tid < num_blocks) {
        block_offsets[tid] = s_data[tid];
    }
}

// Correction pass: fold the level-2 segment offsets back into the level-1
// scan results — one level-1 segment (level1_block_size elements) per block,
// one element per thread.
__global__ void hierarchical_scan_correction(unsigned int* output, unsigned int* block_offsets, int n, int level1_block_size) {
    int idx = blockIdx.x * level1_block_size + threadIdx.x;
    if (idx < n) {
        output[idx] += block_offsets[blockIdx.x];
    }
}

// Round x up to the nearest power of two (returns 1 for any x <= 1).
// Used to size the level-2 scan block, which requires a power-of-2 width.
int next_power_of_2(int x) {
    int result = 1;
    while (result < x) {
        result <<= 1;
    }
    return result;
}

// Debug aid: print a label followed by the array contents.
// Deliberately prints nothing when n > 20 to avoid flooding stdout on the
// full-size run.
void print_array(unsigned int* arr, int n, const char* label) {
    if(n > 20) return;
    printf("\n%s\n", label);
    for (int i = 0; i < n; i++) {
        printf("%u  ", arr[i]);  // fixed: %u for unsigned int (was %d)
    }
    printf("\n");
}

// Driver: computes the exclusive prefix sum of N random unsigned ints on the
// GPU via a three-phase scan (per-block Blelloch scan, hierarchical scan of
// the block sums, offset add), verifies against a CPU reference, and reports
// GPU vs. CPU timing.
int main() {
    // Host buffers: input plus GPU/CPU results for element-wise comparison.
    unsigned int* h_input = (unsigned int*)malloc(N * sizeof(unsigned int));
    unsigned int* h_output_gpu = (unsigned int*)malloc(N * sizeof(unsigned int));
    unsigned int* h_output_cpu = (unsigned int*)malloc(N * sizeof(unsigned int));
    if (h_input == NULL || h_output_gpu == NULL || h_output_cpu == NULL) {
        fprintf(stderr, "Host allocation failed\n");
        exit(1);
    }

    // Fixed seed for reproducible runs. Sums may exceed UINT_MAX, but unsigned
    // wraparound is identical on host and device, so the comparison still holds.
    srand(123);
    for (int i = 0; i < N; i++) {
        h_input[i] = rand() % N;
    }

    unsigned int *d_input, *d_output, *d_block_sums;
    CUDA_CHECK(cudaMalloc(&d_input, N * sizeof(unsigned int)));
    CUDA_CHECK(cudaMalloc(&d_output, N * sizeof(unsigned int)));
    CUDA_CHECK(cudaMalloc(&d_block_sums, MAX_BLOCKS * sizeof(unsigned int)));
    CUDA_CHECK(cudaMemcpy(d_input, h_input, N * sizeof(unsigned int), cudaMemcpyHostToDevice));

    int num_blocks = (N + BLOCK_SIZE - 1) / BLOCK_SIZE;

    // CUDA events bracket the whole GPU pipeline for timing.
    cudaEvent_t start, stop;
    CUDA_CHECK(cudaEventCreate(&start));
    CUDA_CHECK(cudaEventCreate(&stop));
    CUDA_CHECK(cudaEventRecord(start));

    // Phase 1: per-block exclusive scan; also records each block's total.
    size_t shared_mem = BLOCK_SIZE * sizeof(unsigned int);
    blelloch_scan_block<<<num_blocks, BLOCK_SIZE, shared_mem>>>(
        d_input, d_output, d_block_sums);
    CUDA_CHECK(cudaGetLastError());
    CUDA_CHECK(cudaDeviceSynchronize());

    // Phase 2: hierarchical prefix sum of the block sums, entirely on device.
    // Level 1: scan d_block_sums in segments, one segment per block.
    int level1_num_blocks = (num_blocks + BLOCK_SIZE - 1) / BLOCK_SIZE;
    unsigned int *d_block_totals, *d_block_offsets, *d_block_sums_scan;

    CUDA_CHECK(cudaMalloc(&d_block_totals, level1_num_blocks * sizeof(unsigned int)));
    CUDA_CHECK(cudaMalloc(&d_block_offsets, level1_num_blocks * sizeof(unsigned int)));
    CUDA_CHECK(cudaMalloc(&d_block_sums_scan, num_blocks * sizeof(unsigned int)));

    size_t level1_shared_mem = BLOCK_SIZE * sizeof(unsigned int);
    // Write into d_block_sums_scan so d_block_sums is not overwritten.
    hierarchical_scan_level1<<<level1_num_blocks, BLOCK_SIZE, level1_shared_mem>>>(
        d_block_sums, d_block_sums_scan, d_block_totals, num_blocks);
    CUDA_CHECK(cudaGetLastError());
    CUDA_CHECK(cudaDeviceSynchronize());

    // Level 2: one block scans the per-segment totals into segment offsets.
    int level2_block_size = next_power_of_2(level1_num_blocks); // power of 2 required by the scan
    size_t level2_shared_mem = level2_block_size * sizeof(unsigned int);
    hierarchical_scan_level2<<<1, level2_block_size, level2_shared_mem>>>(
        d_block_totals, d_block_offsets, level1_num_blocks);
    CUDA_CHECK(cudaGetLastError());
    CUDA_CHECK(cudaDeviceSynchronize());

    // Correction: add each segment's offset back onto d_block_sums_scan.
    hierarchical_scan_correction<<<level1_num_blocks, BLOCK_SIZE>>>(
        d_block_sums_scan, d_block_offsets, num_blocks, BLOCK_SIZE);
    CUDA_CHECK(cudaGetLastError());
    CUDA_CHECK(cudaDeviceSynchronize());

    // d_block_sums_scan now holds the final per-block offsets.
    // Phase 3: add each block's offset to its scanned elements.
    add_block_offsets<<<num_blocks, BLOCK_SIZE>>>(d_output, d_block_sums_scan, N);
    CUDA_CHECK(cudaGetLastError());
    CUDA_CHECK(cudaDeviceSynchronize());

    CUDA_CHECK(cudaEventRecord(stop));
    CUDA_CHECK(cudaEventSynchronize(stop));

    // Elapsed GPU time in milliseconds.
    float gpu_time_ms = 0;
    CUDA_CHECK(cudaEventElapsedTime(&gpu_time_ms, start, stop));

    // Copy the result back to the host for verification.
    CUDA_CHECK(cudaMemcpy(h_output_gpu, d_output, N * sizeof(unsigned int), cudaMemcpyDeviceToHost));

    // CPU reference (exclusive scan), timed for comparison.
    clock_t cpu_start = clock();
    cpu_prefix_sum_exclusive(h_input, h_output_cpu, N);
    clock_t cpu_end = clock();
    double cpu_time_ms = ((double)(cpu_end - cpu_start)) / CLOCKS_PER_SEC * 1000.0;

    print_array(h_input, N, "originally array:");
    print_array(h_output_cpu, N, "cpu exclusive prefix sum:");
    print_array(h_output_gpu, N, "gpu exclusive prefix sum:");

    // Element-wise verification; stop at the first mismatch.
    bool passed = true;
    for (int i = 0; i < N; i++) {
        if (h_output_gpu[i] != h_output_cpu[i]) {
            printf("Mismatch at index %d: GPU=%u, CPU=%u\n", i, h_output_gpu[i], h_output_cpu[i]);
            passed = false;
            break;
        }
    }
    if (passed) {
        printf("Test PASSED! GPU and CPU exclusive prefix sums match.\n");
    } else {
        printf("Test FAILED!\n");
    }

    printf("Data size: %d\n", N);
    printf("GPU execution time: %.3f ms\n", gpu_time_ms);
    printf("CPU execution time: %.3f ms\n", cpu_time_ms);

    // Cleanup. Device frees and event destruction are now error-checked,
    // consistent with the CUDA_CHECK discipline used everywhere else.
    free(h_input);
    free(h_output_gpu);
    free(h_output_cpu);
    CUDA_CHECK(cudaFree(d_input));
    CUDA_CHECK(cudaFree(d_output));
    CUDA_CHECK(cudaFree(d_block_sums));
    CUDA_CHECK(cudaFree(d_block_totals));
    CUDA_CHECK(cudaFree(d_block_offsets));
    CUDA_CHECK(cudaFree(d_block_sums_scan));
    CUDA_CHECK(cudaEventDestroy(start));
    CUDA_CHECK(cudaEventDestroy(stop));

    return 0;
}