#include <hip/hip_runtime.h>
#include <iostream>
#include <cstdlib>
#include <cmath>
#include <random>

// Error-checking macro for HIP runtime calls: on failure, prints the
// error string with file/line context and terminates the process.
// Wrapped in do { ... } while (0) so the macro expands to a single
// statement and is safe inside an unbraced if/else; the local is named
// hip_check_err_ to avoid shadowing a caller variable named `error`.
#define HIP_CHECK(cmd) do { \
    hipError_t hip_check_err_ = (cmd); \
    if (hip_check_err_ != hipSuccess) { \
        std::cerr << "HIP error: " << hipGetErrorString(hip_check_err_) \
                  << " at " << __FILE__ << ":" << __LINE__ << std::endl; \
        exit(EXIT_FAILURE); \
    } \
} while (0)

// Kernel launch configuration. constexpr (rather than const) guarantees
// compile-time evaluation, letting the compiler size shared/local arrays
// and unroll the fixed-trip loops that use these values.
constexpr int WORK_PER_THREAD = 4;    // elements processed per thread
constexpr int BLOCK_SIZE = 256;       // threads per block
constexpr int GRID_SIZE = 1024;       // blocks per grid
constexpr int TOTAL_ELEMENTS = GRID_SIZE * BLOCK_SIZE;
constexpr int SHARED_SIZE = 2048;     // shared-memory array length (elements)
constexpr int MAX_SCRATCH = 512;      // per-thread local (scratch) array length
constexpr int FBARRIER_COUNT = 8;     // number of barrier-synchronized iterations
constexpr int SHARED_PADDING = 32;    // extra shared-memory padding elements

// Baseline kernel: a deliberately inefficient implementation whose purpose
// is to generate PMC (performance-counter) activity for profiling, not to
// compute a meaningful result. Expected launch: 1-D grid/block with
// gridDim.x * blockDim.x == size (this file launches GRID_SIZE x BLOCK_SIZE
// with size == TOTAL_ELEMENTS, so the guard below never diverges).
// NOTE(review): the early `return` sits before the __syncthreads() calls in
// the loop — if this kernel were ever launched with more threads than
// `size`, the out-of-range threads would skip the barriers, which is
// undefined behavior. Safe only under the launch config used here.
__global__ void baseline_kernel(float* out, const float* in, int size) {
    // Large per-thread local array, intended to spill into scratch
    // (private/local) memory and show up in the memory-traffic counters.
    float scratch[MAX_SCRATCH];
    
    int global_idx = blockIdx.x * blockDim.x + threadIdx.x;
    if(global_idx >= size) return;
    
    int local_idx = threadIdx.x;
    
    // Statically declared shared memory, no padding — the access pattern
    // below is designed to cause bank conflicts.
    __shared__ float shared[SHARED_SIZE];
    
    // Repeat the load/compute/store cycle FBARRIER_COUNT times so the
    // barrier counters accumulate multiple __syncthreads() per thread.
    for (int i = 0; i < FBARRIER_COUNT; i++) {
        // Stage one element per thread into shared memory.
        shared[local_idx] = in[global_idx];
        __syncthreads();
        
        float sum = 0.0f;
        for (int j = 0; j < WORK_PER_THREAD; j++) {
            // Deliberate bank-conflict pattern: lanes within a warp map to
            // addresses that stride by 32 elements, hitting the same bank.
            int conflict_idx = (local_idx % 32) * 32 + j;
            float val = shared[conflict_idx % SHARED_SIZE];
            
            // Transcendental math to generate ALU/VALU counter activity.
            val = sinf(val) * cosf(val) + sqrtf(fabsf(val));
            
            // Round-trip through the scratch array so the spill traffic
            // is actually exercised (j < MAX_SCRATCH here, so j % MAX_SCRATCH == j).
            scratch[j % MAX_SCRATCH] = val;
            sum += scratch[j % MAX_SCRATCH];
        }
        
        // Write the partial result. NOTE(review): the index mixes
        // global_idx*WORK_PER_THREAD with local_idx (not j), producing a
        // scattered, non-coalesced store — presumably intentional for the
        // counters, but verify this is the index pattern you want.
        int out_idx = global_idx * WORK_PER_THREAD + local_idx;
        if(out_idx < size) {
            out[out_idx] = sum;
        }
        __syncthreads();
    }
}

// Optimized kernel: the efficient counterpart to baseline_kernel, used as
// the profiled subject. Expected launch: 1-D grid/block with
// gridDim.x * blockDim.x == size (as launched from main() in this file).
// NOTE(review): the early `return` precedes the __syncthreads() calls —
// safe only because every thread passes the guard under this launch config.
__global__ void optimized_kernel(float* out, const float* in, int size) {
    // Shared memory with SHARED_PADDING extra elements to avoid bank
    // conflicts. NOTE(review): `shared` is written once below but never
    // read back — it appears to exist only to populate LDS counters;
    // confirm this is intentional.
    __shared__ float shared[SHARED_SIZE + SHARED_PADDING];
    
    int global_idx = blockIdx.x * blockDim.x + threadIdx.x;
    if(global_idx >= size) return;
    
    int local_idx = threadIdx.x;
    
    // Stage WORK_PER_THREAD inputs into registers, guarding each read
    // against the end of the input buffer (indices >= size read as 0).
    float local_data[WORK_PER_THREAD];
    for (int i = 0; i < WORK_PER_THREAD; i++) {
        int idx = global_idx * WORK_PER_THREAD + i;
        local_data[i] = (idx < size) ? in[idx] : 0.0f;
    }
    
    // Single staging store + one barrier (vs. FBARRIER_COUNT in baseline).
    shared[local_idx] = local_data[0];
    __syncthreads();
    
    // Per-element compute, all operands in registers.
    for (int j = 0; j < WORK_PER_THREAD; j++) {
        float val = local_data[j];
        
        // Same transcendental mix as the baseline kernel.
        float sin_val = sinf(val);
        float cos_val = cosf(val);
        float result = sin_val * cos_val + sqrtf(fabsf(val));
        
        // Exchange `result` with the XOR-1 partner lane in the warp.
        // NOTE(review): mask-less __shfl_xor is the HIP form; on CUDA
        // (Volta+) this would need __shfl_xor_sync with an explicit mask.
        // Assumes the full warp is active — true under this launch config.
        float shuffled = __shfl_xor(result, 1);
        val = result * (1.0f - shuffled);
        
        // Coalesced-style output store. NOTE(review): the guard is
        // `out_idx < size`, although the output buffer allocated in main()
        // holds size * WORK_PER_THREAD elements, so only the first quarter
        // of the buffer is ever written — verify this is intended.
        int out_idx = global_idx * WORK_PER_THREAD + j;
        if (out_idx < size) {
            out[out_idx] = val;
        }
    }
    
    // Final barrier before kernel exit. NOTE(review): nothing is read
    // after it, so it likely exists only for counter parity with baseline.
    __syncthreads();
}

int main() {
    float *d_input, *d_output_baseline, *d_output_optimized;
    size_t data_size = TOTAL_ELEMENTS * sizeof(float);
    size_t output_size = TOTAL_ELEMENTS * WORK_PER_THREAD * sizeof(float);
    
    std::cout << "Total elements: " << TOTAL_ELEMENTS << std::endl;
    std::cout << "Input size: " << data_size / (1024 * 1024) << " MB" << std::endl;
    std::cout << "Output size: " << output_size / (1024 * 1024) << " MB" << std::endl;
    
    // 分配设备内存
    HIP_CHECK(hipMalloc(&d_input, data_size));
    HIP_CHECK(hipMalloc(&d_output_baseline, output_size));
    HIP_CHECK(hipMalloc(&d_output_optimized, output_size));
    
    // 初始化输入数据
    float* h_input = new float[TOTAL_ELEMENTS];
    std::random_device rd;
    std::mt19937 gen(rd());
    std::uniform_real_distribution<float> dis(0.0f, 1.0f);
    
    for (int i = 0; i < TOTAL_ELEMENTS; i++) {
        h_input[i] = dis(gen);
    }
    
    // 拷贝数据到设备
    HIP_CHECK(hipMemcpy(d_input, h_input, data_size, hipMemcpyHostToDevice));
    
    // 执行基线内核
    std::cout << "Running baseline kernel..." << std::endl;
    hipLaunchKernelGGL(baseline_kernel,
                      dim3(GRID_SIZE),
                      dim3(BLOCK_SIZE),
                      SHARED_SIZE * sizeof(float), 0,
                      d_output_baseline, d_input, TOTAL_ELEMENTS);
    HIP_CHECK(hipDeviceSynchronize());
    std::cout << "Baseline kernel completed successfully." << std::endl;
    
    // ======================================================
    // 性能分析部分 - 只分析优化内核
    // ======================================================
    std::cout << "\nStarting profiler for optimized kernel..." << std::endl;
    HIP_CHECK(hipProfilerStart());
    
    std::cout << "Running optimized kernel under profiler..." << std::endl;
    hipLaunchKernelGGL(optimized_kernel,
                      dim3(GRID_SIZE),
                      dim3(BLOCK_SIZE),
                      (SHARED_SIZE + SHARED_PADDING) * sizeof(float), 0,
                      d_output_optimized, d_input, TOTAL_ELEMENTS);
    HIP_CHECK(hipDeviceSynchronize());
    
    HIP_CHECK(hipProfilerStop());
    std::cout << "Profiler stopped." << std::endl;
    std::cout << "Optimized kernel completed successfully." << std::endl;
    
    // 清理资源
    HIP_CHECK(hipFree(d_input));
    HIP_CHECK(hipFree(d_output_baseline));
    HIP_CHECK(hipFree(d_output_optimized));
    delete[] h_input;
    
    std::cout << "\nBoth kernels completed. Use rocprof to analyze PMC metrics:" << std::endl;
    std::cout << "  rocprof --stats ./your_program" << std::endl;
    
    return 0;
}