#include <hip/hip_runtime.h>

#include <cstdlib>
#include <iostream>
#include <vector>

// Abort with a diagnostic if a HIP API call does not return hipSuccess.
// Wrapped in do { } while (0) so the macro expands to a single statement:
// the original brace-only form breaks `if (x) CHECK_HIP(y); else ...`
// (stray semicolon / dangling else). `cmd` is parenthesized and the local
// is suffixed to avoid shadowing an outer `error` variable.
#define CHECK_HIP(cmd) do {                                                   \
    hipError_t error_ = (cmd);                                                \
    if (error_ != hipSuccess) {                                               \
        std::cerr << "HIP error: " << hipGetErrorString(error_)               \
                  << " at line " << __LINE__ << std::endl;                    \
        exit(EXIT_FAILURE);                                                   \
    }                                                                         \
} while (0)

// Multi-stage stress-test kernel: shared-memory tile load, tile-local
// matrix product, histogram atomics, data-dependent branching, and a
// block-wide reduction.
//
// Launch requirements:
//   - blockDim must be (32, 32) — 1024 threads, matching __launch_bounds__
//     and the shared-memory tile/reduction sizes below
//   - histogram must hold 256 ints, reductionBuf one float per block
__launch_bounds__(1024)
__global__ void advancedKernel(
    float* output,
    const float* input,
    int width,
    int* histogram,
    float* reductionBuf)
{
    // Stage 1: load a 32x32 tile into shared memory.
    // Inner dimension padded (+2), kept from the original which did this
    // deliberately to perturb the bank-access pattern for profiling.
    __shared__ float smemTile[32][32 + 2];

    unsigned x = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned y = blockIdx.y * blockDim.y + threadIdx.y;
    const bool inBounds = (x < (unsigned)width && y < (unsigned)width);

    // Zero-fill out-of-range lanes so stage 2 never reads uninitialized
    // shared memory on partial tiles (the original left it undefined).
    smemTile[threadIdx.y][threadIdx.x] = inBounds ? input[y * width + x] : 0.0f;
    __syncthreads();

    // Stage 2: tile-local matrix product (VALU-heavy).
    float sum = 0.0f;
    #pragma unroll
    for (int k = 0; k < 32; ++k) {
        sum += smemTile[threadIdx.y][k] * smemTile[k][threadIdx.x];
    }

    // Stage 3: histogram update (atomic-contention stress).
    // C++ '%' keeps the dividend's sign, so a negative sum would index
    // histogram[] out of bounds — wrap it back into [0, 255]. Only
    // in-bounds threads contribute, so padding lanes don't skew bins.
    int bin = static_cast<int>(sum * 100) % 256;
    if (bin < 0) bin += 256;
    if (inBounds) {
        atomicAdd(&histogram[bin], 1);
    }

    // Stage 4: data-dependent branching (branch-efficiency stress).
    float result = 0.0f;
    if (sum > 0.5f) {          // branch path 1
        result = sqrtf(sum);
    } else if (sum > 0.2f) {   // branch path 2
        result = log2f(sum);
    } else {                   // branch path 3
        result = sinf(sum);
    }

    // Stage 5: block-wide tree reduction in shared memory.
    // Sized for the full 1024-thread block — the original declared only
    // 256 entries while the flat index reaches 1023, a shared-memory
    // overflow — and indexed by the flat thread id (the original used
    // threadIdx.x + threadIdx.y, which is not a valid flat index).
    __shared__ float partialSum[1024];
    const unsigned tid = threadIdx.y * blockDim.x + threadIdx.x;
    partialSum[tid] = inBounds ? result : 0.0f;
    __syncthreads();

    // Barriers stay outside the divergent `tid < stride` branch so every
    // thread in the block reaches them.
    for (unsigned stride = blockDim.x * blockDim.y / 2; stride > 0; stride >>= 1) {
        if (tid < stride) {
            partialSum[tid] += partialSum[tid + stride];
        }
        __syncthreads();
    }

    if (tid == 0) {
        reductionBuf[blockIdx.y * gridDim.x + blockIdx.x] = partialSum[0];
    }

    // Guard the final store: the original wrote unconditionally, which is
    // out of bounds whenever width is not a multiple of the tile size.
    if (inBounds) {
        output[y * width + x] = result;
    }
}

// Host driver: allocates device buffers, uploads a constant 0.5 input,
// launches advancedKernel over a width x width matrix, and cleans up.
int main() {
    const int width = 2048;
    // size_t avoids signed-int overflow in the byte count (an int would
    // overflow once width exceeds ~23k for 4-byte elements).
    const size_t size = static_cast<size_t>(width) * width * sizeof(float);

    // Device allocations: input/output matrices, 256-bin histogram, and
    // one reduction slot per 32x32 block.
    float *d_input, *d_output, *d_reduction;
    int *d_histogram;

    CHECK_HIP(hipMalloc(&d_input, size));
    CHECK_HIP(hipMalloc(&d_output, size));
    CHECK_HIP(hipMalloc(&d_histogram, 256 * sizeof(int)));
    CHECK_HIP(hipMalloc(&d_reduction, (width / 32) * (width / 32) * sizeof(float)));

    // Host-side init: constant input, zeroed histogram bins.
    std::vector<float> h_input(static_cast<size_t>(width) * width, 0.5f);
    CHECK_HIP(hipMemcpy(d_input, h_input.data(), size, hipMemcpyHostToDevice));
    CHECK_HIP(hipMemset(d_histogram, 0, 256 * sizeof(int)));

    // 32x32 blocks to match the kernel's shared-memory tile; ceil-div grid
    // so the full matrix is covered even for non-multiple widths.
    dim3 block(32, 32);
    dim3 grid((width + block.x - 1) / block.x, (width + block.y - 1) / block.y);

    advancedKernel<<<grid, block>>>(d_output, d_input, width, d_histogram, d_reduction);
    // Launch-configuration errors surface only via hipGetLastError(); the
    // sync afterwards surfaces asynchronous execution errors.
    CHECK_HIP(hipGetLastError());
    CHECK_HIP(hipDeviceSynchronize());

    // Release device memory.
    CHECK_HIP(hipFree(d_input));
    CHECK_HIP(hipFree(d_output));
    CHECK_HIP(hipFree(d_histogram));
    CHECK_HIP(hipFree(d_reduction));

    return 0;
}