#include <algorithm>
#include <cassert>
#include <chrono>
#include <cstdio>
#include <cstdlib>
#include <future>
#include <iostream>
#include <numeric>
#include <random>
#include <vector>

#include "hip/hip_runtime.h"
#include "roctx.h"

// Error-checking wrapper for HIP runtime API calls: evaluates `cmd`, and on
// failure prints the HIP error string with source location, then terminates.
#define HIP_CHECK(cmd) do {                    \
    hipError_t error = cmd;                    \
    if (error != hipSuccess) {                 \
        std::cerr << "HIP error: " << hipGetErrorString(error) << " at " << __FILE__ << ":" << __LINE__ << std::endl; \
        exit(EXIT_FAILURE);                    \
    }                                          \
} while(0)

// Device kernel definitions
// Element-wise vector addition: C[i] = A[i] + B[i] for i in [0, n).
// Expects a 1-D launch; threads whose flat index falls past n do nothing.
__global__ void vectorAddKernel(const float* A, const float* B, float* C, int n) {
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= n) {
        return;
    }
    C[idx] = A[idx] + B[idx];
}

// Naive dense matrix multiply C = A * B for square width x width matrices,
// one thread per output element. Expects a 2-D launch (x -> column, y -> row);
// threads mapped outside the matrix exit without touching memory.
__global__ void matrixMulKernel(const float* A, const float* B, float* C, int width) {
    const int out_row = blockDim.y * blockIdx.y + threadIdx.y;
    const int out_col = blockDim.x * blockIdx.x + threadIdx.x;
    if (out_row >= width || out_col >= width) {
        return;
    }

    float acc = 0.0f;
    for (int k = 0; k < width; ++k) {
        acc += A[out_row * width + k] * B[k * width + out_col];
    }
    C[out_row * width + out_col] = acc;
}

// In-place per-element scalar operation on `data`, chosen by op_type % 4:
//   0: add scalar   1: multiply by scalar
//   2: clamp below via fmaxf   3: clamp above via fminf
// (A negative op_type yields a value outside 0..3 and the switch is a no-op,
// matching the original behavior.) Expects a 1-D launch.
__global__ void scalarOpsKernel(float* data, float scalar, int n, int op_type) {
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= n) {
        return;
    }
    switch (op_type % 4) {
        case 0: data[idx] += scalar; break;
        case 1: data[idx] *= scalar; break;
        case 2: data[idx] = fmaxf(data[idx], scalar); break;
        case 3: data[idx] = fminf(data[idx], scalar); break;
    }
}

// Fill `data` with `size` uniform random floats drawn from [0, 1).
// The same seed always produces the same sequence.
//
// FIX: the previous version ran the loop under `#pragma omp parallel for`
// while every thread shared one std::mt19937 and one distribution — a data
// race that corrupted the engine state and made output nondeterministic.
// std::mt19937 is not thread-safe; the fill is now sequential.
void initializeData(float* data, size_t size, unsigned int seed) {
    std::mt19937 gen(seed);
    std::uniform_real_distribution<float> dist(0.0f, 1.0f);

    for (size_t i = 0; i < size; ++i) {
        data[i] = dist(gen);
    }
}

// Sanity-checks C over the index range [start, end): each C[i] must be at
// least max(A[i], B[i]) - tolerance (a lower bound that holds for sums of
// non-negative inputs). Returns true iff every checked element passes;
// an empty range trivially passes.
bool validateResults(const std::vector<float>& A, const std::vector<float>& B,
                    const std::vector<float>& C, size_t start, size_t end, float tolerance = 1e-4f) {
    bool all_ok = true;
    for (size_t idx = start; idx < end && all_ok; ++idx) {
        const float lower_bound = std::max(A[idx], B[idx]) - tolerance;
        all_ok = (C[idx] >= lower_bound);
    }
    return all_ok;
}

// Runs the full workload for one GPU:
//   1. allocates pinned host buffers and device buffers sized for all streams,
//   2. queues, per stream, async H2D copies, a rotating chain of
//      `kernels_per_stream` kernels, and an async D2H copy,
//   3. synchronizes all streams, spot-validates the output, and prints a
//      checksum and elapsed time before releasing every resource.
// Stream s owns the element slice [s * data_per_stream, (s+1) * data_per_stream).
//
// FIXES vs. previous version:
//   - validation no longer copies all three full host buffers into fresh
//     std::vectors on EVERY loop iteration (was O(num_streams * total_data));
//   - snprintf replaces sprintf so marker formatting can never overflow `mark`.
void processDevice(int device_id, int num_streams, size_t data_per_stream, int kernels_per_stream) {
    HIP_CHECK(hipSetDevice(device_id));

    hipDeviceProp_t prop;
    HIP_CHECK(hipGetDeviceProperties(&prop, device_id));

    std::cout << "Using Device " << device_id << ": " << prop.name
              << " with " << prop.multiProcessorCount << " CUs" << std::endl;

    // Profiler marker: device work begins.
    char mark[128];
    snprintf(mark, sizeof(mark), "Device%d-Start", device_id);
    roctxMarkA(mark);

    size_t total_data = num_streams * data_per_stream;
    size_t total_bytes = total_data * sizeof(float);

    // Matrix multiplication uses a fixed 64x64 operand carved from each
    // stream's slice; those launches are skipped when the slice is too small.
    int small_matrix_size = 64;
    size_t matrix_elements = small_matrix_size * small_matrix_size;
    if (data_per_stream < matrix_elements) {
        std::cerr << "Warning: data_per_stream too small for matrix multiplication, adjusting kernel usage" << std::endl;
    }

    // Pinned (page-locked) host memory enables genuinely async transfers.
    float *h_A, *h_B, *h_C;
    HIP_CHECK(hipHostMalloc(&h_A, total_bytes, hipHostMallocDefault));
    HIP_CHECK(hipHostMalloc(&h_B, total_bytes, hipHostMallocDefault));
    HIP_CHECK(hipHostMalloc(&h_C, total_bytes, hipHostMallocDefault));

    // Distinct seeds per device/buffer keep inputs reproducible but varied.
    initializeData(h_A, total_data, device_id * 1000);
    initializeData(h_B, total_data, device_id * 1000 + 1);

    // Device memory: inputs, output, and a scratch buffer for kernel chaining.
    float *d_A, *d_B, *d_C, *d_temp;
    HIP_CHECK(hipMalloc(&d_A, total_bytes));
    HIP_CHECK(hipMalloc(&d_B, total_bytes));
    HIP_CHECK(hipMalloc(&d_C, total_bytes));
    HIP_CHECK(hipMalloc(&d_temp, total_bytes));

    std::vector<hipStream_t> streams(num_streams);
    for (int i = 0; i < num_streams; i++) {
        HIP_CHECK(hipStreamCreate(&streams[i]));
    }

    // Profiler range covering the async pipeline + final synchronization.
    snprintf(mark, sizeof(mark), "Device%d-Processing", device_id);
    roctxRangePushA(mark);

    auto stream_start = std::chrono::high_resolution_clock::now();

    // Queue every stream's full pipeline without blocking the host.
    for (int stream_id = 0; stream_id < num_streams; ++stream_id) {
        size_t offset = stream_id * data_per_stream;
        size_t data_bytes = data_per_stream * sizeof(float);

        // Per-stream views into the shared device buffers.
        float *d_A_stream = d_A + offset;
        float *d_B_stream = d_B + offset;
        float *d_C_stream = d_C + offset;
        float *d_temp_stream = d_temp + offset;

        snprintf(mark, sizeof(mark), "Device%d-Stream%d-Start", device_id, stream_id);
        roctxMarkA(mark);

        // Async H2D transfers for this stream's slice.
        HIP_CHECK(hipMemcpyAsync(d_A_stream, h_A + offset, data_bytes,
                                hipMemcpyHostToDevice, streams[stream_id]));
        HIP_CHECK(hipMemcpyAsync(d_B_stream, h_B + offset, data_bytes,
                                hipMemcpyHostToDevice, streams[stream_id]));

        // Zero the scratch slice before kernels accumulate into it.
        HIP_CHECK(hipMemsetAsync(d_temp_stream, 0, data_bytes, streams[stream_id]));

        int block_size = 256;
        int grid_size = (data_per_stream + block_size - 1) / block_size;

        // Rotate through five kernel variants to mix compute patterns.
        for (int i = 0; i < kernels_per_stream; ++i) {
            int kernel_type = i % 5;

            switch (kernel_type) {
                case 0:
                    vectorAddKernel<<<grid_size, block_size, 0, streams[stream_id]>>>(
                        d_A_stream, d_B_stream, d_C_stream, data_per_stream);
                    break;
                case 1:
                    scalarOpsKernel<<<grid_size, block_size, 0, streams[stream_id]>>>(
                        d_temp_stream, 1.25f, data_per_stream, i);
                    break;
                case 2:
                    // Skip the matmul when the slice cannot hold a 64x64 operand.
                    if (data_per_stream >= matrix_elements) {
                        dim3 block(16, 16);
                        dim3 grid((small_matrix_size + 15) / 16, (small_matrix_size + 15) / 16);
                        matrixMulKernel<<<grid, block, 0, streams[stream_id]>>>(
                            d_A_stream, d_B_stream, d_temp_stream, small_matrix_size);
                    }
                    break;
                case 3:
                    vectorAddKernel<<<grid_size, block_size, 0, streams[stream_id]>>>(
                        d_C_stream, d_temp_stream, d_C_stream, data_per_stream);
                    break;
                case 4:
                    scalarOpsKernel<<<grid_size, block_size, 0, streams[stream_id]>>>(
                        d_C_stream, 0.95f, data_per_stream, i + 1);
                    break;
            }

            // Catch launch-configuration errors immediately.
            HIP_CHECK(hipGetLastError());
        }

        // Async D2H transfer of this stream's result slice.
        HIP_CHECK(hipMemcpyAsync(h_C + offset, d_C_stream, data_bytes,
                                hipMemcpyDeviceToHost, streams[stream_id]));

        snprintf(mark, sizeof(mark), "Device%d-Stream%d-Complete", device_id, stream_id);
        roctxMarkA(mark);
    }

    // Block until every stream has drained its queue.
    for (int stream_id = 0; stream_id < num_streams; ++stream_id) {
        HIP_CHECK(hipStreamSynchronize(streams[stream_id]));
    }

    auto stream_end = std::chrono::high_resolution_clock::now();
    auto stream_duration = std::chrono::duration_cast<std::chrono::milliseconds>(stream_end - stream_start);

    roctxRangePop();

    // Spot-validate a bounded prefix of each stream's slice. The host-side
    // vectors are built ONCE here, not once per stream iteration.
    size_t check_count = std::min(size_t(1000), total_data);
    bool validation_passed = true;

    std::vector<float> host_A(h_A, h_A + total_data);
    std::vector<float> host_B(h_B, h_B + total_data);
    std::vector<float> host_C(h_C, h_C + total_data);

    for (int stream_id = 0; stream_id < num_streams && validation_passed; ++stream_id) {
        size_t offset = stream_id * data_per_stream;
        size_t check_end = offset + std::min(check_count / num_streams, data_per_stream);

        if (!validateResults(host_A, host_B, host_C, offset, check_end)) {
            validation_passed = false;
        }
    }

    // Checksum of the output buffer (float accumulation order under OpenMP
    // is unspecified, so this is a diagnostic, not an exact reference value).
    float device_sum = 0.0f;
    #pragma omp parallel for reduction(+:device_sum)
    for (size_t i = 0; i < total_data; ++i) {
        device_sum += h_C[i];
    }

    printf("Device %d: Validation %s, Sum = %.2f, Time = %ld ms\n",
           device_id, validation_passed ? "PASSED" : "FAILED", device_sum, stream_duration.count());

    // Release streams, then device memory, then pinned host memory.
    for (auto& stream : streams) {
        HIP_CHECK(hipStreamDestroy(stream));
    }

    HIP_CHECK(hipFree(d_A));
    HIP_CHECK(hipFree(d_B));
    HIP_CHECK(hipFree(d_C));
    HIP_CHECK(hipFree(d_temp));

    HIP_CHECK(hipHostFree(h_A));
    HIP_CHECK(hipHostFree(h_B));
    HIP_CHECK(hipHostFree(h_C));

    // Profiler marker: device work finished.
    snprintf(mark, sizeof(mark), "Device%d-Complete", device_id);
    roctxMarkA(mark);
}

// Entry point. Optional CLI arguments:
//   argv[1] = streams per device, argv[2] = log2(elements per stream),
//   argv[3] = kernels launched per stream.
// One async host task drives each visible GPU via processDevice().
//
// FIXES vs. previous version:
//   - `1 << atoi(argv[2])` was undefined for exponents < 0 or >= 31 and was
//     computed in int before widening; the exponent is now range-checked and
//     shifted as size_t;
//   - non-positive stream/kernel counts are rejected instead of silently
//     producing empty or nonsensical work;
//   - total-kernel count is computed in long long to avoid int overflow;
//   - throughput no longer divides by zero when the run takes < 1 ms.
int main(int argc, char** argv) {
    roctxMarkA("Program-Start");

    // Defaults: 2 streams/device, 2^18 elements/stream, 100 kernels/stream.
    int num_streams_per_device = 2;
    size_t data_per_stream = 1 << 18;
    int kernels_per_stream = 100;

    if (argc > 1) num_streams_per_device = std::atoi(argv[1]);
    if (argc > 2) {
        int shift = std::atoi(argv[2]);
        // Shifting 1 by a negative amount or past the int width is UB.
        if (shift < 0 || shift > 30) {
            std::cerr << "Invalid size exponent (expected 0..30): " << argv[2] << std::endl;
            return 1;
        }
        data_per_stream = size_t(1) << shift;
    }
    if (argc > 3) kernels_per_stream = std::atoi(argv[3]);

    if (num_streams_per_device <= 0 || kernels_per_stream <= 0) {
        std::cerr << "Stream and kernel counts must be positive" << std::endl;
        return 1;
    }

    int num_devices;
    HIP_CHECK(hipGetDeviceCount(&num_devices));

    if (num_devices == 0) {
        std::cerr << "No GPU devices found!" << std::endl;
        return 1;
    }

    std::cout << "Found " << num_devices << " GPU devices" << std::endl;
    std::cout << "Configuration: " << num_streams_per_device
              << " streams per device, " << data_per_stream
              << " elements per stream" << std::endl;
    std::cout << "Kernels per stream: " << kernels_per_stream << std::endl;

    auto start_time = std::chrono::high_resolution_clock::now();

    // One host thread per device; devices run fully in parallel.
    std::vector<std::future<void>> device_futures;

    for (int device_id = 0; device_id < num_devices; ++device_id) {
        device_futures.push_back(
            std::async(std::launch::async, [=]() {
                processDevice(device_id, num_streams_per_device,
                            data_per_stream, kernels_per_stream);
            })
        );
    }

    // get() (not wait()) so exceptions from worker threads propagate here.
    for (auto& future : device_futures) {
        future.get();
    }

    auto end_time = std::chrono::high_resolution_clock::now();
    auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(end_time - start_time);

    std::cout << "\n=== Execution Summary ===" << std::endl;
    std::cout << "Total devices: " << num_devices << std::endl;
    std::cout << "Streams per device: " << num_streams_per_device << std::endl;
    std::cout << "Kernels per stream: " << kernels_per_stream << std::endl;
    // Widen before multiplying so large configs cannot overflow int.
    std::cout << "Total kernels executed: "
              << static_cast<long long>(num_devices) * num_streams_per_device * kernels_per_stream << std::endl;
    std::cout << "Total execution time: " << duration.count() << " ms" << std::endl;

    // Throughput estimate; clamp elapsed time to >= 1 ms to avoid div-by-zero.
    double total_operations = static_cast<double>(num_devices) * num_streams_per_device *
                             kernels_per_stream * data_per_stream;
    auto elapsed_ms = duration.count();
    if (elapsed_ms <= 0) elapsed_ms = 1;
    double throughput = total_operations / (elapsed_ms / 1000.0);
    std::cout << "Estimated throughput: " << throughput / 1e6 << " M operations/sec" << std::endl;

    roctxMarkA("Program-End");

    return 0;
}