/*
Build:
${ROCM_PATH}/llvm/bin/clang++ -o cl_MatrixTranspose_thread_multi_device -I${ROCM_PATH}/opencl/include -lamdocl64 -lpthread -lOpenCL cl_MatrixTranspose_thread_multi_device.cpp
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <CL/cl.h>
#include <vector>
#include <thread>
#include <mutex>
#include <atomic>

#define WIDTH 1024
#define NUM (WIDTH * WIDTH)
#define THREADS_PER_BLOCK_X 16
#define THREADS_PER_BLOCK_Y 16

// OpenCL kernel source: naive square-matrix transpose.
// One work-item per element; with x = get_global_id(0) and
// y = get_global_id(1), writes out[y][x] = in[x][y] for a
// row-major width x width matrix.
const char* kernelSource =
"__kernel void matrixTranspose(__global float* out, __global float* in, const int width) {\n"
"    int x = get_global_id(0);\n"
"    int y = get_global_id(1);\n"
"    out[y * width + x] = in[x * width + y];\n"
"}\n";

// CPU reference implementation of the transpose, used to validate the
// GPU result: output becomes the transpose of the row-major
// width x width matrix `input`.
void matrixTransposeCPUReference(float* output, float* input, const unsigned int width) {
    for (unsigned int col = 0; col < width; ++col) {
        for (unsigned int row = 0; row < width; ++row) {
            output[row * width + col] = input[col * width + row];
        }
    }
}

// Mutex serializing console output across the per-device threads
std::mutex cout_mutex;

void runOnDevice(int device_id) {
    cl_int err;

    // 获取平台
    cl_platform_id platform;
    err = clGetPlatformIDs(1, &platform, NULL);
    if (err != CL_SUCCESS) {
        std::lock_guard<std::mutex> lock(cout_mutex);
        printf("Error getting platform IDs: %d\n", err);
        return;
    }

    // 获取设备
    cl_device_id device;
    err = clGetDeviceIDs(platform, CL_DEVICE_TYPE_GPU, 1, &device, NULL);
    if (err != CL_SUCCESS) {
        std::lock_guard<std::mutex> lock(cout_mutex);
        printf("Error getting device ID %d: %d\n", device_id, err);
        return;
    }

    // 获取设备名称
    char deviceName[128];
    err = clGetDeviceInfo(device, CL_DEVICE_NAME, sizeof(deviceName), deviceName, NULL);

    std::lock_guard<std::mutex> lock(cout_mutex);
    printf("Thread %d using device: %s\n", device_id, deviceName);

    // 创建上下文
    cl_context context = clCreateContext(NULL, 1, &device, NULL, NULL, &err);
    if (err != CL_SUCCESS) {
        printf("Error creating context: %d\n", err);
        return;
    }

    // 创建命令队列
    cl_command_queue_properties props = 0;
    cl_command_queue queue = clCreateCommandQueueWithProperties(context, device, &props, &err);
    if (err != CL_SUCCESS) {
        printf("Error creating command queue: %d\n", err);
        clReleaseContext(context);
        return;
    }

    // 分配主机内存
    float* Matrix = (float*)malloc(NUM * sizeof(float));
    float* TransposeMatrix = (float*)malloc(NUM * sizeof(float));
    float* cpuTransposeMatrix = (float*)malloc(NUM * sizeof(float));

    if (!Matrix || !TransposeMatrix || !cpuTransposeMatrix) {
        printf("Error: Failed to allocate host memory!\n");
        if (Matrix) free(Matrix);
        if (TransposeMatrix) free(TransposeMatrix);
        if (cpuTransposeMatrix) free(cpuTransposeMatrix);
        clReleaseCommandQueue(queue);
        clReleaseContext(context);
        return;
    }

    // 初始化输入数据
    for (int i = 0; i < NUM; i++) {
        Matrix[i] = (float)i * 10.0f;
    }

    // 创建内存缓冲区
    cl_mem gpuMatrix = clCreateBuffer(context, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR,
                                     NUM * sizeof(float), Matrix, &err);
    if (err != CL_SUCCESS) {
        printf("Error creating input buffer: %d\n", err);
        free(Matrix);
        free(TransposeMatrix);
        free(cpuTransposeMatrix);
        clReleaseCommandQueue(queue);
        clReleaseContext(context);
        return;
    }

    cl_mem gpuTransposeMatrix = clCreateBuffer(context, CL_MEM_WRITE_ONLY,
                                              NUM * sizeof(float), NULL, &err);
    if (err != CL_SUCCESS) {
        printf("Error creating output buffer: %d\n", err);
        clReleaseMemObject(gpuMatrix);
        free(Matrix);
        free(TransposeMatrix);
        free(cpuTransposeMatrix);
        clReleaseCommandQueue(queue);
        clReleaseContext(context);
        return;
    }

    // 创建并构建程序
    cl_program program = clCreateProgramWithSource(context, 1, &kernelSource, NULL, &err);
    if (err != CL_SUCCESS) {
        printf("Error creating program: %d\n", err);
        clReleaseMemObject(gpuTransposeMatrix);
        clReleaseMemObject(gpuMatrix);
        free(Matrix);
        free(TransposeMatrix);
        free(cpuTransposeMatrix);
        clReleaseCommandQueue(queue);
        clReleaseContext(context);
        return;
    }

    err = clBuildProgram(program, 1, &device, NULL, NULL, NULL);
    if (err != CL_SUCCESS) {
        printf("Error building program: %d\n", err);
        // 获取构建日志
        size_t logSize;
        clGetProgramBuildInfo(program, device, CL_PROGRAM_BUILD_LOG, 0, NULL, &logSize);
        char* log = (char*)malloc(logSize);
        clGetProgramBuildInfo(program, device, CL_PROGRAM_BUILD_LOG, logSize, log, NULL);
        printf("Build log:\n%s\n", log);
        free(log);

        clReleaseProgram(program);
        clReleaseMemObject(gpuTransposeMatrix);
        clReleaseMemObject(gpuMatrix);
        free(Matrix);
        free(TransposeMatrix);
        free(cpuTransposeMatrix);
        clReleaseCommandQueue(queue);
        clReleaseContext(context);
        return;
    }

    // 创建内核
    cl_kernel kernel = clCreateKernel(program, "matrixTranspose", &err);
    if (err != CL_SUCCESS) {
        printf("Error creating kernel: %d\n", err);
        clReleaseProgram(program);
        clReleaseMemObject(gpuTransposeMatrix);
        clReleaseMemObject(gpuMatrix);
        free(Matrix);
        free(TransposeMatrix);
        free(cpuTransposeMatrix);
        clReleaseCommandQueue(queue);
        clReleaseContext(context);
        return;
    }

    // 设置内核参数
    int width_arg = WIDTH;
    err = clSetKernelArg(kernel, 0, sizeof(cl_mem), &gpuTransposeMatrix);
    err |= clSetKernelArg(kernel, 1, sizeof(cl_mem), &gpuMatrix);
    err |= clSetKernelArg(kernel, 2, sizeof(int), &width_arg);

    if (err != CL_SUCCESS) {
        printf("Error setting kernel arguments: %d\n", err);
        clReleaseKernel(kernel);
        clReleaseProgram(program);
        clReleaseMemObject(gpuTransposeMatrix);
        clReleaseMemObject(gpuMatrix);
        free(Matrix);
        free(TransposeMatrix);
        free(cpuTransposeMatrix);
        clReleaseCommandQueue(queue);
        clReleaseContext(context);
        return;
    }

    // 执行内核
    size_t globalSize[2] = {WIDTH, WIDTH};
    size_t localSize[2] = {THREADS_PER_BLOCK_X, THREADS_PER_BLOCK_Y};
    err = clEnqueueNDRangeKernel(queue, kernel, 2, NULL, globalSize, localSize, 0, NULL, NULL);
    if (err != CL_SUCCESS) {
        printf("Error executing kernel: %d\n", err);
        clReleaseKernel(kernel);
        clReleaseProgram(program);
        clReleaseMemObject(gpuTransposeMatrix);
        clReleaseMemObject(gpuMatrix);
        free(Matrix);
        free(TransposeMatrix);
        free(cpuTransposeMatrix);
        clReleaseCommandQueue(queue);
        clReleaseContext(context);
        return;
    }

    // 读取结果
    err = clEnqueueReadBuffer(queue, gpuTransposeMatrix, CL_TRUE, 0,
                             NUM * sizeof(float), TransposeMatrix, 0, NULL, NULL);
    if (err != CL_SUCCESS) {
        printf("Error reading from buffer: %d\n", err);
        clReleaseKernel(kernel);
        clReleaseProgram(program);
        clReleaseMemObject(gpuTransposeMatrix);
        clReleaseMemObject(gpuMatrix);
        free(Matrix);
        free(TransposeMatrix);
        free(cpuTransposeMatrix);
        clReleaseCommandQueue(queue);
        clReleaseContext(context);
        return;
    }

    // 等待所有命令完成
    clFinish(queue);

    // CPU参考实现
    matrixTransposeCPUReference(cpuTransposeMatrix, Matrix, WIDTH);

    // 验证结果
    int errors = 0;
    double eps = 1.0E-6;
    for (int i = 0; i < NUM; i++) {
        if (fabs(TransposeMatrix[i] - cpuTransposeMatrix[i]) > eps) {
            errors++;
            if (errors < 5) { // 只打印前5个错误
                printf("Mismatch at index %d: GPU %f vs CPU %f\n",
                      i, TransposeMatrix[i], cpuTransposeMatrix[i]);
            }
        }
    }

    if (errors != 0) {
        printf("Device %d: FAILED with %d errors\n", device_id, errors);
    } else {
        printf("Device %d: PASSED!\n", device_id);
    }

    // 释放资源
    clReleaseKernel(kernel);
    clReleaseProgram(program);
    clReleaseMemObject(gpuTransposeMatrix);
    clReleaseMemObject(gpuMatrix);
    clReleaseCommandQueue(queue);
    clReleaseContext(context);

    free(Matrix);
    free(TransposeMatrix);
    free(cpuTransposeMatrix);
}

// Entry point: discovers all GPU devices on the first OpenCL platform
// and spawns one thread per device, each running the transpose test.
// Returns 0 on success, 1 if no platform or no GPU device is found.
int main(int argc, char **argv) {
    printf("> %s Starting...\n", argv[0]);

    // Get the first OpenCL platform.
    cl_platform_id platform;
    cl_int err = clGetPlatformIDs(1, &platform, NULL);
    if (err != CL_SUCCESS) {
        printf("Error getting platform IDs: %d\n", err);
        return 1;
    }

    // Count the available GPU devices.
    cl_uint numDevices = 0;
    err = clGetDeviceIDs(platform, CL_DEVICE_TYPE_GPU, 0, NULL, &numDevices);
    if (err != CL_SUCCESS || numDevices == 0) {
        printf("Error getting number of GPU devices: %d\n", err);
        return 1;
    }

    // cl_uint is unsigned, so print with %u (the original used %d).
    printf("Found %u GPU devices\n", numDevices);

    // One worker thread per device.
    std::vector<std::thread> threads;
    threads.reserve(numDevices);
    for (cl_uint i = 0; i < numDevices; i++) {
        threads.emplace_back(runOnDevice, i);
    }

    // Wait for every device thread to finish.
    for (auto& t : threads) {
        t.join();
    }

    printf("All devices completed.\n");
    return 0;
}