#include <cuda_runtime.h>

#include <algorithm>
#include <chrono>
#include <iostream>
#include <vector>

// Error-checking wrapper for CUDA runtime API calls: print the failing
// file/line and error string, then abort the process.
// Wrapped in do { ... } while (0) so the macro expands to a single
// statement and is safe in unbraced if/else contexts (the original bare
// { } block broke `if (x) CHECK_CUDA(...); else ...`).
#define CHECK_CUDA(call)                                                    \
    do {                                                                    \
        cudaError_t err = (call);                                           \
        if (err != cudaSuccess) {                                           \
            std::cerr << "CUDA error at " << __FILE__ << ":" << __LINE__    \
                      << ": " << cudaGetErrorString(err) << std::endl;      \
            exit(EXIT_FAILURE);                                             \
        }                                                                   \
    } while (0)

// Kernel: element-wise vector sum, c[i] = a[i] + b[i] for i in [0, n).
// Expects a 1-D launch whose grid covers at least n threads; surplus
// threads past the end of the arrays return immediately.
__global__ void addVectors(float *a, float *b, float *c, int n) {
    const int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i >= n) return;  // guard the grid tail
    c[i] = a[i] + b[i];
}

// Run vector addition c = a + b entirely on device 0.
// a, b, c are host arrays of n floats; c is fully overwritten.
// Blocking: uses synchronous cudaMemcpy, so the result is ready on return.
void runSingleGPU(float *a, float *b, float *c, int n) {
    float *d_a, *d_b, *d_c;
    const size_t bytes = static_cast<size_t>(n) * sizeof(float);
    CHECK_CUDA(cudaSetDevice(0));

    // Allocate device buffers
    CHECK_CUDA(cudaMalloc(&d_a, bytes));
    CHECK_CUDA(cudaMalloc(&d_b, bytes));
    CHECK_CUDA(cudaMalloc(&d_c, bytes));

    // Copy inputs to the device
    CHECK_CUDA(cudaMemcpy(d_a, a, bytes, cudaMemcpyHostToDevice));
    CHECK_CUDA(cudaMemcpy(d_b, b, bytes, cudaMemcpyHostToDevice));

    // Launch the kernel (ceil-div grid so every element is covered)
    int blockSize = 256;
    int gridSize = (n + blockSize - 1) / blockSize;
    addVectors<<<gridSize, blockSize>>>(d_a, d_b, d_c, n);
    // Kernel launches do not return an error code; catch launch-config
    // errors explicitly (the following blocking memcpy surfaces async ones).
    CHECK_CUDA(cudaGetLastError());

    // Copy the result back to the host
    CHECK_CUDA(cudaMemcpy(c, d_c, bytes, cudaMemcpyDeviceToHost));

    // Release device buffers
    CHECK_CUDA(cudaFree(d_a));
    CHECK_CUDA(cudaFree(d_b));
    CHECK_CUDA(cudaFree(d_c));
}

// Run vector addition c = a + b split across numGPUs devices, one stream
// per device. The input is partitioned so that the remainder (n % numGPUs)
// is spread over the first GPUs — every element of c is computed even when
// n is not evenly divisible (the old n / numGPUs split dropped the tail).
// NOTE(review): a/b/c are pageable host memory here, so cudaMemcpyAsync
// degrades to effectively synchronous transfers; allocate with
// cudaMallocHost in the caller for real copy/compute overlap.
void runMultiGPU(float *a, float *b, float *c, int n, int numGPUs) {
    const int base = n / numGPUs;
    const int rem  = n % numGPUs;

    std::vector<cudaStream_t> streams(numGPUs);
    std::vector<float *> d_a(numGPUs), d_b(numGPUs), d_c(numGPUs);
    std::vector<int> offset(numGPUs), count(numGPUs);

    // Per-GPU partition: the first `rem` GPUs take one extra element.
    for (int i = 0; i < numGPUs; ++i) {
        count[i]  = base + (i < rem ? 1 : 0);
        offset[i] = i * base + (i < rem ? i : rem);
    }

    // Allocate, upload, launch, and schedule the download per GPU.
    for (int i = 0; i < numGPUs; ++i) {
        CHECK_CUDA(cudaSetDevice(i));
        CHECK_CUDA(cudaStreamCreate(&streams[i]));
        d_a[i] = d_b[i] = d_c[i] = nullptr;
        if (count[i] == 0) continue;  // more GPUs than elements: nothing to do

        const size_t bytes = static_cast<size_t>(count[i]) * sizeof(float);
        CHECK_CUDA(cudaMalloc(&d_a[i], bytes));
        CHECK_CUDA(cudaMalloc(&d_b[i], bytes));
        CHECK_CUDA(cudaMalloc(&d_c[i], bytes));

        CHECK_CUDA(cudaMemcpyAsync(d_a[i], a + offset[i], bytes, cudaMemcpyHostToDevice, streams[i]));
        CHECK_CUDA(cudaMemcpyAsync(d_b[i], b + offset[i], bytes, cudaMemcpyHostToDevice, streams[i]));

        int blockSize = 256;
        int gridSize = (count[i] + blockSize - 1) / blockSize;
        addVectors<<<gridSize, blockSize, 0, streams[i]>>>(d_a[i], d_b[i], d_c[i], count[i]);
        CHECK_CUDA(cudaGetLastError());  // catch launch-config errors

        CHECK_CUDA(cudaMemcpyAsync(c + offset[i], d_c[i], bytes, cudaMemcpyDeviceToHost, streams[i]));
    }

    // Drain every stream, then tear everything down.
    for (int i = 0; i < numGPUs; ++i) {
        CHECK_CUDA(cudaSetDevice(i));
        CHECK_CUDA(cudaStreamSynchronize(streams[i]));
        CHECK_CUDA(cudaStreamDestroy(streams[i]));
        CHECK_CUDA(cudaFree(d_a[i]));  // cudaFree(nullptr) is a no-op
        CHECK_CUDA(cudaFree(d_b[i]));
        CHECK_CUDA(cudaFree(d_c[i]));
    }
}

int main() {
    int numGPUs;
    CHECK_CUDA(cudaGetDeviceCount(&numGPUs));

    if (numGPUs < 1) {
        std::cerr << "No CUDA-capable GPU found!" << std::endl;
        return EXIT_FAILURE;
    }

    std::cout << "Number of GPUs available: " << numGPUs << std::endl;

    // Vector size: 1 << 24 = 16777216 elements.
    // (The previous `1 << 80` shifted past the width of int — undefined
    // behavior; the intended size per the original comment was 2^24.)
    const int n = 1 << 24;
    std::vector<float> a(n, 1.0f), b(n, 2.0f), c(n);

    // Time the single-GPU path
    auto start = std::chrono::high_resolution_clock::now();
    runSingleGPU(a.data(), b.data(), c.data(), n);
    auto end = std::chrono::high_resolution_clock::now();
    auto singleGPUTime = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
    std::cout << "Single GPU time: " << singleGPUTime << " ms" << std::endl;

    // Clear c so the verification below cannot pass on stale
    // single-GPU results if the multi-GPU path misses elements.
    std::fill(c.begin(), c.end(), 0.0f);

    // Time the multi-GPU path
    start = std::chrono::high_resolution_clock::now();
    runMultiGPU(a.data(), b.data(), c.data(), n, numGPUs);
    end = std::chrono::high_resolution_clock::now();
    auto multiGPUTime = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
    std::cout << "Multi GPU time: " << multiGPUTime << " ms" << std::endl;

    // Verify the result (1.0f + 2.0f == 3.0f is exact in float)
    bool success = true;
    for (int i = 0; i < n; ++i) {
        if (c[i] != 3.0f) {
            std::cerr << "Error at index " << i << ": " << c[i] << std::endl;
            success = false;
            break;
        }
    }

    if (success) {
        std::cout << "Results are correct!" << std::endl;
    } else {
        std::cerr << "Results are incorrect!" << std::endl;
    }

    // Report failure to the caller instead of unconditionally succeeding.
    return success ? EXIT_SUCCESS : EXIT_FAILURE;
}
