#include <iostream>
#include <cuda_runtime.h>
#include <chrono>

// 向量加法的 CUDA 核函数
// Element-wise vector addition kernel: c[i] = a[i] + b[i] for i in [0, n).
//
// Uses a grid-stride loop so the kernel is correct for ANY grid/block
// configuration (including a single-block debug launch), instead of silently
// requiring at least one thread per element. `__restrict__` on the const
// inputs tells the compiler the buffers do not alias, allowing loads of
// `a` and `b` to go through the read-only data cache.
//
// Expected launch: 1D grid, 1D blocks; any sizes work.
__global__ void vectorAdd(const float *__restrict__ a,
                          const float *__restrict__ b,
                          float *__restrict__ c, int n) {
    int stride = gridDim.x * blockDim.x;
    for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < n; idx += stride) {
        c[idx] = a[idx] + b[idx];
    }
}

// 矩阵乘法的 CUDA 核函数
// Naive matrix multiplication kernel: C = A * B, all matrices row-major.
//   A is M x N, B is N x K, C is M x K.
//
// Expected launch: 2D grid / 2D blocks where grid.x covers the K columns of C
// and grid.y covers the M rows. The bounds check handles grids that overshoot
// the matrix dimensions (grid sizes are rounded up by the caller).
//
// `__restrict__` on the const inputs marks A and B as non-aliasing read-only
// data, letting the compiler route their loads through the read-only cache.
// One global load of A and B per multiply — no shared-memory tiling; adequate
// for a benchmark baseline, not a tuned GEMM.
__global__ void matrixMultiply(const float *__restrict__ A,
                               const float *__restrict__ B,
                               float *__restrict__ C,
                               int M, int N, int K) {
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row < M && col < K) {
        float sum = 0.0f;
        for (int i = 0; i < N; ++i) {
            sum += A[row * N + i] * B[i * K + col];
        }
        C[row * K + col] = sum;
    }
}

// 检查 CUDA 调用是否出错
// Abort the program with a diagnostic if a CUDA runtime call failed.
// `message` identifies the failing call site; the CUDA error string from
// cudaGetErrorString is appended. On success this is a no-op.
void checkCudaError(cudaError_t error, const char *message) {
    if (error == cudaSuccess) {
        return;
    }
    std::cerr << message << ": " << cudaGetErrorString(error) << std::endl;
    exit(EXIT_FAILURE);
}

// Vector-addition benchmark: allocates host/device buffers, copies the
// inputs, times the kernel (wall clock around launch + device sync), copies
// the result back, and releases all resources. Exits via checkCudaError on
// any CUDA failure.
static void runVectorAddTest() {
    const int vectorSize = 1000000;
    const size_t vectorBytes = vectorSize * sizeof(float);

    // Host buffers.
    float *h_a = new float[vectorSize];
    float *h_b = new float[vectorSize];
    float *h_c = new float[vectorSize];

    // Deterministic input data: a[i] = i, b[i] = 2i.
    for (int i = 0; i < vectorSize; ++i) {
        h_a[i] = static_cast<float>(i);
        h_b[i] = static_cast<float>(i * 2);
    }

    // Device buffers.
    float *d_a, *d_b, *d_c;
    checkCudaError(cudaMalloc((void**)&d_a, vectorBytes), "cudaMalloc d_a");
    checkCudaError(cudaMalloc((void**)&d_b, vectorBytes), "cudaMalloc d_b");
    checkCudaError(cudaMalloc((void**)&d_c, vectorBytes), "cudaMalloc d_c");

    // Host -> device input copies.
    checkCudaError(cudaMemcpy(d_a, h_a, vectorBytes, cudaMemcpyHostToDevice), "cudaMemcpy h_a to d_a");
    checkCudaError(cudaMemcpy(d_b, h_b, vectorBytes, cudaMemcpyHostToDevice), "cudaMemcpy h_b to d_b");

    // 1D launch: enough blocks to cover vectorSize (ceil-div).
    int threadsPerBlock = 256;
    int blocksPerGrid = (vectorSize + threadsPerBlock - 1) / threadsPerBlock;

    // NOTE: this is the first kernel launch of the process, so the measured
    // time also includes one-time launch/JIT overhead. For precise kernel
    // timing, warm up first or use cudaEvent timers.
    auto start = std::chrono::high_resolution_clock::now();

    vectorAdd<<<blocksPerGrid, threadsPerBlock>>>(d_a, d_b, d_c, vectorSize);
    // Launch-configuration errors surface via cudaGetLastError; execution
    // errors surface at the synchronize below.
    checkCudaError(cudaGetLastError(), "vectorAdd kernel launch");
    checkCudaError(cudaDeviceSynchronize(), "cudaDeviceSynchronize");

    auto end = std::chrono::high_resolution_clock::now();
    auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
    std::cout << "向量加法耗时: " << duration << " 毫秒" << std::endl;

    // Device -> host result copy (also a synchronization point).
    checkCudaError(cudaMemcpy(h_c, d_c, vectorBytes, cudaMemcpyDeviceToHost), "cudaMemcpy d_c to h_c");

    checkCudaError(cudaFree(d_a), "cudaFree d_a");
    checkCudaError(cudaFree(d_b), "cudaFree d_b");
    checkCudaError(cudaFree(d_c), "cudaFree d_c");

    delete[] h_a;
    delete[] h_b;
    delete[] h_c;
}

// Matrix-multiplication benchmark: C (MxK) = A (MxN) * B (NxK), row-major,
// using a 16x16 2D block layout. Same allocate/copy/time/copy-back/free
// structure as runVectorAddTest; exits via checkCudaError on any failure.
static void runMatrixMultiplyTest() {
    const int M = 1000;
    const int N = 1000;
    const int K = 1000;
    const size_t bytesA = M * N * sizeof(float);
    const size_t bytesB = N * K * sizeof(float);
    const size_t bytesC = M * K * sizeof(float);

    // Host buffers.
    float *h_A = new float[M * N];
    float *h_B = new float[N * K];
    float *h_C = new float[M * K];

    // Deterministic input data: A[i] = i, B[i] = 2i.
    for (int i = 0; i < M * N; ++i) {
        h_A[i] = static_cast<float>(i);
    }
    for (int i = 0; i < N * K; ++i) {
        h_B[i] = static_cast<float>(i * 2);
    }

    // Device buffers.
    float *d_A, *d_B, *d_C;
    checkCudaError(cudaMalloc((void**)&d_A, bytesA), "cudaMalloc d_A");
    checkCudaError(cudaMalloc((void**)&d_B, bytesB), "cudaMalloc d_B");
    checkCudaError(cudaMalloc((void**)&d_C, bytesC), "cudaMalloc d_C");

    // Host -> device input copies.
    checkCudaError(cudaMemcpy(d_A, h_A, bytesA, cudaMemcpyHostToDevice), "cudaMemcpy h_A to d_A");
    checkCudaError(cudaMemcpy(d_B, h_B, bytesB, cudaMemcpyHostToDevice), "cudaMemcpy h_B to d_B");

    // 2D launch: x covers the K columns of C, y covers the M rows (ceil-div).
    dim3 threadsPerBlock2(16, 16);
    dim3 blocksPerGrid2((K + threadsPerBlock2.x - 1) / threadsPerBlock2.x,
                        (M + threadsPerBlock2.y - 1) / threadsPerBlock2.y);

    auto start = std::chrono::high_resolution_clock::now();

    matrixMultiply<<<blocksPerGrid2, threadsPerBlock2>>>(d_A, d_B, d_C, M, N, K);
    checkCudaError(cudaGetLastError(), "matrixMultiply kernel launch");
    checkCudaError(cudaDeviceSynchronize(), "cudaDeviceSynchronize");

    auto end = std::chrono::high_resolution_clock::now();
    auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
    std::cout << "矩阵乘法耗时: " << duration << " 毫秒" << std::endl;

    // Device -> host result copy.
    checkCudaError(cudaMemcpy(h_C, d_C, bytesC, cudaMemcpyDeviceToHost), "cudaMemcpy d_C to h_C");

    checkCudaError(cudaFree(d_A), "cudaFree d_A");
    checkCudaError(cudaFree(d_B), "cudaFree d_B");
    checkCudaError(cudaFree(d_C), "cudaFree d_C");

    delete[] h_A;
    delete[] h_B;
    delete[] h_C;
}

// Entry point: runs the two benchmarks in sequence.
int main() {
    runVectorAddTest();
    runMatrixMultiplyTest();
    return 0;
}