#include <cmath>
#include <cstdlib>
#include <iostream>
#include <vector>

#include <cuda_runtime.h>
#include "device_launch_parameters.h"

// CUDA核函数，实现GEMV
// GEMV kernel: result = matrix * vector.
//   matrix: rows x cols, row-major; vector: cols; result: rows.
// Expects a 1-D grid / 1-D blocks. A grid-stride loop makes the kernel
// correct for ANY grid size (including a single block when debugging),
// instead of silently requiring gridDim.x * blockDim.x >= rows.
// __restrict__ on the read-only operands lets the compiler route them
// through the read-only data cache.
__global__ void gemv_kernel(const float* __restrict__ matrix,
                            const float* __restrict__ vector,
                            float* __restrict__ result,
                            int rows, int cols) {
    int stride = gridDim.x * blockDim.x;
    for (int row = blockIdx.x * blockDim.x + threadIdx.x; row < rows; row += stride) {
        float sum = 0.0f;
        // One thread per output row: this thread reads its row contiguously,
        // and vector[j] is a warp-wide broadcast load each iteration.
        for (int j = 0; j < cols; ++j) {
            sum += matrix[row * cols + j] * vector[j];
        }
        result[row] = sum;
    }
}

// 验证GEMV结果的函数
// Verify the GPU GEMV result against a CPU reference computation.
//   matrix: rows x cols, row-major; vector: cols; result: rows (GPU output).
// Prints whether the result matches within tolerance; no return value.
void verify_result(const float* matrix, const float* vector, float* result, int rows, int cols) {
    // std::vector instead of new[]/delete[]: freed on every path, no leak.
    std::vector<float> host_result(rows);
    for (int i = 0; i < rows; ++i) {
        float sum = 0.0f;
        for (int j = 0; j < cols; ++j) {
            sum += matrix[i * cols + j] * vector[j];
        }
        host_result[i] = sum;
    }

    bool is_correct = true;
    for (int i = 0; i < rows; ++i) {
        // BUG FIX: the original used unqualified abs(), which can resolve to
        // the C integer abs(int) — truncating any difference < 1.0 to zero,
        // so the check would pass almost any result. Use std::fabs, and scale
        // the tolerance with the magnitude of the reference value: summing
        // ~cols floats accumulates relative (not just absolute) rounding
        // error, so a fixed 1e-5 absolute bound is too strict for large sums.
        float diff = std::fabs(host_result[i] - result[i]);
        float tol = 1e-5f + 1e-5f * std::fabs(host_result[i]);
        if (diff > tol) {
            is_correct = false;
            break;
        }
    }

    if (is_correct) {
        std::cout << "GEMV result is correct." << std::endl;
    }
    else {
        std::cout << "GEMV result is incorrect." << std::endl;
    }
}

// Check a CUDA runtime call's status; on failure, report which call failed
// and return false so the caller can bail out without cascading errors.
static bool checkCuda(cudaError_t err, const char* what) {
    if (err != cudaSuccess) {
        std::cerr << what << " failed: " << cudaGetErrorString(err) << std::endl;
        return false;
    }
    return true;
}

// Host driver: fills a random rows x cols matrix and a random vector,
// runs the GEMV kernel on the device, and verifies the result on the CPU.
// Returns 0 on success, 1 on any CUDA failure.
// Fixes over the original: every CUDA API call is error-checked (the
// original only checked the kernel launch), and the early-failure path no
// longer leaks host and device memory — host buffers are RAII vectors and
// device buffers are freed on a single common exit path.
int gemv_blck() {
    const int rows = 1024;
    const int cols = 1024;

    // Host buffers: std::vector frees automatically on every return path.
    std::vector<float> h_matrix(static_cast<size_t>(rows) * cols);
    std::vector<float> h_vector(cols);
    std::vector<float> h_result(rows);

    // Initialize matrix and vector with uniform values in [0, 1].
    for (float& v : h_matrix) {
        v = static_cast<float>(rand()) / RAND_MAX;
    }
    for (float& v : h_vector) {
        v = static_cast<float>(rand()) / RAND_MAX;
    }

    // Device buffers (freed unconditionally at the bottom; cudaFree(nullptr)
    // is a no-op, so partial allocation failures are safe).
    float* d_matrix = nullptr;
    float* d_vector = nullptr;
    float* d_result = nullptr;
    bool ok =
        checkCuda(cudaMalloc((void**)&d_matrix, static_cast<size_t>(rows) * cols * sizeof(float)),
                  "cudaMalloc d_matrix") &&
        checkCuda(cudaMalloc((void**)&d_vector, cols * sizeof(float)),
                  "cudaMalloc d_vector") &&
        checkCuda(cudaMalloc((void**)&d_result, rows * sizeof(float)),
                  "cudaMalloc d_result");

    // Copy inputs host -> device.
    if (ok) {
        ok = checkCuda(cudaMemcpy(d_matrix, h_matrix.data(),
                                  static_cast<size_t>(rows) * cols * sizeof(float),
                                  cudaMemcpyHostToDevice),
                       "cudaMemcpy h_matrix -> d_matrix") &&
             checkCuda(cudaMemcpy(d_vector, h_vector.data(),
                                  cols * sizeof(float), cudaMemcpyHostToDevice),
                       "cudaMemcpy h_vector -> d_vector");
    }

    // Launch: one thread per output row, ceil-div grid.
    if (ok) {
        const int blockSize = 256;
        const int gridSize = (rows + blockSize - 1) / blockSize;
        gemv_kernel<<<gridSize, blockSize>>>(d_matrix, d_vector, d_result, rows, cols);
        // Launch-configuration errors only surface via cudaGetLastError().
        ok = checkCuda(cudaGetLastError(), "gemv_kernel launch");
    }

    // Copy result device -> host. The blocking cudaMemcpy also synchronizes
    // with the kernel, so no separate cudaDeviceSynchronize is needed here,
    // and it surfaces any asynchronous execution error from the kernel.
    if (ok) {
        ok = checkCuda(cudaMemcpy(h_result.data(), d_result,
                                  rows * sizeof(float), cudaMemcpyDeviceToHost),
                       "cudaMemcpy d_result -> h_result");
    }

    if (ok) {
        verify_result(h_matrix.data(), h_vector.data(), h_result.data(), rows, cols);
    }

    // Single cleanup path: device memory is released even after a failure.
    cudaFree(d_matrix);
    cudaFree(d_vector);
    cudaFree(d_result);

    return ok ? 0 : 1;
}