#include <cuda.h>
#include <cuda_runtime.h>

#include <cmath>
#include <cstdlib>
#include <iostream>

// Matrix dimensions: A is M x K, B is K x N, and the product C is M x N.
#define M 10
#define N 10
#define K 10

// Host-side (CPU memory) matrices, stored row-major.
// NOTE(review): these live in host memory and must be copied into device
// buffers before a kernel may read them; passing them to a kernel directly
// is an illegal device memory access. C is not referenced by main() in this
// file — verify whether it is still needed.
float A[M * K];
float B[K * N];
float C[M * N];

// Computes C = A * B for row-major matrices in device global memory.
//   A: m x k,  B: k x n,  C: m x n  (all row-major device pointers).
// Expected launch: a 2D grid of 2D blocks where (blockIdx.x, threadIdx.x)
// index columns of C and (blockIdx.y, threadIdx.y) index rows. The bounds
// check makes any grid that covers the m x n output valid, including
// partially filled edge blocks.
__global__ void matrixMultiply(const float* __restrict__ A,
                               const float* __restrict__ B,
                               float* __restrict__ C,
                               int m, int n, int k) {
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row < m && col < n) {
        // Accumulate the dot product in a register instead of performing a
        // global-memory read-modify-write of C on every inner iteration.
        float sum = 0.0f;
        for (int i = 0; i < k; i++) {
            sum += A[row * k + i] * B[i * n + col];
        }
        C[row * n + col] = sum;
    }
}

// Launch configuration: the kernel indexes the output in 2D, so blocks must
// be BLOCK_SIZE x BLOCK_SIZE threads and the grid must tile the M x N output.
#define BLOCK_SIZE 16
// Ceil-divide so partially filled edge tiles are still launched.
#define GRID_SIZE dim3((N + BLOCK_SIZE - 1) / BLOCK_SIZE, (M + BLOCK_SIZE - 1) / BLOCK_SIZE)

// Abort with a readable message if a CUDA runtime call fails.
#define CUDA_CHECK(call)                                                      \
    do {                                                                      \
        cudaError_t err_ = (call);                                            \
        if (err_ != cudaSuccess) {                                            \
            std::cerr << "CUDA error at " << __FILE__ << ":" << __LINE__      \
                      << ": " << cudaGetErrorString(err_) << std::endl;       \
            std::exit(EXIT_FAILURE);                                          \
        }                                                                     \
    } while (0)

int main() {
    // Initialize the input matrices with their flat index.
    for (int i = 0; i < M * K; i++) {
        A[i] = (float)i;
    }
    for (int i = 0; i < K * N; i++) {
        B[i] = (float)i;
    }

    // Allocate device buffers and upload the inputs. The kernel must not be
    // handed the host arrays A/B directly — dereferencing host pointers on
    // the device is an illegal memory access.
    float* d_A;
    float* d_B;
    float* d_C;
    CUDA_CHECK(cudaMalloc((void**)&d_A, M * K * sizeof(float)));
    CUDA_CHECK(cudaMalloc((void**)&d_B, K * N * sizeof(float)));
    CUDA_CHECK(cudaMalloc((void**)&d_C, M * N * sizeof(float)));
    CUDA_CHECK(cudaMemcpy(d_A, A, M * K * sizeof(float), cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(d_B, B, K * N * sizeof(float), cudaMemcpyHostToDevice));

    // Launch with a 2D block so blockIdx.y/threadIdx.y actually drive the row
    // index; the old 1D launch left blockDim.y == 1 and computed only row 0.
    matrixMultiply<<<GRID_SIZE, dim3(BLOCK_SIZE, BLOCK_SIZE)>>>(d_A, d_B, d_C, M, N, K);
    CUDA_CHECK(cudaGetLastError());       // catches bad launch configuration
    CUDA_CHECK(cudaDeviceSynchronize());  // catches asynchronous kernel faults

    // Copy the result back and verify against a CPU reference. The previous
    // closed-form check, i * (i + 1) / 2, does not describe this product.
    float* h_C = new float[M * N];
    CUDA_CHECK(cudaMemcpy(h_C, d_C, M * N * sizeof(float), cudaMemcpyDeviceToHost));
    int status = 0;
    for (int row = 0; row < M && status == 0; row++) {
        for (int col = 0; col < N; col++) {
            float expected = 0.0f;
            for (int i = 0; i < K; i++) {
                expected += A[row * K + i] * B[i * N + col];
            }
            float got = h_C[row * N + col];
            // Tolerance-based comparison: host and device float summation
            // need not be bit-identical.
            if (std::fabs(got - expected) > 1e-3f + 1e-3f * std::fabs(expected)) {
                std::cout << "Error at (" << row << "," << col << "): "
                          << got << " != " << expected << std::endl;
                status = 1;
                break;
            }
        }
    }

    // Release host and device resources.
    delete[] h_C;
    CUDA_CHECK(cudaFree(d_A));
    CUDA_CHECK(cudaFree(d_B));
    CUDA_CHECK(cudaFree(d_C));

    // Non-zero exit on verification failure so callers/CI can detect it.
    return status;
}
