#include <cuda_runtime.h>

#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
// 202321000517
// CUDA核函数  
// Element-wise matrix addition kernel: C = A + B, row-major storage.
// Expects a 2D launch where each thread handles one (x, y) element;
// the grid is rounded up, so edge threads are rejected by the guard.
// A and B are read-only: const + __restrict__ lets the compiler use the
// read-only data cache and reorder loads.
__global__ void matrixAdd(const float* __restrict__ A,
                          const float* __restrict__ B,
                          float* __restrict__ C,
                          int width, int height) {
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    // Compute the flat index only for in-bounds threads.
    if (x < width && y < height) {
        int index = y * width + x;
        C[index] = A[index] + B[index];
    }
}
// CPU reference for element-wise matrix addition: C = A + B.
// Matrices are row-major flat arrays of width*height floats, so a single
// linear pass over all elements is equivalent to the nested 2D loop.
void matrixAddCPU(float* A, float* B, float* C, int width, int height) {
    const int total = width * height;
    for (int i = 0; i < total; ++i) {
        C[i] = A[i] + B[i];
    }
}
// Print a row-major width x height matrix to stdout, one row per line,
// with a trailing blank line after the matrix.
void printMatrix(float* matrix, int width, int height) {
    for (int row = 0; row < height; ++row) {
        float* rowPtr = matrix + row * width;
        for (int col = 0; col < width; ++col) {
            printf("%f ", rowPtr[col]);
        }
        printf("\n");
    }
    printf("\n");
}

// Error-check wrapper for CUDA runtime calls: report the failing call's
// file/line and message, then exit. Kernel launches return no status, so
// they are checked separately with cudaGetLastError() after the launch.
#define CUDA_CHECK(call)                                                      \
    do {                                                                      \
        cudaError_t err_ = (call);                                            \
        if (err_ != cudaSuccess) {                                            \
            fprintf(stderr, "CUDA error %s:%d: %s\n", __FILE__, __LINE__,     \
                    cudaGetErrorString(err_));                                \
            exit(EXIT_FAILURE);                                               \
        }                                                                     \
    } while (0)

int main() {
    // Deliberately odd sizes (not multiples of 16) exercise the rounded-up
    // grid tail and the kernel's bounds guard.
    const int width = 4095;
    const int height = 2049;
    // Widen before multiplying so the byte count cannot overflow int
    // arithmetic for larger matrices.
    size_t size = (size_t)width * (size_t)height * sizeof(float);

    // Allocate host buffers; CPU reference and GPU result kept separate
    // so the two can be compared.
    float* h_A = (float*)malloc(size);
    float* h_B = (float*)malloc(size);
    float* h_C_cpu = (float*)malloc(size);
    float* h_C_gpu = (float*)malloc(size);
    if (h_A == NULL || h_B == NULL || h_C_cpu == NULL || h_C_gpu == NULL) {
        fprintf(stderr, "host allocation failed\n");
        return EXIT_FAILURE;
    }

    // Initialize matrices A and B with known constants (7 + 1 = 8).
    for (int i = 0; i < width * height; ++i) {
        h_A[i] = 7.0f;
        h_B[i] = 1.0f;
    }

    // Allocate device buffers.
    float *d_A, *d_B, *d_C;
    CUDA_CHECK(cudaMalloc((void**)&d_A, size));
    CUDA_CHECK(cudaMalloc((void**)&d_B, size));
    CUDA_CHECK(cudaMalloc((void**)&d_C, size));

    // Copy inputs host -> device.
    CUDA_CHECK(cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice));

    // Time the kernel with CUDA events (device-side timing).
    float gpu_time = 0.0f;
    cudaEvent_t start, stop;
    CUDA_CHECK(cudaEventCreate(&start));
    CUDA_CHECK(cudaEventCreate(&stop));
    CUDA_CHECK(cudaEventRecord(start, 0));

    // Launch: 16x16 thread blocks, grid rounded up (ceil-div) so every
    // element is covered even though the sizes don't divide evenly.
    dim3 blockSize(16, 16);
    dim3 gridSize((width + blockSize.x - 1) / blockSize.x,
                  (height + blockSize.y - 1) / blockSize.y);
    matrixAdd<<<gridSize, blockSize>>>(d_A, d_B, d_C, width, height);
    CUDA_CHECK(cudaGetLastError());  // catch bad launch configuration

    CUDA_CHECK(cudaEventRecord(stop, 0));
    // Blocks until the kernel and stop event complete; also surfaces any
    // asynchronous kernel execution errors.
    CUDA_CHECK(cudaEventSynchronize(stop));
    CUDA_CHECK(cudaEventElapsedTime(&gpu_time, start, stop));

    // Copy result device -> host (cudaMemcpy is blocking).
    CUDA_CHECK(cudaMemcpy(h_C_gpu, d_C, size, cudaMemcpyDeviceToHost));

    // Time the CPU reference implementation.
    clock_t start_cpu = clock();
    matrixAddCPU(h_A, h_B, h_C_cpu, width, height);
    clock_t end_cpu = clock();
    double cpu_time = (double)(end_cpu - start_cpu) / CLOCKS_PER_SEC;

    // Verify GPU result against the CPU reference (must happen before the
    // buffers are freed). Tolerance comparison, though identical float
    // additions are expected to match exactly here.
    int mismatches = 0;
    for (int i = 0; i < width * height; ++i) {
        if (fabsf(h_C_gpu[i] - h_C_cpu[i]) > 1e-5f) {
            ++mismatches;
        }
    }

    // Report timing, speedup, and correctness.
    printf("GPU time: %f ms\n", gpu_time);
    printf("CPU time: %f ms\n", cpu_time * 1000.0);
    printf("加速比: %f\n", cpu_time * 1000.0 / gpu_time);
    if (mismatches == 0) {
        printf("Result check: PASS\n");
    } else {
        printf("Result check: FAIL (%d mismatches)\n", mismatches);
    }

    // Cleanup.
    CUDA_CHECK(cudaEventDestroy(start));
    CUDA_CHECK(cudaEventDestroy(stop));
    CUDA_CHECK(cudaFree(d_A));
    CUDA_CHECK(cudaFree(d_B));
    CUDA_CHECK(cudaFree(d_C));
    free(h_A);
    free(h_B);
    free(h_C_cpu);
    free(h_C_gpu);

    return 0;
}