#include <iostream>
#include <cuda_runtime.h>
#include "../common/common.h"
#define DEVICENUM 0
#define BLOCKLEN 32

// Reference CPU implementation of element-wise matrix addition:
// matrix_res = matrix_a + matrix_b.
// Matrices are stored row-major with nx columns and ny rows, so element
// (row, col) lives at index col + row * nx — the same layout the GPU
// kernel uses, keeping the two results directly comparable.
//
// Fix: the original indexed with `j + i * nx` while iterating i over nx
// and j over ny; that mapping is only correct when nx == ny and reads or
// writes out of bounds / collides when nx != ny.
__host__ void sumMatrixCPU(float *matrix_a, float *matrix_b, float *matrix_res, int nx, int ny)
{
    for (int row = 0; row < ny; row++)
        for (int col = 0; col < nx; col++)
        {
            int idx = col + row * nx; // row-major, nx elements per row
            matrix_res[idx] = matrix_a[idx] + matrix_b[idx];
        }
}

// Element-wise matrix addition on the GPU: matrix_res = matrix_a + matrix_b.
// Expected launch: a 2D grid of 2D blocks covering at least nx threads in x
// (columns) and ny threads in y (rows); surplus threads exit via the bounds
// guard, so the grid need not divide the matrix evenly.
// Row-major layout with nx columns: index = col + row * nx. Consecutive
// threads in x therefore touch consecutive addresses (coalesced accesses).
//
// Fix: the original computed `index = nCol + nRow * ny`, which is only
// correct when nx == ny and indexes out of bounds otherwise; the row
// stride must be the row width nx.
__global__ void sumMatrixGPU(float *matrix_a, float *matrix_b, float *matrix_res, int nx, int ny)
{
    int nCol = threadIdx.x + blockDim.x * blockIdx.x; // global column index (x across the whole grid)
    int nRow = threadIdx.y + blockDim.y * blockIdx.y; // global row index (y across the whole grid)
    if (nCol < nx && nRow < ny)
    {
        int index = nCol + nRow * nx; // row-major, nx columns per row
        matrix_res[index] = matrix_a[index] + matrix_b[index];
    }
}

// Driver: sums two nx-by-ny matrices on the CPU and on the GPU, times both,
// and verifies the device result against the host reference.
int main(int argc, char **argv)
{
    // Select the device; checked because a sticky failure here makes every
    // later CUDA call fail mysteriously.
    CHECK(cudaSetDevice(DEVICENUM));
    double time_cpu, time_gpu;
    int nx = 1 << 8; // matrix width  (columns)
    int ny = 1 << 8; // matrix height (rows)
    int nElem = nx * ny;
    size_t nBytes = sizeof(float) * nElem;

    // Allocate host memory and initialize it with known constant values.
    float *host_matrix_a = (float *)malloc(nBytes);
    float *host_matrix_b = (float *)malloc(nBytes);
    float *host_matrix_res = (float *)malloc(nBytes);
    initialDataConstValue(host_matrix_a, nElem, 1);
    initialDataConstValue(host_matrix_b, nElem, 2);
    memset(host_matrix_res, 0, nBytes);

    // Compute the CPU reference sum and report its elapsed time.
    // NOTE(review): get_time() units come from common.h — the "ms" label
    // assumes it returns milliseconds; confirm against that header.
    time_cpu = get_time();
    sumMatrixCPU(host_matrix_a, host_matrix_b, host_matrix_res, nx, ny);
    std::cout << "CPU Sum matrix time:" << get_time() - time_cpu << "ms" << std::endl;
    // printMatrix(host_matrix_res, nx, ny,"host_matrix_res");

    // Allocate device memory.
    float *device_matrix_a = nullptr;
    float *device_matrix_b = nullptr;
    float *device_matrix_res = nullptr;
    CHECK(cudaMalloc((float **)&device_matrix_a, nBytes));
    CHECK(cudaMalloc((float **)&device_matrix_b, nBytes));
    CHECK(cudaMalloc((float **)&device_matrix_res, nBytes));
    // Copy the operands to the device and zero the result buffer.
    CHECK(cudaMemcpy(device_matrix_a, host_matrix_a, nBytes, cudaMemcpyHostToDevice));
    CHECK(cudaMemcpy(device_matrix_b, host_matrix_b, nBytes, cudaMemcpyHostToDevice));
    CHECK(cudaMemset(device_matrix_res, 0, nBytes));

    // Launch the kernel: a 2D grid sized by ceiling division so the grid
    // covers the matrix even when BLOCKLEN does not divide nx or ny.
    dim3 block_size(BLOCKLEN, BLOCKLEN);
    dim3 grid_size((nx - 1) / block_size.x + 1, (ny - 1) / block_size.y + 1);
    std::cout << "grid and block dim:\n"
              << "\tgrid.x:" << grid_size.x << "\tgrid.y:" << grid_size.y << "\n"
              << "\tblock.x:" << block_size.x << "\tblock.y:" << block_size.y << std::endl;
    // cuda warm up
    // for (size_t i = 0; i < 10; i++)
    //     sumMatrixGPU<<<grid_size, block_size>>>(device_matrix_a, device_matrix_b, device_matrix_res, nx, ny);

    time_gpu = get_time();
    sumMatrixGPU<<<grid_size, block_size>>>(device_matrix_a, device_matrix_b, device_matrix_res, nx, ny);
    // A launch does not return an error code directly: cudaGetLastError()
    // catches bad launch configurations, cudaDeviceSynchronize() surfaces
    // asynchronous execution errors (and is required for valid timing).
    CHECK(cudaGetLastError());
    CHECK(cudaDeviceSynchronize());
    std::cout << "GPU Sum matrix time:" << get_time() - time_gpu << "ms" << std::endl;

    // Copy the GPU result back to the host and compare with the CPU result.
    float *res_gpu_to_cpu = (float *)malloc(nBytes);
    memset(res_gpu_to_cpu, 0, nBytes);
    CHECK(cudaMemcpy(res_gpu_to_cpu, device_matrix_res, nBytes, cudaMemcpyDeviceToHost));
    // printMatrix(res_gpu_to_cpu, nx, ny,"res_gpu_to_cpu");
    checkResult(host_matrix_res, res_gpu_to_cpu, nElem);

    // Release device and host memory.
    CHECK(cudaFree(device_matrix_a));
    CHECK(cudaFree(device_matrix_b));
    CHECK(cudaFree(device_matrix_res));

    free(host_matrix_a);
    free(host_matrix_b);
    free(host_matrix_res);
    free(res_gpu_to_cpu);

    return 0;
}