#include <iostream>
#include <cuda_runtime.h>
#include "../common/common.h"
#define DEVICENUM 0
#define BLOCKLEN 32

// Host reference GEMM: matrix_res += matrix_a * matrix_b.
// matrix_a is M x K, matrix_b is K x N, matrix_res is M x N, all row-major.
// Performs M*N*K multiply-adds, i.e. O(N^3) complexity.
// NOTE: accumulates into matrix_res, so the caller must zero it first.
// Loop order is i-l-j (instead of the naive i-j-l): for each (i, l) the
// innermost j-loop walks matrix_b's row l and matrix_res's row i
// contiguously, which is much more cache-friendly than striding through
// matrix_b column-wise. Per output element the sum over l is still taken
// in ascending order, so the floating-point result is bit-identical.
__host__ void multiMatrixCPU(float *matrix_a, float *matrix_b, float *matrix_res, int M, int N, int K)
{
    for (int i = 0; i < M; i++)
        for (int l = 0; l < K; l++)
        {
            // Hoist the matrix_a element reused across the whole row of B.
            float fAval = matrix_a[l + i * K];
            for (int j = 0; j < N; j++)
                matrix_res[j + i * N] += fAval * matrix_b[j + l * N];
        }
}

// Naive GPU GEMM: matrix_res = matrix_a * matrix_b, one thread per output
// element. matrix_a is M x K, matrix_b is K x N, matrix_res is M x N
// (row-major). Expects a 2D launch with grid covering at least N columns
// (x) and M rows (y); extra threads exit early.
__global__ void multiMatrixGPUNormal(float *matrix_a, float *matrix_b, float *matrix_res, int M, int N, int K)
{
    int nCol = threadIdx.x + blockDim.x * blockIdx.x; // global x index (column, N dimension)
    int nRow = threadIdx.y + blockDim.y * blockIdx.y; // global y index (row, M dimension)

    // Bounds check must come BEFORE the dot-product loop: the original code
    // guarded only the store, so out-of-range threads still read past the
    // ends of matrix_a / matrix_b inside the loop (out-of-bounds reads).
    if (nCol >= N || nRow >= M)
        return;

    float fCval = 0.0f;
    for (int i = 0; i < K; i++)
        fCval += matrix_a[i + nRow * K] * matrix_b[nCol + i * N];

    matrix_res[nCol + nRow * N] = fCval;
}

// Driver: computes C = A * B on both CPU and GPU, times each, and compares
// the results. A is M x K, B is K x N, C is M x N.
int main(int argc, char **argv)
{
    CHECK(cudaSetDevice(DEVICENUM));
    double time_cpu, time_gpu;
    int M = 1 << 8;
    int N = 1 << 8;
    int K = 1 << 8;

    // Allocate and initialize host buffers (A filled with 1s, B with 2s).
    float *host_matrix_a = (float *)malloc(sizeof(float) * M * K);
    float *host_matrix_b = (float *)malloc(sizeof(float) * K * N);
    float *host_matrix_res = (float *)malloc(sizeof(float) * M * N);
    initialDataConstValue(host_matrix_a, M * K, 1);
    initialDataConstValue(host_matrix_b, K * N, 2);
    // CPU reference accumulates into the result, so it must start zeroed.
    memset(host_matrix_res, 0, sizeof(float) * M * N);

    // CPU matrix multiply (reference result) with timing.
    time_cpu = get_time();
    multiMatrixCPU(host_matrix_a, host_matrix_b, host_matrix_res, M, N, K);
    std::cout << "CPU Sum matrix time:" << get_time() - time_cpu << "ms" << std::endl;

    // Allocate device buffers.
    float *device_matrix_a = nullptr;
    float *device_matrix_b = nullptr;
    float *device_matrix_res = nullptr;
    CHECK(cudaMalloc((float **)&device_matrix_a, sizeof(float) * M * K));
    CHECK(cudaMalloc((float **)&device_matrix_b, sizeof(float) * K * N));
    CHECK(cudaMalloc((float **)&device_matrix_res, sizeof(float) * M * N));
    // Copy inputs to the device and zero the device result buffer.
    CHECK(cudaMemcpy(device_matrix_a, host_matrix_a, sizeof(float) * M * K, cudaMemcpyHostToDevice));
    CHECK(cudaMemcpy(device_matrix_b, host_matrix_b, sizeof(float) * K * N, cudaMemcpyHostToDevice));
    CHECK(cudaMemset(device_matrix_res, 0, sizeof(float) * M * N));

    // Launch config: one thread per output element, 2D grid covering N x M
    // (ceil-division so partial tiles at the edges are included).
    dim3 block_size(BLOCKLEN, BLOCKLEN);
    dim3 grid_size((N - 1) / block_size.x + 1, (M - 1) / block_size.y + 1);
    std::cout << "grid and block dim:\n"
              << "\tgrid.x:" << grid_size.x << "\tgrid.y:" << grid_size.y << "\n"
              << "\tblock.x:" << block_size.x << "\tblock.y:" << block_size.y << std::endl;
    // cuda warm up
    // for (size_t i = 0; i < 10; i++)
    //     multiMatrixGPUNormal<<<grid_size, block_size>>>(device_matrix_a, device_matrix_b, device_matrix_res, M, N, K);

    time_gpu = get_time();
    multiMatrixGPUNormal<<<grid_size, block_size>>>(device_matrix_a, device_matrix_b, device_matrix_res, M, N, K);
    // Kernel launches return no status: check launch-config errors explicitly,
    // then synchronize to surface asynchronous execution errors and to make
    // the timing below measure the kernel rather than just the launch.
    CHECK(cudaGetLastError());
    CHECK(cudaDeviceSynchronize());
    std::cout << "GPU matrix multiply time:" << get_time() - time_gpu << "ms" << std::endl;

    // Copy the GPU result back and compare it against the CPU reference.
    float *res_gpu_to_cpu = (float *)malloc(sizeof(float) * M * N);
    memset(res_gpu_to_cpu, 0, sizeof(float) * M * N);
    CHECK(cudaMemcpy(res_gpu_to_cpu, device_matrix_res, sizeof(float) * M * N, cudaMemcpyDeviceToHost));
    checkResult(host_matrix_res, res_gpu_to_cpu, M * N);
    // printMatrix(host_matrix_res, M, N,"host_matrix_res");
    // std::cout << "*****************************" << std::endl;
    // printMatrix(res_gpu_to_cpu, M, N,"res_gpu_to_cpu");

    // Release device and host memory.
    CHECK(cudaFree(device_matrix_a));
    CHECK(cudaFree(device_matrix_b));
    CHECK(cudaFree(device_matrix_res));

    free(host_matrix_a);
    free(host_matrix_b);
    free(host_matrix_res);
    free(res_gpu_to_cpu);

    return 0;
}