#include <iostream>
#include <cuda_runtime.h>
#include "../common/common.h"
#define DEVICENUM 0
#define BLOCKLEN 32

// 计算次数 m*n*k次，计算复杂度为O(N^3)
// CPU reference implementation of C += A * B (row-major), where A is MxK,
// B is KxN and C is MxN. Performs M*N*K multiply-adds, i.e. O(N^3) work.
// NOTE: accumulates into matrix_res, so the caller must zero it beforehand.
__host__ void multiMatrixCPU(float *matrix_a, float *matrix_b, float *matrix_res, int M, int N, int K)
{
    for (int row = 0; row < M; ++row)
    {
        const float *aRow = matrix_a + row * K; // row `row` of A
        float *cRow = matrix_res + row * N;     // row `row` of C
        for (int col = 0; col < N; ++col)
        {
            // Inner dot product accumulates directly into C, matching the
            // original term-by-term summation order exactly.
            for (int idx = 0; idx < K; ++idx)
                cRow[col] += aRow[idx] * matrix_b[col + idx * N];
        }
    }
}

// Naive GEMM kernel: C = A * B, with A (MxK), B (KxN), C (MxN), all row-major.
// Expects a 2D launch where grid x covers the N columns and grid y covers the
// M rows; one thread computes one element of C.
__global__ void multiMatrixGPUNormal(float *matrix_a, float *matrix_b, float *matrix_res, int M, int N, int K)
{
    int nCol = threadIdx.x + blockDim.x * blockIdx.x; // global column index (grid x axis)
    int nRow = threadIdx.y + blockDim.y * blockIdx.y; // global row index (grid y axis)

    // Bounds check BEFORE any global-memory access: the original guarded only
    // the final store, so threads past the matrix edge still executed the
    // dot-product loop and read out of bounds from matrix_a / matrix_b.
    if (nCol >= N || nRow >= M)
        return;

    float fCval = 0.0f;
    for (int i = 0; i < K; i++)
        fCval += matrix_a[i + nRow * K] * matrix_b[nCol + i * N];

    matrix_res[nCol + nRow * N] = fCval;
}

// Tiled GEMM kernel using shared memory: C = A * B, with A (MxK), B (KxN),
// C (MxN), all row-major. Each block computes one BLOCKLEN x BLOCKLEN tile of
// C by marching BLOCKLEN-wide tiles of A and B across the K dimension.
// Precondition: must be launched with blockDim.x == blockDim.y == BLOCKLEN,
// since threadIdx.x/y are used directly as tile coordinates.
__global__ void multiMatrixSharedMemGPU(float *matrix_a, float *matrix_b, float *matrix_res, int M, int N, int K)
{
    int nCol = threadIdx.x + blockDim.x * blockIdx.x; // global column index (grid x axis)
    int nRow = threadIdx.y + blockDim.y * blockIdx.y; // global row index (grid y axis)

    float fCval = 0.0f;
    // Statically allocated shared-memory tiles for the current A and B blocks.
    __shared__ float mtrixABlock[BLOCKLEN][BLOCKLEN];
    __shared__ float mtrixBBlock[BLOCKLEN][BLOCKLEN];
    int nIter = (K + BLOCKLEN - 1) / BLOCKLEN; // number of tiles along K; standard ceil-division idiom

    // Multiply the corresponding tile pairs of A and B and accumulate the partial products.
    for (int i = 0; i < nIter; i++)
    {
        // Load from global into shared memory. The unguarded form below is
        // buggy: when K (or M/N) is not a multiple of BLOCKLEN, the tile
        // indices run past the matrix edges.
        // mtrixABlock[threadIdx.y][threadIdx.x] = matrix_a[(threadIdx.x + i * BLOCKLEN) + nRow * K];
        // mtrixBBlock[threadIdx.y][threadIdx.x] = matrix_b[nCol + (threadIdx.y + i * BLOCKLEN) * N];
        // Out-of-range tile elements are zero-filled so they contribute
        // nothing to the dot product below.
        if (threadIdx.x + i * BLOCKLEN < K && nRow < M)
            mtrixABlock[threadIdx.y][threadIdx.x] = matrix_a[(threadIdx.x + i * BLOCKLEN) + nRow * K];
        else
            mtrixABlock[threadIdx.y][threadIdx.x] = 0.0f;

        if (threadIdx.y + i * BLOCKLEN < K && nCol < N)
            mtrixBBlock[threadIdx.y][threadIdx.x] = matrix_b[nCol + (threadIdx.y + i * BLOCKLEN) * N];
        else
            mtrixBBlock[threadIdx.y][threadIdx.x] = 0.0f;

        // Barrier: all threads in the block must finish loading before any
        // thread reads the tiles. Reached by every thread (not divergent).
        __syncthreads();

        // Partial dot product over this tile pair.
        for (int l = 0; l < BLOCKLEN; l++)
            fCval += mtrixABlock[threadIdx.y][l] * mtrixBBlock[l][threadIdx.x];
        // Barrier again so no thread overwrites the tiles (next iteration)
        // while another thread is still reading them.
        __syncthreads();
    }
    if (nCol < N && nRow < M)
        matrix_res[nCol + nRow * N] = fCval;
}

// Driver: computes C = A * B on the CPU, then with the naive and the
// shared-memory GPU kernels, timing each and comparing results against the
// CPU reference.
int main(int argc, char **argv)
{
    CHECK(cudaSetDevice(DEVICENUM)); // was unchecked: a bad device id would silently break every later call
    double time_cpu, time_gpu_normal, time_gpu_sharedMem;
    int M = 1 << 8; // 8;
    int N = 1 << 8; // 6;
    int K = 1 << 8; // 5;
    // int M = 1 << 15;
    // int N = 1 << 15;
    // int K = 1 << 15;

    // Allocate host memory and initialize the input data.
    float *host_matrix_a = (float *)malloc(sizeof(float) * M * K);
    float *host_matrix_b = (float *)malloc(sizeof(float) * K * N);
    float *host_matrix_res = (float *)malloc(sizeof(float) * M * N);
    initialDataConstValue(host_matrix_a, M * K, 1);
    initialDataConstValue(host_matrix_b, K * N, 2);
    memset(host_matrix_res, 0, sizeof(float) * M * N);

    // CPU reference multiplication, timed.
    time_cpu = get_time();
    multiMatrixCPU(host_matrix_a, host_matrix_b, host_matrix_res, M, N, K);
    std::cout << "CPU Sum matrix time:" << get_time() - time_cpu << "ms" << std::endl;

    // Allocate device memory.
    float *device_matrix_a = nullptr;
    float *device_matrix_b = nullptr;
    float *device_matrix_res = nullptr;
    CHECK(cudaMalloc((float **)&device_matrix_a, sizeof(float) * M * K));
    CHECK(cudaMalloc((float **)&device_matrix_b, sizeof(float) * K * N));
    CHECK(cudaMalloc((float **)&device_matrix_res, sizeof(float) * M * N));
    // Copy inputs to the device and zero the result buffer.
    CHECK(cudaMemcpy(device_matrix_a, host_matrix_a, sizeof(float) * M * K, cudaMemcpyHostToDevice));
    CHECK(cudaMemcpy(device_matrix_b, host_matrix_b, sizeof(float) * K * N, cudaMemcpyHostToDevice));
    CHECK(cudaMemset(device_matrix_res, 0, sizeof(float) * M * N));

    // Launch configuration: grid x covers the N columns, grid y the M rows.
    dim3 block_size(BLOCKLEN, BLOCKLEN);
    dim3 grid_size((N - 1) / block_size.x + 1, (M - 1) / block_size.y + 1);
    std::cout << "grid and block dim:\n"
              << "\tgrid.x:" << grid_size.x << "\tgrid.y:" << grid_size.y << "\n"
              << "\tblock.x:" << block_size.x << "\tblock.y:" << block_size.y << std::endl;
    // cuda warm up
    // for (size_t i = 0; i < 10; i++)
    //     multiMatrixGPUNormal<<<grid_size, block_size>>>(device_matrix_a, device_matrix_b, device_matrix_res, M, N, K);

    // Naive kernel, timed. Kernel launches return no status themselves, so
    // check cudaGetLastError() for launch-config errors and rely on the
    // synchronize (which also closes the timing window) for execution errors.
    time_gpu_normal = get_time();
    multiMatrixGPUNormal<<<grid_size, block_size>>>(device_matrix_a, device_matrix_b, device_matrix_res, M, N, K);
    CHECK(cudaGetLastError());
    CHECK(cudaDeviceSynchronize());
    std::cout << "GPU matrix multiply normal time:" << get_time() - time_gpu_normal << "ms" << std::endl;

    CHECK(cudaMemset(device_matrix_res, 0, sizeof(float) * M * N)); // discard multiMatrixGPUNormal's result

    // Shared-memory tiled kernel, timed the same way.
    time_gpu_sharedMem = get_time();
    multiMatrixSharedMemGPU<<<grid_size, block_size>>>(device_matrix_a, device_matrix_b, device_matrix_res, M, N, K);
    CHECK(cudaGetLastError());
    CHECK(cudaDeviceSynchronize());
    std::cout << "GPU matrix multiply shared memory time:" << get_time() - time_gpu_sharedMem << "ms" << std::endl;

    // Copy the GPU result back and compare it against the CPU reference.
    float *res_gpu_to_cpu = (float *)malloc(sizeof(float) * M * N);
    memset(res_gpu_to_cpu, 0, sizeof(float) * M * N);
    CHECK(cudaMemcpy(res_gpu_to_cpu, device_matrix_res, sizeof(float) * M * N, cudaMemcpyDeviceToHost));
    checkResult(host_matrix_res, res_gpu_to_cpu, M * N);
    // printMatrix(host_matrix_res, M, N, "host_matrix_res");
    // std::cout << "*****************************" << std::endl;
    // printMatrix(res_gpu_to_cpu, M, N, "res_gpu_to_cpu");

    // Release device and host memory (cudaFree was previously unchecked).
    CHECK(cudaFree(device_matrix_a));
    CHECK(cudaFree(device_matrix_b));
    CHECK(cudaFree(device_matrix_res));

    free(host_matrix_a);
    free(host_matrix_b);
    free(host_matrix_res);
    free(res_gpu_to_cpu);

    return 0;
}