#include <chrono>
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <cstring>

#include <cuda_runtime.h>

#define A(i, j) a[(i) * n + (j)]
#define B(i, j) b[(i) * n + (j)]

#define ceil(a, b) ((a) + (b) - 1) / (b)

// Fill the m x n row-major matrix `a` with pseudo-random values in [-1, 1).
// drand48() is never seeded here, so the sequence is deterministic across runs.
void random_matrix(int m, int n, float *a){
    for (int row = 0; row < m; ++row) {
        for (int col = 0; col < n; ++col) {
            #if 1
                a[row * n + col] = 2.0 * (float)drand48() - 1.0;
            #else
                a[row * n + col] = (col - row) % 3;  // fixed pattern, handy for debugging
            #endif
        }
    }
}

// Return the maximum absolute element-wise difference between the m x n
// row-major matrices `a` and `b`.  Mismatches larger than 0.5 are printed
// (capped at a handful of entries so a broken kernel does not flood stdout).
float compare_matrices(int m, int n, float *a, float *b){
    float max_diff = 0.f;
    int printed = 0;  // number of error lines emitted so far

    for (int i = 0; i < m; ++i) {
        for (int j = 0; j < n; ++j) {
            // fabsf, not abs(): the integer abs() overload would truncate
            // the float difference toward zero and hide real errors.
            float diff = fabsf(a[i * n + j] - b[i * n + j]);
            if (diff > max_diff) {
                max_diff = diff;
            }
            // diff is non-negative, so only the upper bound needs checking.
            if (diff > 0.5f && printed < 8) {
                printf("\n error: i %d  j %d diff %f  got %f  expect %f ",
                       i, j, diff, a[i * n + j], b[i * n + j]);
                ++printed;
            }
        }
    }

    return max_diff;
}

// Host reference SGEMM: C = A * B with A (M x K), B (K x N), C (M x N),
// all stored row-major.  Used as ground truth for the GPU kernel.
void cpu_sgemm(float *a_ptr, float *b_ptr, float *c_ptr, const int M, const int N , const int K){
    for (int row = 0; row < M; ++row) {
        const float *a_row = a_ptr + row * K;  // start of row `row` of A
        float *c_row = c_ptr + row * N;        // start of row `row` of C
        for (int col = 0; col < N; ++col) {
            float acc = 0.f;
            for (int kk = 0; kk < K; ++kk) {
                acc += a_row[kk] * b_ptr[kk * N + col];
            }
            c_row[col] = acc;
        }
    }
}

// Tiled SGEMM kernel: C = A * B with A (M x K), B (K x N), C (M x N),
// all row-major.  Each thread computes one output element C[y][x].
//
// Launch requirements: blockDim.x == blockDim.y == BLOCK_SIZE.
// Shared memory: 2 * BLOCK_SIZE^2 floats (static).
// Handles M/N/K that are NOT multiples of BLOCK_SIZE: out-of-range tile
// elements are loaded as 0 and the final store is bounds-guarded.
template<unsigned int BLOCK_SIZE>
__global__ void gpu_sgemm(float *a_ptr, float *b_ptr, float *c_ptr, const int M, const int N, const int K){
    // Global coordinates of this thread's output element.
    const int x = blockIdx.x * blockDim.x + threadIdx.x;  // column index into C / B
    const int y = blockIdx.y * blockDim.y + threadIdx.y;  // row index into C / A

    // One tile of A and one tile of B staged in shared memory per iteration.
    __shared__ float a_smem[BLOCK_SIZE][BLOCK_SIZE];
    __shared__ float b_smem[BLOCK_SIZE][BLOCK_SIZE];
    float temp = 0.f;

    // March the tile window along the K dimension.  Stride by the
    // compile-time BLOCK_SIZE (the smem layout already assumes it), not the
    // runtime blockDim.x.
    for (int i = 0; i < K; i += BLOCK_SIZE) {
        const int a_col = i + threadIdx.x;  // column of A loaded by this thread
        const int b_row = i + threadIdx.y;  // row of B loaded by this thread

        // Every thread performs both loads (padding out-of-range elements
        // with 0) so __syncthreads() below is never reached divergently.
        a_smem[threadIdx.y][threadIdx.x] =
            (y < M && a_col < K) ? a_ptr[y * K + a_col] : 0.f;
        b_smem[threadIdx.y][threadIdx.x] =
            (b_row < K && x < N) ? b_ptr[b_row * N + x] : 0.f;
        __syncthreads();  // tiles fully staged before use

        for (int k = 0; k < BLOCK_SIZE; ++k) {
            temp += a_smem[threadIdx.y][k] * b_smem[k][threadIdx.x];
        }
        __syncthreads();  // tiles fully consumed before the next overwrite
    }

    // Threads that fall past the matrix edge produce no output.
    if (y < M && x < N) {
        c_ptr[y * N + x] = temp;
    }
}

int main(){
    // Problem size: C (m x n) = A (m x k) * B (k x n), all row-major.
    const int m = 2 * 1024;
    const int n = 2 * 1024;
    const int k = 2 * 1024;
    // size_t avoids int overflow of the byte counts for larger problems.
    const size_t size_a = (size_t)m * k * sizeof(float);
    const size_t size_b = (size_t)k * n * sizeof(float);
    const size_t size_c = (size_t)m * n * sizeof(float);

    // Host buffers: two inputs, CPU reference output, and a host copy of
    // the device output.
    float *h_a = (float*)malloc(size_a);
    float *h_b = (float*)malloc(size_b);
    float *h_result = (float*)malloc(size_c);
    float *d_result = (float*)malloc(size_c);  // host-side buffer for the GPU result
    if (h_a == NULL || h_b == NULL || h_result == NULL || d_result == NULL) {
        printf("host allocation failed\n");
        return -1;
    }

    // Device buffers.
    float *d_a, *d_b, *d_c;
    if (cudaMalloc((void**)&d_a, size_a) != cudaSuccess ||
        cudaMalloc((void**)&d_b, size_b) != cudaSuccess ||
        cudaMalloc((void**)&d_c, size_c) != cudaSuccess) {
        printf("cudaMalloc failed: %s\n", cudaGetErrorString(cudaGetLastError()));
        return -1;
    }

    // Initialize host inputs; zero both result buffers.
    random_matrix(m, k, h_a);
    random_matrix(k, n, h_b);
    memset(h_result, 0, size_c);
    memset(d_result, 0, size_c);

    // Upload the inputs (blocking copies).
    if (cudaMemcpy(d_a, h_a, size_a, cudaMemcpyHostToDevice) != cudaSuccess ||
        cudaMemcpy(d_b, h_b, size_b, cudaMemcpyHostToDevice) != cudaSuccess) {
        printf("cudaMemcpy H2D failed: %s\n", cudaGetErrorString(cudaGetLastError()));
        return -1;
    }

    // CPU reference result.
    cpu_sgemm(h_a, h_b, h_result, m, n, k);

    // Launch configuration.  grid.x must cover the N columns and grid.y the
    // M rows — the other order is only harmless here because m == n.
    // The kernel's shared-memory tiles require a square block.
    constexpr int block_x = 16, block_y = 16;
    static_assert(block_x == block_y, "gpu_sgemm requires a square block");
    dim3 block(block_x, block_y);
    dim3 grid((n + block_x - 1) / block_x, (m + block_y - 1) / block_y);

    // Time the kernel with CUDA events.
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start);
    gpu_sgemm<block_x><<<grid, block>>>(d_a, d_b, d_c, m, n, k);
    cudaEventRecord(stop);

    // Launch-configuration errors only surface via cudaGetLastError().
    cudaError_t launch_err = cudaGetLastError();
    if (launch_err != cudaSuccess) {
        printf("kernel launch failed: %s\n", cudaGetErrorString(launch_err));
        return -1;
    }
    cudaEventSynchronize(stop);

    float elapsed_time;
    cudaEventElapsedTime(&elapsed_time, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    printf("cal_time: %f\n", elapsed_time);

    // Copy the device result back (blocking, so no extra sync needed).
    if (cudaMemcpy(d_result, d_c, size_c, cudaMemcpyDeviceToHost) != cudaSuccess) {
        printf("cudaMemcpy D2H failed: %s\n", cudaGetErrorString(cudaGetLastError()));
        return -1;
    }

    // Validate against the CPU reference.  compare_matrices returns a
    // non-negative max |diff|, so only the upper bound matters.
    float diff = compare_matrices(m, n, d_result, h_result);
    if (diff > 0.5f)
    {
        printf("diff too big !\n");
        return -1;
    }
    else
    {
        printf("right\n");
    }

    // Release host and device memory.
    free(h_a);
    free(h_b);
    free(h_result);
    free(d_result);

    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);

    return 0;
}