#include <chrono>
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <cstring>

#include <cuda_runtime.h>

// Row-major indexing helpers for a matrix with row length n.
// Plain text substitution: `a`, `b`, and `n` bind at the use site.
#define A(i, j) a[(i) * n + (j)]
#define B(i, j) b[(i) * n + (j)]

// Ceiling division. Fully parenthesized so the macro behaves as a single
// expression in any context; the original `((a) + (b) - 1) / (b)` lacked the
// outer parentheses and mis-associates when used as a divisor or operand,
// e.g. `x / ceil(a, b)` expanded to `x / (a + b - 1) / b`.
#define ceil(a, b) (((a) + (b) - 1) / (b))

// Fill an m x n row-major matrix with uniform random values in [-1, 1).
// Uses drand48(); call srand48() first for a reproducible sequence.
void random_matrix(int m, int n, float *a){
    for (int i = 0; i < m; ++i) {
        for (int j = 0; j < n; ++j) {
            #if 1
                // Float literals keep the whole expression in single
                // precision (the original 2.0 / 1.0 promoted to double).
                a[i * n + j] = 2.0f * (float)drand48() - 1.0f;
            #else
                a[i * n + j] = (j - i) % 3;  // fixed pattern, for debugging
            #endif
        }
    }
}

// Compare two m x n row-major matrices element-wise and return the largest
// absolute difference. Prints a diagnostic for the first element whose
// difference exceeds 0.5 (once only, to avoid flooding stdout — the original
// `printed` flag was never updated, so every offending element printed).
float compare_matrices(int m, int n, float *a, float *b){
    float max_diff = 0.f;
    bool printed = false;  // set after the first diagnostic line

    for (int i = 0; i < m; ++i) {
        for (int j = 0; j < n; ++j) {
            // fabsf keeps the computation in single precision; with only
            // <cstdio> included, plain `abs` could resolve to the integer
            // overload and truncate the difference to 0.
            float diff = fabsf(a[i * n + j] - b[i * n + j]);
            if (diff > max_diff) max_diff = diff;
            // max_diff is a max of absolute values, so the original
            // `max_diff < -0.5f` branch was unreachable and is dropped.
            if (!printed && diff > 0.5f) {
                printf("\n error: i %d  j %d diff %f  got %f  expect %f ",
                       i, j, max_diff, a[i * n + j], b[i * n + j]);
                printed = true;
            }
        }
    }

    return max_diff;
}

// Single-threaded reference SGEMM: C = A * B, all matrices row-major.
// A is M x K, B is K x N, C is M x N.
void cpu_sgemm(float *a_ptr, float *b_ptr, float *c_ptr, const int M, const int N , const int K){
    for (int row = 0; row < M; ++row) {
        const float *a_row = a_ptr + row * K;   // row of A
        float *c_row = c_ptr + row * N;         // matching row of C
        for (int col = 0; col < N; ++col) {
            const float *b_col = b_ptr + col;   // column of B, stride N
            float acc = 0.f;
            for (int kk = 0; kk < K; ++kk) {
                acc += a_row[kk] * b_col[kk * N];
            }
            c_row[col] = acc;
        }
    }
}

// Reinterpret a float pointer as a float4 lvalue for a 16-byte vector
// load/store. NOTE(review): requires `ptr` to be 16-byte aligned — holds
// here only while the leading dimensions and thread tiles are multiples of 4.
#define FLOAT4(ptr) *(reinterpret_cast<float4*>(ptr))

// Tiled SGEMM: C = A * B with A (M x K), B (K x N), C (M x N), row-major.
// Each block computes an M_PER_BLOCK x N_PER_BLOCK tile of C; each thread
// computes an M_PER_THREAD x N_PER_THREAD sub-tile (vs. one element per
// thread in the naive version). On top of v4, the A tile is stored
// TRANSPOSED in shared memory (staged through registers) so that a column
// of A can later be read with a single FLOAT4 load.
//
// Preconditions (no bounds checks are performed):
//   - M, N, K are exact multiples of M_PER_BLOCK / N_PER_BLOCK / K_PER_BLOCK
//   - blockDim == (N_PER_BLOCK / N_PER_THREAD, M_PER_BLOCK / M_PER_THREAD)
//   - M_PER_THREAD == N_PER_THREAD == K_PER_THREAD == 4: the FLOAT4 loads
//     and the hard-coded temp_trans[0..3] transpose below require exactly 4.
template<
    unsigned int M_PER_BLOCK,
    unsigned int N_PER_BLOCK,
    unsigned int K_PER_BLOCK,
    unsigned int M_PER_THREAD,
    unsigned int N_PER_THREAD,
    unsigned int K_PER_THREAD>
__global__ void gpu_sgemm(
    float *__restrict__ a_ptr,
    float *__restrict__ b_ptr,
    float *__restrict__ c_ptr,
    int M, int N, int K)
{
    // Thread indices within the block.
    const int tx = threadIdx.x;
    const int ty = threadIdx.y;

    // Start of this block's row band of A and column band of B.
    // blockIdx.y tiles the M dimension, blockIdx.x tiles the N dimension.
    float *a_block_start = a_ptr + blockIdx.y * M_PER_BLOCK * K;
    float *b_block_start = b_ptr + blockIdx.x * N_PER_BLOCK;

    // Shared-memory tiles. a_smem is the TRANSPOSE of the A tile
    // ([k][m] layout) so a column of A is contiguous.
    __shared__ float a_smem[M_PER_BLOCK][K_PER_BLOCK];
    __shared__ float b_smem[K_PER_BLOCK][N_PER_BLOCK];

    // Per-thread accumulator for its M_PER_THREAD x N_PER_THREAD sub-tile.
    float accum[M_PER_THREAD][N_PER_THREAD] = {0.f};

    // Register staging buffer used to transpose the A tile on the way in.
    float temp_trans[K_PER_THREAD] = {0.f};

    // March the K dimension one K_PER_BLOCK slab at a time.
    for (int s = 0; s < K; s += K_PER_BLOCK) {

        // Load the A slab into shared memory, transposing via registers.
        for (int i = 0; i < M_PER_THREAD; ++i) {
            // FLOAT4(&a_smem[ty*M_PER_THREAD + i][tx*K_PER_THREAD]) = 
            //     FLOAT4(&a_block_start[(ty*M_PER_THREAD + i)*K + tx*K_PER_THREAD + s]);

            FLOAT4(&temp_trans[0]) = FLOAT4(&a_block_start[(ty * M_PER_THREAD + i) * K + tx * K_PER_THREAD + s]);
            // Scatter the 4 staged values into transposed [k][m] positions.
            // NOTE(review): indices 0..3 hard-code K_PER_THREAD == 4.
            a_smem[tx * K_PER_THREAD][ty * M_PER_THREAD + i] = temp_trans[0];
            a_smem[tx * K_PER_THREAD + 1][ty * M_PER_THREAD + i] = temp_trans[1];
            a_smem[tx * K_PER_THREAD + 2][ty * M_PER_THREAD + i] = temp_trans[2];
            a_smem[tx * K_PER_THREAD + 3][ty * M_PER_THREAD + i] = temp_trans[3];
        }

        // Load the B slab into shared memory (natural [k][n] layout).
        for (int i = 0; i < K_PER_THREAD; ++i) {
            FLOAT4(&b_smem[ty*K_PER_THREAD + i][tx*N_PER_THREAD]) = 
                FLOAT4(&b_block_start[(ty*K_PER_THREAD + i + s)*N + tx*N_PER_THREAD]);
        }
        __syncthreads();  // tiles fully written before anyone reads them

        // Compute core: outer-product formulation — for each k, take a
        // column fragment of A and a row fragment of B.
        for (int k = 0; k < K_PER_BLOCK; ++k) {
            float a_reg[M_PER_THREAD];
            float b_reg[N_PER_THREAD];

            // Column fragment of A (contiguous thanks to the transpose).
            FLOAT4(&a_reg[0]) = FLOAT4(&a_smem[k][ty * M_PER_THREAD]);

            // Row fragment of B.
            FLOAT4(&b_reg[0]) = FLOAT4(&b_smem[k][tx*N_PER_THREAD]);

            // Rank-1 update of the per-thread accumulator.
            for (int i = 0; i < M_PER_THREAD; ++i) {
                for (int j = 0; j < N_PER_THREAD; ++j) {
                    accum[i][j] += a_reg[i] * b_reg[j];
                }
            }
        }
        __syncthreads();  // done reading before the next slab overwrites
    }

    // Write the accumulated sub-tile back to global memory.
    float *c_block_start = c_ptr + blockIdx.y*M_PER_BLOCK*N + blockIdx.x*N_PER_BLOCK;
    for (int i = 0; i < M_PER_THREAD; ++i) {
        FLOAT4(&c_block_start[(ty*M_PER_THREAD + i)*N + tx*N_PER_THREAD]) = 
            FLOAT4(&accum[i][0]);
    }
}

// Benchmark driver: run the tiled GPU SGEMM on 2048^3, time it with CUDA
// events, and validate against the single-threaded CPU reference.
int main(){
    // 1. Matrix dimensions: C(m x n) = A(m x k) * B(k x n)
    const int m = 2 * 1024;
    const int n = 2 * 1024;
    const int k = 2 * 1024;
    const int size_a = m * k * sizeof(float);
    const int size_b = k * n * sizeof(float);
    const int size_c = m * n * sizeof(float);

    // 2. Allocate buffers
    // 2.1 host side (d_result is the host copy of the device result)
    float *h_a = (float*)malloc(size_a);
    float *h_b = (float*)malloc(size_b);
    float *h_result = (float*)malloc(size_c);
    float *d_result = (float*)malloc(size_c);

    // 2.2 device side
    float *d_a, *d_b, *d_c;
    cudaMalloc((void**)&d_a, size_a);
    cudaMalloc((void**)&d_b, size_b);
    cudaMalloc((void**)&d_c, size_c);

    // 3. Initialize host matrices
    random_matrix(m, k, h_a);
    random_matrix(k, n, h_b);
    memset(h_result, 0, size_c);
    memset(d_result, 0, size_c);

    // 4. Copy inputs host -> device
    cudaMemcpy(d_a, h_a, size_a, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, size_b, cudaMemcpyHostToDevice);

    // 5. CPU reference result
    cpu_sgemm(h_a, h_b, h_result, m, n, k);

    // 6. GPU computation
    // 6.1 Launch configuration: each block computes a 64x64 tile of C with
    //     16x16 threads, each thread computing a 4x4 sub-tile.
    constexpr int M_PER_BLOCK = 64;
    constexpr int N_PER_BLOCK = 64;
    constexpr int K_PER_BLOCK = 64;

    constexpr int M_PER_THREAD = 4;
    constexpr int N_PER_THREAD = 4;
    constexpr int K_PER_THREAD = 4;
    // Derive the block shape from the tile constants instead of hard-coding
    // (16, 16), so the two cannot drift apart.
    dim3 block(N_PER_BLOCK / N_PER_THREAD, M_PER_BLOCK / M_PER_THREAD);
    // BUG FIX: the kernel maps blockIdx.x to the N (column) dimension and
    // blockIdx.y to the M (row) dimension, so grid.x must tile n and grid.y
    // must tile m. The original had them swapped — harmless only because
    // m == n here.
    dim3 grid(ceil(n, N_PER_BLOCK), ceil(m, M_PER_BLOCK));

    // 6.2 Launch the kernel and time it with CUDA events
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start);

    gpu_sgemm<M_PER_BLOCK, N_PER_BLOCK, K_PER_BLOCK, M_PER_THREAD, N_PER_THREAD, K_PER_THREAD><<<grid, block>>>(d_a, d_b, d_c, m, n, k);

    // Kernel launches are asynchronous and return no status directly;
    // surface launch-configuration errors explicitly.
    cudaError_t launch_err = cudaGetLastError();
    if (launch_err != cudaSuccess) {
        printf("kernel launch failed: %s\n", cudaGetErrorString(launch_err));
        exit(-1);
    }

    cudaEventRecord(stop);
    cudaEventSynchronize(stop);

    float elapsed_time;
    cudaEventElapsedTime(&elapsed_time, start, stop);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    printf("cal_time: %f\n", elapsed_time);

    // 7. Copy the device result back to the host
    cudaMemcpy(d_result, d_c, size_c, cudaMemcpyDeviceToHost);

    // 8. Validate against the CPU reference. compare_matrices returns a max
    // of absolute differences, so it is never negative — one-sided check.
    float diff = compare_matrices(m, n, d_result, h_result);
    if (diff > 0.5f)
    {
        printf("diff too big !\n");
        exit(-1);
    }
    else
    {
        printf("right\n");
    }

    // 9. Release memory
    free(h_a);
    free(h_b);
    free(h_result);
    free(d_result);

    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);

    return 0;
}