#include <chrono>
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <cstring>

#include <cuda_runtime.h>

// Row-major element accessors; expect `a`/`b` and the column count `n`
// to be in scope at the point of use (textual substitution).
#define A(i, j) a[(i) * n + (j)]
#define B(i, j) b[(i) * n + (j)]

// Integer ceiling division. Fully parenthesized so the expansion stays
// correct inside larger expressions (the unparenthesized original broke
// e.g. `x / ceil(a, b)`). NOTE(review): the name shadows cmath's ceil();
// kept for caller compatibility, but a rename (e.g. CEIL_DIV) would be safer.
#define ceil(a, b) (((a) + (b) - 1) / (b))

// Fill the m x n row-major matrix `a` with uniform random values in [-1, 1).
// drand48() starts from a fixed default seed, so the sequence is reproducible
// unless srand48() is called beforehand.
void random_matrix(int m, int n, float *a){
    for (int i = 0; i < m; ++i) {
        for (int j = 0; j < n; ++j) {
            #if 1
                // Float literals (2.0f/1.0f) avoid a silent promotion of the
                // whole expression to double arithmetic.
                a[i * n + j] = 2.0f * (float)drand48() - 1.0f;
            #else
                a[i * n + j] = (j - i) % 3;  // fixed pattern, useful for debugging
            #endif
        }
    }
}

// Compare two m x n row-major matrices element-wise.
// Prints the first element whose absolute difference exceeds 0.5 (the
// original `printed` flag was never cleared, flooding output), and returns
// the maximum absolute difference over all elements.
float compare_matrices(int m, int n, float *a, float *b){
    float max_diff = 0.f;
    bool report = true;  // print only the first offending element

    for (int i = 0; i < m; ++i) {
        for (int j = 0; j < n; ++j) {
            // fabsf: plain abs() can resolve to the integer overload and
            // truncate sub-1.0 differences to zero.
            float diff = fabsf(a[i * n + j] - b[i * n + j]);
            if (diff > max_diff) max_diff = diff;
            // Test the current diff, not the accumulated max, so the report
            // points at an actual mismatching element.
            if (report && diff > 0.5f) {
                printf("\n error: i %d  j %d diff %f  got %f  expect %f ", i, j, diff, a[i * n + j], b[i * n + j]);
                report = false;
            }
        }
    }

    return max_diff;
}

// Reference SGEMM on the host: C = A * B, with A (M x K), B (K x N) and
// C (M x N) all row-major. The inner summation order (k ascending) matches
// the original so float rounding is identical.
void cpu_sgemm(float *a_ptr, float *b_ptr, float *c_ptr, const int M, const int N , const int K){
    for (int row = 0; row < M; ++row) {
        const float *a_row = a_ptr + row * K;  // start of row `row` of A
        float *c_row = c_ptr + row * N;        // start of row `row` of C
        for (int col = 0; col < N; ++col) {
            float acc = 0.f;
            for (int p = 0; p < K; ++p) {
                acc += a_row[p] * b_ptr[p * N + col];
            }
            c_row[col] = acc;
        }
    }
}

// Reinterpret a float lvalue as a float4 to issue one 16-byte vector
// load/store. The underlying address must be 16-byte aligned.
#define FETCH_FLOAT4(pointer) (reinterpret_cast<float4 *>(&(pointer))[0])

// Double-buffered tiled SGEMM kernel: C = A * B for row-major matrices
// A (M x K), B (K x N), C (M x N).
//
// Launch contract (no bounds checks are performed):
//   - blockDim = (BLOCK_SIZE_N / THREAD_SIZE_X, BLOCK_SIZE_M / THREAD_SIZE_Y)
//   - gridDim  = (N / BLOCK_SIZE_N, M / BLOCK_SIZE_M)
//   - M, N, K must be exact multiples of the corresponding tile sizes.
//   - BLOCK_SIZE_K and BLOCK_SIZE_N must be multiples of 4 (float4 loads).
//   - The hard-coded FETCH_FLOAT4(reg_a[4]) / FETCH_FLOAT4(reg_b[4]) reads
//     below require THREAD_SIZE_Y == 8 and THREAD_SIZE_X == 8.
// Shared memory: 2 * BLOCK_SIZE_K * (BLOCK_SIZE_M + BLOCK_SIZE_N) floats
// (two buffers each for the A and B tiles).
template <const int BLOCK_SIZE_M,  // height of the C tile computed by each thread block
          const int BLOCK_SIZE_N,  // width of the C tile computed by each thread block
          const int BLOCK_SIZE_K,  // K-dimension depth of the A/B tiles staged in shared memory
          const int THREAD_SIZE_Y, // height of the C sub-tile computed by each thread
          const int THREAD_SIZE_X, // width of the C sub-tile computed by each thread
          const bool ENABLE_DOUBLE_BUFFER> // NOTE(review): unused — the kernel always double-buffers
__global__ void cuda_sgemm(float *A_ptr, float *B_ptr, float *C_ptr, const int M, const int N, const int K)
{
    // Block index
    int bx = blockIdx.x;
    int by = blockIdx.y;

    // Thread index
    int tx = threadIdx.x;
    int ty = threadIdx.y;

    // Flat thread id within this block.
    const int tid = ty * blockDim.x + tx;
    // Double-buffered tiles. The A tile is stored transposed ([K][M]) so the
    // compute loop can fetch THREAD_SIZE_Y consecutive floats per k-step.
    __shared__ float a_shared[2][BLOCK_SIZE_K][BLOCK_SIZE_M];
    __shared__ float b_shared[2][BLOCK_SIZE_K][BLOCK_SIZE_N];

    float accum[THREAD_SIZE_Y][THREAD_SIZE_X] = {0.f}; // per-thread C sub-tile accumulator
    float reg_a[THREAD_SIZE_Y] = {0.f};                // A fragment for the current k-step
    float reg_b[THREAD_SIZE_X] = {0.f};                // B fragment for the current k-step
    float ldg_a_reg[4] = {0.f};                        // staging registers for the float4 A load

    // Top-left corners of this block's A row-panel and B column-panel.
    float *A_ptr_start = A_ptr + blockIdx.y * BLOCK_SIZE_M * K;
    float *B_ptr_start = B_ptr + blockIdx.x * BLOCK_SIZE_N;

    // Threads needed per tile row when each thread loads one float4.
    const int A_tile_thread_per_row = BLOCK_SIZE_K / 4; // e.g. 2 when BLOCK_SIZE_K = 8
    const int B_tile_thread_per_row = BLOCK_SIZE_N / 4; // e.g. 32 when BLOCK_SIZE_N = 128

    // (row, float4-column) assigned to this thread inside the A and B tiles.
    const int A_tile_tid_x = tid % A_tile_thread_per_row;
    const int A_tile_tid_y = tid / A_tile_thread_per_row;
    const int B_tile_tid_x = tid % B_tile_thread_per_row;
    const int B_tile_tid_y = tid / B_tile_thread_per_row;

    // Prologue: stage the first K-tile into buffer 0. The A fragment goes
    // through registers because it is transposed on its way into shared memory.
    FETCH_FLOAT4(ldg_a_reg[0]) = FETCH_FLOAT4(A_ptr_start[K * A_tile_tid_y + A_tile_tid_x * 4]);
    a_shared[0][A_tile_tid_x * 4][A_tile_tid_y] = ldg_a_reg[0];
    a_shared[0][A_tile_tid_x * 4 + 1][A_tile_tid_y] = ldg_a_reg[1];
    a_shared[0][A_tile_tid_x * 4 + 2][A_tile_tid_y] = ldg_a_reg[2];
    a_shared[0][A_tile_tid_x * 4 + 3][A_tile_tid_y] = ldg_a_reg[3];
    // The B tile keeps its layout, so it can be copied straight to shared memory.
    FETCH_FLOAT4(b_shared[0][B_tile_tid_y][B_tile_tid_x * 4]) = FETCH_FLOAT4(B_ptr_start[N * B_tile_tid_y + B_tile_tid_x * 4]);
    __syncthreads();
    int write_stage_idx = 1;
    // Pipeline: each iteration stages K-tile `s` into the write buffer, then
    // computes on the previously loaded buffer. The single __syncthreads() at
    // the end of the iteration both publishes the new tile and ensures all
    // reads of the buffer about to be overwritten have completed.
    for (int s = BLOCK_SIZE_K; s < K; s += BLOCK_SIZE_K)
    {
        FETCH_FLOAT4(ldg_a_reg[0]) = FETCH_FLOAT4(A_ptr_start[K * A_tile_tid_y + A_tile_tid_x * 4 + s]);
        a_shared[write_stage_idx][A_tile_tid_x * 4][A_tile_tid_y] = ldg_a_reg[0];
        a_shared[write_stage_idx][A_tile_tid_x * 4 + 1][A_tile_tid_y] = ldg_a_reg[1];
        a_shared[write_stage_idx][A_tile_tid_x * 4 + 2][A_tile_tid_y] = ldg_a_reg[2];
        a_shared[write_stage_idx][A_tile_tid_x * 4 + 3][A_tile_tid_y] = ldg_a_reg[3];
        FETCH_FLOAT4(b_shared[write_stage_idx][B_tile_tid_y][B_tile_tid_x * 4]) = FETCH_FLOAT4(B_ptr_start[N * (B_tile_tid_y + s) + B_tile_tid_x * 4]);
        // Flip: write_stage_idx now names the buffer loaded on the PREVIOUS
        // iteration (the read buffer for the compute loop below).
        write_stage_idx = write_stage_idx ^ 1;
        for (int k = 0; k < BLOCK_SIZE_K; k++)
        {
            // Fetch this thread's 8 A values and 8 B values for step k
            // (two float4 reads each — hence the THREAD_SIZE == 8 requirement).
            FETCH_FLOAT4(reg_a[0]) = FETCH_FLOAT4(a_shared[write_stage_idx][k][ty * THREAD_SIZE_Y]);
            FETCH_FLOAT4(reg_a[4]) = FETCH_FLOAT4(a_shared[write_stage_idx][k][ty * THREAD_SIZE_Y + 4]);
            FETCH_FLOAT4(reg_b[0]) = FETCH_FLOAT4(b_shared[write_stage_idx][k][tx * THREAD_SIZE_X]);
            FETCH_FLOAT4(reg_b[4]) = FETCH_FLOAT4(b_shared[write_stage_idx][k][tx * THREAD_SIZE_X + 4]);

            // Outer product of the two fragments into the accumulator.
            for (int i = 0; i < THREAD_SIZE_Y; i++)
                for (int j = 0; j < THREAD_SIZE_X; j++)
                    accum[i][j] += reg_a[i] * reg_b[j];
        }
        __syncthreads();
    }
    // Epilogue: consume the last staged tile (flip once more to point at it).
    write_stage_idx = write_stage_idx ^ 1;
    for (int k = 0; k < BLOCK_SIZE_K; k++)
    {
        FETCH_FLOAT4(reg_a[0]) = FETCH_FLOAT4(a_shared[write_stage_idx][k][ty * THREAD_SIZE_Y]);
        FETCH_FLOAT4(reg_a[4]) = FETCH_FLOAT4(a_shared[write_stage_idx][k][ty * THREAD_SIZE_Y + 4]);
        FETCH_FLOAT4(reg_b[0]) = FETCH_FLOAT4(b_shared[write_stage_idx][k][tx * THREAD_SIZE_X]);
        FETCH_FLOAT4(reg_b[4]) = FETCH_FLOAT4(b_shared[write_stage_idx][k][tx * THREAD_SIZE_X + 4]);

        for (int i = 0; i < THREAD_SIZE_Y; i++)
            for (int j = 0; j < THREAD_SIZE_X; j++)
                accum[i][j] += reg_a[i] * reg_b[j];
    }

    // Write this thread's THREAD_SIZE_Y x THREAD_SIZE_X sub-tile of C back to
    // global memory, two float4 stores per row.
    float *C_ptr_start = C_ptr + N * by * BLOCK_SIZE_M +
                         bx * BLOCK_SIZE_N;
    for (int i = 0; i < THREAD_SIZE_Y; i++)
    {
        FETCH_FLOAT4(C_ptr_start[N * (ty * THREAD_SIZE_Y + i) + tx * THREAD_SIZE_X]) = FETCH_FLOAT4(accum[i][0]);
        FETCH_FLOAT4(C_ptr_start[N * (ty * THREAD_SIZE_Y + i) + tx * THREAD_SIZE_X + 4]) = FETCH_FLOAT4(accum[i][4]);
    }
}

int main(){
    // 1. Problem size (square matrices: C[m x n] = A[m x k] * B[k x n]).
    const int m = 4 * 1024;
    const int n = 4 * 1024;
    const int k = 4 * 1024;
    // size_t byte counts so larger problem sizes cannot overflow int.
    const size_t size_a = (size_t)m * k * sizeof(float);
    const size_t size_b = (size_t)k * n * sizeof(float);
    const size_t size_c = (size_t)m * n * sizeof(float);

    // 2. Allocate buffers.
    // 2.1 Host memory: inputs, CPU reference result, and a host copy of the GPU result.
    float *h_a = (float*)malloc(size_a);
    float *h_b = (float*)malloc(size_b);
    float *h_result = (float*)malloc(size_c);
    float *d_result = (float*)malloc(size_c);  // host-side buffer that receives the device result

    // 2.2 Device memory.
    float *d_a, *d_b, *d_c;
    cudaMalloc((void**)&d_a, size_a);
    cudaMalloc((void**)&d_b, size_b);
    cudaMalloc((void**)&d_c, size_c);

    // 3. Initialize the host matrices.
    random_matrix(m, k, h_a);
    random_matrix(k, n, h_b);
    memset(h_result, 0, size_c);
    memset(d_result, 0, size_c);

    // 4. Copy the inputs to the device.
    cudaMemcpy(d_a, h_a, size_a, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, size_b, cudaMemcpyHostToDevice);

    // 5. Compute the CPU reference result.
    cpu_sgemm(h_a, h_b, h_result, m, n, k);

    // 6. GPU computation.
    // 6.1 Tiling configuration (must satisfy the kernel's launch contract:
    //     m/n/k divisible by the tile sizes, THREAD_SIZE_X == THREAD_SIZE_Y == 8).
    const int BLOCK_SIZE_M = 128;
    const int BLOCK_SIZE_K = 8;
    const int BLOCK_SIZE_N = 128;
    const int THREAD_SIZE_X = 8;
    const int THREAD_SIZE_Y = 8;
    const bool ENABLE_DOUBLE_BUFFER = true;

    dim3 block(ceil(BLOCK_SIZE_N, THREAD_SIZE_X), ceil(BLOCK_SIZE_M, THREAD_SIZE_Y));
    dim3 grid(ceil(n, BLOCK_SIZE_N), ceil(m, BLOCK_SIZE_M));

    // 6.2 Launch and time the kernel with CUDA events.
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start);

    // The kernel's template parameters are <BM, BN, BK, THREAD_SIZE_Y,
    // THREAD_SIZE_X, ...>. The original passed THREAD_SIZE_X into the
    // THREAD_SIZE_Y slot — harmless only because both happen to be 8.
    cuda_sgemm<BLOCK_SIZE_M, BLOCK_SIZE_N, BLOCK_SIZE_K, THREAD_SIZE_Y, THREAD_SIZE_X, ENABLE_DOUBLE_BUFFER><<<grid, block>>>(d_a, d_b, d_c, m, n, k);

    // Kernel launches return no status; surface configuration errors here.
    cudaError_t launch_err = cudaGetLastError();
    if (launch_err != cudaSuccess) {
        printf("kernel launch failed: %s\n", cudaGetErrorString(launch_err));
        exit(-1);
    }

    cudaEventRecord(stop);
    cudaEventSynchronize(stop);

    // Elapsed kernel time in milliseconds.
    float elapsed_time;
    cudaEventElapsedTime(&elapsed_time, start, stop);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    printf("cal_time: %f\n", elapsed_time);

    // 7. Copy the device result back to the host (cudaMemcpy synchronizes).
    cudaMemcpy(d_result, d_c, size_c, cudaMemcpyDeviceToHost);

    // 8. Validate against the CPU reference. compare_matrices returns the
    // maximum absolute difference, which is never negative.
    float diff = compare_matrices(m, n, d_result, h_result);
    if (diff > 0.5f)
    {
        printf("diff too big !\n");
        exit(-1);
    }
    else
    {
        printf("right\n");
    }

    // 9. Release memory.
    free(h_a);
    free(h_b);
    free(h_result);
    free(d_result);

    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);

    return 0;
}