#include <cuda_runtime.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

// Matrix dimensions: C (MxN) = A (MxK) * B (KxN), row-major.
#define M 512
#define K 512
#define N 512

// Tile width for the shared-memory kernel; must match the host launch's
// block dimensions (dim3 block(32, 32) in main).
#define BLOCK_SIZE 32

// Reinterpret a float lvalue as a float4 for 16-byte vectorized loads/stores.
// Requires the address to be 16-byte aligned.
#define FLOAT4(value) (reinterpret_cast<float4 *>(&(value))[0])

// Kernel selected by the benchmark driver in main().
#define KERNEL matmul_smem

// ---------------- CPU reference matrix multiply ----------------
// Computes C = A * B on the host: A is MxK, B is KxN, C is MxN, all
// row-major. Serves as ground truth when validating the GPU kernels.
void matmul_cpu(const float *A, const float *B, float *C) {
    for (int i = 0; i < M; i++) {
        const float *a_row = A + i * K;  // i-th row of A
        float *c_row = C + i * N;        // i-th row of C
        for (int j = 0; j < N; j++) {
            float acc = 0.0f;
            // Dot product of A's row i with B's column j.
            for (int k = 0; k < K; k++) {
                acc += a_row[k] * B[k * N + j];
            }
            c_row[j] = acc;
        }
    }
}

// ---------------- GPU kernels ----------------
// Naive matmul: one thread per element of C = A (MxK) * B (KxN), row-major.
// Launch with a 2D grid whose x dimension covers columns (N) and y covers
// rows (M); the guard below handles grids that overshoot the matrix edge.
__global__ void matmul_naive(const float *A, const float *B, float *C) {
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= M || col >= N) return;  // grid-tail guard

    float acc = 0.0f;
    for (int k = 0; k < K; k++) {
        acc += A[row * K + k] * B[k * N + col];
    }
    C[row * N + col] = acc;
}

// Shared-memory tiled matmul: C = A (MxK) * B (KxN), row-major.
// Each block computes one BLOCK_SIZE x BLOCK_SIZE tile of C, streaming
// matching tiles of A and B through shared memory.
// Launch: block (BLOCK_SIZE, BLOCK_SIZE),
//         grid ((N+BLOCK_SIZE-1)/BLOCK_SIZE, (M+BLOCK_SIZE-1)/BLOCK_SIZE).
#ifndef BLOCK_SIZE
#define BLOCK_SIZE 32  // fallback; must match the host-side block dims
#endif
__global__ void matmul_smem(const float *A, const float *B, float *C) {
    int blockRow = blockIdx.y;
    int blockCol = blockIdx.x;
    int local_y = threadIdx.y;
    int local_x = threadIdx.x;

    int global_base_m = blockRow * BLOCK_SIZE;  // first C row of this block
    int global_base_n = blockCol * BLOCK_SIZE;  // first C column of this block

    float local_sum = 0.0f;
    __shared__ float As[BLOCK_SIZE][BLOCK_SIZE]; // tile of A: [BM, BK]
    __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE]; // tile of B: [BK, BN]

    // Walk the K dimension one tile at a time (ceil-div covers K % BLOCK_SIZE != 0).
    for (int i = 0; i < (K + BLOCK_SIZE - 1) / BLOCK_SIZE; i++) {
        int global_base_k = i * BLOCK_SIZE;
        // Cooperative load: each thread stages one element of each tile,
        // zero-padding out-of-range elements so the inner product stays valid.
        int a_row = global_base_m + local_y;
        int a_col = global_base_k + local_x;
        int b_row = global_base_k + local_y;
        int b_col = global_base_n + local_x;
        As[local_y][local_x] = (a_row < M && a_col < K) ? A[a_row * K + a_col] : 0.0f;
        Bs[local_y][local_x] = (b_row < K && b_col < N) ? B[b_row * N + b_col] : 0.0f;
        __syncthreads();  // both tiles fully populated before anyone reads
        for (int j = 0; j < BLOCK_SIZE; j++) {
            local_sum += As[local_y][j] * Bs[j][local_x];
        }
        __syncthreads();  // all reads finished before next iteration overwrites
    }
    // Bounds-guard the store: without it, edge-block threads write out of
    // bounds whenever M or N is not a multiple of BLOCK_SIZE (the loads above
    // were already guarded, but the store was not).
    int c_row = global_base_m + local_y;
    int c_col = global_base_n + local_x;
    if (c_row < M && c_col < N) {
        C[c_row * N + c_col] = local_sum;
    }
}

// Register-tiled matmul with float4 vectorized loads: C = A (MxK) * B (KxN).
// Each block computes a BM x BN tile of C; each thread accumulates a TM x TN
// sub-tile in registers. Expected launch for the default parameters:
//   grid  [(N+BN-1)/BN, (M+BM-1)/BM]
//   block [BN/TN, BM/TM] = [16, 16]  -> 256 threads per block
// Per K-step the 256 threads cooperatively stage As [BM=128, BK=8] and
// Bs [BK=8, BN=128] into shared memory, 4 consecutive floats per thread.
// NOTE(review): no bounds checks anywhere — this assumes M % BM == 0,
// N % BN == 0, K % BK == 0, and float4-aligned addresses (K and N multiples
// of 4); confirm before launching with other sizes. Requires the FLOAT4
// macro (float4 reinterpret of a float lvalue) to be defined.
template <const int BM = 128, const int BN = 128, const int BK = 8, const int TM = 8, const int TN = 8>
__global__ void matmul_smem_fp32x4(float *A, float *B, float *C) {
    int bx = blockIdx.x;
    int by = blockIdx.y;
    int BaseMdimOnHbm = by * BM;  // first row of this block's C tile
    int BaseNdimOnHbm = bx * BN;  // first column of this block's C tile

    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int tid = blockDim.x * ty + tx;
    // Shared-load assignment: each of the 256 threads loads one float4.
    // As rows are BK=8 wide -> 2 loaders per row; Bs rows are BN=128 wide
    // -> 32 loaders per row.
    int LoadSmemAm = tid / 2;
    int LoadSmemAk = (tid % 2) * 4;
    int LoadSmemBk = tid / 32;
    int LoadSmemBn = (tid % 32) * 4;

    __shared__ float As[BM][BK];
    __shared__ float Bs[BK][BN];

    int aRow = BaseMdimOnHbm + LoadSmemAm;  // fixed A row this thread loads from
    int bCol = BaseNdimOnHbm + LoadSmemBn;  // fixed B column base this thread loads from

    // Per-thread accumulator tile, kept in registers.
    float CLocal[TM][TN] = {0.0f};
    for (int k = 0; k < (K + BK - 1) / BK; k++) {
        // The block's 16*16 = 256 threads stage the next K-slice of A and B.
        int BaseKdimOnHbm = k * BK;
        int aCol = BaseKdimOnHbm + LoadSmemAk;
        int bRow = BaseKdimOnHbm + LoadSmemBk;

        FLOAT4(As[LoadSmemAm][LoadSmemAk]) = FLOAT4(A[aRow * K + aCol]);
        FLOAT4(Bs[LoadSmemBk][LoadSmemBn]) = FLOAT4(B[bRow * N + bCol]);
        __syncthreads();  // tiles complete before compute reads them
#pragma unroll
        for (int i = 0; i < BK; i++) {
#pragma unroll
            for (int m = 0; m < TM; m++) {
#pragma unroll
                for (int n = 0; n < TN; n++) {
                    CLocal[m][n] += As[ty * TM + m][i] * Bs[i][tx * TN + n];
                }
            }
        }
        __syncthreads();  // all reads done before next iteration overwrites tiles
    }
    // Write the TM x TN register tile back to global memory, 4 floats per store.
#pragma unroll
    for (int m = 0; m < TM; m++) {
        int cRow = BaseMdimOnHbm + ty * TM + m;
#pragma unroll
        for (int n = 0; n < TN; n += 4) {
            int cCol = BaseNdimOnHbm + tx * TN + n;
            FLOAT4(C[cRow * N + cCol]) = FLOAT4(CLocal[m][n]);
        }
    }
}

// Basic CUDA error check: report the failing call's location and abort.
#define CUDA_CHECK(call)                                                      \
    do {                                                                      \
        cudaError_t err_ = (call);                                            \
        if (err_ != cudaSuccess) {                                            \
            fprintf(stderr, "CUDA error %s:%d: %s\n", __FILE__, __LINE__,     \
                    cudaGetErrorString(err_));                                \
            exit(EXIT_FAILURE);                                               \
        }                                                                     \
    } while (0)

// ---------------- Driver ----------------
// Runs the CPU reference and the selected GPU kernel (KERNEL), times both,
// and verifies the GPU result element-wise against the CPU result.
int main() {
    int size_A = M * K;
    int size_B = K * N;
    int size_C = M * N;

    float *h_A = (float *)malloc(sizeof(float) * size_A);
    float *h_B = (float *)malloc(sizeof(float) * size_B);
    float *h_C_cpu = (float *)malloc(sizeof(float) * size_C);
    float *h_C_gpu = (float *)malloc(sizeof(float) * size_C);
    if (h_A == NULL || h_B == NULL || h_C_cpu == NULL || h_C_gpu == NULL) {
        fprintf(stderr, "host allocation failed\n");
        return EXIT_FAILURE;
    }

    // Initialize inputs with ones: every element of C should equal K.
    for (int i = 0; i < size_A; ++i) h_A[i] = 1.0f;
    for (int i = 0; i < size_B; ++i) h_B[i] = 1.0f;

    // CPU reference + timing.
    clock_t start_cpu = clock();
    matmul_cpu(h_A, h_B, h_C_cpu);
    clock_t end_cpu = clock();
    float cpu_time = 1000.0f * (end_cpu - start_cpu) / CLOCKS_PER_SEC;
    printf("CPU Time: %.3f ms\n", cpu_time);

    // Device buffers.
    float *d_A, *d_B, *d_C;
    CUDA_CHECK(cudaMalloc(&d_A, sizeof(float) * size_A));
    CUDA_CHECK(cudaMalloc(&d_B, sizeof(float) * size_B));
    CUDA_CHECK(cudaMalloc(&d_C, sizeof(float) * size_C));

    CUDA_CHECK(cudaMemcpy(d_A, h_A, sizeof(float) * size_A, cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(d_B, h_B, sizeof(float) * size_B, cudaMemcpyHostToDevice));

    // GPU timing via events.
    cudaEvent_t start, stop;
    CUDA_CHECK(cudaEventCreate(&start));
    CUDA_CHECK(cudaEventCreate(&stop));

    // One thread per C element; ceil-div covers M/N not divisible by 32.
    dim3 block(32, 32);
    dim3 grid((N + 31) / 32, (M + 31) / 32);

    CUDA_CHECK(cudaEventRecord(start));
    KERNEL<<<grid, block>>>(d_A, d_B, d_C);
    CUDA_CHECK(cudaGetLastError());          // catch launch-configuration errors
    CUDA_CHECK(cudaEventRecord(stop));
    CUDA_CHECK(cudaEventSynchronize(stop));  // kernel finished before timing/copying

    CUDA_CHECK(cudaMemcpy(h_C_gpu, d_C, sizeof(float) * size_C, cudaMemcpyDeviceToHost));

    float gpu_time = 0;
    CUDA_CHECK(cudaEventElapsedTime(&gpu_time, start, stop));
    printf("GPU Time: %.3f ms\n", gpu_time);

    // Element-wise verification against the CPU reference.
    int correct = 1;
    for (int i = 0; i < size_C; ++i) {
        if (fabs(h_C_cpu[i] - h_C_gpu[i]) > 1e-3) {
            printf("Mismatch at %d: CPU=%f, GPU=%f\n", i, h_C_cpu[i], h_C_gpu[i]);
            correct = 0;
            break;
        }
    }
    printf("Result Check: %s\n", correct ? "PASS" : "FAIL");

    // Cleanup.
    CUDA_CHECK(cudaEventDestroy(start));
    CUDA_CHECK(cudaEventDestroy(stop));
    free(h_A); free(h_B); free(h_C_cpu); free(h_C_gpu);
    CUDA_CHECK(cudaFree(d_A));
    CUDA_CHECK(cudaFree(d_B));
    CUDA_CHECK(cudaFree(d_C));

    return 0;
}
