﻿#include "common.cuh"

/*
 * Naive SGEMM: each thread computes exactly one element of C = A * B.
 * A is M x K, B is K x N, C is M x N, all row-major.
 * Expected launch: grid covers N x M with BN x BM tiles, block = (BN, BM).
 */
template <const int BM, const int BN, const int BK, const int TM, const int TN, const int TK>
__global__ void sgemm_v1(const float *A, const float *B, float *C, const int M, const int N, const int K) {
    const int col = blockIdx.x * BN + threadIdx.x; // column of C handled by this thread
    const int row = blockIdx.y * BM + threadIdx.y; // row of C handled by this thread
    if (row >= M || col >= N)
        return;
    float acc = 0.0f;
    for (int k = 0; k < K; ++k)
        acc += A[row * K + k] * B[k * N + col];
    C[row * N + col] = acc;
}

/*
 * Tiled SGEMM using shared memory: each block stages BMxBK / BKxBN tiles of
 * A / B in smem, then each thread accumulates one element of C.
 *
 * Fixes vs. the earlier revision:
 *   - __syncthreads() must be reached by every thread in the block; it used to
 *     sit inside a divergent `if (y < M && x < N)`, which is undefined behavior
 *     for boundary blocks.
 *   - All threads now participate in the tile loads, zero-padding out-of-range
 *     elements, so in-bounds threads never read stale/uninitialized smem.
 */
// BM = BN = BK = 16 (block must be (BN, BM) with BK == blockDim.x)
template <const int BM, const int BN, const int BK, const int TM, const int TN, const int TK>
__global__ void sgemm_v2(const float *A, const float *B, float *C, const int M, const int N, const int K) {
    __shared__ float s_a[BM][BK], s_b[BK][BN];
    float reg = 0.0f;

    const int tx = threadIdx.x, ty = threadIdx.y;
    const int bx = blockIdx.x * BN, by = blockIdx.y * BM;
    const int x = bx + tx, y = by + ty;

    for (int offs = 0; offs < K; offs += BK) {
        // Every thread loads (zero-padded) so the whole block reaches the barriers.
        s_a[ty][tx] = (y < M && offs + tx < K) ? A[y * K + (offs + tx)] : 0.0f;
        s_b[ty][tx] = (offs + ty < K && x < N) ? B[(offs + ty) * N + x] : 0.0f;
        __syncthreads();
#pragma unroll
        for (int k = 0; k < BK; ++k) // padded zeros contribute nothing past K
            reg += s_a[ty][k] * s_b[k][tx];
        __syncthreads();
    }
    if (y < M && x < N)
        C[y * N + x] = reg;
}

/*
 * Idea: previous version loaded one float per thread into smem — vectorize the
 * loads with float4 instead. The block stays (16, 16); since each thread loads
 * 4 floats at a time, BK becomes 16 * 4 = 64.
 *
 * NOTE(review): float4 global accesses assume rows of A/B are 16-byte aligned,
 * i.e. K and N multiples of 4 — confirm at the call site.
 *
 * Fixes vs. the earlier revision:
 *   - __syncthreads() moved out of the divergent bounds check (UB otherwise).
 *   - The float4 loads were completely unguarded; they are now bounds-checked
 *     with a scalar zero-padded fallback, so non-multiple sizes no longer read
 *     out of bounds.
 */
// BM = BN = 16, BK = 64
template <const int BM, const int BN, const int BK, const int TM, const int TN, const int TK>
__global__ void sgemm_v3(const float *A, const float *B, float *C, const int M, const int N, const int K) {
    __shared__ float s_a[BM][BK], s_b[BK][BN];
    float reg = 0.0f;

    const int tx = threadIdx.x, ty = threadIdx.y;
    const int bx = blockIdx.x * BN, by = blockIdx.y * BM;
    const int x = bx + tx, y = by + ty;
    const int tid = ty * blockDim.x + tx;

    // Map the flat thread id to one float4 slot of each tile.
    const int row_smem_a = tid / (BK / 4);
    const int col_smem_a = (tid % (BK / 4)) * 4;
    const int row_smem_b = tid / (BN / 4);
    const int col_smem_b = (tid % (BN / 4)) * 4;
    const int row_A = by + row_smem_a;
    const int col_B = bx + col_smem_b;

    for (int offs = 0; offs < K; offs += BK) {
        int col_A = offs + col_smem_a;
        int row_B = offs + row_smem_b;
        if (row_A < M && col_A + 3 < K)
            FLOAT4(s_a[row_smem_a][col_smem_a]) = CFLOAT4(A[row_A * K + col_A]);
        else {
#pragma unroll
            for (int t = 0; t < 4; ++t)
                s_a[row_smem_a][col_smem_a + t] = (row_A < M && col_A + t < K) ? A[row_A * K + col_A + t] : 0.0f;
        }
        if (row_B < K && col_B + 3 < N)
            FLOAT4(s_b[row_smem_b][col_smem_b]) = CFLOAT4(B[row_B * N + col_B]);
        else {
#pragma unroll
            for (int t = 0; t < 4; ++t)
                s_b[row_smem_b][col_smem_b + t] = (row_B < K && col_B + t < N) ? B[row_B * N + col_B + t] : 0.0f;
        }
        __syncthreads();
#pragma unroll
        for (int k = 0; k < BK; ++k)
            reg += s_a[ty][k] * s_b[k][tx];
        __syncthreads();
    }
    if (y < M && x < N)
        C[y * N + x] = reg;
}

/*
 * Ideas:
 *   1. Each thread previously computed one element of C; compute a TM x TN
 *      sub-tile per thread instead.
 *   2. Stage sub-tiles from shared memory into registers (r_a / r_b) before
 *      the FMAs (gmem -> smem -> registers).
 *   3. Per block, compute is 2*K*BM*BN flops and traffic is 4*(BM*K + K*BN)
 *      bytes — an arithmetic intensity of 1/(2*(1/BM + 1/BN)), so larger
 *      BM/BN raises it.
 * Implementation: block layout stays (16, 16); each thread computes a 4x4 tile
 * (TK = 1: point -> line). Each thread issues
 * ((BM*BK)/4) / ((BM/TM)*(BN/TN)) = (BK*TM*TN)/(4*BN) float4 loads per tile.
 *
 * NOTE(review): float4 global accesses assume rows of A/B/C are 16-byte
 * aligned, i.e. K and N multiples of 4 — confirm at the call site.
 *
 * Fixes vs. the earlier revision:
 *   - The float4 store of C was guarded only by `x < N`, letting a 4-wide
 *     store run past the end of a row when N - x < 4 (OOB write). The tail is
 *     now stored element-wise.
 *   - Dropped the redundant memset of r_a/r_b (fully written before every read).
 */
// BM = BN = 64, BK = 64, TM = TN = 4, TK = 1
template <const int BM, const int BN, const int BK, const int TM, const int TN, const int TK>
__global__ void sgemm_v4(float *A, float *B, float *C, int M, int N, int K) {
    SHARED_ALIEN16 float s_a[BM][BK];
    SHARED_ALIEN16 float s_b[BK][BN];
    float r_a[TM], r_b[TN]; // register fragments of A / B
    float r_c[TM][TN];      // TM x TN accumulator tile
    memset(r_c, 0, sizeof(r_c));

    const int tx = threadIdx.x, ty = threadIdx.y;
    const int tid = ty * blockDim.x + tx;
    const int total_threads = blockDim.x * blockDim.y;
    const int total_loads_a = (BM * BK) / 4; // float4 transfers per A tile
    const int total_loads_b = (BK * BN) / 4; // float4 transfers per B tile

    const int bx = blockIdx.x * BN;
    const int by = blockIdx.y * BM;

    for (int offs = 0; offs < K; offs += BK) {
        // Cooperative, zero-padded tile loads: every thread strides over the
        // float4 slots of the tile.
        for (int load_offs = tid; load_offs < total_loads_a; load_offs += total_threads) {
            int row_smem_a = load_offs / (BK / 4);
            int col_smem_a = (load_offs % (BK / 4)) * 4;
            int row_A = by + row_smem_a;
            int col_A = offs + col_smem_a;
            if (row_A < M && col_A + 3 < K)
                FLOAT4(s_a[row_smem_a][col_smem_a]) = FLOAT4(A[row_A * K + col_A]);
            else {
#pragma unroll
                for (int t = 0; t < 4; ++t)
                    s_a[row_smem_a][col_smem_a + t] = (row_A < M && col_A + t < K) ? A[row_A * K + col_A + t] : 0.0f;
            }
        }
        for (int load_offs = tid; load_offs < total_loads_b; load_offs += total_threads) {
            int row_smem_b = load_offs / (BN / 4);
            int col_smem_b = (load_offs % (BN / 4)) * 4;
            int row_B = offs + row_smem_b;
            int col_B = bx + col_smem_b;
            if (row_B < K && col_B + 3 < N)
                FLOAT4(s_b[row_smem_b][col_smem_b]) = FLOAT4(B[row_B * N + col_B]);
            else {
#pragma unroll
                for (int t = 0; t < 4; ++t)
                    s_b[row_smem_b][col_smem_b + t] = (row_B < K && col_B + t < N) ? B[row_B * N + col_B + t] : 0.0f;
            }
        }
        __syncthreads();

#pragma unroll
        for (int k = 0; k < BK; ++k) {
            int sa_row_start = ty * TM;
            int sb_col_start = tx * TN;
#pragma unroll
            for (int i = 0; i < TM; ++i)
                r_a[i] = s_a[sa_row_start + i][k];
#pragma unroll
            for (int j = 0; j < TN; j += 4)
                FLOAT4(r_b[j]) = FLOAT4(s_b[k][sb_col_start + j]);
#pragma unroll
            for (int i = 0; i < TM; ++i)
#pragma unroll
                for (int j = 0; j < TN; ++j)
                    r_c[i][j] = __fmaf_rn(r_a[i], r_b[j], r_c[i][j]);
        }
        __syncthreads();
    }

    // Write back the TM x TN tile; guard the float4 tail so a vector store
    // never crosses the row boundary at N.
#pragma unroll
    for (int i = 0; i < TM; ++i) {
        int y = by + ty * TM + i;
        if (y >= M)
            continue;
#pragma unroll
        for (int j = 0; j < TN; j += 4) {
            int x = bx + tx * TN + j;
            if (x + 3 < N)
                FLOAT4(C[y * N + x]) = FLOAT4(r_c[i][j]);
            else
                for (int t = 0; t < 4 && x + t < N; ++t)
                    C[y * N + x + t] = r_c[i][j + t];
        }
    }
}

/*
 * Idea: double buffering to hide the gmem -> smem load latency.
 * Implementation:
 *   1. Two smem buffers per operand: one is filled while the other is consumed.
 *   2. smem is limited, so BK is halved relative to v4.
 *
 * NOTE(review): float4 global accesses assume rows of A/B/C are 16-byte
 * aligned, i.e. K and N multiples of 4 — confirm at the call site.
 *
 * Fixes vs. the earlier revision:
 *   - The float4 store of C was guarded only by `x < N`, letting a 4-wide
 *     store run past the end of a row when N - x < 4 (OOB write). The tail is
 *     now stored element-wise.
 *   - Dropped the redundant memset of r_a/r_b and the no-op trailing
 *     __syncthreads() before the epilogue store.
 */
// BM = BN = 64, BK = 32, TM = TN = 4, TK = 1
template <const int BM, const int BN, const int BK, const int TM, const int TN, const int TK>
__global__ void sgemm_v5(float *A, float *B, float *C, int M, int N, int K) {
    SHARED_ALIEN16 float s_a[2][BM][BK];
    SHARED_ALIEN16 float s_b[2][BK][BN];
    float r_a[TM], r_b[TN]; // register fragments of A / B
    float r_c[TM][TN];      // TM x TN accumulator tile
    memset(r_c, 0, sizeof(r_c));

    const int tx = threadIdx.x, ty = threadIdx.y;
    const int tid = ty * blockDim.x + tx;
    const int total_threads = blockDim.x * blockDim.y;
    const int total_loads_a = (BM * BK) / 4; // float4 transfers per A tile
    const int total_loads_b = (BK * BN) / 4; // float4 transfers per B tile

    const int bx = blockIdx.x * BN;
    const int by = blockIdx.y * BM;

    int cur_use = 0, next_load = 1; // buffer being consumed / being filled

    // Preload the first K-tile of A into buffer 0 (zero-padded at the edges).
    for (int load_offs = tid; load_offs < total_loads_a; load_offs += total_threads) {
        int row_smem_a = load_offs / (BK / 4);
        int col_smem_a = (load_offs % (BK / 4)) * 4;
        int row_A = by + row_smem_a;
        int col_A = col_smem_a;

        if (row_A < M && col_A + 3 < K)
            FLOAT4(s_a[0][row_smem_a][col_smem_a]) = FLOAT4(A[row_A * K + col_A]);
        else {
#pragma unroll
            for (int t = 0; t < 4; ++t)
                s_a[0][row_smem_a][col_smem_a + t] = (row_A < M && col_A + t < K) ? A[row_A * K + col_A + t] : 0.0f;
        }
    }

    // Preload the first K-tile of B into buffer 0.
    for (int load_offs = tid; load_offs < total_loads_b; load_offs += total_threads) {
        int row_smem_b = load_offs / (BN / 4);
        int col_smem_b = (load_offs % (BN / 4)) * 4;
        int row_B = row_smem_b;
        int col_B = bx + col_smem_b;

        if (row_B < K && col_B + 3 < N)
            FLOAT4(s_b[0][row_smem_b][col_smem_b]) = FLOAT4(B[row_B * N + col_B]);
        else {
#pragma unroll
            for (int t = 0; t < 4; ++t)
                s_b[0][row_smem_b][col_smem_b + t] = (row_B < K && col_B + t < N) ? B[row_B * N + col_B + t] : 0.0f;
        }
    }
    __syncthreads();

    for (int offs = BK; offs < K; offs += BK) {
        // Fill the spare buffer with the next K-tile while computing on the
        // current one (no barrier needed in between: disjoint buffers).
        for (int load_offs = tid; load_offs < total_loads_a; load_offs += total_threads) {
            int row_smem_a = load_offs / (BK / 4);
            int col_smem_a = (load_offs % (BK / 4)) * 4;
            int row_A = by + row_smem_a;
            int col_A = offs + col_smem_a;

            if (row_A < M && col_A + 3 < K)
                FLOAT4(s_a[next_load][row_smem_a][col_smem_a]) = FLOAT4(A[row_A * K + col_A]);
            else {
#pragma unroll
                for (int t = 0; t < 4; ++t)
                    s_a[next_load][row_smem_a][col_smem_a + t] = (row_A < M && col_A + t < K) ? A[row_A * K + col_A + t] : 0.0f;
            }
        }

        for (int load_offs = tid; load_offs < total_loads_b; load_offs += total_threads) {
            int row_smem_b = load_offs / (BN / 4);
            int col_smem_b = (load_offs % (BN / 4)) * 4;
            int row_B = offs + row_smem_b;
            int col_B = bx + col_smem_b;

            if (row_B < K && col_B + 3 < N)
                FLOAT4(s_b[next_load][row_smem_b][col_smem_b]) = FLOAT4(B[row_B * N + col_B]);
            else {
#pragma unroll
                for (int t = 0; t < 4; ++t)
                    s_b[next_load][row_smem_b][col_smem_b + t] = (row_B < K && col_B + t < N) ? B[row_B * N + col_B + t] : 0.0f;
            }
        }

        // Consume the current buffer.
#pragma unroll
        for (int k = 0; k < BK; ++k) {
            int sa_row_start = ty * TM;
            int sb_col_start = tx * TN;
#pragma unroll
            for (int i = 0; i < TM; ++i)
                r_a[i] = s_a[cur_use][sa_row_start + i][k];
#pragma unroll
            for (int j = 0; j < TN; j += 4)
                FLOAT4(r_b[j]) = FLOAT4(s_b[cur_use][k][sb_col_start + j]);
#pragma unroll
            for (int i = 0; i < TM; ++i)
#pragma unroll
                for (int j = 0; j < TN; ++j)
                    r_c[i][j] = __fmaf_rn(r_a[i], r_b[j], r_c[i][j]);
        }
        __syncthreads(); // everyone done with cur_use before it becomes next_load
        cur_use ^= 1;
        next_load ^= 1;
    }

    // Drain: compute on the last preloaded tile (no further loads).
#pragma unroll
    for (int k = 0; k < BK; ++k) {
        int sa_row_start = ty * TM;
        int sb_col_start = tx * TN;
#pragma unroll
        for (int i = 0; i < TM; ++i)
            r_a[i] = s_a[cur_use][sa_row_start + i][k];
#pragma unroll
        for (int j = 0; j < TN; j += 4)
            FLOAT4(r_b[j]) = FLOAT4(s_b[cur_use][k][sb_col_start + j]);
#pragma unroll
        for (int i = 0; i < TM; ++i)
#pragma unroll
            for (int j = 0; j < TN; ++j)
                r_c[i][j] = __fmaf_rn(r_a[i], r_b[j], r_c[i][j]);
    }

    // Write back the TM x TN tile; guard the float4 tail so a vector store
    // never crosses the row boundary at N.
#pragma unroll
    for (int i = 0; i < TM; ++i) {
        int y = by + ty * TM + i;
        if (y >= M)
            continue;
#pragma unroll
        for (int j = 0; j < TN; j += 4) {
            int x = bx + tx * TN + j;
            if (x + 3 < N)
                FLOAT4(C[y * N + x]) = FLOAT4(r_c[i][j]);
            else
                for (int t = 0; t < 4 && x + t < N; ++t)
                    C[y * N + x + t] = r_c[i][j + t];
        }
    }
}

/*
 * Reading s_a into r_a has bank conflicts; mitigate them by padding the inner
 * smem dimension. Because float4 accesses are used, the padding must be a
 * multiple of 4 (not the usual +1).
 *
 * NOTE(review): float4 global accesses assume rows of A/B/C are 16-byte
 * aligned, i.e. K and N multiples of 4 — confirm at the call site.
 *
 * Fixes vs. the earlier revision:
 *   - The float4 store of C was guarded only by `x < N`, letting a 4-wide
 *     store run past the end of a row when N - x < 4 (OOB write). The tail is
 *     now stored element-wise.
 *   - Dropped the redundant memset of r_a/r_b and the no-op trailing
 *     __syncthreads() before the epilogue store.
 */
// BM = BN = 64, BK = 32, TM = TN = 4, TK = 1
template <const int BM, const int BN, const int BK, const int TM, const int TN, const int TK>
__global__ void sgemm_v6(float *A, float *B, float *C, int M, int N, int K) {
    SHARED_ALIEN16 float s_a[2][BM][BK + 4]; // +4 padding breaks bank conflicts
    SHARED_ALIEN16 float s_b[2][BK][BN + 4];
    float r_a[TM], r_b[TN]; // register fragments of A / B
    float r_c[TM][TN];      // TM x TN accumulator tile
    memset(r_c, 0, sizeof(r_c));

    const int tx = threadIdx.x, ty = threadIdx.y;
    const int tid = ty * blockDim.x + tx;
    const int total_threads = blockDim.x * blockDim.y;
    const int total_loads_a = (BM * BK) / 4; // float4 transfers per A tile
    const int total_loads_b = (BK * BN) / 4; // float4 transfers per B tile

    const int bx = blockIdx.x * BN;
    const int by = blockIdx.y * BM;

    int cur_use = 0, next_load = 1; // buffer being consumed / being filled

    // Preload the first K-tile of A into buffer 0 (zero-padded at the edges).
    for (int load_offs = tid; load_offs < total_loads_a; load_offs += total_threads) {
        int row_smem_a = load_offs / (BK / 4);
        int col_smem_a = (load_offs % (BK / 4)) * 4;
        int row_A = by + row_smem_a;
        int col_A = col_smem_a;

        if (row_A < M && col_A + 3 < K)
            FLOAT4(s_a[0][row_smem_a][col_smem_a]) = FLOAT4(A[row_A * K + col_A]);
        else {
#pragma unroll
            for (int t = 0; t < 4; ++t)
                s_a[0][row_smem_a][col_smem_a + t] = (row_A < M && col_A + t < K) ? A[row_A * K + col_A + t] : 0.0f;
        }
    }

    // Preload the first K-tile of B into buffer 0.
    for (int load_offs = tid; load_offs < total_loads_b; load_offs += total_threads) {
        int row_smem_b = load_offs / (BN / 4);
        int col_smem_b = (load_offs % (BN / 4)) * 4;
        int row_B = row_smem_b;
        int col_B = bx + col_smem_b;

        if (row_B < K && col_B + 3 < N)
            FLOAT4(s_b[0][row_smem_b][col_smem_b]) = FLOAT4(B[row_B * N + col_B]);
        else {
#pragma unroll
            for (int t = 0; t < 4; ++t)
                s_b[0][row_smem_b][col_smem_b + t] = (row_B < K && col_B + t < N) ? B[row_B * N + col_B + t] : 0.0f;
        }
    }
    __syncthreads();

    for (int offs = BK; offs < K; offs += BK) {
        // Fill the spare buffer with the next K-tile while computing on the
        // current one (no barrier needed in between: disjoint buffers).
        for (int load_offs = tid; load_offs < total_loads_a; load_offs += total_threads) {
            int row_smem_a = load_offs / (BK / 4);
            int col_smem_a = (load_offs % (BK / 4)) * 4;
            int row_A = by + row_smem_a;
            int col_A = offs + col_smem_a;

            if (row_A < M && col_A + 3 < K)
                FLOAT4(s_a[next_load][row_smem_a][col_smem_a]) = FLOAT4(A[row_A * K + col_A]);
            else {
#pragma unroll
                for (int t = 0; t < 4; ++t)
                    s_a[next_load][row_smem_a][col_smem_a + t] = (row_A < M && col_A + t < K) ? A[row_A * K + col_A + t] : 0.0f;
            }
        }

        for (int load_offs = tid; load_offs < total_loads_b; load_offs += total_threads) {
            int row_smem_b = load_offs / (BN / 4);
            int col_smem_b = (load_offs % (BN / 4)) * 4;
            int row_B = offs + row_smem_b;
            int col_B = bx + col_smem_b;

            if (row_B < K && col_B + 3 < N)
                FLOAT4(s_b[next_load][row_smem_b][col_smem_b]) = FLOAT4(B[row_B * N + col_B]);
            else {
#pragma unroll
                for (int t = 0; t < 4; ++t)
                    s_b[next_load][row_smem_b][col_smem_b + t] = (row_B < K && col_B + t < N) ? B[row_B * N + col_B + t] : 0.0f;
            }
        }

        // Consume the current buffer.
#pragma unroll
        for (int k = 0; k < BK; ++k) {
            int sa_row_start = ty * TM;
            int sb_col_start = tx * TN;
#pragma unroll
            for (int i = 0; i < TM; ++i)
                r_a[i] = s_a[cur_use][sa_row_start + i][k];
#pragma unroll
            for (int j = 0; j < TN; j += 4)
                FLOAT4(r_b[j]) = FLOAT4(s_b[cur_use][k][sb_col_start + j]);
#pragma unroll
            for (int i = 0; i < TM; ++i)
#pragma unroll
                for (int j = 0; j < TN; ++j)
                    r_c[i][j] = __fmaf_rn(r_a[i], r_b[j], r_c[i][j]);
        }
        __syncthreads(); // everyone done with cur_use before it becomes next_load
        cur_use ^= 1;
        next_load ^= 1;
    }

    // Drain: compute on the last preloaded tile (no further loads).
#pragma unroll
    for (int k = 0; k < BK; ++k) {
        int sa_row_start = ty * TM;
        int sb_col_start = tx * TN;
#pragma unroll
        for (int i = 0; i < TM; ++i)
            r_a[i] = s_a[cur_use][sa_row_start + i][k];
#pragma unroll
        for (int j = 0; j < TN; j += 4)
            FLOAT4(r_b[j]) = FLOAT4(s_b[cur_use][k][sb_col_start + j]);
#pragma unroll
        for (int i = 0; i < TM; ++i)
#pragma unroll
            for (int j = 0; j < TN; ++j)
                r_c[i][j] = __fmaf_rn(r_a[i], r_b[j], r_c[i][j]);
    }

    // Write back the TM x TN tile; guard the float4 tail so a vector store
    // never crosses the row boundary at N.
#pragma unroll
    for (int i = 0; i < TM; ++i) {
        int y = by + ty * TM + i;
        if (y >= M)
            continue;
#pragma unroll
        for (int j = 0; j < TN; j += 4) {
            int x = bx + tx * TN + j;
            if (x + 3 < N)
                FLOAT4(C[y * N + x]) = FLOAT4(r_c[i][j]);
            else
                for (int t = 0; t < 4 && x + t < N; ++t)
                    C[y * N + x + t] = r_c[i][j + t];
        }
    }
}

/*
 * Padding costs extra smem, so mitigate the bank conflicts with swizzling
 * instead: remap the smem layout so conflicting accesses land in different
 * banks. Unlike the usual scheme, the ROW index is swizzled (not the column)
 * so that swizzled data can still be accessed with float4.
 * Drawbacks: 1. extra index arithmetic; 2. a good swizzle function is hard to find.
 *
 * NOTE(review): float4 global accesses assume rows of A/B/C are 16-byte
 * aligned, i.e. K and N multiples of 4 — confirm at the call site.
 *
 * Fixes vs. the earlier revision:
 *   - The float4 store of C was guarded only by `x < N`, letting a 4-wide
 *     store run past the end of a row when N - x < 4 (OOB write). The tail is
 *     now stored element-wise.
 *   - Dropped the redundant memset of r_a/r_b and the no-op trailing
 *     __syncthreads() before the epilogue store.
 */
// BM = BN = 64, BK = 32, TM = TN = 4, TK = 1
template <const int BM, const int BN, const int BK, const int TM, const int TN, const int TK>
__global__ void sgemm_v7(float *A, float *B, float *C, int M, int N, int K) {
    SHARED_ALIEN16 float s_a[2][BM][BK];
    SHARED_ALIEN16 float s_b[2][BK][BN];
    float r_a[TM], r_b[TN]; // register fragments of A / B
    float r_c[TM][TN];      // TM x TN accumulator tile
    memset(r_c, 0, sizeof(r_c));

    const int tx = threadIdx.x, ty = threadIdx.y;
    const int tid = ty * blockDim.x + tx;
    const int total_threads = blockDim.x * blockDim.y;
    const int total_loads_a = (BM * BK) / 4; // float4 transfers per A tile
    const int total_loads_b = (BK * BN) / 4; // float4 transfers per B tile

    const int bx = blockIdx.x * BN;
    const int by = blockIdx.y * BM;

    int cur_use = 0, next_load = 1; // buffer being consumed / being filled

    // Preload the first K-tile of A into buffer 0 (rows swizzled, zero-padded).
    for (int load_offs = tid; load_offs < total_loads_a; load_offs += total_threads) {
        int row_smem_a = load_offs / (BK / 4);
        int col_smem_a = (load_offs % (BK / 4)) * 4;
        int row_A = by + row_smem_a;
        int col_A = col_smem_a;

        int swizzled_row = SWIZZLE_FLOAT4(row_smem_a, col_smem_a);

        if (row_A < M && col_A + 3 < K)
            FLOAT4(s_a[0][swizzled_row][col_smem_a]) = FLOAT4(A[row_A * K + col_A]);
        else {
#pragma unroll
            for (int t = 0; t < 4; ++t)
                s_a[0][swizzled_row][col_smem_a + t] = (row_A < M && col_A + t < K) ? A[row_A * K + col_A + t] : 0.0f;
        }
    }

    // Preload the first K-tile of B into buffer 0.
    for (int load_offs = tid; load_offs < total_loads_b; load_offs += total_threads) {
        int row_smem_b = load_offs / (BN / 4);
        int col_smem_b = (load_offs % (BN / 4)) * 4;
        int row_B = row_smem_b;
        int col_B = bx + col_smem_b;

        int swizzled_row = SWIZZLE_FLOAT4(row_smem_b, col_smem_b);

        if (row_B < K && col_B + 3 < N)
            FLOAT4(s_b[0][swizzled_row][col_smem_b]) = FLOAT4(B[row_B * N + col_B]);
        else {
#pragma unroll
            for (int t = 0; t < 4; ++t)
                s_b[0][swizzled_row][col_smem_b + t] = (row_B < K && col_B + t < N) ? B[row_B * N + col_B + t] : 0.0f;
        }
    }
    __syncthreads();

    for (int offs = BK; offs < K; offs += BK) {
        // Fill the spare buffer with the next K-tile while computing on the
        // current one (no barrier needed in between: disjoint buffers).
        for (int load_offs = tid; load_offs < total_loads_a; load_offs += total_threads) {
            int row_smem_a = load_offs / (BK / 4);
            int col_smem_a = (load_offs % (BK / 4)) * 4;
            int row_A = by + row_smem_a;
            int col_A = offs + col_smem_a;

            int swizzled_row = SWIZZLE_FLOAT4(row_smem_a, col_smem_a);

            if (row_A < M && col_A + 3 < K)
                FLOAT4(s_a[next_load][swizzled_row][col_smem_a]) = FLOAT4(A[row_A * K + col_A]);
            else {
#pragma unroll
                for (int t = 0; t < 4; ++t)
                    s_a[next_load][swizzled_row][col_smem_a + t] = (row_A < M && col_A + t < K) ? A[row_A * K + col_A + t] : 0.0f;
            }
        }

        for (int load_offs = tid; load_offs < total_loads_b; load_offs += total_threads) {
            int row_smem_b = load_offs / (BN / 4);
            int col_smem_b = (load_offs % (BN / 4)) * 4;
            int row_B = offs + row_smem_b;
            int col_B = bx + col_smem_b;

            int swizzled_row = SWIZZLE_FLOAT4(row_smem_b, col_smem_b);

            if (row_B < K && col_B + 3 < N)
                FLOAT4(s_b[next_load][swizzled_row][col_smem_b]) = FLOAT4(B[row_B * N + col_B]);
            else {
#pragma unroll
                for (int t = 0; t < 4; ++t)
                    s_b[next_load][swizzled_row][col_smem_b + t] = (row_B < K && col_B + t < N) ? B[row_B * N + col_B + t] : 0.0f;
            }
        }

        // Consume the current buffer, applying the same row swizzle on reads.
#pragma unroll
        for (int k = 0; k < BK; ++k) {
            int sa_row_start = ty * TM;
            int sb_col_start = tx * TN;
#pragma unroll
            for (int i = 0; i < TM; ++i)
                r_a[i] = s_a[cur_use][SWIZZLE_FLOAT4(sa_row_start + i, k)][k];
#pragma unroll
            for (int j = 0; j < TN; j += 4)
                FLOAT4(r_b[j]) = FLOAT4(s_b[cur_use][SWIZZLE_FLOAT4(k, sb_col_start)][sb_col_start + j]);
#pragma unroll
            for (int i = 0; i < TM; ++i)
#pragma unroll
                for (int j = 0; j < TN; ++j)
                    r_c[i][j] = __fmaf_rn(r_a[i], r_b[j], r_c[i][j]);
        }
        __syncthreads(); // everyone done with cur_use before it becomes next_load
        cur_use ^= 1;
        next_load ^= 1;
    }

    // Drain: compute on the last preloaded tile (no further loads).
#pragma unroll
    for (int k = 0; k < BK; ++k) {
        int sa_row_start = ty * TM;
        int sb_col_start = tx * TN;
#pragma unroll
        for (int i = 0; i < TM; ++i)
            r_a[i] = s_a[cur_use][SWIZZLE_FLOAT4(sa_row_start + i, k)][k];
#pragma unroll
        for (int j = 0; j < TN; j += 4)
            FLOAT4(r_b[j]) = FLOAT4(s_b[cur_use][SWIZZLE_FLOAT4(k, sb_col_start)][sb_col_start + j]);
#pragma unroll
        for (int i = 0; i < TM; ++i)
#pragma unroll
            for (int j = 0; j < TN; ++j)
                r_c[i][j] = __fmaf_rn(r_a[i], r_b[j], r_c[i][j]);
    }

    // Write back the TM x TN tile; guard the float4 tail so a vector store
    // never crosses the row boundary at N.
#pragma unroll
    for (int i = 0; i < TM; ++i) {
        int y = by + ty * TM + i;
        if (y >= M)
            continue;
#pragma unroll
        for (int j = 0; j < TN; j += 4) {
            int x = bx + tx * TN + j;
            if (x + 3 < N)
                FLOAT4(C[y * N + x]) = FLOAT4(r_c[i][j]);
            else
                for (int t = 0; t < 4 && x + t < N; ++t)
                    C[y * N + x + t] = r_c[i][j + t];
        }
    }
}

/*
 * Alternative to padding/swizzling for the s_a bank conflicts: change s_a's
 * layout. Reading s_b into r_b is conflict-free, so storing s_a transposed
 * ([BK][BM] instead of [BM][BK]) makes the s_a -> r_a reads conflict-free as
 * well; the first four slots of r_a are reused as a staging buffer for the
 * transpose. Note: the transposed *store* into s_a now incurs the bank
 * conflicts instead.
 *
 * NOTE(review): float4 global accesses assume rows of A/B/C are 16-byte
 * aligned, i.e. K and N multiples of 4 — confirm at the call site.
 *
 * Fixes vs. the earlier revision:
 *   - The float4 store of C was guarded only by `x < N`, letting a 4-wide
 *     store run past the end of a row when N - x < 4 (OOB write). The tail is
 *     now stored element-wise.
 *   - Dropped the redundant memset of r_a/r_b and the no-op trailing
 *     __syncthreads() before the epilogue store.
 */
// BM = BN = 64, BK = 32, TM = TN = 4, TK = 1
template <const int BM, const int BN, const int BK, const int TM, const int TN, const int TK>
__global__ void sgemm_v8(float *A, float *B, float *C, int M, int N, int K) {
    SHARED_ALIEN16 float s_a[2][BK][BM]; // transposed layout: [k][row]
    SHARED_ALIEN16 float s_b[2][BK][BN];
    float r_a[TM], r_b[TN]; // r_a doubles as the 4-float transpose staging buffer
    float r_c[TM][TN];      // TM x TN accumulator tile
    memset(r_c, 0, sizeof(r_c));

    const int tx = threadIdx.x, ty = threadIdx.y;
    const int tid = ty * blockDim.x + tx;
    const int total_threads = blockDim.x * blockDim.y;
    const int total_loads_a = (BM * BK) / 4; // float4 transfers per A tile
    const int total_loads_b = (BK * BN) / 4; // float4 transfers per B tile

    const int bx = blockIdx.x * BN;
    const int by = blockIdx.y * BM;

    int cur_use = 0, next_load = 1; // buffer being consumed / being filled

    // Preload the first K-tile of A into buffer 0, transposing via r_a.
    for (int load_offs = tid; load_offs < total_loads_a; load_offs += total_threads) {
        int row_smem_a = load_offs / (BK / 4);
        int col_smem_a = (load_offs % (BK / 4)) * 4;
        int row_A = by + row_smem_a;
        int col_A = col_smem_a;

        if (row_A < M && col_A + 3 < K)
            FLOAT4(r_a[0]) = FLOAT4(A[row_A * K + col_A]);
        else {
#pragma unroll
            for (int t = 0; t < 4; ++t)
                r_a[t] = (row_A < M && col_A + t < K) ? A[row_A * K + col_A + t] : 0.0f;
        }
#pragma unroll
        for (int t = 0; t < 4; ++t)
            s_a[0][col_smem_a + t][row_smem_a] = r_a[t];
    }

    // Preload the first K-tile of B into buffer 0.
    for (int load_offs = tid; load_offs < total_loads_b; load_offs += total_threads) {
        int row_smem_b = load_offs / (BN / 4);
        int col_smem_b = (load_offs % (BN / 4)) * 4;
        int row_B = row_smem_b;
        int col_B = bx + col_smem_b;

        if (row_B < K && col_B + 3 < N)
            FLOAT4(s_b[0][row_smem_b][col_smem_b]) = FLOAT4(B[row_B * N + col_B]);
        else {
#pragma unroll
            for (int t = 0; t < 4; ++t)
                s_b[0][row_smem_b][col_smem_b + t] = (row_B < K && col_B + t < N) ? B[row_B * N + col_B + t] : 0.0f;
        }
    }
    __syncthreads();

    for (int offs = BK; offs < K; offs += BK) {
        // Fill the spare buffer with the next K-tile while computing on the
        // current one (no barrier needed in between: disjoint buffers).
        for (int load_offs = tid; load_offs < total_loads_a; load_offs += total_threads) {
            int row_smem_a = load_offs / (BK / 4);
            int col_smem_a = (load_offs % (BK / 4)) * 4;
            int row_A = by + row_smem_a;
            int col_A = offs + col_smem_a;

            if (row_A < M && col_A + 3 < K)
                FLOAT4(r_a[0]) = FLOAT4(A[row_A * K + col_A]);
            else {
#pragma unroll
                for (int t = 0; t < 4; ++t)
                    r_a[t] = (row_A < M && col_A + t < K) ? A[row_A * K + col_A + t] : 0.0f;
            }
#pragma unroll
            for (int t = 0; t < 4; ++t)
                s_a[next_load][col_smem_a + t][row_smem_a] = r_a[t];
        }

        for (int load_offs = tid; load_offs < total_loads_b; load_offs += total_threads) {
            int row_smem_b = load_offs / (BN / 4);
            int col_smem_b = (load_offs % (BN / 4)) * 4;
            int row_B = offs + row_smem_b;
            int col_B = bx + col_smem_b;

            if (row_B < K && col_B + 3 < N)
                FLOAT4(s_b[next_load][row_smem_b][col_smem_b]) = FLOAT4(B[row_B * N + col_B]);
            else {
#pragma unroll
                for (int t = 0; t < 4; ++t)
                    s_b[next_load][row_smem_b][col_smem_b + t] = (row_B < K && col_B + t < N) ? B[row_B * N + col_B + t] : 0.0f;
            }
        }

        // Consume the current buffer; both fragment reads are now float4.
#pragma unroll
        for (int k = 0; k < BK; ++k) {
            int sa_row_start = ty * TM;
            int sb_col_start = tx * TN;
#pragma unroll
            for (int i = 0; i < TM; i += 4)
                FLOAT4(r_a[i]) = FLOAT4(s_a[cur_use][k][sa_row_start + i]);
#pragma unroll
            for (int j = 0; j < TN; j += 4)
                FLOAT4(r_b[j]) = FLOAT4(s_b[cur_use][k][sb_col_start + j]);
#pragma unroll
            for (int i = 0; i < TM; ++i)
#pragma unroll
                for (int j = 0; j < TN; ++j)
                    r_c[i][j] = __fmaf_rn(r_a[i], r_b[j], r_c[i][j]);
        }
        __syncthreads(); // everyone done with cur_use before it becomes next_load
        cur_use ^= 1;
        next_load ^= 1;
    }

    // Drain: compute on the last preloaded tile (no further loads).
#pragma unroll
    for (int k = 0; k < BK; ++k) {
        int sa_row_start = ty * TM;
        int sb_col_start = tx * TN;
#pragma unroll
        for (int i = 0; i < TM; i += 4)
            FLOAT4(r_a[i]) = FLOAT4(s_a[cur_use][k][sa_row_start + i]);
#pragma unroll
        for (int j = 0; j < TN; j += 4)
            FLOAT4(r_b[j]) = FLOAT4(s_b[cur_use][k][sb_col_start + j]);
#pragma unroll
        for (int i = 0; i < TM; ++i)
#pragma unroll
            for (int j = 0; j < TN; ++j)
                r_c[i][j] = __fmaf_rn(r_a[i], r_b[j], r_c[i][j]);
    }

    // Write back the TM x TN tile; guard the float4 tail so a vector store
    // never crosses the row boundary at N.
#pragma unroll
    for (int i = 0; i < TM; ++i) {
        int y = by + ty * TM + i;
        if (y >= M)
            continue;
#pragma unroll
        for (int j = 0; j < TN; j += 4) {
            int x = bx + tx * TN + j;
            if (x + 3 < N)
                FLOAT4(C[y * N + x]) = FLOAT4(r_c[i][j]);
            else
                for (int t = 0; t < 4 && x + t < N; ++t)
                    C[y * N + x + t] = r_c[i][j + t];
        }
    }
}

/*
 * Compute the launch configuration for a tiled SGEMM kernel and store it in
 * the caller's `block` / `grid` dim3 variables.
 *
 * Each thread produces a TM x TN sub-tile of the BM x BN block tile, so a
 * block needs (BN/TN) x (BM/TM) threads, and the grid tiles the full
 * N x M output. Relies on the caller's M and N being in scope.
 *
 * Wrapped in do { } while (0) so the macro expands to a single statement
 * and stays safe inside un-braced if/else at the call site.
 */
#define UPDATE_GRID_BLOCK(BM, BN, TM, TN) \
    do {                                  \
        block.x = DIV_UP(BN, TN);         \
        block.y = DIV_UP(BM, TM);         \
        grid.x = DIV_UP(N, BN);           \
        grid.y = DIV_UP(M, BM);           \
    } while (0)

/*
 * Benchmark one kernel version: configure the launch, clear the output
 * buffer, time the kernel launch, then copy the result back and report the
 * max deviation from the cuBLAS reference (h_cublas).
 *
 * Relies on the caller's timer, grid/block, d_a/d_b/d_c, h_c, h_cublas,
 * size_C, M, N and K being in scope. cudaGetLastError() catches bad launch
 * configurations immediately after the launch.
 *
 * NOTE(review): the kernel launch is asynchronous — this timing is only
 * accurate if Timer::stop_timer() synchronizes (e.g. via cudaEvent);
 * confirm against the Timer implementation in common.cuh.
 *
 * Wrapped in do { } while (0) so the macro expands to a single statement
 * and stays safe inside un-braced if/else at the call site.
 */
#define WRAPPER(version, BM, BN, BK, TM, TN, TK)                                                                          \
    do {                                                                                                                  \
        UPDATE_GRID_BLOCK(BM, BN, TM, TN);                                                                                \
        CHECK_CUDA_ERROR(cudaMemset(d_c, 0, size_C));                                                                     \
        timer.start_timer();                                                                                              \
        sgemm_##version<BM, BN, BK, TM, TN, TK><<<grid, block>>>(d_a, d_b, d_c, M, N, K);                                 \
        CHECK_CUDA_ERROR(cudaGetLastError());                                                                             \
        timer.stop_timer();                                                                                               \
        CHECK_CUDA_ERROR(cudaMemcpy(h_c, d_c, size_C, cudaMemcpyDeviceToHost));                                           \
        printf(#version "     | time: %10.6f ms | max diff: %10.6f\n", timer.get_time(), max_diff(h_cublas, h_c, M * N)); \
    } while (0)

/*
 * Run one benchmark round for a single problem size:
 *   1. allocate and randomly initialize A (MxK) and B (KxN),
 *   2. compute the reference C with cuBLAS,
 *   3. time every hand-written sgemm_v* kernel and report its max
 *      deviation from the cuBLAS result.
 *
 * All matrices are row-major on the host side. M, N, K are the GEMM
 * dimensions of C[MxN] = A[MxK] * B[KxN].
 */
void one_turn(const int M, const int N, const int K) {
    printf("  ===== M = %d, N = %d, K = %d =====\n", M, N, K);

    // Byte counts as size_t: an int would overflow once M*N*sizeof(float)
    // exceeds 2^31 (e.g. M = N = 16384 and beyond).
    const size_t size_A = (size_t)M * K * sizeof(float);
    const size_t size_B = (size_t)K * N * sizeof(float);
    const size_t size_C = (size_t)M * N * sizeof(float);
    const float alpha = 1.0f;
    const float beta = 0.0f;

    Timer timer;
    dim3 block, grid;
    cublasHandle_t handle;
    CHECK_CUBLAS_ERROR(cublasCreate(&handle));

    float *h_a = (float *)malloc(size_A);
    float *h_b = (float *)malloc(size_B);
    float *h_c = (float *)malloc(size_C);
    float *h_cublas = (float *)malloc(size_C);
    if (!h_a || !h_b || !h_c || !h_cublas) {
        fprintf(stderr, "host allocation failed for M=%d N=%d K=%d\n", M, N, K);
        exit(EXIT_FAILURE);
    }
    float *d_a, *d_b, *d_c;
    CHECK_CUDA_ERROR(cudaMalloc(&d_a, size_A));
    CHECK_CUDA_ERROR(cudaMalloc(&d_b, size_B));
    CHECK_CUDA_ERROR(cudaMalloc(&d_c, size_C));

    random_init(h_a, M * K);
    random_init(h_b, K * N);
    memset(h_c, 0, size_C);
    memset(h_cublas, 0, size_C);

    CHECK_CUDA_ERROR(cudaMemcpy(d_a, h_a, size_A, cudaMemcpyHostToDevice));
    CHECK_CUDA_ERROR(cudaMemcpy(d_b, h_b, size_B, cudaMemcpyHostToDevice));
    CHECK_CUDA_ERROR(cudaMemset(d_c, 0, size_C));

    // cuBLAS is column-major; computing B*A with swapped dimensions (n, m, k)
    // yields C^T in column-major, i.e. C = A*B in our row-major layout.
    timer.start_timer();
    CHECK_CUBLAS_ERROR(cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N,
                                   N, M, K,
                                   &alpha,
                                   d_b, N,
                                   d_a, K,
                                   &beta,
                                   d_c, N));
    timer.stop_timer();
    CHECK_CUDA_ERROR(cudaMemcpy(h_cublas, d_c, size_C, cudaMemcpyDeviceToHost));
    printf("cublas | time: %10.6f ms | max diff: N/A\n", timer.get_time());

    // Template arguments: BM, BN, BK, TM, TN, TK (block tile / thread tile).
    WRAPPER(v1, 16, 16, 1, 1, 1, 1);
    WRAPPER(v2, 16, 16, 16, 1, 1, 1);
    WRAPPER(v3, 16, 16, 64, 1, 1, 1);
    WRAPPER(v4, 64, 64, 64, 4, 4, 1);
    WRAPPER(v5, 64, 64, 32, 4, 4, 1);
    WRAPPER(v6, 64, 64, 32, 4, 4, 1);
    WRAPPER(v7, 64, 64, 32, 4, 4, 1);
    WRAPPER(v8, 64, 64, 32, 4, 4, 1);

    // Check the destroy call too, for consistency with every other
    // cuBLAS call in this function.
    CHECK_CUBLAS_ERROR(cublasDestroy(handle));
    free(h_a);
    free(h_b);
    free(h_c);
    free(h_cublas);
    CHECK_CUDA_ERROR(cudaFree(d_a));
    CHECK_CUDA_ERROR(cudaFree(d_b));
    CHECK_CUDA_ERROR(cudaFree(d_c));
}

/*
 * Entry point: warm up the device once, then run one timed benchmark
 * round per configured (M, N, K) problem size. K stays fixed at 1024
 * while M and N scale together from 256 up to 8192.
 */
int main(int argc, char const *argv[]) {
    struct Shape {
        int m, n, k;
    };
    const Shape shapes[] = {
        {256, 256, 1024},
        {512, 512, 1024},
        {1024, 1024, 1024},
        {2048, 2048, 1024},
        {4096, 4096, 1024},
        {8192, 8192, 1024},
    };
    const int n_groups = (int)(sizeof(shapes) / sizeof(shapes[0]));

    warmup();
    for (int g = 0; g < n_groups; ++g)
        one_turn(shapes[g].m, shapes[g].n, shapes[g].k);
    return 0;
}
