#include <cuda_runtime.h>


/**
 * Tiled SGEMM: C = alpha * (A @ B) + beta * C, with A (M x K), B (K x N),
 * C (M x N), all row-major in global memory.
 *
 * Expected launch configuration:
 *   grid:  ( ceil(M / BM), ceil(N / BN) )        // blockIdx.x tiles rows, .y tiles cols
 *   block: (BM * BN) / (TM * TN) threads, 1-D
 *
 * Each block computes a BM x BN tile of C; each thread accumulates a TM x TN
 * sub-tile in registers, laid out strided by (BM/TM, BN/TN) across the tile.
 * Shared memory: BM*BK + BK*BN floats (static).
 * Ragged edges (M, N, K not multiples of the tile dims) are handled by
 * zero-padding loads and guarding the final stores.
 */
template <const int BM, const int BN, const int BK, const int TM, const int TN>
__global__ void sgemm_BM_BN_BK_TM_TN(int M, int N, int K, float alpha,
                                                const float *A, const float *B,
                                                float beta, float *C) {
    // Preconditions on the tiling parameters. All operands are template
    // constants, so check them at compile time instead of a runtime device
    // assert (which would also require <cassert>, not included by this file).
    static_assert((BM * BN) % (TM * TN) == 0,
                  "BM*BN must be a multiple of TM*TN (whole number of threads)");
    static_assert((BK * TM * TN) % BN == 0,
                  "As tile (BM*BK) must divide evenly among the threads");
    static_assert((BK * TM * TN) % BM == 0,
                  "Bs tile (BK*BN) must divide evenly among the threads");
    static_assert(BM % TM == 0 && BN % TN == 0,
                  "tile dims must be multiples of the per-thread dims");

    // Each block has BM * BN / (TM * TN) threads; each thread computes
    // TM * TN results.
    const uint blockRow = blockIdx.x * BM;   // first C row owned by this block
    const uint blockCol = blockIdx.y * BN;   // first C column owned by this block

    // Thread coordinates for the cooperative tile loads: As needs BK threads
    // per row, Bs needs BN threads per row.
    const uint threadColA = threadIdx.x % BK;
    const uint threadRowA = threadIdx.x / BK;
    const uint threadColB = threadIdx.x % BN;
    const uint threadRowB = threadIdx.x / BN;
    // Thread coordinates inside the BM x BN output tile (strided layout).
    const uint threadColC = threadIdx.x % (BN / TN);
    const uint threadRowC = threadIdx.x / (BN / TN);

    const uint threadsNum = (BM * BN) / (TM * TN);
    const uint loadItemsPerThreadsA = (BM * BK) / threadsNum;  // As elems per thread
    const uint loadItemsPerThreadsB = (BK * BN) / threadsNum;  // Bs elems per thread
    const uint strideA = BM / loadItemsPerThreadsA;  // row step between a thread's As loads
    const uint strideB = BK / loadItemsPerThreadsB;  // row step between a thread's Bs loads

    __shared__ float As[BM * BK];       // staged tile of A, shape (BM, BK)
    __shared__ float Bs[BK * BN];       // staged tile of B, shape (BK, BN)
    float regA[TM] = {0.0f};            // per-thread column slice of As
    float regB[TN] = {0.0f};            // per-thread row slice of Bs
    float regC[TM * TN] = {0.0f};       // per-thread TM x TN accumulator

    // Shift each base pointer to this thread's first element.
    A += (blockRow + threadRowA) * K + threadColA;                 // (blockRow + threadRowA, threadColA)
    B += threadRowB * N + blockCol + threadColB;                   // (threadRowB, blockCol + threadColB)
    C += (blockRow + threadRowC) * N + blockCol + threadColC;      // (blockRow + threadRowC, blockCol + threadColC)

    // March BK-wide tiles along the K dimension.
    for (int k = 0; k < K; k += BK) {
        // Cooperatively stage the A tile; out-of-range elements become 0 so
        // the FMAs below are no-ops for them (handles ragged M/K edges).
        uint index = threadRowA * BK + threadColA;
        for (uint i = 0; i < BM; i += strideA) {
            if (blockRow + i + threadRowA < M && k + threadColA < K) {
                As[index] = A[i * K];
            } else {
                As[index] = 0;
            }
            index += strideA * BK;
        }
        // Cooperatively stage the B tile with the same zero-padding trick.
        index = threadRowB * BN + threadColB;
        for (uint i = 0; i < BK; i += strideB) {
            if (k + i + threadRowB < K && blockCol + threadColB < N) {
                Bs[index] = B[i * N];
            } else {
                Bs[index] = 0;
            }
            index += strideB * BN;
        }

        __syncthreads();    // both tiles fully written before any thread reads them
        A += BK;            // advance to the next BK-wide slice of A's row
        B += BK * N;        // advance to the next BK-tall slice of B's column

        // Outer-product accumulation over the staged tiles. Reads are strided
        // by BM/TM and BN/TN so consecutive threads touch consecutive shared
        // addresses (original author's bank-conflict-avoidance layout).
        for (uint ibk = 0; ibk < BK; ibk ++) {
            #pragma unroll
            for (uint i = 0; i < TM; i ++) {
                regA[i] = As[(i * (BM / TM) + threadRowC) * BK + ibk];
            }
            #pragma unroll
            for (uint i = 0; i < TN; i ++) {
                regB[i] = Bs[ibk * BN + i * (BN / TN) + threadColC];
            }
            #pragma unroll
            for (uint i = 0; i < TM; i ++) {
                for (uint j = 0; j < TN; j ++) {
                    regC[i * TN + j] += regA[i] * regB[j];
                }
            }
        }

        __syncthreads();    // don't overwrite tiles while others still read them
    }

    // Write back C = alpha * acc + beta * C, guarding the ragged M/N edges.
    uint rowOffset = blockRow + threadRowC;
    for (uint i = 0; i < TM; i ++) {
        uint colOffset = blockCol + threadColC;
        for (uint j = 0; j < TN; j ++) {
            if (rowOffset < M && colOffset < N) {
                C[j * (BN / TN)] = regC[i * TN + j] * alpha + beta * C[j * (BN / TN)];
            }
            colOffset += BN / TN;
        }
        rowOffset += (BM / TM);
        C += (BM / TM) * N;     // jump to this thread's next output row
    }
}