#include <cassert>

#include <cuda_runtime.h>

template <const int BM, const int BN, const int BK, const int TM, const int TN>
__device__ inline void compute_rc(float* As, float* Bs, float* regA, float* regB, float* regC) {
    // Accumulate this thread's TM x TN tile of C from the shared tiles.
    // `As` points at the thread's first row of the BM x BK tile (its TM rows
    // are interleaved with stride BM / TM); `Bs` points at the thread's first
    // column of the BK x BN tile. The caller guarantees the Bs offsets are
    // multiples of 4 floats, so the float4 loads below are 16-byte aligned.
    for (uint kk = 0; kk < BK; kk++) {
        // Stage one column of As: the TM interleaved rows owned by this thread.
        for (uint m = 0; m < TM; m++) {
            regA[m] = As[m * (BM / TM) * BK + kk];
        }
        // Stage one row slice of Bs, four floats at a time.
        for (uint n = 0; n < TN; n += 4) {
            *reinterpret_cast<float4*>(&regB[n]) = *reinterpret_cast<float4*>(&Bs[kk * BN + n]);
        }
        // Rank-1 update: regC += regA (TM x 1) * regB (1 x TN).
        for (uint m = 0; m < TM; m++) {
            for (uint n = 0; n < TN; n++) {
                regC[m * TN + n] += regA[m] * regB[n];
            }
        }
    }
}

template <const int BM, const int BN, const int BK, const int TM, const int TN>
__global__ void sgemm_BM_BN_BK_TM_TN_vertorize(int M, int N, int K, float alpha,
                                               const float *A, const float *B,
                                               float beta, float *C) {
    // Tiled SGEMM: C = alpha * A @ B + beta * C, row-major A (M,K), B (K,N), C (M,N).
    // Each block computes a BM x BN tile of C; each thread computes TM x TN results,
    // so the expected launch is blockDim.x == (BM * BN) / (TM * TN) with
    // gridDim = (ceil(M / BM), ceil(N / BN)). Global loads/stores use float4 when
    // the relevant leading dimension is a multiple of 4 (pointers from cudaMalloc
    // are 16-byte aligned, and all vector offsets below are 4-float multiples).
    assert(BM % TM == 0);
    assert(BN % TN == 0);
    assert(BK % 4 == 0);
    assert(BN % 4 == 0);
    assert(TN % 4 == 0);
    const uint blockRow = blockIdx.x * BM;
    const uint blockCol = blockIdx.y * BN;
    // Three thread mappings: one for loading As, one for loading Bs, one for computing C.
    const uint threadColA = threadIdx.x % (BK / 4);        // BK / 4 float4 loads cover one row of As
    const uint threadRowA = threadIdx.x / (BK / 4);
    const uint threadColB = threadIdx.x % (BN / 4);        // BN / 4 float4 loads cover one row of Bs
    const uint threadRowB = threadIdx.x / (BN / 4);
    const uint threadColC = threadIdx.x % (BN / TN);
    const uint threadRowC = threadIdx.x / (BN / TN);

    const uint threadsNum = (BM * BN) / (TM * TN);
    assert((BM * BK) % threadsNum == 0);
    assert((BK * BN) % threadsNum == 0);
    const uint loadItemsPerThreadsA = (BM * BK) / threadsNum;
    const uint loadItemsPerThreadsB = (BK * BN) / threadsNum;
    assert(loadItemsPerThreadsA % 4 == 0);
    assert(loadItemsPerThreadsB % 4 == 0);

    // Rows of the shared tile covered by the whole block in one load iteration:
    // strideA == 4 * threadsNum / BK, strideB == 4 * threadsNum / BN.
    const uint strideA = BM / loadItemsPerThreadsA * 4;
    const uint strideB = BK / loadItemsPerThreadsB * 4;

    __shared__ float As[BM * BK];       // shape (BM, BK)
    __shared__ float Bs[BK * BN];       // shape (BK, BN)
    float regA[TM] = {0.f};
    float regB[TN] = {0.f};
    float regC[TM * TN] = {0.f};        // per-thread accumulator tile

    // Advance base pointers to this thread's first element in each role.
    A += (blockRow + threadRowA) * K + threadColA * 4;                 // (blockRow + threadRowA, threadColA * 4)
    B += threadRowB * N + blockCol + threadColB * 4;                   // (threadRowB, blockCol + threadColB * 4)
    C += (blockRow + threadRowC) * N + blockCol + threadColC * TN;     // (blockRow + threadRowC, blockCol + threadColC * TN)

    uint colOffsetA = threadColA * 4;                 // current K-column of this thread's A load
    const uint colOffsetB = blockCol + threadColB * 4; // N-column of this thread's B load (fixed)
    for (int k = 0; k < K; k += BK) {
        // ---- Load the BM x BK tile of A into shared memory (zero-padded at edges).
        for (uint i = 0; i < BM; i += strideA) {
            if (K % 4 == 0) {
                // Vector path: colOffsetA and K are both multiples of 4, so one
                // in-bounds check covers all four elements.
                float4& tmp = reinterpret_cast<float4*>(As + (i + threadRowA) * BK + threadColA * 4)[0];
                if (blockRow + threadRowA + i < M && colOffsetA < K) {
                    tmp = reinterpret_cast<const float4*>(A + i * K)[0];
                } else {
                    // Fix: the original zeroed .y twice and left .z uninitialized,
                    // feeding garbage into boundary tiles.
                    tmp = make_float4(0.f, 0.f, 0.f, 0.f);
                }
            } else {
                // Scalar path with per-element column check.
                for (uint j = 0; j < 4; j ++) {
                    if (blockRow + threadRowA + i < M && colOffsetA + j < K) {
                        As[(i + threadRowA) * BK + threadColA * 4 + j] = A[i * K + j];
                    } else {
                        As[(i + threadRowA) * BK + threadColA * 4 + j] = 0.f;
                    }
                }
            }
        }
        // ---- Load the BK x BN tile of B into shared memory (zero-padded at edges).
        for (uint i = 0; i < BK; i += strideB) {
            if (N % 4 == 0) {
                // colOffsetB and N are both multiples of 4 here, so one check suffices.
                float4& tmp = reinterpret_cast<float4*>(Bs + (i + threadRowB) * BN + threadColB * 4)[0];
                if (colOffsetB < N && k + threadRowB + i < K) {
                    tmp = reinterpret_cast<const float4*>(B + i * N)[0];
                } else {
                    tmp = make_float4(0.f, 0.f, 0.f, 0.f);   // same .z fix as above
                }
            } else {
                // Fix: check each column individually; the original used only the
                // base-column check, reading past the row end (wrong row / OOB).
                for (uint j = 0; j < 4; j ++) {
                    if (colOffsetB + j < N && k + threadRowB + i < K) {
                        Bs[(i + threadRowB) * BN + threadColB * 4 + j] = B[i * N + j];
                    } else {
                        Bs[(i + threadRowB) * BN + threadColB * 4 + j] = 0.f;
                    }
                }
            }
        }

        __syncthreads();    // tiles fully staged before anyone computes
        A += BK;            // next tile to the right
        B += BK * N;        // next tile downward
        colOffsetA += BK;

        compute_rc<BM, BN, BK, TM, TN>(As + threadRowC * BK, Bs + threadColC * TN, regA, regB, regC);
        __syncthreads();    // computation done before the tiles are overwritten
    }

    // ---- Write back alpha * regC + beta * C with full bounds checking.
    uint rowOffset = blockRow + threadRowC;
    const uint colBase = blockCol + threadColC * TN;   // first C column owned by this thread
    for (uint i = 0; i < TM; i ++) {
        if (rowOffset < M) {
            for (uint j = 0; j < TN; j += 4) {
                // Fix: the original checked only the first column of the TN-wide
                // tile and always stored a float4 — OOB/misaligned when N % 4 != 0
                // or when the matrix edge cuts inside the thread tile.
                if (N % 4 == 0 && colBase + j + 4 <= N) {
                    float4 tmp = reinterpret_cast<float4*>(C + j)[0];
                    tmp.x = alpha * regC[i * TN + j + 0] + beta * tmp.x;
                    tmp.y = alpha * regC[i * TN + j + 1] + beta * tmp.y;
                    tmp.z = alpha * regC[i * TN + j + 2] + beta * tmp.z;
                    tmp.w = alpha * regC[i * TN + j + 3] + beta * tmp.w;
                    reinterpret_cast<float4*>(C + j)[0] = tmp;
                } else {
                    // Scalar fallback for the ragged edge.
                    for (uint jj = j; jj < j + 4; jj ++) {
                        if (colBase + jj < N) {
                            C[jj] = alpha * regC[i * TN + jj] + beta * C[jj];
                        }
                    }
                }
            }
        }
        rowOffset += BM / TM;     // this thread's rows are interleaved with stride BM / TM
        C += (BM / TM) * N;
    }
}