#include <cuda_runtime.h>


/**
 * SGEMM with shared-memory tiling: C = alpha * (A @ B) + beta * C.
 *
 * A is M x K, B is K x N, C is M x N, all row-major in global memory.
 *
 * Launch-configuration preconditions:
 *   - 1-D block of exactly BLOCK_SIZE * BLOCK_SIZE threads
 *     (threadIdx.x is decoded as row = x / BLOCK_SIZE, col = x % BLOCK_SIZE)
 *   - gridDim.x = ceil(M / BLOCK_SIZE)   (tile-rows of C)
 *   - gridDim.y = ceil(N / BLOCK_SIZE)   (tile-cols of C)
 *
 * M, N and K need NOT be multiples of BLOCK_SIZE: out-of-range rows/cols
 * are handled with branchless 0/1 masks (so every thread still reaches
 * each __syncthreads()), and a leftover K-tile is processed after the
 * main loop with zero padding in shared memory.
 */
template <const int BLOCK_SIZE>
__global__ void sgemm_shared_mem_block(int M, int N, int K, float alpha,
                                       const float *A, const float *B,
                                       float beta, float *C) {
    // This thread's position inside the BLOCK_SIZE x BLOCK_SIZE output tile.
    const uint threadCol = threadIdx.x % BLOCK_SIZE;
    const uint threadRow = threadIdx.x / BLOCK_SIZE;

    // One tile of A and one tile of B, cooperatively staged per iteration.
    __shared__ float As[BLOCK_SIZE * BLOCK_SIZE];
    __shared__ float Bs[BLOCK_SIZE * BLOCK_SIZE];

    // Advance base pointers to this block's first tile.
    A += blockIdx.x * BLOCK_SIZE * K;   // A: down to this row-tile
    B += blockIdx.y * BLOCK_SIZE;       // B: right to this col-tile
    C += blockIdx.x * BLOCK_SIZE * N + blockIdx.y * BLOCK_SIZE;

    float sum = 0.0f;
    // Branchless bounds masks: 1.0f when this thread's C row/col is inside
    // the matrix, 0.0f otherwise. Masked-out threads still perform a load
    // (from the safe in-bounds offset 0) so the block reaches every
    // __syncthreads() uniformly; the mask zeroes the staged value.
    const float A_multiplier = blockIdx.x * BLOCK_SIZE + threadRow < M ? 1.0f : 0.0f;
    const float B_multiplier = blockIdx.y * BLOCK_SIZE + threadCol < N ? 1.0f : 0.0f;
    const uint A_idx = A_multiplier > 0.0f ? threadRow * K + threadCol : 0;
    const uint B_idx = B_multiplier > 0.0f ? threadRow * N + threadCol : 0;

    // Main loop over the full BLOCK_SIZE-wide slices of the K dimension.
    for (int k = 0; k < K / BLOCK_SIZE; k++) {
        As[threadIdx.x] = A[A_idx] * A_multiplier;
        Bs[threadIdx.x] = B[B_idx] * B_multiplier;

        __syncthreads();        // tiles fully staged before anyone reads them
        A += BLOCK_SIZE;        // A moves right by one tile
        B += BLOCK_SIZE * N;    // B moves down by one tile

        for (int idx = 0; idx < BLOCK_SIZE; idx++) {
            sum += As[threadRow * BLOCK_SIZE + idx] * Bs[idx * BLOCK_SIZE + threadCol];
        }
        __syncthreads();        // all reads done before the next overwrite
    }

    // Leftover slice when K is not a multiple of BLOCK_SIZE. `remainder`
    // is uniform across the block, so the barriers below are safe. The
    // out-of-range part of each tile is padded with zeros so the inner
    // product over a full BLOCK_SIZE remains correct.
    const uint remainder = K % BLOCK_SIZE;
    if (remainder > 0) {
        As[threadIdx.x] = threadCol < remainder ? A[A_idx] * A_multiplier : 0.0f;
        Bs[threadIdx.x] = threadRow < remainder ? B[B_idx] * B_multiplier : 0.0f;

        __syncthreads();
        for (int idx = 0; idx < BLOCK_SIZE; idx++) {
            sum += As[threadRow * BLOCK_SIZE + idx] * Bs[idx * BLOCK_SIZE + threadCol];
        }
    }

    // Only in-bounds threads write their C element. Note B_idx equals
    // threadRow * N + threadCol exactly when both masks are nonzero, so
    // it doubles as the offset into this block's C tile here.
    if (A_multiplier * B_multiplier > 0.0f) {
        C[B_idx] = alpha * sum + beta * C[B_idx];
    }
}