#include <assert.h>
#include <stdlib.h>

// CUDA runtime
#include "helper.h"
#include <cublas_v2.h>
#include <cuda_runtime.h>

#define ROW_MAJOR(row, col, ld) ((row) * (ld) + (col))
#define FLOAT4(pointer) (reinterpret_cast<float4*>(&(pointer))[0])
#define CEIL_DIV(x, y) (((x) + (y) - 1) / (y))

constexpr int kWarpSize = 32;

constexpr int BM = 64;
constexpr int BN = 128;
constexpr int BK = 16;
constexpr int TM = BM / 8;
constexpr int TN = BN / 16;
constexpr int kBlockDim = (BM / TM) * (BN / TN);
static_assert(kBlockDim >= 4 * kWarpSize, "At least 4 warps per block are required for performance");
static_assert(BM * BK >= 4 * kBlockDim, "Each thread needs at least 4 float to load A");
static_assert(BN * BK >= 4 * kBlockDim, "Each thread needs at least 4 float to load B");

constexpr int kThreadPerBK = BK / 4; // Align4
constexpr int kThreadPerBN = BN / 4; // Align4
constexpr int kLoadAIterM = CEIL_DIV(BM * BK, 4 * kBlockDim);  // Number of iterations required to load one block of A
constexpr int kLoadBIterK = CEIL_DIV(BK * BN, 4 * kBlockDim);  // Number of iterations required to load one block of B
// constexpr int kStoreCIterM = CEIL_DIV(BM * BN, 4 * kBlockDim);  // Number of iterations required to load one block of A
constexpr int kLoadAStepM = CEIL_DIV(4 * kBlockDim, BK);  // Step on M for loading A
constexpr int kLoadBStepK = CEIL_DIV(4 * kBlockDim, BN);  // Step on K for loading B
// constexpr int kStoreCStepM = CEIL_DIV(4 * kBlockDim, BN);  // Number of iterations required to load one block of C

__device__ __forceinline__ 
void loadGlobal_align4(int K, int N, float *a, float *b, float s_a[2][BM][BK], float s_b[2][BK][BN], int bk) {
    // Cooperatively stage the bk-th K-tile: a BM x BK tile of A and a
    // BK x BN tile of B (both row-major in global memory) into the
    // double-buffer slot selected by the parity of bk.
    // Thread layout over the A tile: ((kLoadAIterM, BM/kLoadAIterM), (BK/4, 4)).
    // Preconditions (not checked): tiles fully in-bounds, K and N multiples
    // of 4 so every float4 access is 16-byte aligned.
    const int tid = threadIdx.y * blockDim.x + threadIdx.x;
    const int buf = bk & 1;  // which half of the double buffer to fill

    // Per-thread coordinates inside the shared-memory tiles (float4 granularity).
    const int a_row = tid / kThreadPerBK;
    const int a_col = (tid % kThreadPerBK) * 4;
    const int b_row = tid / kThreadPerBN;
    const int b_col = (tid % kThreadPerBN) * 4;

    // Matching coordinates in global A and B for this block / K-tile.
    const int a_gmem_row = blockIdx.y * BM + a_row;
    const int a_gmem_col = bk * BK + a_col;
    const int b_gmem_row = bk * BK + b_row;
    const int b_gmem_col = blockIdx.x * BN + b_col;

    #pragma unroll
    for (int it = 0; it < kLoadAIterM; it++) {
        const int off = it * kLoadAStepM;  // row stride covered per iteration
        FLOAT4(s_a[buf][a_row + off][a_col]) =
            FLOAT4(a[ROW_MAJOR(a_gmem_row + off, a_gmem_col, K)]);
    }
    #pragma unroll
    for (int it = 0; it < kLoadBIterK; it++) {
        const int off = it * kLoadBStepK;  // k-row stride covered per iteration
        FLOAT4(s_b[buf][b_row + off][b_col]) =
            FLOAT4(b[ROW_MAJOR(b_gmem_row + off, b_gmem_col, N)]);
    }
}

//   dim3 block(BN/TN, BM/TM);
//   dim3 grid(CEIL_DIV(n, BN), CEIL_DIV(m, BM));

// Global a shape:  [(m/(BM), BM), (k/BK, BK/4, 4)]
// Global a stride: [(  k*BM, k ), (BK  , 4 , 1)]
// Align4 for a, b, c to use float4 load/store

// Double-buffered SGEMM: C[M,N] += A[M,K] * B[K,N] tiles, all row-major
// (accumulators start at zero, so effectively C = A * B).
//
// Launch configuration (see SGEMM_GPU):
//   dim3 block(BN/TN, BM/TM);
//   dim3 grid(CEIL_DIV(n, BN), CEIL_DIV(m, BM));
//
// Each block computes one BM x BN tile of C; each thread accumulates a
// TM x TN sub-tile in registers. Shared memory holds TWO buffers per
// operand so the global loads for K-tile bk+1 overlap the FMAs on tile bk.
//
// Preconditions (not checked on device): M % BM == 0, N % BN == 0,
// K % BK == 0, and a/b/c 16-byte aligned for the float4 accesses.
__global__ void sgemm_double_buffer(const int M, const int N, const int K,
    float * __restrict__ a, float * __restrict__ b, float * __restrict__ c) {

    const int bx = blockIdx.x;
    const int by = blockIdx.y;
    const int tx = threadIdx.x;
    const int ty = threadIdx.y;

    __shared__ float s_a[2][BM][BK];
    __shared__ float s_b[2][BK][BN];

    // Per-thread register accumulator for its TM x TN patch of C.
    float r_c[TM][TN] = {0.0f};

    const int num_bk = CEIL_DIV(K, BK);

    // Prologue: stage the first K-tile into buffer 0.
    loadGlobal_align4(K, N, a, b, s_a, s_b, 0);
    __syncthreads();

    // 2-stage pipeline. Issue the global loads for tile bk+1 (into the other
    // buffer) BEFORE computing on tile bk, so memory latency hides behind the
    // FMAs instead of being serialized after them. This also folds the former
    // duplicated "last tile" compute loop into the main loop.
    for (int bk = 0; bk < num_bk; bk++) {
        if (bk + 1 < num_bk) {
            // Writes go to buffer (bk+1)&1 while everyone reads bk&1 — no race.
            loadGlobal_align4(K, N, a, b, s_a, s_b, bk + 1);
        }

        const int smem_sel = bk & 1;  // buffer holding the current K-tile
        #pragma unroll
        for (int tk = 0; tk < BK; tk++) {
            #pragma unroll
            for (int tm = 0; tm < TM; tm++) {
                #pragma unroll
                for (int tn = 0; tn < TN; tn++) {
                    int comp_a_smem_m = ty * TM + tm;
                    int comp_b_smem_n = tx * TN + tn;
                    r_c[tm][tn] += s_a[smem_sel][comp_a_smem_m][tk] * s_b[smem_sel][tk][comp_b_smem_n];
                }
            }
        }

        // Barrier: the prefetch into buffer (bk+1)&1 must be complete, and all
        // threads done reading buffer bk&1, before the next iteration.
        __syncthreads();
    }

    // Epilogue: write the register tile back to C with float4 stores
    // (TN is a multiple of 4, so j steps by 4 cover the row exactly).
    #pragma unroll
    for (int i = 0; i < TM; i++) {
        int store_c_gmem_m = by * BM + ty * TM + i;
        #pragma unroll
        for (int j = 0; j < TN; j += 4) {
            int store_c_gmem_n = bx * BN + tx * TN + j;
            int store_c_gmem_addr = ROW_MAJOR(store_c_gmem_m, store_c_gmem_n, N);
            FLOAT4(c[store_c_gmem_addr]) = FLOAT4(r_c[i][j]);
        }
    }
}

// Host-side launcher for sgemm_double_buffer.
// `handle` is unused by the hand-written kernel; it is kept so the signature
// matches cuBLAS-based SGEMM paths.
// The kernel has no tail handling: every dimension must be an exact multiple
// of its tile size. The asserts enforce this in debug (non-NDEBUG) builds.
void SGEMM_GPU(cublasHandle_t handle, int m, int n, int k, float *d_A, float *d_B, float *d_C) {
  (void)handle;
  assert(m % BM == 0 && "m must be a multiple of BM");
  assert(n % BN == 0 && "n must be a multiple of BN");
  assert(k % BK == 0 && "k must be a multiple of BK");

  dim3 block(BN / TN, BM / TM);
  // Bug fix: `ceil(n/BN)` performed integer division first, making the ceil a
  // no-op (and silently under-launching for non-multiple sizes). Use the
  // integer ceil-div macro on the raw operands instead.
  dim3 grid(CEIL_DIV(n, BN), CEIL_DIV(m, BM));

  sgemm_double_buffer<<<grid, block>>>(m, n, k, d_A, d_B, d_C);
}
