#pragma once

#include <cuda_runtime.h>
#include <cuda_fp16.h>
#include <mma.h>

// =============================================================================
// Version 2: Naive WMMA Kernel
// =============================================================================
// - Block tile: 128x128x32
// - Warp tile: 64x64x16
// - WMMA tile: 16x16x16
// - Threads: [32, 2, 2] = 128
// - Direct mapping for shared memory
// - No pipeline, simple synchronous execution
// =============================================================================

namespace v2_naive_wmma {

// Tiling parameters. constexpr (not just const) so the compiler treats them
// as true compile-time constants for fragment array sizes and loop bounds.
constexpr int MI = 128;      // Block tile M (rows of C per thread block)
constexpr int NI = 128;      // Block tile N (cols of C per thread block)
constexpr int KI = 32;       // Block tile K (two 16-wide WMMA K slices per stage)
constexpr int MII = 64;      // Warp tile M (rows of C per warp)
constexpr int NII = 64;      // Warp tile N (cols of C per warp)
constexpr int KII = 16;      // Warp tile K (one WMMA K step)
constexpr int wmmaM = 16;    // WMMA fragment M
constexpr int wmmaN = 16;    // WMMA fragment N
constexpr int wmmaK = 16;    // WMMA fragment K

__device__ void loadSmemA(half *smem, half *A, int M, int K, int ko)
{
    // Stage one 128x32 tile of row-major A into shared memory, swizzled into
    // contiguous 16x16 sub-tiles: sub-tile (r/16, c/16) occupies 256 halves,
    // with KI/16 == 2 sub-tiles per tile-row. This lets WMMA loads use a
    // leading dimension of 16 within each sub-tile.
    const int lane = threadIdx.z * 64 + threadIdx.y * 32 + threadIdx.x;  // 0..127

    // 128 threads x 32 iterations cover all 128*32 elements: each iteration
    // fills one group of 4 rows (lane/32 picks the row, lane%32 the column).
    for (int it = 0; it < 32; ++it)
    {
        const int r = it * 4 + lane / 32;
        const int c = lane % 32;
        const int g_row = blockIdx.y * 128 + r;       // global M index
        const int g_col = ko * KI + c;                // global K index
        const int dst = (r / 16) * (2 * 16 * 16) + (c / 16) * (16 * 16)
                      + (r % 16) * 16 + (c % 16);
        // Zero-fill out-of-range elements so a padded tile contributes
        // nothing to the accumulation.
        smem[dst] = (g_row < M && g_col < K) ? A[g_row * K + g_col]
                                             : __float2half(0.0f);
    }
}

__device__ void loadSmemB(half *smem, half *B, int N, int K, int ko)
{
    // Stage one 128(n) x 32(k) tile of B into shared memory using the same
    // 16x16 sub-tile swizzle as loadSmemA. B is col-major (K x N):
    // logical B[n][k] lives at B[k * N + n].
    const int lane = threadIdx.z * 64 + threadIdx.y * 32 + threadIdx.x;  // 0..127

    for (int it = 0; it < 32; ++it)
    {
        const int r = it * 4 + lane / 32;  // n within the block tile (0-127)
        const int c = lane % 32;           // k within the block tile (0-31)
        const int n = blockIdx.x * 128 + r;
        const int k = ko * KI + c;
        const int dst = (r / 16) * (2 * 16 * 16) + (c / 16) * (16 * 16)
                      + (r % 16) * 16 + (c % 16);
        // Zero-fill out-of-range elements so a padded tile contributes
        // nothing to the accumulation.
        smem[dst] = (n < N && k < K) ? B[k * N + n] : __float2half(0.0f);
    }
}

__device__ void loadFragA(
    nvcuda::wmma::fragment<nvcuda::wmma::matrix_a, wmmaM, wmmaN, wmmaK, half, nvcuda::wmma::row_major> *frag, 
    half *smem, int ki)
{
    // Pull this warp's 64x16 slice of the staged A tile into four 16x16
    // fragments. threadIdx.z selects which 64-row band of the block tile
    // this warp owns; ki selects the 16-wide K slice.
    const int tileCol = (ki * KII) / 16;  // column index in 16x16 sub-tile units
    for (int i = 0; i < 4; ++i)
    {
        const int tileRow = (threadIdx.z * 64 + i * 16) / 16;
        // Sub-tiles are contiguous 256-half blocks, 2 per tile-row, so the
        // leading dimension within one sub-tile is 16.
        nvcuda::wmma::load_matrix_sync(frag[i],
            smem + tileRow * (2 * 16 * 16) + tileCol * (16 * 16), 16);
    }
}

__device__ void loadFragB(
    nvcuda::wmma::fragment<nvcuda::wmma::matrix_b, wmmaM, wmmaN, wmmaK, half, nvcuda::wmma::col_major> *frag, 
    half *smem, int ki)
{
    // Pull this warp's 64x16 slice of the staged B tile into four 16x16
    // fragments. threadIdx.y selects which 64-column (N) band of the block
    // tile this warp owns; ki selects the 16-wide K slice.
    const int tileCol = (ki * KII) / 16;  // column index in 16x16 sub-tile units
    for (int i = 0; i < 4; ++i)
    {
        const int tileRow = (threadIdx.y * 64 + i * 16) / 16;
        // Same swizzled layout as the A staging buffer: contiguous 256-half
        // sub-tiles, 2 per tile-row, leading dimension 16 inside a sub-tile.
        nvcuda::wmma::load_matrix_sync(frag[i],
            smem + tileRow * (2 * 16 * 16) + tileCol * (16 * 16), 16);
    }
}

__device__ void storeAccum(float *smem, 
    nvcuda::wmma::fragment<nvcuda::wmma::accumulator, wmmaM, wmmaN, wmmaK, float> *accum)
{
    // Write this warp's 64x64 accumulator (16 fragments of 16x16 floats)
    // into the 128x128 row-major result tile in shared memory.
    // threadIdx.z picks the warp's 64-row (M) band, threadIdx.y its
    // 64-column (N) band — matching loadFragA/loadFragB.
    // (Removed unused local `tx` — threadIdx.x is implicit in store_matrix_sync.)
    int ty = threadIdx.y;
    int tz = threadIdx.z;
    
    for (int mii = 0; mii < MII / wmmaM; mii += 1)
    {
        for (int nii = 0; nii < NII / wmmaN; nii += 1)
        {
            int row = tz * 64 + mii * 16;
            int col = ty * 64 + nii * 16;
            // Result tile is plain row-major with leading dimension 128.
            nvcuda::wmma::store_matrix_sync(smem + row * 128 + col, 
                                           accum[mii * (NII / wmmaN) + nii], 
                                           128, 
                                           nvcuda::wmma::mem_row_major);
        }
    }
}

__device__ void storeSmemC(half *C, float *smem, int M, int N)
{
    // Flush the 128x128 float result tile from shared memory to global C,
    // converting to half. Each of the 128 threads owns one column
    // (lane == column), so consecutive threads write consecutive global
    // addresses on every row (coalesced stores).
    const int lane = threadIdx.z * 64 + threadIdx.y * 32 + threadIdx.x;  // 0..127
    const int g_col = blockIdx.x * 128 + lane;

    for (int r = 0; r < 128; ++r)
    {
        const int g_row = blockIdx.y * 128 + r;
        // Guard the ragged edge when M or N is not a multiple of 128.
        if (g_row < M && g_col < N)
        {
            C[g_row * N + g_col] = __float2half(smem[r * 128 + lane]);
        }
    }
}

// Launch configuration
// One thread block per 128x128 output tile; ceil-divide so ragged edges
// are covered. grid.x spans N (blockIdx.x -> columns), grid.y spans M.
inline dim3 get_grid_dim(int M, int N) {
    dim3 grid;
    grid.x = (N + 127) / 128;
    grid.y = (M + 127) / 128;
    return grid;
}

// 128 threads arranged as [32, 2, 2]: x is the lane within a warp,
// while (z, y) selects one of the four warps (z -> M band, y -> N band).
inline dim3 get_block_dim() {
    dim3 block(32u, 2u, 2u);
    return block;
}

// Dynamic shared memory per block. The A+B staging buffers and the 128x128
// float result tile alias the same allocation in the kernel, so size for
// whichever phase is larger.
// NOTE(review): MI * NI * sizeof(float) is 64 KB, above the 48 KB default
// dynamic-shared-memory limit — the launcher presumably opts in via
// cudaFuncSetAttribute(cudaFuncAttributeMaxDynamicSharedMemorySize); verify.
inline int get_smem_size() {
    const int stage_bytes = MI * KI * 2 * sizeof(half);  // SA + SB (half each)
    const int result_bytes = MI * NI * sizeof(float);    // SC
    return (stage_bytes < result_bytes) ? result_bytes : stage_bytes;
}

} // namespace v2_naive_wmma

// Kernel must be outside namespace for proper symbol resolution
__global__ void v2_naive_wmma_matmul(half *A, half *B, half *C, int M, int N, int K)
{
    using namespace v2_naive_wmma;
    
    // Naive WMMA GEMM: C[M,N] = A[M,K] (row-major) * B (col-major, K x N),
    // half inputs, float accumulation, half output.
    // Launch contract: block = [32, 2, 2] (4 warps, see get_block_dim),
    // grid = ceil(N/128) x ceil(M/128) (see get_grid_dim),
    // dynamic shared memory = get_smem_size() bytes.
    // Each block computes one 128x128 tile of C; each warp a 64x64 sub-tile
    // (threadIdx.z selects the M band, threadIdx.y the N band).
    
    // SA/SB (input staging) and SC (result) deliberately alias the same
    // dynamic shared allocation: SC is only written after the main loop's
    // final __syncthreads(), when SA/SB are no longer read.
    extern __shared__ uint8_t shared_storage[];
    half *SA = reinterpret_cast<half *>(shared_storage);
    half *SB = reinterpret_cast<half *>(shared_storage + MI * KI * sizeof(half));
    float *SC = reinterpret_cast<float *>(shared_storage);

    nvcuda::wmma::fragment<nvcuda::wmma::matrix_a, wmmaM, wmmaN, wmmaK, half, nvcuda::wmma::row_major> FragA[MII / wmmaM];
    nvcuda::wmma::fragment<nvcuda::wmma::matrix_b, wmmaM, wmmaN, wmmaK, half, nvcuda::wmma::col_major> FragB[NII / wmmaN];
    nvcuda::wmma::fragment<nvcuda::wmma::accumulator, wmmaM, wmmaN, wmmaK, float> Accum[MII / wmmaM * NII / wmmaN];

    // Zero the accumulators (float literal — 0.0 is a double and forced a
    // narrowing conversion).
    for (int mii = 0; mii < MII / wmmaM; mii += 1)
    {
        for (int nii = 0; nii < NII / wmmaN; nii += 1)
        {
            nvcuda::wmma::fill_fragment(Accum[mii * (NII / wmmaN) + nii], 0.0f);
        }
    }
    
    // Main K loop. Ceil-divide so a trailing partial tile is processed:
    // loadSmemA/loadSmemB zero-fill out-of-range elements, so the padded
    // slice adds nothing. (The previous floor division K / KI silently
    // dropped the last K % KI columns of the reduction.)
    int numKTiles = (K + KI - 1) / KI;
    for (int ko = 0; ko < numKTiles; ko += 1)
    {
        loadSmemA(SA, A, M, K, ko);
        loadSmemB(SB, B, N, K, ko);
        __syncthreads();  // staging complete before any warp reads it
        
        for (int ki = 0; ki < KI / KII; ki += 1)
        {
            // One 64x64x16 MMA step per warp.
            loadFragA(FragA, SA, ki);
            loadFragB(FragB, SB, ki);
            
            for (int mii = 0; mii < MII / wmmaM; mii += 1)
            {
                for (int nii = 0; nii < NII / wmmaN; nii += 1)
                {
                    // 16x16x16 tensor-core multiply-accumulate.
                    nvcuda::wmma::mma_sync(Accum[mii * (NII / wmmaN) + nii], 
                                          FragA[mii], FragB[nii], 
                                          Accum[mii * (NII / wmmaN) + nii]);
                }
            }
        }
        __syncthreads();  // all warps done with SA/SB before restaging
    }
    
    storeAccum(SC, Accum);
    __syncthreads();  // SC fully written before threads read other warps' columns
    storeSmemC(C, SC, M, N);
}
