#define WMMA_M 16
#define WMMA_N 16
#define WMMA_K 16

#define WARP_SIZE 32
// Ceiling division. Each macro argument is parenthesized so that compound
// expressions (e.g. `div_ceil(n, a + b)`) expand correctly — the original
// expansion applied `/` to only part of an unparenthesized divisor.
#define div_ceil(dividend, divisor) (((dividend) + (divisor) - 1) / (divisor))
#include <stdexcept>
#include <vector>
#include <cuda_runtime.h>
#include <mma.h>
#include <algorithm>
#include <iostream>
#include <cuda_fp16.h>
#include <iomanip> 
#include <chrono>
using namespace nvcuda;


// Naive tensor-core GEMM: C (M x N, fp32) = A (M x K, fp16, row-major)
//                                         * B (K x N, fp16, column-major).
// Launch layout: blockDim.x == WARP_SIZE (one warp per block); grid.x tiles
// the N dimension, grid.y tiles the M dimension; each warp owns one 16x16
// output tile of C.
// Preconditions: M, N and K must be multiples of 16 — load/store_matrix_sync
// always touch a full 16x16 tile, so partial edge tiles would read and write
// out of bounds. Requires SM70+ (tensor cores).
__global__ void wmmaNaiveKernel(const half *__restrict__ A, const half *__restrict__ B, float *__restrict__ C, size_t M,
                                size_t N, size_t K) {
    const size_t K_tiles = div_ceil(K, WMMA_K);

    // Top-left corner of the output tile owned by this warp.
    const size_t warp_row = blockIdx.y * WMMA_M;
    const size_t warp_col = blockIdx.x * WMMA_N;

    // Bug fix: reject the tile when EITHER coordinate is out of range.
    // The original `&&` only returned when BOTH were out of range, letting
    // partially out-of-range warps fall through to out-of-bounds accesses.
    if (warp_row >= M || warp_col >= N) {
        return;
    }

    wmma::fragment<wmma::accumulator, WMMA_M, WMMA_N, WMMA_K, float> C_frag;
    wmma::fill_fragment(C_frag, 0.0f);

    // March along K one 16-wide tile at a time, accumulating into C_frag.
    // (No `#pragma unroll`: the trip count is a runtime value, so the
    // compiler cannot fully unroll it anyway.)
    for (size_t i = 0; i < K_tiles; ++i) {
        wmma::fragment<wmma::matrix_a, WMMA_M, WMMA_N, WMMA_K, half, wmma::row_major> A_frag;
        wmma::fragment<wmma::matrix_b, WMMA_M, WMMA_N, WMMA_K, half, wmma::col_major> B_frag;

        // A is row-major with leading dimension K: tile (warp_row, i*16).
        // B is column-major with leading dimension K: column warp_col starts
        // at B + warp_col * K, row offset i*16 within it.
        wmma::load_matrix_sync(A_frag, A + warp_row * K + i * WMMA_K, K);
        wmma::load_matrix_sync(B_frag, B + warp_col * K + i * WMMA_K, K);

        wmma::mma_sync(C_frag, A_frag, B_frag, C_frag);
    }

    // C is written row-major with leading dimension N.
    wmma::store_matrix_sync(C + warp_row * N + warp_col, C_frag, N, wmma::mem_row_major);
}

// Host-side launcher for wmmaNaiveKernel.
// One warp (32 threads) per block; each block computes one 16x16 tile of C,
// so the grid covers C with ceil(N/16) x ceil(M/16) blocks.
// A, B, C must be DEVICE pointers; M, N, K must be multiples of 16
// (see the kernel's preconditions). The launch is asynchronous — the caller
// must synchronize before reading C.
// Throws std::runtime_error if the runtime rejects the launch.
void wmmaNaive(half *A, half *B, float *C, size_t M, size_t N, size_t K) {
    dim3 block(WARP_SIZE);
    dim3 grid(div_ceil(N, WMMA_N), div_ceil(M, WMMA_M));

    wmmaNaiveKernel<<<grid, block>>>(A, B, C, M, N, K);

    // Kernel launches do not return errors directly; surface configuration
    // failures (bad grid/block dims, no device, ...) immediately.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        throw std::runtime_error(cudaGetErrorString(err));
    }
}
// Demo driver: builds a size x size fp16 GEMM problem, runs the WMMA kernel,
// prints the fp32 result and the elapsed wall-clock time.
int main(){
    const int size = 64;  // matrix dimensions (size x size); must be a multiple of 16 for WMMA tiles
    std::vector<std::vector<float>> A(size, std::vector<float>(size));
    std::vector<std::vector<float>> B(size, std::vector<float>(size));
    half *d_A, *d_B;
    float *d_C;  // device output buffer (fp32 accumulator)
    int M, N, K;
    M = N = K = size;
    int numElements  = M * K;  // element count of A
    int numElementsB = K * N;  // element count of B
    int numElementsC = M * N;  // element count of C

    // Fail loudly on any CUDA API error instead of silently printing garbage.
    auto cudaCheck = [](cudaError_t err, const char *what) {
        if (err != cudaSuccess) {
            std::cerr << what << ": " << cudaGetErrorString(err) << std::endl;
            throw std::runtime_error(what);
        }
    };

    // Fill matrices with a deterministic pattern (A varies by row, B by column).
    for (int i = 0; i < size; ++i) {
        for (int j = 0; j < size; ++j) {
            A[i][j] = i % 100 * 0.01f;
            B[i][j] = j % 100 * 0.01f;
        }
    }

    auto startWMMA = std::chrono::high_resolution_clock::now();
    half *halfA = new half[numElements];
    half *halfB = new half[numElementsB];
    // A is consumed row-major by the kernel: element (m, k) at halfA[m*K + k].
    for (int i = 0; i < M; i++) {
        for (int j = 0; j < K; j++) {
            halfA[i * K + j] = __float2half(A[i][j]);
        }
    }
    // Bug fix: the kernel declares B_frag as wmma::col_major with leading
    // dimension K, so B must be stored COLUMN-major: element (k, n) at
    // halfB[n*K + k]. The original row-major fill made the program compute
    // A * B^T instead of A * B.
    for (int n = 0; n < N; n++) {
        for (int k = 0; k < K; k++) {
            halfB[n * K + k] = __float2half(B[k][n]);
        }
    }

    cudaCheck(cudaMalloc((void**)&d_A, numElements * sizeof(half)), "cudaMalloc d_A");
    cudaCheck(cudaMalloc((void**)&d_B, numElementsB * sizeof(half)), "cudaMalloc d_B");
    cudaCheck(cudaMalloc((void**)&d_C, numElementsC * sizeof(float)), "cudaMalloc d_C");
    cudaCheck(cudaMemcpy(d_A, halfA, numElements * sizeof(half), cudaMemcpyHostToDevice), "cudaMemcpy A");
    cudaCheck(cudaMemcpy(d_B, halfB, numElementsB * sizeof(half), cudaMemcpyHostToDevice), "cudaMemcpy B");

    wmmaNaive(d_A, d_B, d_C, M, N, K);
    // Wait for the GPU to finish; also surfaces asynchronous kernel errors.
    cudaCheck(cudaDeviceSynchronize(), "cudaDeviceSynchronize");

    float *h_C = new float[numElementsC];
    cudaCheck(cudaMemcpy(h_C, d_C, numElementsC * sizeof(float), cudaMemcpyDeviceToHost), "cudaMemcpy C");
    for (int i = 0; i < numElementsC; i++) {
        std::cout << h_C[i] << " ";
    }
    std::cout << std::endl;

    delete[] h_C;
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    delete[] halfA;
    delete[] halfB;

    auto endWMMA = std::chrono::high_resolution_clock::now();
    std::chrono::duration<double, std::milli> wmmaTime = endWMMA - startWMMA;
    std::cout << "WMMA Time: " << wmmaTime.count() << " ms" << std::endl;
}