#include <mma.h> 
#include <iostream>
#include "common.h"
using namespace std;
using namespace nvcuda; 
// Error-checking wrapper for CUDA runtime calls: on failure, prints the
// file/line, the numeric error code, and its human-readable string, then
// terminates the process. Wrap every cudaMalloc/cudaMemcpy/etc. call.
// (do/while(0) makes the macro safe inside unbraced if/else statements.)
#define CUDA_CHECK(call) \
    do { \
        cudaError_t error = call; \
        if (error != cudaSuccess) { \
            std::cerr << "CUDA error at " << __FILE__ << ":" << __LINE__ \
                      << " code=" << static_cast<int>(error) << " (" \
                      << cudaGetErrorString(error) << ")\n"; \
            std::exit(EXIT_FAILURE); \
        } \
    } while (0)
// Tensor-core GEMM of a single tile: C = A * B, all matrices 16x16.
// Requires SM70+ (WMMA intrinsics). Must be launched with exactly one warp
// (<<<1, 32>>>): all 32 lanes cooperate on the same fragment, and the lane
// <-> element mapping inside a fragment is opaque/architecture-specific.
//   a, b : device pointers to 16x16 row-major half matrices, leading dim 16
//   c    : device pointer to a 16x16 row-major float output, leading dim 16
// The product is accumulated in float (c_frag), not half.
__global__ void wmma_kernel(half *a, half *b, float *c) { 
    // Declare the fragments 
    wmma::fragment<wmma::matrix_a, 16, 16, 16, half, wmma::row_major> a_frag; 
    wmma::fragment<wmma::matrix_b, 16, 16, 16, half, wmma::row_major> b_frag; 
    wmma::fragment<wmma::accumulator, 16, 16, 16, float> c_frag; 
    // Initialize the output to zero 
    wmma::fill_fragment(c_frag, 0.0f); 
    // Load the inputs (stride argument = leading dimension = 16)
    wmma::load_matrix_sync(a_frag, a, 16); 
    wmma::load_matrix_sync(b_frag, b, 16); 
    // Perform the matrix multiplication: c_frag = a_frag * b_frag + c_frag
    wmma::mma_sync(c_frag, a_frag, b_frag, c_frag); 
    // Store the output (accumulator layout must be given explicitly)
    wmma::store_matrix_sync(c, c_frag, 16, wmma::mem_row_major); 
}

// Elementwise fp32 -> fp16 conversion: d_out[i] = (half)d_a[i] for i in [0, N).
// Grid-stride loop, so any launch configuration covers all N elements (the
// original fixed-index form silently skipped the tail if the grid was too
// small). Input is marked const __restrict__ to enable read-only caching.
//   d_out : device buffer of at least N half values (written)
//   d_a   : device buffer of at least N floats (read-only)
//   N     : element count
__global__ void float_to_half(half *d_out, const float *__restrict__ d_a, const int N){
    const int stride = gridDim.x * blockDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += stride) {
        d_out[i] = __float2half(d_a[i]);
    }
}


int main(){
    // One WMMA tile: C[m x n] = A[m x k] * B[k x n], every dimension 16.
    const int m = 16;
    const int n = 16;
    const int k = 16;

    // Host buffers: fp32 inputs, CPU reference C, GPU result C_gpu.
    // (The original also allocated host-side half arrays A_h/B_h that were
    // never used and never freed; the fp32->fp16 conversion happens entirely
    // on the device, so they are removed.)
    float *A = new float[m*k];
    float *B = new float[k*n];
    float *C = new float[m*n];
    float *C_gpu = new float[m*n];

    // Device buffers: fp32 inputs/output plus fp16 copies of A and B for the
    // tensor-core kernel.
    float *d_A,*d_B,*d_C;
    half *d_A_h,*d_B_h;

    // Random inputs in [-10, 10], then the CPU reference product.
    randu(A,m*k,-10,10);
    randu(B,k*n,-10,10);
    matmul(C,A,B,m,n,k);  // was hard-coded 16,16,16; keep in sync with m/n/k

    CUDA_CHECK(cudaMalloc(reinterpret_cast<void**>(&d_A),m*k*sizeof(float)));
    CUDA_CHECK(cudaMalloc(reinterpret_cast<void**>(&d_B),k*n*sizeof(float)));
    CUDA_CHECK(cudaMalloc(reinterpret_cast<void**>(&d_C),m*n*sizeof(float)));
    CUDA_CHECK(cudaMalloc(reinterpret_cast<void**>(&d_A_h),m*k*sizeof(half)));
    CUDA_CHECK(cudaMalloc(reinterpret_cast<void**>(&d_B_h),k*n*sizeof(half)));

    // Dump inputs and the reference output for offline inspection.
    vector_to_file("datas/A.txt", A,m*k);
    vector_to_file("datas/B.txt", B,k*n);
    vector_to_file("datas/C.txt", C,m*n);

    CUDA_CHECK(cudaMemcpy(d_A, A, m*k*sizeof(float), cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(d_B, B, k*n*sizeof(float), cudaMemcpyHostToDevice));

    // Convert both inputs to fp16 on the device. All launches use the default
    // stream, so they are ordered before the WMMA kernel below.
    int block = 128;
    float_to_half<<<(m*k+block-1)/block,block>>>(d_A_h, d_A, m*k);
    CUDA_CHECK(cudaGetLastError());  // catch launch-configuration errors
    float_to_half<<<(k*n+block-1)/block,block>>>(d_B_h, d_B, k*n);
    CUDA_CHECK(cudaGetLastError());

    // A[16x16] * B[16x16] -> C[16x16]; a single warp computes the whole tile.
    wmma_kernel<<<1,32>>>(d_A_h, d_B_h, d_C);
    CUDA_CHECK(cudaGetLastError());

    // Blocking D2H copy also synchronizes with the kernels above.
    CUDA_CHECK(cudaMemcpy(C_gpu, d_C, m*n*sizeof(float), cudaMemcpyDeviceToHost));

    // NOTE(review): the GPU path rounds A/B to fp16, so C_gpu differs from the
    // fp32 reference by half-precision error — check() presumably compares
    // with a tolerance; confirm in common.h.
    bool is_same = check(C, C_gpu, m*n);
    if(is_same){
        std::cout<<"TensorCore 计算正确\n";
    }else{
        std::cerr<<"TensorCore 计算错误\n";
    }

    // Release device memory (previously leaked).
    CUDA_CHECK(cudaFree(d_A));
    CUDA_CHECK(cudaFree(d_B));
    CUDA_CHECK(cudaFree(d_C));
    CUDA_CHECK(cudaFree(d_A_h));
    CUDA_CHECK(cudaFree(d_B_h));

    delete [] A;
    delete [] B;
    delete [] C;
    delete [] C_gpu;

    // Propagate the verification result to the shell (was always 0 before,
    // even on mismatch).
    return is_same ? EXIT_SUCCESS : EXIT_FAILURE;
}

