#include	<cuda_runtime_api.h>
#include	<cuda_runtime.h>
#include	<cublas_v2.h>
#include	<mma.h>
#include	<sys/mman.h>
#include	<sys/stat.h>
#include	<sys/time.h>
#include	<iostream>
#include	<stdint.h>
#include	<unistd.h>
#include	<fcntl.h>
#include	<vector>
#include	<cfloat>
#include	<ctime>
using namespace nvcuda;
// Global PRNG state for host-side seeding of the init kernels.
uint64_t	prng=0;
// One step of the wyrand PRNG (wyhash family): advances *seed and returns
// a 64-bit pseudo-random value derived from the new state.
static inline uint64_t wyrand(uint64_t	*seed) {
    *seed += 0xa0761d6478bd642full;
    uint64_t a = *seed;
    uint64_t b = a ^ 0xe7037ed1a0b428dbull;
    // Multiply each lane by its own 32-bit rotation, then fold together.
    b *= (b >> 32) | (b << 32);
    uint64_t mixed = a * ((a >> 32) | (a << 32));
    return mixed ^ ((b >> 32) | (b << 32));
}
// Map 63 random bits to an approximately standard-normal float:
// sum of three 21-bit uniforms (central-limit trick), rescaled to mean 0.
__device__	inline float wy2gau(uint64_t r) {
    const float scale = 1.0f / (1ull << 20);
    uint64_t u0 = r & 0x1fffff;
    uint64_t u1 = (r >> 21) & 0x1fffff;
    uint64_t u2 = (r >> 42) & 0x1fffff;
    return (u0 + u1 + u2) * scale - 3.0f;
}
// wyhash "mum" core: 64x64 -> 128-bit multiply built from four 32-bit
// partial products, folded back into *A and *B in place.
__device__	inline void wymum(uint64_t *A,	uint64_t *B) {
    uint64_t aHi = *A >> 32, aLo = (uint32_t)*A;
    uint64_t bHi = *B >> 32, bLo = (uint32_t)*B;
    uint64_t hh = aHi * bHi;
    uint64_t hl = aHi * bLo;
    uint64_t lh = aLo * bHi;
    uint64_t ll = aLo * bLo;
    *A = ((hl >> 32) | (hl << 32)) ^ hh;
    *B = ((lh >> 32) | (lh << 32)) ^ ll;
}
// Hash two 64-bit words into one: two wymum rounds interleaved with
// the wyhash prime constants.
__device__	inline	uint64_t	wyhash64(uint64_t	A,	uint64_t	B) {
    const uint64_t P0 = 0xa0761d6478bd642full;
    const uint64_t P1 = 0xe7037ed1a0b428dbull;
    A ^= P0;
    B ^= P1;
    wymum(&A, &B);
    A ^= P1;
    B ^= P0;
    wymum(&A, &B);
    return A ^ B;
}
// Fill w with ~N(0,1) half-precision values, one thread per element.
// NOTE(review): no bounds guard — the launch configuration must cover
// exactly the buffer length (the tensor<> constructor launches N/16 x 16).
__global__	void	_random(half	*w,	uint64_t	seed) {
    const uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x;
    w[idx] = __float2half_rn(wy2gau(wyhash64(idx, seed)));
}
// Owns N fp16 values in CUDA managed memory, initialized on device with
// ~N(0,1) samples by the _random kernel (seeded from the host PRNG).
template<uint32_t	N>
struct	tensor {
    // The init launch uses grid N/16, block 16 with no bounds check in the
    // kernel, so N must divide evenly.
    static_assert(N % 16 == 0, "tensor<N>: N must be a multiple of 16");
    __nv_half	*p;
    tensor() {
        // BUG FIX: check the allocation; launching the kernel on a garbage
        // pointer after a failed cudaMallocManaged is undefined behavior.
        if (cudaMallocManaged(&p, N * sizeof(__nv_half)) != cudaSuccess) {
            p = nullptr;
            std::cerr << "tensor: cudaMallocManaged failed\n";
            return;
        }
        _random<<<N / 16, 16>>>(p, wyrand(&prng));
    }
    // BUG FIX: raw owning pointer — the implicit copy operations would lead
    // to a double cudaFree. Make the type non-copyable.
    tensor(const tensor &) = delete;
    tensor &operator=(const tensor &) = delete;
    ~tensor() {
        cudaFree(p);   // cudaFree(nullptr) is a no-op, so this stays safe
    }
};
// Process-wide cuBLAS handle, created before main() and destroyed after,
// via a global RAII-style object.
cublasHandle_t	handle;
struct	auto_init {
    auto_init() {
        // BUG FIX: report a failed cublasCreate instead of silently leaving
        // an invalid handle for every later cublasHgemm call to fail on.
        if (cublasCreate(&handle) != CUBLAS_STATUS_SUCCESS)
            std::cerr << "auto_init: cublasCreate failed\n";
    }
    ~auto_init() {
        cublasDestroy(handle);
    }
} auto_init_instance;
// Naive tensor-core HGEMM tile kernel (wmma requires SM70+).
// One warp (block of 32 threads) computes one 16x16 tile of C.
// Layout contract: A is row-major MxK; B is row-major NxK, loaded as a
// col-major KxN fragment (so the product is effectively A * B^T); C is
// written column-major MxN. blockIdx.x indexes the M (row) dimension,
// blockIdx.y the N (col) dimension. M, N, K must all be multiples of 16.
__global__ void wmmaNaiveKernel(const __nv_half *A, const __nv_half *B, __nv_half *C, uint32_t M,  uint32_t N, uint32_t K) {
    const uint32_t	row=blockIdx.x<<4,	col=blockIdx.y<<4;
    wmma::fragment<wmma::matrix_a,16,16,16,__nv_half,wmma::row_major>	A_frag;
    wmma::fragment<wmma::matrix_b,16,16,16,__nv_half,wmma::col_major>	B_frag;
    wmma::fragment<wmma::accumulator,16,16,16,__nv_half>	C_frag;
    wmma::fill_fragment(C_frag,0.0f);
    // Walk the two 16x16 input tiles along the shared K dimension.
    for (uint32_t k = 0; k < K; k+=16) {
        wmma::load_matrix_sync(A_frag,A+row*K+k,K);
        wmma::load_matrix_sync(B_frag,B+col*K+k,K);
        wmma::mma_sync(C_frag,A_frag,B_frag,C_frag);
    }
    // BUG FIX: for a column-major MxN result the element offset and leading
    // dimension must use M (rows of C), not N. The original used N in both
    // places, which only worked because the caller always passes M == N.
    wmma::store_matrix_sync(C+col*M+row,C_frag,M,wmma::mem_col_major);
}
// Launch the naive WMMA kernel: one 32-thread block (one warp) per 16x16
// tile of the MxN output. Preconditions: M, N, K multiples of 16; device
// pointers; an SM70+ GPU.
void hgemm(__nv_half *A, __nv_half *B, __nv_half *C, uint32_t M, uint32_t N, uint32_t K) {
    // BUG FIX: the kernel uses blockIdx.x as the M (row) index and
    // blockIdx.y as the N (col) index, so grid.x must be M/16 and grid.y
    // N/16. The original passed grid(N/16, M/16), which only worked because
    // the caller always passes M == N.
    dim3	grid(M/16,N/16),	block(32);
    wmmaNaiveKernel<<<grid,block>>>(A,B,C,M,N,K);
}
// Benchmark: 1024 iterations of 4096^3 fp16 GEMM, cuBLAS vs the naive WMMA
// kernel, reporting TFLOP/s for each and a spot-check of the max difference.
int	main(void) {
    const	uint32_t	N=4096;
    tensor<N*N>	a,b,c,d;            // random fp16 matrices in managed memory
    timeval	beg,	end;
    double	t,f;
    __nv_half	alf(1.0f),	bet(0.0f);

    // BUG FIX: the tensor constructors queue async _random init kernels;
    // without this sync their execution time was charged to the cuBLAS loop.
    cudaDeviceSynchronize();
    gettimeofday(&beg,NULL);
    // d = a^T * b in cuBLAS's column-major view.
    for(uint32_t	it=0;	it<1024;	it++)	cublasHgemm(handle,CUBLAS_OP_T,CUBLAS_OP_N,N,N,N,&alf,a.p,N,b.p,N,&bet,d.p,N);
    cudaDeviceSynchronize();
    gettimeofday(&end,NULL);
    t=(end.tv_sec-beg.tv_sec+1e-6*(end.tv_usec-beg.tv_usec));
    f=2.0*N*N*N*1024;               // FLOPs: 2*M*N*K per GEMM, 1024 GEMMs
    std::cerr<<"cublas\t"<<f/t/1e12<<'\n';   // TFLOP/s

    gettimeofday(&beg,NULL);
    for(uint32_t	it=0;	it<1024;	it++)	hgemm(a.p,b.p,c.p,N,N,N);
    cudaDeviceSynchronize();
    gettimeofday(&end,NULL);
    t=(end.tv_sec-beg.tv_sec+1e-6*(end.tv_usec-beg.tv_usec));
    f=2.0*N*N*N*1024;
    std::cerr<<"hgemm\t"<<f/t/1e12<<'\n';

    // Spot-check the first few elements against the cuBLAS result.
    // Managed memory is safe to read on the host after the sync above.
    float	err=0;
    for(uint32_t	i=0;	i<10;	i++) {
        float	e=__half2float(__hsub(c.p[i],d.p[i]));
        err=fmaxf(err,fabsf(e));
    }
    std::cerr<<"error\t"<<err<<'\n';
    return	0;
}
