#include	<cuda_runtime.h>
#include	<cublas_v2.h>
#include	<cuda_bf16.h>
#include	<sys/mman.h>
#include	<sys/stat.h>
#include	<sys/time.h>
#include	<iostream>
#include	<stdint.h>
#include	<unistd.h>
#include	<fcntl.h>
#include	<cfloat>
using	namespace	std;
// Tiger-optimizer EMA horizon: momentum is a running average over ~64 steps.
#define	tiger_beta	64.0f
cublasHandle_t	handle;	// global cuBLAS handle, created/destroyed in main()
float	eta;	// global learning rate, recomputed each step in the training loop
uint64_t	prng=0;	// host-side wyrand state used for weight initialization
// wyrand: tiny 64-bit PRNG from the wyhash family.  Advances *seed by a fixed
// odd constant, then mixes the counter with a decoupled companion word using
// 32-bit-rotate multiplies.  Host-side only in this file.
static inline uint64_t wyrand(uint64_t	*seed) {
    *seed += 0xa0761d6478bd642full;
    uint64_t lead = *seed;
    uint64_t tail = lead ^ 0xe7037ed1a0b428dbull;
    tail *= (tail >> 32) | (tail << 32);                  // self-multiply with halves swapped
    uint64_t mixed = lead * ((lead >> 32) | (lead << 32));
    return mixed ^ ((tail >> 32) | (tail << 32));
}
// Approximate a standard normal sample from 64 random bits via Irwin-Hall:
// the sum of three 21-bit uniforms, rescaled so the mean is 0.  Output range
// is (-3, 3); 1/2^20 is exact in float, so this matches the original exactly.
static inline float wy2gau(uint64_t r) {
    const float scale = 1.0f / (float)(1ull << 20);
    uint64_t u0 = r & 0x1fffff;
    uint64_t u1 = (r >> 21) & 0x1fffff;
    uint64_t u2 = (r >> 42) & 0x1fffff;
    return (u0 + u1 + u2) * scale - 3.0f;
}
// wyhash "mum": 64x64 -> 128-bit multiply built from four 32x32 partial
// products, with the two 64-bit result halves folded back into *A and *B
// (cross terms are rotated by 32 before xor-folding).  Device-side core of
// _wyhash64 below.
__device__ inline void _wymum(uint64_t *A,	uint64_t *B) {
    // hh = hi*hi, hl = hi*lo, lh = lo*hi, ll = lo*lo (64-bit partials)
    uint64_t	hh=(*A>>32)*(*B>>32), hl=(*A>>32)*(uint32_t)*B, lh=(uint32_t)*A*(*B>>32), ll=(uint64_t)(uint32_t)*A*(uint32_t)*B;
    *A=((hl>>32)|(hl<<32))^hh;
    *B=((lh>>32)|(lh<<32))^ll;
}
// Stateless 64-bit hash of the pair (A,B): two rounds of constant-xor plus
// _wymum mixing (wyhash finalizer).  Used by the embedding kernel to derive
// deterministic pseudo-random +/-1 features from (token, row).
__device__ inline	uint64_t	_wyhash64(uint64_t	A,	uint64_t	B) {
    // round 1: salt both lanes, then mum-mix
    B^=0xe7037ed1a0b428dbull;
    A^=0xa0761d6478bd642full;
    _wymum(&A,&B);
    // round 2: salts swapped, mix again
    B^=0xa0761d6478bd642full;
    A^=0xe7037ed1a0b428dbull;
    _wymum(&A,&B);
    return	B^A;
}
// Eight bf16 values packed as four bf16x2 pairs: a 16-byte vector load/store
// unit.  All kernels below assume buffer sizes are multiples of 8 elements.
struct	bfloat8 {
    __nv_bfloat162	x,y,z,w;
};
// Eight floats packed as four float2 pairs: the fp32 counterpart of bfloat8,
// used for vectorized conversion and accumulation.
struct	float8 {
    float2	x,y,z,w;
};
// Convert 8 floats to 8 bf16 values with round-to-nearest.
__device__ inline	bfloat8	_convertfb(float8	x) {
    bfloat8	y;
    y.x=__float22bfloat162_rn(x.x);
    y.y=__float22bfloat162_rn(x.y);
    y.z=__float22bfloat162_rn(x.z);
    y.w=__float22bfloat162_rn(x.w);
    return	y;
}
// Widen 8 bf16 values to 8 floats (exact).
__device__ inline	float8	_convertbf(bfloat8	x) {
    float8	y;
    y.x=__bfloat1622float2(x.x);
    y.y=__bfloat1622float2(x.y);
    y.z=__bfloat1622float2(x.z);
    y.w=__bfloat1622float2(x.w);
    return	y;
}
// Horizontal sum of the 8 lanes of a float8.
__device__ inline	float	_sum8(float8	x) {
    return	x.x.x+x.x.y+x.y.x+x.y.y+x.z.x+x.z.y+x.w.x+x.w.y;
}
// 8-wide dot product: sum of elementwise products of two float8s.
__device__ inline	float	_sum8x(float8	x,	float8	y) {
    return	x.x.x*y.x.x+x.x.y*y.x.y+x.y.x*y.y.x+x.y.y*y.y.y+x.z.x*y.z.x+x.z.y*y.z.y+x.w.x*y.w.x+x.w.y*y.w.y;
}
// Elementwise bf16 multiply of two 8-vectors.
__device__ inline	bfloat8	_hmul(bfloat8	x,	bfloat8	y) {
    bfloat8	z;
    z.x=__hmul2(x.x,y.x);
    z.y=__hmul2(x.y,y.y);
    z.z=__hmul2(x.z,y.z);
    z.w=__hmul2(x.w,y.w);
    return	z;
}
// Elementwise bf16 add of two 8-vectors.
__device__ inline	bfloat8	_hadd(bfloat8	x,	bfloat8	y) {
    bfloat8	z;
    z.x=__hadd2(x.x,y.x);
    z.y=__hadd2(x.y,y.y);
    z.z=__hadd2(x.z,y.z);
    z.w=__hadd2(x.w,y.w);
    return	z;
}
// Signed square root: sqrt of the magnitude carrying the sign of x.
// Used by the Tiger update so negative momentum pushes weights upward.
__device__ inline	float	_ssqrt(float	x) {
    if (x < 0.0f) return -sqrtf(-x);
    return sqrtf(x);
}
// Tiger-style optimizer step: w -= lr * sign(m) * sqrt(|m|), 8 floats per
// thread.  No bounds guard: the launch must cover exactly size/8 threads
// (all buffer sizes in this file are multiples of 128 by construction --
// TODO(review): confirm for all template instantiations).
__global__	void	_sqrtum(float	*w, __nv_bfloat16	*m,	float	lr) {
    uint32_t	i=(blockIdx.x*blockDim.x+threadIdx.x)<<3;
    float8	*w8=(float8*)(w+i),	z=_convertbf(*(bfloat8*)(m+i));
    w8->x.x-=lr*_ssqrt(z.x.x);
    w8->x.y-=lr*_ssqrt(z.x.y);
    w8->y.x-=lr*_ssqrt(z.y.x);
    w8->y.y-=lr*_ssqrt(z.y.y);
    w8->z.x-=lr*_ssqrt(z.z.x);
    w8->z.y-=lr*_ssqrt(z.z.y);
    w8->w.x-=lr*_ssqrt(z.w.x);
    w8->w.y-=lr*_ssqrt(z.w.y);
}
// v += u, elementwise bf16, 8 elements per thread (no bounds guard).
__global__	void	_sexyadd(__nv_bfloat16	*u,	__nv_bfloat16	*v) {
    uint32_t  id=(blockIdx.x*blockDim.x+threadIdx.x)<<3;
    *(bfloat8*)(v+id)=_hadd(*(bfloat8*)(u+id),*(bfloat8*)(v+id));
}
// v = u, vectorized bf16 copy, 8 elements per thread (no bounds guard).
__global__	void	_sexycopy(__nv_bfloat16	*u,	__nv_bfloat16	*v) {
    uint32_t  id=(blockIdx.x*blockDim.x+threadIdx.x)<<3;
    *(bfloat8*)(v+id)=*(bfloat8*)(u+id);
}
// Narrow fp32 -> bf16: out[i] = bf16(inp[i]), 8 elements per thread.
__global__	void	_save16(float	*inp, __nv_bfloat16	*out) {
    uint32_t	i=(blockIdx.x*blockDim.x+threadIdx.x)<<3;
    *(bfloat8*)(out+i)=_convertfb(*(float8*)(inp+i));
}
// Widen bf16 -> fp32.  NOTE(review): despite the names, `inp` is the fp32
// DESTINATION and `out` is the bf16 SOURCE -- the parameter order mirrors
// _save16 so Data32::save/load can pass the same argument pair.
__global__	void	_load16(float	*inp, __nv_bfloat16	*out) {
    uint32_t	i=(blockIdx.x*blockDim.x+threadIdx.x)<<3;
    *(float8*)(inp+i)=_convertbf(*(bfloat8*)(out+i));
}
// RAII wrapper around N bf16 values in CUDA managed (unified) memory.
// Accessible from both host and device; 2*N bytes.
template<uint32_t	N>
struct	Data {
    __nv_bfloat16	*data;	// owning managed allocation
    Data() {
        cudaMallocManaged(&data,	N<<1);
    }
    // Owning raw pointer + freeing destructor: a copy would double-cudaFree.
    // All existing code passes Data by reference, so deleting copies is safe.
    Data(const Data&)=delete;
    Data&	operator=(const Data&)=delete;
    ~Data() {
        cudaFree(data);
    }
    // Fill the whole buffer with zero bytes (bf16 zero).
    void	zero(void) {
        cudaMemset(data,	0,	N<<1);
    }
};
// RAII wrapper around N fp32 values in CUDA managed memory, with bf16
// checkpoint serialization (weights stored on disk as bf16 to halve size).
template<uint32_t	N>
struct	Data32 {
    float	*data;	// owning managed allocation, 4*N bytes
    Data32() {
        cudaMallocManaged(&data,	N<<2);
    }
    // Owning raw pointer + freeing destructor: forbid copies (double-free).
    Data32(const Data32&)=delete;
    Data32&	operator=(const Data32&)=delete;
    ~Data32() {
        cudaFree(data);
    }
    // Serialize as bf16: narrow on device, sync, then write 2*N bytes.
    void	save(FILE	*F) {
        Data<N>	tmp;
        _save16<<<N/8/16,16>>>(data,tmp.data);
        cudaDeviceSynchronize();
        fwrite(tmp.data,N*2,1,F);
    }
    // Deserialize bf16 from disk and widen into the fp32 buffer.
    void	load(FILE	*F) {
        Data<N>	tmp;
        // was unchecked: a truncated checkpoint silently loaded garbage
        if(fread(tmp.data,N*2,1,F)!=1)	fprintf(stderr,"Data32::load: short read\n");
        _load16<<<N/8/16,16>>>(data,tmp.data);
        cudaDeviceSynchronize();
    }
    uint64_t	size(void) {
        return	N;
    }
    void	zero(void) {
        cudaMemset(data,	0,	N<<2);
    }
    // Host-side Gaussian init with std `norm` (uses the global prng state).
    void	rand(float	norm=1) {
        for(uint32_t	i=0;	i<N;	i++)	data[i]=norm*wy2gau(wyrand(&prng));
    }
};
// Bias-free linear layer: R0 inputs -> R1 outputs over C columns (tokens).
// Master weights are fp32 (wei32); a bf16 copy (wei) is refreshed each fw();
// wem holds the bf16 Tiger momentum.  cuBLAS is column-major: matrices are
// stored feature-major with leading dimension = feature count.
template<uint32_t	R0,	uint32_t	R1,	uint32_t	C>
struct	linear {
    Data<R0*R1>	wei,wem;	// bf16 weights and Tiger momentum (R0 x R1)
    Data32<R0*R1>	wei32;	// fp32 master weights
    Data<R1*C>	out;	// forward activations (R1 x C)
    linear() {
        wei32.rand(1/sqrtf(R0));
        wem.zero();
    }
    void	save(FILE	*F) {
        wei32.save(F);
    }
    void	load(FILE	*F) {
        wei32.load(F);
    }
    uint64_t	size(void) {
        return	wei32.size();
    }
    // out = (1/sqrt(R0)) * wei^T * inp   (R1 x C)
    void	fw(Data<R0*C>	&inp) {
        float	alf=1/sqrtf(R0),	bet=0;
        _save16<<<R0*R1/8/16,16>>>(wei32.data,wei.data);
        cublasGemmEx(handle,CUBLAS_OP_T,CUBLAS_OP_N,R1,C,R0,&alf,wei.data,CUDA_R_16BF,R0,inp.data,CUDA_R_16BF,R0,&bet,out.data,CUDA_R_16BF,R1,CUBLAS_COMPUTE_32F,CUBLAS_GEMM_DEFAULT);
    }
    // Backward: (1) EMA the weight gradient inp*gin^T into the momentum wem
    // with horizon tiger_beta; (2) gra = wei*gin (input gradient; accumulated
    // into gra when `accumulate`); (3) apply the Tiger sqrt update to wei32.
    void	bk(Data<R0*C>	&inp,	Data<R1*C>	&gin,	Data<R0*C>	&gra,	bool	accumulate=false) {
        float	alf=1/sqrtf(R0),	alf1=1/sqrtf(R0*C)/tiger_beta,	bet=(tiger_beta-1.0f)/tiger_beta,	bet1=accumulate;
        cublasGemmEx(handle,CUBLAS_OP_N,CUBLAS_OP_T,R0,R1,C,&alf1,inp.data,CUDA_R_16BF,R0,gin.data,CUDA_R_16BF,R1,&bet,wem.data,CUDA_R_16BF,R0,CUBLAS_COMPUTE_32F,CUBLAS_GEMM_DEFAULT);
        cublasGemmEx(handle,CUBLAS_OP_N,CUBLAS_OP_N,R0,C,R1,&alf,wei.data,CUDA_R_16BF,R0,gin.data,CUDA_R_16BF,R1,&bet1,gra.data,CUDA_R_16BF,R0,CUBLAS_COMPUTE_32F,CUBLAS_GEMM_DEFAULT);
        _sqrtum<<<R0*R1/8/16,16>>>(wei32.data,wem.data,eta);
    }
};
// RMS-norm forward, in place.  Each thread owns one head-slice of r=R/H
// elements, records its raw sum of squares in norm[id] (for the backward),
// then scales the slice so its RMS is 1.
// NOTE(review): an all-zero slice gives nor==0 -> sqrtf(r/0)=inf; inputs are
// presumably never exactly zero here -- confirm.
__global__	void	_rmsnormf(uint32_t	R,	uint32_t	C,	uint32_t	H,	__nv_bfloat16	*inp,	float	*norm) {
    uint32_t	id=blockIdx.x*blockDim.x+threadIdx.x,	r=R/H;
    float	nor=0;
    __nv_bfloat16	*in=inp+id*r;
    for(uint32_t	i=0;	i<r;	i+=8) {
        float8	z=_convertbf(*(bfloat8*)(in+i));
        nor+=_sum8x(z,z);
    }
    norm[id]=nor;
    nor=sqrtf(r/nor);
    bfloat8	a;
    a.x.x=a.x.y=a.y.x=a.y.y=a.z.x=a.z.y=a.w.x=a.w.y=__float2bfloat16_rn(nor);
    for(uint32_t	i=0;	i<r;	i+=8) *(bfloat8*)(in+i)=_hmul(a,*(bfloat8*)(in+i));
}
// RMS-norm backward, in place on gin.  Uses the saved sum of squares
// norm[id]: rescales the incoming gradient by s=sqrt(r/norm) and subtracts
// the component along the (normalized) input, i.e. the usual rmsnorm
// projection  g <- s*g - (g.x_hat)*x_hat/r  expressed via the raw input.
// NOTE: `inp` here is the NORMALIZED activation written by _rmsnormf.
__global__	void	_rmsnormb(uint32_t	R,	uint32_t	C,	uint32_t	H,	__nv_bfloat16	*inp,	__nv_bfloat16	*gin,	float	*norm) {
    uint32_t	id=blockIdx.x*blockDim.x+threadIdx.x,	r=R/H;
    float	mg=0,	s=sqrtf(r/norm[id]);
    __nv_bfloat16	*in=inp+id*r,	*gi=gin+id*r;
    for(uint32_t	i=0;	i<r;	i+=8) {
        float8	a=_convertbf(*(bfloat8*)(gi+i)),b=_convertbf(*(bfloat8*)(in+i));
        mg+=_sum8x(a,b);
    }
    mg/=norm[id]*s;
    bfloat8	x;
    x.x.x=x.x.y=x.y.x=x.y.y=x.z.x=x.z.y=x.w.x=x.w.y=__float2bfloat16_rn(s);
    bfloat8	y;
    y.x.x=y.x.y=y.y.x=y.y.y=y.z.x=y.z.y=y.w.x=y.w.y=__float2bfloat16_rn(-mg);
    for(uint32_t	i=0;	i<r;	i+=8)	*(bfloat8*)(gi+i)=_hadd(_hmul(*(bfloat8*)(gi+i),x),_hmul(*(bfloat8*)(in+i),y));
}
// Per-head RMS normalization over C columns of R features split into H
// heads.  `nor` keeps the per-(column,head) sum of squares from the forward
// pass so the backward can reuse it.  Both passes operate IN PLACE.
template<uint32_t	R,	uint32_t	C,	uint32_t	H>
struct	rmsnorm {
    Data32<C*H>	nor;	// saved sum of squares per slice
    void	fw(Data<R*C>	&inp) {
        _rmsnormf<<<C*H/16,16>>>(R,C,H,inp.data,nor.data);
    }
    // `inp` must be the (already normalized) output of fw; gin is modified.
    void	bk(Data<R*C>	&inp,	Data<R*C>	&gin) {
        _rmsnormb<<<C*H/16,16>>>(R,C,H,inp.data,gin.data,nor.data);
    }
};
// In-place softmax over rows of R bf16 values, one row per thread.
// Max-subtracted for stability; the exponentials are recomputed in the final
// pass instead of stored, trading FLOPs for zero extra memory.
__global__	void	_softmaxf(uint32_t	R,	__nv_bfloat16	*inp) {
    uint32_t	id=blockIdx.x*blockDim.x+threadIdx.x;
    float	sum=0;
    float2	z= {-FLT_MAX/2,-FLT_MAX/2};
    __nv_bfloat162	*p=(__nv_bfloat162*)(inp+id*R),	ma=__float22bfloat162_rn(z),	s;
    for(uint32_t	i=0;	i<R/2;	i++)	ma=__hmax2(p[i],ma);
    ma.x=ma.y=__hmax(ma.x,ma.y);	// collapse the pairwise max to a scalar
    for(uint32_t	i=0;	i<R/2;	i++) {
        float2	t=__bfloat1622float2(h2exp(__hsub2(p[i],ma)));
        sum+=t.x+t.y;
    }
    z.x=z.y=1/sum;
    s=__float22bfloat162_rn(z);
    for(uint32_t	i=0;	i<R/2;	i++)	p[i]=__hmul2(h2exp(__hsub2(p[i],ma)),s);
}
// Attention activation + causal mask, one attention row per thread.
// For query position c, each visible key i<=c gets a[i]=exp(score + pe[d])
// where d=c-i is the query-key distance -- pe is a learned per-head
// log-distance bias (initialized to -log1p(d), i.e. a 1/(1+d) decay).
// Future positions (i>c) are zeroed.  Note: rows are NOT normalized here
// (no softmax over attention -- presumably intentional; the decay prior
// bounds the row sum).
__global__	void	_sexyfp(uint32_t	H,	uint32_t	C,	__nv_bfloat16	*att,	__nv_bfloat16	*pe) {
    uint32_t	id=blockIdx.x*blockDim.x+threadIdx.x,	c=id%C,	h=(id/C)%H;
    __nv_bfloat16	*a=att+id*C,	*p=pe+h*C+c,	z= {};
    for(uint32_t	i=0;	i<=c;	i++)	a[i]=hexp(__hadd(*(p-i),a[i]));
    for(uint32_t	i=c+1;	i<C;	i++)	a[i]=z;
}
// Backward through the exp in _sexyfp: gin *= att (d/dx exp(x) = exp(x),
// and att holds the forward exp values).  Masked entries are zero in att,
// so they also zero the gradient.  8 elements per thread.
__global__	void	_sexyba(__nv_bfloat16	*gin,	__nv_bfloat16	*att) {
    uint32_t	id=(blockIdx.x*blockDim.x+threadIdx.x)<<3;
    *(bfloat8*)(gin+id)=_hmul(*(bfloat8*)(att+id),*(bfloat8*)(gin+id));
}
// Gradient for the per-head position bias pm: one thread per (head h,
// distance c).  Sums the attention gradient along the diagonal of constant
// query-key distance c across all batches, normalizes by the number of
// contributing entries, and folds it into pm as a Tiger-style EMA.
__global__	void	_sexybp(uint32_t	B,	uint32_t	H,	uint32_t	R,	uint32_t	C,	__nv_bfloat16	*a,	__nv_bfloat16	*pm) {
    uint32_t  id=blockIdx.x*blockDim.x+threadIdx.x,	h=id/C,	c=id%C;
    float	s=0;
    for(uint32_t	b=0;	b<B;	b++) {
        __nv_bfloat16	*p=a+b*H*C*C+h*C*C;
        for(uint32_t	i=c;	i<C;	i++)	s+=__bfloat162float(p[i*C+(i-c)]);	// entries at distance c
    }
    s/=sqrtf((C-c)*R/H*B);
    float	m=__bfloat162float(pm[id]);
    pm[id]=__float2bfloat16_rn(m+(s-m)/tiger_beta);	// EMA with horizon tiger_beta
}
// Causal multi-head attention block with a learned log-distance position
// bias instead of softmax.  R = model width, C = context, H = heads,
// B = batch.  The static buffers da/dq/dk/dv are gradient scratch SHARED by
// every instance of the same instantiation (safe because bk() calls never
// overlap); `at` keeps the forward attention weights for the backward.
template<uint32_t	R,	uint32_t	C,	uint32_t	H,	uint32_t	B>
struct	sexy {
    static	Data<C*C*H*B>	da;	// grad wrt attention matrix (scratch)
    static	Data<R*C*B>	dq,dk,dv;	// grads wrt q/k/v activations (scratch)
    Data<C*C*H*B>	at;	// forward attention weights (post exp+mask)
    rmsnorm<R,C*B,H>	n,nq,nk,nv;	// output norm and q/k/v norms
    Data32<C*H>	pe32;	// fp32 master position bias (per head, per distance)
    Data<C*H>	pe,pm;	// bf16 copy of pe32 and its Tiger momentum
    linear<R,R,C*B>	q,k,v;	// projections
    Data<R*C*B>	out;
    sexy() {
        // init bias to -log1p(distance): exp() later gives a 1/(1+d) decay
        for(uint32_t	h=0;	h<H;	h++)	for(uint32_t	i=0;	i<C;	i++)	pe32.data[h*C+i]=-log1pf(i);
        pm.zero();
    }
    void	save(FILE	*F) {
        pe32.save(F);
        q.save(F);
        k.save(F);
        v.save(F);
    }
    void	load(FILE	*F) {
        pe32.load(F);
        q.load(F);
        k.load(F);
        v.load(F);
    }
    uint64_t	size(void) {
        return	pe32.size()+q.size()+k.size()+v.size();
    }
    // Forward: at = K^T Q / sqrt(R/H) per head (strided-batched over heads,
    // looped over batch), then exp(score+bias)+causal mask, then out = V*at,
    // then per-head rmsnorm of out.
    void	fw(Data<R*C*B>	&inp) {
        float	alf=1/sqrtf(R/H),alf1=1,bet=0;
        q.fw(inp);	k.fw(inp);	v.fw(inp);
        nq.fw(q.out);	nk.fw(k.out);	nv.fw(v.out);
        for(uint32_t	b=0;	b<B;	b++)	cublasGemmStridedBatchedEx(handle,CUBLAS_OP_T,CUBLAS_OP_N,C,C,R/H,&alf,k.out.data+b*R*C,CUDA_R_16BF,R,R/H,q.out.data+b*R*C,CUDA_R_16BF,R,R/H,&bet,at.data+b*H*C*C,CUDA_R_16BF,C,C*C,H,CUBLAS_COMPUTE_32F,CUBLAS_GEMM_DEFAULT);
        _save16<<<C*H/8/16,16>>>(pe32.data,pe.data);
        _sexyfp<<<C*H*B/16,16>>>(H,C,at.data,pe.data);
        for(uint32_t	b=0;	b<B;	b++)	cublasGemmStridedBatchedEx(handle,CUBLAS_OP_N,CUBLAS_OP_N,R/H,C,C,&alf1,v.out.data+b*R*C,CUDA_R_16BF,R,R/H,at.data+b*H*C*C,CUDA_R_16BF,C,C*C,&bet,out.data+b*R*C,CUDA_R_16BF,R,R/H,H,CUBLAS_COMPUTE_32F,CUBLAS_GEMM_DEFAULT);
        n.fw(out);
    }
    // Backward: mirrors fw().  da = V^T*gin (grad wrt attention), dv = gin*at^T,
    // then exp backward (_sexyba), bias gradient (_sexybp) + Tiger step on
    // pe32, then dk/dq from da, the q/k/v norms, and finally the projection
    // backwards accumulating all three input gradients into gra.
    void	bk(Data<R*C*B>	&inp,	Data<R*C*B>	&gin,	Data<R*C*B> &gra) {
        float	alf=1/sqrtf(R/H),alf1=1,bet=0;
        n.bk(out,gin);
        for(uint32_t	b=0;	b<B;	b++) {
            cublasGemmStridedBatchedEx(handle,CUBLAS_OP_T,CUBLAS_OP_N,C,C,R/H,&alf1,v.out.data+b*R*C,CUDA_R_16BF,R,R/H,gin.data+b*R*C,CUDA_R_16BF,R,R/H,&bet,da.data+b*H*C*C,CUDA_R_16BF,C,C*C,H,CUBLAS_COMPUTE_32F,CUBLAS_GEMM_DEFAULT);
            cublasGemmStridedBatchedEx(handle,CUBLAS_OP_N,CUBLAS_OP_T,R/H,C,C,&alf1,gin.data+b*R*C,CUDA_R_16BF,R,R/H,at.data+b*H*C*C,CUDA_R_16BF,C,C*C,&bet,dv.data+b*R*C,CUDA_R_16BF,R,R/H,H,CUBLAS_COMPUTE_32F,CUBLAS_GEMM_DEFAULT);
        }
        _sexyba<<<C*C*H*B/8/16,16>>>(da.data,at.data);
        _sexybp<<<C*H/16,16>>>(B,H,R,C,da.data,pm.data);
        _sqrtum<<<C*H/8/16,16>>>(pe32.data,pm.data,eta);
        for(uint32_t	b=0;	b<B;	b++) {
            cublasGemmStridedBatchedEx(handle,CUBLAS_OP_N,CUBLAS_OP_T,R/H,C,C,&alf,q.out.data+b*R*C,CUDA_R_16BF,R,R/H,da.data+b*H*C*C,CUDA_R_16BF,C,C*C,&bet,dk.data+b*R*C,CUDA_R_16BF,R,R/H,H,CUBLAS_COMPUTE_32F,CUBLAS_GEMM_DEFAULT);
            cublasGemmStridedBatchedEx(handle,CUBLAS_OP_N,CUBLAS_OP_N,R/H,C,C,&alf,k.out.data+b*R*C,CUDA_R_16BF,R,R/H,da.data+b*H*C*C,CUDA_R_16BF,C,C*C,&bet,dq.data+b*R*C,CUDA_R_16BF,R,R/H,H,CUBLAS_COMPUTE_32F,CUBLAS_GEMM_DEFAULT);
        }
        nq.bk(q.out,dq);
        nk.bk(k.out,dk);
        nv.bk(v.out,dv);
        q.bk(inp,dq,gra);
        k.bk(inp,dk,gra,true);
        v.bk(inp,dv,gra,true);
    }
};
// Definitions of the shared static scratch buffers.
template<uint32_t	R,	uint32_t	C,	uint32_t	H,	uint32_t	B>
Data<C*C*H*B>	sexy<R,C,H,B>::da;
template<uint32_t	R,	uint32_t	C,	uint32_t	H,	uint32_t	B>
Data<R*C*B>	sexy<R,C,H,B>::dq;
template<uint32_t	R,	uint32_t	C,	uint32_t	H,	uint32_t	B>
Data<R*C*B>	sexy<R,C,H,B>::dk;
template<uint32_t	R,	uint32_t	C,	uint32_t	H,	uint32_t	B>
Data<R*C*B>	sexy<R,C,H,B>::dv;

// Simple branch: a linear projection followed by a per-head rmsnorm.
// Used as the non-attention half of a gated `comb` block.
template<uint32_t	R,	uint32_t	C,	uint32_t	H,	uint32_t	B>
struct	self {
    rmsnorm<R,C*B,H>	n;
    linear<R,R,C*B>	x;
    Data<R*C*B>	&out=x.out;	// alias: the linear's output IS this block's output
    void	save(FILE	*F) {
        x.save(F);
    }
    void	load(FILE	*F) {
        x.load(F);
    }
    uint64_t	size(void) {
        return	x.size();
    }
    void	fw(Data<R*C*B>	&inp) {
        x.fw(inp);
        n.fw(out);	// normalizes out in place
    }
    void	bk(Data<R*C*B>	&inp,	Data<R*C*B>	&gin,	Data<R*C*B> &gra) {
        n.bk(out,gin);
        x.bk(inp,gin,gra);
    }
};

// Gating forward: out = u * v elementwise, 8 bf16 per thread.
__global__	void	_gatef(__nv_bfloat16	*u,	__nv_bfloat16	*v,	__nv_bfloat16	*out) {
    uint32_t	id=(blockIdx.x*blockDim.x+threadIdx.x)<<3;
    *(bfloat8*)(out+id)=_hmul(*(bfloat8*)(u+id),*(bfloat8*)(v+id));
}
// Gating backward: dv = u*gin and (in place, AFTER dv is computed) the
// u-gradient gin <- v*gin.  The statement order matters: gin is overwritten.
__global__	void	_gateb(__nv_bfloat16	*u,	__nv_bfloat16	*v,	__nv_bfloat16	*gin,	__nv_bfloat16	*dv) {
    uint32_t	id=(blockIdx.x*blockDim.x+threadIdx.x)<<3;
    *(bfloat8*)(dv+id)=_hmul(*(bfloat8*)(u+id),*(bfloat8*)(gin+id));
    *(bfloat8*)(gin+id)=_hmul(*(bfloat8*)(v+id),*(bfloat8*)(gin+id));
}
// Gated residual block: out = o(rmsnorm(u(inp) * v(inp))) + inp, where U and
// V are branch types (sexy or self).  `grt` is static gradient scratch
// shared by all instances of the same instantiation.  The backward reuses
// buffers aggressively (gra and tmp double as scratch), so statement order
// is load-bearing throughout bk().
template<class	U,	class	V,	uint32_t	R,	uint32_t	C,	uint32_t	H,	uint32_t	B>
struct	comb {
    static	Data<R*C*B>	grt;	// scratch for the u-branch input gradient
    U	u;
    V	v;
    Data<R*C*B>	tmp;	// gated product u.out*v.out (forward); dv scratch (backward)
    rmsnorm<R,C*B,H>	n;
    linear<R,R,C*B>	o;	// output projection
    Data<R*C*B>	&out=o.out;
    void	save(FILE	*F) {
        u.save(F);
        v.save(F);
        o.save(F);
    }
    void	load(FILE	*F) {
        u.load(F);
        v.load(F);
        o.load(F);
    }
    uint64_t	size(void) {
        return	u.size()+v.size()+o.size();
    }
    void	fw(Data<R*C*B>	&inp) {
        u.fw(inp);
        v.fw(inp);
        _gatef<<<R*C*B/8/16,16>>>(u.out.data,v.out.data,tmp.data);	// tmp = u*v
        n.fw(tmp);
        o.fw(tmp);
        _sexyadd<<<R*C*B/8/16,16>>>(inp.data,out.data);	// residual: out += inp
    }
    void	bk(Data<R*C*B>	&inp,	Data<R*C*B>	&gin,	Data<R*C*B> &gra) {
        o.bk(tmp,gin,gra);	// gra = grad wrt normalized tmp
        n.bk(tmp,gra);
        _gateb<<<R*C*B/8/16,16>>>(u.out.data,v.out.data,gra.data,tmp.data);	// tmp = v-grad, gra = u-grad
        u.bk(inp,gra,grt);
        v.bk(inp,tmp,gra);
        _sexyadd<<<R*C*B/8/16,16>>>(grt.data,gra.data);	// gra += u-branch input grad
        _sexyadd<<<R*C*B/8/16,16>>>(gin.data,gra.data);	// gra += residual grad
    }
};
template<class	U,	class	V,	uint32_t	R,	uint32_t	C,	uint32_t	H,	uint32_t	B>
Data<R*C*B>	comb<U,V,R,C,H,B>::grt;

// One transformer layer: a gated attention block followed by a gated
// MLP-style block, each with its own residual connection.
template<uint32_t	R,	uint32_t	C,	uint32_t	H,	uint32_t	B>
struct	wyGPT {
    static	Data<R*C*B>	gi;	// shared scratch for the inter-block gradient
    comb<sexy<R,C,H,B>,self<R,C,H,B>,R,C,H,B>	a;	// attention * self gate
    comb<self<R,C,H,B>,self<R,C,H,B>,R,C,H,B>	b;	// self * self gate (MLP)
    Data<R*C*B>	&out=b.out;
    void	save(FILE	*F) {
        a.save(F);
        b.save(F);
    }
    void	load(FILE	*F) {
        a.load(F);
        b.load(F);
    }
    // uint64_t (was uint32_t): every other size() in this file returns
    // uint64_t, and parameter counts can exceed 32 bits for large configs --
    // Denisovan::size() multiplies this by D into a uint64_t.
    uint64_t	size(void) {
        return	a.size()+b.size();
    }
    void	fw(Data<R*C*B>	&inp) {
        a.fw(inp);
        b.fw(a.out);
    }
    void	bk(Data<R*C*B>	&inp,	Data<R*C*B>	&gin,	Data<R*C*B> &gra) {
        b.bk(a.out,gin,gi);
        a.bk(inp,gi,gra);
    }
};
template<uint32_t	R,	uint32_t	C,	uint32_t	H,	uint32_t	B>
Data<R*C*B>	wyGPT<R,C,H,B>::gi;
// Parameter-free embedding: feature r of token at (batch b, position c) is a
// deterministic +/-1 derived by hashing (token, r).  Input windows are
// strided by C+1 because each sample carries one extra target token.
__global__	void	_emb(uint32_t	R,	uint32_t	C,	uint16_t	*inp,	__nv_bfloat16	*out) {
    uint32_t	id=blockIdx.x*blockDim.x+threadIdx.x,	r=id%R,	c=(id/R)%C,	b=(id/R)/C;
    out[id]=__float2bfloat16_rn((_wyhash64(inp[b*(C+1)+c],r)&1)*2-1.0f);
}
// Cross-entropy loss + gradient, deliberately launched <<<1,1>>> (serial:
// B*C iterations accumulate into one scalar; cheap relative to the GEMMs).
// For each position, reads the softmax probability of the true next token
// (via the code->index table), adds -log p to the loss (FLT_MIN guards
// log(0)), and converts the probability in place to the gradient p-1.
__global__	void	dlossf(uint32_t	B,	uint32_t	C,	uint32_t	O,	__nv_bfloat16	*a,	uint16_t	*x,	float	*y,	uint16_t	*table) {
    float	loss=0;
    for(uint32_t	b=0;	b<B;	b++)	for(uint32_t	i=0;	i<C;	i++) {
            __nv_bfloat16	*p=a+(b*C+i)*O+table[x[b*(C+1)+i+1]];	// prob of target token
            float	z=__bfloat162float(*p);
            loss-=logf(fmaxf(z,FLT_MIN));
            *p=__float2bfloat16_rn(z-1);	// softmax gradient at the target
        }
    *y=loss;
}
// Full model: hashed embeddings -> D wyGPT layers -> rmsnorm -> output
// projection, plus checkpoint I/O and one-step training.
// C context, E embed width, D depth, H heads, O vocabulary size, B batch.
template<uint32_t	C,	uint32_t	E,	uint32_t	D,	uint32_t	H,	uint32_t	O,	uint32_t	B>
struct	Denisovan {
  private:
    float	*ret;	// managed scalar receiving the summed batch loss from dlossf
    uint16_t	*data,	*code,	*table;	// sampled windows (C+1 tokens each), vocab codes, raw-code -> class-index map
    Data<E*C*B>	n0g;	// gradient buffer between output head and top layer
  public:
    uint64_t	srng=time(NULL);	// window-sampling RNG; set to 0 by -b for reproducible runs
    Data<E*C*B>	emb;	// hashed +/-1 embeddings (no learned embedding table)
    wyGPT<E,C,H,B>	tra[D];	// transformer stack
    rmsnorm<E,C*B,1>	n1;	// final whole-vector rmsnorm
    linear<E,O,C*B>	ou;	// output projection to O classes
    Denisovan() {
        cudaMallocManaged(&data,	B*2*(C+1));
        cudaMallocManaged(&code,	2*O);
        cudaMallocManaged(&table,	2*65536);
        cudaMallocManaged(&ret,	sizeof(float));
    }
    ~Denisovan() {
        cudaFree(data);
        cudaFree(code);
        cudaFree(table);
        cudaFree(ret);
    }
    // Read O vocabulary codes (whitespace-separated uint16) and build the
    // inverse raw-code -> class-index table.
    void	load_voca(const	char	*F) {
        FILE	*f=fopen(F,"rt");
        if(f==NULL) {	// was unchecked: a missing file crashed inside fscanf
            fprintf(stderr,"cannot open vocabulary %s\n",F);
            return;
        }
        for(uint32_t	i=0;	i<65536;	i++)	table[i]=0;
        for(uint32_t	i=0;	i<O;	i++) {
            if(fscanf(f,"%hu",code+i)!=1) {	// was unchecked: short files left code[] uninitialized
                fprintf(stderr,"vocabulary %s truncated at entry %u\n",F,i);
                break;
            }
            table[code[i]]=i;
        }
        fclose(f);
    }
    // Checkpoint format: five uint32 dims (C,E,D,H,O), vocabulary codes,
    // then every layer's weights followed by the output projection.
    bool	save(const	char	*F) {
        FILE	*f=fopen(F,"wb");
        if(f==NULL)	return	false;
        uint32_t	x;
        x=C;
        fwrite(&x,4,1,f);
        x=E;
        fwrite(&x,4,1,f);
        x=D;
        fwrite(&x,4,1,f);
        x=H;
        fwrite(&x,4,1,f);
        x=O;
        fwrite(&x,4,1,f);
        fwrite(code,O*2,1,f);
        for(uint32_t	i=0;	i<D;	i++)	tra[i].save(f);
        ou.save(f);
        fclose(f);
        return	true;
    }
    bool	load(const	char	*F) {
        FILE	*f=fopen(F,"rb");
        if(f==NULL)	return	false;
        uint32_t	x=0;	// initialized: was printed uninitialized on a short read
        bool	ok=true;
        if(fread(&x,4,1,f)!=1||x!=C) {	fprintf(stderr,"C=%u\n",x);	ok=false;	}
        if(fread(&x,4,1,f)!=1||x!=E) {	fprintf(stderr,"E=%u\n",x);	ok=false;	}
        if(fread(&x,4,1,f)!=1||x!=D) {	fprintf(stderr,"D=%u\n",x);	ok=false;	}
        if(fread(&x,4,1,f)!=1||x!=H) {	fprintf(stderr,"H=%u\n",x);	ok=false;	}
        if(fread(&x,4,1,f)!=1||x!=O) {	fprintf(stderr,"O=%u\n",x);	ok=false;	}
        // was: dimension mismatches were reported but loading CONTINUED,
        // reading a differently-shaped checkpoint into fixed-size buffers;
        // the early return also leaked the FILE handle.
        if(!ok||fread(code,O*2,1,f)!=1) {
            fclose(f);
            return	false;
        }
        for(uint32_t	i=0;	i<D;	i++)	tra[i].load(f);
        ou.load(f);
        fclose(f);
        return	true;
    }
    uint64_t	size(void) {
        return	tra[0].size()*D+ou.size();
    }
    // One optimization step on B random windows; returns the summed token
    // loss for the batch (caller divides by B*C).  `len` is in tokens.
    float	train(uint16_t	*text,	uint64_t	len) {
        // sample B random (C+1)-token windows; the extra token supplies targets
        for(uint32_t	i=0;	i<B;	i++)	cudaMemcpy(data+i*(C+1),text+(wyrand(&srng)%(len-C)),2*(C+1),cudaMemcpyHostToDevice);
        _emb<<<E*C*B/16,16>>>(E,C,data,emb.data);
        for(uint32_t	d=0;	d<D;	d++)	tra[d].fw(d?tra[d-1].out:emb);
        n1.fw(tra[D-1].out);
        ou.fw(tra[D-1].out);
        _softmaxf<<<C*B/16,16>>>(O,ou.out.data);
        dlossf<<<1,1>>>(B,C,O,ou.out.data,data,ret,table);	// also converts probs to grads in place
        ou.bk(tra[D-1].out,ou.out,n0g);
        n1.bk(tra[D-1].out,n0g);
        // unsigned countdown: loop ends when d wraps past zero; forward
        // output buffers are reused as gradient storage for lower layers
        for(uint32_t	d=D-1;	d<D;	d--)	tra[d].bk(d?tra[d-1].out:emb,d<D-1?tra[d+1].out:n0g,tra[d].out);
        cudaDeviceSynchronize();
        return	*ret;
    }
};
#include	"config"
// compile-time hyperparameters (context, embed, depth, heads, voca, batch,
// fullbatch) are defined in "config"
Denisovan<context,embed,depth,heads,voca,batch>	model;
// Print command-line usage to stderr and exit(0).  Called on bad options or
// when no input file is given.
void	document(void) {
    cerr<<"usage:	training [options] input.txt\n";
    cerr<<"\t-i:	input model=NULL\n";
    cerr<<"\t-o:	output model=model\n";
    cerr<<"\t-s:	trained sample=0\n";
    cerr<<"\t-b:	benchmark only=off\n";
    exit(0);
}
uint16_t	*ptr;	// mmapped training corpus viewed as uint16 tokens
int	fd;	// corpus file descriptor
struct	stat	sb;	// corpus file metadata (st_size = bytes = 2*tokens)
// Entry point: parse options, mmap the token corpus, then loop forever
// training `fullbatch` steps per epoch, saving the model while the loss is
// not diverging (or just timing when -b is given).
int	main(int	ac,	char	**av) {
    cublasCreate(&handle);
    string	in,out="model";
    int	opt,bench=0;
    uint64_t	training=0;
    while((opt=getopt(ac,	av,	"i:o:s:b"))>=0) {
        switch(opt) {
        case	'i':
            in=optarg;
            break;
        case	'o':
            out=optarg;
            break;
        case	's': {
            training=atoi(optarg);
            training<<=20;	// -s is given in mega-samples
        }
        break;
        case	'b':
            bench=1;
            model.srng=0;	// fixed sampling seed for reproducible benchmarks
            break;
        default:
            document();
        }
    }
    if(ac<optind+1) {
        document();
        return	0;
    }
    fd=open(av[optind],	O_RDONLY);
    if(fd<0) {	// was unchecked: fstat/mmap then proceeded on fd==-1
        cerr<<"cannot open "<<av[optind]<<'\n';
        return	1;
    }
    if(fstat(fd,	&sb)) {	// was unchecked
        cerr<<"cannot stat "<<av[optind]<<'\n';
        return	1;
    }
    ptr=(uint16_t*)mmap(NULL,	sb.st_size,	PROT_READ,	MAP_SHARED,	fd,	0);
    if(ptr==MAP_FAILED) {	// was unchecked: training would fault on the bad pointer
        cerr<<"cannot mmap "<<av[optind]<<'\n';
        return	1;
    }
    // train() samples windows of context+1 tokens: len-context must be >0,
    // otherwise the unsigned modulo wraps and reads out of bounds.
    if((uint64_t)sb.st_size/2<=context) {
        cerr<<"input too small: need more than "<<context<<" tokens\n";
        return	1;
    }
    cerr.precision(4);
    cerr.setf(ios::fixed);
    double	loss0=FLT_MAX/2,	loss;
    timeval	beg,	end;
    uint64_t	para=model.size();
    cerr<<av[optind]<<'\t'<<sb.st_size/2<<"\npara\t"<<para<<'\n';
    model.load_voca("voca.txt");
    if(in.size())	model.load(in.c_str());
    for(;;) {
        loss=0;
        gettimeofday(&beg,NULL);
        for(uint32_t	i=0;	i<fullbatch;	i++) {
            // schedule-free LR: decays with model size and samples seen
            eta=powf(para*log1pf(depth)+training/sqrtf(batch*context),-1.0f/4);
            training+=context*batch;
            loss+=model.train(ptr,sb.st_size/2);
        }
        loss/=batch*context*fullbatch;	// per-token loss
        if(!bench) {
            // keep checkpointing while loss is not diverging; stop otherwise
            if(loss<loss0+0.02)	model.save(out.c_str());
            else	break;
        }
        loss0=loss;
        gettimeofday(&end,NULL);
        double	t=(end.tv_sec-beg.tv_sec+1e-6*(end.tv_usec-beg.tv_usec));
        cerr<<(training>>20)<<'\t'<<loss<<'\t'<<t<<'\n';
    }
    munmap(ptr,sb.st_size);
    close(fd);
    cublasDestroy(handle);
    return	0;
}

