#include	<cuda_runtime.h>
#include	<cublas_v2.h>
#include	<cuda_bf16.h>
#include	<sys/mman.h>
#include	<sys/stat.h>
#include	<sys/time.h>
#include	<iostream>
#include	<stdint.h>
#include	<unistd.h>
#include	<fcntl.h>
#include	<cfloat>
using	namespace	std;
// Tiger-optimizer EMA horizon: momentum mixes in 1/128 of each new gradient.
#define	tiger_beta	128.0f
cublasHandle_t	handle;	// global cuBLAS handle, created in main()
float	eta;	// current learning rate, recomputed every step in main()
uint64_t	prng=time(NULL);	// host-side RNG state for weight initialization
// wyrand PRNG: advance the state by a fixed odd constant, then mix with a
// multiply-rotate round.  Returns a 64-bit pseudo-random word.
static inline uint64_t wyrand(uint64_t *seed){
	*seed += 0xa0761d6478bd642full;
	uint64_t a = *seed;
	uint64_t b = a ^ 0xe7037ed1a0b428dbull;
	b *= (b >> 32) | (b << 32);               // multiply by own 32-bit rotation
	uint64_t ra = (a >> 32) | (a << 32);
	uint64_t rb = (b >> 32) | (b << 32);
	return (a * ra) ^ rb;
}
// Map a 64-bit random word to an approximately standard-normal float:
// sum of three 21-bit uniforms (Irwin-Hall, n=3) rescaled to mean 0,
// range (-3, 3).  Fix: use a float literal for the scale — the original
// `1.0/(1ull<<20)` was a silent double-precision constant; 2^-20 is exactly
// representable in float, so the computed values are unchanged.
static inline float wy2gau(uint64_t r){	const float	_wynorm=1.0f/(1u<<20);	return ((r&0x1fffff)+((r>>21)&0x1fffff)+((r>>42)&0x1fffff))*_wynorm-3.0f;	}
// wyhash "mum" mixer (32-bit-fallback variant): form the four 32x32-bit
// partial products of *A and *B, then fold them back into *A and *B using
// 32-bit rotations.  Both arguments are updated in place.  The exact
// combination order matters — it defines the hash values _wyhash64 produces.
__device__ void _wymum(uint64_t *A,	uint64_t *B){	uint64_t	hh=(*A>>32)*(*B>>32), hl=(*A>>32)*(uint32_t)*B, lh=(uint32_t)*A*(*B>>32), ll=(uint64_t)(uint32_t)*A*(uint32_t)*B;	*A=((hl>>32)|(hl<<32))^hh;	*B=((lh>>32)|(lh<<32))^ll;	}
// Hash two 64-bit words into one: two _wymum rounds, each preceded by
// xoring in the wyhash prime.  Deterministic — used for the fixed random
// token embedding in _emb.
__device__ uint64_t _wyhash64(uint64_t A, uint64_t B){
	const uint64_t prime = 0xa0761d6478bd642full;
	A ^= prime;	B ^= prime;
	_wymum(&A, &B);
	A ^= prime;	B ^= prime;
	_wymum(&A, &B);
	return A ^ B;
}
// 16-byte vector views used for 8-wide loads/stores:
// short8 holds eight bf16 values, float8 holds eight floats.
struct short8{	__nv_bfloat162	x, y, z, w;	};
struct float8{	float2	x, y, z, w;	};
// float8 -> short8: round-to-nearest bf16 conversion, two lanes at a time.
__device__ short8 _convertfb(float8 *x){
	short8 r;
	float2 *src = (float2*)x;
	__nv_bfloat162 *dst = (__nv_bfloat162*)&r;
	for(int k = 0; k < 4; k++) dst[k] = __float22bfloat162_rn(src[k]);
	return r;
}
// short8 -> float8: widen eight bf16 values to float, two lanes at a time.
__device__ float8 _convertbf(short8 *x){
	float8 r;
	__nv_bfloat162 *src = (__nv_bfloat162*)x;
	float2 *dst = (float2*)&r;
	for(int k = 0; k < 4; k++) dst[k] = __bfloat1622float2(src[k]);
	return r;
}
// Horizontal sum of all eight lanes.  The loop accumulates strictly
// left-to-right, matching the original chained expression's association.
__device__ float sum8(float8 *x){
	float2 *p = (float2*)x;
	float s = p[0].x + p[0].y;
	for(int k = 1; k < 4; k++){	s += p[k].x;	s += p[k].y;	}
	return s;
}
// Eight-lane dot product of x and y.  Accumulates left-to-right, matching
// the original chained expression's association.
__device__ float sum8x(float8 *x,	float8 *y){
	float2 *a = (float2*)x, *b = (float2*)y;
	float s = a[0].x * b[0].x + a[0].y * b[0].y;
	for(int k = 1; k < 4; k++){	s += a[k].x * b[k].x;	s += a[k].y * b[k].y;	}
	return s;
}
// Elementwise bf16 product of two eight-lane vectors.
__device__ short8 _hmul(short8 *x,	short8 *y){
	short8 r;
	__nv_bfloat162 *a = (__nv_bfloat162*)x, *b = (__nv_bfloat162*)y, *c = (__nv_bfloat162*)&r;
	for(int k = 0; k < 4; k++) c[k] = __hmul2(a[k], b[k]);
	return r;
}
// Elementwise bf16 sum of two eight-lane vectors.
__device__ short8 _hadd(short8 *x,	short8 *y){
	short8 r;
	__nv_bfloat162 *a = (__nv_bfloat162*)x, *b = (__nv_bfloat162*)y, *c = (__nv_bfloat162*)&r;
	for(int k = 0; k < 4; k++) c[k] = __hadd2(a[k], b[k]);
	return r;
}
// Signed square root: sqrt(|x|) carrying the sign of x.  Equivalent to the
// branching form (IEEE sqrtf(-0.0f) is -0.0f, so signed zeros also agree).
__device__ float _ssqrt(float x){	return copysignf(sqrtf(fabsf(x)), x);	}
// Tiger-style optimizer update: w[i] -= lr * signed-sqrt(m[i]), eight
// weights per thread.  w: fp32 master weights; m: bf16 momentum; lr: the
// global learning rate eta.  No bounds guard: the launch grid must cover
// the buffer exactly (element count a multiple of 8*blockDim — all call
// sites launch <<<N/8/16,16>>>).
__global__	void	_sqrtum(float	*w, __nv_bfloat16	*m,	float	lr){	
	uint32_t	i=(blockIdx.x*blockDim.x+threadIdx.x)<<3;	float8	*w4=(float8*)(w+i),	z=_convertbf((short8*)(m+i));	// 8-wide vector load of weights and momentum
	z.x.x=w4->x.x-lr*_ssqrt(z.x.x);	z.x.y=w4->x.y-lr*_ssqrt(z.x.y);
	z.y.x=w4->y.x-lr*_ssqrt(z.y.x);	z.y.y=w4->y.y-lr*_ssqrt(z.y.y);
	z.z.x=w4->z.x-lr*_ssqrt(z.z.x);	z.z.y=w4->z.y-lr*_ssqrt(z.z.y);
	z.w.x=w4->w.x-lr*_ssqrt(z.w.x);	z.w.y=w4->w.y-lr*_ssqrt(z.w.y);
	*w4=z;	// single 32-byte store of the updated weights
}
// 8-bit quantization grid: Q8F = sqrt(90) scales floats to integer codes
// clamped to [-Q8M, Q8M]; Q8B = 1/Q8F scales codes back.  Quantized values
// therefore lie on a grid of step Q8B with magnitude <= 90*Q8B ~= 9.49.
#define Q8F     9.48683298050514f
#define Q8B     0.105409255338946f
#define Q8M     90.0f
// Quantize eight consecutive floats per thread onto the Q8 grid and store
// them as bf16: round(x*Q8F) clamped to [-Q8M,Q8M], rescaled by Q8B.
// The result is written with a single 16-byte vector store; the launch grid
// must cover the buffer exactly (element count a multiple of 8*blockDim).
__global__	void	_quant(float	*inp, __nv_bfloat16	*out){
	uint32_t	base=(blockIdx.x*blockDim.x+threadIdx.x)<<3;
	float	*src=inp+base;
	short8	q;
	__nv_bfloat162	*lanes=(__nv_bfloat162*)&q;
	for(uint32_t	k=0;	k<4;	k++){
		float2	t;
		t.x=fminf(fmaxf(roundf(src[2*k]*Q8F),-Q8M),Q8M)*Q8B;
		t.y=fminf(fmaxf(roundf(src[2*k+1]*Q8F),-Q8M),Q8M)*Q8B;
		lanes[k]=__float22bfloat162_rn(t);	// same round-to-nearest as __float2bfloat16_rn per lane
	}
	*(short8*)(out+base)=q;
}
// Serialize one float per thread as an int8 code on the Q8 grid.
// NOTE(review): the narrowing store assumes `char` is signed (codes span
// [-90,90]) — true on x86/nvcc defaults, but confirm on other targets.
__global__	void	_save8(float	*inp, char	*out){
	uint32_t	i=blockIdx.x*blockDim.x+threadIdx.x;
	float	q=fminf(fmaxf(roundf(inp[i]*Q8F),-Q8M),Q8M);
	out[i]=q;
}
// Inverse of _save8: dequantize one int8 code per thread back to float.
// (Despite the names, `out` is the source and `inp` is the destination.)
__global__	void	_load8(float	*inp, char	*out){
	uint32_t	i=blockIdx.x*blockDim.x+threadIdx.x;
	inp[i]=Q8B*out[i];
}
// Managed (unified-memory) buffer of N bf16 values, owned RAII-style.
// Fix: the struct owns a raw pointer and frees it in the destructor but was
// copyable — any accidental copy would double-free.  Copying is now deleted
// (all existing uses are members, statics, and references, never copies).
template<uint32_t	N>
struct	Data{
	__nv_bfloat16	*data;	// N elements, host- and device-visible
	Data(){	cudaMallocManaged(&data,	N<<1);	}	// N * sizeof(__nv_bfloat16) bytes
	~Data(){	cudaFree(data);	}
	Data(const	Data&)=delete;	// owning raw pointer: forbid copies
	Data&	operator=(const	Data&)=delete;
	void	zero(void){	cudaMemset(data,	0,	N<<1);	}
};
// Managed buffer of N fp32 values with int8-grid (de)serialization.
// Fixes: deleted copy operations (owning raw pointer, same double-free
// hazard as Data) and a checked fread in load() — a short read previously
// dequantized uninitialized bytes silently.
template<uint32_t	N>
struct	Data32{
	float	*data;	// N elements, host- and device-visible
	Data32(){	cudaMallocManaged(&data,	N<<2);	}	// N * sizeof(float) bytes
	~Data32(){	cudaFree(data);	}
	Data32(const	Data32&)=delete;
	Data32&	operator=(const	Data32&)=delete;
	// Write N int8 codes to F (tmp provides N bytes of managed scratch).
	void	save(FILE	*F){
		Data<N/2>	tmp;
		_save8<<<N/16,16>>>(data,(char*)tmp.data);
		cudaDeviceSynchronize();
		fwrite(tmp.data,N,1,F);
	}
	// Read N int8 codes from F and dequantize into data.
	void	load(FILE	*F){
		Data<N/2>	tmp;
		if(fread(tmp.data,N,1,F)!=1)	fprintf(stderr,"Data32::load: short read\n");
		_load8<<<N/16,16>>>(data,(char*)tmp.data);
		cudaDeviceSynchronize();
	}
	uint64_t	size(void){	return	N;	}
	void	zero(void){	cudaMemset(data,	0,	N<<2);	}
	// Gaussian init with standard deviation `norm`, drawn on the host.
	void	rand(float	norm=1){	for(uint32_t	i=0;	i<N;	i++)	data[i]=norm*wy2gau(wyrand(&prng));	}
};
// Quantized linear layer mapping R0 channels to R1 channels across C columns.
// wei32: fp32 master weights; wei: bf16 copy quantized onto the Q8 grid each
// forward pass; wem: bf16 gradient momentum (Tiger-style EMA, horizon
// tiger_beta); out: R1 x C activations.  cuBLAS is column-major throughout.
template<uint32_t	R0,	uint32_t	R1,	uint32_t	C>
struct	linear{
	Data<R0*R1>	wei,wem;
	Data32<R0*R1>	wei32;
	Data<R1*C>	out;
	linear(){	wei32.rand(Q8B);	wem.zero();	}
	void	save(FILE	*F){	wei32.save(F);	}
	void	load(FILE	*F){	wei32.load(F);	}
	uint64_t	size(void){	return	wei32.size();	}
	// Forward: quantize the master weights, then out = wei^T * inp / sqrt(R0).
	void	fw(Data<R0*C>	&inp){
		float	alf=1/sqrtf(R0),	bet=0;
		_quant<<<R0*R1/8/16,16>>>(wei32.data,wei.data);
		cublasGemmEx(handle,CUBLAS_OP_T,CUBLAS_OP_N,R1,C,R0,&alf,wei.data,CUDA_R_16BF,R0,inp.data,CUDA_R_16BF,R0,&bet,out.data,CUDA_R_16BF,R1,CUBLAS_COMPUTE_32F,CUBLAS_GEMM_DEFAULT);
	}
	// Backward: gin is the gradient at out.  Folds the weight gradient into
	// the momentum EMA (wem = (1-1/beta)*wem + scaled grad), writes the input
	// gradient into gra (accumulating when requested), then applies the
	// _sqrtum weight update with the global learning rate eta.
	void	bk(Data<R0*C>	&inp,	Data<R1*C>	&gin,	Data<R0*C>	&gra,	bool	accumulate=false){
		float	alf=1/sqrtf(R0),	alf1=1/sqrtf(R0*C)/tiger_beta,	bet=(tiger_beta-1.0f)/tiger_beta,	bet1=accumulate;	// bet1: 0 overwrites gra, 1 accumulates
		cublasGemmEx(handle,CUBLAS_OP_N,CUBLAS_OP_T,R0,R1,C,&alf1,inp.data,CUDA_R_16BF,R0,gin.data,CUDA_R_16BF,R1,&bet,wem.data,CUDA_R_16BF,R0,CUBLAS_COMPUTE_32F,CUBLAS_GEMM_DEFAULT);		
		cublasGemmEx(handle,CUBLAS_OP_N,CUBLAS_OP_N,R0,C,R1,&alf,wei.data,CUDA_R_16BF,R0,gin.data,CUDA_R_16BF,R1,&bet1,gra.data,CUDA_R_16BF,R0,CUBLAS_COMPUTE_32F,CUBLAS_GEMM_DEFAULT);
		_sqrtum<<<R0*R1/8/16,16>>>(wei32.data,wem.data,eta);
	}
};
// Layer normalization, forward, in place.  One thread normalizes one group
// of r = R/H consecutive rows of one column: subtract the group mean and
// scale to unit variance ((x-mean)*sqrt(r/ssd)).  norm[id] stores the sum of
// squared deviations (clamped to >=1e-18) for the backward pass.
// Assumes r is a multiple of 8 (16-byte vector loads/stores).
__global__	void	_layernormf(uint32_t	R,	uint32_t	C,	uint32_t	H,	__nv_bfloat16	*inp,	float	*norm){
	uint32_t	id=blockIdx.x*blockDim.x+threadIdx.x,	r=R/H;	float	sum=0,	nor=0;		
	for(uint32_t	i=0;	i<r;	i+=8){	float8	z=_convertbf((short8*)(inp+id*r+i));	sum+=sum8(&z);	nor+=sum8x(&z,&z);	}
	sum/=r;	nor=fmaxf(nor-sum*sum*r,1e-18f);	norm[id]=nor;	nor=sqrtf(r/nor);	// sum: mean; nor: Σx²-r·mean² = Σ(x-mean)²
	for(uint32_t	i=0;	i<r;	i+=8){
		float8	z=_convertbf((short8*)(inp+id*r+i));	
		z.x.x=(z.x.x-sum)*nor;	z.x.y=(z.x.y-sum)*nor;	z.y.x=(z.y.x-sum)*nor;	z.y.y=(z.y.y-sum)*nor;
		z.z.x=(z.z.x-sum)*nor;	z.z.y=(z.z.y-sum)*nor;	z.w.x=(z.w.x-sum)*nor;	z.w.y=(z.w.y-sum)*nor;
		*(short8*)(inp+id*r+i)=_convertfb(&z);
	}
}
// Layer normalization, backward, in place on gin.  inp holds the NORMALIZED
// activations written by _layernormf, norm holds the saved sum of squared
// deviations.  Each thread transforms the gradient of one r = R/H group:
// g <- s*g - mg*x_hat - mean_correction, where s = sqrt(r/norm) and mg
// removes the component of the gradient along x_hat.
__global__	void	_layernormb(uint32_t	R,	uint32_t	C,	uint32_t	H,	__nv_bfloat16	*inp,	__nv_bfloat16	*gin,	float	*norm){
	uint32_t	id=blockIdx.x*blockDim.x+threadIdx.x,	r=R/H;
	float	mg=0,	 sgi=0,sou=0,	s=sqrtf(r/norm[id]),	sum=0;
	// sgi: Σ gradient, sou: Σ normalized activation, mg: their dot product
	for(uint32_t	i=0;	i<r;	i+=8){	
		float8	a=_convertbf((short8*)(gin+id*r+i)),b=_convertbf((short8*)(inp+id*r+i));
		sgi+=sum8(&a);	sou+=sum8(&b);	mg+=sum8x(&a,&b);
	}
	mg/=norm[id]*s;	sum=(s*sgi-mg*sou)/r;
	for(uint32_t	i=0;	i<r;	i+=8){
		float8	a=_convertbf((short8*)(gin+id*r+i)),b=_convertbf((short8*)(inp+id*r+i));
		a.x.x=s*a.x.x-mg*b.x.x-sum;	a.x.y=s*a.x.y-mg*b.x.y-sum;		a.y.x=s*a.y.x-mg*b.y.x-sum;	a.y.y=s*a.y.y-mg*b.y.y-sum;
		a.z.x=s*a.z.x-mg*b.z.x-sum;	a.z.y=s*a.z.y-mg*b.z.y-sum;		a.w.x=s*a.w.x-mg*b.w.x-sum;	a.w.y=s*a.w.y-mg*b.w.y-sum;
		*(short8*)(gin+id*r+i)=_convertfb(&a);
	}
}
// Layer-norm wrapper: normalizes R x C activations in groups of R/H rows
// (one group per head), keeping the per-group statistic in `nor` for the
// backward pass.  Both passes operate in place on the given buffers.
template<uint32_t	R,	uint32_t	C,	uint32_t	H>
struct	layernorm{
	Data32<C*H>	nor;	// saved sum-of-squared-deviations, one per group
	void	fw(Data<R*C>	&inp){
		_layernormf<<<C*H/16,16>>>(R, C, H, inp.data, nor.data);
	}
	void	bk(Data<R*C>	&inp,	Data<R*C>	&gin){
		_layernormb<<<C*H/16,16>>>(R, C, H, inp.data, gin.data, nor.data);
	}
};
// Softmax over one row of R bf16 values per thread, in place: subtract the
// row max (for numerical stability), exponentiate, divide by the sum.
// Assumes R is even (bf162 pairs).  Accumulates the sum in float but
// recomputes exp in bf16 for the final store.  NOTE(review): uses bf16
// __hmax/h2exp intrinsics — requires an SM80+ build target; confirm arch.
__global__	void	_softmaxf(uint32_t	R,	__nv_bfloat16	*inp){
	uint32_t	id=blockIdx.x*blockDim.x+threadIdx.x;	float	sum=0;	float2	z={-FLT_MAX/2,-FLT_MAX/2};
	__nv_bfloat162	*p=(__nv_bfloat162*)(inp+id*R),	ma=__float22bfloat162_rn(z),	s;
	for(uint32_t	i=0;	i<R/2;	i++)	ma=__hmax2(p[i],ma);
	ma.x=ma.y=__hmax(ma.x,ma.y);	// reduce the two-lane max to a scalar
	for(uint32_t	i=0;	i<R/2;	i++){	float2	t=__bfloat1622float2(h2exp(__hsub2(p[i],ma)));	sum+=t.x+t.y;	}	
	z.x=z.y=1/sum;	s=__float22bfloat162_rn(z);
	for(uint32_t	i=0;	i<R/2;	i++)	p[i]=__hmul2(h2exp(__hsub2(p[i],ma)),s);	
}
// Attention forward, positional part: one thread owns one row (query
// position c of head h).  For each allowed (causal, i<=c) entry it adds the
// learned per-head bias indexed by distance (p-i walks backwards from
// position c) and exponentiates; future entries are zeroed.  The result is
// an UNNORMALIZED exp-attention matrix (no softmax division here).
__global__	void	_sexyfp(uint32_t	H,	uint32_t	C,	__nv_bfloat16	*att,	__nv_bfloat16	*pe){
	uint32_t	id=blockIdx.x*blockDim.x+threadIdx.x,	c=id%C,	h=(id/C)%H;	__nv_bfloat16	*a=att+id*C,	*p=pe+h*C+c,	z=__float2bfloat16_rn(0);
	for(uint32_t	i=0;	i<=c;	i++)	a[i]=hexp(__hadd(*(p-i),a[i]));
	for(uint32_t	i=c+1;	i<C;	i++)	a[i]=z;	// causal mask
}
// Output gating, forward: out = gate * v (eight lanes per thread).  The gate
// lives in the fourth R-wide block of the fused 4R-wide projection u.
__global__	void	_sexyfsuv(uint32_t	R,	__nv_bfloat16	*u,	__nv_bfloat16	*v,	__nv_bfloat16	*out){
	uint32_t	id=(blockIdx.x*blockDim.x+threadIdx.x)<<3;
	uint32_t	gofs=(id/R)*4*R+3*R+(id%R);	// gate lane inside the 4R-wide buffer
	*(short8*)(out+id)=_hmul((short8*)(u+gofs),(short8*)(v+id));
}
// Output gating, backward: gradient w.r.t. the gate is v*g (stored into gx),
// then the value-path gradient g <- gate*g is rewritten in place.  The store
// into gx must happen before gin is overwritten — order matters.
__global__	void	_sexybsuv(uint32_t	R,	__nv_bfloat16	*u,	__nv_bfloat16	*v,	__nv_bfloat16	*gin,	__nv_bfloat16	*gx){
	uint32_t	id=(blockIdx.x*blockDim.x+threadIdx.x)<<3;
	uint32_t	gofs=(id/R)*4*R+3*R+(id%R);	// gate lane inside the 4R-wide buffer
	*(short8*)(gx+gofs)=_hmul((short8*)(v+id),(short8*)(gin+id));
	*(short8*)(gin+id)=_hmul((short8*)(u+gofs),(short8*)(gin+id));
}
// Attention backward helper: gin <- att * gin elementwise (chains the
// gradient through the exp in _sexyfp, whose derivative is its own output).
__global__	void	_sexyba(__nv_bfloat16	*gin,	__nv_bfloat16	*att){
	uint32_t	id=(blockIdx.x*blockDim.x+threadIdx.x)<<3;
	short8	*a=(short8*)(att+id),	*g=(short8*)(gin+id);
	*g=_hmul(a,g);
}
// Positional-bias gradient: one thread per (head h, distance c).  Sums the
// attention-matrix gradient a along the distance-c subdiagonal over all
// batches, rescales by the number of contributing entries, and folds the
// result into the bias momentum pm with the tiger_beta EMA.
__global__	void	_sexybp(uint32_t	B,	uint32_t	H,	uint32_t	R,	uint32_t	C,	__nv_bfloat16	*a,	__nv_bfloat16	*pm){
	uint32_t  id=blockIdx.x*blockDim.x+threadIdx.x,	h=id/C,	c=id%C;	float	s=0;
	for(uint32_t	b=0;	b<B;	b++){	__nv_bfloat16	*p=a+b*H*C*C+h*C*C;	for(uint32_t	i=c;	i<C;	i++)	s+=__bfloat162float(p[i*C+(i-c)]);	}	
	s/=sqrtf((C-c)*R/H*B);	float	m=__bfloat162float(pm[id]);	pm[id]=__float2bfloat16(m+(s-m)/tiger_beta);	// EMA update of the momentum
}
// In-place residual add: v += u, eight bf16 lanes per thread.
__global__	void	_sexyadd(__nv_bfloat16	*u,	__nv_bfloat16	*v){
	uint32_t  id=(blockIdx.x*blockDim.x+threadIdx.x)<<3;
	short8	*a=(short8*)(u+id),	*b=(short8*)(v+id);
	*b=_hadd(a,b);
}
// "sexy": causal multi-head attention residual block (R channels, C context
// positions, H heads, B batch).  The fused projection x yields four R-wide
// row blocks per column; blocks 0 and 1 are the two attention-logit operands
// (q/k roles — inferred from the GEMM wiring, confirm against upstream),
// block 2 is the value, block 3 gates the attended output.  pe is a learned
// per-head, per-distance positional bias.  All GEMMs are cuBLAS
// column-major; leading dimension 4*R addresses blocks inside x.out.
template<uint32_t	R,	uint32_t	C,	uint32_t	H,	uint32_t	B>
struct	sexy{
	// Shared static scratch: backward runs one layer at a time, so a single
	// set of gradient buffers serves every instance of this instantiation.
	static	Data<C*C*H*B>	da;	// gradient w.r.t. the attention matrices
	static	Data<R*C*B>	gi;	// gradient on the attended-value path
	static	Data<4*R*C*B>	gx;	// gradient w.r.t. the fused projection output
	Data<C*C*H*B>	at;	// exp-attention weights (causal, unnormalized)
	Data<R*C*B>	va,tmp;	// attended values; gated output fed to o
	layernorm<R,C*B,H>	n1;	// normalizes va before gating
	layernorm<4*R,C*B,4*H>	n4;	// normalizes the fused projection
	Data32<C*H>	pe32;	// fp32 master of the positional bias
	Data<C*H>	pe,pm;	// quantized bias; its gradient momentum
	linear<R,4*R,C*B>	x;	// fused 4R-wide projection
	linear<R,R,C*B>	o;	// output projection
	Data<R*C*B>	&out=o.out;
	sexy(){	pe32.zero();	pm.zero();	}
	void	save(FILE	*F){	pe32.save(F);	x.save(F);	o.save(F);	}
	void	load(FILE	*F){	pe32.load(F);	x.load(F);	o.load(F);	}
	uint64_t	size(void){	return	pe32.size()+x.size()+o.size();	}
	void	fw(Data<R*C*B>	&inp){
		float	alf=1/sqrtf(R/H),alf1=1,bet=0;
		x.fw(inp);	n4.fw(x.out);
		// logits: per batch and head, at = block0^T * block1 / sqrt(R/H)
		for(uint32_t	b=0;	b<B;	b++)	cublasGemmStridedBatchedEx(handle,CUBLAS_OP_T,CUBLAS_OP_N,C,C,R/H,&alf,x.out.data+b*4*R*C,CUDA_R_16BF,4*R,R/H,x.out.data+b*4*R*C+R,CUDA_R_16BF,4*R,R/H,&bet,at.data+b*H*C*C,CUDA_R_16BF,C,C*C,H,CUBLAS_COMPUTE_32F,CUBLAS_GEMM_DEFAULT);
		// add the positional bias, exponentiate, apply the causal mask
		_quant<<<C*H/8/16,16>>>(pe32.data,pe.data);	_sexyfp<<<C*H*B/16,16>>>(H,C,at.data,pe.data);
		// va = value block (offset 2R) times the attention matrix
		for(uint32_t	b=0;	b<B;	b++)	cublasGemmStridedBatchedEx(handle,CUBLAS_OP_N,CUBLAS_OP_N,R/H,C,C,&alf1,x.out.data+b*4*R*C+2*R,CUDA_R_16BF,4*R,R/H,at.data+b*H*C*C,CUDA_R_16BF,C,C*C,&bet,va.data+b*R*C,CUDA_R_16BF,R,R/H,H,CUBLAS_COMPUTE_32F,CUBLAS_GEMM_DEFAULT);
		// normalize, gate with block 3, project, add the residual
		n1.fw(va);	_sexyfsuv<<<R*C*B/8/16,16>>>(R,x.out.data,va.data,tmp.data);	
		o.fw(tmp);	_sexyadd<<<R*C*B/8/16,16>>>(inp.data,out.data);	
	}
	void	bk(Data<R*C*B>	&inp,	Data<R*C*B>	&gin,	Data<R*C*B> &gra){
		float	alf=1/sqrtf(R/H),alf1=1,bet=0;
		// through the output projection, the gate, and the value layernorm
		o.bk(tmp,gin,gi);	_sexybsuv<<<R*C*B/8/16,16>>>(R,x.out.data,va.data,gi.data,gx.data);	n1.bk(va,gi);
		for(uint32_t	b=0;	b<B;	b++){
			// da = grad of the attention matrix; gx(block 2) = grad of the values
			cublasGemmStridedBatchedEx(handle,CUBLAS_OP_T,CUBLAS_OP_N,C,C,R/H,&alf1,x.out.data+b*4*R*C+2*R,CUDA_R_16BF,4*R,R/H,gi.data+b*R*C,CUDA_R_16BF,R,R/H,&bet,da.data+b*H*C*C,CUDA_R_16BF,C,C*C,H,CUBLAS_COMPUTE_32F,CUBLAS_GEMM_DEFAULT);
			cublasGemmStridedBatchedEx(handle,CUBLAS_OP_N,CUBLAS_OP_T,R/H,C,C,&alf1,gi.data+b*R*C,CUDA_R_16BF,R,R/H,at.data+b*H*C*C,CUDA_R_16BF,C,C*C,&bet,gx.data+b*4*R*C+2*R,CUDA_R_16BF,4*R,R/H,H,CUBLAS_COMPUTE_32F,CUBLAS_GEMM_DEFAULT);
		}
		// chain through exp (da *= at), update the positional-bias momentum and weights
		_sexyba<<<C*C*H*B/8/16,16>>>(da.data,at.data);	_sexybp<<<C*H/16,16>>>(B,H,R,C,da.data,pm.data);	_sqrtum<<<C*H/8/16,16>>>(pe32.data,pm.data,eta);
		for(uint32_t	b=0;	b<B;	b++){
			// gradients of the two logit operands (blocks 0 and 1)
			cublasGemmStridedBatchedEx(handle,CUBLAS_OP_N,CUBLAS_OP_T,R/H,C,C,&alf,x.out.data+b*4*R*C+R,CUDA_R_16BF,4*R,R/H,da.data+b*H*C*C,CUDA_R_16BF,C,C*C,&bet,gx.data+b*4*R*C,CUDA_R_16BF,4*R,R/H,H,CUBLAS_COMPUTE_32F,CUBLAS_GEMM_DEFAULT);
			cublasGemmStridedBatchedEx(handle,CUBLAS_OP_N,CUBLAS_OP_N,R/H,C,C,&alf,x.out.data+b*4*R*C,CUDA_R_16BF,4*R,R/H,da.data+b*H*C*C,CUDA_R_16BF,C,C*C,&bet,gx.data+b*4*R*C+R,CUDA_R_16BF,4*R,R/H,H,CUBLAS_COMPUTE_32F,CUBLAS_GEMM_DEFAULT);
		}	
		// through the projection layernorm and the fused linear; add the residual grad
		n4.bk(x.out,gx);	x.bk(inp,gx,gra);	_sexyadd<<<R*C*B/8/16,16>>>(gin.data,gra.data);	
	}	
};
// Out-of-line definitions of sexy's shared scratch buffers
// (one set per template instantiation).
template<uint32_t	R,	uint32_t	C,	uint32_t	H,	uint32_t	B>
Data<C*C*H*B>	sexy<R,C,H,B>::da;
template<uint32_t	R,	uint32_t	C,	uint32_t	H,	uint32_t	B>
Data<R*C*B>	sexy<R,C,H,B>::gi;
template<uint32_t	R,	uint32_t	C,	uint32_t	H,	uint32_t	B>
Data<4*R*C*B>	sexy<R,C,H,B>::gx;
// Feed-forward gate, forward: out = u1 * u2, multiplying the two S-wide
// halves of each 2S-wide column of u (eight lanes per thread).
__global__	void	_selffsuv(uint32_t	S,	__nv_bfloat16	*u,	__nv_bfloat16	*out){
	uint32_t	id=(blockIdx.x*blockDim.x+threadIdx.x)<<3;
	uint32_t	base=(id/S)*2*S+(id%S);	// first half; second half sits S lanes later
	*(short8*)(out+id)=_hmul((short8*)(u+base),(short8*)(u+base+S));
}
// Feed-forward gate, backward: d(u1) = u2*g and d(u2) = u1*g, written into
// the matching halves of the 2S-wide gradient buffer d.
__global__	void	_selfbsuv(uint32_t	S,	__nv_bfloat16	*u,	__nv_bfloat16	*gin,	__nv_bfloat16	*d){
	uint32_t	id=(blockIdx.x*blockDim.x+threadIdx.x)<<3;
	uint32_t	base=(id/S)*2*S+(id%S);
	*(short8*)(d+base)=_hmul((short8*)(u+base+S),(short8*)(gin+id));
	*(short8*)(d+base+S)=_hmul((short8*)(u+base),(short8*)(gin+id));
}
// Gated feed-forward residual block: expand R -> 2R, normalize, multiply the
// two halves together, project back to R, add the residual.
template<uint32_t	R,	uint32_t	C,	uint32_t	H>
struct	self{
	static	Data<R*C>	gi;	// shared scratch: gradient of the gated product
	static	Data<2*R*C>	du;	// shared scratch: gradient of the expansion
	Data<R*C>	tmp;	// u1*u2, the gated product
	layernorm<2*R,C,2*H>	n2;
	linear<R,2*R,C>	u;	// expansion
	linear<R,R,C>	o;	// output projection
	Data<R*C>	&out=o.out;
	void	save(FILE	*F){	u.save(F);	o.save(F);	}
	void	load(FILE	*F){	u.load(F);	o.load(F);	}
	uint64_t	size(void){	return	u.size()+o.size();	}
	void	fw(Data<R*C>	&inp){
		u.fw(inp);
		n2.fw(u.out);
		_selffsuv<<<R*C/8/16,16>>>(R,u.out.data,tmp.data);
		o.fw(tmp);
		_sexyadd<<<R*C/8/16,16>>>(inp.data,out.data);	// residual
	}
	void	bk(Data<R*C>	&inp,	Data<R*C>	&gin,	Data<R*C> &gra){
		o.bk(tmp,gin,gi);
		_selfbsuv<<<R*C/8/16,16>>>(R,u.out.data,gi.data,du.data);
		n2.bk(u.out,du);
		u.bk(inp,du,gra);
		_sexyadd<<<R*C/8/16,16>>>(gin.data,gra.data);	// residual gradient
	}
};
// Out-of-line definitions of self's shared scratch buffers
// (one set per template instantiation).
template<uint32_t	R,	uint32_t	C,	uint32_t	H>
Data<R*C>	self<R,C,H>::gi;
template<uint32_t	R,	uint32_t	C,	uint32_t	H>
Data<2*R*C>	self<R,C,H>::du;
// One transformer layer: feed-forward block, attention block, feed-forward
// block, each with its own residual connection.  Backward ping-pongs the
// gradient between the caller's gin and gra buffers.
// Fix: size() returned uint32_t while every sibling size() returns
// uint64_t — the parameter count could overflow 32 bits and the caller
// (Denisovan::size) accumulates in uint64_t; widened, backward compatible.
template<uint32_t	R,	uint32_t	C,	uint32_t	H,	uint32_t	B>
struct	wyGPT{
	self<R,C*B,H>	a;	// pre feed-forward
	sexy<R,C,H,B>	b;	// attention
	self<R,C*B,H>	c;	// post feed-forward
	Data<R*C*B>	&out=c.out;
	void	save(FILE	*F){	a.save(F);	b.save(F);	c.save(F);	}
	void	load(FILE	*F){	a.load(F);	b.load(F);	c.load(F);	}
	uint64_t	size(void){	return	a.size()+b.size()+c.size();	}
	void	fw(Data<R*C*B>	&inp){	a.fw(inp);	b.fw(a.out);	c.fw(b.out);	}
	void	bk(Data<R*C*B>	&inp,	Data<R*C*B>	&gin,	Data<R*C*B> &gra){	c.bk(b.out,gin,gra);	b.bk(a.out,gra,gin);	a.bk(inp,gin,gra);	}
};
// Fixed (non-learned) token embedding: channel r of token inp[b*(C+1)+c] is
// a deterministic +/-1 from the low bit of wyhash64(token, r).  One thread
// per output element; the input rows are C+1 long (the extra token is the
// training target, not embedded here — c only covers [0,C)).
__global__	void	_emb(uint32_t	R,	uint32_t	C,	uint16_t	*inp,	__nv_bfloat16	*out){	uint32_t	id=blockIdx.x*blockDim.x+threadIdx.x,	r=id%R,	c=(id/R)%C,	b=(id/R)/C;	out[id]=__float2bfloat16_rn((_wyhash64(inp[b*(C+1)+c],r)&1)*2-1.0f);	}
// Cross-entropy loss + gradient seed, launched <<<1,1>>> (single thread —
// serial over the whole batch; cheap relative to the GEMMs).  `a` holds
// softmax probabilities (B*C rows of O).  For each position, the probability
// of the true next token (looked up via table[x[...+1]]) contributes
// -log(p) to the loss (clamped away from log(0)) and is replaced in place
// by the softmax gradient p-1; all other entries already equal p.
__global__	void	dlossf(uint32_t	B,	uint32_t	C,	uint32_t	O,	__nv_bfloat16	*a,	uint16_t	*x,	float	*y,	uint16_t	*table){
	float	loss=0;
	for(uint32_t	b=0;	b<B;	b++)	for(uint32_t	i=0;	i<C;	i++){
		__nv_bfloat16	*p=a+(b*C+i)*O+table[x[b*(C+1)+i+1]];	float	z=__bfloat162float(*p);
		loss-=logf(fmaxf(z,FLT_MIN));	*p=__float2bfloat16_rn(z-1);
	}
	*y=loss;	// summed (not averaged) loss for this step
}
// Full model: fixed hash embedding -> D wyGPT blocks -> layernorm -> output
// projection -> softmax cross-entropy.  C context, E embedding width,
// D depth, H heads, O vocabulary size, B batch.
// Fixes: load() used to print a dimension mismatch but KEEP reading, loading
// a differently-shaped checkpoint into the wrong weights; it now closes the
// file and returns false, leaving the fresh initialization intact.
// load_voca() now checks fopen/fscanf instead of reading garbage silently.
template<uint32_t	C,	uint32_t	E,	uint32_t	D,	uint32_t	H,	uint32_t	O,	uint32_t	B>
struct	Denisovan{
private:
	float	*ret;	// device-visible scalar: summed loss of one train() step
	uint16_t	*data,	*code,	*table;	// minibatch tokens; vocab codes; code->index map
	Data<E*C*B>	n0g,trag[2];	// gradient scratch (trag ping-pongs between layers)
public:
	uint64_t	srng=time(NULL);	// host RNG state for sampling training windows
	Data<E*C*B>	emb;
	wyGPT<E,C,H,B>	tra[D];
	layernorm<E,C*B,H>	n1;
	linear<E,O,C*B>	ou;
	Denisovan(){
		cudaMallocManaged(&data,	B*2*(C+1));	// B rows of C+1 uint16 tokens (last is the target)
		cudaMallocManaged(&code,	2*O);
		cudaMallocManaged(&table,	2*65536);
		cudaMallocManaged(&ret,	sizeof(float));
	}
	~Denisovan(){	cudaFree(data);	cudaFree(code);	cudaFree(table);	cudaFree(ret);	}
	// Read the vocabulary file (one uint16 code per entry) and build the
	// inverse map table[code]=index; unlisted codes map to index 0.
	void	load_voca(const	char	*F){
		FILE	*f=fopen(F,"rt");
		if(f==NULL){	fprintf(stderr,"load_voca: cannot open %s\n",F);	return;	}
		for(uint32_t	i=0;	i<65536;	i++)	table[i]=0;
		for(uint32_t	i=0;	i<O;	i++){
			if(fscanf(f,"%hu",code+i)!=1){	fprintf(stderr,"load_voca: short read at entry %u\n",i);	break;	}
			table[code[i]]=i;
		}
		fclose(f);
	}
	// Serialize hyper-parameters, vocabulary and all weights.
	bool	save(const	char	*F){
		FILE	*f=fopen(F,"wb");	if(f==NULL)	return	false;
		uint32_t	x;
		x=C;	fwrite(&x,4,1,f);
		x=E;	fwrite(&x,4,1,f);
		x=D;	fwrite(&x,4,1,f);
		x=H;	fwrite(&x,4,1,f);
		x=O;	fwrite(&x,4,1,f);
		fwrite(code,O*2,1,f);
		for(uint32_t	i=0;	i<D;	i++)	tra[i].save(f);
		ou.save(f);	fclose(f);	return	true;
	}
	// Load a checkpoint; refuses (returns false) when any stored dimension
	// disagrees with this instantiation's template parameters.
	bool	load(const	char	*F){
		FILE	*f=fopen(F,"rb");	if(f==NULL)	return	false;
		uint32_t	x;
		if(fread(&x,4,1,f)!=1||x!=C){	fprintf(stderr,"C=%u\n",x);	fclose(f);	return	false;	}
		if(fread(&x,4,1,f)!=1||x!=E){	fprintf(stderr,"E=%u\n",x);	fclose(f);	return	false;	}
		if(fread(&x,4,1,f)!=1||x!=D){	fprintf(stderr,"D=%u\n",x);	fclose(f);	return	false;	}
		if(fread(&x,4,1,f)!=1||x!=H){	fprintf(stderr,"H=%u\n",x);	fclose(f);	return	false;	}
		if(fread(&x,4,1,f)!=1||x!=O){	fprintf(stderr,"O=%u\n",x);	fclose(f);	return	false;	}
		if(fread(code,O*2,1,f)!=1){	fclose(f);	return	false;	}
		for(uint32_t	i=0;	i<D;	i++)	tra[i].load(f);
		ou.load(f);	fclose(f);	return	true;
	}
	uint64_t	size(void){	return	tra[0].size()*D+ou.size();	}
	// One optimization step on B random windows of `text` (len tokens);
	// returns the summed (not yet averaged) cross-entropy loss.
	float	train(uint16_t	*text,	uint64_t	len){
		for(uint32_t	i=0;	i<B;	i++)	cudaMemcpy(data+i*(C+1),text+(wyrand(&srng)%(len-C)),2*(C+1),cudaMemcpyHostToDevice);
		_emb<<<E*C*B/16,16>>>(E,C,data,emb.data);
		for(uint32_t	d=0;	d<D;	d++)	tra[d].fw(d?tra[d-1].out:emb);
		n1.fw(tra[D-1].out);	ou.fw(tra[D-1].out);
		_softmaxf<<<C*B/16,16>>>(O,ou.out.data);
		dlossf<<<1,1>>>(B,C,O,ou.out.data,data,ret,table);
		ou.bk(tra[D-1].out,ou.out,n0g);	n1.bk(tra[D-1].out,n0g);
		// backward through the blocks; the unsigned loop ends when d wraps past 0
		for(uint32_t	d=D-1;	d<D;	d--)	tra[d].bk(d?tra[d-1].out:emb,d<D-1?trag[(d+1)%2]:n0g,trag[d%2]);
		cudaDeviceSynchronize();	return	*ret;
	}
};
// Hyper-parameters (context, embed, depth, heads, voca, batch, fullbatch)
// are compile-time constants supplied by the local "config" file.
#include	"config"
Denisovan<context,embed,depth,heads,voca,batch>	model;	// the global model instance
// Print command-line usage to stderr and terminate with exit(0).
// (The usage strings intentionally contain literal tab characters.)
void	document(void){
	cerr<<"usage:	training [options] input.txt\n";
	cerr<<"\t-i:	input model=NULL\n";
	cerr<<"\t-o:	output model=model\n";
	cerr<<"\t-s:	trained sample=0\n";
	cerr<<"\t-b:	benchmark only=off\n";
	exit(0);
}
// Memory-mapped training text (uint16 tokens) and its file descriptor/stat.
uint16_t	*ptr;	int	fd;	struct	stat	sb;
// Entry point: parse options, mmap the uint16 token stream, then loop
// training epochs until the loss stops improving (or forever with -b).
// Fixes: open/fstat/mmap results are now checked (a missing or too-short
// input previously reached a modulo-by-zero or SIGSEGV inside train()),
// and -s is parsed with strtoull (atoi truncated to int before the <<20).
int	main(int	ac,	char	**av){
	if(embed&31){	cerr<<"embed%32!=0\n";	return	0;	}	// kernels assume 32-multiple widths
	cublasCreate(&handle);
	string	in,out="model";	int	opt,bench=0;
	uint64_t	training=0;	// tokens already trained on, drives the lr schedule
	while((opt=getopt(ac,	av,	"i:o:s:b"))>=0){
		switch(opt){
		case	'i':	in=optarg;	break;	// resume from this model file
		case	'o':	out=optarg;	break;	// checkpoint output path
		case	's':{	training=strtoull(optarg,NULL,10);	training<<=20;	}	break;	// prior samples, in Mi tokens
		case	'b':	bench=1;	model.srng=0;	break;	// benchmark: fixed sampling, never save
		default:	document();
		}
	}
	if(ac<optind+1){	document();	return	0;	}
	fd=open(av[optind],	O_RDONLY);
	if(fd<0){	cerr<<"cannot open "<<av[optind]<<'\n';	return	1;	}
	if(fstat(fd,	&sb)<0||sb.st_size/2<=context){	cerr<<"input too small\n";	close(fd);	return	1;	}	// train() samples windows of context+1 tokens
	ptr=(uint16_t*)mmap(NULL,	sb.st_size,	PROT_READ,	MAP_SHARED,	fd,	0);
	if(ptr==MAP_FAILED){	cerr<<"mmap failed\n";	close(fd);	return	1;	}
	cerr.precision(4);	cerr.setf(ios::fixed);
	double	loss0=FLT_MAX/2,	loss;	timeval	beg,	end;	
	uint64_t	para=model.size();	cerr<<av[optind]<<'\t'<<sb.st_size/2<<"\npara\t"<<para<<'\n';	
	model.load_voca("voca.txt");
	if(in.size())	model.load(in.c_str());
	for(;;){
		loss=0;	gettimeofday(&beg,NULL);
		for(uint32_t	i=0;	i<fullbatch;	i++){
			// lr schedule: eta ~ (para + tokens/sqrt(batch*context))^(-1/4)
			eta=powf(para+training/sqrtf(batch*context),-1.0f/4);	training+=context*batch;
			loss+=model.train(ptr,sb.st_size/2);
		}
		loss/=batch*context*fullbatch;	// mean loss per token
		if(!bench){	if(loss<loss0+0.02)	model.save(out.c_str());	else	break;	}	// stop once the loss diverges
		loss0=loss;	gettimeofday(&end,NULL);
		double	t=(end.tv_sec-beg.tv_sec+1e-6*(end.tv_usec-beg.tv_usec));
		cerr<<(training>>20)<<'\t'<<loss<<'\t'<<t<<'\n';
	}
	munmap(ptr,sb.st_size);	close(fd);
	cublasDestroy(handle);
	return	0;
}

