#ifndef	transformer_included
#define	transformer_included

// Fused relative-positional-bias + exp over one causal attention row.
// Launched with C*H*B threads (see transformer::fw): each thread owns one
// C-long row of att (a = att + id*C) for query position c of head h.
// For key positions i in [0, c] it computes
//     a[i] = exp(a[i] + pe[h*C + (c - i)])        (bias indexed by offset c-i)
// and zeroes positions (c, C) — a causal mask with an UNNORMALIZED exp.
// NOTE(review): there is no max-subtraction before hexp/h2exp, so bfloat
// overflow is possible unless scores are pre-scaled small upstream — confirm.
// NOTE(review): the __nv_bfloat162 and short4 accesses assume each row
// (att + id*C) is suitably aligned, i.e. C a multiple of 4 — confirm at
// instantiation sites.
__global__	void	_transformerfp(uint32_t	H,	uint32_t	C,	bfloat	*att,	bfloat	*pe) {
    // s = 4*(floor((c+1)/4)+1) clamped to C: a 4-aligned cutoff past column c.
    // Columns (c, s) are zeroed scalar-wise, [s, C) with 8-byte short4 stores.
    uint32_t	id=blockIdx.x*blockDim.x+threadIdx.x,	c=id%C,	h=(id/C)%H,	s=min((((c+1)>>2)+1)<<2,C),i;
    // p points at pe[h*C + c]; *(p - i) is the bias for relative offset c - i.
    bfloat	*a=att+id*C,	*p=pe+h*C+c;
    // Pairwise: add biases at offsets c-i and c-i-1, then exp both halves.
    for(i=0;	i+1<=c;	i+=2) {
        __nv_bfloat162	x;
        x.x=*(p-i);
        x.y=*(p-i-1);
        *(__nv_bfloat162*)(a+i)=h2exp(__hadd2(*(__nv_bfloat162*)(a+i),x));
    }
    // Leftover element: when c is even the loop exits at i == c with a[c]
    // unprocessed (for odd c the last pair already covered a[c-1], a[c]).
    if(i<=c)	a[c]=hexp(__hadd(*(p-c),a[c]));
    // Zero the masked (future-key) tail of the row.
    uint16_t	*b=(uint16_t*)a;
    for(uint32_t	i=c+1;	i<s;	i++)	b[i]=0;
    for(uint32_t	i=s;	i<C;	i+=4)	*(short4*)(b+i)= {};
}

// Forward multiplicative gate, 8 bfloats per thread.
// u holds columns of height 6*R (six stacked R-row chunks); the output
// column (height 2*R) is assembled from two elementwise products:
//   half t==0:  chunk3 * v        (gate applied to the attention output v)
//   half t==1:  chunk4 * chunk5   (gate between two projected chunks)
// Grid must cover size(out)/8 threads; no bounds check is performed.
__global__	void	_transformerf(uint32_t	R,	bfloat	*u,	bfloat	*v,	bfloat	*out) {
    uint32_t	base=(blockIdx.x*blockDim.x+threadIdx.x)<<3;
    uint32_t	row=base%R;
    uint32_t	col=base/(2*R);
    uint32_t	half=(base%(2*R))/R;
    bfloat	*ucol=u+col*6*R;
    // Left factor always comes from u (chunk 3 or chunk 4).
    bfloat	*lhs=ucol+(3+half)*R+row;
    // Right factor: chunk 5 of u for the upper half, v for the lower half.
    bfloat	*rhs;
    if(half)	rhs=ucol+(4+half)*R+row;
    else	rhs=v+col*R+row;
    *(bfloat8*)(out+base)=_hmul8(*(bfloat8*)lhs,*(bfloat8*)rhs);
}

// Backward of _transformerf's gate: for out = lhs * rhs,
//   d(lhs) = rhs * gin   and   d(rhs) = lhs * gin,
// using the same column/half indexing as the forward kernel.
// U receives gradients for u's chunks, V the gradient for v (only the
// half==0 lanes write into V). 8 bfloats per thread, no bounds check.
__global__	void	_transformerb(uint32_t	R,	bfloat	*u,	bfloat	*v,	bfloat	*gin,	bfloat	*U,	bfloat	*V) {
    uint32_t	base=(blockIdx.x*blockDim.x+threadIdx.x)<<3;
    uint32_t	row=base%R,	col=base/(2*R),	half=(base%(2*R))/R;
    // Left-factor offset into the 6*R-tall u columns (chunk 3 or 4).
    uint32_t	off0=col*6*R+(3+half)*R+row;
    bfloat	*lhs=u+off0,	*dlhs=U+off0;
    bfloat	*rhs,	*drhs;
    if(half) {
        uint32_t	off1=col*6*R+(4+half)*R+row;	// chunk 5
        rhs=u+off1;
        drhs=U+off1;
    } else {
        rhs=v+col*R+row;
        drhs=V+col*R+row;
    }
    *(bfloat8*)dlhs=_hmul8(*(bfloat8*)rhs,*(bfloat8*)(gin+base));
    *(bfloat8*)drhs=_hmul8(*(bfloat8*)lhs,*(bfloat8*)(gin+base));
}

// In-place elementwise scale: gin[i] *= att[i], 8 bfloats per thread.
// (Backward through the exp in _transformerfp: d/dx exp(x) = exp(x), which
// is exactly the saved activation att; masked entries of att are zero, so
// their gradients vanish too.)  Grid must cover size/8 threads exactly.
__global__	void	_transformerba(bfloat	*gin,	bfloat	*att) {
    uint32_t	base=(blockIdx.x*blockDim.x+threadIdx.x)<<3;
    bfloat8	*g=(bfloat8*)(gin+base);
    *g=_hmul8(*(bfloat8*)(att+base),*g);
}

// Positional-bias gradient reduction. Launched with C*H threads; thread
// (head, off) sums, over all B batches, the diagonal band of each C x C
// attention-gradient matrix at relative offset `off` (entries p[i*C+i-off],
// i.e. every (query i, key i-off) pair), normalizes by sqrt(count), and
// folds the result into pm with an exponential moving average
// (rate parameter_beta).
// NOTE(review): division by sqrt((C-off)*B) rather than the count itself —
// an RMS-style normalization; confirm this matches the optimizer's scaling.
// S is accepted for signature symmetry but unused here.
__global__	void	_transformerbp(uint32_t	B,	uint32_t	H,	uint32_t	S,	uint32_t	C,	bfloat	*a,	bfloat	*pm) {
    uint32_t	idx=blockIdx.x*blockDim.x+threadIdx.x;
    uint32_t	head=idx/C,	off=idx%C;
    float	acc=0;
    // Accumulate in float to avoid bfloat cancellation over the band.
    for(uint32_t	b=0;	b<B;	b++) {
        bfloat	*mat=a+(b*H+head)*C*C;	// this batch/head's C x C block
        for(uint32_t	row=off;	row<C;	row++)	acc+=__bfloat162float(mat[row*C+(row-off)]);
    }
    acc/=sqrtf((C-off)*B);	// (C-off)*B terms were summed
    float	prev=__bfloat162float(pm[idx]);
    pm[idx]=__float2bfloat16_rn(prev+parameter_beta*(acc-prev));
}

// One attention block: learned relative positional bias, causal mask with
// unnormalized exp (see _transformerfp), and a multiplicative output gate,
// all in bfloat with float GEMM accumulation.
// NOTE(review): template-parameter roles inferred from the kernels/GEMMs —
// R = model width, C = sequence length, H = head count, S = per-head width,
// B = batch size — confirm against instantiation sites.
template<uint32_t	R,	uint32_t	C,	uint32_t	H,	uint32_t	S,	uint32_t	B>
struct	transformer {
    // Static scratch buffers: one copy per template specialization, shared
    // by every instance of the same shape (backward-pass temporaries only).
    static	tensor<bfloat,C*C*H*B>	da;	// grad of the attention matrix
    static	tensor<bfloat,R*C*B>	tg;	// incoming float grad converted to bfloat
    static	tensor<bfloat,2*S*H*C*B>	gi;	// grad at the gate output
    static	tensor<bfloat,S*H*C*B>	dv;	// grad of the attention-weighted values
    static	tensor<bfloat,6*S*H*C*B>	gx;	// grad at the input-projection output
    // Per-instance activations saved by fw() for bk().
    tensor<bfloat,C*C*H*B>	at;	// attention matrix after bias/exp/mask
    tensor<bfloat,R*C*B>	in;	// bfloat copy of the block input
    tensor<bfloat,S*H*C*B>	va;	// attention-weighted values
    tensor<bfloat,2*S*H*C*B>	tmp;	// gate output
    rmsnorm<6*S*H,C*B,6*H>	nx;	// norm over the six projected chunks
    rmsnorm<2*S*H,C*B,2*H>	no;	// norm before the output projection
    parameter<C*H>	p;	// relative positional bias: one C-vector per head
    linear<R,6*S*H,C*B>	x;	// input projection R -> 6*S*H
    tensor<bfloat,6*S*H*C*B>	xo;	// projected input (six S*H chunks per column)
    linear<2*S*H,R,C*B>	o;	// output projection 2*S*H -> R

    // Serialize the learned parameters (positional bias + both projections).
    void	save(FILE	*F) {
        p.save(F);
        x.save(F);
        o.save(F);
    }

    // Restore parameters in the exact order save() wrote them.
    void	load(FILE	*F) {
        p.load(F);
        x.load(F);
        o.load(F);
    }

    // Total size of the learned parameters (sum of the members' size()).
    uint64_t	size(void) {
        return	p.size()+x.size()+o.size();
    }

    // Forward pass. inp is R x (C*B) in float; the output projection writes
    // back into inp (final flag 1 — presumably accumulate/residual, confirm
    // against linear::fw).
    void	fw(tensor<float,R*C*B>	&inp) {
        float	alf=M_SQRT2/sqrtf(S),alf1=1,bet=0;	// score scale sqrt(2/S)
        float2bfloat(inp,in);
        x.fw(in,xo,0);	// project R -> 6*S*H per column
        nx.fw(xo);
        // Raw scores: per batch, heads batched with stride S inside the
        // 6*S*H-row layout of xo: at(head) = chunk0(head)^T * chunk1(head).
        for(uint32_t	b=0;	b<B;	b++)	cublasGemmStridedBatchedEx(handle,CUBLAS_OP_T,CUBLAS_OP_N,C,C,S,&alf,xo.p+b*6*S*H*C,CUDA_R_16BF,6*S*H,S,xo.p+b*6*S*H*C+S*H,CUDA_R_16BF,6*S*H,S,&bet,at.p+b*H*C*C,CUDA_R_16BF,C,C*C,H,CUBLAS_COMPUTE_32F,CUBLAS_GEMM_DEFAULT);
        p.z2w();	// materialize the positional-bias weights
        uint32_t	w0=warp_size(1,C*H*B);
        // Add relative positional bias, exponentiate, zero future keys.
        _transformerfp<<<C*H*B/w0,w0>>>(H,C,at.p,p.w.p);
        // Weighted values: va(head) = chunk2(head) * at(head).
        for(uint32_t	b=0;	b<B;	b++)	cublasGemmStridedBatchedEx(handle,CUBLAS_OP_N,CUBLAS_OP_N,S,C,C,&alf1,xo.p+b*6*S*H*C+2*S*H,CUDA_R_16BF,6*S*H,S,at.p+b*H*C*C,CUDA_R_16BF,C,C*C,&bet,va.p+b*S*H*C,CUDA_R_16BF,S*H,S,H,CUBLAS_COMPUTE_32F,CUBLAS_GEMM_DEFAULT);
        uint32_t	w1=warp_size(8,2*S*H*C*B);
        // Gate: tmp = [chunk3*va ; chunk4*chunk5] per column.
        _transformerf<<<2*S*H*C*B/8/w1,w1>>>(S*H,xo.p,va.p,tmp.p);
        no.fw(tmp);
        o.fw(tmp,inp,1);	// project 2*S*H -> R back into inp
    }

    // Backward pass. gin carries dL/d(output) in float and receives
    // dL/d(input) (final flag 1 on x.bk — presumably accumulate, confirm
    // against linear::bk). Must run after fw(): uses the saved at/va/xo/tmp/in.
    void	bk(tensor<float,R*C*B>	&gin) {
        float	alf=M_SQRT2/sqrtf(S),alf1=1,bet=0;
        float2bfloat(gin,tg);
        o.bk(tmp,tg,gi,0);	// through the output projection -> gi
        no.bk(tmp,gi);
        uint32_t	w0=warp_size(8,2*S*H*C*B);
        // Gate backward: fills gx chunks 3..5 and dv (grad of va).
        _transformerb<<<2*S*H*C*B/8/w0,w0>>>(S*H,xo.p,va.p,gi.p,gx.p,dv.p);
        for(uint32_t	b=0;	b<B;	b++) {
            // da = chunk2^T * dv  (grad of the attention matrix, per head)
            cublasGemmStridedBatchedEx(handle,CUBLAS_OP_T,CUBLAS_OP_N,C,C,S,&alf1,xo.p+b*6*S*H*C+2*S*H,CUDA_R_16BF,6*S*H,S,dv.p+b*S*H*C,CUDA_R_16BF,S*H,S,&bet,da.p+b*H*C*C,CUDA_R_16BF,C,C*C,H,CUBLAS_COMPUTE_32F,CUBLAS_GEMM_DEFAULT);
            // gx chunk2 = dv * at^T  (grad of the value chunk)
            cublasGemmStridedBatchedEx(handle,CUBLAS_OP_N,CUBLAS_OP_T,S,C,C,&alf1,dv.p+b*S*H*C,CUDA_R_16BF,S*H,S,at.p+b*H*C*C,CUDA_R_16BF,C,C*C,&bet,gx.p+b*6*S*H*C+2*S*H,CUDA_R_16BF,6*S*H,S,H,CUBLAS_COMPUTE_32F,CUBLAS_GEMM_DEFAULT);
        }
        uint32_t	w1=warp_size(8,C*C*H*B);
        // d/dx exp(x) = exp(x): multiply by the saved activations to get the
        // grad of the pre-exp logits (masked entries of at are zero, so their
        // gradients vanish too).
        _transformerba<<<C*C*H*B/8/w1,w1>>>(da.p,at.p);
        uint32_t	w2=warp_size(1,C*H);
        // Reduce da over batches/diagonals into the positional-bias momentum.
        _transformerbp<<<C*H/w2,w2>>>(B,H,S,C,da.p,p.m.p);
        p.m2z(etalr/sqrtf(M_SQRT2*S));	// apply the positional-bias update
        for(uint32_t	b=0;	b<B;	b++) {
            // gx chunk0 = chunk1 * da^T  (grad of the first score factor)
            cublasGemmStridedBatchedEx(handle,CUBLAS_OP_N,CUBLAS_OP_T,S,C,C,&alf,xo.p+b*6*S*H*C+S*H,CUDA_R_16BF,6*S*H,S,da.p+b*H*C*C,CUDA_R_16BF,C,C*C,&bet,gx.p+b*6*S*H*C,CUDA_R_16BF,6*S*H,S,H,CUBLAS_COMPUTE_32F,CUBLAS_GEMM_DEFAULT);
            // gx chunk1 = chunk0 * da  (grad of the second score factor)
            cublasGemmStridedBatchedEx(handle,CUBLAS_OP_N,CUBLAS_OP_N,S,C,C,&alf,xo.p+b*6*S*H*C,CUDA_R_16BF,6*S*H,S,da.p+b*H*C*C,CUDA_R_16BF,C,C*C,&bet,gx.p+b*6*S*H*C+S*H,CUDA_R_16BF,6*S*H,S,H,CUBLAS_COMPUTE_32F,CUBLAS_GEMM_DEFAULT);
        }
        nx.bk(xo,gx);
        x.bk(in,gx,gin,1);	// through the input projection -> gin
    }
};

// Out-of-class definitions for the static scratch tensors: one buffer per
// template specialization, shared by every transformer instance of the same
// shape.  NOTE(review): this sharing assumes same-shape layers never run
// their backward passes concurrently — confirm at the call sites.
template<uint32_t	R,	uint32_t	C,	uint32_t	H,	uint32_t	S,	uint32_t	B>
tensor<bfloat,C*C*H*B>	transformer<R,C,H,S,B>::da;
template<uint32_t	R,	uint32_t	C,	uint32_t	H,	uint32_t	S,	uint32_t	B>
tensor<bfloat,R*C*B>	transformer<R,C,H,S,B>::tg;
template<uint32_t	R,	uint32_t	C,	uint32_t	H,	uint32_t	S,	uint32_t	B>
tensor<bfloat,S*H*C*B>	transformer<R,C,H,S,B>::dv;
template<uint32_t	R,	uint32_t	C,	uint32_t	H,	uint32_t	S,	uint32_t	B>
tensor<bfloat,2*S*H*C*B>	transformer<R,C,H,S,B>::gi;
template<uint32_t	R,	uint32_t	C,	uint32_t	H,	uint32_t	S,	uint32_t	B>
tensor<bfloat,6*S*H*C*B>	transformer<R,C,H,S,B>::gx;

#endif

