#ifndef	linear_included
#define	linear_included

// Bias-free fully-connected (linear) layer computed in FP16 via cuBLAS.
//   I = input feature count, O = output feature count, C = batch/column count.
// All matrices are column-major (cuBLAS convention):
//   weights wei.w : I x O,  input inp : I x C,  output out : O x C.
// NOTE(review): relies on globals/types declared elsewhere in the project:
// `handle` (cublasHandle_t), `parameter<N>`, `tensor<T,N>`, `_C2F`,
// `parameter_beta`. Their exact semantics are assumed below — confirm at the
// definition site. cuBLAS return statuses are ignored, matching file style.
template<uint32_t	I,	uint32_t	O,	uint32_t	C>
struct	linear {
    parameter<I*O>	wei;    // flat I*O weight parameter block (no bias term)
    linear() {
        // Random weight init; _C2F presumably selects an init scale/mode — TODO confirm.
        wei.rand(_C2F);
    }
    // Serialize weights to an already-open file.
    void	save(FILE	*F) {
        wei.save(F);
    }
    // Restore weights from an already-open file.
    void	load(FILE	*F) {
        wei.load(F);
    }
    // Serialized size in bytes (delegates to the parameter block) — presumably bytes; verify against parameter::size.
    uint32_t	size(void) {
        return	wei.size();
    }
    // Forward pass: out = (1/sqrt(I)) * W^T * inp + acc * out
    //   acc = 0 overwrites out; acc = 1 accumulates into it.
    // The 1/sqrt(I) fan-in scaling is baked into the GEMM alpha rather than the weights.
    void	fw(tensor<__half,I*C>	&inp,	tensor<__half,O*C>	&out,	float	acc) {
        __half	alf=__float2half_rn(1/sqrtf(I)),bet=__float2half_rn(acc);
        // z2w(): presumably publishes pending parameter updates into wei.w before use — TODO confirm.
        wei.z2w();
        // GEMM dims: (O x I) * (I x C) -> (O x C); A = wei.w (I x O, lda=I) transposed,
        // B = inp (ldb=I), C = out (ldc=O). Pure FP16 compute (CUBLAS_COMPUTE_16F).
        cublasGemmEx(handle,CUBLAS_OP_T,CUBLAS_OP_N,O,C,I,&alf,wei.w.p,CUDA_R_16F,I,inp.p,CUDA_R_16F,I,&bet,out.p,CUDA_R_16F,O,CUBLAS_COMPUTE_16F,CUBLAS_GEMM_DEFAULT);
    }
    // Backward pass. Inputs:
    //   inp : the forward-pass input (I x C)
    //   gin : gradient flowing in from the next layer (O x C)
    //   gra : out-parameter, gradient w.r.t. inp (I x C); acc blends as in fw()
    // Side effect: accumulates the weight gradient into wei.m (momentum/EMA buffer).
    void	bk(tensor<__half,I*C>	&inp,	tensor<__half,O*C>	&gin,	tensor<__half,I*C>	&gra,	float	acc) {
        __half	alf=__float2half_rn(1/sqrtf(I)),bet=__float2half_rn(acc),alf1=__float2half_rn(parameter_beta/sqrtf(I*C)),	bet1=__float2half_rn(1-parameter_beta);
        // Weight-gradient accumulation:
        //   wei.m = (parameter_beta/sqrt(I*C)) * inp * gin^T + (1-parameter_beta) * wei.m
        // dims: (I x C) * (C x O) -> (I x O). Looks like an exponential moving average
        // of dL/dW with extra 1/sqrt(I*C) normalization — confirm against parameter's optimizer.
        cublasGemmEx(handle,CUBLAS_OP_N,CUBLAS_OP_T,I,O,C,&alf1,inp.p,CUDA_R_16F,I,gin.p,CUDA_R_16F,O,&bet1,wei.m.p,CUDA_R_16F,I,CUBLAS_COMPUTE_16F,CUBLAS_GEMM_DEFAULT);
        // Input gradient: gra = (1/sqrt(I)) * W * gin + acc * gra
        // dims: (I x O) * (O x C) -> (I x C); same fan-in scaling as the forward pass.
        cublasGemmEx(handle,CUBLAS_OP_N,CUBLAS_OP_N,I,C,O,&alf,wei.w.p,CUDA_R_16F,I,gin.p,CUDA_R_16F,O,&bet,gra.p,CUDA_R_16F,I,CUBLAS_COMPUTE_16F,CUBLAS_GEMM_DEFAULT);
        // m2z(): presumably converts the accumulated gradient in wei.m into a pending
        // update consumed by z2w() in fw() — TODO confirm in parameter's implementation.
        wei.m2z();
    }
};

#endif
