#ifndef	parameter_included
#define	parameter_included

#define	parameter_beta	0.015625f

// Signum: returns 1.0f for positive x, -1.0f for negative x, 0.0f for zero.
// Note: NaN compares false on both tests, so NaN maps to 0.0f (a safe no-op
// for the sign-SGD update below). Float literals avoid int->float conversions.
__device__ __forceinline__ float _sign(float x) {
    return x > 0.0f ? 1.0f : (x < 0.0f ? -1.0f : 0.0f);
}

// Sign-SGD update step: w[i] -= lr * sign(m[i]), vectorized 8 elements per
// thread via one float8 load/store on w and one half8 load on m.
// Preconditions (NOTE(review) — confirm against m2z() launch below):
//   - total element count is an exact multiple of 8 * gridDim.x * blockDim.x;
//     there is no bounds guard, so any tail would be read/written out of range.
//   - w is 32-byte aligned and m is 16-byte aligned at every accessed offset,
//     as required by the reinterpret-cast vector accesses.
__global__	void	_optimizer(float	*w, __half	*m,	float	lr) {
    // Base element index: each thread owns 8 consecutive floats (<<3 == *8).
    uint32_t	i=(blockIdx.x*blockDim.x+threadIdx.x)<<3;
    // Load 8 weights and 8 momenta; _h2f8 presumably widens half8 -> float8.
    float8	w8=*(float8*)(w+i),	m8=_h2f8(*(half8*)(m+i));
    // float8 appears to be four float2 components (.x/.y/.z/.w, each with
    // .x/.y lanes) — update every lane by lr times the sign of its momentum.
    w8.x.x-=lr*_sign(m8.x.x);
    w8.x.y-=lr*_sign(m8.x.y);
    w8.y.x-=lr*_sign(m8.y.x);
    w8.y.y-=lr*_sign(m8.y.y);
    w8.z.x-=lr*_sign(m8.z.x);
    w8.z.y-=lr*_sign(m8.z.y);
    w8.w.x-=lr*_sign(m8.w.x);
    w8.w.y-=lr*_sign(m8.w.y);
    // Single vectorized write-back of the 8 updated weights.
    *(float8*)(w+i)=w8;
}

// Fill w with pseudo-random values scaled by `norm`, one element per thread.
// Counter-based RNG: hashing (index, seed) with wyhash64 makes the output
// deterministic for a given seed and independent of launch configuration.
// NOTE(review): no bounds guard — assumes gridDim.x*blockDim.x equals the
// element count exactly (rand() below launches N/warp blocks of warp threads).
__global__	void	_random(float	*w,	uint64_t	seed,	float	norm) {
    uint32_t	i=blockIdx.x*blockDim.x+threadIdx.x;
    // wy2gau presumably maps the uniform 64-bit hash to a standard normal
    // deviate, so norm acts as the standard deviation — TODO confirm.
    w[i]=norm*wy2gau(wyhash64(i,seed));
}

// A trainable parameter block of N floats: fp32 master weights plus
// half-precision shadow copies for the forward pass and the momentum buffer.
template<uint32_t N>
struct parameter {
    tensor<float, N> z;     // fp32 master weights
    tensor<__half, N> w, m; // fp16 working weights (w) and momentum (m)

    // Randomize the master weights: one Gaussian sample per element,
    // scaled by `norm`, seeded from the global init_prng stream.
    void rand(float norm) {
        const uint32_t threads = warp_size(1, N);
        _random<<<N / threads, threads>>>(z.p, wyrand(&init_prng), norm);
    }

    // Refresh the half-precision working weights from the fp32 master copy.
    void z2w(void) {
        float2char2half(z, w);
    }

    // Apply one sign-SGD step to the master weights from momentum m.
    // Each _optimizer thread processes 8 elements, hence the /8 in the grid.
    void m2z(float lr = etalr) {
        const uint32_t threads = warp_size(8, N);
        _optimizer<<<N / 8 / threads, threads>>>(z.p, m.p, lr);
    }

    // Serialize the master weights to F via an 8-bit staging tensor.
    // The sync ensures the quantization kernel finished before the host read.
    void save(FILE *F) {
        tensor<char, N> buf;
        float2char(z, buf);
        cudaDeviceSynchronize();
        buf.save(F);
    }

    // Deserialize from F: read the 8-bit staging tensor, then widen to fp32.
    void load(FILE *F) {
        tensor<char, N> buf;
        buf.load(F);
        char2float(buf, z);
        cudaDeviceSynchronize();
    }

    // Number of elements in this parameter block.
    uint32_t size(void) {
        return N;
    }
};

#endif
