#ifndef	parameter_included
#define	parameter_included

#define	parameter_beta	0.0078125f
// Sign-based momentum update over 8 consecutive elements per thread.
// Re-derives the same per-element random bits that _perturb drew for this
// `seed` (parameter::m2z passes the seed stored by parameter::z2w), folds
// g*(±0.5) into the momentum EMA m with rate parameter_beta = 1/128, then
// steps the fp32 weights by ±lr/2 according to the sign of the momentum.
// Preconditions (from the launch in parameter::m2z): gridDim.x*blockDim.x*8
// covers the buffers exactly — there is no bounds check — and w+i / m+i
// must satisfy float8 / half8 alignment for the vectorized accesses.
__global__	void	_optimizer(float	*w, half	*m,	float	lr,	float	g,	uint64_t	seed) {
    uint32_t	i=(blockIdx.x*blockDim.x+threadIdx.x)<<3;	// base index of this thread's 8 elements
    uint64_t	r=wyhash64(i,seed);	// same hash _perturb computed for this (i,seed)
    float8	w8=*(float8*)(w+i),	m8=_b2f8(*(half8*)(m+i));	// weights in fp32, momentum widened half→float
    // Momentum EMA: m += beta*(g*(±0.5) - m), where ((r>>k)&1)-0.5f is ±0.5
    // depending on bit k of r (one independent sign per element).
    m8.x.x+=parameter_beta*(g*(((r>>0)&1)-0.5f)-m8.x.x);
    m8.x.y+=parameter_beta*(g*(((r>>1)&1)-0.5f)-m8.x.y);
    m8.y.x+=parameter_beta*(g*(((r>>2)&1)-0.5f)-m8.y.x);
    m8.y.y+=parameter_beta*(g*(((r>>3)&1)-0.5f)-m8.y.y);
    m8.z.x+=parameter_beta*(g*(((r>>4)&1)-0.5f)-m8.z.x);
    m8.z.y+=parameter_beta*(g*(((r>>5)&1)-0.5f)-m8.z.y);
    m8.w.x+=parameter_beta*(g*(((r>>6)&1)-0.5f)-m8.w.x);
    m8.w.y+=parameter_beta*(g*(((r>>7)&1)-0.5f)-m8.w.y);
    // Weight step: w -= lr*sign(m)/2, since (m>0)-0.5f is +0.5 when m>0
    // and -0.5 otherwise (m==0 counts as negative sign).
    w8.x.x-=lr*((m8.x.x>0)-0.5f);
    w8.x.y-=lr*((m8.x.y>0)-0.5f);
    w8.y.x-=lr*((m8.y.x>0)-0.5f);
    w8.y.y-=lr*((m8.y.y>0)-0.5f);
    w8.z.x-=lr*((m8.z.x>0)-0.5f);
    w8.z.y-=lr*((m8.z.y>0)-0.5f);
    w8.w.x-=lr*((m8.w.x>0)-0.5f);
    w8.w.y-=lr*((m8.w.y>0)-0.5f);
    *(float8*)(w+i)=w8;
    *(half8*)(m+i)=_f2b8(m8);	// momentum narrowed back to half
}

// Fill w with random values scaled by `norm`, one element per thread:
// w[i] = norm * wy2gau(wyhash64(i, seed)). Randomness comes from hashing
// the global index with `seed`, so a given (i, seed) pair is reproducible.
// No bounds check: the grid must cover the buffer exactly.
__global__ void _random(float *w, uint64_t seed, float norm) {
    const uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x;
    const uint64_t bits = wyhash64(idx, seed);
    w[idx] = wy2gau(bits) * norm;
}

// Vectorized fp32→fp16 copy: each thread converts 8 consecutive elements
// of z into w via _f2b8. Requires float8/half8 alignment of z+base and
// w+base, and a grid that covers the buffers exactly (no bounds check).
__global__ void _origin(float *z, half *w) {
    const uint32_t base = 8 * (blockDim.x * blockIdx.x + threadIdx.x);
    const float8 src = *(float8 *)(z + base);
    *(half8 *)(w + base) = _f2b8(src);
}

// Write w = fp16(z ± 2^-8): each thread perturbs 8 consecutive elements,
// drawing one sign bit per element from wyhash64(base, seed). Storing the
// same seed later lets _optimizer regenerate identical signs.
// No bounds check: the grid must cover the buffers exactly, and z+base /
// w+base must satisfy float8 / half8 alignment.
__global__ void _perturb(float *z, half *w, uint64_t seed) {
    // 0.5f * 0.0078125f == 2^-8; both factors are powers of two, so the
    // folded constant is bit-identical to the original (±0.5)*(1/128) form.
    const float step = 0.00390625f;
    const uint32_t base = 8 * (blockDim.x * blockIdx.x + threadIdx.x);
    const uint64_t bits = wyhash64(base, seed);
    float8 v = *(float8 *)(z + base);
    v.x.x += (bits & 1)        ? step : -step;
    v.x.y += ((bits >> 1) & 1) ? step : -step;
    v.y.x += ((bits >> 2) & 1) ? step : -step;
    v.y.y += ((bits >> 3) & 1) ? step : -step;
    v.z.x += ((bits >> 4) & 1) ? step : -step;
    v.z.y += ((bits >> 5) & 1) ? step : -step;
    v.w.x += ((bits >> 6) & 1) ? step : -step;
    v.w.y += ((bits >> 7) & 1) ? step : -step;
    *(half8 *)(w + base) = _f2b8(v);
}

// One trainable parameter tensor of N floats: fp32 master copy `z`, fp16
// working copy `w` consumed by the model, fp16 momentum `m`, and the seed
// of the most recent perturbation (set in z2w, consumed by m2z so
// _optimizer can regenerate the same random signs as _perturb).
// NOTE(review): every launch below assumes N divides evenly by the chosen
// warp count (and that the vectorized kernels' grids cover N exactly) —
// confirm at the sites that instantiate parameter<N>.
template<uint32_t	N>
struct	parameter {
    uint64_t	seed;	// seed of the last _perturb launch
    tensor<float,N>	z;	// fp32 master weights
    tensor<half,N>	w,m;	// fp16 working weights and momentum

    // Initialize z with random values scaled by `norm` (see _random),
    // seeding from the global init_prng stream.
    void	rand(float	norm) {
        const	uint32_t	warp=warp_size(1,N);
        _random<<<N/warp,warp>>>(z.p,wyrand(&init_prng),norm);
    }
    // Refresh w from z: a plain fp32→fp16 cast when the global stage==0,
    // otherwise a freshly seeded ±2^-8 perturbation whose seed is kept so
    // the next m2z can replay the same signs.
    void	z2w(void) {
        const	uint32_t	warp=warp_size(8,N);
        if(stage==0)	_origin<<<N/8/warp,warp>>>(z.p,w.p);
        else {
            seed=wyrand(&prng);
            _perturb<<<N/8/warp,warp>>>(z.p,w.p,seed);
        }
    }

    // Optimizer step on the fp32 master weights, using the global scalar
    // `gradient` and the perturbation seed recorded by the last z2w.
    // Default learning rate comes from the global etalr.
    void	m2z(float	lr=etalr) {
        const	uint32_t	warp=warp_size(8,N);
        _optimizer<<<N/8/warp,warp>>>(z.p,m.p,lr,gradient,seed);
    }

    // Serialize z to F in a one-byte-per-element encoding (float2char);
    // the device sync ensures the conversion finished before tmp is read.
    void	save(FILE	*F) {
        tensor<char,N>	tmp;
        float2char(z,tmp);
        cudaDeviceSynchronize();
        tmp.save(F);
    }

    // Inverse of save(): read the byte encoding from F and expand into z.
    void	load(FILE	*F) {
        tensor<char,N>	tmp;
        tmp.load(F);
        char2float(tmp,z);
        cudaDeviceSynchronize();
    }

    // Number of elements in this parameter block.
    uint32_t	size(void) {
        return	N;
    }

    // RMS of the momentum buffer: ||m||_2 / sqrt(N).
    // NOTE(review): the norm is accumulated in fp32 but returned through a
    // half result, which saturates at 65504 and keeps only ~3 significant
    // digits — consider a CUDA_R_32F result if the cublasNrm2Ex supported
    // type table allows it. The cublasStatus_t return is also unchecked.
    float	norm(void) {
        half	nor;
        cublasNrm2Ex(handle,N,m.p,CUDA_R_16F,1,&nor,CUDA_R_16F,CUDA_R_32F);
        cudaDeviceSynchronize();
        return	__half2float(nor)/sqrtf(N);
    }
};

#endif
