#ifndef	softmax_included
#define	softmax_included

// Numerically-stable softmax, one thread per row: thread `id` processes the
// contiguous run of R halfs starting at inp + id*R, in place.
//
// Preconditions (not checked in the kernel):
//   - R must be even: the row is walked as R/2 half2 pairs, so an odd R would
//     silently drop the last element of every row.
//   - inp must be 4-byte aligned (half2 loads/stores through the cast).
//   - There is NO bounds guard on `id`; the launch must supply exactly one
//     thread per row or excess threads read/write past the buffer.
//   - The -10000 max seed assumes all inputs exceed -10000 — TODO confirm
//     the expected input range.
//
// NOTE(review): adjacent threads access addresses R*sizeof(half) apart, so
// global loads/stores are uncoalesced; acceptable for small R, but a
// warp-per-row layout would coalesce. Left unchanged here.
__global__ void _softmaxf(uint32_t R, half *inp) {
    uint32_t id = blockIdx.x*blockDim.x + threadIdx.x;
    half2 *p = (half2*)(inp + id*R);

    // Pass 1: row maximum (subtracted before exp for numerical stability).
    float2 z = {-10000.f, -10000.f};
    half2 ma = __float22half2_rn(z);
    for (uint32_t i = 0; i < R/2; i++) ma = __hmax2(p[i], ma);
    ma.x = ma.y = __hmax(ma.x, ma.y);  // fold the two lanes into one scalar max

    // Pass 2: exponentiate in place and accumulate the sum in float.
    // Storing exp(x - max) here avoids recomputing h2exp in the scaling pass;
    // the stored half2 is bit-identical to what a recomputation would yield.
    float sum = 0.f;
    for (uint32_t i = 0; i < R/2; i++) {
        half2 e = h2exp(__hsub2(p[i], ma));
        p[i] = e;
        float2 t = __half22float2(e);
        sum += t.x + t.y;
    }

    // Pass 3: scale every element by the reciprocal of the sum.
    z.x = z.y = 1.f/sum;
    half2 s = __float22half2_rn(z);
    for (uint32_t i = 0; i < R/2; i++) p[i] = __hmul2(p[i], s);
}

// Host-side launcher: applies _softmaxf over a buffer laid out as C rows of
// R halfs each (one independent softmax per row, one thread per row).
template<uint32_t R, uint32_t C>
struct softmax {
    // The kernel walks each row as R/2 half2 pairs, so an odd R would
    // silently ignore the last element of every row — reject at compile time.
    static_assert(R % 2 == 0, "softmax: R must be even (rows processed as half2 pairs)");

    void fw(tensor<half,R*C> &inp) {
        // warp_size(1,C) is a project helper — presumably it returns a block
        // size dividing C; otherwise C/warp truncates and the trailing
        // C % warp rows are never processed. Verify against its definition.
        const uint32_t warp = warp_size(1, C);
        _softmaxf<<<C/warp, warp>>>(R, inp.p);
        // NOTE(review): no cudaGetLastError() after the launch, so bad launch
        // configurations are silently dropped; consider checking in debug builds.
    }
};

#endif
