#ifndef	tensor_included
#define	tensor_included

// Fixed-size, RAII-owned buffer of N elements of type T in CUDA unified
// (managed) memory; zero-initialized on construction.  The pointer is valid
// on both host and device.
template<class T, uint32_t N>
struct tensor {
    T *p;   // managed allocation of N elements, owned by this object

    tensor() {
        cudaMallocManaged(&p, N*sizeof(T));
        zero();
    }

    // This type owns a raw device allocation: a shallow copy would make the
    // destructor free the same pointer twice.  Forbid copying outright.
    tensor(const tensor&) = delete;
    tensor& operator=(const tensor&) = delete;

    ~tensor() {
        cudaFree(p);
    }

    // Write the raw buffer (N*sizeof(T) bytes) to F.
    // NOTE(review): assumes any pending device writes were synchronized
    // before calling, so the host-visible data is current — confirm at
    // call sites.
    void save(FILE *F) {
        fwrite(p, N*sizeof(T), 1, F);
    }

    // Read the raw buffer from F; returns true iff a full buffer was read.
    bool load(FILE *F) {
        return fread(p, N*sizeof(T), 1, F) == 1;
    }

    // Element count (a compile-time constant).
    uint32_t size(void) const {
        return N;
    }

    // Byte-wise zero of the whole buffer.  cudaMemset is asynchronous with
    // respect to the host; it is ordered before subsequent kernel launches
    // on the same (default) stream.
    void zero(void) {
        cudaMemset(p, 0, N*sizeof(T));
    }
};

// Euclidean (L2) norm of a half-precision tensor via cuBLAS.
// The reduction executes in fp32 (CUDA_R_32F execution type); store the
// scalar result in fp32 as well, instead of rounding it through fp16 only
// to widen it back on return.  cudaDeviceSynchronize() makes the managed
// result safe to read from the host.
// NOTE(review): `handle` is a cuBLAS handle declared elsewhere in the
// project; the cublasStatus_t return is not checked here, matching the
// file's style.
template<uint32_t N>
float norm(tensor<half,N> &inp) {
    tensor<float,1> n;  // managed scratch cell for the scalar result
    cublasNrm2Ex(handle, N, inp.p, CUDA_R_16F, 1, n.p, CUDA_R_32F, CUDA_R_32F);
    cudaDeviceSynchronize();
    return n.p[0];
}

// Tiled 8x8 matrix transpose: out[x*C + y] = inp[y*R + x], i.e. inp has
// leading dimension R and out has leading dimension C.
// Launch: grid(R/8, C/8), block(8, 8).  R and C must be multiples of 8 —
// there is no tail guard in this kernel (the host wrapper enforces it).
__global__ void _transpose(uint32_t R, uint32_t C, half* inp, half* out) {
    // +1 column of padding so the transposed (column-wise) tile reads do
    // not serialize on shared-memory banks.
    __shared__ half block[8][8+1];
    // Coalesced load: consecutive threadIdx.x touches consecutive inp
    // addresses within the tile row.
    uint32_t i = blockIdx.y*blockDim.y + threadIdx.y;
    uint32_t j = blockIdx.x*blockDim.x + threadIdx.x;
    block[threadIdx.y][threadIdx.x] = inp[i*R + j];
    __syncthreads();
    // Coalesced store: swap the intra-tile indices so consecutive
    // threadIdx.x writes consecutive out addresses.  The (tile, lane)
    // mapping is identical to the naive out[j*C+i] = inp[i*R+j] version.
    uint32_t oi = blockIdx.x*blockDim.x + threadIdx.y;
    uint32_t oj = blockIdx.y*blockDim.y + threadIdx.x;
    out[oi*C + oj] = block[threadIdx.x][threadIdx.y];
}

// Host launcher for _transpose.  The 8x8 tiling has no tail guard, so a
// non-multiple-of-8 dimension would silently leave the trailing rows or
// columns untransposed; enforce the precondition at compile time.
template<uint32_t R, uint32_t C>
static inline void transpose(tensor<half,R*C> &inp, tensor<half,C*R> &out) {
    static_assert(R % 8 == 0 && C % 8 == 0,
                  "transpose: R and C must be multiples of the 8x8 tile");
    dim3 threadblock(8, 8);
    dim3 grid(R/threadblock.x, C/threadblock.y);
    _transpose<<<grid, threadblock>>>(R, C, inp.p, out.p);
}

// Vectorized float -> half conversion: each thread casts one group of 8
// consecutive elements via _f2b8.  The launcher must cover exactly N/8
// threads, with N a multiple of 8; there is no bounds check here.
__global__ void _float2half(float *inp, half *out) {
    uint32_t base = (blockIdx.x*blockDim.x + threadIdx.x) * 8u;
    float8 v = *reinterpret_cast<float8*>(inp + base);
    *reinterpret_cast<half8*>(out + base) = _f2b8(v);
}

// Launch _float2half over all N elements (8 elements per thread).
// NOTE(review): warp_size(8, N) is a project helper — presumably it picks a
// block size that divides N/8 evenly; confirm against its definition.
template<uint32_t N>
static inline void float2half(tensor<float,N> &inp, tensor<half,N> &out) {
    const uint32_t threads = warp_size(8, N);
    const uint32_t blocks = (N/8)/threads;
    _float2half<<<blocks, threads>>>(inp.p, out.p);
}

// Quantize-dequantize round trip: float -> char -> float -> half, 8 elements
// per thread (simulates char quantization error in the half output).  The
// launcher must cover exactly N/8 threads; no bounds check here.
__global__ void _float2char2half(float *inp, half *out) {
    uint32_t base = (blockIdx.x*blockDim.x + threadIdx.x) * 8u;
    float8 v = *reinterpret_cast<float8*>(inp + base);
    *reinterpret_cast<half8*>(out + base) = _f2b8(_c2f8(_f2c8(v)));
}

// Launch _float2char2half over all N elements (8 elements per thread).
// NOTE(review): warp_size(8, N) is a project helper — presumably it picks a
// block size that divides N/8 evenly; confirm against its definition.
template<uint32_t N>
static inline void float2char2half(tensor<float,N> &inp, tensor<half,N> &out) {
    const uint32_t threads = warp_size(8, N);
    const uint32_t blocks = (N/8)/threads;
    _float2char2half<<<blocks, threads>>>(inp.p, out.p);
}

// Vectorized half -> float conversion: each thread widens one group of 8
// consecutive elements via _b2f8.  The launcher must cover exactly N/8
// threads; no bounds check here.
__global__ void _half2float(half *inp, float *out) {
    uint32_t base = (blockIdx.x*blockDim.x + threadIdx.x) * 8u;
    half8 v = *reinterpret_cast<half8*>(inp + base);
    *reinterpret_cast<float8*>(out + base) = _b2f8(v);
}

// Launch _half2float over all N elements (8 elements per thread).
// NOTE(review): warp_size(8, N) is a project helper — presumably it picks a
// block size that divides N/8 evenly; confirm against its definition.
template<uint32_t N>
static inline void half2float(tensor<half,N> &inp, tensor<float,N> &out) {
    const uint32_t threads = warp_size(8, N);
    const uint32_t blocks = (N/8)/threads;
    _half2float<<<blocks, threads>>>(inp.p, out.p);
}

// Vectorized float -> char quantization: each thread converts one group of
// 8 consecutive elements via _f2c8.  The launcher must cover exactly N/8
// threads; no bounds check here.
__global__ void _float2char(float *inp, char *out) {
    uint32_t base = (blockIdx.x*blockDim.x + threadIdx.x) * 8u;
    float8 v = *reinterpret_cast<float8*>(inp + base);
    *reinterpret_cast<char8*>(out + base) = _f2c8(v);
}

// Launch _float2char over all N elements (8 elements per thread).
// NOTE(review): warp_size(8, N) is a project helper — presumably it picks a
// block size that divides N/8 evenly; confirm against its definition.
template<uint32_t N>
static inline void float2char(tensor<float,N> &inp, tensor<char,N> &out) {
    const uint32_t threads = warp_size(8, N);
    const uint32_t blocks = (N/8)/threads;
    _float2char<<<blocks, threads>>>(inp.p, out.p);
}

// Vectorized char -> float widening: each thread converts one group of 8
// consecutive elements via _c2f8.  The launcher must cover exactly N/8
// threads; no bounds check here.
__global__ void _char2float(char *inp, float *out) {
    uint32_t base = (blockIdx.x*blockDim.x + threadIdx.x) * 8u;
    char8 v = *reinterpret_cast<char8*>(inp + base);
    *reinterpret_cast<float8*>(out + base) = _c2f8(v);
}

// Launch _char2float over all N elements (8 elements per thread).
// NOTE(review): warp_size(8, N) is a project helper — presumably it picks a
// block size that divides N/8 evenly; confirm against its definition.
template<uint32_t N>
static inline void char2float(tensor<char,N> &inp, tensor<float,N> &out) {
    const uint32_t threads = warp_size(8, N);
    const uint32_t blocks = (N/8)/threads;
    _char2float<<<blocks, threads>>>(inp.p, out.p);
}

#endif
