#include "../inc/ff_utils_cuda.cuh"

namespace feifei
{
// Device ordinal chosen by InitGpuRuntime() (currently always 0) and used
// for subsequent cudaSetDevice / cudaGetDeviceProperties calls.
int SetDeviceId;
// Initialize the CUDA runtime for this process: reset the device, query
// and log the runtime version and device count, select device 0 (stored
// in the module-level SetDeviceId), and log its key properties.
void InitGpuRuntime(void)
{
	int RuntimeVersion;
	int DeviceCount;
	cudaDeviceProp DeviceProp;

	INFO("");
	PrintSeperator('=');
	PrintHeader("Runtime", '=');
	PrintSeperator('=');
	CUDA_ASSERT(cudaDeviceReset());
	CUDA_ASSERT(cudaRuntimeGetVersion(&RuntimeVersion));
	// CUDA encodes the version as major * 1000 + minor * 10,
	// so the minor number is (version % 1000) / 10, not version % 10.
	INFO("Runtime Version: %d.%d.", RuntimeVersion / 1000, (RuntimeVersion % 1000) / 10);

	CUDA_ASSERT(cudaGetDeviceCount(&DeviceCount));
	INFO("System has %d devices.", DeviceCount);

	SetDeviceId = 0;
	INFO("Select device ID: %d.", SetDeviceId);
	CUDA_ASSERT(cudaSetDevice(SetDeviceId));

	CUDA_ASSERT(cudaGetDeviceProperties(&DeviceProp, SetDeviceId));
	INFO("\t- Device Name: %s.", DeviceProp.name);
	INFO("\t- Compute Capability: %d.%d", DeviceProp.major, DeviceProp.minor);
	// clockRate / memoryClockRate are reported by the runtime in kHz;
	// divide by 1e6 to print GHz.
	INFO("\t- Clock Rate: %.3f(GHz).", DeviceProp.clockRate / 1000.0 / 1000.0);
	INFO("\t- Multiprocessors(SM) Number: %d.", DeviceProp.multiProcessorCount);
	INFO("\t-");
	INFO("\t- Memory Clock Rate: %.3f(GHz).", DeviceProp.memoryClockRate / 1000.0 / 1000.0);
	INFO("\t- Memory Bus Width: %d(bit).", DeviceProp.memoryBusWidth);
	INFO("\t- Global Memory: %.2f(GB).", DeviceProp.totalGlobalMem / 1024.0 / 1024.0 / 1024.0);
	INFO("\t- Shared Memory per Block: %.2f(KB).", DeviceProp.sharedMemPerBlock / 1024.0);
	INFO("\t- Shared Memory per SM: %.2f(KB).", DeviceProp.sharedMemPerMultiprocessor / 1024.0);
	INFO("\t-");
	INFO("\t- Warp Size: %d.", DeviceProp.warpSize);
	INFO("\t- Max Thread Number per SM: %d.", DeviceProp.maxThreadsPerMultiProcessor);
	PrintSeperator('-');
}
// Release GPU runtime resources acquired by InitGpuRuntime().
// Currently a no-op; kept so callers have a symmetric init/release pair.
void ReleaseGpuRuntime(void)
{
}

// Log a device buffer of float2 values by forwarding to the cplx<float>
// overload; the two types are reinterpreted via pointer cast, so they are
// assumed layout-compatible (re/im pair of floats).
void LogDataDev(const float2* d_addr, const size_t len,
	std::string name,
	uint64_t startIdx, bool isHex,
	int numPerRow, int fmtLen,
	FILE* of)
{
	LogDataDev((cplx<float>*)d_addr, len, name, startIdx, isHex, numPerRow, fmtLen, of);
}
// Log a device buffer of double2 values by forwarding to the cplx<double>
// overload; relies on the same pointer-cast layout compatibility as the
// float2 variant above.
void LogDataDev(const double2* d_addr, const size_t len,
	std::string name,
	uint64_t startIdx, bool isHex,
	int numPerRow, int fmtLen,
	FILE* of)
{
	LogDataDev((cplx<double>*)d_addr, len, name, startIdx, isHex, numPerRow, fmtLen, of);
}
// Dump a device buffer of float2 values to a file by forwarding to the
// cplx<float> overload (pointer reinterpretation, no copy).
void DumpDataDev(const float2* d_addr, const size_t len,
	std::string full_file_name, bool isAppend,
	uint64_t startIdx, bool isHex,
	int numPerRow, int fmtLen)
{
	DumpDataDev((cplx<float>*)d_addr, len, full_file_name, isAppend, startIdx, isHex, numPerRow, fmtLen);
}
// Dump a device buffer of double2 values to a file by forwarding to the
// cplx<double> overload (pointer reinterpretation, no copy).
void DumpDataDev(const double2* d_addr, const size_t len,
	std::string full_file_name, bool isAppend,
	uint64_t startIdx, bool isHex,
	int numPerRow, int fmtLen)
{
	DumpDataDev((cplx<double>*)d_addr, len, full_file_name, isAppend, startIdx, isHex, numPerRow, fmtLen);
}

// Element-wise type-conversion kernel: reinterprets d_src as SrcT[] and
// d_dst as DstT[], converting one element per thread. `len` is the element
// count; threads past the end return immediately (grid tail guard).
template<typename SrcT, typename DstT> __global__ void cvt_type_kernel(const void * d_src, void * d_dst, size_t len)
{
	uint32_t glbIdx = blockDim.x * blockIdx.x + threadIdx.x;
	if(glbIdx >= len)
		return;
	
	SrcT * ps = (SrcT *)d_src;
	DstT * pd = (DstT *)d_dst;
	// Cast to DstT: the previous hard-coded (float) cast silently dropped
	// precision for the <float, double> instantiation.
	pd[glbIdx] = (DstT)(ps[glbIdx]);
}
// Convert a device buffer of `len` floats to doubles.
// d_src:  device pointer to len floats.
// d_dst:  in/out device pointer; cudaMalloc'ed here when *d_dst is NULL,
//         otherwise assumed large enough for len doubles.
void cvt_type_dev(float * d_src, double ** d_dst, size_t len)
{
	size_t memSize = len * sizeof(double);

	if (*d_dst == NULL)
		CUDA_ASSERT(cudaMalloc(d_dst, memSize));

	if (len == 0)
		return;	// a zero-block launch would fail with cudaErrorInvalidConfiguration

	dim3 group_size, group_num;
	group_size.x = MAX_GRP_SIZE;
	// Size the grid from the full size_t length to avoid 32-bit truncation
	// before the ceiling division.
	group_num.x = (uint32_t)((len + group_size.x - 1) / group_size.x);
	cvt_type_kernel<float, double><<<group_num, group_size>>>((void*)d_src, (void*)(*d_dst), len);
	CUDA_ASSERT(cudaGetLastError());	// surface launch-configuration errors
}
// Convert a device buffer of `len` doubles to floats (precision is
// intentionally reduced).
// d_src:  device pointer to len doubles.
// d_dst:  in/out device pointer; cudaMalloc'ed here when *d_dst is NULL,
//         otherwise assumed large enough for len floats.
void cvt_type_dev(double * d_src, float ** d_dst, size_t len)
{
	size_t memSize = len * sizeof(float);

	if (*d_dst == NULL)
		CUDA_ASSERT(cudaMalloc(d_dst, memSize));

	if (len == 0)
		return;	// a zero-block launch would fail with cudaErrorInvalidConfiguration

	dim3 group_size, group_num;
	group_size.x = MAX_GRP_SIZE;
	// Size the grid from the full size_t length to avoid 32-bit truncation
	// before the ceiling division.
	group_num.x = (uint32_t)((len + group_size.x - 1) / group_size.x);
	cvt_type_kernel<double, float><<<group_num, group_size>>>((void*)d_src, (void*)(*d_dst), len);
	CUDA_ASSERT(cudaGetLastError());	// surface launch-configuration errors
}

// Convert an interleaved complex buffer (re/im pairs) from SrcT components
// to DstT components; one thread converts one complex element, so `len` is
// the number of complex values, not scalars.
template<typename SrcT, typename DstT> __global__ void cvt_cplx_type_kernel(const void * d_src, void * d_dst, size_t len)
{
	const uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x;
	if (idx < len)
	{
		const SrcT * src = (const SrcT *)d_src;
		DstT * dst = (DstT *)d_dst;
		dst[2 * idx]     = (DstT)(src[2 * idx]);
		dst[2 * idx + 1] = (DstT)(src[2 * idx + 1]);
	}
}
// Convert a device buffer of `len` float2 complex values to double2.
// d_src:  device pointer to len float2 elements.
// d_dst:  in/out device pointer; cudaMalloc'ed here when *d_dst is NULL,
//         otherwise assumed large enough for len double2 elements.
void cvt_cplx_type_dev(float2 * d_src, double2 ** d_dst, size_t len)
{
	size_t memSize = len * sizeof(double2);

	if (*d_dst == NULL)
		CUDA_ASSERT(cudaMalloc(d_dst, memSize));

	if (len == 0)
		return;	// a zero-block launch would fail with cudaErrorInvalidConfiguration

	dim3 group_size, group_num;
	group_size.x = MAX_GRP_SIZE;
	// Size the grid from the full size_t length to avoid 32-bit truncation
	// before the ceiling division.
	group_num.x = (uint32_t)((len + group_size.x - 1) / group_size.x);
	cvt_cplx_type_kernel<float, double><<<group_num, group_size>>>((void*)d_src, (void*)(*d_dst), len);
	CUDA_ASSERT(cudaGetLastError());	// surface launch-configuration errors
}
// Convert a device buffer of `len` double2 complex values to float2
// (precision is intentionally reduced).
// d_src:  device pointer to len double2 elements.
// d_dst:  in/out device pointer; cudaMalloc'ed here when *d_dst is NULL,
//         otherwise assumed large enough for len float2 elements.
void cvt_cplx_type_dev(double2 * d_src, float2 ** d_dst, size_t len)
{
	size_t memSize = len * sizeof(float2);

	if (*d_dst == NULL)
		CUDA_ASSERT(cudaMalloc(d_dst, memSize));

	if (len == 0)
		return;	// a zero-block launch would fail with cudaErrorInvalidConfiguration

	dim3 group_size, group_num;
	group_size.x = MAX_GRP_SIZE;
	// Size the grid from the full size_t length to avoid 32-bit truncation
	// before the ceiling division.
	group_num.x = (uint32_t)((len + group_size.x - 1) / group_size.x);
	cvt_cplx_type_kernel<double, float><<<group_num, group_size>>>((void*)d_src, (void*)(*d_dst), len);
	CUDA_ASSERT(cudaGetLastError());	// surface launch-configuration errors
}

// Split an interleaved complex buffer (re/im pairs of SrcT) into two planar
// DstT buffers; one thread handles one complex element, so `len` counts
// complex values.
template<typename SrcT, typename DstT> __global__ void seperate_cplx_kernel(const void * d_cplx, void * d_real, void * d_imag, size_t len)
{
	const uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x;
	if (idx < len)
	{
		const SrcT * src = (const SrcT *)d_cplx;
		((DstT *)d_real)[idx] = (DstT)(src[2 * idx]);
		((DstT *)d_imag)[idx] = (DstT)(src[2 * idx + 1]);
	}
}
// Split a device buffer of `len` float2 values into planar real/imag float
// buffers. *d_real and *d_imag are cudaMalloc'ed here when NULL, otherwise
// assumed large enough for len floats each.
void seperate_cplx_dev(float2 * d_cplx, float ** d_real, float ** d_imag, size_t len)
{
	size_t memSize = len * sizeof(float);

	if (*d_real == NULL)
		CUDA_ASSERT(cudaMalloc(d_real, memSize));
	if (*d_imag == NULL)
		CUDA_ASSERT(cudaMalloc(d_imag, memSize));

	if (len == 0)
		return;	// a zero-block launch would fail with cudaErrorInvalidConfiguration

	dim3 group_size, group_num;
	group_size.x = MAX_GRP_SIZE;
	// Size the grid from the full size_t length to avoid 32-bit truncation
	// before the ceiling division.
	group_num.x = (uint32_t)((len + group_size.x - 1) / group_size.x);
	seperate_cplx_kernel<float, float><<<group_num, group_size>>>((void*)d_cplx, (void*)(*d_real), (void*)(*d_imag), len);
	CUDA_ASSERT(cudaGetLastError());	// surface launch-configuration errors
}
// Split a device buffer of `len` float2 values into planar real/imag double
// buffers (components widened to double). *d_real and *d_imag are
// cudaMalloc'ed here when NULL, otherwise assumed large enough.
void seperate_cplx_dev(float2 * d_cplx, double ** d_real, double ** d_imag, size_t len)
{
	size_t memSize = len * sizeof(double);

	if (*d_real == NULL)
		CUDA_ASSERT(cudaMalloc(d_real, memSize));
	if (*d_imag == NULL)
		CUDA_ASSERT(cudaMalloc(d_imag, memSize));

	if (len == 0)
		return;	// a zero-block launch would fail with cudaErrorInvalidConfiguration

	dim3 group_size, group_num;
	group_size.x = MAX_GRP_SIZE;
	// Size the grid from the full size_t length to avoid 32-bit truncation
	// before the ceiling division.
	group_num.x = (uint32_t)((len + group_size.x - 1) / group_size.x);
	seperate_cplx_kernel<float, double><<<group_num, group_size>>>((void*)d_cplx, (void*)(*d_real), (void*)(*d_imag), len);
	CUDA_ASSERT(cudaGetLastError());	// surface launch-configuration errors
}
// Split a device buffer of `len` double2 values into planar real/imag
// double buffers. *d_real and *d_imag are cudaMalloc'ed here when NULL,
// otherwise assumed large enough for len doubles each.
void seperate_cplx_dev(double2 * d_cplx, double ** d_real, double ** d_imag, size_t len)
{
	size_t memSize = len * sizeof(double);

	if (*d_real == NULL)
		CUDA_ASSERT(cudaMalloc(d_real, memSize));
	if (*d_imag == NULL)
		CUDA_ASSERT(cudaMalloc(d_imag, memSize));

	if (len == 0)
		return;	// a zero-block launch would fail with cudaErrorInvalidConfiguration

	dim3 group_size, group_num;
	group_size.x = MAX_GRP_SIZE;
	// Size the grid from the full size_t length to avoid 32-bit truncation
	// before the ceiling division.
	group_num.x = (uint32_t)((len + group_size.x - 1) / group_size.x);
	seperate_cplx_kernel<double, double><<<group_num, group_size>>>((void*)d_cplx, (void*)(*d_real), (void*)(*d_imag), len);
	CUDA_ASSERT(cudaGetLastError());	// surface launch-configuration errors
}
// Split a device buffer of `len` double2 values into planar real/imag float
// buffers (components narrowed to float). *d_real and *d_imag are
// cudaMalloc'ed here when NULL, otherwise assumed large enough.
void seperate_cplx_dev(double2 * d_cplx, float ** d_real, float ** d_imag, size_t len)
{
	size_t memSize = len * sizeof(float);

	if (*d_real == NULL)
		CUDA_ASSERT(cudaMalloc(d_real, memSize));
	if (*d_imag == NULL)
		CUDA_ASSERT(cudaMalloc(d_imag, memSize));

	if (len == 0)
		return;	// a zero-block launch would fail with cudaErrorInvalidConfiguration

	dim3 group_size, group_num;
	group_size.x = MAX_GRP_SIZE;
	// Size the grid from the full size_t length to avoid 32-bit truncation
	// before the ceiling division.
	group_num.x = (uint32_t)((len + group_size.x - 1) / group_size.x);
	seperate_cplx_kernel<double, float><<<group_num, group_size>>>((void*)d_cplx, (void*)(*d_real), (void*)(*d_imag), len);
	CUDA_ASSERT(cudaGetLastError());	// surface launch-configuration errors
}

// Interleave two planar SrcT buffers into one complex buffer of DstT
// (re/im pairs); one thread writes one complex element, so `len` counts
// complex values.
template<typename SrcT, typename DstT> __global__ void form_cplx_kernel(void* d_real, void* d_imag, void* d_cplx, size_t len)
{
	uint32_t glbIdx = blockDim.x * blockIdx.x + threadIdx.x;
	if (glbIdx >= len)
		return;

	SrcT* pr = (SrcT*)d_real;
	SrcT* pi = (SrcT*)d_imag;
	DstT* pc = (DstT*)d_cplx;
	// Explicit casts make the double -> float narrowing intentional and
	// match the sibling cvt/seperate kernels in this file.
	pc[glbIdx * 2 + 0] = (DstT)(pr[glbIdx]);
	pc[glbIdx * 2 + 1] = (DstT)(pi[glbIdx]);
}
// Interleave planar float real/imag device buffers into a float2 buffer.
// d_real/d_imag: device pointers to len floats each.
// d_cplx: in/out device pointer; cudaMalloc'ed here when *d_cplx is NULL,
//         otherwise assumed large enough for len float2 elements.
void form_cplx_dev(float* d_real, float* d_imag, float2** d_cplx, size_t len)
{
	size_t memSize = len * sizeof(float2);

	if (*d_cplx == NULL)
		CUDA_ASSERT(cudaMalloc(d_cplx, memSize));

	if (len == 0)
		return;	// a zero-block launch would fail with cudaErrorInvalidConfiguration

	dim3 group_size, group_num;
	group_size.x = MAX_GRP_SIZE;
	// Size the grid from the full size_t length to avoid 32-bit truncation
	// before the ceiling division.
	group_num.x = (uint32_t)((len + group_size.x - 1) / group_size.x);
	form_cplx_kernel<float, float><<<group_num, group_size>>>((void*)d_real, (void*)d_imag, (void*)(*d_cplx), len);
	CUDA_ASSERT(cudaGetLastError());	// surface launch-configuration errors
}
// Interleave planar float real/imag device buffers into a double2 buffer
// (components widened to double).
// d_real/d_imag: device pointers to len floats each.
// d_cplx: in/out device pointer; cudaMalloc'ed here when *d_cplx is NULL,
//         otherwise assumed large enough for len double2 elements.
void form_cplx_dev(float* d_real, float* d_imag, double2** d_cplx, size_t len)
{
	size_t memSize = len * sizeof(double2);

	if (*d_cplx == NULL)
		CUDA_ASSERT(cudaMalloc(d_cplx, memSize));

	if (len == 0)
		return;	// a zero-block launch would fail with cudaErrorInvalidConfiguration

	dim3 group_size, group_num;
	group_size.x = MAX_GRP_SIZE;
	// Size the grid from the full size_t length to avoid 32-bit truncation
	// before the ceiling division.
	group_num.x = (uint32_t)((len + group_size.x - 1) / group_size.x);
	form_cplx_kernel<float, double><<<group_num, group_size>>>((void*)d_real, (void*)d_imag, (void*)(*d_cplx), len);
	CUDA_ASSERT(cudaGetLastError());	// surface launch-configuration errors
}
// Interleave planar double real/imag device buffers into a float2 buffer
// (components narrowed to float).
// d_real/d_imag: device pointers to len doubles each.
// d_cplx: in/out device pointer; cudaMalloc'ed here when *d_cplx is NULL,
//         otherwise assumed large enough for len float2 elements.
void form_cplx_dev(double* d_real, double* d_imag, float2** d_cplx, size_t len)
{
	size_t memSize = len * sizeof(float2);

	if (*d_cplx == NULL)
		CUDA_ASSERT(cudaMalloc(d_cplx, memSize));

	if (len == 0)
		return;	// a zero-block launch would fail with cudaErrorInvalidConfiguration

	dim3 group_size, group_num;
	group_size.x = MAX_GRP_SIZE;
	// Size the grid from the full size_t length to avoid 32-bit truncation
	// before the ceiling division.
	group_num.x = (uint32_t)((len + group_size.x - 1) / group_size.x);
	form_cplx_kernel<double, float><<<group_num, group_size>>>((void*)d_real, (void*)d_imag, (void*)(*d_cplx), len);
	CUDA_ASSERT(cudaGetLastError());	// surface launch-configuration errors
}
// Interleave planar double real/imag device buffers into a double2 buffer.
// d_real/d_imag: device pointers to len doubles each.
// d_cplx: in/out device pointer; cudaMalloc'ed here when *d_cplx is NULL,
//         otherwise assumed large enough for len double2 elements.
void form_cplx_dev(double* d_real, double* d_imag, double2** d_cplx, size_t len)
{
	size_t memSize = len * sizeof(double2);

	if (*d_cplx == NULL)
		CUDA_ASSERT(cudaMalloc(d_cplx, memSize));

	if (len == 0)
		return;	// a zero-block launch would fail with cudaErrorInvalidConfiguration

	dim3 group_size, group_num;
	group_size.x = MAX_GRP_SIZE;
	// Size the grid from the full size_t length to avoid 32-bit truncation
	// before the ceiling division.
	group_num.x = (uint32_t)((len + group_size.x - 1) / group_size.x);
	form_cplx_kernel<double, double><<<group_num, group_size>>>((void*)d_real, (void*)d_imag, (void*)(*d_cplx), len);
	CUDA_ASSERT(cudaGetLastError());	// surface launch-configuration errors
}

} // end namespace feifei
