#include "../inc/ff_utils_cuda.h"

namespace feifei
{
	// Supported NVIDIA compute capabilities; value encodes major*10 + minor
	// (e.g. Cuda75 == compute capability 7.5). Used to pick NVRTC/nvcc arch flags.
	typedef enum class IsaArchEnum
	{
		Cuda60 = 60,
		Cuda61 = 61,
		Cuda62 = 62,
		Cuda70 = 70,
		Cuda72 = 72,
		Cuda75 = 75
	}E_IsaArch;
	// CUDA platform identification, filled by cuGetPlatformInfo().
	typedef struct PlatformInfoType
	{
		std::string name;     // platform name ("NVIDIA CUDA")
		std::string version;  // driver version string, e.g. "CUDA 10.2"
		std::string vendor;   // vendor string ("NVIDIA Corporation")
	}T_PlatformInfo;
	// Per-device properties, filled by cuGetDeviceInfo() for the selected device.
	typedef struct DeviceInfoType
	{
		std::string Name;           // device name from cuDeviceGetName
		E_IsaArch Arch;             // compute-capability enum derived from major/minor

		uint64_t CoreClkFreqHz;     // core clock in Hz (devProp.clockRate is kHz)
		int CuNum;                  // multiprocessor (SM) count
		int SimdNumPerCu;           // CUDA cores per SM from nGpuArchCoresPerSM table; -1 if unknown
		int TotalSimdNum;           // SimdNumPerCu * CuNum

		int WaveSize;               // warp size (taken from devProp.SIMDWidth)

		uint64_t MemClkFreqHz;      // memory clock in Hz (attribute value is kHz)
		size_t LdsSize;             // shared memory per block, bytes
		size_t GlobalMemSize;       // total global memory, bytes
		int GlobalMemWidth;         // global memory bus width, bits
		int L2CacheSize;            // L2 cache size, bytes

		int major;                  // compute capability major
		int minor;                  // compute capability minor
		CUuuid uuid;                // NOTE(review): never filled by cuGetDeviceInfo — confirm before use

		int maxThreadsPerBlock;     /**< Maximum number of threads per block */
		int maxThreadsDim[3];       /**< Maximum size of each dimension of a block */
		int maxGridSize[3];         /**< Maximum size of each dimension of a grid */

		int enGlobalL1;             // non-zero if global loads are cached in L1
		int enLocalL1;              // non-zero if local loads are cached in L1
		int regsPerBlock;           /**< 32-bit registers available per block */
		int totalConstantMemory;    /**< Constant memory available on device in bytes */
		int maxPitchMem;               /**< Maximum pitch in bytes allowed by memory copies */
		int textureAlign;           /**< Alignment requirement for textures */
	}T_DeviceInfo;

	// CUDA cores per SM, indexed by compute capability [major][minor].
	// -1 marks combinations this table does not know. NOTE: the table only
	// covers major/minor in [0,7] — callers must bounds-check before indexing.
	static int nGpuArchCoresPerSM[8][8] = // [major][minor]
	{
		// minor: 0,    1,    2,    3,    4,    5,    6,    7     // major
				 -1,   -1,   -1,   -1,   -1,   -1,   -1,   -1,    // 0
				 -1,   -1,   -1,   -1,   -1,   -1,   -1,   -1,    // 1
				 -1,   -1,   -1,   -1,   -1,   -1,   -1,   -1,    // 2
				192,   -1,  192,   -1,   -1,  192,   -1,  192,    // 3
				 -1,   -1,   -1,   -1,   -1,   -1,   -1,   -1,    // 4
				128,   -1,  128,  128,   -1,   -1,   -1,   -1,    // 5
				 64,  128,  128,   -1,   -1,   -1,   -1,   -1,    // 6
				 64,   -1,   64,   -1,   -1,   64,   -1,   -1     // 7
	};
	// File-scope runtime state shared by the functions below.
	static CUcontext context = nullptr;       // context created by cuInitDevice
	static T_PlatformInfo PlatformInfo;       // filled by cuGetPlatformInfo
	static T_DeviceInfo DeviceInfo;           // filled by cuGetDeviceInfo
	static CUdevice DeviceId;                 // handle of the selected device
	static int DeviceCount;                   // number of CUDA devices found
	static int SellectDeviceIndex = 0;        // index of the device to use (sic: "Sellect")

	// Map a CUresult code to its enumerator name for log messages.
	// Returns "<unknown>" for codes not covered by this driver-header vintage
	// (some newer codes are commented out below to keep older headers compiling).
	static const char* cuGetErrorInfo(CUresult error)
	{
		switch (error)
		{
		case CUDA_SUCCESS:								return "CUDA_SUCCESS";
			// 1
		case CUDA_ERROR_INVALID_VALUE:					return "CUDA_ERROR_INVALID_VALUE";
		case CUDA_ERROR_OUT_OF_MEMORY:					return "CUDA_ERROR_OUT_OF_MEMORY";
		case CUDA_ERROR_NOT_INITIALIZED:				return "CUDA_ERROR_NOT_INITIALIZED";
		case CUDA_ERROR_DEINITIALIZED:					return "CUDA_ERROR_DEINITIALIZED";
		case CUDA_ERROR_PROFILER_DISABLED:				return "CUDA_ERROR_PROFILER_DISABLED";
		case CUDA_ERROR_PROFILER_NOT_INITIALIZED:		return "CUDA_ERROR_PROFILER_NOT_INITIALIZED";
		case CUDA_ERROR_PROFILER_ALREADY_STARTED:		return "CUDA_ERROR_PROFILER_ALREADY_STARTED";
		case CUDA_ERROR_PROFILER_ALREADY_STOPPED:		return "CUDA_ERROR_PROFILER_ALREADY_STOPPED";
			// 100
		case CUDA_ERROR_NO_DEVICE:						return "CUDA_ERROR_NO_DEVICE";
		case CUDA_ERROR_INVALID_DEVICE:					return "CUDA_ERROR_INVALID_DEVICE";
			// 200
		case CUDA_ERROR_INVALID_IMAGE:					return "CUDA_ERROR_INVALID_IMAGE";
		case CUDA_ERROR_INVALID_CONTEXT:				return "CUDA_ERROR_INVALID_CONTEXT";
		case CUDA_ERROR_CONTEXT_ALREADY_CURRENT:		return "CUDA_ERROR_CONTEXT_ALREADY_CURRENT";
		case CUDA_ERROR_MAP_FAILED:						return "CUDA_ERROR_MAP_FAILED";
		case CUDA_ERROR_UNMAP_FAILED:					return "CUDA_ERROR_UNMAP_FAILED";
		case CUDA_ERROR_ARRAY_IS_MAPPED:				return "CUDA_ERROR_ARRAY_IS_MAPPED";
		case CUDA_ERROR_ALREADY_MAPPED:					return "CUDA_ERROR_ALREADY_MAPPED";
		case CUDA_ERROR_NO_BINARY_FOR_GPU:				return "CUDA_ERROR_NO_BINARY_FOR_GPU";
			// 210
		case CUDA_ERROR_ALREADY_ACQUIRED:				return "CUDA_ERROR_ALREADY_ACQUIRED";
		case CUDA_ERROR_NOT_MAPPED:						return "CUDA_ERROR_NOT_MAPPED";
		case CUDA_ERROR_NOT_MAPPED_AS_ARRAY:			return "CUDA_ERROR_NOT_MAPPED_AS_ARRAY";
		case CUDA_ERROR_NOT_MAPPED_AS_POINTER:			return "CUDA_ERROR_NOT_MAPPED_AS_POINTER";
		case CUDA_ERROR_ECC_UNCORRECTABLE:				return "CUDA_ERROR_ECC_UNCORRECTABLE";
		case CUDA_ERROR_UNSUPPORTED_LIMIT:				return "CUDA_ERROR_UNSUPPORTED_LIMIT";
		case CUDA_ERROR_CONTEXT_ALREADY_IN_USE:			return "CUDA_ERROR_CONTEXT_ALREADY_IN_USE";
		case CUDA_ERROR_PEER_ACCESS_UNSUPPORTED:		return "CUDA_ERROR_PEER_ACCESS_UNSUPPORTED";
		case CUDA_ERROR_INVALID_PTX:					return "CUDA_ERROR_INVALID_PTX";
		case CUDA_ERROR_INVALID_GRAPHICS_CONTEXT:		return "CUDA_ERROR_INVALID_GRAPHICS_CONTEXT";
		case CUDA_ERROR_NVLINK_UNCORRECTABLE:			return "CUDA_ERROR_NVLINK_UNCORRECTABLE";
		case CUDA_ERROR_JIT_COMPILER_NOT_FOUND:			return "CUDA_ERROR_JIT_COMPILER_NOT_FOUND";
			// 300
		case CUDA_ERROR_INVALID_SOURCE:					return "CUDA_ERROR_INVALID_SOURCE";
		case CUDA_ERROR_FILE_NOT_FOUND:					return "CUDA_ERROR_FILE_NOT_FOUND";
		case CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND:	return "CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND";
		case CUDA_ERROR_SHARED_OBJECT_INIT_FAILED:		return "CUDA_ERROR_SHARED_OBJECT_INIT_FAILED";
		case CUDA_ERROR_OPERATING_SYSTEM:				return "CUDA_ERROR_OPERATING_SYSTEM";
			// 400
		case CUDA_ERROR_INVALID_HANDLE:					return "CUDA_ERROR_INVALID_HANDLE";
		case CUDA_ERROR_ILLEGAL_STATE:					return "CUDA_ERROR_ILLEGAL_STATE";
		case CUDA_ERROR_NOT_FOUND:						return "CUDA_ERROR_NOT_FOUND";
		case CUDA_ERROR_NOT_READY:						return "CUDA_ERROR_NOT_READY";
			// 700
		case CUDA_ERROR_ILLEGAL_ADDRESS:				return "CUDA_ERROR_ILLEGAL_ADDRESS";
		case CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES:		return "CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES";
		case CUDA_ERROR_LAUNCH_TIMEOUT:					return "CUDA_ERROR_LAUNCH_TIMEOUT";
		case CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING:	return "CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING";
		case CUDA_ERROR_PEER_ACCESS_ALREADY_ENABLED:	return "CUDA_ERROR_PEER_ACCESS_ALREADY_ENABLED";
		case CUDA_ERROR_PEER_ACCESS_NOT_ENABLED:		return "CUDA_ERROR_PEER_ACCESS_NOT_ENABLED";
		case CUDA_ERROR_PRIMARY_CONTEXT_ACTIVE:			return "CUDA_ERROR_PRIMARY_CONTEXT_ACTIVE";
		case CUDA_ERROR_CONTEXT_IS_DESTROYED:			return "CUDA_ERROR_CONTEXT_IS_DESTROYED";
			// 710
		case CUDA_ERROR_ASSERT:							return "CUDA_ERROR_ASSERT";
		case CUDA_ERROR_TOO_MANY_PEERS:					return "CUDA_ERROR_TOO_MANY_PEERS";
		case CUDA_ERROR_HOST_MEMORY_ALREADY_REGISTERED:	return "CUDA_ERROR_HOST_MEMORY_ALREADY_REGISTERED";
		case CUDA_ERROR_HOST_MEMORY_NOT_REGISTERED:		return "CUDA_ERROR_HOST_MEMORY_NOT_REGISTERED";
		case CUDA_ERROR_HARDWARE_STACK_ERROR:			return "CUDA_ERROR_HARDWARE_STACK_ERROR";
		case CUDA_ERROR_ILLEGAL_INSTRUCTION:			return "CUDA_ERROR_ILLEGAL_INSTRUCTION";
		case CUDA_ERROR_MISALIGNED_ADDRESS:				return "CUDA_ERROR_MISALIGNED_ADDRESS";
		case CUDA_ERROR_INVALID_ADDRESS_SPACE:			return "CUDA_ERROR_INVALID_ADDRESS_SPACE";
		case CUDA_ERROR_INVALID_PC:						return "CUDA_ERROR_INVALID_PC";
		case CUDA_ERROR_LAUNCH_FAILED:					return "CUDA_ERROR_LAUNCH_FAILED";
		case CUDA_ERROR_COOPERATIVE_LAUNCH_TOO_LARGE:	return "CUDA_ERROR_COOPERATIVE_LAUNCH_TOO_LARGE";
			// 800
		case CUDA_ERROR_NOT_PERMITTED:					return "CUDA_ERROR_NOT_PERMITTED";
		case CUDA_ERROR_NOT_SUPPORTED:					return "CUDA_ERROR_NOT_SUPPORTED";
		case CUDA_ERROR_SYSTEM_NOT_READY:				return "CUDA_ERROR_SYSTEM_NOT_READY";
			//		case CUDA_ERROR_SYSTEM_DRIVER_MISMATCH:			return "CUDA_ERROR_SYSTEM_DRIVER_MISMATCH";
			//		case CUDA_ERROR_COMPAT_NOT_SUPPORTED_ON_DEVICE:	return "CUDA_ERROR_COMPAT_NOT_SUPPORTED_ON_DEVICE";
					// 900
		case CUDA_ERROR_STREAM_CAPTURE_UNSUPPORTED:		return "CUDA_ERROR_STREAM_CAPTURE_UNSUPPORTED";
		case CUDA_ERROR_STREAM_CAPTURE_INVALIDATED:		return "CUDA_ERROR_STREAM_CAPTURE_INVALIDATED";
		case CUDA_ERROR_STREAM_CAPTURE_MERGE:			return "CUDA_ERROR_STREAM_CAPTURE_MERGE";
		case CUDA_ERROR_STREAM_CAPTURE_UNMATCHED:		return "CUDA_ERROR_STREAM_CAPTURE_UNMATCHED";
		case CUDA_ERROR_STREAM_CAPTURE_UNJOINED:		return "CUDA_ERROR_STREAM_CAPTURE_UNJOINED";
		case CUDA_ERROR_STREAM_CAPTURE_ISOLATION:		return "CUDA_ERROR_STREAM_CAPTURE_ISOLATION";
		case CUDA_ERROR_STREAM_CAPTURE_IMPLICIT:		return "CUDA_ERROR_STREAM_CAPTURE_IMPLICIT";
		case CUDA_ERROR_CAPTURED_EVENT:					return "CUDA_ERROR_CAPTURED_EVENT";
			//		case CUDA_ERROR_STREAM_CAPTURE_WRONG_THREAD:	return "CUDA_ERROR_STREAM_CAPTURE_WRONG_THREAD";
					// 999 
		case CUDA_ERROR_UNKNOWN:						return "CUDA_ERROR_UNKNOWN";
		}

		return "<unknown>";
	}

	void cuGetPlatformInfo()
	{
		CUresult errNum;
		int tmpInt;

		errNum = cuDriverGetVersion(&tmpInt);
		int major = tmpInt / 1000; int minor = tmpInt % 100 / 10;
		PlatformInfo.version = "CUDA " + std::to_string(major) + "." + std::to_string(minor);
		PlatformInfo.vendor = "NVIDIA Corporation";
		PlatformInfo.name = "NVIDIA CUDA";
	}
	// Fill the global DeviceInfo for the currently selected DeviceId.
	// Assumes cuInit() succeeded and DeviceId is valid (see InitGpuRuntime).
	void cuGetDeviceInfo()
	{
		CUresult errNum;
		int tmpInt;
		char* tmpChar;
		int CHAR_BUFF_SIZE = 1024;

		tmpChar = (char*)alloca(CHAR_BUFF_SIZE);
		errNum = cuDeviceGetName(tmpChar, CHAR_BUFF_SIZE, DeviceId);
		DeviceInfo.Name = std::string(tmpChar);

		// NOTE(review): cuDeviceComputeCapability is deprecated in newer driver
		// headers; kept here for compatibility with the versions this file targets.
		errNum = cuDeviceComputeCapability(&DeviceInfo.major, &DeviceInfo.minor, DeviceId);
		errNum = cuDeviceTotalMem(&DeviceInfo.GlobalMemSize, DeviceId);
		if (DeviceInfo.major == 6)
		{
			if (DeviceInfo.minor == 0) DeviceInfo.Arch = E_IsaArch::Cuda60;
			if (DeviceInfo.minor == 1) DeviceInfo.Arch = E_IsaArch::Cuda61;
			if (DeviceInfo.minor == 2) DeviceInfo.Arch = E_IsaArch::Cuda62;
		}
		if (DeviceInfo.major == 7)
		{
			if (DeviceInfo.minor == 0) DeviceInfo.Arch = E_IsaArch::Cuda70;
			if (DeviceInfo.minor == 2) DeviceInfo.Arch = E_IsaArch::Cuda72;
			if (DeviceInfo.minor == 5) DeviceInfo.Arch = E_IsaArch::Cuda75;
		}

		CUdevprop devProp;
		errNum = cuDeviceGetProperties(&devProp, DeviceId);
		DeviceInfo.CoreClkFreqHz = devProp.clockRate * 1000;	// clockRate is in kHz
		DeviceInfo.maxThreadsPerBlock = devProp.maxThreadsPerBlock;
		DeviceInfo.WaveSize = devProp.SIMDWidth;
		DeviceInfo.maxThreadsDim[0] = devProp.maxThreadsDim[0];
		DeviceInfo.maxThreadsDim[1] = devProp.maxThreadsDim[1];
		DeviceInfo.maxThreadsDim[2] = devProp.maxThreadsDim[2];
		DeviceInfo.maxGridSize[0] = devProp.maxGridSize[0];
		DeviceInfo.maxGridSize[1] = devProp.maxGridSize[1];
		DeviceInfo.maxGridSize[2] = devProp.maxGridSize[2];
		DeviceInfo.regsPerBlock = devProp.regsPerBlock;
		DeviceInfo.LdsSize = devProp.sharedMemPerBlock;
		DeviceInfo.totalConstantMemory = devProp.totalConstantMemory;
		DeviceInfo.maxPitchMem = devProp.memPitch;
		DeviceInfo.textureAlign = devProp.textureAlign;

		errNum = cuDeviceGetAttribute(&DeviceInfo.CuNum, CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT, DeviceId);
		// Bounds-check the lookup: nGpuArchCoresPerSM is only 8x8, so a device
		// with compute capability >= 8.0 (or an unexpected minor) would have
		// caused an out-of-bounds read with the unguarded original code.
		if ((DeviceInfo.major >= 0) && (DeviceInfo.major < 8) &&
			(DeviceInfo.minor >= 0) && (DeviceInfo.minor < 8))
			DeviceInfo.SimdNumPerCu = nGpuArchCoresPerSM[DeviceInfo.major][DeviceInfo.minor];
		else
			DeviceInfo.SimdNumPerCu = -1;	// unknown architecture
		DeviceInfo.TotalSimdNum = DeviceInfo.SimdNumPerCu * DeviceInfo.CuNum;
		errNum = cuDeviceGetAttribute(&tmpInt, CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE, DeviceId);
		DeviceInfo.MemClkFreqHz = (uint64_t)tmpInt * 1000;	// attribute is in kHz
		errNum = cuDeviceGetAttribute(&DeviceInfo.GlobalMemWidth, CU_DEVICE_ATTRIBUTE_GLOBAL_MEMORY_BUS_WIDTH, DeviceId);
		errNum = cuDeviceGetAttribute(&DeviceInfo.L2CacheSize, CU_DEVICE_ATTRIBUTE_L2_CACHE_SIZE, DeviceId);
		errNum = cuDeviceGetAttribute(&DeviceInfo.enGlobalL1, CU_DEVICE_ATTRIBUTE_GLOBAL_L1_CACHE_SUPPORTED, DeviceId);
		errNum = cuDeviceGetAttribute(&DeviceInfo.enLocalL1, CU_DEVICE_ATTRIBUTE_LOCAL_L1_CACHE_SUPPORTED, DeviceId);
	}
	// Print the full DeviceInfo record (filled by cuGetDeviceInfo) to the log.
	// Output order is intentional; callers may rely on it for readable reports.
	void PrintDeviceDetails()
	{
		PrintSeperator('-');

		INFO("\t- Device Name: " + DeviceInfo.Name);
		INFO("\t- Compute Capability: %d.%d", DeviceInfo.major, DeviceInfo.minor);
		//INFO("+ uuid: %s", DeviceInfo.uuid.bytes);

		INFO("\t- Core Clock: " + fmtFreq(DeviceInfo.CoreClkFreqHz));
		INFO("\t- Multi Processor Number: %d", DeviceInfo.CuNum);
		INFO("\t- CUDA Core Number per SM: %d", DeviceInfo.SimdNumPerCu);
		INFO("\t- Total CUDA Core Number: %d", DeviceInfo.TotalSimdNum);
		INFO("\t- Max Threads per Block: %d", DeviceInfo.maxThreadsPerBlock);
		INFO("\t- Warp Size: %d", DeviceInfo.WaveSize);
		INFO("\t- Max Threads dim: (%d, %d, %d)", DeviceInfo.maxThreadsDim[0], DeviceInfo.maxThreadsDim[1], DeviceInfo.maxThreadsDim[2]);
		INFO("\t- Max Grid Size: (%d, %d, %d)", DeviceInfo.maxGridSize[0], DeviceInfo.maxGridSize[1], DeviceInfo.maxGridSize[2]);

		INFO("\t- Memory Clock: " + fmtFreq(DeviceInfo.MemClkFreqHz));
		INFO("\t- Global Memory Bus Width: %d(bit)", DeviceInfo.GlobalMemWidth);
		INFO("\t- L2 Cache Size: " + fmtSize(DeviceInfo.L2CacheSize));
		INFO("\t- Support Global L1 Cache: %s", DeviceInfo.enGlobalL1 ? "TRUE" : "FALSE");
		INFO("\t- Support Local L1 Cache: %s", DeviceInfo.enLocalL1 ? "TRUE" : "FALSE");
		INFO("\t- Reg per Block: %d", DeviceInfo.regsPerBlock);
		INFO("\t- Shared Memory per Block: " + fmtSize(DeviceInfo.LdsSize));
		INFO("\t- Total Constant Memory: " + fmtSize(DeviceInfo.totalConstantMemory));
		INFO("\t- Total Global Memory: " + fmtSize(DeviceInfo.GlobalMemSize));
		INFO("\t- Max Pitch Memory: " + fmtSize(DeviceInfo.maxPitchMem));
		INFO("\t- Texture Align: %d", DeviceInfo.textureAlign);

		PrintSeperator('-');
	}
	// Print a one-screen summary of DeviceInfo plus a derived fp32 peak estimate.
	void PrintDeviceInfoShort()
	{
		// Peak fp32 throughput: each CUDA core can retire one FMA (mult + add)
		// per cycle, counted as 2 floating-point operations.
		double perf = 2.0 * DeviceInfo.TotalSimdNum * DeviceInfo.CoreClkFreqHz;	// 2 opts(mult & add) in one cycle
		INFO("\t- Device Name: " + DeviceInfo.Name);
		INFO("\t- Compute Unit Number = %d", DeviceInfo.CuNum);
		INFO("\t- Shading Core per CU = %d", DeviceInfo.SimdNumPerCu);
		INFO("\t- Core Clock: " + fmtFreq(DeviceInfo.CoreClkFreqHz));
		INFO("\t- Performance(fp32) = %.3f(TFlops)", perf * 1e-12);
		INFO("\t-");
		INFO("\t- Memory Clock: " + fmtFreq(DeviceInfo.MemClkFreqHz));
		INFO("\t- Shared Memory per Block: " + fmtSize(DeviceInfo.LdsSize));
		INFO("\t- Total Global Memory: " + fmtSize(DeviceInfo.GlobalMemSize));
		PrintSeperator('-');
	}
	// Print the runtime banner, platform version and device summary.
	// isFullInfo selects the detailed device report over the short one.
	void PrintGpuRuntimeInfo(bool isFullInfo)
	{
		INFO("");
		PrintSeperator('=');
		PrintHeader("Runtime", '=');
		PrintSeperator('=');

		// Platform section (name/vendor lines kept for reference).
		//INFO("- Platform Name: " + PlatformInfo.name);
		INFO("- Version: " + PlatformInfo.version);
		//INFO("- Vendor Name: " + PlatformInfo.vendor);

		// Device section.
		INFO("- System has %d devices.", DeviceCount);
		INFO("- Sellect device ID: %d.", SellectDeviceIndex);

		if (isFullInfo)
			PrintDeviceDetails();
		else
			PrintDeviceInfoShort();
	}
	// Create a context on the selected device (host-mapped allocations enabled)
	// and populate DeviceInfo. Returns RTN_ERR if context creation fails.
	E_ffState cuInitDevice()
	{
		uint32_t flag = CU_CTX_MAP_HOST;
		CUresult errNum;

		errNum = cuCtxCreate(&context, flag, DeviceId);
		if (errNum != CUDA_SUCCESS)
		{
			// message typo fixed ("faild" -> "failed")
			ERR("failed to create context: %s.", cuGetErrorInfo(errNum));
			return E_ffState::RTN_ERR;
		}
		cuGetDeviceInfo();

		return E_ffState::SUCCESS;
	}
	// Initialize the CUDA driver, select a device, create its context and
	// print the runtime report. Returns RTN_ERR on any initialization failure.
	E_ffState InitGpuRuntime()
	{
		CUresult errNum;

		// runtime & platform
		errNum = cuInit(0);
		if (errNum != CUDA_SUCCESS)
		{
			ERR("failed to init cuda platform: %s.", cuGetErrorInfo(errNum));
			return E_ffState::RTN_ERR;
		}
		cuGetPlatformInfo();

		// devices — the original fell through after logging this error and then
		// used an invalid device index; bail out instead.
		errNum = cuDeviceGetCount(&DeviceCount);
		if ((errNum != CUDA_SUCCESS) || (DeviceCount == 0))
		{
			ERR("failed to enumerate cuda devices: %s.", cuGetErrorInfo(errNum));
			return E_ffState::RTN_ERR;
		}

		errNum = cuDeviceGet(&DeviceId, SellectDeviceIndex);
		if (errNum != CUDA_SUCCESS)
		{
			ERR("failed to get device %d: %s.", SellectDeviceIndex, cuGetErrorInfo(errNum));
			return E_ffState::RTN_ERR;
		}
		// Propagate context-creation failure instead of ignoring it.
		if (cuInitDevice() != E_ffState::SUCCESS)
			return E_ffState::RTN_ERR;

		// NOTE(review): called without an argument although the definition above
		// takes bool — presumably a default argument exists in the header; confirm.
		PrintGpuRuntimeInfo();
		return E_ffState::SUCCESS;
	}
	// Destroy the context created by cuInitDevice. Guarded so that calling this
	// before init, or twice, neither destroys a null handle nor double-releases.
	void ReleaseGpuRuntime()
	{
		if (context != nullptr)
		{
			cuCtxDestroy(context);
			context = nullptr;
		}
	}
#if 0
	// Disabled: runtime-API variant of InitGpuRuntime (this file otherwise uses
	// the driver API). Compiled out; kept for reference only.
	// NOTE(review): if re-enabled, `RuntimeVersion % 10` below drops the minor
	// version (encoding is 1000*major + 10*minor) — should be (v % 1000) / 10.
	E_ffState InitRuntime()
	{
		int RuntimeVersion;
		int DeviceCount;
		cudaDeviceProp DeviceProp;

		INFO("");
		PrintSeperator('=');
		PrintHeader("Runtime", '=');
		PrintSeperator('=');
		cudaDeviceReset();
		CUDA_ASSERT(cudaRuntimeGetVersion(&RuntimeVersion));
		INFO("Runtime Version: %d.%d.", RuntimeVersion / 1000, RuntimeVersion % 10);

		CUDA_ASSERT(cudaGetDeviceCount(&DeviceCount));
		INFO("System has %d devices.", DeviceCount);

		SellectDeviceIndex = 0;
		INFO("Sellect device ID: %d.", SellectDeviceIndex);
		cudaSetDevice(SellectDeviceIndex);

		CUDA_ASSERT(cudaGetDeviceProperties(&DeviceProp, SellectDeviceIndex));
		INFO("\t- Device Name: %s.", DeviceProp.name);
		INFO("\t- Compute Capability: %d.%d", DeviceProp.major, DeviceProp.minor);
		INFO("\t- Clock Rate: %.3f(GHz).", DeviceProp.clockRate / 1000.0 / 1000.0);
		INFO("\t- Multiprocessors(SM) Number: %d.", DeviceProp.multiProcessorCount);
		INFO("\t-");
		INFO("\t- Memory Clock Rate: %.3f(GHz).", DeviceProp.memoryClockRate / 1000.0 / 1000.0);
		INFO("\t- Memory Bus Width: %d(bit).", DeviceProp.memoryBusWidth);
		INFO("\t- Global Memory: %.2f(GB).", DeviceProp.totalGlobalMem / 1024.0 / 1024.0 / 1024.0);
		INFO("\t- Shared Memory per Block: %.2f(KB).", DeviceProp.sharedMemPerBlock / 1024.0);
		INFO("\t- Shared Memory per SM: %.2f(KB).", DeviceProp.sharedMemPerMultiprocessor / 1024.0);
		INFO("\t-");
		INFO("\t- Warp Size: %d.", DeviceProp.warpSize);
		INFO("\t- Max Thread Number per SM: %d.", DeviceProp.maxThreadsPerMultiProcessor);
		PrintSeperator('-');

		return E_ffState::SUCCESS;
	}
#endif

	void LogDataDev(const float2* d_addr, const size_t len,
		std::string name,
		uint64_t startIdx, bool isHex,
		int numPerRow, int fmtLen,
		FILE* of)
	{
		cplx<float>* t = (cplx<float>*)d_addr;
		LogDataDev(t, len, name, startIdx, isHex, numPerRow, fmtLen, of);
	}
	void LogDataDev(const double2* d_addr, const size_t len,
		std::string name,
		uint64_t startIdx, bool isHex,
		int numPerRow, int fmtLen,
		FILE* of)
	{
		cplx<double>* t = (cplx<double>*)d_addr;
		LogDataDev(t, len, name, startIdx, isHex, numPerRow, fmtLen, of);
	}
	void DumpDataDev(const float2* d_addr, const size_t len,
		std::string full_file_name, bool isAppend,
		uint64_t startIdx, bool isHex,
		int numPerRow, int fmtLen)
	{
		cplx<float>* t = (cplx<float>*)d_addr;
		DumpDataDev(t, len, full_file_name, isAppend, startIdx, isHex, numPerRow, fmtLen);
	}
	void DumpDataDev(const double2* d_addr, const size_t len,
		std::string full_file_name, bool isAppend,
		uint64_t startIdx, bool isHex,
		int numPerRow, int fmtLen)
	{
		cplx<double>* t = (cplx<double>*)d_addr;
		DumpDataDev(t, len, full_file_name, isAppend, startIdx, isHex, numPerRow, fmtLen);
	}

	void* DevMalloc(size_t byteNum)
	{
		CUresult errNum;
		CUdeviceptr d_mem;

		errNum = cuMemAlloc(&d_mem, byteNum);
		if ((errNum != CUDA_SUCCESS) || (d_mem == 0))
		{
			LOG("failed for nvrtc to create program.");
			return nullptr;
		}

		return (void*)d_mem;
	}
	// Allocate byteNum bytes of host memory of the requested kind:
	// Page (plain malloc), Pin (page-locked), WrCmb (write-combined) or
	// Map (device-mapped). Returns nullptr on failure.
	void* HstMalloc(size_t byteNum, E_MemType memType)
	{
		// Initialize: the malloc paths never set errNum, so the original code
		// read it uninitialized and could spuriously report failure.
		CUresult errNum = CUDA_SUCCESS;
		void* h_mem = nullptr;

		switch (memType)
		{
		case E_MemType::Page:
			h_mem = (float*)malloc(byteNum);
			break;
		case E_MemType::Pin:
			errNum = cuMemHostAlloc((void**)&h_mem, byteNum, 0);
			break;
		case E_MemType::WrCmb:
			errNum = cuMemHostAlloc((void**)&h_mem, byteNum, CU_MEMHOSTALLOC_WRITECOMBINED);
			break;
		case E_MemType::Map:
			errNum = cuMemHostAlloc((void**)&h_mem, byteNum, CU_MEMHOSTALLOC_DEVICEMAP);
			break;
		default:
			h_mem = (float*)malloc(byteNum);
		}

		if ((errNum != CUDA_SUCCESS) || (h_mem == nullptr))
		{
			WARN("HostMalloc");
			h_mem = nullptr;
		}
		return (void*)h_mem;
	}

	// Synchronous host-to-device copy of byteNum bytes.
	// Returns RTN_ERR on failure (the original always returned SUCCESS).
	E_ffState MemCopyH2D(void* d_mem, void* h_mem, size_t byteNum)
	{
		CUresult errNum;

		errNum = cuMemcpyHtoD((CUdeviceptr)d_mem, h_mem, byteNum);
		if (errNum != CUDA_SUCCESS)
		{
			// %zu: byteNum is size_t; the original passed it to %d (UB on 64-bit).
			ERR("Failed to copy memory to device %zu Byte: %s",
				byteNum, cuGetErrorInfo(errNum));
			return E_ffState::RTN_ERR;
		}

		return E_ffState::SUCCESS;
	}
	// Synchronous device-to-host copy of byteNum bytes.
	// Returns RTN_ERR on failure (the original always returned SUCCESS).
	E_ffState MemCopyD2H(void* h_mem, void* d_mem, size_t byteNum)
	{
		CUresult errNum;

		errNum = cuMemcpyDtoH(h_mem, (CUdeviceptr)d_mem, byteNum);
		if (errNum != CUDA_SUCCESS)
		{
			// Fixed direction in the message (was "to device") and %zu for size_t.
			ERR("Failed to copy memory from device %zu Byte: %s",
				byteNum, cuGetErrorInfo(errNum));
			return E_ffState::RTN_ERR;
		}

		return E_ffState::SUCCESS;
	}
	// Synchronous device-to-device copy of byteNum bytes.
	// Returns RTN_ERR on failure (the original always returned SUCCESS).
	E_ffState MemCopyD2D(void* d_dst, void* d_src, size_t byteNum)
	{
		CUresult errNum;

		errNum = cuMemcpyDtoD((CUdeviceptr)d_dst, (CUdeviceptr)d_src, byteNum);
		if (errNum != CUDA_SUCCESS)
		{
			// Fixed direction in the message and %zu for size_t.
			ERR("Failed to copy memory device to device %zu Byte: %s",
				byteNum, cuGetErrorInfo(errNum));
			return E_ffState::RTN_ERR;
		}

		return E_ffState::SUCCESS;
	}
#if 0
	// Disabled async copy variants. They reference a `stream` handle and a
	// `cuDrvGetErrorInfo` helper, neither of which exists in this file —
	// presumably why the block is compiled out. Kept for reference.
	E_ffState MemCopyH2DAsync(void* d_mem, void* h_mem, size_t byteNum)
	{
		CUresult errNum;

		errNum = cuMemcpyHtoDAsync((CUdeviceptr)d_mem, h_mem, byteNum, stream);
		if (errNum != CUDA_SUCCESS)
		{
			ERR("Failed to copy memory to device %d Byte: %s",
				byteNum, cuGetErrorInfo(errNum));
		}

		return E_ffState::SUCCESS;
	}
	E_ffState MemCopyD2HAsync(void* h_mem, void* d_mem, size_t byteNum)
	{
		CUresult errNum;

		errNum = cuMemcpyDtoHAsync(h_mem, (CUdeviceptr)d_mem, byteNum, stream);
		if (errNum != CUDA_SUCCESS)
		{
			ERR("Failed to copy memory to device %d Byte: %s",
				byteNum, cuDrvGetErrorInfo(errNum));
		}

		return E_ffState::SUCCESS;
	}
	E_ffState MemCopyD2DAsync(void* d_dst, void* d_src, size_t byteNum)
	{
		CUresult errNum;

		errNum = cuMemcpyDtoDAsync((CUdeviceptr)d_dst, (CUdeviceptr)d_src, byteNum, stream);
		if (errNum != CUDA_SUCCESS)
		{
			ERR("Failed to copy memory to device %d Byte: %s",
				byteNum, cuDrvGetErrorInfo(errNum));
		}

		return E_ffState::SUCCESS;
	}
#endif

	// Copy direction for bandwidthTest: Host/Device source -> destination.
	enum class BwDir
	{
		H2D = 1,	// host to device
		D2H = 2,	// device to host
		D2D = 3,	// device to device
		H2H = 4		// host to host (not exercised by the pitched/unpitched paths below)
	};
	// Measure copy bandwidth for a width x height float surface.
	//   mem     : host allocation kind (pageable / pinned / write-combined / mapped)
	//   isPitch : use cuMemAllocPitch + 2D copies instead of linear copies
	//   dir     : copy direction (H2H is not handled by the copy loops)
	//   isAsync : use the *Async copy entry points on a private stream
	// Timing uses CUevents recorded on the stream around `iteration` copies;
	// the result is logged, not returned.
	void bandwidthTest(size_t width, size_t height, E_MemType mem, bool isPitch, BwDir dir, bool isAsync)
	{
		size_t pitch = width * sizeof(float);
		size_t dataSize = width * height;
		size_t memSize = dataSize * sizeof(float);
		int iteration = 100;
		float elapsedTimeInMs = 0.0f;
		float bandwidthInMBs = 0.0f;

		float* h_data1, * h_data2;
		CUdeviceptr d_data1, d_data2;

		CUstream stream;
		CUevent startEvt, stopEvt;
		cuStreamCreate(&stream, 0);
		cuEventCreate(&startEvt, 0);
		cuEventCreate(&stopEvt, 0);

		// 1. allocate device memory
		if (isPitch)
		{
			cuMemAllocPitch(&d_data1, &pitch, width * sizeof(float), height, sizeof(float));
			cuMemAllocPitch(&d_data2, &pitch, width * sizeof(float), height, sizeof(float));
			memSize = pitch * height;	// host buffers must cover the padded rows
		}
		else
		{
			cuMemAlloc(&d_data1, memSize);
			cuMemAlloc(&d_data2, memSize);
		}

		// 2. allocate host memory
		switch (mem)
		{
		case E_MemType::Page:
			h_data1 = (float*)malloc(memSize);
			h_data2 = (float*)malloc(memSize);
			break;
		case E_MemType::Pin:
			cuMemHostAlloc((void**)&h_data1, memSize, 0);
			cuMemHostAlloc((void**)&h_data2, memSize, 0);
			break;
		case E_MemType::WrCmb:
			cuMemHostAlloc((void**)&h_data1, memSize, CU_MEMHOSTALLOC_WRITECOMBINED);
			cuMemHostAlloc((void**)&h_data2, memSize, CU_MEMHOSTALLOC_WRITECOMBINED);
			break;
		case E_MemType::Map:
			cuMemHostAlloc((void**)&h_data1, memSize, CU_MEMHOSTALLOC_DEVICEMAP);
			cuMemHostAlloc((void**)&h_data2, memSize, CU_MEMHOSTALLOC_DEVICEMAP);
			break;
		default:
			h_data1 = (float*)malloc(memSize);
			h_data2 = (float*)malloc(memSize);
		}

		// 3. initialize memory (and pre-seed the device source for D2H/D2D)
		for (uint64_t i = 0; i < memSize / sizeof(float); i++)
			h_data1[i] = (i & 0xff) * 1.0f;
		if (dir == BwDir::D2H || dir == BwDir::D2D)
			cuMemcpyHtoD(d_data1, h_data1, memSize);

		// 4. copy data
		CUDA_MEMCPY2D cpParam;
		memset(&cpParam, 0, sizeof(CUDA_MEMCPY2D));
		if (isPitch)
		{
			cpParam.WidthInBytes = width * sizeof(float);
			cpParam.Height = height;
			cpParam.srcPitch = pitch;
			cpParam.dstPitch = pitch;

			switch (dir)
			{
			case BwDir::H2D:
				cpParam.srcMemoryType = CU_MEMORYTYPE_HOST;
				cpParam.srcHost = h_data1;
				cpParam.dstMemoryType = CU_MEMORYTYPE_DEVICE;
				cpParam.dstDevice = d_data1;
				break;
			case BwDir::D2H:
				cpParam.srcMemoryType = CU_MEMORYTYPE_DEVICE;
				cpParam.srcDevice = d_data1;
				cpParam.dstMemoryType = CU_MEMORYTYPE_HOST;
				cpParam.dstHost = h_data1;
				break;
			case BwDir::D2D:
				cpParam.srcMemoryType = CU_MEMORYTYPE_DEVICE;
				cpParam.srcDevice = d_data1;
				cpParam.dstMemoryType = CU_MEMORYTYPE_DEVICE;
				cpParam.dstDevice = d_data2;
				break;
			}
		}

		cuEventRecord(startEvt, stream);
		for (int i = 0; i < iteration; i++)
		{
			if (isPitch)
			{
				// BUGFIX: the async/sync branches were inverted here — the
				// "async" request issued the blocking cuMemcpy2D and vice versa,
				// so the pitched sync-vs-async comparison measured the wrong API.
				if (isAsync)
				{
					cuMemcpy2DAsync(&cpParam, stream);
				}
				else
				{
					cuMemcpy2D(&cpParam);
				}
			}
			else
			{
				if (isAsync)
				{
					switch (dir)
					{
					case BwDir::H2D:	cuMemcpyHtoDAsync(d_data1, h_data1, memSize, stream); break;
					case BwDir::D2H:	cuMemcpyDtoHAsync(h_data2, d_data1, memSize, stream); break;
					case BwDir::D2D:	cuMemcpyDtoDAsync(d_data2, d_data1, memSize, stream); break;
					}
				}
				else
				{
					switch (dir)
					{
					case BwDir::H2D:	cuMemcpyHtoD(d_data1, h_data1, memSize); break;
					case BwDir::D2H:	cuMemcpyDtoH(h_data2, d_data1, memSize); break;
					case BwDir::D2D:	cuMemcpyDtoD(d_data2, d_data1, memSize); break;
					}
				}
			}
		}

		// get the total elapsed time in ms; cuEventSynchronize drains the stream,
		// so async copies are fully accounted for.
		cuEventRecord(stopEvt, stream);
		cuEventSynchronize(stopEvt);
		cuEventElapsedTime(&elapsedTimeInMs, startEvt, stopEvt);

		// calculate bandwidth in MB/s: bytes * iters / seconds, scaled to MiB
		bandwidthInMBs = ((float)(1 << 10) * memSize * (float)iteration) / (elapsedTimeInMs * (float)(1 << 20));

		if (mem == E_MemType::Page)
		{
			free(h_data1);
			free(h_data2);
		}
		else
		{
			cuMemFreeHost(h_data1);
			cuMemFreeHost(h_data2);
		}
		cuMemFree(d_data1);
		cuMemFree(d_data2);
		cuEventDestroy(startEvt);
		cuEventDestroy(stopEvt);
		cuStreamDestroy(stream);

		std::string logstr;
		if (isAsync)logstr = "Async";
		else		logstr = "Sync ";
		switch (dir)
		{
		case BwDir::H2D:	logstr += " H -> D"; break;
		case BwDir::D2H:	logstr += " D -> H"; break;
		case BwDir::D2D:	logstr += " D -> D"; break;
		}
		if (isPitch)
		{
			logstr += " Pitch  ";
		}
		else
		{
			logstr += " UnPitch";
		}
		if (dir == BwDir::D2D)
		{
			logstr += "         ";
		}
		else
		{
			switch (mem)
			{
			case E_MemType::Page:	logstr += " pageable"; break;
			case E_MemType::Pin:	logstr += " Pinned  "; break;
			case E_MemType::WrCmb:logstr += " wr cmb  "; break;
			case E_MemType::Map:	logstr += " mapped  "; break;
			}
		}
		LOG("%s %d(MB) = %.3f(GB/s).", logstr.data(), memSize / 1024 / 1024, bandwidthInMBs / 1024);
	}
	// Run the bandwidth comparison suite: direction, pageable-vs-pinned,
	// write-combined, sync-vs-async and pitched-vs-linear transfers.
	void BandwidthTest()
	{
		size_t width = 16 * 1024;
		size_t height = 2 * 1024;
		PrintSeperator('=');
		INFO("BandWidth Test");

		PrintSeperator('-');
		LOG("H->D VS D->H VS D->D");
		bandwidthTest(width, height, E_MemType::Pin, false, BwDir::H2D, false);
		bandwidthTest(width, height, E_MemType::Pin, false, BwDir::D2H, false);
		bandwidthTest(width, height, E_MemType::Pin, false, BwDir::D2D, false);
		PrintSeperator('-');
		LOG("Pageable VS Pinned");
		bandwidthTest(width, height, E_MemType::Page, false, BwDir::H2D, false);
		bandwidthTest(width, height, E_MemType::Pin, false, BwDir::H2D, false);
		PrintSeperator('-');
		LOG("Write Combined");
		bandwidthTest(width, height, E_MemType::Pin, false, BwDir::H2D, false);
		bandwidthTest(width, height, E_MemType::WrCmb, false, BwDir::H2D, false);
		PrintSeperator('-');
		LOG("Sync VS Async");
		bandwidthTest(width, height, E_MemType::Pin, false, BwDir::H2D, true);
		bandwidthTest(width, height, E_MemType::Pin, false, BwDir::H2D, false);
		PrintSeperator('-');
		// width deliberately not a multiple of the row alignment, to show the
		// effect of pitched allocation.
		width = 16 * 1024 + 1023; height = 2 * 1024;
		// Cast size_t args to int to match %d (passing size_t to %d is UB on
		// 64-bit); also fixed the "Pintch" typo in the log header.
		LOG("Pitch Memory: width = %d(K), height = %d(K).", (int)(width / 1024), (int)(height / 1024));
		bandwidthTest(width, height, E_MemType::Pin, true, BwDir::H2D, false);
		bandwidthTest(width, height, E_MemType::Pin, false, BwDir::H2D, false);

		PrintSeperator('=');
	}


	static std::string gKernelPath = "";	// cached kernel folder; lazily set below
	// Resolve (and cache) the kernel source folder: <work path>/kernel/ .
	// The returned string ends with DIR_SPT.
	std::string get_kernel_path()
	{
		if (gKernelPath == "")
		{
			gKernelPath = get_work_path() + "kernel" + DIR_SPT;
			// Log only when the path is actually set; the original emitted
			// "set kernel folder" on every lookup.
			LOG("set kernel folder  : " + gKernelPath);
		}

		return gKernelPath;
	}

	// Construct a kernel object and compile it from srcFile; when srcFile is
	// empty, the source is looked up as <kernel path><kernelName>.cu .
	// Throws -1 (as the original did) when compilation fails.
	GpuKernel::GpuKernel(std::string kernelName, std::string srcFile)
	{
		this->kernelName = kernelName;
		if (srcFile == "")
			// get_kernel_path() already ends with DIR_SPT; the original appended
			// a second separator here, producing "...kernel//name.cu".
			sourceFile = get_kernel_path() + kernelName + ".cu";
		else
			sourceFile = srcFile;
		// NOTE(review): some callers (cvt_type_dev) pass kernel SOURCE TEXT as
		// srcFile; creatKernelFromCppFile will then try to open that text as a
		// file path and fail — confirm intended usage.
		if (creatKernelFromCppFile() != E_ffState::SUCCESS)
			throw(-1);
	}
	// Compile the CUDA C++ source held in sourceString with NVRTC, dump the
	// PTX (dumpAssembly), load it as a module and resolve kernelName.
	// Returns RTN_ERR on any step failing.
	E_ffState GpuKernel::creatKernelFromCppString()
	{
		CUresult errNum;
		nvrtcResult rtcErr;

		// create program
		rtcErr = nvrtcCreateProgram(&program, sourceString, sourceFile.c_str(), 0, NULL, NULL);
		if (rtcErr != NVRTC_SUCCESS)
		{
			ERR("failed for nvrtc to create program.");
			return E_ffState::RTN_ERR;
		}

		// compiler to ptx
		// https://docs.nvidia.com/cuda/nvrtc/index.html#group__options
		{
			int optCnt = 0;
			char* opts[10];

			char* opt0;
			switch (DeviceInfo.Arch)
			{
			case E_IsaArch::Cuda60:opt0 = (char*)"--gpu-architecture=compute_60"; opts[optCnt] = opt0; optCnt++; break;
			case E_IsaArch::Cuda61:opt0 = (char*)"--gpu-architecture=compute_61"; opts[optCnt] = opt0; optCnt++; break;
			case E_IsaArch::Cuda62:opt0 = (char*)"--gpu-architecture=compute_62"; opts[optCnt] = opt0; optCnt++; break;
			case E_IsaArch::Cuda70:opt0 = (char*)"--gpu-architecture=compute_70"; opts[optCnt] = opt0; optCnt++; break;
			case E_IsaArch::Cuda72:opt0 = (char*)"--gpu-architecture=compute_72"; opts[optCnt] = opt0; optCnt++; break;
			case E_IsaArch::Cuda75:opt0 = (char*)"--gpu-architecture=compute_75"; opts[optCnt] = opt0; optCnt++; break;
			default:return E_ffState::RTN_ERR;
			}

			char* opt1 = (char*)"-DNV_KERNEL";
			opts[optCnt] = opt1; optCnt++;
			//char * opt2 = (char*)"-IE:\\proj\\feifei_dsp\\src\\operator\\kernel\\ ";
			//opts[optCnt] = opt2; optCnt++;

			rtcErr = nvrtcCompileProgram(program, optCnt, opts);

			// Always fetch the build log: the original only did so on success,
			// which discarded the compiler diagnostics exactly when they were
			// needed (compile failure).
			size_t logSize;
			nvrtcGetProgramLogSize(program, &logSize);
			char* tmpLog = (char*)alloca(logSize);
			nvrtcGetProgramLog(program, tmpLog);

			if (rtcErr != NVRTC_SUCCESS)
			{
				ERR("failed for nvrtc to compiler to ptx.");
				ERR("building log: " + std::string(tmpLog));
				return E_ffState::RTN_ERR;
			}
			//INFO("building log: " + std::string(tmpLog));

			// dump asm
			if (dumpAssembly() != E_ffState::SUCCESS)
				return E_ffState::RTN_ERR;
		}

		// create cuModule (ptxBuff was filled by dumpAssembly)
		errNum = cuModuleLoadDataEx(&cuModule, ptxBuff, 0, 0, 0);
		if (errNum != CUDA_SUCCESS)
		{
			ERR("failed to create cuModule: %s.", cuGetErrorInfo(errNum));
			return E_ffState::RTN_ERR;
		}

		// create kernel
		errNum = cuModuleGetFunction(&cuKernel, cuModule, kernelName.c_str());
		if (errNum != CUDA_SUCCESS)
		{
			ERR("failed to create kernel: %s.", cuGetErrorInfo(errNum));
			return E_ffState::RTN_ERR;
		}

		return E_ffState::SUCCESS;
	}
	// Read sourceFile into memory and compile it via creatKernelFromCppString.
	// Returns RTN_ERR if the file cannot be opened (the original logged the
	// error but fell through and compiled an empty string).
	E_ffState GpuKernel::creatKernelFromCppFile()
	{
		std::ifstream kernelFile(sourceFile, std::ios::in);
		if (!kernelFile.is_open())
		{
			ERR("Failed to open file for reading: " + sourceFile);
			return E_ffState::RTN_ERR;
		}
		std::ostringstream oss;
		oss << kernelFile.rdbuf();
		std::string str = oss.str();
		// NOTE(review): sourceString points into the local `str`; it is only
		// valid during the call below and dangles after this function returns.
		sourceString = (char*)(str.c_str());

		return creatKernelFromCppString();
	}
	// Build sourceFile with the external compiler (nvcc) into a fatbin at
	// kernelFile, selecting the arch flags from DeviceInfo.Arch.
	// Returns RTN_ERR for an unsupported architecture.
	E_ffState GpuKernel::creatKernelFromAsmFile()
	{
		switch (DeviceInfo.Arch)
		{
		case E_IsaArch::Cuda60:buildOption = "--fatbin -gencode arch=compute_60, code=sm_60 --ptxas-options=-v -Xptxas -disable-optimizer-consts"; break;
		case E_IsaArch::Cuda61:buildOption = "--fatbin -gencode arch=compute_61, code=sm_61 --ptxas-options=-v -Xptxas -disable-optimizer-consts"; break;
		case E_IsaArch::Cuda62:buildOption = "--fatbin -gencode arch=compute_62, code=sm_62 --ptxas-options=-v -Xptxas -disable-optimizer-consts"; break;
		case E_IsaArch::Cuda70:buildOption = "--fatbin -gencode arch=compute_70, code=sm_70 --ptxas-options=-v -Xptxas -disable-optimizer-consts"; break;
		case E_IsaArch::Cuda72:buildOption = "--fatbin -gencode arch=compute_72, code=sm_72 --ptxas-options=-v -Xptxas -disable-optimizer-consts"; break;
		case E_IsaArch::Cuda75:buildOption = "--fatbin -gencode arch=compute_75, code=sm_75 --ptxas-options=-v -Xptxas -disable-optimizer-consts"; break;
		default:return E_ffState::RTN_ERR;
		}

		// Fixed missing space before "-o": the original produced
		// "...-disable-optimizer-consts-o <file>", a malformed command line.
		std::string cmd = compiler + " " + buildOption + " -o " + kernelFile + " " + sourceFile;
		exec_cmd(cmd);

		return E_ffState::SUCCESS;
	}
	// Fetch the PTX from the compiled NVRTC program into ptxBuff (kept alive
	// for the subsequent cuModuleLoadDataEx) and write it to a .ptx file next
	// to the kernel sources. Returns RTN_ERR on any failure.
	E_ffState GpuKernel::dumpAssembly()
	{
		nvrtcResult rtcErr;

		// fetch PTX
		size_t ptxSize;
		rtcErr = nvrtcGetPTXSize(program, &ptxSize);
		if (rtcErr != NVRTC_SUCCESS)
		{
			ERR("failed for nvrtc to get ptx size.");
			return E_ffState::RTN_ERR;
		}

		// NOTE(review): ptxBuff is never freed in this file — presumably owned
		// and released by the GpuKernel destructor; confirm.
		ptxBuff = (char*)malloc(ptxSize);
		rtcErr = nvrtcGetPTX(program, ptxBuff);
		if (rtcErr != NVRTC_SUCCESS)
		{
			ERR("failed for nvrtc to get ptx.");
			return E_ffState::RTN_ERR;
		}

		// Use get_kernel_path() rather than the raw gKernelPath global: the
		// global may still be "" if nobody resolved the path yet.
		if (sourceFile == "")
		{
			kernelFile = get_kernel_path() + DIR_SPT + kernelName + ".ptx";
		}
		else
		{
			kernelFile = get_kernel_path() + DIR_SPT + get_file_name(sourceFile) + ".ptx";
		}

		std::ofstream fout(kernelFile.c_str(), std::ios::out);
		if (!fout.is_open())
		{
			ERR("can't open save file: " + kernelFile);
			// The original fell through and wrote to a closed stream.
			return E_ffState::RTN_ERR;
		}
		fout.write(ptxBuff, ptxSize);
		fout.close();

		return E_ffState::SUCCESS;
	}
	// Launch cuKernel with the argument buffer prepared in argsBuff/argsSize,
	// using the GroupNum x GroupSize configuration on the default stream.
	// Returns RTN_ERR if the launch fails (the original always returned SUCCESS).
	E_ffState GpuKernel::Launch()
	{
		CUresult errNum;

		// Pass the packed argument buffer via the extra-options protocol.
		void* config[] =
		{
			CU_LAUNCH_PARAM_BUFFER_POINTER, argsBuff,
			CU_LAUNCH_PARAM_BUFFER_SIZE,    &argsSize,
			CU_LAUNCH_PARAM_END
		};

		//if (enProf)
		//	cuEventRecord(startEvt, stream);

		errNum = cuLaunchKernel(cuKernel,
			GroupNum.x, GroupNum.y, GroupNum.z,
			GroupSize.x, GroupSize.y, GroupSize.z,
			0, NULL, NULL, config);

		if (errNum != CUDA_SUCCESS)
		{
			ERR("Failed launch kernel: %s", cuGetErrorInfo(errNum));
			return E_ffState::RTN_ERR;
		}
		/*if (enProf)
		{
			float milsec;
			cuEventRecord(stopEvt, stream);
			cuEventSynchronize(stopEvt);
			cuEventElapsedTime(&milsec, startEvt, stopEvt);
			kernelExeTime = milsec * 1e-3;
		}*/

		return E_ffState::SUCCESS;
	}

// Lazily-constructed kernel wrapper shared by the cvt_type_dev overloads;
// created on first use by cvt_type_dev (see below), never freed.
static GpuKernel* cvt_type_kernel_obj = nullptr;
// NVRTC source for an element-wise type-conversion kernel: one thread per
// element converts ps[i] from SrcT to DstT with a bounds guard.
// Fix: the store previously cast through (float) instead of (DstT), which is
// inconsistent with cvt_cplx_type_kernel below and would silently truncate
// for any future instantiation whose destination is wider than float.
static std::string cvt_type_kernel_src = " \
template<typename SrcT, typename DstT> __global__ void cvt_type_kernel(const void* d_src, void* d_dst, size_t len) \
{ \
	uint32_t glbIdx = blockDim.x * blockIdx.x + threadIdx.x; \
	if (glbIdx >= len) \
		return; \
\
	SrcT* ps = (SrcT*)d_src; \
	DstT* pd = (DstT*)d_dst; \
	pd[glbIdx] = (DstT)(ps[glbIdx]); \
}";
// Convert a device buffer of float to double. *d_dst is allocated lazily
// when null. NOTE(review): the kernel launch below is commented out, so at
// present this routine only allocates the output, builds the kernel object,
// and computes launch geometry.
void cvt_type_dev(float* d_src, double** d_dst, size_t len)
{
	const size_t memSize = len * sizeof(double);

	// Allocate the destination buffer on first use.
	if (*d_dst == NULL)
		CUDA_ASSERT(cudaMalloc(d_dst, memSize));

	// Build the shared conversion kernel on first call.
	if (cvt_type_kernel_obj == nullptr)
		cvt_type_kernel_obj = new GpuKernel("cvt_type_kernel", cvt_type_kernel_src);

	// 1-D geometry: MAX_GRP_SIZE threads per block, ceil-div block count.
	dim3 block, grid;
	block.x = MAX_GRP_SIZE;
	grid.x = ((uint32_t)len + block.x - 1) / block.x;
	//cvt_type_kernel<float, double> << <grid, block >> > ((void*)d_src, (void*)(*d_dst), len);
}
// Convert a device buffer of double to float. *d_dst is allocated lazily
// when null. NOTE(review): the kernel launch below is commented out, so at
// present this routine only allocates the output and computes launch geometry.
void cvt_type_dev(double* d_src, float** d_dst, size_t len)
{
	size_t memSize;
	memSize = len * sizeof(float);

	if (*d_dst == NULL)
		CUDA_ASSERT(cudaMalloc(d_dst, memSize));

	// Consistency fix: the float->double overload lazily constructs the shared
	// kernel object, but this overload skipped it — leaving
	// cvt_type_kernel_obj null if only this overload runs when the launch
	// below is re-enabled.
	if (cvt_type_kernel_obj == nullptr)
	{
		cvt_type_kernel_obj = new GpuKernel("cvt_type_kernel", cvt_type_kernel_src);
	}

	dim3 global_size, group_size, group_num;
	group_size.x = MAX_GRP_SIZE;
	global_size.x = (uint32_t)len;
	group_num.x = (global_size.x + group_size.x - 1) / group_size.x;
	//cvt_type_kernel<double, float> << <group_num, group_size >> > ((void*)d_src, (void*)(*d_dst), len);
}

// Lazily-constructed kernel wrapper for cvt_cplx_type_dev. NOTE(review):
// currently never created — the launches below are commented out and no
// overload initializes it.
static GpuKernel* cvt_cplx_type_kernel_obj = nullptr;
// NVRTC source for an interleaved-complex type-conversion kernel: one thread
// per complex element converts both the real (2*i) and imaginary (2*i+1)
// components from SrcT to DstT, with a bounds guard on the element index.
static std::string cvt_cplx_type_kernel_src = " \
template<typename SrcT, typename DstT> __global__ void cvt_cplx_type_kernel(const void* d_src, void* d_dst, size_t len) \
{ \
	uint32_t glbIdx = blockDim.x * blockIdx.x + threadIdx.x; \
	if (glbIdx >= len) \
		return; \
 \
	SrcT* ps = (SrcT*)d_src; \
	DstT* pd = (DstT*)d_dst; \
	pd[glbIdx * 2 + 0] = (DstT)(ps[glbIdx * 2 + 0]); \
	pd[glbIdx * 2 + 1] = (DstT)(ps[glbIdx * 2 + 1]); \
}";
// Convert an interleaved complex device buffer from float2 to double2.
// *d_dst is allocated lazily when null. NOTE(review): the kernel launch
// below is commented out, so at present this routine only allocates the
// output and computes launch geometry.
void cvt_cplx_type_dev(float2* d_src, double2** d_dst, size_t len)
{
	const size_t memSize = len * sizeof(double2);

	// Allocate the destination buffer on first use.
	if (*d_dst == NULL)
		CUDA_ASSERT(cudaMalloc(d_dst, memSize));

	// 1-D geometry: MAX_GRP_SIZE threads per block, ceil-div block count.
	dim3 block, grid;
	block.x = MAX_GRP_SIZE;
	grid.x = ((uint32_t)len + block.x - 1) / block.x;
	//cvt_cplx_type_kernel<float, double> << <grid, block >> > ((void*)d_src, (void*)(*d_dst), len);
}
// Convert an interleaved complex device buffer from double2 to float2.
// *d_dst is allocated lazily when null. NOTE(review): the kernel launch
// below is commented out, so at present this routine only allocates the
// output and computes launch geometry.
void cvt_cplx_type_dev(double2* d_src, float2** d_dst, size_t len)
{
	const size_t memSize = len * sizeof(float2);

	// Allocate the destination buffer on first use.
	if (*d_dst == NULL)
		CUDA_ASSERT(cudaMalloc(d_dst, memSize));

	// 1-D geometry: MAX_GRP_SIZE threads per block, ceil-div block count.
	dim3 block, grid;
	block.x = MAX_GRP_SIZE;
	grid.x = ((uint32_t)len + block.x - 1) / block.x;
	//cvt_cplx_type_kernel<double, float> << <grid, block >> > ((void*)d_src, (void*)(*d_dst), len);
}

// Lazily-constructed kernel wrapper for seperate_cplx_dev (spelling kept —
// it is the established identifier). NOTE(review): currently never created;
// the launches below are commented out and no overload initializes it.
static GpuKernel* seperate_cplx_kernel_obj = nullptr;
// NVRTC source for a complex de-interleave kernel: one thread per complex
// element copies the real part (index 2*i) into d_real and the imaginary
// part (index 2*i+1) into d_imag, converting SrcT to DstT on store.
static std::string seperate_cplx_kernel_src = " \
template<typename SrcT, typename DstT> __global__ void seperate_cplx_kernel(const void* d_cplx, void* d_real, void* d_imag, size_t len) \
{ \
	uint32_t glbIdx = blockDim.x * blockIdx.x + threadIdx.x; \
	if (glbIdx >= len) \
		return; \
 \
	SrcT* ps = (SrcT*)d_cplx; \
	DstT* pr = (DstT*)d_real; \
	DstT* pi = (DstT*)d_imag; \
	pr[glbIdx] = (DstT)(ps[glbIdx * 2 + 0]); \
	pi[glbIdx] = (DstT)(ps[glbIdx * 2 + 1]); \
}";
// Split an interleaved float2 device buffer into separate float real/imag
// planes. *d_real / *d_imag are allocated lazily when null. NOTE(review):
// the kernel launch below is commented out, so at present this routine only
// allocates the outputs and computes launch geometry.
void seperate_cplx_dev(float2* d_cplx, float** d_real, float** d_imag, size_t len)
{
	const size_t memSize = len * sizeof(float);

	// Allocate the output planes on first use.
	if (*d_real == NULL)
		CUDA_ASSERT(cudaMalloc(d_real, memSize));
	if (*d_imag == NULL)
		CUDA_ASSERT(cudaMalloc(d_imag, memSize));

	// 1-D geometry: MAX_GRP_SIZE threads per block, ceil-div block count.
	dim3 block, grid;
	block.x = MAX_GRP_SIZE;
	grid.x = ((uint32_t)len + block.x - 1) / block.x;
	//seperate_cplx_kernel<float, float> << <grid, block >> > ((void*)d_cplx, (void*)(*d_real), (void*)(*d_imag), len);
}
// Split an interleaved float2 device buffer into separate double real/imag
// planes. *d_real / *d_imag are allocated lazily when null. NOTE(review):
// the kernel launch below is commented out, so at present this routine only
// allocates the outputs and computes launch geometry.
void seperate_cplx_dev(float2* d_cplx, double** d_real, double** d_imag, size_t len)
{
	const size_t memSize = len * sizeof(double);

	// Allocate the output planes on first use.
	if (*d_real == NULL)
		CUDA_ASSERT(cudaMalloc(d_real, memSize));
	if (*d_imag == NULL)
		CUDA_ASSERT(cudaMalloc(d_imag, memSize));

	// 1-D geometry: MAX_GRP_SIZE threads per block, ceil-div block count.
	dim3 block, grid;
	block.x = MAX_GRP_SIZE;
	grid.x = ((uint32_t)len + block.x - 1) / block.x;
	//seperate_cplx_kernel<float, double> << <grid, block >> > ((void*)d_cplx, (void*)(*d_real), (void*)(*d_imag), len);
}
// Split an interleaved double2 device buffer into separate double real/imag
// planes. *d_real / *d_imag are allocated lazily when null. NOTE(review):
// the kernel launch below is commented out, so at present this routine only
// allocates the outputs and computes launch geometry.
void seperate_cplx_dev(double2* d_cplx, double** d_real, double** d_imag, size_t len)
{
	const size_t memSize = len * sizeof(double);

	// Allocate the output planes on first use.
	if (*d_real == NULL)
		CUDA_ASSERT(cudaMalloc(d_real, memSize));
	if (*d_imag == NULL)
		CUDA_ASSERT(cudaMalloc(d_imag, memSize));

	// 1-D geometry: MAX_GRP_SIZE threads per block, ceil-div block count.
	dim3 block, grid;
	block.x = MAX_GRP_SIZE;
	grid.x = ((uint32_t)len + block.x - 1) / block.x;
	//seperate_cplx_kernel<double, double> << <grid, block >> > ((void*)d_cplx, (void*)(*d_real), (void*)(*d_imag), len);
}
// Split an interleaved double2 device buffer into separate float real/imag
// planes. *d_real / *d_imag are allocated lazily when null. NOTE(review):
// the kernel launch below is commented out, so at present this routine only
// allocates the outputs and computes launch geometry.
void seperate_cplx_dev(double2* d_cplx, float** d_real, float** d_imag, size_t len)
{
	const size_t memSize = len * sizeof(float);

	// Allocate the output planes on first use.
	if (*d_real == NULL)
		CUDA_ASSERT(cudaMalloc(d_real, memSize));
	if (*d_imag == NULL)
		CUDA_ASSERT(cudaMalloc(d_imag, memSize));

	// 1-D geometry: MAX_GRP_SIZE threads per block, ceil-div block count.
	dim3 block, grid;
	block.x = MAX_GRP_SIZE;
	grid.x = ((uint32_t)len + block.x - 1) / block.x;
	//seperate_cplx_kernel<double, float> << <grid, block >> > ((void*)d_cplx, (void*)(*d_real), (void*)(*d_imag), len);
}

// Lazily-constructed kernel wrapper for form_cplx_dev. NOTE(review):
// currently never created — the launches below are commented out and no
// overload initializes it.
static GpuKernel* form_cplx_kernel_obj = nullptr;
// NVRTC source for a complex interleave kernel: one thread per element
// writes d_real[i] to index 2*i and d_imag[i] to index 2*i+1 of d_cplx.
// The SrcT -> DstT conversion here is implicit on the store (no explicit
// cast, unlike cvt_cplx_type_kernel above).
static std::string form_cplx_kernel_src = " \
template<typename SrcT, typename DstT> __global__ void form_cplx_kernel(void* d_real, void* d_imag, void* d_cplx, size_t len) \
{ \
	uint32_t glbIdx = blockDim.x * blockIdx.x + threadIdx.x; \
	if (glbIdx >= len) \
		return; \
 \
	SrcT* pr = (SrcT*)d_real; \
	SrcT* pi = (SrcT*)d_imag; \
	DstT* pc = (DstT*)d_cplx; \
	pc[glbIdx * 2 + 0] = pr[glbIdx]; \
	pc[glbIdx * 2 + 1] = pi[glbIdx]; \
}";
// Interleave float real/imag device planes into a float2 complex buffer.
// *d_cplx is allocated lazily when null. NOTE(review): the kernel launch
// below is commented out, so at present this routine only allocates the
// output and computes launch geometry.
void form_cplx_dev(float* d_real, float* d_imag, float2** d_cplx, size_t len)
{
	const size_t memSize = len * sizeof(float2);

	// Allocate the interleaved output buffer on first use.
	if (*d_cplx == NULL)
		CUDA_ASSERT(cudaMalloc(d_cplx, memSize));

	// 1-D geometry: MAX_GRP_SIZE threads per block, ceil-div block count.
	dim3 block, grid;
	block.x = MAX_GRP_SIZE;
	grid.x = ((uint32_t)len + block.x - 1) / block.x;
	//form_cplx_kernel<float, float> << <grid, block >> > ((void*)d_real, (void*)d_imag, (void*)(*d_cplx), len);
}
// Interleave float real/imag device planes into a double2 complex buffer.
// *d_cplx is allocated lazily when null. NOTE(review): the kernel launch
// below is commented out, so at present this routine only allocates the
// output and computes launch geometry.
void form_cplx_dev(float* d_real, float* d_imag, double2** d_cplx, size_t len)
{
	const size_t memSize = len * sizeof(double2);

	// Allocate the interleaved output buffer on first use.
	if (*d_cplx == NULL)
		CUDA_ASSERT(cudaMalloc(d_cplx, memSize));

	// 1-D geometry: MAX_GRP_SIZE threads per block, ceil-div block count.
	dim3 block, grid;
	block.x = MAX_GRP_SIZE;
	grid.x = ((uint32_t)len + block.x - 1) / block.x;
	//form_cplx_kernel<float, double> << <grid, block >> > ((void*)d_real, (void*)d_imag, (void*)(*d_cplx), len);
}
// Interleave double real/imag device planes into a float2 complex buffer.
// *d_cplx is allocated lazily when null. NOTE(review): the kernel launch
// below is commented out, so at present this routine only allocates the
// output and computes launch geometry.
void form_cplx_dev(double* d_real, double* d_imag, float2** d_cplx, size_t len)
{
	const size_t memSize = len * sizeof(float2);

	// Allocate the interleaved output buffer on first use.
	if (*d_cplx == NULL)
		CUDA_ASSERT(cudaMalloc(d_cplx, memSize));

	// 1-D geometry: MAX_GRP_SIZE threads per block, ceil-div block count.
	dim3 block, grid;
	block.x = MAX_GRP_SIZE;
	grid.x = ((uint32_t)len + block.x - 1) / block.x;
	//form_cplx_kernel<double, float> << <grid, block >> > ((void*)d_real, (void*)d_imag, (void*)(*d_cplx), len);
}
// Interleave double real/imag device planes into a double2 complex buffer.
// *d_cplx is allocated lazily when null. NOTE(review): the kernel launch
// below is commented out, so at present this routine only allocates the
// output and computes launch geometry.
void form_cplx_dev(double* d_real, double* d_imag, double2** d_cplx, size_t len)
{
	const size_t memSize = len * sizeof(double2);

	// Allocate the interleaved output buffer on first use.
	if (*d_cplx == NULL)
		CUDA_ASSERT(cudaMalloc(d_cplx, memSize));

	// 1-D geometry: MAX_GRP_SIZE threads per block, ceil-div block count.
	dim3 block, grid;
	block.x = MAX_GRP_SIZE;
	grid.x = ((uint32_t)len + block.x - 1) / block.x;
	//form_cplx_kernel<double, double> << <grid, block >> > ((void*)d_real, (void*)d_imag, (void*)(*d_cplx), len);
}

/**
 * Verify a device float buffer against a host reference.
 *
 * Copies `len` floats from device pointer d_data to the host, compares
 * element-wise against h_data, prints the first mismatch (or a success
 * message), and frees the temporary buffer.
 *
 * Fix: the original used FLT_MIN (~1.2e-38, the smallest normal float) as
 * the tolerance, which effectively demanded bit-exact equality — host and
 * device float math do not guarantee that. Uses a mixed absolute/relative
 * tolerance instead. Also checks the malloc result.
 */
void CompareData(float* h_data, float* d_data, uint32_t len)
{
	float* dev_rslt = (float*)malloc(len * sizeof(float));
	if (dev_rslt == NULL)
	{
		printf("    - Failed to allocate verify buffer.\n");
		return;
	}

	// NOTE(review): return code of cuMemcpyDtoH is unchecked, matching the
	// original; consider routing through the project's error macro.
	cuMemcpyDtoH(dev_rslt, (CUdeviceptr)d_data, len * sizeof(float));

	bool match = true;
	for (unsigned int i = 0; i < len; i++)
	{
		// abs tolerance for values near zero, relative otherwise.
		float tol = 16.0f * FLT_EPSILON * fmaxf(1.0f, fabsf(h_data[i]));
		if (fabsf(h_data[i] - dev_rslt[i]) > tol)
		{
			printf("    - First Error:\n");
			printf("    - Host  : [%d] = %.2f.\n", i, h_data[i]);
			printf("    - Device: [%d] = %.2f.\n", i, dev_rslt[i]);
			match = false;
			break;
		}
	}

	// As before, an empty buffer prints nothing.
	if (match && len > 0)
	{
		printf("    - Verify Success.\n");
	}

	free(dev_rslt);
}
} // end namespace feifei
