#ifndef __DEVICE_UTILS_H__
#define __DEVICE_UTILS_H__

#include <stdio.h>

#include <cstring>
#include <iostream>

#include "cuda.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"

// Return the number of CUDA cores per streaming multiprocessor for the
// device described by `prop`, derived from its compute capability.
// The table mirrors _ConvertSMVer2Cores() from the CUDA samples'
// helper_cuda.h. Returns 0 for unknown compute capabilities.
// NOTE(review): defined in a header without `inline`/`static`; including
// this header from more than one translation unit will produce duplicate
// symbols — confirm the intended usage.
int get_core_numbers( cudaDeviceProp &prop )
{
	switch ( prop.major )
	{
		case 1:	// Tesla
			return 8;
		case 2:	// Fermi: GF100 has 32 cores/MP, GF10x has 48
			if ( prop.minor == 0 ) { return 32; }
			if ( prop.minor == 1 ) { return 48; }
			return 0;
		case 3:	// Kepler
			return 192;
		case 5:	// Maxwell
			return 128;
		case 6:	// Pascal: 6.0 has 64 cores/MP, 6.1/6.2 have 128
			return ( prop.minor == 0 ) ? 64 : 128;
		case 7:	// Volta (7.0/7.2) and Turing (7.5)
			return 64;
		case 8:	// Ampere: 8.0 has 64 cores/MP, 8.6/8.7/8.9 have 128
			return ( prop.minor == 0 ) ? 64 : 128;
		case 9:	// Hopper
			return 128;
		default:
			return 0;
	}
}

// Print the current device's global-memory usage to stdout:
// used and free byte counts, used megabytes, and percentage used.
void print_device_memory_usage( void )
{
	size_t free = 0;
	size_t total = 0;

	HANDLE_CUDA_ERROR( cudaMemGetInfo( &free, &total ) );

	size_t used = total - free;
	float totalMem = (float)total / 1048576.0f;
	float usedMem = (float)used / 1048576.0f;

	// %zu is the correct conversion for size_t; the previous %d is
	// undefined behavior on platforms where size_t is wider than int.
	printf( "Device global memory usage: %zu / %zu bytes used / free. (%.3f MB, %.1f%%)\n", used, free, usedMem, (usedMem / totalMem) * 100.0f );
	return;
}

// Query the CUDA runtime and return the number of free bytes of global
// memory on the current device.
size_t get_device_memory_free( void )
{
	size_t freeBytes = 0;
	size_t totalBytes = 0;

	HANDLE_CUDA_ERROR( cudaMemGetInfo( &freeBytes, &totalBytes ) );

	return freeBytes;
}

// Query the CUDA runtime and return the total number of bytes of global
// memory on the current device.
size_t get_device_memory_total( void )
{
	size_t freeBytes = 0;
	size_t totalBytes = 0;

	HANDLE_CUDA_ERROR( cudaMemGetInfo( &freeBytes, &totalBytes ) );

	return totalBytes;
}

// Select the CUDA device closest to compute capability `major`.`minor`,
// make it current, print its key properties to stdout, and return its
// device ID. Prints an error message (but still returns the ID) when the
// chosen device's compute capability is below the requested one.
// Fix: the `major`/`minor` parameters were previously ignored — the body
// hard-coded revision 2.0 in cudaChooseDevice, the messages, and the
// capability check. Behavior is unchanged for the original init_device(2, 0).
int init_device( int major, int minor )
{
	cudaDeviceProp prop;
	memset( &prop, 0, sizeof(cudaDeviceProp) );

	int devId = 0;

	std::cout << std::endl;
	HANDLE_CUDA_ERROR( cudaGetDevice( &devId ) );
	std::cout << "Current CUDA device ID: " << devId << std::endl;

	// Ask the runtime for the device best matching the requested revision,
	// then make it the current device and read back its real properties.
	prop.major = major;
	prop.minor = minor;
	HANDLE_CUDA_ERROR( cudaChooseDevice( &devId, &prop ) );
	std::cout << "Choosing the device ID " << devId << " which is closest to CUDA revision " << major << "." << minor << "." << std::endl;
	HANDLE_CUDA_ERROR( cudaSetDevice( devId ) );
	HANDLE_CUDA_ERROR( cudaGetDeviceProperties( &prop, devId ) );

	// Compare the full major.minor pair, not just the major revision.
	if ( prop.major < major || ( prop.major == major && prop.minor < minor ) )
	{ std::cout << "Error: This program needs a CUDA device revision equals or greater than " << major << "." << minor << "." << std::endl; }

	std::cout << std::endl;
	std::cout << "Name:                                        " << prop.name << std::endl;
	std::cout << "Compute capability:                          " << prop.major << "." << prop.minor << std::endl;
	std::cout << "Total constant memory:                       " << prop.totalConstMem << " (" << (prop.totalConstMem / 1024) << " KB)" << std::endl;
	std::cout << "Total global memory:                         " << prop.totalGlobalMem << " (" << (prop.totalGlobalMem / 1048576) << " MB)" << std::endl;
	std::cout << "Maximum number of threads per block:         " << prop.maxThreadsPerBlock << std::endl;
	std::cout << "Multiprocessor count:                        " << prop.multiProcessorCount << std::endl;

	int cores = get_core_numbers(prop);

	if ( cores > 0 )
	{
		std::cout << "Integer and fp cores per MP:                 " << cores << std::endl;
		std::cout << "Multiprocessors x Cores/MP = Cores:          " << (cores * prop.multiProcessorCount) << std::endl;
	}
	else
	{
		std::cout << "Integer and fp cores per MP:                 " << "unknown" << std::endl;
		std::cout << "Multiprocessors x Cores/MP = Cores:          " << "unknown" << std::endl;
	}

	std::cout << "Maximum resident threads per multiprocessor: " << prop.maxThreadsPerMultiProcessor << std::endl;
	std::cout << "32-bit registers available per block:        " << prop.regsPerBlock << std::endl;
	std::cout << "Shared memory available per block:           " << prop.sharedMemPerBlock << " (" << (prop.sharedMemPerBlock / 1024) << " KB)" << std::endl;
	std::cout << "Unified Virtual Addressing:                  " << prop.unifiedAddressing << std::endl;
	std::cout << std::endl;

	return devId;
}

// Print the current device's runtime limits to stdout: per-thread stack
// size, device-side printf() FIFO size, and device malloc()/free() heap size.
void print_device_limit( void )
{
	size_t limitValue = 0;

	HANDLE_CUDA_ERROR( cudaDeviceGetLimit ( &limitValue, cudaLimitStackSize ) );
	std::cout << "Stack size of each GPU thread: " << limitValue << " bytes." << std::endl;

	HANDLE_CUDA_ERROR( cudaDeviceGetLimit ( &limitValue, cudaLimitPrintfFifoSize ) );
	std::cout << "FIFO size used by the printf() device system call: " << limitValue << " bytes." << std::endl;

	HANDLE_CUDA_ERROR( cudaDeviceGetLimit ( &limitValue, cudaLimitMallocHeapSize ) );
	std::cout << "Heap size used by the malloc() and free() device system calls: " << limitValue << " bytes." << std::endl;

	return;
}

// Return the current per-thread stack size limit (bytes) of the device.
size_t get_device_stack_size_limit( void )
{
	size_t stackBytes = 0;

	HANDLE_CUDA_ERROR( cudaDeviceGetLimit ( &stackBytes, cudaLimitStackSize ) );

	return stackBytes;
}

// Request a new per-thread stack size limit (bytes) for the device, then
// read back and report the value the driver actually set (the driver may
// round or clamp the request).
void set_device_stack_size_limit( size_t value )
{
	HANDLE_CUDA_ERROR( cudaDeviceSetLimit ( cudaLimitStackSize, value ) );
	std::cout << "Set stack size of each GPU thread = " << value << " bytes." << std::endl;

	// Read back into a separate variable so the requested value is preserved.
	size_t actual = 0;
	HANDLE_CUDA_ERROR( cudaDeviceGetLimit ( &actual, cudaLimitStackSize ) );
	std::cout << "Driver set cudaLimitStackSize as " << actual << " bytes." << std::endl;

	return;
}

// Return the current device malloc()/free() heap size limit (bytes).
size_t get_device_heap_size_limit( void )
{
	size_t heapBytes = 0;

	HANDLE_CUDA_ERROR( cudaDeviceGetLimit ( &heapBytes, cudaLimitMallocHeapSize ) );

	return heapBytes;
}

// Request a new device malloc()/free() heap size limit (bytes), then read
// back and report the value the driver actually set (the driver may round
// or clamp the request).
void set_device_heap_size_limit( size_t value )
{
	HANDLE_CUDA_ERROR( cudaDeviceSetLimit ( cudaLimitMallocHeapSize, value ) );
	std::cout << "Set heap size used by the malloc() and free() device system calls = " << value << " bytes / " << (value / (1024 * 1024)) << " MB" << std::endl;

	// Read back into a separate variable so the requested value is preserved.
	size_t actual = 0;
	HANDLE_CUDA_ERROR( cudaDeviceGetLimit ( &actual, cudaLimitMallocHeapSize ) );
	std::cout << "Driver set cudaLimitMallocHeapSize as " << actual << " bytes / " << (actual / (1024 * 1024)) << " MB" << std::endl;

	return;
}

#endif /* __DEVICE_UTILS_H__ */
