#include "stdafx.h"
#include "CudaUtils.h"

#include <algorithm>

// Ceiling division: round a / b up to the next integer.
// The mod-based form avoids the overflow risk of (a + b - 1) / b for large a.
int IntDivUp(int a, int b)
{
	int quotient = a / b;
	if (a % b != 0)
		++quotient;
	return quotient;
}

// Compute a 1D grid / thread-block configuration for n elements.
//
// n          - total number of elements to process (may be 0)
// blockSize  - preferred threads per block
// numBlocks  - out: number of blocks needed to cover n elements
// numThreads - out: threads per block actually used, min(blockSize, n)
//
// For n <= 0 both outputs are set to 0 so callers can skip the launch;
// previously n == 0 caused a division by zero (numThreads was 0).
void ComputeGridSize(int n, int blockSize, int &numBlocks, int &numThreads)
{
	if (n <= 0) {
		numBlocks = numThreads = 0;
		return;
	}
	numThreads = std::min(blockSize, n);
	// ceil(n / numThreads); numThreads >= 1 is guaranteed by the guard above
	numBlocks = (n + numThreads - 1) / numThreads;
}

// Compute a 1D grid / thread-block configuration for p_Elements elements,
// returned as dim3 values (y and z are always 1).
//
// p_Elements        - total number of elements to process (may be 0)
// p_ThreadsPerBlock - preferred threads per block
// p_Blocks          - out: grid dimensions
// p_Threads         - out: block dimensions, x = min(p_ThreadsPerBlock, p_Elements)
//
// For p_Elements <= 0, p_Blocks.x is set to 0 (and p_Threads.x to 1) so the
// caller can detect an empty launch; previously p_Threads.x became 0, which
// is an invalid launch configuration.
void ComputeGridSizeDim(int p_Elements, int p_ThreadsPerBlock, dim3& p_Blocks, dim3& p_Threads)
{
	p_Threads.y = p_Threads.z = 1;
	p_Blocks.y = p_Blocks.z = 1;

	if (p_Elements <= 0) {
		p_Threads.x = 1;
		p_Blocks.x = 0;
		return;
	}

	p_Threads.x = std::min(p_ThreadsPerBlock, p_Elements);
	// ceil(p_Elements / p_ThreadsPerBlock), as in the original
	p_Blocks.x = (p_Elements + p_ThreadsPerBlock - 1) / p_ThreadsPerBlock;
}

// Check for a pending CUDA error and terminate the process with a diagnostic
// message if one occurred.
//
// errorMessage - caller-supplied context string included in the diagnostic.
//
// In _DEBUG builds the device is also synchronized, so asynchronous kernel
// failures surface at the call site instead of at some later API call.
void CudaCheckError(const char *errorMessage)
{
	cudaError_t err = cudaGetLastError();
	if( cudaSuccess != err) {
		fprintf(stderr, "CudaCheckError() CUTIL CUDA error: %s: %s.\n",
			errorMessage, cudaGetErrorString( err) );
#ifdef _DEBUG
		// keep the console window open so the message can be read
		system("pause");
#endif
		exit(-1);
	}
#ifdef _DEBUG
	// cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
	// documented drop-in replacement with identical behavior.
	err = cudaDeviceSynchronize();
	if( cudaSuccess != err) {
		fprintf(stderr, "CudaCheckError cudaDeviceSynchronize error: %s: %s.\n",
			errorMessage, cudaGetErrorString( err) );
		system("pause");
		exit(-1);
	}
#endif
}


// ---- Device-memory allocation statistics (see CudaMallocRec and friends) ----

// Highest value g_CurMemUsage has reached since StartAllocationStats().
size_t g_PeakMemUsage;
// Total bytes currently allocated through the tracked wrappers below.
size_t g_CurMemUsage;
// Maps a device pointer (stored as an integer) to its allocation size in bytes.
typedef std::map<size_t, size_t> AllocMap;
AllocMap g_AllocMap;

// Reset all allocation-tracking state to begin a fresh measurement run.
void StartAllocationStats()
{
	g_AllocMap.clear();
	g_CurMemUsage = 0;
	g_PeakMemUsage = 0;
}

// Tracked variant of cudaMalloc: allocates device memory and records the
// allocation in the statistics globals.
//
// devPtr - out: receives the device pointer (only valid on success)
// size   - requested allocation size in bytes
//
// Returns the cudaMalloc result. Stats are updated only on success; the
// original recorded (and dereferenced *devPtr) even when the allocation
// failed, corrupting the statistics and reading an undefined pointer.
cudaError_t CudaMallocRec(void **devPtr, size_t size)
{
	cudaError_t res = cudaMalloc(devPtr, size);
	if (res == cudaSuccess) {
		g_CurMemUsage += size;
		g_PeakMemUsage = std::max(g_PeakMemUsage, g_CurMemUsage);
		g_AllocMap[reinterpret_cast<size_t>(*devPtr)] = size;
	}
	return res;
}

// Tracked variant of cudaFree: releases device memory and updates the
// allocation statistics.
//
// devPtr - device pointer previously returned by CudaMallocRec
//
// Returns the cudaFree result. The map entry is erased so a later allocation
// reusing the same address is tracked correctly (the original left stale
// entries behind). Freeing a pointer that was never recorded poisons the
// counter with SIZE_MAX as an explicit "stats corrupt" sentinel, matching
// the original's `g_CurMemUsage = -1` wrap-around behavior.
cudaError_t CudaFreeRec(void *devPtr)
{
	AllocMap::iterator it = g_AllocMap.find(reinterpret_cast<size_t>(devPtr));
	if (it != g_AllocMap.end()) {
		g_CurMemUsage -= it->second;
		g_AllocMap.erase(it);
	} else {
		g_CurMemUsage = static_cast<size_t>(-1);  // sentinel: untracked free
	}
	return cudaFree(devPtr);
}

// Tracked variant of cudaMallocPitch: allocates pitched 2D device memory and
// records its footprint (*pitch * height bytes) in the statistics.
//
// devPtr - out: receives the device pointer (only valid on success)
// pitch  - out: receives the row pitch in bytes (only valid on success)
// width  - requested row width in bytes
// height - number of rows
//
// Returns the cudaMallocPitch result. Stats are updated only on success;
// the original read *pitch even when the call failed, which is undefined.
cudaError_t CudaMallocPitchRec(void **devPtr, size_t *pitch, size_t width, size_t height)
{
	cudaError_t res = cudaMallocPitch(devPtr, pitch, width, height);
	if (res == cudaSuccess) {
		size_t bytes = *pitch * height;
		g_CurMemUsage += bytes;
		g_PeakMemUsage = std::max(g_PeakMemUsage, g_CurMemUsage);
		g_AllocMap[reinterpret_cast<size_t>(*devPtr)] = bytes;
	}
	return res;
}

// Tracked variant of cudaMallocArray: allocates a CUDA array and records an
// estimated footprint in the statistics. The size is estimated as
// (bits per texel summed over the x/y/z/w channels) / 8 * width * height,
// which ignores any internal padding the driver may add.
//
// array  - out: receives the array handle (only valid on success)
// desc   - channel format descriptor
// width  - array width in elements
// height - array height in elements
//
// Returns the cudaMallocArray result. Stats are updated only on success;
// the original recorded the size (and used *array) even on failure.
cudaError_t CudaMallocArrayRec(struct cudaArray **array, const struct cudaChannelFormatDesc *desc, size_t width, size_t height)
{
	cudaError_t res = cudaMallocArray(array, desc, width, height);
	if (res == cudaSuccess) {
		size_t texelBytes = (desc->x + desc->y + desc->z + desc->w) / 8;
		size_t totalSize = texelBytes * width * height;
		g_CurMemUsage += totalSize;
		g_PeakMemUsage = std::max(g_PeakMemUsage, g_CurMemUsage);
		g_AllocMap[reinterpret_cast<size_t>(*array)] = totalSize;
	}
	return res;
}

// Tracked variant of cudaFreeArray: releases a CUDA array and updates the
// allocation statistics.
//
// array - array handle previously returned by CudaMallocArrayRec
//
// Returns the cudaFreeArray result. The map entry is erased so a reused
// handle address is tracked correctly (the original left stale entries).
// Freeing an untracked handle poisons the counter with SIZE_MAX, matching
// the original's `g_CurMemUsage = -1` wrap-around behavior.
cudaError_t CudaFreeArrayRec(struct cudaArray *array)
{
	AllocMap::iterator it = g_AllocMap.find(reinterpret_cast<size_t>(array));
	if (it != g_AllocMap.end()) {
		g_CurMemUsage -= it->second;
		g_AllocMap.erase(it);
	} else {
		g_CurMemUsage = static_cast<size_t>(-1);  // sentinel: untracked free
	}
	return cudaFreeArray(array);
}

// Print a summary of the tracked device-memory usage.
//
// %Iu is the MSVC printf length specifier for size_t; the original used %d,
// which is a format mismatch for 64-bit size_t arguments and printed
// truncated or garbage values on x64 builds.
void EndAllocationStats()
{
	_tprintf(_T("Memory statistics\n"));
	_tprintf(_T("  Memory in use: %Iu bytes (%.2f MB)\n"), g_CurMemUsage, (float)g_CurMemUsage / (1024 * 1024));
	_tprintf(_T("  Memory allocation peak: %Iu bytes (%.2f MB)\n"), g_PeakMemUsage, (float)g_PeakMemUsage / (1024 * 1024));
}

