////////////////////////////////////////////////////////////////
///
/// File: NVIDIALib.cpp
/// Author: Cristian Dittamo
/// Last update: 11 Dec 2009
/// Description: This file implements the main functions
///              needed for invoking NVIDIA driver API
///              functions.
/// To do: 
/// ------------------------------------------------------------
/// Copyright (c) 2009 Cristian Dittamo (dittamo@di.unipi.it)
/// 
/// The use and distribution terms for this software are 
/// contained in the file named license.txt, which can be found 
/// in the root of this distribution.
/// By using this software in any fashion, you are agreeing to 
/// be bound by the terms of this license.
///
/// You must not remove this notice, or any other, from this
/// software.
/// ------------------------------------------------------------
////////////////////////////////////////////////////////////////
// NVIDIALib.cpp : Defines the exported functions for the DLL application.

#include "stdafx.h"
#include "NVIDIALib.h"

using namespace std;

// Trace log shared by every function in this file: each CUDA call's
// outcome is appended here.
// NOTE(review): "prova.cpp" looks like a leftover test filename — confirm.
ofstream codeFile("prova.cpp");

//////////////////
//	Stefano
//////////////////
// Number of copies averaged per bandwidth measurement.
#define MEMCOPY_ITERATIONS  10
// Number of copies averaged per latency measurement (H2D/D2H paths).
#define LATENCY_ITERATIONS 100000
// Transfer-size sweep used by testBandwidthRange: 4 MB to 64 MB, 4 MB steps.
#define INIT_SIZE       (4 * ( 1 << 20 ))
#define END_SIZE        (64 * ( 1 << 20 ))
#define INCREMENT_SIZE   (4 * (1 << 20))

// Direction of a memcpy benchmark.
enum memcpyKind { DEVICE_TO_HOST, HOST_TO_DEVICE, DEVICE_TO_DEVICE };
// Host allocation strategy: page-locked (cudaHostAlloc/cudaMallocHost) vs malloc.
enum memoryMode { PINNED, PAGEABLE };
// Global event pair used by StartTimer / StopTimer / GetElapsedTime.
cudaEvent_t start, stop;

////////////////////////////////////////////////////////////////
/// Measures GPU-to-host copy performance.
///
/// memSize - number of bytes transferred per copy
/// memMode - PINNED (page-locked host buffers, async copies) or
///           PAGEABLE (malloc'ed buffers, synchronous copies)
/// wc      - request write-combined pinned memory (CUDART >= 2.2 only)
/// latency - true:  run LATENCY_ITERATIONS copies, return average
///                  time per copy in ms
///           false: run MEMCOPY_ITERATIONS copies, return bandwidth
///                  in MB/s
////////////////////////////////////////////////////////////////
float testDeviceToHostTransfer(unsigned int memSize, 
							   memoryMode memMode, 
							   bool wc,
							   bool latency)
{
    float elapsedTimeInMs = 0.0f;
    float bandwidthInMBs = 0.0f;
    unsigned char *h_idata = NULL;
    unsigned char *h_odata = NULL;

    // Local events intentionally shadow the global start/stop pair used
    // by StartTimer/StopTimer.
    cudaEvent_t start, stop;
    cutilSafeCall  ( cudaEventCreate( &start ) );
    cutilSafeCall  ( cudaEventCreate( &stop ) );
    
    //allocate host memory
    if( PINNED == memMode )
    {
        //pinned memory mode - use special function to get OS-pinned memory
#if CUDART_VERSION >= 2020
		cutilSafeCall( cudaHostAlloc( (void**)&h_idata, memSize, (wc) ? cudaHostAllocWriteCombined : 0 ) );
        cutilSafeCall( cudaHostAlloc( (void**)&h_odata, memSize, (wc) ? cudaHostAllocWriteCombined : 0 ) );
#else
		cutilSafeCall( cudaMallocHost( (void**)&h_idata, memSize ) );
        cutilSafeCall( cudaMallocHost( (void**)&h_odata, memSize ) );
#endif
    }
    else
    {
        //pageable memory mode - use malloc
        h_idata = (unsigned char *)malloc( memSize );
        h_odata = (unsigned char *)malloc( memSize );
    }
    //initialize the memory with a repeating byte pattern
    for(unsigned int i = 0; i < memSize/sizeof(unsigned char); i++)
    {
        h_idata[i] = (unsigned char) (i & 0xff);
    }

    // allocate device memory
    unsigned char* d_idata;
    cutilSafeCall( cudaMalloc( (void**) &d_idata, memSize));

    //initialize the device memory
    cutilSafeCall( cudaMemcpy( d_idata, h_idata, memSize,
                                cudaMemcpyHostToDevice) );

	// unsigned to match the loop counters below (the previous int
	// caused signed/unsigned comparisons in every loop)
	unsigned int loop = latency ? LATENCY_ITERATIONS : MEMCOPY_ITERATIONS;

    //copy data from GPU to Host
    cutilSafeCall( cudaEventRecord( start, 0 ) );
    if( PINNED == memMode )
    {
        // pinned buffers allow async copies; all are queued on stream 0
        // and bracketed by the two events
        for( unsigned int i = 0; i < loop; i++ )
        {
            cutilSafeCall( cudaMemcpyAsync( h_odata, d_idata, memSize,
                                    cudaMemcpyDeviceToHost, 0) );
        }
    }
    else
    {
        for( unsigned int i = 0; i < loop; i++ )
        {
            cutilSafeCall( cudaMemcpy( h_odata, d_idata, memSize,
                                    cudaMemcpyDeviceToHost) );
        }
    }
    cutilSafeCall( cudaEventRecord( stop, 0 ) );

    // make sure GPU has finished copying before reading the timing
    cutilSafeCall( cudaThreadSynchronize() );
    //get the total elapsed time in ms
    cutilSafeCall( cudaEventElapsedTime( &elapsedTimeInMs, start, stop ) );
    
    //calculate bandwidth in MB/s
    bandwidthInMBs = (1e3f * memSize * (float)loop) / 
                                        (elapsedTimeInMs * (float)(1 << 20));

    //clean up memory
    cutilSafeCall( cudaEventDestroy(stop) );
    cutilSafeCall( cudaEventDestroy(start) );

    if( PINNED == memMode )
    {
        cutilSafeCall( cudaFreeHost(h_idata) );
        cutilSafeCall( cudaFreeHost(h_odata) );
    }
    else
    {
        free(h_idata);
        free(h_odata);
    }
    cutilSafeCall(cudaFree(d_idata));
    
    // latency mode: average ms per copy; bandwidth mode: MB/s
    return latency ? elapsedTimeInMs /(float)loop : bandwidthInMBs;
}

////////////////////////////////////////////////////////////////
/// Measures host-to-GPU copy performance.
///
/// memSize - number of bytes transferred per copy
/// memMode - PINNED (page-locked host buffer, async copies) or
///           PAGEABLE (malloc'ed buffer, synchronous copies)
/// wc      - request write-combined pinned memory (CUDART >= 2.2 only)
/// latency - true:  run LATENCY_ITERATIONS copies, return average
///                  time per copy in ms
///           false: run MEMCOPY_ITERATIONS copies, return bandwidth
///                  in MB/s
////////////////////////////////////////////////////////////////
float testHostToDeviceTransfer(unsigned int memSize, 
							   memoryMode memMode, 
							   bool wc,
							   bool latency)
{
    float elapsedTimeInMs = 0.0f;
    float bandwidthInMBs = 0.0f;

    // Local events intentionally shadow the global start/stop pair.
    cudaEvent_t start, stop;
    cutilSafeCall( cudaEventCreate( &start ) );
    cutilSafeCall( cudaEventCreate( &stop ) );

    //allocate host memory
    unsigned char *h_odata = NULL;
    if( PINNED == memMode )
    {
#if CUDART_VERSION >= 2020
        //pinned memory mode - use special function to get OS-pinned memory
        cutilSafeCall( cudaHostAlloc( (void**)&h_odata, memSize, (wc) ? cudaHostAllocWriteCombined : 0 ) );
#else
        //pinned memory mode - use special function to get OS-pinned memory
        cutilSafeCall( cudaMallocHost( (void**)&h_odata, memSize ) );
#endif
    }
    else
    {
        //pageable memory mode - use malloc
        h_odata = (unsigned char *)malloc( memSize );
    }    
    //initialize the memory with a repeating byte pattern
    for(unsigned int i = 0; i < memSize/sizeof(unsigned char); i++)
    {
        h_odata[i] = (unsigned char) (i & 0xff);
    }

    //allocate device memory
    unsigned char* d_idata;
    cutilSafeCall( cudaMalloc( (void**) &d_idata, memSize));

	// unsigned to match the loop counters below (the previous int
	// caused signed/unsigned comparisons in every loop)
	unsigned int loop = latency ? LATENCY_ITERATIONS : MEMCOPY_ITERATIONS;

    cutilSafeCall( cudaEventRecord( start, 0 ) );
    //copy host memory to device memory
    if( PINNED == memMode )
    {
        for(unsigned int i = 0; i < loop; i++)
        {
            cutilSafeCall( cudaMemcpyAsync( d_idata, h_odata, memSize,
                                    cudaMemcpyHostToDevice, 0) );
        }
    }
    else {
        for(unsigned int i = 0; i < loop; i++)
        {
            cutilSafeCall( cudaMemcpy( d_idata, h_odata, memSize,
                                    cudaMemcpyHostToDevice) );
        }
    }
    cutilSafeCall( cudaEventRecord( stop, 0 ) );
    // wait for all queued copies so the event interval is complete
    cutilSafeCall( cudaThreadSynchronize() );
    //total elapsed time in ms
    cutilSafeCall( cudaEventElapsedTime( &elapsedTimeInMs, start, stop ) );
        
    //calculate bandwidth in MB/s
    bandwidthInMBs = (1e3f * memSize * (float)loop) / 
                                        (elapsedTimeInMs * (float)(1 << 20));

    //clean up memory
    cutilSafeCall( cudaEventDestroy(stop) );
    cutilSafeCall( cudaEventDestroy(start) );
    if( PINNED == memMode )
    {
        cutilSafeCall( cudaFreeHost(h_odata) );
    }
    else
    {
        free(h_odata);
    }
    cutilSafeCall(cudaFree(d_idata));

	// latency mode: average ms per copy; bandwidth mode: MB/s
	return latency ? elapsedTimeInMs / (float)loop : bandwidthInMBs;
}

////////////////////////////////////////////////////////////////
/// Measures GPU-to-GPU (on-device) copy performance.
///
/// memSize - number of bytes transferred per copy
/// latency - true:  return average time per copy in ms
///           false: return bandwidth in MB/s (x2: each copy both
///                  reads and writes device memory)
////////////////////////////////////////////////////////////////
float testDeviceToDeviceTransfer(unsigned int memSize,
								 bool latency)
{
    float elapsedTimeInMs = 0.0f;
    float bandwidthInMBs = 0.0f;

    // Local events intentionally shadow the global start/stop pair.
    cudaEvent_t start, stop;
    cutilSafeCall( cudaEventCreate( &start ) );
    cutilSafeCall( cudaEventCreate( &stop ) );

    //allocate host memory
    unsigned char *h_idata = (unsigned char *)malloc( memSize );
    
    //initialize the host memory with a repeating byte pattern
    for(unsigned int i = 0; i < memSize/sizeof(unsigned char); i++)
    {
        h_idata[i] = (unsigned char) (i & 0xff);
    }

    //allocate device memory
    unsigned char *d_idata;
    cutilSafeCall( cudaMalloc( (void**) &d_idata, memSize));
    unsigned char *d_odata;
    cutilSafeCall( cudaMalloc( (void**) &d_odata, memSize));

    //initialize memory
    cutilSafeCall( cudaMemcpy( d_idata, h_idata, memSize,
                                cudaMemcpyHostToDevice) );

	// unsigned to match the loop counter below.
	// NOTE(review): 1000000 differs from the LATENCY_ITERATIONS (100000)
	// used by the H2D/D2H paths — confirm whether that is intentional.
	unsigned int loop = latency ? 1000000 : MEMCOPY_ITERATIONS;

    //run the memcopy
    cutilSafeCall( cudaEventRecord( start, 0 ) );

    for( unsigned int i = 0; i < loop; i++ )
    {
        cutilSafeCall( cudaMemcpy( d_odata, d_idata, memSize,
                                cudaMemcpyDeviceToDevice) );
    }
    cutilSafeCall( cudaEventRecord( stop, 0 ) );
  
    //Since device to device memory copies are non-blocking,
    //cudaThreadSynchronize() is required in order to get
    //proper timing.
    cutilSafeCall( cudaThreadSynchronize() );

    //get the total elapsed time in ms
    cutilSafeCall( cudaEventElapsedTime( &elapsedTimeInMs, start, stop ) );
    
    //calculate bandwidth in MB/s (2x: read + write on the device)
    bandwidthInMBs = 2.0f * (1e3f * memSize * (float)loop) / 
                                        (elapsedTimeInMs * (float)(1 << 20));
    
    //clean up memory
    free(h_idata);
    cutilSafeCall(cudaEventDestroy(stop));
    cutilSafeCall(cudaEventDestroy(start));
    cutilSafeCall(cudaFree(d_idata));
    cutilSafeCall(cudaFree(d_odata));

	// latency mode: average ms per copy; bandwidth mode: MB/s
	return latency ? elapsedTimeInMs / (float)loop : bandwidthInMBs;
}

////////////////////////////////////////////////////////////////
/// Runs a bandwidth sweep from 'start' to 'end' bytes in 'increment'
/// steps for the given copy direction and host-memory mode, and
/// returns the average bandwidth in MB/s over all sizes.
/// Precondition: end >= start and increment > 0 (the unsigned count
/// arithmetic underflows otherwise).
////////////////////////////////////////////////////////////////
float testBandwidthRange(unsigned int start, 
						 unsigned int end, 
						 unsigned int increment, 
						 memcpyKind kind, 
						 memoryMode memMode, 
						 bool wc)
{
    //count the number of copies we're going to run
    unsigned int count = 1 + ((end - start) / increment);
    
    unsigned int *memSizes = ( unsigned int * )malloc( count * sizeof( unsigned int ) );
    float *bandwidths = ( float * ) malloc( count * sizeof(float) );

    // Zero the results first so a 'kind' that matches no case below
    // still contributes a defined value to the average.
    for (unsigned int i = 0; i < count; i++)
        bandwidths[i] = 0.0f;

    //run each of the copies
    for(unsigned int i = 0; i < count; i++)
    {
		memSizes[i] = start + i * increment;
        switch(kind)
        {
			case DEVICE_TO_HOST:    bandwidths[i] = testDeviceToHostTransfer( memSizes[i], memMode, wc, false);
				break;
			case HOST_TO_DEVICE:    bandwidths[i] = testHostToDeviceTransfer( memSizes[i], memMode, wc, false);
				break;
			case DEVICE_TO_DEVICE:  bandwidths[i] = testDeviceToDeviceTransfer( memSizes[i], false );
				break;
        }
	}

	// average the per-size bandwidths (unsigned counter: count is
	// unsigned, so the old int counter mixed signedness)
	float total = 0.0f;
	for (unsigned int i = 0 ; i < count ; i++)
		total += bandwidths[i];
	float bandwidthAverage = total / (float)count;

    //clean up
    free(memSizes);
    free(bandwidths);

	return bandwidthAverage;
}

////////////////////////////////////////////////////////////////
/// Measures per-copy latency (ms) of a 1-byte transfer in the given
/// direction.  start/end/increment exist only for signature symmetry
/// with testBandwidthRange and are unused.
/// Returns 0.0f for an unrecognized 'kind' (previously 'latency' was
/// returned uninitialized in that case).
////////////////////////////////////////////////////////////////
float testLatency(unsigned int start, 
				  unsigned int end, 
				  unsigned int increment, 
				  memcpyKind kind, 
				  memoryMode memMode, 
				  bool wc)
{
    float latency = 0.0f;
    switch(kind)
    {
		case DEVICE_TO_HOST:    latency = testDeviceToHostTransfer( 1, memMode, wc, true);
			break;
		case HOST_TO_DEVICE:    latency = testHostToDeviceTransfer( 1, memMode, wc, true);
			break;
		case DEVICE_TO_DEVICE:  latency = testDeviceToDeviceTransfer( 1, true );
			break;
    }

	return latency;
}

// Exported entry point: sweeps the fixed 4..64 MB range and returns the
// average bandwidth in MB/s for the requested direction and memory mode.
extern "C" NVIDIALIB_API float bandwidthTest(memcpyKind kind, memoryMode mode, bool wc)
{
	float averageMBs = testBandwidthRange(INIT_SIZE, END_SIZE, INCREMENT_SIZE, kind, mode, wc);
	return averageMBs;
}

// Exported entry point: measures the 1-byte copy latency (ms) for the
// requested direction and memory mode.  The range arguments (1,1,1) are
// placeholders — testLatency ignores them.
extern "C" NVIDIALIB_API float latencyTest(memcpyKind kind, memoryMode mode, bool wc)
{
	float latencyMs = testLatency(1, 1, 1, kind, mode, wc);
	return latencyMs;
}


// Creates the global timing event pair and records the start event on
// the default stream.  Pair with StopTimer / GetElapsedTime.
extern "C" NVIDIALIB_API void StartTimer()
{
	cudaEventCreate(&stop);
	cudaEventCreate(&start);
	cudaEventRecord(start, 0);
}

// Records the stop event on the default stream, then blocks until the
// GPU reaches it so the timed interval is complete.
extern "C" NVIDIALIB_API void StopTimer()
{
	cudaEventRecord(stop, 0);
	cudaThreadSynchronize();
}
// Returns the ms elapsed between the global start/stop events and
// destroys both (so a new StartTimer call is required before reuse).
extern "C" NVIDIALIB_API float GetElapsedTime()
{
	float elapsedMs;
	cudaEventElapsedTime(&elapsedMs, start, stop);
	cudaEventDestroy(stop);
	cudaEventDestroy(start);
	return elapsedMs;
}

// Queries the warp size (threads per warp) of the given device.
extern "C" NVIDIALIB_API int GetWarpSize(int deviceID)
{
	cudaDeviceProp props;
	cudaGetDeviceProperties(&props, deviceID);
	return props.warpSize;
}

// Queries the number of streaming multiprocessors of the given device.
extern "C" NVIDIALIB_API int GetSMCount(int deviceID)
{
	cudaDeviceProp props;
	cudaGetDeviceProperties(&props, deviceID);
	return props.multiProcessorCount;
}

// Queries the clock rate of the given device (units as reported by
// cudaDeviceProp::clockRate).
extern "C" NVIDIALIB_API int GetClockRate(int deviceID)
{
	cudaDeviceProp props;
	cudaGetDeviceProperties(&props, deviceID);
	return props.clockRate;
}
// Returns the name of the given device.
// The properties struct is static because the previous stack-local
// version returned a pointer into a destroyed frame (dangling pointer).
// The returned string stays valid until the next call; not thread-safe.
extern "C" NVIDIALIB_API char *GetDeviceName(int deviceID)
{
	static cudaDeviceProp dp;
	cudaGetDeviceProperties(&dp, deviceID);
	return dp.name;
}
///////////////////////////////////////////
///////////////////////////////////////////

// Appends one line to the trace log marking 'func' as succeeded or
// failed according to 'result'.
void printLog(char* func, CUresult result)
{
	const char* tag = (result == CUDA_SUCCESS) ? "\tSUCCESS - " : "\tERROR   - ";
	codeFile << tag << func << std::endl;
}

//void LogResult(CUresult result)
//{
//	if(result == CUDA_SUCCESS)
//		codeFile << "\tCUDA_OK" << std::endl;
//	else
//		codeFile << "\tCUDA_ERR" << std::endl;
//}

// Queries the compute capability of device 'devId', writing the major
// and minor versions through the out-parameters.
// Returns the raw CUresult of the driver call (CUDA_SUCCESS == 0).
extern "C" NVIDIALIB_API int CALLTYPE GetDevPrpVersion(CUdevice devId, int* major, int* minor)
{
	int maj = 0;
	int mir = 0;
	// Query the device actually passed in (the old code hard-coded
	// device 0, silently ignoring devId).
	CUresult result = cuDeviceComputeCapability(&maj, &mir, devId);

	*major = maj;
	*minor = mir;

	// Log under the name of the API actually invoked (previously
	// mislabeled as cuDeviceGetAttribute).
	printLog("cuDeviceComputeCapability", result);

	return result;
}

// Queries the multiprocessor count of device 'devId' through the
// driver API.  Returns 0 on success, -1 on failure.
extern "C" NVIDIALIB_API int CALLTYPE GetDevPrpPROCNUM(CUdevice devId, int* multiProcessorCount)
{
	CUresult result = cuDeviceGetAttribute(multiProcessorCount, CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT, devId);

	printLog("cuDeviceGetAttribute", result);

	return (result == CUDA_SUCCESS) ? 0 : -1;
}

// Initializes the driver API, selects device 0, and allocates the
// pointer tables for up to num_args device buffers and num_funcs
// kernel handles.
CUDAComp::CUDAComp(int num_args, int num_funcs)
{
	// Put all members in a safe state first so the destructor never
	// walks uninitialized pointers if cuInit fails (the old code left
	// them as garbage on the failure path).
	dptr = NULL;
	funcs = NULL;
	dptrIndex = 0;
	funcIndex = 0;
	max_args = 0;
	max_funcs = 0;

	CUresult result = cuInit(0);

	if( result == CUDA_SUCCESS)
	{
		codeFile << "\tSUCCESS - cuInit(0)" << endl;

		// Log the outcome of cuDeviceGet itself (the old code logged
		// the cuInit result under the cuDeviceGet label).
		CUresult resultGet = cuDeviceGet( &cuDevice, 0 );
		printLog("cuDeviceGet", resultGet);

		dptr = (CUdeviceptr **)malloc(num_args * sizeof(CUdeviceptr*));
		funcs = (CUfunction **)malloc(num_funcs * sizeof(CUfunction*));
		max_args = num_args;
		max_funcs = num_funcs;
	}
	else
		codeFile << "\tERROR - cuInit(0)" << endl;
}

// Releases every device allocation and host-side slot that was
// actually created.  Iterates only up to dptrIndex/funcIndex: the old
// loops ran over max_args/max_funcs and so freed (and cuMemFree'd)
// uninitialized slots, and one loop even walked dptr with the
// max_funcs bound.
CUDAComp::~CUDAComp()
{
	for(int i = 0; i < dptrIndex; i++)
	{
		cutilDrvSafeCallNoSync(cuMemFree(*(dptr[i])));
		codeFile << "\t\tcutilDrvSafeCallNoSync(cuMemFree dptr[" << i << "] )" << endl;
		free(dptr[i]);
	}
	free(dptr);

	for(int i = 0; i < funcIndex; i++)
		free(funcs[i]);
	free(funcs);
}

// Loads a PTX/cubin module from 'ptxFile' into the current context.
// Returns a pointer to the module handle on success, ERR on failure
// (with the failure reason written to the trace log).
CUmodule* CUDAComp::CreateModule(char* ptxFile)
{
	//TODO: compilation with parameters

	CUresult result = cuModuleLoad( &cuModule, ptxFile);
	codeFile << "\t\tCreateModule(&cuModule, 0, " << ptxFile << ");" << std::endl;
	printLog("cuModuleLoad", result);
	
	if(result == CUDA_SUCCESS)
		return &cuModule;
	else
	{
		switch(result)
		{
			case CUDA_ERROR_DEINITIALIZED:
				codeFile << "\t\tCUDA_ERROR_DEINITIALIZED" << std::endl;
				break;
			case CUDA_ERROR_NOT_INITIALIZED:
				codeFile << "\t\tCUDA_ERROR_NOT_INITIALIZED" << std::endl;
				break;
			case CUDA_ERROR_INVALID_CONTEXT:
				codeFile << "\t\tCUDA_ERROR_INVALID_CONTEXT" << std::endl;
				break;
			case CUDA_ERROR_INVALID_VALUE:
				codeFile << "\t\tCUDA_ERROR_INVALID_VALUE" << std::endl;
				break;
			case CUDA_ERROR_NOT_FOUND:
				codeFile << "\t\tCUDA_ERROR_NOT_FOUND" << std::endl;
				break;
			case CUDA_ERROR_OUT_OF_MEMORY:
				codeFile << "\t\tCUDA_ERROR_OUT_OF_MEMORY" << std::endl;
				break;
			case CUDA_ERROR_FILE_NOT_FOUND:
				codeFile << "\t\tCUDA_ERROR_FILE_NOT_FOUND" << std::endl;
				break;
			// Any other CUresult was previously dropped without a trace;
			// log its numeric value so failures stay diagnosable.
			default:
				codeFile << "\t\tUnrecognized CUresult code: " << (int)result << std::endl;
				break;
		}
		return ERR;
	}
}

void CUDAComp::DeleteModule()
{
	CUresult result = cuModuleUnload(cuModule);
	printLog("cuModuleUnload", result);
}

// Creates a driver-API context on the device selected at construction.
// Returns a pointer to the context handle, or ERR on failure (after
// detaching, mirroring the SDK sample pattern).
CUcontext* CUDAComp::CreateContext()
{
	CUresult result = cuCtxCreate( &cuContext, 0, cuDevice );
	printLog("cuCtxCreate", result);

	if ( result == CUDA_SUCCESS)
		return &cuContext;

	cuCtxDetach(cuContext);
	return ERR;
}

// Detaches the context created by CreateContext and logs the call.
void CUDAComp::DeleteContext()
{
	cutilDrvSafeCallNoSync(cuCtxDetach(cuContext));
	codeFile << "\t\tcuCtxDetach(cuContext);" << std::endl;
}

// Looks up kernel 'kernel_name' in the loaded module and stores its
// handle in the next free slot of the funcs table.
// Returns the stored handle, or ERR when the table is full or the
// lookup fails.
CUfunction* CUDAComp::AddFunction(char* kernel_name)
{
	codeFile << "\t\tAddFunction" << endl;

	// funcIndex is the next free slot; valid slots are 0..max_funcs-1,
	// so the table is full once funcIndex == max_funcs.  (The old '>'
	// test allowed one write past the end of funcs[].)
	if( funcIndex >= max_funcs)
	{
		codeFile << "\t\tException on AddFunction: funcIndex is out of range" << endl;
		return ERR;
	}
	funcs[funcIndex] = (CUfunction *)malloc(sizeof(CUfunction));
	CUresult result = cuModuleGetFunction( funcs[funcIndex], cuModule, kernel_name );
	printLog("cuModuleGetFunction", result);

	if( result == CUDA_SUCCESS)
		return funcs[funcIndex++];
	else
		return ERR;
}

// Allocates mem_size bytes of device memory into the next free slot of
// the dptr table and returns that slot index (used later by the
// MemCpy*/SetParamV calls).  Returns ERR when the table is full.
int CUDAComp::GPUMemAlloc(int mem_size)
{
	// dptrIndex is the next free slot; valid slots are 0..max_args-1,
	// so the table is full once dptrIndex == max_args.  (The old '>'
	// test allowed one write past the end of dptr[].)
	if(dptrIndex >= max_args)
	{
		codeFile << "\t\tException on GPUMemAlloc: dptrIndex is out of range" << endl;
		return ERR;
	}
	else
	{
		int oldvalue = dptrIndex;
		dptr[dptrIndex] = (CUdeviceptr *)malloc(sizeof(CUdeviceptr));
		
		CUresult result = cuMemAlloc( dptr[dptrIndex] , mem_size );
		printLog("cuMemAlloc", result);
		codeFile << "\t\tmem_size: " << mem_size << ", dptrIndex: " << dptrIndex << ";" << endl;

		++dptrIndex;

		return oldvalue;
	}
}

// Copies mem_size bytes from host buffer srcHost into the device
// allocation stored at slot dev_index (as returned by GPUMemAlloc).
// Out-of-range indices are logged and ignored.
void CUDAComp::MemCpyH2D(int dev_index, void* srcHost, int mem_size)
{
	codeFile << "\t\tMemCpyH2D - dev index: " << dev_index << endl;
	// Valid slots are 0..max_args-1 (the old '>' test admitted
	// dev_index == max_args, one past the end of dptr[]).
	if((dev_index >= max_args) || (dev_index < 0))
		codeFile << "\t\tException on MemCpyH2D: index is out of range" << endl;
	else
	{
		CUresult result = cuMemcpyHtoD( *(dptr[dev_index]), srcHost, mem_size );
		
		printLog("cuMemcpyHtoD", result);
		codeFile << "\t\tdestDev[" << dev_index << "], mem_size, " <<  mem_size << endl;
	}
}


// Copies mem_size bytes from the device allocation at slot dev_index
// back into host buffer destHost.  Out-of-range indices are logged
// and ignored.
void CUDAComp::MemCpyD2H(int dev_index, void* destHost, int mem_size)
{
	int dev = dev_index;
	// Valid slots are 0..max_args-1 (the old '>' test admitted
	// dev == max_args, one past the end of dptr[]).
	if((dev >= max_args) || (dev < 0))
		codeFile << "\t\tException on MemCpyD2H: index is out of range" << endl;
	else
	{
		CUresult result = cuMemcpyDtoH((void *)destHost, *(dptr[dev]), mem_size);
		printLog("cuMemcpyDtoH", result);
		codeFile << "\t\tdestHost[0] " << ((int*)destHost)[0] << ", dev: " << dev << " mem_size: " <<  mem_size << ");" << endl;	
	}
}

// Binds the device pointer stored at slot dev_index as a kernel
// parameter of 'fun' at the properly aligned position after 'offset'.
// Returns the offset just past the parameter (callers chain these).
// On an out-of-range index the offset is returned unchanged.
int CUDAComp::SetParamV(CUfunction* fun, int offset, int dev_index)
{
	int data = offset;
	codeFile << "\t\tdev_index: " << dev_index << endl;
	// Valid slots are 0..max_args-1 (the old '>' test admitted
	// dev_index == max_args, one past the end of dptr[]).
	if((dev_index >= max_args) || (dev_index < 0))
		codeFile << "\t\tException on SetParamV: index is out of range - dev index: " << dev_index << endl;
	else
	{
		void* ptr = (void *)(*(dptr[dev_index]));
		// round the offset up to the pointer's alignment
		data = (data + __alignof(ptr) - 1) & ~(__alignof(ptr) - 1);
		
		int align1 = __alignof(ptr);

		CUresult result = cuParamSetv( *(fun), data, &ptr, sizeof(ptr));
		printLog("cuParamSetv", result);

		codeFile << "\t\tdev_index: " << dev_index << ", align: " << align1 << ", data: " << data << endl;	
		
		data += sizeof(ptr);
	}
	return data;
}

// Binds an int kernel parameter of 'fun' at the properly aligned
// position after 'offset' and returns the offset just past it.
int CUDAComp::SetParamI(CUfunction* fun, int offset, int value)
{
	// round the offset up to the int's alignment
	int aligned = (offset + __alignof(value) - 1) & ~(__alignof(value) - 1);

	printLog("cuParamSeti", cuParamSeti( *(fun), aligned, value ));

	codeFile << "\t\tSetParamI( value: " << value << ")" << endl;	

	return aligned + (int)sizeof(value);
}

// Exported factory: logs the requested table sizes, then constructs
// the compute object (whose constructor performs cuInit and selects
// device 0).  Caller releases it via Exit().
extern "C" NVIDIALIB_API CUDAComp* CALLTYPE InitEnv(int num_args, int num_funcs)
{
	codeFile << "nVidia CUDA driver - Initialization device: num_args " << num_args << ", num_funcs: " << num_funcs << std::endl;

	return new CUDAComp(num_args, num_funcs);
}

// Flat C entry point forwarding to CUDAComp::CreateContext.
extern "C" NVIDIALIB_API CUcontext* CALLTYPE createContext(CUDAComp* compute)
{
	CUcontext* ctx = compute->CreateContext();
	return ctx;
}

// Flat C entry point forwarding to CUDAComp::DeleteContext.
extern "C" NVIDIALIB_API void CALLTYPE deleteContext(CUDAComp* compute)
{
	compute->DeleteContext();
}

// Writes the number of CUDA-capable devices through nrDevices.
// NOTE(review): the log line mentions cutilSafeCall but the call is
// not actually wrapped — confirm whether that is intended.
extern "C" NVIDIALIB_API void CALLTYPE GetDevices(int *nrDevices)
{
	cudaGetDeviceCount(nrDevices);
	codeFile << "\t\tcutilSafeCall(cudaGetDeviceCount(gpu_n))" << std::endl;
}

// Tears down the environment created by InitEnv: runs cutilExit with a
// synthetic argv, then deletes the compute object.
// argv[0] must be a valid NUL-terminated string — the old code passed
// a single uninitialized byte, which cutilExit would read as a string.
extern "C" NVIDIALIB_API void CALLTYPE Exit(CUDAComp* compute)
{
	char** argv = (char** )malloc(sizeof(char *));
	argv[0] = (char* )malloc(sizeof(char));
	argv[0][0] = '\0';
	cutilExit(1, argv);
	codeFile << "\t\tcutilExit(argc, argv);" << endl;
	free(argv[0]);
	free(argv);
	delete compute;
}

// Flat C entry point: loads a PTX/cubin module via CreateModule.
extern "C" NVIDIALIB_API CUmodule* CALLTYPE LoadBin(CUDAComp* compute, char* ptxFile)
{
	CUmodule* module = compute->CreateModule(ptxFile);
	return module;
}

// Flat C entry point forwarding to CUDAComp::DeleteModule.
extern "C" NVIDIALIB_API void CALLTYPE UnLoadModule(CUDAComp* compute)
{
	compute->DeleteModule();
}

// Flat C entry point: looks up a kernel by name via AddFunction.
extern "C" NVIDIALIB_API CUfunction* CALLTYPE LoadFunc(CUDAComp* compute, char* kernel_name)
{
	CUfunction* fn = compute->AddFunction(kernel_name);
	return fn;
}

// Flat C entry point: allocates device memory via GPUMemAlloc and
// returns the slot index for later copies/parameter binding.
extern "C" NVIDIALIB_API int CALLTYPE MAllocOnGPU ( CUDAComp* compute, int mem_size )
{
	int slot = compute->GPUMemAlloc(mem_size);
	return slot;
}

//extern "C" NVIDIALIB_API void CALLTYPE MFree(CUdeviceptr* mem)
//{
//	cutilDrvSafeCallNoSync(cuMemFree(*(mem)));
//	codeFile << "\tcutilDrvSafeCallNoSync(cuMemFree)" << endl;
//}

// Flat C entry point: host-to-device copy into the buffer at dev_index.
extern "C" NVIDIALIB_API void CALLTYPE MCpyCPU2GPU(CUDAComp* compute, int dev_index, void* srcHost, int mem_size)
{
	compute->MemCpyH2D(dev_index, srcHost, mem_size);
}

// Flat C entry point: device-to-host copy from the buffer at dev_index.
// Note the argument order differs from MemCpyD2H (dstHost comes first
// here); forwarding keeps the exported signature intact.
extern "C" NVIDIALIB_API void CALLTYPE MCpyGPU2CPU(CUDAComp* compute, void* dstHost,  int dev_index, int mem_size)
{
	compute->MemCpyD2H(dev_index, dstHost, mem_size);
}

// Flat C entry point: binds the device pointer at dev_index as a
// kernel parameter; returns the next free offset.
extern "C" NVIDIALIB_API int CALLTYPE SetParamV(CUDAComp* compute, CUfunction* fun, int offset, int dev_index)
{
	int nextOffset = compute->SetParamV(fun, offset, dev_index);
	return nextOffset;
}

// Flat C entry point: binds an int kernel parameter; returns the next
// free offset.
extern "C" NVIDIALIB_API int CALLTYPE SetParamI(CUDAComp* compute, CUfunction* fun, int offset, int value)
{
	int nextOffset = compute->SetParamI(fun, offset, value);
	return nextOffset;
}

// Sets the thread-block dimensions of kernel 'fun'.
// Returns 0 on success, -1 on a zero dimension or driver failure.
extern "C" NVIDIALIB_API int CALLTYPE SetBlockShape(CUfunction* fun, int x, int y, int z)
{
	// reject degenerate block dimensions up front
	if( (x == 0) || (y == 0) || (z == 0))
	{
		codeFile << "\t\tcuFuncSetBlockShape - ERROR: Inputs cannot be equal to 0 - (" << x << ", " << y << ", " << z << "));" << endl;
		return -1;
	}

	if( cuFuncSetBlockShape( *fun, x, y, z ) == CUDA_SUCCESS)
	{
		codeFile << "\t\tcutilDrvSafeCallNoSync(cuFuncSetBlockShape(" << x << ", " << y << ", " << z << "));" << endl;
		return 0;
	}

	codeFile << "\t\tcuFuncSetBlockShape - ERROR - (" << x << ", " << y << ", " << z << "));" << endl;
	return -1;
}

// Sets the dynamic shared-memory size (bytes) used by kernel 'fun'.
extern "C" NVIDIALIB_API void CALLTYPE SetSharedSize(CUfunction* fun, int mem_size)
{
	cutilDrvSafeCallNoSync( cuFuncSetSharedSize( *(fun), mem_size ) );
	codeFile << "\t\tFuncSetSharedSize( " << mem_size << "));" << endl;
}

// Declares the total size (bytes) of the parameter block bound to
// kernel 'fun' via the SetParam* calls.
extern "C" NVIDIALIB_API void CALLTYPE SetParamSize(CUfunction* fun, int mem_size)
{
	cutilDrvSafeCallNoSync( cuParamSetSize( *(fun), mem_size ) );
	codeFile << "\t\tParamSetSize( " << mem_size << "));" << endl;
}

// Launches kernel 'fun' on a gridWidth x gridHeight grid of blocks
// (block shape and parameters must already be set).
extern "C" NVIDIALIB_API void CALLTYPE LaunchGrid(CUfunction* fun, int gridWidth, int gridHeight)
{
	cutilDrvSafeCallNoSync( cuLaunchGrid( *(fun), gridWidth, gridHeight ) );
	codeFile << "\t\tcutilDrvSafeCallNoSync(cuLaunchGrid( " << gridWidth << ", " << gridHeight << "));" << endl;
}