//
/*   CUDA helper code
*    Copyright (C) 2009 Goffredo Marocchi
*
*    This program is free software; you can redistribute it and/or modify
*    it under the terms of the GNU General Public License as published by
*    the Free Software Foundation; either version 2 of the License, or
*    (at your option) any later version.
*
*    This program is distributed in the hope that it will be useful,
*    but WITHOUT ANY WARRANTY; without even the implied warranty of
*    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
*    GNU General Public License for more details.
*
*    You should have received a copy of the GNU General Public License along
*    with this program; if not, write to the Free Software Foundation, Inc.,
*    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/

#ifndef IGGS_CUDA_HELPER_H
#define IGGS_CUDA_HELPER_H

#include <cuda_runtime_api.h>
#include <cutil_inline.h>

#include "IGGS_MatTest_defines.h"
#include "msglib.h"
#include "errlib.h"
#include "IGGS_CUDA_checks.h"

///////////////////////////////////////////////////////////////////////////////////////////////////////////

// Timer helpers (defined elsewhere; presumably backed by the cutil timer
// API given the <cutil_inline.h> include above -- TODO confirm):
// start_timer() hands back a timer handle in *t; stop_timer() takes that
// handle and writes the elapsed time in milliseconds to *t_ms.
void start_timer (unsigned int* t);
void stop_timer (unsigned int t, float* t_ms);

// C-linkage entry points (the trailing underscore suggests they are meant
// to be callable from Fortran -- NOTE(review): confirm against callers).
extern "C" void gpu_print_free_memory_ ();
extern "C" void gpu_init_ ();

////////////////////////////////////////////////////
/////CUDA
////////////////////////////////////////////////////

// Convenience macros: each one forwards to the matching double-underscore
// helper below, automatically capturing the *caller's* __FILE__ and
// __LINE__ so that error reports point at the call site rather than at
// this header.  Always use these macros instead of calling the helpers
// directly.
#define Allocate_GPU_memory(d_ptr, data_size)  __Allocate_GPU_memory   ((d_ptr), (data_size), __FILE__, __LINE__)
#define Free_GPU_memory(d_ptr)  __Free_GPU_memory   ((d_ptr), __FILE__, __LINE__)
#define Write_to_GPU(d_ptr, h_ptr, data_size)	__Write_to_GPU   ((d_ptr), (h_ptr), (data_size), __FILE__, __LINE__)
#define Read_from_GPU(h_ptr, d_ptr, data_size)  __Read_from_GPU   ((h_ptr),  (d_ptr), (data_size), __FILE__, __LINE__)
#define BindTex1D_rmetFP(offset, texref, devPtr)  __BindTex1D_rmetFP   ((offset), (texref), (devPtr), __FILE__, __LINE__)
#define UnbindTex1D_rmetFP(texref)  __UnbindTex1D_rmetFP   ((texref), __FILE__, __LINE__)
#define AllocateHostMemory(h_ptr, data_size)		__AllocateHostMemory  ((h_ptr), (data_size), __FILE__, __LINE__)
#define FreeHostMemory(h_ptr)		__FreeHostMemory  ((h_ptr), __FILE__, __LINE__)

/** \brief General host memory allocation function.
*
*  This function dynamically allocates a memory region of data_size bytes in
*  host memory (main RAM).  When PINNED == 1 the memory is page-locked via
*  cudaMallocHost() (pinned memory enables faster and asynchronous
*  host<->device transfers); in every other configuration plain malloc() is
*  used.\n
*	This wrapper is useful to neatly check for errors.
*
*  \param h_ptr pointer to host memory, T**
*  \param data_size size of the memory region we want to allocate (in bytes), const u32
*  \param file name of the file this function was invoked from, const char*
*  \param line line number of the function call, const int
*	\return void
*/
template <class T>
inline void __AllocateHostMemory (T** h_ptr, const u32 data_size, const char* file, const int line) {

#if PINNED == 1
	CheckCall (" ...Allocating host memory... ", cudaMallocHost ( (void **) h_ptr, data_size), file, line);
#else
	/* #if/#else (rather than two independent #if blocks testing PINNED == 1
	 * and PINNED == 0) guarantees *h_ptr is always assigned, even if PINNED
	 * is accidentally defined to some other value. */
	*h_ptr = (T*)malloc(data_size); //malloc (<n_items> * sizeof(<item type>))
#endif

	/* Either path can yield a null pointer on failure; bail out loudly. */
	if( !(*h_ptr)) {
		fprintf(stderr, "host memory allocation failure in file <%s>, line %i\n",
				file, line);
		exit(EXIT_FAILURE);
	}

	return;
}

/** \brief General host memory deallocation function.
*
*  This function frees a dynamically allocated memory region in host memory
*  (main RAM) using cudaFreeHost() or free() (depending on the value of the
*  variable PINNED).  The pointer must have been obtained from
*  __AllocateHostMemory() with the same PINNED setting.\n
*	This wrapper is useful to neatly check for errors.
*
*  \param h_ptr pointer to host memory, T*
*  \param file name of the file this function was invoked from, const char*
*  \param line line number of the function call, const int
*	\return void
*/
template <class T>
inline void __FreeHostMemory (T* h_ptr, const char* file, const int line) {

	/* NULL (standard macro) instead of the original non-standard `null`
	 * identifier.  Freeing a null pointer is treated as a caller bug. */
	if (NULL == h_ptr) {
		err_sys ("null == h_ptr... at line %i of %s", line, file);
	}

#if PINNED == 1
	/* Message fixed: this call releases *host* memory, not GPU memory. */
	CheckCall(" ...Freeing host memory... ",  cudaFreeHost(h_ptr), file, line);
#else
	/* #if/#else instead of a second #if so the pointer is always released
	 * even if PINNED is defined to an unexpected value. */
	free(h_ptr);
#endif

	return;
}

/** \brief Device memory allocation wrapper.
*
*  Reserves data_size bytes of device memory (GPU RAM) through cudaMalloc()
*  and routes the returned status through CheckCall() so that any failure is
*  reported together with the caller's file name and line number.
*
*  \param d_ptr reference to the device pointer to be filled in, T*&
*  \param data_size size of the memory region we want to allocate (in bytes), const u32
*  \param file name of the file this function was invoked from, const char*
*  \param line line number of the function call, const int
*	\return void
*/
template <class T>
inline void __Allocate_GPU_memory (T * &d_ptr, const u32 data_size, const char* file, const int line) {

	const cudaError_t status = cudaMalloc ((void **) &d_ptr, data_size);
	CheckCall (" ...Allocating GPU memory... ", status, file, line);
}

/** \brief Device memory deallocation wrapper.
*
*  Releases a region of device memory (GPU RAM) previously obtained from
*  cudaMalloc(), checking the returned status through CheckCall() so that
*  any failure is reported with the caller's file name and line number.
*
*  \param d_ptr pointer to device memory, T*
*  \param file name of the file this function was invoked from, const char*
*  \param line line number of the function call, const int
*	\return void
*/
template <class T>
inline void __Free_GPU_memory (T* d_ptr, const char *file, const int line) {

	const cudaError_t status = cudaFree (d_ptr);
	CheckCall (" ...Freeing GPU memory... ", status, file, line);
}

/** \brief General data upload function (host --> device).
*
*  Copies data_size bytes from host memory (main RAM) to device memory
*  (GPU RAM) with a synchronous cudaMemcpy().  Both regions must already be
*  allocated before this function is called.  The returned status is routed
*  through CheckCall() so errors carry the caller's file/line.
*
*  \param d_ptr destination block in device memory, T*&
*  \param h_ptr source block in host memory, T*
*  \param data_size number of bytes to copy, const u32
*  \param file name of the file this function was invoked from, const char*
*  \param line line number of the function call, const int
*	\return void
*/
template <class T>
inline void __Write_to_GPU (T* &d_ptr, T* h_ptr, const u32 data_size,
		const char *file, const int line) {

	const cudaError_t status =
			cudaMemcpy (d_ptr, h_ptr, data_size, cudaMemcpyHostToDevice);
	CheckCall (" ...Writing data to GPU memory... ", status, file, line);
}

/** \brief General data readback function (device --> host).
*
*  Copies data_size bytes from device memory (GPU RAM) back to host memory
*  (main RAM) with a synchronous cudaMemcpy().  Both regions must already be
*  allocated before this function is called.  The returned status is routed
*  through CheckCall() so errors carry the caller's file/line.
*
*  \param h_ptr destination block in host memory, T*
*  \param d_ptr source block in device memory, T*&
*  \param data_size number of bytes to copy, const u32
*  \param file name of the file this function was invoked from, const char*
*  \param line line number of the function call, const int
*	\return void
*/
template <class T>
inline void __Read_from_GPU (T* h_ptr, T* &d_ptr, const u32 data_size,
		const char *file, const int line) {

	const cudaError_t status =
			cudaMemcpy (h_ptr, d_ptr, data_size, cudaMemcpyDeviceToHost);
	CheckCall (" ...Reading data from GPU memory... ", status, file, line);
}

/** \brief Texture reference binding function.
*
*  Binds a device memory region to a <float, 1D, cudaReadModeElementType>
*  texture reference via cudaBindTexture(), checking the returned status
*  through CheckCall().  (Texture references are a legacy CUDA feature.)
*
*  \param offset out-parameter receiving the offset from the device memory base address, size_t*
*  \param texref reference to the texture object, texture <float, 1, cudaReadModeElementType> &
*  \param devPtr device memory region to bind, const void*
*  \param file name of the file this function was invoked from, const char*
*  \param line line number of the function call, const int
*	\return void
*/
inline void __BindTex1D_rmetFP (size_t *offset,
		texture <float, 1, cudaReadModeElementType> &texref,
		const void *devPtr, const char *file, const int line) {

	const cudaError_t status = cudaBindTexture (offset, texref, devPtr);
	CheckCall (" ...Binding Texture... ", status, file, line);
}

/** \brief Texture reference unbinding function.
*
*  Removes the binding between a device memory region and a
*  <float, 1D, cudaReadModeElementType> texture reference via
*  cudaUnbindTexture(), checking the returned status through CheckCall().
*
*  \param texref reference to the texture object, texture <float, 1, cudaReadModeElementType> &
*  \param file name of the file this function was invoked from, const char*
*  \param line line number of the function call, const int
*	\return void
*/
inline void __UnbindTex1D_rmetFP (texture <float, 1, cudaReadModeElementType> &texref,
		const char *file, const int line) {

	const cudaError_t status = cudaUnbindTexture (texref);
	CheckCall (" ...Unbinding Texture... ", status, file, line);
}

#endif
