//----------------------------------------------------------------------------
// M A R I T I M E  R E S E A R C H  I N S T I T U T E  N E T H E R L A N D S
//----------------------------------------------------------------------------
// Copyright (C) 2011 - MARIN - All rights reserved - http://www.marin.nl
//----------------------------------------------------------------------------
// Program    : mermaid
// Module     : vbm
// File       : ipdiagmisc.h
// Author     : M.A. de Jong
//----------------------------------------------------------------------------

#ifndef IPDIAGMISC_H
#define	IPDIAGMISC_H

//----------------------------------------------------------------------------
// Device kernel: fill the first n elements of A with val.
// Expects a 1-D launch with gridDim.x * blockDim.x >= n; surplus threads
// past the end of the array return without touching memory.
//----------------------------------------------------------------------------
template <class T>
__global__ void memset_kernel(T* A, const int n, const T val)
{
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= n)
        return;                 // tail guard: grid rarely divides n evenly
    A[idx] = val;
}

//----------------------------------------------------------------------------
// Abort the program when a CUDA runtime call did not return cudaSuccess.
//
// status : return code of the preceding CUDA runtime API call
// msg    : caller-supplied context string identifying the failing call
//
// Fix over the original: the runtime's own description of the failure
// (cudaGetErrorString) is appended — a bare context tag like "malloc" gives
// no clue WHY the call failed — and diagnostics go to stderr, not stdout.
//----------------------------------------------------------------------------
inline void checkCudaError(cudaError_t status, const char msg[])
{
   if (status != cudaSuccess)
   {
      std::cerr << msg << ": " << cudaGetErrorString(status) << std::endl;
      exit(EXIT_FAILURE);
   }
}

//----------------------------------------------------------------------------
// Effective throughput of an operation that moved (size * val) elements of
// type T in 'time' units of time.
//
// time : elapsed time — presumably milliseconds from cudaEventElapsedTime,
//        in which case the result is in MB/s; TODO confirm against callers
// size : number of elements per pass
// val  : number of passes (or accesses per element)
//
// Fix over the original: size * val was an int*int multiply that overflows
// for large problems (e.g. size = 10^6, val = 10^4) before any promotion
// to floating point; promote each factor to double first.
//----------------------------------------------------------------------------
template <class T>
float throughput(float time, const int size, const int val)
{
    const double bytes = (double)size * (double)val * (double)sizeof(T);
    return (float)(bytes / (1e3 * time));
}

namespace ipdiagmisc
{

//----------------------------------------------------------------------------
// Release a device allocation (e.g. one obtained from initGpuVector),
// aborting via checkCudaError if the runtime reports a failure.
//----------------------------------------------------------------------------
template <class T>
inline void freeGpuVector(T* ptr)
{
    const cudaError_t status = cudaFree(ptr);
    checkCudaError(status, "free");
}

//----------------------------------------------------------------------------
// Allocate a device vector of n elements of type T; when the compile-time
// template flag is true, fill it with val via memset_kernel.
//
// n    : number of elements to allocate (>= 0)
// val  : fill value, used only when flag == true
//
// Returns the raw device pointer; release it with freeGpuVector.
// Aborts via checkCudaError on allocation or launch failure.
//
// Fixes over the original:
//  - integer ceiling division replaces (int)ceil((float)n / NUM_THREADS):
//    float has a 24-bit mantissa, so the cast loses precision for large n
//    and can under-provision the grid;
//  - n == 0 no longer launches a zero-block grid, which the runtime
//    rejects with cudaErrorInvalidConfiguration;
//  - n is widened to size_t before the byte-count multiply.
//----------------------------------------------------------------------------
template <class T, bool flag>
inline T* initGpuVector(const int n, const T val)
{
    T* dst;
    checkCudaError(cudaMalloc((void **)&dst, (size_t)n * sizeof(T)), "malloc");

    if (flag && n > 0)
    {
        const int nblocks = (n + NUM_THREADS - 1) / NUM_THREADS;  // ceil-div
        memset_kernel<T> <<< nblocks, NUM_THREADS >>> (dst, n, val);
        checkCudaError(cudaGetLastError(), "memset kernel");
    }

    return dst;
}

//----------------------------------------------------------------------------
// Copy 'size' elements of type T between two device buffers
// (device-to-device).
//
// dst  : destination device pointer
// src  : source device pointer
// size : number of elements (>= 0)
//
// Fix over the original: the cudaMemcpy return status was silently
// discarded — every other helper in this namespace aborts on failure via
// checkCudaError, so this one now does too. The element count is widened
// to size_t before the byte multiply.
//----------------------------------------------------------------------------
template <class T>
inline void memcpy(T* dst, T* src, int size)
{
    checkCudaError(cudaMemcpy(dst, src, (size_t)size * sizeof(T),
                              cudaMemcpyDeviceToDevice),
                   "memcpy");
}

} // namespace ipdiagmisc

#endif // IPDIAGMISC_H

