//----------------------------------------------------------------------------
// M A R I T I M E  R E S E A R C H  I N S T I T U T E  N E T H E R L A N D S
//----------------------------------------------------------------------------
// Copyright (C) 2011 - MARIN - All rights reserved - http://www.marin.nl
//----------------------------------------------------------------------------
// Program    : mermaid
// Module     : vbm
// File       : misc.h
// Author     : M.A. de Jong
//----------------------------------------------------------------------------

#ifndef MISC_H
#define	MISC_H

#include "kernels/misc_kernels.h"

#ifdef USEDOUBLE
// if double precision textures needed: use int2
// (CUDA has no native double texture format; doubles would have to be
// fetched via an integer format and reassembled in device code.)
#else
// Legacy texture *references* consumed by the kernels in
// kernels/misc_kernels.h: a generic reference plus a centre cell (CC) and
// its eight compass-point neighbours (N/E/S/W and the diagonals),
// presumably for 9-point stencil fetches — confirm against the kernels.
// NOTE(review): the texture-reference API is deprecated in CUDA 11.x and
// removed in CUDA 12; a migration to cudaTextureObject_t will eventually
// be required — verify the toolkit version this builds against.
texture <float, 2, cudaReadModeElementType> texRef;
texture <float, 2, cudaReadModeElementType> texCC;
texture <float, 2, cudaReadModeElementType> texNN;
texture <float, 2, cudaReadModeElementType> texEE;
texture <float, 2, cudaReadModeElementType> texSS;
texture <float, 2, cudaReadModeElementType> texWW;
texture <float, 2, cudaReadModeElementType> texNE;
texture <float, 2, cudaReadModeElementType> texSE;
texture <float, 2, cudaReadModeElementType> texSW;
texture <float, 2, cudaReadModeElementType> texNW;
#endif

// Checks a CUDA runtime status value and aborts the program with a
// diagnostic (message, file, line) when it is not cudaSuccess.
//
// Fixes over the previous version:
//  - wrapped in do { } while (0) so the macro behaves as a single
//    statement and composes safely with brace-less if/else;
//  - the local variable has a unique name so it cannot shadow — or
//    self-initialize from — a caller variable named 'status' passed as
//    the first argument.
#define checkCudaError(cudaErrorStatusValue, msg) \
do { \
    const cudaError_t checkCudaError_status_ = (cudaErrorStatusValue); \
    if (checkCudaError_status_ != cudaSuccess) \
    { \
        std::cout << "CUDA error: " \
            << cudaGetErrorString(checkCudaError_status_) << " in " << msg << \
            " (" << __FILE__ << ":" << __LINE__ << ")" << std::endl; \
        exit(EXIT_FAILURE); \
    } \
} while (0)

/*
inline void checkCudaError(cudaError_t status, const char msg[])
{
    if (status != cudaSuccess)
    {
        std::cout << "CUDA error: " << cudaGetErrorString(status) << " in " << msg << std::endl;
        exit(EXIT_FAILURE);
    }
}
*/



namespace common
{

// Allocates a host-side array of 'size' elements of type T and, when the
// 'flag' template parameter is true, initializes every element to 'val'.
// With USE_PAGELOCKED the memory is pinned (cudaHostAlloc), enabling
// faster and asynchronous host<->device transfers; otherwise plain malloc
// is used. The caller owns the memory and must release it with
// freeCpuVector().
//
// Fix: the malloc result was previously unchecked — an out-of-memory
// condition would have crashed in the fill loop instead of failing with a
// diagnostic.
template <class T, bool flag>
inline T* initCpuVector(const int size, const T val)
{
    T *ptr;
#if USE_PAGELOCKED
    checkCudaError(cudaHostAlloc((void **)&ptr, size * sizeof(T),
        cudaHostAllocDefault), "initCpuVector (pagelocked)");
#else
    ptr = (T *)malloc(size * sizeof(T));
    if (ptr == NULL && size > 0) // malloc(0) may legally return NULL
    {
        std::cout << "error: host allocation of " << size * sizeof(T)
            << " bytes failed in initCpuVector" << std::endl;
        exit(EXIT_FAILURE);
    }
#endif

    if (flag)
    {
        for (int i = 0; i < size; ++i)
            ptr[i] = val;
    }

    return ptr;
}

// Allocates a device-side array of 'size' elements of type T and, when
// the 'flag' template parameter is true, initializes every element to
// 'val' via kernel_memset. The caller owns the memory and must release it
// with freeGpuVector().
//
// Fix: the block count was computed as (int)ceil((float)size/NUM_THREADS);
// a float only carries 24 mantissa bits, so for size > 2^24 the quotient
// can round such that the block count undershoots, silently leaving the
// tail of the vector uninitialized. Integer ceil-division is exact. The
// size > 0 guard also avoids an invalid zero-block kernel launch.
template <class T, bool flag>
inline T* initGpuVector(const int size, const T val)
{
    T* ptr;
    checkCudaError(cudaMalloc((void **)&ptr, size * sizeof(T)),
        "initGpuVector (out of memory)");

    if (flag && size > 0)
    {
        const int nblocks = (size + NUM_THREADS - 1) / NUM_THREADS;
        kernel_memset<T> <<< nblocks, NUM_THREADS >>> (ptr, size, val);
        // Launch-configuration errors only surface via cudaGetLastError().
        checkCudaError(cudaGetLastError(), "kernel_memset");
    }

    return ptr;
}

// Uploads a host Array2D into a pitched device region.
// 'doff'/'soff' are element offsets applied to the destination and source
// respectively; 'pitch' is the destination row pitch in bytes. The source
// rows are tightly packed, so its pitch equals the copied width.
template <class T>
inline void fillGpuVector(T *dst, const int doff, size_t pitch, 
    const Array2D<T> *src, const int soff)
{
    const size_t rowBytes = src->columns() * sizeof(T);

    checkCudaError(
        cudaMemcpy2D(dst + doff, pitch,
                     src->getElements() + soff, rowBytes,
                     rowBytes, src->rows(),
                     cudaMemcpyHostToDevice),
        "fillGpuVector (copy)");
}


// Downloads a pitched device region into a host Array2D.
// 'soff' is an element offset into the device source; 'pitch' is the
// device row pitch in bytes. The destination rows are tightly packed, so
// its pitch equals the copied width.
template <class T>
inline void  getGpuVector(Array2D<T> *dst, const T *src, 
    const int soff, const size_t pitch)
{
    const size_t rowBytes = dst->columns() * sizeof(T);

    checkCudaError(
        cudaMemcpy2D(dst->getElements(), rowBytes,
                     src + soff, pitch,
                     rowBytes, dst->rows(),
                     cudaMemcpyDeviceToHost),
        "getGpuVector (copy)");
}

// Releases device memory previously obtained through initGpuVector().
// cudaFree(NULL) is a no-op, so a null pointer is safe here.
template <class T>
inline void freeGpuVector(T* ptr)
{
    const cudaError_t freeStatus = cudaFree(ptr);
    checkCudaError(freeStatus, "freeGpuVector (free)");
}

// Releases host memory previously obtained through initCpuVector().
// The deallocator must match the allocator: pinned memory goes back
// through the CUDA runtime, plain memory through free().
template <class T>
inline void freeCpuVector(T* ptr)
{
#if USE_PAGELOCKED
    const cudaError_t freeStatus = cudaFreeHost(ptr);
    checkCudaError(freeStatus, "freeCpuVector (free)");
#else
    free(ptr);
#endif
}

// Repacks 'src' into 'dst' via kernel_from4to1 (per the name, from a
// "4"-plane layout to a "1"-plane layout — the actual data movement is
// defined in kernels/misc_kernels.h and is not visible here).
// nx0/ny0: padded grid dimensions including a BORDER_WIDTH2-wide border.
// NOTE(review): dimGrid/dimBlock are launch-configuration objects declared
// outside this header; mutating shared globals is not thread-safe if
// several host threads launch kernels concurrently — confirm usage.
// NOTE(review): the integer divisions assume (nx0 - BORDER_WIDTH2) and
// (ny0 - BORDER_WIDTH2) are exact multiples of DIM_COMPUTE_BLOCK; any
// remainder would be silently skipped.
template <class T>
inline void from4to1(T* dst, T* src, const int nx0, const int ny0,
    const Grid grid)
{
    dimGrid.x = (nx0 - BORDER_WIDTH2) / DIM_COMPUTE_BLOCK;
    dimGrid.y = (ny0 - BORDER_WIDTH2) / DIM_COMPUTE_BLOCK;
    dimBlock.x = DIM_COMPUTE_BLOCK;
    dimBlock.y = DIM_COMPUTE_BLOCK / T1TO4_BF;

    // Launch errors do not surface from the <<< >>> expression itself;
    // cudaGetLastError() retrieves them explicitly.
    kernel_from4to1<T> <<< dimGrid, dimBlock >>> (dst, src, grid, nx0);
    checkCudaError(cudaGetLastError(), "kernel_from4to1");
}

// Inverse companion of from4to1: repacks 'src' into 'dst' via
// kernel_from1to4 (semantics defined in kernels/misc_kernels.h, not
// visible here).
// nx0/ny0: padded grid dimensions including a BORDER_WIDTH2-wide border.
// NOTE(review): relies on the shared dimGrid/dimBlock globals declared
// outside this header (not thread-safe across host threads) and assumes
// the interior dimensions divide evenly by DIM_COMPUTE_BLOCK — see the
// notes on from4to1.
template <class T>
inline void from1to4(T* dst, const T* src, 
    const int nx0, const int ny0, const Grid grid)
{
    dimGrid.x = (nx0 - BORDER_WIDTH2) / DIM_COMPUTE_BLOCK;
    dimGrid.y = (ny0 - BORDER_WIDTH2) / DIM_COMPUTE_BLOCK;
    dimBlock.x = DIM_COMPUTE_BLOCK;
    dimBlock.y = DIM_COMPUTE_BLOCK / T1TO4_BF;

    // Retrieve launch-configuration errors explicitly.
    kernel_from1to4<T> <<< dimGrid, dimBlock >>> (dst, src, grid, nx0);
    checkCudaError(cudaGetLastError(), "kernel_from1to4");
}

// Device-to-device copy of 'size' elements of type T.
// NOTE: inside namespace common this name shadows ::memcpy / std::memcpy;
// callers in this namespace must qualify the C library function.
// Fix: the cudaMemcpy return status was previously discarded — unlike
// every other wrapper in this file — so copy failures (e.g. invalid
// device pointers) went unnoticed until a later, unrelated call.
template <class T>
inline void memcpy(T* dst, const T* src, const int size)
{
    checkCudaError(cudaMemcpy(dst, src, size * sizeof(T),
        cudaMemcpyDeviceToDevice), "common::memcpy (copy)");
}

// Launches kernel_divideR2in4, which (per its name) redistributes grid0's
// data into grid1 as four parts — the actual semantics live in
// kernels/misc_kernels.h and are not visible here. The launch covers
// grid0.cx x grid0.cy cells; R2TO4_BF is a blocking factor applied to the
// block height.
// NOTE(review): assumes grid0.cx and grid0.cy are exact multiples of
// DIM_COMPUTE_BLOCK, and mutates the shared dimGrid/dimBlock globals
// declared outside this header (not thread-safe across host threads).
template <class T>
inline void divideR2in4(Grid grid1, const Grid grid0)
{
    dimGrid.x = grid0.cx / DIM_COMPUTE_BLOCK;
    dimGrid.y = grid0.cy / DIM_COMPUTE_BLOCK;
    dimBlock.x = DIM_COMPUTE_BLOCK;
    dimBlock.y = DIM_COMPUTE_BLOCK / R2TO4_BF;

    // Retrieve launch-configuration errors explicitly.
    kernel_divideR2in4<T> <<< dimGrid, dimBlock >>> (grid1, grid0); 
    checkCudaError(cudaGetLastError(), "kernel_divideR2in4");
}

// Inverse companion of divideR2in4: launches kernel_compress4toR2 to
// gather the four parts of grid0 back into grid1 (per the kernel name;
// semantics defined in kernels/misc_kernels.h, not visible here). The
// launch is sized on the *destination* grid1 here, unlike divideR2in4
// which sizes on its source grid0.
// NOTE(review): assumes grid1.cx and grid1.cy are exact multiples of
// DIM_COMPUTE_BLOCK, and mutates the shared dimGrid/dimBlock globals
// declared outside this header (not thread-safe across host threads).
template <class T>
inline void compress4toR2(Grid grid1, const Grid grid0)
{
    dimGrid.x = grid1.cx / DIM_COMPUTE_BLOCK;
    dimGrid.y = grid1.cy / DIM_COMPUTE_BLOCK;
    dimBlock.x = DIM_COMPUTE_BLOCK;
    dimBlock.y = DIM_COMPUTE_BLOCK / R2TO4_BF;

    // Retrieve launch-configuration errors explicitly.
    kernel_compress4toR2<T> <<< dimGrid, dimBlock >>> (grid1, grid0);
    checkCudaError(cudaGetLastError(), "kernel_compress4toR2");
}


// Uploads a host Array2D into the interior of a larger device grid whose
// rows are 'ld' elements wide and whose halo is BORDER_WIDTH cells thick.
// The interior therefore starts (BORDER_WIDTH - 1) rows down and
// (BORDER_WIDTH - 1) columns in from the device base pointer.
template <class T>
inline void setGpu2DVector(T *dst,
                           const Array2D<T> *src,
                           const unsigned int ld) // leading dimension destination array
{
    T *interior = dst + (BORDER_WIDTH - 1) * ld + (BORDER_WIDTH - 1);
    const size_t devPitch    = ld * sizeof(T);
    const size_t hostRowBytes = src->columns() * sizeof(T);

    checkCudaError(
        cudaMemcpy2D(interior, devPitch,
                     src->getElements(), hostRowBytes,
                     hostRowBytes, src->rows(),
                     cudaMemcpyHostToDevice),
        "setGpu2DVector (copy)");
}


// Downloads the interior of a bordered device grid (rows of 'ld' elements,
// halo of BORDER_WIDTH cells) into a host Array2D. Mirror operation of
// setGpu2DVector: the interior starts (BORDER_WIDTH - 1) rows down and
// (BORDER_WIDTH - 1) columns in from the device base pointer.
template <class T>
inline void getGpu2DVector(Array2D<T> *dst, // destination
                           const T *src,    // source
                           const int ld)    // leading dimension source array
{
    const T *interior = src + (BORDER_WIDTH - 1) * ld + (BORDER_WIDTH - 1);
    const size_t devPitch     = ld * sizeof(T);
    const size_t hostRowBytes = dst->columns() * sizeof(T);

    checkCudaError(
        cudaMemcpy2D(dst->getElements(), hostRowBytes,
                     interior, devPitch,
                     hostRowBytes, dst->rows(),
                     cudaMemcpyDeviceToHost),
        "getGpu2DVector (copy)");
}


// Uploads exactly one element of type T from host ('src') to device
// ('dst').
template <class T>
inline void setSingleValue(T* dst,
                           const T *src)
{
    const cudaError_t copyStatus =
        cudaMemcpy(dst, src, sizeof(T), cudaMemcpyHostToDevice);
    checkCudaError(copyStatus, "setSingleValue (copy)");
}


// Uploads a contiguous run of 3 * (rows() - 3) boundary elements, starting
// at source element 3, to the start of the device buffer.
// NOTE(review): the constant 3 presumably encodes the boundary layout
// produced elsewhere — confirm against the Array2D producer.
template <class T>
inline void setGpuBdVector(T *dst,
                           const Array2D<T> *src)
{
    const size_t nbytes = 3 * (src->rows() - 3) * sizeof(T);

    checkCudaError(cudaMemcpy(dst, src->getElements() + 3, nbytes,
        cudaMemcpyHostToDevice), "setGpuBdVector (copy)");
}


// Uploads the incident-wave table: (columns() - 3) elements per row for
// (rows() - 1) rows, starting one full row plus one element into the
// source, into a device buffer with a row pitch of 'dstPitch' elements.
// NOTE(review): 'numberofwaves' is unused; kept only for interface
// compatibility (see the original TODO about replacing it with rows()).
template <class T>
inline void setGpuIWVector(T *dst,
                           const int dstPitch,
                           const Array2D<T> *src,
                           const int numberofwaves) // TODO not necessary; replace by rows()
{
    const T *srcStart           = src->getElements() + src->columns() + 1;
    const size_t srcPitchBytes  = src->columns() * sizeof(T);
    const size_t widthBytes     = (src->columns() - 3) * sizeof(T);

    checkCudaError(
        cudaMemcpy2D(dst, dstPitch * sizeof(T),
                     srcStart, srcPitchBytes,
                     widthBytes, src->rows() - 1,
                     cudaMemcpyHostToDevice),
        "setGpuIWVector (copy)");
}


// Downloads a contiguous run of 3 * (rows() - 3) boundary elements from
// the start of the device buffer into the host array at element 3.
// Mirror operation of setGpuBdVector.
template <class T>
inline void getGpuBdVector(Array2D<T> *dst,
                           const T *src)
{
    const size_t nbytes = 3 * (dst->rows() - 3) * sizeof(T);

    checkCudaError(cudaMemcpy(dst->getElements() + 3, src, nbytes,
        cudaMemcpyDeviceToHost), "getGpuBdVector (copy)");
}


// Uploads (size() - 3) elements of a host Array1D, starting at element 1,
// to the start of the device buffer.
// NOTE(review): the offsets 1 and 3 presumably skip ghost/halo entries in
// the Array1D layout — confirm against the producer.
template <class T>
inline void setGpu1DVector(T *dst, 
                           const Array1D<T> *src)
{
    const size_t nbytes = (src->size() - 3) * sizeof(T);

    checkCudaError(cudaMemcpy(dst, src->getElements() + 1, nbytes,
        cudaMemcpyHostToDevice), "setGpu1DVector (copy)");
}


// Downloads (size() - 3) elements from the start of the device buffer
// into a host Array1D at element 1. Mirror operation of setGpu1DVector.
template <class T>
inline void getGpu1DVector(Array1D<T> *dst, 
                           const T *src)
{
    const size_t nbytes = (dst->size() - 3) * sizeof(T);

    checkCudaError(cudaMemcpy(dst->getElements() + 1, src, nbytes,
        cudaMemcpyDeviceToHost), "getGpu1DVector (copy)");
}

// Asynchronous variant of getGpu1DVector: downloads (size() - 3) elements
// into the host array at element 1, enqueued on 'stream'. The caller must
// synchronize the stream before reading the host data.
// NOTE(review): cudaMemcpyAsync only overlaps with computation when the
// host buffer is page-locked; with pageable memory the transfer silently
// degrades to a synchronous copy — confirm the Array1D storage is pinned
// (e.g. allocated via initCpuVector with USE_PAGELOCKED).
template <class T>
inline void getGpu1DVectorAsync(Array1D<T> *dst, 
                           const T *src,
                           cudaStream_t stream)
{
    const size_t nbytes = (dst->size() - 3) * sizeof(T);

    checkCudaError(cudaMemcpyAsync(dst->getElements() + 1, src, nbytes,
        cudaMemcpyDeviceToHost, stream), "getGpu1DVectorAsync (copy)");
}

} // namespace common

#endif // MISC_H

