/**
 * @file Utils.cuh
 * @brief Defines CUDA utility functions; this file should not be included by interface header files.
 * @author zpfeng
 * @copyright (c) Copyright 2013~2050 - PANGO MICROSYSTEMS, INC.
 * ALL RIGHTS RESERVED.
 */

#ifndef __UTILS_CUH__
#define __UTILS_CUH__

#include <assert.h>
#include <chrono>
#include <cmath>
#include <cstdlib>
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <fstream>
#include <iostream>
#include <sstream>
#include <stdexcept>
#include <string>
#include <type_traits>
#include <utility>

namespace fabric_space
{
/**
 * CeilDiv for non-integral (floating point) types.
 * Real division already yields the exact quotient, so no rounding-up
 * correction is applied.
 */
template <typename T> inline __host__ __device__ typename std::enable_if<!std::is_integral<T>::value, T>::type CeilDiv(T a, T b)
{
    const T quotient = a / b;
    return quotient;
}

/**
 * Template specialization for integral types: returns ceil(a / b).
 *
 * Fixes over the classic (a + b - 1) / b form:
 *  - no intermediate sum, so a near the maximum of T cannot overflow;
 *  - correct for negative dividends (e.g. CeilDiv(-4, 2) is -2, while the
 *    old formula returned (-3)/2 == -1).
 * C++ integer division truncates toward zero, so we round up only when a
 * remainder exists and the exact quotient is positive (same-sign operands).
 */
template <typename T> inline __host__ __device__ typename std::enable_if<std::is_integral<T>::value, T>::type CeilDiv(T a, T b)
{
    T quot = a / b;
    const T rem = a % b;
    if (rem != 0 && ((a < 0) == (b < 0)))
    {
        quot += 1;
    }
    return quot;
}

/**
 * Define constant value PI.
 */
#define PI (3.141592653589793238462643383279502884197169399375105820974944592307816406286208998628034825342117067982148086513282306647093844609550582231725359408128481)

#ifndef __FUNCTION_NAME__
#ifdef _MSC_VER
#define __FUNCTION_NAME__ __FUNCTION__
#else
#define __FUNCTION_NAME__ __PRETTY_FUNCTION__
#endif
#endif

#define THROW_EC(exception_class, ...) throw exception_class(__VA_ARGS__, __FILE__, __FUNCTION_NAME__, __LINE__)

/**
 * Development error for CUDA accelerated place operator.
 * Stores a formatted message containing the failure reason and the exact
 * source location where the error was raised.
 * @author zpfeng
 */
class PODevError : public std::runtime_error
{
public:
    /** Build the exception from a reason string plus the throw site. */
    PODevError(std::string reason, const char *file_name, const char *func_name, unsigned int line_num)
        : std::runtime_error(Format(reason, file_name, func_name, line_num))
    {
    }
    /** Compose the human-readable message stored inside the exception. */
    static inline std::string Format(const std::string &reason, const char *file_name, const char *func_name, unsigned int line_num)
    {
        std::ostringstream msg;
        msg << "Development Error: " << reason << " in function " << func_name << " in file " << file_name << " at line " << line_num << ",";
        return msg.str();
    }
};

/**
 * Check a CUDA runtime call status; on failure, report the call site to
 * stderr, reset the device, and throw a PODevError.
 *
 * Fixes:
 *  - removed the nested `if (status != cudaSuccess)` — it was inside the
 *    failure branch and therefore always true;
 *  - removed getchar(): blocking on stdin hangs every non-interactive run;
 *  - the thrown message now carries the CUDA error string instead of the
 *    fixed text "CUDA error".
 */
inline void __cudaSafeCall(cudaError_t status, const char *file, const char *func_name, const int line)
{
    if (status != cudaSuccess)
    {
        fprintf(stderr, "CUDA error in file '%s', function '%s', line %d\n \nerror %d \nterminating!\n%s\n", file, func_name, line, status, cudaGetErrorString(status));
        // Leave the device in a clean state before aborting via exception.
        cudaDeviceReset();
        THROW_EC(PODevError, std::string("CUDA error: ") + cudaGetErrorString(status));
    }
}

/**
 * Lightweight status check: return true when `status` signals a CUDA
 * error, false on success.
 *
 * Bug fix: the previous implementation returned true on BOTH paths, so a
 * caller could never distinguish success from failure.
 */
inline bool __cudaSafeCallCheck(cudaError_t status)
{
    return status != cudaSuccess;
}

/**
 * Allocate `size` elements of device memory into *var.
 * @param var address of the device pointer to fill (T is the pointer type).
 *
 * Bug fix: the post-allocation null check tested `var` (the address of the
 * output pointer, never null here) instead of `*var`, the pointer actually
 * produced by cudaMalloc / cudaMallocManaged.
 */
template <typename T, typename SizeType> inline void __AllocateCUDA(T *var, SizeType size, const char *file, const char *func_name, const int line)
{
#ifdef ALWAYS_USE_MALLOC_MANAGED
    // we could visualize data in VS if we always use cudaMallocManaged to allocate GPU memory data
    __cudaSafeCall(cudaMallocManaged(var, (size) * sizeof(typename std::remove_pointer<T>::type)), file, func_name, line);
#else
    __cudaSafeCall(cudaMalloc(var, (size) * sizeof(typename std::remove_pointer<T>::type)), file, func_name, line);
#endif
    // Belt-and-braces: __cudaSafeCall already throws on failure, but guard
    // against an allocator that reports success with a null result.
    if (*var == nullptr)
    {
        THROW_EC(PODevError, "fail to malloc");
    }
}

#define AllocateCUDA(var, size) __AllocateCUDA(var, size, __FILE__, __FUNCTION_NAME__, __LINE__)

/**
 * Allocate `size` elements of managed (unified) memory into *var.
 *
 * Bug fix: the null check tested `var` (the address of the output pointer)
 * instead of `*var`, the pointer produced by cudaMallocManaged.
 */
template <typename T, typename SizeType> inline void __AllocateManagedCUDA(T *var, SizeType size, const char *file, const char *func_name, const int line)
{
    __cudaSafeCall(cudaMallocManaged(var, (size) * sizeof(typename std::remove_pointer<T>::type)), file, func_name, line);
    if (*var == nullptr)
    {
        THROW_EC(PODevError, "fail to malloc");
    }
}

#define AllocateManagedCUDA(var, size) __AllocateManagedCUDA(var, size, __FILE__, __FUNCTION_NAME__, __LINE__)

/**
 * Free a device/managed pointer obtained through the Allocate*CUDA helpers
 * and reset it to nullptr. Passing a null pointer is a no-op.
 */
template <typename T> inline void __DestroyCUDA(T *&var, const char *file, const char *func_name, const int line)
{
    if (var != nullptr)
    {
        __cudaSafeCall(cudaFree(var), file, func_name, line);
        var = nullptr;
    }
}

#define DestroyCUDA(var) __DestroyCUDA(var, __FILE__, __FUNCTION_NAME__, __LINE__)

/**
 * Byte-wise memset of `size` device elements pointed to by `var`.
 * A null pointer is silently ignored. Note cudaMemset sets bytes, so only
 * fills such as 0 produce a meaningful per-element value.
 */
template <typename T, typename U, typename SizeType> inline void __MemsetCUDA(T var, U value, SizeType size, const char *file, const char *func_name, const int line)
{
    if (var == nullptr)
    {
        return;
    }
    const size_t num_bytes = (size) * sizeof(typename std::remove_pointer<decltype(var)>::type);
    __cudaSafeCall(cudaMemset(var, value, num_bytes), file, func_name, line);
}

#define MemsetCUDA(var, value, size) __MemsetCUDA(var, value, size, __FILE__, __FUNCTION_NAME__, __LINE__)

/**
 * Allocate `count` device elements into *out_ptr, then memset them to
 * `fill_value` (byte-wise, see __MemsetCUDA).
 */
template <typename T, typename U, typename SizeType> inline void __AllocateMemsetCUDA(T *out_ptr, U fill_value, SizeType count, const char *file, const char *func_name, const int line)
{
    __AllocateCUDA(out_ptr, count, file, func_name, line);
    __MemsetCUDA(*out_ptr, fill_value, count, file, func_name, line);
}

#define AllocateMemsetCUDA(var, value, size) __AllocateMemsetCUDA(var, value, size, __FILE__, __FUNCTION_NAME__, __LINE__)

/**
 * Allocate `count` managed-memory elements into *out_ptr, then memset
 * them to `fill_value` (byte-wise, see __MemsetCUDA).
 */
template <typename T, typename U, typename SizeType> inline void __AllocateManagedMemsetCUDA(T *out_ptr, U fill_value, SizeType count, const char *file, const char *func_name, const int line)
{
    __AllocateManagedCUDA(out_ptr, count, file, func_name, line);
    __MemsetCUDA(*out_ptr, fill_value, count, file, func_name, line);
}

#define AllocateManagedMemsetCUDA(var, value, size) __AllocateManagedMemsetCUDA(var, value, size, __FILE__, __FUNCTION_NAME__, __LINE__)

/**
 * Copy `count` elements from a host buffer into device buffer `dst`.
 * A null destination is silently ignored.
 */
template <typename T, typename U, typename SizeType> inline void __MemcpyHostToDevice(T dst, U src, SizeType count, const char *file, const char *func_name, const int line)
{
    if (dst == nullptr)
    {
        return;
    }
    const size_t num_bytes = sizeof(typename std::remove_pointer<decltype(dst)>::type) * (count);
    __cudaSafeCall(cudaMemcpy(dst, src, num_bytes, cudaMemcpyHostToDevice), file, func_name, line);
}

#define MemcpyHostToDevice(var, rhs, size) __MemcpyHostToDevice(var, rhs, size, __FILE__, __FUNCTION_NAME__, __LINE__)

/**
 * Copy `count` elements from a device buffer into host buffer `dst`.
 * A null destination is silently ignored.
 */
template <typename T, typename U, typename SizeType> inline void __MemcpyDeviceToHost(T dst, U src, SizeType count, const char *file, const char *func_name, const int line)
{
    if (dst == nullptr)
    {
        return;
    }
    const size_t num_bytes = sizeof(typename std::remove_pointer<decltype(dst)>::type) * (count);
    __cudaSafeCall(cudaMemcpy(dst, src, num_bytes, cudaMemcpyDeviceToHost), file, func_name, line);
}

#define MemcpyDeviceToHost(var, rhs, size) __MemcpyDeviceToHost(var, rhs, size, __FILE__, __FUNCTION_NAME__, __LINE__)

/**
 * Copy `count` elements between two device buffers.
 * A null destination is silently ignored.
 */
template <typename T, typename U, typename SizeType> inline void __MemcpyDeviceToDevice(T dst, U src, SizeType count, const char *file, const char *func_name, const int line)
{
    if (dst == nullptr)
    {
        return;
    }
    const size_t num_bytes = sizeof(typename std::remove_pointer<decltype(dst)>::type) * (count);
    __cudaSafeCall(cudaMemcpy(dst, src, num_bytes, cudaMemcpyDeviceToDevice), file, func_name, line);
}

#define MemcpyDeviceToDevice(var, rhs, size) __MemcpyDeviceToDevice(var, rhs, size, __FILE__, __FUNCTION_NAME__, __LINE__)

/**
 * Allocate `count` device elements into *dst_ptr and initialize them from
 * the host buffer `host_src`.
 */
template <typename T, typename U, typename SizeType> inline void __AllocateCopyCUDA(T *dst_ptr, U host_src, SizeType count, const char *file, const char *func_name, const int line)
{
    __AllocateCUDA(dst_ptr, count, file, func_name, line);
    __MemcpyHostToDevice(*dst_ptr, host_src, count, file, func_name, line);
}

#define AllocateCopyCUDA(var, rhs, size) __AllocateCopyCUDA(var, rhs, size, __FILE__, __FUNCTION_NAME__, __LINE__)

/**
 * Return true when val is a positive power of two; zero returns false.
 * Uses the classic val & (val - 1) trick, which clears the lowest set bit.
 */
template <typename T = unsigned> inline __device__ __host__ bool IsPowerOf2(T val)
{
    if (val == 0)
    {
        return false;
    }
    return (val & (val - 1)) == 0;
}

/**
 * Exchange the values of x and y; usable from host and device code.
 */
template <typename T> inline __device__ __host__ void Swap(T &x, T &y)
{
    const T old_x(x);
    x = y;
    y = old_x;
}

/**
 * Flatten a (row, col) pair into a row-major linear index for a matrix
 * whose rows hold N elements.
 */
inline __device__ int INDEX(const int hid, const int wid, const int N)
{
    return hid * N + wid;
}

/**
 * Floor of log2(n), computed branch-free with a de Bruijn multiply-and-lookup.
 * The shift/OR cascade smears the most significant set bit into every lower
 * position, turning n into 2^(k+1) - 1; multiplying by the 64-bit de Bruijn
 * constant then places a unique 6-bit code in the top bits, which the table
 * maps back to the bit index k.
 * NOTE(review): n == 0 yields table[0] == 0, the same result as n == 1 —
 * callers must not rely on distinguishing those inputs.
 */
inline __device__ __host__ int LogBase2(uint64_t n)
{
    // Lookup table keyed by the 6-bit de Bruijn code; order is significant.
    static const int table[64] = { 0, 58, 1, 59, 47, 53, 2, 60, 39, 48, 27, 54, 33, 42, 3, 61, 51, 37, 40, 49, 18, 28, 20, 55, 30, 34, 11, 43, 14, 22, 4, 62, 57, 46, 52, 38, 26,
        32, 41, 50, 36, 17, 19, 29, 10, 13, 21, 56, 45, 25, 31, 35, 16, 9, 12, 44, 24, 15, 8, 23, 7, 6, 5, 63 };

    // Propagate the highest set bit downward: n becomes all-ones below it.
    n |= n >> 1;
    n |= n >> 2;
    n |= n >> 4;
    n |= n >> 8;
    n |= n >> 16;
    n |= n >> 32;

    return table[(n * 0x03f6eaf2cd271461) >> 58];
}

/**
 * Allocate an M x N host matrix as an array of M row pointers, each row
 * holding N elements. Release with DestroyMatrix.
 */
template <typename T> T **AllocateMatrix(int M, int N)
{
    T **rows = new T *[M];
    for (int r = 0; r < M; ++r)
    {
        rows[r] = new T[N];
    }
    return rows;
}

/**
 * Release a matrix allocated by AllocateMatrix and reset the caller's
 * pointer, mirroring the __DestroyCUDA convention.
 * @param data reference to the row-pointer array; null is a no-op.
 * @param M number of rows originally allocated.
 *
 * Improvements: a null `data` no longer dereferences/crashes, and the
 * by-reference parameter is now actually used to null the pointer after
 * deletion so stale pointers cannot be double-freed.
 */
template <typename T> void DestroyMatrix(T **&data, int M)
{
    if (data == nullptr)
    {
        return;
    }
    for (int i = 0; i < M; i++)
    {
        delete[] data[i];
    }
    delete[] data;
    data = nullptr;
}

/**
 * Copy a device array to the host and print it to stdout as "%g" values.
 * @param x device pointer to n elements of T (must convert to double).
 * @param str label printed before the values.
 *
 * Improvement: the cudaMemcpy status is now checked — previously a failed
 * copy silently printed uninitialized host memory.
 */
template <typename T> void PrintCUDAArray(const T *x, const int n, const char *str)
{
    printf("%s[%d] = ", str, n);
    T *host_x = (T *)malloc(n * sizeof(T));
    if (host_x == NULL)
    {
        printf("failed to allocate memory on CPU\n");
        return;
    }
    cudaError_t status = cudaMemcpy(host_x, x, n * sizeof(T), cudaMemcpyDeviceToHost);
    if (status != cudaSuccess)
    {
        printf("failed to copy device data to CPU: %s\n", cudaGetErrorString(status));
        free(host_x);
        return;
    }
    for (int i = 0; i < n; ++i)
    {
        printf("%g ", double(host_x[i]));
    }
    printf("\n");

    free(host_x);
}

/**
 * Copy a device array to the host and write one element per line to file
 * `fn` (truncating any existing file).
 *
 * Improvements: the file open and the cudaMemcpy status are now checked —
 * previously a failed copy silently dumped uninitialized host memory.
 */
template <typename T> void PrintCUDAArrayToFile(const T *x, const int n, std::string fn)
{
    std::ofstream ofs(fn.c_str(), std::ios_base::out | std::ios_base::trunc);
    if (!ofs)
    {
        printf("failed to open file %s\n", fn.c_str());
        return;
    }
    T *host_x = (T *)malloc(n * sizeof(T));
    if (host_x == NULL)
    {
        printf("failed to allocate memory on CPU\n");
        return;
    }
    cudaError_t status = cudaMemcpy(host_x, x, n * sizeof(T), cudaMemcpyDeviceToHost);
    if (status != cudaSuccess)
    {
        printf("failed to copy device data to CPU: %s\n", cudaGetErrorString(status));
        free(host_x);
        return;
    }
    for (int i = 0; i < n; ++i)
    {
        ofs << host_x[i] << std::endl;
    }
    free(host_x);
    ofs.close();
}

/**
 * Copy a device array of complex values to the host and print them as
 * "re+imi". T must expose .x (real) and .y (imaginary) members, e.g.
 * float2 / cuComplex.
 *
 * Improvement: the cudaMemcpy status is now checked — previously a failed
 * copy silently printed uninitialized host memory.
 */
template <typename T> void PrintCUDAComplexArray(const T *x, const int n, const char *str)
{
    printf("%s[%d] = ", str, n);
    T *host_x = (T *)malloc(n * sizeof(T));
    if (host_x == NULL)
    {
        printf("failed to allocate memory on CPU\n");
        return;
    }
    cudaError_t status = cudaMemcpy(host_x, x, n * sizeof(T), cudaMemcpyDeviceToHost);
    if (status != cudaSuccess)
    {
        printf("failed to copy device data to CPU: %s\n", cudaGetErrorString(status));
        free(host_x);
        return;
    }
    for (int i = 0; i < n; ++i)
    {
        printf("%g+%gi ", host_x[i].x, host_x[i].y);
    }
    printf("\n");

    free(host_x);
}

/**
 * Copy a single device-resident scalar to the host and print it as "%g".
 * @param x reference whose address is a device pointer (device-to-host copy).
 *
 * Improvement: the cudaMemcpy status is now checked — previously a failed
 * copy silently printed an uninitialized value.
 */
template <typename T> void PrintCUDAScalar(const T &x, const char *str)
{
    printf("%s = ", str);
    T *host_x = (T *)malloc(sizeof(T));
    if (host_x == NULL)
    {
        printf("failed to allocate memory on CPU\n");
        return;
    }
    cudaError_t status = cudaMemcpy(host_x, &x, sizeof(T), cudaMemcpyDeviceToHost);
    if (status != cudaSuccess)
    {
        printf("failed to copy device data to CPU: %s\n", cudaGetErrorString(status));
        free(host_x);
        return;
    }
    printf("%g\n", double(*host_x));

    free(host_x);
}

/**
 * Copy an m x n row-major device matrix to the host and print it, one row
 * per line, as "%g" values.
 *
 * Fixes:
 *  - printf("%g", T(...)) was undefined behavior for integral T (%g
 *    requires a double); elements are now cast to double, matching
 *    PrintCUDAArray;
 *  - the cudaMemcpy status is now checked instead of silently printing
 *    uninitialized host memory.
 */
template <typename T> void PrintCUDA2DArray(const T *x, const int m, const int n, const char *str)
{
    printf("%s[%dx%d] = \n", str, m, n);
    T *host_x = (T *)malloc(m * n * sizeof(T));
    if (host_x == NULL)
    {
        printf("failed to allocate memory on CPU\n");
        return;
    }
    cudaError_t status = cudaMemcpy(host_x, x, m * n * sizeof(T), cudaMemcpyDeviceToHost);
    if (status != cudaSuccess)
    {
        printf("failed to copy device data to CPU: %s\n", cudaGetErrorString(status));
        free(host_x);
        return;
    }
    for (int i = 0; i < m * n; ++i)
    {
        if (i && (i % n) == 0)
        {
            printf("\n");
        }
        printf("%g ", double(host_x[i]));
    }
    printf("\n");

    free(host_x);
}

/**
 * Copy an m x n row-major device matrix of complex values to the host and
 * print it, one row per line, as "re+imi". T must expose .x/.y members.
 *
 * Improvement: the cudaMemcpy status is now checked — previously a failed
 * copy silently printed uninitialized host memory.
 */
template <typename T> void PrintCUDA2DComplexArray(const T *x, const int m, const int n, const char *str)
{
    printf("%s[%dx%d] = \n", str, m, n);
    T *host_x = (T *)malloc(m * n * sizeof(T));
    if (host_x == NULL)
    {
        printf("failed to allocate memory on CPU\n");
        return;
    }
    cudaError_t status = cudaMemcpy(host_x, x, m * n * sizeof(T), cudaMemcpyDeviceToHost);
    if (status != cudaSuccess)
    {
        printf("failed to copy device data to CPU: %s\n", cudaGetErrorString(status));
        free(host_x);
        return;
    }
    for (int i = 0; i < m * n; ++i)
    {
        if (i && (i % n) == 0)
        {
            printf("\n");
        }
        printf("%g+%gi ", host_x[i].x, host_x[i].y);
    }
    printf("\n");

    free(host_x);
}

/**
 * Triangle density compute function: signed length of the overlap between
 * the node interval [node_xl, node_xh] and the bin interval
 * [bin_xl, bin_xh]. Negative when the intervals do not overlap.
 */
template <typename T> inline T __host__ __device__ TriangleDensity(T node_xl, T node_xh, T bin_xl, T bin_xh)
{
    const T overlap_hi = min(node_xh, bin_xh);
    const T overlap_lo = max(node_xl, bin_xl);
    return overlap_hi - overlap_lo;
}

/**
 * Exact density compute function: the interval overlap length clamped to
 * zero, so the result is never negative.
 */
template <typename T> inline T __host__ __device__ ExactDensity(T node_xl, T node_xh, T bin_xl, T bin_xh)
{
    const T raw_density = TriangleDensity(node_xl, node_xh, bin_xl, bin_xh);
    return max((T)0.0, raw_density);
}

/**
 * A class generalized scaled atomic addition for floating point number and
 * integers. For integer, we use it as a fixed point number with the LSB
 * part for fractions. This primary template (bool parameter false) covers
 * the non-integral case and forwards directly to CUDA atomicAdd.
 */
template <typename T, bool = std::is_integral<T>::value> struct AtomicAdd
{
    explicit AtomicAdd(T = 1)
    {
    }
    // Atomically add v (converted to T) to *dst; returns the old value.
    template <typename V> __device__ __forceinline__ T operator()(T *dst, V v) const
    {
        return atomicAdd(dst, static_cast<T>(v));
    }
};

/**
 * Integral specialization: treats T as a fixed point value whose fraction
 * is carried in the low bits, scaled by _scale_factor.
 */
template <typename T> struct AtomicAdd<T, true>
{
    T _scale_factor;  //!< a scale factor to scale fraction into integer

    explicit AtomicAdd(T sf = 1)
        : _scale_factor(sf)
    {
    }
    // Accessor for the configured scale factor.
    T ScaleFactor() const
    {
        return _scale_factor;
    }
    // Scale v into fixed point, then atomically add it to *dst.
    template <typename V> __device__ __forceinline__ T operator()(T *dst, V v) const
    {
        const T scaled = v * _scale_factor;
        return atomicAdd(dst, scaled);
    }
};

/**
 * Kernel: dst[i] = src[i] * scale_factor for i in [0, n).
 * Launch with a 1D grid covering at least n threads.
 */
template <typename T, typename U, typename V> __global__ void CopyScaleArray(T *dst, U *src, V scale_factor, int n)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n)
    {
        return;
    }
    dst[idx] = src[idx] * scale_factor;
}

/**
 * Kernel: per-element product, dst[i] = lhs[i] * rhs[i] for i in [0, n).
 * Launch with a 1D grid covering at least n threads.
 * @author zpfeng
 */
template <typename T> __global__ void PerElemMulArray(T *dst, T *lhs, T *rhs, int n)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n)
    {
        return;
    }
    dst[idx] = lhs[idx] * rhs[idx];
}

/**
 * Kernel: scale every element by a scalar, dst[i] = lhs[i] * elem for
 * i in [0, n). Launch with a 1D grid covering at least n threads.
 * @author zpfeng
 */
template <typename T> __global__ void PerElemMulElem(T *dst, T *lhs, T elem, int n)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n)
    {
        return;
    }
    dst[idx] = lhs[idx] * elem;
}

/**
 * Kernel: per-element reciprocal, dst[i] = 1 / lhs[i] for i in [0, n).
 * The reciprocal is computed in double (1.0 / x) and then cast to T,
 * preserving the original precision behavior.
 * @author zpfeng
 */
template <typename T> __global__ void PerElemInvArray(T *dst, T *lhs, int n)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n)
    {
        return;
    }
    dst[idx] = (T)(1.0 / lhs[idx]);
}

/**
 * Kernel: fill dst with a scaled index ramp, dst[i] = scale_coef * i for
 * i in [0, n). Launch with a 1D grid covering at least n threads.
 * @author zpfeng
 */
template <typename T> __global__ void ScaleIdxArray(T *dst, T scale_coef, int n)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n)
    {
        return;
    }
    dst[idx] = scale_coef * idx;
}

/**
 * Compute per element 1.0 / (wu * wu + wv * wv) for row major matrix.
 * dst[0] is forced to 0, presumably because wu[0] and wv[0] would make the
 * denominator zero there — TODO confirm against callers.
 * NOTE(review): r and c are derived as i / row_size and i % row_size; this
 * matches "row major" only if row_size is the number of elements per row
 * (the column count) — verify the meaning of row_size/col_size.
 * @author zpfeng
 */
template <typename T> __global__ void WUVSquareInvPlusMat(T *dst, T *wu, T *wv, int row_size, int col_size)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= row_size * col_size)
    {
        return;
    }
    // c indexes wu (per-column weight), r indexes wv (per-row weight).
    int r = i / row_size, c = i % row_size;
    if (i == 0)
    {
        // element (0, 0) is zeroed instead of divided
        dst[i] = (T)0.0;
    }
    else
    {
        dst[i] = (T)1.0 / (wu[c] * wu[c] + wv[r] * wv[r]);
    }
}

/**
 * Compute, per element of a row major matrix, both weighted scaled values:
 * wu_dst[i] = wu[c] * lhs[i] * scale_coef and
 * wv_dst[i] = wv[r] * lhs[i] * scale_coef.
 * NOTE(review): as in WUVSquareInvPlusMat, r/c come from i / row_size and
 * i % row_size — verify that row_size is the per-row element count.
 * @author zpfeng
 */
template <typename T> __global__ void WUVMulScaleMat(T *wu_dst, T *wv_dst, T *lhs, T scale_coef, T *wu, T *wv, int row_size, int col_size)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= row_size * col_size)
    {
        return;
    }
    // c indexes wu (per-column weight), r indexes wv (per-row weight).
    int r = i / row_size, c = i % row_size;
    wu_dst[i] = wu[c] * lhs[i] * scale_coef;
    wv_dst[i] = wv[r] * lhs[i] * scale_coef;
}

}  //!< end of namespace fabric_space

#endif
