#pragma once
#ifndef TM_GPU_CU_METHODS_H_
#define TM_GPU_CU_METHODS_H_

#include <cuda_runtime.h>

namespace recognition {
namespace gpu {

// Transfers `nelements` values of T from device memory to host memory and
// returns the host pointer.
// NOTE(review): implementation is not visible in this header — presumably the
// returned buffer is freshly allocated and owned by the caller; confirm in the
// .cu definition before freeing.
template < typename T>
T* dev_to_host(T* dev_ptr, uint nelements);
// Transfers `nelements` values of T from host memory to device memory and
// returns the device pointer.
// NOTE(review): same ownership caveat as dev_to_host — confirm whether the
// device buffer is allocated inside and must be cudaFree'd by the caller.
template < typename T>
T* host_to_dev(T* host_ptr, uint nelements);

// Function-pointer types used to select the reduction operator (max or min) at
// runtime. Signatures match the double instantiations of ReduceMaxDoulbe1 /
// ReduceMaxDoulbe2 declared below:
//   *1_t: (shared values, shared indices, shared slot, global value, global index)
//   *2_t: (shared values, shared indices, shared slot, remote slot)
typedef void (*pointReduceWithIdxDoubleFunction1_t) (volatile double*, volatile uint*, uint, double, uint);
typedef void (*pointReduceWithIdxDoubleFunction2_t) (volatile double*, volatile uint*, uint, uint);

// Reduction operator, step 1: folds one value/index pair read from global
// memory (gdata_val, gdata_idx) into shared-memory slot sdata_idx of
// sdata/sidxs — presumably keeping the maximum and its index; confirm in the
// .cu definition.
// NOTE(review): "Doulbe" is a pre-existing typo for "Double"; kept as-is
// because renaming would break the out-of-view definitions and callers.
template < typename T >
__device__ void ReduceMaxDoulbe1(volatile T* sdata, volatile uint* sidxs, uint sdata_idx, T gdata_val, uint gdata_idx);

// Reduction operator, step 2: folds shared-memory slot ridx into slot
// sdata_idx (value and index together) — used for the in-shared-memory
// tree/warp reduction phase.
template < typename T >
__device__ void ReduceMaxDoulbe2(volatile T* sdata, volatile uint* sidxs, uint sdata_idx, uint ridx);

// Device-resident pointers to the max-reduction operators above; defined in
// the corresponding .cu file. Passing operators as __device__ function
// pointers lets one reduction kernel serve both max and min.
extern __device__ pointReduceWithIdxDoubleFunction1_t d_ReduceMaxDoulbe1;
extern __device__ pointReduceWithIdxDoubleFunction2_t d_ReduceMaxDoulbe2;

// Device-resident pointers to the min-reduction counterparts (declarations of
// the underlying ReduceMinDoulbe* functions are not visible in this header).
extern __device__ pointReduceWithIdxDoubleFunction1_t d_ReduceMinDoulbe1;
extern __device__ pointReduceWithIdxDoubleFunction2_t d_ReduceMinDoulbe2;

// Final warp-level stage of the shared-memory reduction: applies `func` (a
// step-2 operator, e.g. d_ReduceMaxDoulbe2) across the last warp's slots.
// NOTE(review): the volatile-shared-memory signature suggests the legacy
// implicit warp-synchronous idiom, which is unsafe under Volta+ independent
// thread scheduling — verify the .cu body uses __syncwarp()/*_sync intrinsics
// or restrict builds to pre-Volta architectures.
template < uint blockSize, typename T >
__device__ void _cu_warp_reduce(volatile T* sdata, volatile uint* sidxs, uint sdata_idx, void (*func)(volatile T*, volatile uint*, uint, uint));

// Block-wide value+index reduction kernel over in_data[0..n), skipping
// elements equal to `no_value`; func1/func2 are the step-1/step-2 operators
// (see ReduceMaxDoulbe1/2). Writes one partial result per block into
// out_data/out_idxs. Expects blockDim.x == blockSize (template parameter
// sizes the shared-memory tree).
template < uint blockSize, typename T >
__global__ void _cu_reduce__shared__(T* in_data, uint n, T no_value, void (*func1)(volatile T*, volatile uint*, uint, T, uint),
		void (*func2)(volatile T*, volatile uint*, uint, uint), T* out_data, uint* out_idxs);

// Host interface for \ref _cu_reduce__shared__(T*, uint, T, void(*func1)(volatile T*, volatile uint*, uint, T, uint),
// void (*func2)(volatile T*, volatile uint*, uint, uint), T*, uint*).
// out_data/out_idxs/out_size are passed by reference — presumably allocated
// inside and sized to the number of blocks launched; confirm ownership in the
// .cu definition.
template < uint blockSize, typename T >
void _cu_reduce(T* in_data, uint n, T no_value, void (*func1)(volatile T*, volatile uint*, uint, T, uint),
		void (*func2)(volatile T*, volatile uint*, uint, uint), T*& out_data, uint*& out_idxs, uint& out_size);

//////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////    Reduce by value and count the elements    ////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////
// Function-pointer type for the counting predicate/operator, matching the
// double instantiation of ReduceCountLesserOrEqual below:
//   (shared counters, shared slot, element value, comparison value)
typedef void (*pointReduceCountLesserOrEqualDoubleFunction_t) (volatile uint*, uint, double, double);

// Counting operator: updates shared counter slot sdata_idx for element
// gdata_val compared against `val` — presumably increments when
// gdata_val <= val; confirm in the .cu definition.
template < typename T >
__device__ void ReduceCountLesserOrEqual(volatile uint* sdata, uint sdata_idx, T gdata_val, T val);

// Device-resident pointer to the counting operator above (double
// instantiation); defined in the corresponding .cu file. ("Doulbe" typo kept
// for compatibility with the out-of-view definition.)
extern __device__ pointReduceCountLesserOrEqualDoubleFunction_t d_ReduceCountLesserOrEqualDoulbe;

// Final warp-level stage of the shared-memory count reduction (sums the last
// warp's counter slots). Same volatile warp-synchronous caveat as
// _cu_warp_reduce — verify Volta+ safety in the .cu body.
template < uint blockSize >
__device__ void _cu_warp_reduce_count(volatile uint* sdata, uint sdata_idx);

// Block-wide counting kernel over in_data[0..n): applies `func` to every
// element with comparison value `val` and writes one partial count per block
// into `counts`. Expects blockDim.x == blockSize.
template < uint blockSize, typename T >
__global__ void _cu_reduce_count__shared__(T* in_data, uint n, void (*func)(volatile uint*, uint, T, T), T val, uint* counts);

// Host interface for \ref _cu_reduce_count__shared__(T*, uint, void(*func)(volatile uint*, uint, T, T), T, uint*).
// Returns the total count via `count`.
// NOTE(review): `no_value` has no counterpart in the kernel signature above —
// check the .cu definition for how (or whether) it is used.
template < uint blockSize, typename T >
void _cu_reduce_count(T* in_data, uint n, T no_value, void (*func)(volatile uint*, uint, T, T), T val, uint& count);

// Park-Miller pseudorandom number generation kernel.
// NOTE(review): Park-Miller ("minimal standard" Lehmer LCG) is a pseudorandom
// generator, not a quasirandom (low-discrepancy) sequence as the original
// comment said.
//
// \param output	- output array of random numbers
// \param seed		- starting element in the seed sequence
// \param cycles	- number of generator iterations per output number
// \param n			- size of \p output
__global__ void _cu_ParkMiller_Kernel(uint *output, uint seed, uint cycles, uint n);

// Decomposes a flat index into the six digits of a 6-position base-3 vector,
// filling i1..i6.
//
// \param i			- input flat (decimal) index
// \param i1..i6	- the six base-3 digits of \p i
// NOTE(review): whether digits are returned as 0..2 or remapped (e.g. to
// -1..1) is decided by the out-of-view definition — confirm before relying on
// the range.
__host__ __device__ void idx_to_vector_dim6(uint i, int& i1, int& i2, int& i3, int& i4, int& i5, int& i6);

}	// namespace gpu
}	// namespace recognition

#endif	// TM_GPU_CU_METHODS_H_
