#include <stdio.h>

#include <cuda_runtime.h>
#include <helper_cuda.h>
#include <helper_functions.h>

#include <base/types.cuh>
#include <gpu/cu_methods.cuh>

namespace recognition {
namespace gpu {

template < typename T>
T* dev_to_host(T* dev_ptr, uint nelements) {
	// Copy `nelements` elements of device memory into a freshly allocated
	// pinned host buffer. The caller owns the returned pointer and must
	// release it with cudaFreeHost().
	const size_t nbytes = nelements * sizeof(T);
	T* result = NULL;
	checkCudaErrors( cudaMallocHost((void **) &result, nbytes) );
	checkCudaErrors( cudaMemcpy(result, dev_ptr, nbytes, cudaMemcpyDeviceToHost) );
	return result;
}

template double* dev_to_host(double* dev_ptr, uint nelements);
template int* dev_to_host(int* dev_ptr, uint nelements);
template uint* dev_to_host(uint* dev_ptr, uint nelements);
template bool* dev_to_host(bool* dev_ptr, uint nelements);

template < typename T>
T* host_to_dev(T* host_ptr, uint nelements) {
	// Copy `nelements` elements from host memory into a freshly allocated
	// device buffer. The caller owns the returned pointer and must release
	// it with cudaFree().
	const size_t nbytes = nelements * sizeof(T);
	T* result = NULL;
	checkCudaErrors( cudaMalloc((void **) &result, nbytes) );
	checkCudaErrors( cudaMemcpy(result, host_ptr, nbytes, cudaMemcpyHostToDevice) );
	return result;
}

template double* host_to_dev(double* host_ptr, uint nelements);
template int* host_to_dev(int* host_ptr, uint nelements);
template uint* host_to_dev(uint* host_ptr, uint nelements);
template bool* host_to_dev(bool* host_ptr, uint nelements);

template < typename T >
__device__ void ReduceMaxDoulbe1(volatile T* sdata, volatile uint* sidxs, uint sdata_idx, T gdata_val, uint gdata_idx) {
	// Fold one input element (value plus its global index) into this
	// thread's shared-memory slot, keeping the larger value.
	const bool take_new = gdata_val > sdata[sdata_idx];
	if (take_new) {
		sdata[sdata_idx] = gdata_val;
		sidxs[sdata_idx] = gdata_idx;
	}
}

template < typename T >
__device__ void ReduceMaxDoulbe2(volatile T* sdata, volatile uint* sidxs, uint sdata_idx, uint ridx) {
	// Merge shared-memory slot `ridx` into slot `sdata_idx`, keeping the
	// larger value together with its saved index.
	if (!(sdata[sdata_idx] < sdata[ridx]))
		return;
	sdata[sdata_idx] = sdata[ridx];
	sidxs[sdata_idx] = sidxs[ridx];
}

template __device__ void ReduceMaxDoulbe1(volatile double* sdata, volatile uint* sidxs, uint sdata_idx, double gdata_val, uint gdata_idx);
template __device__ void ReduceMaxDoulbe2(volatile double* sdata, volatile uint* sidxs, uint sdata_idx, uint ridx);

template < typename T >
__device__ void ReduceMinDoulbe1(volatile T* sdata, volatile uint* sidxs, uint sdata_idx, T gdata_val, uint gdata_idx) {
	// Fold one input element (value plus its global index) into this
	// thread's shared-memory slot, keeping the smaller value.
	const bool take_new = gdata_val < sdata[sdata_idx];
	if (take_new) {
		sdata[sdata_idx] = gdata_val;
		sidxs[sdata_idx] = gdata_idx;
	}
}

template < typename T >
__device__ void ReduceMinDoulbe2(volatile T* sdata, volatile uint* sidxs, uint sdata_idx, uint ridx) {
	// Merge shared-memory slot `ridx` into slot `sdata_idx`, keeping the
	// smaller value together with its saved index.
	if (!(sdata[sdata_idx] > sdata[ridx]))
		return;
	sdata[sdata_idx] = sdata[ridx];
	sidxs[sdata_idx] = sidxs[ridx];
}

template __device__ void ReduceMinDoulbe1(volatile double* sdata, volatile uint* sidxs, uint sdata_idx, double gdata_val, uint gdata_idx);
template __device__ void ReduceMinDoulbe2(volatile double* sdata, volatile uint* sidxs, uint sdata_idx, uint ridx);

// Device-side function-pointer symbols for the double-precision reduction
// operators. These let host code select an operator at runtime and hand it
// to the reduction kernels — presumably fetched with cudaMemcpyFromSymbol
// by the callers (TODO confirm against call sites).
__device__ pointReduceWithIdxDoubleFunction1_t d_ReduceMaxDoulbe1 = ReduceMaxDoulbe1<double>;
__device__ pointReduceWithIdxDoubleFunction2_t d_ReduceMaxDoulbe2 = ReduceMaxDoulbe2<double>;

__device__ pointReduceWithIdxDoubleFunction1_t d_ReduceMinDoulbe1 = ReduceMinDoulbe1<double>;
__device__ pointReduceWithIdxDoubleFunction2_t d_ReduceMinDoulbe2 = ReduceMinDoulbe2<double>;

// Final warp-level stage of the with-index reduction: the surviving 32
// threads repeatedly merge slot (sdata_idx + offset) into their own slot via
// `func` (one of the ReduceMax/MinDoulbe2 merge operators).
// blockSize is a compile-time constant, so the untaken `if`s fold away.
// NOTE(review): no __syncwarp between steps — this relies on pre-Volta
// implicit warp-synchronous execution of `volatile` shared-memory accesses;
// verify behavior on devices with independent thread scheduling (SM70+).
template < uint blockSize, typename T >
__device__ void _cu_warp_reduce(volatile T* sdata, volatile uint* sidxs, uint sdata_idx, void (*func)(volatile T*, volatile uint*, uint, uint)) {
	if (blockSize >= 64) func(sdata, sidxs, sdata_idx, sdata_idx + 32);
	if (blockSize >= 32) func(sdata, sidxs, sdata_idx, sdata_idx + 16);
	if (blockSize >= 16) func(sdata, sidxs, sdata_idx, sdata_idx + 8);
	if (blockSize >= 8) func(sdata, sidxs, sdata_idx, sdata_idx + 4);
	if (blockSize >= 4) func(sdata, sidxs, sdata_idx, sdata_idx + 2);
	if (blockSize >= 2) func(sdata, sidxs, sdata_idx, sdata_idx + 1);
}

// Block-level reduction-with-index kernel (tree reduction in shared memory).
// Each block folds a grid-strided slice of in_data down to one partial
// result, written to out_data[blockIdx.x] / out_idxs[blockIdx.x].
// func1 folds a global element into a thread's slot; func2 merges two slots.
// Preconditions (the _cu_reduce host wrapper guarantees these):
//  - n is a multiple of the per-grid workload, so in_data[i + blockSize]
//    never reads past the end of the buffer;
//  - padding elements hold no_value, which must never win a comparison;
//  - sizeof(T) >= sizeof(uint), because the second half of sdata is
//    reinterpreted as the index array.
template < uint blockSize, typename T >
__global__ void _cu_reduce__shared__(T* in_data, uint n, T no_value, void (*func1)(volatile T*, volatile uint*, uint, T, uint),
		void (*func2)(volatile T*, volatile uint*, uint, uint), T* out_data, uint* out_idxs) {
	// First blockSize slots hold values; the second half is aliased as the
	// uint index array.
	__shared__ T sdata[blockSize * 2];
	uint* sidxs = (uint*) &(sdata[blockSize]);	// sizeof(T) >= sizeof(uint) !!!
	uint tid = threadIdx.x;
	uint i = blockIdx.x * (blockSize * 2) + tid;
	uint gridSize = blockSize * 2 * gridDim.x;

	// Initialize first element
	sdata[tid] = no_value;
	sidxs[tid] = 0;

	// Grid-stride accumulation: two elements per thread per iteration.
	while (i < n) {
		func1(sdata, sidxs, tid, in_data[i], i);
		func1(sdata, sidxs, tid, in_data[i + blockSize], i + blockSize);
		i += gridSize;
	}
	__syncthreads();

	// Tree reduction. blockSize is a compile-time constant, so untaken
	// branches are compiled out; each __syncthreads() sits outside the
	// thread-divergent `tid` check, so all threads reach it.
	if (blockSize >= 512) { if (tid < 256) { func2(sdata, sidxs, tid, tid + 256); } __syncthreads(); }
	if (blockSize >= 256) { if (tid < 128) { func2(sdata, sidxs, tid, tid + 128); } __syncthreads(); }
	if (blockSize >= 128) { if (tid < 64) { func2(sdata, sidxs, tid, tid + 64); } __syncthreads(); }

	// Last 64 slots are merged warp-synchronously (see _cu_warp_reduce).
	if (tid < 32) _cu_warp_reduce<blockSize>(sdata, sidxs, tid, func2);
	if (tid == 0) {
		out_data[blockIdx.x] = sdata[0];
		out_idxs[blockIdx.x] = sidxs[0];
	}
}

// Explicit instantiations for all supported power-of-two block sizes (double).
template __global__ void _cu_reduce__shared__<1024, double>(double* in_data, uint n, double no_value,
		void (*func1)(volatile double*, volatile uint*, uint, double, uint), void (*func2)(volatile double*, volatile uint*, uint, uint),
		double* out_data, uint* out_idxs);
template __global__ void _cu_reduce__shared__< 512, double>(double* in_data, uint n, double no_value,
		void (*func1)(volatile double*, volatile uint*, uint, double, uint), void (*func2)(volatile double*, volatile uint*, uint, uint),
		double* out_data, uint* out_idxs);
template __global__ void _cu_reduce__shared__< 256, double>(double* in_data, uint n, double no_value,
		void (*func1)(volatile double*, volatile uint*, uint, double, uint), void (*func2)(volatile double*, volatile uint*, uint, uint),
		double* out_data, uint* out_idxs);
template __global__ void _cu_reduce__shared__< 128, double>(double* in_data, uint n, double no_value,
		void (*func1)(volatile double*, volatile uint*, uint, double, uint), void (*func2)(volatile double*, volatile uint*, uint, uint),
		double* out_data, uint* out_idxs);
template __global__ void _cu_reduce__shared__<  64, double>(double* in_data, uint n, double no_value,
		void (*func1)(volatile double*, volatile uint*, uint, double, uint), void (*func2)(volatile double*, volatile uint*, uint, uint),
		double* out_data, uint* out_idxs);
template __global__ void _cu_reduce__shared__<  32, double>(double* in_data, uint n, double no_value,
		void (*func1)(volatile double*, volatile uint*, uint, double, uint), void (*func2)(volatile double*, volatile uint*, uint, uint),
		double* out_data, uint* out_idxs);
template __global__ void _cu_reduce__shared__<  16, double>(double* in_data, uint n, double no_value,
		void (*func1)(volatile double*, volatile uint*, uint, double, uint), void (*func2)(volatile double*, volatile uint*, uint, uint),
		double* out_data, uint* out_idxs);
template __global__ void _cu_reduce__shared__<   8, double>(double* in_data, uint n, double no_value,
		void (*func1)(volatile double*, volatile uint*, uint, double, uint), void (*func2)(volatile double*, volatile uint*, uint, uint),
		double* out_data, uint* out_idxs);
template __global__ void _cu_reduce__shared__<   4, double>(double* in_data, uint n, double no_value,
		void (*func1)(volatile double*, volatile uint*, uint, double, uint), void (*func2)(volatile double*, volatile uint*, uint, uint),
		double* out_data, uint* out_idxs);
template __global__ void _cu_reduce__shared__<   2, double>(double* in_data, uint n, double no_value,
		void (*func1)(volatile double*, volatile uint*, uint, double, uint), void (*func2)(volatile double*, volatile uint*, uint, uint),
		double* out_data, uint* out_idxs);
template __global__ void _cu_reduce__shared__<   1, double>(double* in_data, uint n, double no_value,
		void (*func1)(volatile double*, volatile uint*, uint, double, uint), void (*func2)(volatile double*, volatile uint*, uint, uint),
		double* out_data, uint* out_idxs);

template < uint blockSize, typename T >
void _cu_reduce(T* in_data, uint n, T no_value, void (*func1)(volatile T*, volatile uint*, uint, T, uint),
		void (*func2)(volatile T*, volatile uint*, uint, uint), T*& out_data, uint*& out_idxs, uint& out_size) {
	// Host driver for the reduction-with-index kernel.
	// in_data: host array of n elements (copied to the device here).
	// no_value: neutral padding element — must never win a comparison
	// against a real element (e.g. -inf for max, +inf for min).
	// On return, out_data/out_idxs hold out_size per-block partial results in
	// pinned host memory (caller releases them with cudaFreeHost); the final
	// reduction over the partials is left to the caller.
	const uint grid_reduction = 16;
	const uint reduction_block_size = blockSize * grid_reduction;
	// Round n up to a whole number of per-block workloads so the kernel's
	// two-elements-per-thread access pattern never runs off the end.
	uint d_n = n % reduction_block_size == 0 ? n : (n - n % reduction_block_size + reduction_block_size);
	T* h_in_data = in_data;
	T* d_in_data;
	checkCudaErrors( cudaMalloc((void **) &d_in_data, d_n * sizeof(T)) );
	checkCudaErrors( cudaMemcpy(d_in_data, h_in_data, n * sizeof(T), cudaMemcpyHostToDevice) );
	if (d_n > n) {
		// Fill the padding tail with the neutral element.
		T *h_rest_data;
		checkCudaErrors( cudaMallocHost((void **) &h_rest_data, (d_n - n) * sizeof(T)) );
		for (uint i = 0; i < d_n - n; ++i) {	// uint index: avoids signed/unsigned comparison
			h_rest_data[i] = no_value;
		}
		checkCudaErrors( cudaMemcpy(&(d_in_data[n]), h_rest_data, (d_n - n) * sizeof(T), cudaMemcpyHostToDevice) );
		checkCudaErrors( cudaFreeHost(h_rest_data) );
	}

	// FIX: the launch geometry previously used the global FULL_BLOCK_SIZE /
	// REDUCTION_BLOCK_SIZE macros while the padding above is based on the
	// blockSize template parameter, so any instantiation with blockSize !=
	// FULL_BLOCK_SIZE launched a grid that did not match the padded input.
	// Derive block, grid and kernel instantiation from the template
	// parameter so every instantiation is self-consistent.
	dim3 dimBlock(blockSize);
	dim3 dimGrid(d_n / reduction_block_size);
	T* d_out_data;
	checkCudaErrors( cudaMalloc((void **) &d_out_data, dimGrid.x * sizeof(T)) );
	uint* d_out_idxs;
	checkCudaErrors( cudaMalloc((void **) &d_out_idxs, dimGrid.x * sizeof(uint)) );

	_cu_reduce__shared__<blockSize, T><<<dimGrid, dimBlock>>>(d_in_data, d_n, no_value,
			func1, func2, d_out_data, d_out_idxs);
	checkCudaErrors( cudaGetLastError() );	// surface launch-configuration errors
	checkCudaErrors( cudaDeviceSynchronize() );

	out_size = dimGrid.x;
	out_data = dev_to_host<T>(d_out_data, out_size);
	out_idxs = dev_to_host<uint>(d_out_idxs, out_size);

	checkCudaErrors( cudaFree(d_out_idxs) );
	checkCudaErrors( cudaFree(d_out_data) );
	checkCudaErrors( cudaFree(d_in_data) );
}

// Explicit instantiations for all supported power-of-two block sizes (double).
template void _cu_reduce<1024, double>(double* in_data, uint n, double no_value, void (*func1)(volatile double*, volatile uint*, uint, double, uint),
		void (*func2)(volatile double*, volatile uint*, uint, uint), double*& out_data, uint*& out_idxs, uint& out_size);
template void _cu_reduce< 512, double>(double* in_data, uint n, double no_value, void (*func1)(volatile double*, volatile uint*, uint, double, uint),
		void (*func2)(volatile double*, volatile uint*, uint, uint), double*& out_data, uint*& out_idxs, uint& out_size);
template void _cu_reduce< 256, double>(double* in_data, uint n, double no_value, void (*func1)(volatile double*, volatile uint*, uint, double, uint),
		void (*func2)(volatile double*, volatile uint*, uint, uint), double*& out_data, uint*& out_idxs, uint& out_size);
template void _cu_reduce< 128, double>(double* in_data, uint n, double no_value, void (*func1)(volatile double*, volatile uint*, uint, double, uint),
		void (*func2)(volatile double*, volatile uint*, uint, uint), double*& out_data, uint*& out_idxs, uint& out_size);
template void _cu_reduce<  64, double>(double* in_data, uint n, double no_value, void (*func1)(volatile double*, volatile uint*, uint, double, uint),
		void (*func2)(volatile double*, volatile uint*, uint, uint), double*& out_data, uint*& out_idxs, uint& out_size);
template void _cu_reduce<  32, double>(double* in_data, uint n, double no_value, void (*func1)(volatile double*, volatile uint*, uint, double, uint),
		void (*func2)(volatile double*, volatile uint*, uint, uint), double*& out_data, uint*& out_idxs, uint& out_size);
template void _cu_reduce<  16, double>(double* in_data, uint n, double no_value, void (*func1)(volatile double*, volatile uint*, uint, double, uint),
		void (*func2)(volatile double*, volatile uint*, uint, uint), double*& out_data, uint*& out_idxs, uint& out_size);
template void _cu_reduce<   8, double>(double* in_data, uint n, double no_value, void (*func1)(volatile double*, volatile uint*, uint, double, uint),
		void (*func2)(volatile double*, volatile uint*, uint, uint), double*& out_data, uint*& out_idxs, uint& out_size);
template void _cu_reduce<   4, double>(double* in_data, uint n, double no_value, void (*func1)(volatile double*, volatile uint*, uint, double, uint),
		void (*func2)(volatile double*, volatile uint*, uint, uint), double*& out_data, uint*& out_idxs, uint& out_size);
template void _cu_reduce<   2, double>(double* in_data, uint n, double no_value, void (*func1)(volatile double*, volatile uint*, uint, double, uint),
		void (*func2)(volatile double*, volatile uint*, uint, uint), double*& out_data, uint*& out_idxs, uint& out_size);
template void _cu_reduce<   1, double>(double* in_data, uint n, double no_value, void (*func1)(volatile double*, volatile uint*, uint, double, uint),
		void (*func2)(volatile double*, volatile uint*, uint, uint), double*& out_data, uint*& out_idxs, uint& out_size);

template < typename T >
__device__ void ReduceCountLesserOrEqual(volatile uint* sdata, uint sdata_idx, T gdata_val, T val) {
	// Bump this thread's shared-memory counter when the element does not
	// exceed the threshold value.
	sdata[sdata_idx] += (gdata_val <= val) ? 1u : 0u;
}

template __device__ void ReduceCountLesserOrEqual(volatile uint* sdata, uint sdata_idx, double gdata_val, double val);

// Device-side function-pointer symbol so host code can hand the operator to
// the counting kernel.
__device__ pointReduceCountLesserOrEqualDoubleFunction_t d_ReduceCountLesserOrEqualDoulbe = ReduceCountLesserOrEqual<double>;

// Final warp-level stage of the counting reduction: the surviving 32 threads
// sum slot (sdata_idx + offset) into their own slot.
// blockSize is a compile-time constant, so the untaken `if`s fold away.
// NOTE(review): no __syncwarp between steps — relies on pre-Volta implicit
// warp-synchronous execution of `volatile` shared-memory accesses; verify on
// devices with independent thread scheduling (SM70+).
template < uint blockSize >
__device__ void _cu_warp_reduce_count(volatile uint* sdata, uint sdata_idx) {
	if (blockSize >= 64) sdata[sdata_idx] += sdata[sdata_idx + 32];
	if (blockSize >= 32) sdata[sdata_idx] += sdata[sdata_idx + 16];
	if (blockSize >= 16) sdata[sdata_idx] += sdata[sdata_idx + 8];
	if (blockSize >= 8) sdata[sdata_idx] += sdata[sdata_idx + 4];
	if (blockSize >= 4) sdata[sdata_idx] += sdata[sdata_idx + 2];
	if (blockSize >= 2) sdata[sdata_idx] += sdata[sdata_idx + 1];
}

// Block-level counting kernel: each block counts, over a grid-strided slice
// of in_data, the elements selected by `func` (e.g. <= val) and writes one
// partial count per block to counts[blockIdx.x].
// Precondition (guaranteed by the _cu_reduce_count host wrapper): n is a
// multiple of the per-grid workload, so in_data[i + blockSize] never reads
// past the end; padding elements must not satisfy the predicate.
template < uint blockSize, typename T >
__global__ void _cu_reduce_count__shared__(T* in_data, uint n, void (*func)(volatile uint*, uint, T, T), T val, uint* counts) {
	__shared__ uint sdata[blockSize];
	uint tid = threadIdx.x;
	uint i = blockIdx.x * (blockSize * 2) + tid;
	uint gridSize = blockSize * 2 * gridDim.x;

	// Initialize first element
	sdata[tid] = 0;

	// Grid-stride accumulation: two elements per thread per iteration.
	while (i < n) {
		func(sdata, tid, in_data[i], val);
		func(sdata, tid, in_data[i + blockSize], val);
		i += gridSize;
	}
	__syncthreads();

	// Tree reduction; blockSize is a compile-time constant, so untaken
	// branches are compiled out, and each __syncthreads() is reached by all
	// threads of the block.
	if (blockSize >= 512) { if (tid < 256) { sdata[tid] += sdata[tid + 256]; } __syncthreads(); }
	if (blockSize >= 256) { if (tid < 128) { sdata[tid] += sdata[tid + 128]; } __syncthreads(); }
	if (blockSize >= 128) { if (tid < 64) { sdata[tid] += sdata[tid + 64]; } __syncthreads(); }

	// Last 64 slots are merged warp-synchronously.
	if (tid < 32) _cu_warp_reduce_count<blockSize>(sdata, tid);
	if (tid == 0) {
		counts[blockIdx.x] = sdata[0];
	}
}

// Explicit instantiations for all supported power-of-two block sizes (double).
template __global__ void _cu_reduce_count__shared__<1024, double>(double* in_data, uint n,
		void (*func)(volatile uint*, uint, double, double), double val, uint* counts);
template __global__ void _cu_reduce_count__shared__< 512, double>(double* in_data, uint n,
		void (*func)(volatile uint*, uint, double, double), double val, uint* counts);
template __global__ void _cu_reduce_count__shared__< 256, double>(double* in_data, uint n,
		void (*func)(volatile uint*, uint, double, double), double val, uint* counts);
template __global__ void _cu_reduce_count__shared__< 128, double>(double* in_data, uint n,
		void (*func)(volatile uint*, uint, double, double), double val, uint* counts);
template __global__ void _cu_reduce_count__shared__<  64, double>(double* in_data, uint n,
		void (*func)(volatile uint*, uint, double, double), double val, uint* counts);
template __global__ void _cu_reduce_count__shared__<  32, double>(double* in_data, uint n,
		void (*func)(volatile uint*, uint, double, double), double val, uint* counts);
template __global__ void _cu_reduce_count__shared__<  16, double>(double* in_data, uint n,
		void (*func)(volatile uint*, uint, double, double), double val, uint* counts);
template __global__ void _cu_reduce_count__shared__<   8, double>(double* in_data, uint n,
		void (*func)(volatile uint*, uint, double, double), double val, uint* counts);
template __global__ void _cu_reduce_count__shared__<   4, double>(double* in_data, uint n,
		void (*func)(volatile uint*, uint, double, double), double val, uint* counts);
template __global__ void _cu_reduce_count__shared__<   2, double>(double* in_data, uint n,
		void (*func)(volatile uint*, uint, double, double), double val, uint* counts);
template __global__ void _cu_reduce_count__shared__<   1, double>(double* in_data, uint n,
		void (*func)(volatile uint*, uint, double, double), double val, uint* counts);

template < uint blockSize, typename T >
void _cu_reduce_count(T* in_data, uint n, T no_value, void (*func)(volatile uint*, uint, T, T), T val, uint& count) {
	// Host driver for the counting kernel: counts elements of the host array
	// in_data selected by `func` (e.g. <= val) and returns the total in
	// `count`.
	// no_value pads the input up to a whole per-block workload — it must NOT
	// satisfy the predicate, or padding elements would inflate the count.
	const uint grid_reduction = 16;
	const uint reduction_block_size = blockSize * grid_reduction;
	// Round n up so the kernel's two-elements-per-thread pattern never runs
	// off the end of the device buffer.
	uint d_n = n % reduction_block_size == 0 ? n : (n - n % reduction_block_size + reduction_block_size);
	T* h_in_data = in_data;
	T* d_in_data;
	checkCudaErrors( cudaMalloc((void **) &d_in_data, d_n * sizeof(T)) );
	checkCudaErrors( cudaMemcpy(d_in_data, h_in_data, n * sizeof(T), cudaMemcpyHostToDevice) );
	if (d_n > n) {
		// Fill the padding tail with the neutral element.
		T *h_rest_data;
		checkCudaErrors( cudaMallocHost((void **) &h_rest_data, (d_n - n) * sizeof(T)) );
		for (uint i = 0; i < d_n - n; ++i) {	// uint index: avoids signed/unsigned comparison
			h_rest_data[i] = no_value;
		}
		checkCudaErrors( cudaMemcpy(&(d_in_data[n]), h_rest_data, (d_n - n) * sizeof(T), cudaMemcpyHostToDevice) );
		checkCudaErrors( cudaFreeHost(h_rest_data) );
	}

	// FIX: the launch geometry previously used the global FULL_BLOCK_SIZE /
	// REDUCTION_BLOCK_SIZE macros while the padding above is based on the
	// blockSize template parameter, so any instantiation with blockSize !=
	// FULL_BLOCK_SIZE launched a grid that did not match the padded input.
	// Derive block, grid and kernel instantiation from the template
	// parameter so every instantiation is self-consistent.
	dim3 dimBlock(blockSize);
	dim3 dimGrid(d_n / reduction_block_size);
	uint* d_counts;
	checkCudaErrors( cudaMalloc((void **) &d_counts, dimGrid.x * sizeof(uint)) );

	_cu_reduce_count__shared__<blockSize, T><<<dimGrid, dimBlock>>>(d_in_data, d_n, func, val, d_counts);
	checkCudaErrors( cudaGetLastError() );	// surface launch-configuration errors
	checkCudaErrors( cudaDeviceSynchronize() );

	// Sum the per-block partial counts on the host.
	uint* counts = dev_to_host<uint>(d_counts, dimGrid.x);
	uint count_ = 0;
	for (uint i = 0; i < dimGrid.x; ++i)
		count_ += counts[i];
	count = count_;

	checkCudaErrors( cudaFreeHost(counts) );
	checkCudaErrors( cudaFree(d_counts) );
	checkCudaErrors( cudaFree(d_in_data) );
}

// Explicit instantiations for all supported power-of-two block sizes (double).
template void _cu_reduce_count<1024, double>(double* in_data, uint n, double no_value,
		void (*func)(volatile uint*, uint, double, double), double val, uint& count);
template void _cu_reduce_count< 512, double>(double* in_data, uint n, double no_value,
		void (*func)(volatile uint*, uint, double, double), double val, uint& count);
template void _cu_reduce_count< 256, double>(double* in_data, uint n, double no_value,
		void (*func)(volatile uint*, uint, double, double), double val, uint& count);
template void _cu_reduce_count< 128, double>(double* in_data, uint n, double no_value,
		void (*func)(volatile uint*, uint, double, double), double val, uint& count);
template void _cu_reduce_count<  64, double>(double* in_data, uint n, double no_value,
		void (*func)(volatile uint*, uint, double, double), double val, uint& count);
template void _cu_reduce_count<  32, double>(double* in_data, uint n, double no_value,
		void (*func)(volatile uint*, uint, double, double), double val, uint& count);
template void _cu_reduce_count<  16, double>(double* in_data, uint n, double no_value,
		void (*func)(volatile uint*, uint, double, double), double val, uint& count);
template void _cu_reduce_count<   8, double>(double* in_data, uint n, double no_value,
		void (*func)(volatile uint*, uint, double, double), double val, uint& count);
template void _cu_reduce_count<   4, double>(double* in_data, uint n, double no_value,
		void (*func)(volatile uint*, uint, double, double), double val, uint& count);
template void _cu_reduce_count<   2, double>(double* in_data, uint n, double no_value,
		void (*func)(volatile uint*, uint, double, double), double val, uint& count);
template void _cu_reduce_count<   1, double>(double* in_data, uint n, double no_value,
		void (*func)(volatile uint*, uint, double, double), double val, uint& count);

// Park-Miller ("minimal standard") pseudo-random number generator.
// Writes one value per output slot; slot `pos` is seeded with seed + pos and
// iterated `cycles` times, so the result is deterministic for a given
// (seed, cycles, n). 1D grid-stride launch: any grid/block shape works.
__global__ void _cu_ParkMiller_Kernel(uint *output, uint seed, uint cycles, uint n) {
	uint tid = blockDim.x * blockIdx.x + threadIdx.x;
	uint threadN = blockDim.x * gridDim.x;
	double const a = 16807;      //ie 7**5
	double const m = 2147483647; //ie 2**31-1
	double const reciprocal_m = 1.0/m;

	if (tid >= n)
		return;

	for (uint pos = tid; pos < n; pos += threadN) {
		uint result = 0;
		// NOTE(review): if seed + pos wraps to 0 the generator state is stuck
		// at 0 — presumably callers choose seeds that avoid this; confirm.
		uint data = seed + pos;

		// FIX: loop counter was `int`, giving a signed/unsigned comparison
		// against `cycles` (and an endless loop for cycles > INT_MAX); the
		// result cast was `(int)` although the target is uint. Both are now
		// unsigned. Values are unchanged for all previously-valid inputs.
		for (uint i = 1; i <= cycles; ++i) {
			// W. Langdon cs.ucl.ac.uk 5 May 1994: modulo computed in double
			// arithmetic — exact, since data * a < 2^53.
			double temp = data * a;
			result = (uint) (temp - m * floor(temp * reciprocal_m));
			data = result;
		}

		output[pos] = result;
	}
}

__host__ __device__ void idx_to_vector_dim6(uint i, int& i1, int& i2, int& i3, int& i4, int& i5, int& i6) {
	// Expand a flat index into the six base-3 digits of a 3^6 grid:
	// i1 is the most significant digit, i6 the least significant.
	// Out-of-range indices are clamped to the last cell (3^6 - 1 = 728).
	uint rest = (i >= 729) ? 728 : i;	// 3^6
	i6 = (int) (rest % 3); rest /= 3;
	i5 = (int) (rest % 3); rest /= 3;
	i4 = (int) (rest % 3); rest /= 3;
	i3 = (int) (rest % 3); rest /= 3;
	i2 = (int) (rest % 3); rest /= 3;
	i1 = (int) (rest % 3);
}


}	// namespace gpu
}	// namespace recognition
