#ifndef CUDA_REDUCE_MIN_H
#define CUDA_REDUCE_MIN_H

template <unsigned int blockSize, typename T>
__global__ void reduceMin(T *g_idata, T *g_odata, unsigned int n, unsigned int query_width, int* g_oIndices, bool removeMin)
{
	// Per-query block-wise min reduction with argmin tracking.
	//
	// Grid layout: blockIdx.x tiles the reference dimension (each block covers
	// blockSize*2 elements), blockIdx.y selects the query row. Dynamic shared
	// memory must be at least blockSize*2*sizeof(T) + blockSize*sizeof(int):
	// the int index array lives at byte offset blockSize*2*sizeof(T) (host-side
	// allocation convention; only blockSize T slots are actually read here).
	// query_width is unused in this kernel but kept for signature compatibility.
	//
	// Each block writes one partial min and its reference index to
	// g_odata / g_oIndices, laid out as [query][block]. When removeMin is set,
	// thread 0 overwrites the winning element in g_idata with the sentinel
	// 1E+37 so a subsequent pass finds the next-smallest value.
	//
	// Untyped extern shared memory avoids the conflicting-declaration error
	// that a templated 'extern __shared__ T sdata[]' produces when the kernel
	// is instantiated with more than one T in a translation unit.
	extern __shared__ unsigned char smemRaw[];
	T* sdata = reinterpret_cast<T*>(smemRaw);
	int* sIndices = (int*)(sdata + blockSize*2);

	unsigned int tid = threadIdx.x;
	unsigned int queryIdx = blockIdx.y;
	unsigned int refIdx = blockIdx.x*(blockSize*2) + tid;
	unsigned int dataIdx = queryIdx * n + refIdx;

	// Sentinel larger than any expected value; out-of-range threads keep it
	// so they never win the min.
	sdata[tid] = (T)1E+37;
	sIndices[tid] = refIdx;

	// First reduction step fused with the load: each thread takes the min of
	// the two elements blockSize apart, guarding the ragged tail.
	if (refIdx < n)
		sdata[tid] = g_idata[dataIdx];
	if (refIdx + blockSize < n)
	{
		// Temp is T, not float: the original float temp silently truncated
		// values when this kernel was instantiated with T = double.
		T nextData = g_idata[dataIdx + blockSize];
		if (nextData < sdata[tid])
		{
			sdata[tid] = nextData;
			sIndices[tid] = refIdx + blockSize;
		}
	}
	__syncthreads();

	// Tree reduction. blockSize is a compile-time constant, so this loop fully
	// unrolls into the same stage sequence as the hand-unrolled original.
	// Unlike the original's warp-synchronous tail (which read shared memory
	// with no volatile qualifier and no __syncwarp), a barrier after every
	// stage is kept: implicit warp-lockstep no longer holds on Volta+ with
	// independent thread scheduling, so the old tail was a data race.
#pragma unroll
	for (unsigned int s = blockSize / 2; s > 0; s >>= 1)
	{
		if (tid < s)
		{
			T nextData = sdata[tid + s];
			if (nextData < sdata[tid])
			{
				sdata[tid] = nextData;
				sIndices[tid] = sIndices[tid + s];
			}
		}
		__syncthreads();
	}

	// Thread 0 publishes this block's partial result.
	if (tid == 0)
	{
		g_odata[queryIdx * gridDim.x + blockIdx.x] = sdata[0];
		g_oIndices[queryIdx * gridDim.x + blockIdx.x] = sIndices[0];

		// Knock out the winner so the next pass finds the runner-up.
		if (removeMin)
			g_idata[queryIdx * n + sIndices[0]] = (T)1E+37;
	}
}

template <unsigned int blockSize, typename T>
__global__ void reduceMinWithIndices(T *g_idata, int* g_iIndices, T *g_odata, unsigned int n, unsigned int numIndex, 
	unsigned int query_width, int* g_oIndices, bool removeMin)
{
	// Gathered min reduction: like reduceMin, but the candidate set is the
	// numIndex reference indices listed in g_iIndices (laid out [query][slot])
	// rather than the full 0..n-1 range.
	//
	// Grid layout: blockIdx.x tiles the index list (blockSize*2 slots per
	// block), blockIdx.y selects the query. Dynamic shared memory must be at
	// least blockSize*2*sizeof(T) + blockSize*sizeof(int) (same host-side
	// convention as reduceMin). query_width is unused but kept for signature
	// compatibility.
	//
	// Untyped extern shared memory avoids the conflicting-declaration error
	// that a templated 'extern __shared__ T sdata[]' produces when the kernel
	// is instantiated with more than one T in a translation unit.
	extern __shared__ unsigned char smemRaw[];
	T* sdata = reinterpret_cast<T*>(smemRaw);
	int* sIndices = (int*)(sdata + blockSize*2);

	unsigned int tid = threadIdx.x;
	unsigned int queryIdx = blockIdx.y;
	unsigned int slot = blockIdx.x*(blockSize*2) + tid;

	// Sentinel matching reduceMin's (the original used an undefined FLOAT_MAX
	// macro here, inconsistent with the 1E+37 used by its sibling kernel).
	sdata[tid] = (T)1E+37;
	sIndices[tid] = 0;

	// First reduction step fused with the gather. Both loads are guarded:
	// the original read g_iIndices unconditionally, going out of bounds for
	// slots past numIndex in the last block.
	if (slot < numIndex)
	{
		int refIdx = g_iIndices[queryIdx * numIndex + slot];
		sdata[tid] = g_idata[queryIdx * n + refIdx];
		sIndices[tid] = refIdx;
	}
	if (slot + blockSize < numIndex)
	{
		// The second candidate is the gathered element at slot+blockSize.
		// The original compared g_idata at refIdx+blockSize (an ungathered
		// neighbor) while storing the gathered index, and also dropped the
		// queryIdx * numIndex row offset on that index load.
		int refIdx2 = g_iIndices[queryIdx * numIndex + slot + blockSize];
		T nextData = g_idata[queryIdx * n + refIdx2];
		if (nextData < sdata[tid])
		{
			sdata[tid] = nextData;
			sIndices[tid] = refIdx2;
		}
	}
	__syncthreads();

	// Tree reduction; fully unrolled since blockSize is a template constant.
	// A barrier after every stage replaces the original's unsynchronized
	// warp tail, which was a data race on Volta+ (no volatile, no __syncwarp).
#pragma unroll
	for (unsigned int s = blockSize / 2; s > 0; s >>= 1)
	{
		if (tid < s)
		{
			T nextData = sdata[tid + s];
			if (nextData < sdata[tid])
			{
				sdata[tid] = nextData;
				sIndices[tid] = sIndices[tid + s];
			}
		}
		__syncthreads();
	}

	// Thread 0 publishes this block's partial result, [query][block] layout.
	if (tid == 0)
	{
		g_odata[queryIdx * gridDim.x + blockIdx.x] = sdata[0];
		g_oIndices[queryIdx * gridDim.x + blockIdx.x] = sIndices[0];

		// Knock out the winner so the next pass finds the runner-up.
		if (removeMin)
			g_idata[queryIdx * n + sIndices[0]] = (T)1E+37;
	}
}

template <unsigned int blockSize, typename T>
__global__ void reduceMax(T *g_idata, T *g_odata, unsigned int n)
{
	// Block-wise max reduction over a flat array addressed by a 2D grid:
	// each block covers blockSize*2 consecutive elements, with blockIdx.y
	// striding whole rows of gridDim.x blocks. Dynamic shared memory must be
	// at least blockSize*sizeof(T). One partial max is written per block.
	//
	// Untyped extern shared memory avoids the conflicting-declaration error
	// that a templated 'extern __shared__ T[]' produces when the kernel is
	// instantiated with more than one T in a translation unit.
	extern __shared__ unsigned char smemRaw[];
	T* sMaxData = reinterpret_cast<T*>(smemRaw);

	unsigned int tid = threadIdx.x;
	unsigned int i = blockIdx.y*(gridDim.x*blockSize*2) + blockIdx.x*(blockSize*2) + tid;

	// Identity for max: a very small sentinel, mirroring the 1E+37 used by
	// the min kernels. The original initialized to 1000000, which corrupted
	// the result whenever the true maximum was below 1e6 and the tail block
	// had out-of-range threads.
	sMaxData[tid] = (T)-1E+37;

	// First reduction step fused with the load, guarding the ragged tail.
	if (i < n)
		sMaxData[tid] = g_idata[i];
	if (i + blockSize < n)
		sMaxData[tid] = max(sMaxData[tid], g_idata[i + blockSize]);
	__syncthreads();

	// Tree reduction; fully unrolled since blockSize is a template constant.
	// A barrier after every stage replaces the original's unsynchronized
	// warp tail, which was a data race on Volta+ (no volatile, no __syncwarp).
#pragma unroll
	for (unsigned int s = blockSize / 2; s > 0; s >>= 1)
	{
		if (tid < s)
			sMaxData[tid] = max(sMaxData[tid], sMaxData[tid + s]);
		__syncthreads();
	}

	// Thread 0 publishes this block's partial max. The output index includes
	// blockIdx.y: the original wrote g_odata[blockIdx.x] only, so every row
	// of blocks in a 2D launch raced on the same gridDim.x output slots even
	// though the load index above depends on blockIdx.y.
	if (tid == 0)
	{
		g_odata[blockIdx.y * gridDim.x + blockIdx.x] = sMaxData[0];
	}
}
#endif