#ifndef _BITONIC_SORT_
#define _BITONIC_SORT_

#include <assert.h>
#include <limits.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "mapImpl.cu"
#include "bitonicSort_kernel.cu"
#include "common.h"

/*
Sorts rLen tuples on the GPU using a bitonic sorting network.
@ d_Rin, device array holding the input tuples.
@ rLen, the number of tuples.
@ d_Rout, device array that receives the sorted tuples.
*/
// Sort rLen sortRecord tuples (device memory) into d_Rout via a bitonic
// sorting network.  The input is padded up to the next power of two with
// UINT_MAX sentinel records so the network operates on a valid size; only
// the first rLen sorted records are copied back out.
// NOTE(review): the default of NUM_BLOCKS_CHUNK for numThreadPB (a
// threads-per-block parameter) looks suspicious — confirm NUM_THREADS_CHUNK
// was not intended.
void bitonicSortGPU(struct sortRecord * d_Rin, int rLen, struct sortRecord *d_Rout, 
					int numThreadPB=NUM_BLOCKS_CHUNK, int numBlock=NUM_BLOCKS_CHUNK)
{
	// Nothing to sort.  This also guards the power-of-two loop below, which
	// would spin forever for rLen == 0 (0/2 == 0 never reaches 1).
	if( rLen <= 0 )
		return;

	unsigned int numRecordsR;

	// Round rLen up to the next power of two: the bitonic network requires a
	// power-of-two element count.  level ends up as ceil(log2(rLen)).
	unsigned int size = rLen;
	unsigned int level = 0;
	while( size != 1 )
	{
		size = size/2;
		level++;
	}

	if( (1<<level) < rLen )
	{
		level++;
	}

	numRecordsR = (1<<level);

	// NOTE(review): the chunked shared-memory variant in the else branch is
	// deliberately disabled (if(1)); the simple global-memory variant is
	// always taken.  The original size threshold is kept for reference.
	//if( rLen <= 256*1024 )
	if(1)
	{
		// One thread per record; clamp the block size when the (padded)
		// input is smaller than a full block.
		unsigned int numThreadsSort = numThreadPB;
		if(numRecordsR<numThreadPB)
			numThreadsSort=numRecordsR;
		unsigned int numBlocksXSort = numRecordsR/numThreadsSort;
		unsigned int numBlocksYSort = 1;
		dim3 gridSort( numBlocksXSort, numBlocksYSort );		
		unsigned int memSizeRecordsR = sizeof( sortRecord ) * numRecordsR;
	
		// Working buffer padded to numRecordsR records; the tail
		// [rLen, numRecordsR) is filled with UINT_MAX sentinels so the
		// padding sorts to the end and never mixes with real data.
		struct sortRecord* d_R;
		CUDA_SAFE_CALL( cudaMalloc( (void**) &d_R, memSizeRecordsR) );
		struct sortRecord tempValue;
		tempValue.key = UINT_MAX;
		tempValue.pos = UINT_MAX;
		mapInit(d_R, rLen, numRecordsR, tempValue);
		CUDA_SAFE_CALL( cudaMemcpy( d_R, d_Rin, rLen*sizeof(struct sortRecord), cudaMemcpyDeviceToDevice) );

		// Standard bitonic schedule: k is the size of the bitonic sequences
		// being merged, j the compare-exchange stride within each merge
		// stage.  Unsigned loop variables avoid signed/unsigned comparison
		// against numRecordsR.
		for( unsigned int k = 2; k <= numRecordsR; k *= 2 )
		{
			for( unsigned int j = k/2; j > 0; j /= 2 )
			{
				bitonicKernel<<<gridSort, numThreadsSort>>>(d_R, numRecordsR, k, j);
			}
		}
		CUDA_SAFE_CALL( cudaMemcpy( d_Rout, d_R, sizeof(struct sortRecord)*rLen, cudaMemcpyDeviceToDevice) );
		CUDA_SAFE_CALL( cudaFree( d_R ) );
		// cudaThreadSynchronize() is deprecated; use cudaDeviceSynchronize()
		// (consistent with the else branch below).
		CUDA_SAFE_CALL( cudaDeviceSynchronize() );
	}
	else
	{
		// Chunked variant (currently unreachable, see above): each chunk of
		// chunkSize records is first sorted in shared memory, then merge
		// stages alternate between global-memory compare-exchange and
		// in-shared-memory partial stages.
		unsigned int numThreadsSort = numThreadPB;
		unsigned int numBlocksYSort = 1;
		unsigned int numBlocksXSort = (numRecordsR/numThreadsSort)/numBlocksYSort;
		// Grid x-dimension is capped at 2^16 blocks; spill the excess into
		// the y-dimension.
		if(numBlocksXSort>=(1<<16))
		{
			numBlocksXSort=(1<<15);
			numBlocksYSort=(numRecordsR/numThreadsSort)/numBlocksXSort;
		}
		unsigned int numBlocksChunk = numBlock;
		unsigned int numThreadsChunk = numThreadPB;
		
		unsigned int chunkSize = numBlocksChunk*numThreadsChunk;
		unsigned int numChunksR = numRecordsR/chunkSize;

		dim3 gridSort( numBlocksXSort, numBlocksYSort );
		unsigned int memSizeRecordsR = sizeof( struct sortRecord ) * numRecordsR;

		// Same padded working buffer as the first branch.
		struct sortRecord* d_R;
		CUDA_SAFE_CALL( cudaMalloc( (void**) &d_R, memSizeRecordsR) );
		struct sortRecord tempValue;
		tempValue.key = UINT_MAX;
		tempValue.pos = UINT_MAX;
		mapInit(d_R, rLen, numRecordsR, tempValue);
		CUDA_SAFE_CALL_NO_SYNC( cudaMemcpy( d_R, d_Rin, rLen*sizeof(struct sortRecord), cudaMemcpyDeviceToDevice) );
		// One record per thread staged in shared memory per block.
		int sharedMemSize=numThreadPB*sizeof(struct sortRecord);
		for( unsigned int chunkIdx = 0; chunkIdx < numChunksR; chunkIdx++ )
		{
			unitBitonicSortKernel<<< numBlocksChunk, numThreadsChunk, sharedMemSize>>>(  d_R, numRecordsR, 
				chunkIdx,numThreadsChunk, chunkSize, numBlocksChunk);
		}
		// Merge stages: strides larger than half a chunk run in global
		// memory; the remaining small strides run inside shared memory.
		unsigned int j;
		for( unsigned int k = numThreadsChunk*2; k <= numRecordsR; k *= 2 )
		{
			for( j = k/2; j > numThreadsChunk/2; j /= 2 )
			{
				bitonicKernel<<<gridSort, numThreadsSort>>>( d_R, numRecordsR, k, j);
			}

			for( unsigned int chunkIdx = 0; chunkIdx < numChunksR; chunkIdx++ )
			{
				partBitonicSortKernel<<< numBlocksChunk, numThreadsChunk, sharedMemSize>>>(d_R, numRecordsR, 
					chunkIdx, k/numThreadsSort,numThreadsChunk, chunkSize, numBlocksChunk );
			}
		}
		CUDA_SAFE_CALL_NO_SYNC( cudaMemcpy( d_Rout, d_R, sizeof(struct sortRecord)*rLen, cudaMemcpyDeviceToDevice) );
		CUDA_SAFE_CALL_NO_SYNC(cudaFree( d_R ));
		CUDA_SAFE_CALL_NO_SYNC(cudaDeviceSynchronize());
	}
}


#endif
