#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <time.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/fcntl.h>
#include <sys/mman.h>
#include <cuda.h>
#include "scanImpl.cu"

#define BI 1000000000
#define SAMPLE_STRIDE 128
#define SHARED_SIZE_LIMIT 1024 

/* Check the result of a CUDA runtime call; print the failing file/line and abort on error. */
#  define CUDA_SAFE_CALL_NO_SYNC( call) do {                                \
    cudaError err = call;                                                    \
    if( cudaSuccess != err) {                                                \
        fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n",        \
                __FILE__, __LINE__, cudaGetErrorString( err) );              \
        exit(EXIT_FAILURE);                                                  \
    } } while (0)
/* Same as above, but also synchronizes the device afterwards so asynchronous
 * kernel-execution errors surface here instead of at a later, unrelated call.
 * Uses cudaDeviceSynchronize(): cudaThreadSynchronize() is deprecated and was
 * removed from current CUDA toolkits. */
#  define CUDA_SAFE_CALL( call) do {                                        \
    CUDA_SAFE_CALL_NO_SYNC(call);                                            \
    cudaError err = cudaDeviceSynchronize();                                 \
    if( cudaSuccess != err) {                                                \
        fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n",        \
                __FILE__, __LINE__, cudaGetErrorString( err) );              \
        exit(EXIT_FAILURE);                                                  \
    } } while (0)

/*
 * Parse a fixed-width decimal integer from a buffer that is NOT required to be
 * NUL-terminated; exactly `size` characters are consumed.
 *
 * Generalized over the original digits-only loop: an optional leading '-' or
 * '+' is now accepted (digit-only inputs behave exactly as before).  Characters
 * other than digits are still not validated — the caller must pass clean data.
 *
 * string: pointer to the first character of the number
 * size:   number of characters to read
 * returns the parsed value (0 when size == 0)
 */
__device__ __host__ static int gpu_atoi(const char *string, int size)
{
	int value = 0;
	int length = 0;
	int sign = 1;

	/* optional leading sign */
	if (length < size && (*string == '-' || *string == '+')) {
		if (*string == '-')
			sign = -1;
		string++;
		length++;
	}

	while (length < size) {
		value = 10 * value + (*string - '0');
		string++;
		length++;
	}

	return sign * value;
}

#define W (sizeof(int) * 8)
#define W (sizeof(int) * 8)
// Round x up to the next power of two using the count-leading-zeros intrinsic.
// For x already a power of two the value is returned unchanged (e.g. 8 -> 8).
// NOTE(review): __clz(0) returns 32, so x == 1 yields 1U << 0 == 1, which is
// fine; but x == 0 would shift by W (undefined behavior) — callers here only
// pass positive element counts.
static inline __device__ int nextPowerOfTwo(int x)
{
    /*
        Equivalent portable bit-twiddling version:
        --x;
        x |= x >> 1;
        x |= x >> 2;
        x |= x >> 4;
        x |= x >> 8;
        x |= x >> 16;
        return ++x;
    */
    return 1U << (W - __clz(x - 1));
}

// Integer division of a by b, rounded up (ceiling division).
static inline __host__ __device__ int iDivUp(int a, int b)
{
    int quotient = a / b;
    if (a % b != 0)
    {
        quotient += 1;
    }
    return quotient;
}

// Number of SAMPLE_STRIDE-spaced samples needed to cover `dividend` elements
// (i.e. ceil(dividend / SAMPLE_STRIDE)).
static inline __host__ __device__ int getSampleCount(int dividend)
{
    return iDivUp(dividend, SAMPLE_STRIDE);
}

// Inclusive binary search: returns how many of the L elements in `data`
// compare <= val (when sortDir is non-zero, ascending) or >= val (descending).
// `stride` must be a power of two >= the next power of two of L.
static inline __device__ int binarySearchIn(int val, int *data, int L, int stride, int sortDir)
{
    int pos = 0;

    if (L == 0)
    {
        return 0;
    }

    while (stride > 0)
    {
        int probe = umin(pos + stride, L);
        int candidate = data[probe - 1];

        // Advance when the probed element may still precede (or equal) val.
        if (sortDir ? (candidate <= val) : (candidate >= val))
        {
            pos = probe;
        }

        stride >>= 1;
    }

    return pos;
}

// Exclusive binary search: returns how many of the L elements in `data`
// compare < val (when sortDir is non-zero, ascending) or > val (descending).
// Paired with binarySearchIn so equal keys from the two merge inputs land in
// distinct, stable output positions.
static inline __device__ int binarySearchEx(int val, int *data, int L, int stride, int sortDir)
{
    int pos = 0;

    if (L == 0)
    {
        return 0;
    }

    while (stride > 0)
    {
        int probe = umin(pos + stride, L);
        int candidate = data[probe - 1];

        // Advance only past strictly smaller (or strictly larger) elements.
        if (sortDir ? (candidate < val) : (candidate > val))
        {
            pos = probe;
        }

        stride >>= 1;
    }

    return pos;
}


// Sort one SHARED_SIZE_LIMIT-element tile per block entirely in shared memory
// using iterative pairwise merging (stride doubles each pass).
// Launch contract: blockDim.x == SHARED_SIZE_LIMIT / 2; each thread owns two
// elements. Values are synthesized as the element's global index, so the
// output d_DstVal is a permutation usable for gathering payload columns.
// arrayLength must be a power of two <= SHARED_SIZE_LIMIT.
__global__ void mergeSortSharedKernel(
    int *d_DstKey,
    int *d_DstVal,
    int *d_SrcKey,
    int arrayLength,
    int sortDir
)
{
    __shared__ int s_key[SHARED_SIZE_LIMIT];
    __shared__ int s_val[SHARED_SIZE_LIMIT];

    // Point each thread at its two elements of this block's tile.
    d_SrcKey += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
    d_DstKey += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
    d_DstVal += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
    s_key[threadIdx.x +                       0] = d_SrcKey[                      0];
    s_val[threadIdx.x +                       0] = blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x; 
    s_key[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = d_SrcKey[(SHARED_SIZE_LIMIT / 2)];
    s_val[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x + SHARED_SIZE_LIMIT/2; 

    // Merge pairs of sorted runs of length `stride` into runs of length 2*stride.
    for (int stride = 1; stride < arrayLength; stride <<= 1)
    {
        int     lPos = threadIdx.x & (stride - 1);
        int *baseKey = s_key + 2 * (threadIdx.x - lPos);
        int *baseVal = s_val + 2 * (threadIdx.x - lPos);

        // Barrier before reads: previous iteration's scattered writes must land.
        __syncthreads();
        int keyA = baseKey[lPos +      0];
        int valA = baseVal[lPos +      0];
        int keyB = baseKey[lPos + stride];
        int valB = baseVal[lPos + stride];
        // Exclusive search for A vs. inclusive for B keeps equal keys stable.
        int posA = binarySearchEx(keyA, baseKey + stride, stride, stride, sortDir) + lPos;
        int posB = binarySearchIn(keyB, baseKey +      0, stride, stride, sortDir) + lPos;

        // Barrier between reading old positions and scattering to new ones.
        __syncthreads();
        baseKey[posA] = keyA;
        baseVal[posA] = valA;
        baseKey[posB] = keyB;
        baseVal[posB] = valB;
    }

    __syncthreads();
    d_DstKey[                      0] = s_key[threadIdx.x +                       0];
    d_DstVal[                      0] = s_val[threadIdx.x +                       0];
    d_DstKey[(SHARED_SIZE_LIMIT / 2)] = s_key[threadIdx.x + (SHARED_SIZE_LIMIT / 2)];
    d_DstVal[(SHARED_SIZE_LIMIT / 2)] = s_val[threadIdx.x + (SHARED_SIZE_LIMIT / 2)];
}

// Host wrapper: sort `batchSize` independent arrays of `arrayLength` keys each
// with the shared-memory merge kernel. arrayLength must be a power of two that
// divides SHARED_SIZE_LIMIT, and the total element count must be a multiple of
// SHARED_SIZE_LIMIT (asserted). No-op for arrays shorter than 2 elements.
static void mergeSortShared(
    int *d_DstKey,
    int *d_DstVal,
    int *d_SrcKey,
    int batchSize,
    int arrayLength,
    int sortDir
)
{
    if (arrayLength < 2)
    {
        return;
    }

    assert(SHARED_SIZE_LIMIT % arrayLength == 0);
    assert(((batchSize * arrayLength) % SHARED_SIZE_LIMIT) == 0);
    int  blockCount = batchSize * arrayLength / SHARED_SIZE_LIMIT;
    int threadCount = SHARED_SIZE_LIMIT / 2;

    mergeSortSharedKernel<<<blockCount, threadCount>>>(d_DstKey, d_DstVal, d_SrcKey, arrayLength, sortDir);
    // Kernel launches fail silently; surface configuration errors right away.
    CUDA_SAFE_CALL_NO_SYNC(cudaGetLastError());
}


// For each pair of adjacent sorted segments (A = stride elements, B = up to
// stride elements), take every SAMPLE_STRIDE-th key of each segment and record
// its rank: position within its own segment (trivial) and binary-searched
// position within the sibling segment. These ranks later define the
// independent "elementary intervals" merged by mergeElementaryIntervalsKernel.
__global__ void generateSampleRanksKernel(
    int *d_RanksA,
    int *d_RanksB,
    int *d_SrcKey,
    int stride,
    int N,
    int threadCount,
	int sortDir
)
{
    int pos = blockIdx.x * blockDim.x + threadIdx.x;

    if (pos >= threadCount)
    {
        return;
    }

    // i = sample index within this segment pair; segmentBase = pair's start.
    const int           i = pos & ((stride / SAMPLE_STRIDE) - 1);
    const int segmentBase = (pos - i) * (2 * SAMPLE_STRIDE);
    d_SrcKey += segmentBase;
    d_RanksA += segmentBase / SAMPLE_STRIDE;
    d_RanksB += segmentBase / SAMPLE_STRIDE;

    // The last B segment may be shorter than stride (tail of the array).
    const int segmentElementsA = stride;
    const int segmentElementsB = umin(stride, N - segmentBase - stride);
    const int  segmentSamplesA = getSampleCount(segmentElementsA);
    const int  segmentSamplesB = getSampleCount(segmentElementsB);

    if (i < segmentSamplesA)
    {
        d_RanksA[i] = i * SAMPLE_STRIDE;
        // Rank of A's sample inside B (exclusive bound for stability).
        d_RanksB[i] = binarySearchEx(
                          d_SrcKey[i * SAMPLE_STRIDE], d_SrcKey + stride,
                          segmentElementsB, nextPowerOfTwo(segmentElementsB),sortDir
                      );
    }

    if (i < segmentSamplesB)
    {
        d_RanksB[(stride / SAMPLE_STRIDE) + i] = i * SAMPLE_STRIDE;
        // Rank of B's sample inside A (inclusive bound for stability).
        d_RanksA[(stride / SAMPLE_STRIDE) + i] = binarySearchIn(
                                                     d_SrcKey[stride + i * SAMPLE_STRIDE], d_SrcKey + 0,
                                                     segmentElementsA, nextPowerOfTwo(segmentElementsA),sortDir
                                                 );
    }
}

// Host wrapper for generateSampleRanksKernel: one thread per sample across
// all full segment pairs (a short tail segment is handled only when it is
// longer than `stride`, i.e. when it still forms a pair).
static void generateSampleRanks(
    int *d_RanksA,
    int *d_RanksB,
    int *d_SrcKey,
    int stride,
    int N,
    int sortDir
)
{
    int tail = N % (2 * stride);
    int threadCount;

    if (tail > stride)
    {
        threadCount = (N + 2 * stride - tail) / (2 * SAMPLE_STRIDE);
    }
    else
    {
        threadCount = (N - tail) / (2 * SAMPLE_STRIDE);
    }

    generateSampleRanksKernel<<<iDivUp(threadCount, 256), 256>>>(d_RanksA, d_RanksB, d_SrcKey, stride, N, threadCount, sortDir);
}



// Merge the two per-segment rank lists of each segment pair into a single
// ascending list of interval limits (d_Limits). Ranks are plain element
// offsets, so they are always merged ascending (sortDir fixed to 1 here)
// regardless of the key sort direction.
__global__ void mergeRanksAndIndicesKernel(
    int *d_Limits,
    int *d_Ranks,
    int stride,
    int N,
    int threadCount
)
{
    int pos = blockIdx.x * blockDim.x + threadIdx.x;

    if (pos >= threadCount)
    {
        return;
    }

    // i = sample index inside this segment pair; base pointers move to the
    // pair's slice of the rank/limit arrays (2 samples per SAMPLE_STRIDE pair).
    const int           i = pos & ((stride / SAMPLE_STRIDE) - 1);
    const int segmentBase = (pos - i) * (2 * SAMPLE_STRIDE);
    d_Ranks  += (pos - i) * 2;
    d_Limits += (pos - i) * 2;

    const int segmentElementsA = stride;
    const int segmentElementsB = umin(stride, N - segmentBase - stride);
    const int  segmentSamplesA = getSampleCount(segmentElementsA);
    const int  segmentSamplesB = getSampleCount(segmentElementsB);

    if (i < segmentSamplesA)
    {
        // Position of A's i-th rank in the merged limit list.
        int dstPos = binarySearchEx(d_Ranks[i], d_Ranks + segmentSamplesA, segmentSamplesB, nextPowerOfTwo(segmentSamplesB),1) + i;
        d_Limits[dstPos] = d_Ranks[i];
    }

    if (i < segmentSamplesB)
    {
        // Position of B's i-th rank in the merged limit list.
        int dstPos = binarySearchIn(d_Ranks[segmentSamplesA + i], d_Ranks, segmentSamplesA, nextPowerOfTwo(segmentSamplesA),1) + i;
        d_Limits[dstPos] = d_Ranks[segmentSamplesA + i];
    }
}

// Host wrapper: build the merged interval-limit lists for both the A-side and
// B-side ranks with two launches of the same kernel.
static void mergeRanksAndIndices(
    int *d_LimitsA,
    int *d_LimitsB,
    int *d_RanksA,
    int *d_RanksB,
    int stride,
    int N
)
{
    int tail = N % (2 * stride);
    int threadCount;

    if (tail > stride)
    {
        threadCount = (N + 2 * stride - tail) / (2 * SAMPLE_STRIDE);
    }
    else
    {
        threadCount = (N - tail) / (2 * SAMPLE_STRIDE);
    }

    int gridSize = iDivUp(threadCount, 256);

    mergeRanksAndIndicesKernel<<<gridSize, 256>>>(d_LimitsA, d_RanksA, stride, N, threadCount);
    mergeRanksAndIndicesKernel<<<gridSize, 256>>>(d_LimitsB, d_RanksB, stride, N, threadCount);
}



// Block-cooperative merge of two sorted key/value runs (lenA and lenB
// elements, each <= blockDim.x) into dst. Each thread places at most one
// element from each run; the Ex/In search asymmetry keeps equal keys stable.
// dst may alias srcA/srcB (as it does in mergeElementaryIntervalsKernel):
// the __syncthreads() separates all reads from all writes.
inline __device__ void merge(
    int *dstKey,
    int *dstVal,
    int *srcAKey,
    int *srcAVal,
    int *srcBKey,
    int *srcBVal,
    int lenA,
    int nPowTwoLenA,
    int lenB,
    int nPowTwoLenB,
	int sortDir
)
{
    int keyA, valA, keyB, valB, dstPosA, dstPosB;

    if (threadIdx.x < lenA)
    {
        keyA = srcAKey[threadIdx.x];
        valA = srcAVal[threadIdx.x];
        dstPosA = binarySearchEx(keyA, srcBKey, lenB, nPowTwoLenB, sortDir) + threadIdx.x;
    }

    if (threadIdx.x < lenB)
    {
        keyB = srcBKey[threadIdx.x];
        valB = srcBVal[threadIdx.x];
        dstPosB = binarySearchIn(keyB, srcAKey, lenA, nPowTwoLenA, sortDir) + threadIdx.x;
    }

    // All source reads complete before any destination write (dst may alias src).
    __syncthreads();

    if (threadIdx.x < lenA)
    {
        dstKey[dstPosA] = keyA;
        dstVal[dstPosA] = valA;
    }

    if (threadIdx.x < lenB)
    {
        dstKey[dstPosB] = keyB;
        dstVal[dstPosB] = valB;
    }
}

// One block merges one "elementary interval": the slice of a segment pair
// between consecutive sample limits (at most SAMPLE_STRIDE elements from each
// side). Launch contract: blockDim.x == SAMPLE_STRIDE, one block per interval.
__global__ void mergeElementaryIntervalsKernel(
    int *d_DstKey,
    int *d_DstVal,
    int *d_SrcKey,
    int *d_SrcVal,
    int *d_LimitsA,
    int *d_LimitsB,
    int stride,
    int N,
	int sortDir
)
{
    __shared__ int s_key[2 * SAMPLE_STRIDE];
    __shared__ int s_val[2 * SAMPLE_STRIDE];

    // intervalI = interval index within its segment pair; segmentBase = start
    // of the pair in the flat array.
    const int   intervalI = blockIdx.x & ((2 * stride) / SAMPLE_STRIDE - 1);
    const int segmentBase = (blockIdx.x - intervalI) * SAMPLE_STRIDE;
    d_SrcKey += segmentBase;
    d_SrcVal += segmentBase;
    d_DstKey += segmentBase;
    d_DstVal += segmentBase;

    //Set up threadblock-wide parameters
    __shared__ int startSrcA, startSrcB, lenSrcA, lenSrcB, startDstA, startDstB;

    if (threadIdx.x == 0)
    {
        int segmentElementsA = stride;
        int segmentElementsB = umin(stride, N - segmentBase - stride);
        int  segmentSamplesA = getSampleCount(segmentElementsA);
        int  segmentSamplesB = getSampleCount(segmentElementsB);
        int   segmentSamples = segmentSamplesA + segmentSamplesB;

        // Interval bounds come from the merged limit lists; the last interval
        // of a pair extends to the end of its segment.
        startSrcA    = d_LimitsA[blockIdx.x];
        startSrcB    = d_LimitsB[blockIdx.x];
        int endSrcA = (intervalI + 1 < segmentSamples) ? d_LimitsA[blockIdx.x + 1] : segmentElementsA;
        int endSrcB = (intervalI + 1 < segmentSamples) ? d_LimitsB[blockIdx.x + 1] : segmentElementsB;
        lenSrcA      = endSrcA - startSrcA;
        lenSrcB      = endSrcB - startSrcB;
        startDstA    = startSrcA + startSrcB;
        startDstB    = startDstA + lenSrcA;
    }

    //Load main input data
    __syncthreads();

    if (threadIdx.x < lenSrcA)
    {
        s_key[threadIdx.x +             0] = d_SrcKey[0 + startSrcA + threadIdx.x];
        s_val[threadIdx.x +             0] = d_SrcVal[0 + startSrcA + threadIdx.x];
    }

    if (threadIdx.x < lenSrcB)
    {
        s_key[threadIdx.x + SAMPLE_STRIDE] = d_SrcKey[stride + startSrcB + threadIdx.x];
        s_val[threadIdx.x + SAMPLE_STRIDE] = d_SrcVal[stride + startSrcB + threadIdx.x];
    }

    //Merge data in shared memory (in place: dst aliases the A half of s_key)
    __syncthreads();
    merge(
        s_key,
        s_val,
        s_key + 0,
        s_val + 0,
        s_key + SAMPLE_STRIDE,
        s_val + SAMPLE_STRIDE,
        lenSrcA, SAMPLE_STRIDE,
        lenSrcB, SAMPLE_STRIDE,
		sortDir
    );

    //Store merged data
    __syncthreads();

    if (threadIdx.x < lenSrcA)
    {
        d_DstKey[startDstA + threadIdx.x] = s_key[threadIdx.x];
        d_DstVal[startDstA + threadIdx.x] = s_val[threadIdx.x];
    }

    if (threadIdx.x < lenSrcB)
    {
        d_DstKey[startDstB + threadIdx.x] = s_key[lenSrcA + threadIdx.x];
        d_DstVal[startDstB + threadIdx.x] = s_val[lenSrcA + threadIdx.x];
    }
}

// Host wrapper: launch one SAMPLE_STRIDE-thread block per elementary interval
// to merge adjacent sorted runs of length `stride` into runs of 2*stride.
static void mergeElementaryIntervals(
    int *d_DstKey,
    int *d_DstVal,
    int *d_SrcKey,
    int *d_SrcVal,
    int *d_LimitsA,
    int *d_LimitsB,
    int stride,
    int N,
    int sortDir
)
{
    int tail = N % (2 * stride);
    int mergePairs;

    if (tail > stride)
    {
        mergePairs = getSampleCount(N);
    }
    else
    {
        mergePairs = (N - tail) / SAMPLE_STRIDE;
    }

    mergeElementaryIntervalsKernel<<<mergePairs, SAMPLE_STRIDE>>>(
        d_DstKey,
        d_DstVal,
        d_SrcKey,
        d_SrcVal,
        d_LimitsA,
        d_LimitsB,
        stride,
        N,
        sortDir
    );
}


// Device scratch buffers for the sample-rank merge stages; allocated once in
// initMergeSort(). NOTE(review): never freed — no matching teardown exists.
static int *d_RanksA, *d_RanksB, *d_LimitsA, *d_LimitsB;
// Capacity of each scratch buffer, in samples; together with SAMPLE_STRIDE
// this bounds the sortable element count (asserted in main()).
static const int MAX_SAMPLE_COUNT = 32768;

// Allocate the four device-side scratch buffers used by the merge stages.
// Must be called once before the first generateSampleRanks/mergeRanksAndIndices
// call. Exits the process on allocation failure (via CUDA_SAFE_CALL_NO_SYNC).
extern "C" void initMergeSort(void)
{
    CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void **)&d_RanksA,  MAX_SAMPLE_COUNT * sizeof(int)));
    CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void **)&d_RanksB,  MAX_SAMPLE_COUNT * sizeof(int)));
    CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void **)&d_LimitsA, MAX_SAMPLE_COUNT * sizeof(int)));
    CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void **)&d_LimitsB, MAX_SAMPLE_COUNT * sizeof(int)));
}



// Compare-and-swap step for the bitonic sort: when the key order disagrees
// with `dir` (1 = ascending, 0 = descending), swap both keys and values.
__device__ static inline void Comparator(
    int &keyA,
    int &valA,
    int &keyB,
    int &valB,
    int dir
)
{
    if ((keyA > keyB) == dir)
    {
        int tmpKey = keyA;
        int tmpVal = valA;

        keyA = keyB;
        valA = valB;
        keyB = tmpKey;
        valB = tmpVal;
    }
}

#define NTHREAD  (SHARED_SIZE_LIMIT/2)

// Bitonic sort of one 2*NTHREAD-element tile of int keys per block; emits the
// sorted keys to `result` and the originating global indices to `pos`.
// Launch contract: blockDim.x == NTHREAD. The final merge phase alternates
// direction by block parity (blockIdx.x & 1), producing a bitonic sequence
// across block pairs for a subsequent merge step.
// NOTE(review): the `dir`, `tupleNum` and `keySize` parameters are unused here
// — keys are assumed to be 4-byte ints and the tile fully populated.
__global__ static void sort_key_shared(char * key, int tupleNum, int keySize, char *result, char *pos,int dir){
	int lid = threadIdx.x;
	int bid = blockIdx.x;

	__shared__ int bufKey[NTHREAD*2];
	__shared__ int bufVal[NTHREAD*2];

	int gid = bid * NTHREAD*2 + lid;

	// Each thread stages two key/index pairs into shared memory.
	bufKey[lid] = ((int *)key)[gid];
	bufVal[lid] = gid;
	bufKey[lid+NTHREAD] = ((int *)key)[gid+NTHREAD];
	bufVal[lid+NTHREAD] = gid+NTHREAD;

	__syncthreads();

	// Build bitonic sequences of growing size; ddd flips per half-size group.
	for (int size = 2; size < 2*NTHREAD; size <<= 1){
		int ddd = ((threadIdx.x & (size / 2)) != 0);

		for (int stride = size / 2; stride > 0; stride >>= 1){
			__syncthreads();
			int pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
			Comparator(
				bufKey[pos +      0], bufVal[pos +      0],
				bufKey[pos + stride ], bufVal[pos + stride],
				ddd
			);  
		}   
	}

    {
	// Final merge phase: direction taken from block parity, not from `dir`.
	int ddd = blockIdx.x &1;
	for (int stride = NTHREAD ; stride > 0; stride >>= 1)
	{ 
	    __syncthreads();
	    int pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
	    Comparator(
		bufKey[pos +      0], bufVal[pos +      0],
		bufKey[pos + stride ], bufVal[pos + stride],
		ddd
	    );
	}
    }
    
    __syncthreads();

	// Write back both elements owned by this thread.
	((int * )result)[gid] = bufKey[lid];
	((int *)pos)[gid] = bufVal[lid];
	((int * )result)[gid+NTHREAD] = bufKey[lid+NTHREAD];
	((int *)pos)[gid+NTHREAD] = bufVal[lid+NTHREAD];

}
// Bitonic sort of one SHARED_SIZE_LIMIT-element tile of int keys per block in
// direction `dir` (1 = ascending); outputs sorted keys to `result` and the
// originating global indices to `pos`.
// Launch contract: blockDim.x == SHARED_SIZE_LIMIT / 2; each thread owns two
// elements. Assumes keys are 4-byte ints (`keySize` is unused).
__global__ static void sort_key(char * key, int tupleNum, int keySize, char *result, char *pos,int dir){
	int lid = threadIdx.x;
	int bid = blockIdx.x;

	__shared__ int bufKey[SHARED_SIZE_LIMIT];
	__shared__ int bufVal[SHARED_SIZE_LIMIT];

	int gid = bid * SHARED_SIZE_LIMIT + lid;

	// Stage two key/index pairs per thread into shared memory.
	bufKey[lid] = ((int *)key)[gid];
	bufVal[lid] = gid;
	bufKey[lid+blockDim.x] = ((int *)key)[gid+blockDim.x];
	bufVal[lid+blockDim.x] = gid+ blockDim.x;

	__syncthreads();

	// Bitonic build phases; direction alternates per half-size group, xor'd
	// with the requested overall direction.
	for (int size = 2; size < tupleNum && size < SHARED_SIZE_LIMIT; size <<= 1){
		int ddd = dir ^ ((threadIdx.x & (size / 2)) != 0);

		for (int stride = size / 2; stride > 0; stride >>= 1){
			__syncthreads();
			int pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
			Comparator(
				bufKey[pos +      0], bufVal[pos +      0],
				bufKey[pos + stride ], bufVal[pos + stride],
				ddd
			);  
		}   
	}

    {
	// Final merge phase in the uniform requested direction.
	for (int stride = blockDim.x ; stride > 0; stride >>= 1)
	{ 
	    __syncthreads();
	    int pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
	    Comparator(
		bufKey[pos +      0], bufVal[pos +      0],
		bufKey[pos + stride ], bufVal[pos + stride],
	 	dir
	    );
	}
    }
    
    __syncthreads();

	// Write back both elements owned by this thread.
	((int * )result)[gid] = bufKey[lid];
	((int *)pos)[gid] = bufVal[lid];
	((int * )result)[gid+blockDim.x] = bufKey[lid+blockDim.x];
	((int *)pos)[gid+blockDim.x] = bufVal[lid+blockDim.x];

}

// Locate per-thread merge split points between adjacent sorted partitions of
// length pSize, by binary-searching (via halving `delta`) for positions where
// key[outPos[0]-1] < key[outPos[1]] and key[outPos[1]-1] < key[outPos[0]].
// NOTE(review): the computed split points are never written out — `result`
// and `resPos` are unused, so this kernel currently has no observable effect;
// it appears to be unfinished (main() does not launch it either).
__global__ static void merge_key(char *key, char *pos,int pSize, int tupleNum, int keySize, char *result, char *resPos){
	
	int stride = blockDim.x * gridDim.x;
        int tid = blockIdx.x * blockDim.x + threadIdx.x;

	int pNum = tupleNum/pSize/2;		// how many parallel merge for this kernel launch 
	int tNum = stride/pNum;			// the number of threads for each merge 

	int mid = tid / tNum;			// the merge id
	int  firstP = mid * pSize*2;		// the start pos for the first partition of the merge
	int secondP = firstP + pSize;		// the start pos for the second partition of the merge

	int outPos[2], outNum[2];
	int delta;

	// The last thread of each merge keeps the fixed partition ends; all
	// others search for their split point.
	if((tid+1)%tNum !=0){
		outPos[0] = firstP + pSize *(tid % tNum)/tNum +  pSize/tNum;
		outPos[1] = secondP + pSize *(tid % tNum)/tNum + pSize/tNum;

		delta = outNum[0] = outNum[1] = pSize/tNum;

		while (1){
			if(outNum[0] == 0 || outNum[1] == 0 )
				break;

			int key1 = ((int *)key)[outPos[0]-1];
			int key2 = ((int *)key)[outPos[1]-1];
			int key3 = ((int *)key)[outPos[0]];
			int key4 = ((int *)key)[outPos[1]];

			// Valid split: everything before each split point precedes
			// everything after the other's.
			if(key1 < key4 && key2<key3)
				break;

			if(key1 >= key4){
				delta = (delta+1) / 2;
				outPos[0] -= delta;
				outNum[0] -= delta;

				outPos[1] += delta;
				outNum[1] += delta;

			}else {
				delta = (delta + 1) / 2;
				outPos[1] -= delta;
				outNum[1] -= delta;
				
				outPos[0] += delta;
				outNum[0] += delta;	// bug fix: was outNum[1] — must mirror the branch above

			}
		}
	}
}

// Gather payload columns into sorted row order: for output row i, read the
// original row index from keyPos and copy its colNum int values from the
// column-major `col` buffer into the row-major `result` buffer.
// Grid-stride loop: works for any launch configuration.
// (`key` and `keySize` are unused; values are assumed to be 4-byte ints.)
__global__ static void gather_result(char *key, char * keyPos, char * col, int tupleNum, int colNum, int keySize, char *result){
	int step = blockDim.x * gridDim.x;
	int start = blockIdx.x * blockDim.x + threadIdx.x;

	int *rowIndex = (int *)keyPos;
	int *columns = (int *)col;
	int *out = (int *)result;

	for(int row = start; row < tupleNum; row += step){
		int srcRow = rowIndex[row];
		int base = row * colNum;
		for(int c = 0; c < colNum; c++){
			out[base + c] = columns[srcRow + c * tupleNum];
		}
	}
}

// Driver: sort the rows of a column-store table by an integer key column.
// Usage: prog sortTablePrefix colNum sortkeyWidth
//   - reads colNum column files named "<prefix>0".."<prefix>N-1" (ints)
//   - reads the key column from a file literally named "sortKey"
//   - block-sorts SHARED_SIZE_LIMIT-element tiles on the GPU, then merges
//     them with the sample-rank merge path, then gathers payload columns.
// Preconditions (asserted): tupleNum is a multiple of SHARED_SIZE_LIMIT and
// at most SAMPLE_STRIDE * MAX_SAMPLE_COUNT.
int main(int argc, char * argv[]){
	int *sortCol, sortKey;	// sortCol: per-column fds; sortKey: key-file fd

	char *gpu_bucket, * gpu_key;	// NOTE(review): gpu_bucket is never used

	struct timespec start,end;
	float gpuTime;
	char **sortTable;	// host copies of each column file
	char *keyTable;		// host copy of the key column

	if(argc != 4){
		printf("error: sortTablePrefix colNum sortkeyWidth\n");
		exit(-1);
	}

	// Dummy malloc/free to force CUDA context creation before timing starts.
	int * tmp;
	CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void **)&tmp, sizeof(int)));
	CUDA_SAFE_CALL_NO_SYNC(cudaFree(tmp));

	int colNum = atoi(argv[2]);
	int keySize = atoi(argv[3]);

	sortCol = (int *) malloc(sizeof(int) * colNum);
	sortTable = (char **)malloc(sizeof(char *) * colNum);

	if(!sortCol || !sortTable){
		printf("Failed to allocate host memory\n");
		exit(-1);
	}

	// Open each column file "<prefix><i>".
	char buf[128] = {0};
	for(int i=0;i<colNum;i++){
		memset(buf,0,sizeof(buf));
		sprintf(buf,"%s%d",argv[1],i);
		sortCol[i] = open(buf,O_RDONLY);
	}

	// All columns are assumed to be the same size as column 0.
	int colSize = lseek(sortCol[0],0,SEEK_END);
	int tupleNum = colSize/sizeof(int);

	// mmap each column and copy it into a private heap buffer.
	for(int i=0;i<colNum;i++){
		sortTable[i] = (char *) malloc(colSize);
		char *sortT = (char *)mmap(0,colSize,PROT_READ,MAP_SHARED,sortCol[i],0);
		memcpy(sortTable[i],sortT,colSize);
		munmap(sortT,colSize);
	}

	// Load the key column the same way.
	memset(buf,0,sizeof(buf));
	sprintf(buf,"sortKey");
	sortKey = open(buf, O_RDONLY);
	
	keyTable = (char *) malloc(tupleNum * keySize);
	char *keyT = (char *)mmap(0,tupleNum * keySize,PROT_READ,MAP_SHARED,sortKey,0);
	memcpy(keyTable,keyT,tupleNum * keySize);
	munmap(keyT,tupleNum * keySize);

	char * host_result = (char *)malloc(tupleNum * (keySize + sizeof(int)*colNum));
	memset(host_result, 0, tupleNum * (keySize + sizeof(int)*colNum));

	int nblock = tupleNum/SHARED_SIZE_LIMIT;

	dim3 grid(nblock);
	dim3 block(NTHREAD);
	char * gpu_col;

	clock_gettime(CLOCK_REALTIME,&start);

	cudaEvent_t startGPU, stopGPU;
	cudaEventCreate(&startGPU);
	cudaEventCreate(&stopGPU);
	cudaEventRecord(startGPU,0);

	initMergeSort();

	// Count merge stages so ping-pong buffers end with the final result in
	// gpuSortedKey/gpuKeyPos after an even/odd number of swaps.
	int stageCount = 0;

    	for (int i = SHARED_SIZE_LIMIT; i < tupleNum; i <<= 1, stageCount++);

    	int *ikey, *ival, *okey, *oval;

	// Pack all payload columns into one contiguous device buffer,
	// column-major (column i at offset i*colSize).
	CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void**)&gpu_col, colSize*colNum));

	int offset = 0;

	for(int i=0;i<colNum;i++){
		CUDA_SAFE_CALL_NO_SYNC(cudaMemcpy(gpu_col+offset, sortTable[i],colSize,cudaMemcpyHostToDevice));
		offset += colSize;
	}

	CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void**)&gpu_key,tupleNum * keySize));
	CUDA_SAFE_CALL_NO_SYNC(cudaMemcpy(gpu_key,keyTable,tupleNum * keySize,cudaMemcpyHostToDevice));

	char * gpuSortedKey;
	char * gpuKeyPos;
	char * gpuResult;
	char * d_BufKey;	// ping-pong scratch for keys
	char * d_BufVal;	// ping-pong scratch for positions


	CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void **)&d_BufKey, tupleNum * keySize));
	CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void **)&d_BufVal, tupleNum * keySize));

	CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void **)&gpuSortedKey, tupleNum * keySize));
	CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void **)&gpuKeyPos, tupleNum * sizeof(int)));
	CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void**)&gpuResult, colSize*colNum + tupleNum * keySize));

	// Choose initial in/out so the last swap leaves results in gpuSortedKey.
    	if (stageCount & 1){
        	ikey = (int *)d_BufKey;
        	ival = (int *)d_BufVal;
        	okey = (int *)gpuSortedKey;
        	oval = (int *)gpuKeyPos;
    	}else{
        	ikey = (int *)gpuSortedKey;
        	ival = (int *)gpuKeyPos;
        	okey = (int *)d_BufKey;
        	oval = (int *)d_BufVal;
    	}

	assert(tupleNum <= (SAMPLE_STRIDE * MAX_SAMPLE_COUNT));
    	assert(tupleNum % SHARED_SIZE_LIMIT == 0);

	// Small input: a single block sorts everything; no merge stages needed.
	// NOTE(review): this early return skips the host copy-back, timing and
	// cleanup below, and tupleNum % SHARED_SIZE_LIMIT == 0 makes this path
	// reachable only when tupleNum == 0.
	if(tupleNum < SHARED_SIZE_LIMIT){
		sort_key<<<1,tupleNum/2>>>(gpu_key,tupleNum,keySize,gpuSortedKey,gpuKeyPos,1);
		gather_result<<<1,tupleNum/2>>>(gpuSortedKey,gpuKeyPos,gpu_col, tupleNum, colNum, sizeof(int),gpuResult);
		return 0;
	}

	// Stage 1: sort each SHARED_SIZE_LIMIT tile independently.
	sort_key<<<grid,block>>>(gpu_key,tupleNum,keySize, (char *)ikey, (char *)ival,1);

	// Stage 2: repeatedly merge sorted runs of length i into runs of 2*i,
	// ping-ponging between the (ikey,ival) and (okey,oval) buffer pairs.
	for(int i=2*NTHREAD;i<tupleNum;i*=2){
		int lastSegmentElements = tupleNum % (2 * i);

        	generateSampleRanks(d_RanksA, d_RanksB, ikey, i, tupleNum, 1);

        	mergeRanksAndIndices(d_LimitsA, d_LimitsB, d_RanksA, d_RanksB, i, tupleNum);

        	mergeElementaryIntervals(okey, oval, ikey, ival, d_LimitsA, d_LimitsB, i, tupleNum, 1);

		// A tail shorter than one run has no merge partner; copy it through.
        	if (lastSegmentElements <= i){
            		CUDA_SAFE_CALL_NO_SYNC(cudaMemcpy(okey + (tupleNum - lastSegmentElements), ikey + (tupleNum - lastSegmentElements), lastSegmentElements * sizeof(int), cudaMemcpyDeviceToDevice));
            		CUDA_SAFE_CALL_NO_SYNC(cudaMemcpy(oval + (tupleNum - lastSegmentElements), ival + (tupleNum - lastSegmentElements), lastSegmentElements * sizeof(int), cudaMemcpyDeviceToDevice));
        	}

        	int *t;
        	t = ikey;
        	ikey = okey;
        	okey = t;
        	t = ival;
        	ival = oval;
        	oval = t;
	}

	// Stage 3: permute the payload columns into sorted row order.
	gather_result<<<512,128>>>(gpuSortedKey, gpuKeyPos, gpu_col, tupleNum, colNum, sizeof(int), gpuResult );

	CUDA_SAFE_CALL_NO_SYNC(cudaMemcpy(host_result, gpuResult, (colNum +1) * sizeof(int)*tupleNum, cudaMemcpyDeviceToHost));

	CUDA_SAFE_CALL_NO_SYNC(cudaFree(gpu_key));
	CUDA_SAFE_CALL_NO_SYNC(cudaFree(gpuSortedKey));
	CUDA_SAFE_CALL_NO_SYNC(cudaFree(gpuKeyPos));

	for(int i=0;i<colNum;i++){
		close(sortCol[i]);
		free(sortTable[i]);
	}
	close(sortKey);
	free(keyTable);

	cudaEventRecord(stopGPU,0);
	cudaEventSynchronize(stopGPU);
	cudaEventElapsedTime(&gpuTime,startGPU,stopGPU);
	printf("GPU total time:%lf\n",gpuTime);

	clock_gettime(CLOCK_REALTIME,&end);
	double timeE = (end.tv_sec -  start.tv_sec)* BI + end.tv_nsec - start.tv_nsec;
	printf("CPU time: %lf\n", timeE);
	return 0;
}
