#include "book.h"
#include "cuPrintf.cu"
#define MAX_NUM_LISTS 32
#define NUM_ELEM 1024
#define numThreadsPerBlock 32 
#define topN  10


// One scored record: a relevance score paired with the document it belongs to.
// The radix sort reads `score` through a uint, so it is ordered as unsigned.
struct cuRadix {
int score;  // sort key
int DN;     // document id (printed as "document id" by the host loop)

};

// Radix-sorts num_lists interleaved lists in place, one list per thread.
// List `tid` occupies indices tid, tid + num_lists, tid + 2*num_lists, ...
// Each thread touches only its own list, so no barrier is needed inside the
// bit loop; the single __syncthreads() at the end ensures every list is fully
// sorted before the caller merges them.
//
// sort_tmp   : data to sort (also receives the sorted result)
// sort_tmp_1 : scratch buffer of the same size, holds the "ones" partition
// Preconditions: tid < num_lists, and num_elements is a multiple of
// num_lists (otherwise i+tid indexes past the end — TODO confirm callers).
__device__ void radix_sort2(cuRadix * const sort_tmp,
const uint num_lists,
const uint num_elements,
const uint tid,
cuRadix * const sort_tmp_1)
{
// One stable partition pass per bit of the 32-bit (unsigned) score, LSB first
	for (uint bit = 0;bit<32;bit++)
	{
		const uint bit_mask = (1 << bit);
		uint base_cnt_0 = 0;	// write cursor for the zeros (in sort_tmp)
		uint base_cnt_1 = 0;	// write cursor for the ones (in sort_tmp_1)
		
	// Partition this thread's list by the current bit. Writing zeros back
	// into sort_tmp in place is safe because base_cnt_0 never overtakes i.
	for (uint i=0; i<num_elements; i+=num_lists)
	{
		const uint score = sort_tmp[i+tid].score;
		const uint DN = sort_tmp[i+tid].DN;
		if ( (score & bit_mask) > 0 )
		{
			sort_tmp_1[base_cnt_1+tid].score = score;
			sort_tmp_1[base_cnt_1+tid].DN = DN;
			base_cnt_1+=num_lists;
		}
		else
		{
			sort_tmp[base_cnt_0+tid].score = score;
			sort_tmp[base_cnt_0+tid].DN = DN;
			base_cnt_0+=num_lists;
		}
	}

		// Append the ones partition after the zeros, preserving order
		// (stability is what makes LSB-first radix sort correct)
		for (uint i=0; i<base_cnt_1; i+=num_lists)
		{
			sort_tmp[base_cnt_0+i+tid].DN = sort_tmp_1[i+tid].DN;
			sort_tmp[base_cnt_0+i+tid].score = sort_tmp_1[i+tid].score;
		}
	}
	// All lists sorted before anyone proceeds to the merge stage
	__syncthreads();
}

// Uses multiple threads for merge
// Deals with multiple identical entries in the data
// Merges num_lists pre-sorted interleaved lists from src_array into one
// fully sorted sequence in dest_array. One thread owns one list; on every
// iteration all threads bid their current head element into a shared
// atomicMin and the winner emits the next output element. Ties on score are
// broken by the lowest tid, so duplicate values are handled deterministically.
//
// Preconditions: blockDim.x == num_lists <= MAX_NUM_LISTS, tid < num_lists,
// and num_elements is a multiple of num_lists.
__device__ void merge_array6(const cuRadix  * const src_array,
	cuRadix  * const dest_array,
	const uint num_lists,
	const uint num_elements,
	const uint tid)
{
	const uint num_elements_per_list = (num_elements / num_lists);
	__shared__ uint list_indexes[MAX_NUM_LISTS];
	list_indexes[tid] = 0;
	// Wait for every thread to clear its list cursor
	__syncthreads();
	// Emit exactly one output element per iteration
	for (uint i=0; i<num_elements; i++)
	{
		// Election state shared across the block
		__shared__ uint min_val;
		__shared__ uint min_tid;
		// This thread's candidate: the head of its list
		uint score;
		// BUG FIX: initialize DN so an exhausted list can never publish an
		// indeterminate value (possible if a real score equals the sentinel).
		uint DN = 0;
		// Read from this list only if it still has elements
		if (list_indexes[tid] < num_elements_per_list)
		{
			// Lists are interleaved: element k of list tid is at tid + k*num_lists
			const uint src_idx = tid + (list_indexes[tid] * num_lists);
			score = src_array[src_idx].score;
			DN = src_array[src_idx].DN;
		}
		else
		{
			// Sentinel that loses every election.
			// NOTE(review): a genuine score of 0xFFFFFFFF would collide with it.
			score = 0xFFFFFFFF;
		}
		// Thread zero resets the election state for this round
		if (tid == 0)
		{
			min_val = 0xFFFFFFFF;
			min_tid = 0xFFFFFFFF;
		}
		// Reset must be visible before anyone votes
		__syncthreads();
		// Every thread bids its score; the smallest value wins
		atomicMin(&min_val, score);
		// All bids in before anyone inspects the result
		__syncthreads();
		// Among threads tied on the minimum score, the lowest tid wins
		if (min_val == score)
		{
			atomicMin(&min_tid, tid);
		}
		// All tie-break bids in before the winner is read
		__syncthreads();
		// The winning thread advances its list and writes the output
		if (tid == min_tid)
		{
			list_indexes[tid]++;
			dest_array[i].score = score;
			dest_array[i].DN = DN;
			//cuPrintf("the score : %d document id : %d  \n",dest_array[i].score ,dest_array[i].DN );
		}
		// BUG FIX: without this barrier the winning thread can loop around and
		// let thread 0 reset min_tid/min_val (above) while slower threads are
		// still comparing tid == min_tid for THIS round — a data race that can
		// drop or corrupt output elements.
		__syncthreads();
	}
}


// Stage the input records from global memory into the shared scratch buffer.
// Each thread copies the elements of its own interleaved list
// (indices tid, tid + num_lists, tid + 2*num_lists, ...).
__device__ void copy_data_to_shared(const cuRadix * const data,
cuRadix * const sort_tmp,
const uint num_lists,
const uint num_elements,
const uint tid)
{
	for (uint base = 0; base < num_elements; base += num_lists)
	{
		const uint idx = base + tid;
		sort_tmp[idx].score = data[idx].score;
		sort_tmp[idx].DN = data[idx].DN;
		//cuPrintf("the score : %d document id : %d  \n",sort_tmp[idx].score ,sort_tmp[idx].DN );
	}
	// Every list must be fully staged before any thread starts sorting
	__syncthreads();
}

// Kernel: sorts `data` (num_elements cuRadix records) by score, in place.
// Pipeline: stage into shared memory -> radix-sort num_lists interleaved
// lists in parallel (one list per thread) -> merge the sorted lists back
// into `data`.
// Launch contract: a SINGLE block with blockDim.x == num_lists
// (num_lists <= MAX_NUM_LISTS); num_elements <= NUM_ELEM and a multiple
// of num_lists.
__global__ void gpu_sort_array_array(
cuRadix * const data,
const uint num_lists,
const uint num_elements)
{
	const uint tid = (blockIdx.x * blockDim.x) + threadIdx.x;
	// Guard: the shared staging buffers below hold at most NUM_ELEM records.
	// The condition is uniform across the block, so returning here is
	// barrier-safe (no thread reaches a __syncthreads() alone).
	if (num_elements > NUM_ELEM)
	{
		return;
	}
	__shared__ cuRadix sort_tmp[NUM_ELEM];
	__shared__ cuRadix sort_tmp_1[NUM_ELEM];
	copy_data_to_shared(data, sort_tmp, num_lists,
	num_elements, tid);
	radix_sort2(sort_tmp, num_lists, num_elements,
	tid, sort_tmp_1);
	merge_array6(sort_tmp, data, num_lists,
	num_elements, tid);
}




// Host driver: builds a small array of scored records, sorts it on the GPU
// with the single-block radix-sort + merge kernel, and prints the result.
// Returns 0 on success, 1 on host allocation failure.
int main (void)
{
	cuRadix *h_input;
	cuRadix *h_output;
	cuRadix *d_input;
	// Record count; a multiple of numThreadsPerBlock so every list gets the
	// same number of elements (required by the kernel's merge stage).
	int mem_size = (int)numThreadsPerBlock * 21;

	// Allocate the input and output arrays on the host
	h_input = (cuRadix *) malloc (mem_size * sizeof(cuRadix));
	h_output = (cuRadix *) malloc (mem_size * sizeof(cuRadix));
	if (h_input == NULL || h_output == NULL)
	{
		fprintf(stderr, "host allocation failed\n");
		free(h_input);
		free(h_output);
		return 1;
	}

	// Allocate the device array
	HANDLE_ERROR(cudaMalloc((void **)&d_input, mem_size * sizeof(cuRadix)));

	// Initialize the host input: scores cycle 3,4,...,9,0,1,2; DN is the index
	int i;
	for (i = 0; i < mem_size; i++)
	{
		h_input[i].score = (i + 3) % 10;
		h_input[i].DN = i;
	}

	// Copy the input array from host to device
	HANDLE_ERROR(cudaMemcpy(d_input, h_input, sizeof(cuRadix) * mem_size, cudaMemcpyHostToDevice));

	cudaPrintfInit();

	// One thread per list; never launch more threads than there are elements.
	// (The two original branches launched the identical configuration.)
	int threads_needed = (mem_size >= numThreadsPerBlock) ? numThreadsPerBlock : mem_size;
	gpu_sort_array_array <<< 1, threads_needed >>> (d_input, threads_needed, mem_size);
	// Kernel launches are asynchronous and return no status; surface
	// launch-configuration errors explicitly.
	HANDLE_ERROR(cudaGetLastError());

	cudaPrintfDisplay(stdout, true);
	cudaPrintfEnd();

	// Blocking copy; also synchronizes with the kernel before reading results
	HANDLE_ERROR(cudaMemcpy(h_output, d_input, sizeof(cuRadix) * mem_size, cudaMemcpyDeviceToHost));

	// Print the sorted records
	for (i = 0; i < mem_size; i++)
	{
		printf ("the score : %d document id : %d  \n",h_output[i].score ,h_output[i].DN );
	}

	cudaFree(d_input);
	free(h_input);
	free(h_output);
	return 0;
}
