 #include <cutil_inline.h>
#include <device_launch_parameters.h>
#include <GL\freeglut.h>
#include <thrust\sort.h>
#include <thrust\device_vector.h>
#include <thrust\functional.h>
#include <thrust\transform.h>
#include "config.cuh"

typedef int test_t;
// Sort key: (baseId, event time).
typedef thrust::tuple<int, float> KeyType;
// Strict weak ordering over KeyType: ascending baseId, ties broken by
// ascending time. Used by thrust::sort_by_key in launch_sort_kernel.
struct Comp : public thrust::binary_function<KeyType, KeyType, bool>
{
	__host__ __device__
		bool operator () (const KeyType& lhs, const KeyType& rhs) const
	{
		const int leftBase  = thrust::get<0>(lhs);
		const int rightBase = thrust::get<0>(rhs);
		if(leftBase != rightBase)
			return leftBase < rightBase;
		return thrust::get<1>(lhs) < thrust::get<1>(rhs);
	}
};

//Expand each seed event into its full per-call event chain, in place.
//One thread per seed event; d_init[i] is the slot where seed i's data sits
//AND where its generated chain is written into the following slots — the
//caller must have left enough free slots after each seed so chains do not
//overlap (TODO confirm against the allocation code).
//Model inferred from the arithmetic (verify): cells are 2 position-units
//wide (d_baseId = pos/2), travel is clamped at position 40, and the
///3600 and *3600 factors convert between per-hour speed and the time unit
//of d_time. At every cell boundary crossed, a TERMI event is emitted for
//the cell being left and a HANDO (handover) event for the cell entered,
//followed by one final TERMI at the terminal position.
__global__ void init_kernel(int* d_init, int* d_baseId, int* d_no, int* d_etype, 
	float* d_time, float* d_duration, float* d_speed, float* d_pos, int N)
{
	unsigned int i = threadIdx.x+blockIdx.x*blockDim.x;
	if(i<N){
		int eidx = d_init[i];
		int no = d_no[eidx];
		float time = d_time[eidx];
		float duration = d_duration[eidx];
		float speed = d_speed[eidx];
		float pos = d_pos[eidx];
		d_time[eidx]= time;	// NOTE(review): no-op — writes back the value just read

		//terminal position the call would reach, clamped at 40
		float termiPos = pos+duration*speed/3600;
		if(termiPos>40)
			termiPos = 40;
		float currPos = pos;
		//next 2-unit cell boundary strictly ahead of the start position
		float nextPos = ((int)(currPos/2)+1)*2;
		while(nextPos<termiPos){
			//event A: terminate in the current cell at the boundary crossing
			eidx++;
			d_time[eidx] = time = time+(nextPos-currPos)/speed*3600;
			d_speed[eidx] = speed;
			d_pos[eidx] = nextPos;
			d_no[eidx] = no;
			d_duration[eidx] = 0;
			d_etype[eidx] = TERMI;
			d_baseId[eidx] = currPos/2;

			//event B: simultaneous handover into the next cell, carrying the
			//call's remaining duration
			eidx++;
			d_time[eidx] = time;
			d_speed[eidx] = speed;
			d_pos[eidx] = nextPos;
			d_no[eidx] = no;
			d_duration[eidx] = duration-(nextPos-pos)/speed*3600;
			d_etype[eidx] = HANDO;
			d_baseId[eidx] = nextPos/2;

			currPos = nextPos;
			nextPos = nextPos+2;
		}
		//final termination event in the last cell reached
		eidx++;
		d_time[eidx] = time+(termiPos-currPos)/speed*3600;
		d_speed[eidx] = speed;
		d_pos[eidx] = termiPos;
		d_no[eidx] = no;
		d_duration[eidx] = 0;
		d_etype[eidx] = TERMI;
		d_baseId[eidx] = currPos/2;
	}
}

//float* d_init, float* d_duration, float* d_speed, float* d_pos can be freed
//Launches init_kernel over all n seed events, then releases the input-only
//arrays. NOTE(review): relies on cudaFree synchronizing with the in-flight
//kernel before the buffers are reclaimed — confirm for the target runtime.
extern "C" void launch_init_kernel(da_t da, int n)
{
	const int threadAmount = 100;
	// BUGFIX: the original grid of n/100 truncated, silently skipping up to
	// 99 tail events whenever n was not a multiple of 100. Ceil-divide
	// instead; the kernel's i<N guard handles the overhang threads.
	const int blockAmount = (n + threadAmount - 1) / threadAmount;
	init_kernel<<< blockAmount, threadAmount>>>(da.d_init, da.d_baseId, da.d_no, da.d_etype, 
		da.d_time, da.d_duration, da.d_speed, da.d_pos, n);
	cudaFree(da.d_speed);
	cudaFree(da.d_pos);
	cudaFree(da.d_duration);
	cudaFree(da.d_init);
}

//float *d_time can be freed
//Sorts all n events in place by the composite key (baseId, time) using Comp,
//carrying the (no, etype) payload along, then releases d_time.
extern "C" void launch_sort_kernel(da_t &da, int n)
{
	// Wrap the raw device arrays so thrust can treat them as iterators.
	thrust::device_ptr<int>   baseIdPtr(da.d_baseId);
	thrust::device_ptr<int>   noPtr(da.d_no);
	thrust::device_ptr<int>   etypePtr(da.d_etype);
	thrust::device_ptr<float> timePtr(da.d_time);

	// Keys are (baseId, time) pairs; values are the (no, etype) payload.
	thrust::sort_by_key(
		thrust::make_zip_iterator(thrust::make_tuple(baseIdPtr, timePtr)),
		thrust::make_zip_iterator(thrust::make_tuple(baseIdPtr + n, timePtr + n)),
		thrust::make_zip_iterator(thrust::make_tuple(noPtr, etypePtr)),
		Comp());
	cudaFree(da.d_time);
}

//target, which baseId one want to find, temp's size is blockSize
//For a d_baseId array sorted ascending, writes into seps[b] the index of the
//first event whose baseId is b — but only where the id steps up by exactly 1
//from its predecessor (bases with no events leave their slot untouched, so
//the caller must pre-initialize seps).
__global__ void sep_kernel(int *d_baseId, int n, int *seps){
	const unsigned int i = threadIdx.x+blockIdx.x*blockDim.x;
	const unsigned int tid = threadIdx.x;
	__shared__ int s_baseId[BLOCKSIZE+1];
	// s_baseId[0] holds the last id of the previous block (0 for block 0)
	if(tid == 0){
		if(i==0)
			s_baseId[0]=0;
		else
			s_baseId[0]=d_baseId[i-1];
	}
	if(i<n)
		s_baseId[tid+1]=d_baseId[i];
	// BUGFIX: the barrier was inside the divergent if(i<n) branch, so in the
	// last (partial) block not all threads reached it — undefined behavior.
	// All threads must hit the barrier; the reads below stay guarded.
	__syncthreads();
	if(i<n && s_baseId[tid+1]==s_baseId[tid]+1)
		seps[s_baseId[tid+1]]=i;
}

//Computes per-base start offsets into the sorted event arrays and copies the
//BASENO-entry table back to the host buffer seps.
extern "C" void launch_separation_kernel(da_t da, int n, int *seps){
	int threadAmount = BLOCKSIZE;
	int blockAmount = (n%threadAmount!=0)?(n/threadAmount+1):(n/threadAmount);
	int *d_seps;
	cudaMalloc((void**)&d_seps, BASENO*sizeof(int));
	// BUGFIX: zero the table first — sep_kernel only writes entries where the
	// baseId actually steps up by 1, so untouched slots were previously
	// copied back as uninitialized garbage.
	cudaMemset(d_seps, 0, BASENO*sizeof(int));
	sep_kernel<<<blockAmount, threadAmount>>>(da.d_baseId, n, d_seps);
	cudaMemcpy(seps, d_seps, BASENO*sizeof(int), cudaMemcpyDeviceToHost);
	// BUGFIX: d_seps was leaked on every call.
	cudaFree(d_seps);
}

//One step of an in-place parallel prefix-sum over d_etype, segmented by
//d_baseId. Within each strd-wide window, upper-half elements add in the
//value held by the last element of the lower half — but only when both ends
//share the same baseId (the array is sorted by baseId, so equal endpoints
//mean the whole span lies inside one segment).
__global__ void sum_kernel(int* d_baseId, int* d_etype, int n, int strd){
	const unsigned int gid = blockIdx.x*blockDim.x + threadIdx.x;
	if(gid >= n)
		return;
	const int half = strd/2;
	if(gid % strd < half)
		return;	//lower-half elements are read-only during this step
	const int partner = gid - gid%half - 1;	//last element of the lower half
	if(d_baseId[gid] == d_baseId[partner])
		d_etype[gid] += d_etype[partner];
}

ofstream sumout("sum.txt");	// NOTE(review): unused in this chunk — possibly referenced elsewhere
//Runs the doubling-stride sum_kernel passes over the n events of one base
//partition (starting at offset ptr), producing an in-place segmented prefix
//sum of d_etype. The stride runs up to 2*n so every element is covered even
//when n is not a power of two.
extern "C" void launch_sum_kernel(da_t da, int n, int ptr){
	int threadAmount = BLOCKSIZE;
	int blockAmount = (n + threadAmount - 1) / threadAmount;	//ceil-div

	int* d_baseId=&da.d_baseId[ptr];
	int* d_etype=&da.d_etype[ptr];
	// (removed unused local d_no)

	int strd = 2;
	while(strd<=2*n){
		sum_kernel<<<blockAmount, threadAmount>>>(d_baseId, d_etype, n, strd);
		strd = strd*2;
	}
}

//Block reduction: among this block's elements, keep the (etype, index) pair
//with the smallest index whose etype exceeds 10; pairs with etype<=10 lose
//to any etype>10 pair. One result per block is written to t_etype/t_index.
//First pass is signaled by d_etype != t_etype (indices are the global ids);
//later passes reduce the per-block partials using t_index as the index.
//Expects blockDim.x <= BLOCKSIZE (shared arrays) and <= 1024 (s starts at 512).
__global__ void find_min_kernel(int* d_etype, int* d_index, int n, int* t_etype, int* t_index){
	const unsigned int tid = threadIdx.x;
	const unsigned int i = threadIdx.x+blockIdx.x*blockDim.x;
	__shared__ int s_index[BLOCKSIZE];
	__shared__ int s_etype[BLOCKSIZE];
	volatile int* v_etype=s_etype;
	volatile int* v_index=s_index;

	if(i<n){
		if(d_etype != t_etype){
			s_index[tid]=i;
			s_etype[tid]=d_etype[i];
		}else{
			s_index[tid]=t_index[i];
			s_etype[tid]=t_etype[i];
		}
	}
	// BUGFIX: a barrier was missing between the shared-memory stores above
	// and the first reduction read below — the original only synchronized at
	// the END of each loop iteration, so the first step raced.
	__syncthreads();

	int s = 512;//stride
	while(s>32){
		if (blockDim.x >= 2*s && tid < s && i+s<n){
			if((s_etype[tid]<=10&&s_etype[tid+s]>10)||
				(s_etype[tid]>10&&s_etype[tid+s]>10&&s_index[tid]>s_index[tid+s])){
					s_etype[tid]=s_etype[tid+s];
					s_index[tid]=s_index[tid+s];
			}
		}
		s=s/2;
		__syncthreads(); 
	}

	// Final warp: relies on legacy implicit warp-synchronous execution via
	// volatile pointers. NOTE(review): not safe on Volta+ (independent
	// thread scheduling) — would need __syncwarp() between steps there.
	while(s>=1){
		if (blockDim.x >= 2*s && tid<s && i+s<n){
			if((v_etype[tid]<=10&&v_etype[tid+s]>10)||
				(v_etype[tid]>10&&v_etype[tid+s]>10&&v_index[tid]>v_index[tid+s])){
					v_etype[tid]=v_etype[tid+s]; 
					v_index[tid]=v_index[tid+s]; 
			}
		}
		s=s/2;
	}
	if(tid==0){
		t_etype[blockIdx.x]=s_etype[tid];
		t_index[blockIdx.x]=s_index[tid];
	}
}

//Multi-pass reduction over da.d_etype[ptr..ptr+n): returns the index
//(relative to ptr) of the first event with etype > 10, or the surviving
//default index (0 in practice) when none qualifies.
int launch_findmin_kernel(da_t da, int n, int ptr){
	const int threadAmount = BLOCKSIZE;
	int blockAmount = (n%threadAmount!=0)?(n/threadAmount+1):(n/threadAmount);

	int *d_etype = &(da.d_etype[ptr]);
	int *t_etype, *t_index;
	cudaMalloc((void**)&t_etype, blockAmount*sizeof(int));
	cudaMalloc((void**)&t_index, blockAmount*sizeof(int));

	// First pass: one (etype, index) partial per block.
	find_min_kernel<<<blockAmount, threadAmount>>>(d_etype, NULL, n, t_etype, t_index);

	// BUGFIX: the number of remaining partials is exactly the block count of
	// the previous pass. The original used n/threadAmount+1, which overshoots
	// by one whenever n divides evenly, making later passes read past the end
	// of t_etype/t_index (uninitialized/out-of-bounds memory).
	int remaining = blockAmount;
	while(remaining > 1){
		int blocks = (remaining + threadAmount - 1) / threadAmount;
		find_min_kernel<<<blocks, threadAmount>>>(t_etype, t_index, remaining, t_etype, t_index);
		remaining = blocks;
	}

	int h_index;
	// cudaMemcpy is blocking, so the reduction has completed by the read.
	cudaMemcpy(&h_index, t_index, sizeof(int), cudaMemcpyDeviceToHost);
	// BUGFIX: the temporaries were leaked on every call.
	cudaFree(t_etype);
	cudaFree(t_index);

	return h_index;
}

//would it be better if using the shared memory?
//Decrements d_etype for every event from decptr (inclusive) to n (exclusive),
//and stashes the call number at the decrement point into temp[0] for the
//follow-up find_incptr_kernel pass.
__global__ void decrease_kernel(int *d_etype, int *d_no, const int decptr, const int n, int *temp){
	const unsigned int idx = blockDim.x*blockIdx.x + threadIdx.x;
	if(idx == decptr)
		temp[0] = d_no[idx];
	if(idx >= decptr && idx < n)
		d_etype[idx] -= 1;
}

//Finds an event after decptr that belongs to the same call as the event at
//decptr, leaving its index in temp[0] for the host to read back.
//(d_etype is accepted for launch-signature symmetry but not used.)
__global__ void find_incptr_kernel(int *d_etype, int *d_no, const int decptr, const int n, int *temp){
	const unsigned int i = threadIdx.x+blockIdx.x*blockDim.x;	
	// BUGFIX: the original compared d_no[i] against temp[0] while other
	// threads were overwriting temp[0] with an index — a read/write race that
	// could corrupt the comparison value mid-kernel. Compare against the
	// call number at decptr directly instead.
	const int target = d_no[decptr];
	if(i > decptr && i < n && d_no[i] == target)
		temp[0] = i;	// NOTE(review): if several events match, the survivor is nondeterministic — confirm the match is unique
}

//Increments etype for every event from incptr (inclusive) to n (exclusive).
__global__ void increase_kernel(int* etype, int n, int incptr){
	const unsigned int idx = blockDim.x*blockIdx.x + threadIdx.x;
	if(idx < incptr || idx >= n)
		return;
	etype[idx] += 1;
}

//Decrements etype from decptr onward within the base partition at offset
//ptr, locates the matching buddy event of the same call, then increments
//etype from that buddy onward.
void launch_decrease_kernel(da_t da, int n, int ptr, int decptr){
	const int threadAmount = BLOCKSIZE;
	const int blockAmount = (n%threadAmount!=0)?(n/threadAmount+1):(n/threadAmount);

	int *d_etype = &(da.d_etype[ptr]);
	int *d_no = &(da.d_no[ptr]);

	int *temp, h_temp;
	cudaMalloc((void**)&temp, sizeof(int));
	decrease_kernel<<<blockAmount, threadAmount>>>(d_etype, d_no, decptr, n, temp);
	find_incptr_kernel<<<blockAmount, threadAmount>>>(d_etype, d_no, decptr, n, temp);
	// BUGFIX: this copy had been commented out as a "debug print", leaving
	// h_temp uninitialized — increase_kernel then received a garbage start
	// index. The blocking copy also synchronizes with the kernels above.
	cudaMemcpy(&h_temp, temp, sizeof(int), cudaMemcpyDeviceToHost);
	// NOTE(review): if no buddy event exists, temp[0] still holds the call
	// number written by decrease_kernel, not a valid index — confirm callers
	// guarantee a match.

	increase_kernel<<<blockAmount, threadAmount>>>(d_etype, n, h_temp);

	cudaFree(temp);
}

//Zeroes the etype of every event belonging to call number decno.
//(d_baseId is accepted for signature compatibility but not used here.)
__global__ void adjust_kernel(int *d_etype, int *d_no, int *d_baseId, int n, int decno){
	const unsigned int idx = blockDim.x*blockIdx.x + threadIdx.x;
	if(idx >= n)
		return;
	if(d_no[idx] == decno)
		d_etype[idx] = 0;
}

//Reads back the call number stored at global event index decptr, then clears
//the etype of every event across all n events that belongs to that call.
void launch_adjust_kernel(da_t da, int n, int decptr){
	const int threads = BLOCKSIZE;
	const int blocks = (n + threads - 1) / threads;	//ceil-div

	int targetNo = 0;	
	// blocking copy: fetch the selected event's call number from the device
	cudaMemcpy(&targetNo, &da.d_no[decptr], sizeof(int), cudaMemcpyDeviceToHost);

	adjust_kernel<<<blocks, threads>>>(da.d_etype, da.d_no, da.d_baseId, n, targetNo);
}

int eventGetCaught = 0;	// debug counter (referenced only from commented-out code); stray ';;' removed
/*Runs one correction round on the events of a single base station.
  n is the number of events of one base, total_n is the total event amount,
  ptr is the offset of this base's first event in the global arrays.
  Returns the partition-relative index of the event that triggered the
  correction, or 0 when nothing needed fixing.*/
extern "C" int  launch_correction_kernel(da_t da, int n, int total_n, int ptr){
	int decptr = launch_findmin_kernel(da, n, ptr);
	if(decptr != 0){
		launch_decrease_kernel(da, n, ptr, decptr);
		launch_adjust_kernel(da, total_n, decptr+ptr);
	}
	return decptr;
}
                