#include <iostream>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <pthread.h>
#include <sys/time.h> 
#include <stdio.h>
#include<thrust/iterator/counting_iterator.h>
#include<thrust/binary_search.h>
#include<thrust/functional.h>
#include "CudaRand.h"
using namespace std;

#define THREADS_PER_BLK 10
#define BLKS 4
/* NUM_PEOPLE references GPU_CORES before its #define below; this works
 * because macro expansion is deferred to the point of use. */
#define NUM_PEOPLE (THREADS_PER_BLK * BLKS * GPU_CORES)
#define NUM_LOCATION 8
#define GPU_CORES 4
#define ITERS 1
#define INFECT_RATE_1 0.1
#define INFECT_RATE_2 0.2

// mutex guards interleaved debug printing (all uses are commented out);
// barrier synchronizes the GPU_CORES worker threads between phases
pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_barrier_t barrier; 

/* allocate NUM_LOCATION number of blocks */
/* Infection kernel: one block per (peer GPU, local location) pair.  Each
 * thread walks a chunk of that location's people; for every infected person
 * it scans all peers' people in the same location and, gated by two random
 * draws, marks susceptible ones as infected in the per-peer result buffer.
 *
 * NOTE(review): the header comment expects gridDim.x == NUM_LOCATION, but the
 * call site launches <<<BLKS,THREADS_PER_BLK>>> with BLKS=4 while
 * NUM_LOCATION=8 -- with 4 blocks, gpu_id below only reaches 0..1.  Confirm
 * the intended grid size.
 * NOTE(review): the int2*[GPU_CORES] parameters decay to int2**, so the
 * pointer tables must live in device memory; the call site passes host stack
 * arrays of device pointers -- verify where the tables are allocated.
 * NOTE(review): peer_location_start/end hold indices into each peer's FULL
 * id_status array, but peer_id_status/peer_ppl_result were copied as slices
 * starting at the first relevant location -- indexing them with the absolute
 * i/k below looks off by the slice origin; confirm. */
__global__ void multi_gpu_work(
	int2* peer_id_status[GPU_CORES],int2* peer_ppl_result[GPU_CORES],
	int* peer_location_start[GPU_CORES],int* peer_location_end[GPU_CORES],RandGen* rand_vec){

	// flat global thread id, used only to pick this thread's RNG state
	int index = blockIdx.x * blockDim.x + threadIdx.x;
	
	int place_per_core = NUM_LOCATION / GPU_CORES;
	int gpu_id = blockIdx.x / place_per_core; // indicate which GPU status the thread access
	int location_id = blockIdx.x % place_per_core;
	
	// [block_start_idx, block_end_idx) is this location's span of people
	int block_start_idx = peer_location_start[gpu_id][location_id];
	int block_end_idx = peer_location_end[gpu_id][location_id];
	int block_size = block_end_idx-block_start_idx;
	
	// split the span evenly over the block's threads (ceil division) and
	// clamp the last thread's chunk to the span's end
	int thread_size = block_size/blockDim.x + (block_size%blockDim.x != 0);
	int thread_start_idx = block_start_idx + thread_size * threadIdx.x;
	int thread_end_idx = thread_start_idx + thread_size;
	thread_end_idx = thread_end_idx < block_end_idx ? thread_end_idx : block_end_idx;
	
	for(int i = thread_start_idx; i < thread_end_idx; i++){
		if(peer_id_status[gpu_id][i].y == 1){ // if it is infected
			// an infected person meets everyone in the same location on every GPU
			for(int j = 0; j < GPU_CORES; j++){
				int start_idx = peer_location_start[j][location_id];
				int end_idx = peer_location_end[j][location_id];
				for(int k = start_idx; k < end_idx; k++){
					if(peer_id_status[j][k].y == 0){ // susceptible
						// two-stage random draw gates the infection
						if(rand_vec[index].cudaRandUni() < INFECT_RATE_1){
							if(rand_vec[index].cudaNormal() < INFECT_RATE_2){
								// several threads may store here concurrently;
								// all writers store the same value (1)
								peer_ppl_result[j][k].y = 1;
							}
						}
					}
				}
			}
		}
	}
}

/* Device comparator ordering ints ascending.  The sin() loop burns ALU
 * cycles without influencing the result; it is kept verbatim so the
 * comparator's artificial workload (and thus any benchmark using it)
 * stays identical. */
struct Comp{
	__device__
	bool operator()(const int&a, const int& b){
		double busy = (double)a / (double)b;
		int spin = 0;
		while(spin < 5){
			busy += sin(busy);
			spin++;
		}
		return a < b;
	}
};

/* Unary functor for thrust::transform: draws a pseudo-random location id in
 * [0, NUM_LOCATION) from a per-person RandGen state, advancing that state.
 * NOTE(review): if RandGen::cudaRandInt() can return negative values the
 * modulo can yield a negative location -- confirm against CudaRand.h. */
struct RandLocation{
	__device__
	int operator()(RandGen& rand_obj){
		return rand_obj.cudaRandInt() % NUM_LOCATION;
	}
};

/* Toy kernel referenced only by the commented-out bandwidth experiments in
 * main(): stores data_in[idx] + num into data_out[idx], redundantly repeated
 * 1000 times to create measurable work.  There is no bounds guard, so the
 * launch configuration must cover exactly the buffer length. */
__global__ void normal(int* data_in, int* data_out, int num){
	int gid = threadIdx.x + blockIdx.x * blockDim.x;
	int repeat = 1000;
	while(repeat > 0){
		//data_out[gid] = sin((double)data_in[gid]) + 1;
		data_out[gid] = data_in[gid] + num;
		repeat--;
	}
}

/* Everything one GPU worker thread needs.  One instance per device lives in
 * global_param[]; threads read each other's entries to reach peer buffers. */
struct ThreadParam{
	int id; // device index this thread drives (== its index in global_param)
	int rand_seed; // NOTE(review): never read in this file -- possibly legacy
	thrust::device_vector<int2>* id_status; // per person: x = id, y = infected flag (0/1)
	thrust::device_vector<int>*  ppl_location; // per person: current location id
	
	// slices of each peer's people that fall inside this GPU's locations
	thrust::device_vector<int2>* peer_id_status[GPU_CORES];
	thrust::device_vector<int2>* peer_ppl_result[GPU_CORES]; // kernel output per peer
	
	// per-location [start,end) offsets into the location-sorted local arrays
	thrust::device_vector<int>* location_start;
	thrust::device_vector<int>* location_end;
	
	// this GPU's slice of each peer's location index tables
	thrust::device_vector<int>* peer_location_start[GPU_CORES];
	thrust::device_vector<int>* peer_location_end[GPU_CORES];
	
	thrust::device_vector<RandGen>* rand_vec; // one RNG state per person
	
	cudaStream_t stream[GPU_CORES]; // one stream per peer for async copies
	cudaEvent_t start; // timing events (elapsed-time readout commented out in main)
	cudaEvent_t stop;
};

ThreadParam global_param[GPU_CORES];

/* Per-GPU worker thread body.  Each of the GPU_CORES pthreads drives one
 * device: draw random locations for its people, sort by location, exchange
 * location index ranges and people slices with its peers over P2P, run the
 * infection kernel, then gather the updated statuses back.  Threads
 * rendezvous at the global pthread barrier so nobody reads a peer's buffers
 * before that peer has produced them.
 * argument: a ThreadParam* from global_param[].  Returns NULL. */
void *infect_kernel(void* argument){
	ThreadParam& param = *((ThreadParam*)argument);
	int thread_id = param.id;
	cudaSetDevice(thread_id);
	
	/* one stream per peer, plus P2P access so cudaMemcpyDefault can move
	 * data directly between devices */
	for(int i = 0; i < GPU_CORES; i++){
		cudaStreamCreate(&(param.stream[i]));
		if(i == thread_id) continue;
		cudaDeviceEnablePeerAccess(i,0);
	}
	
	cudaEventCreate(&(param.start));
	cudaEventCreate(&(param.stop));
	thrust::counting_iterator<unsigned int> counter(0);
	
	/* The kernel's `int2* x[GPU_CORES]` parameters decay to int2**, i.e. a
	 * pointer to a table of device pointers.  The table itself must live in
	 * device memory: the original code passed the address of a host stack
	 * array, which the kernel then dereferenced -- an illegal (host) address
	 * on the GPU.  Allocate device-side tables once, refresh each iteration. */
	int2** d_peer_id_status;
	int2** d_peer_ppl_result;
	int** d_peer_location_start;
	int** d_peer_location_end;
	cudaMalloc(&d_peer_id_status, GPU_CORES*sizeof(int2*));
	cudaMalloc(&d_peer_ppl_result, GPU_CORES*sizeof(int2*));
	cudaMalloc(&d_peer_location_start, GPU_CORES*sizeof(int*));
	cudaMalloc(&d_peer_location_end, GPU_CORES*sizeof(int*));
	
	cudaEventRecord(param.start);
	for(int days = 0; days < ITERS; days++){
		/* generate a random location for every local person */
		thrust::transform(
			param.rand_vec->begin(),param.rand_vec->begin()+(NUM_PEOPLE/GPU_CORES),
			param.ppl_location->begin(),RandLocation());
	
		/* sort people by location so each location is a contiguous range */
		thrust::sort_by_key(param.ppl_location->begin(),param.ppl_location->end(),param.id_status->begin());
	
		/* per-location [start,end) offsets into the sorted arrays */
		thrust::lower_bound(
			param.ppl_location->begin(),param.ppl_location->end(),
			counter,counter+NUM_LOCATION,
			param.location_start->begin());
		thrust::upper_bound(
			param.ppl_location->begin(),param.ppl_location->end(),
			counter,counter+NUM_LOCATION,
			param.location_end->begin());
		
		/* every thread's index tables must exist before peers read them */
		pthread_barrier_wait(&barrier);
		
		/* pull this GPU's slice of every peer's location index tables */
		for(int i = 0; i < GPU_CORES; i++){
			int buf_size = NUM_LOCATION/GPU_CORES*sizeof(int);
			cudaMemcpyAsync(
				thrust::raw_pointer_cast(param.peer_location_start[i]->data()),
				thrust::raw_pointer_cast(global_param[i].location_start->data())+thread_id*NUM_LOCATION/GPU_CORES,
				buf_size,cudaMemcpyDefault,param.stream[i]);
			cudaMemcpyAsync(
				thrust::raw_pointer_cast(param.peer_location_end[i]->data()),
				thrust::raw_pointer_cast(global_param[i].location_end->data())+thread_id*NUM_LOCATION/GPU_CORES,
				buf_size,cudaMemcpyDefault,param.stream[i]);
		}
		/* the element reads below go through the default stream; the async
		 * copies above must have landed first or we read stale values */
		for(int i = 0; i < GPU_CORES; i++){
			cudaStreamSynchronize(param.stream[i]);
		}
		
		/* copy each peer's people that fall inside this GPU's locations */
		for(int i = 0; i < GPU_CORES; i++){
			int start = (*(param.peer_location_start[i]))[0];
			int end = (*(param.peer_location_end[i]))[NUM_LOCATION/GPU_CORES-1];
			int size = end - start;
			param.peer_id_status[i]->resize(size);
			param.peer_ppl_result[i]->resize(size);
			cudaMemcpyAsync(
				thrust::raw_pointer_cast(param.peer_id_status[i]->data()),
				thrust::raw_pointer_cast(global_param[i].id_status->data())+start,
				size*sizeof(int2),cudaMemcpyDefault,param.stream[i]);
			/* seed the result buffer with the incoming statuses: the kernel
			 * only flips newly-infected entries, and the original code copied
			 * an otherwise-uninitialized buffer back as the new status */
			cudaMemcpyAsync(
				thrust::raw_pointer_cast(param.peer_ppl_result[i]->data()),
				thrust::raw_pointer_cast(global_param[i].id_status->data())+start,
				size*sizeof(int2),cudaMemcpyDefault,param.stream[i]);
		}
		
		cudaDeviceSynchronize();
		
		/* refresh the device-side pointer tables every iteration, since
		 * resize() above may have reallocated the peer vectors */
		int2* h_peer_id_status[GPU_CORES];
		int2* h_peer_ppl_result[GPU_CORES];
		int* h_peer_location_start[GPU_CORES];
		int* h_peer_location_end[GPU_CORES];
		for(int i = 0; i < GPU_CORES; i++){
			h_peer_id_status[i] = thrust::raw_pointer_cast(param.peer_id_status[i]->data());
			h_peer_ppl_result[i] = thrust::raw_pointer_cast(param.peer_ppl_result[i]->data());
			h_peer_location_start[i] = thrust::raw_pointer_cast(param.peer_location_start[i]->data());
			h_peer_location_end[i] = thrust::raw_pointer_cast(param.peer_location_end[i]->data());
		}
		cudaMemcpy(d_peer_id_status,h_peer_id_status,GPU_CORES*sizeof(int2*),cudaMemcpyHostToDevice);
		cudaMemcpy(d_peer_ppl_result,h_peer_ppl_result,GPU_CORES*sizeof(int2*),cudaMemcpyHostToDevice);
		cudaMemcpy(d_peer_location_start,h_peer_location_start,GPU_CORES*sizeof(int*),cudaMemcpyHostToDevice);
		cudaMemcpy(d_peer_location_end,h_peer_location_end,GPU_CORES*sizeof(int*),cudaMemcpyHostToDevice);
		
		/* perform infection */
		multi_gpu_work<<<BLKS,THREADS_PER_BLK>>>(d_peer_id_status,d_peer_ppl_result,
			d_peer_location_start,d_peer_location_end,thrust::raw_pointer_cast(param.rand_vec->data()));
			
		cudaDeviceSynchronize();
		pthread_barrier_wait(&barrier);
		
		/* gather the updated slices back in peer order; size() is host-side
		 * metadata, so no cudaSetDevice round-trip is needed to read it */
		int copy_size[GPU_CORES];
		for(int i = 0; i < GPU_CORES; i++){
			copy_size[i] = global_param[i].peer_ppl_result[thread_id]->size();
		}
		int copy_start = 0;
		for(int i = 0; i < GPU_CORES; i++){
			cudaMemcpyAsync(
				thrust::raw_pointer_cast(param.id_status->data()) + copy_start,
				thrust::raw_pointer_cast(global_param[i].peer_ppl_result[thread_id]->data()),
				/* was `stream[i]`, which is not in scope and did not compile */
				copy_size[i]*sizeof(int2),cudaMemcpyDefault,param.stream[i]);
			copy_start += copy_size[i];
		}
		cudaDeviceSynchronize();
	}
	
	cudaEventRecord(param.stop);
	cudaFree(d_peer_id_status);
	cudaFree(d_peer_ppl_result);
	cudaFree(d_peer_location_start);
	cudaFree(d_peer_location_end);
	return NULL;
}


int main(){
	/* Seed the host RNG that in turn seeds every device RandGen. */
	srand(time(NULL));
	
	pthread_t threads[GPU_CORES];
	pthread_barrier_init(&barrier, NULL, GPU_CORES);
	
	int ppl_per_core = NUM_PEOPLE/GPU_CORES;
	
	/* pinned staging buffers, one per device; kept so they can be freed
	 * (the original allocated them in the loop and leaked them) */
	int2* h_id_status[GPU_CORES];
	
	/* per-device setup: population, RNG states, index tables, peer buffers */
	for(int i = 0; i < GPU_CORES; i++){
		cudaSetDevice(i);
		global_param[i].id = i;
		thrust::host_vector<RandGen> h_rand_vec;
		cudaMallocHost(&h_id_status[i],ppl_per_core*sizeof(int2));
		int start_id = i * ppl_per_core;
		for(int j = 0; j < ppl_per_core; j++){
			h_id_status[i][j].x = j + start_id; // globally unique person id
			h_id_status[i][j].y = 0; // not infected
			h_rand_vec.push_back(RandGen(rand(),rand(),rand(),rand()));
		}
		
		global_param[i].id_status = new thrust::device_vector<int2>(ppl_per_core);
		global_param[i].ppl_location = new thrust::device_vector<int>(ppl_per_core);
		global_param[i].rand_vec = new thrust::device_vector<RandGen>(ppl_per_core);
		global_param[i].location_start = new thrust::device_vector<int>(NUM_LOCATION);
		global_param[i].location_end = new thrust::device_vector<int>(NUM_LOCATION);
		
		for(int j = 0; j < GPU_CORES; j++){
			global_param[i].peer_id_status[j] = new thrust::device_vector<int2>();
			global_param[i].peer_ppl_result[j] = new thrust::device_vector<int2>();
			global_param[i].peer_location_start[j] = new thrust::device_vector<int>(NUM_LOCATION/GPU_CORES);
			global_param[i].peer_location_end[j] = new thrust::device_vector<int>(NUM_LOCATION/GPU_CORES);
		}
		
		cudaMemcpy(
			thrust::raw_pointer_cast(global_param[i].id_status->data()),
			h_id_status[i],ppl_per_core*sizeof(int2),cudaMemcpyHostToDevice);
		cudaMemcpy(
			thrust::raw_pointer_cast(global_param[i].rand_vec->data()),
			thrust::raw_pointer_cast(h_rand_vec.data()),ppl_per_core*sizeof(RandGen),cudaMemcpyHostToDevice);
	}
	
	cout << "starting" << endl;
	struct timeval start, finish;
	
	gettimeofday(&start,NULL);
	
	for(int i = 0; i < GPU_CORES; i++){
		pthread_create(&threads[i],NULL,infect_kernel,(void*)&global_param[i]);
	}
	
	for(int i = 0; i < GPU_CORES; i++){
		pthread_join(threads[i],NULL);
	}
	
	/* drain any work still queued on each device; cudaThreadSynchronize is
	 * deprecated, cudaDeviceSynchronize is its replacement */
	for(int i = 0; i < GPU_CORES; i++){
		cudaSetDevice(i);
		cudaDeviceSynchronize();
	}
	
	gettimeofday(&finish,NULL);
	double t1 = (double)start.tv_sec + ((double)start.tv_usec/1000000.0);
	double t2 = (double)finish.tv_sec + ((double)finish.tv_usec/1000000.0);
	
	cout << "cpu takes time: " << t2 - t1 << endl;
	
	/* tear down per-device state; select the owning device before freeing */
	for(int i = 0; i < GPU_CORES; i++){
		cudaSetDevice(i);
		delete global_param[i].id_status;
		delete global_param[i].ppl_location;
		delete global_param[i].rand_vec;
		delete global_param[i].location_start;
		delete global_param[i].location_end;
		for(int j = 0; j < GPU_CORES; j++){
			delete global_param[i].peer_id_status[j];
			delete global_param[i].peer_ppl_result[j];
			delete global_param[i].peer_location_start[j];
			delete global_param[i].peer_location_end[j];
		}
		cudaFreeHost(h_id_status[i]);
	}
	pthread_barrier_destroy(&barrier);
	
	return 0;
}