#include "mpi.h"
#include <algorithm>
#include <list>
#include <map>
#include <vector>
#include "stdio.h"
#include <iostream>
using namespace std;


// A simple master/worker MapReduce framework on top of MPI.
//
// Rank 0 owns the input array, splits it across all ranks, each rank runs the
// user-supplied map(), intermediate pairs are gathered back to rank 0,
// re-partitioned by key across all ranks, each rank runs the user-supplied
// reduce(), and the final (key, value) results are gathered on rank 0.
//
// NOTE: keys and values travel over MPI as raw bytes (MPI_BYTE), so
// InputDataT, OutputKeyT, MapOutputValT and ReduceOutputValT must all be
// trivially copyable (no std::string, no pointers to owned data).
template <typename InputDataT, typename OutputKeyT, typename MapOutputValT, typename ReduceOutputValT>
class MapReduceScheduler{
	public: 

		typedef unsigned int UINT;

		// Final (key, value) pair produced by the reduce phase.
		struct KeyValT {
			KeyValT(){}
			KeyValT(const OutputKeyT &k, const ReduceOutputValT &v):key(k),val(v){}
			OutputKeyT key;
			ReduceOutputValT val;	
		};

		// Intermediate (key, value) pair emitted by the map phase.
		struct InterKeyValT {
			InterKeyValT(){}
			InterKeyValT(const OutputKeyT& k, const MapOutputValT& v):key(k),val(v){}
			OutputKeyT key;
			MapOutputValT val;
		};


		typedef std::list<MapOutputValT> MapOutputValsT;   
		typedef std::map<OutputKeyT, MapOutputValsT> InterKeyValsMapT;

		typedef std::list<ReduceOutputValT> ReduceOutputValsT;
		typedef std::map<OutputKeyT, ReduceOutputValT> ReduceKeyValsMapT;

		typedef std::map<OutputKeyT, MapOutputValsT> ReduceInputMap;
		typedef typename MapOutputValsT::const_iterator MapOutputValIter;
		typedef typename ReduceKeyValsMapT::const_iterator ReduceOutputValIter;


	public:
		int self_rank;   // MPI rank of this process
		int numprocs;    // total number of processes in MPI_COMM_WORLD


#ifdef DEBUG
		// Dump the local input slice (requires InputDataT to be streamable).
		void print_input(int rank,InputDataT* data, UINT size){
			cout<<"rank: "<<rank<<" input:";
			for(UINT i = 0; i < size; i++){
				cout<<data[i]<<" ";
			}
			cout<<endl;
		}

		// Dump the intermediate key -> [values] map.
		void print_mapout(int rank,InterKeyValsMapT& map){
			cout<<"rank: "<<rank<<" mapout:"<<endl;
			for(typename InterKeyValsMapT::const_iterator iter = map.begin(); iter!= map.end(); ++iter){
				cout<<"key:"<<iter->first<<"\t";
				for(typename MapOutputValsT::const_iterator it = iter->second.begin();it != iter->second.end();++it){
					cout<<*it<<"\t";
				}
				cout<<endl;
			}
			cout<<endl;
		}

		// Dump the per-rank task-count table computed on rank 0.
		void print_task(int rank, int* task,int numprocs){
			cout<<"rank: "<<rank<<" task_count:";
			for(int i = 0; i < numprocs; i++){
				cout<<task[i]<<" ";
			}
			cout<<endl;
		}

#endif

	public:
		// rank:  this process's rank (as returned by MPI_Comm_rank).
		// procs: total process count (as returned by MPI_Comm_size).
		MapReduceScheduler(int rank,int procs){
			self_rank = rank;
			numprocs = procs;
			// BUGFIX: initialize pointer/size members so run() without
			// set_inputs() does not read indeterminate values.
			mInputData = 0;
			mInputDataSize = 0;
			splitedInputData = 0;
		}


		virtual ~MapReduceScheduler(){}

		// Register the input array. Only honored on rank 0; the pointer is
		// borrowed (caller keeps ownership) and must stay valid through run().
		void set_inputs(InputDataT* ptr, const UINT size){
			// We should always set input data in rank 0!!!
			if(self_rank == 0) {
				mInputData = ptr;
				mInputDataSize = size;
			}
		}

		// Execute the full MapReduce pipeline. Collective: every rank in
		// MPI_COMM_WORLD must call run(). Results land on rank 0
		// (see get_outputs()).
		void run(){
			MPI_Status status;
#ifdef TIME
			double t1,t2,t3,t4,t5,t6,t7;
#endif

#ifdef LOG
			printf("rank: %d phase 1:split data begin\n",self_rank);
#endif

#ifdef TIME
			t1 = MPI_Wtime();
#endif
			//phase 1: split data and send them to the different ranks
			int* task_count = new int[numprocs];
			int recv_size;

			std::fill_n(task_count, numprocs,0);

			if(self_rank == 0) {
				int map_size = mInputDataSize;
				if(map_size < numprocs){
					// fewer items than ranks: one item each, trailing ranks idle
					std::fill_n(task_count,map_size,1);
				}else{
					int average_task_count = map_size / numprocs;
					// the last rank also absorbs the division remainder
					int task_of_last_procs = map_size % numprocs + average_task_count;
					std::fill_n(task_count,numprocs - 1 ,average_task_count);
					task_count[numprocs - 1] = task_of_last_procs;
				}   
			}   

			MPI_Scatter(task_count,1,MPI_INT,&recv_size,1,MPI_INT,0,MPI_COMM_WORLD);

			if(self_rank == 0){
				// rank 0 maps its leading slice in place; no copy needed
				splitedInputData = mInputData;
				int start_index = recv_size;
				for(int i = 1 ; i < numprocs ; i++){
					// BUGFIX: use a fixed tag (0); the old code passed
					// status.MPI_TAG from an UNINITIALIZED MPI_Status.
					MPI_Send(mInputData+start_index,sizeof(InputDataT)*task_count[i],MPI_BYTE,i,0,MPI_COMM_WORLD);
					start_index += task_count[i];
				}
			}else {
				splitedInputData = new InputDataT[recv_size];
				MPI_Recv((void*)splitedInputData, sizeof(InputDataT)*recv_size, MPI_BYTE, 0, MPI_ANY_TAG, MPI_COMM_WORLD,&status);
			}

#ifdef TIME
			t2 = MPI_Wtime();
#endif

#ifdef DEBUG
			print_input(self_rank,splitedInputData,recv_size);
#endif 
#ifdef	LOG
			printf("rank: %d phase 1:split data end\n",self_rank);
			printf("rank: %d phase 2:map begin\n",self_rank);
#endif

			// phase 2: user-defined map fills interMap via emit_intermediate()
			map(splitedInputData,recv_size);

			if(self_rank != 0){
				// BUGFIX: the worker's private copy of the input slice was
				// leaked; rank 0's slice aliases mInputData (caller-owned).
				delete [] splitedInputData;
				splitedInputData = 0;
			}

#ifdef TIME
			t3 = MPI_Wtime();
#endif

#ifdef LOG
			printf("rank: %d phase 2:map end\n",self_rank);
			printf("rank: %d phase 3:gather map output begin\n",self_rank);
#endif
			// phase 3: gather every rank's intermediate pairs onto rank 0.
			// Each worker sends, per key: key bytes, value count, value bytes.
			int map_size = interMap.size();
			MPI_Gather(&map_size,1,MPI_INT,task_count,1,MPI_INT,0,MPI_COMM_WORLD);

			if(self_rank == 0){
				for(int i = 1;i < numprocs; i++) {
					for(int j =0;j < task_count[i]; j++){
						OutputKeyT key;
						MPI_Recv(&key,sizeof(OutputKeyT),MPI_BYTE,i,MPI_ANY_TAG,MPI_COMM_WORLD,&status);
						int content_size;
						MPI_Recv(&content_size,1,MPI_INT,i,MPI_ANY_TAG,MPI_COMM_WORLD,&status);
						MapOutputValT* content = new MapOutputValT[content_size];
						MPI_Recv((void*)content,sizeof(MapOutputValT)*content_size,MPI_BYTE,i,MPI_ANY_TAG,MPI_COMM_WORLD,&status);
						interMap[key].insert(interMap[key].end(),content,&content[content_size]);
						delete [] content;   // BUGFIX: receive buffer was leaked
					}
				}
			}else {
				for(typename InterKeyValsMapT::const_iterator iter = interMap.begin(); iter != interMap.end(); ++iter){
					// const reference: the old code copied the whole list
					const MapOutputValsT& vals = iter->second;
					int content_size = vals.size();
					MapOutputValT* content = new MapOutputValT[content_size];
					copy(vals.begin(),vals.end(),content);
					MPI_Send((void*)&iter->first,sizeof(OutputKeyT),MPI_BYTE,0,0,MPI_COMM_WORLD);
					MPI_Send(&content_size,1,MPI_INT,0,0,MPI_COMM_WORLD);
					// BUGFIX: payload elements are MapOutputValT, not
					// OutputKeyT — the old sizeof sent the wrong byte count
					// whenever the two types differ in size.
					MPI_Send((void*)content,sizeof(MapOutputValT)*content_size,MPI_BYTE,0,0,MPI_COMM_WORLD);
					delete [] content;   // BUGFIX: send buffer was leaked
				}
			}

#ifdef TIME
			t4 = MPI_Wtime();
#endif
#ifdef DEBUG
			if(self_rank==0){
				print_mapout(self_rank,interMap);
			}
#endif

#ifdef	LOG
			printf("rank: %d phase 3:gather map output end\n",self_rank);
			printf("rank: %d phase 4:split map results begin\n",self_rank);
#endif
			// phase 4: partition the merged intermediate keys across ranks
			// for the reduce phase (same splitting rule as phase 1).
			std::fill_n(task_count, numprocs,0);

			if(self_rank == 0) {
				int key_count = interMap.size();
				if(key_count < numprocs){
					std::fill_n(task_count,key_count,1);
				}else{
					int average_task_count = key_count / numprocs;
					int task_of_last_procs = key_count % numprocs + average_task_count;
					std::fill_n(task_count,numprocs - 1 ,average_task_count);
					task_count[numprocs - 1] = task_of_last_procs;
				}
			}

			MPI_Scatter(task_count,1,MPI_INT,&recv_size,1,MPI_INT,0,MPI_COMM_WORLD);

#ifdef DEBUG
			if(self_rank == 0) {
				print_task(self_rank,task_count,numprocs);
			}
#endif

			if(self_rank == 0) {
				int current_send_proc = 0;
				for(typename InterKeyValsMapT::const_iterator iter = interMap.begin(); iter != interMap.end(); ++iter ){
					// BUGFIX: skip ALL ranks whose quota is exhausted, not
					// just one (the old `if` could stall on consecutive zeros)
					while(current_send_proc < numprocs - 1 && task_count[current_send_proc] == 0) {
						current_send_proc ++;
					}

					if(current_send_proc == 0){
						// rank 0 keeps its own share locally, no send needed
						reduceInputMap[iter->first] = iter->second;
					}else{
						int content_size = iter->second.size();
						MapOutputValT* content = new MapOutputValT[content_size];
						copy(iter->second.begin(),iter->second.end(),content);
						MPI_Send((void*)&iter->first,sizeof(OutputKeyT),MPI_BYTE,current_send_proc,0,MPI_COMM_WORLD);
						MPI_Send(&content_size,1,MPI_INT,current_send_proc,0,MPI_COMM_WORLD);
						// BUGFIX: sizeof(MapOutputValT), not sizeof(OutputKeyT)
						MPI_Send((void*)content,sizeof(MapOutputValT)*content_size,MPI_BYTE,current_send_proc,0,MPI_COMM_WORLD);
						delete [] content;   // BUGFIX: send buffer was leaked
					}
					task_count[current_send_proc] --;
				}
			}else{
				for(int j=0 ;j<recv_size ;j++){
					OutputKeyT key;
					MPI_Recv(&key,sizeof(OutputKeyT),MPI_BYTE,0,MPI_ANY_TAG,MPI_COMM_WORLD,&status);
					int content_size;
					MPI_Recv(&content_size,1,MPI_INT,0,MPI_ANY_TAG,MPI_COMM_WORLD,&status);
					MapOutputValT* content = new MapOutputValT[content_size];
					// BUGFIX: receive count must match the sender's
					// sizeof(MapOutputValT)*content_size bytes
					MPI_Recv((void*)content,sizeof(MapOutputValT)*content_size,MPI_BYTE,0,MPI_ANY_TAG,MPI_COMM_WORLD,&status);
					reduceInputMap[key].insert(reduceInputMap[key].begin(),content,&content[content_size]);
					delete [] content;   // BUGFIX: receive buffer was leaked
				}
			}

#ifdef TIME
			t5 = MPI_Wtime();
#endif
#ifdef LOG
			printf("rank: %d phase 4:split map results end\n",self_rank);
			printf("rank: %d phase 5:reduce begin\n",self_rank);
#endif 
			// phase 5: user-defined reduce; currentReduceKey lets emit()
			// know which key the current reduce() call is producing for.
			for(typename ReduceInputMap::const_iterator iter = reduceInputMap.begin(); iter != reduceInputMap.end();++iter){
				currentReduceKey = iter->first;
				reduce(iter->first,iter->second.begin(),iter->second.end());
			}

#ifdef TIME
			t6 = MPI_Wtime();
#endif
#ifdef LOG
			printf("rank: %d phase 5:reduce end\n",self_rank);
			printf("rank: %d phase 6:gather reduce results begin\n",self_rank);
#endif
			// phase 6: gather every rank's reduce results onto rank 0

			int result_size = partResult.size();
			MPI_Gather(&result_size,1,MPI_INT,task_count,1,MPI_INT,0,MPI_COMM_WORLD);

			if(self_rank == 0){
				for(int i = 1; i < numprocs; i++){
					for(int j=0 ; j< task_count[i]; j++) {
						OutputKeyT key;
						ReduceOutputValT value;
						MPI_Recv(&key,sizeof(OutputKeyT),MPI_BYTE,i,MPI_ANY_TAG,MPI_COMM_WORLD,&status);
						MPI_Recv(&value,sizeof(ReduceOutputValT),MPI_BYTE,i,MPI_ANY_TAG,MPI_COMM_WORLD,&status);
						partResult[key] = value;
					}
				}
			}else{
				for(typename ReduceKeyValsMapT::const_iterator iter = partResult.begin(); iter != partResult.end(); ++iter){
					ReduceOutputValT val = iter->second;
					// BUGFIX: fixed tag 0 instead of uninitialized status.MPI_TAG
					MPI_Send((void*)&iter->first,sizeof(OutputKeyT),MPI_BYTE,0,0,MPI_COMM_WORLD);
					MPI_Send(&val,sizeof(ReduceOutputValT),MPI_BYTE,0,0,MPI_COMM_WORLD);
				}
			}

			delete [] task_count;   // BUGFIX: was leaked on every run()
#ifdef TIME
			t7 = MPI_Wtime();
#endif

#ifdef LOG
			printf("rank: %d phase 6:gather reduce results end\n",self_rank);
#endif

#ifdef TIME
			if(self_rank == 0){
				printf("rank: %d split and send input time: %lfs.\n",self_rank,t2-t1);
				printf("rank: %d gather map output time: %lfs.\n",self_rank,t4-t3);
				printf("rank: %d split and send mapoutput time: %lfs.\n",self_rank,t5-t4);
				printf("rank: %d gather reduce ouput time: %lfs.\n",self_rank,t7-t6);
				printf("rank: %d run() time: %lfs.\n",self_rank,t7-t1);
			}
			printf("rank: %d map time: %lfs.\n",self_rank,t3-t2);
			printf("rank: %d reduce time: %lfs.\n",self_rank,t6-t5);
#endif

		}

		// Final results; complete only on rank 0 after run() returns
		// (other ranks hold just their local share).
		ReduceKeyValsMapT get_outputs() const {
			return partResult;
		}

		// Called from user map() to record one intermediate (key, value).
		void emit_intermediate(const OutputKeyT& key, const MapOutputValT& val){
			interMap[key].push_back(val);
		}

		// Called from user reduce() to record the result for the key
		// currently being reduced.
		void emit(const ReduceOutputValT& val){
			partResult[currentReduceKey] = val;
		}

		// User hook: map over a slice of the input, calling emit_intermediate().
		virtual void map(const InputDataT*, const UINT)=0;
		// User hook: reduce one key's value range, calling emit().
		virtual void reduce(const OutputKeyT& ,const MapOutputValIter& valBegin, const MapOutputValIter& valEnd)=0;
	private:
		// for rank 0 only: borrowed pointer to the caller's input array
		InputDataT*	mInputData;
		UINT mInputDataSize;

		// keys (and their value lists) assigned to this rank for reduce
		ReduceInputMap reduceInputMap;

		// for all ranks: this rank's slice of the input (aliases mInputData
		// on rank 0; heap-allocated and freed inside run() on workers)
		InputDataT* splitedInputData;
		// intermediate key -> values produced by the local map phase
		InterKeyValsMapT interMap;

		// the key being reduced right now; read by emit()
		OutputKeyT currentReduceKey;

		// this rank's reduce results (the global result on rank 0)
		ReduceKeyValsMapT partResult;
};
