#include "mpi.h"
#include <algorithm>
#include <list>
#include <map>
#include <vector>
#include "stdio.h"
#include <iostream>
using namespace std;


// A single-master MPI MapReduce scheduler.
//
// Rank 0 owns the input (set_inputs), splits it across all ranks, and later
// gathers/redistributes intermediate results.  Users subclass and implement
// map() (calling emit_intermediate) and reduce() (calling emit).  run() must
// be called collectively on every rank; final results are gathered on rank 0
// and read with get_outputs().
//
// Template parameters:
//   InputDataT        - element type of the raw input array
//   OutputKeyT        - intermediate/final key type (must be trivially copyable:
//                       it is shipped with MPI_BYTE)
//   MapOutputValT     - value type emitted by map() (trivially copyable)
//   ReduceOutputValT  - value type emitted by reduce() (trivially copyable)
template <typename InputDataT, typename OutputKeyT, typename MapOutputValT, typename ReduceOutputValT>
class MapReduceScheduler{
public:

	typedef unsigned int UINT;

	// Final (key, value) pair as produced by the reduce phase.
	struct KeyValT {
		KeyValT(){};
		KeyValT(const OutputKeyT &k, const ReduceOutputValT &v):key(k),val(v){};
		OutputKeyT key;
		ReduceOutputValT val;
	};

	// Intermediate (key, value) pair as produced by the map phase.
	struct InterKeyValT {
		InterKeyValT(){}
		InterKeyValT(const OutputKeyT& k, const MapOutputValT& v):key(k),val(v){};
		OutputKeyT key;
		MapOutputValT val;
	};

	typedef std::list<MapOutputValT> MapOutputValsT;
	typedef std::map<OutputKeyT, MapOutputValsT> InterKeyValsMapT;

	typedef std::list<ReduceOutputValT> ReduceOutputValsT;
	typedef std::map<OutputKeyT, ReduceOutputValT> ReduceKeyValsMapT;

	typedef std::map<OutputKeyT, MapOutputValsT> ReduceInputMap;
	typedef typename MapOutputValsT::const_iterator MapOutputValIter;
	typedef typename ReduceKeyValsMapT::const_iterator ReduceOutputValIter;

public:
	int self_rank;   // this process's rank in MPI_COMM_WORLD
	int numprocs;    // total number of processes in MPI_COMM_WORLD

#ifdef DEBUG
	// Dumps this rank's slice of the input (requires operator<< on InputDataT).
	void print_input(int rank, InputDataT* data, UINT size){
		cout<<"rank: "<<rank<<" input:";
		for(UINT i = 0; i < size; i++){
			cout<<data[i]<<" ";
		}
		cout<<endl;
	}

	// Dumps an intermediate key -> values map (requires operator<< on key/value).
	void print_mapout(int rank, InterKeyValsMapT& m){
		cout<<"rank: "<<rank<<" mapout:"<<endl;
		for(typename InterKeyValsMapT::const_iterator iter = m.begin(); iter != m.end(); ++iter){
			cout<<"key:"<<iter->first<<"\t";
			for(typename MapOutputValsT::const_iterator it = iter->second.begin(); it != iter->second.end(); ++it){
				cout<<*it<<"\t";
			}
			cout<<endl;
		}
		cout<<endl;
	}

	// Dumps the per-rank reduce-task quota table.
	void print_task(int rank, int* task, int nprocs){
		cout<<"rank: "<<rank<<" task_count:";
		for(int i = 0; i < nprocs; i++){
			cout<<task[i]<<" ";
		}
		cout<<endl;
	}
#endif

public:
	MapReduceScheduler(int rank, int procs){
		self_rank = rank;
		numprocs = procs;
		// Initialize pointers so the destructor is safe even if run() is
		// never called (the original left these indeterminate).
		mInputData = 0;
		mInputDataSize = 0;
		splitedInputData = 0;
	}

	virtual ~MapReduceScheduler(){
		// Fix: splitedInputData (allocated in run()) was leaked.
		// delete[] on a null pointer is a no-op.
		delete[] splitedInputData;
	}

	// Registers the global input array.  Only meaningful on rank 0; the
	// scheduler does not take ownership of ptr.
	void set_inputs(InputDataT* ptr, const UINT size){
		// We should always set input data in rank 0!!!
		if(self_rank == 0) {
			mInputData = ptr;
			mInputDataSize = size;
		}
	}

	// Runs the whole pipeline: split -> map -> gather -> redistribute ->
	// reduce -> gather results.  Collective: every rank must call it, and the
	// sequence of matched MPI sends/receives below depends on that.
	void run(){
		MPI_Status status;
		// Fix: the original passed status.MPI_TAG as the send tag before
		// status was ever filled in by a receive (undefined value).  All
		// receives use MPI_ANY_TAG, so a constant tag preserves pairing.
		const int TAG = 0;

		// ---- Phase 1: split the input and scatter the slices -------------
		int recv_size;
		if(self_rank == 0){
			// Rank 0 keeps the first even-sized chunk (everything when alone).
			recv_size = (numprocs == 1) ? (int)mInputDataSize : (int)(mInputDataSize / numprocs);
			splitedInputData = new InputDataT[recv_size];
			std::copy(mInputData, mInputData + recv_size, splitedInputData);
			for(int i = 1; i < numprocs; i++){
				int start_index = mInputDataSize / numprocs * i;
				// The last rank absorbs the remainder of an uneven split.
				int end_index = (i == numprocs-1) ? (int)mInputDataSize : (int)(mInputDataSize / numprocs * (i+1));
				int send_size = end_index - start_index;
				MPI_Send(&send_size, 1, MPI_INT, i, TAG, MPI_COMM_WORLD);
				MPI_Send(mInputData + start_index, sizeof(InputDataT)*send_size, MPI_BYTE, i, TAG, MPI_COMM_WORLD);
			}
		}else{
			MPI_Recv(&recv_size, 1, MPI_INT, 0, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
			splitedInputData = new InputDataT[recv_size];
			MPI_Recv((void*)splitedInputData, sizeof(InputDataT)*recv_size, MPI_BYTE, 0, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
		}

#ifdef DEBUG
		print_input(self_rank, splitedInputData, recv_size);
#endif
#ifdef	LOG
		printf("rank: %d phase 1:split data end\n",self_rank);
#endif

		// ---- Phase 2: user map() fills interMap via emit_intermediate ----
		map(splitedInputData, recv_size);

#ifdef LOG
		printf("rank: %d phase 2:map  end\n",self_rank);
#endif

		// ---- Phase 3: gather every rank's intermediate map onto rank 0 ---
		if(self_rank == 0){
			for(int i = 1; i < numprocs; i++){
				int map_size;
				MPI_Recv(&map_size, 1, MPI_INT, i, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
				for(int j = 0; j < map_size; j++){
					OutputKeyT key;
					MPI_Recv(&key, sizeof(OutputKeyT), MPI_BYTE, i, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
					int content_size;
					MPI_Recv(&content_size, 1, MPI_INT, i, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
					MapOutputValT* content = new MapOutputValT[content_size];
					MPI_Recv((void*)content, sizeof(MapOutputValT)*content_size, MPI_BYTE, i, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
					// Merge behind rank 0's own locally-emitted values.
					interMap[key].insert(interMap[key].end(), content, content + content_size);
					delete[] content;  // fix: buffer was leaked
				}
			}
		}else{
			int map_size = interMap.size();
			MPI_Send(&map_size, 1, MPI_INT, 0, TAG, MPI_COMM_WORLD);
			for(typename InterKeyValsMapT::const_iterator iter = interMap.begin(); iter != interMap.end(); ++iter){
				const MapOutputValsT& vals = iter->second;  // fix: was a full list copy
				int content_size = vals.size();
				MapOutputValT* content = new MapOutputValT[content_size];
				std::copy(vals.begin(), vals.end(), content);
				MPI_Send((void*)&iter->first, sizeof(OutputKeyT), MPI_BYTE, 0, TAG, MPI_COMM_WORLD);
				MPI_Send(&content_size, 1, MPI_INT, 0, TAG, MPI_COMM_WORLD);
				// Fix: payload was sized with sizeof(OutputKeyT) while the
				// matching receive above uses sizeof(MapOutputValT).
				MPI_Send((void*)content, sizeof(MapOutputValT)*content_size, MPI_BYTE, 0, TAG, MPI_COMM_WORLD);
				delete[] content;  // fix: buffer was leaked
			}
		}

#ifdef DEBUG
		if(self_rank == 0){
			print_mapout(self_rank, interMap);
		}
#endif

#ifdef	LOG
		printf("rank: %d phase 3:gather map output  end\n",self_rank);
#endif

		// ---- Phase 4: assign reduce tasks (keys) to ranks ----------------
		int* task_count = new int[numprocs];
		std::fill_n(task_count, numprocs, 0);

		if(self_rank == 0) {
			int map_size = interMap.size();
			if(map_size < numprocs){
				// Fewer keys than ranks: one key each for the first map_size ranks.
				std::fill_n(task_count, map_size, 1);
			}else{
				int average_task_count = map_size / numprocs;
				std::fill_n(task_count, numprocs - 1, average_task_count);
				// The last rank also takes the division remainder.
				task_count[numprocs - 1] = average_task_count + map_size % numprocs;
			}
		}

		MPI_Bcast(task_count, numprocs, MPI_INT, 0, MPI_COMM_WORLD);

#ifdef DEBUG
		print_task(self_rank, task_count, numprocs);
#endif

		if(self_rank == 0) {
			int current_send_proc = 0;
			for(typename InterKeyValsMapT::const_iterator iter = interMap.begin(); iter != interMap.end(); ++iter){
				// Skip ranks whose quota is exhausted (a while, not an if,
				// so zero-quota ranks can never receive work).
				while(task_count[current_send_proc] == 0) {
					current_send_proc++;
				}

				if(current_send_proc == 0){
					// Rank 0's own share stays local.
					reduceInputMap[iter->first] = iter->second;
				}else{
					int content_size = iter->second.size();
					MapOutputValT* content = new MapOutputValT[content_size];
					std::copy(iter->second.begin(), iter->second.end(), content);
					MPI_Send((void*)&iter->first, sizeof(OutputKeyT), MPI_BYTE, current_send_proc, TAG, MPI_COMM_WORLD);
					MPI_Send(&content_size, 1, MPI_INT, current_send_proc, TAG, MPI_COMM_WORLD);
					// Fix: payload carries MapOutputValT elements, so it must
					// be sized with sizeof(MapOutputValT), not sizeof(OutputKeyT).
					MPI_Send((void*)content, sizeof(MapOutputValT)*content_size, MPI_BYTE, current_send_proc, TAG, MPI_COMM_WORLD);
					delete[] content;  // fix: buffer was leaked
				}
				task_count[current_send_proc]--;
			}
		}else{
			for(int j = 0; j < task_count[self_rank]; j++){
				OutputKeyT key;
				MPI_Recv(&key, sizeof(OutputKeyT), MPI_BYTE, 0, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
				int content_size;
				MPI_Recv(&content_size, 1, MPI_INT, 0, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
				MapOutputValT* content = new MapOutputValT[content_size];
				// Fix: receive size used sizeof(OutputKeyT) for MapOutputValT data.
				MPI_Recv((void*)content, sizeof(MapOutputValT)*content_size, MPI_BYTE, 0, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
				reduceInputMap[key].insert(reduceInputMap[key].end(), content, content + content_size);
				delete[] content;  // fix: buffer was leaked
			}
		}
		delete[] task_count;  // fix: was leaked

#ifdef LOG
		printf("rank: %d phase 4:split map results end\n",self_rank);
#endif

		// ---- Phase 5: user reduce() fills partResult via emit ------------
		for(typename ReduceInputMap::const_iterator iter = reduceInputMap.begin(); iter != reduceInputMap.end(); ++iter){
			// emit() has no key parameter, so remember the key being reduced.
			currentReduceKey = iter->first;
			reduce(iter->first, iter->second.begin(), iter->second.end());
		}

#ifdef LOG
		printf("rank: %d phase 5:reduce  end\n",self_rank);
#endif

		// ---- Phase 6: gather every rank's reduce output onto rank 0 ------
		if(self_rank == 0){
			for(int i = 1; i < numprocs; i++){
				int result_size;
				MPI_Recv(&result_size, 1, MPI_INT, i, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
				for(int j = 0; j < result_size; j++) {
					OutputKeyT key;
					ReduceOutputValT value;
					MPI_Recv(&key, sizeof(OutputKeyT), MPI_BYTE, i, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
					MPI_Recv(&value, sizeof(ReduceOutputValT), MPI_BYTE, i, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
					partResult[key] = value;
				}
			}
		}else{
			int result_size = partResult.size();
			MPI_Send(&result_size, 1, MPI_INT, 0, TAG, MPI_COMM_WORLD);
			for(typename ReduceKeyValsMapT::const_iterator iter = partResult.begin(); iter != partResult.end(); ++iter){
				ReduceOutputValT val = iter->second;
				MPI_Send((void*)&iter->first, sizeof(OutputKeyT), MPI_BYTE, 0, TAG, MPI_COMM_WORLD);
				MPI_Send(&val, sizeof(ReduceOutputValT), MPI_BYTE, 0, TAG, MPI_COMM_WORLD);
			}
		}

#ifdef LOG
		printf("rank: %d phase 6:gather reduce results end\n",self_rank);
#endif

	}

	// Returns the reduce results.  Complete only on rank 0 after run();
	// other ranks see just their local share.
	ReduceKeyValsMapT get_outputs() const {
		return partResult;
	}

	// Called from user map() implementations to record an intermediate pair.
	void emit_intermediate(const OutputKeyT& key, const MapOutputValT& val){
		interMap[key].push_back(val);
	}

	// Called from user reduce() implementations to record the result for the
	// key currently being reduced (tracked in currentReduceKey by run()).
	void emit(const ReduceOutputValT& val){
		partResult[currentReduceKey] = val;
	}

	// User hook: process a slice of input, calling emit_intermediate().
	virtual void map(const InputDataT*, const UINT)=0;
	// User hook: fold one key's value range into a result, calling emit().
	virtual void reduce(const OutputKeyT& ,const MapOutputValIter& valBegin, const MapOutputValIter& valEnd)=0;
private:
	// Rank-0 only: the global input (not owned) and its length.
	InputDataT*	mInputData;
	UINT mInputDataSize;

	// Keys (with their gathered value lists) assigned to this rank for reduce.
	ReduceInputMap reduceInputMap;

	// This rank's slice of the input; owned, freed in the destructor.
	InputDataT* splitedInputData;
	// Intermediate key -> values map filled by emit_intermediate().
	InterKeyValsMapT interMap;

	// Key currently being reduced; consumed by emit().
	OutputKeyT currentReduceKey;

	// Reduce output (local per rank; global on rank 0 after phase 6).
	ReduceKeyValsMapT partResult;
};
